diff --git a/v1/checkpoint-850/README.md b/v1/checkpoint-850/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..16b1eacdd9353dec380a08ee77ce6ed5ab50f12e
--- /dev/null
+++ b/v1/checkpoint-850/README.md
@@ -0,0 +1,202 @@
+---
+library_name: peft
+base_model: gotzmann/uni
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+### Framework versions
+
+- PEFT 0.10.0
\ No newline at end of file
diff --git a/v1/checkpoint-850/adapter_config.json b/v1/checkpoint-850/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3cd6dba5d79f7ca21fd4ad465cbbcac1e0960476
--- /dev/null
+++ b/v1/checkpoint-850/adapter_config.json
@@ -0,0 +1,31 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "gotzmann/uni",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 128,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 128,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "v_proj",
+ "k_proj",
+ "q_proj",
+ "o_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": true
+}
\ No newline at end of file
diff --git a/v1/checkpoint-850/adapter_model.safetensors b/v1/checkpoint-850/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..10d052d8f4748e41ad458b626bf25a2ad4914646
--- /dev/null
+++ b/v1/checkpoint-850/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e47d6773abcf5644ba6a405617bfa837a6613104bc0b8f72c19cad1e6a9469ea
+size 1048664848
diff --git a/v1/checkpoint-850/global_step850/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt b/v1/checkpoint-850/global_step850/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..a76726879d0e0c537be0e632106da2a6e59b965b
--- /dev/null
+++ b/v1/checkpoint-850/global_step850/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d235e193cd7bcd180418c87044bf4637e0d1fb7f0193a16c08d0fad834e572d4
+size 787270042
diff --git a/v1/checkpoint-850/global_step850/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt b/v1/checkpoint-850/global_step850/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..7620d4797064cdc48d2c78b8dd2a0b4b22018d00
--- /dev/null
+++ b/v1/checkpoint-850/global_step850/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:16f7e20aa03c820caf398ff4cbb9ccd788ef9e67b876770dcd1406ac4bfe78ec
+size 787270042
diff --git a/v1/checkpoint-850/global_step850/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt b/v1/checkpoint-850/global_step850/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..e3cdaa69e5a329266a0b5a18d85ef0fb36552f4e
--- /dev/null
+++ b/v1/checkpoint-850/global_step850/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:03eb49545d563d6fa3d3dc741de9de5b06fb7a626065049d71d801a89a1eb5ac
+size 787270042
diff --git a/v1/checkpoint-850/global_step850/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt b/v1/checkpoint-850/global_step850/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..faea03582b1fbff403406c1d9fb5cc323e86a05a
--- /dev/null
+++ b/v1/checkpoint-850/global_step850/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af8d3aa02a4e002edbf9ce82be11eaf4fb3a1d171e6a473267b66d53c6dc37fe
+size 787270042
diff --git a/v1/checkpoint-850/global_step850/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt b/v1/checkpoint-850/global_step850/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..24da1e5a15f9345b5b259168e7300f4ea9a52254
--- /dev/null
+++ b/v1/checkpoint-850/global_step850/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1b62bac61955e9d114f01427dc0a4c1d3b977d921308e880154e99bca10b1745
+size 787270042
diff --git a/v1/checkpoint-850/global_step850/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt b/v1/checkpoint-850/global_step850/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..2e9e7a43663f82d85714d148c3b5f9ffdd805940
--- /dev/null
+++ b/v1/checkpoint-850/global_step850/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f6d16b6d2adc76b2cde15e8ce31369a8beadfa955bc059bd98f27489def6aa87
+size 787270042
diff --git a/v1/checkpoint-850/global_step850/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt b/v1/checkpoint-850/global_step850/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..61d0305f69a265cc1bc1fcd743deaf01872af549
--- /dev/null
+++ b/v1/checkpoint-850/global_step850/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3d7190139caa8e21da1d41377db269422822ce97c71820aa74af1ae545b2ee8c
+size 787270042
diff --git a/v1/checkpoint-850/global_step850/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt b/v1/checkpoint-850/global_step850/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..6bffd2c39ddd5d5951561fb050a20a0bffc089c3
--- /dev/null
+++ b/v1/checkpoint-850/global_step850/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d102dd5497f9f2e51891051829504f9d9248cad8723addfcaa3c71d0c3c46c52
+size 787270042
diff --git a/v1/checkpoint-850/global_step850/zero_pp_rank_0_mp_rank_00_model_states.pt b/v1/checkpoint-850/global_step850/zero_pp_rank_0_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..4266102da870765feb8c6f4931dd09173cc4a635
--- /dev/null
+++ b/v1/checkpoint-850/global_step850/zero_pp_rank_0_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3c4636383f7f0687a09f235b47de5324f37846c740b1f4d010d36adad52ee512
+size 653742
diff --git a/v1/checkpoint-850/global_step850/zero_pp_rank_1_mp_rank_00_model_states.pt b/v1/checkpoint-850/global_step850/zero_pp_rank_1_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..11a7e399cbaeea9b1d0221d94e9a5aeef34ef063
--- /dev/null
+++ b/v1/checkpoint-850/global_step850/zero_pp_rank_1_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3059c7b370b27cc5e627a87a08c4b19e7cd3639b7a839379299724a3ab0d9361
+size 653742
diff --git a/v1/checkpoint-850/global_step850/zero_pp_rank_2_mp_rank_00_model_states.pt b/v1/checkpoint-850/global_step850/zero_pp_rank_2_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..eeb138a36fcfd340418b4d32cb974f13b7697694
--- /dev/null
+++ b/v1/checkpoint-850/global_step850/zero_pp_rank_2_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c711e98fb08c163203dc87b78751464a86ead4b2fd198f8789cc169707d4777d
+size 653742
diff --git a/v1/checkpoint-850/global_step850/zero_pp_rank_3_mp_rank_00_model_states.pt b/v1/checkpoint-850/global_step850/zero_pp_rank_3_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..25032ea96c6490ce3d796172a32126864cdf416d
--- /dev/null
+++ b/v1/checkpoint-850/global_step850/zero_pp_rank_3_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f780e605edce29c8a4c8cabf71af90b8ab02bc1a9ff4d499a57f7bc8bddf73ae
+size 653742
diff --git a/v1/checkpoint-850/global_step850/zero_pp_rank_4_mp_rank_00_model_states.pt b/v1/checkpoint-850/global_step850/zero_pp_rank_4_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..21daac9e3b0da385f2b5d5c010418cf2ac2c471b
--- /dev/null
+++ b/v1/checkpoint-850/global_step850/zero_pp_rank_4_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7a80187552ff311ab5f3292c94b850ebeab169f3a8accf72fb9d556dd3895e7a
+size 653742
diff --git a/v1/checkpoint-850/global_step850/zero_pp_rank_5_mp_rank_00_model_states.pt b/v1/checkpoint-850/global_step850/zero_pp_rank_5_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..11c0875a0ca935cb9c9f02b372af124165e9bbfe
--- /dev/null
+++ b/v1/checkpoint-850/global_step850/zero_pp_rank_5_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2879bde5625128d1ce9d91bb79a25eb3ea12549772c9df8f8c4c89025fffd9dc
+size 653742
diff --git a/v1/checkpoint-850/global_step850/zero_pp_rank_6_mp_rank_00_model_states.pt b/v1/checkpoint-850/global_step850/zero_pp_rank_6_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..7e9d7004a780769cfd567a5bf98afc7f08c661ea
--- /dev/null
+++ b/v1/checkpoint-850/global_step850/zero_pp_rank_6_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2ad50c5a7d9b587b16f3ae5dfcd43af013ddb103d983b5c5638fc3329d553c91
+size 653742
diff --git a/v1/checkpoint-850/global_step850/zero_pp_rank_7_mp_rank_00_model_states.pt b/v1/checkpoint-850/global_step850/zero_pp_rank_7_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..e65614fbda6c94141248f18d46b6f71f66a72809
--- /dev/null
+++ b/v1/checkpoint-850/global_step850/zero_pp_rank_7_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0fb0b6e0853703d42cf555fd9864d4db4c2b6d04f08be900fe24218069f955a9
+size 653742
diff --git a/v1/checkpoint-850/latest b/v1/checkpoint-850/latest
new file mode 100644
index 0000000000000000000000000000000000000000..3691022819d6dd7dd441445e8bf742e36ca808cd
--- /dev/null
+++ b/v1/checkpoint-850/latest
@@ -0,0 +1 @@
+global_step850
\ No newline at end of file
diff --git a/v1/checkpoint-850/rng_state_0.pth b/v1/checkpoint-850/rng_state_0.pth
new file mode 100644
index 0000000000000000000000000000000000000000..4e5b7e2ec90fdb824c8932464c1d9068330655a7
--- /dev/null
+++ b/v1/checkpoint-850/rng_state_0.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:36d2a2034ebb05cb71c510897f2795b31164e50f17b270bc25d2be3ad9a17b22
+size 15984
diff --git a/v1/checkpoint-850/rng_state_1.pth b/v1/checkpoint-850/rng_state_1.pth
new file mode 100644
index 0000000000000000000000000000000000000000..7d8d7722fc72cab6d492b76cb99c8177dcc47544
--- /dev/null
+++ b/v1/checkpoint-850/rng_state_1.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:060dfdb1c49102cbdc8868a6031e68787601b4ccd782f3fb9b137e20c1fd2c7a
+size 15984
diff --git a/v1/checkpoint-850/rng_state_2.pth b/v1/checkpoint-850/rng_state_2.pth
new file mode 100644
index 0000000000000000000000000000000000000000..3c9f84eff30cfa9ea1feedaf262d61fb12e4cba7
--- /dev/null
+++ b/v1/checkpoint-850/rng_state_2.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af01895cb66e616591f2e4baa8dcd8151530eab133c73571ccb31c74f35422ce
+size 15984
diff --git a/v1/checkpoint-850/rng_state_3.pth b/v1/checkpoint-850/rng_state_3.pth
new file mode 100644
index 0000000000000000000000000000000000000000..6eebfb928f8e91eff0ea1645a20b5aa4465c705b
--- /dev/null
+++ b/v1/checkpoint-850/rng_state_3.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:677921992b1e0cef3aee776f245975003d22f51d9bd6ed20f248ded1deb72fa9
+size 15984
diff --git a/v1/checkpoint-850/rng_state_4.pth b/v1/checkpoint-850/rng_state_4.pth
new file mode 100644
index 0000000000000000000000000000000000000000..0866030a266c6d003cc378a9418a723f69e8ab99
--- /dev/null
+++ b/v1/checkpoint-850/rng_state_4.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d69353c629541c690c5471f8ec05fdab2bfecf3d37afaa436bc45939da6db68f
+size 15984
diff --git a/v1/checkpoint-850/rng_state_5.pth b/v1/checkpoint-850/rng_state_5.pth
new file mode 100644
index 0000000000000000000000000000000000000000..554638d77107f832d7aa51c61645ee2d6c48a36d
--- /dev/null
+++ b/v1/checkpoint-850/rng_state_5.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8e40ba6668cc03c9162c68a933d164bf38ae2d196a9a6fec03ae615491201185
+size 15984
diff --git a/v1/checkpoint-850/rng_state_6.pth b/v1/checkpoint-850/rng_state_6.pth
new file mode 100644
index 0000000000000000000000000000000000000000..964331b65172a1bcac03e4673415fa787f724268
--- /dev/null
+++ b/v1/checkpoint-850/rng_state_6.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:870968fea834e24b2e099cf3e4fe1e3fb8caf38d8f8e5b790d7d47386d4d05f5
+size 15984
diff --git a/v1/checkpoint-850/rng_state_7.pth b/v1/checkpoint-850/rng_state_7.pth
new file mode 100644
index 0000000000000000000000000000000000000000..cd4754d65217d0f9d1f2d3334397df7a8a079652
--- /dev/null
+++ b/v1/checkpoint-850/rng_state_7.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e9e19618bee7c6ef43256fea25abe19bca88535eb1e7dc213cde8929ae4e8180
+size 15984
diff --git a/v1/checkpoint-850/scheduler.pt b/v1/checkpoint-850/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..58ac1ab9bfba8e8a2e6e6e316e2f5c7c070cb178
--- /dev/null
+++ b/v1/checkpoint-850/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3c4b4462e080a5c39faf9317093c20bf1f40a2d57c50836da1f781d634a5c527
+size 1064
diff --git a/v1/checkpoint-850/special_tokens_map.json b/v1/checkpoint-850/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..72ecfeeb7e14d244c936169d2ed139eeae235ef1
--- /dev/null
+++ b/v1/checkpoint-850/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "",
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/v1/checkpoint-850/tokenizer.model b/v1/checkpoint-850/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..6c00c742ce03c627d6cd5b795984876fa49fa899
--- /dev/null
+++ b/v1/checkpoint-850/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
diff --git a/v1/checkpoint-850/tokenizer_config.json b/v1/checkpoint-850/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..bb5a9f09d8c0f3c32c66fc6118fe5c76c5c6fd90
--- /dev/null
+++ b/v1/checkpoint-850/tokenizer_config.json
@@ -0,0 +1,45 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "",
+ "chat_template": "{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ '' + '### System:\\n\\n' + system_message }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '\\n\\n### Human:\\n\\n' + content }}{% elif message['role'] == 'assistant' %}{{ '\\n\\n### Assistant:\\n\\n' + content + '' }}{% endif %}{% endfor %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "",
+ "legacy": false,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "split_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "",
+ "use_default_system_prompt": false
+}
diff --git a/v1/checkpoint-850/trainer_state.json b/v1/checkpoint-850/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..f863ad1f0b4da631dc2cd65ce0f36b492aedd814
--- /dev/null
+++ b/v1/checkpoint-850/trainer_state.json
@@ -0,0 +1,5971 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 1.5546410608139003,
+ "eval_steps": 500,
+ "global_step": 850,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.0,
+ "grad_norm": 0.849355824164473,
+ "learning_rate": 4.878048780487805e-07,
+ "loss": 1.3655,
+ "step": 1
+ },
+ {
+ "epoch": 0.0,
+ "grad_norm": 10.01567518957158,
+ "learning_rate": 9.75609756097561e-07,
+ "loss": 1.5767,
+ "step": 2
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.6466000875559635,
+ "learning_rate": 1.4634146341463414e-06,
+ "loss": 1.3913,
+ "step": 3
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.6644565932010504,
+ "learning_rate": 1.951219512195122e-06,
+ "loss": 1.3218,
+ "step": 4
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.571354207588475,
+ "learning_rate": 2.4390243902439027e-06,
+ "loss": 1.3597,
+ "step": 5
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.31036262839244955,
+ "learning_rate": 2.926829268292683e-06,
+ "loss": 1.2832,
+ "step": 6
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.2622135027188184,
+ "learning_rate": 3.414634146341464e-06,
+ "loss": 1.2161,
+ "step": 7
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.296824630261661,
+ "learning_rate": 3.902439024390244e-06,
+ "loss": 1.2985,
+ "step": 8
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.2557267467361569,
+ "learning_rate": 4.390243902439025e-06,
+ "loss": 1.3175,
+ "step": 9
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.23418939513890769,
+ "learning_rate": 4.8780487804878055e-06,
+ "loss": 1.2617,
+ "step": 10
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.2364760983285843,
+ "learning_rate": 5.365853658536586e-06,
+ "loss": 1.3103,
+ "step": 11
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.23893034721889,
+ "learning_rate": 5.853658536585366e-06,
+ "loss": 1.2405,
+ "step": 12
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.25563593295485887,
+ "learning_rate": 6.341463414634147e-06,
+ "loss": 1.2831,
+ "step": 13
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.23239975352661665,
+ "learning_rate": 6.829268292682928e-06,
+ "loss": 1.3125,
+ "step": 14
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.3092813858209507,
+ "learning_rate": 7.317073170731707e-06,
+ "loss": 1.2422,
+ "step": 15
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.282563380367434,
+ "learning_rate": 7.804878048780489e-06,
+ "loss": 1.2453,
+ "step": 16
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.22065680088315018,
+ "learning_rate": 8.292682926829268e-06,
+ "loss": 1.2491,
+ "step": 17
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.22777800877980184,
+ "learning_rate": 8.78048780487805e-06,
+ "loss": 1.2655,
+ "step": 18
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.22145212540177928,
+ "learning_rate": 9.268292682926831e-06,
+ "loss": 1.2413,
+ "step": 19
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.22482351883112714,
+ "learning_rate": 9.756097560975611e-06,
+ "loss": 1.2653,
+ "step": 20
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.20823080508385733,
+ "learning_rate": 1.024390243902439e-05,
+ "loss": 1.2374,
+ "step": 21
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.26025492562935737,
+ "learning_rate": 1.0731707317073172e-05,
+ "loss": 1.2065,
+ "step": 22
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.2150252124176173,
+ "learning_rate": 1.1219512195121953e-05,
+ "loss": 1.2782,
+ "step": 23
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.2505915177425618,
+ "learning_rate": 1.1707317073170731e-05,
+ "loss": 1.2742,
+ "step": 24
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.20129223044786942,
+ "learning_rate": 1.2195121951219513e-05,
+ "loss": 1.3366,
+ "step": 25
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.1973508510397107,
+ "learning_rate": 1.2682926829268294e-05,
+ "loss": 1.2476,
+ "step": 26
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.27103325392437194,
+ "learning_rate": 1.3170731707317076e-05,
+ "loss": 1.2325,
+ "step": 27
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.17954976411006285,
+ "learning_rate": 1.3658536585365855e-05,
+ "loss": 1.2523,
+ "step": 28
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.22216997851088888,
+ "learning_rate": 1.4146341463414635e-05,
+ "loss": 1.3297,
+ "step": 29
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.2071458864548587,
+ "learning_rate": 1.4634146341463415e-05,
+ "loss": 1.2127,
+ "step": 30
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.18039422081622164,
+ "learning_rate": 1.5121951219512196e-05,
+ "loss": 1.2509,
+ "step": 31
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.18631254372974412,
+ "learning_rate": 1.5609756097560978e-05,
+ "loss": 1.2247,
+ "step": 32
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.18843872523649827,
+ "learning_rate": 1.6097560975609757e-05,
+ "loss": 1.195,
+ "step": 33
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.2163847267778325,
+ "learning_rate": 1.6585365853658537e-05,
+ "loss": 1.2179,
+ "step": 34
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.19687688475496104,
+ "learning_rate": 1.7073170731707317e-05,
+ "loss": 1.2763,
+ "step": 35
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.20409643064887947,
+ "learning_rate": 1.75609756097561e-05,
+ "loss": 1.253,
+ "step": 36
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.1879182661759335,
+ "learning_rate": 1.804878048780488e-05,
+ "loss": 1.2586,
+ "step": 37
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.19400648948514373,
+ "learning_rate": 1.8536585365853663e-05,
+ "loss": 1.2154,
+ "step": 38
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.1878879343148452,
+ "learning_rate": 1.902439024390244e-05,
+ "loss": 1.2304,
+ "step": 39
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.17687475469924052,
+ "learning_rate": 1.9512195121951222e-05,
+ "loss": 1.2351,
+ "step": 40
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.18223935625384885,
+ "learning_rate": 2e-05,
+ "loss": 1.2222,
+ "step": 41
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.1943061629408338,
+ "learning_rate": 2.048780487804878e-05,
+ "loss": 1.2044,
+ "step": 42
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.17027514338700078,
+ "learning_rate": 2.0975609756097564e-05,
+ "loss": 1.1548,
+ "step": 43
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.18553769630586192,
+ "learning_rate": 2.1463414634146344e-05,
+ "loss": 1.2721,
+ "step": 44
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.19732826914228765,
+ "learning_rate": 2.1951219512195124e-05,
+ "loss": 1.3097,
+ "step": 45
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.18714230986631472,
+ "learning_rate": 2.2439024390243907e-05,
+ "loss": 1.2662,
+ "step": 46
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.19988987568002223,
+ "learning_rate": 2.2926829268292683e-05,
+ "loss": 1.2904,
+ "step": 47
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.17744650133390918,
+ "learning_rate": 2.3414634146341463e-05,
+ "loss": 1.1825,
+ "step": 48
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.16576734763834533,
+ "learning_rate": 2.3902439024390246e-05,
+ "loss": 1.1858,
+ "step": 49
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.179591794065527,
+ "learning_rate": 2.4390243902439026e-05,
+ "loss": 1.2711,
+ "step": 50
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.17923464471176911,
+ "learning_rate": 2.4878048780487805e-05,
+ "loss": 1.2289,
+ "step": 51
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.18991742907836837,
+ "learning_rate": 2.536585365853659e-05,
+ "loss": 1.3097,
+ "step": 52
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.19849796137254636,
+ "learning_rate": 2.5853658536585368e-05,
+ "loss": 1.2489,
+ "step": 53
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.17452371110976383,
+ "learning_rate": 2.634146341463415e-05,
+ "loss": 1.2461,
+ "step": 54
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.17671022353085036,
+ "learning_rate": 2.682926829268293e-05,
+ "loss": 1.153,
+ "step": 55
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.36820559192096686,
+ "learning_rate": 2.731707317073171e-05,
+ "loss": 1.2431,
+ "step": 56
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.20331468526494198,
+ "learning_rate": 2.7804878048780487e-05,
+ "loss": 1.2575,
+ "step": 57
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.2402486598118377,
+ "learning_rate": 2.829268292682927e-05,
+ "loss": 1.2538,
+ "step": 58
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.2549409484173144,
+ "learning_rate": 2.878048780487805e-05,
+ "loss": 1.2065,
+ "step": 59
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.2053105349872685,
+ "learning_rate": 2.926829268292683e-05,
+ "loss": 1.2094,
+ "step": 60
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.17971910872957886,
+ "learning_rate": 2.9756097560975613e-05,
+ "loss": 1.228,
+ "step": 61
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.1885853654992973,
+ "learning_rate": 3.0243902439024392e-05,
+ "loss": 1.2286,
+ "step": 62
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.1848524571968613,
+ "learning_rate": 3.073170731707317e-05,
+ "loss": 1.2718,
+ "step": 63
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.18734105883548513,
+ "learning_rate": 3.1219512195121955e-05,
+ "loss": 1.2357,
+ "step": 64
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.17774668052121825,
+ "learning_rate": 3.170731707317074e-05,
+ "loss": 1.1509,
+ "step": 65
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.17890968008080646,
+ "learning_rate": 3.2195121951219514e-05,
+ "loss": 1.1924,
+ "step": 66
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.18249273371332375,
+ "learning_rate": 3.268292682926829e-05,
+ "loss": 1.2545,
+ "step": 67
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.21064122671902577,
+ "learning_rate": 3.3170731707317074e-05,
+ "loss": 1.2832,
+ "step": 68
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.1820064171955093,
+ "learning_rate": 3.365853658536586e-05,
+ "loss": 1.2071,
+ "step": 69
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.16996662800553433,
+ "learning_rate": 3.414634146341463e-05,
+ "loss": 1.2073,
+ "step": 70
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.1618669302922445,
+ "learning_rate": 3.4634146341463416e-05,
+ "loss": 1.1289,
+ "step": 71
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.18948744950985544,
+ "learning_rate": 3.51219512195122e-05,
+ "loss": 1.2915,
+ "step": 72
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.18326143691603383,
+ "learning_rate": 3.5609756097560976e-05,
+ "loss": 1.2238,
+ "step": 73
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.17410704510700503,
+ "learning_rate": 3.609756097560976e-05,
+ "loss": 1.1784,
+ "step": 74
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.1983667344995625,
+ "learning_rate": 3.658536585365854e-05,
+ "loss": 1.2452,
+ "step": 75
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.3416310763369357,
+ "learning_rate": 3.7073170731707325e-05,
+ "loss": 1.1972,
+ "step": 76
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.2776466983511955,
+ "learning_rate": 3.75609756097561e-05,
+ "loss": 1.3121,
+ "step": 77
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.20026129636576834,
+ "learning_rate": 3.804878048780488e-05,
+ "loss": 1.2436,
+ "step": 78
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.21064549243917835,
+ "learning_rate": 3.853658536585366e-05,
+ "loss": 1.2064,
+ "step": 79
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.22119482175714267,
+ "learning_rate": 3.9024390243902444e-05,
+ "loss": 1.2715,
+ "step": 80
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.23047133748844142,
+ "learning_rate": 3.951219512195122e-05,
+ "loss": 1.2888,
+ "step": 81
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.18741863156973176,
+ "learning_rate": 4e-05,
+ "loss": 1.248,
+ "step": 82
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.1747859810629604,
+ "learning_rate": 4.0487804878048786e-05,
+ "loss": 1.1683,
+ "step": 83
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.1896944798413341,
+ "learning_rate": 4.097560975609756e-05,
+ "loss": 1.2155,
+ "step": 84
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.18724128114363303,
+ "learning_rate": 4.1463414634146346e-05,
+ "loss": 1.2273,
+ "step": 85
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.17368125504855478,
+ "learning_rate": 4.195121951219513e-05,
+ "loss": 1.224,
+ "step": 86
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.18371141013625703,
+ "learning_rate": 4.2439024390243905e-05,
+ "loss": 1.2294,
+ "step": 87
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.1791029365673714,
+ "learning_rate": 4.292682926829269e-05,
+ "loss": 1.2895,
+ "step": 88
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.20259974283859655,
+ "learning_rate": 4.341463414634147e-05,
+ "loss": 1.1841,
+ "step": 89
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.17457456183272174,
+ "learning_rate": 4.390243902439025e-05,
+ "loss": 1.2357,
+ "step": 90
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.1815824380789748,
+ "learning_rate": 4.439024390243903e-05,
+ "loss": 1.2304,
+ "step": 91
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.17566480599583392,
+ "learning_rate": 4.4878048780487814e-05,
+ "loss": 1.242,
+ "step": 92
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.18422975005984474,
+ "learning_rate": 4.536585365853658e-05,
+ "loss": 1.2177,
+ "step": 93
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.16796781877940678,
+ "learning_rate": 4.5853658536585366e-05,
+ "loss": 1.1482,
+ "step": 94
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.18636131653783305,
+ "learning_rate": 4.634146341463415e-05,
+ "loss": 1.1758,
+ "step": 95
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.1823665700289814,
+ "learning_rate": 4.6829268292682926e-05,
+ "loss": 1.289,
+ "step": 96
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.1719900691262439,
+ "learning_rate": 4.731707317073171e-05,
+ "loss": 1.1626,
+ "step": 97
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.17937994168039778,
+ "learning_rate": 4.780487804878049e-05,
+ "loss": 1.175,
+ "step": 98
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.16631851422106986,
+ "learning_rate": 4.829268292682927e-05,
+ "loss": 1.2177,
+ "step": 99
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.19143696232800309,
+ "learning_rate": 4.878048780487805e-05,
+ "loss": 1.3071,
+ "step": 100
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.17859506638780318,
+ "learning_rate": 4.9268292682926835e-05,
+ "loss": 1.2351,
+ "step": 101
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.18381520321248196,
+ "learning_rate": 4.975609756097561e-05,
+ "loss": 1.2342,
+ "step": 102
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.17968218683773912,
+ "learning_rate": 5.0243902439024394e-05,
+ "loss": 1.2074,
+ "step": 103
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.18139489969339018,
+ "learning_rate": 5.073170731707318e-05,
+ "loss": 1.1558,
+ "step": 104
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.17366624842514394,
+ "learning_rate": 5.121951219512195e-05,
+ "loss": 1.1897,
+ "step": 105
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.16034845455223745,
+ "learning_rate": 5.1707317073170736e-05,
+ "loss": 1.179,
+ "step": 106
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.17583069577827776,
+ "learning_rate": 5.219512195121952e-05,
+ "loss": 1.1856,
+ "step": 107
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.1853758076989552,
+ "learning_rate": 5.26829268292683e-05,
+ "loss": 1.2072,
+ "step": 108
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.19597443965936462,
+ "learning_rate": 5.317073170731708e-05,
+ "loss": 1.2271,
+ "step": 109
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.1899206334098331,
+ "learning_rate": 5.365853658536586e-05,
+ "loss": 1.1961,
+ "step": 110
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.17463763837757018,
+ "learning_rate": 5.4146341463414645e-05,
+ "loss": 1.2049,
+ "step": 111
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.20431371701229986,
+ "learning_rate": 5.463414634146342e-05,
+ "loss": 1.2891,
+ "step": 112
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1814475107638498,
+ "learning_rate": 5.51219512195122e-05,
+ "loss": 1.2346,
+ "step": 113
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1883849423207823,
+ "learning_rate": 5.5609756097560974e-05,
+ "loss": 1.244,
+ "step": 114
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1857258128640568,
+ "learning_rate": 5.609756097560976e-05,
+ "loss": 1.2669,
+ "step": 115
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1740768514118401,
+ "learning_rate": 5.658536585365854e-05,
+ "loss": 1.2414,
+ "step": 116
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1919320335584178,
+ "learning_rate": 5.7073170731707317e-05,
+ "loss": 1.2886,
+ "step": 117
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.18288775167828136,
+ "learning_rate": 5.75609756097561e-05,
+ "loss": 1.1875,
+ "step": 118
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.18208588867750863,
+ "learning_rate": 5.804878048780488e-05,
+ "loss": 1.2388,
+ "step": 119
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.1743260015658331,
+ "learning_rate": 5.853658536585366e-05,
+ "loss": 1.1762,
+ "step": 120
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.17856046291517946,
+ "learning_rate": 5.902439024390244e-05,
+ "loss": 1.2888,
+ "step": 121
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.17493794870966536,
+ "learning_rate": 5.9512195121951225e-05,
+ "loss": 1.2222,
+ "step": 122
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.1909202655203384,
+ "learning_rate": 6.000000000000001e-05,
+ "loss": 1.2414,
+ "step": 123
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.18345819482834988,
+ "learning_rate": 6.0487804878048785e-05,
+ "loss": 1.2756,
+ "step": 124
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.2057069352956621,
+ "learning_rate": 6.097560975609757e-05,
+ "loss": 1.261,
+ "step": 125
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.299775882469108,
+ "learning_rate": 6.146341463414634e-05,
+ "loss": 1.2566,
+ "step": 126
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.1869687633018095,
+ "learning_rate": 6.195121951219513e-05,
+ "loss": 1.3039,
+ "step": 127
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.17747149926197442,
+ "learning_rate": 6.243902439024391e-05,
+ "loss": 1.2524,
+ "step": 128
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.17885157788044242,
+ "learning_rate": 6.29268292682927e-05,
+ "loss": 1.2455,
+ "step": 129
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.17617298187845123,
+ "learning_rate": 6.341463414634148e-05,
+ "loss": 1.2009,
+ "step": 130
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.20164176323497066,
+ "learning_rate": 6.390243902439025e-05,
+ "loss": 1.2634,
+ "step": 131
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.20459903417307612,
+ "learning_rate": 6.439024390243903e-05,
+ "loss": 1.1963,
+ "step": 132
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.1863755486334296,
+ "learning_rate": 6.487804878048781e-05,
+ "loss": 1.2387,
+ "step": 133
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.19265866140295207,
+ "learning_rate": 6.536585365853658e-05,
+ "loss": 1.2688,
+ "step": 134
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.1823425868969493,
+ "learning_rate": 6.585365853658536e-05,
+ "loss": 1.2041,
+ "step": 135
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.2016853266472781,
+ "learning_rate": 6.634146341463415e-05,
+ "loss": 1.1223,
+ "step": 136
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.17282675192463448,
+ "learning_rate": 6.682926829268293e-05,
+ "loss": 1.1879,
+ "step": 137
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.17398811693399288,
+ "learning_rate": 6.731707317073171e-05,
+ "loss": 1.2682,
+ "step": 138
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.18516916965434696,
+ "learning_rate": 6.78048780487805e-05,
+ "loss": 1.1666,
+ "step": 139
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.1852213129647933,
+ "learning_rate": 6.829268292682927e-05,
+ "loss": 1.2501,
+ "step": 140
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.17915948766591883,
+ "learning_rate": 6.878048780487805e-05,
+ "loss": 1.2264,
+ "step": 141
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.21599939417233183,
+ "learning_rate": 6.926829268292683e-05,
+ "loss": 1.2376,
+ "step": 142
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.17839304459521851,
+ "learning_rate": 6.975609756097562e-05,
+ "loss": 1.2353,
+ "step": 143
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.20826913231380875,
+ "learning_rate": 7.02439024390244e-05,
+ "loss": 1.1901,
+ "step": 144
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.20788894913361589,
+ "learning_rate": 7.073170731707318e-05,
+ "loss": 1.2577,
+ "step": 145
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.18420055842301297,
+ "learning_rate": 7.121951219512195e-05,
+ "loss": 1.1393,
+ "step": 146
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.19903048468685589,
+ "learning_rate": 7.170731707317073e-05,
+ "loss": 1.2321,
+ "step": 147
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.19074116314985748,
+ "learning_rate": 7.219512195121952e-05,
+ "loss": 1.1912,
+ "step": 148
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.2353816469403903,
+ "learning_rate": 7.26829268292683e-05,
+ "loss": 1.28,
+ "step": 149
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.21634875684769345,
+ "learning_rate": 7.317073170731708e-05,
+ "loss": 1.3312,
+ "step": 150
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.18290969006743918,
+ "learning_rate": 7.365853658536587e-05,
+ "loss": 1.2214,
+ "step": 151
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.18484243897545208,
+ "learning_rate": 7.414634146341465e-05,
+ "loss": 1.1895,
+ "step": 152
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.21882343112978872,
+ "learning_rate": 7.463414634146342e-05,
+ "loss": 1.2219,
+ "step": 153
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.19868284379241205,
+ "learning_rate": 7.51219512195122e-05,
+ "loss": 1.2176,
+ "step": 154
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.20912516312950613,
+ "learning_rate": 7.560975609756097e-05,
+ "loss": 1.242,
+ "step": 155
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.23811880045549916,
+ "learning_rate": 7.609756097560976e-05,
+ "loss": 1.2838,
+ "step": 156
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.19511077122033713,
+ "learning_rate": 7.658536585365854e-05,
+ "loss": 1.1594,
+ "step": 157
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.20094129399534238,
+ "learning_rate": 7.707317073170732e-05,
+ "loss": 1.2966,
+ "step": 158
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.19366245038292418,
+ "learning_rate": 7.75609756097561e-05,
+ "loss": 1.2246,
+ "step": 159
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.19409570223867306,
+ "learning_rate": 7.804878048780489e-05,
+ "loss": 1.2312,
+ "step": 160
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.2087258457033805,
+ "learning_rate": 7.853658536585366e-05,
+ "loss": 1.2169,
+ "step": 161
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.18765223996270428,
+ "learning_rate": 7.902439024390244e-05,
+ "loss": 1.2383,
+ "step": 162
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.20734180224147242,
+ "learning_rate": 7.951219512195122e-05,
+ "loss": 1.2587,
+ "step": 163
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.24690929540287834,
+ "learning_rate": 8e-05,
+ "loss": 1.1951,
+ "step": 164
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.2003538797619543,
+ "learning_rate": 7.999990914797545e-05,
+ "loss": 1.1982,
+ "step": 165
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.22469075613510484,
+ "learning_rate": 7.99996365923145e-05,
+ "loss": 1.2355,
+ "step": 166
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.21870100788336058,
+ "learning_rate": 7.999918233425526e-05,
+ "loss": 1.1103,
+ "step": 167
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.20939989594131886,
+ "learning_rate": 7.999854637586122e-05,
+ "loss": 1.1966,
+ "step": 168
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.43108211416237796,
+ "learning_rate": 7.999772872002132e-05,
+ "loss": 1.2882,
+ "step": 169
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.27045413432174487,
+ "learning_rate": 7.999672937044984e-05,
+ "loss": 1.2399,
+ "step": 170
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.19700483036740515,
+ "learning_rate": 7.999554833168642e-05,
+ "loss": 1.202,
+ "step": 171
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.3335979493370708,
+ "learning_rate": 7.999418560909604e-05,
+ "loss": 1.1995,
+ "step": 172
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.3165803974474567,
+ "learning_rate": 7.999264120886902e-05,
+ "loss": 1.1569,
+ "step": 173
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.1951699080346223,
+ "learning_rate": 7.999091513802093e-05,
+ "loss": 1.1778,
+ "step": 174
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.2087559121749787,
+ "learning_rate": 7.998900740439265e-05,
+ "loss": 1.1736,
+ "step": 175
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.20345180977460478,
+ "learning_rate": 7.998691801665024e-05,
+ "loss": 1.2281,
+ "step": 176
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.24617644827252333,
+ "learning_rate": 7.998464698428495e-05,
+ "loss": 1.2072,
+ "step": 177
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.2469050959356265,
+ "learning_rate": 7.998219431761318e-05,
+ "loss": 1.2242,
+ "step": 178
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.19529317748460623,
+ "learning_rate": 7.997956002777642e-05,
+ "loss": 1.2567,
+ "step": 179
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.19048389491381376,
+ "learning_rate": 7.99767441267412e-05,
+ "loss": 1.2982,
+ "step": 180
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.2085799116493225,
+ "learning_rate": 7.997374662729904e-05,
+ "loss": 1.1254,
+ "step": 181
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.20636853256378995,
+ "learning_rate": 7.997056754306636e-05,
+ "loss": 1.2435,
+ "step": 182
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.20590016382290252,
+ "learning_rate": 7.99672068884845e-05,
+ "loss": 1.2658,
+ "step": 183
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.1931166169764433,
+ "learning_rate": 7.996366467881955e-05,
+ "loss": 1.1637,
+ "step": 184
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.18873318157988098,
+ "learning_rate": 7.995994093016237e-05,
+ "loss": 1.1335,
+ "step": 185
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.19210254625199108,
+ "learning_rate": 7.995603565942846e-05,
+ "loss": 1.1928,
+ "step": 186
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.2130986479765664,
+ "learning_rate": 7.995194888435792e-05,
+ "loss": 1.2158,
+ "step": 187
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.22003854501814088,
+ "learning_rate": 7.994768062351532e-05,
+ "loss": 1.2288,
+ "step": 188
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.20330803191993058,
+ "learning_rate": 7.994323089628968e-05,
+ "loss": 1.2426,
+ "step": 189
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.20567314642208634,
+ "learning_rate": 7.993859972289434e-05,
+ "loss": 1.2649,
+ "step": 190
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.21556663727342962,
+ "learning_rate": 7.993378712436686e-05,
+ "loss": 1.2545,
+ "step": 191
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.20309165469109888,
+ "learning_rate": 7.992879312256897e-05,
+ "loss": 1.3338,
+ "step": 192
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.19574356669421325,
+ "learning_rate": 7.992361774018641e-05,
+ "loss": 1.278,
+ "step": 193
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.2763613746722313,
+ "learning_rate": 7.991826100072891e-05,
+ "loss": 1.2571,
+ "step": 194
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.19346552479915102,
+ "learning_rate": 7.991272292852996e-05,
+ "loss": 1.2027,
+ "step": 195
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.2281167812123908,
+ "learning_rate": 7.990700354874683e-05,
+ "loss": 1.2586,
+ "step": 196
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.19699013712137542,
+ "learning_rate": 7.990110288736042e-05,
+ "loss": 1.1371,
+ "step": 197
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.21768209981475933,
+ "learning_rate": 7.989502097117503e-05,
+ "loss": 1.2522,
+ "step": 198
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.21335427847754582,
+ "learning_rate": 7.988875782781838e-05,
+ "loss": 1.2437,
+ "step": 199
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.21856710629066897,
+ "learning_rate": 7.988231348574147e-05,
+ "loss": 1.2135,
+ "step": 200
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.20482062658774797,
+ "learning_rate": 7.987568797421836e-05,
+ "loss": 1.1755,
+ "step": 201
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.2017756813960897,
+ "learning_rate": 7.986888132334608e-05,
+ "loss": 1.1699,
+ "step": 202
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.20496443848153809,
+ "learning_rate": 7.986189356404458e-05,
+ "loss": 1.2125,
+ "step": 203
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.2134603800558358,
+ "learning_rate": 7.985472472805643e-05,
+ "loss": 1.2391,
+ "step": 204
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.2364175573420861,
+ "learning_rate": 7.98473748479468e-05,
+ "loss": 1.2384,
+ "step": 205
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.1872419861598724,
+ "learning_rate": 7.983984395710326e-05,
+ "loss": 1.1457,
+ "step": 206
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.28222194007095774,
+ "learning_rate": 7.983213208973566e-05,
+ "loss": 1.2952,
+ "step": 207
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.1916094851162064,
+ "learning_rate": 7.982423928087593e-05,
+ "loss": 1.1763,
+ "step": 208
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.18446245256166657,
+ "learning_rate": 7.981616556637795e-05,
+ "loss": 1.1863,
+ "step": 209
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.195191961022491,
+ "learning_rate": 7.980791098291737e-05,
+ "loss": 1.2036,
+ "step": 210
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.2652439657825496,
+ "learning_rate": 7.979947556799151e-05,
+ "loss": 1.2834,
+ "step": 211
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.24308438957843412,
+ "learning_rate": 7.979085935991906e-05,
+ "loss": 1.234,
+ "step": 212
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.21294701043622016,
+ "learning_rate": 7.978206239784004e-05,
+ "loss": 1.3006,
+ "step": 213
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.25809277041859524,
+ "learning_rate": 7.977308472171553e-05,
+ "loss": 1.2272,
+ "step": 214
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.193463860107294,
+ "learning_rate": 7.976392637232754e-05,
+ "loss": 1.2295,
+ "step": 215
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.2150023760609626,
+ "learning_rate": 7.975458739127877e-05,
+ "loss": 1.2135,
+ "step": 216
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.22590495955605894,
+ "learning_rate": 7.974506782099253e-05,
+ "loss": 1.2532,
+ "step": 217
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.21023744668403702,
+ "learning_rate": 7.973536770471242e-05,
+ "loss": 1.2472,
+ "step": 218
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.2345749799511543,
+ "learning_rate": 7.972548708650218e-05,
+ "loss": 1.1791,
+ "step": 219
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.2158876734005217,
+ "learning_rate": 7.971542601124553e-05,
+ "loss": 1.2483,
+ "step": 220
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.29455339949432446,
+ "learning_rate": 7.970518452464593e-05,
+ "loss": 1.2894,
+ "step": 221
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.23983708730626851,
+ "learning_rate": 7.969476267322636e-05,
+ "loss": 1.271,
+ "step": 222
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.1922400905426158,
+ "learning_rate": 7.968416050432912e-05,
+ "loss": 1.2139,
+ "step": 223
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.2238136844422931,
+ "learning_rate": 7.967337806611568e-05,
+ "loss": 1.2655,
+ "step": 224
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.21230292828267672,
+ "learning_rate": 7.966241540756631e-05,
+ "loss": 1.2406,
+ "step": 225
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.26656119419070456,
+ "learning_rate": 7.965127257848004e-05,
+ "loss": 1.2595,
+ "step": 226
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.22381385502992684,
+ "learning_rate": 7.963994962947426e-05,
+ "loss": 1.1737,
+ "step": 227
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20056702203994298,
+ "learning_rate": 7.962844661198462e-05,
+ "loss": 1.1969,
+ "step": 228
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20148701321526885,
+ "learning_rate": 7.961676357826478e-05,
+ "loss": 1.2151,
+ "step": 229
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20034834807028637,
+ "learning_rate": 7.960490058138604e-05,
+ "loss": 1.1455,
+ "step": 230
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.21050838521846033,
+ "learning_rate": 7.959285767523732e-05,
+ "loss": 1.2223,
+ "step": 231
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20904772138969777,
+ "learning_rate": 7.95806349145247e-05,
+ "loss": 1.2534,
+ "step": 232
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.20307877304792957,
+ "learning_rate": 7.956823235477134e-05,
+ "loss": 1.1352,
+ "step": 233
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.20501105270897094,
+ "learning_rate": 7.95556500523171e-05,
+ "loss": 1.2031,
+ "step": 234
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.19800586972038586,
+ "learning_rate": 7.954288806431838e-05,
+ "loss": 1.2567,
+ "step": 235
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.2175102450594135,
+ "learning_rate": 7.952994644874777e-05,
+ "loss": 1.2538,
+ "step": 236
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.22698189300067595,
+ "learning_rate": 7.951682526439391e-05,
+ "loss": 1.3088,
+ "step": 237
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.19208392014975315,
+ "learning_rate": 7.950352457086109e-05,
+ "loss": 1.2336,
+ "step": 238
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.27004086334319655,
+ "learning_rate": 7.949004442856905e-05,
+ "loss": 1.2012,
+ "step": 239
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.23420974954538043,
+ "learning_rate": 7.947638489875272e-05,
+ "loss": 1.2244,
+ "step": 240
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.20514399124802024,
+ "learning_rate": 7.946254604346186e-05,
+ "loss": 1.2548,
+ "step": 241
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.19334973602372896,
+ "learning_rate": 7.944852792556092e-05,
+ "loss": 1.2104,
+ "step": 242
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.1992640714537956,
+ "learning_rate": 7.943433060872858e-05,
+ "loss": 1.2628,
+ "step": 243
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.203284617090413,
+ "learning_rate": 7.941995415745761e-05,
+ "loss": 1.2002,
+ "step": 244
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.22795306969682058,
+ "learning_rate": 7.94053986370545e-05,
+ "loss": 1.2215,
+ "step": 245
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.20789041346838505,
+ "learning_rate": 7.939066411363915e-05,
+ "loss": 1.0998,
+ "step": 246
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.22354868884742066,
+ "learning_rate": 7.937575065414464e-05,
+ "loss": 1.2564,
+ "step": 247
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.21176392726647736,
+ "learning_rate": 7.936065832631687e-05,
+ "loss": 1.2816,
+ "step": 248
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.19967179557235587,
+ "learning_rate": 7.934538719871427e-05,
+ "loss": 1.1961,
+ "step": 249
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.210819577350627,
+ "learning_rate": 7.932993734070747e-05,
+ "loss": 1.2167,
+ "step": 250
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.21537794551756187,
+ "learning_rate": 7.931430882247903e-05,
+ "loss": 1.2341,
+ "step": 251
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.22850872387256574,
+ "learning_rate": 7.929850171502304e-05,
+ "loss": 1.1686,
+ "step": 252
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.22380366415076383,
+ "learning_rate": 7.928251609014493e-05,
+ "loss": 1.1462,
+ "step": 253
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.22426923149036065,
+ "learning_rate": 7.926635202046102e-05,
+ "loss": 1.1792,
+ "step": 254
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.42082703321103965,
+ "learning_rate": 7.925000957939822e-05,
+ "loss": 1.2718,
+ "step": 255
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.2235432774854074,
+ "learning_rate": 7.92334888411937e-05,
+ "loss": 1.2598,
+ "step": 256
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.281644028934108,
+ "learning_rate": 7.92167898808946e-05,
+ "loss": 1.2205,
+ "step": 257
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.2037705143888748,
+ "learning_rate": 7.919991277435763e-05,
+ "loss": 1.1737,
+ "step": 258
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.20917419230028977,
+ "learning_rate": 7.918285759824879e-05,
+ "loss": 1.2035,
+ "step": 259
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.20510847570635518,
+ "learning_rate": 7.916562443004292e-05,
+ "loss": 1.2135,
+ "step": 260
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.25172483071092466,
+ "learning_rate": 7.914821334802342e-05,
+ "loss": 1.2218,
+ "step": 261
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.21102706700634313,
+ "learning_rate": 7.91306244312819e-05,
+ "loss": 1.1738,
+ "step": 262
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.22626060872645815,
+ "learning_rate": 7.911285775971781e-05,
+ "loss": 1.238,
+ "step": 263
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.22448567539778486,
+ "learning_rate": 7.909491341403805e-05,
+ "loss": 1.2404,
+ "step": 264
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.2019099786139193,
+ "learning_rate": 7.907679147575661e-05,
+ "loss": 1.213,
+ "step": 265
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.24307234839096267,
+ "learning_rate": 7.905849202719422e-05,
+ "loss": 1.2322,
+ "step": 266
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.19801890521743487,
+ "learning_rate": 7.904001515147802e-05,
+ "loss": 1.2448,
+ "step": 267
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.2102742273575385,
+ "learning_rate": 7.902136093254106e-05,
+ "loss": 1.1657,
+ "step": 268
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.2173464476815016,
+ "learning_rate": 7.900252945512201e-05,
+ "loss": 1.2549,
+ "step": 269
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.20957275458699595,
+ "learning_rate": 7.898352080476479e-05,
+ "loss": 1.2536,
+ "step": 270
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.20691966388952363,
+ "learning_rate": 7.896433506781811e-05,
+ "loss": 1.2661,
+ "step": 271
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.2276662275112648,
+ "learning_rate": 7.894497233143509e-05,
+ "loss": 1.2409,
+ "step": 272
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.23854109569301263,
+ "learning_rate": 7.892543268357297e-05,
+ "loss": 1.2681,
+ "step": 273
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.2233864156677627,
+ "learning_rate": 7.890571621299252e-05,
+ "loss": 1.1687,
+ "step": 274
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.20114129147925475,
+ "learning_rate": 7.888582300925787e-05,
+ "loss": 1.2184,
+ "step": 275
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.2154654670569462,
+ "learning_rate": 7.886575316273586e-05,
+ "loss": 1.1982,
+ "step": 276
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.2292982209343639,
+ "learning_rate": 7.884550676459583e-05,
+ "loss": 1.2129,
+ "step": 277
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.21302713135229548,
+ "learning_rate": 7.882508390680908e-05,
+ "loss": 1.1605,
+ "step": 278
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.2123661020671048,
+ "learning_rate": 7.88044846821485e-05,
+ "loss": 1.2308,
+ "step": 279
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.2080577410800404,
+ "learning_rate": 7.878370918418818e-05,
+ "loss": 1.2195,
+ "step": 280
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.19663901881127385,
+ "learning_rate": 7.876275750730289e-05,
+ "loss": 1.1591,
+ "step": 281
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.20534502031312163,
+ "learning_rate": 7.874162974666776e-05,
+ "loss": 1.2664,
+ "step": 282
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.23240445399513837,
+ "learning_rate": 7.872032599825779e-05,
+ "loss": 1.2151,
+ "step": 283
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.2672527316717507,
+ "learning_rate": 7.86988463588474e-05,
+ "loss": 1.2406,
+ "step": 284
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.19893903058743695,
+ "learning_rate": 7.867719092601003e-05,
+ "loss": 1.1291,
+ "step": 285
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.33275268109930917,
+ "learning_rate": 7.865535979811768e-05,
+ "loss": 1.1406,
+ "step": 286
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.2373619455690358,
+ "learning_rate": 7.863335307434045e-05,
+ "loss": 1.2799,
+ "step": 287
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.263235735390858,
+ "learning_rate": 7.861117085464612e-05,
+ "loss": 1.2415,
+ "step": 288
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.25884281780784324,
+ "learning_rate": 7.858881323979965e-05,
+ "loss": 1.3919,
+ "step": 289
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.25426288332255736,
+ "learning_rate": 7.85662803313628e-05,
+ "loss": 1.174,
+ "step": 290
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.26655405527881243,
+ "learning_rate": 7.854357223169356e-05,
+ "loss": 1.2806,
+ "step": 291
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.20909844432349833,
+ "learning_rate": 7.852068904394579e-05,
+ "loss": 1.2627,
+ "step": 292
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.21307115068935759,
+ "learning_rate": 7.849763087206866e-05,
+ "loss": 1.1879,
+ "step": 293
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.25009949471398946,
+ "learning_rate": 7.847439782080628e-05,
+ "loss": 1.2881,
+ "step": 294
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.20960783418679174,
+ "learning_rate": 7.845098999569712e-05,
+ "loss": 1.2723,
+ "step": 295
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.24968832437925104,
+ "learning_rate": 7.842740750307362e-05,
+ "loss": 1.2029,
+ "step": 296
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.22981196585125677,
+ "learning_rate": 7.84036504500616e-05,
+ "loss": 1.1695,
+ "step": 297
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.2320606844751365,
+ "learning_rate": 7.837971894457991e-05,
+ "loss": 1.2317,
+ "step": 298
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.23051459673906124,
+ "learning_rate": 7.835561309533981e-05,
+ "loss": 1.2046,
+ "step": 299
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.2510027231060586,
+ "learning_rate": 7.833133301184457e-05,
+ "loss": 1.199,
+ "step": 300
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.23601180466018787,
+ "learning_rate": 7.830687880438895e-05,
+ "loss": 1.1755,
+ "step": 301
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.24740820934385369,
+ "learning_rate": 7.828225058405864e-05,
+ "loss": 1.2054,
+ "step": 302
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.23065372979111173,
+ "learning_rate": 7.825744846272984e-05,
+ "loss": 1.2066,
+ "step": 303
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.22385077334838213,
+ "learning_rate": 7.823247255306866e-05,
+ "loss": 1.2147,
+ "step": 304
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.42981213948386104,
+ "learning_rate": 7.820732296853074e-05,
+ "loss": 1.2314,
+ "step": 305
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.21122844902751076,
+ "learning_rate": 7.818199982336058e-05,
+ "loss": 1.1462,
+ "step": 306
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.23374869692118933,
+ "learning_rate": 7.815650323259117e-05,
+ "loss": 1.2051,
+ "step": 307
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.21662363795962128,
+ "learning_rate": 7.813083331204332e-05,
+ "loss": 1.1575,
+ "step": 308
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.2088315773384112,
+ "learning_rate": 7.810499017832526e-05,
+ "loss": 1.1316,
+ "step": 309
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.2095238410730976,
+ "learning_rate": 7.807897394883203e-05,
+ "loss": 1.2087,
+ "step": 310
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.22672932127256515,
+ "learning_rate": 7.805278474174499e-05,
+ "loss": 1.2512,
+ "step": 311
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.21873052340922736,
+ "learning_rate": 7.802642267603126e-05,
+ "loss": 1.1909,
+ "step": 312
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.219814521916342,
+ "learning_rate": 7.79998878714432e-05,
+ "loss": 1.1669,
+ "step": 313
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.3049426027257317,
+ "learning_rate": 7.797318044851786e-05,
+ "loss": 1.1797,
+ "step": 314
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.22309435690065985,
+ "learning_rate": 7.794630052857638e-05,
+ "loss": 1.1417,
+ "step": 315
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.3891885169154885,
+ "learning_rate": 7.791924823372354e-05,
+ "loss": 1.2369,
+ "step": 316
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.24780269452456372,
+ "learning_rate": 7.789202368684711e-05,
+ "loss": 1.2521,
+ "step": 317
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.21660460720269362,
+ "learning_rate": 7.786462701161738e-05,
+ "loss": 1.2151,
+ "step": 318
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.23635409466561857,
+ "learning_rate": 7.783705833248649e-05,
+ "loss": 1.2363,
+ "step": 319
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.2616135839903218,
+ "learning_rate": 7.780931777468797e-05,
+ "loss": 1.2428,
+ "step": 320
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.21461059159245083,
+ "learning_rate": 7.77814054642361e-05,
+ "loss": 1.1434,
+ "step": 321
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.25348824286656163,
+ "learning_rate": 7.775332152792539e-05,
+ "loss": 1.2368,
+ "step": 322
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.22275034726331247,
+ "learning_rate": 7.772506609332995e-05,
+ "loss": 1.1827,
+ "step": 323
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.25030821228147526,
+ "learning_rate": 7.769663928880298e-05,
+ "loss": 1.2428,
+ "step": 324
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.22251804398745534,
+ "learning_rate": 7.766804124347608e-05,
+ "loss": 1.1889,
+ "step": 325
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.23381455520411995,
+ "learning_rate": 7.763927208725879e-05,
+ "loss": 1.2115,
+ "step": 326
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.27341902651946226,
+ "learning_rate": 7.761033195083791e-05,
+ "loss": 1.2535,
+ "step": 327
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.24862471659814522,
+ "learning_rate": 7.758122096567694e-05,
+ "loss": 1.2128,
+ "step": 328
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.2251357082045494,
+ "learning_rate": 7.755193926401547e-05,
+ "loss": 1.2334,
+ "step": 329
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.3173274941622932,
+ "learning_rate": 7.752248697886857e-05,
+ "loss": 1.226,
+ "step": 330
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.23056440717672175,
+ "learning_rate": 7.74928642440263e-05,
+ "loss": 1.2339,
+ "step": 331
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2801507500859342,
+ "learning_rate": 7.746307119405286e-05,
+ "loss": 1.287,
+ "step": 332
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2267818430426272,
+ "learning_rate": 7.743310796428622e-05,
+ "loss": 1.1916,
+ "step": 333
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2777329160365585,
+ "learning_rate": 7.74029746908374e-05,
+ "loss": 1.252,
+ "step": 334
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.25289169762353,
+ "learning_rate": 7.737267151058983e-05,
+ "loss": 1.2153,
+ "step": 335
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2424670686901653,
+ "learning_rate": 7.734219856119875e-05,
+ "loss": 1.2227,
+ "step": 336
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.22747092217441645,
+ "learning_rate": 7.731155598109067e-05,
+ "loss": 1.19,
+ "step": 337
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.2307810940100189,
+ "learning_rate": 7.728074390946257e-05,
+ "loss": 1.1818,
+ "step": 338
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.2583402574655623,
+ "learning_rate": 7.724976248628142e-05,
+ "loss": 1.1608,
+ "step": 339
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.22140209760890694,
+ "learning_rate": 7.721861185228347e-05,
+ "loss": 1.1245,
+ "step": 340
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.25859310758244686,
+ "learning_rate": 7.718729214897362e-05,
+ "loss": 1.2247,
+ "step": 341
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.26371179531372124,
+ "learning_rate": 7.715580351862482e-05,
+ "loss": 1.2128,
+ "step": 342
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.26575541302851047,
+ "learning_rate": 7.712414610427733e-05,
+ "loss": 1.2443,
+ "step": 343
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.269978305197599,
+ "learning_rate": 7.709232004973816e-05,
+ "loss": 1.2231,
+ "step": 344
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.26583998705977047,
+ "learning_rate": 7.70603254995804e-05,
+ "loss": 1.2476,
+ "step": 345
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.24256062164066097,
+ "learning_rate": 7.702816259914253e-05,
+ "loss": 1.2901,
+ "step": 346
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.3463123472658915,
+ "learning_rate": 7.699583149452779e-05,
+ "loss": 1.3277,
+ "step": 347
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.2269096590531878,
+ "learning_rate": 7.696333233260345e-05,
+ "loss": 1.2047,
+ "step": 348
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.25136883001050025,
+ "learning_rate": 7.693066526100031e-05,
+ "loss": 1.1619,
+ "step": 349
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.2565112571116145,
+ "learning_rate": 7.68978304281118e-05,
+ "loss": 1.2389,
+ "step": 350
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.22175779550828703,
+ "learning_rate": 7.686482798309349e-05,
+ "loss": 1.2238,
+ "step": 351
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.22588304332216555,
+ "learning_rate": 7.683165807586234e-05,
+ "loss": 1.174,
+ "step": 352
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.24889474296529737,
+ "learning_rate": 7.6798320857096e-05,
+ "loss": 1.2366,
+ "step": 353
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.27339703806525034,
+ "learning_rate": 7.676481647823214e-05,
+ "loss": 1.2356,
+ "step": 354
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.23424666722888365,
+ "learning_rate": 7.673114509146782e-05,
+ "loss": 1.2089,
+ "step": 355
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.27978285392461766,
+ "learning_rate": 7.66973068497587e-05,
+ "loss": 1.2609,
+ "step": 356
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.2509423350138824,
+ "learning_rate": 7.666330190681844e-05,
+ "loss": 1.1777,
+ "step": 357
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.23007730927468031,
+ "learning_rate": 7.662913041711793e-05,
+ "loss": 1.154,
+ "step": 358
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.2438648674953112,
+ "learning_rate": 7.659479253588462e-05,
+ "loss": 1.2257,
+ "step": 359
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.28816093242092233,
+ "learning_rate": 7.65602884191018e-05,
+ "loss": 1.2558,
+ "step": 360
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.24972815300596035,
+ "learning_rate": 7.652561822350793e-05,
+ "loss": 1.2837,
+ "step": 361
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.2543189139697063,
+ "learning_rate": 7.649078210659587e-05,
+ "loss": 1.2193,
+ "step": 362
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.2237937956718952,
+ "learning_rate": 7.645578022661224e-05,
+ "loss": 1.2237,
+ "step": 363
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.29742029408787396,
+ "learning_rate": 7.642061274255657e-05,
+ "loss": 1.2116,
+ "step": 364
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2462883147335493,
+ "learning_rate": 7.638527981418075e-05,
+ "loss": 1.1827,
+ "step": 365
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2647802498907096,
+ "learning_rate": 7.634978160198817e-05,
+ "loss": 1.2739,
+ "step": 366
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.22360398779217264,
+ "learning_rate": 7.631411826723306e-05,
+ "loss": 1.2185,
+ "step": 367
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2635048004593543,
+ "learning_rate": 7.627828997191973e-05,
+ "loss": 1.2317,
+ "step": 368
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2764803449917684,
+ "learning_rate": 7.624229687880184e-05,
+ "loss": 1.1923,
+ "step": 369
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.25724943233414527,
+ "learning_rate": 7.620613915138166e-05,
+ "loss": 1.2218,
+ "step": 370
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.2858318045794755,
+ "learning_rate": 7.61698169539093e-05,
+ "loss": 1.1496,
+ "step": 371
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.23547216647460364,
+ "learning_rate": 7.613333045138206e-05,
+ "loss": 1.1905,
+ "step": 372
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.22984814903684375,
+ "learning_rate": 7.609667980954355e-05,
+ "loss": 1.2009,
+ "step": 373
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.2551903754079084,
+ "learning_rate": 7.605986519488301e-05,
+ "loss": 1.2042,
+ "step": 374
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.2508257410125616,
+ "learning_rate": 7.602288677463457e-05,
+ "loss": 1.2468,
+ "step": 375
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.25324577774935964,
+ "learning_rate": 7.598574471677644e-05,
+ "loss": 1.2603,
+ "step": 376
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.35888776531769967,
+ "learning_rate": 7.59484391900302e-05,
+ "loss": 1.1929,
+ "step": 377
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.22048517191014724,
+ "learning_rate": 7.591097036385994e-05,
+ "loss": 1.1783,
+ "step": 378
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.2781160412746083,
+ "learning_rate": 7.587333840847162e-05,
+ "loss": 1.3397,
+ "step": 379
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.24033046830332258,
+ "learning_rate": 7.583554349481222e-05,
+ "loss": 1.2436,
+ "step": 380
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.26413762380260003,
+ "learning_rate": 7.579758579456893e-05,
+ "loss": 1.1917,
+ "step": 381
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.2390937887338632,
+ "learning_rate": 7.575946548016847e-05,
+ "loss": 1.2186,
+ "step": 382
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.25131263043429275,
+ "learning_rate": 7.572118272477622e-05,
+ "loss": 1.2538,
+ "step": 383
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.223974104870702,
+ "learning_rate": 7.568273770229546e-05,
+ "loss": 1.2165,
+ "step": 384
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.25840356830252875,
+ "learning_rate": 7.564413058736663e-05,
+ "loss": 1.1848,
+ "step": 385
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.2723156683076603,
+ "learning_rate": 7.560536155536641e-05,
+ "loss": 1.1982,
+ "step": 386
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.265687427976889,
+ "learning_rate": 7.556643078240708e-05,
+ "loss": 1.231,
+ "step": 387
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.25152762080976077,
+ "learning_rate": 7.552733844533562e-05,
+ "loss": 1.1974,
+ "step": 388
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.2366049485053541,
+ "learning_rate": 7.548808472173292e-05,
+ "loss": 1.3119,
+ "step": 389
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.22092196577077122,
+ "learning_rate": 7.5448669789913e-05,
+ "loss": 1.195,
+ "step": 390
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.22667521540462374,
+ "learning_rate": 7.540909382892217e-05,
+ "loss": 1.1431,
+ "step": 391
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.25432207282646513,
+ "learning_rate": 7.536935701853823e-05,
+ "loss": 1.2173,
+ "step": 392
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.29950506457923864,
+ "learning_rate": 7.53294595392697e-05,
+ "loss": 1.1962,
+ "step": 393
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.24735689607229913,
+ "learning_rate": 7.528940157235487e-05,
+ "loss": 1.2053,
+ "step": 394
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.24394198607459663,
+ "learning_rate": 7.524918329976114e-05,
+ "loss": 1.1979,
+ "step": 395
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.2630369372689188,
+ "learning_rate": 7.520880490418409e-05,
+ "loss": 1.2111,
+ "step": 396
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.26275028416291457,
+ "learning_rate": 7.516826656904664e-05,
+ "loss": 1.2133,
+ "step": 397
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.23938074620956928,
+ "learning_rate": 7.512756847849831e-05,
+ "loss": 1.1355,
+ "step": 398
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.3724960610098138,
+ "learning_rate": 7.508671081741428e-05,
+ "loss": 1.2572,
+ "step": 399
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.24161685847894723,
+ "learning_rate": 7.504569377139462e-05,
+ "loss": 1.1706,
+ "step": 400
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.26121591322670523,
+ "learning_rate": 7.50045175267634e-05,
+ "loss": 1.2135,
+ "step": 401
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2465579498164775,
+ "learning_rate": 7.496318227056788e-05,
+ "loss": 1.1641,
+ "step": 402
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2556288696122787,
+ "learning_rate": 7.492168819057767e-05,
+ "loss": 1.2939,
+ "step": 403
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.261481216336303,
+ "learning_rate": 7.488003547528382e-05,
+ "loss": 1.2026,
+ "step": 404
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2389415135676362,
+ "learning_rate": 7.483822431389799e-05,
+ "loss": 1.2131,
+ "step": 405
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2559201956627192,
+ "learning_rate": 7.479625489635162e-05,
+ "loss": 1.1246,
+ "step": 406
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.27127932491822604,
+ "learning_rate": 7.475412741329504e-05,
+ "loss": 1.2429,
+ "step": 407
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.27006004008695594,
+ "learning_rate": 7.47118420560966e-05,
+ "loss": 1.2388,
+ "step": 408
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.23716823297200537,
+ "learning_rate": 7.466939901684182e-05,
+ "loss": 1.1264,
+ "step": 409
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.2885373898669248,
+ "learning_rate": 7.462679848833252e-05,
+ "loss": 1.2786,
+ "step": 410
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.49215227598639927,
+ "learning_rate": 7.458404066408588e-05,
+ "loss": 1.2386,
+ "step": 411
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.24235735604947403,
+ "learning_rate": 7.454112573833368e-05,
+ "loss": 1.1423,
+ "step": 412
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.2584614748054343,
+ "learning_rate": 7.449805390602127e-05,
+ "loss": 1.2669,
+ "step": 413
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.23806123085998873,
+ "learning_rate": 7.445482536280684e-05,
+ "loss": 1.1763,
+ "step": 414
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.24459517607786851,
+ "learning_rate": 7.441144030506043e-05,
+ "loss": 1.198,
+ "step": 415
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.25801616402700395,
+ "learning_rate": 7.436789892986304e-05,
+ "loss": 1.2136,
+ "step": 416
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.2814819942392514,
+ "learning_rate": 7.432420143500578e-05,
+ "loss": 1.2398,
+ "step": 417
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.22134709322606153,
+ "learning_rate": 7.428034801898893e-05,
+ "loss": 1.1592,
+ "step": 418
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.2899677536995633,
+ "learning_rate": 7.42363388810211e-05,
+ "loss": 1.2296,
+ "step": 419
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.24005943230262294,
+ "learning_rate": 7.419217422101822e-05,
+ "loss": 1.2223,
+ "step": 420
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.26417562369496167,
+ "learning_rate": 7.414785423960275e-05,
+ "loss": 1.2261,
+ "step": 421
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.2580815883535521,
+ "learning_rate": 7.410337913810271e-05,
+ "loss": 1.2021,
+ "step": 422
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.25242217589496435,
+ "learning_rate": 7.405874911855071e-05,
+ "loss": 1.239,
+ "step": 423
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.21991733999839932,
+ "learning_rate": 7.401396438368315e-05,
+ "loss": 1.1716,
+ "step": 424
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.40116538322720213,
+ "learning_rate": 7.396902513693924e-05,
+ "loss": 1.2773,
+ "step": 425
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.277333939455099,
+ "learning_rate": 7.392393158246002e-05,
+ "loss": 1.2574,
+ "step": 426
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.27146087746385755,
+ "learning_rate": 7.387868392508756e-05,
+ "loss": 1.2243,
+ "step": 427
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.255881055620786,
+ "learning_rate": 7.38332823703639e-05,
+ "loss": 1.223,
+ "step": 428
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.24807364856677255,
+ "learning_rate": 7.378772712453021e-05,
+ "loss": 1.1985,
+ "step": 429
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.25746257617764423,
+ "learning_rate": 7.37420183945258e-05,
+ "loss": 1.2502,
+ "step": 430
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.28851991049982234,
+ "learning_rate": 7.369615638798722e-05,
+ "loss": 1.2535,
+ "step": 431
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.24113389811604363,
+ "learning_rate": 7.365014131324725e-05,
+ "loss": 1.2227,
+ "step": 432
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.2414465151257969,
+ "learning_rate": 7.360397337933405e-05,
+ "loss": 1.1884,
+ "step": 433
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.2735463134699831,
+ "learning_rate": 7.355765279597011e-05,
+ "loss": 1.2756,
+ "step": 434
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.2588437452987293,
+ "learning_rate": 7.351117977357139e-05,
+ "loss": 1.2108,
+ "step": 435
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.26573294117796553,
+ "learning_rate": 7.346455452324629e-05,
+ "loss": 1.1821,
+ "step": 436
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.2555476577827304,
+ "learning_rate": 7.341777725679473e-05,
+ "loss": 1.1937,
+ "step": 437
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.2867704132108098,
+ "learning_rate": 7.337084818670716e-05,
+ "loss": 1.2272,
+ "step": 438
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.27726678115981157,
+ "learning_rate": 7.332376752616367e-05,
+ "loss": 1.2331,
+ "step": 439
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.26955338021079955,
+ "learning_rate": 7.32765354890329e-05,
+ "loss": 1.1731,
+ "step": 440
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.25250321202536524,
+ "learning_rate": 7.322915228987116e-05,
+ "loss": 1.2653,
+ "step": 441
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.24748844179765395,
+ "learning_rate": 7.318161814392143e-05,
+ "loss": 1.24,
+ "step": 442
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.28177805247356325,
+ "learning_rate": 7.313393326711239e-05,
+ "loss": 1.185,
+ "step": 443
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.24093242000396312,
+ "learning_rate": 7.30860978760574e-05,
+ "loss": 1.1994,
+ "step": 444
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.26277803901457075,
+ "learning_rate": 7.30381121880536e-05,
+ "loss": 1.212,
+ "step": 445
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2506524258682433,
+ "learning_rate": 7.298997642108079e-05,
+ "loss": 1.2421,
+ "step": 446
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2840599700015824,
+ "learning_rate": 7.294169079380061e-05,
+ "loss": 1.1818,
+ "step": 447
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.24892184038117549,
+ "learning_rate": 7.289325552555538e-05,
+ "loss": 1.1916,
+ "step": 448
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2700898428541357,
+ "learning_rate": 7.284467083636722e-05,
+ "loss": 1.2517,
+ "step": 449
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2617848546539419,
+ "learning_rate": 7.279593694693698e-05,
+ "loss": 1.2063,
+ "step": 450
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2698278585334131,
+ "learning_rate": 7.274705407864332e-05,
+ "loss": 1.194,
+ "step": 451
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.23678313024953834,
+ "learning_rate": 7.26980224535416e-05,
+ "loss": 1.2349,
+ "step": 452
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.24851875792002978,
+ "learning_rate": 7.264884229436293e-05,
+ "loss": 1.1758,
+ "step": 453
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.24122080121681125,
+ "learning_rate": 7.259951382451318e-05,
+ "loss": 1.1962,
+ "step": 454
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.22741322959884405,
+ "learning_rate": 7.25500372680719e-05,
+ "loss": 1.1702,
+ "step": 455
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.2297475610861458,
+ "learning_rate": 7.250041284979137e-05,
+ "loss": 1.1466,
+ "step": 456
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.3057605989721467,
+ "learning_rate": 7.245064079509553e-05,
+ "loss": 1.246,
+ "step": 457
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.2719638501597136,
+ "learning_rate": 7.240072133007899e-05,
+ "loss": 1.2184,
+ "step": 458
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.2436807816414479,
+ "learning_rate": 7.235065468150593e-05,
+ "loss": 1.2324,
+ "step": 459
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.23436349430255515,
+ "learning_rate": 7.23004410768092e-05,
+ "loss": 1.1813,
+ "step": 460
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.2398940990211377,
+ "learning_rate": 7.22500807440892e-05,
+ "loss": 1.1924,
+ "step": 461
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.2605716625062531,
+ "learning_rate": 7.219957391211281e-05,
+ "loss": 1.182,
+ "step": 462
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.260462524570941,
+ "learning_rate": 7.214892081031244e-05,
+ "loss": 1.2136,
+ "step": 463
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.21979766512306334,
+ "learning_rate": 7.209812166878491e-05,
+ "loss": 1.2066,
+ "step": 464
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.23324453647530663,
+ "learning_rate": 7.204717671829051e-05,
+ "loss": 1.1657,
+ "step": 465
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.2529434935507481,
+ "learning_rate": 7.199608619025177e-05,
+ "loss": 1.2093,
+ "step": 466
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.25371701891720116,
+ "learning_rate": 7.194485031675265e-05,
+ "loss": 1.2225,
+ "step": 467
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.23272423066292103,
+ "learning_rate": 7.189346933053725e-05,
+ "loss": 1.1721,
+ "step": 468
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.25122928735587546,
+ "learning_rate": 7.184194346500892e-05,
+ "loss": 1.2537,
+ "step": 469
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.2159270875490409,
+ "learning_rate": 7.179027295422913e-05,
+ "loss": 1.197,
+ "step": 470
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.2633111059076544,
+ "learning_rate": 7.173845803291636e-05,
+ "loss": 1.1721,
+ "step": 471
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.30555936322098703,
+ "learning_rate": 7.168649893644517e-05,
+ "loss": 1.3011,
+ "step": 472
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.23492670111453726,
+ "learning_rate": 7.163439590084502e-05,
+ "loss": 1.1601,
+ "step": 473
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.26602734263721806,
+ "learning_rate": 7.158214916279923e-05,
+ "loss": 1.2808,
+ "step": 474
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.3182695007856262,
+ "learning_rate": 7.152975895964386e-05,
+ "loss": 1.2967,
+ "step": 475
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.2785021674736721,
+ "learning_rate": 7.147722552936673e-05,
+ "loss": 1.1789,
+ "step": 476
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.279474303138652,
+ "learning_rate": 7.142454911060627e-05,
+ "loss": 1.2596,
+ "step": 477
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.2556980144910755,
+ "learning_rate": 7.137172994265044e-05,
+ "loss": 1.2426,
+ "step": 478
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.3311256331993533,
+ "learning_rate": 7.131876826543565e-05,
+ "loss": 1.2059,
+ "step": 479
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.26467296197775253,
+ "learning_rate": 7.12656643195457e-05,
+ "loss": 1.2482,
+ "step": 480
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.27444885274652553,
+ "learning_rate": 7.121241834621064e-05,
+ "loss": 1.2528,
+ "step": 481
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.2572283861115396,
+ "learning_rate": 7.115903058730567e-05,
+ "loss": 1.1849,
+ "step": 482
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.2677065778235683,
+ "learning_rate": 7.11055012853501e-05,
+ "loss": 1.2011,
+ "step": 483
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.29470622036742816,
+ "learning_rate": 7.105183068350619e-05,
+ "loss": 1.2398,
+ "step": 484
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.27609230248969197,
+ "learning_rate": 7.099801902557811e-05,
+ "loss": 1.2259,
+ "step": 485
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.24248634168099284,
+ "learning_rate": 7.094406655601073e-05,
+ "loss": 1.2282,
+ "step": 486
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.2765941767688746,
+ "learning_rate": 7.088997351988865e-05,
+ "loss": 1.2319,
+ "step": 487
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.29347776909858947,
+ "learning_rate": 7.083574016293493e-05,
+ "loss": 1.1765,
+ "step": 488
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.285370295424537,
+ "learning_rate": 7.078136673151008e-05,
+ "loss": 1.26,
+ "step": 489
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.29408734903836536,
+ "learning_rate": 7.072685347261093e-05,
+ "loss": 1.226,
+ "step": 490
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.27437470239205813,
+ "learning_rate": 7.067220063386947e-05,
+ "loss": 1.1976,
+ "step": 491
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.2680770258777871,
+ "learning_rate": 7.061740846355176e-05,
+ "loss": 1.1915,
+ "step": 492
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.27200362879502954,
+ "learning_rate": 7.056247721055678e-05,
+ "loss": 1.2002,
+ "step": 493
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.2637811092577037,
+ "learning_rate": 7.050740712441528e-05,
+ "loss": 1.287,
+ "step": 494
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.24657959209271266,
+ "learning_rate": 7.045219845528875e-05,
+ "loss": 1.2284,
+ "step": 495
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.25311992110358666,
+ "learning_rate": 7.039685145396812e-05,
+ "loss": 1.1616,
+ "step": 496
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.2564633694193358,
+ "learning_rate": 7.034136637187275e-05,
+ "loss": 1.2067,
+ "step": 497
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.2446797651174144,
+ "learning_rate": 7.028574346104926e-05,
+ "loss": 1.2284,
+ "step": 498
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.2592751463399255,
+ "learning_rate": 7.022998297417034e-05,
+ "loss": 1.2371,
+ "step": 499
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.2500713943206808,
+ "learning_rate": 7.017408516453365e-05,
+ "loss": 1.1061,
+ "step": 500
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.2812266276040743,
+ "learning_rate": 7.011805028606064e-05,
+ "loss": 1.1949,
+ "step": 501
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.298829667668083,
+ "learning_rate": 7.006187859329544e-05,
+ "loss": 1.2313,
+ "step": 502
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.26518768159745104,
+ "learning_rate": 7.000557034140361e-05,
+ "loss": 1.2246,
+ "step": 503
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.3037280360760458,
+ "learning_rate": 6.994912578617113e-05,
+ "loss": 1.1617,
+ "step": 504
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.2726903109255714,
+ "learning_rate": 6.989254518400309e-05,
+ "loss": 1.2415,
+ "step": 505
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.25568082003046966,
+ "learning_rate": 6.98358287919226e-05,
+ "loss": 1.1817,
+ "step": 506
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.25633294893705044,
+ "learning_rate": 6.97789768675696e-05,
+ "loss": 1.2149,
+ "step": 507
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.28291439435087123,
+ "learning_rate": 6.972198966919972e-05,
+ "loss": 1.1578,
+ "step": 508
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.27195184756655516,
+ "learning_rate": 6.966486745568308e-05,
+ "loss": 1.2355,
+ "step": 509
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.239159568376005,
+ "learning_rate": 6.960761048650312e-05,
+ "loss": 1.1688,
+ "step": 510
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.22961475425949177,
+ "learning_rate": 6.955021902175543e-05,
+ "loss": 1.2094,
+ "step": 511
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.27443773600741117,
+ "learning_rate": 6.949269332214651e-05,
+ "loss": 1.2559,
+ "step": 512
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.26230551832002097,
+ "learning_rate": 6.94350336489927e-05,
+ "loss": 1.2121,
+ "step": 513
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.2716742985303849,
+ "learning_rate": 6.937724026421892e-05,
+ "loss": 1.2444,
+ "step": 514
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.2537850139439542,
+ "learning_rate": 6.931931343035742e-05,
+ "loss": 1.1327,
+ "step": 515
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.28599587967496826,
+ "learning_rate": 6.926125341054676e-05,
+ "loss": 1.2236,
+ "step": 516
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.26780654378470103,
+ "learning_rate": 6.920306046853043e-05,
+ "loss": 1.2295,
+ "step": 517
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.23606296888412015,
+ "learning_rate": 6.914473486865577e-05,
+ "loss": 1.1543,
+ "step": 518
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.34976881174240837,
+ "learning_rate": 6.90862768758727e-05,
+ "loss": 1.2067,
+ "step": 519
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.2481257873494882,
+ "learning_rate": 6.902768675573258e-05,
+ "loss": 1.2188,
+ "step": 520
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.2996395778117021,
+ "learning_rate": 6.896896477438699e-05,
+ "loss": 1.2326,
+ "step": 521
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.8839768816333193,
+ "learning_rate": 6.891011119858643e-05,
+ "loss": 1.2435,
+ "step": 522
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.2851882482058998,
+ "learning_rate": 6.885112629567927e-05,
+ "loss": 1.2644,
+ "step": 523
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.2813663482913699,
+ "learning_rate": 6.879201033361035e-05,
+ "loss": 1.2309,
+ "step": 524
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.3257551560135454,
+ "learning_rate": 6.873276358091996e-05,
+ "loss": 1.2755,
+ "step": 525
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.28930479952494365,
+ "learning_rate": 6.867338630674247e-05,
+ "loss": 1.1962,
+ "step": 526
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.3077462996938649,
+ "learning_rate": 6.861387878080511e-05,
+ "loss": 1.2402,
+ "step": 527
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.2848900193452761,
+ "learning_rate": 6.855424127342688e-05,
+ "loss": 1.2748,
+ "step": 528
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.4765938812802202,
+ "learning_rate": 6.849447405551718e-05,
+ "loss": 1.2226,
+ "step": 529
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.53184473292579,
+ "learning_rate": 6.843457739857467e-05,
+ "loss": 1.2347,
+ "step": 530
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.6416239346492343,
+ "learning_rate": 6.837455157468596e-05,
+ "loss": 1.2429,
+ "step": 531
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.3188092712502773,
+ "learning_rate": 6.831439685652442e-05,
+ "loss": 1.216,
+ "step": 532
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.3527495731006385,
+ "learning_rate": 6.825411351734895e-05,
+ "loss": 1.1682,
+ "step": 533
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 0.29603753744741856,
+ "learning_rate": 6.819370183100274e-05,
+ "loss": 1.1434,
+ "step": 534
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 0.5252450389976622,
+ "learning_rate": 6.813316207191198e-05,
+ "loss": 1.1943,
+ "step": 535
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 0.32999419558659937,
+ "learning_rate": 6.807249451508466e-05,
+ "loss": 1.192,
+ "step": 536
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 0.3650175469778724,
+ "learning_rate": 6.801169943610929e-05,
+ "loss": 1.2141,
+ "step": 537
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 1.0643532150783557,
+ "learning_rate": 6.795077711115368e-05,
+ "loss": 1.2253,
+ "step": 538
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.5041310609130145,
+ "learning_rate": 6.788972781696363e-05,
+ "loss": 1.278,
+ "step": 539
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.5123058164360991,
+ "learning_rate": 6.782855183086177e-05,
+ "loss": 1.2231,
+ "step": 540
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.3533015702394419,
+ "learning_rate": 6.776724943074619e-05,
+ "loss": 1.2072,
+ "step": 541
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.30253964625417207,
+ "learning_rate": 6.770582089508927e-05,
+ "loss": 1.1382,
+ "step": 542
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.348991618828202,
+ "learning_rate": 6.764426650293633e-05,
+ "loss": 1.2079,
+ "step": 543
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.46017440578788743,
+ "learning_rate": 6.758258653390444e-05,
+ "loss": 1.1813,
+ "step": 544
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.31962101755594885,
+ "learning_rate": 6.75207812681811e-05,
+ "loss": 1.1339,
+ "step": 545
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.37092024548285923,
+ "learning_rate": 6.745885098652298e-05,
+ "loss": 1.2591,
+ "step": 546
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.32347106450715835,
+ "learning_rate": 6.739679597025466e-05,
+ "loss": 1.2017,
+ "step": 547
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.39250187112342494,
+ "learning_rate": 6.733461650126733e-05,
+ "loss": 1.0933,
+ "step": 548
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.473522452217324,
+ "learning_rate": 6.727231286201752e-05,
+ "loss": 1.1124,
+ "step": 549
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.4809062179622052,
+ "learning_rate": 6.720988533552582e-05,
+ "loss": 1.1585,
+ "step": 550
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.3529662801059162,
+ "learning_rate": 6.714733420537559e-05,
+ "loss": 1.0501,
+ "step": 551
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.5958247214391118,
+ "learning_rate": 6.708465975571168e-05,
+ "loss": 1.1086,
+ "step": 552
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.5341364205022454,
+ "learning_rate": 6.70218622712391e-05,
+ "loss": 1.0518,
+ "step": 553
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.3601805724462006,
+ "learning_rate": 6.695894203722181e-05,
+ "loss": 1.1779,
+ "step": 554
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.43410190338280613,
+ "learning_rate": 6.68958993394813e-05,
+ "loss": 1.093,
+ "step": 555
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.46217742572873594,
+ "learning_rate": 6.683273446439546e-05,
+ "loss": 1.0117,
+ "step": 556
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.8591682373623357,
+ "learning_rate": 6.676944769889708e-05,
+ "loss": 1.1002,
+ "step": 557
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.7383229487622726,
+ "learning_rate": 6.670603933047272e-05,
+ "loss": 1.0779,
+ "step": 558
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.5965305891207813,
+ "learning_rate": 6.664250964716131e-05,
+ "loss": 1.0889,
+ "step": 559
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.6030858606684543,
+ "learning_rate": 6.657885893755288e-05,
+ "loss": 1.0982,
+ "step": 560
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.4644510682398409,
+ "learning_rate": 6.65150874907872e-05,
+ "loss": 1.1004,
+ "step": 561
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.43943285132452564,
+ "learning_rate": 6.645119559655254e-05,
+ "loss": 1.0536,
+ "step": 562
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.4456395978600012,
+ "learning_rate": 6.638718354508427e-05,
+ "loss": 1.0733,
+ "step": 563
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.3303824433217466,
+ "learning_rate": 6.632305162716365e-05,
+ "loss": 1.0552,
+ "step": 564
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.3617704823170143,
+ "learning_rate": 6.62588001341164e-05,
+ "loss": 1.1092,
+ "step": 565
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.4465013349903427,
+ "learning_rate": 6.619442935781141e-05,
+ "loss": 1.0781,
+ "step": 566
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.48516780613791277,
+ "learning_rate": 6.612993959065947e-05,
+ "loss": 1.0686,
+ "step": 567
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.38867820318536633,
+ "learning_rate": 6.606533112561186e-05,
+ "loss": 1.1215,
+ "step": 568
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.38566119820378336,
+ "learning_rate": 6.600060425615907e-05,
+ "loss": 1.1213,
+ "step": 569
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.35534855445058544,
+ "learning_rate": 6.593575927632947e-05,
+ "loss": 1.0955,
+ "step": 570
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.38124406233349717,
+ "learning_rate": 6.587079648068795e-05,
+ "loss": 1.0659,
+ "step": 571
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.454750160923548,
+ "learning_rate": 6.580571616433457e-05,
+ "loss": 1.1149,
+ "step": 572
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.35353190088025255,
+ "learning_rate": 6.574051862290325e-05,
+ "loss": 1.0388,
+ "step": 573
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.3249395594793626,
+ "learning_rate": 6.567520415256045e-05,
+ "loss": 1.0784,
+ "step": 574
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.40078898818247227,
+ "learning_rate": 6.560977305000375e-05,
+ "loss": 1.0859,
+ "step": 575
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.4115264795060035,
+ "learning_rate": 6.554422561246054e-05,
+ "loss": 1.1828,
+ "step": 576
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.30090229228069215,
+ "learning_rate": 6.54785621376867e-05,
+ "loss": 1.0901,
+ "step": 577
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.28827860350299206,
+ "learning_rate": 6.541278292396523e-05,
+ "loss": 1.0277,
+ "step": 578
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.34690404488996757,
+ "learning_rate": 6.534688827010484e-05,
+ "loss": 1.048,
+ "step": 579
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.29943113556644785,
+ "learning_rate": 6.528087847543867e-05,
+ "loss": 1.0646,
+ "step": 580
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.37318202575874415,
+ "learning_rate": 6.521475383982291e-05,
+ "loss": 1.1091,
+ "step": 581
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.3049663659203959,
+ "learning_rate": 6.51485146636354e-05,
+ "loss": 1.0552,
+ "step": 582
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.3342407867509692,
+ "learning_rate": 6.508216124777431e-05,
+ "loss": 1.2227,
+ "step": 583
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.3348396047855952,
+ "learning_rate": 6.501569389365674e-05,
+ "loss": 1.0861,
+ "step": 584
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.30951429367513383,
+ "learning_rate": 6.494911290321737e-05,
+ "loss": 1.0461,
+ "step": 585
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.33898401361064606,
+ "learning_rate": 6.488241857890711e-05,
+ "loss": 1.0854,
+ "step": 586
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.4901462068263497,
+ "learning_rate": 6.481561122369164e-05,
+ "loss": 1.1012,
+ "step": 587
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.3179574879809652,
+ "learning_rate": 6.474869114105018e-05,
+ "loss": 1.0451,
+ "step": 588
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.32159328915060714,
+ "learning_rate": 6.468165863497395e-05,
+ "loss": 1.0458,
+ "step": 589
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.36462235008537297,
+ "learning_rate": 6.461451400996491e-05,
+ "loss": 1.1247,
+ "step": 590
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.5373862753611778,
+ "learning_rate": 6.454725757103432e-05,
+ "loss": 1.0542,
+ "step": 591
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.3160409270291303,
+ "learning_rate": 6.447988962370133e-05,
+ "loss": 1.0829,
+ "step": 592
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.390452102978435,
+ "learning_rate": 6.441241047399169e-05,
+ "loss": 1.192,
+ "step": 593
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.3802122712014928,
+ "learning_rate": 6.434482042843627e-05,
+ "loss": 1.1153,
+ "step": 594
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.4081584328242501,
+ "learning_rate": 6.427711979406966e-05,
+ "loss": 1.1635,
+ "step": 595
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.3791962989638633,
+ "learning_rate": 6.420930887842889e-05,
+ "loss": 1.1581,
+ "step": 596
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.33239440056484193,
+ "learning_rate": 6.414138798955189e-05,
+ "loss": 1.0926,
+ "step": 597
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.3279881540815014,
+ "learning_rate": 6.407335743597616e-05,
+ "loss": 1.1386,
+ "step": 598
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.30309644763750837,
+ "learning_rate": 6.40052175267374e-05,
+ "loss": 1.0523,
+ "step": 599
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.3349097308403333,
+ "learning_rate": 6.393696857136801e-05,
+ "loss": 1.0815,
+ "step": 600
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.3288227593556618,
+ "learning_rate": 6.386861087989581e-05,
+ "loss": 1.015,
+ "step": 601
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.36685586740843157,
+ "learning_rate": 6.380014476284255e-05,
+ "loss": 1.1232,
+ "step": 602
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.3620977714204643,
+ "learning_rate": 6.373157053122243e-05,
+ "loss": 1.1138,
+ "step": 603
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.3130587018197183,
+ "learning_rate": 6.366288849654091e-05,
+ "loss": 1.1255,
+ "step": 604
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.3602737087072766,
+ "learning_rate": 6.359409897079303e-05,
+ "loss": 1.0282,
+ "step": 605
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.31168852571991945,
+ "learning_rate": 6.352520226646222e-05,
+ "loss": 1.0779,
+ "step": 606
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.3516045580189353,
+ "learning_rate": 6.345619869651871e-05,
+ "loss": 1.1028,
+ "step": 607
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.3231857927563657,
+ "learning_rate": 6.33870885744182e-05,
+ "loss": 1.1202,
+ "step": 608
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.30205205129701157,
+ "learning_rate": 6.331787221410041e-05,
+ "loss": 1.1369,
+ "step": 609
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.3198359813888166,
+ "learning_rate": 6.32485499299877e-05,
+ "loss": 1.1763,
+ "step": 610
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.3128641370321787,
+ "learning_rate": 6.31791220369835e-05,
+ "loss": 1.0223,
+ "step": 611
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.2989105616213649,
+ "learning_rate": 6.31095888504711e-05,
+ "loss": 1.0358,
+ "step": 612
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.3103537906853337,
+ "learning_rate": 6.303995068631203e-05,
+ "loss": 1.1261,
+ "step": 613
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.28598715532508207,
+ "learning_rate": 6.297020786084467e-05,
+ "loss": 1.0629,
+ "step": 614
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.29809789918093255,
+ "learning_rate": 6.290036069088288e-05,
+ "loss": 1.035,
+ "step": 615
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.33765270252261453,
+ "learning_rate": 6.283040949371451e-05,
+ "loss": 1.1221,
+ "step": 616
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.3424617501293415,
+ "learning_rate": 6.276035458709993e-05,
+ "loss": 1.155,
+ "step": 617
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.3799189737987811,
+ "learning_rate": 6.269019628927067e-05,
+ "loss": 1.0701,
+ "step": 618
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.3358898935253196,
+ "learning_rate": 6.261993491892791e-05,
+ "loss": 1.1649,
+ "step": 619
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.31569979424117356,
+ "learning_rate": 6.254957079524099e-05,
+ "loss": 1.0633,
+ "step": 620
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.3002168156888237,
+ "learning_rate": 6.247910423784609e-05,
+ "loss": 1.0846,
+ "step": 621
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.3097238823450595,
+ "learning_rate": 6.24085355668447e-05,
+ "loss": 1.0808,
+ "step": 622
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.3120312761417578,
+ "learning_rate": 6.233786510280212e-05,
+ "loss": 1.0142,
+ "step": 623
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.3335343015064923,
+ "learning_rate": 6.22670931667461e-05,
+ "loss": 1.0674,
+ "step": 624
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.3234062304634526,
+ "learning_rate": 6.219622008016533e-05,
+ "loss": 1.0981,
+ "step": 625
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.32152678786547273,
+ "learning_rate": 6.212524616500798e-05,
+ "loss": 1.0244,
+ "step": 626
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.39031977608147594,
+ "learning_rate": 6.205417174368023e-05,
+ "loss": 1.1205,
+ "step": 627
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.3806189090017157,
+ "learning_rate": 6.198299713904485e-05,
+ "loss": 1.1134,
+ "step": 628
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.2978349276971668,
+ "learning_rate": 6.191172267441967e-05,
+ "loss": 1.0088,
+ "step": 629
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.3190354077382501,
+ "learning_rate": 6.184034867357617e-05,
+ "loss": 1.108,
+ "step": 630
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.32633048665038994,
+ "learning_rate": 6.176887546073797e-05,
+ "loss": 1.0825,
+ "step": 631
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.3428026413020903,
+ "learning_rate": 6.169730336057939e-05,
+ "loss": 1.0765,
+ "step": 632
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.3475737151929015,
+ "learning_rate": 6.162563269822391e-05,
+ "loss": 1.0693,
+ "step": 633
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.3870252154591392,
+ "learning_rate": 6.15538637992428e-05,
+ "loss": 1.1081,
+ "step": 634
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.33597355193652834,
+ "learning_rate": 6.148199698965352e-05,
+ "loss": 1.0893,
+ "step": 635
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.30805894179787247,
+ "learning_rate": 6.141003259591834e-05,
+ "loss": 1.0995,
+ "step": 636
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.3025073882734066,
+ "learning_rate": 6.133797094494281e-05,
+ "loss": 1.0388,
+ "step": 637
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.3524395196391662,
+ "learning_rate": 6.126581236407429e-05,
+ "loss": 1.1196,
+ "step": 638
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.3377646188130345,
+ "learning_rate": 6.119355718110039e-05,
+ "loss": 1.0382,
+ "step": 639
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.35508400659785483,
+ "learning_rate": 6.112120572424763e-05,
+ "loss": 1.1402,
+ "step": 640
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.3454418793700457,
+ "learning_rate": 6.104875832217982e-05,
+ "loss": 1.1032,
+ "step": 641
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.32629806837059866,
+ "learning_rate": 6.097621530399661e-05,
+ "loss": 1.0959,
+ "step": 642
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.3329536837751315,
+ "learning_rate": 6.090357699923202e-05,
+ "loss": 1.0467,
+ "step": 643
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.32302233828349475,
+ "learning_rate": 6.083084373785287e-05,
+ "loss": 1.0858,
+ "step": 644
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.3310358826507611,
+ "learning_rate": 6.075801585025739e-05,
+ "loss": 1.0715,
+ "step": 645
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.319322035854079,
+ "learning_rate": 6.068509366727362e-05,
+ "loss": 1.177,
+ "step": 646
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.3065230667302707,
+ "learning_rate": 6.061207752015797e-05,
+ "loss": 1.0649,
+ "step": 647
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.29926795565748227,
+ "learning_rate": 6.053896774059368e-05,
+ "loss": 1.1325,
+ "step": 648
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.3556069634279046,
+ "learning_rate": 6.046576466068931e-05,
+ "loss": 1.1366,
+ "step": 649
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.3189191131461966,
+ "learning_rate": 6.039246861297727e-05,
+ "loss": 1.0693,
+ "step": 650
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.3347197156648834,
+ "learning_rate": 6.031907993041227e-05,
+ "loss": 1.1009,
+ "step": 651
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.32274156348185445,
+ "learning_rate": 6.0245598946369826e-05,
+ "loss": 1.1675,
+ "step": 652
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.35534089035455224,
+ "learning_rate": 6.017202599464476e-05,
+ "loss": 1.1723,
+ "step": 653
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.3106026578570133,
+ "learning_rate": 6.009836140944965e-05,
+ "loss": 1.0954,
+ "step": 654
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.3309144454564729,
+ "learning_rate": 6.002460552541331e-05,
+ "loss": 1.0209,
+ "step": 655
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.3023619281400003,
+ "learning_rate": 5.9950758677579345e-05,
+ "loss": 1.0363,
+ "step": 656
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.3311182880219704,
+ "learning_rate": 5.987682120140451e-05,
+ "loss": 1.0515,
+ "step": 657
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.33396486010030413,
+ "learning_rate": 5.980279343275729e-05,
+ "loss": 1.1251,
+ "step": 658
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.3465764556678002,
+ "learning_rate": 5.97286757079163e-05,
+ "loss": 1.165,
+ "step": 659
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.304193441363374,
+ "learning_rate": 5.965446836356882e-05,
+ "loss": 1.0228,
+ "step": 660
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.3415149030413082,
+ "learning_rate": 5.9580171736809224e-05,
+ "loss": 1.0742,
+ "step": 661
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.33138658321132064,
+ "learning_rate": 5.950578616513746e-05,
+ "loss": 1.0843,
+ "step": 662
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.30774403421162994,
+ "learning_rate": 5.943131198645752e-05,
+ "loss": 1.065,
+ "step": 663
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.3428877492183819,
+ "learning_rate": 5.9356749539075885e-05,
+ "loss": 1.1101,
+ "step": 664
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.3621290546130101,
+ "learning_rate": 5.928209916170003e-05,
+ "loss": 1.1372,
+ "step": 665
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.3482375945469884,
+ "learning_rate": 5.9207361193436865e-05,
+ "loss": 1.132,
+ "step": 666
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.31754384974068384,
+ "learning_rate": 5.9132535973791156e-05,
+ "loss": 1.148,
+ "step": 667
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.36003834782050365,
+ "learning_rate": 5.9057623842664044e-05,
+ "loss": 1.1099,
+ "step": 668
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.2963701622969662,
+ "learning_rate": 5.8982625140351464e-05,
+ "loss": 1.0755,
+ "step": 669
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.32579569606066516,
+ "learning_rate": 5.8907540207542616e-05,
+ "loss": 1.0809,
+ "step": 670
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.4247563451753457,
+ "learning_rate": 5.8832369385318416e-05,
+ "loss": 1.097,
+ "step": 671
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.33076932102169776,
+ "learning_rate": 5.875711301514992e-05,
+ "loss": 1.1078,
+ "step": 672
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.3609238032332309,
+ "learning_rate": 5.8681771438896815e-05,
+ "loss": 1.1031,
+ "step": 673
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.325159585649425,
+ "learning_rate": 5.860634499880583e-05,
+ "loss": 1.0707,
+ "step": 674
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.4620687271068983,
+ "learning_rate": 5.853083403750922e-05,
+ "loss": 1.1017,
+ "step": 675
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.33485279064365936,
+ "learning_rate": 5.845523889802316e-05,
+ "loss": 1.0989,
+ "step": 676
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.30952573170841513,
+ "learning_rate": 5.8379559923746214e-05,
+ "loss": 1.0393,
+ "step": 677
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.33498605810588283,
+ "learning_rate": 5.830379745845781e-05,
+ "loss": 1.1259,
+ "step": 678
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.35771921163037307,
+ "learning_rate": 5.822795184631659e-05,
+ "loss": 1.0815,
+ "step": 679
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.3329650192347647,
+ "learning_rate": 5.815202343185894e-05,
+ "loss": 1.1344,
+ "step": 680
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3356634465845771,
+ "learning_rate": 5.807601255999736e-05,
+ "loss": 1.1297,
+ "step": 681
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3289442034151235,
+ "learning_rate": 5.7999919576018934e-05,
+ "loss": 1.022,
+ "step": 682
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3207007334784113,
+ "learning_rate": 5.7923744825583745e-05,
+ "loss": 1.0571,
+ "step": 683
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3582460325329284,
+ "learning_rate": 5.7847488654723304e-05,
+ "loss": 1.0778,
+ "step": 684
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3563317666176927,
+ "learning_rate": 5.777115140983899e-05,
+ "loss": 1.1003,
+ "step": 685
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 3.4694912945702105,
+ "learning_rate": 5.769473343770047e-05,
+ "loss": 1.121,
+ "step": 686
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.43002349520483113,
+ "learning_rate": 5.761823508544411e-05,
+ "loss": 1.0765,
+ "step": 687
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.39467783104839754,
+ "learning_rate": 5.754165670057142e-05,
+ "loss": 1.0788,
+ "step": 688
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.39629029674867916,
+ "learning_rate": 5.7464998630947464e-05,
+ "loss": 1.0812,
+ "step": 689
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.3880152093965208,
+ "learning_rate": 5.738826122479929e-05,
+ "loss": 1.1228,
+ "step": 690
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.3777874121959188,
+ "learning_rate": 5.7311444830714324e-05,
+ "loss": 1.0907,
+ "step": 691
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.38004041653523696,
+ "learning_rate": 5.723454979763882e-05,
+ "loss": 1.1263,
+ "step": 692
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.37049672627797636,
+ "learning_rate": 5.7157576474876246e-05,
+ "loss": 1.1438,
+ "step": 693
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.32973606103437614,
+ "learning_rate": 5.7080525212085725e-05,
+ "loss": 1.0553,
+ "step": 694
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.31674639252070325,
+ "learning_rate": 5.700339635928038e-05,
+ "loss": 1.06,
+ "step": 695
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.32282199426553837,
+ "learning_rate": 5.692619026682588e-05,
+ "loss": 1.0841,
+ "step": 696
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.4810882958061859,
+ "learning_rate": 5.684890728543869e-05,
+ "loss": 1.0803,
+ "step": 697
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.3995638550178378,
+ "learning_rate": 5.6771547766184566e-05,
+ "loss": 1.1187,
+ "step": 698
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.35264932960583484,
+ "learning_rate": 5.669411206047699e-05,
+ "loss": 1.0641,
+ "step": 699
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.35240640524733,
+ "learning_rate": 5.661660052007547e-05,
+ "loss": 1.076,
+ "step": 700
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.3540694609860389,
+ "learning_rate": 5.653901349708401e-05,
+ "loss": 1.1369,
+ "step": 701
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.3196055112925304,
+ "learning_rate": 5.646135134394955e-05,
+ "loss": 1.0677,
+ "step": 702
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.4214141007955914,
+ "learning_rate": 5.6383614413460266e-05,
+ "loss": 1.1139,
+ "step": 703
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.3625611311798579,
+ "learning_rate": 5.630580305874402e-05,
+ "loss": 1.1845,
+ "step": 704
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.3425208672181188,
+ "learning_rate": 5.62279176332668e-05,
+ "loss": 1.174,
+ "step": 705
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.3108419862818321,
+ "learning_rate": 5.6149958490830996e-05,
+ "loss": 1.0331,
+ "step": 706
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.3274644181571904,
+ "learning_rate": 5.607192598557394e-05,
+ "loss": 1.0664,
+ "step": 707
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.346218197215145,
+ "learning_rate": 5.599382047196617e-05,
+ "loss": 1.2088,
+ "step": 708
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.328497632267458,
+ "learning_rate": 5.591564230480989e-05,
+ "loss": 1.0287,
+ "step": 709
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.3708173720611468,
+ "learning_rate": 5.583739183923732e-05,
+ "loss": 1.0883,
+ "step": 710
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.3631427403535479,
+ "learning_rate": 5.575906943070915e-05,
+ "loss": 1.1155,
+ "step": 711
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.3305201458598695,
+ "learning_rate": 5.5680675435012834e-05,
+ "loss": 1.0958,
+ "step": 712
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.34978833532083714,
+ "learning_rate": 5.5602210208261036e-05,
+ "loss": 1.1437,
+ "step": 713
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.3510553882510229,
+ "learning_rate": 5.552367410688999e-05,
+ "loss": 1.0941,
+ "step": 714
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.3523747462465078,
+ "learning_rate": 5.544506748765789e-05,
+ "loss": 1.1289,
+ "step": 715
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.38262637783927445,
+ "learning_rate": 5.5366390707643266e-05,
+ "loss": 1.099,
+ "step": 716
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.38620065989073454,
+ "learning_rate": 5.528764412424334e-05,
+ "loss": 1.083,
+ "step": 717
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.3401355276121096,
+ "learning_rate": 5.520882809517245e-05,
+ "loss": 1.028,
+ "step": 718
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.3392061008943934,
+ "learning_rate": 5.512994297846039e-05,
+ "loss": 1.1083,
+ "step": 719
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.34219480421015414,
+ "learning_rate": 5.505098913245077e-05,
+ "loss": 1.1108,
+ "step": 720
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.3275058061553761,
+ "learning_rate": 5.497196691579945e-05,
+ "loss": 1.111,
+ "step": 721
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.36800249746509384,
+ "learning_rate": 5.489287668747283e-05,
+ "loss": 1.1221,
+ "step": 722
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.4129005533101575,
+ "learning_rate": 5.481371880674628e-05,
+ "loss": 1.0966,
+ "step": 723
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.36563906596251655,
+ "learning_rate": 5.4734493633202505e-05,
+ "loss": 1.0927,
+ "step": 724
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.3614650536839971,
+ "learning_rate": 5.465520152672986e-05,
+ "loss": 1.13,
+ "step": 725
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.36419665098633497,
+ "learning_rate": 5.4575842847520765e-05,
+ "loss": 1.1183,
+ "step": 726
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.34490689807258995,
+ "learning_rate": 5.449641795607005e-05,
+ "loss": 1.0919,
+ "step": 727
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.3627643746876298,
+ "learning_rate": 5.441692721317334e-05,
+ "loss": 1.0411,
+ "step": 728
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.323620411949565,
+ "learning_rate": 5.433737097992537e-05,
+ "loss": 1.0725,
+ "step": 729
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3521599501824965,
+ "learning_rate": 5.425774961771838e-05,
+ "loss": 1.0926,
+ "step": 730
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3302390546764222,
+ "learning_rate": 5.417806348824047e-05,
+ "loss": 1.0468,
+ "step": 731
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3833325802616019,
+ "learning_rate": 5.4098312953473956e-05,
+ "loss": 1.1291,
+ "step": 732
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3708621126835512,
+ "learning_rate": 5.401849837569372e-05,
+ "loss": 1.0887,
+ "step": 733
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3625834373416278,
+ "learning_rate": 5.393862011746555e-05,
+ "loss": 1.0981,
+ "step": 734
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3583343965080617,
+ "learning_rate": 5.385867854164451e-05,
+ "loss": 1.1021,
+ "step": 735
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.34598320594096066,
+ "learning_rate": 5.377867401137332e-05,
+ "loss": 1.1376,
+ "step": 736
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.3046382791315433,
+ "learning_rate": 5.369860689008066e-05,
+ "loss": 1.0206,
+ "step": 737
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.34464948380043725,
+ "learning_rate": 5.3618477541479505e-05,
+ "loss": 1.1084,
+ "step": 738
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.3203242519627101,
+ "learning_rate": 5.353828632956557e-05,
+ "loss": 1.0731,
+ "step": 739
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.3431169960355163,
+ "learning_rate": 5.3458033618615516e-05,
+ "loss": 1.091,
+ "step": 740
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.33492074521678705,
+ "learning_rate": 5.337771977318543e-05,
+ "loss": 1.1112,
+ "step": 741
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.32576546585541344,
+ "learning_rate": 5.3297345158109086e-05,
+ "loss": 1.0993,
+ "step": 742
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.3410007245037574,
+ "learning_rate": 5.3216910138496286e-05,
+ "loss": 1.094,
+ "step": 743
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.34891180680896833,
+ "learning_rate": 5.313641507973128e-05,
+ "loss": 1.1331,
+ "step": 744
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.37135766946717214,
+ "learning_rate": 5.3055860347471006e-05,
+ "loss": 1.1,
+ "step": 745
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.3465019415478411,
+ "learning_rate": 5.297524630764349e-05,
+ "loss": 1.1256,
+ "step": 746
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.37035388481626563,
+ "learning_rate": 5.289457332644615e-05,
+ "loss": 1.0366,
+ "step": 747
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.33853883270759155,
+ "learning_rate": 5.281384177034421e-05,
+ "loss": 1.0547,
+ "step": 748
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.364306618627317,
+ "learning_rate": 5.2733052006068897e-05,
+ "loss": 1.0768,
+ "step": 749
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.4021754315731627,
+ "learning_rate": 5.2652204400615916e-05,
+ "loss": 1.1382,
+ "step": 750
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.3332185389039008,
+ "learning_rate": 5.257129932124368e-05,
+ "loss": 1.0815,
+ "step": 751
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.3453105709879854,
+ "learning_rate": 5.249033713547173e-05,
+ "loss": 1.1109,
+ "step": 752
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.3385397539717797,
+ "learning_rate": 5.2409318211078966e-05,
+ "loss": 1.0529,
+ "step": 753
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.33197994450130447,
+ "learning_rate": 5.232824291610206e-05,
+ "loss": 1.0721,
+ "step": 754
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.32836289576124167,
+ "learning_rate": 5.224711161883375e-05,
+ "loss": 1.0459,
+ "step": 755
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.32491620058831744,
+ "learning_rate": 5.216592468782117e-05,
+ "loss": 1.0897,
+ "step": 756
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.3137879047811153,
+ "learning_rate": 5.2084682491864155e-05,
+ "loss": 1.096,
+ "step": 757
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.3356938043023012,
+ "learning_rate": 5.200338540001364e-05,
+ "loss": 1.0827,
+ "step": 758
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.36044340490819055,
+ "learning_rate": 5.192203378156984e-05,
+ "loss": 1.0617,
+ "step": 759
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.34674262047888293,
+ "learning_rate": 5.184062800608077e-05,
+ "loss": 1.1267,
+ "step": 760
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.32469442322149333,
+ "learning_rate": 5.1759168443340375e-05,
+ "loss": 1.1483,
+ "step": 761
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.3290384307774216,
+ "learning_rate": 5.167765546338698e-05,
+ "loss": 1.047,
+ "step": 762
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.31637612188770403,
+ "learning_rate": 5.1596089436501525e-05,
+ "loss": 1.0311,
+ "step": 763
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.3168693829641207,
+ "learning_rate": 5.151447073320597e-05,
+ "loss": 1.1405,
+ "step": 764
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.34322421571238926,
+ "learning_rate": 5.143279972426153e-05,
+ "loss": 1.1428,
+ "step": 765
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.3291030435830325,
+ "learning_rate": 5.1351076780667026e-05,
+ "loss": 1.0473,
+ "step": 766
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.33772039158758044,
+ "learning_rate": 5.1269302273657195e-05,
+ "loss": 1.0909,
+ "step": 767
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.3802031736890876,
+ "learning_rate": 5.118747657470102e-05,
+ "loss": 1.1482,
+ "step": 768
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.3296067628997962,
+ "learning_rate": 5.1105600055500025e-05,
+ "loss": 1.0085,
+ "step": 769
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.3707139982828035,
+ "learning_rate": 5.102367308798658e-05,
+ "loss": 1.0746,
+ "step": 770
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.3378537316757011,
+ "learning_rate": 5.094169604432225e-05,
+ "loss": 1.0482,
+ "step": 771
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.4008417246255145,
+ "learning_rate": 5.085966929689601e-05,
+ "loss": 1.1065,
+ "step": 772
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.3244385106988064,
+ "learning_rate": 5.077759321832271e-05,
+ "loss": 1.0827,
+ "step": 773
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.37228575732812336,
+ "learning_rate": 5.0695468181441215e-05,
+ "loss": 1.1146,
+ "step": 774
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.33761714797540276,
+ "learning_rate": 5.061329455931283e-05,
+ "loss": 1.092,
+ "step": 775
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.3158158390913494,
+ "learning_rate": 5.053107272521955e-05,
+ "loss": 1.1058,
+ "step": 776
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.3691501929738938,
+ "learning_rate": 5.044880305266239e-05,
+ "loss": 1.1599,
+ "step": 777
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.33730914019805525,
+ "learning_rate": 5.0366485915359645e-05,
+ "loss": 1.0615,
+ "step": 778
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.34970059240017,
+ "learning_rate": 5.0284121687245257e-05,
+ "loss": 1.1475,
+ "step": 779
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 0.3374028029407197,
+ "learning_rate": 5.020171074246707e-05,
+ "loss": 1.0926,
+ "step": 780
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 0.3350020681123992,
+ "learning_rate": 5.011925345538514e-05,
+ "loss": 1.1276,
+ "step": 781
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 0.3224228965786606,
+ "learning_rate": 5.003675020057003e-05,
+ "loss": 1.0183,
+ "step": 782
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 0.3357310714740298,
+ "learning_rate": 4.995420135280114e-05,
+ "loss": 1.1114,
+ "step": 783
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 0.3590203255363759,
+ "learning_rate": 4.9871607287064966e-05,
+ "loss": 1.1504,
+ "step": 784
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.33011195419611655,
+ "learning_rate": 4.9788968378553396e-05,
+ "loss": 1.0826,
+ "step": 785
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.31088868195439445,
+ "learning_rate": 4.970628500266207e-05,
+ "loss": 1.0704,
+ "step": 786
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.3144996103179409,
+ "learning_rate": 4.962355753498858e-05,
+ "loss": 1.1403,
+ "step": 787
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.3147269555419068,
+ "learning_rate": 4.954078635133081e-05,
+ "loss": 1.0898,
+ "step": 788
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.3280151747783868,
+ "learning_rate": 4.945797182768524e-05,
+ "loss": 1.1115,
+ "step": 789
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.3551996569232493,
+ "learning_rate": 4.937511434024524e-05,
+ "loss": 1.1731,
+ "step": 790
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 0.343863208057807,
+ "learning_rate": 4.9292214265399336e-05,
+ "loss": 1.0866,
+ "step": 791
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 0.37316699385322466,
+ "learning_rate": 4.920927197972949e-05,
+ "loss": 1.1083,
+ "step": 792
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 0.3635739774067832,
+ "learning_rate": 4.9126287860009453e-05,
+ "loss": 1.1393,
+ "step": 793
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 0.3755910554972886,
+ "learning_rate": 4.9043262283202974e-05,
+ "loss": 1.1624,
+ "step": 794
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 0.3635899120146823,
+ "learning_rate": 4.8960195626462145e-05,
+ "loss": 1.2095,
+ "step": 795
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 0.3642202684342816,
+ "learning_rate": 4.8877088267125664e-05,
+ "loss": 1.1099,
+ "step": 796
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 0.3339946548799316,
+ "learning_rate": 4.879394058271712e-05,
+ "loss": 1.1157,
+ "step": 797
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 0.3457189703100475,
+ "learning_rate": 4.871075295094329e-05,
+ "loss": 1.129,
+ "step": 798
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 0.3550931839691424,
+ "learning_rate": 4.862752574969241e-05,
+ "loss": 1.076,
+ "step": 799
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 0.36139108917966734,
+ "learning_rate": 4.8544259357032475e-05,
+ "loss": 1.1577,
+ "step": 800
+ },
+ {
+ "epoch": 1.47,
+ "grad_norm": 0.32133637199528886,
+ "learning_rate": 4.8460954151209486e-05,
+ "loss": 1.0148,
+ "step": 801
+ },
+ {
+ "epoch": 1.47,
+ "grad_norm": 1.043434839098495,
+ "learning_rate": 4.837761051064579e-05,
+ "loss": 1.1145,
+ "step": 802
+ },
+ {
+ "epoch": 1.47,
+ "grad_norm": 0.36683258883118186,
+ "learning_rate": 4.8294228813938285e-05,
+ "loss": 1.0952,
+ "step": 803
+ },
+ {
+ "epoch": 1.47,
+ "grad_norm": 0.4664840097151361,
+ "learning_rate": 4.8210809439856804e-05,
+ "loss": 1.1644,
+ "step": 804
+ },
+ {
+ "epoch": 1.47,
+ "grad_norm": 0.355267647636684,
+ "learning_rate": 4.8127352767342276e-05,
+ "loss": 1.1062,
+ "step": 805
+ },
+ {
+ "epoch": 1.47,
+ "grad_norm": 0.369440033402877,
+ "learning_rate": 4.8043859175505095e-05,
+ "loss": 1.0846,
+ "step": 806
+ },
+ {
+ "epoch": 1.48,
+ "grad_norm": 0.3530796042687365,
+ "learning_rate": 4.7960329043623344e-05,
+ "loss": 1.1608,
+ "step": 807
+ },
+ {
+ "epoch": 1.48,
+ "grad_norm": 0.4321129927586221,
+ "learning_rate": 4.787676275114111e-05,
+ "loss": 1.1339,
+ "step": 808
+ },
+ {
+ "epoch": 1.48,
+ "grad_norm": 0.349782342376565,
+ "learning_rate": 4.779316067766673e-05,
+ "loss": 1.0685,
+ "step": 809
+ },
+ {
+ "epoch": 1.48,
+ "grad_norm": 0.36418077406249405,
+ "learning_rate": 4.770952320297109e-05,
+ "loss": 1.1467,
+ "step": 810
+ },
+ {
+ "epoch": 1.48,
+ "grad_norm": 0.35831016374198177,
+ "learning_rate": 4.7625850706985886e-05,
+ "loss": 1.064,
+ "step": 811
+ },
+ {
+ "epoch": 1.49,
+ "grad_norm": 0.3596998725696811,
+ "learning_rate": 4.7542143569801894e-05,
+ "loss": 1.119,
+ "step": 812
+ },
+ {
+ "epoch": 1.49,
+ "grad_norm": 0.3437403316058801,
+ "learning_rate": 4.745840217166725e-05,
+ "loss": 1.0451,
+ "step": 813
+ },
+ {
+ "epoch": 1.49,
+ "grad_norm": 0.36614738184483053,
+ "learning_rate": 4.737462689298577e-05,
+ "loss": 1.1388,
+ "step": 814
+ },
+ {
+ "epoch": 1.49,
+ "grad_norm": 0.4127884784033637,
+ "learning_rate": 4.7290818114315086e-05,
+ "loss": 1.1786,
+ "step": 815
+ },
+ {
+ "epoch": 1.49,
+ "grad_norm": 0.4110838984805364,
+ "learning_rate": 4.72069762163651e-05,
+ "loss": 1.0709,
+ "step": 816
+ },
+ {
+ "epoch": 1.49,
+ "grad_norm": 0.42581764215872087,
+ "learning_rate": 4.7123101579996106e-05,
+ "loss": 1.1019,
+ "step": 817
+ },
+ {
+ "epoch": 1.5,
+ "grad_norm": 0.39442487793142056,
+ "learning_rate": 4.7039194586217136e-05,
+ "loss": 1.1532,
+ "step": 818
+ },
+ {
+ "epoch": 1.5,
+ "grad_norm": 0.3362730343659587,
+ "learning_rate": 4.695525561618418e-05,
+ "loss": 1.1149,
+ "step": 819
+ },
+ {
+ "epoch": 1.5,
+ "grad_norm": 0.5554581175413662,
+ "learning_rate": 4.687128505119853e-05,
+ "loss": 1.0697,
+ "step": 820
+ },
+ {
+ "epoch": 1.5,
+ "grad_norm": 0.369476979421362,
+ "learning_rate": 4.6787283272704966e-05,
+ "loss": 1.1038,
+ "step": 821
+ },
+ {
+ "epoch": 1.5,
+ "grad_norm": 0.4002553035181225,
+ "learning_rate": 4.670325066229009e-05,
+ "loss": 1.0714,
+ "step": 822
+ },
+ {
+ "epoch": 1.51,
+ "grad_norm": 0.5196234078753172,
+ "learning_rate": 4.661918760168052e-05,
+ "loss": 1.1027,
+ "step": 823
+ },
+ {
+ "epoch": 1.51,
+ "grad_norm": 0.39318954902323083,
+ "learning_rate": 4.653509447274121e-05,
+ "loss": 1.1562,
+ "step": 824
+ },
+ {
+ "epoch": 1.51,
+ "grad_norm": 0.3822202885742058,
+ "learning_rate": 4.6450971657473743e-05,
+ "loss": 1.0662,
+ "step": 825
+ },
+ {
+ "epoch": 1.51,
+ "grad_norm": 0.4203426346301331,
+ "learning_rate": 4.63668195380145e-05,
+ "loss": 1.0713,
+ "step": 826
+ },
+ {
+ "epoch": 1.51,
+ "grad_norm": 0.3850673744429753,
+ "learning_rate": 4.628263849663301e-05,
+ "loss": 1.123,
+ "step": 827
+ },
+ {
+ "epoch": 1.51,
+ "grad_norm": 0.3615517608466556,
+ "learning_rate": 4.619842891573016e-05,
+ "loss": 1.0565,
+ "step": 828
+ },
+ {
+ "epoch": 1.52,
+ "grad_norm": 0.34448279386839303,
+ "learning_rate": 4.6114191177836514e-05,
+ "loss": 1.0703,
+ "step": 829
+ },
+ {
+ "epoch": 1.52,
+ "grad_norm": 0.3649438531637055,
+ "learning_rate": 4.6029925665610524e-05,
+ "loss": 1.061,
+ "step": 830
+ },
+ {
+ "epoch": 1.52,
+ "grad_norm": 0.360215034359632,
+ "learning_rate": 4.59456327618368e-05,
+ "loss": 1.075,
+ "step": 831
+ },
+ {
+ "epoch": 1.52,
+ "grad_norm": 0.3297320285815758,
+ "learning_rate": 4.5861312849424386e-05,
+ "loss": 1.0177,
+ "step": 832
+ },
+ {
+ "epoch": 1.52,
+ "grad_norm": 0.3687310180731473,
+ "learning_rate": 4.5776966311405035e-05,
+ "loss": 1.078,
+ "step": 833
+ },
+ {
+ "epoch": 1.53,
+ "grad_norm": 0.40273770874860215,
+ "learning_rate": 4.5692593530931416e-05,
+ "loss": 1.1237,
+ "step": 834
+ },
+ {
+ "epoch": 1.53,
+ "grad_norm": 0.41430404774660234,
+ "learning_rate": 4.560819489127545e-05,
+ "loss": 1.0891,
+ "step": 835
+ },
+ {
+ "epoch": 1.53,
+ "grad_norm": 0.39133407251294566,
+ "learning_rate": 4.552377077582646e-05,
+ "loss": 1.1244,
+ "step": 836
+ },
+ {
+ "epoch": 1.53,
+ "grad_norm": 0.45340195323000915,
+ "learning_rate": 4.543932156808959e-05,
+ "loss": 1.1659,
+ "step": 837
+ },
+ {
+ "epoch": 1.53,
+ "grad_norm": 0.37488623560038786,
+ "learning_rate": 4.535484765168386e-05,
+ "loss": 1.1026,
+ "step": 838
+ },
+ {
+ "epoch": 1.53,
+ "grad_norm": 0.36994669175389283,
+ "learning_rate": 4.527034941034063e-05,
+ "loss": 1.1481,
+ "step": 839
+ },
+ {
+ "epoch": 1.54,
+ "grad_norm": 19.06482385633655,
+ "learning_rate": 4.51858272279017e-05,
+ "loss": 1.1897,
+ "step": 840
+ },
+ {
+ "epoch": 1.54,
+ "grad_norm": 0.7974039643314161,
+ "learning_rate": 4.5101281488317634e-05,
+ "loss": 1.1467,
+ "step": 841
+ },
+ {
+ "epoch": 1.54,
+ "grad_norm": 1211.6099030928651,
+ "learning_rate": 4.501671257564602e-05,
+ "loss": 1.9989,
+ "step": 842
+ },
+ {
+ "epoch": 1.54,
+ "grad_norm": 1.438002431348562,
+ "learning_rate": 4.49321208740497e-05,
+ "loss": 1.1715,
+ "step": 843
+ },
+ {
+ "epoch": 1.54,
+ "grad_norm": 5.653373858566177,
+ "learning_rate": 4.484750676779504e-05,
+ "loss": 1.1828,
+ "step": 844
+ },
+ {
+ "epoch": 1.55,
+ "grad_norm": 625.3410428452096,
+ "learning_rate": 4.4762870641250185e-05,
+ "loss": 1.3283,
+ "step": 845
+ },
+ {
+ "epoch": 1.55,
+ "grad_norm": 5.653780923251844,
+ "learning_rate": 4.467821287888331e-05,
+ "loss": 1.1532,
+ "step": 846
+ },
+ {
+ "epoch": 1.55,
+ "grad_norm": 177.17210534716736,
+ "learning_rate": 4.459353386526086e-05,
+ "loss": 1.1178,
+ "step": 847
+ },
+ {
+ "epoch": 1.55,
+ "grad_norm": 4.805542680722976,
+ "learning_rate": 4.450883398504584e-05,
+ "loss": 1.1057,
+ "step": 848
+ },
+ {
+ "epoch": 1.55,
+ "grad_norm": 2.055060058990183,
+ "learning_rate": 4.442411362299602e-05,
+ "loss": 1.1423,
+ "step": 849
+ },
+ {
+ "epoch": 1.55,
+ "grad_norm": 750.168576149713,
+ "learning_rate": 4.433937316396224e-05,
+ "loss": 1.6724,
+ "step": 850
+ }
+ ],
+ "logging_steps": 1.0,
+ "max_steps": 1638,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 3,
+ "save_steps": 50,
+ "total_flos": 881390103035904.0,
+ "train_batch_size": 1,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/v1/checkpoint-850/training_args.bin b/v1/checkpoint-850/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..c5d2416a3b70bb5260978ec9996f00154a724ba7
--- /dev/null
+++ b/v1/checkpoint-850/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b22e8f9d51a16d03a2c506fa3d1eafa8f4b1ae992992c2086a4d435ffd97387e
+size 6712
diff --git a/v1/checkpoint-850/zero_to_fp32.py b/v1/checkpoint-850/zero_to_fp32.py
new file mode 100755
index 0000000000000000000000000000000000000000..24cc342e78d1a006c782b3a4cd68d9ce786d8fd8
--- /dev/null
+++ b/v1/checkpoint-850/zero_to_fp32.py
@@ -0,0 +1,604 @@
+#!/usr/bin/env python
+
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+# This script extracts fp32 consolidated weights from a zero 1, 2 and 3 DeepSpeed checkpoints. It gets
+# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
+# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
+# application.
+#
+# example: python zero_to_fp32.py . pytorch_model.bin
+
+import argparse
+import torch
+import glob
+import math
+import os
+import re
+from collections import OrderedDict
+from dataclasses import dataclass
+
+# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
+# DeepSpeed data structures it has to be available in the current python environment.
+from deepspeed.utils import logger
+from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
+
+
@dataclass
class zero_model_state:
    # Per-rank slice of a *_model_states.pt checkpoint, holding just the
    # pieces this script needs to reconstruct a consolidated fp32 state_dict.
    buffers: dict  # module buffers restored to fp32, keyed by name
    param_shapes: dict  # per-param-group {name: shape} of trainable params
    shared_params: list  # [alias_name, source_name] pairs for tied weights
    ds_version: int  # deepspeed version that wrote the checkpoint (may be None)
    frozen_param_shapes: dict  # {name: shape} of frozen params, or None
    frozen_param_fragments: dict  # {name: tensor fragment} of frozen params, or None
+
+
# Verbosity flag for the extra diagnostic prints below; overwritten from the
# --debug CLI flag in __main__.
debug = 0

# load to cpu: every checkpoint tensor is materialized on the host so the
# conversion does not require a GPU.
device = torch.device('cpu')
+
+
def atoi(text):
    """Return *text* converted to ``int`` when it is purely digits, else unchanged."""
    if text.isdigit():
        return int(text)
    return text
+
+
def natural_keys(text):
    """Sort key implementing human ("natural") ordering.

    alist.sort(key=natural_keys) sorts in human order, so "step2" comes
    before "step10" (digit runs compare numerically, the rest lexically).
    http://nedbatchelder.com/blog/200712/human_sorting.html
    (See Toothy's implementation in the comments)
    """
    chunks = re.split(r'(\d+)', text)
    return [int(chunk) if chunk.isdigit() else chunk for chunk in chunks]
+
+
def get_model_state_file(checkpoint_dir, zero_stage):
    """Return the path of the rank-0 model states file in *checkpoint_dir*.

    ZeRO stages 1/2 and stage 3 use different file names.

    Raises:
        FileNotFoundError: if the directory or the expected file is missing.
        ValueError: if *zero_stage* is not a recognized ZeRO stage.
    """
    if not os.path.isdir(checkpoint_dir):
        raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")

    # there should be only one file
    if zero_stage <= 2:
        file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
    elif zero_stage == 3:
        file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
    else:
        # previously any other stage fell through and crashed with UnboundLocalError
        raise ValueError(f"unknown zero stage {zero_stage}")

    if not os.path.exists(file):
        raise FileNotFoundError(f"can't find model states file at '{file}'")

    return file
+
+
def get_checkpoint_files(checkpoint_dir, glob_pattern):
    """Return the files matching *glob_pattern* under *checkpoint_dir*, naturally sorted."""
    # XXX: need to test that this simple glob rule works for multi-node setup too
    matches = glob.glob(os.path.join(checkpoint_dir, glob_pattern))
    if not matches:
        raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
    return sorted(matches, key=natural_keys)
+
+
def get_optim_files(checkpoint_dir):
    """Return the naturally-sorted per-rank optimizer-states files in *checkpoint_dir*."""
    pattern = "*_optim_states.pt"
    return get_checkpoint_files(checkpoint_dir, pattern)
+
+
def get_model_state_files(checkpoint_dir):
    """Return the naturally-sorted per-rank model-states files in *checkpoint_dir*."""
    pattern = "*_model_states.pt"
    return get_checkpoint_files(checkpoint_dir, pattern)
+
+
def parse_model_states(files):
    """Load each rank's model-states checkpoint into a ``zero_model_state``.

    Args:
        - ``files``: list of *_model_states.pt paths, one per rank, in rank order.

    Returns:
        list of ``zero_model_state`` objects, one per input file.

    Raises:
        ValueError: if a file is not a model-states checkpoint (no buffer names).
    """
    zero_model_states = []
    for file in files:
        state_dict = torch.load(file, map_location=device)

        if BUFFER_NAMES not in state_dict:
            raise ValueError(f"{file} is not a model state checkpoint")
        buffer_names = state_dict[BUFFER_NAMES]
        if debug:
            print("Found buffers:", buffer_names)

        # recover just the buffers while restoring them to fp32 if they were saved in fp16
        buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
        param_shapes = state_dict[PARAM_SHAPES]

        # NOTE: earlier versions also collected a `param_names` list from
        # param_shapes and frozen_param_shapes here, but it was never used —
        # removed as dead code.
        frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
        if frozen_param_shapes is not None and debug:
            print(f"Found frozen_param_shapes: {frozen_param_shapes}")

        # handle shared params
        shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]

        ds_version = state_dict.get(DS_VERSION, None)

        frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)

        z_model_state = zero_model_state(buffers=buffers,
                                         param_shapes=param_shapes,
                                         shared_params=shared_params,
                                         ds_version=ds_version,
                                         frozen_param_shapes=frozen_param_shapes,
                                         frozen_param_fragments=frozen_param_fragments)
        zero_model_states.append(z_model_state)

    return zero_model_states
+
+
def parse_optim_states(files, ds_checkpoint_dir):
    """Load every rank's optimizer-states file and extract the fp32 master weights.

    Args:
        - ``files``: list of *_optim_states.pt paths, one per rank.
        - ``ds_checkpoint_dir``: checkpoint folder, used only for error messages.

    Returns:
        tuple ``(zero_stage, world_size, fp32_flat_groups)`` where
        ``fp32_flat_groups[rank]`` is that rank's flattened fp32 partition(s).

    Raises:
        ValueError: if the files are not a zero checkpoint, the stage is
            unknown, or the file count does not match the saved world size.
    """

    total_files = len(files)
    state_dicts = []
    for f in files:
        state_dict = torch.load(f, map_location=device)
        # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights
        # and also handle the case where it was already removed by another helper script
        # NOTE(review): literal "optimizer_state_dict" keys are used here rather
        # than the OPTIMIZER_STATE_DICT constant — presumably they match; verify
        # against deepspeed.checkpoint.constants.
        state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
        state_dicts.append(state_dict)

    if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]:
        raise ValueError(f"{files[0]} is not a zero checkpoint")
    zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
    world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]

    # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
    # parameters can be different from data parallelism for non-expert parameters. So we can just
    # use the max of the partition_count to get the dp world_size.

    if type(world_size) is list:
        world_size = max(world_size)

    if world_size != total_files:
        raise ValueError(
            f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
            "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
        )

    # the groups are named differently in each stage
    if zero_stage <= 2:
        fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
    elif zero_stage == 3:
        fp32_groups_key = FP32_FLAT_GROUPS
    else:
        raise ValueError(f"unknown zero stage {zero_stage}")

    if zero_stage <= 2:
        # stage 1/2: one fp32 partition per param group, kept as a list per rank
        fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
    elif zero_stage == 3:
        # if there is more than one param group, there will be multiple flattened tensors - one
        # flattened tensor per group - for simplicity merge them into a single tensor
        #
        # XXX: could make the script more memory efficient for when there are multiple groups - it
        # will require matching the sub-lists of param_shapes for each param group flattened tensor

        fp32_flat_groups = [
            torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
        ]

    return zero_stage, world_size, fp32_flat_groups
+
+
def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
    """
    Returns fp32 state_dict reconstructed from ds checkpoint

    Args:
        - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)

    """
    print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")

    # the optimizer files carry the stage, world size, and the fp32 partitions
    optim_files = get_optim_files(ds_checkpoint_dir)
    zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
    print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")

    # the model-states files carry buffers, shapes, and shared/frozen metadata
    model_files = get_model_state_files(ds_checkpoint_dir)
    zero_model_states = parse_model_states(model_files)
    print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')

    # dispatch on the detected stage; parse_optim_states already rejected unknown stages
    if zero_stage == 3:
        return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                                          exclude_frozen_parameters)
    if zero_stage <= 2:
        return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                                          exclude_frozen_parameters)
+
+
def _zero2_merge_frozen_params(state_dict, zero_model_states):
    """Insert frozen (non-trainable) params into *state_dict* for a ZeRO-1/2 checkpoint.

    Frozen params are not partitioned under ZeRO-1/2, so rank 0's fragments are
    used as-is. No-op when the checkpoint has no frozen params.
    """
    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
    if frozen_param_shapes is None or len(frozen_param_shapes) == 0:
        return

    frozen_param_fragments = zero_model_states[0].frozen_param_fragments

    if debug:
        num_elem = sum(s.numel() for s in frozen_param_shapes.values())
        print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

    wanted_params = len(frozen_param_shapes)
    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
    avail_numel = sum(p.numel() for p in frozen_param_fragments.values())
    print(f'Frozen params: Have {avail_numel} numels to process.')
    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params, total_numel = 0, 0
    for name, shape in frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel
        state_dict[name] = frozen_param_fragments[name]
        if debug:
            print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _has_callable(obj, fn):
+ attr = getattr(obj, fn, None)
+ return callable(attr)
+
+
def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    """Reassemble full trainable fp32 params from per-rank ZeRO-1/2 partitions.

    Concatenates each param group's per-rank flat partitions into one vector,
    then slices consecutive spans back into individually shaped tensors in
    *state_dict*. Mutates *state_dict* in place.
    """
    param_shapes = zero_model_states[0].param_shapes

    # Reconstruction protocol:
    #
    # XXX: document this

    if debug:
        for i in range(world_size):
            for j in range(len(fp32_flat_groups[0])):
                print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")

    # XXX: memory usage doubles here (zero2)
    num_param_groups = len(fp32_flat_groups[0])
    merged_single_partition_of_fp32_groups = []
    for i in range(num_param_groups):
        # one full flat vector per param group: concat this group's shard from every rank
        merged_partitions = [sd[i] for sd in fp32_flat_groups]
        full_single_fp32_vector = torch.cat(merged_partitions, 0)
        merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
    avail_numel = sum(
        [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])

    if debug:
        wanted_params = sum([len(shapes) for shapes in param_shapes])
        wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
        # not asserting if there is a mismatch due to possible padding
        print(f"Have {avail_numel} numels to process.")
        print(f"Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    total_numel = 0
    total_params = 0
    for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
        offset = 0
        avail_numel = full_single_fp32_vector.numel()
        for name, shape in shapes.items():

            # shape may be a tensor-like with .numel() or a plain tuple/list of dims
            unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
            total_numel += unpartitioned_numel
            total_params += 1

            if debug:
                print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
            # carve this param's span out of the flat vector and restore its shape
            state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
            offset += unpartitioned_numel

        # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
        # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
        # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
        # live optimizer object, so we are checking that the numbers are within the right range
        align_to = 2 * world_size

        def zero2_align(x):
            # round x up to the next multiple of align_to
            return align_to * math.ceil(x / align_to)

        if debug:
            print(f"original offset={offset}, avail_numel={avail_numel}")

        offset = zero2_align(offset)
        avail_numel = zero2_align(avail_numel)

        if debug:
            print(f"aligned offset={offset}, avail_numel={avail_numel}")

        # Sanity check
        if offset != avail_numel:
            raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
+
+
def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                               exclude_frozen_parameters):
    """Build the consolidated fp32 state_dict from a ZeRO-1/2 checkpoint."""
    state_dict = OrderedDict()

    # module buffers come straight from rank 0
    buffers = zero_model_states[0].buffers
    state_dict.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    if not exclude_frozen_parameters:
        _zero2_merge_frozen_params(state_dict, zero_model_states)

    _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters: tied weights alias an already-restored tensor
    for alias, source in zero_model_states[0].shared_params:
        if source in state_dict:
            state_dict[alias] = state_dict[source]

    return state_dict
+
+
def zero3_partitioned_param_info(unpartitioned_numel, world_size):
    """Return (partitioned_numel, padding_numel) for one param under ZeRO-3.

    Each rank holds ceil(unpartitioned_numel / world_size) elements;
    padding_numel is the number of pad elements needed to make the total
    evenly divisible across ranks (0 when it already divides evenly).
    """
    partitioned_numel = math.ceil(unpartitioned_numel / world_size)
    leftover = unpartitioned_numel % world_size
    padding_numel = world_size - leftover if leftover else 0
    return partitioned_numel, padding_numel
+
+
def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
    """Reassemble frozen (non-trainable) params from per-rank ZeRO-3 fragments.

    Under ZeRO-3 frozen params are partitioned across ranks, so each param is
    rebuilt by concatenating every rank's fragment and trimming padding.
    Mutates *state_dict* in place; no-op when there are no frozen params.
    """
    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
        return

    if debug:
        for i in range(world_size):
            num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
            print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
    wanted_params = len(frozen_param_shapes)
    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
    # every rank holds an equally-sized fragment, hence the * world_size
    avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
    print(f'Frozen params: Have {avail_numel} numels to process.')
    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params = 0
    total_numel = 0
    for name, shape in zero_model_states[0].frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        # concat all ranks' fragments, drop trailing padding, restore the shape
        param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
        state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)

        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    """Reassemble full trainable fp32 params from per-rank ZeRO-3 flat shards.

    Walks the params in order, slicing each rank's shard at the same offset,
    concatenating the slices, and trimming padding. Mutates *state_dict*.
    """
    param_shapes = zero_model_states[0].param_shapes
    # NOTE(review): this avail_numel is dead — it is recomputed identically below
    avail_numel = fp32_flat_groups[0].numel() * world_size
    # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
    # param, re-consolidating each param, while dealing with padding if any

    # merge list of dicts, preserving order
    param_shapes = {k: v for d in param_shapes for k, v in d.items()}

    if debug:
        for i in range(world_size):
            print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")

    wanted_params = len(param_shapes)
    wanted_numel = sum(shape.numel() for shape in param_shapes.values())
    # not asserting if there is a mismatch due to possible padding
    avail_numel = fp32_flat_groups[0].numel() * world_size
    print(f"Trainable params: Have {avail_numel} numels to process.")
    print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    offset = 0
    total_numel = 0
    total_params = 0
    for name, shape in param_shapes.items():

        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel
        total_params += 1

        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

        # XXX: memory usage doubles here
        # take this param's slice from every rank's shard, concat, trim padding
        state_dict[name] = torch.cat(
            tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
            0).narrow(0, 0, unpartitioned_numel).view(shape)
        offset += partitioned_numel

    offset *= world_size

    # Sanity check
    if offset != avail_numel:
        raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
+
+
def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                               exclude_frozen_parameters):
    """Build the consolidated fp32 state_dict from a ZeRO-3 checkpoint."""
    state_dict = OrderedDict()

    # module buffers come straight from rank 0
    buffers = zero_model_states[0].buffers
    state_dict.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    if not exclude_frozen_parameters:
        _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)

    _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters: tied weights alias an already-restored tensor
    for alias, source in zero_model_states[0].shared_params:
        if source in state_dict:
            state_dict[alias] = state_dict[source]

    return state_dict
+
+
def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None, exclude_frozen_parameters=False):
    """
    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
    ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
    via a model hub.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
        - ``exclude_frozen_parameters``: exclude frozen parameters

    Returns:
        - pytorch ``state_dict``

    Raises:
        - ``ValueError``: no tag given and no 'latest' file found
        - ``FileNotFoundError``: the resolved tag sub-folder doesn't exist

    Note: this approach may not work if your application doesn't have sufficient free CPU memory and
    you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
    the checkpoint.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
        # do the training and checkpoint saving
        state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
        model = model.cpu() # move to cpu
        model.load_state_dict(state_dict)
        # submit to model hub or save the model to share with others

    In this example the ``model`` will no longer be usable in the deepspeed context of the same
    application. i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.

    """
    # resolve the tag (checkpoint sub-folder name) from the 'latest' marker file
    if tag is None:
        latest_path = os.path.join(checkpoint_dir, 'latest')
        if os.path.isfile(latest_path):
            with open(latest_path, 'r') as fd:
                tag = fd.read().strip()
        else:
            raise ValueError(f"Unable to find 'latest' file at {latest_path}")

    ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)

    if not os.path.isdir(ds_checkpoint_dir):
        raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")

    return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
+
+
def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None, exclude_frozen_parameters=False):
    """
    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
    loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
        - ``exclude_frozen_parameters``: exclude frozen parameters
    """

    fp32_state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag, exclude_frozen_parameters)
    print(f"Saving fp32 state dict to {output_file}")
    torch.save(fp32_state_dict, output_file)
+
+
def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
    """
    1. Put the provided model to cpu
    2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
    3. Load it into the provided model

    Args:
        - ``model``: the model object to update
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``

    Returns:
        - ``model``: modified model

    Make sure you have plenty of CPU memory available before you call this function. If you don't
    have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
    conveniently placed for you in the checkpoint folder.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
        model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
        # submit to model hub or save the model to share with others

    Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context
    of the same application. i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    """
    logger.info(f"Extracting fp32 weights")
    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)

    logger.info(f"Overwriting model with fp32 weights")
    model = model.cpu()
    # strict=False: presumably to tolerate keys excluded during extraction
    # (e.g. frozen params) — confirm before tightening
    model.load_state_dict(state_dict, strict=False)

    return model
+
+
# CLI entry point: convert a DeepSpeed ZeRO checkpoint folder into a single
# consolidated fp32 state_dict file, e.g.:
#   python zero_to_fp32.py path/checkpoint-12 path/checkpoint-12/pytorch_model.bin
if __name__ == "__main__":

    parser = argparse.ArgumentParser()
    parser.add_argument("checkpoint_dir",
                        type=str,
                        help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
    parser.add_argument(
        "output_file",
        type=str,
        help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)")
    parser.add_argument("-t",
                        "--tag",
                        type=str,
                        default=None,
                        help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
    parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
    parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
    args = parser.parse_args()

    # propagate --debug to the module-level flag the helpers read
    debug = args.debug

    convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
                                               args.output_file,
                                               tag=args.tag,
                                               exclude_frozen_parameters=args.exclude_frozen_parameters)
diff --git a/v1/checkpoint-900/README.md b/v1/checkpoint-900/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..16b1eacdd9353dec380a08ee77ce6ed5ab50f12e
--- /dev/null
+++ b/v1/checkpoint-900/README.md
@@ -0,0 +1,202 @@
+---
+library_name: peft
+base_model: gotzmann/uni
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+### Framework versions
+
+- PEFT 0.10.0
\ No newline at end of file
diff --git a/v1/checkpoint-900/adapter_config.json b/v1/checkpoint-900/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3cd6dba5d79f7ca21fd4ad465cbbcac1e0960476
--- /dev/null
+++ b/v1/checkpoint-900/adapter_config.json
@@ -0,0 +1,31 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "gotzmann/uni",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 128,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 128,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "v_proj",
+ "k_proj",
+ "q_proj",
+ "o_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": true
+}
\ No newline at end of file
diff --git a/v1/checkpoint-900/adapter_model.safetensors b/v1/checkpoint-900/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..d0d41dc16e57fd1e8f98f99b162fcd9e56da23b6
--- /dev/null
+++ b/v1/checkpoint-900/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:215651d0de9897edbabc4cbb2f0d639077c12bc36765a03c309015bc46e774fb
+size 1048664848
diff --git a/v1/checkpoint-900/global_step900/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt b/v1/checkpoint-900/global_step900/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..4fef5b603f20994a8c16e22a8640efcf067b102a
--- /dev/null
+++ b/v1/checkpoint-900/global_step900/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:83851896066b2a670cef85b29d5cbd02324b67aab33f491502645208b96ce9e2
+size 787270042
diff --git a/v1/checkpoint-900/global_step900/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt b/v1/checkpoint-900/global_step900/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..dfdb01f9055ea08b40698db9694c49761a593496
--- /dev/null
+++ b/v1/checkpoint-900/global_step900/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eb74e2b96ba4d034785e4dc5a6df3f1c705253ff507cbffedcf01952ed7c67d1
+size 787270042
diff --git a/v1/checkpoint-900/global_step900/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt b/v1/checkpoint-900/global_step900/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..0d8b3681841482b769f5b9dcb749140ae02c5ddd
--- /dev/null
+++ b/v1/checkpoint-900/global_step900/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:39fc3e961bb6a339c13821e8b54f6712258de163aefcd3dfbea9aefbe2eaa139
+size 787270042
diff --git a/v1/checkpoint-900/global_step900/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt b/v1/checkpoint-900/global_step900/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..636fb00be0d3cf2c4d5cb0788c440abc2389a7e7
--- /dev/null
+++ b/v1/checkpoint-900/global_step900/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af40baca9b0ab1ef941361eea450d4028f146449d1f8a1c8561f98098d8c72e7
+size 787270042
diff --git a/v1/checkpoint-900/global_step900/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt b/v1/checkpoint-900/global_step900/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..c465e7aa5ce65d952697c71338fea58b666875f3
--- /dev/null
+++ b/v1/checkpoint-900/global_step900/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b8e662e531b384cefb5d7ce4c9b4910b356d766ad51bdc681505ede98124e330
+size 787270042
diff --git a/v1/checkpoint-900/global_step900/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt b/v1/checkpoint-900/global_step900/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..e9c4497000aa49759860ff0ff3e9624e34fdd3c3
--- /dev/null
+++ b/v1/checkpoint-900/global_step900/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f827433561f1beb6a042908d42b67ac034fab2a8fc2427923a4a602b7ccf072a
+size 787270042
diff --git a/v1/checkpoint-900/global_step900/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt b/v1/checkpoint-900/global_step900/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..98c4f31a8d36904db1e855d1ebed7e5f284bd63c
--- /dev/null
+++ b/v1/checkpoint-900/global_step900/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b9e980aa3df9be02ddc3fb1789091433841b760202bf12b339f61be7e59baedf
+size 787270042
diff --git a/v1/checkpoint-900/global_step900/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt b/v1/checkpoint-900/global_step900/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..e46d41274ee53cd747825273e488b6b3f6849e3b
--- /dev/null
+++ b/v1/checkpoint-900/global_step900/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:74c3ece63cff6b285a9d0738d0589555f262a14941d1d0e7d7c276615aaeba67
+size 787270042
diff --git a/v1/checkpoint-900/global_step900/zero_pp_rank_0_mp_rank_00_model_states.pt b/v1/checkpoint-900/global_step900/zero_pp_rank_0_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..632037350e1a68e2e87a5bf2fae6a5d106b62fbe
--- /dev/null
+++ b/v1/checkpoint-900/global_step900/zero_pp_rank_0_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:85598ae16499587d8bfb6e946d707439395a9f4089d47e8f174215d7ea23f02b
+size 653742
diff --git a/v1/checkpoint-900/global_step900/zero_pp_rank_1_mp_rank_00_model_states.pt b/v1/checkpoint-900/global_step900/zero_pp_rank_1_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..b1ea7f4f640df53c9594c4d57fb8c19f63ada920
--- /dev/null
+++ b/v1/checkpoint-900/global_step900/zero_pp_rank_1_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fa9e715a8e0cb0844d785705bd57fbdeef4b9b9199295164cc3d47b826b41177
+size 653742
diff --git a/v1/checkpoint-900/global_step900/zero_pp_rank_2_mp_rank_00_model_states.pt b/v1/checkpoint-900/global_step900/zero_pp_rank_2_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..4751dd40d90d849eeb6ea28745dca90ebfca182a
--- /dev/null
+++ b/v1/checkpoint-900/global_step900/zero_pp_rank_2_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c14e123bb4970bf48a088eea86d29b2097207dda07f22651360d88159b9d3ebd
+size 653742
diff --git a/v1/checkpoint-900/global_step900/zero_pp_rank_3_mp_rank_00_model_states.pt b/v1/checkpoint-900/global_step900/zero_pp_rank_3_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..b170f57b5cb9a4f9e473c4b1fe3d5a29177f93cc
--- /dev/null
+++ b/v1/checkpoint-900/global_step900/zero_pp_rank_3_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a3dcaa39c072e2ffe8b900370eecb5557a04bd37a1b3a2245ca649d1cc2e8e76
+size 653742
diff --git a/v1/checkpoint-900/global_step900/zero_pp_rank_4_mp_rank_00_model_states.pt b/v1/checkpoint-900/global_step900/zero_pp_rank_4_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..c103ed3d0821ec80b47eb12839f1a288c7700a70
--- /dev/null
+++ b/v1/checkpoint-900/global_step900/zero_pp_rank_4_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:00408388525bfb3d34283867e75ea3a1070bd379b53e454b37e6d20b8c1e1b86
+size 653742
diff --git a/v1/checkpoint-900/global_step900/zero_pp_rank_5_mp_rank_00_model_states.pt b/v1/checkpoint-900/global_step900/zero_pp_rank_5_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..03614089c267d7750d00f3d0cf2b2ca278f0b6a2
--- /dev/null
+++ b/v1/checkpoint-900/global_step900/zero_pp_rank_5_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5c40bdb5ad8352a66d4e601623bede45033fe7ae887c92e45dc7ec9d3f2d166a
+size 653742
diff --git a/v1/checkpoint-900/global_step900/zero_pp_rank_6_mp_rank_00_model_states.pt b/v1/checkpoint-900/global_step900/zero_pp_rank_6_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..d65dce6d23ce131db2ce4985f758199922a36b5f
--- /dev/null
+++ b/v1/checkpoint-900/global_step900/zero_pp_rank_6_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8a2d7af4eab09244be1883dec71dc5a455934b824d94b0a839fe707ceea50df6
+size 653742
diff --git a/v1/checkpoint-900/global_step900/zero_pp_rank_7_mp_rank_00_model_states.pt b/v1/checkpoint-900/global_step900/zero_pp_rank_7_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..7bc7e1483b4260f770b1784a3f8d517d32c6cbfc
--- /dev/null
+++ b/v1/checkpoint-900/global_step900/zero_pp_rank_7_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8b88c9e96f0f393697fb1e0d8d34ff85a700c5ecdac2d255f4522c93a56c82b3
+size 653742
diff --git a/v1/checkpoint-900/latest b/v1/checkpoint-900/latest
new file mode 100644
index 0000000000000000000000000000000000000000..4b10acccf3e8395339ff8799cea202bbc54d7f7d
--- /dev/null
+++ b/v1/checkpoint-900/latest
@@ -0,0 +1 @@
+global_step900
\ No newline at end of file
diff --git a/v1/checkpoint-900/rng_state_0.pth b/v1/checkpoint-900/rng_state_0.pth
new file mode 100644
index 0000000000000000000000000000000000000000..4e5b7e2ec90fdb824c8932464c1d9068330655a7
--- /dev/null
+++ b/v1/checkpoint-900/rng_state_0.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:36d2a2034ebb05cb71c510897f2795b31164e50f17b270bc25d2be3ad9a17b22
+size 15984
diff --git a/v1/checkpoint-900/rng_state_1.pth b/v1/checkpoint-900/rng_state_1.pth
new file mode 100644
index 0000000000000000000000000000000000000000..7d8d7722fc72cab6d492b76cb99c8177dcc47544
--- /dev/null
+++ b/v1/checkpoint-900/rng_state_1.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:060dfdb1c49102cbdc8868a6031e68787601b4ccd782f3fb9b137e20c1fd2c7a
+size 15984
diff --git a/v1/checkpoint-900/rng_state_2.pth b/v1/checkpoint-900/rng_state_2.pth
new file mode 100644
index 0000000000000000000000000000000000000000..3c9f84eff30cfa9ea1feedaf262d61fb12e4cba7
--- /dev/null
+++ b/v1/checkpoint-900/rng_state_2.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af01895cb66e616591f2e4baa8dcd8151530eab133c73571ccb31c74f35422ce
+size 15984
diff --git a/v1/checkpoint-900/rng_state_3.pth b/v1/checkpoint-900/rng_state_3.pth
new file mode 100644
index 0000000000000000000000000000000000000000..6eebfb928f8e91eff0ea1645a20b5aa4465c705b
--- /dev/null
+++ b/v1/checkpoint-900/rng_state_3.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:677921992b1e0cef3aee776f245975003d22f51d9bd6ed20f248ded1deb72fa9
+size 15984
diff --git a/v1/checkpoint-900/rng_state_4.pth b/v1/checkpoint-900/rng_state_4.pth
new file mode 100644
index 0000000000000000000000000000000000000000..0866030a266c6d003cc378a9418a723f69e8ab99
--- /dev/null
+++ b/v1/checkpoint-900/rng_state_4.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d69353c629541c690c5471f8ec05fdab2bfecf3d37afaa436bc45939da6db68f
+size 15984
diff --git a/v1/checkpoint-900/rng_state_5.pth b/v1/checkpoint-900/rng_state_5.pth
new file mode 100644
index 0000000000000000000000000000000000000000..554638d77107f832d7aa51c61645ee2d6c48a36d
--- /dev/null
+++ b/v1/checkpoint-900/rng_state_5.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8e40ba6668cc03c9162c68a933d164bf38ae2d196a9a6fec03ae615491201185
+size 15984
diff --git a/v1/checkpoint-900/rng_state_6.pth b/v1/checkpoint-900/rng_state_6.pth
new file mode 100644
index 0000000000000000000000000000000000000000..964331b65172a1bcac03e4673415fa787f724268
--- /dev/null
+++ b/v1/checkpoint-900/rng_state_6.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:870968fea834e24b2e099cf3e4fe1e3fb8caf38d8f8e5b790d7d47386d4d05f5
+size 15984
diff --git a/v1/checkpoint-900/rng_state_7.pth b/v1/checkpoint-900/rng_state_7.pth
new file mode 100644
index 0000000000000000000000000000000000000000..cd4754d65217d0f9d1f2d3334397df7a8a079652
--- /dev/null
+++ b/v1/checkpoint-900/rng_state_7.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e9e19618bee7c6ef43256fea25abe19bca88535eb1e7dc213cde8929ae4e8180
+size 15984
diff --git a/v1/checkpoint-900/scheduler.pt b/v1/checkpoint-900/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..f7b87852ce7e9d89a9838a81558c5114b66b5f1d
--- /dev/null
+++ b/v1/checkpoint-900/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c5b870237fe31eb4ea81715c47c8e28db6bbabdd4eab183c6763777a08ea25c9
+size 1064
diff --git a/v1/checkpoint-900/special_tokens_map.json b/v1/checkpoint-900/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..72ecfeeb7e14d244c936169d2ed139eeae235ef1
--- /dev/null
+++ b/v1/checkpoint-900/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "",
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/v1/checkpoint-900/tokenizer.model b/v1/checkpoint-900/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..6c00c742ce03c627d6cd5b795984876fa49fa899
--- /dev/null
+++ b/v1/checkpoint-900/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
diff --git a/v1/checkpoint-900/tokenizer_config.json b/v1/checkpoint-900/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..bb5a9f09d8c0f3c32c66fc6118fe5c76c5c6fd90
--- /dev/null
+++ b/v1/checkpoint-900/tokenizer_config.json
@@ -0,0 +1,45 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "",
+ "chat_template": "{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ '' + '### System:\\n\\n' + system_message }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '\\n\\n### Human:\\n\\n' + content }}{% elif message['role'] == 'assistant' %}{{ '\\n\\n### Assistant:\\n\\n' + content + '' }}{% endif %}{% endfor %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "",
+ "legacy": false,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "split_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "",
+ "use_default_system_prompt": false
+}
diff --git a/v1/checkpoint-900/trainer_state.json b/v1/checkpoint-900/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..b95e764d9f380f78084960d160ba7e527762977f
--- /dev/null
+++ b/v1/checkpoint-900/trainer_state.json
@@ -0,0 +1,6321 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 1.646090534979424,
+ "eval_steps": 500,
+ "global_step": 900,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.0,
+ "grad_norm": 0.849355824164473,
+ "learning_rate": 4.878048780487805e-07,
+ "loss": 1.3655,
+ "step": 1
+ },
+ {
+ "epoch": 0.0,
+ "grad_norm": 10.01567518957158,
+ "learning_rate": 9.75609756097561e-07,
+ "loss": 1.5767,
+ "step": 2
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.6466000875559635,
+ "learning_rate": 1.4634146341463414e-06,
+ "loss": 1.3913,
+ "step": 3
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.6644565932010504,
+ "learning_rate": 1.951219512195122e-06,
+ "loss": 1.3218,
+ "step": 4
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.571354207588475,
+ "learning_rate": 2.4390243902439027e-06,
+ "loss": 1.3597,
+ "step": 5
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.31036262839244955,
+ "learning_rate": 2.926829268292683e-06,
+ "loss": 1.2832,
+ "step": 6
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.2622135027188184,
+ "learning_rate": 3.414634146341464e-06,
+ "loss": 1.2161,
+ "step": 7
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.296824630261661,
+ "learning_rate": 3.902439024390244e-06,
+ "loss": 1.2985,
+ "step": 8
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.2557267467361569,
+ "learning_rate": 4.390243902439025e-06,
+ "loss": 1.3175,
+ "step": 9
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.23418939513890769,
+ "learning_rate": 4.8780487804878055e-06,
+ "loss": 1.2617,
+ "step": 10
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.2364760983285843,
+ "learning_rate": 5.365853658536586e-06,
+ "loss": 1.3103,
+ "step": 11
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.23893034721889,
+ "learning_rate": 5.853658536585366e-06,
+ "loss": 1.2405,
+ "step": 12
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.25563593295485887,
+ "learning_rate": 6.341463414634147e-06,
+ "loss": 1.2831,
+ "step": 13
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.23239975352661665,
+ "learning_rate": 6.829268292682928e-06,
+ "loss": 1.3125,
+ "step": 14
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.3092813858209507,
+ "learning_rate": 7.317073170731707e-06,
+ "loss": 1.2422,
+ "step": 15
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.282563380367434,
+ "learning_rate": 7.804878048780489e-06,
+ "loss": 1.2453,
+ "step": 16
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.22065680088315018,
+ "learning_rate": 8.292682926829268e-06,
+ "loss": 1.2491,
+ "step": 17
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.22777800877980184,
+ "learning_rate": 8.78048780487805e-06,
+ "loss": 1.2655,
+ "step": 18
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.22145212540177928,
+ "learning_rate": 9.268292682926831e-06,
+ "loss": 1.2413,
+ "step": 19
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.22482351883112714,
+ "learning_rate": 9.756097560975611e-06,
+ "loss": 1.2653,
+ "step": 20
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.20823080508385733,
+ "learning_rate": 1.024390243902439e-05,
+ "loss": 1.2374,
+ "step": 21
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.26025492562935737,
+ "learning_rate": 1.0731707317073172e-05,
+ "loss": 1.2065,
+ "step": 22
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.2150252124176173,
+ "learning_rate": 1.1219512195121953e-05,
+ "loss": 1.2782,
+ "step": 23
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.2505915177425618,
+ "learning_rate": 1.1707317073170731e-05,
+ "loss": 1.2742,
+ "step": 24
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.20129223044786942,
+ "learning_rate": 1.2195121951219513e-05,
+ "loss": 1.3366,
+ "step": 25
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.1973508510397107,
+ "learning_rate": 1.2682926829268294e-05,
+ "loss": 1.2476,
+ "step": 26
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.27103325392437194,
+ "learning_rate": 1.3170731707317076e-05,
+ "loss": 1.2325,
+ "step": 27
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.17954976411006285,
+ "learning_rate": 1.3658536585365855e-05,
+ "loss": 1.2523,
+ "step": 28
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.22216997851088888,
+ "learning_rate": 1.4146341463414635e-05,
+ "loss": 1.3297,
+ "step": 29
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.2071458864548587,
+ "learning_rate": 1.4634146341463415e-05,
+ "loss": 1.2127,
+ "step": 30
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.18039422081622164,
+ "learning_rate": 1.5121951219512196e-05,
+ "loss": 1.2509,
+ "step": 31
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.18631254372974412,
+ "learning_rate": 1.5609756097560978e-05,
+ "loss": 1.2247,
+ "step": 32
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.18843872523649827,
+ "learning_rate": 1.6097560975609757e-05,
+ "loss": 1.195,
+ "step": 33
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.2163847267778325,
+ "learning_rate": 1.6585365853658537e-05,
+ "loss": 1.2179,
+ "step": 34
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.19687688475496104,
+ "learning_rate": 1.7073170731707317e-05,
+ "loss": 1.2763,
+ "step": 35
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.20409643064887947,
+ "learning_rate": 1.75609756097561e-05,
+ "loss": 1.253,
+ "step": 36
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.1879182661759335,
+ "learning_rate": 1.804878048780488e-05,
+ "loss": 1.2586,
+ "step": 37
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.19400648948514373,
+ "learning_rate": 1.8536585365853663e-05,
+ "loss": 1.2154,
+ "step": 38
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.1878879343148452,
+ "learning_rate": 1.902439024390244e-05,
+ "loss": 1.2304,
+ "step": 39
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.17687475469924052,
+ "learning_rate": 1.9512195121951222e-05,
+ "loss": 1.2351,
+ "step": 40
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.18223935625384885,
+ "learning_rate": 2e-05,
+ "loss": 1.2222,
+ "step": 41
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.1943061629408338,
+ "learning_rate": 2.048780487804878e-05,
+ "loss": 1.2044,
+ "step": 42
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.17027514338700078,
+ "learning_rate": 2.0975609756097564e-05,
+ "loss": 1.1548,
+ "step": 43
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.18553769630586192,
+ "learning_rate": 2.1463414634146344e-05,
+ "loss": 1.2721,
+ "step": 44
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.19732826914228765,
+ "learning_rate": 2.1951219512195124e-05,
+ "loss": 1.3097,
+ "step": 45
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.18714230986631472,
+ "learning_rate": 2.2439024390243907e-05,
+ "loss": 1.2662,
+ "step": 46
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.19988987568002223,
+ "learning_rate": 2.2926829268292683e-05,
+ "loss": 1.2904,
+ "step": 47
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.17744650133390918,
+ "learning_rate": 2.3414634146341463e-05,
+ "loss": 1.1825,
+ "step": 48
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.16576734763834533,
+ "learning_rate": 2.3902439024390246e-05,
+ "loss": 1.1858,
+ "step": 49
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.179591794065527,
+ "learning_rate": 2.4390243902439026e-05,
+ "loss": 1.2711,
+ "step": 50
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.17923464471176911,
+ "learning_rate": 2.4878048780487805e-05,
+ "loss": 1.2289,
+ "step": 51
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.18991742907836837,
+ "learning_rate": 2.536585365853659e-05,
+ "loss": 1.3097,
+ "step": 52
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.19849796137254636,
+ "learning_rate": 2.5853658536585368e-05,
+ "loss": 1.2489,
+ "step": 53
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.17452371110976383,
+ "learning_rate": 2.634146341463415e-05,
+ "loss": 1.2461,
+ "step": 54
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.17671022353085036,
+ "learning_rate": 2.682926829268293e-05,
+ "loss": 1.153,
+ "step": 55
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.36820559192096686,
+ "learning_rate": 2.731707317073171e-05,
+ "loss": 1.2431,
+ "step": 56
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.20331468526494198,
+ "learning_rate": 2.7804878048780487e-05,
+ "loss": 1.2575,
+ "step": 57
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.2402486598118377,
+ "learning_rate": 2.829268292682927e-05,
+ "loss": 1.2538,
+ "step": 58
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.2549409484173144,
+ "learning_rate": 2.878048780487805e-05,
+ "loss": 1.2065,
+ "step": 59
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.2053105349872685,
+ "learning_rate": 2.926829268292683e-05,
+ "loss": 1.2094,
+ "step": 60
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.17971910872957886,
+ "learning_rate": 2.9756097560975613e-05,
+ "loss": 1.228,
+ "step": 61
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.1885853654992973,
+ "learning_rate": 3.0243902439024392e-05,
+ "loss": 1.2286,
+ "step": 62
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.1848524571968613,
+ "learning_rate": 3.073170731707317e-05,
+ "loss": 1.2718,
+ "step": 63
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.18734105883548513,
+ "learning_rate": 3.1219512195121955e-05,
+ "loss": 1.2357,
+ "step": 64
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.17774668052121825,
+ "learning_rate": 3.170731707317074e-05,
+ "loss": 1.1509,
+ "step": 65
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.17890968008080646,
+ "learning_rate": 3.2195121951219514e-05,
+ "loss": 1.1924,
+ "step": 66
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.18249273371332375,
+ "learning_rate": 3.268292682926829e-05,
+ "loss": 1.2545,
+ "step": 67
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.21064122671902577,
+ "learning_rate": 3.3170731707317074e-05,
+ "loss": 1.2832,
+ "step": 68
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.1820064171955093,
+ "learning_rate": 3.365853658536586e-05,
+ "loss": 1.2071,
+ "step": 69
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.16996662800553433,
+ "learning_rate": 3.414634146341463e-05,
+ "loss": 1.2073,
+ "step": 70
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.1618669302922445,
+ "learning_rate": 3.4634146341463416e-05,
+ "loss": 1.1289,
+ "step": 71
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.18948744950985544,
+ "learning_rate": 3.51219512195122e-05,
+ "loss": 1.2915,
+ "step": 72
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.18326143691603383,
+ "learning_rate": 3.5609756097560976e-05,
+ "loss": 1.2238,
+ "step": 73
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.17410704510700503,
+ "learning_rate": 3.609756097560976e-05,
+ "loss": 1.1784,
+ "step": 74
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.1983667344995625,
+ "learning_rate": 3.658536585365854e-05,
+ "loss": 1.2452,
+ "step": 75
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.3416310763369357,
+ "learning_rate": 3.7073170731707325e-05,
+ "loss": 1.1972,
+ "step": 76
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.2776466983511955,
+ "learning_rate": 3.75609756097561e-05,
+ "loss": 1.3121,
+ "step": 77
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.20026129636576834,
+ "learning_rate": 3.804878048780488e-05,
+ "loss": 1.2436,
+ "step": 78
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.21064549243917835,
+ "learning_rate": 3.853658536585366e-05,
+ "loss": 1.2064,
+ "step": 79
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.22119482175714267,
+ "learning_rate": 3.9024390243902444e-05,
+ "loss": 1.2715,
+ "step": 80
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.23047133748844142,
+ "learning_rate": 3.951219512195122e-05,
+ "loss": 1.2888,
+ "step": 81
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.18741863156973176,
+ "learning_rate": 4e-05,
+ "loss": 1.248,
+ "step": 82
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.1747859810629604,
+ "learning_rate": 4.0487804878048786e-05,
+ "loss": 1.1683,
+ "step": 83
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.1896944798413341,
+ "learning_rate": 4.097560975609756e-05,
+ "loss": 1.2155,
+ "step": 84
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.18724128114363303,
+ "learning_rate": 4.1463414634146346e-05,
+ "loss": 1.2273,
+ "step": 85
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.17368125504855478,
+ "learning_rate": 4.195121951219513e-05,
+ "loss": 1.224,
+ "step": 86
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.18371141013625703,
+ "learning_rate": 4.2439024390243905e-05,
+ "loss": 1.2294,
+ "step": 87
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.1791029365673714,
+ "learning_rate": 4.292682926829269e-05,
+ "loss": 1.2895,
+ "step": 88
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.20259974283859655,
+ "learning_rate": 4.341463414634147e-05,
+ "loss": 1.1841,
+ "step": 89
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.17457456183272174,
+ "learning_rate": 4.390243902439025e-05,
+ "loss": 1.2357,
+ "step": 90
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.1815824380789748,
+ "learning_rate": 4.439024390243903e-05,
+ "loss": 1.2304,
+ "step": 91
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.17566480599583392,
+ "learning_rate": 4.4878048780487814e-05,
+ "loss": 1.242,
+ "step": 92
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.18422975005984474,
+ "learning_rate": 4.536585365853658e-05,
+ "loss": 1.2177,
+ "step": 93
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.16796781877940678,
+ "learning_rate": 4.5853658536585366e-05,
+ "loss": 1.1482,
+ "step": 94
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.18636131653783305,
+ "learning_rate": 4.634146341463415e-05,
+ "loss": 1.1758,
+ "step": 95
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.1823665700289814,
+ "learning_rate": 4.6829268292682926e-05,
+ "loss": 1.289,
+ "step": 96
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.1719900691262439,
+ "learning_rate": 4.731707317073171e-05,
+ "loss": 1.1626,
+ "step": 97
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.17937994168039778,
+ "learning_rate": 4.780487804878049e-05,
+ "loss": 1.175,
+ "step": 98
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.16631851422106986,
+ "learning_rate": 4.829268292682927e-05,
+ "loss": 1.2177,
+ "step": 99
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.19143696232800309,
+ "learning_rate": 4.878048780487805e-05,
+ "loss": 1.3071,
+ "step": 100
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.17859506638780318,
+ "learning_rate": 4.9268292682926835e-05,
+ "loss": 1.2351,
+ "step": 101
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.18381520321248196,
+ "learning_rate": 4.975609756097561e-05,
+ "loss": 1.2342,
+ "step": 102
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.17968218683773912,
+ "learning_rate": 5.0243902439024394e-05,
+ "loss": 1.2074,
+ "step": 103
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.18139489969339018,
+ "learning_rate": 5.073170731707318e-05,
+ "loss": 1.1558,
+ "step": 104
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.17366624842514394,
+ "learning_rate": 5.121951219512195e-05,
+ "loss": 1.1897,
+ "step": 105
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.16034845455223745,
+ "learning_rate": 5.1707317073170736e-05,
+ "loss": 1.179,
+ "step": 106
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.17583069577827776,
+ "learning_rate": 5.219512195121952e-05,
+ "loss": 1.1856,
+ "step": 107
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.1853758076989552,
+ "learning_rate": 5.26829268292683e-05,
+ "loss": 1.2072,
+ "step": 108
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.19597443965936462,
+ "learning_rate": 5.317073170731708e-05,
+ "loss": 1.2271,
+ "step": 109
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.1899206334098331,
+ "learning_rate": 5.365853658536586e-05,
+ "loss": 1.1961,
+ "step": 110
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.17463763837757018,
+ "learning_rate": 5.4146341463414645e-05,
+ "loss": 1.2049,
+ "step": 111
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.20431371701229986,
+ "learning_rate": 5.463414634146342e-05,
+ "loss": 1.2891,
+ "step": 112
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1814475107638498,
+ "learning_rate": 5.51219512195122e-05,
+ "loss": 1.2346,
+ "step": 113
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1883849423207823,
+ "learning_rate": 5.5609756097560974e-05,
+ "loss": 1.244,
+ "step": 114
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1857258128640568,
+ "learning_rate": 5.609756097560976e-05,
+ "loss": 1.2669,
+ "step": 115
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1740768514118401,
+ "learning_rate": 5.658536585365854e-05,
+ "loss": 1.2414,
+ "step": 116
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1919320335584178,
+ "learning_rate": 5.7073170731707317e-05,
+ "loss": 1.2886,
+ "step": 117
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.18288775167828136,
+ "learning_rate": 5.75609756097561e-05,
+ "loss": 1.1875,
+ "step": 118
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.18208588867750863,
+ "learning_rate": 5.804878048780488e-05,
+ "loss": 1.2388,
+ "step": 119
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.1743260015658331,
+ "learning_rate": 5.853658536585366e-05,
+ "loss": 1.1762,
+ "step": 120
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.17856046291517946,
+ "learning_rate": 5.902439024390244e-05,
+ "loss": 1.2888,
+ "step": 121
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.17493794870966536,
+ "learning_rate": 5.9512195121951225e-05,
+ "loss": 1.2222,
+ "step": 122
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.1909202655203384,
+ "learning_rate": 6.000000000000001e-05,
+ "loss": 1.2414,
+ "step": 123
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.18345819482834988,
+ "learning_rate": 6.0487804878048785e-05,
+ "loss": 1.2756,
+ "step": 124
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.2057069352956621,
+ "learning_rate": 6.097560975609757e-05,
+ "loss": 1.261,
+ "step": 125
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.299775882469108,
+ "learning_rate": 6.146341463414634e-05,
+ "loss": 1.2566,
+ "step": 126
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.1869687633018095,
+ "learning_rate": 6.195121951219513e-05,
+ "loss": 1.3039,
+ "step": 127
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.17747149926197442,
+ "learning_rate": 6.243902439024391e-05,
+ "loss": 1.2524,
+ "step": 128
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.17885157788044242,
+ "learning_rate": 6.29268292682927e-05,
+ "loss": 1.2455,
+ "step": 129
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.17617298187845123,
+ "learning_rate": 6.341463414634148e-05,
+ "loss": 1.2009,
+ "step": 130
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.20164176323497066,
+ "learning_rate": 6.390243902439025e-05,
+ "loss": 1.2634,
+ "step": 131
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.20459903417307612,
+ "learning_rate": 6.439024390243903e-05,
+ "loss": 1.1963,
+ "step": 132
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.1863755486334296,
+ "learning_rate": 6.487804878048781e-05,
+ "loss": 1.2387,
+ "step": 133
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.19265866140295207,
+ "learning_rate": 6.536585365853658e-05,
+ "loss": 1.2688,
+ "step": 134
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.1823425868969493,
+ "learning_rate": 6.585365853658536e-05,
+ "loss": 1.2041,
+ "step": 135
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.2016853266472781,
+ "learning_rate": 6.634146341463415e-05,
+ "loss": 1.1223,
+ "step": 136
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.17282675192463448,
+ "learning_rate": 6.682926829268293e-05,
+ "loss": 1.1879,
+ "step": 137
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.17398811693399288,
+ "learning_rate": 6.731707317073171e-05,
+ "loss": 1.2682,
+ "step": 138
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.18516916965434696,
+ "learning_rate": 6.78048780487805e-05,
+ "loss": 1.1666,
+ "step": 139
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.1852213129647933,
+ "learning_rate": 6.829268292682927e-05,
+ "loss": 1.2501,
+ "step": 140
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.17915948766591883,
+ "learning_rate": 6.878048780487805e-05,
+ "loss": 1.2264,
+ "step": 141
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.21599939417233183,
+ "learning_rate": 6.926829268292683e-05,
+ "loss": 1.2376,
+ "step": 142
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.17839304459521851,
+ "learning_rate": 6.975609756097562e-05,
+ "loss": 1.2353,
+ "step": 143
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.20826913231380875,
+ "learning_rate": 7.02439024390244e-05,
+ "loss": 1.1901,
+ "step": 144
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.20788894913361589,
+ "learning_rate": 7.073170731707318e-05,
+ "loss": 1.2577,
+ "step": 145
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.18420055842301297,
+ "learning_rate": 7.121951219512195e-05,
+ "loss": 1.1393,
+ "step": 146
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.19903048468685589,
+ "learning_rate": 7.170731707317073e-05,
+ "loss": 1.2321,
+ "step": 147
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.19074116314985748,
+ "learning_rate": 7.219512195121952e-05,
+ "loss": 1.1912,
+ "step": 148
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.2353816469403903,
+ "learning_rate": 7.26829268292683e-05,
+ "loss": 1.28,
+ "step": 149
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.21634875684769345,
+ "learning_rate": 7.317073170731708e-05,
+ "loss": 1.3312,
+ "step": 150
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.18290969006743918,
+ "learning_rate": 7.365853658536587e-05,
+ "loss": 1.2214,
+ "step": 151
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.18484243897545208,
+ "learning_rate": 7.414634146341465e-05,
+ "loss": 1.1895,
+ "step": 152
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.21882343112978872,
+ "learning_rate": 7.463414634146342e-05,
+ "loss": 1.2219,
+ "step": 153
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.19868284379241205,
+ "learning_rate": 7.51219512195122e-05,
+ "loss": 1.2176,
+ "step": 154
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.20912516312950613,
+ "learning_rate": 7.560975609756097e-05,
+ "loss": 1.242,
+ "step": 155
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.23811880045549916,
+ "learning_rate": 7.609756097560976e-05,
+ "loss": 1.2838,
+ "step": 156
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.19511077122033713,
+ "learning_rate": 7.658536585365854e-05,
+ "loss": 1.1594,
+ "step": 157
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.20094129399534238,
+ "learning_rate": 7.707317073170732e-05,
+ "loss": 1.2966,
+ "step": 158
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.19366245038292418,
+ "learning_rate": 7.75609756097561e-05,
+ "loss": 1.2246,
+ "step": 159
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.19409570223867306,
+ "learning_rate": 7.804878048780489e-05,
+ "loss": 1.2312,
+ "step": 160
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.2087258457033805,
+ "learning_rate": 7.853658536585366e-05,
+ "loss": 1.2169,
+ "step": 161
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.18765223996270428,
+ "learning_rate": 7.902439024390244e-05,
+ "loss": 1.2383,
+ "step": 162
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.20734180224147242,
+ "learning_rate": 7.951219512195122e-05,
+ "loss": 1.2587,
+ "step": 163
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.24690929540287834,
+ "learning_rate": 8e-05,
+ "loss": 1.1951,
+ "step": 164
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.2003538797619543,
+ "learning_rate": 7.999990914797545e-05,
+ "loss": 1.1982,
+ "step": 165
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.22469075613510484,
+ "learning_rate": 7.99996365923145e-05,
+ "loss": 1.2355,
+ "step": 166
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.21870100788336058,
+ "learning_rate": 7.999918233425526e-05,
+ "loss": 1.1103,
+ "step": 167
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.20939989594131886,
+ "learning_rate": 7.999854637586122e-05,
+ "loss": 1.1966,
+ "step": 168
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.43108211416237796,
+ "learning_rate": 7.999772872002132e-05,
+ "loss": 1.2882,
+ "step": 169
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.27045413432174487,
+ "learning_rate": 7.999672937044984e-05,
+ "loss": 1.2399,
+ "step": 170
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.19700483036740515,
+ "learning_rate": 7.999554833168642e-05,
+ "loss": 1.202,
+ "step": 171
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.3335979493370708,
+ "learning_rate": 7.999418560909604e-05,
+ "loss": 1.1995,
+ "step": 172
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.3165803974474567,
+ "learning_rate": 7.999264120886902e-05,
+ "loss": 1.1569,
+ "step": 173
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.1951699080346223,
+ "learning_rate": 7.999091513802093e-05,
+ "loss": 1.1778,
+ "step": 174
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.2087559121749787,
+ "learning_rate": 7.998900740439265e-05,
+ "loss": 1.1736,
+ "step": 175
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.20345180977460478,
+ "learning_rate": 7.998691801665024e-05,
+ "loss": 1.2281,
+ "step": 176
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.24617644827252333,
+ "learning_rate": 7.998464698428495e-05,
+ "loss": 1.2072,
+ "step": 177
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.2469050959356265,
+ "learning_rate": 7.998219431761318e-05,
+ "loss": 1.2242,
+ "step": 178
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.19529317748460623,
+ "learning_rate": 7.997956002777642e-05,
+ "loss": 1.2567,
+ "step": 179
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.19048389491381376,
+ "learning_rate": 7.99767441267412e-05,
+ "loss": 1.2982,
+ "step": 180
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.2085799116493225,
+ "learning_rate": 7.997374662729904e-05,
+ "loss": 1.1254,
+ "step": 181
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.20636853256378995,
+ "learning_rate": 7.997056754306636e-05,
+ "loss": 1.2435,
+ "step": 182
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.20590016382290252,
+ "learning_rate": 7.99672068884845e-05,
+ "loss": 1.2658,
+ "step": 183
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.1931166169764433,
+ "learning_rate": 7.996366467881955e-05,
+ "loss": 1.1637,
+ "step": 184
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.18873318157988098,
+ "learning_rate": 7.995994093016237e-05,
+ "loss": 1.1335,
+ "step": 185
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.19210254625199108,
+ "learning_rate": 7.995603565942846e-05,
+ "loss": 1.1928,
+ "step": 186
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.2130986479765664,
+ "learning_rate": 7.995194888435792e-05,
+ "loss": 1.2158,
+ "step": 187
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.22003854501814088,
+ "learning_rate": 7.994768062351532e-05,
+ "loss": 1.2288,
+ "step": 188
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.20330803191993058,
+ "learning_rate": 7.994323089628968e-05,
+ "loss": 1.2426,
+ "step": 189
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.20567314642208634,
+ "learning_rate": 7.993859972289434e-05,
+ "loss": 1.2649,
+ "step": 190
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.21556663727342962,
+ "learning_rate": 7.993378712436686e-05,
+ "loss": 1.2545,
+ "step": 191
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.20309165469109888,
+ "learning_rate": 7.992879312256897e-05,
+ "loss": 1.3338,
+ "step": 192
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.19574356669421325,
+ "learning_rate": 7.992361774018641e-05,
+ "loss": 1.278,
+ "step": 193
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.2763613746722313,
+ "learning_rate": 7.991826100072891e-05,
+ "loss": 1.2571,
+ "step": 194
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.19346552479915102,
+ "learning_rate": 7.991272292852996e-05,
+ "loss": 1.2027,
+ "step": 195
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.2281167812123908,
+ "learning_rate": 7.990700354874683e-05,
+ "loss": 1.2586,
+ "step": 196
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.19699013712137542,
+ "learning_rate": 7.990110288736042e-05,
+ "loss": 1.1371,
+ "step": 197
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.21768209981475933,
+ "learning_rate": 7.989502097117503e-05,
+ "loss": 1.2522,
+ "step": 198
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.21335427847754582,
+ "learning_rate": 7.988875782781838e-05,
+ "loss": 1.2437,
+ "step": 199
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.21856710629066897,
+ "learning_rate": 7.988231348574147e-05,
+ "loss": 1.2135,
+ "step": 200
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.20482062658774797,
+ "learning_rate": 7.987568797421836e-05,
+ "loss": 1.1755,
+ "step": 201
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.2017756813960897,
+ "learning_rate": 7.986888132334608e-05,
+ "loss": 1.1699,
+ "step": 202
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.20496443848153809,
+ "learning_rate": 7.986189356404458e-05,
+ "loss": 1.2125,
+ "step": 203
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.2134603800558358,
+ "learning_rate": 7.985472472805643e-05,
+ "loss": 1.2391,
+ "step": 204
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.2364175573420861,
+ "learning_rate": 7.98473748479468e-05,
+ "loss": 1.2384,
+ "step": 205
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.1872419861598724,
+ "learning_rate": 7.983984395710326e-05,
+ "loss": 1.1457,
+ "step": 206
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.28222194007095774,
+ "learning_rate": 7.983213208973566e-05,
+ "loss": 1.2952,
+ "step": 207
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.1916094851162064,
+ "learning_rate": 7.982423928087593e-05,
+ "loss": 1.1763,
+ "step": 208
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.18446245256166657,
+ "learning_rate": 7.981616556637795e-05,
+ "loss": 1.1863,
+ "step": 209
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.195191961022491,
+ "learning_rate": 7.980791098291737e-05,
+ "loss": 1.2036,
+ "step": 210
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.2652439657825496,
+ "learning_rate": 7.979947556799151e-05,
+ "loss": 1.2834,
+ "step": 211
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.24308438957843412,
+ "learning_rate": 7.979085935991906e-05,
+ "loss": 1.234,
+ "step": 212
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.21294701043622016,
+ "learning_rate": 7.978206239784004e-05,
+ "loss": 1.3006,
+ "step": 213
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.25809277041859524,
+ "learning_rate": 7.977308472171553e-05,
+ "loss": 1.2272,
+ "step": 214
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.193463860107294,
+ "learning_rate": 7.976392637232754e-05,
+ "loss": 1.2295,
+ "step": 215
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.2150023760609626,
+ "learning_rate": 7.975458739127877e-05,
+ "loss": 1.2135,
+ "step": 216
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.22590495955605894,
+ "learning_rate": 7.974506782099253e-05,
+ "loss": 1.2532,
+ "step": 217
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.21023744668403702,
+ "learning_rate": 7.973536770471242e-05,
+ "loss": 1.2472,
+ "step": 218
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.2345749799511543,
+ "learning_rate": 7.972548708650218e-05,
+ "loss": 1.1791,
+ "step": 219
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.2158876734005217,
+ "learning_rate": 7.971542601124553e-05,
+ "loss": 1.2483,
+ "step": 220
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.29455339949432446,
+ "learning_rate": 7.970518452464593e-05,
+ "loss": 1.2894,
+ "step": 221
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.23983708730626851,
+ "learning_rate": 7.969476267322636e-05,
+ "loss": 1.271,
+ "step": 222
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.1922400905426158,
+ "learning_rate": 7.968416050432912e-05,
+ "loss": 1.2139,
+ "step": 223
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.2238136844422931,
+ "learning_rate": 7.967337806611568e-05,
+ "loss": 1.2655,
+ "step": 224
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.21230292828267672,
+ "learning_rate": 7.966241540756631e-05,
+ "loss": 1.2406,
+ "step": 225
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.26656119419070456,
+ "learning_rate": 7.965127257848004e-05,
+ "loss": 1.2595,
+ "step": 226
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.22381385502992684,
+ "learning_rate": 7.963994962947426e-05,
+ "loss": 1.1737,
+ "step": 227
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20056702203994298,
+ "learning_rate": 7.962844661198462e-05,
+ "loss": 1.1969,
+ "step": 228
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20148701321526885,
+ "learning_rate": 7.961676357826478e-05,
+ "loss": 1.2151,
+ "step": 229
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20034834807028637,
+ "learning_rate": 7.960490058138604e-05,
+ "loss": 1.1455,
+ "step": 230
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.21050838521846033,
+ "learning_rate": 7.959285767523732e-05,
+ "loss": 1.2223,
+ "step": 231
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20904772138969777,
+ "learning_rate": 7.95806349145247e-05,
+ "loss": 1.2534,
+ "step": 232
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.20307877304792957,
+ "learning_rate": 7.956823235477134e-05,
+ "loss": 1.1352,
+ "step": 233
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.20501105270897094,
+ "learning_rate": 7.95556500523171e-05,
+ "loss": 1.2031,
+ "step": 234
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.19800586972038586,
+ "learning_rate": 7.954288806431838e-05,
+ "loss": 1.2567,
+ "step": 235
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.2175102450594135,
+ "learning_rate": 7.952994644874777e-05,
+ "loss": 1.2538,
+ "step": 236
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.22698189300067595,
+ "learning_rate": 7.951682526439391e-05,
+ "loss": 1.3088,
+ "step": 237
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.19208392014975315,
+ "learning_rate": 7.950352457086109e-05,
+ "loss": 1.2336,
+ "step": 238
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.27004086334319655,
+ "learning_rate": 7.949004442856905e-05,
+ "loss": 1.2012,
+ "step": 239
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.23420974954538043,
+ "learning_rate": 7.947638489875272e-05,
+ "loss": 1.2244,
+ "step": 240
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.20514399124802024,
+ "learning_rate": 7.946254604346186e-05,
+ "loss": 1.2548,
+ "step": 241
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.19334973602372896,
+ "learning_rate": 7.944852792556092e-05,
+ "loss": 1.2104,
+ "step": 242
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.1992640714537956,
+ "learning_rate": 7.943433060872858e-05,
+ "loss": 1.2628,
+ "step": 243
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.203284617090413,
+ "learning_rate": 7.941995415745761e-05,
+ "loss": 1.2002,
+ "step": 244
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.22795306969682058,
+ "learning_rate": 7.94053986370545e-05,
+ "loss": 1.2215,
+ "step": 245
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.20789041346838505,
+ "learning_rate": 7.939066411363915e-05,
+ "loss": 1.0998,
+ "step": 246
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.22354868884742066,
+ "learning_rate": 7.937575065414464e-05,
+ "loss": 1.2564,
+ "step": 247
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.21176392726647736,
+ "learning_rate": 7.936065832631687e-05,
+ "loss": 1.2816,
+ "step": 248
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.19967179557235587,
+ "learning_rate": 7.934538719871427e-05,
+ "loss": 1.1961,
+ "step": 249
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.210819577350627,
+ "learning_rate": 7.932993734070747e-05,
+ "loss": 1.2167,
+ "step": 250
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.21537794551756187,
+ "learning_rate": 7.931430882247903e-05,
+ "loss": 1.2341,
+ "step": 251
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.22850872387256574,
+ "learning_rate": 7.929850171502304e-05,
+ "loss": 1.1686,
+ "step": 252
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.22380366415076383,
+ "learning_rate": 7.928251609014493e-05,
+ "loss": 1.1462,
+ "step": 253
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.22426923149036065,
+ "learning_rate": 7.926635202046102e-05,
+ "loss": 1.1792,
+ "step": 254
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.42082703321103965,
+ "learning_rate": 7.925000957939822e-05,
+ "loss": 1.2718,
+ "step": 255
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.2235432774854074,
+ "learning_rate": 7.92334888411937e-05,
+ "loss": 1.2598,
+ "step": 256
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.281644028934108,
+ "learning_rate": 7.92167898808946e-05,
+ "loss": 1.2205,
+ "step": 257
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.2037705143888748,
+ "learning_rate": 7.919991277435763e-05,
+ "loss": 1.1737,
+ "step": 258
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.20917419230028977,
+ "learning_rate": 7.918285759824879e-05,
+ "loss": 1.2035,
+ "step": 259
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.20510847570635518,
+ "learning_rate": 7.916562443004292e-05,
+ "loss": 1.2135,
+ "step": 260
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.25172483071092466,
+ "learning_rate": 7.914821334802342e-05,
+ "loss": 1.2218,
+ "step": 261
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.21102706700634313,
+ "learning_rate": 7.91306244312819e-05,
+ "loss": 1.1738,
+ "step": 262
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.22626060872645815,
+ "learning_rate": 7.911285775971781e-05,
+ "loss": 1.238,
+ "step": 263
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.22448567539778486,
+ "learning_rate": 7.909491341403805e-05,
+ "loss": 1.2404,
+ "step": 264
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.2019099786139193,
+ "learning_rate": 7.907679147575661e-05,
+ "loss": 1.213,
+ "step": 265
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.24307234839096267,
+ "learning_rate": 7.905849202719422e-05,
+ "loss": 1.2322,
+ "step": 266
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.19801890521743487,
+ "learning_rate": 7.904001515147802e-05,
+ "loss": 1.2448,
+ "step": 267
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.2102742273575385,
+ "learning_rate": 7.902136093254106e-05,
+ "loss": 1.1657,
+ "step": 268
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.2173464476815016,
+ "learning_rate": 7.900252945512201e-05,
+ "loss": 1.2549,
+ "step": 269
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.20957275458699595,
+ "learning_rate": 7.898352080476479e-05,
+ "loss": 1.2536,
+ "step": 270
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.20691966388952363,
+ "learning_rate": 7.896433506781811e-05,
+ "loss": 1.2661,
+ "step": 271
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.2276662275112648,
+ "learning_rate": 7.894497233143509e-05,
+ "loss": 1.2409,
+ "step": 272
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.23854109569301263,
+ "learning_rate": 7.892543268357297e-05,
+ "loss": 1.2681,
+ "step": 273
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.2233864156677627,
+ "learning_rate": 7.890571621299252e-05,
+ "loss": 1.1687,
+ "step": 274
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.20114129147925475,
+ "learning_rate": 7.888582300925787e-05,
+ "loss": 1.2184,
+ "step": 275
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.2154654670569462,
+ "learning_rate": 7.886575316273586e-05,
+ "loss": 1.1982,
+ "step": 276
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.2292982209343639,
+ "learning_rate": 7.884550676459583e-05,
+ "loss": 1.2129,
+ "step": 277
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.21302713135229548,
+ "learning_rate": 7.882508390680908e-05,
+ "loss": 1.1605,
+ "step": 278
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.2123661020671048,
+ "learning_rate": 7.88044846821485e-05,
+ "loss": 1.2308,
+ "step": 279
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.2080577410800404,
+ "learning_rate": 7.878370918418818e-05,
+ "loss": 1.2195,
+ "step": 280
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.19663901881127385,
+ "learning_rate": 7.876275750730289e-05,
+ "loss": 1.1591,
+ "step": 281
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.20534502031312163,
+ "learning_rate": 7.874162974666776e-05,
+ "loss": 1.2664,
+ "step": 282
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.23240445399513837,
+ "learning_rate": 7.872032599825779e-05,
+ "loss": 1.2151,
+ "step": 283
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.2672527316717507,
+ "learning_rate": 7.86988463588474e-05,
+ "loss": 1.2406,
+ "step": 284
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.19893903058743695,
+ "learning_rate": 7.867719092601003e-05,
+ "loss": 1.1291,
+ "step": 285
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.33275268109930917,
+ "learning_rate": 7.865535979811768e-05,
+ "loss": 1.1406,
+ "step": 286
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.2373619455690358,
+ "learning_rate": 7.863335307434045e-05,
+ "loss": 1.2799,
+ "step": 287
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.263235735390858,
+ "learning_rate": 7.861117085464612e-05,
+ "loss": 1.2415,
+ "step": 288
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.25884281780784324,
+ "learning_rate": 7.858881323979965e-05,
+ "loss": 1.3919,
+ "step": 289
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.25426288332255736,
+ "learning_rate": 7.85662803313628e-05,
+ "loss": 1.174,
+ "step": 290
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.26655405527881243,
+ "learning_rate": 7.854357223169356e-05,
+ "loss": 1.2806,
+ "step": 291
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.20909844432349833,
+ "learning_rate": 7.852068904394579e-05,
+ "loss": 1.2627,
+ "step": 292
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.21307115068935759,
+ "learning_rate": 7.849763087206866e-05,
+ "loss": 1.1879,
+ "step": 293
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.25009949471398946,
+ "learning_rate": 7.847439782080628e-05,
+ "loss": 1.2881,
+ "step": 294
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.20960783418679174,
+ "learning_rate": 7.845098999569712e-05,
+ "loss": 1.2723,
+ "step": 295
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.24968832437925104,
+ "learning_rate": 7.842740750307362e-05,
+ "loss": 1.2029,
+ "step": 296
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.22981196585125677,
+ "learning_rate": 7.84036504500616e-05,
+ "loss": 1.1695,
+ "step": 297
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.2320606844751365,
+ "learning_rate": 7.837971894457991e-05,
+ "loss": 1.2317,
+ "step": 298
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.23051459673906124,
+ "learning_rate": 7.835561309533981e-05,
+ "loss": 1.2046,
+ "step": 299
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.2510027231060586,
+ "learning_rate": 7.833133301184457e-05,
+ "loss": 1.199,
+ "step": 300
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.23601180466018787,
+ "learning_rate": 7.830687880438895e-05,
+ "loss": 1.1755,
+ "step": 301
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.24740820934385369,
+ "learning_rate": 7.828225058405864e-05,
+ "loss": 1.2054,
+ "step": 302
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.23065372979111173,
+ "learning_rate": 7.825744846272984e-05,
+ "loss": 1.2066,
+ "step": 303
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.22385077334838213,
+ "learning_rate": 7.823247255306866e-05,
+ "loss": 1.2147,
+ "step": 304
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.42981213948386104,
+ "learning_rate": 7.820732296853074e-05,
+ "loss": 1.2314,
+ "step": 305
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.21122844902751076,
+ "learning_rate": 7.818199982336058e-05,
+ "loss": 1.1462,
+ "step": 306
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.23374869692118933,
+ "learning_rate": 7.815650323259117e-05,
+ "loss": 1.2051,
+ "step": 307
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.21662363795962128,
+ "learning_rate": 7.813083331204332e-05,
+ "loss": 1.1575,
+ "step": 308
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.2088315773384112,
+ "learning_rate": 7.810499017832526e-05,
+ "loss": 1.1316,
+ "step": 309
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.2095238410730976,
+ "learning_rate": 7.807897394883203e-05,
+ "loss": 1.2087,
+ "step": 310
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.22672932127256515,
+ "learning_rate": 7.805278474174499e-05,
+ "loss": 1.2512,
+ "step": 311
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.21873052340922736,
+ "learning_rate": 7.802642267603126e-05,
+ "loss": 1.1909,
+ "step": 312
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.219814521916342,
+ "learning_rate": 7.79998878714432e-05,
+ "loss": 1.1669,
+ "step": 313
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.3049426027257317,
+ "learning_rate": 7.797318044851786e-05,
+ "loss": 1.1797,
+ "step": 314
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.22309435690065985,
+ "learning_rate": 7.794630052857638e-05,
+ "loss": 1.1417,
+ "step": 315
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.3891885169154885,
+ "learning_rate": 7.791924823372354e-05,
+ "loss": 1.2369,
+ "step": 316
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.24780269452456372,
+ "learning_rate": 7.789202368684711e-05,
+ "loss": 1.2521,
+ "step": 317
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.21660460720269362,
+ "learning_rate": 7.786462701161738e-05,
+ "loss": 1.2151,
+ "step": 318
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.23635409466561857,
+ "learning_rate": 7.783705833248649e-05,
+ "loss": 1.2363,
+ "step": 319
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.2616135839903218,
+ "learning_rate": 7.780931777468797e-05,
+ "loss": 1.2428,
+ "step": 320
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.21461059159245083,
+ "learning_rate": 7.77814054642361e-05,
+ "loss": 1.1434,
+ "step": 321
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.25348824286656163,
+ "learning_rate": 7.775332152792539e-05,
+ "loss": 1.2368,
+ "step": 322
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.22275034726331247,
+ "learning_rate": 7.772506609332995e-05,
+ "loss": 1.1827,
+ "step": 323
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.25030821228147526,
+ "learning_rate": 7.769663928880298e-05,
+ "loss": 1.2428,
+ "step": 324
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.22251804398745534,
+ "learning_rate": 7.766804124347608e-05,
+ "loss": 1.1889,
+ "step": 325
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.23381455520411995,
+ "learning_rate": 7.763927208725879e-05,
+ "loss": 1.2115,
+ "step": 326
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.27341902651946226,
+ "learning_rate": 7.761033195083791e-05,
+ "loss": 1.2535,
+ "step": 327
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.24862471659814522,
+ "learning_rate": 7.758122096567694e-05,
+ "loss": 1.2128,
+ "step": 328
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.2251357082045494,
+ "learning_rate": 7.755193926401547e-05,
+ "loss": 1.2334,
+ "step": 329
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.3173274941622932,
+ "learning_rate": 7.752248697886857e-05,
+ "loss": 1.226,
+ "step": 330
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.23056440717672175,
+ "learning_rate": 7.74928642440263e-05,
+ "loss": 1.2339,
+ "step": 331
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2801507500859342,
+ "learning_rate": 7.746307119405286e-05,
+ "loss": 1.287,
+ "step": 332
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2267818430426272,
+ "learning_rate": 7.743310796428622e-05,
+ "loss": 1.1916,
+ "step": 333
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2777329160365585,
+ "learning_rate": 7.74029746908374e-05,
+ "loss": 1.252,
+ "step": 334
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.25289169762353,
+ "learning_rate": 7.737267151058983e-05,
+ "loss": 1.2153,
+ "step": 335
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2424670686901653,
+ "learning_rate": 7.734219856119875e-05,
+ "loss": 1.2227,
+ "step": 336
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.22747092217441645,
+ "learning_rate": 7.731155598109067e-05,
+ "loss": 1.19,
+ "step": 337
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.2307810940100189,
+ "learning_rate": 7.728074390946257e-05,
+ "loss": 1.1818,
+ "step": 338
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.2583402574655623,
+ "learning_rate": 7.724976248628142e-05,
+ "loss": 1.1608,
+ "step": 339
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.22140209760890694,
+ "learning_rate": 7.721861185228347e-05,
+ "loss": 1.1245,
+ "step": 340
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.25859310758244686,
+ "learning_rate": 7.718729214897362e-05,
+ "loss": 1.2247,
+ "step": 341
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.26371179531372124,
+ "learning_rate": 7.715580351862482e-05,
+ "loss": 1.2128,
+ "step": 342
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.26575541302851047,
+ "learning_rate": 7.712414610427733e-05,
+ "loss": 1.2443,
+ "step": 343
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.269978305197599,
+ "learning_rate": 7.709232004973816e-05,
+ "loss": 1.2231,
+ "step": 344
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.26583998705977047,
+ "learning_rate": 7.70603254995804e-05,
+ "loss": 1.2476,
+ "step": 345
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.24256062164066097,
+ "learning_rate": 7.702816259914253e-05,
+ "loss": 1.2901,
+ "step": 346
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.3463123472658915,
+ "learning_rate": 7.699583149452779e-05,
+ "loss": 1.3277,
+ "step": 347
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.2269096590531878,
+ "learning_rate": 7.696333233260345e-05,
+ "loss": 1.2047,
+ "step": 348
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.25136883001050025,
+ "learning_rate": 7.693066526100031e-05,
+ "loss": 1.1619,
+ "step": 349
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.2565112571116145,
+ "learning_rate": 7.68978304281118e-05,
+ "loss": 1.2389,
+ "step": 350
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.22175779550828703,
+ "learning_rate": 7.686482798309349e-05,
+ "loss": 1.2238,
+ "step": 351
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.22588304332216555,
+ "learning_rate": 7.683165807586234e-05,
+ "loss": 1.174,
+ "step": 352
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.24889474296529737,
+ "learning_rate": 7.6798320857096e-05,
+ "loss": 1.2366,
+ "step": 353
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.27339703806525034,
+ "learning_rate": 7.676481647823214e-05,
+ "loss": 1.2356,
+ "step": 354
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.23424666722888365,
+ "learning_rate": 7.673114509146782e-05,
+ "loss": 1.2089,
+ "step": 355
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.27978285392461766,
+ "learning_rate": 7.66973068497587e-05,
+ "loss": 1.2609,
+ "step": 356
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.2509423350138824,
+ "learning_rate": 7.666330190681844e-05,
+ "loss": 1.1777,
+ "step": 357
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.23007730927468031,
+ "learning_rate": 7.662913041711793e-05,
+ "loss": 1.154,
+ "step": 358
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.2438648674953112,
+ "learning_rate": 7.659479253588462e-05,
+ "loss": 1.2257,
+ "step": 359
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.28816093242092233,
+ "learning_rate": 7.65602884191018e-05,
+ "loss": 1.2558,
+ "step": 360
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.24972815300596035,
+ "learning_rate": 7.652561822350793e-05,
+ "loss": 1.2837,
+ "step": 361
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.2543189139697063,
+ "learning_rate": 7.649078210659587e-05,
+ "loss": 1.2193,
+ "step": 362
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.2237937956718952,
+ "learning_rate": 7.645578022661224e-05,
+ "loss": 1.2237,
+ "step": 363
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.29742029408787396,
+ "learning_rate": 7.642061274255657e-05,
+ "loss": 1.2116,
+ "step": 364
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2462883147335493,
+ "learning_rate": 7.638527981418075e-05,
+ "loss": 1.1827,
+ "step": 365
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2647802498907096,
+ "learning_rate": 7.634978160198817e-05,
+ "loss": 1.2739,
+ "step": 366
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.22360398779217264,
+ "learning_rate": 7.631411826723306e-05,
+ "loss": 1.2185,
+ "step": 367
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2635048004593543,
+ "learning_rate": 7.627828997191973e-05,
+ "loss": 1.2317,
+ "step": 368
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2764803449917684,
+ "learning_rate": 7.624229687880184e-05,
+ "loss": 1.1923,
+ "step": 369
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.25724943233414527,
+ "learning_rate": 7.620613915138166e-05,
+ "loss": 1.2218,
+ "step": 370
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.2858318045794755,
+ "learning_rate": 7.61698169539093e-05,
+ "loss": 1.1496,
+ "step": 371
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.23547216647460364,
+ "learning_rate": 7.613333045138206e-05,
+ "loss": 1.1905,
+ "step": 372
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.22984814903684375,
+ "learning_rate": 7.609667980954355e-05,
+ "loss": 1.2009,
+ "step": 373
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.2551903754079084,
+ "learning_rate": 7.605986519488301e-05,
+ "loss": 1.2042,
+ "step": 374
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.2508257410125616,
+ "learning_rate": 7.602288677463457e-05,
+ "loss": 1.2468,
+ "step": 375
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.25324577774935964,
+ "learning_rate": 7.598574471677644e-05,
+ "loss": 1.2603,
+ "step": 376
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.35888776531769967,
+ "learning_rate": 7.59484391900302e-05,
+ "loss": 1.1929,
+ "step": 377
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.22048517191014724,
+ "learning_rate": 7.591097036385994e-05,
+ "loss": 1.1783,
+ "step": 378
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.2781160412746083,
+ "learning_rate": 7.587333840847162e-05,
+ "loss": 1.3397,
+ "step": 379
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.24033046830332258,
+ "learning_rate": 7.583554349481222e-05,
+ "loss": 1.2436,
+ "step": 380
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.26413762380260003,
+ "learning_rate": 7.579758579456893e-05,
+ "loss": 1.1917,
+ "step": 381
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.2390937887338632,
+ "learning_rate": 7.575946548016847e-05,
+ "loss": 1.2186,
+ "step": 382
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.25131263043429275,
+ "learning_rate": 7.572118272477622e-05,
+ "loss": 1.2538,
+ "step": 383
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.223974104870702,
+ "learning_rate": 7.568273770229546e-05,
+ "loss": 1.2165,
+ "step": 384
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.25840356830252875,
+ "learning_rate": 7.564413058736663e-05,
+ "loss": 1.1848,
+ "step": 385
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.2723156683076603,
+ "learning_rate": 7.560536155536641e-05,
+ "loss": 1.1982,
+ "step": 386
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.265687427976889,
+ "learning_rate": 7.556643078240708e-05,
+ "loss": 1.231,
+ "step": 387
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.25152762080976077,
+ "learning_rate": 7.552733844533562e-05,
+ "loss": 1.1974,
+ "step": 388
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.2366049485053541,
+ "learning_rate": 7.548808472173292e-05,
+ "loss": 1.3119,
+ "step": 389
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.22092196577077122,
+ "learning_rate": 7.5448669789913e-05,
+ "loss": 1.195,
+ "step": 390
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.22667521540462374,
+ "learning_rate": 7.540909382892217e-05,
+ "loss": 1.1431,
+ "step": 391
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.25432207282646513,
+ "learning_rate": 7.536935701853823e-05,
+ "loss": 1.2173,
+ "step": 392
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.29950506457923864,
+ "learning_rate": 7.53294595392697e-05,
+ "loss": 1.1962,
+ "step": 393
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.24735689607229913,
+ "learning_rate": 7.528940157235487e-05,
+ "loss": 1.2053,
+ "step": 394
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.24394198607459663,
+ "learning_rate": 7.524918329976114e-05,
+ "loss": 1.1979,
+ "step": 395
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.2630369372689188,
+ "learning_rate": 7.520880490418409e-05,
+ "loss": 1.2111,
+ "step": 396
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.26275028416291457,
+ "learning_rate": 7.516826656904664e-05,
+ "loss": 1.2133,
+ "step": 397
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.23938074620956928,
+ "learning_rate": 7.512756847849831e-05,
+ "loss": 1.1355,
+ "step": 398
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.3724960610098138,
+ "learning_rate": 7.508671081741428e-05,
+ "loss": 1.2572,
+ "step": 399
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.24161685847894723,
+ "learning_rate": 7.504569377139462e-05,
+ "loss": 1.1706,
+ "step": 400
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.26121591322670523,
+ "learning_rate": 7.50045175267634e-05,
+ "loss": 1.2135,
+ "step": 401
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2465579498164775,
+ "learning_rate": 7.496318227056788e-05,
+ "loss": 1.1641,
+ "step": 402
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2556288696122787,
+ "learning_rate": 7.492168819057767e-05,
+ "loss": 1.2939,
+ "step": 403
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.261481216336303,
+ "learning_rate": 7.488003547528382e-05,
+ "loss": 1.2026,
+ "step": 404
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2389415135676362,
+ "learning_rate": 7.483822431389799e-05,
+ "loss": 1.2131,
+ "step": 405
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2559201956627192,
+ "learning_rate": 7.479625489635162e-05,
+ "loss": 1.1246,
+ "step": 406
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.27127932491822604,
+ "learning_rate": 7.475412741329504e-05,
+ "loss": 1.2429,
+ "step": 407
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.27006004008695594,
+ "learning_rate": 7.47118420560966e-05,
+ "loss": 1.2388,
+ "step": 408
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.23716823297200537,
+ "learning_rate": 7.466939901684182e-05,
+ "loss": 1.1264,
+ "step": 409
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.2885373898669248,
+ "learning_rate": 7.462679848833252e-05,
+ "loss": 1.2786,
+ "step": 410
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.49215227598639927,
+ "learning_rate": 7.458404066408588e-05,
+ "loss": 1.2386,
+ "step": 411
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.24235735604947403,
+ "learning_rate": 7.454112573833368e-05,
+ "loss": 1.1423,
+ "step": 412
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.2584614748054343,
+ "learning_rate": 7.449805390602127e-05,
+ "loss": 1.2669,
+ "step": 413
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.23806123085998873,
+ "learning_rate": 7.445482536280684e-05,
+ "loss": 1.1763,
+ "step": 414
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.24459517607786851,
+ "learning_rate": 7.441144030506043e-05,
+ "loss": 1.198,
+ "step": 415
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.25801616402700395,
+ "learning_rate": 7.436789892986304e-05,
+ "loss": 1.2136,
+ "step": 416
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.2814819942392514,
+ "learning_rate": 7.432420143500578e-05,
+ "loss": 1.2398,
+ "step": 417
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.22134709322606153,
+ "learning_rate": 7.428034801898893e-05,
+ "loss": 1.1592,
+ "step": 418
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.2899677536995633,
+ "learning_rate": 7.42363388810211e-05,
+ "loss": 1.2296,
+ "step": 419
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.24005943230262294,
+ "learning_rate": 7.419217422101822e-05,
+ "loss": 1.2223,
+ "step": 420
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.26417562369496167,
+ "learning_rate": 7.414785423960275e-05,
+ "loss": 1.2261,
+ "step": 421
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.2580815883535521,
+ "learning_rate": 7.410337913810271e-05,
+ "loss": 1.2021,
+ "step": 422
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.25242217589496435,
+ "learning_rate": 7.405874911855071e-05,
+ "loss": 1.239,
+ "step": 423
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.21991733999839932,
+ "learning_rate": 7.401396438368315e-05,
+ "loss": 1.1716,
+ "step": 424
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.40116538322720213,
+ "learning_rate": 7.396902513693924e-05,
+ "loss": 1.2773,
+ "step": 425
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.277333939455099,
+ "learning_rate": 7.392393158246002e-05,
+ "loss": 1.2574,
+ "step": 426
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.27146087746385755,
+ "learning_rate": 7.387868392508756e-05,
+ "loss": 1.2243,
+ "step": 427
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.255881055620786,
+ "learning_rate": 7.38332823703639e-05,
+ "loss": 1.223,
+ "step": 428
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.24807364856677255,
+ "learning_rate": 7.378772712453021e-05,
+ "loss": 1.1985,
+ "step": 429
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.25746257617764423,
+ "learning_rate": 7.37420183945258e-05,
+ "loss": 1.2502,
+ "step": 430
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.28851991049982234,
+ "learning_rate": 7.369615638798722e-05,
+ "loss": 1.2535,
+ "step": 431
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.24113389811604363,
+ "learning_rate": 7.365014131324725e-05,
+ "loss": 1.2227,
+ "step": 432
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.2414465151257969,
+ "learning_rate": 7.360397337933405e-05,
+ "loss": 1.1884,
+ "step": 433
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.2735463134699831,
+ "learning_rate": 7.355765279597011e-05,
+ "loss": 1.2756,
+ "step": 434
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.2588437452987293,
+ "learning_rate": 7.351117977357139e-05,
+ "loss": 1.2108,
+ "step": 435
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.26573294117796553,
+ "learning_rate": 7.346455452324629e-05,
+ "loss": 1.1821,
+ "step": 436
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.2555476577827304,
+ "learning_rate": 7.341777725679473e-05,
+ "loss": 1.1937,
+ "step": 437
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.2867704132108098,
+ "learning_rate": 7.337084818670716e-05,
+ "loss": 1.2272,
+ "step": 438
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.27726678115981157,
+ "learning_rate": 7.332376752616367e-05,
+ "loss": 1.2331,
+ "step": 439
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.26955338021079955,
+ "learning_rate": 7.32765354890329e-05,
+ "loss": 1.1731,
+ "step": 440
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.25250321202536524,
+ "learning_rate": 7.322915228987116e-05,
+ "loss": 1.2653,
+ "step": 441
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.24748844179765395,
+ "learning_rate": 7.318161814392143e-05,
+ "loss": 1.24,
+ "step": 442
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.28177805247356325,
+ "learning_rate": 7.313393326711239e-05,
+ "loss": 1.185,
+ "step": 443
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.24093242000396312,
+ "learning_rate": 7.30860978760574e-05,
+ "loss": 1.1994,
+ "step": 444
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.26277803901457075,
+ "learning_rate": 7.30381121880536e-05,
+ "loss": 1.212,
+ "step": 445
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2506524258682433,
+ "learning_rate": 7.298997642108079e-05,
+ "loss": 1.2421,
+ "step": 446
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2840599700015824,
+ "learning_rate": 7.294169079380061e-05,
+ "loss": 1.1818,
+ "step": 447
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.24892184038117549,
+ "learning_rate": 7.289325552555538e-05,
+ "loss": 1.1916,
+ "step": 448
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2700898428541357,
+ "learning_rate": 7.284467083636722e-05,
+ "loss": 1.2517,
+ "step": 449
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2617848546539419,
+ "learning_rate": 7.279593694693698e-05,
+ "loss": 1.2063,
+ "step": 450
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2698278585334131,
+ "learning_rate": 7.274705407864332e-05,
+ "loss": 1.194,
+ "step": 451
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.23678313024953834,
+ "learning_rate": 7.26980224535416e-05,
+ "loss": 1.2349,
+ "step": 452
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.24851875792002978,
+ "learning_rate": 7.264884229436293e-05,
+ "loss": 1.1758,
+ "step": 453
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.24122080121681125,
+ "learning_rate": 7.259951382451318e-05,
+ "loss": 1.1962,
+ "step": 454
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.22741322959884405,
+ "learning_rate": 7.25500372680719e-05,
+ "loss": 1.1702,
+ "step": 455
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.2297475610861458,
+ "learning_rate": 7.250041284979137e-05,
+ "loss": 1.1466,
+ "step": 456
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.3057605989721467,
+ "learning_rate": 7.245064079509553e-05,
+ "loss": 1.246,
+ "step": 457
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.2719638501597136,
+ "learning_rate": 7.240072133007899e-05,
+ "loss": 1.2184,
+ "step": 458
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.2436807816414479,
+ "learning_rate": 7.235065468150593e-05,
+ "loss": 1.2324,
+ "step": 459
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.23436349430255515,
+ "learning_rate": 7.23004410768092e-05,
+ "loss": 1.1813,
+ "step": 460
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.2398940990211377,
+ "learning_rate": 7.22500807440892e-05,
+ "loss": 1.1924,
+ "step": 461
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.2605716625062531,
+ "learning_rate": 7.219957391211281e-05,
+ "loss": 1.182,
+ "step": 462
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.260462524570941,
+ "learning_rate": 7.214892081031244e-05,
+ "loss": 1.2136,
+ "step": 463
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.21979766512306334,
+ "learning_rate": 7.209812166878491e-05,
+ "loss": 1.2066,
+ "step": 464
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.23324453647530663,
+ "learning_rate": 7.204717671829051e-05,
+ "loss": 1.1657,
+ "step": 465
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.2529434935507481,
+ "learning_rate": 7.199608619025177e-05,
+ "loss": 1.2093,
+ "step": 466
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.25371701891720116,
+ "learning_rate": 7.194485031675265e-05,
+ "loss": 1.2225,
+ "step": 467
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.23272423066292103,
+ "learning_rate": 7.189346933053725e-05,
+ "loss": 1.1721,
+ "step": 468
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.25122928735587546,
+ "learning_rate": 7.184194346500892e-05,
+ "loss": 1.2537,
+ "step": 469
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.2159270875490409,
+ "learning_rate": 7.179027295422913e-05,
+ "loss": 1.197,
+ "step": 470
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.2633111059076544,
+ "learning_rate": 7.173845803291636e-05,
+ "loss": 1.1721,
+ "step": 471
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.30555936322098703,
+ "learning_rate": 7.168649893644517e-05,
+ "loss": 1.3011,
+ "step": 472
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.23492670111453726,
+ "learning_rate": 7.163439590084502e-05,
+ "loss": 1.1601,
+ "step": 473
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.26602734263721806,
+ "learning_rate": 7.158214916279923e-05,
+ "loss": 1.2808,
+ "step": 474
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.3182695007856262,
+ "learning_rate": 7.152975895964386e-05,
+ "loss": 1.2967,
+ "step": 475
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.2785021674736721,
+ "learning_rate": 7.147722552936673e-05,
+ "loss": 1.1789,
+ "step": 476
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.279474303138652,
+ "learning_rate": 7.142454911060627e-05,
+ "loss": 1.2596,
+ "step": 477
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.2556980144910755,
+ "learning_rate": 7.137172994265044e-05,
+ "loss": 1.2426,
+ "step": 478
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.3311256331993533,
+ "learning_rate": 7.131876826543565e-05,
+ "loss": 1.2059,
+ "step": 479
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.26467296197775253,
+ "learning_rate": 7.12656643195457e-05,
+ "loss": 1.2482,
+ "step": 480
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.27444885274652553,
+ "learning_rate": 7.121241834621064e-05,
+ "loss": 1.2528,
+ "step": 481
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.2572283861115396,
+ "learning_rate": 7.115903058730567e-05,
+ "loss": 1.1849,
+ "step": 482
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.2677065778235683,
+ "learning_rate": 7.11055012853501e-05,
+ "loss": 1.2011,
+ "step": 483
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.29470622036742816,
+ "learning_rate": 7.105183068350619e-05,
+ "loss": 1.2398,
+ "step": 484
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.27609230248969197,
+ "learning_rate": 7.099801902557811e-05,
+ "loss": 1.2259,
+ "step": 485
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.24248634168099284,
+ "learning_rate": 7.094406655601073e-05,
+ "loss": 1.2282,
+ "step": 486
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.2765941767688746,
+ "learning_rate": 7.088997351988865e-05,
+ "loss": 1.2319,
+ "step": 487
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.29347776909858947,
+ "learning_rate": 7.083574016293493e-05,
+ "loss": 1.1765,
+ "step": 488
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.285370295424537,
+ "learning_rate": 7.078136673151008e-05,
+ "loss": 1.26,
+ "step": 489
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.29408734903836536,
+ "learning_rate": 7.072685347261093e-05,
+ "loss": 1.226,
+ "step": 490
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.27437470239205813,
+ "learning_rate": 7.067220063386947e-05,
+ "loss": 1.1976,
+ "step": 491
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.2680770258777871,
+ "learning_rate": 7.061740846355176e-05,
+ "loss": 1.1915,
+ "step": 492
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.27200362879502954,
+ "learning_rate": 7.056247721055678e-05,
+ "loss": 1.2002,
+ "step": 493
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.2637811092577037,
+ "learning_rate": 7.050740712441528e-05,
+ "loss": 1.287,
+ "step": 494
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.24657959209271266,
+ "learning_rate": 7.045219845528875e-05,
+ "loss": 1.2284,
+ "step": 495
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.25311992110358666,
+ "learning_rate": 7.039685145396812e-05,
+ "loss": 1.1616,
+ "step": 496
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.2564633694193358,
+ "learning_rate": 7.034136637187275e-05,
+ "loss": 1.2067,
+ "step": 497
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.2446797651174144,
+ "learning_rate": 7.028574346104926e-05,
+ "loss": 1.2284,
+ "step": 498
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.2592751463399255,
+ "learning_rate": 7.022998297417034e-05,
+ "loss": 1.2371,
+ "step": 499
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.2500713943206808,
+ "learning_rate": 7.017408516453365e-05,
+ "loss": 1.1061,
+ "step": 500
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.2812266276040743,
+ "learning_rate": 7.011805028606064e-05,
+ "loss": 1.1949,
+ "step": 501
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.298829667668083,
+ "learning_rate": 7.006187859329544e-05,
+ "loss": 1.2313,
+ "step": 502
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.26518768159745104,
+ "learning_rate": 7.000557034140361e-05,
+ "loss": 1.2246,
+ "step": 503
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.3037280360760458,
+ "learning_rate": 6.994912578617113e-05,
+ "loss": 1.1617,
+ "step": 504
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.2726903109255714,
+ "learning_rate": 6.989254518400309e-05,
+ "loss": 1.2415,
+ "step": 505
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.25568082003046966,
+ "learning_rate": 6.98358287919226e-05,
+ "loss": 1.1817,
+ "step": 506
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.25633294893705044,
+ "learning_rate": 6.97789768675696e-05,
+ "loss": 1.2149,
+ "step": 507
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.28291439435087123,
+ "learning_rate": 6.972198966919972e-05,
+ "loss": 1.1578,
+ "step": 508
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.27195184756655516,
+ "learning_rate": 6.966486745568308e-05,
+ "loss": 1.2355,
+ "step": 509
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.239159568376005,
+ "learning_rate": 6.960761048650312e-05,
+ "loss": 1.1688,
+ "step": 510
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.22961475425949177,
+ "learning_rate": 6.955021902175543e-05,
+ "loss": 1.2094,
+ "step": 511
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.27443773600741117,
+ "learning_rate": 6.949269332214651e-05,
+ "loss": 1.2559,
+ "step": 512
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.26230551832002097,
+ "learning_rate": 6.94350336489927e-05,
+ "loss": 1.2121,
+ "step": 513
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.2716742985303849,
+ "learning_rate": 6.937724026421892e-05,
+ "loss": 1.2444,
+ "step": 514
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.2537850139439542,
+ "learning_rate": 6.931931343035742e-05,
+ "loss": 1.1327,
+ "step": 515
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.28599587967496826,
+ "learning_rate": 6.926125341054676e-05,
+ "loss": 1.2236,
+ "step": 516
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.26780654378470103,
+ "learning_rate": 6.920306046853043e-05,
+ "loss": 1.2295,
+ "step": 517
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.23606296888412015,
+ "learning_rate": 6.914473486865577e-05,
+ "loss": 1.1543,
+ "step": 518
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.34976881174240837,
+ "learning_rate": 6.90862768758727e-05,
+ "loss": 1.2067,
+ "step": 519
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.2481257873494882,
+ "learning_rate": 6.902768675573258e-05,
+ "loss": 1.2188,
+ "step": 520
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.2996395778117021,
+ "learning_rate": 6.896896477438699e-05,
+ "loss": 1.2326,
+ "step": 521
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.8839768816333193,
+ "learning_rate": 6.891011119858643e-05,
+ "loss": 1.2435,
+ "step": 522
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.2851882482058998,
+ "learning_rate": 6.885112629567927e-05,
+ "loss": 1.2644,
+ "step": 523
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.2813663482913699,
+ "learning_rate": 6.879201033361035e-05,
+ "loss": 1.2309,
+ "step": 524
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.3257551560135454,
+ "learning_rate": 6.873276358091996e-05,
+ "loss": 1.2755,
+ "step": 525
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.28930479952494365,
+ "learning_rate": 6.867338630674247e-05,
+ "loss": 1.1962,
+ "step": 526
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.3077462996938649,
+ "learning_rate": 6.861387878080511e-05,
+ "loss": 1.2402,
+ "step": 527
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.2848900193452761,
+ "learning_rate": 6.855424127342688e-05,
+ "loss": 1.2748,
+ "step": 528
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.4765938812802202,
+ "learning_rate": 6.849447405551718e-05,
+ "loss": 1.2226,
+ "step": 529
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.53184473292579,
+ "learning_rate": 6.843457739857467e-05,
+ "loss": 1.2347,
+ "step": 530
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.6416239346492343,
+ "learning_rate": 6.837455157468596e-05,
+ "loss": 1.2429,
+ "step": 531
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.3188092712502773,
+ "learning_rate": 6.831439685652442e-05,
+ "loss": 1.216,
+ "step": 532
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.3527495731006385,
+ "learning_rate": 6.825411351734895e-05,
+ "loss": 1.1682,
+ "step": 533
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 0.29603753744741856,
+ "learning_rate": 6.819370183100274e-05,
+ "loss": 1.1434,
+ "step": 534
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 0.5252450389976622,
+ "learning_rate": 6.813316207191198e-05,
+ "loss": 1.1943,
+ "step": 535
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 0.32999419558659937,
+ "learning_rate": 6.807249451508466e-05,
+ "loss": 1.192,
+ "step": 536
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 0.3650175469778724,
+ "learning_rate": 6.801169943610929e-05,
+ "loss": 1.2141,
+ "step": 537
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 1.0643532150783557,
+ "learning_rate": 6.795077711115368e-05,
+ "loss": 1.2253,
+ "step": 538
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.5041310609130145,
+ "learning_rate": 6.788972781696363e-05,
+ "loss": 1.278,
+ "step": 539
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.5123058164360991,
+ "learning_rate": 6.782855183086177e-05,
+ "loss": 1.2231,
+ "step": 540
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.3533015702394419,
+ "learning_rate": 6.776724943074619e-05,
+ "loss": 1.2072,
+ "step": 541
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.30253964625417207,
+ "learning_rate": 6.770582089508927e-05,
+ "loss": 1.1382,
+ "step": 542
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.348991618828202,
+ "learning_rate": 6.764426650293633e-05,
+ "loss": 1.2079,
+ "step": 543
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.46017440578788743,
+ "learning_rate": 6.758258653390444e-05,
+ "loss": 1.1813,
+ "step": 544
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.31962101755594885,
+ "learning_rate": 6.75207812681811e-05,
+ "loss": 1.1339,
+ "step": 545
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.37092024548285923,
+ "learning_rate": 6.745885098652298e-05,
+ "loss": 1.2591,
+ "step": 546
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.32347106450715835,
+ "learning_rate": 6.739679597025466e-05,
+ "loss": 1.2017,
+ "step": 547
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.39250187112342494,
+ "learning_rate": 6.733461650126733e-05,
+ "loss": 1.0933,
+ "step": 548
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.473522452217324,
+ "learning_rate": 6.727231286201752e-05,
+ "loss": 1.1124,
+ "step": 549
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.4809062179622052,
+ "learning_rate": 6.720988533552582e-05,
+ "loss": 1.1585,
+ "step": 550
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.3529662801059162,
+ "learning_rate": 6.714733420537559e-05,
+ "loss": 1.0501,
+ "step": 551
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.5958247214391118,
+ "learning_rate": 6.708465975571168e-05,
+ "loss": 1.1086,
+ "step": 552
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.5341364205022454,
+ "learning_rate": 6.70218622712391e-05,
+ "loss": 1.0518,
+ "step": 553
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.3601805724462006,
+ "learning_rate": 6.695894203722181e-05,
+ "loss": 1.1779,
+ "step": 554
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.43410190338280613,
+ "learning_rate": 6.68958993394813e-05,
+ "loss": 1.093,
+ "step": 555
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.46217742572873594,
+ "learning_rate": 6.683273446439546e-05,
+ "loss": 1.0117,
+ "step": 556
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.8591682373623357,
+ "learning_rate": 6.676944769889708e-05,
+ "loss": 1.1002,
+ "step": 557
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.7383229487622726,
+ "learning_rate": 6.670603933047272e-05,
+ "loss": 1.0779,
+ "step": 558
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.5965305891207813,
+ "learning_rate": 6.664250964716131e-05,
+ "loss": 1.0889,
+ "step": 559
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.6030858606684543,
+ "learning_rate": 6.657885893755288e-05,
+ "loss": 1.0982,
+ "step": 560
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.4644510682398409,
+ "learning_rate": 6.65150874907872e-05,
+ "loss": 1.1004,
+ "step": 561
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.43943285132452564,
+ "learning_rate": 6.645119559655254e-05,
+ "loss": 1.0536,
+ "step": 562
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.4456395978600012,
+ "learning_rate": 6.638718354508427e-05,
+ "loss": 1.0733,
+ "step": 563
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.3303824433217466,
+ "learning_rate": 6.632305162716365e-05,
+ "loss": 1.0552,
+ "step": 564
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.3617704823170143,
+ "learning_rate": 6.62588001341164e-05,
+ "loss": 1.1092,
+ "step": 565
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.4465013349903427,
+ "learning_rate": 6.619442935781141e-05,
+ "loss": 1.0781,
+ "step": 566
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.48516780613791277,
+ "learning_rate": 6.612993959065947e-05,
+ "loss": 1.0686,
+ "step": 567
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.38867820318536633,
+ "learning_rate": 6.606533112561186e-05,
+ "loss": 1.1215,
+ "step": 568
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.38566119820378336,
+ "learning_rate": 6.600060425615907e-05,
+ "loss": 1.1213,
+ "step": 569
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.35534855445058544,
+ "learning_rate": 6.593575927632947e-05,
+ "loss": 1.0955,
+ "step": 570
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.38124406233349717,
+ "learning_rate": 6.587079648068795e-05,
+ "loss": 1.0659,
+ "step": 571
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.454750160923548,
+ "learning_rate": 6.580571616433457e-05,
+ "loss": 1.1149,
+ "step": 572
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.35353190088025255,
+ "learning_rate": 6.574051862290325e-05,
+ "loss": 1.0388,
+ "step": 573
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.3249395594793626,
+ "learning_rate": 6.567520415256045e-05,
+ "loss": 1.0784,
+ "step": 574
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.40078898818247227,
+ "learning_rate": 6.560977305000375e-05,
+ "loss": 1.0859,
+ "step": 575
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.4115264795060035,
+ "learning_rate": 6.554422561246054e-05,
+ "loss": 1.1828,
+ "step": 576
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.30090229228069215,
+ "learning_rate": 6.54785621376867e-05,
+ "loss": 1.0901,
+ "step": 577
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.28827860350299206,
+ "learning_rate": 6.541278292396523e-05,
+ "loss": 1.0277,
+ "step": 578
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.34690404488996757,
+ "learning_rate": 6.534688827010484e-05,
+ "loss": 1.048,
+ "step": 579
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.29943113556644785,
+ "learning_rate": 6.528087847543867e-05,
+ "loss": 1.0646,
+ "step": 580
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.37318202575874415,
+ "learning_rate": 6.521475383982291e-05,
+ "loss": 1.1091,
+ "step": 581
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.3049663659203959,
+ "learning_rate": 6.51485146636354e-05,
+ "loss": 1.0552,
+ "step": 582
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.3342407867509692,
+ "learning_rate": 6.508216124777431e-05,
+ "loss": 1.2227,
+ "step": 583
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.3348396047855952,
+ "learning_rate": 6.501569389365674e-05,
+ "loss": 1.0861,
+ "step": 584
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.30951429367513383,
+ "learning_rate": 6.494911290321737e-05,
+ "loss": 1.0461,
+ "step": 585
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.33898401361064606,
+ "learning_rate": 6.488241857890711e-05,
+ "loss": 1.0854,
+ "step": 586
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.4901462068263497,
+ "learning_rate": 6.481561122369164e-05,
+ "loss": 1.1012,
+ "step": 587
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.3179574879809652,
+ "learning_rate": 6.474869114105018e-05,
+ "loss": 1.0451,
+ "step": 588
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.32159328915060714,
+ "learning_rate": 6.468165863497395e-05,
+ "loss": 1.0458,
+ "step": 589
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.36462235008537297,
+ "learning_rate": 6.461451400996491e-05,
+ "loss": 1.1247,
+ "step": 590
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.5373862753611778,
+ "learning_rate": 6.454725757103432e-05,
+ "loss": 1.0542,
+ "step": 591
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.3160409270291303,
+ "learning_rate": 6.447988962370133e-05,
+ "loss": 1.0829,
+ "step": 592
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.390452102978435,
+ "learning_rate": 6.441241047399169e-05,
+ "loss": 1.192,
+ "step": 593
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.3802122712014928,
+ "learning_rate": 6.434482042843627e-05,
+ "loss": 1.1153,
+ "step": 594
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.4081584328242501,
+ "learning_rate": 6.427711979406966e-05,
+ "loss": 1.1635,
+ "step": 595
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.3791962989638633,
+ "learning_rate": 6.420930887842889e-05,
+ "loss": 1.1581,
+ "step": 596
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.33239440056484193,
+ "learning_rate": 6.414138798955189e-05,
+ "loss": 1.0926,
+ "step": 597
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.3279881540815014,
+ "learning_rate": 6.407335743597616e-05,
+ "loss": 1.1386,
+ "step": 598
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.30309644763750837,
+ "learning_rate": 6.40052175267374e-05,
+ "loss": 1.0523,
+ "step": 599
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.3349097308403333,
+ "learning_rate": 6.393696857136801e-05,
+ "loss": 1.0815,
+ "step": 600
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.3288227593556618,
+ "learning_rate": 6.386861087989581e-05,
+ "loss": 1.015,
+ "step": 601
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.36685586740843157,
+ "learning_rate": 6.380014476284255e-05,
+ "loss": 1.1232,
+ "step": 602
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.3620977714204643,
+ "learning_rate": 6.373157053122243e-05,
+ "loss": 1.1138,
+ "step": 603
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.3130587018197183,
+ "learning_rate": 6.366288849654091e-05,
+ "loss": 1.1255,
+ "step": 604
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.3602737087072766,
+ "learning_rate": 6.359409897079303e-05,
+ "loss": 1.0282,
+ "step": 605
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.31168852571991945,
+ "learning_rate": 6.352520226646222e-05,
+ "loss": 1.0779,
+ "step": 606
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.3516045580189353,
+ "learning_rate": 6.345619869651871e-05,
+ "loss": 1.1028,
+ "step": 607
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.3231857927563657,
+ "learning_rate": 6.33870885744182e-05,
+ "loss": 1.1202,
+ "step": 608
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.30205205129701157,
+ "learning_rate": 6.331787221410041e-05,
+ "loss": 1.1369,
+ "step": 609
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.3198359813888166,
+ "learning_rate": 6.32485499299877e-05,
+ "loss": 1.1763,
+ "step": 610
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.3128641370321787,
+ "learning_rate": 6.31791220369835e-05,
+ "loss": 1.0223,
+ "step": 611
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.2989105616213649,
+ "learning_rate": 6.31095888504711e-05,
+ "loss": 1.0358,
+ "step": 612
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.3103537906853337,
+ "learning_rate": 6.303995068631203e-05,
+ "loss": 1.1261,
+ "step": 613
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.28598715532508207,
+ "learning_rate": 6.297020786084467e-05,
+ "loss": 1.0629,
+ "step": 614
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.29809789918093255,
+ "learning_rate": 6.290036069088288e-05,
+ "loss": 1.035,
+ "step": 615
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.33765270252261453,
+ "learning_rate": 6.283040949371451e-05,
+ "loss": 1.1221,
+ "step": 616
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.3424617501293415,
+ "learning_rate": 6.276035458709993e-05,
+ "loss": 1.155,
+ "step": 617
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.3799189737987811,
+ "learning_rate": 6.269019628927067e-05,
+ "loss": 1.0701,
+ "step": 618
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.3358898935253196,
+ "learning_rate": 6.261993491892791e-05,
+ "loss": 1.1649,
+ "step": 619
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.31569979424117356,
+ "learning_rate": 6.254957079524099e-05,
+ "loss": 1.0633,
+ "step": 620
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.3002168156888237,
+ "learning_rate": 6.247910423784609e-05,
+ "loss": 1.0846,
+ "step": 621
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.3097238823450595,
+ "learning_rate": 6.24085355668447e-05,
+ "loss": 1.0808,
+ "step": 622
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.3120312761417578,
+ "learning_rate": 6.233786510280212e-05,
+ "loss": 1.0142,
+ "step": 623
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.3335343015064923,
+ "learning_rate": 6.22670931667461e-05,
+ "loss": 1.0674,
+ "step": 624
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.3234062304634526,
+ "learning_rate": 6.219622008016533e-05,
+ "loss": 1.0981,
+ "step": 625
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.32152678786547273,
+ "learning_rate": 6.212524616500798e-05,
+ "loss": 1.0244,
+ "step": 626
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.39031977608147594,
+ "learning_rate": 6.205417174368023e-05,
+ "loss": 1.1205,
+ "step": 627
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.3806189090017157,
+ "learning_rate": 6.198299713904485e-05,
+ "loss": 1.1134,
+ "step": 628
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.2978349276971668,
+ "learning_rate": 6.191172267441967e-05,
+ "loss": 1.0088,
+ "step": 629
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.3190354077382501,
+ "learning_rate": 6.184034867357617e-05,
+ "loss": 1.108,
+ "step": 630
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.32633048665038994,
+ "learning_rate": 6.176887546073797e-05,
+ "loss": 1.0825,
+ "step": 631
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.3428026413020903,
+ "learning_rate": 6.169730336057939e-05,
+ "loss": 1.0765,
+ "step": 632
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.3475737151929015,
+ "learning_rate": 6.162563269822391e-05,
+ "loss": 1.0693,
+ "step": 633
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.3870252154591392,
+ "learning_rate": 6.15538637992428e-05,
+ "loss": 1.1081,
+ "step": 634
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.33597355193652834,
+ "learning_rate": 6.148199698965352e-05,
+ "loss": 1.0893,
+ "step": 635
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.30805894179787247,
+ "learning_rate": 6.141003259591834e-05,
+ "loss": 1.0995,
+ "step": 636
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.3025073882734066,
+ "learning_rate": 6.133797094494281e-05,
+ "loss": 1.0388,
+ "step": 637
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.3524395196391662,
+ "learning_rate": 6.126581236407429e-05,
+ "loss": 1.1196,
+ "step": 638
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.3377646188130345,
+ "learning_rate": 6.119355718110039e-05,
+ "loss": 1.0382,
+ "step": 639
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.35508400659785483,
+ "learning_rate": 6.112120572424763e-05,
+ "loss": 1.1402,
+ "step": 640
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.3454418793700457,
+ "learning_rate": 6.104875832217982e-05,
+ "loss": 1.1032,
+ "step": 641
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.32629806837059866,
+ "learning_rate": 6.097621530399661e-05,
+ "loss": 1.0959,
+ "step": 642
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.3329536837751315,
+ "learning_rate": 6.090357699923202e-05,
+ "loss": 1.0467,
+ "step": 643
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.32302233828349475,
+ "learning_rate": 6.083084373785287e-05,
+ "loss": 1.0858,
+ "step": 644
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.3310358826507611,
+ "learning_rate": 6.075801585025739e-05,
+ "loss": 1.0715,
+ "step": 645
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.319322035854079,
+ "learning_rate": 6.068509366727362e-05,
+ "loss": 1.177,
+ "step": 646
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.3065230667302707,
+ "learning_rate": 6.061207752015797e-05,
+ "loss": 1.0649,
+ "step": 647
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.29926795565748227,
+ "learning_rate": 6.053896774059368e-05,
+ "loss": 1.1325,
+ "step": 648
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.3556069634279046,
+ "learning_rate": 6.046576466068931e-05,
+ "loss": 1.1366,
+ "step": 649
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.3189191131461966,
+ "learning_rate": 6.039246861297727e-05,
+ "loss": 1.0693,
+ "step": 650
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.3347197156648834,
+ "learning_rate": 6.031907993041227e-05,
+ "loss": 1.1009,
+ "step": 651
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.32274156348185445,
+ "learning_rate": 6.0245598946369826e-05,
+ "loss": 1.1675,
+ "step": 652
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.35534089035455224,
+ "learning_rate": 6.017202599464476e-05,
+ "loss": 1.1723,
+ "step": 653
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.3106026578570133,
+ "learning_rate": 6.009836140944965e-05,
+ "loss": 1.0954,
+ "step": 654
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.3309144454564729,
+ "learning_rate": 6.002460552541331e-05,
+ "loss": 1.0209,
+ "step": 655
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.3023619281400003,
+ "learning_rate": 5.9950758677579345e-05,
+ "loss": 1.0363,
+ "step": 656
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.3311182880219704,
+ "learning_rate": 5.987682120140451e-05,
+ "loss": 1.0515,
+ "step": 657
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.33396486010030413,
+ "learning_rate": 5.980279343275729e-05,
+ "loss": 1.1251,
+ "step": 658
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.3465764556678002,
+ "learning_rate": 5.97286757079163e-05,
+ "loss": 1.165,
+ "step": 659
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.304193441363374,
+ "learning_rate": 5.965446836356882e-05,
+ "loss": 1.0228,
+ "step": 660
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.3415149030413082,
+ "learning_rate": 5.9580171736809224e-05,
+ "loss": 1.0742,
+ "step": 661
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.33138658321132064,
+ "learning_rate": 5.950578616513746e-05,
+ "loss": 1.0843,
+ "step": 662
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.30774403421162994,
+ "learning_rate": 5.943131198645752e-05,
+ "loss": 1.065,
+ "step": 663
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.3428877492183819,
+ "learning_rate": 5.9356749539075885e-05,
+ "loss": 1.1101,
+ "step": 664
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.3621290546130101,
+ "learning_rate": 5.928209916170003e-05,
+ "loss": 1.1372,
+ "step": 665
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.3482375945469884,
+ "learning_rate": 5.9207361193436865e-05,
+ "loss": 1.132,
+ "step": 666
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.31754384974068384,
+ "learning_rate": 5.9132535973791156e-05,
+ "loss": 1.148,
+ "step": 667
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.36003834782050365,
+ "learning_rate": 5.9057623842664044e-05,
+ "loss": 1.1099,
+ "step": 668
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.2963701622969662,
+ "learning_rate": 5.8982625140351464e-05,
+ "loss": 1.0755,
+ "step": 669
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.32579569606066516,
+ "learning_rate": 5.8907540207542616e-05,
+ "loss": 1.0809,
+ "step": 670
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.4247563451753457,
+ "learning_rate": 5.8832369385318416e-05,
+ "loss": 1.097,
+ "step": 671
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.33076932102169776,
+ "learning_rate": 5.875711301514992e-05,
+ "loss": 1.1078,
+ "step": 672
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.3609238032332309,
+ "learning_rate": 5.8681771438896815e-05,
+ "loss": 1.1031,
+ "step": 673
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.325159585649425,
+ "learning_rate": 5.860634499880583e-05,
+ "loss": 1.0707,
+ "step": 674
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.4620687271068983,
+ "learning_rate": 5.853083403750922e-05,
+ "loss": 1.1017,
+ "step": 675
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.33485279064365936,
+ "learning_rate": 5.845523889802316e-05,
+ "loss": 1.0989,
+ "step": 676
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.30952573170841513,
+ "learning_rate": 5.8379559923746214e-05,
+ "loss": 1.0393,
+ "step": 677
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.33498605810588283,
+ "learning_rate": 5.830379745845781e-05,
+ "loss": 1.1259,
+ "step": 678
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.35771921163037307,
+ "learning_rate": 5.822795184631659e-05,
+ "loss": 1.0815,
+ "step": 679
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.3329650192347647,
+ "learning_rate": 5.815202343185894e-05,
+ "loss": 1.1344,
+ "step": 680
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3356634465845771,
+ "learning_rate": 5.807601255999736e-05,
+ "loss": 1.1297,
+ "step": 681
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3289442034151235,
+ "learning_rate": 5.7999919576018934e-05,
+ "loss": 1.022,
+ "step": 682
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3207007334784113,
+ "learning_rate": 5.7923744825583745e-05,
+ "loss": 1.0571,
+ "step": 683
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3582460325329284,
+ "learning_rate": 5.7847488654723304e-05,
+ "loss": 1.0778,
+ "step": 684
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3563317666176927,
+ "learning_rate": 5.777115140983899e-05,
+ "loss": 1.1003,
+ "step": 685
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 3.4694912945702105,
+ "learning_rate": 5.769473343770047e-05,
+ "loss": 1.121,
+ "step": 686
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.43002349520483113,
+ "learning_rate": 5.761823508544411e-05,
+ "loss": 1.0765,
+ "step": 687
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.39467783104839754,
+ "learning_rate": 5.754165670057142e-05,
+ "loss": 1.0788,
+ "step": 688
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.39629029674867916,
+ "learning_rate": 5.7464998630947464e-05,
+ "loss": 1.0812,
+ "step": 689
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.3880152093965208,
+ "learning_rate": 5.738826122479929e-05,
+ "loss": 1.1228,
+ "step": 690
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.3777874121959188,
+ "learning_rate": 5.7311444830714324e-05,
+ "loss": 1.0907,
+ "step": 691
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.38004041653523696,
+ "learning_rate": 5.723454979763882e-05,
+ "loss": 1.1263,
+ "step": 692
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.37049672627797636,
+ "learning_rate": 5.7157576474876246e-05,
+ "loss": 1.1438,
+ "step": 693
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.32973606103437614,
+ "learning_rate": 5.7080525212085725e-05,
+ "loss": 1.0553,
+ "step": 694
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.31674639252070325,
+ "learning_rate": 5.700339635928038e-05,
+ "loss": 1.06,
+ "step": 695
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.32282199426553837,
+ "learning_rate": 5.692619026682588e-05,
+ "loss": 1.0841,
+ "step": 696
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.4810882958061859,
+ "learning_rate": 5.684890728543869e-05,
+ "loss": 1.0803,
+ "step": 697
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.3995638550178378,
+ "learning_rate": 5.6771547766184566e-05,
+ "loss": 1.1187,
+ "step": 698
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.35264932960583484,
+ "learning_rate": 5.669411206047699e-05,
+ "loss": 1.0641,
+ "step": 699
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.35240640524733,
+ "learning_rate": 5.661660052007547e-05,
+ "loss": 1.076,
+ "step": 700
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.3540694609860389,
+ "learning_rate": 5.653901349708401e-05,
+ "loss": 1.1369,
+ "step": 701
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.3196055112925304,
+ "learning_rate": 5.646135134394955e-05,
+ "loss": 1.0677,
+ "step": 702
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.4214141007955914,
+ "learning_rate": 5.6383614413460266e-05,
+ "loss": 1.1139,
+ "step": 703
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.3625611311798579,
+ "learning_rate": 5.630580305874402e-05,
+ "loss": 1.1845,
+ "step": 704
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.3425208672181188,
+ "learning_rate": 5.62279176332668e-05,
+ "loss": 1.174,
+ "step": 705
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.3108419862818321,
+ "learning_rate": 5.6149958490830996e-05,
+ "loss": 1.0331,
+ "step": 706
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.3274644181571904,
+ "learning_rate": 5.607192598557394e-05,
+ "loss": 1.0664,
+ "step": 707
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.346218197215145,
+ "learning_rate": 5.599382047196617e-05,
+ "loss": 1.2088,
+ "step": 708
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.328497632267458,
+ "learning_rate": 5.591564230480989e-05,
+ "loss": 1.0287,
+ "step": 709
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.3708173720611468,
+ "learning_rate": 5.583739183923732e-05,
+ "loss": 1.0883,
+ "step": 710
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.3631427403535479,
+ "learning_rate": 5.575906943070915e-05,
+ "loss": 1.1155,
+ "step": 711
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.3305201458598695,
+ "learning_rate": 5.5680675435012834e-05,
+ "loss": 1.0958,
+ "step": 712
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.34978833532083714,
+ "learning_rate": 5.5602210208261036e-05,
+ "loss": 1.1437,
+ "step": 713
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.3510553882510229,
+ "learning_rate": 5.552367410688999e-05,
+ "loss": 1.0941,
+ "step": 714
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.3523747462465078,
+ "learning_rate": 5.544506748765789e-05,
+ "loss": 1.1289,
+ "step": 715
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.38262637783927445,
+ "learning_rate": 5.5366390707643266e-05,
+ "loss": 1.099,
+ "step": 716
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.38620065989073454,
+ "learning_rate": 5.528764412424334e-05,
+ "loss": 1.083,
+ "step": 717
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.3401355276121096,
+ "learning_rate": 5.520882809517245e-05,
+ "loss": 1.028,
+ "step": 718
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.3392061008943934,
+ "learning_rate": 5.512994297846039e-05,
+ "loss": 1.1083,
+ "step": 719
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.34219480421015414,
+ "learning_rate": 5.505098913245077e-05,
+ "loss": 1.1108,
+ "step": 720
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.3275058061553761,
+ "learning_rate": 5.497196691579945e-05,
+ "loss": 1.111,
+ "step": 721
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.36800249746509384,
+ "learning_rate": 5.489287668747283e-05,
+ "loss": 1.1221,
+ "step": 722
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.4129005533101575,
+ "learning_rate": 5.481371880674628e-05,
+ "loss": 1.0966,
+ "step": 723
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.36563906596251655,
+ "learning_rate": 5.4734493633202505e-05,
+ "loss": 1.0927,
+ "step": 724
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.3614650536839971,
+ "learning_rate": 5.465520152672986e-05,
+ "loss": 1.13,
+ "step": 725
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.36419665098633497,
+ "learning_rate": 5.4575842847520765e-05,
+ "loss": 1.1183,
+ "step": 726
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.34490689807258995,
+ "learning_rate": 5.449641795607005e-05,
+ "loss": 1.0919,
+ "step": 727
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.3627643746876298,
+ "learning_rate": 5.441692721317334e-05,
+ "loss": 1.0411,
+ "step": 728
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.323620411949565,
+ "learning_rate": 5.433737097992537e-05,
+ "loss": 1.0725,
+ "step": 729
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3521599501824965,
+ "learning_rate": 5.425774961771838e-05,
+ "loss": 1.0926,
+ "step": 730
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3302390546764222,
+ "learning_rate": 5.417806348824047e-05,
+ "loss": 1.0468,
+ "step": 731
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3833325802616019,
+ "learning_rate": 5.4098312953473956e-05,
+ "loss": 1.1291,
+ "step": 732
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3708621126835512,
+ "learning_rate": 5.401849837569372e-05,
+ "loss": 1.0887,
+ "step": 733
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3625834373416278,
+ "learning_rate": 5.393862011746555e-05,
+ "loss": 1.0981,
+ "step": 734
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3583343965080617,
+ "learning_rate": 5.385867854164451e-05,
+ "loss": 1.1021,
+ "step": 735
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.34598320594096066,
+ "learning_rate": 5.377867401137332e-05,
+ "loss": 1.1376,
+ "step": 736
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.3046382791315433,
+ "learning_rate": 5.369860689008066e-05,
+ "loss": 1.0206,
+ "step": 737
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.34464948380043725,
+ "learning_rate": 5.3618477541479505e-05,
+ "loss": 1.1084,
+ "step": 738
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.3203242519627101,
+ "learning_rate": 5.353828632956557e-05,
+ "loss": 1.0731,
+ "step": 739
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.3431169960355163,
+ "learning_rate": 5.3458033618615516e-05,
+ "loss": 1.091,
+ "step": 740
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.33492074521678705,
+ "learning_rate": 5.337771977318543e-05,
+ "loss": 1.1112,
+ "step": 741
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.32576546585541344,
+ "learning_rate": 5.3297345158109086e-05,
+ "loss": 1.0993,
+ "step": 742
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.3410007245037574,
+ "learning_rate": 5.3216910138496286e-05,
+ "loss": 1.094,
+ "step": 743
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.34891180680896833,
+ "learning_rate": 5.313641507973128e-05,
+ "loss": 1.1331,
+ "step": 744
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.37135766946717214,
+ "learning_rate": 5.3055860347471006e-05,
+ "loss": 1.1,
+ "step": 745
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.3465019415478411,
+ "learning_rate": 5.297524630764349e-05,
+ "loss": 1.1256,
+ "step": 746
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.37035388481626563,
+ "learning_rate": 5.289457332644615e-05,
+ "loss": 1.0366,
+ "step": 747
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.33853883270759155,
+ "learning_rate": 5.281384177034421e-05,
+ "loss": 1.0547,
+ "step": 748
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.364306618627317,
+ "learning_rate": 5.2733052006068897e-05,
+ "loss": 1.0768,
+ "step": 749
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.4021754315731627,
+ "learning_rate": 5.2652204400615916e-05,
+ "loss": 1.1382,
+ "step": 750
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.3332185389039008,
+ "learning_rate": 5.257129932124368e-05,
+ "loss": 1.0815,
+ "step": 751
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.3453105709879854,
+ "learning_rate": 5.249033713547173e-05,
+ "loss": 1.1109,
+ "step": 752
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.3385397539717797,
+ "learning_rate": 5.2409318211078966e-05,
+ "loss": 1.0529,
+ "step": 753
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.33197994450130447,
+ "learning_rate": 5.232824291610206e-05,
+ "loss": 1.0721,
+ "step": 754
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.32836289576124167,
+ "learning_rate": 5.224711161883375e-05,
+ "loss": 1.0459,
+ "step": 755
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.32491620058831744,
+ "learning_rate": 5.216592468782117e-05,
+ "loss": 1.0897,
+ "step": 756
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.3137879047811153,
+ "learning_rate": 5.2084682491864155e-05,
+ "loss": 1.096,
+ "step": 757
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.3356938043023012,
+ "learning_rate": 5.200338540001364e-05,
+ "loss": 1.0827,
+ "step": 758
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.36044340490819055,
+ "learning_rate": 5.192203378156984e-05,
+ "loss": 1.0617,
+ "step": 759
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.34674262047888293,
+ "learning_rate": 5.184062800608077e-05,
+ "loss": 1.1267,
+ "step": 760
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.32469442322149333,
+ "learning_rate": 5.1759168443340375e-05,
+ "loss": 1.1483,
+ "step": 761
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.3290384307774216,
+ "learning_rate": 5.167765546338698e-05,
+ "loss": 1.047,
+ "step": 762
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.31637612188770403,
+ "learning_rate": 5.1596089436501525e-05,
+ "loss": 1.0311,
+ "step": 763
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.3168693829641207,
+ "learning_rate": 5.151447073320597e-05,
+ "loss": 1.1405,
+ "step": 764
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.34322421571238926,
+ "learning_rate": 5.143279972426153e-05,
+ "loss": 1.1428,
+ "step": 765
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.3291030435830325,
+ "learning_rate": 5.1351076780667026e-05,
+ "loss": 1.0473,
+ "step": 766
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.33772039158758044,
+ "learning_rate": 5.1269302273657195e-05,
+ "loss": 1.0909,
+ "step": 767
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.3802031736890876,
+ "learning_rate": 5.118747657470102e-05,
+ "loss": 1.1482,
+ "step": 768
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.3296067628997962,
+ "learning_rate": 5.1105600055500025e-05,
+ "loss": 1.0085,
+ "step": 769
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.3707139982828035,
+ "learning_rate": 5.102367308798658e-05,
+ "loss": 1.0746,
+ "step": 770
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.3378537316757011,
+ "learning_rate": 5.094169604432225e-05,
+ "loss": 1.0482,
+ "step": 771
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.4008417246255145,
+ "learning_rate": 5.085966929689601e-05,
+ "loss": 1.1065,
+ "step": 772
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.3244385106988064,
+ "learning_rate": 5.077759321832271e-05,
+ "loss": 1.0827,
+ "step": 773
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.37228575732812336,
+ "learning_rate": 5.0695468181441215e-05,
+ "loss": 1.1146,
+ "step": 774
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.33761714797540276,
+ "learning_rate": 5.061329455931283e-05,
+ "loss": 1.092,
+ "step": 775
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.3158158390913494,
+ "learning_rate": 5.053107272521955e-05,
+ "loss": 1.1058,
+ "step": 776
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.3691501929738938,
+ "learning_rate": 5.044880305266239e-05,
+ "loss": 1.1599,
+ "step": 777
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.33730914019805525,
+ "learning_rate": 5.0366485915359645e-05,
+ "loss": 1.0615,
+ "step": 778
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.34970059240017,
+ "learning_rate": 5.0284121687245257e-05,
+ "loss": 1.1475,
+ "step": 779
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 0.3374028029407197,
+ "learning_rate": 5.020171074246707e-05,
+ "loss": 1.0926,
+ "step": 780
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 0.3350020681123992,
+ "learning_rate": 5.011925345538514e-05,
+ "loss": 1.1276,
+ "step": 781
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 0.3224228965786606,
+ "learning_rate": 5.003675020057003e-05,
+ "loss": 1.0183,
+ "step": 782
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 0.3357310714740298,
+ "learning_rate": 4.995420135280114e-05,
+ "loss": 1.1114,
+ "step": 783
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 0.3590203255363759,
+ "learning_rate": 4.9871607287064966e-05,
+ "loss": 1.1504,
+ "step": 784
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.33011195419611655,
+ "learning_rate": 4.9788968378553396e-05,
+ "loss": 1.0826,
+ "step": 785
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.31088868195439445,
+ "learning_rate": 4.970628500266207e-05,
+ "loss": 1.0704,
+ "step": 786
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.3144996103179409,
+ "learning_rate": 4.962355753498858e-05,
+ "loss": 1.1403,
+ "step": 787
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.3147269555419068,
+ "learning_rate": 4.954078635133081e-05,
+ "loss": 1.0898,
+ "step": 788
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.3280151747783868,
+ "learning_rate": 4.945797182768524e-05,
+ "loss": 1.1115,
+ "step": 789
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.3551996569232493,
+ "learning_rate": 4.937511434024524e-05,
+ "loss": 1.1731,
+ "step": 790
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 0.343863208057807,
+ "learning_rate": 4.9292214265399336e-05,
+ "loss": 1.0866,
+ "step": 791
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 0.37316699385322466,
+ "learning_rate": 4.920927197972949e-05,
+ "loss": 1.1083,
+ "step": 792
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 0.3635739774067832,
+ "learning_rate": 4.9126287860009453e-05,
+ "loss": 1.1393,
+ "step": 793
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 0.3755910554972886,
+ "learning_rate": 4.9043262283202974e-05,
+ "loss": 1.1624,
+ "step": 794
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 0.3635899120146823,
+ "learning_rate": 4.8960195626462145e-05,
+ "loss": 1.2095,
+ "step": 795
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 0.3642202684342816,
+ "learning_rate": 4.8877088267125664e-05,
+ "loss": 1.1099,
+ "step": 796
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 0.3339946548799316,
+ "learning_rate": 4.879394058271712e-05,
+ "loss": 1.1157,
+ "step": 797
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 0.3457189703100475,
+ "learning_rate": 4.871075295094329e-05,
+ "loss": 1.129,
+ "step": 798
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 0.3550931839691424,
+ "learning_rate": 4.862752574969241e-05,
+ "loss": 1.076,
+ "step": 799
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 0.36139108917966734,
+ "learning_rate": 4.8544259357032475e-05,
+ "loss": 1.1577,
+ "step": 800
+ },
+ {
+ "epoch": 1.47,
+ "grad_norm": 0.32133637199528886,
+ "learning_rate": 4.8460954151209486e-05,
+ "loss": 1.0148,
+ "step": 801
+ },
+ {
+ "epoch": 1.47,
+ "grad_norm": 1.043434839098495,
+ "learning_rate": 4.837761051064579e-05,
+ "loss": 1.1145,
+ "step": 802
+ },
+ {
+ "epoch": 1.47,
+ "grad_norm": 0.36683258883118186,
+ "learning_rate": 4.8294228813938285e-05,
+ "loss": 1.0952,
+ "step": 803
+ },
+ {
+ "epoch": 1.47,
+ "grad_norm": 0.4664840097151361,
+ "learning_rate": 4.8210809439856804e-05,
+ "loss": 1.1644,
+ "step": 804
+ },
+ {
+ "epoch": 1.47,
+ "grad_norm": 0.355267647636684,
+ "learning_rate": 4.8127352767342276e-05,
+ "loss": 1.1062,
+ "step": 805
+ },
+ {
+ "epoch": 1.47,
+ "grad_norm": 0.369440033402877,
+ "learning_rate": 4.8043859175505095e-05,
+ "loss": 1.0846,
+ "step": 806
+ },
+ {
+ "epoch": 1.48,
+ "grad_norm": 0.3530796042687365,
+ "learning_rate": 4.7960329043623344e-05,
+ "loss": 1.1608,
+ "step": 807
+ },
+ {
+ "epoch": 1.48,
+ "grad_norm": 0.4321129927586221,
+ "learning_rate": 4.787676275114111e-05,
+ "loss": 1.1339,
+ "step": 808
+ },
+ {
+ "epoch": 1.48,
+ "grad_norm": 0.349782342376565,
+ "learning_rate": 4.779316067766673e-05,
+ "loss": 1.0685,
+ "step": 809
+ },
+ {
+ "epoch": 1.48,
+ "grad_norm": 0.36418077406249405,
+ "learning_rate": 4.770952320297109e-05,
+ "loss": 1.1467,
+ "step": 810
+ },
+ {
+ "epoch": 1.48,
+ "grad_norm": 0.35831016374198177,
+ "learning_rate": 4.7625850706985886e-05,
+ "loss": 1.064,
+ "step": 811
+ },
+ {
+ "epoch": 1.49,
+ "grad_norm": 0.3596998725696811,
+ "learning_rate": 4.7542143569801894e-05,
+ "loss": 1.119,
+ "step": 812
+ },
+ {
+ "epoch": 1.49,
+ "grad_norm": 0.3437403316058801,
+ "learning_rate": 4.745840217166725e-05,
+ "loss": 1.0451,
+ "step": 813
+ },
+ {
+ "epoch": 1.49,
+ "grad_norm": 0.36614738184483053,
+ "learning_rate": 4.737462689298577e-05,
+ "loss": 1.1388,
+ "step": 814
+ },
+ {
+ "epoch": 1.49,
+ "grad_norm": 0.4127884784033637,
+ "learning_rate": 4.7290818114315086e-05,
+ "loss": 1.1786,
+ "step": 815
+ },
+ {
+ "epoch": 1.49,
+ "grad_norm": 0.4110838984805364,
+ "learning_rate": 4.72069762163651e-05,
+ "loss": 1.0709,
+ "step": 816
+ },
+ {
+ "epoch": 1.49,
+ "grad_norm": 0.42581764215872087,
+ "learning_rate": 4.7123101579996106e-05,
+ "loss": 1.1019,
+ "step": 817
+ },
+ {
+ "epoch": 1.5,
+ "grad_norm": 0.39442487793142056,
+ "learning_rate": 4.7039194586217136e-05,
+ "loss": 1.1532,
+ "step": 818
+ },
+ {
+ "epoch": 1.5,
+ "grad_norm": 0.3362730343659587,
+ "learning_rate": 4.695525561618418e-05,
+ "loss": 1.1149,
+ "step": 819
+ },
+ {
+ "epoch": 1.5,
+ "grad_norm": 0.5554581175413662,
+ "learning_rate": 4.687128505119853e-05,
+ "loss": 1.0697,
+ "step": 820
+ },
+ {
+ "epoch": 1.5,
+ "grad_norm": 0.369476979421362,
+ "learning_rate": 4.6787283272704966e-05,
+ "loss": 1.1038,
+ "step": 821
+ },
+ {
+ "epoch": 1.5,
+ "grad_norm": 0.4002553035181225,
+ "learning_rate": 4.670325066229009e-05,
+ "loss": 1.0714,
+ "step": 822
+ },
+ {
+ "epoch": 1.51,
+ "grad_norm": 0.5196234078753172,
+ "learning_rate": 4.661918760168052e-05,
+ "loss": 1.1027,
+ "step": 823
+ },
+ {
+ "epoch": 1.51,
+ "grad_norm": 0.39318954902323083,
+ "learning_rate": 4.653509447274121e-05,
+ "loss": 1.1562,
+ "step": 824
+ },
+ {
+ "epoch": 1.51,
+ "grad_norm": 0.3822202885742058,
+ "learning_rate": 4.6450971657473743e-05,
+ "loss": 1.0662,
+ "step": 825
+ },
+ {
+ "epoch": 1.51,
+ "grad_norm": 0.4203426346301331,
+ "learning_rate": 4.63668195380145e-05,
+ "loss": 1.0713,
+ "step": 826
+ },
+ {
+ "epoch": 1.51,
+ "grad_norm": 0.3850673744429753,
+ "learning_rate": 4.628263849663301e-05,
+ "loss": 1.123,
+ "step": 827
+ },
+ {
+ "epoch": 1.51,
+ "grad_norm": 0.3615517608466556,
+ "learning_rate": 4.619842891573016e-05,
+ "loss": 1.0565,
+ "step": 828
+ },
+ {
+ "epoch": 1.52,
+ "grad_norm": 0.34448279386839303,
+ "learning_rate": 4.6114191177836514e-05,
+ "loss": 1.0703,
+ "step": 829
+ },
+ {
+ "epoch": 1.52,
+ "grad_norm": 0.3649438531637055,
+ "learning_rate": 4.6029925665610524e-05,
+ "loss": 1.061,
+ "step": 830
+ },
+ {
+ "epoch": 1.52,
+ "grad_norm": 0.360215034359632,
+ "learning_rate": 4.59456327618368e-05,
+ "loss": 1.075,
+ "step": 831
+ },
+ {
+ "epoch": 1.52,
+ "grad_norm": 0.3297320285815758,
+ "learning_rate": 4.5861312849424386e-05,
+ "loss": 1.0177,
+ "step": 832
+ },
+ {
+ "epoch": 1.52,
+ "grad_norm": 0.3687310180731473,
+ "learning_rate": 4.5776966311405035e-05,
+ "loss": 1.078,
+ "step": 833
+ },
+ {
+ "epoch": 1.53,
+ "grad_norm": 0.40273770874860215,
+ "learning_rate": 4.5692593530931416e-05,
+ "loss": 1.1237,
+ "step": 834
+ },
+ {
+ "epoch": 1.53,
+ "grad_norm": 0.41430404774660234,
+ "learning_rate": 4.560819489127545e-05,
+ "loss": 1.0891,
+ "step": 835
+ },
+ {
+ "epoch": 1.53,
+ "grad_norm": 0.39133407251294566,
+ "learning_rate": 4.552377077582646e-05,
+ "loss": 1.1244,
+ "step": 836
+ },
+ {
+ "epoch": 1.53,
+ "grad_norm": 0.45340195323000915,
+ "learning_rate": 4.543932156808959e-05,
+ "loss": 1.1659,
+ "step": 837
+ },
+ {
+ "epoch": 1.53,
+ "grad_norm": 0.37488623560038786,
+ "learning_rate": 4.535484765168386e-05,
+ "loss": 1.1026,
+ "step": 838
+ },
+ {
+ "epoch": 1.53,
+ "grad_norm": 0.36994669175389283,
+ "learning_rate": 4.527034941034063e-05,
+ "loss": 1.1481,
+ "step": 839
+ },
+ {
+ "epoch": 1.54,
+ "grad_norm": 19.06482385633655,
+ "learning_rate": 4.51858272279017e-05,
+ "loss": 1.1897,
+ "step": 840
+ },
+ {
+ "epoch": 1.54,
+ "grad_norm": 0.7974039643314161,
+ "learning_rate": 4.5101281488317634e-05,
+ "loss": 1.1467,
+ "step": 841
+ },
+ {
+ "epoch": 1.54,
+ "grad_norm": 1211.6099030928651,
+ "learning_rate": 4.501671257564602e-05,
+ "loss": 1.9989,
+ "step": 842
+ },
+ {
+ "epoch": 1.54,
+ "grad_norm": 1.438002431348562,
+ "learning_rate": 4.49321208740497e-05,
+ "loss": 1.1715,
+ "step": 843
+ },
+ {
+ "epoch": 1.54,
+ "grad_norm": 5.653373858566177,
+ "learning_rate": 4.484750676779504e-05,
+ "loss": 1.1828,
+ "step": 844
+ },
+ {
+ "epoch": 1.55,
+ "grad_norm": 625.3410428452096,
+ "learning_rate": 4.4762870641250185e-05,
+ "loss": 1.3283,
+ "step": 845
+ },
+ {
+ "epoch": 1.55,
+ "grad_norm": 5.653780923251844,
+ "learning_rate": 4.467821287888331e-05,
+ "loss": 1.1532,
+ "step": 846
+ },
+ {
+ "epoch": 1.55,
+ "grad_norm": 177.17210534716736,
+ "learning_rate": 4.459353386526086e-05,
+ "loss": 1.1178,
+ "step": 847
+ },
+ {
+ "epoch": 1.55,
+ "grad_norm": 4.805542680722976,
+ "learning_rate": 4.450883398504584e-05,
+ "loss": 1.1057,
+ "step": 848
+ },
+ {
+ "epoch": 1.55,
+ "grad_norm": 2.055060058990183,
+ "learning_rate": 4.442411362299602e-05,
+ "loss": 1.1423,
+ "step": 849
+ },
+ {
+ "epoch": 1.55,
+ "grad_norm": 750.168576149713,
+ "learning_rate": 4.433937316396224e-05,
+ "loss": 1.6724,
+ "step": 850
+ },
+ {
+ "epoch": 1.56,
+ "grad_norm": 130.95680794453787,
+ "learning_rate": 4.425461299288659e-05,
+ "loss": 1.4064,
+ "step": 851
+ },
+ {
+ "epoch": 1.56,
+ "grad_norm": 1.0622510576276174,
+ "learning_rate": 4.416983349480073e-05,
+ "loss": 1.1254,
+ "step": 852
+ },
+ {
+ "epoch": 1.56,
+ "grad_norm": 1.0460019153859763,
+ "learning_rate": 4.408503505482412e-05,
+ "loss": 1.1571,
+ "step": 853
+ },
+ {
+ "epoch": 1.56,
+ "grad_norm": 1.0110886779305324,
+ "learning_rate": 4.400021805816225e-05,
+ "loss": 1.2113,
+ "step": 854
+ },
+ {
+ "epoch": 1.56,
+ "grad_norm": 8.515995522196171,
+ "learning_rate": 4.391538289010493e-05,
+ "loss": 1.1128,
+ "step": 855
+ },
+ {
+ "epoch": 1.57,
+ "grad_norm": 1.2951788332991243,
+ "learning_rate": 4.383052993602448e-05,
+ "loss": 1.1713,
+ "step": 856
+ },
+ {
+ "epoch": 1.57,
+ "grad_norm": 30.749825560809473,
+ "learning_rate": 4.374565958137404e-05,
+ "loss": 1.1628,
+ "step": 857
+ },
+ {
+ "epoch": 1.57,
+ "grad_norm": 2.9528948545566096,
+ "learning_rate": 4.3660772211685775e-05,
+ "loss": 1.1314,
+ "step": 858
+ },
+ {
+ "epoch": 1.57,
+ "grad_norm": 0.7682418716635998,
+ "learning_rate": 4.357586821256918e-05,
+ "loss": 1.1017,
+ "step": 859
+ },
+ {
+ "epoch": 1.57,
+ "grad_norm": 1.4660835049872478,
+ "learning_rate": 4.349094796970925e-05,
+ "loss": 1.1871,
+ "step": 860
+ },
+ {
+ "epoch": 1.57,
+ "grad_norm": 2.086334546506882,
+ "learning_rate": 4.3406011868864795e-05,
+ "loss": 1.1273,
+ "step": 861
+ },
+ {
+ "epoch": 1.58,
+ "grad_norm": 0.8447346926666625,
+ "learning_rate": 4.3321060295866635e-05,
+ "loss": 1.1888,
+ "step": 862
+ },
+ {
+ "epoch": 1.58,
+ "grad_norm": 0.42192172811626216,
+ "learning_rate": 4.32360936366159e-05,
+ "loss": 1.1195,
+ "step": 863
+ },
+ {
+ "epoch": 1.58,
+ "grad_norm": 0.8309195428194658,
+ "learning_rate": 4.315111227708224e-05,
+ "loss": 1.1546,
+ "step": 864
+ },
+ {
+ "epoch": 1.58,
+ "grad_norm": 1.2607908118006466,
+ "learning_rate": 4.306611660330208e-05,
+ "loss": 0.9956,
+ "step": 865
+ },
+ {
+ "epoch": 1.58,
+ "grad_norm": 0.7251625291751502,
+ "learning_rate": 4.298110700137687e-05,
+ "loss": 1.0785,
+ "step": 866
+ },
+ {
+ "epoch": 1.59,
+ "grad_norm": 0.7091004801982537,
+ "learning_rate": 4.2896083857471345e-05,
+ "loss": 1.09,
+ "step": 867
+ },
+ {
+ "epoch": 1.59,
+ "grad_norm": 0.5281839474720389,
+ "learning_rate": 4.281104755781172e-05,
+ "loss": 1.1193,
+ "step": 868
+ },
+ {
+ "epoch": 1.59,
+ "grad_norm": 0.6717068342854466,
+ "learning_rate": 4.272599848868402e-05,
+ "loss": 1.0878,
+ "step": 869
+ },
+ {
+ "epoch": 1.59,
+ "grad_norm": 1.2715992187857506,
+ "learning_rate": 4.264093703643223e-05,
+ "loss": 1.1393,
+ "step": 870
+ },
+ {
+ "epoch": 1.59,
+ "grad_norm": 0.7459370091738186,
+ "learning_rate": 4.255586358745662e-05,
+ "loss": 1.1329,
+ "step": 871
+ },
+ {
+ "epoch": 1.59,
+ "grad_norm": 0.5375765240515081,
+ "learning_rate": 4.247077852821194e-05,
+ "loss": 1.1095,
+ "step": 872
+ },
+ {
+ "epoch": 1.6,
+ "grad_norm": 2.800301901231134,
+ "learning_rate": 4.2385682245205685e-05,
+ "loss": 1.1454,
+ "step": 873
+ },
+ {
+ "epoch": 1.6,
+ "grad_norm": 1.0380617847836733,
+ "learning_rate": 4.230057512499634e-05,
+ "loss": 1.1485,
+ "step": 874
+ },
+ {
+ "epoch": 1.6,
+ "grad_norm": 0.4520839568590167,
+ "learning_rate": 4.221545755419159e-05,
+ "loss": 1.1312,
+ "step": 875
+ },
+ {
+ "epoch": 1.6,
+ "grad_norm": 0.40517807692679086,
+ "learning_rate": 4.2130329919446646e-05,
+ "loss": 1.0847,
+ "step": 876
+ },
+ {
+ "epoch": 1.6,
+ "grad_norm": 0.4087992479261267,
+ "learning_rate": 4.20451926074624e-05,
+ "loss": 1.0514,
+ "step": 877
+ },
+ {
+ "epoch": 1.61,
+ "grad_norm": 0.4585377147848289,
+ "learning_rate": 4.196004600498369e-05,
+ "loss": 1.0804,
+ "step": 878
+ },
+ {
+ "epoch": 1.61,
+ "grad_norm": 0.4273068731511255,
+ "learning_rate": 4.1874890498797605e-05,
+ "loss": 1.1109,
+ "step": 879
+ },
+ {
+ "epoch": 1.61,
+ "grad_norm": 0.8002075505518079,
+ "learning_rate": 4.178972647573163e-05,
+ "loss": 1.0667,
+ "step": 880
+ },
+ {
+ "epoch": 1.61,
+ "grad_norm": 1.017993355567939,
+ "learning_rate": 4.1704554322651975e-05,
+ "loss": 1.0822,
+ "step": 881
+ },
+ {
+ "epoch": 1.61,
+ "grad_norm": 0.5360398768749924,
+ "learning_rate": 4.161937442646176e-05,
+ "loss": 1.1081,
+ "step": 882
+ },
+ {
+ "epoch": 1.61,
+ "grad_norm": 0.4384721008322173,
+ "learning_rate": 4.1534187174099285e-05,
+ "loss": 1.0875,
+ "step": 883
+ },
+ {
+ "epoch": 1.62,
+ "grad_norm": 0.4847183493614364,
+ "learning_rate": 4.1448992952536275e-05,
+ "loss": 1.1283,
+ "step": 884
+ },
+ {
+ "epoch": 1.62,
+ "grad_norm": 0.4445130532575715,
+ "learning_rate": 4.136379214877609e-05,
+ "loss": 1.0729,
+ "step": 885
+ },
+ {
+ "epoch": 1.62,
+ "grad_norm": 0.4251292394390122,
+ "learning_rate": 4.127858514985203e-05,
+ "loss": 1.0,
+ "step": 886
+ },
+ {
+ "epoch": 1.62,
+ "grad_norm": 0.41930406317728264,
+ "learning_rate": 4.1193372342825494e-05,
+ "loss": 1.0231,
+ "step": 887
+ },
+ {
+ "epoch": 1.62,
+ "grad_norm": 0.5672205934273074,
+ "learning_rate": 4.1108154114784275e-05,
+ "loss": 1.1296,
+ "step": 888
+ },
+ {
+ "epoch": 1.63,
+ "grad_norm": 0.4653761886235837,
+ "learning_rate": 4.102293085284083e-05,
+ "loss": 1.101,
+ "step": 889
+ },
+ {
+ "epoch": 1.63,
+ "grad_norm": 0.4240672595320848,
+ "learning_rate": 4.0937702944130426e-05,
+ "loss": 1.1192,
+ "step": 890
+ },
+ {
+ "epoch": 1.63,
+ "grad_norm": 0.5015307479291147,
+ "learning_rate": 4.085247077580948e-05,
+ "loss": 1.1791,
+ "step": 891
+ },
+ {
+ "epoch": 1.63,
+ "grad_norm": 0.40113831320559956,
+ "learning_rate": 4.076723473505374e-05,
+ "loss": 1.1379,
+ "step": 892
+ },
+ {
+ "epoch": 1.63,
+ "grad_norm": 0.35710296877921505,
+ "learning_rate": 4.068199520905655e-05,
+ "loss": 1.0849,
+ "step": 893
+ },
+ {
+ "epoch": 1.64,
+ "grad_norm": 0.4584402111092144,
+ "learning_rate": 4.059675258502709e-05,
+ "loss": 1.1911,
+ "step": 894
+ },
+ {
+ "epoch": 1.64,
+ "grad_norm": 0.425677789826556,
+ "learning_rate": 4.05115072501886e-05,
+ "loss": 1.1332,
+ "step": 895
+ },
+ {
+ "epoch": 1.64,
+ "grad_norm": 0.3757380288097042,
+ "learning_rate": 4.0426259591776645e-05,
+ "loss": 1.0903,
+ "step": 896
+ },
+ {
+ "epoch": 1.64,
+ "grad_norm": 0.34543617916988756,
+ "learning_rate": 4.0341009997037356e-05,
+ "loss": 1.0988,
+ "step": 897
+ },
+ {
+ "epoch": 1.64,
+ "grad_norm": 0.46961952093071074,
+ "learning_rate": 4.025575885322563e-05,
+ "loss": 1.1614,
+ "step": 898
+ },
+ {
+ "epoch": 1.64,
+ "grad_norm": 0.3774699370118855,
+ "learning_rate": 4.0170506547603427e-05,
+ "loss": 1.0806,
+ "step": 899
+ },
+ {
+ "epoch": 1.65,
+ "grad_norm": 0.3971851117572538,
+ "learning_rate": 4.008525346743797e-05,
+ "loss": 1.1117,
+ "step": 900
+ }
+ ],
+ "logging_steps": 1.0,
+ "max_steps": 1638,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 3,
+ "save_steps": 50,
+ "total_flos": 933251833135104.0,
+ "train_batch_size": 1,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/v1/checkpoint-900/training_args.bin b/v1/checkpoint-900/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..c5d2416a3b70bb5260978ec9996f00154a724ba7
--- /dev/null
+++ b/v1/checkpoint-900/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b22e8f9d51a16d03a2c506fa3d1eafa8f4b1ae992992c2086a4d435ffd97387e
+size 6712
diff --git a/v1/checkpoint-900/zero_to_fp32.py b/v1/checkpoint-900/zero_to_fp32.py
new file mode 100755
index 0000000000000000000000000000000000000000..24cc342e78d1a006c782b3a4cd68d9ce786d8fd8
--- /dev/null
+++ b/v1/checkpoint-900/zero_to_fp32.py
@@ -0,0 +1,604 @@
+#!/usr/bin/env python
+
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+# This script extracts fp32 consolidated weights from a zero 1, 2 and 3 DeepSpeed checkpoints. It gets
+# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
+# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
+# application.
+#
+# example: python zero_to_fp32.py . pytorch_model.bin
+
+import argparse
+import torch
+import glob
+import math
+import os
+import re
+from collections import OrderedDict
+from dataclasses import dataclass
+
+# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
+# DeepSpeed data structures it has to be available in the current python environment.
+from deepspeed.utils import logger
+from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
+
+
+@dataclass
+class zero_model_state:
+    """Per-rank bundle of model-state pieces read from one ``*_model_states.pt`` file.
+
+    Instances are created by ``parse_model_states`` (one per rank/file) and
+    consumed by the zero2/zero3 merge helpers below.
+    """
+    buffers: dict  # name -> fp32 tensor for persistent buffers
+    param_shapes: dict  # per param-group dicts of name -> shape
+    shared_params: list  # [alias_name, source_name] pairs to re-link after merge
+    ds_version: int  # NOTE(review): value comes from DS_VERSION and may be a version string, not an int
+    frozen_param_shapes: dict  # may be None when the checkpoint has no frozen params
+    frozen_param_fragments: dict  # may be None when the checkpoint has no frozen params
+
+
+# Toggle for the verbose tracing prints scattered through this script (0 = quiet).
+debug = 0
+
+# load to cpu
+# All checkpoint shards are materialized on CPU so conversion needs no GPU.
+device = torch.device('cpu')
+
+
+def atoi(text):
+    """Return ``text`` as an ``int`` when it is all digits, else return it unchanged.
+
+    Helper for ``natural_keys`` so numeric filename fragments compare numerically.
+    """
+    return int(text) if text.isdigit() else text
+
+
+def natural_keys(text):
+    '''
+    alist.sort(key=natural_keys) sorts in human order
+    http://nedbatchelder.com/blog/200712/human_sorting.html
+    (See Toothy's implementation in the comments)
+    '''
+    # Split on digit runs so e.g. "rank_10" sorts after "rank_2".
+    return [atoi(c) for c in re.split(r'(\d+)', text)]
+
+
+def get_model_state_file(checkpoint_dir, zero_stage):
+    """Return the path of rank 0's model-states file for the given ZeRO stage.
+
+    Stage 1/2 and stage 3 checkpoints use different file naming schemes.
+    Raises ``FileNotFoundError`` if the directory or the file is missing.
+    """
+    if not os.path.isdir(checkpoint_dir):
+        raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
+
+    # there should be only one file
+    if zero_stage <= 2:
+        file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
+    elif zero_stage == 3:
+        file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
+
+    # NOTE(review): a zero_stage outside 1-3 would leave `file` unbound and
+    # raise NameError on the next line rather than a clear error.
+    if not os.path.exists(file):
+        raise FileNotFoundError(f"can't find model states file at '{file}'")
+
+    return file
+
+
+def get_checkpoint_files(checkpoint_dir, glob_pattern):
+    """Return all files under ``checkpoint_dir`` matching ``glob_pattern``,
+    naturally sorted (so rank 10 sorts after rank 2)."""
+    # XXX: need to test that this simple glob rule works for multi-node setup too
+    ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
+
+    if len(ckpt_files) == 0:
+        raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
+
+    return ckpt_files
+
+
+def get_optim_files(checkpoint_dir):
+    """Return the per-rank ``*_optim_states.pt`` files, naturally sorted."""
+    return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
+
+
+def get_model_state_files(checkpoint_dir):
+    """Return the per-rank ``*_model_states.pt`` files, naturally sorted."""
+    return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
+
+
+def parse_model_states(files):
+    """Load each per-rank model-states file and distill it into a
+    ``zero_model_state`` (buffers restored to fp32, param shapes, shared and
+    frozen param metadata). Returns one entry per input file, in order.
+
+    Raises ``ValueError`` when a file lacks the expected BUFFER_NAMES key.
+    """
+    zero_model_states = []
+    for file in files:
+        state_dict = torch.load(file, map_location=device)
+
+        if BUFFER_NAMES not in state_dict:
+            raise ValueError(f"{file} is not a model state checkpoint")
+        buffer_names = state_dict[BUFFER_NAMES]
+        if debug:
+            print("Found buffers:", buffer_names)
+
+        # recover just the buffers while restoring them to fp32 if they were saved in fp16
+        buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
+        param_shapes = state_dict[PARAM_SHAPES]
+
+        # collect parameters that are included in param_shapes
+        # NOTE(review): param_names is built below but never used afterwards
+        # (it is not stored in zero_model_state) - dead code kept as-is.
+        param_names = []
+        for s in param_shapes:
+            for name in s.keys():
+                param_names.append(name)
+
+        # update with frozen parameters
+        frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
+        if frozen_param_shapes is not None:
+            if debug:
+                print(f"Found frozen_param_shapes: {frozen_param_shapes}")
+            param_names += list(frozen_param_shapes.keys())
+
+        # handle shared params
+        shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
+
+        ds_version = state_dict.get(DS_VERSION, None)
+
+        frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
+
+        z_model_state = zero_model_state(buffers=buffers,
+                                         param_shapes=param_shapes,
+                                         shared_params=shared_params,
+                                         ds_version=ds_version,
+                                         frozen_param_shapes=frozen_param_shapes,
+                                         frozen_param_fragments=frozen_param_fragments)
+        zero_model_states.append(z_model_state)
+
+    return zero_model_states
+
+
+def parse_optim_states(files, ds_checkpoint_dir):
+    """Load the per-rank optimizer-states files and extract what the merge
+    needs: ``(zero_stage, world_size, fp32_flat_groups)``.
+
+    ``fp32_flat_groups`` is one entry per rank: for stage 1/2 the list of
+    per-group fp32 partitions, for stage 3 a single flattened tensor (groups
+    concatenated). Raises ``ValueError`` on non-zero checkpoints or when the
+    number of files does not match the recorded dp world size.
+    """
+
+    total_files = len(files)
+    state_dicts = []
+    for f in files:
+        state_dict = torch.load(f, map_location=device)
+        # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights
+        # and also handle the case where it was already removed by another helper script
+        state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
+        state_dicts.append(state_dict)
+
+    if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]:
+        raise ValueError(f"{files[0]} is not a zero checkpoint")
+    zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
+    world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
+
+    # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
+    # parameters can be different from data parallelism for non-expert parameters. So we can just
+    # use the max of the partition_count to get the dp world_size.
+
+    if type(world_size) is list:
+        world_size = max(world_size)
+
+    if world_size != total_files:
+        raise ValueError(
+            f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
+            "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
+        )
+
+    # the groups are named differently in each stage
+    if zero_stage <= 2:
+        fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
+    elif zero_stage == 3:
+        fp32_groups_key = FP32_FLAT_GROUPS
+    else:
+        raise ValueError(f"unknown zero stage {zero_stage}")
+
+    if zero_stage <= 2:
+        fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
+    elif zero_stage == 3:
+        # if there is more than one param group, there will be multiple flattened tensors - one
+        # flattened tensor per group - for simplicity merge them into a single tensor
+        #
+        # XXX: could make the script more memory efficient for when there are multiple groups - it
+        # will require matching the sub-lists of param_shapes for each param group flattened tensor
+
+        fp32_flat_groups = [
+            torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
+        ]
+
+    return zero_stage, world_size, fp32_flat_groups
+
+
+def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
+    """
+    Returns fp32 state_dict reconstructed from ds checkpoint
+
+    Args:
+        - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
+        - ``exclude_frozen_parameters``: when True, frozen (untrained) parameters are left out
+
+    """
+    print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
+
+    optim_files = get_optim_files(ds_checkpoint_dir)
+    zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
+    print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
+
+    model_files = get_model_state_files(ds_checkpoint_dir)
+
+    zero_model_states = parse_model_states(model_files)
+    print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
+
+    # dispatch on the stage recovered from the optimizer states
+    if zero_stage <= 2:
+        return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+                                                          exclude_frozen_parameters)
+    elif zero_stage == 3:
+        return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+                                                          exclude_frozen_parameters)
+
+
+def _zero2_merge_frozen_params(state_dict, zero_model_states):
+    """Copy frozen (untrained) params into ``state_dict`` for a stage-1/2 checkpoint.
+
+    In stage 1/2 frozen params are not partitioned, so rank 0's fragments are
+    used as-is. No-op when the checkpoint has no frozen params.
+    """
+    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+        return
+
+    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+    frozen_param_fragments = zero_model_states[0].frozen_param_fragments
+
+    if debug:
+        num_elem = sum(s.numel() for s in frozen_param_shapes.values())
+        print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+    wanted_params = len(frozen_param_shapes)
+    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+    avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
+    print(f'Frozen params: Have {avail_numel} numels to process.')
+    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+    total_params = 0
+    total_numel = 0
+    for name, shape in frozen_param_shapes.items():
+        total_params += 1
+        unpartitioned_numel = shape.numel()
+        total_numel += unpartitioned_numel
+
+        state_dict[name] = frozen_param_fragments[name]
+
+        if debug:
+            print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+
+    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _has_callable(obj, fn):
+    """Return True when ``obj`` has an attribute ``fn`` that is callable."""
+    attr = getattr(obj, fn, None)
+    return callable(attr)
+
+
+def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+    """Reconstruct trainable params for a stage-1/2 checkpoint into ``state_dict``.
+
+    Concatenates each param group's per-rank fp32 partitions into one flat
+    vector, then slices consecutive spans out of it and views them back to
+    each param's recorded shape. Raises ``ValueError`` when the consumed
+    element count does not match what is available (after alignment padding).
+    """
+    param_shapes = zero_model_states[0].param_shapes
+
+    # Reconstruction protocol:
+    #
+    # XXX: document this
+
+    if debug:
+        for i in range(world_size):
+            for j in range(len(fp32_flat_groups[0])):
+                print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
+
+    # XXX: memory usage doubles here (zero2)
+    num_param_groups = len(fp32_flat_groups[0])
+    merged_single_partition_of_fp32_groups = []
+    for i in range(num_param_groups):
+        merged_partitions = [sd[i] for sd in fp32_flat_groups]
+        full_single_fp32_vector = torch.cat(merged_partitions, 0)
+        merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
+    avail_numel = sum(
+        [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
+
+    if debug:
+        wanted_params = sum([len(shapes) for shapes in param_shapes])
+        wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
+        # not asserting if there is a mismatch due to possible padding
+        print(f"Have {avail_numel} numels to process.")
+        print(f"Need {wanted_numel} numels in {wanted_params} params.")
+
+    # params
+    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+    # out-of-core computing solution
+    total_numel = 0
+    total_params = 0
+    for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
+        offset = 0
+        avail_numel = full_single_fp32_vector.numel()
+        for name, shape in shapes.items():
+
+            # shape may be a tensor-like with .numel() or a plain tuple/list
+            unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
+            total_numel += unpartitioned_numel
+            total_params += 1
+
+            if debug:
+                print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+            state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
+            offset += unpartitioned_numel
+
+        # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
+        # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
+        # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
+        # live optimizer object, so we are checking that the numbers are within the right range
+        align_to = 2 * world_size
+
+        def zero2_align(x):
+            return align_to * math.ceil(x / align_to)
+
+        if debug:
+            print(f"original offset={offset}, avail_numel={avail_numel}")
+
+        offset = zero2_align(offset)
+        avail_numel = zero2_align(avail_numel)
+
+        if debug:
+            print(f"aligned offset={offset}, avail_numel={avail_numel}")
+
+        # Sanity check
+        if offset != avail_numel:
+            raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+    print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+                                               exclude_frozen_parameters):
+    """Assemble the full fp32 state_dict for a stage-1/2 checkpoint:
+    buffers, then frozen params (unless excluded), then trainable params,
+    then re-link shared params to their sources."""
+    state_dict = OrderedDict()
+
+    # buffers
+    buffers = zero_model_states[0].buffers
+    state_dict.update(buffers)
+    if debug:
+        print(f"added {len(buffers)} buffers")
+
+    if not exclude_frozen_parameters:
+        _zero2_merge_frozen_params(state_dict, zero_model_states)
+
+    _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+    # recover shared parameters
+    for pair in zero_model_states[0].shared_params:
+        if pair[1] in state_dict:
+            state_dict[pair[0]] = state_dict[pair[1]]
+
+    return state_dict
+
+
+def zero3_partitioned_param_info(unpartitioned_numel, world_size):
+    """Return ``(partitioned_numel, padding_numel)`` for one stage-3 param:
+    the per-rank partition size (ceil division) and how many pad elements the
+    last rank carries so partitions divide evenly."""
+    remainder = unpartitioned_numel % world_size
+    padding_numel = (world_size - remainder) if remainder else 0
+    partitioned_numel = math.ceil(unpartitioned_numel / world_size)
+    return partitioned_numel, padding_numel
+
+
+def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
+    """Reassemble frozen params for a stage-3 checkpoint into ``state_dict``.
+
+    In stage 3 each rank holds a fragment of every frozen param; fragments are
+    concatenated across ranks, padding trimmed, and viewed back to shape.
+    No-op when the checkpoint has no frozen params.
+    """
+    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+        return
+
+    if debug:
+        for i in range(world_size):
+            num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
+            print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+    wanted_params = len(frozen_param_shapes)
+    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+    avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
+    print(f'Frozen params: Have {avail_numel} numels to process.')
+    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+    total_params = 0
+    total_numel = 0
+    for name, shape in zero_model_states[0].frozen_param_shapes.items():
+        total_params += 1
+        unpartitioned_numel = shape.numel()
+        total_numel += unpartitioned_numel
+
+        # concatenate this param's fragment from every rank, then drop padding
+        param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
+        state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
+
+        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+        if debug:
+            print(
+                f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+            )
+
+    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+    """Reconstruct trainable params for a stage-3 checkpoint into ``state_dict``.
+
+    Every rank's flat group holds a same-length slice of each param, in the
+    same order; for each param the per-rank slices at the current offset are
+    concatenated, padding trimmed, and the result viewed back to shape.
+    Raises ``ValueError`` when consumed elements do not match availability.
+    """
+    param_shapes = zero_model_states[0].param_shapes
+    avail_numel = fp32_flat_groups[0].numel() * world_size
+    # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
+    # param, re-consolidating each param, while dealing with padding if any
+
+    # merge list of dicts, preserving order
+    param_shapes = {k: v for d in param_shapes for k, v in d.items()}
+
+    if debug:
+        for i in range(world_size):
+            print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
+
+    wanted_params = len(param_shapes)
+    wanted_numel = sum(shape.numel() for shape in param_shapes.values())
+    # not asserting if there is a mismatch due to possible padding
+    avail_numel = fp32_flat_groups[0].numel() * world_size
+    print(f"Trainable params: Have {avail_numel} numels to process.")
+    print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
+
+    # params
+    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+    # out-of-core computing solution
+    offset = 0
+    total_numel = 0
+    total_params = 0
+    for name, shape in param_shapes.items():
+
+        unpartitioned_numel = shape.numel()
+        total_numel += unpartitioned_numel
+        total_params += 1
+
+        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+        if debug:
+            print(
+                f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+            )
+
+        # XXX: memory usage doubles here
+        state_dict[name] = torch.cat(
+            tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
+            0).narrow(0, 0, unpartitioned_numel).view(shape)
+        offset += partitioned_numel
+
+    # offset advanced by per-rank amounts; scale to total elements consumed
+    offset *= world_size
+
+    # Sanity check
+    if offset != avail_numel:
+        raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+    print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+                                               exclude_frozen_parameters):
+    """Assemble the full fp32 state_dict for a stage-3 checkpoint:
+    buffers, then frozen params (unless excluded), then trainable params,
+    then re-link shared params to their sources."""
+    state_dict = OrderedDict()
+
+    # buffers
+    buffers = zero_model_states[0].buffers
+    state_dict.update(buffers)
+    if debug:
+        print(f"added {len(buffers)} buffers")
+
+    if not exclude_frozen_parameters:
+        _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
+
+    _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+    # recover shared parameters
+    for pair in zero_model_states[0].shared_params:
+        if pair[1] in state_dict:
+            state_dict[pair[0]] = state_dict[pair[1]]
+
+    return state_dict
+
+
+def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None, exclude_frozen_parameters=False):
+    """
+    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
+    ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
+    via a model hub.
+
+    Args:
+        - ``checkpoint_dir``: path to the desired checkpoint folder
+        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
+        - ``exclude_frozen_parameters``: exclude frozen parameters
+
+    Returns:
+        - pytorch ``state_dict``
+
+    Note: this approach may not work if your application doesn't have sufficient free CPU memory and
+    you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
+    the checkpoint.
+
+    A typical usage might be ::
+
+        from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+        # do the training and checkpoint saving
+        state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
+        model = model.cpu() # move to cpu
+        model.load_state_dict(state_dict)
+        # submit to model hub or save the model to share with others
+
+    In this example the ``model`` will no longer be usable in the deepspeed context of the same
+    application. i.e. you will need to re-initialize the deepspeed engine, since
+    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+    If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
+
+    """
+    # Resolve the tag from the 'latest' marker file DeepSpeed writes at save time.
+    if tag is None:
+        latest_path = os.path.join(checkpoint_dir, 'latest')
+        if os.path.isfile(latest_path):
+            with open(latest_path, 'r') as fd:
+                tag = fd.read().strip()
+        else:
+            raise ValueError(f"Unable to find 'latest' file at {latest_path}")
+
+    ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
+
+    if not os.path.isdir(ds_checkpoint_dir):
+        raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
+
+    return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
+
+
+def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None, exclude_frozen_parameters=False):
+ """
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
+ loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
+
+ Args:
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+ - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+ - ``exclude_frozen_parameters``: exclude frozen parameters
+ """
+
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag, exclude_frozen_parameters)
+ print(f"Saving fp32 state dict to {output_file}")
+ torch.save(state_dict, output_file)
+
+
+def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
+ """
+ 1. Put the provided model to cpu
+ 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
+ 3. Load it into the provided model
+
+ Args:
+ - ``model``: the model object to update
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+
+ Returns:
+ - ``model`: modified model
+
+ Make sure you have plenty of CPU memory available before you call this function. If you don't
+ have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
+ conveniently placed for you in the checkpoint folder.
+
+ A typical usage might be ::
+
+ from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
+ model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
+ # submit to model hub or save the model to share with others
+
+ Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context
+ of the same application. i.e. you will need to re-initialize the deepspeed engine, since
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+ """
+ logger.info(f"Extracting fp32 weights")
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+
+ logger.info(f"Overwriting model with fp32 weights")
+ model = model.cpu()
+ model.load_state_dict(state_dict, strict=False)
+
+ return model
+
+
+if __name__ == "__main__":
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument("checkpoint_dir",
+ type=str,
+ help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
+ parser.add_argument(
+ "output_file",
+ type=str,
+ help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)")
+ parser.add_argument("-t",
+ "--tag",
+ type=str,
+ default=None,
+ help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
+ parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
+ parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
+ args = parser.parse_args()
+
+ debug = args.debug
+
+ convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
+ args.output_file,
+ tag=args.tag,
+ exclude_frozen_parameters=args.exclude_frozen_parameters)
diff --git a/v1/checkpoint-950/README.md b/v1/checkpoint-950/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..16b1eacdd9353dec380a08ee77ce6ed5ab50f12e
--- /dev/null
+++ b/v1/checkpoint-950/README.md
@@ -0,0 +1,202 @@
+---
+library_name: peft
+base_model: gotzmann/uni
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+### Framework versions
+
+- PEFT 0.10.0
\ No newline at end of file
diff --git a/v1/checkpoint-950/adapter_config.json b/v1/checkpoint-950/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3cd6dba5d79f7ca21fd4ad465cbbcac1e0960476
--- /dev/null
+++ b/v1/checkpoint-950/adapter_config.json
@@ -0,0 +1,31 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "gotzmann/uni",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 128,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 128,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "v_proj",
+ "k_proj",
+ "q_proj",
+ "o_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": true
+}
\ No newline at end of file
diff --git a/v1/checkpoint-950/adapter_model.safetensors b/v1/checkpoint-950/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..489d501ff51e9197a75f915357c7f63a8f0faf74
--- /dev/null
+++ b/v1/checkpoint-950/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f1f2d008ddcf6dcc8102a594c1b5abd01d1e91afc4e1e497098f4cfa829e7562
+size 1048664848
diff --git a/v1/checkpoint-950/global_step950/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt b/v1/checkpoint-950/global_step950/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..20a8243cf6e00a413284d1bc21fb7fa19057ddf4
--- /dev/null
+++ b/v1/checkpoint-950/global_step950/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a2d09b208b56dec2d00c67b3199e59f73d5a85f0357b80d77f09959c7fc1640d
+size 787270042
diff --git a/v1/checkpoint-950/global_step950/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt b/v1/checkpoint-950/global_step950/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..43deb24a70d530eaf73480ce4f19485cb7373f50
--- /dev/null
+++ b/v1/checkpoint-950/global_step950/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:36c882fdfce513a50f7bb41893c75603bf544e0e6ccdddd4fc06f486c8b156a6
+size 787270042
diff --git a/v1/checkpoint-950/global_step950/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt b/v1/checkpoint-950/global_step950/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..7db3d45559881ed430d4049848a4fdc3d1328d86
--- /dev/null
+++ b/v1/checkpoint-950/global_step950/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:419e141f6c0505002c7e5b1e60c247cde5ef08451c7a776afb8e4e99be673daa
+size 787270042
diff --git a/v1/checkpoint-950/global_step950/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt b/v1/checkpoint-950/global_step950/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..b94326dcf353c17e1f6f210c6ca3946bbe182599
--- /dev/null
+++ b/v1/checkpoint-950/global_step950/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b75cb87e686434228493c6728799840e8e6dd609bf9d5660ee200eb570c9aac7
+size 787270042
diff --git a/v1/checkpoint-950/global_step950/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt b/v1/checkpoint-950/global_step950/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..3376a53d2f9cc3363e97ee6823a900b80d67ae9d
--- /dev/null
+++ b/v1/checkpoint-950/global_step950/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:03bf0531fc7df0fc3ef86bfed35832e636bcfc0c924a574f222a1a02d4ff119f
+size 787270042
diff --git a/v1/checkpoint-950/global_step950/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt b/v1/checkpoint-950/global_step950/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..b1001919302ec9f24c2be7c518b3a931f6ce0835
--- /dev/null
+++ b/v1/checkpoint-950/global_step950/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6f2b7311adaf77a394e93012a756b2ff071134e58ca07e0503f70ade86046aee
+size 787270042
diff --git a/v1/checkpoint-950/global_step950/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt b/v1/checkpoint-950/global_step950/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..f18aec790412f94f9a221bd8df9000c662639c63
--- /dev/null
+++ b/v1/checkpoint-950/global_step950/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5163b9289645fd3b125d5111f8549759c5e2ef9bc25e3ed4e4b5458797d8f1fb
+size 787270042
diff --git a/v1/checkpoint-950/global_step950/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt b/v1/checkpoint-950/global_step950/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..fd3563c7ebc8cb2c0a536825d7351fecc850ec96
--- /dev/null
+++ b/v1/checkpoint-950/global_step950/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dfbfdc05e737ca023e69a38cfb31badf8920b4a3502377e5cfddcb82f708b24c
+size 787270042
diff --git a/v1/checkpoint-950/global_step950/zero_pp_rank_0_mp_rank_00_model_states.pt b/v1/checkpoint-950/global_step950/zero_pp_rank_0_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..362905e6054fb1bcc03c07caa54ba24eeb916f73
--- /dev/null
+++ b/v1/checkpoint-950/global_step950/zero_pp_rank_0_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:06871064748a2d74939ccaa52498980379201a336595cf5b68509ec9fd57629e
+size 653742
diff --git a/v1/checkpoint-950/global_step950/zero_pp_rank_1_mp_rank_00_model_states.pt b/v1/checkpoint-950/global_step950/zero_pp_rank_1_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..557f7923227de6356959c3028c6144725d70f1c5
--- /dev/null
+++ b/v1/checkpoint-950/global_step950/zero_pp_rank_1_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6112c9e5ff1016ddca73586aaee232b6adade2775417261619306f82306826de
+size 653742
diff --git a/v1/checkpoint-950/global_step950/zero_pp_rank_2_mp_rank_00_model_states.pt b/v1/checkpoint-950/global_step950/zero_pp_rank_2_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..a6e3a7cf5b94ec0fe7fc3e6934965a7c56dcd31b
--- /dev/null
+++ b/v1/checkpoint-950/global_step950/zero_pp_rank_2_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:05b906209f44b0de56e207c618fbd22817ae69263d840471d6fc8561c31b3fc6
+size 653742
diff --git a/v1/checkpoint-950/global_step950/zero_pp_rank_3_mp_rank_00_model_states.pt b/v1/checkpoint-950/global_step950/zero_pp_rank_3_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..46a0942570db8e69bfe49bf17113397441f2753d
--- /dev/null
+++ b/v1/checkpoint-950/global_step950/zero_pp_rank_3_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d7e595fe36d10ca2c7f34af230c0f05614cbdeebdb17cd27473207a8f7f1ecd1
+size 653742
diff --git a/v1/checkpoint-950/global_step950/zero_pp_rank_4_mp_rank_00_model_states.pt b/v1/checkpoint-950/global_step950/zero_pp_rank_4_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..b09a10c961dfa59b73b9007ba10d896abdec5fe8
--- /dev/null
+++ b/v1/checkpoint-950/global_step950/zero_pp_rank_4_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:95c4b101e57621fc03022a7892c156142617fcc09c0c7d195af49321c6d17c50
+size 653742
diff --git a/v1/checkpoint-950/global_step950/zero_pp_rank_5_mp_rank_00_model_states.pt b/v1/checkpoint-950/global_step950/zero_pp_rank_5_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..ea0fe032a986858f9c29913ac90bf3d7f4721fc3
--- /dev/null
+++ b/v1/checkpoint-950/global_step950/zero_pp_rank_5_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af7e613ed61c9dd397df05ec8bf2831d5f3d7ed3aef890a45d8a01444d5698d9
+size 653742
diff --git a/v1/checkpoint-950/global_step950/zero_pp_rank_6_mp_rank_00_model_states.pt b/v1/checkpoint-950/global_step950/zero_pp_rank_6_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..42552e4dbbd62501246d5b6fcada395da63dee2d
--- /dev/null
+++ b/v1/checkpoint-950/global_step950/zero_pp_rank_6_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:caa9037a563412e434b77de8145eabdd220d1511f0fea3d5ae4adf35a264d075
+size 653742
diff --git a/v1/checkpoint-950/global_step950/zero_pp_rank_7_mp_rank_00_model_states.pt b/v1/checkpoint-950/global_step950/zero_pp_rank_7_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..780c3641d639caf2efb4158a46a88cddb93beac5
--- /dev/null
+++ b/v1/checkpoint-950/global_step950/zero_pp_rank_7_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:06d179e3f1cb612128185fb6d249ab14e5d361ba677134b7656e556735698ab4
+size 653742
diff --git a/v1/checkpoint-950/latest b/v1/checkpoint-950/latest
new file mode 100644
index 0000000000000000000000000000000000000000..f28dea26cae2d6cdf8627fc9fea4d0317a985e22
--- /dev/null
+++ b/v1/checkpoint-950/latest
@@ -0,0 +1 @@
+global_step950
\ No newline at end of file
diff --git a/v1/checkpoint-950/rng_state_0.pth b/v1/checkpoint-950/rng_state_0.pth
new file mode 100644
index 0000000000000000000000000000000000000000..4e5b7e2ec90fdb824c8932464c1d9068330655a7
--- /dev/null
+++ b/v1/checkpoint-950/rng_state_0.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:36d2a2034ebb05cb71c510897f2795b31164e50f17b270bc25d2be3ad9a17b22
+size 15984
diff --git a/v1/checkpoint-950/rng_state_1.pth b/v1/checkpoint-950/rng_state_1.pth
new file mode 100644
index 0000000000000000000000000000000000000000..7d8d7722fc72cab6d492b76cb99c8177dcc47544
--- /dev/null
+++ b/v1/checkpoint-950/rng_state_1.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:060dfdb1c49102cbdc8868a6031e68787601b4ccd782f3fb9b137e20c1fd2c7a
+size 15984
diff --git a/v1/checkpoint-950/rng_state_2.pth b/v1/checkpoint-950/rng_state_2.pth
new file mode 100644
index 0000000000000000000000000000000000000000..3c9f84eff30cfa9ea1feedaf262d61fb12e4cba7
--- /dev/null
+++ b/v1/checkpoint-950/rng_state_2.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af01895cb66e616591f2e4baa8dcd8151530eab133c73571ccb31c74f35422ce
+size 15984
diff --git a/v1/checkpoint-950/rng_state_3.pth b/v1/checkpoint-950/rng_state_3.pth
new file mode 100644
index 0000000000000000000000000000000000000000..6eebfb928f8e91eff0ea1645a20b5aa4465c705b
--- /dev/null
+++ b/v1/checkpoint-950/rng_state_3.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:677921992b1e0cef3aee776f245975003d22f51d9bd6ed20f248ded1deb72fa9
+size 15984
diff --git a/v1/checkpoint-950/rng_state_4.pth b/v1/checkpoint-950/rng_state_4.pth
new file mode 100644
index 0000000000000000000000000000000000000000..0866030a266c6d003cc378a9418a723f69e8ab99
--- /dev/null
+++ b/v1/checkpoint-950/rng_state_4.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d69353c629541c690c5471f8ec05fdab2bfecf3d37afaa436bc45939da6db68f
+size 15984
diff --git a/v1/checkpoint-950/rng_state_5.pth b/v1/checkpoint-950/rng_state_5.pth
new file mode 100644
index 0000000000000000000000000000000000000000..554638d77107f832d7aa51c61645ee2d6c48a36d
--- /dev/null
+++ b/v1/checkpoint-950/rng_state_5.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8e40ba6668cc03c9162c68a933d164bf38ae2d196a9a6fec03ae615491201185
+size 15984
diff --git a/v1/checkpoint-950/rng_state_6.pth b/v1/checkpoint-950/rng_state_6.pth
new file mode 100644
index 0000000000000000000000000000000000000000..964331b65172a1bcac03e4673415fa787f724268
--- /dev/null
+++ b/v1/checkpoint-950/rng_state_6.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:870968fea834e24b2e099cf3e4fe1e3fb8caf38d8f8e5b790d7d47386d4d05f5
+size 15984
diff --git a/v1/checkpoint-950/rng_state_7.pth b/v1/checkpoint-950/rng_state_7.pth
new file mode 100644
index 0000000000000000000000000000000000000000..cd4754d65217d0f9d1f2d3334397df7a8a079652
--- /dev/null
+++ b/v1/checkpoint-950/rng_state_7.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e9e19618bee7c6ef43256fea25abe19bca88535eb1e7dc213cde8929ae4e8180
+size 15984
diff --git a/v1/checkpoint-950/scheduler.pt b/v1/checkpoint-950/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..d42d83f79bf80509de3ee41912f76fc1743d5b57
--- /dev/null
+++ b/v1/checkpoint-950/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1f8b119ba4e8a4b7508e1de90fcaaa11574df940eccbb5feaa8294287f46a895
+size 1064
diff --git a/v1/checkpoint-950/special_tokens_map.json b/v1/checkpoint-950/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..72ecfeeb7e14d244c936169d2ed139eeae235ef1
--- /dev/null
+++ b/v1/checkpoint-950/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "",
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/v1/checkpoint-950/tokenizer.model b/v1/checkpoint-950/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..6c00c742ce03c627d6cd5b795984876fa49fa899
--- /dev/null
+++ b/v1/checkpoint-950/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
diff --git a/v1/checkpoint-950/tokenizer_config.json b/v1/checkpoint-950/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..bb5a9f09d8c0f3c32c66fc6118fe5c76c5c6fd90
--- /dev/null
+++ b/v1/checkpoint-950/tokenizer_config.json
@@ -0,0 +1,45 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "",
+ "chat_template": "{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ '' + '### System:\\n\\n' + system_message }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '\\n\\n### Human:\\n\\n' + content }}{% elif message['role'] == 'assistant' %}{{ '\\n\\n### Assistant:\\n\\n' + content + '' }}{% endif %}{% endfor %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "",
+ "legacy": false,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "split_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "",
+ "use_default_system_prompt": false
+}
diff --git a/v1/checkpoint-950/trainer_state.json b/v1/checkpoint-950/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..8736a30d3eeb6660d0c28a2f33497d15108eac35
--- /dev/null
+++ b/v1/checkpoint-950/trainer_state.json
@@ -0,0 +1,6671 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 1.7375400091449476,
+ "eval_steps": 500,
+ "global_step": 950,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.0,
+ "grad_norm": 0.849355824164473,
+ "learning_rate": 4.878048780487805e-07,
+ "loss": 1.3655,
+ "step": 1
+ },
+ {
+ "epoch": 0.0,
+ "grad_norm": 10.01567518957158,
+ "learning_rate": 9.75609756097561e-07,
+ "loss": 1.5767,
+ "step": 2
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.6466000875559635,
+ "learning_rate": 1.4634146341463414e-06,
+ "loss": 1.3913,
+ "step": 3
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.6644565932010504,
+ "learning_rate": 1.951219512195122e-06,
+ "loss": 1.3218,
+ "step": 4
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.571354207588475,
+ "learning_rate": 2.4390243902439027e-06,
+ "loss": 1.3597,
+ "step": 5
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.31036262839244955,
+ "learning_rate": 2.926829268292683e-06,
+ "loss": 1.2832,
+ "step": 6
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.2622135027188184,
+ "learning_rate": 3.414634146341464e-06,
+ "loss": 1.2161,
+ "step": 7
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.296824630261661,
+ "learning_rate": 3.902439024390244e-06,
+ "loss": 1.2985,
+ "step": 8
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.2557267467361569,
+ "learning_rate": 4.390243902439025e-06,
+ "loss": 1.3175,
+ "step": 9
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.23418939513890769,
+ "learning_rate": 4.8780487804878055e-06,
+ "loss": 1.2617,
+ "step": 10
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.2364760983285843,
+ "learning_rate": 5.365853658536586e-06,
+ "loss": 1.3103,
+ "step": 11
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.23893034721889,
+ "learning_rate": 5.853658536585366e-06,
+ "loss": 1.2405,
+ "step": 12
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.25563593295485887,
+ "learning_rate": 6.341463414634147e-06,
+ "loss": 1.2831,
+ "step": 13
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.23239975352661665,
+ "learning_rate": 6.829268292682928e-06,
+ "loss": 1.3125,
+ "step": 14
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.3092813858209507,
+ "learning_rate": 7.317073170731707e-06,
+ "loss": 1.2422,
+ "step": 15
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.282563380367434,
+ "learning_rate": 7.804878048780489e-06,
+ "loss": 1.2453,
+ "step": 16
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.22065680088315018,
+ "learning_rate": 8.292682926829268e-06,
+ "loss": 1.2491,
+ "step": 17
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.22777800877980184,
+ "learning_rate": 8.78048780487805e-06,
+ "loss": 1.2655,
+ "step": 18
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.22145212540177928,
+ "learning_rate": 9.268292682926831e-06,
+ "loss": 1.2413,
+ "step": 19
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.22482351883112714,
+ "learning_rate": 9.756097560975611e-06,
+ "loss": 1.2653,
+ "step": 20
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.20823080508385733,
+ "learning_rate": 1.024390243902439e-05,
+ "loss": 1.2374,
+ "step": 21
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.26025492562935737,
+ "learning_rate": 1.0731707317073172e-05,
+ "loss": 1.2065,
+ "step": 22
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.2150252124176173,
+ "learning_rate": 1.1219512195121953e-05,
+ "loss": 1.2782,
+ "step": 23
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.2505915177425618,
+ "learning_rate": 1.1707317073170731e-05,
+ "loss": 1.2742,
+ "step": 24
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.20129223044786942,
+ "learning_rate": 1.2195121951219513e-05,
+ "loss": 1.3366,
+ "step": 25
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.1973508510397107,
+ "learning_rate": 1.2682926829268294e-05,
+ "loss": 1.2476,
+ "step": 26
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.27103325392437194,
+ "learning_rate": 1.3170731707317076e-05,
+ "loss": 1.2325,
+ "step": 27
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.17954976411006285,
+ "learning_rate": 1.3658536585365855e-05,
+ "loss": 1.2523,
+ "step": 28
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.22216997851088888,
+ "learning_rate": 1.4146341463414635e-05,
+ "loss": 1.3297,
+ "step": 29
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.2071458864548587,
+ "learning_rate": 1.4634146341463415e-05,
+ "loss": 1.2127,
+ "step": 30
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.18039422081622164,
+ "learning_rate": 1.5121951219512196e-05,
+ "loss": 1.2509,
+ "step": 31
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.18631254372974412,
+ "learning_rate": 1.5609756097560978e-05,
+ "loss": 1.2247,
+ "step": 32
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.18843872523649827,
+ "learning_rate": 1.6097560975609757e-05,
+ "loss": 1.195,
+ "step": 33
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.2163847267778325,
+ "learning_rate": 1.6585365853658537e-05,
+ "loss": 1.2179,
+ "step": 34
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.19687688475496104,
+ "learning_rate": 1.7073170731707317e-05,
+ "loss": 1.2763,
+ "step": 35
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.20409643064887947,
+ "learning_rate": 1.75609756097561e-05,
+ "loss": 1.253,
+ "step": 36
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.1879182661759335,
+ "learning_rate": 1.804878048780488e-05,
+ "loss": 1.2586,
+ "step": 37
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.19400648948514373,
+ "learning_rate": 1.8536585365853663e-05,
+ "loss": 1.2154,
+ "step": 38
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.1878879343148452,
+ "learning_rate": 1.902439024390244e-05,
+ "loss": 1.2304,
+ "step": 39
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.17687475469924052,
+ "learning_rate": 1.9512195121951222e-05,
+ "loss": 1.2351,
+ "step": 40
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.18223935625384885,
+ "learning_rate": 2e-05,
+ "loss": 1.2222,
+ "step": 41
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.1943061629408338,
+ "learning_rate": 2.048780487804878e-05,
+ "loss": 1.2044,
+ "step": 42
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.17027514338700078,
+ "learning_rate": 2.0975609756097564e-05,
+ "loss": 1.1548,
+ "step": 43
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.18553769630586192,
+ "learning_rate": 2.1463414634146344e-05,
+ "loss": 1.2721,
+ "step": 44
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.19732826914228765,
+ "learning_rate": 2.1951219512195124e-05,
+ "loss": 1.3097,
+ "step": 45
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.18714230986631472,
+ "learning_rate": 2.2439024390243907e-05,
+ "loss": 1.2662,
+ "step": 46
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.19988987568002223,
+ "learning_rate": 2.2926829268292683e-05,
+ "loss": 1.2904,
+ "step": 47
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.17744650133390918,
+ "learning_rate": 2.3414634146341463e-05,
+ "loss": 1.1825,
+ "step": 48
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.16576734763834533,
+ "learning_rate": 2.3902439024390246e-05,
+ "loss": 1.1858,
+ "step": 49
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.179591794065527,
+ "learning_rate": 2.4390243902439026e-05,
+ "loss": 1.2711,
+ "step": 50
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.17923464471176911,
+ "learning_rate": 2.4878048780487805e-05,
+ "loss": 1.2289,
+ "step": 51
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.18991742907836837,
+ "learning_rate": 2.536585365853659e-05,
+ "loss": 1.3097,
+ "step": 52
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.19849796137254636,
+ "learning_rate": 2.5853658536585368e-05,
+ "loss": 1.2489,
+ "step": 53
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.17452371110976383,
+ "learning_rate": 2.634146341463415e-05,
+ "loss": 1.2461,
+ "step": 54
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.17671022353085036,
+ "learning_rate": 2.682926829268293e-05,
+ "loss": 1.153,
+ "step": 55
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.36820559192096686,
+ "learning_rate": 2.731707317073171e-05,
+ "loss": 1.2431,
+ "step": 56
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.20331468526494198,
+ "learning_rate": 2.7804878048780487e-05,
+ "loss": 1.2575,
+ "step": 57
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.2402486598118377,
+ "learning_rate": 2.829268292682927e-05,
+ "loss": 1.2538,
+ "step": 58
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.2549409484173144,
+ "learning_rate": 2.878048780487805e-05,
+ "loss": 1.2065,
+ "step": 59
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.2053105349872685,
+ "learning_rate": 2.926829268292683e-05,
+ "loss": 1.2094,
+ "step": 60
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.17971910872957886,
+ "learning_rate": 2.9756097560975613e-05,
+ "loss": 1.228,
+ "step": 61
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.1885853654992973,
+ "learning_rate": 3.0243902439024392e-05,
+ "loss": 1.2286,
+ "step": 62
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.1848524571968613,
+ "learning_rate": 3.073170731707317e-05,
+ "loss": 1.2718,
+ "step": 63
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.18734105883548513,
+ "learning_rate": 3.1219512195121955e-05,
+ "loss": 1.2357,
+ "step": 64
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.17774668052121825,
+ "learning_rate": 3.170731707317074e-05,
+ "loss": 1.1509,
+ "step": 65
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.17890968008080646,
+ "learning_rate": 3.2195121951219514e-05,
+ "loss": 1.1924,
+ "step": 66
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.18249273371332375,
+ "learning_rate": 3.268292682926829e-05,
+ "loss": 1.2545,
+ "step": 67
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.21064122671902577,
+ "learning_rate": 3.3170731707317074e-05,
+ "loss": 1.2832,
+ "step": 68
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.1820064171955093,
+ "learning_rate": 3.365853658536586e-05,
+ "loss": 1.2071,
+ "step": 69
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.16996662800553433,
+ "learning_rate": 3.414634146341463e-05,
+ "loss": 1.2073,
+ "step": 70
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.1618669302922445,
+ "learning_rate": 3.4634146341463416e-05,
+ "loss": 1.1289,
+ "step": 71
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.18948744950985544,
+ "learning_rate": 3.51219512195122e-05,
+ "loss": 1.2915,
+ "step": 72
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.18326143691603383,
+ "learning_rate": 3.5609756097560976e-05,
+ "loss": 1.2238,
+ "step": 73
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.17410704510700503,
+ "learning_rate": 3.609756097560976e-05,
+ "loss": 1.1784,
+ "step": 74
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.1983667344995625,
+ "learning_rate": 3.658536585365854e-05,
+ "loss": 1.2452,
+ "step": 75
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.3416310763369357,
+ "learning_rate": 3.7073170731707325e-05,
+ "loss": 1.1972,
+ "step": 76
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.2776466983511955,
+ "learning_rate": 3.75609756097561e-05,
+ "loss": 1.3121,
+ "step": 77
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.20026129636576834,
+ "learning_rate": 3.804878048780488e-05,
+ "loss": 1.2436,
+ "step": 78
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.21064549243917835,
+ "learning_rate": 3.853658536585366e-05,
+ "loss": 1.2064,
+ "step": 79
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.22119482175714267,
+ "learning_rate": 3.9024390243902444e-05,
+ "loss": 1.2715,
+ "step": 80
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.23047133748844142,
+ "learning_rate": 3.951219512195122e-05,
+ "loss": 1.2888,
+ "step": 81
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.18741863156973176,
+ "learning_rate": 4e-05,
+ "loss": 1.248,
+ "step": 82
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.1747859810629604,
+ "learning_rate": 4.0487804878048786e-05,
+ "loss": 1.1683,
+ "step": 83
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.1896944798413341,
+ "learning_rate": 4.097560975609756e-05,
+ "loss": 1.2155,
+ "step": 84
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.18724128114363303,
+ "learning_rate": 4.1463414634146346e-05,
+ "loss": 1.2273,
+ "step": 85
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.17368125504855478,
+ "learning_rate": 4.195121951219513e-05,
+ "loss": 1.224,
+ "step": 86
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.18371141013625703,
+ "learning_rate": 4.2439024390243905e-05,
+ "loss": 1.2294,
+ "step": 87
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.1791029365673714,
+ "learning_rate": 4.292682926829269e-05,
+ "loss": 1.2895,
+ "step": 88
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.20259974283859655,
+ "learning_rate": 4.341463414634147e-05,
+ "loss": 1.1841,
+ "step": 89
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.17457456183272174,
+ "learning_rate": 4.390243902439025e-05,
+ "loss": 1.2357,
+ "step": 90
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.1815824380789748,
+ "learning_rate": 4.439024390243903e-05,
+ "loss": 1.2304,
+ "step": 91
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.17566480599583392,
+ "learning_rate": 4.4878048780487814e-05,
+ "loss": 1.242,
+ "step": 92
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.18422975005984474,
+ "learning_rate": 4.536585365853658e-05,
+ "loss": 1.2177,
+ "step": 93
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.16796781877940678,
+ "learning_rate": 4.5853658536585366e-05,
+ "loss": 1.1482,
+ "step": 94
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.18636131653783305,
+ "learning_rate": 4.634146341463415e-05,
+ "loss": 1.1758,
+ "step": 95
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.1823665700289814,
+ "learning_rate": 4.6829268292682926e-05,
+ "loss": 1.289,
+ "step": 96
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.1719900691262439,
+ "learning_rate": 4.731707317073171e-05,
+ "loss": 1.1626,
+ "step": 97
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.17937994168039778,
+ "learning_rate": 4.780487804878049e-05,
+ "loss": 1.175,
+ "step": 98
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.16631851422106986,
+ "learning_rate": 4.829268292682927e-05,
+ "loss": 1.2177,
+ "step": 99
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.19143696232800309,
+ "learning_rate": 4.878048780487805e-05,
+ "loss": 1.3071,
+ "step": 100
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.17859506638780318,
+ "learning_rate": 4.9268292682926835e-05,
+ "loss": 1.2351,
+ "step": 101
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.18381520321248196,
+ "learning_rate": 4.975609756097561e-05,
+ "loss": 1.2342,
+ "step": 102
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.17968218683773912,
+ "learning_rate": 5.0243902439024394e-05,
+ "loss": 1.2074,
+ "step": 103
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.18139489969339018,
+ "learning_rate": 5.073170731707318e-05,
+ "loss": 1.1558,
+ "step": 104
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.17366624842514394,
+ "learning_rate": 5.121951219512195e-05,
+ "loss": 1.1897,
+ "step": 105
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.16034845455223745,
+ "learning_rate": 5.1707317073170736e-05,
+ "loss": 1.179,
+ "step": 106
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.17583069577827776,
+ "learning_rate": 5.219512195121952e-05,
+ "loss": 1.1856,
+ "step": 107
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.1853758076989552,
+ "learning_rate": 5.26829268292683e-05,
+ "loss": 1.2072,
+ "step": 108
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.19597443965936462,
+ "learning_rate": 5.317073170731708e-05,
+ "loss": 1.2271,
+ "step": 109
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.1899206334098331,
+ "learning_rate": 5.365853658536586e-05,
+ "loss": 1.1961,
+ "step": 110
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.17463763837757018,
+ "learning_rate": 5.4146341463414645e-05,
+ "loss": 1.2049,
+ "step": 111
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.20431371701229986,
+ "learning_rate": 5.463414634146342e-05,
+ "loss": 1.2891,
+ "step": 112
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1814475107638498,
+ "learning_rate": 5.51219512195122e-05,
+ "loss": 1.2346,
+ "step": 113
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1883849423207823,
+ "learning_rate": 5.5609756097560974e-05,
+ "loss": 1.244,
+ "step": 114
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1857258128640568,
+ "learning_rate": 5.609756097560976e-05,
+ "loss": 1.2669,
+ "step": 115
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1740768514118401,
+ "learning_rate": 5.658536585365854e-05,
+ "loss": 1.2414,
+ "step": 116
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1919320335584178,
+ "learning_rate": 5.7073170731707317e-05,
+ "loss": 1.2886,
+ "step": 117
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.18288775167828136,
+ "learning_rate": 5.75609756097561e-05,
+ "loss": 1.1875,
+ "step": 118
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.18208588867750863,
+ "learning_rate": 5.804878048780488e-05,
+ "loss": 1.2388,
+ "step": 119
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.1743260015658331,
+ "learning_rate": 5.853658536585366e-05,
+ "loss": 1.1762,
+ "step": 120
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.17856046291517946,
+ "learning_rate": 5.902439024390244e-05,
+ "loss": 1.2888,
+ "step": 121
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.17493794870966536,
+ "learning_rate": 5.9512195121951225e-05,
+ "loss": 1.2222,
+ "step": 122
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.1909202655203384,
+ "learning_rate": 6.000000000000001e-05,
+ "loss": 1.2414,
+ "step": 123
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.18345819482834988,
+ "learning_rate": 6.0487804878048785e-05,
+ "loss": 1.2756,
+ "step": 124
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.2057069352956621,
+ "learning_rate": 6.097560975609757e-05,
+ "loss": 1.261,
+ "step": 125
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.299775882469108,
+ "learning_rate": 6.146341463414634e-05,
+ "loss": 1.2566,
+ "step": 126
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.1869687633018095,
+ "learning_rate": 6.195121951219513e-05,
+ "loss": 1.3039,
+ "step": 127
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.17747149926197442,
+ "learning_rate": 6.243902439024391e-05,
+ "loss": 1.2524,
+ "step": 128
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.17885157788044242,
+ "learning_rate": 6.29268292682927e-05,
+ "loss": 1.2455,
+ "step": 129
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.17617298187845123,
+ "learning_rate": 6.341463414634148e-05,
+ "loss": 1.2009,
+ "step": 130
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.20164176323497066,
+ "learning_rate": 6.390243902439025e-05,
+ "loss": 1.2634,
+ "step": 131
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.20459903417307612,
+ "learning_rate": 6.439024390243903e-05,
+ "loss": 1.1963,
+ "step": 132
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.1863755486334296,
+ "learning_rate": 6.487804878048781e-05,
+ "loss": 1.2387,
+ "step": 133
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.19265866140295207,
+ "learning_rate": 6.536585365853658e-05,
+ "loss": 1.2688,
+ "step": 134
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.1823425868969493,
+ "learning_rate": 6.585365853658536e-05,
+ "loss": 1.2041,
+ "step": 135
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.2016853266472781,
+ "learning_rate": 6.634146341463415e-05,
+ "loss": 1.1223,
+ "step": 136
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.17282675192463448,
+ "learning_rate": 6.682926829268293e-05,
+ "loss": 1.1879,
+ "step": 137
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.17398811693399288,
+ "learning_rate": 6.731707317073171e-05,
+ "loss": 1.2682,
+ "step": 138
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.18516916965434696,
+ "learning_rate": 6.78048780487805e-05,
+ "loss": 1.1666,
+ "step": 139
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.1852213129647933,
+ "learning_rate": 6.829268292682927e-05,
+ "loss": 1.2501,
+ "step": 140
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.17915948766591883,
+ "learning_rate": 6.878048780487805e-05,
+ "loss": 1.2264,
+ "step": 141
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.21599939417233183,
+ "learning_rate": 6.926829268292683e-05,
+ "loss": 1.2376,
+ "step": 142
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.17839304459521851,
+ "learning_rate": 6.975609756097562e-05,
+ "loss": 1.2353,
+ "step": 143
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.20826913231380875,
+ "learning_rate": 7.02439024390244e-05,
+ "loss": 1.1901,
+ "step": 144
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.20788894913361589,
+ "learning_rate": 7.073170731707318e-05,
+ "loss": 1.2577,
+ "step": 145
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.18420055842301297,
+ "learning_rate": 7.121951219512195e-05,
+ "loss": 1.1393,
+ "step": 146
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.19903048468685589,
+ "learning_rate": 7.170731707317073e-05,
+ "loss": 1.2321,
+ "step": 147
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.19074116314985748,
+ "learning_rate": 7.219512195121952e-05,
+ "loss": 1.1912,
+ "step": 148
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.2353816469403903,
+ "learning_rate": 7.26829268292683e-05,
+ "loss": 1.28,
+ "step": 149
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.21634875684769345,
+ "learning_rate": 7.317073170731708e-05,
+ "loss": 1.3312,
+ "step": 150
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.18290969006743918,
+ "learning_rate": 7.365853658536587e-05,
+ "loss": 1.2214,
+ "step": 151
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.18484243897545208,
+ "learning_rate": 7.414634146341465e-05,
+ "loss": 1.1895,
+ "step": 152
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.21882343112978872,
+ "learning_rate": 7.463414634146342e-05,
+ "loss": 1.2219,
+ "step": 153
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.19868284379241205,
+ "learning_rate": 7.51219512195122e-05,
+ "loss": 1.2176,
+ "step": 154
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.20912516312950613,
+ "learning_rate": 7.560975609756097e-05,
+ "loss": 1.242,
+ "step": 155
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.23811880045549916,
+ "learning_rate": 7.609756097560976e-05,
+ "loss": 1.2838,
+ "step": 156
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.19511077122033713,
+ "learning_rate": 7.658536585365854e-05,
+ "loss": 1.1594,
+ "step": 157
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.20094129399534238,
+ "learning_rate": 7.707317073170732e-05,
+ "loss": 1.2966,
+ "step": 158
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.19366245038292418,
+ "learning_rate": 7.75609756097561e-05,
+ "loss": 1.2246,
+ "step": 159
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.19409570223867306,
+ "learning_rate": 7.804878048780489e-05,
+ "loss": 1.2312,
+ "step": 160
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.2087258457033805,
+ "learning_rate": 7.853658536585366e-05,
+ "loss": 1.2169,
+ "step": 161
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.18765223996270428,
+ "learning_rate": 7.902439024390244e-05,
+ "loss": 1.2383,
+ "step": 162
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.20734180224147242,
+ "learning_rate": 7.951219512195122e-05,
+ "loss": 1.2587,
+ "step": 163
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.24690929540287834,
+ "learning_rate": 8e-05,
+ "loss": 1.1951,
+ "step": 164
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.2003538797619543,
+ "learning_rate": 7.999990914797545e-05,
+ "loss": 1.1982,
+ "step": 165
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.22469075613510484,
+ "learning_rate": 7.99996365923145e-05,
+ "loss": 1.2355,
+ "step": 166
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.21870100788336058,
+ "learning_rate": 7.999918233425526e-05,
+ "loss": 1.1103,
+ "step": 167
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.20939989594131886,
+ "learning_rate": 7.999854637586122e-05,
+ "loss": 1.1966,
+ "step": 168
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.43108211416237796,
+ "learning_rate": 7.999772872002132e-05,
+ "loss": 1.2882,
+ "step": 169
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.27045413432174487,
+ "learning_rate": 7.999672937044984e-05,
+ "loss": 1.2399,
+ "step": 170
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.19700483036740515,
+ "learning_rate": 7.999554833168642e-05,
+ "loss": 1.202,
+ "step": 171
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.3335979493370708,
+ "learning_rate": 7.999418560909604e-05,
+ "loss": 1.1995,
+ "step": 172
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.3165803974474567,
+ "learning_rate": 7.999264120886902e-05,
+ "loss": 1.1569,
+ "step": 173
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.1951699080346223,
+ "learning_rate": 7.999091513802093e-05,
+ "loss": 1.1778,
+ "step": 174
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.2087559121749787,
+ "learning_rate": 7.998900740439265e-05,
+ "loss": 1.1736,
+ "step": 175
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.20345180977460478,
+ "learning_rate": 7.998691801665024e-05,
+ "loss": 1.2281,
+ "step": 176
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.24617644827252333,
+ "learning_rate": 7.998464698428495e-05,
+ "loss": 1.2072,
+ "step": 177
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.2469050959356265,
+ "learning_rate": 7.998219431761318e-05,
+ "loss": 1.2242,
+ "step": 178
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.19529317748460623,
+ "learning_rate": 7.997956002777642e-05,
+ "loss": 1.2567,
+ "step": 179
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.19048389491381376,
+ "learning_rate": 7.99767441267412e-05,
+ "loss": 1.2982,
+ "step": 180
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.2085799116493225,
+ "learning_rate": 7.997374662729904e-05,
+ "loss": 1.1254,
+ "step": 181
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.20636853256378995,
+ "learning_rate": 7.997056754306636e-05,
+ "loss": 1.2435,
+ "step": 182
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.20590016382290252,
+ "learning_rate": 7.99672068884845e-05,
+ "loss": 1.2658,
+ "step": 183
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.1931166169764433,
+ "learning_rate": 7.996366467881955e-05,
+ "loss": 1.1637,
+ "step": 184
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.18873318157988098,
+ "learning_rate": 7.995994093016237e-05,
+ "loss": 1.1335,
+ "step": 185
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.19210254625199108,
+ "learning_rate": 7.995603565942846e-05,
+ "loss": 1.1928,
+ "step": 186
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.2130986479765664,
+ "learning_rate": 7.995194888435792e-05,
+ "loss": 1.2158,
+ "step": 187
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.22003854501814088,
+ "learning_rate": 7.994768062351532e-05,
+ "loss": 1.2288,
+ "step": 188
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.20330803191993058,
+ "learning_rate": 7.994323089628968e-05,
+ "loss": 1.2426,
+ "step": 189
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.20567314642208634,
+ "learning_rate": 7.993859972289434e-05,
+ "loss": 1.2649,
+ "step": 190
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.21556663727342962,
+ "learning_rate": 7.993378712436686e-05,
+ "loss": 1.2545,
+ "step": 191
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.20309165469109888,
+ "learning_rate": 7.992879312256897e-05,
+ "loss": 1.3338,
+ "step": 192
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.19574356669421325,
+ "learning_rate": 7.992361774018641e-05,
+ "loss": 1.278,
+ "step": 193
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.2763613746722313,
+ "learning_rate": 7.991826100072891e-05,
+ "loss": 1.2571,
+ "step": 194
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.19346552479915102,
+ "learning_rate": 7.991272292852996e-05,
+ "loss": 1.2027,
+ "step": 195
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.2281167812123908,
+ "learning_rate": 7.990700354874683e-05,
+ "loss": 1.2586,
+ "step": 196
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.19699013712137542,
+ "learning_rate": 7.990110288736042e-05,
+ "loss": 1.1371,
+ "step": 197
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.21768209981475933,
+ "learning_rate": 7.989502097117503e-05,
+ "loss": 1.2522,
+ "step": 198
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.21335427847754582,
+ "learning_rate": 7.988875782781838e-05,
+ "loss": 1.2437,
+ "step": 199
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.21856710629066897,
+ "learning_rate": 7.988231348574147e-05,
+ "loss": 1.2135,
+ "step": 200
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.20482062658774797,
+ "learning_rate": 7.987568797421836e-05,
+ "loss": 1.1755,
+ "step": 201
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.2017756813960897,
+ "learning_rate": 7.986888132334608e-05,
+ "loss": 1.1699,
+ "step": 202
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.20496443848153809,
+ "learning_rate": 7.986189356404458e-05,
+ "loss": 1.2125,
+ "step": 203
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.2134603800558358,
+ "learning_rate": 7.985472472805643e-05,
+ "loss": 1.2391,
+ "step": 204
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.2364175573420861,
+ "learning_rate": 7.98473748479468e-05,
+ "loss": 1.2384,
+ "step": 205
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.1872419861598724,
+ "learning_rate": 7.983984395710326e-05,
+ "loss": 1.1457,
+ "step": 206
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.28222194007095774,
+ "learning_rate": 7.983213208973566e-05,
+ "loss": 1.2952,
+ "step": 207
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.1916094851162064,
+ "learning_rate": 7.982423928087593e-05,
+ "loss": 1.1763,
+ "step": 208
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.18446245256166657,
+ "learning_rate": 7.981616556637795e-05,
+ "loss": 1.1863,
+ "step": 209
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.195191961022491,
+ "learning_rate": 7.980791098291737e-05,
+ "loss": 1.2036,
+ "step": 210
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.2652439657825496,
+ "learning_rate": 7.979947556799151e-05,
+ "loss": 1.2834,
+ "step": 211
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.24308438957843412,
+ "learning_rate": 7.979085935991906e-05,
+ "loss": 1.234,
+ "step": 212
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.21294701043622016,
+ "learning_rate": 7.978206239784004e-05,
+ "loss": 1.3006,
+ "step": 213
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.25809277041859524,
+ "learning_rate": 7.977308472171553e-05,
+ "loss": 1.2272,
+ "step": 214
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.193463860107294,
+ "learning_rate": 7.976392637232754e-05,
+ "loss": 1.2295,
+ "step": 215
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.2150023760609626,
+ "learning_rate": 7.975458739127877e-05,
+ "loss": 1.2135,
+ "step": 216
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.22590495955605894,
+ "learning_rate": 7.974506782099253e-05,
+ "loss": 1.2532,
+ "step": 217
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.21023744668403702,
+ "learning_rate": 7.973536770471242e-05,
+ "loss": 1.2472,
+ "step": 218
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.2345749799511543,
+ "learning_rate": 7.972548708650218e-05,
+ "loss": 1.1791,
+ "step": 219
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.2158876734005217,
+ "learning_rate": 7.971542601124553e-05,
+ "loss": 1.2483,
+ "step": 220
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.29455339949432446,
+ "learning_rate": 7.970518452464593e-05,
+ "loss": 1.2894,
+ "step": 221
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.23983708730626851,
+ "learning_rate": 7.969476267322636e-05,
+ "loss": 1.271,
+ "step": 222
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.1922400905426158,
+ "learning_rate": 7.968416050432912e-05,
+ "loss": 1.2139,
+ "step": 223
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.2238136844422931,
+ "learning_rate": 7.967337806611568e-05,
+ "loss": 1.2655,
+ "step": 224
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.21230292828267672,
+ "learning_rate": 7.966241540756631e-05,
+ "loss": 1.2406,
+ "step": 225
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.26656119419070456,
+ "learning_rate": 7.965127257848004e-05,
+ "loss": 1.2595,
+ "step": 226
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.22381385502992684,
+ "learning_rate": 7.963994962947426e-05,
+ "loss": 1.1737,
+ "step": 227
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20056702203994298,
+ "learning_rate": 7.962844661198462e-05,
+ "loss": 1.1969,
+ "step": 228
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20148701321526885,
+ "learning_rate": 7.961676357826478e-05,
+ "loss": 1.2151,
+ "step": 229
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20034834807028637,
+ "learning_rate": 7.960490058138604e-05,
+ "loss": 1.1455,
+ "step": 230
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.21050838521846033,
+ "learning_rate": 7.959285767523732e-05,
+ "loss": 1.2223,
+ "step": 231
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20904772138969777,
+ "learning_rate": 7.95806349145247e-05,
+ "loss": 1.2534,
+ "step": 232
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.20307877304792957,
+ "learning_rate": 7.956823235477134e-05,
+ "loss": 1.1352,
+ "step": 233
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.20501105270897094,
+ "learning_rate": 7.95556500523171e-05,
+ "loss": 1.2031,
+ "step": 234
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.19800586972038586,
+ "learning_rate": 7.954288806431838e-05,
+ "loss": 1.2567,
+ "step": 235
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.2175102450594135,
+ "learning_rate": 7.952994644874777e-05,
+ "loss": 1.2538,
+ "step": 236
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.22698189300067595,
+ "learning_rate": 7.951682526439391e-05,
+ "loss": 1.3088,
+ "step": 237
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.19208392014975315,
+ "learning_rate": 7.950352457086109e-05,
+ "loss": 1.2336,
+ "step": 238
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.27004086334319655,
+ "learning_rate": 7.949004442856905e-05,
+ "loss": 1.2012,
+ "step": 239
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.23420974954538043,
+ "learning_rate": 7.947638489875272e-05,
+ "loss": 1.2244,
+ "step": 240
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.20514399124802024,
+ "learning_rate": 7.946254604346186e-05,
+ "loss": 1.2548,
+ "step": 241
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.19334973602372896,
+ "learning_rate": 7.944852792556092e-05,
+ "loss": 1.2104,
+ "step": 242
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.1992640714537956,
+ "learning_rate": 7.943433060872858e-05,
+ "loss": 1.2628,
+ "step": 243
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.203284617090413,
+ "learning_rate": 7.941995415745761e-05,
+ "loss": 1.2002,
+ "step": 244
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.22795306969682058,
+ "learning_rate": 7.94053986370545e-05,
+ "loss": 1.2215,
+ "step": 245
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.20789041346838505,
+ "learning_rate": 7.939066411363915e-05,
+ "loss": 1.0998,
+ "step": 246
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.22354868884742066,
+ "learning_rate": 7.937575065414464e-05,
+ "loss": 1.2564,
+ "step": 247
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.21176392726647736,
+ "learning_rate": 7.936065832631687e-05,
+ "loss": 1.2816,
+ "step": 248
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.19967179557235587,
+ "learning_rate": 7.934538719871427e-05,
+ "loss": 1.1961,
+ "step": 249
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.210819577350627,
+ "learning_rate": 7.932993734070747e-05,
+ "loss": 1.2167,
+ "step": 250
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.21537794551756187,
+ "learning_rate": 7.931430882247903e-05,
+ "loss": 1.2341,
+ "step": 251
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.22850872387256574,
+ "learning_rate": 7.929850171502304e-05,
+ "loss": 1.1686,
+ "step": 252
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.22380366415076383,
+ "learning_rate": 7.928251609014493e-05,
+ "loss": 1.1462,
+ "step": 253
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.22426923149036065,
+ "learning_rate": 7.926635202046102e-05,
+ "loss": 1.1792,
+ "step": 254
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.42082703321103965,
+ "learning_rate": 7.925000957939822e-05,
+ "loss": 1.2718,
+ "step": 255
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.2235432774854074,
+ "learning_rate": 7.92334888411937e-05,
+ "loss": 1.2598,
+ "step": 256
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.281644028934108,
+ "learning_rate": 7.92167898808946e-05,
+ "loss": 1.2205,
+ "step": 257
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.2037705143888748,
+ "learning_rate": 7.919991277435763e-05,
+ "loss": 1.1737,
+ "step": 258
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.20917419230028977,
+ "learning_rate": 7.918285759824879e-05,
+ "loss": 1.2035,
+ "step": 259
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.20510847570635518,
+ "learning_rate": 7.916562443004292e-05,
+ "loss": 1.2135,
+ "step": 260
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.25172483071092466,
+ "learning_rate": 7.914821334802342e-05,
+ "loss": 1.2218,
+ "step": 261
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.21102706700634313,
+ "learning_rate": 7.91306244312819e-05,
+ "loss": 1.1738,
+ "step": 262
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.22626060872645815,
+ "learning_rate": 7.911285775971781e-05,
+ "loss": 1.238,
+ "step": 263
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.22448567539778486,
+ "learning_rate": 7.909491341403805e-05,
+ "loss": 1.2404,
+ "step": 264
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.2019099786139193,
+ "learning_rate": 7.907679147575661e-05,
+ "loss": 1.213,
+ "step": 265
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.24307234839096267,
+ "learning_rate": 7.905849202719422e-05,
+ "loss": 1.2322,
+ "step": 266
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.19801890521743487,
+ "learning_rate": 7.904001515147802e-05,
+ "loss": 1.2448,
+ "step": 267
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.2102742273575385,
+ "learning_rate": 7.902136093254106e-05,
+ "loss": 1.1657,
+ "step": 268
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.2173464476815016,
+ "learning_rate": 7.900252945512201e-05,
+ "loss": 1.2549,
+ "step": 269
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.20957275458699595,
+ "learning_rate": 7.898352080476479e-05,
+ "loss": 1.2536,
+ "step": 270
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.20691966388952363,
+ "learning_rate": 7.896433506781811e-05,
+ "loss": 1.2661,
+ "step": 271
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.2276662275112648,
+ "learning_rate": 7.894497233143509e-05,
+ "loss": 1.2409,
+ "step": 272
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.23854109569301263,
+ "learning_rate": 7.892543268357297e-05,
+ "loss": 1.2681,
+ "step": 273
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.2233864156677627,
+ "learning_rate": 7.890571621299252e-05,
+ "loss": 1.1687,
+ "step": 274
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.20114129147925475,
+ "learning_rate": 7.888582300925787e-05,
+ "loss": 1.2184,
+ "step": 275
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.2154654670569462,
+ "learning_rate": 7.886575316273586e-05,
+ "loss": 1.1982,
+ "step": 276
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.2292982209343639,
+ "learning_rate": 7.884550676459583e-05,
+ "loss": 1.2129,
+ "step": 277
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.21302713135229548,
+ "learning_rate": 7.882508390680908e-05,
+ "loss": 1.1605,
+ "step": 278
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.2123661020671048,
+ "learning_rate": 7.88044846821485e-05,
+ "loss": 1.2308,
+ "step": 279
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.2080577410800404,
+ "learning_rate": 7.878370918418818e-05,
+ "loss": 1.2195,
+ "step": 280
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.19663901881127385,
+ "learning_rate": 7.876275750730289e-05,
+ "loss": 1.1591,
+ "step": 281
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.20534502031312163,
+ "learning_rate": 7.874162974666776e-05,
+ "loss": 1.2664,
+ "step": 282
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.23240445399513837,
+ "learning_rate": 7.872032599825779e-05,
+ "loss": 1.2151,
+ "step": 283
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.2672527316717507,
+ "learning_rate": 7.86988463588474e-05,
+ "loss": 1.2406,
+ "step": 284
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.19893903058743695,
+ "learning_rate": 7.867719092601003e-05,
+ "loss": 1.1291,
+ "step": 285
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.33275268109930917,
+ "learning_rate": 7.865535979811768e-05,
+ "loss": 1.1406,
+ "step": 286
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.2373619455690358,
+ "learning_rate": 7.863335307434045e-05,
+ "loss": 1.2799,
+ "step": 287
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.263235735390858,
+ "learning_rate": 7.861117085464612e-05,
+ "loss": 1.2415,
+ "step": 288
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.25884281780784324,
+ "learning_rate": 7.858881323979965e-05,
+ "loss": 1.3919,
+ "step": 289
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.25426288332255736,
+ "learning_rate": 7.85662803313628e-05,
+ "loss": 1.174,
+ "step": 290
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.26655405527881243,
+ "learning_rate": 7.854357223169356e-05,
+ "loss": 1.2806,
+ "step": 291
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.20909844432349833,
+ "learning_rate": 7.852068904394579e-05,
+ "loss": 1.2627,
+ "step": 292
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.21307115068935759,
+ "learning_rate": 7.849763087206866e-05,
+ "loss": 1.1879,
+ "step": 293
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.25009949471398946,
+ "learning_rate": 7.847439782080628e-05,
+ "loss": 1.2881,
+ "step": 294
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.20960783418679174,
+ "learning_rate": 7.845098999569712e-05,
+ "loss": 1.2723,
+ "step": 295
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.24968832437925104,
+ "learning_rate": 7.842740750307362e-05,
+ "loss": 1.2029,
+ "step": 296
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.22981196585125677,
+ "learning_rate": 7.84036504500616e-05,
+ "loss": 1.1695,
+ "step": 297
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.2320606844751365,
+ "learning_rate": 7.837971894457991e-05,
+ "loss": 1.2317,
+ "step": 298
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.23051459673906124,
+ "learning_rate": 7.835561309533981e-05,
+ "loss": 1.2046,
+ "step": 299
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.2510027231060586,
+ "learning_rate": 7.833133301184457e-05,
+ "loss": 1.199,
+ "step": 300
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.23601180466018787,
+ "learning_rate": 7.830687880438895e-05,
+ "loss": 1.1755,
+ "step": 301
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.24740820934385369,
+ "learning_rate": 7.828225058405864e-05,
+ "loss": 1.2054,
+ "step": 302
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.23065372979111173,
+ "learning_rate": 7.825744846272984e-05,
+ "loss": 1.2066,
+ "step": 303
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.22385077334838213,
+ "learning_rate": 7.823247255306866e-05,
+ "loss": 1.2147,
+ "step": 304
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.42981213948386104,
+ "learning_rate": 7.820732296853074e-05,
+ "loss": 1.2314,
+ "step": 305
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.21122844902751076,
+ "learning_rate": 7.818199982336058e-05,
+ "loss": 1.1462,
+ "step": 306
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.23374869692118933,
+ "learning_rate": 7.815650323259117e-05,
+ "loss": 1.2051,
+ "step": 307
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.21662363795962128,
+ "learning_rate": 7.813083331204332e-05,
+ "loss": 1.1575,
+ "step": 308
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.2088315773384112,
+ "learning_rate": 7.810499017832526e-05,
+ "loss": 1.1316,
+ "step": 309
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.2095238410730976,
+ "learning_rate": 7.807897394883203e-05,
+ "loss": 1.2087,
+ "step": 310
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.22672932127256515,
+ "learning_rate": 7.805278474174499e-05,
+ "loss": 1.2512,
+ "step": 311
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.21873052340922736,
+ "learning_rate": 7.802642267603126e-05,
+ "loss": 1.1909,
+ "step": 312
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.219814521916342,
+ "learning_rate": 7.79998878714432e-05,
+ "loss": 1.1669,
+ "step": 313
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.3049426027257317,
+ "learning_rate": 7.797318044851786e-05,
+ "loss": 1.1797,
+ "step": 314
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.22309435690065985,
+ "learning_rate": 7.794630052857638e-05,
+ "loss": 1.1417,
+ "step": 315
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.3891885169154885,
+ "learning_rate": 7.791924823372354e-05,
+ "loss": 1.2369,
+ "step": 316
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.24780269452456372,
+ "learning_rate": 7.789202368684711e-05,
+ "loss": 1.2521,
+ "step": 317
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.21660460720269362,
+ "learning_rate": 7.786462701161738e-05,
+ "loss": 1.2151,
+ "step": 318
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.23635409466561857,
+ "learning_rate": 7.783705833248649e-05,
+ "loss": 1.2363,
+ "step": 319
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.2616135839903218,
+ "learning_rate": 7.780931777468797e-05,
+ "loss": 1.2428,
+ "step": 320
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.21461059159245083,
+ "learning_rate": 7.77814054642361e-05,
+ "loss": 1.1434,
+ "step": 321
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.25348824286656163,
+ "learning_rate": 7.775332152792539e-05,
+ "loss": 1.2368,
+ "step": 322
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.22275034726331247,
+ "learning_rate": 7.772506609332995e-05,
+ "loss": 1.1827,
+ "step": 323
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.25030821228147526,
+ "learning_rate": 7.769663928880298e-05,
+ "loss": 1.2428,
+ "step": 324
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.22251804398745534,
+ "learning_rate": 7.766804124347608e-05,
+ "loss": 1.1889,
+ "step": 325
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.23381455520411995,
+ "learning_rate": 7.763927208725879e-05,
+ "loss": 1.2115,
+ "step": 326
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.27341902651946226,
+ "learning_rate": 7.761033195083791e-05,
+ "loss": 1.2535,
+ "step": 327
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.24862471659814522,
+ "learning_rate": 7.758122096567694e-05,
+ "loss": 1.2128,
+ "step": 328
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.2251357082045494,
+ "learning_rate": 7.755193926401547e-05,
+ "loss": 1.2334,
+ "step": 329
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.3173274941622932,
+ "learning_rate": 7.752248697886857e-05,
+ "loss": 1.226,
+ "step": 330
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.23056440717672175,
+ "learning_rate": 7.74928642440263e-05,
+ "loss": 1.2339,
+ "step": 331
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2801507500859342,
+ "learning_rate": 7.746307119405286e-05,
+ "loss": 1.287,
+ "step": 332
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2267818430426272,
+ "learning_rate": 7.743310796428622e-05,
+ "loss": 1.1916,
+ "step": 333
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2777329160365585,
+ "learning_rate": 7.74029746908374e-05,
+ "loss": 1.252,
+ "step": 334
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.25289169762353,
+ "learning_rate": 7.737267151058983e-05,
+ "loss": 1.2153,
+ "step": 335
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2424670686901653,
+ "learning_rate": 7.734219856119875e-05,
+ "loss": 1.2227,
+ "step": 336
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.22747092217441645,
+ "learning_rate": 7.731155598109067e-05,
+ "loss": 1.19,
+ "step": 337
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.2307810940100189,
+ "learning_rate": 7.728074390946257e-05,
+ "loss": 1.1818,
+ "step": 338
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.2583402574655623,
+ "learning_rate": 7.724976248628142e-05,
+ "loss": 1.1608,
+ "step": 339
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.22140209760890694,
+ "learning_rate": 7.721861185228347e-05,
+ "loss": 1.1245,
+ "step": 340
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.25859310758244686,
+ "learning_rate": 7.718729214897362e-05,
+ "loss": 1.2247,
+ "step": 341
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.26371179531372124,
+ "learning_rate": 7.715580351862482e-05,
+ "loss": 1.2128,
+ "step": 342
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.26575541302851047,
+ "learning_rate": 7.712414610427733e-05,
+ "loss": 1.2443,
+ "step": 343
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.269978305197599,
+ "learning_rate": 7.709232004973816e-05,
+ "loss": 1.2231,
+ "step": 344
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.26583998705977047,
+ "learning_rate": 7.70603254995804e-05,
+ "loss": 1.2476,
+ "step": 345
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.24256062164066097,
+ "learning_rate": 7.702816259914253e-05,
+ "loss": 1.2901,
+ "step": 346
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.3463123472658915,
+ "learning_rate": 7.699583149452779e-05,
+ "loss": 1.3277,
+ "step": 347
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.2269096590531878,
+ "learning_rate": 7.696333233260345e-05,
+ "loss": 1.2047,
+ "step": 348
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.25136883001050025,
+ "learning_rate": 7.693066526100031e-05,
+ "loss": 1.1619,
+ "step": 349
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.2565112571116145,
+ "learning_rate": 7.68978304281118e-05,
+ "loss": 1.2389,
+ "step": 350
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.22175779550828703,
+ "learning_rate": 7.686482798309349e-05,
+ "loss": 1.2238,
+ "step": 351
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.22588304332216555,
+ "learning_rate": 7.683165807586234e-05,
+ "loss": 1.174,
+ "step": 352
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.24889474296529737,
+ "learning_rate": 7.6798320857096e-05,
+ "loss": 1.2366,
+ "step": 353
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.27339703806525034,
+ "learning_rate": 7.676481647823214e-05,
+ "loss": 1.2356,
+ "step": 354
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.23424666722888365,
+ "learning_rate": 7.673114509146782e-05,
+ "loss": 1.2089,
+ "step": 355
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.27978285392461766,
+ "learning_rate": 7.66973068497587e-05,
+ "loss": 1.2609,
+ "step": 356
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.2509423350138824,
+ "learning_rate": 7.666330190681844e-05,
+ "loss": 1.1777,
+ "step": 357
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.23007730927468031,
+ "learning_rate": 7.662913041711793e-05,
+ "loss": 1.154,
+ "step": 358
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.2438648674953112,
+ "learning_rate": 7.659479253588462e-05,
+ "loss": 1.2257,
+ "step": 359
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.28816093242092233,
+ "learning_rate": 7.65602884191018e-05,
+ "loss": 1.2558,
+ "step": 360
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.24972815300596035,
+ "learning_rate": 7.652561822350793e-05,
+ "loss": 1.2837,
+ "step": 361
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.2543189139697063,
+ "learning_rate": 7.649078210659587e-05,
+ "loss": 1.2193,
+ "step": 362
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.2237937956718952,
+ "learning_rate": 7.645578022661224e-05,
+ "loss": 1.2237,
+ "step": 363
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.29742029408787396,
+ "learning_rate": 7.642061274255657e-05,
+ "loss": 1.2116,
+ "step": 364
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2462883147335493,
+ "learning_rate": 7.638527981418075e-05,
+ "loss": 1.1827,
+ "step": 365
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2647802498907096,
+ "learning_rate": 7.634978160198817e-05,
+ "loss": 1.2739,
+ "step": 366
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.22360398779217264,
+ "learning_rate": 7.631411826723306e-05,
+ "loss": 1.2185,
+ "step": 367
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2635048004593543,
+ "learning_rate": 7.627828997191973e-05,
+ "loss": 1.2317,
+ "step": 368
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2764803449917684,
+ "learning_rate": 7.624229687880184e-05,
+ "loss": 1.1923,
+ "step": 369
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.25724943233414527,
+ "learning_rate": 7.620613915138166e-05,
+ "loss": 1.2218,
+ "step": 370
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.2858318045794755,
+ "learning_rate": 7.61698169539093e-05,
+ "loss": 1.1496,
+ "step": 371
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.23547216647460364,
+ "learning_rate": 7.613333045138206e-05,
+ "loss": 1.1905,
+ "step": 372
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.22984814903684375,
+ "learning_rate": 7.609667980954355e-05,
+ "loss": 1.2009,
+ "step": 373
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.2551903754079084,
+ "learning_rate": 7.605986519488301e-05,
+ "loss": 1.2042,
+ "step": 374
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.2508257410125616,
+ "learning_rate": 7.602288677463457e-05,
+ "loss": 1.2468,
+ "step": 375
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.25324577774935964,
+ "learning_rate": 7.598574471677644e-05,
+ "loss": 1.2603,
+ "step": 376
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.35888776531769967,
+ "learning_rate": 7.59484391900302e-05,
+ "loss": 1.1929,
+ "step": 377
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.22048517191014724,
+ "learning_rate": 7.591097036385994e-05,
+ "loss": 1.1783,
+ "step": 378
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.2781160412746083,
+ "learning_rate": 7.587333840847162e-05,
+ "loss": 1.3397,
+ "step": 379
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.24033046830332258,
+ "learning_rate": 7.583554349481222e-05,
+ "loss": 1.2436,
+ "step": 380
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.26413762380260003,
+ "learning_rate": 7.579758579456893e-05,
+ "loss": 1.1917,
+ "step": 381
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.2390937887338632,
+ "learning_rate": 7.575946548016847e-05,
+ "loss": 1.2186,
+ "step": 382
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.25131263043429275,
+ "learning_rate": 7.572118272477622e-05,
+ "loss": 1.2538,
+ "step": 383
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.223974104870702,
+ "learning_rate": 7.568273770229546e-05,
+ "loss": 1.2165,
+ "step": 384
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.25840356830252875,
+ "learning_rate": 7.564413058736663e-05,
+ "loss": 1.1848,
+ "step": 385
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.2723156683076603,
+ "learning_rate": 7.560536155536641e-05,
+ "loss": 1.1982,
+ "step": 386
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.265687427976889,
+ "learning_rate": 7.556643078240708e-05,
+ "loss": 1.231,
+ "step": 387
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.25152762080976077,
+ "learning_rate": 7.552733844533562e-05,
+ "loss": 1.1974,
+ "step": 388
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.2366049485053541,
+ "learning_rate": 7.548808472173292e-05,
+ "loss": 1.3119,
+ "step": 389
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.22092196577077122,
+ "learning_rate": 7.5448669789913e-05,
+ "loss": 1.195,
+ "step": 390
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.22667521540462374,
+ "learning_rate": 7.540909382892217e-05,
+ "loss": 1.1431,
+ "step": 391
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.25432207282646513,
+ "learning_rate": 7.536935701853823e-05,
+ "loss": 1.2173,
+ "step": 392
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.29950506457923864,
+ "learning_rate": 7.53294595392697e-05,
+ "loss": 1.1962,
+ "step": 393
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.24735689607229913,
+ "learning_rate": 7.528940157235487e-05,
+ "loss": 1.2053,
+ "step": 394
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.24394198607459663,
+ "learning_rate": 7.524918329976114e-05,
+ "loss": 1.1979,
+ "step": 395
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.2630369372689188,
+ "learning_rate": 7.520880490418409e-05,
+ "loss": 1.2111,
+ "step": 396
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.26275028416291457,
+ "learning_rate": 7.516826656904664e-05,
+ "loss": 1.2133,
+ "step": 397
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.23938074620956928,
+ "learning_rate": 7.512756847849831e-05,
+ "loss": 1.1355,
+ "step": 398
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.3724960610098138,
+ "learning_rate": 7.508671081741428e-05,
+ "loss": 1.2572,
+ "step": 399
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.24161685847894723,
+ "learning_rate": 7.504569377139462e-05,
+ "loss": 1.1706,
+ "step": 400
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.26121591322670523,
+ "learning_rate": 7.50045175267634e-05,
+ "loss": 1.2135,
+ "step": 401
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2465579498164775,
+ "learning_rate": 7.496318227056788e-05,
+ "loss": 1.1641,
+ "step": 402
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2556288696122787,
+ "learning_rate": 7.492168819057767e-05,
+ "loss": 1.2939,
+ "step": 403
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.261481216336303,
+ "learning_rate": 7.488003547528382e-05,
+ "loss": 1.2026,
+ "step": 404
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2389415135676362,
+ "learning_rate": 7.483822431389799e-05,
+ "loss": 1.2131,
+ "step": 405
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2559201956627192,
+ "learning_rate": 7.479625489635162e-05,
+ "loss": 1.1246,
+ "step": 406
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.27127932491822604,
+ "learning_rate": 7.475412741329504e-05,
+ "loss": 1.2429,
+ "step": 407
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.27006004008695594,
+ "learning_rate": 7.47118420560966e-05,
+ "loss": 1.2388,
+ "step": 408
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.23716823297200537,
+ "learning_rate": 7.466939901684182e-05,
+ "loss": 1.1264,
+ "step": 409
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.2885373898669248,
+ "learning_rate": 7.462679848833252e-05,
+ "loss": 1.2786,
+ "step": 410
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.49215227598639927,
+ "learning_rate": 7.458404066408588e-05,
+ "loss": 1.2386,
+ "step": 411
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.24235735604947403,
+ "learning_rate": 7.454112573833368e-05,
+ "loss": 1.1423,
+ "step": 412
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.2584614748054343,
+ "learning_rate": 7.449805390602127e-05,
+ "loss": 1.2669,
+ "step": 413
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.23806123085998873,
+ "learning_rate": 7.445482536280684e-05,
+ "loss": 1.1763,
+ "step": 414
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.24459517607786851,
+ "learning_rate": 7.441144030506043e-05,
+ "loss": 1.198,
+ "step": 415
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.25801616402700395,
+ "learning_rate": 7.436789892986304e-05,
+ "loss": 1.2136,
+ "step": 416
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.2814819942392514,
+ "learning_rate": 7.432420143500578e-05,
+ "loss": 1.2398,
+ "step": 417
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.22134709322606153,
+ "learning_rate": 7.428034801898893e-05,
+ "loss": 1.1592,
+ "step": 418
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.2899677536995633,
+ "learning_rate": 7.42363388810211e-05,
+ "loss": 1.2296,
+ "step": 419
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.24005943230262294,
+ "learning_rate": 7.419217422101822e-05,
+ "loss": 1.2223,
+ "step": 420
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.26417562369496167,
+ "learning_rate": 7.414785423960275e-05,
+ "loss": 1.2261,
+ "step": 421
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.2580815883535521,
+ "learning_rate": 7.410337913810271e-05,
+ "loss": 1.2021,
+ "step": 422
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.25242217589496435,
+ "learning_rate": 7.405874911855071e-05,
+ "loss": 1.239,
+ "step": 423
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.21991733999839932,
+ "learning_rate": 7.401396438368315e-05,
+ "loss": 1.1716,
+ "step": 424
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.40116538322720213,
+ "learning_rate": 7.396902513693924e-05,
+ "loss": 1.2773,
+ "step": 425
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.277333939455099,
+ "learning_rate": 7.392393158246002e-05,
+ "loss": 1.2574,
+ "step": 426
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.27146087746385755,
+ "learning_rate": 7.387868392508756e-05,
+ "loss": 1.2243,
+ "step": 427
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.255881055620786,
+ "learning_rate": 7.38332823703639e-05,
+ "loss": 1.223,
+ "step": 428
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.24807364856677255,
+ "learning_rate": 7.378772712453021e-05,
+ "loss": 1.1985,
+ "step": 429
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.25746257617764423,
+ "learning_rate": 7.37420183945258e-05,
+ "loss": 1.2502,
+ "step": 430
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.28851991049982234,
+ "learning_rate": 7.369615638798722e-05,
+ "loss": 1.2535,
+ "step": 431
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.24113389811604363,
+ "learning_rate": 7.365014131324725e-05,
+ "loss": 1.2227,
+ "step": 432
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.2414465151257969,
+ "learning_rate": 7.360397337933405e-05,
+ "loss": 1.1884,
+ "step": 433
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.2735463134699831,
+ "learning_rate": 7.355765279597011e-05,
+ "loss": 1.2756,
+ "step": 434
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.2588437452987293,
+ "learning_rate": 7.351117977357139e-05,
+ "loss": 1.2108,
+ "step": 435
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.26573294117796553,
+ "learning_rate": 7.346455452324629e-05,
+ "loss": 1.1821,
+ "step": 436
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.2555476577827304,
+ "learning_rate": 7.341777725679473e-05,
+ "loss": 1.1937,
+ "step": 437
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.2867704132108098,
+ "learning_rate": 7.337084818670716e-05,
+ "loss": 1.2272,
+ "step": 438
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.27726678115981157,
+ "learning_rate": 7.332376752616367e-05,
+ "loss": 1.2331,
+ "step": 439
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.26955338021079955,
+ "learning_rate": 7.32765354890329e-05,
+ "loss": 1.1731,
+ "step": 440
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.25250321202536524,
+ "learning_rate": 7.322915228987116e-05,
+ "loss": 1.2653,
+ "step": 441
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.24748844179765395,
+ "learning_rate": 7.318161814392143e-05,
+ "loss": 1.24,
+ "step": 442
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.28177805247356325,
+ "learning_rate": 7.313393326711239e-05,
+ "loss": 1.185,
+ "step": 443
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.24093242000396312,
+ "learning_rate": 7.30860978760574e-05,
+ "loss": 1.1994,
+ "step": 444
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.26277803901457075,
+ "learning_rate": 7.30381121880536e-05,
+ "loss": 1.212,
+ "step": 445
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2506524258682433,
+ "learning_rate": 7.298997642108079e-05,
+ "loss": 1.2421,
+ "step": 446
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2840599700015824,
+ "learning_rate": 7.294169079380061e-05,
+ "loss": 1.1818,
+ "step": 447
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.24892184038117549,
+ "learning_rate": 7.289325552555538e-05,
+ "loss": 1.1916,
+ "step": 448
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2700898428541357,
+ "learning_rate": 7.284467083636722e-05,
+ "loss": 1.2517,
+ "step": 449
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2617848546539419,
+ "learning_rate": 7.279593694693698e-05,
+ "loss": 1.2063,
+ "step": 450
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2698278585334131,
+ "learning_rate": 7.274705407864332e-05,
+ "loss": 1.194,
+ "step": 451
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.23678313024953834,
+ "learning_rate": 7.26980224535416e-05,
+ "loss": 1.2349,
+ "step": 452
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.24851875792002978,
+ "learning_rate": 7.264884229436293e-05,
+ "loss": 1.1758,
+ "step": 453
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.24122080121681125,
+ "learning_rate": 7.259951382451318e-05,
+ "loss": 1.1962,
+ "step": 454
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.22741322959884405,
+ "learning_rate": 7.25500372680719e-05,
+ "loss": 1.1702,
+ "step": 455
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.2297475610861458,
+ "learning_rate": 7.250041284979137e-05,
+ "loss": 1.1466,
+ "step": 456
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.3057605989721467,
+ "learning_rate": 7.245064079509553e-05,
+ "loss": 1.246,
+ "step": 457
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.2719638501597136,
+ "learning_rate": 7.240072133007899e-05,
+ "loss": 1.2184,
+ "step": 458
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.2436807816414479,
+ "learning_rate": 7.235065468150593e-05,
+ "loss": 1.2324,
+ "step": 459
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.23436349430255515,
+ "learning_rate": 7.23004410768092e-05,
+ "loss": 1.1813,
+ "step": 460
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.2398940990211377,
+ "learning_rate": 7.22500807440892e-05,
+ "loss": 1.1924,
+ "step": 461
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.2605716625062531,
+ "learning_rate": 7.219957391211281e-05,
+ "loss": 1.182,
+ "step": 462
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.260462524570941,
+ "learning_rate": 7.214892081031244e-05,
+ "loss": 1.2136,
+ "step": 463
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.21979766512306334,
+ "learning_rate": 7.209812166878491e-05,
+ "loss": 1.2066,
+ "step": 464
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.23324453647530663,
+ "learning_rate": 7.204717671829051e-05,
+ "loss": 1.1657,
+ "step": 465
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.2529434935507481,
+ "learning_rate": 7.199608619025177e-05,
+ "loss": 1.2093,
+ "step": 466
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.25371701891720116,
+ "learning_rate": 7.194485031675265e-05,
+ "loss": 1.2225,
+ "step": 467
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.23272423066292103,
+ "learning_rate": 7.189346933053725e-05,
+ "loss": 1.1721,
+ "step": 468
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.25122928735587546,
+ "learning_rate": 7.184194346500892e-05,
+ "loss": 1.2537,
+ "step": 469
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.2159270875490409,
+ "learning_rate": 7.179027295422913e-05,
+ "loss": 1.197,
+ "step": 470
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.2633111059076544,
+ "learning_rate": 7.173845803291636e-05,
+ "loss": 1.1721,
+ "step": 471
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.30555936322098703,
+ "learning_rate": 7.168649893644517e-05,
+ "loss": 1.3011,
+ "step": 472
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.23492670111453726,
+ "learning_rate": 7.163439590084502e-05,
+ "loss": 1.1601,
+ "step": 473
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.26602734263721806,
+ "learning_rate": 7.158214916279923e-05,
+ "loss": 1.2808,
+ "step": 474
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.3182695007856262,
+ "learning_rate": 7.152975895964386e-05,
+ "loss": 1.2967,
+ "step": 475
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.2785021674736721,
+ "learning_rate": 7.147722552936673e-05,
+ "loss": 1.1789,
+ "step": 476
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.279474303138652,
+ "learning_rate": 7.142454911060627e-05,
+ "loss": 1.2596,
+ "step": 477
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.2556980144910755,
+ "learning_rate": 7.137172994265044e-05,
+ "loss": 1.2426,
+ "step": 478
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.3311256331993533,
+ "learning_rate": 7.131876826543565e-05,
+ "loss": 1.2059,
+ "step": 479
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.26467296197775253,
+ "learning_rate": 7.12656643195457e-05,
+ "loss": 1.2482,
+ "step": 480
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.27444885274652553,
+ "learning_rate": 7.121241834621064e-05,
+ "loss": 1.2528,
+ "step": 481
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.2572283861115396,
+ "learning_rate": 7.115903058730567e-05,
+ "loss": 1.1849,
+ "step": 482
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.2677065778235683,
+ "learning_rate": 7.11055012853501e-05,
+ "loss": 1.2011,
+ "step": 483
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.29470622036742816,
+ "learning_rate": 7.105183068350619e-05,
+ "loss": 1.2398,
+ "step": 484
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.27609230248969197,
+ "learning_rate": 7.099801902557811e-05,
+ "loss": 1.2259,
+ "step": 485
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.24248634168099284,
+ "learning_rate": 7.094406655601073e-05,
+ "loss": 1.2282,
+ "step": 486
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.2765941767688746,
+ "learning_rate": 7.088997351988865e-05,
+ "loss": 1.2319,
+ "step": 487
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.29347776909858947,
+ "learning_rate": 7.083574016293493e-05,
+ "loss": 1.1765,
+ "step": 488
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.285370295424537,
+ "learning_rate": 7.078136673151008e-05,
+ "loss": 1.26,
+ "step": 489
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.29408734903836536,
+ "learning_rate": 7.072685347261093e-05,
+ "loss": 1.226,
+ "step": 490
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.27437470239205813,
+ "learning_rate": 7.067220063386947e-05,
+ "loss": 1.1976,
+ "step": 491
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.2680770258777871,
+ "learning_rate": 7.061740846355176e-05,
+ "loss": 1.1915,
+ "step": 492
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.27200362879502954,
+ "learning_rate": 7.056247721055678e-05,
+ "loss": 1.2002,
+ "step": 493
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.2637811092577037,
+ "learning_rate": 7.050740712441528e-05,
+ "loss": 1.287,
+ "step": 494
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.24657959209271266,
+ "learning_rate": 7.045219845528875e-05,
+ "loss": 1.2284,
+ "step": 495
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.25311992110358666,
+ "learning_rate": 7.039685145396812e-05,
+ "loss": 1.1616,
+ "step": 496
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.2564633694193358,
+ "learning_rate": 7.034136637187275e-05,
+ "loss": 1.2067,
+ "step": 497
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.2446797651174144,
+ "learning_rate": 7.028574346104926e-05,
+ "loss": 1.2284,
+ "step": 498
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.2592751463399255,
+ "learning_rate": 7.022998297417034e-05,
+ "loss": 1.2371,
+ "step": 499
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.2500713943206808,
+ "learning_rate": 7.017408516453365e-05,
+ "loss": 1.1061,
+ "step": 500
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.2812266276040743,
+ "learning_rate": 7.011805028606064e-05,
+ "loss": 1.1949,
+ "step": 501
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.298829667668083,
+ "learning_rate": 7.006187859329544e-05,
+ "loss": 1.2313,
+ "step": 502
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.26518768159745104,
+ "learning_rate": 7.000557034140361e-05,
+ "loss": 1.2246,
+ "step": 503
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.3037280360760458,
+ "learning_rate": 6.994912578617113e-05,
+ "loss": 1.1617,
+ "step": 504
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.2726903109255714,
+ "learning_rate": 6.989254518400309e-05,
+ "loss": 1.2415,
+ "step": 505
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.25568082003046966,
+ "learning_rate": 6.98358287919226e-05,
+ "loss": 1.1817,
+ "step": 506
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.25633294893705044,
+ "learning_rate": 6.97789768675696e-05,
+ "loss": 1.2149,
+ "step": 507
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.28291439435087123,
+ "learning_rate": 6.972198966919972e-05,
+ "loss": 1.1578,
+ "step": 508
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.27195184756655516,
+ "learning_rate": 6.966486745568308e-05,
+ "loss": 1.2355,
+ "step": 509
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.239159568376005,
+ "learning_rate": 6.960761048650312e-05,
+ "loss": 1.1688,
+ "step": 510
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.22961475425949177,
+ "learning_rate": 6.955021902175543e-05,
+ "loss": 1.2094,
+ "step": 511
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.27443773600741117,
+ "learning_rate": 6.949269332214651e-05,
+ "loss": 1.2559,
+ "step": 512
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.26230551832002097,
+ "learning_rate": 6.94350336489927e-05,
+ "loss": 1.2121,
+ "step": 513
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.2716742985303849,
+ "learning_rate": 6.937724026421892e-05,
+ "loss": 1.2444,
+ "step": 514
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.2537850139439542,
+ "learning_rate": 6.931931343035742e-05,
+ "loss": 1.1327,
+ "step": 515
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.28599587967496826,
+ "learning_rate": 6.926125341054676e-05,
+ "loss": 1.2236,
+ "step": 516
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.26780654378470103,
+ "learning_rate": 6.920306046853043e-05,
+ "loss": 1.2295,
+ "step": 517
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.23606296888412015,
+ "learning_rate": 6.914473486865577e-05,
+ "loss": 1.1543,
+ "step": 518
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.34976881174240837,
+ "learning_rate": 6.90862768758727e-05,
+ "loss": 1.2067,
+ "step": 519
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.2481257873494882,
+ "learning_rate": 6.902768675573258e-05,
+ "loss": 1.2188,
+ "step": 520
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.2996395778117021,
+ "learning_rate": 6.896896477438699e-05,
+ "loss": 1.2326,
+ "step": 521
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.8839768816333193,
+ "learning_rate": 6.891011119858643e-05,
+ "loss": 1.2435,
+ "step": 522
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.2851882482058998,
+ "learning_rate": 6.885112629567927e-05,
+ "loss": 1.2644,
+ "step": 523
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.2813663482913699,
+ "learning_rate": 6.879201033361035e-05,
+ "loss": 1.2309,
+ "step": 524
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.3257551560135454,
+ "learning_rate": 6.873276358091996e-05,
+ "loss": 1.2755,
+ "step": 525
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.28930479952494365,
+ "learning_rate": 6.867338630674247e-05,
+ "loss": 1.1962,
+ "step": 526
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.3077462996938649,
+ "learning_rate": 6.861387878080511e-05,
+ "loss": 1.2402,
+ "step": 527
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.2848900193452761,
+ "learning_rate": 6.855424127342688e-05,
+ "loss": 1.2748,
+ "step": 528
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.4765938812802202,
+ "learning_rate": 6.849447405551718e-05,
+ "loss": 1.2226,
+ "step": 529
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.53184473292579,
+ "learning_rate": 6.843457739857467e-05,
+ "loss": 1.2347,
+ "step": 530
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.6416239346492343,
+ "learning_rate": 6.837455157468596e-05,
+ "loss": 1.2429,
+ "step": 531
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.3188092712502773,
+ "learning_rate": 6.831439685652442e-05,
+ "loss": 1.216,
+ "step": 532
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.3527495731006385,
+ "learning_rate": 6.825411351734895e-05,
+ "loss": 1.1682,
+ "step": 533
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 0.29603753744741856,
+ "learning_rate": 6.819370183100274e-05,
+ "loss": 1.1434,
+ "step": 534
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 0.5252450389976622,
+ "learning_rate": 6.813316207191198e-05,
+ "loss": 1.1943,
+ "step": 535
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 0.32999419558659937,
+ "learning_rate": 6.807249451508466e-05,
+ "loss": 1.192,
+ "step": 536
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 0.3650175469778724,
+ "learning_rate": 6.801169943610929e-05,
+ "loss": 1.2141,
+ "step": 537
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 1.0643532150783557,
+ "learning_rate": 6.795077711115368e-05,
+ "loss": 1.2253,
+ "step": 538
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.5041310609130145,
+ "learning_rate": 6.788972781696363e-05,
+ "loss": 1.278,
+ "step": 539
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.5123058164360991,
+ "learning_rate": 6.782855183086177e-05,
+ "loss": 1.2231,
+ "step": 540
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.3533015702394419,
+ "learning_rate": 6.776724943074619e-05,
+ "loss": 1.2072,
+ "step": 541
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.30253964625417207,
+ "learning_rate": 6.770582089508927e-05,
+ "loss": 1.1382,
+ "step": 542
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.348991618828202,
+ "learning_rate": 6.764426650293633e-05,
+ "loss": 1.2079,
+ "step": 543
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.46017440578788743,
+ "learning_rate": 6.758258653390444e-05,
+ "loss": 1.1813,
+ "step": 544
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.31962101755594885,
+ "learning_rate": 6.75207812681811e-05,
+ "loss": 1.1339,
+ "step": 545
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.37092024548285923,
+ "learning_rate": 6.745885098652298e-05,
+ "loss": 1.2591,
+ "step": 546
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.32347106450715835,
+ "learning_rate": 6.739679597025466e-05,
+ "loss": 1.2017,
+ "step": 547
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.39250187112342494,
+ "learning_rate": 6.733461650126733e-05,
+ "loss": 1.0933,
+ "step": 548
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.473522452217324,
+ "learning_rate": 6.727231286201752e-05,
+ "loss": 1.1124,
+ "step": 549
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.4809062179622052,
+ "learning_rate": 6.720988533552582e-05,
+ "loss": 1.1585,
+ "step": 550
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.3529662801059162,
+ "learning_rate": 6.714733420537559e-05,
+ "loss": 1.0501,
+ "step": 551
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.5958247214391118,
+ "learning_rate": 6.708465975571168e-05,
+ "loss": 1.1086,
+ "step": 552
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.5341364205022454,
+ "learning_rate": 6.70218622712391e-05,
+ "loss": 1.0518,
+ "step": 553
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.3601805724462006,
+ "learning_rate": 6.695894203722181e-05,
+ "loss": 1.1779,
+ "step": 554
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.43410190338280613,
+ "learning_rate": 6.68958993394813e-05,
+ "loss": 1.093,
+ "step": 555
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.46217742572873594,
+ "learning_rate": 6.683273446439546e-05,
+ "loss": 1.0117,
+ "step": 556
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.8591682373623357,
+ "learning_rate": 6.676944769889708e-05,
+ "loss": 1.1002,
+ "step": 557
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.7383229487622726,
+ "learning_rate": 6.670603933047272e-05,
+ "loss": 1.0779,
+ "step": 558
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.5965305891207813,
+ "learning_rate": 6.664250964716131e-05,
+ "loss": 1.0889,
+ "step": 559
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.6030858606684543,
+ "learning_rate": 6.657885893755288e-05,
+ "loss": 1.0982,
+ "step": 560
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.4644510682398409,
+ "learning_rate": 6.65150874907872e-05,
+ "loss": 1.1004,
+ "step": 561
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.43943285132452564,
+ "learning_rate": 6.645119559655254e-05,
+ "loss": 1.0536,
+ "step": 562
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.4456395978600012,
+ "learning_rate": 6.638718354508427e-05,
+ "loss": 1.0733,
+ "step": 563
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.3303824433217466,
+ "learning_rate": 6.632305162716365e-05,
+ "loss": 1.0552,
+ "step": 564
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.3617704823170143,
+ "learning_rate": 6.62588001341164e-05,
+ "loss": 1.1092,
+ "step": 565
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.4465013349903427,
+ "learning_rate": 6.619442935781141e-05,
+ "loss": 1.0781,
+ "step": 566
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.48516780613791277,
+ "learning_rate": 6.612993959065947e-05,
+ "loss": 1.0686,
+ "step": 567
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.38867820318536633,
+ "learning_rate": 6.606533112561186e-05,
+ "loss": 1.1215,
+ "step": 568
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.38566119820378336,
+ "learning_rate": 6.600060425615907e-05,
+ "loss": 1.1213,
+ "step": 569
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.35534855445058544,
+ "learning_rate": 6.593575927632947e-05,
+ "loss": 1.0955,
+ "step": 570
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.38124406233349717,
+ "learning_rate": 6.587079648068795e-05,
+ "loss": 1.0659,
+ "step": 571
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.454750160923548,
+ "learning_rate": 6.580571616433457e-05,
+ "loss": 1.1149,
+ "step": 572
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.35353190088025255,
+ "learning_rate": 6.574051862290325e-05,
+ "loss": 1.0388,
+ "step": 573
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.3249395594793626,
+ "learning_rate": 6.567520415256045e-05,
+ "loss": 1.0784,
+ "step": 574
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.40078898818247227,
+ "learning_rate": 6.560977305000375e-05,
+ "loss": 1.0859,
+ "step": 575
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.4115264795060035,
+ "learning_rate": 6.554422561246054e-05,
+ "loss": 1.1828,
+ "step": 576
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.30090229228069215,
+ "learning_rate": 6.54785621376867e-05,
+ "loss": 1.0901,
+ "step": 577
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.28827860350299206,
+ "learning_rate": 6.541278292396523e-05,
+ "loss": 1.0277,
+ "step": 578
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.34690404488996757,
+ "learning_rate": 6.534688827010484e-05,
+ "loss": 1.048,
+ "step": 579
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.29943113556644785,
+ "learning_rate": 6.528087847543867e-05,
+ "loss": 1.0646,
+ "step": 580
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.37318202575874415,
+ "learning_rate": 6.521475383982291e-05,
+ "loss": 1.1091,
+ "step": 581
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.3049663659203959,
+ "learning_rate": 6.51485146636354e-05,
+ "loss": 1.0552,
+ "step": 582
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.3342407867509692,
+ "learning_rate": 6.508216124777431e-05,
+ "loss": 1.2227,
+ "step": 583
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.3348396047855952,
+ "learning_rate": 6.501569389365674e-05,
+ "loss": 1.0861,
+ "step": 584
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.30951429367513383,
+ "learning_rate": 6.494911290321737e-05,
+ "loss": 1.0461,
+ "step": 585
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.33898401361064606,
+ "learning_rate": 6.488241857890711e-05,
+ "loss": 1.0854,
+ "step": 586
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.4901462068263497,
+ "learning_rate": 6.481561122369164e-05,
+ "loss": 1.1012,
+ "step": 587
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.3179574879809652,
+ "learning_rate": 6.474869114105018e-05,
+ "loss": 1.0451,
+ "step": 588
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.32159328915060714,
+ "learning_rate": 6.468165863497395e-05,
+ "loss": 1.0458,
+ "step": 589
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.36462235008537297,
+ "learning_rate": 6.461451400996491e-05,
+ "loss": 1.1247,
+ "step": 590
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.5373862753611778,
+ "learning_rate": 6.454725757103432e-05,
+ "loss": 1.0542,
+ "step": 591
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.3160409270291303,
+ "learning_rate": 6.447988962370133e-05,
+ "loss": 1.0829,
+ "step": 592
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.390452102978435,
+ "learning_rate": 6.441241047399169e-05,
+ "loss": 1.192,
+ "step": 593
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.3802122712014928,
+ "learning_rate": 6.434482042843627e-05,
+ "loss": 1.1153,
+ "step": 594
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.4081584328242501,
+ "learning_rate": 6.427711979406966e-05,
+ "loss": 1.1635,
+ "step": 595
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.3791962989638633,
+ "learning_rate": 6.420930887842889e-05,
+ "loss": 1.1581,
+ "step": 596
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.33239440056484193,
+ "learning_rate": 6.414138798955189e-05,
+ "loss": 1.0926,
+ "step": 597
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.3279881540815014,
+ "learning_rate": 6.407335743597616e-05,
+ "loss": 1.1386,
+ "step": 598
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.30309644763750837,
+ "learning_rate": 6.40052175267374e-05,
+ "loss": 1.0523,
+ "step": 599
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.3349097308403333,
+ "learning_rate": 6.393696857136801e-05,
+ "loss": 1.0815,
+ "step": 600
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.3288227593556618,
+ "learning_rate": 6.386861087989581e-05,
+ "loss": 1.015,
+ "step": 601
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.36685586740843157,
+ "learning_rate": 6.380014476284255e-05,
+ "loss": 1.1232,
+ "step": 602
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.3620977714204643,
+ "learning_rate": 6.373157053122243e-05,
+ "loss": 1.1138,
+ "step": 603
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.3130587018197183,
+ "learning_rate": 6.366288849654091e-05,
+ "loss": 1.1255,
+ "step": 604
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.3602737087072766,
+ "learning_rate": 6.359409897079303e-05,
+ "loss": 1.0282,
+ "step": 605
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.31168852571991945,
+ "learning_rate": 6.352520226646222e-05,
+ "loss": 1.0779,
+ "step": 606
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.3516045580189353,
+ "learning_rate": 6.345619869651871e-05,
+ "loss": 1.1028,
+ "step": 607
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.3231857927563657,
+ "learning_rate": 6.33870885744182e-05,
+ "loss": 1.1202,
+ "step": 608
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.30205205129701157,
+ "learning_rate": 6.331787221410041e-05,
+ "loss": 1.1369,
+ "step": 609
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.3198359813888166,
+ "learning_rate": 6.32485499299877e-05,
+ "loss": 1.1763,
+ "step": 610
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.3128641370321787,
+ "learning_rate": 6.31791220369835e-05,
+ "loss": 1.0223,
+ "step": 611
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.2989105616213649,
+ "learning_rate": 6.31095888504711e-05,
+ "loss": 1.0358,
+ "step": 612
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.3103537906853337,
+ "learning_rate": 6.303995068631203e-05,
+ "loss": 1.1261,
+ "step": 613
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.28598715532508207,
+ "learning_rate": 6.297020786084467e-05,
+ "loss": 1.0629,
+ "step": 614
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.29809789918093255,
+ "learning_rate": 6.290036069088288e-05,
+ "loss": 1.035,
+ "step": 615
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.33765270252261453,
+ "learning_rate": 6.283040949371451e-05,
+ "loss": 1.1221,
+ "step": 616
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.3424617501293415,
+ "learning_rate": 6.276035458709993e-05,
+ "loss": 1.155,
+ "step": 617
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.3799189737987811,
+ "learning_rate": 6.269019628927067e-05,
+ "loss": 1.0701,
+ "step": 618
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.3358898935253196,
+ "learning_rate": 6.261993491892791e-05,
+ "loss": 1.1649,
+ "step": 619
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.31569979424117356,
+ "learning_rate": 6.254957079524099e-05,
+ "loss": 1.0633,
+ "step": 620
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.3002168156888237,
+ "learning_rate": 6.247910423784609e-05,
+ "loss": 1.0846,
+ "step": 621
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.3097238823450595,
+ "learning_rate": 6.24085355668447e-05,
+ "loss": 1.0808,
+ "step": 622
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.3120312761417578,
+ "learning_rate": 6.233786510280212e-05,
+ "loss": 1.0142,
+ "step": 623
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.3335343015064923,
+ "learning_rate": 6.22670931667461e-05,
+ "loss": 1.0674,
+ "step": 624
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.3234062304634526,
+ "learning_rate": 6.219622008016533e-05,
+ "loss": 1.0981,
+ "step": 625
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.32152678786547273,
+ "learning_rate": 6.212524616500798e-05,
+ "loss": 1.0244,
+ "step": 626
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.39031977608147594,
+ "learning_rate": 6.205417174368023e-05,
+ "loss": 1.1205,
+ "step": 627
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.3806189090017157,
+ "learning_rate": 6.198299713904485e-05,
+ "loss": 1.1134,
+ "step": 628
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.2978349276971668,
+ "learning_rate": 6.191172267441967e-05,
+ "loss": 1.0088,
+ "step": 629
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.3190354077382501,
+ "learning_rate": 6.184034867357617e-05,
+ "loss": 1.108,
+ "step": 630
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.32633048665038994,
+ "learning_rate": 6.176887546073797e-05,
+ "loss": 1.0825,
+ "step": 631
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.3428026413020903,
+ "learning_rate": 6.169730336057939e-05,
+ "loss": 1.0765,
+ "step": 632
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.3475737151929015,
+ "learning_rate": 6.162563269822391e-05,
+ "loss": 1.0693,
+ "step": 633
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.3870252154591392,
+ "learning_rate": 6.15538637992428e-05,
+ "loss": 1.1081,
+ "step": 634
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.33597355193652834,
+ "learning_rate": 6.148199698965352e-05,
+ "loss": 1.0893,
+ "step": 635
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.30805894179787247,
+ "learning_rate": 6.141003259591834e-05,
+ "loss": 1.0995,
+ "step": 636
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.3025073882734066,
+ "learning_rate": 6.133797094494281e-05,
+ "loss": 1.0388,
+ "step": 637
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.3524395196391662,
+ "learning_rate": 6.126581236407429e-05,
+ "loss": 1.1196,
+ "step": 638
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.3377646188130345,
+ "learning_rate": 6.119355718110039e-05,
+ "loss": 1.0382,
+ "step": 639
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.35508400659785483,
+ "learning_rate": 6.112120572424763e-05,
+ "loss": 1.1402,
+ "step": 640
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.3454418793700457,
+ "learning_rate": 6.104875832217982e-05,
+ "loss": 1.1032,
+ "step": 641
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.32629806837059866,
+ "learning_rate": 6.097621530399661e-05,
+ "loss": 1.0959,
+ "step": 642
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.3329536837751315,
+ "learning_rate": 6.090357699923202e-05,
+ "loss": 1.0467,
+ "step": 643
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.32302233828349475,
+ "learning_rate": 6.083084373785287e-05,
+ "loss": 1.0858,
+ "step": 644
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.3310358826507611,
+ "learning_rate": 6.075801585025739e-05,
+ "loss": 1.0715,
+ "step": 645
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.319322035854079,
+ "learning_rate": 6.068509366727362e-05,
+ "loss": 1.177,
+ "step": 646
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.3065230667302707,
+ "learning_rate": 6.061207752015797e-05,
+ "loss": 1.0649,
+ "step": 647
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.29926795565748227,
+ "learning_rate": 6.053896774059368e-05,
+ "loss": 1.1325,
+ "step": 648
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.3556069634279046,
+ "learning_rate": 6.046576466068931e-05,
+ "loss": 1.1366,
+ "step": 649
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.3189191131461966,
+ "learning_rate": 6.039246861297727e-05,
+ "loss": 1.0693,
+ "step": 650
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.3347197156648834,
+ "learning_rate": 6.031907993041227e-05,
+ "loss": 1.1009,
+ "step": 651
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.32274156348185445,
+ "learning_rate": 6.0245598946369826e-05,
+ "loss": 1.1675,
+ "step": 652
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.35534089035455224,
+ "learning_rate": 6.017202599464476e-05,
+ "loss": 1.1723,
+ "step": 653
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.3106026578570133,
+ "learning_rate": 6.009836140944965e-05,
+ "loss": 1.0954,
+ "step": 654
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.3309144454564729,
+ "learning_rate": 6.002460552541331e-05,
+ "loss": 1.0209,
+ "step": 655
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.3023619281400003,
+ "learning_rate": 5.9950758677579345e-05,
+ "loss": 1.0363,
+ "step": 656
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.3311182880219704,
+ "learning_rate": 5.987682120140451e-05,
+ "loss": 1.0515,
+ "step": 657
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.33396486010030413,
+ "learning_rate": 5.980279343275729e-05,
+ "loss": 1.1251,
+ "step": 658
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.3465764556678002,
+ "learning_rate": 5.97286757079163e-05,
+ "loss": 1.165,
+ "step": 659
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.304193441363374,
+ "learning_rate": 5.965446836356882e-05,
+ "loss": 1.0228,
+ "step": 660
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.3415149030413082,
+ "learning_rate": 5.9580171736809224e-05,
+ "loss": 1.0742,
+ "step": 661
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.33138658321132064,
+ "learning_rate": 5.950578616513746e-05,
+ "loss": 1.0843,
+ "step": 662
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.30774403421162994,
+ "learning_rate": 5.943131198645752e-05,
+ "loss": 1.065,
+ "step": 663
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.3428877492183819,
+ "learning_rate": 5.9356749539075885e-05,
+ "loss": 1.1101,
+ "step": 664
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.3621290546130101,
+ "learning_rate": 5.928209916170003e-05,
+ "loss": 1.1372,
+ "step": 665
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.3482375945469884,
+ "learning_rate": 5.9207361193436865e-05,
+ "loss": 1.132,
+ "step": 666
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.31754384974068384,
+ "learning_rate": 5.9132535973791156e-05,
+ "loss": 1.148,
+ "step": 667
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.36003834782050365,
+ "learning_rate": 5.9057623842664044e-05,
+ "loss": 1.1099,
+ "step": 668
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.2963701622969662,
+ "learning_rate": 5.8982625140351464e-05,
+ "loss": 1.0755,
+ "step": 669
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.32579569606066516,
+ "learning_rate": 5.8907540207542616e-05,
+ "loss": 1.0809,
+ "step": 670
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.4247563451753457,
+ "learning_rate": 5.8832369385318416e-05,
+ "loss": 1.097,
+ "step": 671
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.33076932102169776,
+ "learning_rate": 5.875711301514992e-05,
+ "loss": 1.1078,
+ "step": 672
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.3609238032332309,
+ "learning_rate": 5.8681771438896815e-05,
+ "loss": 1.1031,
+ "step": 673
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.325159585649425,
+ "learning_rate": 5.860634499880583e-05,
+ "loss": 1.0707,
+ "step": 674
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.4620687271068983,
+ "learning_rate": 5.853083403750922e-05,
+ "loss": 1.1017,
+ "step": 675
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.33485279064365936,
+ "learning_rate": 5.845523889802316e-05,
+ "loss": 1.0989,
+ "step": 676
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.30952573170841513,
+ "learning_rate": 5.8379559923746214e-05,
+ "loss": 1.0393,
+ "step": 677
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.33498605810588283,
+ "learning_rate": 5.830379745845781e-05,
+ "loss": 1.1259,
+ "step": 678
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.35771921163037307,
+ "learning_rate": 5.822795184631659e-05,
+ "loss": 1.0815,
+ "step": 679
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.3329650192347647,
+ "learning_rate": 5.815202343185894e-05,
+ "loss": 1.1344,
+ "step": 680
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3356634465845771,
+ "learning_rate": 5.807601255999736e-05,
+ "loss": 1.1297,
+ "step": 681
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3289442034151235,
+ "learning_rate": 5.7999919576018934e-05,
+ "loss": 1.022,
+ "step": 682
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3207007334784113,
+ "learning_rate": 5.7923744825583745e-05,
+ "loss": 1.0571,
+ "step": 683
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3582460325329284,
+ "learning_rate": 5.7847488654723304e-05,
+ "loss": 1.0778,
+ "step": 684
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3563317666176927,
+ "learning_rate": 5.777115140983899e-05,
+ "loss": 1.1003,
+ "step": 685
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 3.4694912945702105,
+ "learning_rate": 5.769473343770047e-05,
+ "loss": 1.121,
+ "step": 686
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.43002349520483113,
+ "learning_rate": 5.761823508544411e-05,
+ "loss": 1.0765,
+ "step": 687
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.39467783104839754,
+ "learning_rate": 5.754165670057142e-05,
+ "loss": 1.0788,
+ "step": 688
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.39629029674867916,
+ "learning_rate": 5.7464998630947464e-05,
+ "loss": 1.0812,
+ "step": 689
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.3880152093965208,
+ "learning_rate": 5.738826122479929e-05,
+ "loss": 1.1228,
+ "step": 690
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.3777874121959188,
+ "learning_rate": 5.7311444830714324e-05,
+ "loss": 1.0907,
+ "step": 691
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.38004041653523696,
+ "learning_rate": 5.723454979763882e-05,
+ "loss": 1.1263,
+ "step": 692
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.37049672627797636,
+ "learning_rate": 5.7157576474876246e-05,
+ "loss": 1.1438,
+ "step": 693
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.32973606103437614,
+ "learning_rate": 5.7080525212085725e-05,
+ "loss": 1.0553,
+ "step": 694
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.31674639252070325,
+ "learning_rate": 5.700339635928038e-05,
+ "loss": 1.06,
+ "step": 695
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.32282199426553837,
+ "learning_rate": 5.692619026682588e-05,
+ "loss": 1.0841,
+ "step": 696
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.4810882958061859,
+ "learning_rate": 5.684890728543869e-05,
+ "loss": 1.0803,
+ "step": 697
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.3995638550178378,
+ "learning_rate": 5.6771547766184566e-05,
+ "loss": 1.1187,
+ "step": 698
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.35264932960583484,
+ "learning_rate": 5.669411206047699e-05,
+ "loss": 1.0641,
+ "step": 699
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.35240640524733,
+ "learning_rate": 5.661660052007547e-05,
+ "loss": 1.076,
+ "step": 700
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.3540694609860389,
+ "learning_rate": 5.653901349708401e-05,
+ "loss": 1.1369,
+ "step": 701
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.3196055112925304,
+ "learning_rate": 5.646135134394955e-05,
+ "loss": 1.0677,
+ "step": 702
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.4214141007955914,
+ "learning_rate": 5.6383614413460266e-05,
+ "loss": 1.1139,
+ "step": 703
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.3625611311798579,
+ "learning_rate": 5.630580305874402e-05,
+ "loss": 1.1845,
+ "step": 704
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.3425208672181188,
+ "learning_rate": 5.62279176332668e-05,
+ "loss": 1.174,
+ "step": 705
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.3108419862818321,
+ "learning_rate": 5.6149958490830996e-05,
+ "loss": 1.0331,
+ "step": 706
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.3274644181571904,
+ "learning_rate": 5.607192598557394e-05,
+ "loss": 1.0664,
+ "step": 707
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.346218197215145,
+ "learning_rate": 5.599382047196617e-05,
+ "loss": 1.2088,
+ "step": 708
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.328497632267458,
+ "learning_rate": 5.591564230480989e-05,
+ "loss": 1.0287,
+ "step": 709
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.3708173720611468,
+ "learning_rate": 5.583739183923732e-05,
+ "loss": 1.0883,
+ "step": 710
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.3631427403535479,
+ "learning_rate": 5.575906943070915e-05,
+ "loss": 1.1155,
+ "step": 711
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.3305201458598695,
+ "learning_rate": 5.5680675435012834e-05,
+ "loss": 1.0958,
+ "step": 712
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.34978833532083714,
+ "learning_rate": 5.5602210208261036e-05,
+ "loss": 1.1437,
+ "step": 713
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.3510553882510229,
+ "learning_rate": 5.552367410688999e-05,
+ "loss": 1.0941,
+ "step": 714
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.3523747462465078,
+ "learning_rate": 5.544506748765789e-05,
+ "loss": 1.1289,
+ "step": 715
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.38262637783927445,
+ "learning_rate": 5.5366390707643266e-05,
+ "loss": 1.099,
+ "step": 716
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.38620065989073454,
+ "learning_rate": 5.528764412424334e-05,
+ "loss": 1.083,
+ "step": 717
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.3401355276121096,
+ "learning_rate": 5.520882809517245e-05,
+ "loss": 1.028,
+ "step": 718
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.3392061008943934,
+ "learning_rate": 5.512994297846039e-05,
+ "loss": 1.1083,
+ "step": 719
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.34219480421015414,
+ "learning_rate": 5.505098913245077e-05,
+ "loss": 1.1108,
+ "step": 720
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.3275058061553761,
+ "learning_rate": 5.497196691579945e-05,
+ "loss": 1.111,
+ "step": 721
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.36800249746509384,
+ "learning_rate": 5.489287668747283e-05,
+ "loss": 1.1221,
+ "step": 722
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.4129005533101575,
+ "learning_rate": 5.481371880674628e-05,
+ "loss": 1.0966,
+ "step": 723
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.36563906596251655,
+ "learning_rate": 5.4734493633202505e-05,
+ "loss": 1.0927,
+ "step": 724
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.3614650536839971,
+ "learning_rate": 5.465520152672986e-05,
+ "loss": 1.13,
+ "step": 725
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.36419665098633497,
+ "learning_rate": 5.4575842847520765e-05,
+ "loss": 1.1183,
+ "step": 726
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.34490689807258995,
+ "learning_rate": 5.449641795607005e-05,
+ "loss": 1.0919,
+ "step": 727
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.3627643746876298,
+ "learning_rate": 5.441692721317334e-05,
+ "loss": 1.0411,
+ "step": 728
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.323620411949565,
+ "learning_rate": 5.433737097992537e-05,
+ "loss": 1.0725,
+ "step": 729
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3521599501824965,
+ "learning_rate": 5.425774961771838e-05,
+ "loss": 1.0926,
+ "step": 730
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3302390546764222,
+ "learning_rate": 5.417806348824047e-05,
+ "loss": 1.0468,
+ "step": 731
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3833325802616019,
+ "learning_rate": 5.4098312953473956e-05,
+ "loss": 1.1291,
+ "step": 732
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3708621126835512,
+ "learning_rate": 5.401849837569372e-05,
+ "loss": 1.0887,
+ "step": 733
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3625834373416278,
+ "learning_rate": 5.393862011746555e-05,
+ "loss": 1.0981,
+ "step": 734
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3583343965080617,
+ "learning_rate": 5.385867854164451e-05,
+ "loss": 1.1021,
+ "step": 735
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.34598320594096066,
+ "learning_rate": 5.377867401137332e-05,
+ "loss": 1.1376,
+ "step": 736
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.3046382791315433,
+ "learning_rate": 5.369860689008066e-05,
+ "loss": 1.0206,
+ "step": 737
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.34464948380043725,
+ "learning_rate": 5.3618477541479505e-05,
+ "loss": 1.1084,
+ "step": 738
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.3203242519627101,
+ "learning_rate": 5.353828632956557e-05,
+ "loss": 1.0731,
+ "step": 739
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.3431169960355163,
+ "learning_rate": 5.3458033618615516e-05,
+ "loss": 1.091,
+ "step": 740
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.33492074521678705,
+ "learning_rate": 5.337771977318543e-05,
+ "loss": 1.1112,
+ "step": 741
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.32576546585541344,
+ "learning_rate": 5.3297345158109086e-05,
+ "loss": 1.0993,
+ "step": 742
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.3410007245037574,
+ "learning_rate": 5.3216910138496286e-05,
+ "loss": 1.094,
+ "step": 743
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.34891180680896833,
+ "learning_rate": 5.313641507973128e-05,
+ "loss": 1.1331,
+ "step": 744
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.37135766946717214,
+ "learning_rate": 5.3055860347471006e-05,
+ "loss": 1.1,
+ "step": 745
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.3465019415478411,
+ "learning_rate": 5.297524630764349e-05,
+ "loss": 1.1256,
+ "step": 746
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.37035388481626563,
+ "learning_rate": 5.289457332644615e-05,
+ "loss": 1.0366,
+ "step": 747
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.33853883270759155,
+ "learning_rate": 5.281384177034421e-05,
+ "loss": 1.0547,
+ "step": 748
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.364306618627317,
+ "learning_rate": 5.2733052006068897e-05,
+ "loss": 1.0768,
+ "step": 749
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.4021754315731627,
+ "learning_rate": 5.2652204400615916e-05,
+ "loss": 1.1382,
+ "step": 750
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.3332185389039008,
+ "learning_rate": 5.257129932124368e-05,
+ "loss": 1.0815,
+ "step": 751
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.3453105709879854,
+ "learning_rate": 5.249033713547173e-05,
+ "loss": 1.1109,
+ "step": 752
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.3385397539717797,
+ "learning_rate": 5.2409318211078966e-05,
+ "loss": 1.0529,
+ "step": 753
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.33197994450130447,
+ "learning_rate": 5.232824291610206e-05,
+ "loss": 1.0721,
+ "step": 754
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.32836289576124167,
+ "learning_rate": 5.224711161883375e-05,
+ "loss": 1.0459,
+ "step": 755
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.32491620058831744,
+ "learning_rate": 5.216592468782117e-05,
+ "loss": 1.0897,
+ "step": 756
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.3137879047811153,
+ "learning_rate": 5.2084682491864155e-05,
+ "loss": 1.096,
+ "step": 757
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.3356938043023012,
+ "learning_rate": 5.200338540001364e-05,
+ "loss": 1.0827,
+ "step": 758
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.36044340490819055,
+ "learning_rate": 5.192203378156984e-05,
+ "loss": 1.0617,
+ "step": 759
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.34674262047888293,
+ "learning_rate": 5.184062800608077e-05,
+ "loss": 1.1267,
+ "step": 760
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.32469442322149333,
+ "learning_rate": 5.1759168443340375e-05,
+ "loss": 1.1483,
+ "step": 761
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.3290384307774216,
+ "learning_rate": 5.167765546338698e-05,
+ "loss": 1.047,
+ "step": 762
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.31637612188770403,
+ "learning_rate": 5.1596089436501525e-05,
+ "loss": 1.0311,
+ "step": 763
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.3168693829641207,
+ "learning_rate": 5.151447073320597e-05,
+ "loss": 1.1405,
+ "step": 764
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.34322421571238926,
+ "learning_rate": 5.143279972426153e-05,
+ "loss": 1.1428,
+ "step": 765
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.3291030435830325,
+ "learning_rate": 5.1351076780667026e-05,
+ "loss": 1.0473,
+ "step": 766
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.33772039158758044,
+ "learning_rate": 5.1269302273657195e-05,
+ "loss": 1.0909,
+ "step": 767
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.3802031736890876,
+ "learning_rate": 5.118747657470102e-05,
+ "loss": 1.1482,
+ "step": 768
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.3296067628997962,
+ "learning_rate": 5.1105600055500025e-05,
+ "loss": 1.0085,
+ "step": 769
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.3707139982828035,
+ "learning_rate": 5.102367308798658e-05,
+ "loss": 1.0746,
+ "step": 770
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.3378537316757011,
+ "learning_rate": 5.094169604432225e-05,
+ "loss": 1.0482,
+ "step": 771
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.4008417246255145,
+ "learning_rate": 5.085966929689601e-05,
+ "loss": 1.1065,
+ "step": 772
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.3244385106988064,
+ "learning_rate": 5.077759321832271e-05,
+ "loss": 1.0827,
+ "step": 773
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.37228575732812336,
+ "learning_rate": 5.0695468181441215e-05,
+ "loss": 1.1146,
+ "step": 774
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.33761714797540276,
+ "learning_rate": 5.061329455931283e-05,
+ "loss": 1.092,
+ "step": 775
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.3158158390913494,
+ "learning_rate": 5.053107272521955e-05,
+ "loss": 1.1058,
+ "step": 776
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.3691501929738938,
+ "learning_rate": 5.044880305266239e-05,
+ "loss": 1.1599,
+ "step": 777
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.33730914019805525,
+ "learning_rate": 5.0366485915359645e-05,
+ "loss": 1.0615,
+ "step": 778
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.34970059240017,
+ "learning_rate": 5.0284121687245257e-05,
+ "loss": 1.1475,
+ "step": 779
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 0.3374028029407197,
+ "learning_rate": 5.020171074246707e-05,
+ "loss": 1.0926,
+ "step": 780
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 0.3350020681123992,
+ "learning_rate": 5.011925345538514e-05,
+ "loss": 1.1276,
+ "step": 781
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 0.3224228965786606,
+ "learning_rate": 5.003675020057003e-05,
+ "loss": 1.0183,
+ "step": 782
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 0.3357310714740298,
+ "learning_rate": 4.995420135280114e-05,
+ "loss": 1.1114,
+ "step": 783
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 0.3590203255363759,
+ "learning_rate": 4.9871607287064966e-05,
+ "loss": 1.1504,
+ "step": 784
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.33011195419611655,
+ "learning_rate": 4.9788968378553396e-05,
+ "loss": 1.0826,
+ "step": 785
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.31088868195439445,
+ "learning_rate": 4.970628500266207e-05,
+ "loss": 1.0704,
+ "step": 786
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.3144996103179409,
+ "learning_rate": 4.962355753498858e-05,
+ "loss": 1.1403,
+ "step": 787
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.3147269555419068,
+ "learning_rate": 4.954078635133081e-05,
+ "loss": 1.0898,
+ "step": 788
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.3280151747783868,
+ "learning_rate": 4.945797182768524e-05,
+ "loss": 1.1115,
+ "step": 789
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.3551996569232493,
+ "learning_rate": 4.937511434024524e-05,
+ "loss": 1.1731,
+ "step": 790
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 0.343863208057807,
+ "learning_rate": 4.9292214265399336e-05,
+ "loss": 1.0866,
+ "step": 791
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 0.37316699385322466,
+ "learning_rate": 4.920927197972949e-05,
+ "loss": 1.1083,
+ "step": 792
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 0.3635739774067832,
+ "learning_rate": 4.9126287860009453e-05,
+ "loss": 1.1393,
+ "step": 793
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 0.3755910554972886,
+ "learning_rate": 4.9043262283202974e-05,
+ "loss": 1.1624,
+ "step": 794
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 0.3635899120146823,
+ "learning_rate": 4.8960195626462145e-05,
+ "loss": 1.2095,
+ "step": 795
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 0.3642202684342816,
+ "learning_rate": 4.8877088267125664e-05,
+ "loss": 1.1099,
+ "step": 796
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 0.3339946548799316,
+ "learning_rate": 4.879394058271712e-05,
+ "loss": 1.1157,
+ "step": 797
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 0.3457189703100475,
+ "learning_rate": 4.871075295094329e-05,
+ "loss": 1.129,
+ "step": 798
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 0.3550931839691424,
+ "learning_rate": 4.862752574969241e-05,
+ "loss": 1.076,
+ "step": 799
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 0.36139108917966734,
+ "learning_rate": 4.8544259357032475e-05,
+ "loss": 1.1577,
+ "step": 800
+ },
+ {
+ "epoch": 1.47,
+ "grad_norm": 0.32133637199528886,
+ "learning_rate": 4.8460954151209486e-05,
+ "loss": 1.0148,
+ "step": 801
+ },
+ {
+ "epoch": 1.47,
+ "grad_norm": 1.043434839098495,
+ "learning_rate": 4.837761051064579e-05,
+ "loss": 1.1145,
+ "step": 802
+ },
+ {
+ "epoch": 1.47,
+ "grad_norm": 0.36683258883118186,
+ "learning_rate": 4.8294228813938285e-05,
+ "loss": 1.0952,
+ "step": 803
+ },
+ {
+ "epoch": 1.47,
+ "grad_norm": 0.4664840097151361,
+ "learning_rate": 4.8210809439856804e-05,
+ "loss": 1.1644,
+ "step": 804
+ },
+ {
+ "epoch": 1.47,
+ "grad_norm": 0.355267647636684,
+ "learning_rate": 4.8127352767342276e-05,
+ "loss": 1.1062,
+ "step": 805
+ },
+ {
+ "epoch": 1.47,
+ "grad_norm": 0.369440033402877,
+ "learning_rate": 4.8043859175505095e-05,
+ "loss": 1.0846,
+ "step": 806
+ },
+ {
+ "epoch": 1.48,
+ "grad_norm": 0.3530796042687365,
+ "learning_rate": 4.7960329043623344e-05,
+ "loss": 1.1608,
+ "step": 807
+ },
+ {
+ "epoch": 1.48,
+ "grad_norm": 0.4321129927586221,
+ "learning_rate": 4.787676275114111e-05,
+ "loss": 1.1339,
+ "step": 808
+ },
+ {
+ "epoch": 1.48,
+ "grad_norm": 0.349782342376565,
+ "learning_rate": 4.779316067766673e-05,
+ "loss": 1.0685,
+ "step": 809
+ },
+ {
+ "epoch": 1.48,
+ "grad_norm": 0.36418077406249405,
+ "learning_rate": 4.770952320297109e-05,
+ "loss": 1.1467,
+ "step": 810
+ },
+ {
+ "epoch": 1.48,
+ "grad_norm": 0.35831016374198177,
+ "learning_rate": 4.7625850706985886e-05,
+ "loss": 1.064,
+ "step": 811
+ },
+ {
+ "epoch": 1.49,
+ "grad_norm": 0.3596998725696811,
+ "learning_rate": 4.7542143569801894e-05,
+ "loss": 1.119,
+ "step": 812
+ },
+ {
+ "epoch": 1.49,
+ "grad_norm": 0.3437403316058801,
+ "learning_rate": 4.745840217166725e-05,
+ "loss": 1.0451,
+ "step": 813
+ },
+ {
+ "epoch": 1.49,
+ "grad_norm": 0.36614738184483053,
+ "learning_rate": 4.737462689298577e-05,
+ "loss": 1.1388,
+ "step": 814
+ },
+ {
+ "epoch": 1.49,
+ "grad_norm": 0.4127884784033637,
+ "learning_rate": 4.7290818114315086e-05,
+ "loss": 1.1786,
+ "step": 815
+ },
+ {
+ "epoch": 1.49,
+ "grad_norm": 0.4110838984805364,
+ "learning_rate": 4.72069762163651e-05,
+ "loss": 1.0709,
+ "step": 816
+ },
+ {
+ "epoch": 1.49,
+ "grad_norm": 0.42581764215872087,
+ "learning_rate": 4.7123101579996106e-05,
+ "loss": 1.1019,
+ "step": 817
+ },
+ {
+ "epoch": 1.5,
+ "grad_norm": 0.39442487793142056,
+ "learning_rate": 4.7039194586217136e-05,
+ "loss": 1.1532,
+ "step": 818
+ },
+ {
+ "epoch": 1.5,
+ "grad_norm": 0.3362730343659587,
+ "learning_rate": 4.695525561618418e-05,
+ "loss": 1.1149,
+ "step": 819
+ },
+ {
+ "epoch": 1.5,
+ "grad_norm": 0.5554581175413662,
+ "learning_rate": 4.687128505119853e-05,
+ "loss": 1.0697,
+ "step": 820
+ },
+ {
+ "epoch": 1.5,
+ "grad_norm": 0.369476979421362,
+ "learning_rate": 4.6787283272704966e-05,
+ "loss": 1.1038,
+ "step": 821
+ },
+ {
+ "epoch": 1.5,
+ "grad_norm": 0.4002553035181225,
+ "learning_rate": 4.670325066229009e-05,
+ "loss": 1.0714,
+ "step": 822
+ },
+ {
+ "epoch": 1.51,
+ "grad_norm": 0.5196234078753172,
+ "learning_rate": 4.661918760168052e-05,
+ "loss": 1.1027,
+ "step": 823
+ },
+ {
+ "epoch": 1.51,
+ "grad_norm": 0.39318954902323083,
+ "learning_rate": 4.653509447274121e-05,
+ "loss": 1.1562,
+ "step": 824
+ },
+ {
+ "epoch": 1.51,
+ "grad_norm": 0.3822202885742058,
+ "learning_rate": 4.6450971657473743e-05,
+ "loss": 1.0662,
+ "step": 825
+ },
+ {
+ "epoch": 1.51,
+ "grad_norm": 0.4203426346301331,
+ "learning_rate": 4.63668195380145e-05,
+ "loss": 1.0713,
+ "step": 826
+ },
+ {
+ "epoch": 1.51,
+ "grad_norm": 0.3850673744429753,
+ "learning_rate": 4.628263849663301e-05,
+ "loss": 1.123,
+ "step": 827
+ },
+ {
+ "epoch": 1.51,
+ "grad_norm": 0.3615517608466556,
+ "learning_rate": 4.619842891573016e-05,
+ "loss": 1.0565,
+ "step": 828
+ },
+ {
+ "epoch": 1.52,
+ "grad_norm": 0.34448279386839303,
+ "learning_rate": 4.6114191177836514e-05,
+ "loss": 1.0703,
+ "step": 829
+ },
+ {
+ "epoch": 1.52,
+ "grad_norm": 0.3649438531637055,
+ "learning_rate": 4.6029925665610524e-05,
+ "loss": 1.061,
+ "step": 830
+ },
+ {
+ "epoch": 1.52,
+ "grad_norm": 0.360215034359632,
+ "learning_rate": 4.59456327618368e-05,
+ "loss": 1.075,
+ "step": 831
+ },
+ {
+ "epoch": 1.52,
+ "grad_norm": 0.3297320285815758,
+ "learning_rate": 4.5861312849424386e-05,
+ "loss": 1.0177,
+ "step": 832
+ },
+ {
+ "epoch": 1.52,
+ "grad_norm": 0.3687310180731473,
+ "learning_rate": 4.5776966311405035e-05,
+ "loss": 1.078,
+ "step": 833
+ },
+ {
+ "epoch": 1.53,
+ "grad_norm": 0.40273770874860215,
+ "learning_rate": 4.5692593530931416e-05,
+ "loss": 1.1237,
+ "step": 834
+ },
+ {
+ "epoch": 1.53,
+ "grad_norm": 0.41430404774660234,
+ "learning_rate": 4.560819489127545e-05,
+ "loss": 1.0891,
+ "step": 835
+ },
+ {
+ "epoch": 1.53,
+ "grad_norm": 0.39133407251294566,
+ "learning_rate": 4.552377077582646e-05,
+ "loss": 1.1244,
+ "step": 836
+ },
+ {
+ "epoch": 1.53,
+ "grad_norm": 0.45340195323000915,
+ "learning_rate": 4.543932156808959e-05,
+ "loss": 1.1659,
+ "step": 837
+ },
+ {
+ "epoch": 1.53,
+ "grad_norm": 0.37488623560038786,
+ "learning_rate": 4.535484765168386e-05,
+ "loss": 1.1026,
+ "step": 838
+ },
+ {
+ "epoch": 1.53,
+ "grad_norm": 0.36994669175389283,
+ "learning_rate": 4.527034941034063e-05,
+ "loss": 1.1481,
+ "step": 839
+ },
+ {
+ "epoch": 1.54,
+ "grad_norm": 19.06482385633655,
+ "learning_rate": 4.51858272279017e-05,
+ "loss": 1.1897,
+ "step": 840
+ },
+ {
+ "epoch": 1.54,
+ "grad_norm": 0.7974039643314161,
+ "learning_rate": 4.5101281488317634e-05,
+ "loss": 1.1467,
+ "step": 841
+ },
+ {
+ "epoch": 1.54,
+ "grad_norm": 1211.6099030928651,
+ "learning_rate": 4.501671257564602e-05,
+ "loss": 1.9989,
+ "step": 842
+ },
+ {
+ "epoch": 1.54,
+ "grad_norm": 1.438002431348562,
+ "learning_rate": 4.49321208740497e-05,
+ "loss": 1.1715,
+ "step": 843
+ },
+ {
+ "epoch": 1.54,
+ "grad_norm": 5.653373858566177,
+ "learning_rate": 4.484750676779504e-05,
+ "loss": 1.1828,
+ "step": 844
+ },
+ {
+ "epoch": 1.55,
+ "grad_norm": 625.3410428452096,
+ "learning_rate": 4.4762870641250185e-05,
+ "loss": 1.3283,
+ "step": 845
+ },
+ {
+ "epoch": 1.55,
+ "grad_norm": 5.653780923251844,
+ "learning_rate": 4.467821287888331e-05,
+ "loss": 1.1532,
+ "step": 846
+ },
+ {
+ "epoch": 1.55,
+ "grad_norm": 177.17210534716736,
+ "learning_rate": 4.459353386526086e-05,
+ "loss": 1.1178,
+ "step": 847
+ },
+ {
+ "epoch": 1.55,
+ "grad_norm": 4.805542680722976,
+ "learning_rate": 4.450883398504584e-05,
+ "loss": 1.1057,
+ "step": 848
+ },
+ {
+ "epoch": 1.55,
+ "grad_norm": 2.055060058990183,
+ "learning_rate": 4.442411362299602e-05,
+ "loss": 1.1423,
+ "step": 849
+ },
+ {
+ "epoch": 1.55,
+ "grad_norm": 750.168576149713,
+ "learning_rate": 4.433937316396224e-05,
+ "loss": 1.6724,
+ "step": 850
+ },
+ {
+ "epoch": 1.56,
+ "grad_norm": 130.95680794453787,
+ "learning_rate": 4.425461299288659e-05,
+ "loss": 1.4064,
+ "step": 851
+ },
+ {
+ "epoch": 1.56,
+ "grad_norm": 1.0622510576276174,
+ "learning_rate": 4.416983349480073e-05,
+ "loss": 1.1254,
+ "step": 852
+ },
+ {
+ "epoch": 1.56,
+ "grad_norm": 1.0460019153859763,
+ "learning_rate": 4.408503505482412e-05,
+ "loss": 1.1571,
+ "step": 853
+ },
+ {
+ "epoch": 1.56,
+ "grad_norm": 1.0110886779305324,
+ "learning_rate": 4.400021805816225e-05,
+ "loss": 1.2113,
+ "step": 854
+ },
+ {
+ "epoch": 1.56,
+ "grad_norm": 8.515995522196171,
+ "learning_rate": 4.391538289010493e-05,
+ "loss": 1.1128,
+ "step": 855
+ },
+ {
+ "epoch": 1.57,
+ "grad_norm": 1.2951788332991243,
+ "learning_rate": 4.383052993602448e-05,
+ "loss": 1.1713,
+ "step": 856
+ },
+ {
+ "epoch": 1.57,
+ "grad_norm": 30.749825560809473,
+ "learning_rate": 4.374565958137404e-05,
+ "loss": 1.1628,
+ "step": 857
+ },
+ {
+ "epoch": 1.57,
+ "grad_norm": 2.9528948545566096,
+ "learning_rate": 4.3660772211685775e-05,
+ "loss": 1.1314,
+ "step": 858
+ },
+ {
+ "epoch": 1.57,
+ "grad_norm": 0.7682418716635998,
+ "learning_rate": 4.357586821256918e-05,
+ "loss": 1.1017,
+ "step": 859
+ },
+ {
+ "epoch": 1.57,
+ "grad_norm": 1.4660835049872478,
+ "learning_rate": 4.349094796970925e-05,
+ "loss": 1.1871,
+ "step": 860
+ },
+ {
+ "epoch": 1.57,
+ "grad_norm": 2.086334546506882,
+ "learning_rate": 4.3406011868864795e-05,
+ "loss": 1.1273,
+ "step": 861
+ },
+ {
+ "epoch": 1.58,
+ "grad_norm": 0.8447346926666625,
+ "learning_rate": 4.3321060295866635e-05,
+ "loss": 1.1888,
+ "step": 862
+ },
+ {
+ "epoch": 1.58,
+ "grad_norm": 0.42192172811626216,
+ "learning_rate": 4.32360936366159e-05,
+ "loss": 1.1195,
+ "step": 863
+ },
+ {
+ "epoch": 1.58,
+ "grad_norm": 0.8309195428194658,
+ "learning_rate": 4.315111227708224e-05,
+ "loss": 1.1546,
+ "step": 864
+ },
+ {
+ "epoch": 1.58,
+ "grad_norm": 1.2607908118006466,
+ "learning_rate": 4.306611660330208e-05,
+ "loss": 0.9956,
+ "step": 865
+ },
+ {
+ "epoch": 1.58,
+ "grad_norm": 0.7251625291751502,
+ "learning_rate": 4.298110700137687e-05,
+ "loss": 1.0785,
+ "step": 866
+ },
+ {
+ "epoch": 1.59,
+ "grad_norm": 0.7091004801982537,
+ "learning_rate": 4.2896083857471345e-05,
+ "loss": 1.09,
+ "step": 867
+ },
+ {
+ "epoch": 1.59,
+ "grad_norm": 0.5281839474720389,
+ "learning_rate": 4.281104755781172e-05,
+ "loss": 1.1193,
+ "step": 868
+ },
+ {
+ "epoch": 1.59,
+ "grad_norm": 0.6717068342854466,
+ "learning_rate": 4.272599848868402e-05,
+ "loss": 1.0878,
+ "step": 869
+ },
+ {
+ "epoch": 1.59,
+ "grad_norm": 1.2715992187857506,
+ "learning_rate": 4.264093703643223e-05,
+ "loss": 1.1393,
+ "step": 870
+ },
+ {
+ "epoch": 1.59,
+ "grad_norm": 0.7459370091738186,
+ "learning_rate": 4.255586358745662e-05,
+ "loss": 1.1329,
+ "step": 871
+ },
+ {
+ "epoch": 1.59,
+ "grad_norm": 0.5375765240515081,
+ "learning_rate": 4.247077852821194e-05,
+ "loss": 1.1095,
+ "step": 872
+ },
+ {
+ "epoch": 1.6,
+ "grad_norm": 2.800301901231134,
+ "learning_rate": 4.2385682245205685e-05,
+ "loss": 1.1454,
+ "step": 873
+ },
+ {
+ "epoch": 1.6,
+ "grad_norm": 1.0380617847836733,
+ "learning_rate": 4.230057512499634e-05,
+ "loss": 1.1485,
+ "step": 874
+ },
+ {
+ "epoch": 1.6,
+ "grad_norm": 0.4520839568590167,
+ "learning_rate": 4.221545755419159e-05,
+ "loss": 1.1312,
+ "step": 875
+ },
+ {
+ "epoch": 1.6,
+ "grad_norm": 0.40517807692679086,
+ "learning_rate": 4.2130329919446646e-05,
+ "loss": 1.0847,
+ "step": 876
+ },
+ {
+ "epoch": 1.6,
+ "grad_norm": 0.4087992479261267,
+ "learning_rate": 4.20451926074624e-05,
+ "loss": 1.0514,
+ "step": 877
+ },
+ {
+ "epoch": 1.61,
+ "grad_norm": 0.4585377147848289,
+ "learning_rate": 4.196004600498369e-05,
+ "loss": 1.0804,
+ "step": 878
+ },
+ {
+ "epoch": 1.61,
+ "grad_norm": 0.4273068731511255,
+ "learning_rate": 4.1874890498797605e-05,
+ "loss": 1.1109,
+ "step": 879
+ },
+ {
+ "epoch": 1.61,
+ "grad_norm": 0.8002075505518079,
+ "learning_rate": 4.178972647573163e-05,
+ "loss": 1.0667,
+ "step": 880
+ },
+ {
+ "epoch": 1.61,
+ "grad_norm": 1.017993355567939,
+ "learning_rate": 4.1704554322651975e-05,
+ "loss": 1.0822,
+ "step": 881
+ },
+ {
+ "epoch": 1.61,
+ "grad_norm": 0.5360398768749924,
+ "learning_rate": 4.161937442646176e-05,
+ "loss": 1.1081,
+ "step": 882
+ },
+ {
+ "epoch": 1.61,
+ "grad_norm": 0.4384721008322173,
+ "learning_rate": 4.1534187174099285e-05,
+ "loss": 1.0875,
+ "step": 883
+ },
+ {
+ "epoch": 1.62,
+ "grad_norm": 0.4847183493614364,
+ "learning_rate": 4.1448992952536275e-05,
+ "loss": 1.1283,
+ "step": 884
+ },
+ {
+ "epoch": 1.62,
+ "grad_norm": 0.4445130532575715,
+ "learning_rate": 4.136379214877609e-05,
+ "loss": 1.0729,
+ "step": 885
+ },
+ {
+ "epoch": 1.62,
+ "grad_norm": 0.4251292394390122,
+ "learning_rate": 4.127858514985203e-05,
+ "loss": 1.0,
+ "step": 886
+ },
+ {
+ "epoch": 1.62,
+ "grad_norm": 0.41930406317728264,
+ "learning_rate": 4.1193372342825494e-05,
+ "loss": 1.0231,
+ "step": 887
+ },
+ {
+ "epoch": 1.62,
+ "grad_norm": 0.5672205934273074,
+ "learning_rate": 4.1108154114784275e-05,
+ "loss": 1.1296,
+ "step": 888
+ },
+ {
+ "epoch": 1.63,
+ "grad_norm": 0.4653761886235837,
+ "learning_rate": 4.102293085284083e-05,
+ "loss": 1.101,
+ "step": 889
+ },
+ {
+ "epoch": 1.63,
+ "grad_norm": 0.4240672595320848,
+ "learning_rate": 4.0937702944130426e-05,
+ "loss": 1.1192,
+ "step": 890
+ },
+ {
+ "epoch": 1.63,
+ "grad_norm": 0.5015307479291147,
+ "learning_rate": 4.085247077580948e-05,
+ "loss": 1.1791,
+ "step": 891
+ },
+ {
+ "epoch": 1.63,
+ "grad_norm": 0.40113831320559956,
+ "learning_rate": 4.076723473505374e-05,
+ "loss": 1.1379,
+ "step": 892
+ },
+ {
+ "epoch": 1.63,
+ "grad_norm": 0.35710296877921505,
+ "learning_rate": 4.068199520905655e-05,
+ "loss": 1.0849,
+ "step": 893
+ },
+ {
+ "epoch": 1.64,
+ "grad_norm": 0.4584402111092144,
+ "learning_rate": 4.059675258502709e-05,
+ "loss": 1.1911,
+ "step": 894
+ },
+ {
+ "epoch": 1.64,
+ "grad_norm": 0.425677789826556,
+ "learning_rate": 4.05115072501886e-05,
+ "loss": 1.1332,
+ "step": 895
+ },
+ {
+ "epoch": 1.64,
+ "grad_norm": 0.3757380288097042,
+ "learning_rate": 4.0426259591776645e-05,
+ "loss": 1.0903,
+ "step": 896
+ },
+ {
+ "epoch": 1.64,
+ "grad_norm": 0.34543617916988756,
+ "learning_rate": 4.0341009997037356e-05,
+ "loss": 1.0988,
+ "step": 897
+ },
+ {
+ "epoch": 1.64,
+ "grad_norm": 0.46961952093071074,
+ "learning_rate": 4.025575885322563e-05,
+ "loss": 1.1614,
+ "step": 898
+ },
+ {
+ "epoch": 1.64,
+ "grad_norm": 0.3774699370118855,
+ "learning_rate": 4.0170506547603427e-05,
+ "loss": 1.0806,
+ "step": 899
+ },
+ {
+ "epoch": 1.65,
+ "grad_norm": 0.3971851117572538,
+ "learning_rate": 4.008525346743797e-05,
+ "loss": 1.1117,
+ "step": 900
+ },
+ {
+ "epoch": 1.65,
+ "grad_norm": 0.42110141019514874,
+ "learning_rate": 4e-05,
+ "loss": 1.0844,
+ "step": 901
+ },
+ {
+ "epoch": 1.65,
+ "grad_norm": 0.5370519120028358,
+ "learning_rate": 3.991474653256204e-05,
+ "loss": 1.1884,
+ "step": 902
+ },
+ {
+ "epoch": 1.65,
+ "grad_norm": 0.3457424153715587,
+ "learning_rate": 3.982949345239658e-05,
+ "loss": 1.0358,
+ "step": 903
+ },
+ {
+ "epoch": 1.65,
+ "grad_norm": 0.4743219985978888,
+ "learning_rate": 3.974424114677437e-05,
+ "loss": 1.1326,
+ "step": 904
+ },
+ {
+ "epoch": 1.66,
+ "grad_norm": 0.43983040513890376,
+ "learning_rate": 3.965899000296266e-05,
+ "loss": 1.0642,
+ "step": 905
+ },
+ {
+ "epoch": 1.66,
+ "grad_norm": 0.3703570198161374,
+ "learning_rate": 3.957374040822335e-05,
+ "loss": 1.0856,
+ "step": 906
+ },
+ {
+ "epoch": 1.66,
+ "grad_norm": 0.4293256377520691,
+ "learning_rate": 3.948849274981141e-05,
+ "loss": 1.0882,
+ "step": 907
+ },
+ {
+ "epoch": 1.66,
+ "grad_norm": 0.4357013405185512,
+ "learning_rate": 3.940324741497291e-05,
+ "loss": 1.088,
+ "step": 908
+ },
+ {
+ "epoch": 1.66,
+ "grad_norm": 0.33377681350860733,
+ "learning_rate": 3.9318004790943465e-05,
+ "loss": 1.0089,
+ "step": 909
+ },
+ {
+ "epoch": 1.66,
+ "grad_norm": 0.4813359776701383,
+ "learning_rate": 3.923276526494627e-05,
+ "loss": 1.1908,
+ "step": 910
+ },
+ {
+ "epoch": 1.67,
+ "grad_norm": 0.37882502754714165,
+ "learning_rate": 3.9147529224190536e-05,
+ "loss": 1.0491,
+ "step": 911
+ },
+ {
+ "epoch": 1.67,
+ "grad_norm": 0.36231124827391586,
+ "learning_rate": 3.906229705586959e-05,
+ "loss": 1.1129,
+ "step": 912
+ },
+ {
+ "epoch": 1.67,
+ "grad_norm": 0.4042943319927394,
+ "learning_rate": 3.89770691471592e-05,
+ "loss": 1.0862,
+ "step": 913
+ },
+ {
+ "epoch": 1.67,
+ "grad_norm": 0.3947588371337343,
+ "learning_rate": 3.889184588521573e-05,
+ "loss": 1.0938,
+ "step": 914
+ },
+ {
+ "epoch": 1.67,
+ "grad_norm": 0.3591561376381207,
+ "learning_rate": 3.880662765717453e-05,
+ "loss": 1.0899,
+ "step": 915
+ },
+ {
+ "epoch": 1.68,
+ "grad_norm": 0.38267733601616066,
+ "learning_rate": 3.8721414850147985e-05,
+ "loss": 1.1599,
+ "step": 916
+ },
+ {
+ "epoch": 1.68,
+ "grad_norm": 0.41595090046831623,
+ "learning_rate": 3.8636207851223924e-05,
+ "loss": 1.115,
+ "step": 917
+ },
+ {
+ "epoch": 1.68,
+ "grad_norm": 0.36845297049900755,
+ "learning_rate": 3.855100704746374e-05,
+ "loss": 1.1011,
+ "step": 918
+ },
+ {
+ "epoch": 1.68,
+ "grad_norm": 0.34445156716382197,
+ "learning_rate": 3.8465812825900715e-05,
+ "loss": 1.0995,
+ "step": 919
+ },
+ {
+ "epoch": 1.68,
+ "grad_norm": 0.4127858136049686,
+ "learning_rate": 3.838062557353825e-05,
+ "loss": 1.0781,
+ "step": 920
+ },
+ {
+ "epoch": 1.68,
+ "grad_norm": 0.3797549012325668,
+ "learning_rate": 3.8295445677348025e-05,
+ "loss": 1.1574,
+ "step": 921
+ },
+ {
+ "epoch": 1.69,
+ "grad_norm": 0.38104077698450556,
+ "learning_rate": 3.8210273524268375e-05,
+ "loss": 1.0465,
+ "step": 922
+ },
+ {
+ "epoch": 1.69,
+ "grad_norm": 0.4351498698357752,
+ "learning_rate": 3.8125109501202395e-05,
+ "loss": 1.0854,
+ "step": 923
+ },
+ {
+ "epoch": 1.69,
+ "grad_norm": 0.4593568604690429,
+ "learning_rate": 3.803995399501632e-05,
+ "loss": 1.1704,
+ "step": 924
+ },
+ {
+ "epoch": 1.69,
+ "grad_norm": 0.4225870590501897,
+ "learning_rate": 3.795480739253761e-05,
+ "loss": 1.145,
+ "step": 925
+ },
+ {
+ "epoch": 1.69,
+ "grad_norm": 0.42864985402444117,
+ "learning_rate": 3.786967008055337e-05,
+ "loss": 1.131,
+ "step": 926
+ },
+ {
+ "epoch": 1.7,
+ "grad_norm": 0.357907258574411,
+ "learning_rate": 3.7784542445808414e-05,
+ "loss": 1.1103,
+ "step": 927
+ },
+ {
+ "epoch": 1.7,
+ "grad_norm": 0.34017234728326645,
+ "learning_rate": 3.769942487500368e-05,
+ "loss": 0.9975,
+ "step": 928
+ },
+ {
+ "epoch": 1.7,
+ "grad_norm": 0.40014710134570614,
+ "learning_rate": 3.761431775479432e-05,
+ "loss": 1.1227,
+ "step": 929
+ },
+ {
+ "epoch": 1.7,
+ "grad_norm": 0.3602742664743244,
+ "learning_rate": 3.752922147178807e-05,
+ "loss": 1.0503,
+ "step": 930
+ },
+ {
+ "epoch": 1.7,
+ "grad_norm": 0.3561228928737548,
+ "learning_rate": 3.744413641254339e-05,
+ "loss": 1.0595,
+ "step": 931
+ },
+ {
+ "epoch": 1.7,
+ "grad_norm": 0.35069548036479187,
+ "learning_rate": 3.735906296356778e-05,
+ "loss": 1.1939,
+ "step": 932
+ },
+ {
+ "epoch": 1.71,
+ "grad_norm": 0.3531658852904023,
+ "learning_rate": 3.727400151131599e-05,
+ "loss": 1.0988,
+ "step": 933
+ },
+ {
+ "epoch": 1.71,
+ "grad_norm": 9.914829476803295,
+ "learning_rate": 3.71889524421883e-05,
+ "loss": 1.2786,
+ "step": 934
+ },
+ {
+ "epoch": 1.71,
+ "grad_norm": 0.3879290707659155,
+ "learning_rate": 3.710391614252867e-05,
+ "loss": 1.1563,
+ "step": 935
+ },
+ {
+ "epoch": 1.71,
+ "grad_norm": 0.41634454015416344,
+ "learning_rate": 3.701889299862314e-05,
+ "loss": 1.1407,
+ "step": 936
+ },
+ {
+ "epoch": 1.71,
+ "grad_norm": 0.43010188488928786,
+ "learning_rate": 3.6933883396697936e-05,
+ "loss": 1.0721,
+ "step": 937
+ },
+ {
+ "epoch": 1.72,
+ "grad_norm": 1.6679642679951394,
+ "learning_rate": 3.684888772291777e-05,
+ "loss": 1.0999,
+ "step": 938
+ },
+ {
+ "epoch": 1.72,
+ "grad_norm": 1350.9989735260963,
+ "learning_rate": 3.676390636338411e-05,
+ "loss": 1.7498,
+ "step": 939
+ },
+ {
+ "epoch": 1.72,
+ "grad_norm": 2.684772362538658,
+ "learning_rate": 3.667893970413337e-05,
+ "loss": 1.146,
+ "step": 940
+ },
+ {
+ "epoch": 1.72,
+ "grad_norm": 178.0069375946376,
+ "learning_rate": 3.659398813113522e-05,
+ "loss": 1.2453,
+ "step": 941
+ },
+ {
+ "epoch": 1.72,
+ "grad_norm": 155.03254746503782,
+ "learning_rate": 3.650905203029075e-05,
+ "loss": 1.1225,
+ "step": 942
+ },
+ {
+ "epoch": 1.72,
+ "grad_norm": 1312.1744895576157,
+ "learning_rate": 3.642413178743083e-05,
+ "loss": 1.7129,
+ "step": 943
+ },
+ {
+ "epoch": 1.73,
+ "grad_norm": 217.83449465912037,
+ "learning_rate": 3.633922778831423e-05,
+ "loss": 1.4892,
+ "step": 944
+ },
+ {
+ "epoch": 1.73,
+ "grad_norm": 101.38660805063398,
+ "learning_rate": 3.6254340418625975e-05,
+ "loss": 1.2221,
+ "step": 945
+ },
+ {
+ "epoch": 1.73,
+ "grad_norm": 576.3054796798854,
+ "learning_rate": 3.6169470063975536e-05,
+ "loss": 1.4428,
+ "step": 946
+ },
+ {
+ "epoch": 1.73,
+ "grad_norm": 30.78168481179688,
+ "learning_rate": 3.608461710989509e-05,
+ "loss": 1.1431,
+ "step": 947
+ },
+ {
+ "epoch": 1.73,
+ "grad_norm": 4.931486655346698,
+ "learning_rate": 3.5999781941837755e-05,
+ "loss": 1.0992,
+ "step": 948
+ },
+ {
+ "epoch": 1.74,
+ "grad_norm": 4.857687363744206,
+ "learning_rate": 3.591496494517589e-05,
+ "loss": 1.0505,
+ "step": 949
+ },
+ {
+ "epoch": 1.74,
+ "grad_norm": 2.5500805511012374,
+ "learning_rate": 3.5830166505199284e-05,
+ "loss": 1.0775,
+ "step": 950
+ }
+ ],
+ "logging_steps": 1.0,
+ "max_steps": 1638,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 3,
+ "save_steps": 50,
+ "total_flos": 985113563234304.0,
+ "train_batch_size": 1,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/v1/checkpoint-950/training_args.bin b/v1/checkpoint-950/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..c5d2416a3b70bb5260978ec9996f00154a724ba7
--- /dev/null
+++ b/v1/checkpoint-950/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b22e8f9d51a16d03a2c506fa3d1eafa8f4b1ae992992c2086a4d435ffd97387e
+size 6712
diff --git a/v1/checkpoint-950/zero_to_fp32.py b/v1/checkpoint-950/zero_to_fp32.py
new file mode 100755
index 0000000000000000000000000000000000000000..24cc342e78d1a006c782b3a4cd68d9ce786d8fd8
--- /dev/null
+++ b/v1/checkpoint-950/zero_to_fp32.py
@@ -0,0 +1,604 @@
+#!/usr/bin/env python
+
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+# This script extracts fp32 consolidated weights from a zero 1, 2 and 3 DeepSpeed checkpoints. It gets
+# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
+# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
+# application.
+#
+# example: python zero_to_fp32.py . pytorch_model.bin
+
+import argparse
+import torch
+import glob
+import math
+import os
+import re
+from collections import OrderedDict
+from dataclasses import dataclass
+
+# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
+# DeepSpeed data structures it has to be available in the current python environment.
+from deepspeed.utils import logger
+from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
+
+
@dataclass
class zero_model_state:
    """Per-rank model-state pieces needed to rebuild a consolidated fp32 state_dict."""
    buffers: dict           # buffer name -> fp32 tensor (restored from fp16 if needed)
    param_shapes: list      # list of {param name -> shape} dicts, one per param group
    shared_params: list     # [alias_name, source_name] pairs to re-link after merging
    ds_version: str         # deepspeed version string the checkpoint was written with (may be None)
    # NOTE: the original annotated these fields with `dict()` — a *call* evaluating to `{}`,
    # not a type.  Plain type names are the correct dataclass annotations.
    frozen_param_shapes: dict      # frozen param name -> shape, or None if none were frozen
    frozen_param_fragments: dict   # frozen param name -> tensor fragment, or None
+
+
# Module-level debug flag; flipped on by the -d/--debug CLI switch in __main__ below.
debug = 0

# All checkpoint tensors are materialized on CPU so the conversion needs no GPU.
device = torch.device('cpu')
+
+
def atoi(text):
    """Return *text* converted to an int when it is all digits, otherwise unchanged."""
    if text.isdigit():
        return int(text)
    return text
+
+
def natural_keys(text):
    """Sort key that orders embedded integers numerically ("human order").

    alist.sort(key=natural_keys) sorts in human order
    http://nedbatchelder.com/blog/200712/human_sorting.html
    (See Toothy's implementation in the comments)
    """
    # split on digit runs (kept via the capture group), converting each run to int
    return [int(chunk) if chunk.isdigit() else chunk for chunk in re.split(r'(\d+)', text)]
+
+
def get_model_state_file(checkpoint_dir, zero_stage):
    """Return the path of the rank-0 model-states file inside *checkpoint_dir*.

    Args:
        checkpoint_dir: directory holding the per-rank checkpoint files
        zero_stage: ZeRO optimization stage the checkpoint was written with (1, 2 or 3)

    Returns:
        path to the model-states file (there should be only one)

    Raises:
        FileNotFoundError: if the directory or the expected file does not exist
        ValueError: if *zero_stage* is not a recognized stage
    """
    if not os.path.isdir(checkpoint_dir):
        raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")

    # stage 1/2 and stage 3 checkpoints use different file-naming schemes
    if zero_stage <= 2:
        file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
    elif zero_stage == 3:
        file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
    else:
        # previously fell through with `file` unbound, raising a confusing NameError
        raise ValueError(f"unknown zero stage {zero_stage}")

    if not os.path.exists(file):
        raise FileNotFoundError(f"can't find model states file at '{file}'")

    return file
+
+
def get_checkpoint_files(checkpoint_dir, glob_pattern):
    """Return all files matching *glob_pattern* under *checkpoint_dir*, naturally sorted.

    Raises FileNotFoundError when nothing matches.
    """
    # XXX: need to test that this simple glob rule works for multi-node setup too
    matches = glob.glob(os.path.join(checkpoint_dir, glob_pattern))

    if not matches:
        raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")

    return sorted(matches, key=natural_keys)
+
+
def get_optim_files(checkpoint_dir):
    """Return the naturally-sorted list of per-rank optimizer-states files."""
    return get_checkpoint_files(checkpoint_dir, glob_pattern="*_optim_states.pt")
+
+
def get_model_state_files(checkpoint_dir):
    """Return the naturally-sorted list of per-rank model-states files."""
    return get_checkpoint_files(checkpoint_dir, glob_pattern="*_model_states.pt")
+
+
def parse_model_states(files):
    """Load each *_model_states.pt file and distill it into a zero_model_state record.

    Args:
        files: list of per-rank model-states checkpoint paths

    Returns:
        list of zero_model_state, one per rank, in the same order as *files*

    Raises:
        ValueError: if a file lacks the buffer-names key, i.e. is not a model state checkpoint
    """
    zero_model_states = []
    for file in files:
        state_dict = torch.load(file, map_location=device)

        if BUFFER_NAMES not in state_dict:
            raise ValueError(f"{file} is not a model state checkpoint")
        buffer_names = state_dict[BUFFER_NAMES]
        if debug:
            print("Found buffers:", buffer_names)

        # recover just the buffers while restoring them to fp32 if they were saved in fp16
        buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
        param_shapes = state_dict[PARAM_SHAPES]

        # collect parameters that are included in param_shapes
        # NOTE(review): param_names is built up but never used after this loop
        param_names = []
        for s in param_shapes:
            for name in s.keys():
                param_names.append(name)

        # update with frozen parameters
        frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
        if frozen_param_shapes is not None:
            if debug:
                print(f"Found frozen_param_shapes: {frozen_param_shapes}")
            param_names += list(frozen_param_shapes.keys())

        # handle shared params: stored as a name->name mapping, kept as [alias, source] pairs
        shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]

        # .get: the version key may be absent in checkpoints from older deepspeed releases
        ds_version = state_dict.get(DS_VERSION, None)

        frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)

        z_model_state = zero_model_state(buffers=buffers,
                                         param_shapes=param_shapes,
                                         shared_params=shared_params,
                                         ds_version=ds_version,
                                         frozen_param_shapes=frozen_param_shapes,
                                         frozen_param_fragments=frozen_param_fragments)
        zero_model_states.append(z_model_state)

    return zero_model_states
+
+
def parse_optim_states(files, ds_checkpoint_dir):
    """Load all per-rank optimizer-states files and extract the fp32 master weights.

    Args:
        files: naturally-sorted list of *_optim_states.pt paths (one per rank)
        ds_checkpoint_dir: the checkpoint directory (used only for error messages)

    Returns:
        (zero_stage, world_size, fp32_flat_groups) where fp32_flat_groups holds one
        flat fp32 tensor (stage 3) or list of per-group tensors (stage 1/2) per rank

    Raises:
        ValueError: if the files are not a zero checkpoint, the rank count does not
            match the recorded partition count, or the zero stage is unknown
    """
    total_files = len(files)
    state_dicts = []
    for f in files:
        state_dict = torch.load(f, map_location=device)
        # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights
        # and also handle the case where it was already removed by another helper script
        state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
        state_dicts.append(state_dict)

    # idiomatic membership test (was `not ZERO_STAGE in ...`)
    if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
        raise ValueError(f"{files[0]} is not a zero checkpoint")
    zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
    world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]

    # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
    # parameters can be different from data parallelism for non-expert parameters. So we can just
    # use the max of the partition_count to get the dp world_size.
    if isinstance(world_size, list):
        world_size = max(world_size)

    if world_size != total_files:
        raise ValueError(
            f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
            "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
        )

    # the groups are named differently in each stage
    if zero_stage <= 2:
        fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
    elif zero_stage == 3:
        fp32_groups_key = FP32_FLAT_GROUPS
    else:
        raise ValueError(f"unknown zero stage {zero_stage}")

    if zero_stage <= 2:
        fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
    elif zero_stage == 3:
        # if there is more than one param group, there will be multiple flattened tensors - one
        # flattened tensor per group - for simplicity merge them into a single tensor
        #
        # XXX: could make the script more memory efficient for when there are multiple groups - it
        # will require matching the sub-lists of param_shapes for each param group flattened tensor
        fp32_flat_groups = [
            torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
        ]

    return zero_stage, world_size, fp32_flat_groups
+
+
def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
    """
    Returns fp32 state_dict reconstructed from ds checkpoint

    Args:
        - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
        - ``exclude_frozen_parameters``: when True, frozen parameters are left out of the result

    """
    print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")

    zero_stage, world_size, fp32_flat_groups = parse_optim_states(get_optim_files(ds_checkpoint_dir),
                                                                  ds_checkpoint_dir)
    print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")

    zero_model_states = parse_model_states(get_model_state_files(ds_checkpoint_dir))
    print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')

    # stage 1/2 and stage 3 flatten/partition parameters differently, so dispatch
    merge_args = (world_size, fp32_flat_groups, zero_model_states, exclude_frozen_parameters)
    if zero_stage <= 2:
        return _get_fp32_state_dict_from_zero2_checkpoint(*merge_args)
    elif zero_stage == 3:
        return _get_fp32_state_dict_from_zero3_checkpoint(*merge_args)
+
+
def _zero2_merge_frozen_params(state_dict, zero_model_states):
    """Copy frozen (untrained) parameters into *state_dict* for a ZeRO-1/2 checkpoint.

    No-op when the checkpoint recorded no frozen parameters.  Rank 0's fragments are
    copied over verbatim — presumably they are already full tensors under ZeRO-1/2
    (no re-stitching is performed here); confirm against the writer side.
    """
    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
        return

    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
    frozen_param_fragments = zero_model_states[0].frozen_param_fragments

    if debug:
        num_elem = sum(s.numel() for s in frozen_param_shapes.values())
        print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

    wanted_params = len(frozen_param_shapes)
    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
    avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
    print(f'Frozen params: Have {avail_numel} numels to process.')
    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params = 0
    total_numel = 0
    for name, shape in frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        # straight copy of rank 0's fragment for this parameter
        state_dict[name] = frozen_param_fragments[name]

        if debug:
            print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _has_callable(obj, fn):
+ attr = getattr(obj, fn, None)
+ return callable(attr)
+
+
def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    """Re-slice the per-rank flattened fp32 partitions back into named parameters (ZeRO-1/2).

    Mutates *state_dict* in place, adding one tensor per trainable parameter.
    Raises ValueError when the consumed element count (after alignment padding)
    does not match what the merged flat vectors provide.
    """
    param_shapes = zero_model_states[0].param_shapes

    # Reconstruction protocol:
    #
    # XXX: document this

    if debug:
        for i in range(world_size):
            for j in range(len(fp32_flat_groups[0])):
                print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")

    # XXX: memory usage doubles here (zero2)
    # concatenate each param group's per-rank partitions into one full flat vector
    num_param_groups = len(fp32_flat_groups[0])
    merged_single_partition_of_fp32_groups = []
    for i in range(num_param_groups):
        merged_partitions = [sd[i] for sd in fp32_flat_groups]
        full_single_fp32_vector = torch.cat(merged_partitions, 0)
        merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
    avail_numel = sum(
        [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])

    if debug:
        wanted_params = sum([len(shapes) for shapes in param_shapes])
        wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
        # not asserting if there is a mismatch due to possible padding
        print(f"Have {avail_numel} numels to process.")
        print(f"Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    total_numel = 0
    total_params = 0
    for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
        offset = 0
        avail_numel = full_single_fp32_vector.numel()
        for name, shape in shapes.items():

            # shape may be a torch.Size (has .numel) or a plain sequence of dims
            unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
            total_numel += unpartitioned_numel
            total_params += 1

            if debug:
                print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
            # carve this param's slice out of the flat vector and reshape it
            state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
            offset += unpartitioned_numel

        # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
        # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
        # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
        # live optimizer object, so we are checking that the numbers are within the right range
        align_to = 2 * world_size

        def zero2_align(x):
            return align_to * math.ceil(x / align_to)

        if debug:
            print(f"original offset={offset}, avail_numel={avail_numel}")

        offset = zero2_align(offset)
        avail_numel = zero2_align(avail_numel)

        if debug:
            print(f"aligned offset={offset}, avail_numel={avail_numel}")

        # Sanity check
        if offset != avail_numel:
            raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
+
+
def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                               exclude_frozen_parameters):
    """Assemble the consolidated fp32 state_dict for a ZeRO-1/2 checkpoint."""
    state_dict = OrderedDict()
    rank0 = zero_model_states[0]

    # module buffers (already restored to fp32 by parse_model_states)
    state_dict.update(rank0.buffers)
    if debug:
        print(f"added {len(rank0.buffers)} buffers")

    if not exclude_frozen_parameters:
        _zero2_merge_frozen_params(state_dict, zero_model_states)

    _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters: each alias points at an already-reconstructed tensor
    for alias, source in rank0.shared_params:
        if source in state_dict:
            state_dict[alias] = state_dict[source]

    return state_dict
+
+
def zero3_partitioned_param_info(unpartitioned_numel, world_size):
    """Return (partitioned_numel, padding_numel) for a param split across *world_size* ranks.

    Args:
        unpartitioned_numel: total element count of the full parameter
        world_size: number of ranks the parameter was partitioned over

    Returns:
        partitioned_numel: elements held per rank (ceil division)
        padding_numel: pad elements appended to the last rank's partition
    """
    # Pure integer arithmetic: the original `math.ceil(a / b)` goes through float
    # division, which can round incorrectly once numel exceeds 2**53; divmod is exact.
    per_rank, remainder = divmod(unpartitioned_numel, world_size)
    if remainder:
        return per_rank + 1, world_size - remainder
    return per_rank, 0
+
+
def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
    """Stitch frozen-parameter fragments from every rank back into full tensors (ZeRO-3).

    No-op when the checkpoint recorded no frozen parameters.  Mutates *state_dict*
    in place, adding one tensor per frozen parameter.
    """
    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
        return

    if debug:
        for i in range(world_size):
            num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
            print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
    wanted_params = len(frozen_param_shapes)
    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
    # every rank holds an equally-sized fragment, hence the * world_size
    avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
    print(f'Frozen params: Have {avail_numel} numels to process.')
    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params = 0
    total_numel = 0
    for name, shape in zero_model_states[0].frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        # concatenate the per-rank fragments, drop any tail padding, then reshape
        param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
        state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)

        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    """Re-assemble each trainable parameter from its per-rank ZeRO-3 partitions.

    Mutates *state_dict* in place.  Raises ValueError when the consumed element
    count does not match what the flat groups provide.
    """
    param_shapes = zero_model_states[0].param_shapes
    avail_numel = fp32_flat_groups[0].numel() * world_size
    # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
    # param, re-consolidating each param, while dealing with padding if any

    # merge list of dicts, preserving order
    param_shapes = {k: v for d in param_shapes for k, v in d.items()}

    if debug:
        for i in range(world_size):
            print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")

    wanted_params = len(param_shapes)
    wanted_numel = sum(shape.numel() for shape in param_shapes.values())
    # not asserting if there is a mismatch due to possible padding
    # NOTE(review): avail_numel is recomputed here with the exact same expression as above
    avail_numel = fp32_flat_groups[0].numel() * world_size
    print(f"Trainable params: Have {avail_numel} numels to process.")
    print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    offset = 0
    total_numel = 0
    total_params = 0
    for name, shape in param_shapes.items():

        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel
        total_params += 1

        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

        # XXX: memory usage doubles here
        # gather this param's slice from every rank's flat group, concat, strip padding, reshape
        state_dict[name] = torch.cat(
            tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
            0).narrow(0, 0, unpartitioned_numel).view(shape)
        offset += partitioned_numel

    # offset tracked per-rank elements; scale up to compare against the global total
    offset *= world_size

    # Sanity check
    if offset != avail_numel:
        raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
+
+
def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                               exclude_frozen_parameters):
    """Assemble the consolidated fp32 state_dict for a ZeRO-3 checkpoint."""
    state_dict = OrderedDict()
    rank0 = zero_model_states[0]

    # module buffers (already restored to fp32 by parse_model_states)
    state_dict.update(rank0.buffers)
    if debug:
        print(f"added {len(rank0.buffers)} buffers")

    if not exclude_frozen_parameters:
        _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)

    _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters: each alias points at an already-reconstructed tensor
    for alias, source in rank0.shared_params:
        if source in state_dict:
            state_dict[alias] = state_dict[source]

    return state_dict
+
+
def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None, exclude_frozen_parameters=False):
    """
    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
    ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
    via a model hub.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
        - ``exclude_frozen_parameters``: exclude frozen parameters

    Returns:
        - pytorch ``state_dict``

    Raises:
        - ``ValueError``: when no tag was given and no 'latest' file exists
        - ``FileNotFoundError``: when the resolved tag sub-folder does not exist

    Note: this approach may not work if your application doesn't have sufficient free CPU memory and
    you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
    the checkpoint.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
        # do the training and checkpoint saving
        state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
        model = model.cpu() # move to cpu
        model.load_state_dict(state_dict)
        # submit to model hub or save the model to share with others

    In this example the ``model`` will no longer be usable in the deepspeed context of the same
    application. i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.

    """
    # resolve the tag from the 'latest' marker file when not given explicitly
    if tag is None:
        latest_path = os.path.join(checkpoint_dir, 'latest')
        if os.path.isfile(latest_path):
            with open(latest_path, 'r') as fd:
                tag = fd.read().strip()
        else:
            raise ValueError(f"Unable to find 'latest' file at {latest_path}")

    ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)

    if not os.path.isdir(ds_checkpoint_dir):
        raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")

    return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
+
+
def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None, exclude_frozen_parameters=False):
    """
    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
    loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
        - ``exclude_frozen_parameters``: exclude frozen parameters
    """
    # reconstruct in memory, then persist as a single regular pytorch file
    fp32_state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
                                                               tag=tag,
                                                               exclude_frozen_parameters=exclude_frozen_parameters)
    print(f"Saving fp32 state dict to {output_file}")
    torch.save(fp32_state_dict, output_file)
+
+
def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
    """
    1. Put the provided model to cpu
    2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
    3. Load it into the provided model

    Args:
        - ``model``: the model object to update
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``

    Returns:
        - ``model``: modified model

    Make sure you have plenty of CPU memory available before you call this function. If you don't
    have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
    conveniently placed for you in the checkpoint folder.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
        model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
        # submit to model hub or save the model to share with others

    Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context
    of the same application. i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    """
    # plain strings: these messages contain no placeholders, so f-strings were superfluous
    logger.info("Extracting fp32 weights")
    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)

    logger.info("Overwriting model with fp32 weights")
    model = model.cpu()
    # strict=False: the reconstructed dict may omit keys the model has — presumably
    # intentional (e.g. excluded frozen params); confirm before tightening
    model.load_state_dict(state_dict, strict=False)

    return model
+
+
# CLI entry point: convert a DeepSpeed ZeRO checkpoint directory into a single
# consolidated fp32 pytorch state_dict file.
if __name__ == "__main__":

    parser = argparse.ArgumentParser()
    parser.add_argument("checkpoint_dir",
                        type=str,
                        help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
    parser.add_argument(
        "output_file",
        type=str,
        help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)")
    parser.add_argument("-t",
                        "--tag",
                        type=str,
                        default=None,
                        help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
    parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
    parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
    args = parser.parse_args()

    # flip the module-level debug flag so all helpers above print their diagnostics
    debug = args.debug

    convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
                                               args.output_file,
                                               tag=args.tag,
                                               exclude_frozen_parameters=args.exclude_frozen_parameters)
diff --git a/v2/checkpoint-1050/README.md b/v2/checkpoint-1050/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..16b1eacdd9353dec380a08ee77ce6ed5ab50f12e
--- /dev/null
+++ b/v2/checkpoint-1050/README.md
@@ -0,0 +1,202 @@
+---
+library_name: peft
+base_model: gotzmann/uni
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+
+### Framework versions
+
+- PEFT 0.10.0
\ No newline at end of file
diff --git a/v2/checkpoint-1050/adapter_config.json b/v2/checkpoint-1050/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..832188d72d81e59dd2b5259e86f371199b441aca
--- /dev/null
+++ b/v2/checkpoint-1050/adapter_config.json
@@ -0,0 +1,31 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "gotzmann/uni",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 128,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 128,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "o_proj",
+ "k_proj",
+ "q_proj",
+ "v_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": true
+}
\ No newline at end of file
diff --git a/v2/checkpoint-1050/adapter_model.safetensors b/v2/checkpoint-1050/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..c86aaba92fa1d64245713ce2afb61584104eaff4
--- /dev/null
+++ b/v2/checkpoint-1050/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:217fd744f1fbd0031deb260cfe8b6a5c095e884c13fb70480f3de055c43b1cb8
+size 1048664848
diff --git a/v2/checkpoint-1050/global_step1050/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt b/v2/checkpoint-1050/global_step1050/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..18b9723df812454b900618fde9b36d293ff268e8
--- /dev/null
+++ b/v2/checkpoint-1050/global_step1050/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:394007078c0de5dcca5254567885c300c3f12ba420eac0a43150459872cd4267
+size 787270042
diff --git a/v2/checkpoint-1050/global_step1050/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt b/v2/checkpoint-1050/global_step1050/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..7f6e327b07cde7dcab11b05db43d71706d7ed024
--- /dev/null
+++ b/v2/checkpoint-1050/global_step1050/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b0cbb53e2ef4919f5383564a59b74013a7ef9320a837c986a45db786caa64d61
+size 787270042
diff --git a/v2/checkpoint-1050/global_step1050/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt b/v2/checkpoint-1050/global_step1050/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..7192bae007c6dc88aff595e70ac15cc92b0fb4ee
--- /dev/null
+++ b/v2/checkpoint-1050/global_step1050/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8e4cb38a298cc8c24af13599bb59ce27ed8ffb888d252eb66d62fdef266afef6
+size 787270042
diff --git a/v2/checkpoint-1050/global_step1050/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt b/v2/checkpoint-1050/global_step1050/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..03abc88387bf9d2f0294797261ebb88e75042bd1
--- /dev/null
+++ b/v2/checkpoint-1050/global_step1050/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:123b4fe3306db94c134957aed98c1d858e8f777b4f81482f7900dfec5c12815c
+size 787270042
diff --git a/v2/checkpoint-1050/global_step1050/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt b/v2/checkpoint-1050/global_step1050/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..5f87af2bbd79bdc9f3a5f35202b92a428fb04b00
--- /dev/null
+++ b/v2/checkpoint-1050/global_step1050/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4a770cfa1af3c42a99c80ab684b73da5a0a40487f6e2415b95cb68d899ed5f5e
+size 787270042
diff --git a/v2/checkpoint-1050/global_step1050/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt b/v2/checkpoint-1050/global_step1050/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..38eeb36272573b833d9afea0c07252133a259461
--- /dev/null
+++ b/v2/checkpoint-1050/global_step1050/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:64c2277b4f2212e18ea18640958d50a3b60e78f8773b45dacc7e60a5e190db07
+size 787270042
diff --git a/v2/checkpoint-1050/global_step1050/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt b/v2/checkpoint-1050/global_step1050/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..a2d01286f2725c48f3ffb02df1b876bc20e0e03f
--- /dev/null
+++ b/v2/checkpoint-1050/global_step1050/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:80f156db85cf3e7c1e87acf987d24e798401f0a3f076716fd2aa1ccab0c06fab
+size 787270042
diff --git a/v2/checkpoint-1050/global_step1050/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt b/v2/checkpoint-1050/global_step1050/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..a1207bd999f8515a87ef44e0c09faba89f436985
--- /dev/null
+++ b/v2/checkpoint-1050/global_step1050/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4f4f8ada59448f38f04df502f8031b3032e3a6305ed651718b08ebf41769ccbf
+size 787270042
diff --git a/v2/checkpoint-1050/global_step1050/zero_pp_rank_0_mp_rank_00_model_states.pt b/v2/checkpoint-1050/global_step1050/zero_pp_rank_0_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..9bb149842f0c52e742d84b64484866ee97d0db5b
--- /dev/null
+++ b/v2/checkpoint-1050/global_step1050/zero_pp_rank_0_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4b3f51e27f57bf07ab75a22b3be7686c834716ac6bf1e42e58696786749e40eb
+size 653742
diff --git a/v2/checkpoint-1050/global_step1050/zero_pp_rank_1_mp_rank_00_model_states.pt b/v2/checkpoint-1050/global_step1050/zero_pp_rank_1_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..1219ce5aa5f2d8540ecf3486a6ad722b4466a575
--- /dev/null
+++ b/v2/checkpoint-1050/global_step1050/zero_pp_rank_1_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4bc7d961b5a083a83f9206e13485480e5edd384db7a2eccd8f5de849a65b61f5
+size 653742
diff --git a/v2/checkpoint-1050/global_step1050/zero_pp_rank_2_mp_rank_00_model_states.pt b/v2/checkpoint-1050/global_step1050/zero_pp_rank_2_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..a1e506bf194d5bd563e7858c695a0dca57eb5b1a
--- /dev/null
+++ b/v2/checkpoint-1050/global_step1050/zero_pp_rank_2_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:414a5f5d4247b734ac16b08d4c1e5659697300fde198d5056b0505887c25a2b8
+size 653742
diff --git a/v2/checkpoint-1050/global_step1050/zero_pp_rank_3_mp_rank_00_model_states.pt b/v2/checkpoint-1050/global_step1050/zero_pp_rank_3_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..1ecc7612d7f59e3d15454c22604efc92a13b066b
--- /dev/null
+++ b/v2/checkpoint-1050/global_step1050/zero_pp_rank_3_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:be1b2039a29e8f23b7cad84b8eaf0024e18a2fbdcbc41fdb9a652e4b7e95bbba
+size 653742
diff --git a/v2/checkpoint-1050/global_step1050/zero_pp_rank_4_mp_rank_00_model_states.pt b/v2/checkpoint-1050/global_step1050/zero_pp_rank_4_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..112eaa5d108be50233d04f7818f3ff710d29f49b
--- /dev/null
+++ b/v2/checkpoint-1050/global_step1050/zero_pp_rank_4_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:58a942392d335b9d82ec508fea9e99a76ccf8a2ff510cb38c92c0b167fcf67fe
+size 653742
diff --git a/v2/checkpoint-1050/global_step1050/zero_pp_rank_5_mp_rank_00_model_states.pt b/v2/checkpoint-1050/global_step1050/zero_pp_rank_5_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..714f18ffa311c637f8bdce3df1110cbf6384db5e
--- /dev/null
+++ b/v2/checkpoint-1050/global_step1050/zero_pp_rank_5_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0a11536ae6ed3f56eb0983e22371db0076f2a0540b433142639548e46348cf36
+size 653742
diff --git a/v2/checkpoint-1050/global_step1050/zero_pp_rank_6_mp_rank_00_model_states.pt b/v2/checkpoint-1050/global_step1050/zero_pp_rank_6_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..982c2af2865dc7330afdf3a30411d22b48d449ec
--- /dev/null
+++ b/v2/checkpoint-1050/global_step1050/zero_pp_rank_6_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c76a9d81f76ff90f486b98ee2f90924bfa83ac298d3b1a865d2945e241717a6b
+size 653742
diff --git a/v2/checkpoint-1050/global_step1050/zero_pp_rank_7_mp_rank_00_model_states.pt b/v2/checkpoint-1050/global_step1050/zero_pp_rank_7_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..4660be82da4673bca83817646ebc658ec434cc74
--- /dev/null
+++ b/v2/checkpoint-1050/global_step1050/zero_pp_rank_7_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:31c89a7f8a58901e1ef71eb63cc914bb06f34c661e4657336851817a1cfa1ccb
+size 653742
diff --git a/v2/checkpoint-1050/latest b/v2/checkpoint-1050/latest
new file mode 100644
index 0000000000000000000000000000000000000000..9003e5f7e95704409b5d8f438ed7572043c8b9ad
--- /dev/null
+++ b/v2/checkpoint-1050/latest
@@ -0,0 +1 @@
+global_step1050
\ No newline at end of file
diff --git a/v2/checkpoint-1050/rng_state_0.pth b/v2/checkpoint-1050/rng_state_0.pth
new file mode 100644
index 0000000000000000000000000000000000000000..9dd2a62da4ca83b3b986d96dbf0eaeb82207ca93
--- /dev/null
+++ b/v2/checkpoint-1050/rng_state_0.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a0628a9017696045a3a29e9eaffc71e9262d855716e773c0c3be760a1fe85bc8
+size 15984
diff --git a/v2/checkpoint-1050/rng_state_1.pth b/v2/checkpoint-1050/rng_state_1.pth
new file mode 100644
index 0000000000000000000000000000000000000000..1ba5f3aba4388a582cd47f7f9e57cd5879b1cbd2
--- /dev/null
+++ b/v2/checkpoint-1050/rng_state_1.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:df342004a4d8e3626bf2a9f689fde7c8bfd6d995e14931f5496eda1f456cb6f2
+size 15984
diff --git a/v2/checkpoint-1050/rng_state_2.pth b/v2/checkpoint-1050/rng_state_2.pth
new file mode 100644
index 0000000000000000000000000000000000000000..27b0f7845c2b9530c3e6ed3ce232ff4e86b86122
--- /dev/null
+++ b/v2/checkpoint-1050/rng_state_2.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f02096eb4e8850b91490e80e4a042e2e60f71bd2abc6a269d62c271649cb77d2
+size 15984
diff --git a/v2/checkpoint-1050/rng_state_3.pth b/v2/checkpoint-1050/rng_state_3.pth
new file mode 100644
index 0000000000000000000000000000000000000000..fcfb583fc43c6dd4395671708744cfd18c419970
--- /dev/null
+++ b/v2/checkpoint-1050/rng_state_3.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:326c778d3d0e7e3d5665fa0a9ecd92986609c430da08b41611d6c05dc19815a8
+size 15984
diff --git a/v2/checkpoint-1050/rng_state_4.pth b/v2/checkpoint-1050/rng_state_4.pth
new file mode 100644
index 0000000000000000000000000000000000000000..7a8c64b1f15ac655b2be2a42fe61cabe2a877704
--- /dev/null
+++ b/v2/checkpoint-1050/rng_state_4.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d978dcb0c34e022ee6750e9d86814b8c82e4965d7e07662f35f06eeac12938f3
+size 15984
diff --git a/v2/checkpoint-1050/rng_state_5.pth b/v2/checkpoint-1050/rng_state_5.pth
new file mode 100644
index 0000000000000000000000000000000000000000..262e8187e6caeca12ef3b0aa923b12afd697e03d
--- /dev/null
+++ b/v2/checkpoint-1050/rng_state_5.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:01e83399aed1d9d173c3e07b2efa8530c956b62b2b68394c2ed0d43bd8bba9d1
+size 15984
diff --git a/v2/checkpoint-1050/rng_state_6.pth b/v2/checkpoint-1050/rng_state_6.pth
new file mode 100644
index 0000000000000000000000000000000000000000..72f794e31f8d3e0c63972e5076e1ed90c52087ba
--- /dev/null
+++ b/v2/checkpoint-1050/rng_state_6.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:606ab3ca92e3d20c327c69fdcce7f7e39bec2f2c3538b036088b255f917e3ba4
+size 15984
diff --git a/v2/checkpoint-1050/rng_state_7.pth b/v2/checkpoint-1050/rng_state_7.pth
new file mode 100644
index 0000000000000000000000000000000000000000..244e7fdaa1cef2e82bd4e16afb10f32f68318bcc
--- /dev/null
+++ b/v2/checkpoint-1050/rng_state_7.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1276a987dd22c9093fec58921ba19f340a28f18bff635cc01324e09a3c37ac3a
+size 15984
diff --git a/v2/checkpoint-1050/scheduler.pt b/v2/checkpoint-1050/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..706e20a94a7e2991f07bfa5aa44e2d6c47b72da7
--- /dev/null
+++ b/v2/checkpoint-1050/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:97feef438f4431a4ad6f82ff4896fbb93af122cfbe57a1b829aa9b6b13da43a5
+size 1064
diff --git a/v2/checkpoint-1050/special_tokens_map.json b/v2/checkpoint-1050/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..72ecfeeb7e14d244c936169d2ed139eeae235ef1
--- /dev/null
+++ b/v2/checkpoint-1050/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "",
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/v2/checkpoint-1050/tokenizer.model b/v2/checkpoint-1050/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..6c00c742ce03c627d6cd5b795984876fa49fa899
--- /dev/null
+++ b/v2/checkpoint-1050/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
diff --git a/v2/checkpoint-1050/tokenizer_config.json b/v2/checkpoint-1050/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..bb5a9f09d8c0f3c32c66fc6118fe5c76c5c6fd90
--- /dev/null
+++ b/v2/checkpoint-1050/tokenizer_config.json
@@ -0,0 +1,45 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "",
+ "chat_template": "{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ '' + '### System:\\n\\n' + system_message }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '\\n\\n### Human:\\n\\n' + content }}{% elif message['role'] == 'assistant' %}{{ '\\n\\n### Assistant:\\n\\n' + content + '' }}{% endif %}{% endfor %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "",
+ "legacy": false,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "split_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "",
+ "use_default_system_prompt": false
+}
diff --git a/v2/checkpoint-1050/trainer_state.json b/v2/checkpoint-1050/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..81fa10b387605012feaf77e98c4217c7523a9e35
--- /dev/null
+++ b/v2/checkpoint-1050/trainer_state.json
@@ -0,0 +1,7371 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 1.4572473708276177,
+ "eval_steps": 500,
+ "global_step": 1050,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.0,
+ "grad_norm": 0.849355824164473,
+ "learning_rate": 4.878048780487805e-07,
+ "loss": 1.3655,
+ "step": 1
+ },
+ {
+ "epoch": 0.0,
+ "grad_norm": 10.01567518957158,
+ "learning_rate": 9.75609756097561e-07,
+ "loss": 1.5767,
+ "step": 2
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.6466000875559635,
+ "learning_rate": 1.4634146341463414e-06,
+ "loss": 1.3913,
+ "step": 3
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.6644565932010504,
+ "learning_rate": 1.951219512195122e-06,
+ "loss": 1.3218,
+ "step": 4
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.571354207588475,
+ "learning_rate": 2.4390243902439027e-06,
+ "loss": 1.3597,
+ "step": 5
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.31036262839244955,
+ "learning_rate": 2.926829268292683e-06,
+ "loss": 1.2832,
+ "step": 6
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.2622135027188184,
+ "learning_rate": 3.414634146341464e-06,
+ "loss": 1.2161,
+ "step": 7
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.296824630261661,
+ "learning_rate": 3.902439024390244e-06,
+ "loss": 1.2985,
+ "step": 8
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.2557267467361569,
+ "learning_rate": 4.390243902439025e-06,
+ "loss": 1.3175,
+ "step": 9
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.23418939513890769,
+ "learning_rate": 4.8780487804878055e-06,
+ "loss": 1.2617,
+ "step": 10
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.2364760983285843,
+ "learning_rate": 5.365853658536586e-06,
+ "loss": 1.3103,
+ "step": 11
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.23893034721889,
+ "learning_rate": 5.853658536585366e-06,
+ "loss": 1.2405,
+ "step": 12
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.25563593295485887,
+ "learning_rate": 6.341463414634147e-06,
+ "loss": 1.2831,
+ "step": 13
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.23239975352661665,
+ "learning_rate": 6.829268292682928e-06,
+ "loss": 1.3125,
+ "step": 14
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.3092813858209507,
+ "learning_rate": 7.317073170731707e-06,
+ "loss": 1.2422,
+ "step": 15
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.282563380367434,
+ "learning_rate": 7.804878048780489e-06,
+ "loss": 1.2453,
+ "step": 16
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.22065680088315018,
+ "learning_rate": 8.292682926829268e-06,
+ "loss": 1.2491,
+ "step": 17
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.22777800877980184,
+ "learning_rate": 8.78048780487805e-06,
+ "loss": 1.2655,
+ "step": 18
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.22145212540177928,
+ "learning_rate": 9.268292682926831e-06,
+ "loss": 1.2413,
+ "step": 19
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.22482351883112714,
+ "learning_rate": 9.756097560975611e-06,
+ "loss": 1.2653,
+ "step": 20
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.20823080508385733,
+ "learning_rate": 1.024390243902439e-05,
+ "loss": 1.2374,
+ "step": 21
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.26025492562935737,
+ "learning_rate": 1.0731707317073172e-05,
+ "loss": 1.2065,
+ "step": 22
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.2150252124176173,
+ "learning_rate": 1.1219512195121953e-05,
+ "loss": 1.2782,
+ "step": 23
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.2505915177425618,
+ "learning_rate": 1.1707317073170731e-05,
+ "loss": 1.2742,
+ "step": 24
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.20129223044786942,
+ "learning_rate": 1.2195121951219513e-05,
+ "loss": 1.3366,
+ "step": 25
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.1973508510397107,
+ "learning_rate": 1.2682926829268294e-05,
+ "loss": 1.2476,
+ "step": 26
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.27103325392437194,
+ "learning_rate": 1.3170731707317076e-05,
+ "loss": 1.2325,
+ "step": 27
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.17954976411006285,
+ "learning_rate": 1.3658536585365855e-05,
+ "loss": 1.2523,
+ "step": 28
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.22216997851088888,
+ "learning_rate": 1.4146341463414635e-05,
+ "loss": 1.3297,
+ "step": 29
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.2071458864548587,
+ "learning_rate": 1.4634146341463415e-05,
+ "loss": 1.2127,
+ "step": 30
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.18039422081622164,
+ "learning_rate": 1.5121951219512196e-05,
+ "loss": 1.2509,
+ "step": 31
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.18631254372974412,
+ "learning_rate": 1.5609756097560978e-05,
+ "loss": 1.2247,
+ "step": 32
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.18843872523649827,
+ "learning_rate": 1.6097560975609757e-05,
+ "loss": 1.195,
+ "step": 33
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.2163847267778325,
+ "learning_rate": 1.6585365853658537e-05,
+ "loss": 1.2179,
+ "step": 34
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.19687688475496104,
+ "learning_rate": 1.7073170731707317e-05,
+ "loss": 1.2763,
+ "step": 35
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.20409643064887947,
+ "learning_rate": 1.75609756097561e-05,
+ "loss": 1.253,
+ "step": 36
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.1879182661759335,
+ "learning_rate": 1.804878048780488e-05,
+ "loss": 1.2586,
+ "step": 37
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.19400648948514373,
+ "learning_rate": 1.8536585365853663e-05,
+ "loss": 1.2154,
+ "step": 38
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.1878879343148452,
+ "learning_rate": 1.902439024390244e-05,
+ "loss": 1.2304,
+ "step": 39
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.17687475469924052,
+ "learning_rate": 1.9512195121951222e-05,
+ "loss": 1.2351,
+ "step": 40
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.18223935625384885,
+ "learning_rate": 2e-05,
+ "loss": 1.2222,
+ "step": 41
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.1943061629408338,
+ "learning_rate": 2.048780487804878e-05,
+ "loss": 1.2044,
+ "step": 42
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.17027514338700078,
+ "learning_rate": 2.0975609756097564e-05,
+ "loss": 1.1548,
+ "step": 43
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.18553769630586192,
+ "learning_rate": 2.1463414634146344e-05,
+ "loss": 1.2721,
+ "step": 44
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.19732826914228765,
+ "learning_rate": 2.1951219512195124e-05,
+ "loss": 1.3097,
+ "step": 45
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.18714230986631472,
+ "learning_rate": 2.2439024390243907e-05,
+ "loss": 1.2662,
+ "step": 46
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.19988987568002223,
+ "learning_rate": 2.2926829268292683e-05,
+ "loss": 1.2904,
+ "step": 47
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.17744650133390918,
+ "learning_rate": 2.3414634146341463e-05,
+ "loss": 1.1825,
+ "step": 48
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.16576734763834533,
+ "learning_rate": 2.3902439024390246e-05,
+ "loss": 1.1858,
+ "step": 49
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.179591794065527,
+ "learning_rate": 2.4390243902439026e-05,
+ "loss": 1.2711,
+ "step": 50
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.17923464471176911,
+ "learning_rate": 2.4878048780487805e-05,
+ "loss": 1.2289,
+ "step": 51
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.18991742907836837,
+ "learning_rate": 2.536585365853659e-05,
+ "loss": 1.3097,
+ "step": 52
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.19849796137254636,
+ "learning_rate": 2.5853658536585368e-05,
+ "loss": 1.2489,
+ "step": 53
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.17452371110976383,
+ "learning_rate": 2.634146341463415e-05,
+ "loss": 1.2461,
+ "step": 54
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.17671022353085036,
+ "learning_rate": 2.682926829268293e-05,
+ "loss": 1.153,
+ "step": 55
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.36820559192096686,
+ "learning_rate": 2.731707317073171e-05,
+ "loss": 1.2431,
+ "step": 56
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.20331468526494198,
+ "learning_rate": 2.7804878048780487e-05,
+ "loss": 1.2575,
+ "step": 57
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.2402486598118377,
+ "learning_rate": 2.829268292682927e-05,
+ "loss": 1.2538,
+ "step": 58
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.2549409484173144,
+ "learning_rate": 2.878048780487805e-05,
+ "loss": 1.2065,
+ "step": 59
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.2053105349872685,
+ "learning_rate": 2.926829268292683e-05,
+ "loss": 1.2094,
+ "step": 60
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.17971910872957886,
+ "learning_rate": 2.9756097560975613e-05,
+ "loss": 1.228,
+ "step": 61
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.1885853654992973,
+ "learning_rate": 3.0243902439024392e-05,
+ "loss": 1.2286,
+ "step": 62
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.1848524571968613,
+ "learning_rate": 3.073170731707317e-05,
+ "loss": 1.2718,
+ "step": 63
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.18734105883548513,
+ "learning_rate": 3.1219512195121955e-05,
+ "loss": 1.2357,
+ "step": 64
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.17774668052121825,
+ "learning_rate": 3.170731707317074e-05,
+ "loss": 1.1509,
+ "step": 65
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.17890968008080646,
+ "learning_rate": 3.2195121951219514e-05,
+ "loss": 1.1924,
+ "step": 66
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.18249273371332375,
+ "learning_rate": 3.268292682926829e-05,
+ "loss": 1.2545,
+ "step": 67
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.21064122671902577,
+ "learning_rate": 3.3170731707317074e-05,
+ "loss": 1.2832,
+ "step": 68
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.1820064171955093,
+ "learning_rate": 3.365853658536586e-05,
+ "loss": 1.2071,
+ "step": 69
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.16996662800553433,
+ "learning_rate": 3.414634146341463e-05,
+ "loss": 1.2073,
+ "step": 70
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.1618669302922445,
+ "learning_rate": 3.4634146341463416e-05,
+ "loss": 1.1289,
+ "step": 71
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.18948744950985544,
+ "learning_rate": 3.51219512195122e-05,
+ "loss": 1.2915,
+ "step": 72
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.18326143691603383,
+ "learning_rate": 3.5609756097560976e-05,
+ "loss": 1.2238,
+ "step": 73
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.17410704510700503,
+ "learning_rate": 3.609756097560976e-05,
+ "loss": 1.1784,
+ "step": 74
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.1983667344995625,
+ "learning_rate": 3.658536585365854e-05,
+ "loss": 1.2452,
+ "step": 75
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.3416310763369357,
+ "learning_rate": 3.7073170731707325e-05,
+ "loss": 1.1972,
+ "step": 76
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.2776466983511955,
+ "learning_rate": 3.75609756097561e-05,
+ "loss": 1.3121,
+ "step": 77
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.20026129636576834,
+ "learning_rate": 3.804878048780488e-05,
+ "loss": 1.2436,
+ "step": 78
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.21064549243917835,
+ "learning_rate": 3.853658536585366e-05,
+ "loss": 1.2064,
+ "step": 79
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.22119482175714267,
+ "learning_rate": 3.9024390243902444e-05,
+ "loss": 1.2715,
+ "step": 80
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.23047133748844142,
+ "learning_rate": 3.951219512195122e-05,
+ "loss": 1.2888,
+ "step": 81
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.18741863156973176,
+ "learning_rate": 4e-05,
+ "loss": 1.248,
+ "step": 82
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.1747859810629604,
+ "learning_rate": 4.0487804878048786e-05,
+ "loss": 1.1683,
+ "step": 83
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.1896944798413341,
+ "learning_rate": 4.097560975609756e-05,
+ "loss": 1.2155,
+ "step": 84
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.18724128114363303,
+ "learning_rate": 4.1463414634146346e-05,
+ "loss": 1.2273,
+ "step": 85
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.17368125504855478,
+ "learning_rate": 4.195121951219513e-05,
+ "loss": 1.224,
+ "step": 86
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.18371141013625703,
+ "learning_rate": 4.2439024390243905e-05,
+ "loss": 1.2294,
+ "step": 87
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.1791029365673714,
+ "learning_rate": 4.292682926829269e-05,
+ "loss": 1.2895,
+ "step": 88
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.20259974283859655,
+ "learning_rate": 4.341463414634147e-05,
+ "loss": 1.1841,
+ "step": 89
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.17457456183272174,
+ "learning_rate": 4.390243902439025e-05,
+ "loss": 1.2357,
+ "step": 90
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.1815824380789748,
+ "learning_rate": 4.439024390243903e-05,
+ "loss": 1.2304,
+ "step": 91
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.17566480599583392,
+ "learning_rate": 4.4878048780487814e-05,
+ "loss": 1.242,
+ "step": 92
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.18422975005984474,
+ "learning_rate": 4.536585365853658e-05,
+ "loss": 1.2177,
+ "step": 93
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.16796781877940678,
+ "learning_rate": 4.5853658536585366e-05,
+ "loss": 1.1482,
+ "step": 94
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.18636131653783305,
+ "learning_rate": 4.634146341463415e-05,
+ "loss": 1.1758,
+ "step": 95
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.1823665700289814,
+ "learning_rate": 4.6829268292682926e-05,
+ "loss": 1.289,
+ "step": 96
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.1719900691262439,
+ "learning_rate": 4.731707317073171e-05,
+ "loss": 1.1626,
+ "step": 97
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.17937994168039778,
+ "learning_rate": 4.780487804878049e-05,
+ "loss": 1.175,
+ "step": 98
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.16631851422106986,
+ "learning_rate": 4.829268292682927e-05,
+ "loss": 1.2177,
+ "step": 99
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.19143696232800309,
+ "learning_rate": 4.878048780487805e-05,
+ "loss": 1.3071,
+ "step": 100
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.17859506638780318,
+ "learning_rate": 4.9268292682926835e-05,
+ "loss": 1.2351,
+ "step": 101
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.18381520321248196,
+ "learning_rate": 4.975609756097561e-05,
+ "loss": 1.2342,
+ "step": 102
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.17968218683773912,
+ "learning_rate": 5.0243902439024394e-05,
+ "loss": 1.2074,
+ "step": 103
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.18139489969339018,
+ "learning_rate": 5.073170731707318e-05,
+ "loss": 1.1558,
+ "step": 104
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.17366624842514394,
+ "learning_rate": 5.121951219512195e-05,
+ "loss": 1.1897,
+ "step": 105
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.16034845455223745,
+ "learning_rate": 5.1707317073170736e-05,
+ "loss": 1.179,
+ "step": 106
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.17583069577827776,
+ "learning_rate": 5.219512195121952e-05,
+ "loss": 1.1856,
+ "step": 107
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.1853758076989552,
+ "learning_rate": 5.26829268292683e-05,
+ "loss": 1.2072,
+ "step": 108
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.19597443965936462,
+ "learning_rate": 5.317073170731708e-05,
+ "loss": 1.2271,
+ "step": 109
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.1899206334098331,
+ "learning_rate": 5.365853658536586e-05,
+ "loss": 1.1961,
+ "step": 110
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.17463763837757018,
+ "learning_rate": 5.4146341463414645e-05,
+ "loss": 1.2049,
+ "step": 111
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.20431371701229986,
+ "learning_rate": 5.463414634146342e-05,
+ "loss": 1.2891,
+ "step": 112
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1814475107638498,
+ "learning_rate": 5.51219512195122e-05,
+ "loss": 1.2346,
+ "step": 113
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1883849423207823,
+ "learning_rate": 5.5609756097560974e-05,
+ "loss": 1.244,
+ "step": 114
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1857258128640568,
+ "learning_rate": 5.609756097560976e-05,
+ "loss": 1.2669,
+ "step": 115
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1740768514118401,
+ "learning_rate": 5.658536585365854e-05,
+ "loss": 1.2414,
+ "step": 116
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1919320335584178,
+ "learning_rate": 5.7073170731707317e-05,
+ "loss": 1.2886,
+ "step": 117
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.18288775167828136,
+ "learning_rate": 5.75609756097561e-05,
+ "loss": 1.1875,
+ "step": 118
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.18208588867750863,
+ "learning_rate": 5.804878048780488e-05,
+ "loss": 1.2388,
+ "step": 119
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.1743260015658331,
+ "learning_rate": 5.853658536585366e-05,
+ "loss": 1.1762,
+ "step": 120
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.17856046291517946,
+ "learning_rate": 5.902439024390244e-05,
+ "loss": 1.2888,
+ "step": 121
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.17493794870966536,
+ "learning_rate": 5.9512195121951225e-05,
+ "loss": 1.2222,
+ "step": 122
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.1909202655203384,
+ "learning_rate": 6.000000000000001e-05,
+ "loss": 1.2414,
+ "step": 123
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.18345819482834988,
+ "learning_rate": 6.0487804878048785e-05,
+ "loss": 1.2756,
+ "step": 124
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.2057069352956621,
+ "learning_rate": 6.097560975609757e-05,
+ "loss": 1.261,
+ "step": 125
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.299775882469108,
+ "learning_rate": 6.146341463414634e-05,
+ "loss": 1.2566,
+ "step": 126
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.1869687633018095,
+ "learning_rate": 6.195121951219513e-05,
+ "loss": 1.3039,
+ "step": 127
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.17747149926197442,
+ "learning_rate": 6.243902439024391e-05,
+ "loss": 1.2524,
+ "step": 128
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.17885157788044242,
+ "learning_rate": 6.29268292682927e-05,
+ "loss": 1.2455,
+ "step": 129
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.17617298187845123,
+ "learning_rate": 6.341463414634148e-05,
+ "loss": 1.2009,
+ "step": 130
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.20164176323497066,
+ "learning_rate": 6.390243902439025e-05,
+ "loss": 1.2634,
+ "step": 131
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.20459903417307612,
+ "learning_rate": 6.439024390243903e-05,
+ "loss": 1.1963,
+ "step": 132
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.1863755486334296,
+ "learning_rate": 6.487804878048781e-05,
+ "loss": 1.2387,
+ "step": 133
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.19265866140295207,
+ "learning_rate": 6.536585365853658e-05,
+ "loss": 1.2688,
+ "step": 134
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.1823425868969493,
+ "learning_rate": 6.585365853658536e-05,
+ "loss": 1.2041,
+ "step": 135
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.2016853266472781,
+ "learning_rate": 6.634146341463415e-05,
+ "loss": 1.1223,
+ "step": 136
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.17282675192463448,
+ "learning_rate": 6.682926829268293e-05,
+ "loss": 1.1879,
+ "step": 137
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.17398811693399288,
+ "learning_rate": 6.731707317073171e-05,
+ "loss": 1.2682,
+ "step": 138
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.18516916965434696,
+ "learning_rate": 6.78048780487805e-05,
+ "loss": 1.1666,
+ "step": 139
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.1852213129647933,
+ "learning_rate": 6.829268292682927e-05,
+ "loss": 1.2501,
+ "step": 140
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.17915948766591883,
+ "learning_rate": 6.878048780487805e-05,
+ "loss": 1.2264,
+ "step": 141
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.21599939417233183,
+ "learning_rate": 6.926829268292683e-05,
+ "loss": 1.2376,
+ "step": 142
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.17839304459521851,
+ "learning_rate": 6.975609756097562e-05,
+ "loss": 1.2353,
+ "step": 143
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.20826913231380875,
+ "learning_rate": 7.02439024390244e-05,
+ "loss": 1.1901,
+ "step": 144
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.20788894913361589,
+ "learning_rate": 7.073170731707318e-05,
+ "loss": 1.2577,
+ "step": 145
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.18420055842301297,
+ "learning_rate": 7.121951219512195e-05,
+ "loss": 1.1393,
+ "step": 146
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.19903048468685589,
+ "learning_rate": 7.170731707317073e-05,
+ "loss": 1.2321,
+ "step": 147
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.19074116314985748,
+ "learning_rate": 7.219512195121952e-05,
+ "loss": 1.1912,
+ "step": 148
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.2353816469403903,
+ "learning_rate": 7.26829268292683e-05,
+ "loss": 1.28,
+ "step": 149
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.21634875684769345,
+ "learning_rate": 7.317073170731708e-05,
+ "loss": 1.3312,
+ "step": 150
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.18290969006743918,
+ "learning_rate": 7.365853658536587e-05,
+ "loss": 1.2214,
+ "step": 151
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.18484243897545208,
+ "learning_rate": 7.414634146341465e-05,
+ "loss": 1.1895,
+ "step": 152
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.21882343112978872,
+ "learning_rate": 7.463414634146342e-05,
+ "loss": 1.2219,
+ "step": 153
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.19868284379241205,
+ "learning_rate": 7.51219512195122e-05,
+ "loss": 1.2176,
+ "step": 154
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.20912516312950613,
+ "learning_rate": 7.560975609756097e-05,
+ "loss": 1.242,
+ "step": 155
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.23811880045549916,
+ "learning_rate": 7.609756097560976e-05,
+ "loss": 1.2838,
+ "step": 156
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.19511077122033713,
+ "learning_rate": 7.658536585365854e-05,
+ "loss": 1.1594,
+ "step": 157
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.20094129399534238,
+ "learning_rate": 7.707317073170732e-05,
+ "loss": 1.2966,
+ "step": 158
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.19366245038292418,
+ "learning_rate": 7.75609756097561e-05,
+ "loss": 1.2246,
+ "step": 159
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.19409570223867306,
+ "learning_rate": 7.804878048780489e-05,
+ "loss": 1.2312,
+ "step": 160
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.2087258457033805,
+ "learning_rate": 7.853658536585366e-05,
+ "loss": 1.2169,
+ "step": 161
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.18765223996270428,
+ "learning_rate": 7.902439024390244e-05,
+ "loss": 1.2383,
+ "step": 162
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.20734180224147242,
+ "learning_rate": 7.951219512195122e-05,
+ "loss": 1.2587,
+ "step": 163
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.24690929540287834,
+ "learning_rate": 8e-05,
+ "loss": 1.1951,
+ "step": 164
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.2003538797619543,
+ "learning_rate": 7.999990914797545e-05,
+ "loss": 1.1982,
+ "step": 165
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.22469075613510484,
+ "learning_rate": 7.99996365923145e-05,
+ "loss": 1.2355,
+ "step": 166
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.21870100788336058,
+ "learning_rate": 7.999918233425526e-05,
+ "loss": 1.1103,
+ "step": 167
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.20939989594131886,
+ "learning_rate": 7.999854637586122e-05,
+ "loss": 1.1966,
+ "step": 168
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.43108211416237796,
+ "learning_rate": 7.999772872002132e-05,
+ "loss": 1.2882,
+ "step": 169
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.27045413432174487,
+ "learning_rate": 7.999672937044984e-05,
+ "loss": 1.2399,
+ "step": 170
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.19700483036740515,
+ "learning_rate": 7.999554833168642e-05,
+ "loss": 1.202,
+ "step": 171
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.3335979493370708,
+ "learning_rate": 7.999418560909604e-05,
+ "loss": 1.1995,
+ "step": 172
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.3165803974474567,
+ "learning_rate": 7.999264120886902e-05,
+ "loss": 1.1569,
+ "step": 173
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.1951699080346223,
+ "learning_rate": 7.999091513802093e-05,
+ "loss": 1.1778,
+ "step": 174
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.2087559121749787,
+ "learning_rate": 7.998900740439265e-05,
+ "loss": 1.1736,
+ "step": 175
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.20345180977460478,
+ "learning_rate": 7.998691801665024e-05,
+ "loss": 1.2281,
+ "step": 176
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.24617644827252333,
+ "learning_rate": 7.998464698428495e-05,
+ "loss": 1.2072,
+ "step": 177
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.2469050959356265,
+ "learning_rate": 7.998219431761318e-05,
+ "loss": 1.2242,
+ "step": 178
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.19529317748460623,
+ "learning_rate": 7.997956002777642e-05,
+ "loss": 1.2567,
+ "step": 179
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.19048389491381376,
+ "learning_rate": 7.99767441267412e-05,
+ "loss": 1.2982,
+ "step": 180
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.2085799116493225,
+ "learning_rate": 7.997374662729904e-05,
+ "loss": 1.1254,
+ "step": 181
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.20636853256378995,
+ "learning_rate": 7.997056754306636e-05,
+ "loss": 1.2435,
+ "step": 182
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.20590016382290252,
+ "learning_rate": 7.99672068884845e-05,
+ "loss": 1.2658,
+ "step": 183
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.1931166169764433,
+ "learning_rate": 7.996366467881955e-05,
+ "loss": 1.1637,
+ "step": 184
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.18873318157988098,
+ "learning_rate": 7.995994093016237e-05,
+ "loss": 1.1335,
+ "step": 185
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.19210254625199108,
+ "learning_rate": 7.995603565942846e-05,
+ "loss": 1.1928,
+ "step": 186
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.2130986479765664,
+ "learning_rate": 7.995194888435792e-05,
+ "loss": 1.2158,
+ "step": 187
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.22003854501814088,
+ "learning_rate": 7.994768062351532e-05,
+ "loss": 1.2288,
+ "step": 188
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.20330803191993058,
+ "learning_rate": 7.994323089628968e-05,
+ "loss": 1.2426,
+ "step": 189
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.20567314642208634,
+ "learning_rate": 7.993859972289434e-05,
+ "loss": 1.2649,
+ "step": 190
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.21556663727342962,
+ "learning_rate": 7.993378712436686e-05,
+ "loss": 1.2545,
+ "step": 191
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.20309165469109888,
+ "learning_rate": 7.992879312256897e-05,
+ "loss": 1.3338,
+ "step": 192
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.19574356669421325,
+ "learning_rate": 7.992361774018641e-05,
+ "loss": 1.278,
+ "step": 193
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.2763613746722313,
+ "learning_rate": 7.991826100072891e-05,
+ "loss": 1.2571,
+ "step": 194
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.19346552479915102,
+ "learning_rate": 7.991272292852996e-05,
+ "loss": 1.2027,
+ "step": 195
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.2281167812123908,
+ "learning_rate": 7.990700354874683e-05,
+ "loss": 1.2586,
+ "step": 196
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.19699013712137542,
+ "learning_rate": 7.990110288736042e-05,
+ "loss": 1.1371,
+ "step": 197
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.21768209981475933,
+ "learning_rate": 7.989502097117503e-05,
+ "loss": 1.2522,
+ "step": 198
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.21335427847754582,
+ "learning_rate": 7.988875782781838e-05,
+ "loss": 1.2437,
+ "step": 199
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.21856710629066897,
+ "learning_rate": 7.988231348574147e-05,
+ "loss": 1.2135,
+ "step": 200
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.20482062658774797,
+ "learning_rate": 7.987568797421836e-05,
+ "loss": 1.1755,
+ "step": 201
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.2017756813960897,
+ "learning_rate": 7.986888132334608e-05,
+ "loss": 1.1699,
+ "step": 202
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.20496443848153809,
+ "learning_rate": 7.986189356404458e-05,
+ "loss": 1.2125,
+ "step": 203
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.2134603800558358,
+ "learning_rate": 7.985472472805643e-05,
+ "loss": 1.2391,
+ "step": 204
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.2364175573420861,
+ "learning_rate": 7.98473748479468e-05,
+ "loss": 1.2384,
+ "step": 205
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.1872419861598724,
+ "learning_rate": 7.983984395710326e-05,
+ "loss": 1.1457,
+ "step": 206
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.28222194007095774,
+ "learning_rate": 7.983213208973566e-05,
+ "loss": 1.2952,
+ "step": 207
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.1916094851162064,
+ "learning_rate": 7.982423928087593e-05,
+ "loss": 1.1763,
+ "step": 208
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.18446245256166657,
+ "learning_rate": 7.981616556637795e-05,
+ "loss": 1.1863,
+ "step": 209
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.195191961022491,
+ "learning_rate": 7.980791098291737e-05,
+ "loss": 1.2036,
+ "step": 210
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.2652439657825496,
+ "learning_rate": 7.979947556799151e-05,
+ "loss": 1.2834,
+ "step": 211
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.24308438957843412,
+ "learning_rate": 7.979085935991906e-05,
+ "loss": 1.234,
+ "step": 212
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.21294701043622016,
+ "learning_rate": 7.978206239784004e-05,
+ "loss": 1.3006,
+ "step": 213
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.25809277041859524,
+ "learning_rate": 7.977308472171553e-05,
+ "loss": 1.2272,
+ "step": 214
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.193463860107294,
+ "learning_rate": 7.976392637232754e-05,
+ "loss": 1.2295,
+ "step": 215
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.2150023760609626,
+ "learning_rate": 7.975458739127877e-05,
+ "loss": 1.2135,
+ "step": 216
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.22590495955605894,
+ "learning_rate": 7.974506782099253e-05,
+ "loss": 1.2532,
+ "step": 217
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.21023744668403702,
+ "learning_rate": 7.973536770471242e-05,
+ "loss": 1.2472,
+ "step": 218
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.2345749799511543,
+ "learning_rate": 7.972548708650218e-05,
+ "loss": 1.1791,
+ "step": 219
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.2158876734005217,
+ "learning_rate": 7.971542601124553e-05,
+ "loss": 1.2483,
+ "step": 220
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.29455339949432446,
+ "learning_rate": 7.970518452464593e-05,
+ "loss": 1.2894,
+ "step": 221
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.23983708730626851,
+ "learning_rate": 7.969476267322636e-05,
+ "loss": 1.271,
+ "step": 222
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.1922400905426158,
+ "learning_rate": 7.968416050432912e-05,
+ "loss": 1.2139,
+ "step": 223
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.2238136844422931,
+ "learning_rate": 7.967337806611568e-05,
+ "loss": 1.2655,
+ "step": 224
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.21230292828267672,
+ "learning_rate": 7.966241540756631e-05,
+ "loss": 1.2406,
+ "step": 225
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.26656119419070456,
+ "learning_rate": 7.965127257848004e-05,
+ "loss": 1.2595,
+ "step": 226
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.22381385502992684,
+ "learning_rate": 7.963994962947426e-05,
+ "loss": 1.1737,
+ "step": 227
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20056702203994298,
+ "learning_rate": 7.962844661198462e-05,
+ "loss": 1.1969,
+ "step": 228
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20148701321526885,
+ "learning_rate": 7.961676357826478e-05,
+ "loss": 1.2151,
+ "step": 229
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20034834807028637,
+ "learning_rate": 7.960490058138604e-05,
+ "loss": 1.1455,
+ "step": 230
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.21050838521846033,
+ "learning_rate": 7.959285767523732e-05,
+ "loss": 1.2223,
+ "step": 231
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20904772138969777,
+ "learning_rate": 7.95806349145247e-05,
+ "loss": 1.2534,
+ "step": 232
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.20307877304792957,
+ "learning_rate": 7.956823235477134e-05,
+ "loss": 1.1352,
+ "step": 233
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.20501105270897094,
+ "learning_rate": 7.95556500523171e-05,
+ "loss": 1.2031,
+ "step": 234
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.19800586972038586,
+ "learning_rate": 7.954288806431838e-05,
+ "loss": 1.2567,
+ "step": 235
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.2175102450594135,
+ "learning_rate": 7.952994644874777e-05,
+ "loss": 1.2538,
+ "step": 236
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.22698189300067595,
+ "learning_rate": 7.951682526439391e-05,
+ "loss": 1.3088,
+ "step": 237
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.19208392014975315,
+ "learning_rate": 7.950352457086109e-05,
+ "loss": 1.2336,
+ "step": 238
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.27004086334319655,
+ "learning_rate": 7.949004442856905e-05,
+ "loss": 1.2012,
+ "step": 239
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.23420974954538043,
+ "learning_rate": 7.947638489875272e-05,
+ "loss": 1.2244,
+ "step": 240
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.20514399124802024,
+ "learning_rate": 7.946254604346186e-05,
+ "loss": 1.2548,
+ "step": 241
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.19334973602372896,
+ "learning_rate": 7.944852792556092e-05,
+ "loss": 1.2104,
+ "step": 242
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.1992640714537956,
+ "learning_rate": 7.943433060872858e-05,
+ "loss": 1.2628,
+ "step": 243
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.203284617090413,
+ "learning_rate": 7.941995415745761e-05,
+ "loss": 1.2002,
+ "step": 244
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.22795306969682058,
+ "learning_rate": 7.94053986370545e-05,
+ "loss": 1.2215,
+ "step": 245
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.20789041346838505,
+ "learning_rate": 7.939066411363915e-05,
+ "loss": 1.0998,
+ "step": 246
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.22354868884742066,
+ "learning_rate": 7.937575065414464e-05,
+ "loss": 1.2564,
+ "step": 247
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.21176392726647736,
+ "learning_rate": 7.936065832631687e-05,
+ "loss": 1.2816,
+ "step": 248
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.19967179557235587,
+ "learning_rate": 7.934538719871427e-05,
+ "loss": 1.1961,
+ "step": 249
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.210819577350627,
+ "learning_rate": 7.932993734070747e-05,
+ "loss": 1.2167,
+ "step": 250
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.21537794551756187,
+ "learning_rate": 7.931430882247903e-05,
+ "loss": 1.2341,
+ "step": 251
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.22850872387256574,
+ "learning_rate": 7.929850171502304e-05,
+ "loss": 1.1686,
+ "step": 252
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.22380366415076383,
+ "learning_rate": 7.928251609014493e-05,
+ "loss": 1.1462,
+ "step": 253
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.22426923149036065,
+ "learning_rate": 7.926635202046102e-05,
+ "loss": 1.1792,
+ "step": 254
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.42082703321103965,
+ "learning_rate": 7.925000957939822e-05,
+ "loss": 1.2718,
+ "step": 255
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.2235432774854074,
+ "learning_rate": 7.92334888411937e-05,
+ "loss": 1.2598,
+ "step": 256
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.281644028934108,
+ "learning_rate": 7.92167898808946e-05,
+ "loss": 1.2205,
+ "step": 257
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.2037705143888748,
+ "learning_rate": 7.919991277435763e-05,
+ "loss": 1.1737,
+ "step": 258
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.20917419230028977,
+ "learning_rate": 7.918285759824879e-05,
+ "loss": 1.2035,
+ "step": 259
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.20510847570635518,
+ "learning_rate": 7.916562443004292e-05,
+ "loss": 1.2135,
+ "step": 260
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.25172483071092466,
+ "learning_rate": 7.914821334802342e-05,
+ "loss": 1.2218,
+ "step": 261
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.21102706700634313,
+ "learning_rate": 7.91306244312819e-05,
+ "loss": 1.1738,
+ "step": 262
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.22626060872645815,
+ "learning_rate": 7.911285775971781e-05,
+ "loss": 1.238,
+ "step": 263
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.22448567539778486,
+ "learning_rate": 7.909491341403805e-05,
+ "loss": 1.2404,
+ "step": 264
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.2019099786139193,
+ "learning_rate": 7.907679147575661e-05,
+ "loss": 1.213,
+ "step": 265
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.24307234839096267,
+ "learning_rate": 7.905849202719422e-05,
+ "loss": 1.2322,
+ "step": 266
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.19801890521743487,
+ "learning_rate": 7.904001515147802e-05,
+ "loss": 1.2448,
+ "step": 267
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.2102742273575385,
+ "learning_rate": 7.902136093254106e-05,
+ "loss": 1.1657,
+ "step": 268
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.2173464476815016,
+ "learning_rate": 7.900252945512201e-05,
+ "loss": 1.2549,
+ "step": 269
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.20957275458699595,
+ "learning_rate": 7.898352080476479e-05,
+ "loss": 1.2536,
+ "step": 270
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.20691966388952363,
+ "learning_rate": 7.896433506781811e-05,
+ "loss": 1.2661,
+ "step": 271
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.2276662275112648,
+ "learning_rate": 7.894497233143509e-05,
+ "loss": 1.2409,
+ "step": 272
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.23854109569301263,
+ "learning_rate": 7.892543268357297e-05,
+ "loss": 1.2681,
+ "step": 273
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.2233864156677627,
+ "learning_rate": 7.890571621299252e-05,
+ "loss": 1.1687,
+ "step": 274
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.20114129147925475,
+ "learning_rate": 7.888582300925787e-05,
+ "loss": 1.2184,
+ "step": 275
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.2154654670569462,
+ "learning_rate": 7.886575316273586e-05,
+ "loss": 1.1982,
+ "step": 276
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.2292982209343639,
+ "learning_rate": 7.884550676459583e-05,
+ "loss": 1.2129,
+ "step": 277
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.21302713135229548,
+ "learning_rate": 7.882508390680908e-05,
+ "loss": 1.1605,
+ "step": 278
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.2123661020671048,
+ "learning_rate": 7.88044846821485e-05,
+ "loss": 1.2308,
+ "step": 279
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.2080577410800404,
+ "learning_rate": 7.878370918418818e-05,
+ "loss": 1.2195,
+ "step": 280
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.19663901881127385,
+ "learning_rate": 7.876275750730289e-05,
+ "loss": 1.1591,
+ "step": 281
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.20534502031312163,
+ "learning_rate": 7.874162974666776e-05,
+ "loss": 1.2664,
+ "step": 282
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.23240445399513837,
+ "learning_rate": 7.872032599825779e-05,
+ "loss": 1.2151,
+ "step": 283
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.2672527316717507,
+ "learning_rate": 7.86988463588474e-05,
+ "loss": 1.2406,
+ "step": 284
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.19893903058743695,
+ "learning_rate": 7.867719092601003e-05,
+ "loss": 1.1291,
+ "step": 285
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.33275268109930917,
+ "learning_rate": 7.865535979811768e-05,
+ "loss": 1.1406,
+ "step": 286
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.2373619455690358,
+ "learning_rate": 7.863335307434045e-05,
+ "loss": 1.2799,
+ "step": 287
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.263235735390858,
+ "learning_rate": 7.861117085464612e-05,
+ "loss": 1.2415,
+ "step": 288
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.25884281780784324,
+ "learning_rate": 7.858881323979965e-05,
+ "loss": 1.3919,
+ "step": 289
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.25426288332255736,
+ "learning_rate": 7.85662803313628e-05,
+ "loss": 1.174,
+ "step": 290
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.26655405527881243,
+ "learning_rate": 7.854357223169356e-05,
+ "loss": 1.2806,
+ "step": 291
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.20909844432349833,
+ "learning_rate": 7.852068904394579e-05,
+ "loss": 1.2627,
+ "step": 292
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.21307115068935759,
+ "learning_rate": 7.849763087206866e-05,
+ "loss": 1.1879,
+ "step": 293
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.25009949471398946,
+ "learning_rate": 7.847439782080628e-05,
+ "loss": 1.2881,
+ "step": 294
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.20960783418679174,
+ "learning_rate": 7.845098999569712e-05,
+ "loss": 1.2723,
+ "step": 295
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.24968832437925104,
+ "learning_rate": 7.842740750307362e-05,
+ "loss": 1.2029,
+ "step": 296
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.22981196585125677,
+ "learning_rate": 7.84036504500616e-05,
+ "loss": 1.1695,
+ "step": 297
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.2320606844751365,
+ "learning_rate": 7.837971894457991e-05,
+ "loss": 1.2317,
+ "step": 298
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.23051459673906124,
+ "learning_rate": 7.835561309533981e-05,
+ "loss": 1.2046,
+ "step": 299
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.2510027231060586,
+ "learning_rate": 7.833133301184457e-05,
+ "loss": 1.199,
+ "step": 300
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.23601180466018787,
+ "learning_rate": 7.830687880438895e-05,
+ "loss": 1.1755,
+ "step": 301
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.24740820934385369,
+ "learning_rate": 7.828225058405864e-05,
+ "loss": 1.2054,
+ "step": 302
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.23065372979111173,
+ "learning_rate": 7.825744846272984e-05,
+ "loss": 1.2066,
+ "step": 303
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.22385077334838213,
+ "learning_rate": 7.823247255306866e-05,
+ "loss": 1.2147,
+ "step": 304
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.42981213948386104,
+ "learning_rate": 7.820732296853074e-05,
+ "loss": 1.2314,
+ "step": 305
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.21122844902751076,
+ "learning_rate": 7.818199982336058e-05,
+ "loss": 1.1462,
+ "step": 306
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.23374869692118933,
+ "learning_rate": 7.815650323259117e-05,
+ "loss": 1.2051,
+ "step": 307
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.21662363795962128,
+ "learning_rate": 7.813083331204332e-05,
+ "loss": 1.1575,
+ "step": 308
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.2088315773384112,
+ "learning_rate": 7.810499017832526e-05,
+ "loss": 1.1316,
+ "step": 309
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.2095238410730976,
+ "learning_rate": 7.807897394883203e-05,
+ "loss": 1.2087,
+ "step": 310
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.22672932127256515,
+ "learning_rate": 7.805278474174499e-05,
+ "loss": 1.2512,
+ "step": 311
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.21873052340922736,
+ "learning_rate": 7.802642267603126e-05,
+ "loss": 1.1909,
+ "step": 312
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.219814521916342,
+ "learning_rate": 7.79998878714432e-05,
+ "loss": 1.1669,
+ "step": 313
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.3049426027257317,
+ "learning_rate": 7.797318044851786e-05,
+ "loss": 1.1797,
+ "step": 314
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.22309435690065985,
+ "learning_rate": 7.794630052857638e-05,
+ "loss": 1.1417,
+ "step": 315
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.3891885169154885,
+ "learning_rate": 7.791924823372354e-05,
+ "loss": 1.2369,
+ "step": 316
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.24780269452456372,
+ "learning_rate": 7.789202368684711e-05,
+ "loss": 1.2521,
+ "step": 317
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.21660460720269362,
+ "learning_rate": 7.786462701161738e-05,
+ "loss": 1.2151,
+ "step": 318
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.23635409466561857,
+ "learning_rate": 7.783705833248649e-05,
+ "loss": 1.2363,
+ "step": 319
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.2616135839903218,
+ "learning_rate": 7.780931777468797e-05,
+ "loss": 1.2428,
+ "step": 320
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.21461059159245083,
+ "learning_rate": 7.77814054642361e-05,
+ "loss": 1.1434,
+ "step": 321
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.25348824286656163,
+ "learning_rate": 7.775332152792539e-05,
+ "loss": 1.2368,
+ "step": 322
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.22275034726331247,
+ "learning_rate": 7.772506609332995e-05,
+ "loss": 1.1827,
+ "step": 323
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.25030821228147526,
+ "learning_rate": 7.769663928880298e-05,
+ "loss": 1.2428,
+ "step": 324
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.22251804398745534,
+ "learning_rate": 7.766804124347608e-05,
+ "loss": 1.1889,
+ "step": 325
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.23381455520411995,
+ "learning_rate": 7.763927208725879e-05,
+ "loss": 1.2115,
+ "step": 326
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.27341902651946226,
+ "learning_rate": 7.761033195083791e-05,
+ "loss": 1.2535,
+ "step": 327
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.24862471659814522,
+ "learning_rate": 7.758122096567694e-05,
+ "loss": 1.2128,
+ "step": 328
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.2251357082045494,
+ "learning_rate": 7.755193926401547e-05,
+ "loss": 1.2334,
+ "step": 329
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.3173274941622932,
+ "learning_rate": 7.752248697886857e-05,
+ "loss": 1.226,
+ "step": 330
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.23056440717672175,
+ "learning_rate": 7.74928642440263e-05,
+ "loss": 1.2339,
+ "step": 331
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2801507500859342,
+ "learning_rate": 7.746307119405286e-05,
+ "loss": 1.287,
+ "step": 332
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2267818430426272,
+ "learning_rate": 7.743310796428622e-05,
+ "loss": 1.1916,
+ "step": 333
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2777329160365585,
+ "learning_rate": 7.74029746908374e-05,
+ "loss": 1.252,
+ "step": 334
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.25289169762353,
+ "learning_rate": 7.737267151058983e-05,
+ "loss": 1.2153,
+ "step": 335
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2424670686901653,
+ "learning_rate": 7.734219856119875e-05,
+ "loss": 1.2227,
+ "step": 336
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.22747092217441645,
+ "learning_rate": 7.731155598109067e-05,
+ "loss": 1.19,
+ "step": 337
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.2307810940100189,
+ "learning_rate": 7.728074390946257e-05,
+ "loss": 1.1818,
+ "step": 338
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.2583402574655623,
+ "learning_rate": 7.724976248628142e-05,
+ "loss": 1.1608,
+ "step": 339
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.22140209760890694,
+ "learning_rate": 7.721861185228347e-05,
+ "loss": 1.1245,
+ "step": 340
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.25859310758244686,
+ "learning_rate": 7.718729214897362e-05,
+ "loss": 1.2247,
+ "step": 341
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.26371179531372124,
+ "learning_rate": 7.715580351862482e-05,
+ "loss": 1.2128,
+ "step": 342
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.26575541302851047,
+ "learning_rate": 7.712414610427733e-05,
+ "loss": 1.2443,
+ "step": 343
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.269978305197599,
+ "learning_rate": 7.709232004973816e-05,
+ "loss": 1.2231,
+ "step": 344
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.26583998705977047,
+ "learning_rate": 7.70603254995804e-05,
+ "loss": 1.2476,
+ "step": 345
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.24256062164066097,
+ "learning_rate": 7.702816259914253e-05,
+ "loss": 1.2901,
+ "step": 346
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.3463123472658915,
+ "learning_rate": 7.699583149452779e-05,
+ "loss": 1.3277,
+ "step": 347
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.2269096590531878,
+ "learning_rate": 7.696333233260345e-05,
+ "loss": 1.2047,
+ "step": 348
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.25136883001050025,
+ "learning_rate": 7.693066526100031e-05,
+ "loss": 1.1619,
+ "step": 349
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.2565112571116145,
+ "learning_rate": 7.68978304281118e-05,
+ "loss": 1.2389,
+ "step": 350
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.22175779550828703,
+ "learning_rate": 7.686482798309349e-05,
+ "loss": 1.2238,
+ "step": 351
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.22588304332216555,
+ "learning_rate": 7.683165807586234e-05,
+ "loss": 1.174,
+ "step": 352
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.24889474296529737,
+ "learning_rate": 7.6798320857096e-05,
+ "loss": 1.2366,
+ "step": 353
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.27339703806525034,
+ "learning_rate": 7.676481647823214e-05,
+ "loss": 1.2356,
+ "step": 354
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.23424666722888365,
+ "learning_rate": 7.673114509146782e-05,
+ "loss": 1.2089,
+ "step": 355
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.27978285392461766,
+ "learning_rate": 7.66973068497587e-05,
+ "loss": 1.2609,
+ "step": 356
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.2509423350138824,
+ "learning_rate": 7.666330190681844e-05,
+ "loss": 1.1777,
+ "step": 357
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.23007730927468031,
+ "learning_rate": 7.662913041711793e-05,
+ "loss": 1.154,
+ "step": 358
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.2438648674953112,
+ "learning_rate": 7.659479253588462e-05,
+ "loss": 1.2257,
+ "step": 359
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.28816093242092233,
+ "learning_rate": 7.65602884191018e-05,
+ "loss": 1.2558,
+ "step": 360
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.24972815300596035,
+ "learning_rate": 7.652561822350793e-05,
+ "loss": 1.2837,
+ "step": 361
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.2543189139697063,
+ "learning_rate": 7.649078210659587e-05,
+ "loss": 1.2193,
+ "step": 362
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.2237937956718952,
+ "learning_rate": 7.645578022661224e-05,
+ "loss": 1.2237,
+ "step": 363
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.29742029408787396,
+ "learning_rate": 7.642061274255657e-05,
+ "loss": 1.2116,
+ "step": 364
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2462883147335493,
+ "learning_rate": 7.638527981418075e-05,
+ "loss": 1.1827,
+ "step": 365
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2647802498907096,
+ "learning_rate": 7.634978160198817e-05,
+ "loss": 1.2739,
+ "step": 366
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.22360398779217264,
+ "learning_rate": 7.631411826723306e-05,
+ "loss": 1.2185,
+ "step": 367
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2635048004593543,
+ "learning_rate": 7.627828997191973e-05,
+ "loss": 1.2317,
+ "step": 368
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2764803449917684,
+ "learning_rate": 7.624229687880184e-05,
+ "loss": 1.1923,
+ "step": 369
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.25724943233414527,
+ "learning_rate": 7.620613915138166e-05,
+ "loss": 1.2218,
+ "step": 370
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.2858318045794755,
+ "learning_rate": 7.61698169539093e-05,
+ "loss": 1.1496,
+ "step": 371
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.23547216647460364,
+ "learning_rate": 7.613333045138206e-05,
+ "loss": 1.1905,
+ "step": 372
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.22984814903684375,
+ "learning_rate": 7.609667980954355e-05,
+ "loss": 1.2009,
+ "step": 373
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.2551903754079084,
+ "learning_rate": 7.605986519488301e-05,
+ "loss": 1.2042,
+ "step": 374
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.2508257410125616,
+ "learning_rate": 7.602288677463457e-05,
+ "loss": 1.2468,
+ "step": 375
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.25324577774935964,
+ "learning_rate": 7.598574471677644e-05,
+ "loss": 1.2603,
+ "step": 376
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.35888776531769967,
+ "learning_rate": 7.59484391900302e-05,
+ "loss": 1.1929,
+ "step": 377
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.22048517191014724,
+ "learning_rate": 7.591097036385994e-05,
+ "loss": 1.1783,
+ "step": 378
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.2781160412746083,
+ "learning_rate": 7.587333840847162e-05,
+ "loss": 1.3397,
+ "step": 379
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.24033046830332258,
+ "learning_rate": 7.583554349481222e-05,
+ "loss": 1.2436,
+ "step": 380
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.26413762380260003,
+ "learning_rate": 7.579758579456893e-05,
+ "loss": 1.1917,
+ "step": 381
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.2390937887338632,
+ "learning_rate": 7.575946548016847e-05,
+ "loss": 1.2186,
+ "step": 382
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.25131263043429275,
+ "learning_rate": 7.572118272477622e-05,
+ "loss": 1.2538,
+ "step": 383
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.223974104870702,
+ "learning_rate": 7.568273770229546e-05,
+ "loss": 1.2165,
+ "step": 384
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.25840356830252875,
+ "learning_rate": 7.564413058736663e-05,
+ "loss": 1.1848,
+ "step": 385
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.2723156683076603,
+ "learning_rate": 7.560536155536641e-05,
+ "loss": 1.1982,
+ "step": 386
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.265687427976889,
+ "learning_rate": 7.556643078240708e-05,
+ "loss": 1.231,
+ "step": 387
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.25152762080976077,
+ "learning_rate": 7.552733844533562e-05,
+ "loss": 1.1974,
+ "step": 388
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.2366049485053541,
+ "learning_rate": 7.548808472173292e-05,
+ "loss": 1.3119,
+ "step": 389
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.22092196577077122,
+ "learning_rate": 7.5448669789913e-05,
+ "loss": 1.195,
+ "step": 390
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.22667521540462374,
+ "learning_rate": 7.540909382892217e-05,
+ "loss": 1.1431,
+ "step": 391
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.25432207282646513,
+ "learning_rate": 7.536935701853823e-05,
+ "loss": 1.2173,
+ "step": 392
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.29950506457923864,
+ "learning_rate": 7.53294595392697e-05,
+ "loss": 1.1962,
+ "step": 393
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.24735689607229913,
+ "learning_rate": 7.528940157235487e-05,
+ "loss": 1.2053,
+ "step": 394
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.24394198607459663,
+ "learning_rate": 7.524918329976114e-05,
+ "loss": 1.1979,
+ "step": 395
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.2630369372689188,
+ "learning_rate": 7.520880490418409e-05,
+ "loss": 1.2111,
+ "step": 396
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.26275028416291457,
+ "learning_rate": 7.516826656904664e-05,
+ "loss": 1.2133,
+ "step": 397
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.23938074620956928,
+ "learning_rate": 7.512756847849831e-05,
+ "loss": 1.1355,
+ "step": 398
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.3724960610098138,
+ "learning_rate": 7.508671081741428e-05,
+ "loss": 1.2572,
+ "step": 399
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.24161685847894723,
+ "learning_rate": 7.504569377139462e-05,
+ "loss": 1.1706,
+ "step": 400
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.26121591322670523,
+ "learning_rate": 7.50045175267634e-05,
+ "loss": 1.2135,
+ "step": 401
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2465579498164775,
+ "learning_rate": 7.496318227056788e-05,
+ "loss": 1.1641,
+ "step": 402
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2556288696122787,
+ "learning_rate": 7.492168819057767e-05,
+ "loss": 1.2939,
+ "step": 403
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.261481216336303,
+ "learning_rate": 7.488003547528382e-05,
+ "loss": 1.2026,
+ "step": 404
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2389415135676362,
+ "learning_rate": 7.483822431389799e-05,
+ "loss": 1.2131,
+ "step": 405
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2559201956627192,
+ "learning_rate": 7.479625489635162e-05,
+ "loss": 1.1246,
+ "step": 406
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.27127932491822604,
+ "learning_rate": 7.475412741329504e-05,
+ "loss": 1.2429,
+ "step": 407
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.27006004008695594,
+ "learning_rate": 7.47118420560966e-05,
+ "loss": 1.2388,
+ "step": 408
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.23716823297200537,
+ "learning_rate": 7.466939901684182e-05,
+ "loss": 1.1264,
+ "step": 409
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.2885373898669248,
+ "learning_rate": 7.462679848833252e-05,
+ "loss": 1.2786,
+ "step": 410
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.49215227598639927,
+ "learning_rate": 7.458404066408588e-05,
+ "loss": 1.2386,
+ "step": 411
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.24235735604947403,
+ "learning_rate": 7.454112573833368e-05,
+ "loss": 1.1423,
+ "step": 412
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.2584614748054343,
+ "learning_rate": 7.449805390602127e-05,
+ "loss": 1.2669,
+ "step": 413
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.23806123085998873,
+ "learning_rate": 7.445482536280684e-05,
+ "loss": 1.1763,
+ "step": 414
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.24459517607786851,
+ "learning_rate": 7.441144030506043e-05,
+ "loss": 1.198,
+ "step": 415
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.25801616402700395,
+ "learning_rate": 7.436789892986304e-05,
+ "loss": 1.2136,
+ "step": 416
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.2814819942392514,
+ "learning_rate": 7.432420143500578e-05,
+ "loss": 1.2398,
+ "step": 417
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.22134709322606153,
+ "learning_rate": 7.428034801898893e-05,
+ "loss": 1.1592,
+ "step": 418
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.2899677536995633,
+ "learning_rate": 7.42363388810211e-05,
+ "loss": 1.2296,
+ "step": 419
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.24005943230262294,
+ "learning_rate": 7.419217422101822e-05,
+ "loss": 1.2223,
+ "step": 420
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.26417562369496167,
+ "learning_rate": 7.414785423960275e-05,
+ "loss": 1.2261,
+ "step": 421
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.2580815883535521,
+ "learning_rate": 7.410337913810271e-05,
+ "loss": 1.2021,
+ "step": 422
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.25242217589496435,
+ "learning_rate": 7.405874911855071e-05,
+ "loss": 1.239,
+ "step": 423
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.21991733999839932,
+ "learning_rate": 7.401396438368315e-05,
+ "loss": 1.1716,
+ "step": 424
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.40116538322720213,
+ "learning_rate": 7.396902513693924e-05,
+ "loss": 1.2773,
+ "step": 425
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.277333939455099,
+ "learning_rate": 7.392393158246002e-05,
+ "loss": 1.2574,
+ "step": 426
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.27146087746385755,
+ "learning_rate": 7.387868392508756e-05,
+ "loss": 1.2243,
+ "step": 427
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.255881055620786,
+ "learning_rate": 7.38332823703639e-05,
+ "loss": 1.223,
+ "step": 428
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.24807364856677255,
+ "learning_rate": 7.378772712453021e-05,
+ "loss": 1.1985,
+ "step": 429
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.25746257617764423,
+ "learning_rate": 7.37420183945258e-05,
+ "loss": 1.2502,
+ "step": 430
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.28851991049982234,
+ "learning_rate": 7.369615638798722e-05,
+ "loss": 1.2535,
+ "step": 431
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.24113389811604363,
+ "learning_rate": 7.365014131324725e-05,
+ "loss": 1.2227,
+ "step": 432
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.2414465151257969,
+ "learning_rate": 7.360397337933405e-05,
+ "loss": 1.1884,
+ "step": 433
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.2735463134699831,
+ "learning_rate": 7.355765279597011e-05,
+ "loss": 1.2756,
+ "step": 434
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.2588437452987293,
+ "learning_rate": 7.351117977357139e-05,
+ "loss": 1.2108,
+ "step": 435
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.26573294117796553,
+ "learning_rate": 7.346455452324629e-05,
+ "loss": 1.1821,
+ "step": 436
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.2555476577827304,
+ "learning_rate": 7.341777725679473e-05,
+ "loss": 1.1937,
+ "step": 437
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.2867704132108098,
+ "learning_rate": 7.337084818670716e-05,
+ "loss": 1.2272,
+ "step": 438
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.27726678115981157,
+ "learning_rate": 7.332376752616367e-05,
+ "loss": 1.2331,
+ "step": 439
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.26955338021079955,
+ "learning_rate": 7.32765354890329e-05,
+ "loss": 1.1731,
+ "step": 440
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.25250321202536524,
+ "learning_rate": 7.322915228987116e-05,
+ "loss": 1.2653,
+ "step": 441
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.24748844179765395,
+ "learning_rate": 7.318161814392143e-05,
+ "loss": 1.24,
+ "step": 442
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.28177805247356325,
+ "learning_rate": 7.313393326711239e-05,
+ "loss": 1.185,
+ "step": 443
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.24093242000396312,
+ "learning_rate": 7.30860978760574e-05,
+ "loss": 1.1994,
+ "step": 444
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.26277803901457075,
+ "learning_rate": 7.30381121880536e-05,
+ "loss": 1.212,
+ "step": 445
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2506524258682433,
+ "learning_rate": 7.298997642108079e-05,
+ "loss": 1.2421,
+ "step": 446
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2840599700015824,
+ "learning_rate": 7.294169079380061e-05,
+ "loss": 1.1818,
+ "step": 447
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.24892184038117549,
+ "learning_rate": 7.289325552555538e-05,
+ "loss": 1.1916,
+ "step": 448
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2700898428541357,
+ "learning_rate": 7.284467083636722e-05,
+ "loss": 1.2517,
+ "step": 449
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2617848546539419,
+ "learning_rate": 7.279593694693698e-05,
+ "loss": 1.2063,
+ "step": 450
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2698278585334131,
+ "learning_rate": 7.274705407864332e-05,
+ "loss": 1.194,
+ "step": 451
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.23678313024953834,
+ "learning_rate": 7.26980224535416e-05,
+ "loss": 1.2349,
+ "step": 452
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.24851875792002978,
+ "learning_rate": 7.264884229436293e-05,
+ "loss": 1.1758,
+ "step": 453
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.24122080121681125,
+ "learning_rate": 7.259951382451318e-05,
+ "loss": 1.1962,
+ "step": 454
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.22741322959884405,
+ "learning_rate": 7.25500372680719e-05,
+ "loss": 1.1702,
+ "step": 455
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.2297475610861458,
+ "learning_rate": 7.250041284979137e-05,
+ "loss": 1.1466,
+ "step": 456
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.3057605989721467,
+ "learning_rate": 7.245064079509553e-05,
+ "loss": 1.246,
+ "step": 457
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.2719638501597136,
+ "learning_rate": 7.240072133007899e-05,
+ "loss": 1.2184,
+ "step": 458
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.2436807816414479,
+ "learning_rate": 7.235065468150593e-05,
+ "loss": 1.2324,
+ "step": 459
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.23436349430255515,
+ "learning_rate": 7.23004410768092e-05,
+ "loss": 1.1813,
+ "step": 460
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.2398940990211377,
+ "learning_rate": 7.22500807440892e-05,
+ "loss": 1.1924,
+ "step": 461
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.2605716625062531,
+ "learning_rate": 7.219957391211281e-05,
+ "loss": 1.182,
+ "step": 462
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.260462524570941,
+ "learning_rate": 7.214892081031244e-05,
+ "loss": 1.2136,
+ "step": 463
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.21979766512306334,
+ "learning_rate": 7.209812166878491e-05,
+ "loss": 1.2066,
+ "step": 464
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.23324453647530663,
+ "learning_rate": 7.204717671829051e-05,
+ "loss": 1.1657,
+ "step": 465
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.2529434935507481,
+ "learning_rate": 7.199608619025177e-05,
+ "loss": 1.2093,
+ "step": 466
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.25371701891720116,
+ "learning_rate": 7.194485031675265e-05,
+ "loss": 1.2225,
+ "step": 467
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.23272423066292103,
+ "learning_rate": 7.189346933053725e-05,
+ "loss": 1.1721,
+ "step": 468
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.25122928735587546,
+ "learning_rate": 7.184194346500892e-05,
+ "loss": 1.2537,
+ "step": 469
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.2159270875490409,
+ "learning_rate": 7.179027295422913e-05,
+ "loss": 1.197,
+ "step": 470
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.2633111059076544,
+ "learning_rate": 7.173845803291636e-05,
+ "loss": 1.1721,
+ "step": 471
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.30555936322098703,
+ "learning_rate": 7.168649893644517e-05,
+ "loss": 1.3011,
+ "step": 472
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.23492670111453726,
+ "learning_rate": 7.163439590084502e-05,
+ "loss": 1.1601,
+ "step": 473
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.26602734263721806,
+ "learning_rate": 7.158214916279923e-05,
+ "loss": 1.2808,
+ "step": 474
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.3182695007856262,
+ "learning_rate": 7.152975895964386e-05,
+ "loss": 1.2967,
+ "step": 475
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.2785021674736721,
+ "learning_rate": 7.147722552936673e-05,
+ "loss": 1.1789,
+ "step": 476
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.279474303138652,
+ "learning_rate": 7.142454911060627e-05,
+ "loss": 1.2596,
+ "step": 477
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.2556980144910755,
+ "learning_rate": 7.137172994265044e-05,
+ "loss": 1.2426,
+ "step": 478
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.3311256331993533,
+ "learning_rate": 7.131876826543565e-05,
+ "loss": 1.2059,
+ "step": 479
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.26467296197775253,
+ "learning_rate": 7.12656643195457e-05,
+ "loss": 1.2482,
+ "step": 480
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.27444885274652553,
+ "learning_rate": 7.121241834621064e-05,
+ "loss": 1.2528,
+ "step": 481
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.2572283861115396,
+ "learning_rate": 7.115903058730567e-05,
+ "loss": 1.1849,
+ "step": 482
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.2677065778235683,
+ "learning_rate": 7.11055012853501e-05,
+ "loss": 1.2011,
+ "step": 483
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.29470622036742816,
+ "learning_rate": 7.105183068350619e-05,
+ "loss": 1.2398,
+ "step": 484
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.27609230248969197,
+ "learning_rate": 7.099801902557811e-05,
+ "loss": 1.2259,
+ "step": 485
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.24248634168099284,
+ "learning_rate": 7.094406655601073e-05,
+ "loss": 1.2282,
+ "step": 486
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.2765941767688746,
+ "learning_rate": 7.088997351988865e-05,
+ "loss": 1.2319,
+ "step": 487
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.29347776909858947,
+ "learning_rate": 7.083574016293493e-05,
+ "loss": 1.1765,
+ "step": 488
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.285370295424537,
+ "learning_rate": 7.078136673151008e-05,
+ "loss": 1.26,
+ "step": 489
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.29408734903836536,
+ "learning_rate": 7.072685347261093e-05,
+ "loss": 1.226,
+ "step": 490
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.27437470239205813,
+ "learning_rate": 7.067220063386947e-05,
+ "loss": 1.1976,
+ "step": 491
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.2680770258777871,
+ "learning_rate": 7.061740846355176e-05,
+ "loss": 1.1915,
+ "step": 492
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.27200362879502954,
+ "learning_rate": 7.056247721055678e-05,
+ "loss": 1.2002,
+ "step": 493
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.2637811092577037,
+ "learning_rate": 7.050740712441528e-05,
+ "loss": 1.287,
+ "step": 494
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.24657959209271266,
+ "learning_rate": 7.045219845528875e-05,
+ "loss": 1.2284,
+ "step": 495
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.25311992110358666,
+ "learning_rate": 7.039685145396812e-05,
+ "loss": 1.1616,
+ "step": 496
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.2564633694193358,
+ "learning_rate": 7.034136637187275e-05,
+ "loss": 1.2067,
+ "step": 497
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.2446797651174144,
+ "learning_rate": 7.028574346104926e-05,
+ "loss": 1.2284,
+ "step": 498
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.2592751463399255,
+ "learning_rate": 7.022998297417034e-05,
+ "loss": 1.2371,
+ "step": 499
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.2500713943206808,
+ "learning_rate": 7.017408516453365e-05,
+ "loss": 1.1061,
+ "step": 500
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.2812266276040743,
+ "learning_rate": 7.011805028606064e-05,
+ "loss": 1.1949,
+ "step": 501
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.298829667668083,
+ "learning_rate": 7.006187859329544e-05,
+ "loss": 1.2313,
+ "step": 502
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.26518768159745104,
+ "learning_rate": 7.000557034140361e-05,
+ "loss": 1.2246,
+ "step": 503
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.3037280360760458,
+ "learning_rate": 6.994912578617113e-05,
+ "loss": 1.1617,
+ "step": 504
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.2726903109255714,
+ "learning_rate": 6.989254518400309e-05,
+ "loss": 1.2415,
+ "step": 505
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.25568082003046966,
+ "learning_rate": 6.98358287919226e-05,
+ "loss": 1.1817,
+ "step": 506
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.25633294893705044,
+ "learning_rate": 6.97789768675696e-05,
+ "loss": 1.2149,
+ "step": 507
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.28291439435087123,
+ "learning_rate": 6.972198966919972e-05,
+ "loss": 1.1578,
+ "step": 508
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.27195184756655516,
+ "learning_rate": 6.966486745568308e-05,
+ "loss": 1.2355,
+ "step": 509
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.239159568376005,
+ "learning_rate": 6.960761048650312e-05,
+ "loss": 1.1688,
+ "step": 510
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.22961475425949177,
+ "learning_rate": 6.955021902175543e-05,
+ "loss": 1.2094,
+ "step": 511
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.27443773600741117,
+ "learning_rate": 6.949269332214651e-05,
+ "loss": 1.2559,
+ "step": 512
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.26230551832002097,
+ "learning_rate": 6.94350336489927e-05,
+ "loss": 1.2121,
+ "step": 513
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.2716742985303849,
+ "learning_rate": 6.937724026421892e-05,
+ "loss": 1.2444,
+ "step": 514
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.2537850139439542,
+ "learning_rate": 6.931931343035742e-05,
+ "loss": 1.1327,
+ "step": 515
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.28599587967496826,
+ "learning_rate": 6.926125341054676e-05,
+ "loss": 1.2236,
+ "step": 516
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.26780654378470103,
+ "learning_rate": 6.920306046853043e-05,
+ "loss": 1.2295,
+ "step": 517
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.23606296888412015,
+ "learning_rate": 6.914473486865577e-05,
+ "loss": 1.1543,
+ "step": 518
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.34976881174240837,
+ "learning_rate": 6.90862768758727e-05,
+ "loss": 1.2067,
+ "step": 519
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.2481257873494882,
+ "learning_rate": 6.902768675573258e-05,
+ "loss": 1.2188,
+ "step": 520
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.2996395778117021,
+ "learning_rate": 6.896896477438699e-05,
+ "loss": 1.2326,
+ "step": 521
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.8839768816333193,
+ "learning_rate": 6.891011119858643e-05,
+ "loss": 1.2435,
+ "step": 522
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.2851882482058998,
+ "learning_rate": 6.885112629567927e-05,
+ "loss": 1.2644,
+ "step": 523
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.2813663482913699,
+ "learning_rate": 6.879201033361035e-05,
+ "loss": 1.2309,
+ "step": 524
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.3257551560135454,
+ "learning_rate": 6.873276358091996e-05,
+ "loss": 1.2755,
+ "step": 525
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.28930479952494365,
+ "learning_rate": 6.867338630674247e-05,
+ "loss": 1.1962,
+ "step": 526
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.3077462996938649,
+ "learning_rate": 6.861387878080511e-05,
+ "loss": 1.2402,
+ "step": 527
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.2848900193452761,
+ "learning_rate": 6.855424127342688e-05,
+ "loss": 1.2748,
+ "step": 528
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.4765938812802202,
+ "learning_rate": 6.849447405551718e-05,
+ "loss": 1.2226,
+ "step": 529
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.53184473292579,
+ "learning_rate": 6.843457739857467e-05,
+ "loss": 1.2347,
+ "step": 530
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.6416239346492343,
+ "learning_rate": 6.837455157468596e-05,
+ "loss": 1.2429,
+ "step": 531
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.3188092712502773,
+ "learning_rate": 6.831439685652442e-05,
+ "loss": 1.216,
+ "step": 532
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.3527495731006385,
+ "learning_rate": 6.825411351734895e-05,
+ "loss": 1.1682,
+ "step": 533
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 0.29603753744741856,
+ "learning_rate": 6.819370183100274e-05,
+ "loss": 1.1434,
+ "step": 534
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 0.5252450389976622,
+ "learning_rate": 6.813316207191198e-05,
+ "loss": 1.1943,
+ "step": 535
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 0.32999419558659937,
+ "learning_rate": 6.807249451508466e-05,
+ "loss": 1.192,
+ "step": 536
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 0.3650175469778724,
+ "learning_rate": 6.801169943610929e-05,
+ "loss": 1.2141,
+ "step": 537
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 1.0643532150783557,
+ "learning_rate": 6.795077711115368e-05,
+ "loss": 1.2253,
+ "step": 538
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.5041310609130145,
+ "learning_rate": 6.788972781696363e-05,
+ "loss": 1.278,
+ "step": 539
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.5123058164360991,
+ "learning_rate": 6.782855183086177e-05,
+ "loss": 1.2231,
+ "step": 540
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.3533015702394419,
+ "learning_rate": 6.776724943074619e-05,
+ "loss": 1.2072,
+ "step": 541
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.30253964625417207,
+ "learning_rate": 6.770582089508927e-05,
+ "loss": 1.1382,
+ "step": 542
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.348991618828202,
+ "learning_rate": 6.764426650293633e-05,
+ "loss": 1.2079,
+ "step": 543
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.46017440578788743,
+ "learning_rate": 6.758258653390444e-05,
+ "loss": 1.1813,
+ "step": 544
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.31962101755594885,
+ "learning_rate": 6.75207812681811e-05,
+ "loss": 1.1339,
+ "step": 545
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.37092024548285923,
+ "learning_rate": 6.745885098652298e-05,
+ "loss": 1.2591,
+ "step": 546
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.32347106450715835,
+ "learning_rate": 6.739679597025466e-05,
+ "loss": 1.2017,
+ "step": 547
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.39250187112342494,
+ "learning_rate": 6.733461650126733e-05,
+ "loss": 1.0933,
+ "step": 548
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.473522452217324,
+ "learning_rate": 6.727231286201752e-05,
+ "loss": 1.1124,
+ "step": 549
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.4809062179622052,
+ "learning_rate": 6.720988533552582e-05,
+ "loss": 1.1585,
+ "step": 550
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.3529662801059162,
+ "learning_rate": 6.714733420537559e-05,
+ "loss": 1.0501,
+ "step": 551
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.5958247214391118,
+ "learning_rate": 6.708465975571168e-05,
+ "loss": 1.1086,
+ "step": 552
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.5341364205022454,
+ "learning_rate": 6.70218622712391e-05,
+ "loss": 1.0518,
+ "step": 553
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.3601805724462006,
+ "learning_rate": 6.695894203722181e-05,
+ "loss": 1.1779,
+ "step": 554
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.43410190338280613,
+ "learning_rate": 6.68958993394813e-05,
+ "loss": 1.093,
+ "step": 555
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.46217742572873594,
+ "learning_rate": 6.683273446439546e-05,
+ "loss": 1.0117,
+ "step": 556
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.8591682373623357,
+ "learning_rate": 6.676944769889708e-05,
+ "loss": 1.1002,
+ "step": 557
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.7383229487622726,
+ "learning_rate": 6.670603933047272e-05,
+ "loss": 1.0779,
+ "step": 558
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.5965305891207813,
+ "learning_rate": 6.664250964716131e-05,
+ "loss": 1.0889,
+ "step": 559
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.6030858606684543,
+ "learning_rate": 6.657885893755288e-05,
+ "loss": 1.0982,
+ "step": 560
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.4644510682398409,
+ "learning_rate": 6.65150874907872e-05,
+ "loss": 1.1004,
+ "step": 561
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.43943285132452564,
+ "learning_rate": 6.645119559655254e-05,
+ "loss": 1.0536,
+ "step": 562
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.4456395978600012,
+ "learning_rate": 6.638718354508427e-05,
+ "loss": 1.0733,
+ "step": 563
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.3303824433217466,
+ "learning_rate": 6.632305162716365e-05,
+ "loss": 1.0552,
+ "step": 564
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.3617704823170143,
+ "learning_rate": 6.62588001341164e-05,
+ "loss": 1.1092,
+ "step": 565
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.4465013349903427,
+ "learning_rate": 6.619442935781141e-05,
+ "loss": 1.0781,
+ "step": 566
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.48516780613791277,
+ "learning_rate": 6.612993959065947e-05,
+ "loss": 1.0686,
+ "step": 567
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.38867820318536633,
+ "learning_rate": 6.606533112561186e-05,
+ "loss": 1.1215,
+ "step": 568
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.38566119820378336,
+ "learning_rate": 6.600060425615907e-05,
+ "loss": 1.1213,
+ "step": 569
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.35534855445058544,
+ "learning_rate": 6.593575927632947e-05,
+ "loss": 1.0955,
+ "step": 570
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.38124406233349717,
+ "learning_rate": 6.587079648068795e-05,
+ "loss": 1.0659,
+ "step": 571
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.454750160923548,
+ "learning_rate": 6.580571616433457e-05,
+ "loss": 1.1149,
+ "step": 572
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.35353190088025255,
+ "learning_rate": 6.574051862290325e-05,
+ "loss": 1.0388,
+ "step": 573
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.3249395594793626,
+ "learning_rate": 6.567520415256045e-05,
+ "loss": 1.0784,
+ "step": 574
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.40078898818247227,
+ "learning_rate": 6.560977305000375e-05,
+ "loss": 1.0859,
+ "step": 575
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.4115264795060035,
+ "learning_rate": 6.554422561246054e-05,
+ "loss": 1.1828,
+ "step": 576
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.30090229228069215,
+ "learning_rate": 6.54785621376867e-05,
+ "loss": 1.0901,
+ "step": 577
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.28827860350299206,
+ "learning_rate": 6.541278292396523e-05,
+ "loss": 1.0277,
+ "step": 578
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.34690404488996757,
+ "learning_rate": 6.534688827010484e-05,
+ "loss": 1.048,
+ "step": 579
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.29943113556644785,
+ "learning_rate": 6.528087847543867e-05,
+ "loss": 1.0646,
+ "step": 580
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.37318202575874415,
+ "learning_rate": 6.521475383982291e-05,
+ "loss": 1.1091,
+ "step": 581
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.3049663659203959,
+ "learning_rate": 6.51485146636354e-05,
+ "loss": 1.0552,
+ "step": 582
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.3342407867509692,
+ "learning_rate": 6.508216124777431e-05,
+ "loss": 1.2227,
+ "step": 583
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.3348396047855952,
+ "learning_rate": 6.501569389365674e-05,
+ "loss": 1.0861,
+ "step": 584
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.30951429367513383,
+ "learning_rate": 6.494911290321737e-05,
+ "loss": 1.0461,
+ "step": 585
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.33898401361064606,
+ "learning_rate": 6.488241857890711e-05,
+ "loss": 1.0854,
+ "step": 586
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.4901462068263497,
+ "learning_rate": 6.481561122369164e-05,
+ "loss": 1.1012,
+ "step": 587
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.3179574879809652,
+ "learning_rate": 6.474869114105018e-05,
+ "loss": 1.0451,
+ "step": 588
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.32159328915060714,
+ "learning_rate": 6.468165863497395e-05,
+ "loss": 1.0458,
+ "step": 589
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.36462235008537297,
+ "learning_rate": 6.461451400996491e-05,
+ "loss": 1.1247,
+ "step": 590
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.5373862753611778,
+ "learning_rate": 6.454725757103432e-05,
+ "loss": 1.0542,
+ "step": 591
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.3160409270291303,
+ "learning_rate": 6.447988962370133e-05,
+ "loss": 1.0829,
+ "step": 592
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.390452102978435,
+ "learning_rate": 6.441241047399169e-05,
+ "loss": 1.192,
+ "step": 593
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.3802122712014928,
+ "learning_rate": 6.434482042843627e-05,
+ "loss": 1.1153,
+ "step": 594
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.4081584328242501,
+ "learning_rate": 6.427711979406966e-05,
+ "loss": 1.1635,
+ "step": 595
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.3791962989638633,
+ "learning_rate": 6.420930887842889e-05,
+ "loss": 1.1581,
+ "step": 596
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.33239440056484193,
+ "learning_rate": 6.414138798955189e-05,
+ "loss": 1.0926,
+ "step": 597
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.3279881540815014,
+ "learning_rate": 6.407335743597616e-05,
+ "loss": 1.1386,
+ "step": 598
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.30309644763750837,
+ "learning_rate": 6.40052175267374e-05,
+ "loss": 1.0523,
+ "step": 599
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.3349097308403333,
+ "learning_rate": 6.393696857136801e-05,
+ "loss": 1.0815,
+ "step": 600
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.3288227593556618,
+ "learning_rate": 6.386861087989581e-05,
+ "loss": 1.015,
+ "step": 601
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.36685586740843157,
+ "learning_rate": 6.380014476284255e-05,
+ "loss": 1.1232,
+ "step": 602
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.3620977714204643,
+ "learning_rate": 6.373157053122243e-05,
+ "loss": 1.1138,
+ "step": 603
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.3130587018197183,
+ "learning_rate": 6.366288849654091e-05,
+ "loss": 1.1255,
+ "step": 604
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.3602737087072766,
+ "learning_rate": 6.359409897079303e-05,
+ "loss": 1.0282,
+ "step": 605
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.31168852571991945,
+ "learning_rate": 6.352520226646222e-05,
+ "loss": 1.0779,
+ "step": 606
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.3516045580189353,
+ "learning_rate": 6.345619869651871e-05,
+ "loss": 1.1028,
+ "step": 607
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.3231857927563657,
+ "learning_rate": 6.33870885744182e-05,
+ "loss": 1.1202,
+ "step": 608
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.30205205129701157,
+ "learning_rate": 6.331787221410041e-05,
+ "loss": 1.1369,
+ "step": 609
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.3198359813888166,
+ "learning_rate": 6.32485499299877e-05,
+ "loss": 1.1763,
+ "step": 610
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.3128641370321787,
+ "learning_rate": 6.31791220369835e-05,
+ "loss": 1.0223,
+ "step": 611
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.2989105616213649,
+ "learning_rate": 6.31095888504711e-05,
+ "loss": 1.0358,
+ "step": 612
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.3103537906853337,
+ "learning_rate": 6.303995068631203e-05,
+ "loss": 1.1261,
+ "step": 613
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.28598715532508207,
+ "learning_rate": 6.297020786084467e-05,
+ "loss": 1.0629,
+ "step": 614
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.29809789918093255,
+ "learning_rate": 6.290036069088288e-05,
+ "loss": 1.035,
+ "step": 615
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.33765270252261453,
+ "learning_rate": 6.283040949371451e-05,
+ "loss": 1.1221,
+ "step": 616
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.3424617501293415,
+ "learning_rate": 6.276035458709993e-05,
+ "loss": 1.155,
+ "step": 617
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.3799189737987811,
+ "learning_rate": 6.269019628927067e-05,
+ "loss": 1.0701,
+ "step": 618
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.3358898935253196,
+ "learning_rate": 6.261993491892791e-05,
+ "loss": 1.1649,
+ "step": 619
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.31569979424117356,
+ "learning_rate": 6.254957079524099e-05,
+ "loss": 1.0633,
+ "step": 620
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.3002168156888237,
+ "learning_rate": 6.247910423784609e-05,
+ "loss": 1.0846,
+ "step": 621
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.3097238823450595,
+ "learning_rate": 6.24085355668447e-05,
+ "loss": 1.0808,
+ "step": 622
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.3120312761417578,
+ "learning_rate": 6.233786510280212e-05,
+ "loss": 1.0142,
+ "step": 623
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.3335343015064923,
+ "learning_rate": 6.22670931667461e-05,
+ "loss": 1.0674,
+ "step": 624
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.3234062304634526,
+ "learning_rate": 6.219622008016533e-05,
+ "loss": 1.0981,
+ "step": 625
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.32152678786547273,
+ "learning_rate": 6.212524616500798e-05,
+ "loss": 1.0244,
+ "step": 626
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.39031977608147594,
+ "learning_rate": 6.205417174368023e-05,
+ "loss": 1.1205,
+ "step": 627
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.3806189090017157,
+ "learning_rate": 6.198299713904485e-05,
+ "loss": 1.1134,
+ "step": 628
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.2978349276971668,
+ "learning_rate": 6.191172267441967e-05,
+ "loss": 1.0088,
+ "step": 629
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.3190354077382501,
+ "learning_rate": 6.184034867357617e-05,
+ "loss": 1.108,
+ "step": 630
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.32633048665038994,
+ "learning_rate": 6.176887546073797e-05,
+ "loss": 1.0825,
+ "step": 631
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.3428026413020903,
+ "learning_rate": 6.169730336057939e-05,
+ "loss": 1.0765,
+ "step": 632
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.3475737151929015,
+ "learning_rate": 6.162563269822391e-05,
+ "loss": 1.0693,
+ "step": 633
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.3870252154591392,
+ "learning_rate": 6.15538637992428e-05,
+ "loss": 1.1081,
+ "step": 634
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.33597355193652834,
+ "learning_rate": 6.148199698965352e-05,
+ "loss": 1.0893,
+ "step": 635
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.30805894179787247,
+ "learning_rate": 6.141003259591834e-05,
+ "loss": 1.0995,
+ "step": 636
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.3025073882734066,
+ "learning_rate": 6.133797094494281e-05,
+ "loss": 1.0388,
+ "step": 637
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.3524395196391662,
+ "learning_rate": 6.126581236407429e-05,
+ "loss": 1.1196,
+ "step": 638
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.3377646188130345,
+ "learning_rate": 6.119355718110039e-05,
+ "loss": 1.0382,
+ "step": 639
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.35508400659785483,
+ "learning_rate": 6.112120572424763e-05,
+ "loss": 1.1402,
+ "step": 640
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.3454418793700457,
+ "learning_rate": 6.104875832217982e-05,
+ "loss": 1.1032,
+ "step": 641
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.32629806837059866,
+ "learning_rate": 6.097621530399661e-05,
+ "loss": 1.0959,
+ "step": 642
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.3329536837751315,
+ "learning_rate": 6.090357699923202e-05,
+ "loss": 1.0467,
+ "step": 643
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.32302233828349475,
+ "learning_rate": 6.083084373785287e-05,
+ "loss": 1.0858,
+ "step": 644
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.3310358826507611,
+ "learning_rate": 6.075801585025739e-05,
+ "loss": 1.0715,
+ "step": 645
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.319322035854079,
+ "learning_rate": 6.068509366727362e-05,
+ "loss": 1.177,
+ "step": 646
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.3065230667302707,
+ "learning_rate": 6.061207752015797e-05,
+ "loss": 1.0649,
+ "step": 647
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.29926795565748227,
+ "learning_rate": 6.053896774059368e-05,
+ "loss": 1.1325,
+ "step": 648
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.3556069634279046,
+ "learning_rate": 6.046576466068931e-05,
+ "loss": 1.1366,
+ "step": 649
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.3189191131461966,
+ "learning_rate": 6.039246861297727e-05,
+ "loss": 1.0693,
+ "step": 650
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.3347197156648834,
+ "learning_rate": 6.031907993041227e-05,
+ "loss": 1.1009,
+ "step": 651
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.32274156348185445,
+ "learning_rate": 6.0245598946369826e-05,
+ "loss": 1.1675,
+ "step": 652
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.35534089035455224,
+ "learning_rate": 6.017202599464476e-05,
+ "loss": 1.1723,
+ "step": 653
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.3106026578570133,
+ "learning_rate": 6.009836140944965e-05,
+ "loss": 1.0954,
+ "step": 654
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.3309144454564729,
+ "learning_rate": 6.002460552541331e-05,
+ "loss": 1.0209,
+ "step": 655
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.3023619281400003,
+ "learning_rate": 5.9950758677579345e-05,
+ "loss": 1.0363,
+ "step": 656
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.3311182880219704,
+ "learning_rate": 5.987682120140451e-05,
+ "loss": 1.0515,
+ "step": 657
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.33396486010030413,
+ "learning_rate": 5.980279343275729e-05,
+ "loss": 1.1251,
+ "step": 658
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.3465764556678002,
+ "learning_rate": 5.97286757079163e-05,
+ "loss": 1.165,
+ "step": 659
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.304193441363374,
+ "learning_rate": 5.965446836356882e-05,
+ "loss": 1.0228,
+ "step": 660
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.3415149030413082,
+ "learning_rate": 5.9580171736809224e-05,
+ "loss": 1.0742,
+ "step": 661
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.33138658321132064,
+ "learning_rate": 5.950578616513746e-05,
+ "loss": 1.0843,
+ "step": 662
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.30774403421162994,
+ "learning_rate": 5.943131198645752e-05,
+ "loss": 1.065,
+ "step": 663
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.3428877492183819,
+ "learning_rate": 5.9356749539075885e-05,
+ "loss": 1.1101,
+ "step": 664
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.3621290546130101,
+ "learning_rate": 5.928209916170003e-05,
+ "loss": 1.1372,
+ "step": 665
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.3482375945469884,
+ "learning_rate": 5.9207361193436865e-05,
+ "loss": 1.132,
+ "step": 666
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.31754384974068384,
+ "learning_rate": 5.9132535973791156e-05,
+ "loss": 1.148,
+ "step": 667
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.36003834782050365,
+ "learning_rate": 5.9057623842664044e-05,
+ "loss": 1.1099,
+ "step": 668
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.2963701622969662,
+ "learning_rate": 5.8982625140351464e-05,
+ "loss": 1.0755,
+ "step": 669
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.32579569606066516,
+ "learning_rate": 5.8907540207542616e-05,
+ "loss": 1.0809,
+ "step": 670
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.4247563451753457,
+ "learning_rate": 5.8832369385318416e-05,
+ "loss": 1.097,
+ "step": 671
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.33076932102169776,
+ "learning_rate": 5.875711301514992e-05,
+ "loss": 1.1078,
+ "step": 672
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.3609238032332309,
+ "learning_rate": 5.8681771438896815e-05,
+ "loss": 1.1031,
+ "step": 673
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.325159585649425,
+ "learning_rate": 5.860634499880583e-05,
+ "loss": 1.0707,
+ "step": 674
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.4620687271068983,
+ "learning_rate": 5.853083403750922e-05,
+ "loss": 1.1017,
+ "step": 675
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.33485279064365936,
+ "learning_rate": 5.845523889802316e-05,
+ "loss": 1.0989,
+ "step": 676
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.30952573170841513,
+ "learning_rate": 5.8379559923746214e-05,
+ "loss": 1.0393,
+ "step": 677
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.33498605810588283,
+ "learning_rate": 5.830379745845781e-05,
+ "loss": 1.1259,
+ "step": 678
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.35771921163037307,
+ "learning_rate": 5.822795184631659e-05,
+ "loss": 1.0815,
+ "step": 679
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.3329650192347647,
+ "learning_rate": 5.815202343185894e-05,
+ "loss": 1.1344,
+ "step": 680
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3356634465845771,
+ "learning_rate": 5.807601255999736e-05,
+ "loss": 1.1297,
+ "step": 681
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3289442034151235,
+ "learning_rate": 5.7999919576018934e-05,
+ "loss": 1.022,
+ "step": 682
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3207007334784113,
+ "learning_rate": 5.7923744825583745e-05,
+ "loss": 1.0571,
+ "step": 683
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3582460325329284,
+ "learning_rate": 5.7847488654723304e-05,
+ "loss": 1.0778,
+ "step": 684
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3563317666176927,
+ "learning_rate": 5.777115140983899e-05,
+ "loss": 1.1003,
+ "step": 685
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 3.4694912945702105,
+ "learning_rate": 5.769473343770047e-05,
+ "loss": 1.121,
+ "step": 686
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.43002349520483113,
+ "learning_rate": 5.761823508544411e-05,
+ "loss": 1.0765,
+ "step": 687
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.39467783104839754,
+ "learning_rate": 5.754165670057142e-05,
+ "loss": 1.0788,
+ "step": 688
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.39629029674867916,
+ "learning_rate": 5.7464998630947464e-05,
+ "loss": 1.0812,
+ "step": 689
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.3880152093965208,
+ "learning_rate": 5.738826122479929e-05,
+ "loss": 1.1228,
+ "step": 690
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.3777874121959188,
+ "learning_rate": 5.7311444830714324e-05,
+ "loss": 1.0907,
+ "step": 691
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.38004041653523696,
+ "learning_rate": 5.723454979763882e-05,
+ "loss": 1.1263,
+ "step": 692
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.37049672627797636,
+ "learning_rate": 5.7157576474876246e-05,
+ "loss": 1.1438,
+ "step": 693
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.32973606103437614,
+ "learning_rate": 5.7080525212085725e-05,
+ "loss": 1.0553,
+ "step": 694
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.31674639252070325,
+ "learning_rate": 5.700339635928038e-05,
+ "loss": 1.06,
+ "step": 695
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.32282199426553837,
+ "learning_rate": 5.692619026682588e-05,
+ "loss": 1.0841,
+ "step": 696
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.4810882958061859,
+ "learning_rate": 5.684890728543869e-05,
+ "loss": 1.0803,
+ "step": 697
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.3995638550178378,
+ "learning_rate": 5.6771547766184566e-05,
+ "loss": 1.1187,
+ "step": 698
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.35264932960583484,
+ "learning_rate": 5.669411206047699e-05,
+ "loss": 1.0641,
+ "step": 699
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.35240640524733,
+ "learning_rate": 5.661660052007547e-05,
+ "loss": 1.076,
+ "step": 700
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.3540694609860389,
+ "learning_rate": 5.653901349708401e-05,
+ "loss": 1.1369,
+ "step": 701
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.3196055112925304,
+ "learning_rate": 5.646135134394955e-05,
+ "loss": 1.0677,
+ "step": 702
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.4214141007955914,
+ "learning_rate": 5.6383614413460266e-05,
+ "loss": 1.1139,
+ "step": 703
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.3625611311798579,
+ "learning_rate": 5.630580305874402e-05,
+ "loss": 1.1845,
+ "step": 704
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.3425208672181188,
+ "learning_rate": 5.62279176332668e-05,
+ "loss": 1.174,
+ "step": 705
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.3108419862818321,
+ "learning_rate": 5.6149958490830996e-05,
+ "loss": 1.0331,
+ "step": 706
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.3274644181571904,
+ "learning_rate": 5.607192598557394e-05,
+ "loss": 1.0664,
+ "step": 707
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.346218197215145,
+ "learning_rate": 5.599382047196617e-05,
+ "loss": 1.2088,
+ "step": 708
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.328497632267458,
+ "learning_rate": 5.591564230480989e-05,
+ "loss": 1.0287,
+ "step": 709
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.3708173720611468,
+ "learning_rate": 5.583739183923732e-05,
+ "loss": 1.0883,
+ "step": 710
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.3631427403535479,
+ "learning_rate": 5.575906943070915e-05,
+ "loss": 1.1155,
+ "step": 711
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.3305201458598695,
+ "learning_rate": 5.5680675435012834e-05,
+ "loss": 1.0958,
+ "step": 712
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.34978833532083714,
+ "learning_rate": 5.5602210208261036e-05,
+ "loss": 1.1437,
+ "step": 713
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.3510553882510229,
+ "learning_rate": 5.552367410688999e-05,
+ "loss": 1.0941,
+ "step": 714
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.3523747462465078,
+ "learning_rate": 5.544506748765789e-05,
+ "loss": 1.1289,
+ "step": 715
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.38262637783927445,
+ "learning_rate": 5.5366390707643266e-05,
+ "loss": 1.099,
+ "step": 716
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.38620065989073454,
+ "learning_rate": 5.528764412424334e-05,
+ "loss": 1.083,
+ "step": 717
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.3401355276121096,
+ "learning_rate": 5.520882809517245e-05,
+ "loss": 1.028,
+ "step": 718
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.3392061008943934,
+ "learning_rate": 5.512994297846039e-05,
+ "loss": 1.1083,
+ "step": 719
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.34219480421015414,
+ "learning_rate": 5.505098913245077e-05,
+ "loss": 1.1108,
+ "step": 720
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.3275058061553761,
+ "learning_rate": 5.497196691579945e-05,
+ "loss": 1.111,
+ "step": 721
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.36800249746509384,
+ "learning_rate": 5.489287668747283e-05,
+ "loss": 1.1221,
+ "step": 722
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.4129005533101575,
+ "learning_rate": 5.481371880674628e-05,
+ "loss": 1.0966,
+ "step": 723
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.36563906596251655,
+ "learning_rate": 5.4734493633202505e-05,
+ "loss": 1.0927,
+ "step": 724
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.3614650536839971,
+ "learning_rate": 5.465520152672986e-05,
+ "loss": 1.13,
+ "step": 725
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.36419665098633497,
+ "learning_rate": 5.4575842847520765e-05,
+ "loss": 1.1183,
+ "step": 726
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.34490689807258995,
+ "learning_rate": 5.449641795607005e-05,
+ "loss": 1.0919,
+ "step": 727
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.3627643746876298,
+ "learning_rate": 5.441692721317334e-05,
+ "loss": 1.0411,
+ "step": 728
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.323620411949565,
+ "learning_rate": 5.433737097992537e-05,
+ "loss": 1.0725,
+ "step": 729
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3521599501824965,
+ "learning_rate": 5.425774961771838e-05,
+ "loss": 1.0926,
+ "step": 730
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3302390546764222,
+ "learning_rate": 5.417806348824047e-05,
+ "loss": 1.0468,
+ "step": 731
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3833325802616019,
+ "learning_rate": 5.4098312953473956e-05,
+ "loss": 1.1291,
+ "step": 732
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3708621126835512,
+ "learning_rate": 5.401849837569372e-05,
+ "loss": 1.0887,
+ "step": 733
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3625834373416278,
+ "learning_rate": 5.393862011746555e-05,
+ "loss": 1.0981,
+ "step": 734
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3583343965080617,
+ "learning_rate": 5.385867854164451e-05,
+ "loss": 1.1021,
+ "step": 735
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.34598320594096066,
+ "learning_rate": 5.377867401137332e-05,
+ "loss": 1.1376,
+ "step": 736
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.3046382791315433,
+ "learning_rate": 5.369860689008066e-05,
+ "loss": 1.0206,
+ "step": 737
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.34464948380043725,
+ "learning_rate": 5.3618477541479505e-05,
+ "loss": 1.1084,
+ "step": 738
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.3203242519627101,
+ "learning_rate": 5.353828632956557e-05,
+ "loss": 1.0731,
+ "step": 739
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.3431169960355163,
+ "learning_rate": 5.3458033618615516e-05,
+ "loss": 1.091,
+ "step": 740
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.33492074521678705,
+ "learning_rate": 5.337771977318543e-05,
+ "loss": 1.1112,
+ "step": 741
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.32576546585541344,
+ "learning_rate": 5.3297345158109086e-05,
+ "loss": 1.0993,
+ "step": 742
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.3410007245037574,
+ "learning_rate": 5.3216910138496286e-05,
+ "loss": 1.094,
+ "step": 743
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.34891180680896833,
+ "learning_rate": 5.313641507973128e-05,
+ "loss": 1.1331,
+ "step": 744
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.37135766946717214,
+ "learning_rate": 5.3055860347471006e-05,
+ "loss": 1.1,
+ "step": 745
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.3465019415478411,
+ "learning_rate": 5.297524630764349e-05,
+ "loss": 1.1256,
+ "step": 746
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.37035388481626563,
+ "learning_rate": 5.289457332644615e-05,
+ "loss": 1.0366,
+ "step": 747
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.33853883270759155,
+ "learning_rate": 5.281384177034421e-05,
+ "loss": 1.0547,
+ "step": 748
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.364306618627317,
+ "learning_rate": 5.2733052006068897e-05,
+ "loss": 1.0768,
+ "step": 749
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.4021754315731627,
+ "learning_rate": 5.2652204400615916e-05,
+ "loss": 1.1382,
+ "step": 750
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.3332185389039008,
+ "learning_rate": 5.257129932124368e-05,
+ "loss": 1.0815,
+ "step": 751
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.3453105709879854,
+ "learning_rate": 5.249033713547173e-05,
+ "loss": 1.1109,
+ "step": 752
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.3385397539717797,
+ "learning_rate": 5.2409318211078966e-05,
+ "loss": 1.0529,
+ "step": 753
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.33197994450130447,
+ "learning_rate": 5.232824291610206e-05,
+ "loss": 1.0721,
+ "step": 754
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.32836289576124167,
+ "learning_rate": 5.224711161883375e-05,
+ "loss": 1.0459,
+ "step": 755
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.32491620058831744,
+ "learning_rate": 5.216592468782117e-05,
+ "loss": 1.0897,
+ "step": 756
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.3137879047811153,
+ "learning_rate": 5.2084682491864155e-05,
+ "loss": 1.096,
+ "step": 757
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.3356938043023012,
+ "learning_rate": 5.200338540001364e-05,
+ "loss": 1.0827,
+ "step": 758
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.36044340490819055,
+ "learning_rate": 5.192203378156984e-05,
+ "loss": 1.0617,
+ "step": 759
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.34674262047888293,
+ "learning_rate": 5.184062800608077e-05,
+ "loss": 1.1267,
+ "step": 760
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.32469442322149333,
+ "learning_rate": 5.1759168443340375e-05,
+ "loss": 1.1483,
+ "step": 761
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.3290384307774216,
+ "learning_rate": 5.167765546338698e-05,
+ "loss": 1.047,
+ "step": 762
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.31637612188770403,
+ "learning_rate": 5.1596089436501525e-05,
+ "loss": 1.0311,
+ "step": 763
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.3168693829641207,
+ "learning_rate": 5.151447073320597e-05,
+ "loss": 1.1405,
+ "step": 764
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.34322421571238926,
+ "learning_rate": 5.143279972426153e-05,
+ "loss": 1.1428,
+ "step": 765
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.3291030435830325,
+ "learning_rate": 5.1351076780667026e-05,
+ "loss": 1.0473,
+ "step": 766
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.33772039158758044,
+ "learning_rate": 5.1269302273657195e-05,
+ "loss": 1.0909,
+ "step": 767
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.3802031736890876,
+ "learning_rate": 5.118747657470102e-05,
+ "loss": 1.1482,
+ "step": 768
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.3296067628997962,
+ "learning_rate": 5.1105600055500025e-05,
+ "loss": 1.0085,
+ "step": 769
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.3707139982828035,
+ "learning_rate": 5.102367308798658e-05,
+ "loss": 1.0746,
+ "step": 770
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.3378537316757011,
+ "learning_rate": 5.094169604432225e-05,
+ "loss": 1.0482,
+ "step": 771
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.4008417246255145,
+ "learning_rate": 5.085966929689601e-05,
+ "loss": 1.1065,
+ "step": 772
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.3244385106988064,
+ "learning_rate": 5.077759321832271e-05,
+ "loss": 1.0827,
+ "step": 773
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.37228575732812336,
+ "learning_rate": 5.0695468181441215e-05,
+ "loss": 1.1146,
+ "step": 774
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.33761714797540276,
+ "learning_rate": 5.061329455931283e-05,
+ "loss": 1.092,
+ "step": 775
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.3158158390913494,
+ "learning_rate": 5.053107272521955e-05,
+ "loss": 1.1058,
+ "step": 776
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.3691501929738938,
+ "learning_rate": 5.044880305266239e-05,
+ "loss": 1.1599,
+ "step": 777
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.33730914019805525,
+ "learning_rate": 5.0366485915359645e-05,
+ "loss": 1.0615,
+ "step": 778
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.34970059240017,
+ "learning_rate": 5.0284121687245257e-05,
+ "loss": 1.1475,
+ "step": 779
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 0.3374028029407197,
+ "learning_rate": 5.020171074246707e-05,
+ "loss": 1.0926,
+ "step": 780
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 0.3350020681123992,
+ "learning_rate": 5.011925345538514e-05,
+ "loss": 1.1276,
+ "step": 781
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 0.3224228965786606,
+ "learning_rate": 5.003675020057003e-05,
+ "loss": 1.0183,
+ "step": 782
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 0.3357310714740298,
+ "learning_rate": 4.995420135280114e-05,
+ "loss": 1.1114,
+ "step": 783
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 0.3590203255363759,
+ "learning_rate": 4.9871607287064966e-05,
+ "loss": 1.1504,
+ "step": 784
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.33011195419611655,
+ "learning_rate": 4.9788968378553396e-05,
+ "loss": 1.0826,
+ "step": 785
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.31088868195439445,
+ "learning_rate": 4.970628500266207e-05,
+ "loss": 1.0704,
+ "step": 786
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.3144996103179409,
+ "learning_rate": 4.962355753498858e-05,
+ "loss": 1.1403,
+ "step": 787
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.3147269555419068,
+ "learning_rate": 4.954078635133081e-05,
+ "loss": 1.0898,
+ "step": 788
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.3280151747783868,
+ "learning_rate": 4.945797182768524e-05,
+ "loss": 1.1115,
+ "step": 789
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.3551996569232493,
+ "learning_rate": 4.937511434024524e-05,
+ "loss": 1.1731,
+ "step": 790
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 0.343863208057807,
+ "learning_rate": 4.9292214265399336e-05,
+ "loss": 1.0866,
+ "step": 791
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 0.37316699385322466,
+ "learning_rate": 4.920927197972949e-05,
+ "loss": 1.1083,
+ "step": 792
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 0.3635739774067832,
+ "learning_rate": 4.9126287860009453e-05,
+ "loss": 1.1393,
+ "step": 793
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 0.3755910554972886,
+ "learning_rate": 4.9043262283202974e-05,
+ "loss": 1.1624,
+ "step": 794
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 0.3635899120146823,
+ "learning_rate": 4.8960195626462145e-05,
+ "loss": 1.2095,
+ "step": 795
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 0.3642202684342816,
+ "learning_rate": 4.8877088267125664e-05,
+ "loss": 1.1099,
+ "step": 796
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 0.3339946548799316,
+ "learning_rate": 4.879394058271712e-05,
+ "loss": 1.1157,
+ "step": 797
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 0.3457189703100475,
+ "learning_rate": 4.871075295094329e-05,
+ "loss": 1.129,
+ "step": 798
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 0.3550931839691424,
+ "learning_rate": 4.862752574969241e-05,
+ "loss": 1.076,
+ "step": 799
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 0.36139108917966734,
+ "learning_rate": 4.8544259357032475e-05,
+ "loss": 1.1577,
+ "step": 800
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.39569703665247874,
+ "learning_rate": 4.8460954151209486e-05,
+ "loss": 1.0543,
+ "step": 801
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.3879033670170866,
+ "learning_rate": 4.837761051064579e-05,
+ "loss": 1.0688,
+ "step": 802
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.3796846713967255,
+ "learning_rate": 4.8294228813938285e-05,
+ "loss": 0.9911,
+ "step": 803
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.4007831430409375,
+ "learning_rate": 4.8210809439856804e-05,
+ "loss": 1.0126,
+ "step": 804
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.37588078665500885,
+ "learning_rate": 4.8127352767342276e-05,
+ "loss": 0.9302,
+ "step": 805
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.4078509175013281,
+ "learning_rate": 4.8043859175505095e-05,
+ "loss": 0.9982,
+ "step": 806
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.379096046185539,
+ "learning_rate": 4.7960329043623344e-05,
+ "loss": 1.0035,
+ "step": 807
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.3813938568133554,
+ "learning_rate": 4.787676275114111e-05,
+ "loss": 0.9579,
+ "step": 808
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.3686863564511168,
+ "learning_rate": 4.779316067766673e-05,
+ "loss": 1.0105,
+ "step": 809
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.4263940878847523,
+ "learning_rate": 4.770952320297109e-05,
+ "loss": 1.0677,
+ "step": 810
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.37178778374665006,
+ "learning_rate": 4.7625850706985886e-05,
+ "loss": 1.0019,
+ "step": 811
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.36803355429187945,
+ "learning_rate": 4.7542143569801894e-05,
+ "loss": 0.9937,
+ "step": 812
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.3897072472941179,
+ "learning_rate": 4.745840217166725e-05,
+ "loss": 1.0877,
+ "step": 813
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.35571833841716255,
+ "learning_rate": 4.737462689298577e-05,
+ "loss": 1.0015,
+ "step": 814
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.38930229991094323,
+ "learning_rate": 4.7290818114315086e-05,
+ "loss": 1.028,
+ "step": 815
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.411005007105147,
+ "learning_rate": 4.72069762163651e-05,
+ "loss": 1.0068,
+ "step": 816
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.3980240190337736,
+ "learning_rate": 4.7123101579996106e-05,
+ "loss": 0.9919,
+ "step": 817
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.36369517703115467,
+ "learning_rate": 4.7039194586217136e-05,
+ "loss": 0.967,
+ "step": 818
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.38591148840458894,
+ "learning_rate": 4.695525561618418e-05,
+ "loss": 0.9743,
+ "step": 819
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.45873135108949337,
+ "learning_rate": 4.687128505119853e-05,
+ "loss": 1.0516,
+ "step": 820
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.3866330351411308,
+ "learning_rate": 4.6787283272704966e-05,
+ "loss": 0.9939,
+ "step": 821
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.4620340173291326,
+ "learning_rate": 4.670325066229009e-05,
+ "loss": 1.0526,
+ "step": 822
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.38877454299870284,
+ "learning_rate": 4.661918760168052e-05,
+ "loss": 0.9904,
+ "step": 823
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.3880489386116793,
+ "learning_rate": 4.653509447274121e-05,
+ "loss": 0.9623,
+ "step": 824
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.3827392356186151,
+ "learning_rate": 4.6450971657473743e-05,
+ "loss": 1.0772,
+ "step": 825
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.4132814641854327,
+ "learning_rate": 4.63668195380145e-05,
+ "loss": 1.0533,
+ "step": 826
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.3703610182402835,
+ "learning_rate": 4.628263849663301e-05,
+ "loss": 0.9336,
+ "step": 827
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.4152053683299823,
+ "learning_rate": 4.619842891573016e-05,
+ "loss": 0.9801,
+ "step": 828
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.41791059043554274,
+ "learning_rate": 4.6114191177836514e-05,
+ "loss": 1.0617,
+ "step": 829
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.46363896517299136,
+ "learning_rate": 4.6029925665610524e-05,
+ "loss": 0.9687,
+ "step": 830
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.41141959057512445,
+ "learning_rate": 4.59456327618368e-05,
+ "loss": 1.0965,
+ "step": 831
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.3789192764519836,
+ "learning_rate": 4.5861312849424386e-05,
+ "loss": 0.9793,
+ "step": 832
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.4047291581107866,
+ "learning_rate": 4.5776966311405035e-05,
+ "loss": 1.0342,
+ "step": 833
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.4425157400959256,
+ "learning_rate": 4.5692593530931416e-05,
+ "loss": 1.0892,
+ "step": 834
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.3707332144806616,
+ "learning_rate": 4.560819489127545e-05,
+ "loss": 0.9815,
+ "step": 835
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.3897444102572823,
+ "learning_rate": 4.552377077582646e-05,
+ "loss": 0.9884,
+ "step": 836
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.42725787957019346,
+ "learning_rate": 4.543932156808959e-05,
+ "loss": 0.9972,
+ "step": 837
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.40615269781820007,
+ "learning_rate": 4.535484765168386e-05,
+ "loss": 0.9529,
+ "step": 838
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.3505829736050887,
+ "learning_rate": 4.527034941034063e-05,
+ "loss": 0.9492,
+ "step": 839
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.36688064686440497,
+ "learning_rate": 4.51858272279017e-05,
+ "loss": 0.9592,
+ "step": 840
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.4043468777955929,
+ "learning_rate": 4.5101281488317634e-05,
+ "loss": 1.048,
+ "step": 841
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.3811489793242706,
+ "learning_rate": 4.501671257564602e-05,
+ "loss": 1.0138,
+ "step": 842
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.39813004142325986,
+ "learning_rate": 4.49321208740497e-05,
+ "loss": 1.071,
+ "step": 843
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.3809751022095503,
+ "learning_rate": 4.484750676779504e-05,
+ "loss": 1.0351,
+ "step": 844
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.384312178013823,
+ "learning_rate": 4.4762870641250185e-05,
+ "loss": 0.9737,
+ "step": 845
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.40769404907923557,
+ "learning_rate": 4.467821287888331e-05,
+ "loss": 0.9659,
+ "step": 846
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.39594136851937817,
+ "learning_rate": 4.459353386526086e-05,
+ "loss": 0.9405,
+ "step": 847
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.37180161011562185,
+ "learning_rate": 4.450883398504584e-05,
+ "loss": 1.0732,
+ "step": 848
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.3772603623154663,
+ "learning_rate": 4.442411362299602e-05,
+ "loss": 0.9646,
+ "step": 849
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.4346142368506476,
+ "learning_rate": 4.433937316396224e-05,
+ "loss": 0.9572,
+ "step": 850
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.3997258084612474,
+ "learning_rate": 4.425461299288659e-05,
+ "loss": 0.9492,
+ "step": 851
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.41245476865247155,
+ "learning_rate": 4.416983349480073e-05,
+ "loss": 0.8732,
+ "step": 852
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.6761499297939195,
+ "learning_rate": 4.408503505482412e-05,
+ "loss": 1.0425,
+ "step": 853
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.40340979486858985,
+ "learning_rate": 4.400021805816225e-05,
+ "loss": 0.9596,
+ "step": 854
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.43290732392699666,
+ "learning_rate": 4.391538289010493e-05,
+ "loss": 1.0123,
+ "step": 855
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.36878054442190156,
+ "learning_rate": 4.383052993602448e-05,
+ "loss": 0.9448,
+ "step": 856
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.7146145128961262,
+ "learning_rate": 4.374565958137404e-05,
+ "loss": 1.0342,
+ "step": 857
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.44429357586145607,
+ "learning_rate": 4.3660772211685775e-05,
+ "loss": 1.0436,
+ "step": 858
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.4565751973640598,
+ "learning_rate": 4.357586821256918e-05,
+ "loss": 1.0311,
+ "step": 859
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.3919991236654277,
+ "learning_rate": 4.349094796970925e-05,
+ "loss": 1.1401,
+ "step": 860
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.4347441949284011,
+ "learning_rate": 4.3406011868864795e-05,
+ "loss": 1.0252,
+ "step": 861
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.38339976027415407,
+ "learning_rate": 4.3321060295866635e-05,
+ "loss": 1.0536,
+ "step": 862
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.37688790408195166,
+ "learning_rate": 4.32360936366159e-05,
+ "loss": 1.012,
+ "step": 863
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.4317538207582504,
+ "learning_rate": 4.315111227708224e-05,
+ "loss": 1.0505,
+ "step": 864
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.4145324872228796,
+ "learning_rate": 4.306611660330208e-05,
+ "loss": 1.0496,
+ "step": 865
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.416535227064448,
+ "learning_rate": 4.298110700137687e-05,
+ "loss": 0.9628,
+ "step": 866
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.46564356187492717,
+ "learning_rate": 4.2896083857471345e-05,
+ "loss": 1.0016,
+ "step": 867
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.4228980941889828,
+ "learning_rate": 4.281104755781172e-05,
+ "loss": 1.0904,
+ "step": 868
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.4267821214430208,
+ "learning_rate": 4.272599848868402e-05,
+ "loss": 1.0544,
+ "step": 869
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.45763332095792075,
+ "learning_rate": 4.264093703643223e-05,
+ "loss": 1.0686,
+ "step": 870
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.4347555516548761,
+ "learning_rate": 4.255586358745662e-05,
+ "loss": 1.0264,
+ "step": 871
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.3817726381103066,
+ "learning_rate": 4.247077852821194e-05,
+ "loss": 1.0045,
+ "step": 872
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.3882808845457995,
+ "learning_rate": 4.2385682245205685e-05,
+ "loss": 1.0193,
+ "step": 873
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.39410930252966775,
+ "learning_rate": 4.230057512499634e-05,
+ "loss": 0.9832,
+ "step": 874
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.4373094593907156,
+ "learning_rate": 4.221545755419159e-05,
+ "loss": 1.0343,
+ "step": 875
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.4462843721698891,
+ "learning_rate": 4.2130329919446646e-05,
+ "loss": 1.0324,
+ "step": 876
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.4747274247448112,
+ "learning_rate": 4.20451926074624e-05,
+ "loss": 0.9903,
+ "step": 877
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.4157472897596409,
+ "learning_rate": 4.196004600498369e-05,
+ "loss": 0.9266,
+ "step": 878
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.41625958088960685,
+ "learning_rate": 4.1874890498797605e-05,
+ "loss": 0.9658,
+ "step": 879
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.44784944130574333,
+ "learning_rate": 4.178972647573163e-05,
+ "loss": 0.9671,
+ "step": 880
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.4116839177956385,
+ "learning_rate": 4.1704554322651975e-05,
+ "loss": 0.9591,
+ "step": 881
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.4025569857639452,
+ "learning_rate": 4.161937442646176e-05,
+ "loss": 1.0072,
+ "step": 882
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.41518478124763597,
+ "learning_rate": 4.1534187174099285e-05,
+ "loss": 1.0275,
+ "step": 883
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.3987815564664466,
+ "learning_rate": 4.1448992952536275e-05,
+ "loss": 1.0039,
+ "step": 884
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.4270378155679982,
+ "learning_rate": 4.136379214877609e-05,
+ "loss": 1.0369,
+ "step": 885
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.42144733922972777,
+ "learning_rate": 4.127858514985203e-05,
+ "loss": 1.0269,
+ "step": 886
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.4198664438272548,
+ "learning_rate": 4.1193372342825494e-05,
+ "loss": 1.0427,
+ "step": 887
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.3985048256281719,
+ "learning_rate": 4.1108154114784275e-05,
+ "loss": 1.0702,
+ "step": 888
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.605520808292362,
+ "learning_rate": 4.102293085284083e-05,
+ "loss": 0.9749,
+ "step": 889
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.4150515863924052,
+ "learning_rate": 4.0937702944130426e-05,
+ "loss": 1.0231,
+ "step": 890
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.3935997576565283,
+ "learning_rate": 4.085247077580948e-05,
+ "loss": 1.0014,
+ "step": 891
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.399446131403209,
+ "learning_rate": 4.076723473505374e-05,
+ "loss": 0.9602,
+ "step": 892
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.4406024397129952,
+ "learning_rate": 4.068199520905655e-05,
+ "loss": 1.0425,
+ "step": 893
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.4036917571496492,
+ "learning_rate": 4.059675258502709e-05,
+ "loss": 0.973,
+ "step": 894
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.4057196459433299,
+ "learning_rate": 4.05115072501886e-05,
+ "loss": 0.9997,
+ "step": 895
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.4374124954708759,
+ "learning_rate": 4.0426259591776645e-05,
+ "loss": 0.9826,
+ "step": 896
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.4545699371285546,
+ "learning_rate": 4.0341009997037356e-05,
+ "loss": 1.0554,
+ "step": 897
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.4251917031237376,
+ "learning_rate": 4.025575885322563e-05,
+ "loss": 1.0217,
+ "step": 898
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.3857651901893941,
+ "learning_rate": 4.0170506547603427e-05,
+ "loss": 1.0317,
+ "step": 899
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.46323573798490897,
+ "learning_rate": 4.008525346743797e-05,
+ "loss": 1.0398,
+ "step": 900
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.4011541121460918,
+ "learning_rate": 4e-05,
+ "loss": 1.0706,
+ "step": 901
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.46493281221028004,
+ "learning_rate": 3.991474653256204e-05,
+ "loss": 1.0525,
+ "step": 902
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.41683080924539023,
+ "learning_rate": 3.982949345239658e-05,
+ "loss": 1.0905,
+ "step": 903
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.4750350025014512,
+ "learning_rate": 3.974424114677437e-05,
+ "loss": 1.049,
+ "step": 904
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.3867445073614702,
+ "learning_rate": 3.965899000296266e-05,
+ "loss": 0.9624,
+ "step": 905
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.378387661131469,
+ "learning_rate": 3.957374040822335e-05,
+ "loss": 1.0223,
+ "step": 906
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.3905996390559077,
+ "learning_rate": 3.948849274981141e-05,
+ "loss": 1.0315,
+ "step": 907
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.4139717689498189,
+ "learning_rate": 3.940324741497291e-05,
+ "loss": 0.9297,
+ "step": 908
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.39086355684921514,
+ "learning_rate": 3.9318004790943465e-05,
+ "loss": 0.9684,
+ "step": 909
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.4334915643736419,
+ "learning_rate": 3.923276526494627e-05,
+ "loss": 0.996,
+ "step": 910
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.40782018986229496,
+ "learning_rate": 3.9147529224190536e-05,
+ "loss": 1.0875,
+ "step": 911
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.43578702386625723,
+ "learning_rate": 3.906229705586959e-05,
+ "loss": 1.1214,
+ "step": 912
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.414945683409524,
+ "learning_rate": 3.89770691471592e-05,
+ "loss": 1.1037,
+ "step": 913
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.40665801579679106,
+ "learning_rate": 3.889184588521573e-05,
+ "loss": 0.9743,
+ "step": 914
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.4064250611574517,
+ "learning_rate": 3.880662765717453e-05,
+ "loss": 0.8814,
+ "step": 915
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.48023046298843347,
+ "learning_rate": 3.8721414850147985e-05,
+ "loss": 0.9663,
+ "step": 916
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.42358024833566227,
+ "learning_rate": 3.8636207851223924e-05,
+ "loss": 1.0491,
+ "step": 917
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.41522494786195835,
+ "learning_rate": 3.855100704746374e-05,
+ "loss": 1.033,
+ "step": 918
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.40890517696706496,
+ "learning_rate": 3.8465812825900715e-05,
+ "loss": 1.0369,
+ "step": 919
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.4325851866408538,
+ "learning_rate": 3.838062557353825e-05,
+ "loss": 0.9362,
+ "step": 920
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.4185860919050069,
+ "learning_rate": 3.8295445677348025e-05,
+ "loss": 1.026,
+ "step": 921
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.3975762375934804,
+ "learning_rate": 3.8210273524268375e-05,
+ "loss": 1.0412,
+ "step": 922
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.41725298241987474,
+ "learning_rate": 3.8125109501202395e-05,
+ "loss": 1.0004,
+ "step": 923
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.455183913149126,
+ "learning_rate": 3.803995399501632e-05,
+ "loss": 1.0594,
+ "step": 924
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.3993993856483797,
+ "learning_rate": 3.795480739253761e-05,
+ "loss": 0.9761,
+ "step": 925
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.41638796815161494,
+ "learning_rate": 3.786967008055337e-05,
+ "loss": 1.0369,
+ "step": 926
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.40015112695810534,
+ "learning_rate": 3.7784542445808414e-05,
+ "loss": 1.0271,
+ "step": 927
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.3995749494729548,
+ "learning_rate": 3.769942487500368e-05,
+ "loss": 1.0613,
+ "step": 928
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.4073556267037492,
+ "learning_rate": 3.761431775479432e-05,
+ "loss": 1.0528,
+ "step": 929
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.44218148822636044,
+ "learning_rate": 3.752922147178807e-05,
+ "loss": 1.0742,
+ "step": 930
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.4435063485893757,
+ "learning_rate": 3.744413641254339e-05,
+ "loss": 1.0825,
+ "step": 931
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.46841574994107515,
+ "learning_rate": 3.735906296356778e-05,
+ "loss": 1.0471,
+ "step": 932
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.40093716627657294,
+ "learning_rate": 3.727400151131599e-05,
+ "loss": 1.0474,
+ "step": 933
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3866415067997244,
+ "learning_rate": 3.71889524421883e-05,
+ "loss": 1.0209,
+ "step": 934
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.4881546110706673,
+ "learning_rate": 3.710391614252867e-05,
+ "loss": 1.0768,
+ "step": 935
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.4133084639324523,
+ "learning_rate": 3.701889299862314e-05,
+ "loss": 1.0423,
+ "step": 936
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.40523563084001196,
+ "learning_rate": 3.6933883396697936e-05,
+ "loss": 1.005,
+ "step": 937
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.38757352418642405,
+ "learning_rate": 3.684888772291777e-05,
+ "loss": 0.9659,
+ "step": 938
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.421394551890689,
+ "learning_rate": 3.676390636338411e-05,
+ "loss": 1.0454,
+ "step": 939
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.45693070958342186,
+ "learning_rate": 3.667893970413337e-05,
+ "loss": 1.1459,
+ "step": 940
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.4172025376377795,
+ "learning_rate": 3.659398813113522e-05,
+ "loss": 0.9954,
+ "step": 941
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.3871624019510191,
+ "learning_rate": 3.650905203029075e-05,
+ "loss": 1.0441,
+ "step": 942
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.38541342610032325,
+ "learning_rate": 3.642413178743083e-05,
+ "loss": 0.9465,
+ "step": 943
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.4208031670525743,
+ "learning_rate": 3.633922778831423e-05,
+ "loss": 1.0367,
+ "step": 944
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.41867209013040035,
+ "learning_rate": 3.6254340418625975e-05,
+ "loss": 1.0868,
+ "step": 945
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.431758149074127,
+ "learning_rate": 3.6169470063975536e-05,
+ "loss": 1.0689,
+ "step": 946
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.4988803338819952,
+ "learning_rate": 3.608461710989509e-05,
+ "loss": 1.0879,
+ "step": 947
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.4094858411191625,
+ "learning_rate": 3.5999781941837755e-05,
+ "loss": 1.0332,
+ "step": 948
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.3831847195845155,
+ "learning_rate": 3.591496494517589e-05,
+ "loss": 0.9751,
+ "step": 949
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.40535692821947267,
+ "learning_rate": 3.5830166505199284e-05,
+ "loss": 1.0594,
+ "step": 950
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.4875663789389966,
+ "learning_rate": 3.574538700711343e-05,
+ "loss": 0.9749,
+ "step": 951
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.5155923998285772,
+ "learning_rate": 3.566062683603778e-05,
+ "loss": 0.9999,
+ "step": 952
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.5280285947816189,
+ "learning_rate": 3.557588637700399e-05,
+ "loss": 1.1061,
+ "step": 953
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.46573407357796753,
+ "learning_rate": 3.5491166014954174e-05,
+ "loss": 1.102,
+ "step": 954
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.4122542582865379,
+ "learning_rate": 3.540646613473915e-05,
+ "loss": 1.0469,
+ "step": 955
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.41414476980823367,
+ "learning_rate": 3.53217871211167e-05,
+ "loss": 0.9973,
+ "step": 956
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.4030707611608045,
+ "learning_rate": 3.523712935874983e-05,
+ "loss": 0.9796,
+ "step": 957
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.4235313349747291,
+ "learning_rate": 3.5152493232204975e-05,
+ "loss": 1.0601,
+ "step": 958
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.4165235178302652,
+ "learning_rate": 3.5067879125950316e-05,
+ "loss": 1.0358,
+ "step": 959
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.44083984701952955,
+ "learning_rate": 3.4983287424354e-05,
+ "loss": 1.0957,
+ "step": 960
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.3781161039063518,
+ "learning_rate": 3.489871851168238e-05,
+ "loss": 0.9838,
+ "step": 961
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.4095747724038915,
+ "learning_rate": 3.4814172772098314e-05,
+ "loss": 1.014,
+ "step": 962
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.42197119558898466,
+ "learning_rate": 3.472965058965938e-05,
+ "loss": 1.0096,
+ "step": 963
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.4339963388152155,
+ "learning_rate": 3.464515234831615e-05,
+ "loss": 1.0158,
+ "step": 964
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.4284638765548976,
+ "learning_rate": 3.4560678431910424e-05,
+ "loss": 1.1047,
+ "step": 965
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.3935144535755794,
+ "learning_rate": 3.447622922417355e-05,
+ "loss": 0.9925,
+ "step": 966
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.45884343961025,
+ "learning_rate": 3.439180510872457e-05,
+ "loss": 1.0583,
+ "step": 967
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.42439320759788374,
+ "learning_rate": 3.4307406469068604e-05,
+ "loss": 0.9305,
+ "step": 968
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.45770082390324845,
+ "learning_rate": 3.4223033688594985e-05,
+ "loss": 1.054,
+ "step": 969
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.4284786643981094,
+ "learning_rate": 3.4138687150575634e-05,
+ "loss": 0.9409,
+ "step": 970
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.41356124058383237,
+ "learning_rate": 3.4054367238163215e-05,
+ "loss": 1.0739,
+ "step": 971
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.4255832249412624,
+ "learning_rate": 3.3970074334389496e-05,
+ "loss": 1.0764,
+ "step": 972
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.4337695536142702,
+ "learning_rate": 3.388580882216349e-05,
+ "loss": 1.0195,
+ "step": 973
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.41363495650922455,
+ "learning_rate": 3.380157108426985e-05,
+ "loss": 1.0615,
+ "step": 974
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.3950691247686479,
+ "learning_rate": 3.371736150336701e-05,
+ "loss": 1.0283,
+ "step": 975
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.4042823691555822,
+ "learning_rate": 3.3633180461985505e-05,
+ "loss": 1.0309,
+ "step": 976
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.3921158850479399,
+ "learning_rate": 3.354902834252627e-05,
+ "loss": 1.068,
+ "step": 977
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.38349545732725654,
+ "learning_rate": 3.346490552725879e-05,
+ "loss": 1.0886,
+ "step": 978
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.38689221457248724,
+ "learning_rate": 3.33808123983195e-05,
+ "loss": 0.987,
+ "step": 979
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.38660550867425647,
+ "learning_rate": 3.329674933770992e-05,
+ "loss": 1.069,
+ "step": 980
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.3917593746353493,
+ "learning_rate": 3.321271672729504e-05,
+ "loss": 0.9858,
+ "step": 981
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.4292314072827653,
+ "learning_rate": 3.3128714948801474e-05,
+ "loss": 1.0477,
+ "step": 982
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.479414638418211,
+ "learning_rate": 3.3044744383815835e-05,
+ "loss": 1.0763,
+ "step": 983
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.380831894995463,
+ "learning_rate": 3.2960805413782884e-05,
+ "loss": 1.0393,
+ "step": 984
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.42402274703362114,
+ "learning_rate": 3.2876898420003914e-05,
+ "loss": 1.0837,
+ "step": 985
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.4571447203722258,
+ "learning_rate": 3.279302378363491e-05,
+ "loss": 1.0594,
+ "step": 986
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3776673281658531,
+ "learning_rate": 3.270918188568493e-05,
+ "loss": 1.0121,
+ "step": 987
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.4367173448132159,
+ "learning_rate": 3.262537310701425e-05,
+ "loss": 0.9612,
+ "step": 988
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.43679765208840926,
+ "learning_rate": 3.254159782833276e-05,
+ "loss": 1.0565,
+ "step": 989
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.4018151260013493,
+ "learning_rate": 3.2457856430198126e-05,
+ "loss": 0.9975,
+ "step": 990
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.40461959940721076,
+ "learning_rate": 3.237414929301412e-05,
+ "loss": 1.0255,
+ "step": 991
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.41342378541540653,
+ "learning_rate": 3.2290476797028926e-05,
+ "loss": 1.024,
+ "step": 992
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.3926173909201105,
+ "learning_rate": 3.220683932233328e-05,
+ "loss": 1.0877,
+ "step": 993
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.3835623199834992,
+ "learning_rate": 3.21232372488589e-05,
+ "loss": 1.0992,
+ "step": 994
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.39901809497083496,
+ "learning_rate": 3.2039670956376656e-05,
+ "loss": 1.0723,
+ "step": 995
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.3979604537466272,
+ "learning_rate": 3.195614082449492e-05,
+ "loss": 1.0201,
+ "step": 996
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.4057122427176845,
+ "learning_rate": 3.1872647232657723e-05,
+ "loss": 1.0885,
+ "step": 997
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.39747060350754754,
+ "learning_rate": 3.17891905601432e-05,
+ "loss": 1.0544,
+ "step": 998
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.4397658078291558,
+ "learning_rate": 3.1705771186061715e-05,
+ "loss": 1.0998,
+ "step": 999
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.37373547663810053,
+ "learning_rate": 3.162238948935423e-05,
+ "loss": 1.0465,
+ "step": 1000
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.3982578259433756,
+ "learning_rate": 3.153904584879052e-05,
+ "loss": 1.0319,
+ "step": 1001
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.3874917951751892,
+ "learning_rate": 3.1455740642967545e-05,
+ "loss": 1.0064,
+ "step": 1002
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.39186897217724515,
+ "learning_rate": 3.1372474250307594e-05,
+ "loss": 0.9924,
+ "step": 1003
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.40862291659745487,
+ "learning_rate": 3.128924704905673e-05,
+ "loss": 1.0697,
+ "step": 1004
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.4020116078010512,
+ "learning_rate": 3.1206059417282894e-05,
+ "loss": 1.0669,
+ "step": 1005
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.4195258340431994,
+ "learning_rate": 3.1122911732874356e-05,
+ "loss": 0.9669,
+ "step": 1006
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.4051289242539706,
+ "learning_rate": 3.103980437353787e-05,
+ "loss": 1.0283,
+ "step": 1007
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.4072322485068397,
+ "learning_rate": 3.0956737716797047e-05,
+ "loss": 0.9819,
+ "step": 1008
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.4183439146679152,
+ "learning_rate": 3.087371213999056e-05,
+ "loss": 1.0195,
+ "step": 1009
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.4223541507769984,
+ "learning_rate": 3.079072802027051e-05,
+ "loss": 1.0321,
+ "step": 1010
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.4393009772902467,
+ "learning_rate": 3.070778573460068e-05,
+ "loss": 1.008,
+ "step": 1011
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.3790600190213189,
+ "learning_rate": 3.062488565975476e-05,
+ "loss": 0.9642,
+ "step": 1012
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.39767262663748454,
+ "learning_rate": 3.054202817231477e-05,
+ "loss": 1.0067,
+ "step": 1013
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.4245599091706745,
+ "learning_rate": 3.0459213648669195e-05,
+ "loss": 1.0128,
+ "step": 1014
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.39825908696948487,
+ "learning_rate": 3.0376442465011436e-05,
+ "loss": 1.0415,
+ "step": 1015
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.38711515877098823,
+ "learning_rate": 3.0293714997337927e-05,
+ "loss": 1.008,
+ "step": 1016
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.40094768473114994,
+ "learning_rate": 3.0211031621446607e-05,
+ "loss": 1.0424,
+ "step": 1017
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.4257977860413385,
+ "learning_rate": 3.0128392712935044e-05,
+ "loss": 1.0244,
+ "step": 1018
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.43915000496042145,
+ "learning_rate": 3.0045798647198882e-05,
+ "loss": 1.0096,
+ "step": 1019
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.43529687364630915,
+ "learning_rate": 2.9963249799429986e-05,
+ "loss": 1.0672,
+ "step": 1020
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.3930746148439634,
+ "learning_rate": 2.988074654461489e-05,
+ "loss": 0.9502,
+ "step": 1021
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 12.100708480080742,
+ "learning_rate": 2.9798289257532946e-05,
+ "loss": 1.1234,
+ "step": 1022
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.4482898066028835,
+ "learning_rate": 2.9715878312754767e-05,
+ "loss": 1.0238,
+ "step": 1023
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.49283220433392333,
+ "learning_rate": 2.9633514084640365e-05,
+ "loss": 1.0953,
+ "step": 1024
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.5149555706737133,
+ "learning_rate": 2.955119694733763e-05,
+ "loss": 1.0521,
+ "step": 1025
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.44798691881600083,
+ "learning_rate": 2.946892727478045e-05,
+ "loss": 0.9552,
+ "step": 1026
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.4805603304240495,
+ "learning_rate": 2.9386705440687168e-05,
+ "loss": 1.0627,
+ "step": 1027
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.4869146396788101,
+ "learning_rate": 2.9304531818558795e-05,
+ "loss": 0.9919,
+ "step": 1028
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 7.1337477050690765,
+ "learning_rate": 2.9222406781677294e-05,
+ "loss": 1.0948,
+ "step": 1029
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 3.9006954730122643,
+ "learning_rate": 2.9140330703103992e-05,
+ "loss": 1.0288,
+ "step": 1030
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 1247.1481817866024,
+ "learning_rate": 2.905830395567776e-05,
+ "loss": 2.7593,
+ "step": 1031
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 3.536837141759145,
+ "learning_rate": 2.8976326912013422e-05,
+ "loss": 1.0416,
+ "step": 1032
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 2.1190195460751875,
+ "learning_rate": 2.8894399944499974e-05,
+ "loss": 1.0894,
+ "step": 1033
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 23.71856345141067,
+ "learning_rate": 2.8812523425299e-05,
+ "loss": 1.0503,
+ "step": 1034
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 1.6586194628502813,
+ "learning_rate": 2.873069772634281e-05,
+ "loss": 0.9838,
+ "step": 1035
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 754.5038667582967,
+ "learning_rate": 2.8648923219332997e-05,
+ "loss": 1.5912,
+ "step": 1036
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 2084.148772219088,
+ "learning_rate": 2.856720027573848e-05,
+ "loss": 2.7096,
+ "step": 1037
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 7.150640904502661,
+ "learning_rate": 2.8485529266794043e-05,
+ "loss": 1.0482,
+ "step": 1038
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 15.341911132717605,
+ "learning_rate": 2.8403910563498482e-05,
+ "loss": 1.1031,
+ "step": 1039
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 28.495476932479527,
+ "learning_rate": 2.832234453661304e-05,
+ "loss": 1.0985,
+ "step": 1040
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 6.611240968969735,
+ "learning_rate": 2.8240831556659635e-05,
+ "loss": 1.076,
+ "step": 1041
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 158.1044433512434,
+ "learning_rate": 2.815937199391924e-05,
+ "loss": 1.1907,
+ "step": 1042
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 1.2973014286777083,
+ "learning_rate": 2.807796621843016e-05,
+ "loss": 1.0292,
+ "step": 1043
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 15.820120602912542,
+ "learning_rate": 2.799661459998638e-05,
+ "loss": 1.1606,
+ "step": 1044
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 25.84305200367942,
+ "learning_rate": 2.7915317508135848e-05,
+ "loss": 1.0422,
+ "step": 1045
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 0.6840315849067687,
+ "learning_rate": 2.7834075312178838e-05,
+ "loss": 1.0541,
+ "step": 1046
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 11.975406658849673,
+ "learning_rate": 2.775288838116626e-05,
+ "loss": 1.0545,
+ "step": 1047
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 513.8880995650269,
+ "learning_rate": 2.767175708389794e-05,
+ "loss": 1.2914,
+ "step": 1048
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 7.1778887098472755,
+ "learning_rate": 2.759068178892105e-05,
+ "loss": 1.0446,
+ "step": 1049
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 1529.592860032449,
+ "learning_rate": 2.750966286452828e-05,
+ "loss": 3.6171,
+ "step": 1050
+ }
+ ],
+ "logging_steps": 1.0,
+ "max_steps": 1638,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 3,
+ "save_steps": 50,
+ "total_flos": 1088577714782208.0,
+ "train_batch_size": 1,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/v2/checkpoint-1050/training_args.bin b/v2/checkpoint-1050/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..8c2dfa20e1da5754719c3d7e300b9b86407f077f
--- /dev/null
+++ b/v2/checkpoint-1050/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3f2f7bd873b9dca108c5ca2e32ea140480fabeed2dec60f702daabd0a44d071e
+size 6776
diff --git a/v2/checkpoint-1050/zero_to_fp32.py b/v2/checkpoint-1050/zero_to_fp32.py
new file mode 100755
index 0000000000000000000000000000000000000000..24cc342e78d1a006c782b3a4cd68d9ce786d8fd8
--- /dev/null
+++ b/v2/checkpoint-1050/zero_to_fp32.py
@@ -0,0 +1,604 @@
+#!/usr/bin/env python
+
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+# This script extracts fp32 consolidated weights from a zero 1, 2 and 3 DeepSpeed checkpoints. It gets
+# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
+# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
+# application.
+#
+# example: python zero_to_fp32.py . pytorch_model.bin
+
+import argparse
+import torch
+import glob
+import math
+import os
+import re
+from collections import OrderedDict
+from dataclasses import dataclass
+
+# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
+# DeepSpeed data structures it has to be available in the current python environment.
+from deepspeed.utils import logger
+from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
+
+
+@dataclass
+class zero_model_state:
+ buffers: dict()
+ param_shapes: dict()
+ shared_params: list
+ ds_version: int
+ frozen_param_shapes: dict()
+ frozen_param_fragments: dict()
+
+
+debug = 0
+
+# load to cpu
+device = torch.device('cpu')
+
+
+def atoi(text):
+ return int(text) if text.isdigit() else text
+
+
+def natural_keys(text):
+ '''
+ alist.sort(key=natural_keys) sorts in human order
+ http://nedbatchelder.com/blog/200712/human_sorting.html
+ (See Toothy's implementation in the comments)
+ '''
+ return [atoi(c) for c in re.split(r'(\d+)', text)]
+
+
+def get_model_state_file(checkpoint_dir, zero_stage):
+ if not os.path.isdir(checkpoint_dir):
+ raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
+
+ # there should be only one file
+ if zero_stage <= 2:
+ file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
+ elif zero_stage == 3:
+ file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
+
+ if not os.path.exists(file):
+ raise FileNotFoundError(f"can't find model states file at '{file}'")
+
+ return file
+
+
+def get_checkpoint_files(checkpoint_dir, glob_pattern):
+ # XXX: need to test that this simple glob rule works for multi-node setup too
+ ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
+
+ if len(ckpt_files) == 0:
+ raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
+
+ return ckpt_files
+
+
+def get_optim_files(checkpoint_dir):
+ return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
+
+
+def get_model_state_files(checkpoint_dir):
+ return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
+
+
+def parse_model_states(files):
+ zero_model_states = []
+ for file in files:
+ state_dict = torch.load(file, map_location=device)
+
+ if BUFFER_NAMES not in state_dict:
+ raise ValueError(f"{file} is not a model state checkpoint")
+ buffer_names = state_dict[BUFFER_NAMES]
+ if debug:
+ print("Found buffers:", buffer_names)
+
+ # recover just the buffers while restoring them to fp32 if they were saved in fp16
+ buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
+ param_shapes = state_dict[PARAM_SHAPES]
+
+ # collect parameters that are included in param_shapes
+ param_names = []
+ for s in param_shapes:
+ for name in s.keys():
+ param_names.append(name)
+
+ # update with frozen parameters
+ frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
+ if frozen_param_shapes is not None:
+ if debug:
+ print(f"Found frozen_param_shapes: {frozen_param_shapes}")
+ param_names += list(frozen_param_shapes.keys())
+
+ # handle shared params
+ shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
+
+ ds_version = state_dict.get(DS_VERSION, None)
+
+ frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
+
+ z_model_state = zero_model_state(buffers=buffers,
+ param_shapes=param_shapes,
+ shared_params=shared_params,
+ ds_version=ds_version,
+ frozen_param_shapes=frozen_param_shapes,
+ frozen_param_fragments=frozen_param_fragments)
+ zero_model_states.append(z_model_state)
+
+ return zero_model_states
+
+
+def parse_optim_states(files, ds_checkpoint_dir):
+
+ total_files = len(files)
+ state_dicts = []
+ for f in files:
+ state_dict = torch.load(f, map_location=device)
+ # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights
+ # and also handle the case where it was already removed by another helper script
+ state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
+ state_dicts.append(state_dict)
+
+ if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]:
+ raise ValueError(f"{files[0]} is not a zero checkpoint")
+ zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
+ world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
+
+ # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
+ # parameters can be different from data parallelism for non-expert parameters. So we can just
+ # use the max of the partition_count to get the dp world_size.
+
+ if type(world_size) is list:
+ world_size = max(world_size)
+
+ if world_size != total_files:
+ raise ValueError(
+ f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
+ "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
+ )
+
+ # the groups are named differently in each stage
+ if zero_stage <= 2:
+ fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
+ elif zero_stage == 3:
+ fp32_groups_key = FP32_FLAT_GROUPS
+ else:
+ raise ValueError(f"unknown zero stage {zero_stage}")
+
+ if zero_stage <= 2:
+ fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
+ elif zero_stage == 3:
+ # if there is more than one param group, there will be multiple flattened tensors - one
+ # flattened tensor per group - for simplicity merge them into a single tensor
+ #
+ # XXX: could make the script more memory efficient for when there are multiple groups - it
+ # will require matching the sub-lists of param_shapes for each param group flattened tensor
+
+ fp32_flat_groups = [
+ torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
+ ]
+
+ return zero_stage, world_size, fp32_flat_groups
+
+
+def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
+ """
+ Returns fp32 state_dict reconstructed from ds checkpoint
+
+ Args:
+ - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
+
+ """
+ print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
+
+ optim_files = get_optim_files(ds_checkpoint_dir)
+ zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
+ print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
+
+ model_files = get_model_state_files(ds_checkpoint_dir)
+
+ zero_model_states = parse_model_states(model_files)
+ print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
+
+ if zero_stage <= 2:
+ return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters)
+ elif zero_stage == 3:
+ return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters)
+
+
+def _zero2_merge_frozen_params(state_dict, zero_model_states):
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+ return
+
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+ frozen_param_fragments = zero_model_states[0].frozen_param_fragments
+
+ if debug:
+ num_elem = sum(s.numel() for s in frozen_param_shapes.values())
+ print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+ wanted_params = len(frozen_param_shapes)
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+ avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
+ print(f'Frozen params: Have {avail_numel} numels to process.')
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+ total_params = 0
+ total_numel = 0
+ for name, shape in frozen_param_shapes.items():
+ total_params += 1
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+
+ state_dict[name] = frozen_param_fragments[name]
+
+ if debug:
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _has_callable(obj, fn):
+ attr = getattr(obj, fn, None)
+ return callable(attr)
+
+
+def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+ param_shapes = zero_model_states[0].param_shapes
+
+ # Reconstruction protocol:
+ #
+ # XXX: document this
+
+ if debug:
+ for i in range(world_size):
+ for j in range(len(fp32_flat_groups[0])):
+ print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
+
+ # XXX: memory usage doubles here (zero2)
+ num_param_groups = len(fp32_flat_groups[0])
+ merged_single_partition_of_fp32_groups = []
+ for i in range(num_param_groups):
+ merged_partitions = [sd[i] for sd in fp32_flat_groups]
+ full_single_fp32_vector = torch.cat(merged_partitions, 0)
+ merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
+ avail_numel = sum(
+ [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
+
+ if debug:
+ wanted_params = sum([len(shapes) for shapes in param_shapes])
+ wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
+ # not asserting if there is a mismatch due to possible padding
+ print(f"Have {avail_numel} numels to process.")
+ print(f"Need {wanted_numel} numels in {wanted_params} params.")
+
+ # params
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+ # out-of-core computing solution
+ total_numel = 0
+ total_params = 0
+ for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
+ offset = 0
+ avail_numel = full_single_fp32_vector.numel()
+ for name, shape in shapes.items():
+
+ unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
+ total_numel += unpartitioned_numel
+ total_params += 1
+
+ if debug:
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+ state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
+ offset += unpartitioned_numel
+
+ # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
+ # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
+ # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
+ # live optimizer object, so we are checking that the numbers are within the right range
+ align_to = 2 * world_size
+
+ def zero2_align(x):
+ return align_to * math.ceil(x / align_to)
+
+ if debug:
+ print(f"original offset={offset}, avail_numel={avail_numel}")
+
+ offset = zero2_align(offset)
+ avail_numel = zero2_align(avail_numel)
+
+ if debug:
+ print(f"aligned offset={offset}, avail_numel={avail_numel}")
+
+ # Sanity check
+ if offset != avail_numel:
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+ print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters):
+ state_dict = OrderedDict()
+
+ # buffers
+ buffers = zero_model_states[0].buffers
+ state_dict.update(buffers)
+ if debug:
+ print(f"added {len(buffers)} buffers")
+
+ if not exclude_frozen_parameters:
+ _zero2_merge_frozen_params(state_dict, zero_model_states)
+
+ _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+ # recover shared parameters
+ for pair in zero_model_states[0].shared_params:
+ if pair[1] in state_dict:
+ state_dict[pair[0]] = state_dict[pair[1]]
+
+ return state_dict
+
+
+def zero3_partitioned_param_info(unpartitioned_numel, world_size):
+ remainder = unpartitioned_numel % world_size
+ padding_numel = (world_size - remainder) if remainder else 0
+ partitioned_numel = math.ceil(unpartitioned_numel / world_size)
+ return partitioned_numel, padding_numel
+
+
+def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+ return
+
+ if debug:
+ for i in range(world_size):
+ num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
+ print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+ wanted_params = len(frozen_param_shapes)
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+ avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
+ print(f'Frozen params: Have {avail_numel} numels to process.')
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+ total_params = 0
+ total_numel = 0
+ for name, shape in zero_model_states[0].frozen_param_shapes.items():
+ total_params += 1
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+
+ param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
+ state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
+
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+ if debug:
+ print(
+ f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+ )
+
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+ param_shapes = zero_model_states[0].param_shapes
+ avail_numel = fp32_flat_groups[0].numel() * world_size
+ # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
+ # param, re-consolidating each param, while dealing with padding if any
+
+ # merge list of dicts, preserving order
+ param_shapes = {k: v for d in param_shapes for k, v in d.items()}
+
+ if debug:
+ for i in range(world_size):
+ print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
+
+ wanted_params = len(param_shapes)
+ wanted_numel = sum(shape.numel() for shape in param_shapes.values())
+ # not asserting if there is a mismatch due to possible padding
+ avail_numel = fp32_flat_groups[0].numel() * world_size
+ print(f"Trainable params: Have {avail_numel} numels to process.")
+ print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
+
+ # params
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+ # out-of-core computing solution
+ offset = 0
+ total_numel = 0
+ total_params = 0
+ for name, shape in param_shapes.items():
+
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+ total_params += 1
+
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+ if debug:
+ print(
+ f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+ )
+
+ # XXX: memory usage doubles here
+ state_dict[name] = torch.cat(
+ tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
+ 0).narrow(0, 0, unpartitioned_numel).view(shape)
+ offset += partitioned_numel
+
+ offset *= world_size
+
+ # Sanity check
+ if offset != avail_numel:
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+ print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters):
+ state_dict = OrderedDict()
+
+ # buffers
+ buffers = zero_model_states[0].buffers
+ state_dict.update(buffers)
+ if debug:
+ print(f"added {len(buffers)} buffers")
+
+ if not exclude_frozen_parameters:
+ _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
+
+ _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+ # recover shared parameters
+ for pair in zero_model_states[0].shared_params:
+ if pair[1] in state_dict:
+ state_dict[pair[0]] = state_dict[pair[1]]
+
+ return state_dict
+
+
+def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None, exclude_frozen_parameters=False):
+ """
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
+ ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
+ via a model hub.
+
+ Args:
+ - ``checkpoint_dir``: path to the desired checkpoint folder
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
+ - ``exclude_frozen_parameters``: exclude frozen parameters
+
+ Returns:
+ - pytorch ``state_dict``
+
+ Note: this approach may not work if your application doesn't have sufficient free CPU memory and
+ you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
+ the checkpoint.
+
+ A typical usage might be ::
+
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+ # do the training and checkpoint saving
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
+ model = model.cpu() # move to cpu
+ model.load_state_dict(state_dict)
+ # submit to model hub or save the model to share with others
+
+ In this example the ``model`` will no longer be usable in the deepspeed context of the same
+ application. i.e. you will need to re-initialize the deepspeed engine, since
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+ If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
+
+ """
+ if tag is None:
+ latest_path = os.path.join(checkpoint_dir, 'latest')
+ if os.path.isfile(latest_path):
+ with open(latest_path, 'r') as fd:
+ tag = fd.read().strip()
+ else:
+ raise ValueError(f"Unable to find 'latest' file at {latest_path}")
+
+ ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
+
+ if not os.path.isdir(ds_checkpoint_dir):
+ raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
+
+ return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
+
+
+def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None, exclude_frozen_parameters=False):
+ """
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
+ loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
+
+ Args:
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+ - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+ - ``exclude_frozen_parameters``: exclude frozen parameters
+ """
+
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag, exclude_frozen_parameters)
+ print(f"Saving fp32 state dict to {output_file}")
+ torch.save(state_dict, output_file)
+
+
+def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
+ """
+ 1. Put the provided model to cpu
+ 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
+ 3. Load it into the provided model
+
+ Args:
+ - ``model``: the model object to update
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+
+ Returns:
+ - ``model`: modified model
+
+ Make sure you have plenty of CPU memory available before you call this function. If you don't
+ have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
+ conveniently placed for you in the checkpoint folder.
+
+ A typical usage might be ::
+
+ from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
+ model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
+ # submit to model hub or save the model to share with others
+
+ Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context
+ of the same application. i.e. you will need to re-initialize the deepspeed engine, since
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+ """
+ logger.info(f"Extracting fp32 weights")
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+
+ logger.info(f"Overwriting model with fp32 weights")
+ model = model.cpu()
+ model.load_state_dict(state_dict, strict=False)
+
+ return model
+
+
+if __name__ == "__main__":
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument("checkpoint_dir",
+ type=str,
+ help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
+ parser.add_argument(
+ "output_file",
+ type=str,
+ help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)")
+ parser.add_argument("-t",
+ "--tag",
+ type=str,
+ default=None,
+ help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
+ parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
+ parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
+ args = parser.parse_args()
+
+ debug = args.debug
+
+ convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
+ args.output_file,
+ tag=args.tag,
+ exclude_frozen_parameters=args.exclude_frozen_parameters)
diff --git a/v2/checkpoint-1100/README.md b/v2/checkpoint-1100/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..16b1eacdd9353dec380a08ee77ce6ed5ab50f12e
--- /dev/null
+++ b/v2/checkpoint-1100/README.md
@@ -0,0 +1,202 @@
+---
+library_name: peft
+base_model: gotzmann/uni
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+### Framework versions
+
+- PEFT 0.10.0
\ No newline at end of file
diff --git a/v2/checkpoint-1100/adapter_config.json b/v2/checkpoint-1100/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..832188d72d81e59dd2b5259e86f371199b441aca
--- /dev/null
+++ b/v2/checkpoint-1100/adapter_config.json
@@ -0,0 +1,31 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "gotzmann/uni",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 128,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 128,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "o_proj",
+ "k_proj",
+ "q_proj",
+ "v_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": true
+}
\ No newline at end of file
diff --git a/v2/checkpoint-1100/adapter_model.safetensors b/v2/checkpoint-1100/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..d80fbaaf47773f4c7be0cd79cde825389b386d8b
--- /dev/null
+++ b/v2/checkpoint-1100/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5a878a6951ff0b3e9a8218b2726e85769ecf9b59b1d4a93bf911d34a4548df9c
+size 1048664848
diff --git a/v2/checkpoint-1100/global_step1100/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt b/v2/checkpoint-1100/global_step1100/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..d95dbd02f3b45d8d715a2ade2504991744c8877a
--- /dev/null
+++ b/v2/checkpoint-1100/global_step1100/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e842a91632915b4728d55d123aae05cde72110e694f0973e0cbb48e5593f2722
+size 787270042
diff --git a/v2/checkpoint-1100/global_step1100/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt b/v2/checkpoint-1100/global_step1100/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..99be0752320596cca82419afb7dda937576308ca
--- /dev/null
+++ b/v2/checkpoint-1100/global_step1100/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6f60a3ce7656e8285f41cdf7aba23da88d498a99e3796cbebaada87825b1bf01
+size 787270042
diff --git a/v2/checkpoint-1100/global_step1100/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt b/v2/checkpoint-1100/global_step1100/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..cfd543168a674b089593e7960db08807d4742e75
--- /dev/null
+++ b/v2/checkpoint-1100/global_step1100/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:10967f1b5b88930d8fd1386282e9891ea77d2256cdc13c8709e653de3dd8092d
+size 787270042
diff --git a/v2/checkpoint-1100/global_step1100/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt b/v2/checkpoint-1100/global_step1100/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..76ce577389a8d9391fb09e15a2aafb2b22147ad6
--- /dev/null
+++ b/v2/checkpoint-1100/global_step1100/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:488603da2683dc59a598d244f2d8f1d6a104cc6e9019fe610d4e393a548dd9bc
+size 787270042
diff --git a/v2/checkpoint-1100/global_step1100/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt b/v2/checkpoint-1100/global_step1100/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..aeff643f0f6b8591d20cc1de1c11dd6c32869913
--- /dev/null
+++ b/v2/checkpoint-1100/global_step1100/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bb75e6cb011abc6e968245569d38e58dd23cfb148ef39d2b8663330209adfec7
+size 787270042
diff --git a/v2/checkpoint-1100/global_step1100/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt b/v2/checkpoint-1100/global_step1100/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..cf7a91e43356438fea4d50dd17be840853a5d14e
--- /dev/null
+++ b/v2/checkpoint-1100/global_step1100/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:188a1de02a3b3bff7828f5637469da79a47d9837d8ab45e8ac22d49986073e86
+size 787270042
diff --git a/v2/checkpoint-1100/global_step1100/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt b/v2/checkpoint-1100/global_step1100/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..79ef37721d9a3eaf8c2dde77a7789a7e6a1c5aa4
--- /dev/null
+++ b/v2/checkpoint-1100/global_step1100/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9f0442e76bef9042763ba63f6b63f50c720b577306a9ab76514de631780fc59a
+size 787270042
diff --git a/v2/checkpoint-1100/global_step1100/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt b/v2/checkpoint-1100/global_step1100/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..6652e245939c5d36b9105c3375b55c4d58c18fa7
--- /dev/null
+++ b/v2/checkpoint-1100/global_step1100/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bad332a5ef49bef6401cc24801423a13e8b5b64242a2e2b313c5e9c44e64b03c
+size 787270042
diff --git a/v2/checkpoint-1100/global_step1100/zero_pp_rank_0_mp_rank_00_model_states.pt b/v2/checkpoint-1100/global_step1100/zero_pp_rank_0_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..56b1b9a47bbd6bdc455849e47649a03dccb32248
--- /dev/null
+++ b/v2/checkpoint-1100/global_step1100/zero_pp_rank_0_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9c769cbfb9bb02c99ba367fb77ae4600142c10796a5f2981693d9217e84ec52a
+size 653742
diff --git a/v2/checkpoint-1100/global_step1100/zero_pp_rank_1_mp_rank_00_model_states.pt b/v2/checkpoint-1100/global_step1100/zero_pp_rank_1_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..26ea63fae69e18c1fbeb1640836919e8a9290e13
--- /dev/null
+++ b/v2/checkpoint-1100/global_step1100/zero_pp_rank_1_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5285eb24bfe43180e25a0aed0d6135893889228b40d88c09d87172badb155d02
+size 653742
diff --git a/v2/checkpoint-1100/global_step1100/zero_pp_rank_2_mp_rank_00_model_states.pt b/v2/checkpoint-1100/global_step1100/zero_pp_rank_2_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..7c5c53379e0cb424446e8424be9cca7081c51134
--- /dev/null
+++ b/v2/checkpoint-1100/global_step1100/zero_pp_rank_2_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:45d17f3b92f5f0897599a89c9ca82ccd4bcf980ac05427ec47b37774dd0d7836
+size 653742
diff --git a/v2/checkpoint-1100/global_step1100/zero_pp_rank_3_mp_rank_00_model_states.pt b/v2/checkpoint-1100/global_step1100/zero_pp_rank_3_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..e2ed0042a11be1fd9dfddd799b76d5c7a9d5e35a
--- /dev/null
+++ b/v2/checkpoint-1100/global_step1100/zero_pp_rank_3_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e1005b64cba9f84408a432a5465c0821012797187041b73dd62245150d699401
+size 653742
diff --git a/v2/checkpoint-1100/global_step1100/zero_pp_rank_4_mp_rank_00_model_states.pt b/v2/checkpoint-1100/global_step1100/zero_pp_rank_4_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..6abe8550bbc218a84119ae98c6fa9d182ffd3179
--- /dev/null
+++ b/v2/checkpoint-1100/global_step1100/zero_pp_rank_4_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:33fea851aea5540e1e14ef2e33ba8014b1a1f69e8c22311c0cedfd7e359f979d
+size 653742
diff --git a/v2/checkpoint-1100/global_step1100/zero_pp_rank_5_mp_rank_00_model_states.pt b/v2/checkpoint-1100/global_step1100/zero_pp_rank_5_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..0f1fa6c671027baf62c12a8618cf9a646cb0dc21
--- /dev/null
+++ b/v2/checkpoint-1100/global_step1100/zero_pp_rank_5_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e48f64d145ae4602275a2877ecc448efc9ec58f93f8364811a30a93ced0b76ad
+size 653742
diff --git a/v2/checkpoint-1100/global_step1100/zero_pp_rank_6_mp_rank_00_model_states.pt b/v2/checkpoint-1100/global_step1100/zero_pp_rank_6_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..20720bdb5e208aa3129f7ee9ed0da54850f1a41b
--- /dev/null
+++ b/v2/checkpoint-1100/global_step1100/zero_pp_rank_6_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:60582efe7474aa55d4745b11de94f5e294ef1c61df59dc79360753e051374e71
+size 653742
diff --git a/v2/checkpoint-1100/global_step1100/zero_pp_rank_7_mp_rank_00_model_states.pt b/v2/checkpoint-1100/global_step1100/zero_pp_rank_7_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..800a772e5b331ddeb871df43a2c5b72366b05364
--- /dev/null
+++ b/v2/checkpoint-1100/global_step1100/zero_pp_rank_7_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d1f58731d732acd2c0b1bdda0b00be0412c32ccdac00d654324bbb3ad53c323a
+size 653742
diff --git a/v2/checkpoint-1100/latest b/v2/checkpoint-1100/latest
new file mode 100644
index 0000000000000000000000000000000000000000..22cd5c3402316b70299aed2025d7943595f5d495
--- /dev/null
+++ b/v2/checkpoint-1100/latest
@@ -0,0 +1 @@
+global_step1100
\ No newline at end of file
diff --git a/v2/checkpoint-1100/rng_state_0.pth b/v2/checkpoint-1100/rng_state_0.pth
new file mode 100644
index 0000000000000000000000000000000000000000..9dd2a62da4ca83b3b986d96dbf0eaeb82207ca93
--- /dev/null
+++ b/v2/checkpoint-1100/rng_state_0.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a0628a9017696045a3a29e9eaffc71e9262d855716e773c0c3be760a1fe85bc8
+size 15984
diff --git a/v2/checkpoint-1100/rng_state_1.pth b/v2/checkpoint-1100/rng_state_1.pth
new file mode 100644
index 0000000000000000000000000000000000000000..1ba5f3aba4388a582cd47f7f9e57cd5879b1cbd2
--- /dev/null
+++ b/v2/checkpoint-1100/rng_state_1.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:df342004a4d8e3626bf2a9f689fde7c8bfd6d995e14931f5496eda1f456cb6f2
+size 15984
diff --git a/v2/checkpoint-1100/rng_state_2.pth b/v2/checkpoint-1100/rng_state_2.pth
new file mode 100644
index 0000000000000000000000000000000000000000..27b0f7845c2b9530c3e6ed3ce232ff4e86b86122
--- /dev/null
+++ b/v2/checkpoint-1100/rng_state_2.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f02096eb4e8850b91490e80e4a042e2e60f71bd2abc6a269d62c271649cb77d2
+size 15984
diff --git a/v2/checkpoint-1100/rng_state_3.pth b/v2/checkpoint-1100/rng_state_3.pth
new file mode 100644
index 0000000000000000000000000000000000000000..fcfb583fc43c6dd4395671708744cfd18c419970
--- /dev/null
+++ b/v2/checkpoint-1100/rng_state_3.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:326c778d3d0e7e3d5665fa0a9ecd92986609c430da08b41611d6c05dc19815a8
+size 15984
diff --git a/v2/checkpoint-1100/rng_state_4.pth b/v2/checkpoint-1100/rng_state_4.pth
new file mode 100644
index 0000000000000000000000000000000000000000..7a8c64b1f15ac655b2be2a42fe61cabe2a877704
--- /dev/null
+++ b/v2/checkpoint-1100/rng_state_4.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d978dcb0c34e022ee6750e9d86814b8c82e4965d7e07662f35f06eeac12938f3
+size 15984
diff --git a/v2/checkpoint-1100/rng_state_5.pth b/v2/checkpoint-1100/rng_state_5.pth
new file mode 100644
index 0000000000000000000000000000000000000000..262e8187e6caeca12ef3b0aa923b12afd697e03d
--- /dev/null
+++ b/v2/checkpoint-1100/rng_state_5.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:01e83399aed1d9d173c3e07b2efa8530c956b62b2b68394c2ed0d43bd8bba9d1
+size 15984
diff --git a/v2/checkpoint-1100/rng_state_6.pth b/v2/checkpoint-1100/rng_state_6.pth
new file mode 100644
index 0000000000000000000000000000000000000000..72f794e31f8d3e0c63972e5076e1ed90c52087ba
--- /dev/null
+++ b/v2/checkpoint-1100/rng_state_6.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:606ab3ca92e3d20c327c69fdcce7f7e39bec2f2c3538b036088b255f917e3ba4
+size 15984
diff --git a/v2/checkpoint-1100/rng_state_7.pth b/v2/checkpoint-1100/rng_state_7.pth
new file mode 100644
index 0000000000000000000000000000000000000000..244e7fdaa1cef2e82bd4e16afb10f32f68318bcc
--- /dev/null
+++ b/v2/checkpoint-1100/rng_state_7.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1276a987dd22c9093fec58921ba19f340a28f18bff635cc01324e09a3c37ac3a
+size 15984
diff --git a/v2/checkpoint-1100/scheduler.pt b/v2/checkpoint-1100/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..82133a41263df3772d03bb81dbdf70b97ffb13d9
--- /dev/null
+++ b/v2/checkpoint-1100/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c08441fe4d6081d0dce5306ab4664d738c567d94c90b431beb90d45c64769f66
+size 1064
diff --git a/v2/checkpoint-1100/special_tokens_map.json b/v2/checkpoint-1100/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..72ecfeeb7e14d244c936169d2ed139eeae235ef1
--- /dev/null
+++ b/v2/checkpoint-1100/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "",
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/v2/checkpoint-1100/tokenizer.model b/v2/checkpoint-1100/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..6c00c742ce03c627d6cd5b795984876fa49fa899
--- /dev/null
+++ b/v2/checkpoint-1100/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
diff --git a/v2/checkpoint-1100/tokenizer_config.json b/v2/checkpoint-1100/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..bb5a9f09d8c0f3c32c66fc6118fe5c76c5c6fd90
--- /dev/null
+++ b/v2/checkpoint-1100/tokenizer_config.json
@@ -0,0 +1,45 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "",
+ "chat_template": "{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ '' + '### System:\\n\\n' + system_message }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '\\n\\n### Human:\\n\\n' + content }}{% elif message['role'] == 'assistant' %}{{ '\\n\\n### Assistant:\\n\\n' + content + '' }}{% endif %}{% endfor %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "",
+ "legacy": false,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "split_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "",
+ "use_default_system_prompt": false
+}
diff --git a/v2/checkpoint-1100/trainer_state.json b/v2/checkpoint-1100/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..81b7c7d93df0a572fcd0fe047d1eb201c34c83fa
--- /dev/null
+++ b/v2/checkpoint-1100/trainer_state.json
@@ -0,0 +1,7721 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 1.5486968449931413,
+ "eval_steps": 500,
+ "global_step": 1100,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.0,
+ "grad_norm": 0.849355824164473,
+ "learning_rate": 4.878048780487805e-07,
+ "loss": 1.3655,
+ "step": 1
+ },
+ {
+ "epoch": 0.0,
+ "grad_norm": 10.01567518957158,
+ "learning_rate": 9.75609756097561e-07,
+ "loss": 1.5767,
+ "step": 2
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.6466000875559635,
+ "learning_rate": 1.4634146341463414e-06,
+ "loss": 1.3913,
+ "step": 3
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.6644565932010504,
+ "learning_rate": 1.951219512195122e-06,
+ "loss": 1.3218,
+ "step": 4
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.571354207588475,
+ "learning_rate": 2.4390243902439027e-06,
+ "loss": 1.3597,
+ "step": 5
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.31036262839244955,
+ "learning_rate": 2.926829268292683e-06,
+ "loss": 1.2832,
+ "step": 6
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.2622135027188184,
+ "learning_rate": 3.414634146341464e-06,
+ "loss": 1.2161,
+ "step": 7
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.296824630261661,
+ "learning_rate": 3.902439024390244e-06,
+ "loss": 1.2985,
+ "step": 8
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.2557267467361569,
+ "learning_rate": 4.390243902439025e-06,
+ "loss": 1.3175,
+ "step": 9
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.23418939513890769,
+ "learning_rate": 4.8780487804878055e-06,
+ "loss": 1.2617,
+ "step": 10
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.2364760983285843,
+ "learning_rate": 5.365853658536586e-06,
+ "loss": 1.3103,
+ "step": 11
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.23893034721889,
+ "learning_rate": 5.853658536585366e-06,
+ "loss": 1.2405,
+ "step": 12
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.25563593295485887,
+ "learning_rate": 6.341463414634147e-06,
+ "loss": 1.2831,
+ "step": 13
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.23239975352661665,
+ "learning_rate": 6.829268292682928e-06,
+ "loss": 1.3125,
+ "step": 14
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.3092813858209507,
+ "learning_rate": 7.317073170731707e-06,
+ "loss": 1.2422,
+ "step": 15
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.282563380367434,
+ "learning_rate": 7.804878048780489e-06,
+ "loss": 1.2453,
+ "step": 16
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.22065680088315018,
+ "learning_rate": 8.292682926829268e-06,
+ "loss": 1.2491,
+ "step": 17
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.22777800877980184,
+ "learning_rate": 8.78048780487805e-06,
+ "loss": 1.2655,
+ "step": 18
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.22145212540177928,
+ "learning_rate": 9.268292682926831e-06,
+ "loss": 1.2413,
+ "step": 19
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.22482351883112714,
+ "learning_rate": 9.756097560975611e-06,
+ "loss": 1.2653,
+ "step": 20
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.20823080508385733,
+ "learning_rate": 1.024390243902439e-05,
+ "loss": 1.2374,
+ "step": 21
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.26025492562935737,
+ "learning_rate": 1.0731707317073172e-05,
+ "loss": 1.2065,
+ "step": 22
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.2150252124176173,
+ "learning_rate": 1.1219512195121953e-05,
+ "loss": 1.2782,
+ "step": 23
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.2505915177425618,
+ "learning_rate": 1.1707317073170731e-05,
+ "loss": 1.2742,
+ "step": 24
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.20129223044786942,
+ "learning_rate": 1.2195121951219513e-05,
+ "loss": 1.3366,
+ "step": 25
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.1973508510397107,
+ "learning_rate": 1.2682926829268294e-05,
+ "loss": 1.2476,
+ "step": 26
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.27103325392437194,
+ "learning_rate": 1.3170731707317076e-05,
+ "loss": 1.2325,
+ "step": 27
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.17954976411006285,
+ "learning_rate": 1.3658536585365855e-05,
+ "loss": 1.2523,
+ "step": 28
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.22216997851088888,
+ "learning_rate": 1.4146341463414635e-05,
+ "loss": 1.3297,
+ "step": 29
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.2071458864548587,
+ "learning_rate": 1.4634146341463415e-05,
+ "loss": 1.2127,
+ "step": 30
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.18039422081622164,
+ "learning_rate": 1.5121951219512196e-05,
+ "loss": 1.2509,
+ "step": 31
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.18631254372974412,
+ "learning_rate": 1.5609756097560978e-05,
+ "loss": 1.2247,
+ "step": 32
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.18843872523649827,
+ "learning_rate": 1.6097560975609757e-05,
+ "loss": 1.195,
+ "step": 33
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.2163847267778325,
+ "learning_rate": 1.6585365853658537e-05,
+ "loss": 1.2179,
+ "step": 34
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.19687688475496104,
+ "learning_rate": 1.7073170731707317e-05,
+ "loss": 1.2763,
+ "step": 35
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.20409643064887947,
+ "learning_rate": 1.75609756097561e-05,
+ "loss": 1.253,
+ "step": 36
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.1879182661759335,
+ "learning_rate": 1.804878048780488e-05,
+ "loss": 1.2586,
+ "step": 37
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.19400648948514373,
+ "learning_rate": 1.8536585365853663e-05,
+ "loss": 1.2154,
+ "step": 38
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.1878879343148452,
+ "learning_rate": 1.902439024390244e-05,
+ "loss": 1.2304,
+ "step": 39
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.17687475469924052,
+ "learning_rate": 1.9512195121951222e-05,
+ "loss": 1.2351,
+ "step": 40
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.18223935625384885,
+ "learning_rate": 2e-05,
+ "loss": 1.2222,
+ "step": 41
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.1943061629408338,
+ "learning_rate": 2.048780487804878e-05,
+ "loss": 1.2044,
+ "step": 42
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.17027514338700078,
+ "learning_rate": 2.0975609756097564e-05,
+ "loss": 1.1548,
+ "step": 43
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.18553769630586192,
+ "learning_rate": 2.1463414634146344e-05,
+ "loss": 1.2721,
+ "step": 44
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.19732826914228765,
+ "learning_rate": 2.1951219512195124e-05,
+ "loss": 1.3097,
+ "step": 45
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.18714230986631472,
+ "learning_rate": 2.2439024390243907e-05,
+ "loss": 1.2662,
+ "step": 46
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.19988987568002223,
+ "learning_rate": 2.2926829268292683e-05,
+ "loss": 1.2904,
+ "step": 47
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.17744650133390918,
+ "learning_rate": 2.3414634146341463e-05,
+ "loss": 1.1825,
+ "step": 48
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.16576734763834533,
+ "learning_rate": 2.3902439024390246e-05,
+ "loss": 1.1858,
+ "step": 49
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.179591794065527,
+ "learning_rate": 2.4390243902439026e-05,
+ "loss": 1.2711,
+ "step": 50
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.17923464471176911,
+ "learning_rate": 2.4878048780487805e-05,
+ "loss": 1.2289,
+ "step": 51
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.18991742907836837,
+ "learning_rate": 2.536585365853659e-05,
+ "loss": 1.3097,
+ "step": 52
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.19849796137254636,
+ "learning_rate": 2.5853658536585368e-05,
+ "loss": 1.2489,
+ "step": 53
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.17452371110976383,
+ "learning_rate": 2.634146341463415e-05,
+ "loss": 1.2461,
+ "step": 54
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.17671022353085036,
+ "learning_rate": 2.682926829268293e-05,
+ "loss": 1.153,
+ "step": 55
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.36820559192096686,
+ "learning_rate": 2.731707317073171e-05,
+ "loss": 1.2431,
+ "step": 56
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.20331468526494198,
+ "learning_rate": 2.7804878048780487e-05,
+ "loss": 1.2575,
+ "step": 57
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.2402486598118377,
+ "learning_rate": 2.829268292682927e-05,
+ "loss": 1.2538,
+ "step": 58
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.2549409484173144,
+ "learning_rate": 2.878048780487805e-05,
+ "loss": 1.2065,
+ "step": 59
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.2053105349872685,
+ "learning_rate": 2.926829268292683e-05,
+ "loss": 1.2094,
+ "step": 60
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.17971910872957886,
+ "learning_rate": 2.9756097560975613e-05,
+ "loss": 1.228,
+ "step": 61
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.1885853654992973,
+ "learning_rate": 3.0243902439024392e-05,
+ "loss": 1.2286,
+ "step": 62
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.1848524571968613,
+ "learning_rate": 3.073170731707317e-05,
+ "loss": 1.2718,
+ "step": 63
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.18734105883548513,
+ "learning_rate": 3.1219512195121955e-05,
+ "loss": 1.2357,
+ "step": 64
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.17774668052121825,
+ "learning_rate": 3.170731707317074e-05,
+ "loss": 1.1509,
+ "step": 65
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.17890968008080646,
+ "learning_rate": 3.2195121951219514e-05,
+ "loss": 1.1924,
+ "step": 66
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.18249273371332375,
+ "learning_rate": 3.268292682926829e-05,
+ "loss": 1.2545,
+ "step": 67
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.21064122671902577,
+ "learning_rate": 3.3170731707317074e-05,
+ "loss": 1.2832,
+ "step": 68
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.1820064171955093,
+ "learning_rate": 3.365853658536586e-05,
+ "loss": 1.2071,
+ "step": 69
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.16996662800553433,
+ "learning_rate": 3.414634146341463e-05,
+ "loss": 1.2073,
+ "step": 70
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.1618669302922445,
+ "learning_rate": 3.4634146341463416e-05,
+ "loss": 1.1289,
+ "step": 71
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.18948744950985544,
+ "learning_rate": 3.51219512195122e-05,
+ "loss": 1.2915,
+ "step": 72
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.18326143691603383,
+ "learning_rate": 3.5609756097560976e-05,
+ "loss": 1.2238,
+ "step": 73
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.17410704510700503,
+ "learning_rate": 3.609756097560976e-05,
+ "loss": 1.1784,
+ "step": 74
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.1983667344995625,
+ "learning_rate": 3.658536585365854e-05,
+ "loss": 1.2452,
+ "step": 75
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.3416310763369357,
+ "learning_rate": 3.7073170731707325e-05,
+ "loss": 1.1972,
+ "step": 76
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.2776466983511955,
+ "learning_rate": 3.75609756097561e-05,
+ "loss": 1.3121,
+ "step": 77
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.20026129636576834,
+ "learning_rate": 3.804878048780488e-05,
+ "loss": 1.2436,
+ "step": 78
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.21064549243917835,
+ "learning_rate": 3.853658536585366e-05,
+ "loss": 1.2064,
+ "step": 79
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.22119482175714267,
+ "learning_rate": 3.9024390243902444e-05,
+ "loss": 1.2715,
+ "step": 80
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.23047133748844142,
+ "learning_rate": 3.951219512195122e-05,
+ "loss": 1.2888,
+ "step": 81
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.18741863156973176,
+ "learning_rate": 4e-05,
+ "loss": 1.248,
+ "step": 82
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.1747859810629604,
+ "learning_rate": 4.0487804878048786e-05,
+ "loss": 1.1683,
+ "step": 83
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.1896944798413341,
+ "learning_rate": 4.097560975609756e-05,
+ "loss": 1.2155,
+ "step": 84
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.18724128114363303,
+ "learning_rate": 4.1463414634146346e-05,
+ "loss": 1.2273,
+ "step": 85
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.17368125504855478,
+ "learning_rate": 4.195121951219513e-05,
+ "loss": 1.224,
+ "step": 86
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.18371141013625703,
+ "learning_rate": 4.2439024390243905e-05,
+ "loss": 1.2294,
+ "step": 87
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.1791029365673714,
+ "learning_rate": 4.292682926829269e-05,
+ "loss": 1.2895,
+ "step": 88
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.20259974283859655,
+ "learning_rate": 4.341463414634147e-05,
+ "loss": 1.1841,
+ "step": 89
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.17457456183272174,
+ "learning_rate": 4.390243902439025e-05,
+ "loss": 1.2357,
+ "step": 90
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.1815824380789748,
+ "learning_rate": 4.439024390243903e-05,
+ "loss": 1.2304,
+ "step": 91
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.17566480599583392,
+ "learning_rate": 4.4878048780487814e-05,
+ "loss": 1.242,
+ "step": 92
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.18422975005984474,
+ "learning_rate": 4.536585365853658e-05,
+ "loss": 1.2177,
+ "step": 93
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.16796781877940678,
+ "learning_rate": 4.5853658536585366e-05,
+ "loss": 1.1482,
+ "step": 94
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.18636131653783305,
+ "learning_rate": 4.634146341463415e-05,
+ "loss": 1.1758,
+ "step": 95
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.1823665700289814,
+ "learning_rate": 4.6829268292682926e-05,
+ "loss": 1.289,
+ "step": 96
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.1719900691262439,
+ "learning_rate": 4.731707317073171e-05,
+ "loss": 1.1626,
+ "step": 97
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.17937994168039778,
+ "learning_rate": 4.780487804878049e-05,
+ "loss": 1.175,
+ "step": 98
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.16631851422106986,
+ "learning_rate": 4.829268292682927e-05,
+ "loss": 1.2177,
+ "step": 99
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.19143696232800309,
+ "learning_rate": 4.878048780487805e-05,
+ "loss": 1.3071,
+ "step": 100
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.17859506638780318,
+ "learning_rate": 4.9268292682926835e-05,
+ "loss": 1.2351,
+ "step": 101
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.18381520321248196,
+ "learning_rate": 4.975609756097561e-05,
+ "loss": 1.2342,
+ "step": 102
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.17968218683773912,
+ "learning_rate": 5.0243902439024394e-05,
+ "loss": 1.2074,
+ "step": 103
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.18139489969339018,
+ "learning_rate": 5.073170731707318e-05,
+ "loss": 1.1558,
+ "step": 104
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.17366624842514394,
+ "learning_rate": 5.121951219512195e-05,
+ "loss": 1.1897,
+ "step": 105
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.16034845455223745,
+ "learning_rate": 5.1707317073170736e-05,
+ "loss": 1.179,
+ "step": 106
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.17583069577827776,
+ "learning_rate": 5.219512195121952e-05,
+ "loss": 1.1856,
+ "step": 107
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.1853758076989552,
+ "learning_rate": 5.26829268292683e-05,
+ "loss": 1.2072,
+ "step": 108
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.19597443965936462,
+ "learning_rate": 5.317073170731708e-05,
+ "loss": 1.2271,
+ "step": 109
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.1899206334098331,
+ "learning_rate": 5.365853658536586e-05,
+ "loss": 1.1961,
+ "step": 110
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.17463763837757018,
+ "learning_rate": 5.4146341463414645e-05,
+ "loss": 1.2049,
+ "step": 111
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.20431371701229986,
+ "learning_rate": 5.463414634146342e-05,
+ "loss": 1.2891,
+ "step": 112
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1814475107638498,
+ "learning_rate": 5.51219512195122e-05,
+ "loss": 1.2346,
+ "step": 113
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1883849423207823,
+ "learning_rate": 5.5609756097560974e-05,
+ "loss": 1.244,
+ "step": 114
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1857258128640568,
+ "learning_rate": 5.609756097560976e-05,
+ "loss": 1.2669,
+ "step": 115
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1740768514118401,
+ "learning_rate": 5.658536585365854e-05,
+ "loss": 1.2414,
+ "step": 116
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1919320335584178,
+ "learning_rate": 5.7073170731707317e-05,
+ "loss": 1.2886,
+ "step": 117
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.18288775167828136,
+ "learning_rate": 5.75609756097561e-05,
+ "loss": 1.1875,
+ "step": 118
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.18208588867750863,
+ "learning_rate": 5.804878048780488e-05,
+ "loss": 1.2388,
+ "step": 119
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.1743260015658331,
+ "learning_rate": 5.853658536585366e-05,
+ "loss": 1.1762,
+ "step": 120
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.17856046291517946,
+ "learning_rate": 5.902439024390244e-05,
+ "loss": 1.2888,
+ "step": 121
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.17493794870966536,
+ "learning_rate": 5.9512195121951225e-05,
+ "loss": 1.2222,
+ "step": 122
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.1909202655203384,
+ "learning_rate": 6.000000000000001e-05,
+ "loss": 1.2414,
+ "step": 123
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.18345819482834988,
+ "learning_rate": 6.0487804878048785e-05,
+ "loss": 1.2756,
+ "step": 124
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.2057069352956621,
+ "learning_rate": 6.097560975609757e-05,
+ "loss": 1.261,
+ "step": 125
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.299775882469108,
+ "learning_rate": 6.146341463414634e-05,
+ "loss": 1.2566,
+ "step": 126
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.1869687633018095,
+ "learning_rate": 6.195121951219513e-05,
+ "loss": 1.3039,
+ "step": 127
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.17747149926197442,
+ "learning_rate": 6.243902439024391e-05,
+ "loss": 1.2524,
+ "step": 128
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.17885157788044242,
+ "learning_rate": 6.29268292682927e-05,
+ "loss": 1.2455,
+ "step": 129
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.17617298187845123,
+ "learning_rate": 6.341463414634148e-05,
+ "loss": 1.2009,
+ "step": 130
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.20164176323497066,
+ "learning_rate": 6.390243902439025e-05,
+ "loss": 1.2634,
+ "step": 131
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.20459903417307612,
+ "learning_rate": 6.439024390243903e-05,
+ "loss": 1.1963,
+ "step": 132
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.1863755486334296,
+ "learning_rate": 6.487804878048781e-05,
+ "loss": 1.2387,
+ "step": 133
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.19265866140295207,
+ "learning_rate": 6.536585365853658e-05,
+ "loss": 1.2688,
+ "step": 134
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.1823425868969493,
+ "learning_rate": 6.585365853658536e-05,
+ "loss": 1.2041,
+ "step": 135
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.2016853266472781,
+ "learning_rate": 6.634146341463415e-05,
+ "loss": 1.1223,
+ "step": 136
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.17282675192463448,
+ "learning_rate": 6.682926829268293e-05,
+ "loss": 1.1879,
+ "step": 137
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.17398811693399288,
+ "learning_rate": 6.731707317073171e-05,
+ "loss": 1.2682,
+ "step": 138
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.18516916965434696,
+ "learning_rate": 6.78048780487805e-05,
+ "loss": 1.1666,
+ "step": 139
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.1852213129647933,
+ "learning_rate": 6.829268292682927e-05,
+ "loss": 1.2501,
+ "step": 140
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.17915948766591883,
+ "learning_rate": 6.878048780487805e-05,
+ "loss": 1.2264,
+ "step": 141
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.21599939417233183,
+ "learning_rate": 6.926829268292683e-05,
+ "loss": 1.2376,
+ "step": 142
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.17839304459521851,
+ "learning_rate": 6.975609756097562e-05,
+ "loss": 1.2353,
+ "step": 143
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.20826913231380875,
+ "learning_rate": 7.02439024390244e-05,
+ "loss": 1.1901,
+ "step": 144
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.20788894913361589,
+ "learning_rate": 7.073170731707318e-05,
+ "loss": 1.2577,
+ "step": 145
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.18420055842301297,
+ "learning_rate": 7.121951219512195e-05,
+ "loss": 1.1393,
+ "step": 146
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.19903048468685589,
+ "learning_rate": 7.170731707317073e-05,
+ "loss": 1.2321,
+ "step": 147
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.19074116314985748,
+ "learning_rate": 7.219512195121952e-05,
+ "loss": 1.1912,
+ "step": 148
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.2353816469403903,
+ "learning_rate": 7.26829268292683e-05,
+ "loss": 1.28,
+ "step": 149
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.21634875684769345,
+ "learning_rate": 7.317073170731708e-05,
+ "loss": 1.3312,
+ "step": 150
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.18290969006743918,
+ "learning_rate": 7.365853658536587e-05,
+ "loss": 1.2214,
+ "step": 151
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.18484243897545208,
+ "learning_rate": 7.414634146341465e-05,
+ "loss": 1.1895,
+ "step": 152
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.21882343112978872,
+ "learning_rate": 7.463414634146342e-05,
+ "loss": 1.2219,
+ "step": 153
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.19868284379241205,
+ "learning_rate": 7.51219512195122e-05,
+ "loss": 1.2176,
+ "step": 154
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.20912516312950613,
+ "learning_rate": 7.560975609756097e-05,
+ "loss": 1.242,
+ "step": 155
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.23811880045549916,
+ "learning_rate": 7.609756097560976e-05,
+ "loss": 1.2838,
+ "step": 156
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.19511077122033713,
+ "learning_rate": 7.658536585365854e-05,
+ "loss": 1.1594,
+ "step": 157
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.20094129399534238,
+ "learning_rate": 7.707317073170732e-05,
+ "loss": 1.2966,
+ "step": 158
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.19366245038292418,
+ "learning_rate": 7.75609756097561e-05,
+ "loss": 1.2246,
+ "step": 159
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.19409570223867306,
+ "learning_rate": 7.804878048780489e-05,
+ "loss": 1.2312,
+ "step": 160
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.2087258457033805,
+ "learning_rate": 7.853658536585366e-05,
+ "loss": 1.2169,
+ "step": 161
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.18765223996270428,
+ "learning_rate": 7.902439024390244e-05,
+ "loss": 1.2383,
+ "step": 162
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.20734180224147242,
+ "learning_rate": 7.951219512195122e-05,
+ "loss": 1.2587,
+ "step": 163
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.24690929540287834,
+ "learning_rate": 8e-05,
+ "loss": 1.1951,
+ "step": 164
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.2003538797619543,
+ "learning_rate": 7.999990914797545e-05,
+ "loss": 1.1982,
+ "step": 165
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.22469075613510484,
+ "learning_rate": 7.99996365923145e-05,
+ "loss": 1.2355,
+ "step": 166
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.21870100788336058,
+ "learning_rate": 7.999918233425526e-05,
+ "loss": 1.1103,
+ "step": 167
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.20939989594131886,
+ "learning_rate": 7.999854637586122e-05,
+ "loss": 1.1966,
+ "step": 168
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.43108211416237796,
+ "learning_rate": 7.999772872002132e-05,
+ "loss": 1.2882,
+ "step": 169
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.27045413432174487,
+ "learning_rate": 7.999672937044984e-05,
+ "loss": 1.2399,
+ "step": 170
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.19700483036740515,
+ "learning_rate": 7.999554833168642e-05,
+ "loss": 1.202,
+ "step": 171
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.3335979493370708,
+ "learning_rate": 7.999418560909604e-05,
+ "loss": 1.1995,
+ "step": 172
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.3165803974474567,
+ "learning_rate": 7.999264120886902e-05,
+ "loss": 1.1569,
+ "step": 173
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.1951699080346223,
+ "learning_rate": 7.999091513802093e-05,
+ "loss": 1.1778,
+ "step": 174
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.2087559121749787,
+ "learning_rate": 7.998900740439265e-05,
+ "loss": 1.1736,
+ "step": 175
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.20345180977460478,
+ "learning_rate": 7.998691801665024e-05,
+ "loss": 1.2281,
+ "step": 176
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.24617644827252333,
+ "learning_rate": 7.998464698428495e-05,
+ "loss": 1.2072,
+ "step": 177
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.2469050959356265,
+ "learning_rate": 7.998219431761318e-05,
+ "loss": 1.2242,
+ "step": 178
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.19529317748460623,
+ "learning_rate": 7.997956002777642e-05,
+ "loss": 1.2567,
+ "step": 179
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.19048389491381376,
+ "learning_rate": 7.99767441267412e-05,
+ "loss": 1.2982,
+ "step": 180
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.2085799116493225,
+ "learning_rate": 7.997374662729904e-05,
+ "loss": 1.1254,
+ "step": 181
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.20636853256378995,
+ "learning_rate": 7.997056754306636e-05,
+ "loss": 1.2435,
+ "step": 182
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.20590016382290252,
+ "learning_rate": 7.99672068884845e-05,
+ "loss": 1.2658,
+ "step": 183
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.1931166169764433,
+ "learning_rate": 7.996366467881955e-05,
+ "loss": 1.1637,
+ "step": 184
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.18873318157988098,
+ "learning_rate": 7.995994093016237e-05,
+ "loss": 1.1335,
+ "step": 185
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.19210254625199108,
+ "learning_rate": 7.995603565942846e-05,
+ "loss": 1.1928,
+ "step": 186
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.2130986479765664,
+ "learning_rate": 7.995194888435792e-05,
+ "loss": 1.2158,
+ "step": 187
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.22003854501814088,
+ "learning_rate": 7.994768062351532e-05,
+ "loss": 1.2288,
+ "step": 188
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.20330803191993058,
+ "learning_rate": 7.994323089628968e-05,
+ "loss": 1.2426,
+ "step": 189
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.20567314642208634,
+ "learning_rate": 7.993859972289434e-05,
+ "loss": 1.2649,
+ "step": 190
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.21556663727342962,
+ "learning_rate": 7.993378712436686e-05,
+ "loss": 1.2545,
+ "step": 191
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.20309165469109888,
+ "learning_rate": 7.992879312256897e-05,
+ "loss": 1.3338,
+ "step": 192
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.19574356669421325,
+ "learning_rate": 7.992361774018641e-05,
+ "loss": 1.278,
+ "step": 193
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.2763613746722313,
+ "learning_rate": 7.991826100072891e-05,
+ "loss": 1.2571,
+ "step": 194
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.19346552479915102,
+ "learning_rate": 7.991272292852996e-05,
+ "loss": 1.2027,
+ "step": 195
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.2281167812123908,
+ "learning_rate": 7.990700354874683e-05,
+ "loss": 1.2586,
+ "step": 196
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.19699013712137542,
+ "learning_rate": 7.990110288736042e-05,
+ "loss": 1.1371,
+ "step": 197
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.21768209981475933,
+ "learning_rate": 7.989502097117503e-05,
+ "loss": 1.2522,
+ "step": 198
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.21335427847754582,
+ "learning_rate": 7.988875782781838e-05,
+ "loss": 1.2437,
+ "step": 199
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.21856710629066897,
+ "learning_rate": 7.988231348574147e-05,
+ "loss": 1.2135,
+ "step": 200
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.20482062658774797,
+ "learning_rate": 7.987568797421836e-05,
+ "loss": 1.1755,
+ "step": 201
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.2017756813960897,
+ "learning_rate": 7.986888132334608e-05,
+ "loss": 1.1699,
+ "step": 202
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.20496443848153809,
+ "learning_rate": 7.986189356404458e-05,
+ "loss": 1.2125,
+ "step": 203
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.2134603800558358,
+ "learning_rate": 7.985472472805643e-05,
+ "loss": 1.2391,
+ "step": 204
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.2364175573420861,
+ "learning_rate": 7.98473748479468e-05,
+ "loss": 1.2384,
+ "step": 205
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.1872419861598724,
+ "learning_rate": 7.983984395710326e-05,
+ "loss": 1.1457,
+ "step": 206
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.28222194007095774,
+ "learning_rate": 7.983213208973566e-05,
+ "loss": 1.2952,
+ "step": 207
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.1916094851162064,
+ "learning_rate": 7.982423928087593e-05,
+ "loss": 1.1763,
+ "step": 208
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.18446245256166657,
+ "learning_rate": 7.981616556637795e-05,
+ "loss": 1.1863,
+ "step": 209
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.195191961022491,
+ "learning_rate": 7.980791098291737e-05,
+ "loss": 1.2036,
+ "step": 210
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.2652439657825496,
+ "learning_rate": 7.979947556799151e-05,
+ "loss": 1.2834,
+ "step": 211
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.24308438957843412,
+ "learning_rate": 7.979085935991906e-05,
+ "loss": 1.234,
+ "step": 212
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.21294701043622016,
+ "learning_rate": 7.978206239784004e-05,
+ "loss": 1.3006,
+ "step": 213
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.25809277041859524,
+ "learning_rate": 7.977308472171553e-05,
+ "loss": 1.2272,
+ "step": 214
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.193463860107294,
+ "learning_rate": 7.976392637232754e-05,
+ "loss": 1.2295,
+ "step": 215
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.2150023760609626,
+ "learning_rate": 7.975458739127877e-05,
+ "loss": 1.2135,
+ "step": 216
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.22590495955605894,
+ "learning_rate": 7.974506782099253e-05,
+ "loss": 1.2532,
+ "step": 217
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.21023744668403702,
+ "learning_rate": 7.973536770471242e-05,
+ "loss": 1.2472,
+ "step": 218
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.2345749799511543,
+ "learning_rate": 7.972548708650218e-05,
+ "loss": 1.1791,
+ "step": 219
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.2158876734005217,
+ "learning_rate": 7.971542601124553e-05,
+ "loss": 1.2483,
+ "step": 220
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.29455339949432446,
+ "learning_rate": 7.970518452464593e-05,
+ "loss": 1.2894,
+ "step": 221
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.23983708730626851,
+ "learning_rate": 7.969476267322636e-05,
+ "loss": 1.271,
+ "step": 222
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.1922400905426158,
+ "learning_rate": 7.968416050432912e-05,
+ "loss": 1.2139,
+ "step": 223
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.2238136844422931,
+ "learning_rate": 7.967337806611568e-05,
+ "loss": 1.2655,
+ "step": 224
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.21230292828267672,
+ "learning_rate": 7.966241540756631e-05,
+ "loss": 1.2406,
+ "step": 225
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.26656119419070456,
+ "learning_rate": 7.965127257848004e-05,
+ "loss": 1.2595,
+ "step": 226
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.22381385502992684,
+ "learning_rate": 7.963994962947426e-05,
+ "loss": 1.1737,
+ "step": 227
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20056702203994298,
+ "learning_rate": 7.962844661198462e-05,
+ "loss": 1.1969,
+ "step": 228
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20148701321526885,
+ "learning_rate": 7.961676357826478e-05,
+ "loss": 1.2151,
+ "step": 229
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20034834807028637,
+ "learning_rate": 7.960490058138604e-05,
+ "loss": 1.1455,
+ "step": 230
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.21050838521846033,
+ "learning_rate": 7.959285767523732e-05,
+ "loss": 1.2223,
+ "step": 231
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20904772138969777,
+ "learning_rate": 7.95806349145247e-05,
+ "loss": 1.2534,
+ "step": 232
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.20307877304792957,
+ "learning_rate": 7.956823235477134e-05,
+ "loss": 1.1352,
+ "step": 233
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.20501105270897094,
+ "learning_rate": 7.95556500523171e-05,
+ "loss": 1.2031,
+ "step": 234
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.19800586972038586,
+ "learning_rate": 7.954288806431838e-05,
+ "loss": 1.2567,
+ "step": 235
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.2175102450594135,
+ "learning_rate": 7.952994644874777e-05,
+ "loss": 1.2538,
+ "step": 236
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.22698189300067595,
+ "learning_rate": 7.951682526439391e-05,
+ "loss": 1.3088,
+ "step": 237
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.19208392014975315,
+ "learning_rate": 7.950352457086109e-05,
+ "loss": 1.2336,
+ "step": 238
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.27004086334319655,
+ "learning_rate": 7.949004442856905e-05,
+ "loss": 1.2012,
+ "step": 239
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.23420974954538043,
+ "learning_rate": 7.947638489875272e-05,
+ "loss": 1.2244,
+ "step": 240
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.20514399124802024,
+ "learning_rate": 7.946254604346186e-05,
+ "loss": 1.2548,
+ "step": 241
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.19334973602372896,
+ "learning_rate": 7.944852792556092e-05,
+ "loss": 1.2104,
+ "step": 242
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.1992640714537956,
+ "learning_rate": 7.943433060872858e-05,
+ "loss": 1.2628,
+ "step": 243
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.203284617090413,
+ "learning_rate": 7.941995415745761e-05,
+ "loss": 1.2002,
+ "step": 244
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.22795306969682058,
+ "learning_rate": 7.94053986370545e-05,
+ "loss": 1.2215,
+ "step": 245
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.20789041346838505,
+ "learning_rate": 7.939066411363915e-05,
+ "loss": 1.0998,
+ "step": 246
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.22354868884742066,
+ "learning_rate": 7.937575065414464e-05,
+ "loss": 1.2564,
+ "step": 247
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.21176392726647736,
+ "learning_rate": 7.936065832631687e-05,
+ "loss": 1.2816,
+ "step": 248
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.19967179557235587,
+ "learning_rate": 7.934538719871427e-05,
+ "loss": 1.1961,
+ "step": 249
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.210819577350627,
+ "learning_rate": 7.932993734070747e-05,
+ "loss": 1.2167,
+ "step": 250
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.21537794551756187,
+ "learning_rate": 7.931430882247903e-05,
+ "loss": 1.2341,
+ "step": 251
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.22850872387256574,
+ "learning_rate": 7.929850171502304e-05,
+ "loss": 1.1686,
+ "step": 252
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.22380366415076383,
+ "learning_rate": 7.928251609014493e-05,
+ "loss": 1.1462,
+ "step": 253
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.22426923149036065,
+ "learning_rate": 7.926635202046102e-05,
+ "loss": 1.1792,
+ "step": 254
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.42082703321103965,
+ "learning_rate": 7.925000957939822e-05,
+ "loss": 1.2718,
+ "step": 255
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.2235432774854074,
+ "learning_rate": 7.92334888411937e-05,
+ "loss": 1.2598,
+ "step": 256
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.281644028934108,
+ "learning_rate": 7.92167898808946e-05,
+ "loss": 1.2205,
+ "step": 257
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.2037705143888748,
+ "learning_rate": 7.919991277435763e-05,
+ "loss": 1.1737,
+ "step": 258
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.20917419230028977,
+ "learning_rate": 7.918285759824879e-05,
+ "loss": 1.2035,
+ "step": 259
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.20510847570635518,
+ "learning_rate": 7.916562443004292e-05,
+ "loss": 1.2135,
+ "step": 260
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.25172483071092466,
+ "learning_rate": 7.914821334802342e-05,
+ "loss": 1.2218,
+ "step": 261
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.21102706700634313,
+ "learning_rate": 7.91306244312819e-05,
+ "loss": 1.1738,
+ "step": 262
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.22626060872645815,
+ "learning_rate": 7.911285775971781e-05,
+ "loss": 1.238,
+ "step": 263
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.22448567539778486,
+ "learning_rate": 7.909491341403805e-05,
+ "loss": 1.2404,
+ "step": 264
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.2019099786139193,
+ "learning_rate": 7.907679147575661e-05,
+ "loss": 1.213,
+ "step": 265
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.24307234839096267,
+ "learning_rate": 7.905849202719422e-05,
+ "loss": 1.2322,
+ "step": 266
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.19801890521743487,
+ "learning_rate": 7.904001515147802e-05,
+ "loss": 1.2448,
+ "step": 267
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.2102742273575385,
+ "learning_rate": 7.902136093254106e-05,
+ "loss": 1.1657,
+ "step": 268
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.2173464476815016,
+ "learning_rate": 7.900252945512201e-05,
+ "loss": 1.2549,
+ "step": 269
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.20957275458699595,
+ "learning_rate": 7.898352080476479e-05,
+ "loss": 1.2536,
+ "step": 270
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.20691966388952363,
+ "learning_rate": 7.896433506781811e-05,
+ "loss": 1.2661,
+ "step": 271
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.2276662275112648,
+ "learning_rate": 7.894497233143509e-05,
+ "loss": 1.2409,
+ "step": 272
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.23854109569301263,
+ "learning_rate": 7.892543268357297e-05,
+ "loss": 1.2681,
+ "step": 273
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.2233864156677627,
+ "learning_rate": 7.890571621299252e-05,
+ "loss": 1.1687,
+ "step": 274
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.20114129147925475,
+ "learning_rate": 7.888582300925787e-05,
+ "loss": 1.2184,
+ "step": 275
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.2154654670569462,
+ "learning_rate": 7.886575316273586e-05,
+ "loss": 1.1982,
+ "step": 276
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.2292982209343639,
+ "learning_rate": 7.884550676459583e-05,
+ "loss": 1.2129,
+ "step": 277
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.21302713135229548,
+ "learning_rate": 7.882508390680908e-05,
+ "loss": 1.1605,
+ "step": 278
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.2123661020671048,
+ "learning_rate": 7.88044846821485e-05,
+ "loss": 1.2308,
+ "step": 279
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.2080577410800404,
+ "learning_rate": 7.878370918418818e-05,
+ "loss": 1.2195,
+ "step": 280
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.19663901881127385,
+ "learning_rate": 7.876275750730289e-05,
+ "loss": 1.1591,
+ "step": 281
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.20534502031312163,
+ "learning_rate": 7.874162974666776e-05,
+ "loss": 1.2664,
+ "step": 282
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.23240445399513837,
+ "learning_rate": 7.872032599825779e-05,
+ "loss": 1.2151,
+ "step": 283
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.2672527316717507,
+ "learning_rate": 7.86988463588474e-05,
+ "loss": 1.2406,
+ "step": 284
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.19893903058743695,
+ "learning_rate": 7.867719092601003e-05,
+ "loss": 1.1291,
+ "step": 285
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.33275268109930917,
+ "learning_rate": 7.865535979811768e-05,
+ "loss": 1.1406,
+ "step": 286
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.2373619455690358,
+ "learning_rate": 7.863335307434045e-05,
+ "loss": 1.2799,
+ "step": 287
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.263235735390858,
+ "learning_rate": 7.861117085464612e-05,
+ "loss": 1.2415,
+ "step": 288
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.25884281780784324,
+ "learning_rate": 7.858881323979965e-05,
+ "loss": 1.3919,
+ "step": 289
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.25426288332255736,
+ "learning_rate": 7.85662803313628e-05,
+ "loss": 1.174,
+ "step": 290
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.26655405527881243,
+ "learning_rate": 7.854357223169356e-05,
+ "loss": 1.2806,
+ "step": 291
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.20909844432349833,
+ "learning_rate": 7.852068904394579e-05,
+ "loss": 1.2627,
+ "step": 292
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.21307115068935759,
+ "learning_rate": 7.849763087206866e-05,
+ "loss": 1.1879,
+ "step": 293
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.25009949471398946,
+ "learning_rate": 7.847439782080628e-05,
+ "loss": 1.2881,
+ "step": 294
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.20960783418679174,
+ "learning_rate": 7.845098999569712e-05,
+ "loss": 1.2723,
+ "step": 295
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.24968832437925104,
+ "learning_rate": 7.842740750307362e-05,
+ "loss": 1.2029,
+ "step": 296
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.22981196585125677,
+ "learning_rate": 7.84036504500616e-05,
+ "loss": 1.1695,
+ "step": 297
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.2320606844751365,
+ "learning_rate": 7.837971894457991e-05,
+ "loss": 1.2317,
+ "step": 298
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.23051459673906124,
+ "learning_rate": 7.835561309533981e-05,
+ "loss": 1.2046,
+ "step": 299
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.2510027231060586,
+ "learning_rate": 7.833133301184457e-05,
+ "loss": 1.199,
+ "step": 300
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.23601180466018787,
+ "learning_rate": 7.830687880438895e-05,
+ "loss": 1.1755,
+ "step": 301
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.24740820934385369,
+ "learning_rate": 7.828225058405864e-05,
+ "loss": 1.2054,
+ "step": 302
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.23065372979111173,
+ "learning_rate": 7.825744846272984e-05,
+ "loss": 1.2066,
+ "step": 303
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.22385077334838213,
+ "learning_rate": 7.823247255306866e-05,
+ "loss": 1.2147,
+ "step": 304
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.42981213948386104,
+ "learning_rate": 7.820732296853074e-05,
+ "loss": 1.2314,
+ "step": 305
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.21122844902751076,
+ "learning_rate": 7.818199982336058e-05,
+ "loss": 1.1462,
+ "step": 306
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.23374869692118933,
+ "learning_rate": 7.815650323259117e-05,
+ "loss": 1.2051,
+ "step": 307
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.21662363795962128,
+ "learning_rate": 7.813083331204332e-05,
+ "loss": 1.1575,
+ "step": 308
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.2088315773384112,
+ "learning_rate": 7.810499017832526e-05,
+ "loss": 1.1316,
+ "step": 309
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.2095238410730976,
+ "learning_rate": 7.807897394883203e-05,
+ "loss": 1.2087,
+ "step": 310
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.22672932127256515,
+ "learning_rate": 7.805278474174499e-05,
+ "loss": 1.2512,
+ "step": 311
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.21873052340922736,
+ "learning_rate": 7.802642267603126e-05,
+ "loss": 1.1909,
+ "step": 312
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.219814521916342,
+ "learning_rate": 7.79998878714432e-05,
+ "loss": 1.1669,
+ "step": 313
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.3049426027257317,
+ "learning_rate": 7.797318044851786e-05,
+ "loss": 1.1797,
+ "step": 314
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.22309435690065985,
+ "learning_rate": 7.794630052857638e-05,
+ "loss": 1.1417,
+ "step": 315
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.3891885169154885,
+ "learning_rate": 7.791924823372354e-05,
+ "loss": 1.2369,
+ "step": 316
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.24780269452456372,
+ "learning_rate": 7.789202368684711e-05,
+ "loss": 1.2521,
+ "step": 317
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.21660460720269362,
+ "learning_rate": 7.786462701161738e-05,
+ "loss": 1.2151,
+ "step": 318
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.23635409466561857,
+ "learning_rate": 7.783705833248649e-05,
+ "loss": 1.2363,
+ "step": 319
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.2616135839903218,
+ "learning_rate": 7.780931777468797e-05,
+ "loss": 1.2428,
+ "step": 320
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.21461059159245083,
+ "learning_rate": 7.77814054642361e-05,
+ "loss": 1.1434,
+ "step": 321
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.25348824286656163,
+ "learning_rate": 7.775332152792539e-05,
+ "loss": 1.2368,
+ "step": 322
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.22275034726331247,
+ "learning_rate": 7.772506609332995e-05,
+ "loss": 1.1827,
+ "step": 323
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.25030821228147526,
+ "learning_rate": 7.769663928880298e-05,
+ "loss": 1.2428,
+ "step": 324
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.22251804398745534,
+ "learning_rate": 7.766804124347608e-05,
+ "loss": 1.1889,
+ "step": 325
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.23381455520411995,
+ "learning_rate": 7.763927208725879e-05,
+ "loss": 1.2115,
+ "step": 326
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.27341902651946226,
+ "learning_rate": 7.761033195083791e-05,
+ "loss": 1.2535,
+ "step": 327
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.24862471659814522,
+ "learning_rate": 7.758122096567694e-05,
+ "loss": 1.2128,
+ "step": 328
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.2251357082045494,
+ "learning_rate": 7.755193926401547e-05,
+ "loss": 1.2334,
+ "step": 329
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.3173274941622932,
+ "learning_rate": 7.752248697886857e-05,
+ "loss": 1.226,
+ "step": 330
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.23056440717672175,
+ "learning_rate": 7.74928642440263e-05,
+ "loss": 1.2339,
+ "step": 331
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2801507500859342,
+ "learning_rate": 7.746307119405286e-05,
+ "loss": 1.287,
+ "step": 332
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2267818430426272,
+ "learning_rate": 7.743310796428622e-05,
+ "loss": 1.1916,
+ "step": 333
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2777329160365585,
+ "learning_rate": 7.74029746908374e-05,
+ "loss": 1.252,
+ "step": 334
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.25289169762353,
+ "learning_rate": 7.737267151058983e-05,
+ "loss": 1.2153,
+ "step": 335
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2424670686901653,
+ "learning_rate": 7.734219856119875e-05,
+ "loss": 1.2227,
+ "step": 336
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.22747092217441645,
+ "learning_rate": 7.731155598109067e-05,
+ "loss": 1.19,
+ "step": 337
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.2307810940100189,
+ "learning_rate": 7.728074390946257e-05,
+ "loss": 1.1818,
+ "step": 338
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.2583402574655623,
+ "learning_rate": 7.724976248628142e-05,
+ "loss": 1.1608,
+ "step": 339
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.22140209760890694,
+ "learning_rate": 7.721861185228347e-05,
+ "loss": 1.1245,
+ "step": 340
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.25859310758244686,
+ "learning_rate": 7.718729214897362e-05,
+ "loss": 1.2247,
+ "step": 341
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.26371179531372124,
+ "learning_rate": 7.715580351862482e-05,
+ "loss": 1.2128,
+ "step": 342
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.26575541302851047,
+ "learning_rate": 7.712414610427733e-05,
+ "loss": 1.2443,
+ "step": 343
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.269978305197599,
+ "learning_rate": 7.709232004973816e-05,
+ "loss": 1.2231,
+ "step": 344
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.26583998705977047,
+ "learning_rate": 7.70603254995804e-05,
+ "loss": 1.2476,
+ "step": 345
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.24256062164066097,
+ "learning_rate": 7.702816259914253e-05,
+ "loss": 1.2901,
+ "step": 346
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.3463123472658915,
+ "learning_rate": 7.699583149452779e-05,
+ "loss": 1.3277,
+ "step": 347
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.2269096590531878,
+ "learning_rate": 7.696333233260345e-05,
+ "loss": 1.2047,
+ "step": 348
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.25136883001050025,
+ "learning_rate": 7.693066526100031e-05,
+ "loss": 1.1619,
+ "step": 349
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.2565112571116145,
+ "learning_rate": 7.68978304281118e-05,
+ "loss": 1.2389,
+ "step": 350
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.22175779550828703,
+ "learning_rate": 7.686482798309349e-05,
+ "loss": 1.2238,
+ "step": 351
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.22588304332216555,
+ "learning_rate": 7.683165807586234e-05,
+ "loss": 1.174,
+ "step": 352
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.24889474296529737,
+ "learning_rate": 7.6798320857096e-05,
+ "loss": 1.2366,
+ "step": 353
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.27339703806525034,
+ "learning_rate": 7.676481647823214e-05,
+ "loss": 1.2356,
+ "step": 354
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.23424666722888365,
+ "learning_rate": 7.673114509146782e-05,
+ "loss": 1.2089,
+ "step": 355
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.27978285392461766,
+ "learning_rate": 7.66973068497587e-05,
+ "loss": 1.2609,
+ "step": 356
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.2509423350138824,
+ "learning_rate": 7.666330190681844e-05,
+ "loss": 1.1777,
+ "step": 357
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.23007730927468031,
+ "learning_rate": 7.662913041711793e-05,
+ "loss": 1.154,
+ "step": 358
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.2438648674953112,
+ "learning_rate": 7.659479253588462e-05,
+ "loss": 1.2257,
+ "step": 359
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.28816093242092233,
+ "learning_rate": 7.65602884191018e-05,
+ "loss": 1.2558,
+ "step": 360
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.24972815300596035,
+ "learning_rate": 7.652561822350793e-05,
+ "loss": 1.2837,
+ "step": 361
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.2543189139697063,
+ "learning_rate": 7.649078210659587e-05,
+ "loss": 1.2193,
+ "step": 362
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.2237937956718952,
+ "learning_rate": 7.645578022661224e-05,
+ "loss": 1.2237,
+ "step": 363
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.29742029408787396,
+ "learning_rate": 7.642061274255657e-05,
+ "loss": 1.2116,
+ "step": 364
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2462883147335493,
+ "learning_rate": 7.638527981418075e-05,
+ "loss": 1.1827,
+ "step": 365
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2647802498907096,
+ "learning_rate": 7.634978160198817e-05,
+ "loss": 1.2739,
+ "step": 366
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.22360398779217264,
+ "learning_rate": 7.631411826723306e-05,
+ "loss": 1.2185,
+ "step": 367
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2635048004593543,
+ "learning_rate": 7.627828997191973e-05,
+ "loss": 1.2317,
+ "step": 368
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2764803449917684,
+ "learning_rate": 7.624229687880184e-05,
+ "loss": 1.1923,
+ "step": 369
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.25724943233414527,
+ "learning_rate": 7.620613915138166e-05,
+ "loss": 1.2218,
+ "step": 370
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.2858318045794755,
+ "learning_rate": 7.61698169539093e-05,
+ "loss": 1.1496,
+ "step": 371
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.23547216647460364,
+ "learning_rate": 7.613333045138206e-05,
+ "loss": 1.1905,
+ "step": 372
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.22984814903684375,
+ "learning_rate": 7.609667980954355e-05,
+ "loss": 1.2009,
+ "step": 373
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.2551903754079084,
+ "learning_rate": 7.605986519488301e-05,
+ "loss": 1.2042,
+ "step": 374
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.2508257410125616,
+ "learning_rate": 7.602288677463457e-05,
+ "loss": 1.2468,
+ "step": 375
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.25324577774935964,
+ "learning_rate": 7.598574471677644e-05,
+ "loss": 1.2603,
+ "step": 376
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.35888776531769967,
+ "learning_rate": 7.59484391900302e-05,
+ "loss": 1.1929,
+ "step": 377
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.22048517191014724,
+ "learning_rate": 7.591097036385994e-05,
+ "loss": 1.1783,
+ "step": 378
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.2781160412746083,
+ "learning_rate": 7.587333840847162e-05,
+ "loss": 1.3397,
+ "step": 379
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.24033046830332258,
+ "learning_rate": 7.583554349481222e-05,
+ "loss": 1.2436,
+ "step": 380
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.26413762380260003,
+ "learning_rate": 7.579758579456893e-05,
+ "loss": 1.1917,
+ "step": 381
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.2390937887338632,
+ "learning_rate": 7.575946548016847e-05,
+ "loss": 1.2186,
+ "step": 382
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.25131263043429275,
+ "learning_rate": 7.572118272477622e-05,
+ "loss": 1.2538,
+ "step": 383
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.223974104870702,
+ "learning_rate": 7.568273770229546e-05,
+ "loss": 1.2165,
+ "step": 384
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.25840356830252875,
+ "learning_rate": 7.564413058736663e-05,
+ "loss": 1.1848,
+ "step": 385
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.2723156683076603,
+ "learning_rate": 7.560536155536641e-05,
+ "loss": 1.1982,
+ "step": 386
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.265687427976889,
+ "learning_rate": 7.556643078240708e-05,
+ "loss": 1.231,
+ "step": 387
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.25152762080976077,
+ "learning_rate": 7.552733844533562e-05,
+ "loss": 1.1974,
+ "step": 388
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.2366049485053541,
+ "learning_rate": 7.548808472173292e-05,
+ "loss": 1.3119,
+ "step": 389
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.22092196577077122,
+ "learning_rate": 7.5448669789913e-05,
+ "loss": 1.195,
+ "step": 390
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.22667521540462374,
+ "learning_rate": 7.540909382892217e-05,
+ "loss": 1.1431,
+ "step": 391
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.25432207282646513,
+ "learning_rate": 7.536935701853823e-05,
+ "loss": 1.2173,
+ "step": 392
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.29950506457923864,
+ "learning_rate": 7.53294595392697e-05,
+ "loss": 1.1962,
+ "step": 393
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.24735689607229913,
+ "learning_rate": 7.528940157235487e-05,
+ "loss": 1.2053,
+ "step": 394
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.24394198607459663,
+ "learning_rate": 7.524918329976114e-05,
+ "loss": 1.1979,
+ "step": 395
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.2630369372689188,
+ "learning_rate": 7.520880490418409e-05,
+ "loss": 1.2111,
+ "step": 396
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.26275028416291457,
+ "learning_rate": 7.516826656904664e-05,
+ "loss": 1.2133,
+ "step": 397
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.23938074620956928,
+ "learning_rate": 7.512756847849831e-05,
+ "loss": 1.1355,
+ "step": 398
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.3724960610098138,
+ "learning_rate": 7.508671081741428e-05,
+ "loss": 1.2572,
+ "step": 399
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.24161685847894723,
+ "learning_rate": 7.504569377139462e-05,
+ "loss": 1.1706,
+ "step": 400
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.26121591322670523,
+ "learning_rate": 7.50045175267634e-05,
+ "loss": 1.2135,
+ "step": 401
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2465579498164775,
+ "learning_rate": 7.496318227056788e-05,
+ "loss": 1.1641,
+ "step": 402
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2556288696122787,
+ "learning_rate": 7.492168819057767e-05,
+ "loss": 1.2939,
+ "step": 403
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.261481216336303,
+ "learning_rate": 7.488003547528382e-05,
+ "loss": 1.2026,
+ "step": 404
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2389415135676362,
+ "learning_rate": 7.483822431389799e-05,
+ "loss": 1.2131,
+ "step": 405
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2559201956627192,
+ "learning_rate": 7.479625489635162e-05,
+ "loss": 1.1246,
+ "step": 406
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.27127932491822604,
+ "learning_rate": 7.475412741329504e-05,
+ "loss": 1.2429,
+ "step": 407
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.27006004008695594,
+ "learning_rate": 7.47118420560966e-05,
+ "loss": 1.2388,
+ "step": 408
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.23716823297200537,
+ "learning_rate": 7.466939901684182e-05,
+ "loss": 1.1264,
+ "step": 409
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.2885373898669248,
+ "learning_rate": 7.462679848833252e-05,
+ "loss": 1.2786,
+ "step": 410
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.49215227598639927,
+ "learning_rate": 7.458404066408588e-05,
+ "loss": 1.2386,
+ "step": 411
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.24235735604947403,
+ "learning_rate": 7.454112573833368e-05,
+ "loss": 1.1423,
+ "step": 412
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.2584614748054343,
+ "learning_rate": 7.449805390602127e-05,
+ "loss": 1.2669,
+ "step": 413
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.23806123085998873,
+ "learning_rate": 7.445482536280684e-05,
+ "loss": 1.1763,
+ "step": 414
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.24459517607786851,
+ "learning_rate": 7.441144030506043e-05,
+ "loss": 1.198,
+ "step": 415
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.25801616402700395,
+ "learning_rate": 7.436789892986304e-05,
+ "loss": 1.2136,
+ "step": 416
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.2814819942392514,
+ "learning_rate": 7.432420143500578e-05,
+ "loss": 1.2398,
+ "step": 417
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.22134709322606153,
+ "learning_rate": 7.428034801898893e-05,
+ "loss": 1.1592,
+ "step": 418
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.2899677536995633,
+ "learning_rate": 7.42363388810211e-05,
+ "loss": 1.2296,
+ "step": 419
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.24005943230262294,
+ "learning_rate": 7.419217422101822e-05,
+ "loss": 1.2223,
+ "step": 420
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.26417562369496167,
+ "learning_rate": 7.414785423960275e-05,
+ "loss": 1.2261,
+ "step": 421
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.2580815883535521,
+ "learning_rate": 7.410337913810271e-05,
+ "loss": 1.2021,
+ "step": 422
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.25242217589496435,
+ "learning_rate": 7.405874911855071e-05,
+ "loss": 1.239,
+ "step": 423
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.21991733999839932,
+ "learning_rate": 7.401396438368315e-05,
+ "loss": 1.1716,
+ "step": 424
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.40116538322720213,
+ "learning_rate": 7.396902513693924e-05,
+ "loss": 1.2773,
+ "step": 425
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.277333939455099,
+ "learning_rate": 7.392393158246002e-05,
+ "loss": 1.2574,
+ "step": 426
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.27146087746385755,
+ "learning_rate": 7.387868392508756e-05,
+ "loss": 1.2243,
+ "step": 427
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.255881055620786,
+ "learning_rate": 7.38332823703639e-05,
+ "loss": 1.223,
+ "step": 428
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.24807364856677255,
+ "learning_rate": 7.378772712453021e-05,
+ "loss": 1.1985,
+ "step": 429
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.25746257617764423,
+ "learning_rate": 7.37420183945258e-05,
+ "loss": 1.2502,
+ "step": 430
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.28851991049982234,
+ "learning_rate": 7.369615638798722e-05,
+ "loss": 1.2535,
+ "step": 431
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.24113389811604363,
+ "learning_rate": 7.365014131324725e-05,
+ "loss": 1.2227,
+ "step": 432
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.2414465151257969,
+ "learning_rate": 7.360397337933405e-05,
+ "loss": 1.1884,
+ "step": 433
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.2735463134699831,
+ "learning_rate": 7.355765279597011e-05,
+ "loss": 1.2756,
+ "step": 434
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.2588437452987293,
+ "learning_rate": 7.351117977357139e-05,
+ "loss": 1.2108,
+ "step": 435
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.26573294117796553,
+ "learning_rate": 7.346455452324629e-05,
+ "loss": 1.1821,
+ "step": 436
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.2555476577827304,
+ "learning_rate": 7.341777725679473e-05,
+ "loss": 1.1937,
+ "step": 437
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.2867704132108098,
+ "learning_rate": 7.337084818670716e-05,
+ "loss": 1.2272,
+ "step": 438
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.27726678115981157,
+ "learning_rate": 7.332376752616367e-05,
+ "loss": 1.2331,
+ "step": 439
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.26955338021079955,
+ "learning_rate": 7.32765354890329e-05,
+ "loss": 1.1731,
+ "step": 440
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.25250321202536524,
+ "learning_rate": 7.322915228987116e-05,
+ "loss": 1.2653,
+ "step": 441
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.24748844179765395,
+ "learning_rate": 7.318161814392143e-05,
+ "loss": 1.24,
+ "step": 442
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.28177805247356325,
+ "learning_rate": 7.313393326711239e-05,
+ "loss": 1.185,
+ "step": 443
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.24093242000396312,
+ "learning_rate": 7.30860978760574e-05,
+ "loss": 1.1994,
+ "step": 444
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.26277803901457075,
+ "learning_rate": 7.30381121880536e-05,
+ "loss": 1.212,
+ "step": 445
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2506524258682433,
+ "learning_rate": 7.298997642108079e-05,
+ "loss": 1.2421,
+ "step": 446
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2840599700015824,
+ "learning_rate": 7.294169079380061e-05,
+ "loss": 1.1818,
+ "step": 447
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.24892184038117549,
+ "learning_rate": 7.289325552555538e-05,
+ "loss": 1.1916,
+ "step": 448
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2700898428541357,
+ "learning_rate": 7.284467083636722e-05,
+ "loss": 1.2517,
+ "step": 449
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2617848546539419,
+ "learning_rate": 7.279593694693698e-05,
+ "loss": 1.2063,
+ "step": 450
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2698278585334131,
+ "learning_rate": 7.274705407864332e-05,
+ "loss": 1.194,
+ "step": 451
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.23678313024953834,
+ "learning_rate": 7.26980224535416e-05,
+ "loss": 1.2349,
+ "step": 452
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.24851875792002978,
+ "learning_rate": 7.264884229436293e-05,
+ "loss": 1.1758,
+ "step": 453
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.24122080121681125,
+ "learning_rate": 7.259951382451318e-05,
+ "loss": 1.1962,
+ "step": 454
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.22741322959884405,
+ "learning_rate": 7.25500372680719e-05,
+ "loss": 1.1702,
+ "step": 455
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.2297475610861458,
+ "learning_rate": 7.250041284979137e-05,
+ "loss": 1.1466,
+ "step": 456
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.3057605989721467,
+ "learning_rate": 7.245064079509553e-05,
+ "loss": 1.246,
+ "step": 457
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.2719638501597136,
+ "learning_rate": 7.240072133007899e-05,
+ "loss": 1.2184,
+ "step": 458
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.2436807816414479,
+ "learning_rate": 7.235065468150593e-05,
+ "loss": 1.2324,
+ "step": 459
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.23436349430255515,
+ "learning_rate": 7.23004410768092e-05,
+ "loss": 1.1813,
+ "step": 460
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.2398940990211377,
+ "learning_rate": 7.22500807440892e-05,
+ "loss": 1.1924,
+ "step": 461
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.2605716625062531,
+ "learning_rate": 7.219957391211281e-05,
+ "loss": 1.182,
+ "step": 462
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.260462524570941,
+ "learning_rate": 7.214892081031244e-05,
+ "loss": 1.2136,
+ "step": 463
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.21979766512306334,
+ "learning_rate": 7.209812166878491e-05,
+ "loss": 1.2066,
+ "step": 464
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.23324453647530663,
+ "learning_rate": 7.204717671829051e-05,
+ "loss": 1.1657,
+ "step": 465
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.2529434935507481,
+ "learning_rate": 7.199608619025177e-05,
+ "loss": 1.2093,
+ "step": 466
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.25371701891720116,
+ "learning_rate": 7.194485031675265e-05,
+ "loss": 1.2225,
+ "step": 467
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.23272423066292103,
+ "learning_rate": 7.189346933053725e-05,
+ "loss": 1.1721,
+ "step": 468
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.25122928735587546,
+ "learning_rate": 7.184194346500892e-05,
+ "loss": 1.2537,
+ "step": 469
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.2159270875490409,
+ "learning_rate": 7.179027295422913e-05,
+ "loss": 1.197,
+ "step": 470
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.2633111059076544,
+ "learning_rate": 7.173845803291636e-05,
+ "loss": 1.1721,
+ "step": 471
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.30555936322098703,
+ "learning_rate": 7.168649893644517e-05,
+ "loss": 1.3011,
+ "step": 472
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.23492670111453726,
+ "learning_rate": 7.163439590084502e-05,
+ "loss": 1.1601,
+ "step": 473
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.26602734263721806,
+ "learning_rate": 7.158214916279923e-05,
+ "loss": 1.2808,
+ "step": 474
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.3182695007856262,
+ "learning_rate": 7.152975895964386e-05,
+ "loss": 1.2967,
+ "step": 475
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.2785021674736721,
+ "learning_rate": 7.147722552936673e-05,
+ "loss": 1.1789,
+ "step": 476
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.279474303138652,
+ "learning_rate": 7.142454911060627e-05,
+ "loss": 1.2596,
+ "step": 477
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.2556980144910755,
+ "learning_rate": 7.137172994265044e-05,
+ "loss": 1.2426,
+ "step": 478
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.3311256331993533,
+ "learning_rate": 7.131876826543565e-05,
+ "loss": 1.2059,
+ "step": 479
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.26467296197775253,
+ "learning_rate": 7.12656643195457e-05,
+ "loss": 1.2482,
+ "step": 480
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.27444885274652553,
+ "learning_rate": 7.121241834621064e-05,
+ "loss": 1.2528,
+ "step": 481
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.2572283861115396,
+ "learning_rate": 7.115903058730567e-05,
+ "loss": 1.1849,
+ "step": 482
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.2677065778235683,
+ "learning_rate": 7.11055012853501e-05,
+ "loss": 1.2011,
+ "step": 483
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.29470622036742816,
+ "learning_rate": 7.105183068350619e-05,
+ "loss": 1.2398,
+ "step": 484
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.27609230248969197,
+ "learning_rate": 7.099801902557811e-05,
+ "loss": 1.2259,
+ "step": 485
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.24248634168099284,
+ "learning_rate": 7.094406655601073e-05,
+ "loss": 1.2282,
+ "step": 486
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.2765941767688746,
+ "learning_rate": 7.088997351988865e-05,
+ "loss": 1.2319,
+ "step": 487
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.29347776909858947,
+ "learning_rate": 7.083574016293493e-05,
+ "loss": 1.1765,
+ "step": 488
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.285370295424537,
+ "learning_rate": 7.078136673151008e-05,
+ "loss": 1.26,
+ "step": 489
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.29408734903836536,
+ "learning_rate": 7.072685347261093e-05,
+ "loss": 1.226,
+ "step": 490
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.27437470239205813,
+ "learning_rate": 7.067220063386947e-05,
+ "loss": 1.1976,
+ "step": 491
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.2680770258777871,
+ "learning_rate": 7.061740846355176e-05,
+ "loss": 1.1915,
+ "step": 492
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.27200362879502954,
+ "learning_rate": 7.056247721055678e-05,
+ "loss": 1.2002,
+ "step": 493
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.2637811092577037,
+ "learning_rate": 7.050740712441528e-05,
+ "loss": 1.287,
+ "step": 494
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.24657959209271266,
+ "learning_rate": 7.045219845528875e-05,
+ "loss": 1.2284,
+ "step": 495
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.25311992110358666,
+ "learning_rate": 7.039685145396812e-05,
+ "loss": 1.1616,
+ "step": 496
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.2564633694193358,
+ "learning_rate": 7.034136637187275e-05,
+ "loss": 1.2067,
+ "step": 497
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.2446797651174144,
+ "learning_rate": 7.028574346104926e-05,
+ "loss": 1.2284,
+ "step": 498
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.2592751463399255,
+ "learning_rate": 7.022998297417034e-05,
+ "loss": 1.2371,
+ "step": 499
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.2500713943206808,
+ "learning_rate": 7.017408516453365e-05,
+ "loss": 1.1061,
+ "step": 500
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.2812266276040743,
+ "learning_rate": 7.011805028606064e-05,
+ "loss": 1.1949,
+ "step": 501
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.298829667668083,
+ "learning_rate": 7.006187859329544e-05,
+ "loss": 1.2313,
+ "step": 502
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.26518768159745104,
+ "learning_rate": 7.000557034140361e-05,
+ "loss": 1.2246,
+ "step": 503
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.3037280360760458,
+ "learning_rate": 6.994912578617113e-05,
+ "loss": 1.1617,
+ "step": 504
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.2726903109255714,
+ "learning_rate": 6.989254518400309e-05,
+ "loss": 1.2415,
+ "step": 505
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.25568082003046966,
+ "learning_rate": 6.98358287919226e-05,
+ "loss": 1.1817,
+ "step": 506
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.25633294893705044,
+ "learning_rate": 6.97789768675696e-05,
+ "loss": 1.2149,
+ "step": 507
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.28291439435087123,
+ "learning_rate": 6.972198966919972e-05,
+ "loss": 1.1578,
+ "step": 508
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.27195184756655516,
+ "learning_rate": 6.966486745568308e-05,
+ "loss": 1.2355,
+ "step": 509
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.239159568376005,
+ "learning_rate": 6.960761048650312e-05,
+ "loss": 1.1688,
+ "step": 510
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.22961475425949177,
+ "learning_rate": 6.955021902175543e-05,
+ "loss": 1.2094,
+ "step": 511
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.27443773600741117,
+ "learning_rate": 6.949269332214651e-05,
+ "loss": 1.2559,
+ "step": 512
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.26230551832002097,
+ "learning_rate": 6.94350336489927e-05,
+ "loss": 1.2121,
+ "step": 513
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.2716742985303849,
+ "learning_rate": 6.937724026421892e-05,
+ "loss": 1.2444,
+ "step": 514
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.2537850139439542,
+ "learning_rate": 6.931931343035742e-05,
+ "loss": 1.1327,
+ "step": 515
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.28599587967496826,
+ "learning_rate": 6.926125341054676e-05,
+ "loss": 1.2236,
+ "step": 516
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.26780654378470103,
+ "learning_rate": 6.920306046853043e-05,
+ "loss": 1.2295,
+ "step": 517
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.23606296888412015,
+ "learning_rate": 6.914473486865577e-05,
+ "loss": 1.1543,
+ "step": 518
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.34976881174240837,
+ "learning_rate": 6.90862768758727e-05,
+ "loss": 1.2067,
+ "step": 519
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.2481257873494882,
+ "learning_rate": 6.902768675573258e-05,
+ "loss": 1.2188,
+ "step": 520
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.2996395778117021,
+ "learning_rate": 6.896896477438699e-05,
+ "loss": 1.2326,
+ "step": 521
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.8839768816333193,
+ "learning_rate": 6.891011119858643e-05,
+ "loss": 1.2435,
+ "step": 522
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.2851882482058998,
+ "learning_rate": 6.885112629567927e-05,
+ "loss": 1.2644,
+ "step": 523
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.2813663482913699,
+ "learning_rate": 6.879201033361035e-05,
+ "loss": 1.2309,
+ "step": 524
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.3257551560135454,
+ "learning_rate": 6.873276358091996e-05,
+ "loss": 1.2755,
+ "step": 525
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.28930479952494365,
+ "learning_rate": 6.867338630674247e-05,
+ "loss": 1.1962,
+ "step": 526
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.3077462996938649,
+ "learning_rate": 6.861387878080511e-05,
+ "loss": 1.2402,
+ "step": 527
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.2848900193452761,
+ "learning_rate": 6.855424127342688e-05,
+ "loss": 1.2748,
+ "step": 528
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.4765938812802202,
+ "learning_rate": 6.849447405551718e-05,
+ "loss": 1.2226,
+ "step": 529
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.53184473292579,
+ "learning_rate": 6.843457739857467e-05,
+ "loss": 1.2347,
+ "step": 530
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.6416239346492343,
+ "learning_rate": 6.837455157468596e-05,
+ "loss": 1.2429,
+ "step": 531
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.3188092712502773,
+ "learning_rate": 6.831439685652442e-05,
+ "loss": 1.216,
+ "step": 532
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.3527495731006385,
+ "learning_rate": 6.825411351734895e-05,
+ "loss": 1.1682,
+ "step": 533
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 0.29603753744741856,
+ "learning_rate": 6.819370183100274e-05,
+ "loss": 1.1434,
+ "step": 534
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 0.5252450389976622,
+ "learning_rate": 6.813316207191198e-05,
+ "loss": 1.1943,
+ "step": 535
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 0.32999419558659937,
+ "learning_rate": 6.807249451508466e-05,
+ "loss": 1.192,
+ "step": 536
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 0.3650175469778724,
+ "learning_rate": 6.801169943610929e-05,
+ "loss": 1.2141,
+ "step": 537
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 1.0643532150783557,
+ "learning_rate": 6.795077711115368e-05,
+ "loss": 1.2253,
+ "step": 538
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.5041310609130145,
+ "learning_rate": 6.788972781696363e-05,
+ "loss": 1.278,
+ "step": 539
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.5123058164360991,
+ "learning_rate": 6.782855183086177e-05,
+ "loss": 1.2231,
+ "step": 540
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.3533015702394419,
+ "learning_rate": 6.776724943074619e-05,
+ "loss": 1.2072,
+ "step": 541
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.30253964625417207,
+ "learning_rate": 6.770582089508927e-05,
+ "loss": 1.1382,
+ "step": 542
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.348991618828202,
+ "learning_rate": 6.764426650293633e-05,
+ "loss": 1.2079,
+ "step": 543
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.46017440578788743,
+ "learning_rate": 6.758258653390444e-05,
+ "loss": 1.1813,
+ "step": 544
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.31962101755594885,
+ "learning_rate": 6.75207812681811e-05,
+ "loss": 1.1339,
+ "step": 545
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.37092024548285923,
+ "learning_rate": 6.745885098652298e-05,
+ "loss": 1.2591,
+ "step": 546
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.32347106450715835,
+ "learning_rate": 6.739679597025466e-05,
+ "loss": 1.2017,
+ "step": 547
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.39250187112342494,
+ "learning_rate": 6.733461650126733e-05,
+ "loss": 1.0933,
+ "step": 548
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.473522452217324,
+ "learning_rate": 6.727231286201752e-05,
+ "loss": 1.1124,
+ "step": 549
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.4809062179622052,
+ "learning_rate": 6.720988533552582e-05,
+ "loss": 1.1585,
+ "step": 550
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.3529662801059162,
+ "learning_rate": 6.714733420537559e-05,
+ "loss": 1.0501,
+ "step": 551
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.5958247214391118,
+ "learning_rate": 6.708465975571168e-05,
+ "loss": 1.1086,
+ "step": 552
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.5341364205022454,
+ "learning_rate": 6.70218622712391e-05,
+ "loss": 1.0518,
+ "step": 553
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.3601805724462006,
+ "learning_rate": 6.695894203722181e-05,
+ "loss": 1.1779,
+ "step": 554
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.43410190338280613,
+ "learning_rate": 6.68958993394813e-05,
+ "loss": 1.093,
+ "step": 555
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.46217742572873594,
+ "learning_rate": 6.683273446439546e-05,
+ "loss": 1.0117,
+ "step": 556
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.8591682373623357,
+ "learning_rate": 6.676944769889708e-05,
+ "loss": 1.1002,
+ "step": 557
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.7383229487622726,
+ "learning_rate": 6.670603933047272e-05,
+ "loss": 1.0779,
+ "step": 558
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.5965305891207813,
+ "learning_rate": 6.664250964716131e-05,
+ "loss": 1.0889,
+ "step": 559
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.6030858606684543,
+ "learning_rate": 6.657885893755288e-05,
+ "loss": 1.0982,
+ "step": 560
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.4644510682398409,
+ "learning_rate": 6.65150874907872e-05,
+ "loss": 1.1004,
+ "step": 561
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.43943285132452564,
+ "learning_rate": 6.645119559655254e-05,
+ "loss": 1.0536,
+ "step": 562
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.4456395978600012,
+ "learning_rate": 6.638718354508427e-05,
+ "loss": 1.0733,
+ "step": 563
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.3303824433217466,
+ "learning_rate": 6.632305162716365e-05,
+ "loss": 1.0552,
+ "step": 564
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.3617704823170143,
+ "learning_rate": 6.62588001341164e-05,
+ "loss": 1.1092,
+ "step": 565
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.4465013349903427,
+ "learning_rate": 6.619442935781141e-05,
+ "loss": 1.0781,
+ "step": 566
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.48516780613791277,
+ "learning_rate": 6.612993959065947e-05,
+ "loss": 1.0686,
+ "step": 567
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.38867820318536633,
+ "learning_rate": 6.606533112561186e-05,
+ "loss": 1.1215,
+ "step": 568
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.38566119820378336,
+ "learning_rate": 6.600060425615907e-05,
+ "loss": 1.1213,
+ "step": 569
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.35534855445058544,
+ "learning_rate": 6.593575927632947e-05,
+ "loss": 1.0955,
+ "step": 570
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.38124406233349717,
+ "learning_rate": 6.587079648068795e-05,
+ "loss": 1.0659,
+ "step": 571
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.454750160923548,
+ "learning_rate": 6.580571616433457e-05,
+ "loss": 1.1149,
+ "step": 572
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.35353190088025255,
+ "learning_rate": 6.574051862290325e-05,
+ "loss": 1.0388,
+ "step": 573
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.3249395594793626,
+ "learning_rate": 6.567520415256045e-05,
+ "loss": 1.0784,
+ "step": 574
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.40078898818247227,
+ "learning_rate": 6.560977305000375e-05,
+ "loss": 1.0859,
+ "step": 575
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.4115264795060035,
+ "learning_rate": 6.554422561246054e-05,
+ "loss": 1.1828,
+ "step": 576
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.30090229228069215,
+ "learning_rate": 6.54785621376867e-05,
+ "loss": 1.0901,
+ "step": 577
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.28827860350299206,
+ "learning_rate": 6.541278292396523e-05,
+ "loss": 1.0277,
+ "step": 578
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.34690404488996757,
+ "learning_rate": 6.534688827010484e-05,
+ "loss": 1.048,
+ "step": 579
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.29943113556644785,
+ "learning_rate": 6.528087847543867e-05,
+ "loss": 1.0646,
+ "step": 580
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.37318202575874415,
+ "learning_rate": 6.521475383982291e-05,
+ "loss": 1.1091,
+ "step": 581
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.3049663659203959,
+ "learning_rate": 6.51485146636354e-05,
+ "loss": 1.0552,
+ "step": 582
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.3342407867509692,
+ "learning_rate": 6.508216124777431e-05,
+ "loss": 1.2227,
+ "step": 583
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.3348396047855952,
+ "learning_rate": 6.501569389365674e-05,
+ "loss": 1.0861,
+ "step": 584
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.30951429367513383,
+ "learning_rate": 6.494911290321737e-05,
+ "loss": 1.0461,
+ "step": 585
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.33898401361064606,
+ "learning_rate": 6.488241857890711e-05,
+ "loss": 1.0854,
+ "step": 586
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.4901462068263497,
+ "learning_rate": 6.481561122369164e-05,
+ "loss": 1.1012,
+ "step": 587
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.3179574879809652,
+ "learning_rate": 6.474869114105018e-05,
+ "loss": 1.0451,
+ "step": 588
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.32159328915060714,
+ "learning_rate": 6.468165863497395e-05,
+ "loss": 1.0458,
+ "step": 589
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.36462235008537297,
+ "learning_rate": 6.461451400996491e-05,
+ "loss": 1.1247,
+ "step": 590
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.5373862753611778,
+ "learning_rate": 6.454725757103432e-05,
+ "loss": 1.0542,
+ "step": 591
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.3160409270291303,
+ "learning_rate": 6.447988962370133e-05,
+ "loss": 1.0829,
+ "step": 592
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.390452102978435,
+ "learning_rate": 6.441241047399169e-05,
+ "loss": 1.192,
+ "step": 593
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.3802122712014928,
+ "learning_rate": 6.434482042843627e-05,
+ "loss": 1.1153,
+ "step": 594
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.4081584328242501,
+ "learning_rate": 6.427711979406966e-05,
+ "loss": 1.1635,
+ "step": 595
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.3791962989638633,
+ "learning_rate": 6.420930887842889e-05,
+ "loss": 1.1581,
+ "step": 596
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.33239440056484193,
+ "learning_rate": 6.414138798955189e-05,
+ "loss": 1.0926,
+ "step": 597
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.3279881540815014,
+ "learning_rate": 6.407335743597616e-05,
+ "loss": 1.1386,
+ "step": 598
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.30309644763750837,
+ "learning_rate": 6.40052175267374e-05,
+ "loss": 1.0523,
+ "step": 599
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.3349097308403333,
+ "learning_rate": 6.393696857136801e-05,
+ "loss": 1.0815,
+ "step": 600
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.3288227593556618,
+ "learning_rate": 6.386861087989581e-05,
+ "loss": 1.015,
+ "step": 601
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.36685586740843157,
+ "learning_rate": 6.380014476284255e-05,
+ "loss": 1.1232,
+ "step": 602
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.3620977714204643,
+ "learning_rate": 6.373157053122243e-05,
+ "loss": 1.1138,
+ "step": 603
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.3130587018197183,
+ "learning_rate": 6.366288849654091e-05,
+ "loss": 1.1255,
+ "step": 604
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.3602737087072766,
+ "learning_rate": 6.359409897079303e-05,
+ "loss": 1.0282,
+ "step": 605
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.31168852571991945,
+ "learning_rate": 6.352520226646222e-05,
+ "loss": 1.0779,
+ "step": 606
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.3516045580189353,
+ "learning_rate": 6.345619869651871e-05,
+ "loss": 1.1028,
+ "step": 607
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.3231857927563657,
+ "learning_rate": 6.33870885744182e-05,
+ "loss": 1.1202,
+ "step": 608
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.30205205129701157,
+ "learning_rate": 6.331787221410041e-05,
+ "loss": 1.1369,
+ "step": 609
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.3198359813888166,
+ "learning_rate": 6.32485499299877e-05,
+ "loss": 1.1763,
+ "step": 610
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.3128641370321787,
+ "learning_rate": 6.31791220369835e-05,
+ "loss": 1.0223,
+ "step": 611
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.2989105616213649,
+ "learning_rate": 6.31095888504711e-05,
+ "loss": 1.0358,
+ "step": 612
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.3103537906853337,
+ "learning_rate": 6.303995068631203e-05,
+ "loss": 1.1261,
+ "step": 613
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.28598715532508207,
+ "learning_rate": 6.297020786084467e-05,
+ "loss": 1.0629,
+ "step": 614
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.29809789918093255,
+ "learning_rate": 6.290036069088288e-05,
+ "loss": 1.035,
+ "step": 615
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.33765270252261453,
+ "learning_rate": 6.283040949371451e-05,
+ "loss": 1.1221,
+ "step": 616
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.3424617501293415,
+ "learning_rate": 6.276035458709993e-05,
+ "loss": 1.155,
+ "step": 617
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.3799189737987811,
+ "learning_rate": 6.269019628927067e-05,
+ "loss": 1.0701,
+ "step": 618
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.3358898935253196,
+ "learning_rate": 6.261993491892791e-05,
+ "loss": 1.1649,
+ "step": 619
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.31569979424117356,
+ "learning_rate": 6.254957079524099e-05,
+ "loss": 1.0633,
+ "step": 620
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.3002168156888237,
+ "learning_rate": 6.247910423784609e-05,
+ "loss": 1.0846,
+ "step": 621
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.3097238823450595,
+ "learning_rate": 6.24085355668447e-05,
+ "loss": 1.0808,
+ "step": 622
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.3120312761417578,
+ "learning_rate": 6.233786510280212e-05,
+ "loss": 1.0142,
+ "step": 623
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.3335343015064923,
+ "learning_rate": 6.22670931667461e-05,
+ "loss": 1.0674,
+ "step": 624
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.3234062304634526,
+ "learning_rate": 6.219622008016533e-05,
+ "loss": 1.0981,
+ "step": 625
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.32152678786547273,
+ "learning_rate": 6.212524616500798e-05,
+ "loss": 1.0244,
+ "step": 626
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.39031977608147594,
+ "learning_rate": 6.205417174368023e-05,
+ "loss": 1.1205,
+ "step": 627
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.3806189090017157,
+ "learning_rate": 6.198299713904485e-05,
+ "loss": 1.1134,
+ "step": 628
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.2978349276971668,
+ "learning_rate": 6.191172267441967e-05,
+ "loss": 1.0088,
+ "step": 629
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.3190354077382501,
+ "learning_rate": 6.184034867357617e-05,
+ "loss": 1.108,
+ "step": 630
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.32633048665038994,
+ "learning_rate": 6.176887546073797e-05,
+ "loss": 1.0825,
+ "step": 631
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.3428026413020903,
+ "learning_rate": 6.169730336057939e-05,
+ "loss": 1.0765,
+ "step": 632
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.3475737151929015,
+ "learning_rate": 6.162563269822391e-05,
+ "loss": 1.0693,
+ "step": 633
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.3870252154591392,
+ "learning_rate": 6.15538637992428e-05,
+ "loss": 1.1081,
+ "step": 634
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.33597355193652834,
+ "learning_rate": 6.148199698965352e-05,
+ "loss": 1.0893,
+ "step": 635
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.30805894179787247,
+ "learning_rate": 6.141003259591834e-05,
+ "loss": 1.0995,
+ "step": 636
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.3025073882734066,
+ "learning_rate": 6.133797094494281e-05,
+ "loss": 1.0388,
+ "step": 637
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.3524395196391662,
+ "learning_rate": 6.126581236407429e-05,
+ "loss": 1.1196,
+ "step": 638
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.3377646188130345,
+ "learning_rate": 6.119355718110039e-05,
+ "loss": 1.0382,
+ "step": 639
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.35508400659785483,
+ "learning_rate": 6.112120572424763e-05,
+ "loss": 1.1402,
+ "step": 640
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.3454418793700457,
+ "learning_rate": 6.104875832217982e-05,
+ "loss": 1.1032,
+ "step": 641
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.32629806837059866,
+ "learning_rate": 6.097621530399661e-05,
+ "loss": 1.0959,
+ "step": 642
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.3329536837751315,
+ "learning_rate": 6.090357699923202e-05,
+ "loss": 1.0467,
+ "step": 643
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.32302233828349475,
+ "learning_rate": 6.083084373785287e-05,
+ "loss": 1.0858,
+ "step": 644
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.3310358826507611,
+ "learning_rate": 6.075801585025739e-05,
+ "loss": 1.0715,
+ "step": 645
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.319322035854079,
+ "learning_rate": 6.068509366727362e-05,
+ "loss": 1.177,
+ "step": 646
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.3065230667302707,
+ "learning_rate": 6.061207752015797e-05,
+ "loss": 1.0649,
+ "step": 647
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.29926795565748227,
+ "learning_rate": 6.053896774059368e-05,
+ "loss": 1.1325,
+ "step": 648
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.3556069634279046,
+ "learning_rate": 6.046576466068931e-05,
+ "loss": 1.1366,
+ "step": 649
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.3189191131461966,
+ "learning_rate": 6.039246861297727e-05,
+ "loss": 1.0693,
+ "step": 650
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.3347197156648834,
+ "learning_rate": 6.031907993041227e-05,
+ "loss": 1.1009,
+ "step": 651
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.32274156348185445,
+ "learning_rate": 6.0245598946369826e-05,
+ "loss": 1.1675,
+ "step": 652
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.35534089035455224,
+ "learning_rate": 6.017202599464476e-05,
+ "loss": 1.1723,
+ "step": 653
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.3106026578570133,
+ "learning_rate": 6.009836140944965e-05,
+ "loss": 1.0954,
+ "step": 654
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.3309144454564729,
+ "learning_rate": 6.002460552541331e-05,
+ "loss": 1.0209,
+ "step": 655
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.3023619281400003,
+ "learning_rate": 5.9950758677579345e-05,
+ "loss": 1.0363,
+ "step": 656
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.3311182880219704,
+ "learning_rate": 5.987682120140451e-05,
+ "loss": 1.0515,
+ "step": 657
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.33396486010030413,
+ "learning_rate": 5.980279343275729e-05,
+ "loss": 1.1251,
+ "step": 658
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.3465764556678002,
+ "learning_rate": 5.97286757079163e-05,
+ "loss": 1.165,
+ "step": 659
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.304193441363374,
+ "learning_rate": 5.965446836356882e-05,
+ "loss": 1.0228,
+ "step": 660
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.3415149030413082,
+ "learning_rate": 5.9580171736809224e-05,
+ "loss": 1.0742,
+ "step": 661
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.33138658321132064,
+ "learning_rate": 5.950578616513746e-05,
+ "loss": 1.0843,
+ "step": 662
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.30774403421162994,
+ "learning_rate": 5.943131198645752e-05,
+ "loss": 1.065,
+ "step": 663
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.3428877492183819,
+ "learning_rate": 5.9356749539075885e-05,
+ "loss": 1.1101,
+ "step": 664
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.3621290546130101,
+ "learning_rate": 5.928209916170003e-05,
+ "loss": 1.1372,
+ "step": 665
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.3482375945469884,
+ "learning_rate": 5.9207361193436865e-05,
+ "loss": 1.132,
+ "step": 666
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.31754384974068384,
+ "learning_rate": 5.9132535973791156e-05,
+ "loss": 1.148,
+ "step": 667
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.36003834782050365,
+ "learning_rate": 5.9057623842664044e-05,
+ "loss": 1.1099,
+ "step": 668
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.2963701622969662,
+ "learning_rate": 5.8982625140351464e-05,
+ "loss": 1.0755,
+ "step": 669
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.32579569606066516,
+ "learning_rate": 5.8907540207542616e-05,
+ "loss": 1.0809,
+ "step": 670
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.4247563451753457,
+ "learning_rate": 5.8832369385318416e-05,
+ "loss": 1.097,
+ "step": 671
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.33076932102169776,
+ "learning_rate": 5.875711301514992e-05,
+ "loss": 1.1078,
+ "step": 672
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.3609238032332309,
+ "learning_rate": 5.8681771438896815e-05,
+ "loss": 1.1031,
+ "step": 673
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.325159585649425,
+ "learning_rate": 5.860634499880583e-05,
+ "loss": 1.0707,
+ "step": 674
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.4620687271068983,
+ "learning_rate": 5.853083403750922e-05,
+ "loss": 1.1017,
+ "step": 675
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.33485279064365936,
+ "learning_rate": 5.845523889802316e-05,
+ "loss": 1.0989,
+ "step": 676
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.30952573170841513,
+ "learning_rate": 5.8379559923746214e-05,
+ "loss": 1.0393,
+ "step": 677
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.33498605810588283,
+ "learning_rate": 5.830379745845781e-05,
+ "loss": 1.1259,
+ "step": 678
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.35771921163037307,
+ "learning_rate": 5.822795184631659e-05,
+ "loss": 1.0815,
+ "step": 679
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.3329650192347647,
+ "learning_rate": 5.815202343185894e-05,
+ "loss": 1.1344,
+ "step": 680
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3356634465845771,
+ "learning_rate": 5.807601255999736e-05,
+ "loss": 1.1297,
+ "step": 681
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3289442034151235,
+ "learning_rate": 5.7999919576018934e-05,
+ "loss": 1.022,
+ "step": 682
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3207007334784113,
+ "learning_rate": 5.7923744825583745e-05,
+ "loss": 1.0571,
+ "step": 683
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3582460325329284,
+ "learning_rate": 5.7847488654723304e-05,
+ "loss": 1.0778,
+ "step": 684
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3563317666176927,
+ "learning_rate": 5.777115140983899e-05,
+ "loss": 1.1003,
+ "step": 685
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 3.4694912945702105,
+ "learning_rate": 5.769473343770047e-05,
+ "loss": 1.121,
+ "step": 686
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.43002349520483113,
+ "learning_rate": 5.761823508544411e-05,
+ "loss": 1.0765,
+ "step": 687
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.39467783104839754,
+ "learning_rate": 5.754165670057142e-05,
+ "loss": 1.0788,
+ "step": 688
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.39629029674867916,
+ "learning_rate": 5.7464998630947464e-05,
+ "loss": 1.0812,
+ "step": 689
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.3880152093965208,
+ "learning_rate": 5.738826122479929e-05,
+ "loss": 1.1228,
+ "step": 690
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.3777874121959188,
+ "learning_rate": 5.7311444830714324e-05,
+ "loss": 1.0907,
+ "step": 691
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.38004041653523696,
+ "learning_rate": 5.723454979763882e-05,
+ "loss": 1.1263,
+ "step": 692
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.37049672627797636,
+ "learning_rate": 5.7157576474876246e-05,
+ "loss": 1.1438,
+ "step": 693
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.32973606103437614,
+ "learning_rate": 5.7080525212085725e-05,
+ "loss": 1.0553,
+ "step": 694
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.31674639252070325,
+ "learning_rate": 5.700339635928038e-05,
+ "loss": 1.06,
+ "step": 695
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.32282199426553837,
+ "learning_rate": 5.692619026682588e-05,
+ "loss": 1.0841,
+ "step": 696
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.4810882958061859,
+ "learning_rate": 5.684890728543869e-05,
+ "loss": 1.0803,
+ "step": 697
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.3995638550178378,
+ "learning_rate": 5.6771547766184566e-05,
+ "loss": 1.1187,
+ "step": 698
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.35264932960583484,
+ "learning_rate": 5.669411206047699e-05,
+ "loss": 1.0641,
+ "step": 699
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.35240640524733,
+ "learning_rate": 5.661660052007547e-05,
+ "loss": 1.076,
+ "step": 700
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.3540694609860389,
+ "learning_rate": 5.653901349708401e-05,
+ "loss": 1.1369,
+ "step": 701
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.3196055112925304,
+ "learning_rate": 5.646135134394955e-05,
+ "loss": 1.0677,
+ "step": 702
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.4214141007955914,
+ "learning_rate": 5.6383614413460266e-05,
+ "loss": 1.1139,
+ "step": 703
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.3625611311798579,
+ "learning_rate": 5.630580305874402e-05,
+ "loss": 1.1845,
+ "step": 704
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.3425208672181188,
+ "learning_rate": 5.62279176332668e-05,
+ "loss": 1.174,
+ "step": 705
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.3108419862818321,
+ "learning_rate": 5.6149958490830996e-05,
+ "loss": 1.0331,
+ "step": 706
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.3274644181571904,
+ "learning_rate": 5.607192598557394e-05,
+ "loss": 1.0664,
+ "step": 707
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.346218197215145,
+ "learning_rate": 5.599382047196617e-05,
+ "loss": 1.2088,
+ "step": 708
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.328497632267458,
+ "learning_rate": 5.591564230480989e-05,
+ "loss": 1.0287,
+ "step": 709
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.3708173720611468,
+ "learning_rate": 5.583739183923732e-05,
+ "loss": 1.0883,
+ "step": 710
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.3631427403535479,
+ "learning_rate": 5.575906943070915e-05,
+ "loss": 1.1155,
+ "step": 711
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.3305201458598695,
+ "learning_rate": 5.5680675435012834e-05,
+ "loss": 1.0958,
+ "step": 712
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.34978833532083714,
+ "learning_rate": 5.5602210208261036e-05,
+ "loss": 1.1437,
+ "step": 713
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.3510553882510229,
+ "learning_rate": 5.552367410688999e-05,
+ "loss": 1.0941,
+ "step": 714
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.3523747462465078,
+ "learning_rate": 5.544506748765789e-05,
+ "loss": 1.1289,
+ "step": 715
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.38262637783927445,
+ "learning_rate": 5.5366390707643266e-05,
+ "loss": 1.099,
+ "step": 716
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.38620065989073454,
+ "learning_rate": 5.528764412424334e-05,
+ "loss": 1.083,
+ "step": 717
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.3401355276121096,
+ "learning_rate": 5.520882809517245e-05,
+ "loss": 1.028,
+ "step": 718
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.3392061008943934,
+ "learning_rate": 5.512994297846039e-05,
+ "loss": 1.1083,
+ "step": 719
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.34219480421015414,
+ "learning_rate": 5.505098913245077e-05,
+ "loss": 1.1108,
+ "step": 720
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.3275058061553761,
+ "learning_rate": 5.497196691579945e-05,
+ "loss": 1.111,
+ "step": 721
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.36800249746509384,
+ "learning_rate": 5.489287668747283e-05,
+ "loss": 1.1221,
+ "step": 722
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.4129005533101575,
+ "learning_rate": 5.481371880674628e-05,
+ "loss": 1.0966,
+ "step": 723
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.36563906596251655,
+ "learning_rate": 5.4734493633202505e-05,
+ "loss": 1.0927,
+ "step": 724
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.3614650536839971,
+ "learning_rate": 5.465520152672986e-05,
+ "loss": 1.13,
+ "step": 725
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.36419665098633497,
+ "learning_rate": 5.4575842847520765e-05,
+ "loss": 1.1183,
+ "step": 726
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.34490689807258995,
+ "learning_rate": 5.449641795607005e-05,
+ "loss": 1.0919,
+ "step": 727
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.3627643746876298,
+ "learning_rate": 5.441692721317334e-05,
+ "loss": 1.0411,
+ "step": 728
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.323620411949565,
+ "learning_rate": 5.433737097992537e-05,
+ "loss": 1.0725,
+ "step": 729
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3521599501824965,
+ "learning_rate": 5.425774961771838e-05,
+ "loss": 1.0926,
+ "step": 730
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3302390546764222,
+ "learning_rate": 5.417806348824047e-05,
+ "loss": 1.0468,
+ "step": 731
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3833325802616019,
+ "learning_rate": 5.4098312953473956e-05,
+ "loss": 1.1291,
+ "step": 732
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3708621126835512,
+ "learning_rate": 5.401849837569372e-05,
+ "loss": 1.0887,
+ "step": 733
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3625834373416278,
+ "learning_rate": 5.393862011746555e-05,
+ "loss": 1.0981,
+ "step": 734
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3583343965080617,
+ "learning_rate": 5.385867854164451e-05,
+ "loss": 1.1021,
+ "step": 735
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.34598320594096066,
+ "learning_rate": 5.377867401137332e-05,
+ "loss": 1.1376,
+ "step": 736
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.3046382791315433,
+ "learning_rate": 5.369860689008066e-05,
+ "loss": 1.0206,
+ "step": 737
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.34464948380043725,
+ "learning_rate": 5.3618477541479505e-05,
+ "loss": 1.1084,
+ "step": 738
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.3203242519627101,
+ "learning_rate": 5.353828632956557e-05,
+ "loss": 1.0731,
+ "step": 739
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.3431169960355163,
+ "learning_rate": 5.3458033618615516e-05,
+ "loss": 1.091,
+ "step": 740
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.33492074521678705,
+ "learning_rate": 5.337771977318543e-05,
+ "loss": 1.1112,
+ "step": 741
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.32576546585541344,
+ "learning_rate": 5.3297345158109086e-05,
+ "loss": 1.0993,
+ "step": 742
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.3410007245037574,
+ "learning_rate": 5.3216910138496286e-05,
+ "loss": 1.094,
+ "step": 743
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.34891180680896833,
+ "learning_rate": 5.313641507973128e-05,
+ "loss": 1.1331,
+ "step": 744
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.37135766946717214,
+ "learning_rate": 5.3055860347471006e-05,
+ "loss": 1.1,
+ "step": 745
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.3465019415478411,
+ "learning_rate": 5.297524630764349e-05,
+ "loss": 1.1256,
+ "step": 746
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.37035388481626563,
+ "learning_rate": 5.289457332644615e-05,
+ "loss": 1.0366,
+ "step": 747
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.33853883270759155,
+ "learning_rate": 5.281384177034421e-05,
+ "loss": 1.0547,
+ "step": 748
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.364306618627317,
+ "learning_rate": 5.2733052006068897e-05,
+ "loss": 1.0768,
+ "step": 749
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.4021754315731627,
+ "learning_rate": 5.2652204400615916e-05,
+ "loss": 1.1382,
+ "step": 750
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.3332185389039008,
+ "learning_rate": 5.257129932124368e-05,
+ "loss": 1.0815,
+ "step": 751
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.3453105709879854,
+ "learning_rate": 5.249033713547173e-05,
+ "loss": 1.1109,
+ "step": 752
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.3385397539717797,
+ "learning_rate": 5.2409318211078966e-05,
+ "loss": 1.0529,
+ "step": 753
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.33197994450130447,
+ "learning_rate": 5.232824291610206e-05,
+ "loss": 1.0721,
+ "step": 754
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.32836289576124167,
+ "learning_rate": 5.224711161883375e-05,
+ "loss": 1.0459,
+ "step": 755
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.32491620058831744,
+ "learning_rate": 5.216592468782117e-05,
+ "loss": 1.0897,
+ "step": 756
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.3137879047811153,
+ "learning_rate": 5.2084682491864155e-05,
+ "loss": 1.096,
+ "step": 757
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.3356938043023012,
+ "learning_rate": 5.200338540001364e-05,
+ "loss": 1.0827,
+ "step": 758
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.36044340490819055,
+ "learning_rate": 5.192203378156984e-05,
+ "loss": 1.0617,
+ "step": 759
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.34674262047888293,
+ "learning_rate": 5.184062800608077e-05,
+ "loss": 1.1267,
+ "step": 760
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.32469442322149333,
+ "learning_rate": 5.1759168443340375e-05,
+ "loss": 1.1483,
+ "step": 761
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.3290384307774216,
+ "learning_rate": 5.167765546338698e-05,
+ "loss": 1.047,
+ "step": 762
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.31637612188770403,
+ "learning_rate": 5.1596089436501525e-05,
+ "loss": 1.0311,
+ "step": 763
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.3168693829641207,
+ "learning_rate": 5.151447073320597e-05,
+ "loss": 1.1405,
+ "step": 764
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.34322421571238926,
+ "learning_rate": 5.143279972426153e-05,
+ "loss": 1.1428,
+ "step": 765
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.3291030435830325,
+ "learning_rate": 5.1351076780667026e-05,
+ "loss": 1.0473,
+ "step": 766
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.33772039158758044,
+ "learning_rate": 5.1269302273657195e-05,
+ "loss": 1.0909,
+ "step": 767
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.3802031736890876,
+ "learning_rate": 5.118747657470102e-05,
+ "loss": 1.1482,
+ "step": 768
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.3296067628997962,
+ "learning_rate": 5.1105600055500025e-05,
+ "loss": 1.0085,
+ "step": 769
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.3707139982828035,
+ "learning_rate": 5.102367308798658e-05,
+ "loss": 1.0746,
+ "step": 770
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.3378537316757011,
+ "learning_rate": 5.094169604432225e-05,
+ "loss": 1.0482,
+ "step": 771
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.4008417246255145,
+ "learning_rate": 5.085966929689601e-05,
+ "loss": 1.1065,
+ "step": 772
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.3244385106988064,
+ "learning_rate": 5.077759321832271e-05,
+ "loss": 1.0827,
+ "step": 773
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.37228575732812336,
+ "learning_rate": 5.0695468181441215e-05,
+ "loss": 1.1146,
+ "step": 774
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.33761714797540276,
+ "learning_rate": 5.061329455931283e-05,
+ "loss": 1.092,
+ "step": 775
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.3158158390913494,
+ "learning_rate": 5.053107272521955e-05,
+ "loss": 1.1058,
+ "step": 776
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.3691501929738938,
+ "learning_rate": 5.044880305266239e-05,
+ "loss": 1.1599,
+ "step": 777
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.33730914019805525,
+ "learning_rate": 5.0366485915359645e-05,
+ "loss": 1.0615,
+ "step": 778
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.34970059240017,
+ "learning_rate": 5.0284121687245257e-05,
+ "loss": 1.1475,
+ "step": 779
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 0.3374028029407197,
+ "learning_rate": 5.020171074246707e-05,
+ "loss": 1.0926,
+ "step": 780
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 0.3350020681123992,
+ "learning_rate": 5.011925345538514e-05,
+ "loss": 1.1276,
+ "step": 781
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 0.3224228965786606,
+ "learning_rate": 5.003675020057003e-05,
+ "loss": 1.0183,
+ "step": 782
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 0.3357310714740298,
+ "learning_rate": 4.995420135280114e-05,
+ "loss": 1.1114,
+ "step": 783
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 0.3590203255363759,
+ "learning_rate": 4.9871607287064966e-05,
+ "loss": 1.1504,
+ "step": 784
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.33011195419611655,
+ "learning_rate": 4.9788968378553396e-05,
+ "loss": 1.0826,
+ "step": 785
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.31088868195439445,
+ "learning_rate": 4.970628500266207e-05,
+ "loss": 1.0704,
+ "step": 786
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.3144996103179409,
+ "learning_rate": 4.962355753498858e-05,
+ "loss": 1.1403,
+ "step": 787
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.3147269555419068,
+ "learning_rate": 4.954078635133081e-05,
+ "loss": 1.0898,
+ "step": 788
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.3280151747783868,
+ "learning_rate": 4.945797182768524e-05,
+ "loss": 1.1115,
+ "step": 789
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.3551996569232493,
+ "learning_rate": 4.937511434024524e-05,
+ "loss": 1.1731,
+ "step": 790
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 0.343863208057807,
+ "learning_rate": 4.9292214265399336e-05,
+ "loss": 1.0866,
+ "step": 791
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 0.37316699385322466,
+ "learning_rate": 4.920927197972949e-05,
+ "loss": 1.1083,
+ "step": 792
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 0.3635739774067832,
+ "learning_rate": 4.9126287860009453e-05,
+ "loss": 1.1393,
+ "step": 793
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 0.3755910554972886,
+ "learning_rate": 4.9043262283202974e-05,
+ "loss": 1.1624,
+ "step": 794
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 0.3635899120146823,
+ "learning_rate": 4.8960195626462145e-05,
+ "loss": 1.2095,
+ "step": 795
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 0.3642202684342816,
+ "learning_rate": 4.8877088267125664e-05,
+ "loss": 1.1099,
+ "step": 796
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 0.3339946548799316,
+ "learning_rate": 4.879394058271712e-05,
+ "loss": 1.1157,
+ "step": 797
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 0.3457189703100475,
+ "learning_rate": 4.871075295094329e-05,
+ "loss": 1.129,
+ "step": 798
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 0.3550931839691424,
+ "learning_rate": 4.862752574969241e-05,
+ "loss": 1.076,
+ "step": 799
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 0.36139108917966734,
+ "learning_rate": 4.8544259357032475e-05,
+ "loss": 1.1577,
+ "step": 800
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.39569703665247874,
+ "learning_rate": 4.8460954151209486e-05,
+ "loss": 1.0543,
+ "step": 801
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.3879033670170866,
+ "learning_rate": 4.837761051064579e-05,
+ "loss": 1.0688,
+ "step": 802
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.3796846713967255,
+ "learning_rate": 4.8294228813938285e-05,
+ "loss": 0.9911,
+ "step": 803
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.4007831430409375,
+ "learning_rate": 4.8210809439856804e-05,
+ "loss": 1.0126,
+ "step": 804
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.37588078665500885,
+ "learning_rate": 4.8127352767342276e-05,
+ "loss": 0.9302,
+ "step": 805
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.4078509175013281,
+ "learning_rate": 4.8043859175505095e-05,
+ "loss": 0.9982,
+ "step": 806
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.379096046185539,
+ "learning_rate": 4.7960329043623344e-05,
+ "loss": 1.0035,
+ "step": 807
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.3813938568133554,
+ "learning_rate": 4.787676275114111e-05,
+ "loss": 0.9579,
+ "step": 808
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.3686863564511168,
+ "learning_rate": 4.779316067766673e-05,
+ "loss": 1.0105,
+ "step": 809
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.4263940878847523,
+ "learning_rate": 4.770952320297109e-05,
+ "loss": 1.0677,
+ "step": 810
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.37178778374665006,
+ "learning_rate": 4.7625850706985886e-05,
+ "loss": 1.0019,
+ "step": 811
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.36803355429187945,
+ "learning_rate": 4.7542143569801894e-05,
+ "loss": 0.9937,
+ "step": 812
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.3897072472941179,
+ "learning_rate": 4.745840217166725e-05,
+ "loss": 1.0877,
+ "step": 813
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.35571833841716255,
+ "learning_rate": 4.737462689298577e-05,
+ "loss": 1.0015,
+ "step": 814
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.38930229991094323,
+ "learning_rate": 4.7290818114315086e-05,
+ "loss": 1.028,
+ "step": 815
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.411005007105147,
+ "learning_rate": 4.72069762163651e-05,
+ "loss": 1.0068,
+ "step": 816
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.3980240190337736,
+ "learning_rate": 4.7123101579996106e-05,
+ "loss": 0.9919,
+ "step": 817
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.36369517703115467,
+ "learning_rate": 4.7039194586217136e-05,
+ "loss": 0.967,
+ "step": 818
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.38591148840458894,
+ "learning_rate": 4.695525561618418e-05,
+ "loss": 0.9743,
+ "step": 819
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.45873135108949337,
+ "learning_rate": 4.687128505119853e-05,
+ "loss": 1.0516,
+ "step": 820
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.3866330351411308,
+ "learning_rate": 4.6787283272704966e-05,
+ "loss": 0.9939,
+ "step": 821
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.4620340173291326,
+ "learning_rate": 4.670325066229009e-05,
+ "loss": 1.0526,
+ "step": 822
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.38877454299870284,
+ "learning_rate": 4.661918760168052e-05,
+ "loss": 0.9904,
+ "step": 823
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.3880489386116793,
+ "learning_rate": 4.653509447274121e-05,
+ "loss": 0.9623,
+ "step": 824
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.3827392356186151,
+ "learning_rate": 4.6450971657473743e-05,
+ "loss": 1.0772,
+ "step": 825
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.4132814641854327,
+ "learning_rate": 4.63668195380145e-05,
+ "loss": 1.0533,
+ "step": 826
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.3703610182402835,
+ "learning_rate": 4.628263849663301e-05,
+ "loss": 0.9336,
+ "step": 827
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.4152053683299823,
+ "learning_rate": 4.619842891573016e-05,
+ "loss": 0.9801,
+ "step": 828
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.41791059043554274,
+ "learning_rate": 4.6114191177836514e-05,
+ "loss": 1.0617,
+ "step": 829
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.46363896517299136,
+ "learning_rate": 4.6029925665610524e-05,
+ "loss": 0.9687,
+ "step": 830
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.41141959057512445,
+ "learning_rate": 4.59456327618368e-05,
+ "loss": 1.0965,
+ "step": 831
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.3789192764519836,
+ "learning_rate": 4.5861312849424386e-05,
+ "loss": 0.9793,
+ "step": 832
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.4047291581107866,
+ "learning_rate": 4.5776966311405035e-05,
+ "loss": 1.0342,
+ "step": 833
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.4425157400959256,
+ "learning_rate": 4.5692593530931416e-05,
+ "loss": 1.0892,
+ "step": 834
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.3707332144806616,
+ "learning_rate": 4.560819489127545e-05,
+ "loss": 0.9815,
+ "step": 835
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.3897444102572823,
+ "learning_rate": 4.552377077582646e-05,
+ "loss": 0.9884,
+ "step": 836
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.42725787957019346,
+ "learning_rate": 4.543932156808959e-05,
+ "loss": 0.9972,
+ "step": 837
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.40615269781820007,
+ "learning_rate": 4.535484765168386e-05,
+ "loss": 0.9529,
+ "step": 838
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.3505829736050887,
+ "learning_rate": 4.527034941034063e-05,
+ "loss": 0.9492,
+ "step": 839
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.36688064686440497,
+ "learning_rate": 4.51858272279017e-05,
+ "loss": 0.9592,
+ "step": 840
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.4043468777955929,
+ "learning_rate": 4.5101281488317634e-05,
+ "loss": 1.048,
+ "step": 841
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.3811489793242706,
+ "learning_rate": 4.501671257564602e-05,
+ "loss": 1.0138,
+ "step": 842
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.39813004142325986,
+ "learning_rate": 4.49321208740497e-05,
+ "loss": 1.071,
+ "step": 843
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.3809751022095503,
+ "learning_rate": 4.484750676779504e-05,
+ "loss": 1.0351,
+ "step": 844
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.384312178013823,
+ "learning_rate": 4.4762870641250185e-05,
+ "loss": 0.9737,
+ "step": 845
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.40769404907923557,
+ "learning_rate": 4.467821287888331e-05,
+ "loss": 0.9659,
+ "step": 846
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.39594136851937817,
+ "learning_rate": 4.459353386526086e-05,
+ "loss": 0.9405,
+ "step": 847
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.37180161011562185,
+ "learning_rate": 4.450883398504584e-05,
+ "loss": 1.0732,
+ "step": 848
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.3772603623154663,
+ "learning_rate": 4.442411362299602e-05,
+ "loss": 0.9646,
+ "step": 849
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.4346142368506476,
+ "learning_rate": 4.433937316396224e-05,
+ "loss": 0.9572,
+ "step": 850
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.3997258084612474,
+ "learning_rate": 4.425461299288659e-05,
+ "loss": 0.9492,
+ "step": 851
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.41245476865247155,
+ "learning_rate": 4.416983349480073e-05,
+ "loss": 0.8732,
+ "step": 852
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.6761499297939195,
+ "learning_rate": 4.408503505482412e-05,
+ "loss": 1.0425,
+ "step": 853
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.40340979486858985,
+ "learning_rate": 4.400021805816225e-05,
+ "loss": 0.9596,
+ "step": 854
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.43290732392699666,
+ "learning_rate": 4.391538289010493e-05,
+ "loss": 1.0123,
+ "step": 855
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.36878054442190156,
+ "learning_rate": 4.383052993602448e-05,
+ "loss": 0.9448,
+ "step": 856
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.7146145128961262,
+ "learning_rate": 4.374565958137404e-05,
+ "loss": 1.0342,
+ "step": 857
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.44429357586145607,
+ "learning_rate": 4.3660772211685775e-05,
+ "loss": 1.0436,
+ "step": 858
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.4565751973640598,
+ "learning_rate": 4.357586821256918e-05,
+ "loss": 1.0311,
+ "step": 859
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.3919991236654277,
+ "learning_rate": 4.349094796970925e-05,
+ "loss": 1.1401,
+ "step": 860
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.4347441949284011,
+ "learning_rate": 4.3406011868864795e-05,
+ "loss": 1.0252,
+ "step": 861
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.38339976027415407,
+ "learning_rate": 4.3321060295866635e-05,
+ "loss": 1.0536,
+ "step": 862
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.37688790408195166,
+ "learning_rate": 4.32360936366159e-05,
+ "loss": 1.012,
+ "step": 863
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.4317538207582504,
+ "learning_rate": 4.315111227708224e-05,
+ "loss": 1.0505,
+ "step": 864
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.4145324872228796,
+ "learning_rate": 4.306611660330208e-05,
+ "loss": 1.0496,
+ "step": 865
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.416535227064448,
+ "learning_rate": 4.298110700137687e-05,
+ "loss": 0.9628,
+ "step": 866
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.46564356187492717,
+ "learning_rate": 4.2896083857471345e-05,
+ "loss": 1.0016,
+ "step": 867
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.4228980941889828,
+ "learning_rate": 4.281104755781172e-05,
+ "loss": 1.0904,
+ "step": 868
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.4267821214430208,
+ "learning_rate": 4.272599848868402e-05,
+ "loss": 1.0544,
+ "step": 869
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.45763332095792075,
+ "learning_rate": 4.264093703643223e-05,
+ "loss": 1.0686,
+ "step": 870
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.4347555516548761,
+ "learning_rate": 4.255586358745662e-05,
+ "loss": 1.0264,
+ "step": 871
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.3817726381103066,
+ "learning_rate": 4.247077852821194e-05,
+ "loss": 1.0045,
+ "step": 872
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.3882808845457995,
+ "learning_rate": 4.2385682245205685e-05,
+ "loss": 1.0193,
+ "step": 873
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.39410930252966775,
+ "learning_rate": 4.230057512499634e-05,
+ "loss": 0.9832,
+ "step": 874
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.4373094593907156,
+ "learning_rate": 4.221545755419159e-05,
+ "loss": 1.0343,
+ "step": 875
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.4462843721698891,
+ "learning_rate": 4.2130329919446646e-05,
+ "loss": 1.0324,
+ "step": 876
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.4747274247448112,
+ "learning_rate": 4.20451926074624e-05,
+ "loss": 0.9903,
+ "step": 877
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.4157472897596409,
+ "learning_rate": 4.196004600498369e-05,
+ "loss": 0.9266,
+ "step": 878
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.41625958088960685,
+ "learning_rate": 4.1874890498797605e-05,
+ "loss": 0.9658,
+ "step": 879
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.44784944130574333,
+ "learning_rate": 4.178972647573163e-05,
+ "loss": 0.9671,
+ "step": 880
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.4116839177956385,
+ "learning_rate": 4.1704554322651975e-05,
+ "loss": 0.9591,
+ "step": 881
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.4025569857639452,
+ "learning_rate": 4.161937442646176e-05,
+ "loss": 1.0072,
+ "step": 882
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.41518478124763597,
+ "learning_rate": 4.1534187174099285e-05,
+ "loss": 1.0275,
+ "step": 883
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.3987815564664466,
+ "learning_rate": 4.1448992952536275e-05,
+ "loss": 1.0039,
+ "step": 884
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.4270378155679982,
+ "learning_rate": 4.136379214877609e-05,
+ "loss": 1.0369,
+ "step": 885
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.42144733922972777,
+ "learning_rate": 4.127858514985203e-05,
+ "loss": 1.0269,
+ "step": 886
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.4198664438272548,
+ "learning_rate": 4.1193372342825494e-05,
+ "loss": 1.0427,
+ "step": 887
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.3985048256281719,
+ "learning_rate": 4.1108154114784275e-05,
+ "loss": 1.0702,
+ "step": 888
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.605520808292362,
+ "learning_rate": 4.102293085284083e-05,
+ "loss": 0.9749,
+ "step": 889
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.4150515863924052,
+ "learning_rate": 4.0937702944130426e-05,
+ "loss": 1.0231,
+ "step": 890
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.3935997576565283,
+ "learning_rate": 4.085247077580948e-05,
+ "loss": 1.0014,
+ "step": 891
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.399446131403209,
+ "learning_rate": 4.076723473505374e-05,
+ "loss": 0.9602,
+ "step": 892
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.4406024397129952,
+ "learning_rate": 4.068199520905655e-05,
+ "loss": 1.0425,
+ "step": 893
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.4036917571496492,
+ "learning_rate": 4.059675258502709e-05,
+ "loss": 0.973,
+ "step": 894
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.4057196459433299,
+ "learning_rate": 4.05115072501886e-05,
+ "loss": 0.9997,
+ "step": 895
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.4374124954708759,
+ "learning_rate": 4.0426259591776645e-05,
+ "loss": 0.9826,
+ "step": 896
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.4545699371285546,
+ "learning_rate": 4.0341009997037356e-05,
+ "loss": 1.0554,
+ "step": 897
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.4251917031237376,
+ "learning_rate": 4.025575885322563e-05,
+ "loss": 1.0217,
+ "step": 898
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.3857651901893941,
+ "learning_rate": 4.0170506547603427e-05,
+ "loss": 1.0317,
+ "step": 899
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.46323573798490897,
+ "learning_rate": 4.008525346743797e-05,
+ "loss": 1.0398,
+ "step": 900
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.4011541121460918,
+ "learning_rate": 4e-05,
+ "loss": 1.0706,
+ "step": 901
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.46493281221028004,
+ "learning_rate": 3.991474653256204e-05,
+ "loss": 1.0525,
+ "step": 902
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.41683080924539023,
+ "learning_rate": 3.982949345239658e-05,
+ "loss": 1.0905,
+ "step": 903
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.4750350025014512,
+ "learning_rate": 3.974424114677437e-05,
+ "loss": 1.049,
+ "step": 904
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.3867445073614702,
+ "learning_rate": 3.965899000296266e-05,
+ "loss": 0.9624,
+ "step": 905
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.378387661131469,
+ "learning_rate": 3.957374040822335e-05,
+ "loss": 1.0223,
+ "step": 906
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.3905996390559077,
+ "learning_rate": 3.948849274981141e-05,
+ "loss": 1.0315,
+ "step": 907
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.4139717689498189,
+ "learning_rate": 3.940324741497291e-05,
+ "loss": 0.9297,
+ "step": 908
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.39086355684921514,
+ "learning_rate": 3.9318004790943465e-05,
+ "loss": 0.9684,
+ "step": 909
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.4334915643736419,
+ "learning_rate": 3.923276526494627e-05,
+ "loss": 0.996,
+ "step": 910
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.40782018986229496,
+ "learning_rate": 3.9147529224190536e-05,
+ "loss": 1.0875,
+ "step": 911
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.43578702386625723,
+ "learning_rate": 3.906229705586959e-05,
+ "loss": 1.1214,
+ "step": 912
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.414945683409524,
+ "learning_rate": 3.89770691471592e-05,
+ "loss": 1.1037,
+ "step": 913
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.40665801579679106,
+ "learning_rate": 3.889184588521573e-05,
+ "loss": 0.9743,
+ "step": 914
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.4064250611574517,
+ "learning_rate": 3.880662765717453e-05,
+ "loss": 0.8814,
+ "step": 915
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.48023046298843347,
+ "learning_rate": 3.8721414850147985e-05,
+ "loss": 0.9663,
+ "step": 916
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.42358024833566227,
+ "learning_rate": 3.8636207851223924e-05,
+ "loss": 1.0491,
+ "step": 917
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.41522494786195835,
+ "learning_rate": 3.855100704746374e-05,
+ "loss": 1.033,
+ "step": 918
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.40890517696706496,
+ "learning_rate": 3.8465812825900715e-05,
+ "loss": 1.0369,
+ "step": 919
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.4325851866408538,
+ "learning_rate": 3.838062557353825e-05,
+ "loss": 0.9362,
+ "step": 920
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.4185860919050069,
+ "learning_rate": 3.8295445677348025e-05,
+ "loss": 1.026,
+ "step": 921
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.3975762375934804,
+ "learning_rate": 3.8210273524268375e-05,
+ "loss": 1.0412,
+ "step": 922
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.41725298241987474,
+ "learning_rate": 3.8125109501202395e-05,
+ "loss": 1.0004,
+ "step": 923
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.455183913149126,
+ "learning_rate": 3.803995399501632e-05,
+ "loss": 1.0594,
+ "step": 924
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.3993993856483797,
+ "learning_rate": 3.795480739253761e-05,
+ "loss": 0.9761,
+ "step": 925
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.41638796815161494,
+ "learning_rate": 3.786967008055337e-05,
+ "loss": 1.0369,
+ "step": 926
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.40015112695810534,
+ "learning_rate": 3.7784542445808414e-05,
+ "loss": 1.0271,
+ "step": 927
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.3995749494729548,
+ "learning_rate": 3.769942487500368e-05,
+ "loss": 1.0613,
+ "step": 928
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.4073556267037492,
+ "learning_rate": 3.761431775479432e-05,
+ "loss": 1.0528,
+ "step": 929
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.44218148822636044,
+ "learning_rate": 3.752922147178807e-05,
+ "loss": 1.0742,
+ "step": 930
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.4435063485893757,
+ "learning_rate": 3.744413641254339e-05,
+ "loss": 1.0825,
+ "step": 931
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.46841574994107515,
+ "learning_rate": 3.735906296356778e-05,
+ "loss": 1.0471,
+ "step": 932
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.40093716627657294,
+ "learning_rate": 3.727400151131599e-05,
+ "loss": 1.0474,
+ "step": 933
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3866415067997244,
+ "learning_rate": 3.71889524421883e-05,
+ "loss": 1.0209,
+ "step": 934
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.4881546110706673,
+ "learning_rate": 3.710391614252867e-05,
+ "loss": 1.0768,
+ "step": 935
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.4133084639324523,
+ "learning_rate": 3.701889299862314e-05,
+ "loss": 1.0423,
+ "step": 936
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.40523563084001196,
+ "learning_rate": 3.6933883396697936e-05,
+ "loss": 1.005,
+ "step": 937
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.38757352418642405,
+ "learning_rate": 3.684888772291777e-05,
+ "loss": 0.9659,
+ "step": 938
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.421394551890689,
+ "learning_rate": 3.676390636338411e-05,
+ "loss": 1.0454,
+ "step": 939
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.45693070958342186,
+ "learning_rate": 3.667893970413337e-05,
+ "loss": 1.1459,
+ "step": 940
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.4172025376377795,
+ "learning_rate": 3.659398813113522e-05,
+ "loss": 0.9954,
+ "step": 941
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.3871624019510191,
+ "learning_rate": 3.650905203029075e-05,
+ "loss": 1.0441,
+ "step": 942
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.38541342610032325,
+ "learning_rate": 3.642413178743083e-05,
+ "loss": 0.9465,
+ "step": 943
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.4208031670525743,
+ "learning_rate": 3.633922778831423e-05,
+ "loss": 1.0367,
+ "step": 944
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.41867209013040035,
+ "learning_rate": 3.6254340418625975e-05,
+ "loss": 1.0868,
+ "step": 945
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.431758149074127,
+ "learning_rate": 3.6169470063975536e-05,
+ "loss": 1.0689,
+ "step": 946
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.4988803338819952,
+ "learning_rate": 3.608461710989509e-05,
+ "loss": 1.0879,
+ "step": 947
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.4094858411191625,
+ "learning_rate": 3.5999781941837755e-05,
+ "loss": 1.0332,
+ "step": 948
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.3831847195845155,
+ "learning_rate": 3.591496494517589e-05,
+ "loss": 0.9751,
+ "step": 949
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.40535692821947267,
+ "learning_rate": 3.5830166505199284e-05,
+ "loss": 1.0594,
+ "step": 950
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.4875663789389966,
+ "learning_rate": 3.574538700711343e-05,
+ "loss": 0.9749,
+ "step": 951
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.5155923998285772,
+ "learning_rate": 3.566062683603778e-05,
+ "loss": 0.9999,
+ "step": 952
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.5280285947816189,
+ "learning_rate": 3.557588637700399e-05,
+ "loss": 1.1061,
+ "step": 953
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.46573407357796753,
+ "learning_rate": 3.5491166014954174e-05,
+ "loss": 1.102,
+ "step": 954
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.4122542582865379,
+ "learning_rate": 3.540646613473915e-05,
+ "loss": 1.0469,
+ "step": 955
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.41414476980823367,
+ "learning_rate": 3.53217871211167e-05,
+ "loss": 0.9973,
+ "step": 956
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.4030707611608045,
+ "learning_rate": 3.523712935874983e-05,
+ "loss": 0.9796,
+ "step": 957
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.4235313349747291,
+ "learning_rate": 3.5152493232204975e-05,
+ "loss": 1.0601,
+ "step": 958
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.4165235178302652,
+ "learning_rate": 3.5067879125950316e-05,
+ "loss": 1.0358,
+ "step": 959
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.44083984701952955,
+ "learning_rate": 3.4983287424354e-05,
+ "loss": 1.0957,
+ "step": 960
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.3781161039063518,
+ "learning_rate": 3.489871851168238e-05,
+ "loss": 0.9838,
+ "step": 961
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.4095747724038915,
+ "learning_rate": 3.4814172772098314e-05,
+ "loss": 1.014,
+ "step": 962
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.42197119558898466,
+ "learning_rate": 3.472965058965938e-05,
+ "loss": 1.0096,
+ "step": 963
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.4339963388152155,
+ "learning_rate": 3.464515234831615e-05,
+ "loss": 1.0158,
+ "step": 964
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.4284638765548976,
+ "learning_rate": 3.4560678431910424e-05,
+ "loss": 1.1047,
+ "step": 965
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.3935144535755794,
+ "learning_rate": 3.447622922417355e-05,
+ "loss": 0.9925,
+ "step": 966
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.45884343961025,
+ "learning_rate": 3.439180510872457e-05,
+ "loss": 1.0583,
+ "step": 967
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.42439320759788374,
+ "learning_rate": 3.4307406469068604e-05,
+ "loss": 0.9305,
+ "step": 968
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.45770082390324845,
+ "learning_rate": 3.4223033688594985e-05,
+ "loss": 1.054,
+ "step": 969
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.4284786643981094,
+ "learning_rate": 3.4138687150575634e-05,
+ "loss": 0.9409,
+ "step": 970
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.41356124058383237,
+ "learning_rate": 3.4054367238163215e-05,
+ "loss": 1.0739,
+ "step": 971
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.4255832249412624,
+ "learning_rate": 3.3970074334389496e-05,
+ "loss": 1.0764,
+ "step": 972
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.4337695536142702,
+ "learning_rate": 3.388580882216349e-05,
+ "loss": 1.0195,
+ "step": 973
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.41363495650922455,
+ "learning_rate": 3.380157108426985e-05,
+ "loss": 1.0615,
+ "step": 974
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.3950691247686479,
+ "learning_rate": 3.371736150336701e-05,
+ "loss": 1.0283,
+ "step": 975
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.4042823691555822,
+ "learning_rate": 3.3633180461985505e-05,
+ "loss": 1.0309,
+ "step": 976
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.3921158850479399,
+ "learning_rate": 3.354902834252627e-05,
+ "loss": 1.068,
+ "step": 977
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.38349545732725654,
+ "learning_rate": 3.346490552725879e-05,
+ "loss": 1.0886,
+ "step": 978
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.38689221457248724,
+ "learning_rate": 3.33808123983195e-05,
+ "loss": 0.987,
+ "step": 979
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.38660550867425647,
+ "learning_rate": 3.329674933770992e-05,
+ "loss": 1.069,
+ "step": 980
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.3917593746353493,
+ "learning_rate": 3.321271672729504e-05,
+ "loss": 0.9858,
+ "step": 981
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.4292314072827653,
+ "learning_rate": 3.3128714948801474e-05,
+ "loss": 1.0477,
+ "step": 982
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.479414638418211,
+ "learning_rate": 3.3044744383815835e-05,
+ "loss": 1.0763,
+ "step": 983
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.380831894995463,
+ "learning_rate": 3.2960805413782884e-05,
+ "loss": 1.0393,
+ "step": 984
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.42402274703362114,
+ "learning_rate": 3.2876898420003914e-05,
+ "loss": 1.0837,
+ "step": 985
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.4571447203722258,
+ "learning_rate": 3.279302378363491e-05,
+ "loss": 1.0594,
+ "step": 986
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3776673281658531,
+ "learning_rate": 3.270918188568493e-05,
+ "loss": 1.0121,
+ "step": 987
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.4367173448132159,
+ "learning_rate": 3.262537310701425e-05,
+ "loss": 0.9612,
+ "step": 988
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.43679765208840926,
+ "learning_rate": 3.254159782833276e-05,
+ "loss": 1.0565,
+ "step": 989
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.4018151260013493,
+ "learning_rate": 3.2457856430198126e-05,
+ "loss": 0.9975,
+ "step": 990
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.40461959940721076,
+ "learning_rate": 3.237414929301412e-05,
+ "loss": 1.0255,
+ "step": 991
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.41342378541540653,
+ "learning_rate": 3.2290476797028926e-05,
+ "loss": 1.024,
+ "step": 992
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.3926173909201105,
+ "learning_rate": 3.220683932233328e-05,
+ "loss": 1.0877,
+ "step": 993
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.3835623199834992,
+ "learning_rate": 3.21232372488589e-05,
+ "loss": 1.0992,
+ "step": 994
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.39901809497083496,
+ "learning_rate": 3.2039670956376656e-05,
+ "loss": 1.0723,
+ "step": 995
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.3979604537466272,
+ "learning_rate": 3.195614082449492e-05,
+ "loss": 1.0201,
+ "step": 996
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.4057122427176845,
+ "learning_rate": 3.1872647232657723e-05,
+ "loss": 1.0885,
+ "step": 997
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.39747060350754754,
+ "learning_rate": 3.17891905601432e-05,
+ "loss": 1.0544,
+ "step": 998
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.4397658078291558,
+ "learning_rate": 3.1705771186061715e-05,
+ "loss": 1.0998,
+ "step": 999
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.37373547663810053,
+ "learning_rate": 3.162238948935423e-05,
+ "loss": 1.0465,
+ "step": 1000
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.3982578259433756,
+ "learning_rate": 3.153904584879052e-05,
+ "loss": 1.0319,
+ "step": 1001
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.3874917951751892,
+ "learning_rate": 3.1455740642967545e-05,
+ "loss": 1.0064,
+ "step": 1002
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.39186897217724515,
+ "learning_rate": 3.1372474250307594e-05,
+ "loss": 0.9924,
+ "step": 1003
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.40862291659745487,
+ "learning_rate": 3.128924704905673e-05,
+ "loss": 1.0697,
+ "step": 1004
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.4020116078010512,
+ "learning_rate": 3.1206059417282894e-05,
+ "loss": 1.0669,
+ "step": 1005
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.4195258340431994,
+ "learning_rate": 3.1122911732874356e-05,
+ "loss": 0.9669,
+ "step": 1006
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.4051289242539706,
+ "learning_rate": 3.103980437353787e-05,
+ "loss": 1.0283,
+ "step": 1007
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.4072322485068397,
+ "learning_rate": 3.0956737716797047e-05,
+ "loss": 0.9819,
+ "step": 1008
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.4183439146679152,
+ "learning_rate": 3.087371213999056e-05,
+ "loss": 1.0195,
+ "step": 1009
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.4223541507769984,
+ "learning_rate": 3.079072802027051e-05,
+ "loss": 1.0321,
+ "step": 1010
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.4393009772902467,
+ "learning_rate": 3.070778573460068e-05,
+ "loss": 1.008,
+ "step": 1011
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.3790600190213189,
+ "learning_rate": 3.062488565975476e-05,
+ "loss": 0.9642,
+ "step": 1012
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.39767262663748454,
+ "learning_rate": 3.054202817231477e-05,
+ "loss": 1.0067,
+ "step": 1013
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.4245599091706745,
+ "learning_rate": 3.0459213648669195e-05,
+ "loss": 1.0128,
+ "step": 1014
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.39825908696948487,
+ "learning_rate": 3.0376442465011436e-05,
+ "loss": 1.0415,
+ "step": 1015
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.38711515877098823,
+ "learning_rate": 3.0293714997337927e-05,
+ "loss": 1.008,
+ "step": 1016
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.40094768473114994,
+ "learning_rate": 3.0211031621446607e-05,
+ "loss": 1.0424,
+ "step": 1017
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.4257977860413385,
+ "learning_rate": 3.0128392712935044e-05,
+ "loss": 1.0244,
+ "step": 1018
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.43915000496042145,
+ "learning_rate": 3.0045798647198882e-05,
+ "loss": 1.0096,
+ "step": 1019
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.43529687364630915,
+ "learning_rate": 2.9963249799429986e-05,
+ "loss": 1.0672,
+ "step": 1020
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.3930746148439634,
+ "learning_rate": 2.988074654461489e-05,
+ "loss": 0.9502,
+ "step": 1021
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 12.100708480080742,
+ "learning_rate": 2.9798289257532946e-05,
+ "loss": 1.1234,
+ "step": 1022
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.4482898066028835,
+ "learning_rate": 2.9715878312754767e-05,
+ "loss": 1.0238,
+ "step": 1023
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.49283220433392333,
+ "learning_rate": 2.9633514084640365e-05,
+ "loss": 1.0953,
+ "step": 1024
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.5149555706737133,
+ "learning_rate": 2.955119694733763e-05,
+ "loss": 1.0521,
+ "step": 1025
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.44798691881600083,
+ "learning_rate": 2.946892727478045e-05,
+ "loss": 0.9552,
+ "step": 1026
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.4805603304240495,
+ "learning_rate": 2.9386705440687168e-05,
+ "loss": 1.0627,
+ "step": 1027
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.4869146396788101,
+ "learning_rate": 2.9304531818558795e-05,
+ "loss": 0.9919,
+ "step": 1028
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 7.1337477050690765,
+ "learning_rate": 2.9222406781677294e-05,
+ "loss": 1.0948,
+ "step": 1029
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 3.9006954730122643,
+ "learning_rate": 2.9140330703103992e-05,
+ "loss": 1.0288,
+ "step": 1030
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 1247.1481817866024,
+ "learning_rate": 2.905830395567776e-05,
+ "loss": 2.7593,
+ "step": 1031
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 3.536837141759145,
+ "learning_rate": 2.8976326912013422e-05,
+ "loss": 1.0416,
+ "step": 1032
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 2.1190195460751875,
+ "learning_rate": 2.8894399944499974e-05,
+ "loss": 1.0894,
+ "step": 1033
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 23.71856345141067,
+ "learning_rate": 2.8812523425299e-05,
+ "loss": 1.0503,
+ "step": 1034
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 1.6586194628502813,
+ "learning_rate": 2.873069772634281e-05,
+ "loss": 0.9838,
+ "step": 1035
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 754.5038667582967,
+ "learning_rate": 2.8648923219332997e-05,
+ "loss": 1.5912,
+ "step": 1036
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 2084.148772219088,
+ "learning_rate": 2.856720027573848e-05,
+ "loss": 2.7096,
+ "step": 1037
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 7.150640904502661,
+ "learning_rate": 2.8485529266794043e-05,
+ "loss": 1.0482,
+ "step": 1038
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 15.341911132717605,
+ "learning_rate": 2.8403910563498482e-05,
+ "loss": 1.1031,
+ "step": 1039
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 28.495476932479527,
+ "learning_rate": 2.832234453661304e-05,
+ "loss": 1.0985,
+ "step": 1040
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 6.611240968969735,
+ "learning_rate": 2.8240831556659635e-05,
+ "loss": 1.076,
+ "step": 1041
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 158.1044433512434,
+ "learning_rate": 2.815937199391924e-05,
+ "loss": 1.1907,
+ "step": 1042
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 1.2973014286777083,
+ "learning_rate": 2.807796621843016e-05,
+ "loss": 1.0292,
+ "step": 1043
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 15.820120602912542,
+ "learning_rate": 2.799661459998638e-05,
+ "loss": 1.1606,
+ "step": 1044
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 25.84305200367942,
+ "learning_rate": 2.7915317508135848e-05,
+ "loss": 1.0422,
+ "step": 1045
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 0.6840315849067687,
+ "learning_rate": 2.7834075312178838e-05,
+ "loss": 1.0541,
+ "step": 1046
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 11.975406658849673,
+ "learning_rate": 2.775288838116626e-05,
+ "loss": 1.0545,
+ "step": 1047
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 513.8880995650269,
+ "learning_rate": 2.767175708389794e-05,
+ "loss": 1.2914,
+ "step": 1048
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 7.1778887098472755,
+ "learning_rate": 2.759068178892105e-05,
+ "loss": 1.0446,
+ "step": 1049
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 1529.592860032449,
+ "learning_rate": 2.750966286452828e-05,
+ "loss": 3.6171,
+ "step": 1050
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 457.584729264938,
+ "learning_rate": 2.7428700678756334e-05,
+ "loss": 1.3267,
+ "step": 1051
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 522.2749314347409,
+ "learning_rate": 2.7347795599384097e-05,
+ "loss": 1.546,
+ "step": 1052
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 6.221272389029345,
+ "learning_rate": 2.7266947993931113e-05,
+ "loss": 1.1087,
+ "step": 1053
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 311.2632708977676,
+ "learning_rate": 2.7186158229655805e-05,
+ "loss": 1.3081,
+ "step": 1054
+ },
+ {
+ "epoch": 1.47,
+ "grad_norm": 23.66110664544657,
+ "learning_rate": 2.7105426673553855e-05,
+ "loss": 1.0733,
+ "step": 1055
+ },
+ {
+ "epoch": 1.47,
+ "grad_norm": 2.300653447677052,
+ "learning_rate": 2.7024753692356526e-05,
+ "loss": 1.0781,
+ "step": 1056
+ },
+ {
+ "epoch": 1.47,
+ "grad_norm": 12.450323800171912,
+ "learning_rate": 2.694413965252901e-05,
+ "loss": 1.077,
+ "step": 1057
+ },
+ {
+ "epoch": 1.47,
+ "grad_norm": 265.3597276252935,
+ "learning_rate": 2.686358492026873e-05,
+ "loss": 1.3128,
+ "step": 1058
+ },
+ {
+ "epoch": 1.47,
+ "grad_norm": 59.34678923602864,
+ "learning_rate": 2.6783089861503717e-05,
+ "loss": 1.2054,
+ "step": 1059
+ },
+ {
+ "epoch": 1.48,
+ "grad_norm": 184.6606219618748,
+ "learning_rate": 2.670265484189093e-05,
+ "loss": 1.1299,
+ "step": 1060
+ },
+ {
+ "epoch": 1.48,
+ "grad_norm": 80.66125275316996,
+ "learning_rate": 2.6622280226814582e-05,
+ "loss": 1.0784,
+ "step": 1061
+ },
+ {
+ "epoch": 1.48,
+ "grad_norm": 2.377701583912402,
+ "learning_rate": 2.6541966381384487e-05,
+ "loss": 1.091,
+ "step": 1062
+ },
+ {
+ "epoch": 1.48,
+ "grad_norm": 1.4503793656466002,
+ "learning_rate": 2.6461713670434445e-05,
+ "loss": 1.0014,
+ "step": 1063
+ },
+ {
+ "epoch": 1.48,
+ "grad_norm": 6.111135395353941,
+ "learning_rate": 2.6381522458520498e-05,
+ "loss": 1.1101,
+ "step": 1064
+ },
+ {
+ "epoch": 1.48,
+ "grad_norm": 53.5458453879621,
+ "learning_rate": 2.6301393109919353e-05,
+ "loss": 1.0417,
+ "step": 1065
+ },
+ {
+ "epoch": 1.49,
+ "grad_norm": 2.296669669357606,
+ "learning_rate": 2.6221325988626686e-05,
+ "loss": 1.0555,
+ "step": 1066
+ },
+ {
+ "epoch": 1.49,
+ "grad_norm": 4.002288790689249,
+ "learning_rate": 2.61413214583555e-05,
+ "loss": 1.0497,
+ "step": 1067
+ },
+ {
+ "epoch": 1.49,
+ "grad_norm": 2.590180280200089,
+ "learning_rate": 2.6061379882534466e-05,
+ "loss": 1.0971,
+ "step": 1068
+ },
+ {
+ "epoch": 1.49,
+ "grad_norm": 1.6552849234151497,
+ "learning_rate": 2.5981501624306296e-05,
+ "loss": 0.9975,
+ "step": 1069
+ },
+ {
+ "epoch": 1.49,
+ "grad_norm": 2.819530643973377,
+ "learning_rate": 2.590168704652605e-05,
+ "loss": 1.061,
+ "step": 1070
+ },
+ {
+ "epoch": 1.5,
+ "grad_norm": 37.72256335715325,
+ "learning_rate": 2.582193651175954e-05,
+ "loss": 1.1041,
+ "step": 1071
+ },
+ {
+ "epoch": 1.5,
+ "grad_norm": 0.7567234742258271,
+ "learning_rate": 2.5742250382281638e-05,
+ "loss": 1.1077,
+ "step": 1072
+ },
+ {
+ "epoch": 1.5,
+ "grad_norm": 0.9599151737835186,
+ "learning_rate": 2.5662629020074647e-05,
+ "loss": 1.0409,
+ "step": 1073
+ },
+ {
+ "epoch": 1.5,
+ "grad_norm": 8.981886071466588,
+ "learning_rate": 2.5583072786826678e-05,
+ "loss": 1.0974,
+ "step": 1074
+ },
+ {
+ "epoch": 1.5,
+ "grad_norm": 2.404349972229506,
+ "learning_rate": 2.5503582043929963e-05,
+ "loss": 1.1159,
+ "step": 1075
+ },
+ {
+ "epoch": 1.5,
+ "grad_norm": 4.373545206924793,
+ "learning_rate": 2.542415715247926e-05,
+ "loss": 1.0598,
+ "step": 1076
+ },
+ {
+ "epoch": 1.51,
+ "grad_norm": 1.4596003688735983,
+ "learning_rate": 2.5344798473270152e-05,
+ "loss": 1.1134,
+ "step": 1077
+ },
+ {
+ "epoch": 1.51,
+ "grad_norm": 2.1083725689844326,
+ "learning_rate": 2.526550636679751e-05,
+ "loss": 0.9931,
+ "step": 1078
+ },
+ {
+ "epoch": 1.51,
+ "grad_norm": 1.0003351902473687,
+ "learning_rate": 2.5186281193253726e-05,
+ "loss": 1.0186,
+ "step": 1079
+ },
+ {
+ "epoch": 1.51,
+ "grad_norm": 1.461014978228795,
+ "learning_rate": 2.510712331252719e-05,
+ "loss": 0.9672,
+ "step": 1080
+ },
+ {
+ "epoch": 1.51,
+ "grad_norm": 1.0129142937848878,
+ "learning_rate": 2.5028033084200566e-05,
+ "loss": 0.9275,
+ "step": 1081
+ },
+ {
+ "epoch": 1.52,
+ "grad_norm": 0.5137143101178606,
+ "learning_rate": 2.494901086754923e-05,
+ "loss": 1.1034,
+ "step": 1082
+ },
+ {
+ "epoch": 1.52,
+ "grad_norm": 1.1837206879042184,
+ "learning_rate": 2.4870057021539628e-05,
+ "loss": 1.052,
+ "step": 1083
+ },
+ {
+ "epoch": 1.52,
+ "grad_norm": 0.6034132086412607,
+ "learning_rate": 2.4791171904827548e-05,
+ "loss": 1.1377,
+ "step": 1084
+ },
+ {
+ "epoch": 1.52,
+ "grad_norm": 0.8905740860693494,
+ "learning_rate": 2.4712355875756666e-05,
+ "loss": 1.0123,
+ "step": 1085
+ },
+ {
+ "epoch": 1.52,
+ "grad_norm": 0.8444181857939702,
+ "learning_rate": 2.4633609292356737e-05,
+ "loss": 1.0657,
+ "step": 1086
+ },
+ {
+ "epoch": 1.52,
+ "grad_norm": 0.5744979164068933,
+ "learning_rate": 2.4554932512342117e-05,
+ "loss": 1.0005,
+ "step": 1087
+ },
+ {
+ "epoch": 1.53,
+ "grad_norm": 0.5649175037847919,
+ "learning_rate": 2.4476325893110008e-05,
+ "loss": 1.0319,
+ "step": 1088
+ },
+ {
+ "epoch": 1.53,
+ "grad_norm": 0.691534481432307,
+ "learning_rate": 2.4397789791738974e-05,
+ "loss": 1.0253,
+ "step": 1089
+ },
+ {
+ "epoch": 1.53,
+ "grad_norm": 0.5110907757872322,
+ "learning_rate": 2.431932456498717e-05,
+ "loss": 1.0261,
+ "step": 1090
+ },
+ {
+ "epoch": 1.53,
+ "grad_norm": 0.4342362374564813,
+ "learning_rate": 2.4240930569290867e-05,
+ "loss": 1.0492,
+ "step": 1091
+ },
+ {
+ "epoch": 1.53,
+ "grad_norm": 0.5320223874229756,
+ "learning_rate": 2.416260816076269e-05,
+ "loss": 1.0414,
+ "step": 1092
+ },
+ {
+ "epoch": 1.54,
+ "grad_norm": 0.4847073954059318,
+ "learning_rate": 2.408435769519014e-05,
+ "loss": 1.1036,
+ "step": 1093
+ },
+ {
+ "epoch": 1.54,
+ "grad_norm": 0.4887310785937357,
+ "learning_rate": 2.4006179528033844e-05,
+ "loss": 1.0172,
+ "step": 1094
+ },
+ {
+ "epoch": 1.54,
+ "grad_norm": 0.5097503637400853,
+ "learning_rate": 2.3928074014426077e-05,
+ "loss": 1.0626,
+ "step": 1095
+ },
+ {
+ "epoch": 1.54,
+ "grad_norm": 0.4260447256317975,
+ "learning_rate": 2.3850041509169007e-05,
+ "loss": 1.0068,
+ "step": 1096
+ },
+ {
+ "epoch": 1.54,
+ "grad_norm": 0.6279002214589313,
+ "learning_rate": 2.3772082366733224e-05,
+ "loss": 1.0533,
+ "step": 1097
+ },
+ {
+ "epoch": 1.55,
+ "grad_norm": 0.514571075001012,
+ "learning_rate": 2.3694196941255988e-05,
+ "loss": 1.055,
+ "step": 1098
+ },
+ {
+ "epoch": 1.55,
+ "grad_norm": 0.5918196577760101,
+ "learning_rate": 2.361638558653974e-05,
+ "loss": 1.1135,
+ "step": 1099
+ },
+ {
+ "epoch": 1.55,
+ "grad_norm": 0.47452609662301504,
+ "learning_rate": 2.3538648656050463e-05,
+ "loss": 1.1205,
+ "step": 1100
+ }
+ ],
+ "logging_steps": 1.0,
+ "max_steps": 1638,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 3,
+ "save_steps": 50,
+ "total_flos": 1140439444881408.0,
+ "train_batch_size": 1,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/v2/checkpoint-1100/training_args.bin b/v2/checkpoint-1100/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..8c2dfa20e1da5754719c3d7e300b9b86407f077f
--- /dev/null
+++ b/v2/checkpoint-1100/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3f2f7bd873b9dca108c5ca2e32ea140480fabeed2dec60f702daabd0a44d071e
+size 6776
diff --git a/v2/checkpoint-1100/zero_to_fp32.py b/v2/checkpoint-1100/zero_to_fp32.py
new file mode 100755
index 0000000000000000000000000000000000000000..24cc342e78d1a006c782b3a4cd68d9ce786d8fd8
--- /dev/null
+++ b/v2/checkpoint-1100/zero_to_fp32.py
@@ -0,0 +1,604 @@
+#!/usr/bin/env python
+
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+# This script extracts fp32 consolidated weights from a zero 1, 2 and 3 DeepSpeed checkpoints. It gets
+# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
+# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
+# application.
+#
+# example: python zero_to_fp32.py . pytorch_model.bin
+
+import argparse
+import torch
+import glob
+import math
+import os
+import re
+from collections import OrderedDict
+from dataclasses import dataclass
+
+# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
+# DeepSpeed data structures it has to be available in the current python environment.
+from deepspeed.utils import logger
+from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
+
+
@dataclass
class zero_model_state:
    """Per-rank model state distilled from a ``*_model_states.pt`` file.

    Bug fix: the original annotations were ``dict()`` — empty-dict *instances*
    created at class-definition time, not types — and ``ds_version`` was
    annotated ``int`` although ``parse_model_states`` stores the value of
    ``state_dict.get(DS_VERSION, None)`` (a version string or None).
    Dataclasses do not enforce annotations at runtime, so this is fully
    backward-compatible; field names and order are unchanged.
    """
    buffers: dict                  # buffer name -> fp32 tensor
    param_shapes: list             # list of {param name -> shape} dicts, one per param group
    shared_params: list            # [alias_name, source_name] pairs
    ds_version: str                # deepspeed version string (None in old checkpoints)
    frozen_param_shapes: dict      # frozen param name -> shape, or None
    frozen_param_fragments: dict   # frozen param name -> tensor fragment, or None
+
+
# Verbosity flag; rebound from the --debug CLI option in the __main__ block
# below and consulted by the merge/parse helpers to gate diagnostic prints.
debug = 0

# load to cpu
device = torch.device('cpu')
+
+
def atoi(text):
    """Return *text* converted to int when it is all digits, else unchanged."""
    if text.isdigit():
        return int(text)
    return text
+
+
def natural_keys(text):
    """Split *text* into alternating str/int chunks for human-order sorting.

    alist.sort(key=natural_keys) sorts in human order (e.g. step2 < step10).
    http://nedbatchelder.com/blog/200712/human_sorting.html
    (See Toothy's implementation in the comments)
    """
    return [int(chunk) if chunk.isdigit() else chunk
            for chunk in re.split(r'(\d+)', text)]
+
+
def get_model_state_file(checkpoint_dir, zero_stage):
    """Return the rank-0 model-states file path inside *checkpoint_dir*.

    Args:
        checkpoint_dir: tag folder holding the per-rank checkpoint files.
        zero_stage: ZeRO stage (1, 2 or 3) — selects the file-naming scheme.

    Returns:
        Path to the rank-0 ``*model_states.pt`` file.

    Raises:
        FileNotFoundError: directory or expected file is missing.
        ValueError: unrecognized *zero_stage*.
    """
    if not os.path.isdir(checkpoint_dir):
        raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")

    # there should be only one file
    if zero_stage <= 2:
        file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
    elif zero_stage == 3:
        file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
    else:
        # bug fix: the original fell through here with `file` unbound and
        # crashed with UnboundLocalError for any stage > 3
        raise ValueError(f"unknown zero stage {zero_stage}")

    if not os.path.exists(file):
        raise FileNotFoundError(f"can't find model states file at '{file}'")

    return file
+
+
def get_checkpoint_files(checkpoint_dir, glob_pattern):
    """Return files matching *glob_pattern* under *checkpoint_dir*, sorted in human order."""
    # XXX: need to test that this simple glob rule works for multi-node setup too
    matches = glob.glob(os.path.join(checkpoint_dir, glob_pattern))
    if not matches:
        raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
    return sorted(matches, key=natural_keys)
+
+
def get_optim_files(checkpoint_dir):
    """Return the sorted per-rank optimizer-state files in *checkpoint_dir*."""
    pattern = "*_optim_states.pt"
    return get_checkpoint_files(checkpoint_dir, pattern)
+
+
def get_model_state_files(checkpoint_dir):
    """Return the sorted per-rank model-state files in *checkpoint_dir*."""
    pattern = "*_model_states.pt"
    return get_checkpoint_files(checkpoint_dir, pattern)
+
+
def parse_model_states(files):
    """Load each ``*_model_states.pt`` file into a ``zero_model_state``.

    Args:
        files: per-rank model-states checkpoint paths, one per rank.

    Returns:
        list of ``zero_model_state`` records (one per file) holding the fp32
        buffers, parameter shapes, shared-param pairs, deepspeed version and
        optional frozen-parameter shapes/fragments.

    Raises:
        ValueError: a file lacks the buffer-names entry, i.e. it is not a
            model-states checkpoint.
    """
    zero_model_states = []
    for file in files:
        state_dict = torch.load(file, map_location=device)

        if BUFFER_NAMES not in state_dict:
            raise ValueError(f"{file} is not a model state checkpoint")
        buffer_names = state_dict[BUFFER_NAMES]
        if debug:
            print("Found buffers:", buffer_names)

        # recover just the buffers while restoring them to fp32 if they were saved in fp16
        buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
        param_shapes = state_dict[PARAM_SHAPES]

        # collect parameters that are included in param_shapes
        # NOTE(review): param_names is built here (and extended below) but
        # never read afterwards — apparently vestigial.
        param_names = []
        for s in param_shapes:
            for name in s.keys():
                param_names.append(name)

        # update with frozen parameters
        frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
        if frozen_param_shapes is not None:
            if debug:
                print(f"Found frozen_param_shapes: {frozen_param_shapes}")
            param_names += list(frozen_param_shapes.keys())

        # handle shared params
        shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]

        ds_version = state_dict.get(DS_VERSION, None)

        frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)

        z_model_state = zero_model_state(buffers=buffers,
                                         param_shapes=param_shapes,
                                         shared_params=shared_params,
                                         ds_version=ds_version,
                                         frozen_param_shapes=frozen_param_shapes,
                                         frozen_param_fragments=frozen_param_fragments)
        zero_model_states.append(z_model_state)

    return zero_model_states
+
+
def parse_optim_states(files, ds_checkpoint_dir):
    """Load all ``*_optim_states.pt`` files and extract the fp32 partitions.

    Args:
        files: per-rank optimizer-states checkpoint paths.
        ds_checkpoint_dir: folder the files came from (used in error messages).

    Returns:
        ``(zero_stage, world_size, fp32_flat_groups)`` where fp32_flat_groups
        has one entry per rank: a list of flat tensors (stage 1/2) or a
        single merged flat tensor (stage 3).

    Raises:
        ValueError: not a zero checkpoint, world-size/file-count mismatch, or
            an unknown zero stage.
    """

    total_files = len(files)
    state_dicts = []
    for f in files:
        state_dict = torch.load(f, map_location=device)
        # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights
        # and also handle the case where it was already removed by another helper script
        state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
        state_dicts.append(state_dict)

    if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]:
        raise ValueError(f"{files[0]} is not a zero checkpoint")
    zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
    world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]

    # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
    # parameters can be different from data parallelism for non-expert parameters. So we can just
    # use the max of the partition_count to get the dp world_size.

    if type(world_size) is list:
        world_size = max(world_size)

    if world_size != total_files:
        raise ValueError(
            f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
            "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
        )

    # the groups are named differently in each stage
    if zero_stage <= 2:
        fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
    elif zero_stage == 3:
        fp32_groups_key = FP32_FLAT_GROUPS
    else:
        raise ValueError(f"unknown zero stage {zero_stage}")

    if zero_stage <= 2:
        fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
    elif zero_stage == 3:
        # if there is more than one param group, there will be multiple flattened tensors - one
        # flattened tensor per group - for simplicity merge them into a single tensor
        #
        # XXX: could make the script more memory efficient for when there are multiple groups - it
        # will require matching the sub-lists of param_shapes for each param group flattened tensor

        fp32_flat_groups = [
            torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
        ]

    return zero_stage, world_size, fp32_flat_groups
+
+
def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
    """Reconstruct the fp32 state_dict from an extracted tag folder.

    Args:
        ds_checkpoint_dir: folder holding the per-rank optimizer and model
            state files of one checkpoint tag.
        exclude_frozen_parameters: when True, frozen params are skipped.
    """
    print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")

    optim_files = get_optim_files(ds_checkpoint_dir)
    zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
    print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")

    zero_model_states = parse_model_states(get_model_state_files(ds_checkpoint_dir))
    print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')

    # stages 1/2 share a layout; stage 3 partitions every param across ranks
    if zero_stage == 3:
        return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                                          exclude_frozen_parameters)
    elif zero_stage <= 2:
        return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                                          exclude_frozen_parameters)
+
+
def _zero2_merge_frozen_params(state_dict, zero_model_states):
    """Copy frozen (non-trainable) params into *state_dict* for ZeRO-1/2.

    Rank 0's saved fragments are used whole — no cross-rank concatenation is
    performed here. No-op when the checkpoint has no frozen params.
    """
    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
        return

    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
    frozen_param_fragments = zero_model_states[0].frozen_param_fragments

    if debug:
        num_elem = sum(s.numel() for s in frozen_param_shapes.values())
        print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

    wanted_params = len(frozen_param_shapes)
    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
    avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
    print(f'Frozen params: Have {avail_numel} numels to process.')
    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params = 0
    total_numel = 0
    for name, shape in frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        # fragment already holds the full tensor at this stage
        state_dict[name] = frozen_param_fragments[name]

        if debug:
            print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _has_callable(obj, fn):
+ attr = getattr(obj, fn, None)
+ return callable(attr)
+
+
def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    """Rebuild full fp32 trainable params from ZeRO-1/2 partitions.

    For each param group, the per-rank flat partitions are concatenated into
    one flat fp32 vector, which is then sliced back into the individual
    parameters using rank 0's recorded shapes and written into *state_dict*.
    """
    param_shapes = zero_model_states[0].param_shapes

    # Reconstruction protocol:
    #
    # XXX: document this

    if debug:
        for i in range(world_size):
            for j in range(len(fp32_flat_groups[0])):
                print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")

    # XXX: memory usage doubles here (zero2)
    num_param_groups = len(fp32_flat_groups[0])
    merged_single_partition_of_fp32_groups = []
    for i in range(num_param_groups):
        merged_partitions = [sd[i] for sd in fp32_flat_groups]
        full_single_fp32_vector = torch.cat(merged_partitions, 0)
        merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
    avail_numel = sum(
        [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])

    if debug:
        wanted_params = sum([len(shapes) for shapes in param_shapes])
        wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
        # not asserting if there is a mismatch due to possible padding
        print(f"Have {avail_numel} numels to process.")
        print(f"Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    total_numel = 0
    total_params = 0
    for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
        offset = 0
        avail_numel = full_single_fp32_vector.numel()
        for name, shape in shapes.items():

            # shape may be a torch.Size (has .numel) or a plain sequence
            unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
            total_numel += unpartitioned_numel
            total_params += 1

            if debug:
                print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
            state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
            offset += unpartitioned_numel

        # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
        # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
        # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
        # live optimizer object, so we are checking that the numbers are within the right range
        align_to = 2 * world_size

        def zero2_align(x):
            return align_to * math.ceil(x / align_to)

        if debug:
            print(f"original offset={offset}, avail_numel={avail_numel}")

        offset = zero2_align(offset)
        avail_numel = zero2_align(avail_numel)

        if debug:
            print(f"aligned offset={offset}, avail_numel={avail_numel}")

        # Sanity check
        if offset != avail_numel:
            raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
+
+
def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                               exclude_frozen_parameters):
    """Assemble the consolidated fp32 state_dict for a ZeRO-1/2 checkpoint.

    Order matters: buffers first, then (optionally) frozen params, then the
    trainable params, and finally shared-parameter aliases are restored.
    """
    result = OrderedDict()

    # fp32 buffers were already recovered by parse_model_states
    buffers = zero_model_states[0].buffers
    result.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    if not exclude_frozen_parameters:
        _zero2_merge_frozen_params(result, zero_model_states)

    _zero2_merge_trainable_params(result, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters: each alias points at its source tensor
    for pair in zero_model_states[0].shared_params:
        if pair[1] in result:
            result[pair[0]] = result[pair[1]]

    return result
+
+
def zero3_partitioned_param_info(unpartitioned_numel, world_size):
    """Return ``(per-rank partition numel, padding on the last rank)``.

    ZeRO-3 splits each flat param evenly across ranks, so every rank holds
    ceil(n / world_size) elements; the tail is padded when n is not an exact
    multiple of world_size.
    """
    remainder = unpartitioned_numel % world_size
    padding_numel = 0 if remainder == 0 else world_size - remainder
    partitioned_numel = math.ceil(unpartitioned_numel / world_size)
    return partitioned_numel, padding_numel
+
+
def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
    """Reconstruct frozen params for ZeRO-3 into *state_dict*.

    Under stage 3 every rank stores a fragment of each frozen param: the
    fragments are concatenated across ranks, trimmed of tail padding and
    reshaped. No-op when the checkpoint has no frozen params.
    """
    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
        return

    if debug:
        for i in range(world_size):
            num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
            print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
    wanted_params = len(frozen_param_shapes)
    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
    avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
    print(f'Frozen params: Have {avail_numel} numels to process.')
    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params = 0
    total_numel = 0
    for name, shape in zero_model_states[0].frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        # concatenate per-rank fragments, then drop the tail padding
        param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
        state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)

        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    """Rebuild full fp32 trainable params from ZeRO-3 partitions.

    Every param is split evenly across ranks; for each param the per-rank
    slices at the running offset are concatenated, tail padding is trimmed,
    and the result is reshaped into *state_dict*.
    """
    param_shapes = zero_model_states[0].param_shapes
    # NOTE(review): avail_numel is recomputed identically below; this first
    # assignment is dead code.
    avail_numel = fp32_flat_groups[0].numel() * world_size
    # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
    # param, re-consolidating each param, while dealing with padding if any

    # merge list of dicts, preserving order
    param_shapes = {k: v for d in param_shapes for k, v in d.items()}

    if debug:
        for i in range(world_size):
            print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")

    wanted_params = len(param_shapes)
    wanted_numel = sum(shape.numel() for shape in param_shapes.values())
    # not asserting if there is a mismatch due to possible padding
    avail_numel = fp32_flat_groups[0].numel() * world_size
    print(f"Trainable params: Have {avail_numel} numels to process.")
    print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    offset = 0
    total_numel = 0
    total_params = 0
    for name, shape in param_shapes.items():

        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel
        total_params += 1

        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

        # XXX: memory usage doubles here
        state_dict[name] = torch.cat(
            tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
            0).narrow(0, 0, unpartitioned_numel).view(shape)
        offset += partitioned_numel

    # offset advanced in per-rank units; scale back to total element count
    offset *= world_size

    # Sanity check
    if offset != avail_numel:
        raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
+
+
def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                               exclude_frozen_parameters):
    """Assemble the consolidated fp32 state_dict for a ZeRO-3 checkpoint.

    Order matters: buffers first, then (optionally) frozen params, then the
    trainable params, and finally shared-parameter aliases are restored.
    """
    result = OrderedDict()

    # fp32 buffers were already recovered by parse_model_states
    buffers = zero_model_states[0].buffers
    result.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    if not exclude_frozen_parameters:
        _zero3_merge_frozen_params(result, world_size, zero_model_states)

    _zero3_merge_trainable_params(result, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters: each alias points at its source tensor
    for pair in zero_model_states[0].shared_params:
        if pair[1] in result:
            result[pair[0]] = result[pair[1]]

    return result
+
+
def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None, exclude_frozen_parameters=False):
    """Reconstruct a consolidated fp32 state_dict from a ZeRO 2/3 checkpoint.

    The result can be loaded with ``load_state_dict()`` and used for training
    without DeepSpeed, or shared with others (e.g. via a model hub).

    Args:
        checkpoint_dir: path to the desired checkpoint folder.
        tag: checkpoint tag, e.g. ``global_step14``; when None it is read
            from the ``latest`` file inside *checkpoint_dir*.
        exclude_frozen_parameters: exclude frozen parameters.

    Returns:
        pytorch ``state_dict`` (on CPU).

    Raises:
        ValueError: *tag* is None and no ``latest`` file exists.
        FileNotFoundError: the resolved tag folder does not exist.

    Note: this needs enough free CPU memory for the full fp32 model; if you
    don't have that, use the ``zero_to_fp32.py`` script saved alongside the
    checkpoint for the offline conversion instead.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
        # do the training and checkpoint saving
        state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
        model = model.cpu() # move to cpu
        model.load_state_dict(state_dict)
        # submit to model hub or save the model to share with others

    After this the ``model`` is detached from DeepSpeed: re-initialize the
    engine before using it with DeepSpeed again. If you want it all done for
    you, use ``load_state_dict_from_zero_checkpoint`` instead.
    """
    resolved_tag = tag
    if resolved_tag is None:
        # no explicit tag: resolve it from the 'latest' marker file
        latest_path = os.path.join(checkpoint_dir, 'latest')
        if not os.path.isfile(latest_path):
            raise ValueError(f"Unable to find 'latest' file at {latest_path}")
        with open(latest_path, 'r') as fd:
            resolved_tag = fd.read().strip()

    ds_checkpoint_dir = os.path.join(checkpoint_dir, resolved_tag)

    if not os.path.isdir(ds_checkpoint_dir):
        raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")

    return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
+
+
def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None, exclude_frozen_parameters=False):
    """Consolidate a ZeRO 2/3 checkpoint into a single fp32 state_dict file.

    The output can later be loaded with ``torch.load(file)`` +
    ``load_state_dict()`` and used for training without DeepSpeed.

    Args:
        checkpoint_dir: checkpoint folder containing the tag sub-folder
            (e.g. ``global_step14``).
        output_file: destination for the fp32 state_dict
            (e.g. ``path/pytorch_model.bin``).
        tag: checkpoint tag; read from the ``latest`` file when omitted.
        exclude_frozen_parameters: exclude frozen parameters.
    """
    consolidated = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag, exclude_frozen_parameters)
    print(f"Saving fp32 state dict to {output_file}")
    torch.save(consolidated, output_file)
+
+
def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
    """Load consolidated fp32 weights from a ZeRO checkpoint into *model*.

    Steps: (1) reconstruct the fp32 state_dict from the checkpoint, (2) move
    the model to CPU, (3) load the weights (non-strict).

    Args:
        model: the model object to update.
        checkpoint_dir: checkpoint folder containing the tag sub-folder
            (e.g. ``global_step14``).
        tag: checkpoint tag; read from the ``latest`` file when omitted.

    Returns:
        The modified model (moved to CPU).

    Make sure you have plenty of CPU memory available before calling this;
    otherwise use the ``zero_to_fp32.py`` utility placed in the checkpoint
    folder to do the conversion offline.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
        model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
        # submit to model hub or save the model to share with others

    Note: after this call the model is detached from DeepSpeed — you will
    need to re-initialize the deepspeed engine, since ``load_state_dict``
    removes all the deepspeed magic from it.
    """
    logger.info(f"Extracting fp32 weights")
    fp32_weights = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)

    logger.info(f"Overwriting model with fp32 weights")
    cpu_model = model.cpu()
    cpu_model.load_state_dict(fp32_weights, strict=False)

    return cpu_model
+
+
# CLI entry point: convert <checkpoint_dir> into one consolidated fp32
# state_dict file at <output_file>.
if __name__ == "__main__":

    parser = argparse.ArgumentParser()
    parser.add_argument("checkpoint_dir",
                        type=str,
                        help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
    parser.add_argument(
        "output_file",
        type=str,
        help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)")
    parser.add_argument("-t",
                        "--tag",
                        type=str,
                        default=None,
                        help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
    parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
    parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
    args = parser.parse_args()

    # rebind the module-level `debug` flag consulted by the helpers above
    debug = args.debug

    convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
                                               args.output_file,
                                               tag=args.tag,
                                               exclude_frozen_parameters=args.exclude_frozen_parameters)