diff --git a/checkpoint-1000/README.md b/checkpoint-1000/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..16b1eacdd9353dec380a08ee77ce6ed5ab50f12e
--- /dev/null
+++ b/checkpoint-1000/README.md
@@ -0,0 +1,202 @@
+---
+library_name: peft
+base_model: gotzmann/uni
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+### Framework versions
+
+- PEFT 0.10.0
\ No newline at end of file
diff --git a/checkpoint-1000/adapter_config.json b/checkpoint-1000/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..832188d72d81e59dd2b5259e86f371199b441aca
--- /dev/null
+++ b/checkpoint-1000/adapter_config.json
@@ -0,0 +1,31 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "gotzmann/uni",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 128,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 128,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "o_proj",
+ "k_proj",
+ "q_proj",
+ "v_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": true
+}
\ No newline at end of file
diff --git a/checkpoint-1000/adapter_model.safetensors b/checkpoint-1000/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..634eb05b161ad2b8d63c690758e96ed6ad879566
--- /dev/null
+++ b/checkpoint-1000/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e1ce644b4ea3cb5631a4ade4a68fa19557634516036410af8e088f5c2b51b3ad
+size 1048664848
diff --git a/checkpoint-1000/global_step1000/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt b/checkpoint-1000/global_step1000/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..05c1fda4c6052faa8f7c60239ae248f06f6b8894
--- /dev/null
+++ b/checkpoint-1000/global_step1000/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c4f078801474443721e2577ddc62ff1c280744861d069c1bcb960c34196f9d9b
+size 787270042
diff --git a/checkpoint-1000/global_step1000/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt b/checkpoint-1000/global_step1000/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..de5db04a98ca3d64d4e32c8d28ac7fc0c6b8c7ce
--- /dev/null
+++ b/checkpoint-1000/global_step1000/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c735b7960263d865759c604d4661f0970bcbbba4f3f864b6cf7dd93357f669dc
+size 787270042
diff --git a/checkpoint-1000/global_step1000/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt b/checkpoint-1000/global_step1000/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..9f74f5ab6a3f1386c711da953f10b5b6b8556bfb
--- /dev/null
+++ b/checkpoint-1000/global_step1000/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:077981153dc39aef6468d5cae213da828adb6d6516dd1ecfd5e92126125e0e1e
+size 787270042
diff --git a/checkpoint-1000/global_step1000/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt b/checkpoint-1000/global_step1000/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..3958dfe55e01fd06c0dfdba77776a5f6fe46ffca
--- /dev/null
+++ b/checkpoint-1000/global_step1000/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f951e8a525cd2b57687d34505dfbe62ba707a1e7fa3b87351c6c4b5d089dd23c
+size 787270042
diff --git a/checkpoint-1000/global_step1000/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt b/checkpoint-1000/global_step1000/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..3ea9e9a0fd06ca695efd22fa051301b913ce693e
--- /dev/null
+++ b/checkpoint-1000/global_step1000/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eea35d69b8382c06ff895f2ca19189f9d36fbeaf8e2123f498fb431c2cf4dc8d
+size 787270042
diff --git a/checkpoint-1000/global_step1000/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt b/checkpoint-1000/global_step1000/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..1315d20ae5964b65ffd456d2d136b7f50b9a77fa
--- /dev/null
+++ b/checkpoint-1000/global_step1000/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:adca0aeba92c1581a778b03dcebf647f4d80f9a304359aec2f7e8739e3d2dee9
+size 787270042
diff --git a/checkpoint-1000/global_step1000/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt b/checkpoint-1000/global_step1000/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..225d919d35bf6120bb37159ad7e7ad74aa4bdf42
--- /dev/null
+++ b/checkpoint-1000/global_step1000/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1be8b91f86d410b6d352f260da5223d0db64fe8a10f97da168db3465f59bc194
+size 787270042
diff --git a/checkpoint-1000/global_step1000/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt b/checkpoint-1000/global_step1000/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..de25629c43d14c771b3364e5252c77b058ae465f
--- /dev/null
+++ b/checkpoint-1000/global_step1000/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6b7902bc2fc97ca5b26dfbeb1e093daa77464f605402b20f14f955aee413e021
+size 787270042
diff --git a/checkpoint-1000/global_step1000/zero_pp_rank_0_mp_rank_00_model_states.pt b/checkpoint-1000/global_step1000/zero_pp_rank_0_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..94eb802d0f82d382044acdb13c95baaf8808ac08
--- /dev/null
+++ b/checkpoint-1000/global_step1000/zero_pp_rank_0_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:82c0af70dff5f96a3a06356646455999cb721f9c9c9cb6bffe441eccfe736008
+size 653742
diff --git a/checkpoint-1000/global_step1000/zero_pp_rank_1_mp_rank_00_model_states.pt b/checkpoint-1000/global_step1000/zero_pp_rank_1_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..59ccdb3a5d81c6d9326a89fef05824647a372a48
--- /dev/null
+++ b/checkpoint-1000/global_step1000/zero_pp_rank_1_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:da48eb8b26f93823d0693833e5ac78f5cbe01579d9e23ff6b36a69489dca2610
+size 653742
diff --git a/checkpoint-1000/global_step1000/zero_pp_rank_2_mp_rank_00_model_states.pt b/checkpoint-1000/global_step1000/zero_pp_rank_2_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..a7564f51069055122aab1fddb7b91c4ee18b7ea2
--- /dev/null
+++ b/checkpoint-1000/global_step1000/zero_pp_rank_2_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7436d2d4163ecc69a21cfb9c0aa9bb8fb04a6a0eb67326cc10e1abe243036e1f
+size 653742
diff --git a/checkpoint-1000/global_step1000/zero_pp_rank_3_mp_rank_00_model_states.pt b/checkpoint-1000/global_step1000/zero_pp_rank_3_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..3acc1ba7bd92567083d7d22be80a70973550a416
--- /dev/null
+++ b/checkpoint-1000/global_step1000/zero_pp_rank_3_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9739aad3885189418801ae6d471cad7ecc60d9dde6c5855acdbfe218e9c9eadd
+size 653742
diff --git a/checkpoint-1000/global_step1000/zero_pp_rank_4_mp_rank_00_model_states.pt b/checkpoint-1000/global_step1000/zero_pp_rank_4_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..e5c938bd1f3417190d2c43d31d3cd842d38326e2
--- /dev/null
+++ b/checkpoint-1000/global_step1000/zero_pp_rank_4_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:127f55a8a0ffe91f1b222203cb0dc4c57dc1a60cd0a546f3dfcc55b2a976fbca
+size 653742
diff --git a/checkpoint-1000/global_step1000/zero_pp_rank_5_mp_rank_00_model_states.pt b/checkpoint-1000/global_step1000/zero_pp_rank_5_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..f5a4af1fb75f6c7a2626c035a339de66773228c5
--- /dev/null
+++ b/checkpoint-1000/global_step1000/zero_pp_rank_5_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e3f1322e9f7cee7269435dc7f168dc235f65730c79bf762adc662a080c9ea720
+size 653742
diff --git a/checkpoint-1000/global_step1000/zero_pp_rank_6_mp_rank_00_model_states.pt b/checkpoint-1000/global_step1000/zero_pp_rank_6_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..60e77b0dc7069f3456267fa1adadda9dc41c8831
--- /dev/null
+++ b/checkpoint-1000/global_step1000/zero_pp_rank_6_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:84675944246f5dc5aba60518fa340ee67c8efae1c8aa70b7fa2e9b6b9efeaf0a
+size 653742
diff --git a/checkpoint-1000/global_step1000/zero_pp_rank_7_mp_rank_00_model_states.pt b/checkpoint-1000/global_step1000/zero_pp_rank_7_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..ac9dea05fe1c99698e039a35ae835e8444cc8e8f
--- /dev/null
+++ b/checkpoint-1000/global_step1000/zero_pp_rank_7_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5b0ad763b5bae57e03d281834711e7b66343bcd505012a0c2d92b05764fb7f3a
+size 653742
diff --git a/checkpoint-1000/latest b/checkpoint-1000/latest
new file mode 100644
index 0000000000000000000000000000000000000000..e2d3435fb1acf8913e6bd6c51b01adfc69b11ac6
--- /dev/null
+++ b/checkpoint-1000/latest
@@ -0,0 +1 @@
+global_step1000
\ No newline at end of file
diff --git a/checkpoint-1000/rng_state_0.pth b/checkpoint-1000/rng_state_0.pth
new file mode 100644
index 0000000000000000000000000000000000000000..9dd2a62da4ca83b3b986d96dbf0eaeb82207ca93
--- /dev/null
+++ b/checkpoint-1000/rng_state_0.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a0628a9017696045a3a29e9eaffc71e9262d855716e773c0c3be760a1fe85bc8
+size 15984
diff --git a/checkpoint-1000/rng_state_1.pth b/checkpoint-1000/rng_state_1.pth
new file mode 100644
index 0000000000000000000000000000000000000000..1ba5f3aba4388a582cd47f7f9e57cd5879b1cbd2
--- /dev/null
+++ b/checkpoint-1000/rng_state_1.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:df342004a4d8e3626bf2a9f689fde7c8bfd6d995e14931f5496eda1f456cb6f2
+size 15984
diff --git a/checkpoint-1000/rng_state_2.pth b/checkpoint-1000/rng_state_2.pth
new file mode 100644
index 0000000000000000000000000000000000000000..27b0f7845c2b9530c3e6ed3ce232ff4e86b86122
--- /dev/null
+++ b/checkpoint-1000/rng_state_2.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f02096eb4e8850b91490e80e4a042e2e60f71bd2abc6a269d62c271649cb77d2
+size 15984
diff --git a/checkpoint-1000/rng_state_3.pth b/checkpoint-1000/rng_state_3.pth
new file mode 100644
index 0000000000000000000000000000000000000000..fcfb583fc43c6dd4395671708744cfd18c419970
--- /dev/null
+++ b/checkpoint-1000/rng_state_3.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:326c778d3d0e7e3d5665fa0a9ecd92986609c430da08b41611d6c05dc19815a8
+size 15984
diff --git a/checkpoint-1000/rng_state_4.pth b/checkpoint-1000/rng_state_4.pth
new file mode 100644
index 0000000000000000000000000000000000000000..7a8c64b1f15ac655b2be2a42fe61cabe2a877704
--- /dev/null
+++ b/checkpoint-1000/rng_state_4.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d978dcb0c34e022ee6750e9d86814b8c82e4965d7e07662f35f06eeac12938f3
+size 15984
diff --git a/checkpoint-1000/rng_state_5.pth b/checkpoint-1000/rng_state_5.pth
new file mode 100644
index 0000000000000000000000000000000000000000..262e8187e6caeca12ef3b0aa923b12afd697e03d
--- /dev/null
+++ b/checkpoint-1000/rng_state_5.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:01e83399aed1d9d173c3e07b2efa8530c956b62b2b68394c2ed0d43bd8bba9d1
+size 15984
diff --git a/checkpoint-1000/rng_state_6.pth b/checkpoint-1000/rng_state_6.pth
new file mode 100644
index 0000000000000000000000000000000000000000..72f794e31f8d3e0c63972e5076e1ed90c52087ba
--- /dev/null
+++ b/checkpoint-1000/rng_state_6.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:606ab3ca92e3d20c327c69fdcce7f7e39bec2f2c3538b036088b255f917e3ba4
+size 15984
diff --git a/checkpoint-1000/rng_state_7.pth b/checkpoint-1000/rng_state_7.pth
new file mode 100644
index 0000000000000000000000000000000000000000..244e7fdaa1cef2e82bd4e16afb10f32f68318bcc
--- /dev/null
+++ b/checkpoint-1000/rng_state_7.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1276a987dd22c9093fec58921ba19f340a28f18bff635cc01324e09a3c37ac3a
+size 15984
diff --git a/checkpoint-1000/scheduler.pt b/checkpoint-1000/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..01f479c24cd827042085d519a3c91073dad5efba
--- /dev/null
+++ b/checkpoint-1000/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:269f3c1d01e53e47ac89c5d48686db3c5bd5052b5dbb75807c9cffc4b0ab99ae
+size 1064
diff --git a/checkpoint-1000/special_tokens_map.json b/checkpoint-1000/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..72ecfeeb7e14d244c936169d2ed139eeae235ef1
--- /dev/null
+++ b/checkpoint-1000/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "",
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/checkpoint-1000/tokenizer.model b/checkpoint-1000/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..6c00c742ce03c627d6cd5b795984876fa49fa899
--- /dev/null
+++ b/checkpoint-1000/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
diff --git a/checkpoint-1000/tokenizer_config.json b/checkpoint-1000/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..bb5a9f09d8c0f3c32c66fc6118fe5c76c5c6fd90
--- /dev/null
+++ b/checkpoint-1000/tokenizer_config.json
@@ -0,0 +1,45 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "",
+ "chat_template": "{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ '' + '### System:\\n\\n' + system_message }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '\\n\\n### Human:\\n\\n' + content }}{% elif message['role'] == 'assistant' %}{{ '\\n\\n### Assistant:\\n\\n' + content + '' }}{% endif %}{% endfor %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "",
+ "legacy": false,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "split_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "",
+ "use_default_system_prompt": false
+}
diff --git a/checkpoint-1000/trainer_state.json b/checkpoint-1000/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..1c8b4a95e4310fa237b7a233fad45f643cbdd090
--- /dev/null
+++ b/checkpoint-1000/trainer_state.json
@@ -0,0 +1,7021 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 1.365797896662094,
+ "eval_steps": 500,
+ "global_step": 1000,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.0,
+ "grad_norm": 0.849355824164473,
+ "learning_rate": 4.878048780487805e-07,
+ "loss": 1.3655,
+ "step": 1
+ },
+ {
+ "epoch": 0.0,
+ "grad_norm": 10.01567518957158,
+ "learning_rate": 9.75609756097561e-07,
+ "loss": 1.5767,
+ "step": 2
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.6466000875559635,
+ "learning_rate": 1.4634146341463414e-06,
+ "loss": 1.3913,
+ "step": 3
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.6644565932010504,
+ "learning_rate": 1.951219512195122e-06,
+ "loss": 1.3218,
+ "step": 4
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.571354207588475,
+ "learning_rate": 2.4390243902439027e-06,
+ "loss": 1.3597,
+ "step": 5
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.31036262839244955,
+ "learning_rate": 2.926829268292683e-06,
+ "loss": 1.2832,
+ "step": 6
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.2622135027188184,
+ "learning_rate": 3.414634146341464e-06,
+ "loss": 1.2161,
+ "step": 7
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.296824630261661,
+ "learning_rate": 3.902439024390244e-06,
+ "loss": 1.2985,
+ "step": 8
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.2557267467361569,
+ "learning_rate": 4.390243902439025e-06,
+ "loss": 1.3175,
+ "step": 9
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.23418939513890769,
+ "learning_rate": 4.8780487804878055e-06,
+ "loss": 1.2617,
+ "step": 10
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.2364760983285843,
+ "learning_rate": 5.365853658536586e-06,
+ "loss": 1.3103,
+ "step": 11
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.23893034721889,
+ "learning_rate": 5.853658536585366e-06,
+ "loss": 1.2405,
+ "step": 12
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.25563593295485887,
+ "learning_rate": 6.341463414634147e-06,
+ "loss": 1.2831,
+ "step": 13
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.23239975352661665,
+ "learning_rate": 6.829268292682928e-06,
+ "loss": 1.3125,
+ "step": 14
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.3092813858209507,
+ "learning_rate": 7.317073170731707e-06,
+ "loss": 1.2422,
+ "step": 15
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.282563380367434,
+ "learning_rate": 7.804878048780489e-06,
+ "loss": 1.2453,
+ "step": 16
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.22065680088315018,
+ "learning_rate": 8.292682926829268e-06,
+ "loss": 1.2491,
+ "step": 17
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.22777800877980184,
+ "learning_rate": 8.78048780487805e-06,
+ "loss": 1.2655,
+ "step": 18
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.22145212540177928,
+ "learning_rate": 9.268292682926831e-06,
+ "loss": 1.2413,
+ "step": 19
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.22482351883112714,
+ "learning_rate": 9.756097560975611e-06,
+ "loss": 1.2653,
+ "step": 20
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.20823080508385733,
+ "learning_rate": 1.024390243902439e-05,
+ "loss": 1.2374,
+ "step": 21
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.26025492562935737,
+ "learning_rate": 1.0731707317073172e-05,
+ "loss": 1.2065,
+ "step": 22
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.2150252124176173,
+ "learning_rate": 1.1219512195121953e-05,
+ "loss": 1.2782,
+ "step": 23
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.2505915177425618,
+ "learning_rate": 1.1707317073170731e-05,
+ "loss": 1.2742,
+ "step": 24
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.20129223044786942,
+ "learning_rate": 1.2195121951219513e-05,
+ "loss": 1.3366,
+ "step": 25
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.1973508510397107,
+ "learning_rate": 1.2682926829268294e-05,
+ "loss": 1.2476,
+ "step": 26
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.27103325392437194,
+ "learning_rate": 1.3170731707317076e-05,
+ "loss": 1.2325,
+ "step": 27
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.17954976411006285,
+ "learning_rate": 1.3658536585365855e-05,
+ "loss": 1.2523,
+ "step": 28
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.22216997851088888,
+ "learning_rate": 1.4146341463414635e-05,
+ "loss": 1.3297,
+ "step": 29
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.2071458864548587,
+ "learning_rate": 1.4634146341463415e-05,
+ "loss": 1.2127,
+ "step": 30
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.18039422081622164,
+ "learning_rate": 1.5121951219512196e-05,
+ "loss": 1.2509,
+ "step": 31
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.18631254372974412,
+ "learning_rate": 1.5609756097560978e-05,
+ "loss": 1.2247,
+ "step": 32
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.18843872523649827,
+ "learning_rate": 1.6097560975609757e-05,
+ "loss": 1.195,
+ "step": 33
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.2163847267778325,
+ "learning_rate": 1.6585365853658537e-05,
+ "loss": 1.2179,
+ "step": 34
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.19687688475496104,
+ "learning_rate": 1.7073170731707317e-05,
+ "loss": 1.2763,
+ "step": 35
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.20409643064887947,
+ "learning_rate": 1.75609756097561e-05,
+ "loss": 1.253,
+ "step": 36
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.1879182661759335,
+ "learning_rate": 1.804878048780488e-05,
+ "loss": 1.2586,
+ "step": 37
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.19400648948514373,
+ "learning_rate": 1.8536585365853663e-05,
+ "loss": 1.2154,
+ "step": 38
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.1878879343148452,
+ "learning_rate": 1.902439024390244e-05,
+ "loss": 1.2304,
+ "step": 39
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.17687475469924052,
+ "learning_rate": 1.9512195121951222e-05,
+ "loss": 1.2351,
+ "step": 40
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.18223935625384885,
+ "learning_rate": 2e-05,
+ "loss": 1.2222,
+ "step": 41
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.1943061629408338,
+ "learning_rate": 2.048780487804878e-05,
+ "loss": 1.2044,
+ "step": 42
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.17027514338700078,
+ "learning_rate": 2.0975609756097564e-05,
+ "loss": 1.1548,
+ "step": 43
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.18553769630586192,
+ "learning_rate": 2.1463414634146344e-05,
+ "loss": 1.2721,
+ "step": 44
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.19732826914228765,
+ "learning_rate": 2.1951219512195124e-05,
+ "loss": 1.3097,
+ "step": 45
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.18714230986631472,
+ "learning_rate": 2.2439024390243907e-05,
+ "loss": 1.2662,
+ "step": 46
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.19988987568002223,
+ "learning_rate": 2.2926829268292683e-05,
+ "loss": 1.2904,
+ "step": 47
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.17744650133390918,
+ "learning_rate": 2.3414634146341463e-05,
+ "loss": 1.1825,
+ "step": 48
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.16576734763834533,
+ "learning_rate": 2.3902439024390246e-05,
+ "loss": 1.1858,
+ "step": 49
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.179591794065527,
+ "learning_rate": 2.4390243902439026e-05,
+ "loss": 1.2711,
+ "step": 50
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.17923464471176911,
+ "learning_rate": 2.4878048780487805e-05,
+ "loss": 1.2289,
+ "step": 51
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.18991742907836837,
+ "learning_rate": 2.536585365853659e-05,
+ "loss": 1.3097,
+ "step": 52
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.19849796137254636,
+ "learning_rate": 2.5853658536585368e-05,
+ "loss": 1.2489,
+ "step": 53
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.17452371110976383,
+ "learning_rate": 2.634146341463415e-05,
+ "loss": 1.2461,
+ "step": 54
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.17671022353085036,
+ "learning_rate": 2.682926829268293e-05,
+ "loss": 1.153,
+ "step": 55
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.36820559192096686,
+ "learning_rate": 2.731707317073171e-05,
+ "loss": 1.2431,
+ "step": 56
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.20331468526494198,
+ "learning_rate": 2.7804878048780487e-05,
+ "loss": 1.2575,
+ "step": 57
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.2402486598118377,
+ "learning_rate": 2.829268292682927e-05,
+ "loss": 1.2538,
+ "step": 58
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.2549409484173144,
+ "learning_rate": 2.878048780487805e-05,
+ "loss": 1.2065,
+ "step": 59
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.2053105349872685,
+ "learning_rate": 2.926829268292683e-05,
+ "loss": 1.2094,
+ "step": 60
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.17971910872957886,
+ "learning_rate": 2.9756097560975613e-05,
+ "loss": 1.228,
+ "step": 61
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.1885853654992973,
+ "learning_rate": 3.0243902439024392e-05,
+ "loss": 1.2286,
+ "step": 62
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.1848524571968613,
+ "learning_rate": 3.073170731707317e-05,
+ "loss": 1.2718,
+ "step": 63
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.18734105883548513,
+ "learning_rate": 3.1219512195121955e-05,
+ "loss": 1.2357,
+ "step": 64
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.17774668052121825,
+ "learning_rate": 3.170731707317074e-05,
+ "loss": 1.1509,
+ "step": 65
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.17890968008080646,
+ "learning_rate": 3.2195121951219514e-05,
+ "loss": 1.1924,
+ "step": 66
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.18249273371332375,
+ "learning_rate": 3.268292682926829e-05,
+ "loss": 1.2545,
+ "step": 67
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.21064122671902577,
+ "learning_rate": 3.3170731707317074e-05,
+ "loss": 1.2832,
+ "step": 68
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.1820064171955093,
+ "learning_rate": 3.365853658536586e-05,
+ "loss": 1.2071,
+ "step": 69
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.16996662800553433,
+ "learning_rate": 3.414634146341463e-05,
+ "loss": 1.2073,
+ "step": 70
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.1618669302922445,
+ "learning_rate": 3.4634146341463416e-05,
+ "loss": 1.1289,
+ "step": 71
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.18948744950985544,
+ "learning_rate": 3.51219512195122e-05,
+ "loss": 1.2915,
+ "step": 72
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.18326143691603383,
+ "learning_rate": 3.5609756097560976e-05,
+ "loss": 1.2238,
+ "step": 73
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.17410704510700503,
+ "learning_rate": 3.609756097560976e-05,
+ "loss": 1.1784,
+ "step": 74
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.1983667344995625,
+ "learning_rate": 3.658536585365854e-05,
+ "loss": 1.2452,
+ "step": 75
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.3416310763369357,
+ "learning_rate": 3.7073170731707325e-05,
+ "loss": 1.1972,
+ "step": 76
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.2776466983511955,
+ "learning_rate": 3.75609756097561e-05,
+ "loss": 1.3121,
+ "step": 77
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.20026129636576834,
+ "learning_rate": 3.804878048780488e-05,
+ "loss": 1.2436,
+ "step": 78
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.21064549243917835,
+ "learning_rate": 3.853658536585366e-05,
+ "loss": 1.2064,
+ "step": 79
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.22119482175714267,
+ "learning_rate": 3.9024390243902444e-05,
+ "loss": 1.2715,
+ "step": 80
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.23047133748844142,
+ "learning_rate": 3.951219512195122e-05,
+ "loss": 1.2888,
+ "step": 81
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.18741863156973176,
+ "learning_rate": 4e-05,
+ "loss": 1.248,
+ "step": 82
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.1747859810629604,
+ "learning_rate": 4.0487804878048786e-05,
+ "loss": 1.1683,
+ "step": 83
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.1896944798413341,
+ "learning_rate": 4.097560975609756e-05,
+ "loss": 1.2155,
+ "step": 84
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.18724128114363303,
+ "learning_rate": 4.1463414634146346e-05,
+ "loss": 1.2273,
+ "step": 85
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.17368125504855478,
+ "learning_rate": 4.195121951219513e-05,
+ "loss": 1.224,
+ "step": 86
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.18371141013625703,
+ "learning_rate": 4.2439024390243905e-05,
+ "loss": 1.2294,
+ "step": 87
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.1791029365673714,
+ "learning_rate": 4.292682926829269e-05,
+ "loss": 1.2895,
+ "step": 88
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.20259974283859655,
+ "learning_rate": 4.341463414634147e-05,
+ "loss": 1.1841,
+ "step": 89
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.17457456183272174,
+ "learning_rate": 4.390243902439025e-05,
+ "loss": 1.2357,
+ "step": 90
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.1815824380789748,
+ "learning_rate": 4.439024390243903e-05,
+ "loss": 1.2304,
+ "step": 91
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.17566480599583392,
+ "learning_rate": 4.4878048780487814e-05,
+ "loss": 1.242,
+ "step": 92
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.18422975005984474,
+ "learning_rate": 4.536585365853658e-05,
+ "loss": 1.2177,
+ "step": 93
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.16796781877940678,
+ "learning_rate": 4.5853658536585366e-05,
+ "loss": 1.1482,
+ "step": 94
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.18636131653783305,
+ "learning_rate": 4.634146341463415e-05,
+ "loss": 1.1758,
+ "step": 95
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.1823665700289814,
+ "learning_rate": 4.6829268292682926e-05,
+ "loss": 1.289,
+ "step": 96
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.1719900691262439,
+ "learning_rate": 4.731707317073171e-05,
+ "loss": 1.1626,
+ "step": 97
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.17937994168039778,
+ "learning_rate": 4.780487804878049e-05,
+ "loss": 1.175,
+ "step": 98
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.16631851422106986,
+ "learning_rate": 4.829268292682927e-05,
+ "loss": 1.2177,
+ "step": 99
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.19143696232800309,
+ "learning_rate": 4.878048780487805e-05,
+ "loss": 1.3071,
+ "step": 100
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.17859506638780318,
+ "learning_rate": 4.9268292682926835e-05,
+ "loss": 1.2351,
+ "step": 101
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.18381520321248196,
+ "learning_rate": 4.975609756097561e-05,
+ "loss": 1.2342,
+ "step": 102
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.17968218683773912,
+ "learning_rate": 5.0243902439024394e-05,
+ "loss": 1.2074,
+ "step": 103
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.18139489969339018,
+ "learning_rate": 5.073170731707318e-05,
+ "loss": 1.1558,
+ "step": 104
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.17366624842514394,
+ "learning_rate": 5.121951219512195e-05,
+ "loss": 1.1897,
+ "step": 105
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.16034845455223745,
+ "learning_rate": 5.1707317073170736e-05,
+ "loss": 1.179,
+ "step": 106
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.17583069577827776,
+ "learning_rate": 5.219512195121952e-05,
+ "loss": 1.1856,
+ "step": 107
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.1853758076989552,
+ "learning_rate": 5.26829268292683e-05,
+ "loss": 1.2072,
+ "step": 108
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.19597443965936462,
+ "learning_rate": 5.317073170731708e-05,
+ "loss": 1.2271,
+ "step": 109
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.1899206334098331,
+ "learning_rate": 5.365853658536586e-05,
+ "loss": 1.1961,
+ "step": 110
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.17463763837757018,
+ "learning_rate": 5.4146341463414645e-05,
+ "loss": 1.2049,
+ "step": 111
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.20431371701229986,
+ "learning_rate": 5.463414634146342e-05,
+ "loss": 1.2891,
+ "step": 112
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1814475107638498,
+ "learning_rate": 5.51219512195122e-05,
+ "loss": 1.2346,
+ "step": 113
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1883849423207823,
+ "learning_rate": 5.5609756097560974e-05,
+ "loss": 1.244,
+ "step": 114
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1857258128640568,
+ "learning_rate": 5.609756097560976e-05,
+ "loss": 1.2669,
+ "step": 115
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1740768514118401,
+ "learning_rate": 5.658536585365854e-05,
+ "loss": 1.2414,
+ "step": 116
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1919320335584178,
+ "learning_rate": 5.7073170731707317e-05,
+ "loss": 1.2886,
+ "step": 117
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.18288775167828136,
+ "learning_rate": 5.75609756097561e-05,
+ "loss": 1.1875,
+ "step": 118
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.18208588867750863,
+ "learning_rate": 5.804878048780488e-05,
+ "loss": 1.2388,
+ "step": 119
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.1743260015658331,
+ "learning_rate": 5.853658536585366e-05,
+ "loss": 1.1762,
+ "step": 120
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.17856046291517946,
+ "learning_rate": 5.902439024390244e-05,
+ "loss": 1.2888,
+ "step": 121
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.17493794870966536,
+ "learning_rate": 5.9512195121951225e-05,
+ "loss": 1.2222,
+ "step": 122
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.1909202655203384,
+ "learning_rate": 6.000000000000001e-05,
+ "loss": 1.2414,
+ "step": 123
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.18345819482834988,
+ "learning_rate": 6.0487804878048785e-05,
+ "loss": 1.2756,
+ "step": 124
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.2057069352956621,
+ "learning_rate": 6.097560975609757e-05,
+ "loss": 1.261,
+ "step": 125
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.299775882469108,
+ "learning_rate": 6.146341463414634e-05,
+ "loss": 1.2566,
+ "step": 126
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.1869687633018095,
+ "learning_rate": 6.195121951219513e-05,
+ "loss": 1.3039,
+ "step": 127
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.17747149926197442,
+ "learning_rate": 6.243902439024391e-05,
+ "loss": 1.2524,
+ "step": 128
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.17885157788044242,
+ "learning_rate": 6.29268292682927e-05,
+ "loss": 1.2455,
+ "step": 129
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.17617298187845123,
+ "learning_rate": 6.341463414634148e-05,
+ "loss": 1.2009,
+ "step": 130
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.20164176323497066,
+ "learning_rate": 6.390243902439025e-05,
+ "loss": 1.2634,
+ "step": 131
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.20459903417307612,
+ "learning_rate": 6.439024390243903e-05,
+ "loss": 1.1963,
+ "step": 132
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.1863755486334296,
+ "learning_rate": 6.487804878048781e-05,
+ "loss": 1.2387,
+ "step": 133
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.19265866140295207,
+ "learning_rate": 6.536585365853658e-05,
+ "loss": 1.2688,
+ "step": 134
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.1823425868969493,
+ "learning_rate": 6.585365853658536e-05,
+ "loss": 1.2041,
+ "step": 135
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.2016853266472781,
+ "learning_rate": 6.634146341463415e-05,
+ "loss": 1.1223,
+ "step": 136
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.17282675192463448,
+ "learning_rate": 6.682926829268293e-05,
+ "loss": 1.1879,
+ "step": 137
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.17398811693399288,
+ "learning_rate": 6.731707317073171e-05,
+ "loss": 1.2682,
+ "step": 138
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.18516916965434696,
+ "learning_rate": 6.78048780487805e-05,
+ "loss": 1.1666,
+ "step": 139
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.1852213129647933,
+ "learning_rate": 6.829268292682927e-05,
+ "loss": 1.2501,
+ "step": 140
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.17915948766591883,
+ "learning_rate": 6.878048780487805e-05,
+ "loss": 1.2264,
+ "step": 141
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.21599939417233183,
+ "learning_rate": 6.926829268292683e-05,
+ "loss": 1.2376,
+ "step": 142
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.17839304459521851,
+ "learning_rate": 6.975609756097562e-05,
+ "loss": 1.2353,
+ "step": 143
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.20826913231380875,
+ "learning_rate": 7.02439024390244e-05,
+ "loss": 1.1901,
+ "step": 144
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.20788894913361589,
+ "learning_rate": 7.073170731707318e-05,
+ "loss": 1.2577,
+ "step": 145
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.18420055842301297,
+ "learning_rate": 7.121951219512195e-05,
+ "loss": 1.1393,
+ "step": 146
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.19903048468685589,
+ "learning_rate": 7.170731707317073e-05,
+ "loss": 1.2321,
+ "step": 147
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.19074116314985748,
+ "learning_rate": 7.219512195121952e-05,
+ "loss": 1.1912,
+ "step": 148
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.2353816469403903,
+ "learning_rate": 7.26829268292683e-05,
+ "loss": 1.28,
+ "step": 149
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.21634875684769345,
+ "learning_rate": 7.317073170731708e-05,
+ "loss": 1.3312,
+ "step": 150
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.18290969006743918,
+ "learning_rate": 7.365853658536587e-05,
+ "loss": 1.2214,
+ "step": 151
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.18484243897545208,
+ "learning_rate": 7.414634146341465e-05,
+ "loss": 1.1895,
+ "step": 152
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.21882343112978872,
+ "learning_rate": 7.463414634146342e-05,
+ "loss": 1.2219,
+ "step": 153
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.19868284379241205,
+ "learning_rate": 7.51219512195122e-05,
+ "loss": 1.2176,
+ "step": 154
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.20912516312950613,
+ "learning_rate": 7.560975609756097e-05,
+ "loss": 1.242,
+ "step": 155
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.23811880045549916,
+ "learning_rate": 7.609756097560976e-05,
+ "loss": 1.2838,
+ "step": 156
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.19511077122033713,
+ "learning_rate": 7.658536585365854e-05,
+ "loss": 1.1594,
+ "step": 157
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.20094129399534238,
+ "learning_rate": 7.707317073170732e-05,
+ "loss": 1.2966,
+ "step": 158
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.19366245038292418,
+ "learning_rate": 7.75609756097561e-05,
+ "loss": 1.2246,
+ "step": 159
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.19409570223867306,
+ "learning_rate": 7.804878048780489e-05,
+ "loss": 1.2312,
+ "step": 160
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.2087258457033805,
+ "learning_rate": 7.853658536585366e-05,
+ "loss": 1.2169,
+ "step": 161
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.18765223996270428,
+ "learning_rate": 7.902439024390244e-05,
+ "loss": 1.2383,
+ "step": 162
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.20734180224147242,
+ "learning_rate": 7.951219512195122e-05,
+ "loss": 1.2587,
+ "step": 163
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.24690929540287834,
+ "learning_rate": 8e-05,
+ "loss": 1.1951,
+ "step": 164
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.2003538797619543,
+ "learning_rate": 7.999990914797545e-05,
+ "loss": 1.1982,
+ "step": 165
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.22469075613510484,
+ "learning_rate": 7.99996365923145e-05,
+ "loss": 1.2355,
+ "step": 166
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.21870100788336058,
+ "learning_rate": 7.999918233425526e-05,
+ "loss": 1.1103,
+ "step": 167
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.20939989594131886,
+ "learning_rate": 7.999854637586122e-05,
+ "loss": 1.1966,
+ "step": 168
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.43108211416237796,
+ "learning_rate": 7.999772872002132e-05,
+ "loss": 1.2882,
+ "step": 169
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.27045413432174487,
+ "learning_rate": 7.999672937044984e-05,
+ "loss": 1.2399,
+ "step": 170
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.19700483036740515,
+ "learning_rate": 7.999554833168642e-05,
+ "loss": 1.202,
+ "step": 171
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.3335979493370708,
+ "learning_rate": 7.999418560909604e-05,
+ "loss": 1.1995,
+ "step": 172
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.3165803974474567,
+ "learning_rate": 7.999264120886902e-05,
+ "loss": 1.1569,
+ "step": 173
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.1951699080346223,
+ "learning_rate": 7.999091513802093e-05,
+ "loss": 1.1778,
+ "step": 174
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.2087559121749787,
+ "learning_rate": 7.998900740439265e-05,
+ "loss": 1.1736,
+ "step": 175
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.20345180977460478,
+ "learning_rate": 7.998691801665024e-05,
+ "loss": 1.2281,
+ "step": 176
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.24617644827252333,
+ "learning_rate": 7.998464698428495e-05,
+ "loss": 1.2072,
+ "step": 177
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.2469050959356265,
+ "learning_rate": 7.998219431761318e-05,
+ "loss": 1.2242,
+ "step": 178
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.19529317748460623,
+ "learning_rate": 7.997956002777642e-05,
+ "loss": 1.2567,
+ "step": 179
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.19048389491381376,
+ "learning_rate": 7.99767441267412e-05,
+ "loss": 1.2982,
+ "step": 180
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.2085799116493225,
+ "learning_rate": 7.997374662729904e-05,
+ "loss": 1.1254,
+ "step": 181
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.20636853256378995,
+ "learning_rate": 7.997056754306636e-05,
+ "loss": 1.2435,
+ "step": 182
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.20590016382290252,
+ "learning_rate": 7.99672068884845e-05,
+ "loss": 1.2658,
+ "step": 183
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.1931166169764433,
+ "learning_rate": 7.996366467881955e-05,
+ "loss": 1.1637,
+ "step": 184
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.18873318157988098,
+ "learning_rate": 7.995994093016237e-05,
+ "loss": 1.1335,
+ "step": 185
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.19210254625199108,
+ "learning_rate": 7.995603565942846e-05,
+ "loss": 1.1928,
+ "step": 186
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.2130986479765664,
+ "learning_rate": 7.995194888435792e-05,
+ "loss": 1.2158,
+ "step": 187
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.22003854501814088,
+ "learning_rate": 7.994768062351532e-05,
+ "loss": 1.2288,
+ "step": 188
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.20330803191993058,
+ "learning_rate": 7.994323089628968e-05,
+ "loss": 1.2426,
+ "step": 189
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.20567314642208634,
+ "learning_rate": 7.993859972289434e-05,
+ "loss": 1.2649,
+ "step": 190
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.21556663727342962,
+ "learning_rate": 7.993378712436686e-05,
+ "loss": 1.2545,
+ "step": 191
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.20309165469109888,
+ "learning_rate": 7.992879312256897e-05,
+ "loss": 1.3338,
+ "step": 192
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.19574356669421325,
+ "learning_rate": 7.992361774018641e-05,
+ "loss": 1.278,
+ "step": 193
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.2763613746722313,
+ "learning_rate": 7.991826100072891e-05,
+ "loss": 1.2571,
+ "step": 194
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.19346552479915102,
+ "learning_rate": 7.991272292852996e-05,
+ "loss": 1.2027,
+ "step": 195
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.2281167812123908,
+ "learning_rate": 7.990700354874683e-05,
+ "loss": 1.2586,
+ "step": 196
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.19699013712137542,
+ "learning_rate": 7.990110288736042e-05,
+ "loss": 1.1371,
+ "step": 197
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.21768209981475933,
+ "learning_rate": 7.989502097117503e-05,
+ "loss": 1.2522,
+ "step": 198
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.21335427847754582,
+ "learning_rate": 7.988875782781838e-05,
+ "loss": 1.2437,
+ "step": 199
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.21856710629066897,
+ "learning_rate": 7.988231348574147e-05,
+ "loss": 1.2135,
+ "step": 200
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.20482062658774797,
+ "learning_rate": 7.987568797421836e-05,
+ "loss": 1.1755,
+ "step": 201
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.2017756813960897,
+ "learning_rate": 7.986888132334608e-05,
+ "loss": 1.1699,
+ "step": 202
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.20496443848153809,
+ "learning_rate": 7.986189356404458e-05,
+ "loss": 1.2125,
+ "step": 203
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.2134603800558358,
+ "learning_rate": 7.985472472805643e-05,
+ "loss": 1.2391,
+ "step": 204
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.2364175573420861,
+ "learning_rate": 7.98473748479468e-05,
+ "loss": 1.2384,
+ "step": 205
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.1872419861598724,
+ "learning_rate": 7.983984395710326e-05,
+ "loss": 1.1457,
+ "step": 206
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.28222194007095774,
+ "learning_rate": 7.983213208973566e-05,
+ "loss": 1.2952,
+ "step": 207
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.1916094851162064,
+ "learning_rate": 7.982423928087593e-05,
+ "loss": 1.1763,
+ "step": 208
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.18446245256166657,
+ "learning_rate": 7.981616556637795e-05,
+ "loss": 1.1863,
+ "step": 209
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.195191961022491,
+ "learning_rate": 7.980791098291737e-05,
+ "loss": 1.2036,
+ "step": 210
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.2652439657825496,
+ "learning_rate": 7.979947556799151e-05,
+ "loss": 1.2834,
+ "step": 211
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.24308438957843412,
+ "learning_rate": 7.979085935991906e-05,
+ "loss": 1.234,
+ "step": 212
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.21294701043622016,
+ "learning_rate": 7.978206239784004e-05,
+ "loss": 1.3006,
+ "step": 213
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.25809277041859524,
+ "learning_rate": 7.977308472171553e-05,
+ "loss": 1.2272,
+ "step": 214
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.193463860107294,
+ "learning_rate": 7.976392637232754e-05,
+ "loss": 1.2295,
+ "step": 215
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.2150023760609626,
+ "learning_rate": 7.975458739127877e-05,
+ "loss": 1.2135,
+ "step": 216
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.22590495955605894,
+ "learning_rate": 7.974506782099253e-05,
+ "loss": 1.2532,
+ "step": 217
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.21023744668403702,
+ "learning_rate": 7.973536770471242e-05,
+ "loss": 1.2472,
+ "step": 218
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.2345749799511543,
+ "learning_rate": 7.972548708650218e-05,
+ "loss": 1.1791,
+ "step": 219
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.2158876734005217,
+ "learning_rate": 7.971542601124553e-05,
+ "loss": 1.2483,
+ "step": 220
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.29455339949432446,
+ "learning_rate": 7.970518452464593e-05,
+ "loss": 1.2894,
+ "step": 221
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.23983708730626851,
+ "learning_rate": 7.969476267322636e-05,
+ "loss": 1.271,
+ "step": 222
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.1922400905426158,
+ "learning_rate": 7.968416050432912e-05,
+ "loss": 1.2139,
+ "step": 223
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.2238136844422931,
+ "learning_rate": 7.967337806611568e-05,
+ "loss": 1.2655,
+ "step": 224
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.21230292828267672,
+ "learning_rate": 7.966241540756631e-05,
+ "loss": 1.2406,
+ "step": 225
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.26656119419070456,
+ "learning_rate": 7.965127257848004e-05,
+ "loss": 1.2595,
+ "step": 226
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.22381385502992684,
+ "learning_rate": 7.963994962947426e-05,
+ "loss": 1.1737,
+ "step": 227
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20056702203994298,
+ "learning_rate": 7.962844661198462e-05,
+ "loss": 1.1969,
+ "step": 228
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20148701321526885,
+ "learning_rate": 7.961676357826478e-05,
+ "loss": 1.2151,
+ "step": 229
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20034834807028637,
+ "learning_rate": 7.960490058138604e-05,
+ "loss": 1.1455,
+ "step": 230
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.21050838521846033,
+ "learning_rate": 7.959285767523732e-05,
+ "loss": 1.2223,
+ "step": 231
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20904772138969777,
+ "learning_rate": 7.95806349145247e-05,
+ "loss": 1.2534,
+ "step": 232
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.20307877304792957,
+ "learning_rate": 7.956823235477134e-05,
+ "loss": 1.1352,
+ "step": 233
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.20501105270897094,
+ "learning_rate": 7.95556500523171e-05,
+ "loss": 1.2031,
+ "step": 234
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.19800586972038586,
+ "learning_rate": 7.954288806431838e-05,
+ "loss": 1.2567,
+ "step": 235
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.2175102450594135,
+ "learning_rate": 7.952994644874777e-05,
+ "loss": 1.2538,
+ "step": 236
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.22698189300067595,
+ "learning_rate": 7.951682526439391e-05,
+ "loss": 1.3088,
+ "step": 237
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.19208392014975315,
+ "learning_rate": 7.950352457086109e-05,
+ "loss": 1.2336,
+ "step": 238
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.27004086334319655,
+ "learning_rate": 7.949004442856905e-05,
+ "loss": 1.2012,
+ "step": 239
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.23420974954538043,
+ "learning_rate": 7.947638489875272e-05,
+ "loss": 1.2244,
+ "step": 240
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.20514399124802024,
+ "learning_rate": 7.946254604346186e-05,
+ "loss": 1.2548,
+ "step": 241
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.19334973602372896,
+ "learning_rate": 7.944852792556092e-05,
+ "loss": 1.2104,
+ "step": 242
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.1992640714537956,
+ "learning_rate": 7.943433060872858e-05,
+ "loss": 1.2628,
+ "step": 243
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.203284617090413,
+ "learning_rate": 7.941995415745761e-05,
+ "loss": 1.2002,
+ "step": 244
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.22795306969682058,
+ "learning_rate": 7.94053986370545e-05,
+ "loss": 1.2215,
+ "step": 245
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.20789041346838505,
+ "learning_rate": 7.939066411363915e-05,
+ "loss": 1.0998,
+ "step": 246
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.22354868884742066,
+ "learning_rate": 7.937575065414464e-05,
+ "loss": 1.2564,
+ "step": 247
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.21176392726647736,
+ "learning_rate": 7.936065832631687e-05,
+ "loss": 1.2816,
+ "step": 248
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.19967179557235587,
+ "learning_rate": 7.934538719871427e-05,
+ "loss": 1.1961,
+ "step": 249
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.210819577350627,
+ "learning_rate": 7.932993734070747e-05,
+ "loss": 1.2167,
+ "step": 250
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.21537794551756187,
+ "learning_rate": 7.931430882247903e-05,
+ "loss": 1.2341,
+ "step": 251
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.22850872387256574,
+ "learning_rate": 7.929850171502304e-05,
+ "loss": 1.1686,
+ "step": 252
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.22380366415076383,
+ "learning_rate": 7.928251609014493e-05,
+ "loss": 1.1462,
+ "step": 253
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.22426923149036065,
+ "learning_rate": 7.926635202046102e-05,
+ "loss": 1.1792,
+ "step": 254
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.42082703321103965,
+ "learning_rate": 7.925000957939822e-05,
+ "loss": 1.2718,
+ "step": 255
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.2235432774854074,
+ "learning_rate": 7.92334888411937e-05,
+ "loss": 1.2598,
+ "step": 256
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.281644028934108,
+ "learning_rate": 7.92167898808946e-05,
+ "loss": 1.2205,
+ "step": 257
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.2037705143888748,
+ "learning_rate": 7.919991277435763e-05,
+ "loss": 1.1737,
+ "step": 258
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.20917419230028977,
+ "learning_rate": 7.918285759824879e-05,
+ "loss": 1.2035,
+ "step": 259
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.20510847570635518,
+ "learning_rate": 7.916562443004292e-05,
+ "loss": 1.2135,
+ "step": 260
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.25172483071092466,
+ "learning_rate": 7.914821334802342e-05,
+ "loss": 1.2218,
+ "step": 261
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.21102706700634313,
+ "learning_rate": 7.91306244312819e-05,
+ "loss": 1.1738,
+ "step": 262
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.22626060872645815,
+ "learning_rate": 7.911285775971781e-05,
+ "loss": 1.238,
+ "step": 263
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.22448567539778486,
+ "learning_rate": 7.909491341403805e-05,
+ "loss": 1.2404,
+ "step": 264
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.2019099786139193,
+ "learning_rate": 7.907679147575661e-05,
+ "loss": 1.213,
+ "step": 265
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.24307234839096267,
+ "learning_rate": 7.905849202719422e-05,
+ "loss": 1.2322,
+ "step": 266
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.19801890521743487,
+ "learning_rate": 7.904001515147802e-05,
+ "loss": 1.2448,
+ "step": 267
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.2102742273575385,
+ "learning_rate": 7.902136093254106e-05,
+ "loss": 1.1657,
+ "step": 268
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.2173464476815016,
+ "learning_rate": 7.900252945512201e-05,
+ "loss": 1.2549,
+ "step": 269
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.20957275458699595,
+ "learning_rate": 7.898352080476479e-05,
+ "loss": 1.2536,
+ "step": 270
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.20691966388952363,
+ "learning_rate": 7.896433506781811e-05,
+ "loss": 1.2661,
+ "step": 271
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.2276662275112648,
+ "learning_rate": 7.894497233143509e-05,
+ "loss": 1.2409,
+ "step": 272
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.23854109569301263,
+ "learning_rate": 7.892543268357297e-05,
+ "loss": 1.2681,
+ "step": 273
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.2233864156677627,
+ "learning_rate": 7.890571621299252e-05,
+ "loss": 1.1687,
+ "step": 274
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.20114129147925475,
+ "learning_rate": 7.888582300925787e-05,
+ "loss": 1.2184,
+ "step": 275
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.2154654670569462,
+ "learning_rate": 7.886575316273586e-05,
+ "loss": 1.1982,
+ "step": 276
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.2292982209343639,
+ "learning_rate": 7.884550676459583e-05,
+ "loss": 1.2129,
+ "step": 277
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.21302713135229548,
+ "learning_rate": 7.882508390680908e-05,
+ "loss": 1.1605,
+ "step": 278
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.2123661020671048,
+ "learning_rate": 7.88044846821485e-05,
+ "loss": 1.2308,
+ "step": 279
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.2080577410800404,
+ "learning_rate": 7.878370918418818e-05,
+ "loss": 1.2195,
+ "step": 280
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.19663901881127385,
+ "learning_rate": 7.876275750730289e-05,
+ "loss": 1.1591,
+ "step": 281
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.20534502031312163,
+ "learning_rate": 7.874162974666776e-05,
+ "loss": 1.2664,
+ "step": 282
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.23240445399513837,
+ "learning_rate": 7.872032599825779e-05,
+ "loss": 1.2151,
+ "step": 283
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.2672527316717507,
+ "learning_rate": 7.86988463588474e-05,
+ "loss": 1.2406,
+ "step": 284
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.19893903058743695,
+ "learning_rate": 7.867719092601003e-05,
+ "loss": 1.1291,
+ "step": 285
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.33275268109930917,
+ "learning_rate": 7.865535979811768e-05,
+ "loss": 1.1406,
+ "step": 286
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.2373619455690358,
+ "learning_rate": 7.863335307434045e-05,
+ "loss": 1.2799,
+ "step": 287
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.263235735390858,
+ "learning_rate": 7.861117085464612e-05,
+ "loss": 1.2415,
+ "step": 288
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.25884281780784324,
+ "learning_rate": 7.858881323979965e-05,
+ "loss": 1.3919,
+ "step": 289
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.25426288332255736,
+ "learning_rate": 7.85662803313628e-05,
+ "loss": 1.174,
+ "step": 290
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.26655405527881243,
+ "learning_rate": 7.854357223169356e-05,
+ "loss": 1.2806,
+ "step": 291
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.20909844432349833,
+ "learning_rate": 7.852068904394579e-05,
+ "loss": 1.2627,
+ "step": 292
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.21307115068935759,
+ "learning_rate": 7.849763087206866e-05,
+ "loss": 1.1879,
+ "step": 293
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.25009949471398946,
+ "learning_rate": 7.847439782080628e-05,
+ "loss": 1.2881,
+ "step": 294
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.20960783418679174,
+ "learning_rate": 7.845098999569712e-05,
+ "loss": 1.2723,
+ "step": 295
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.24968832437925104,
+ "learning_rate": 7.842740750307362e-05,
+ "loss": 1.2029,
+ "step": 296
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.22981196585125677,
+ "learning_rate": 7.84036504500616e-05,
+ "loss": 1.1695,
+ "step": 297
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.2320606844751365,
+ "learning_rate": 7.837971894457991e-05,
+ "loss": 1.2317,
+ "step": 298
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.23051459673906124,
+ "learning_rate": 7.835561309533981e-05,
+ "loss": 1.2046,
+ "step": 299
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.2510027231060586,
+ "learning_rate": 7.833133301184457e-05,
+ "loss": 1.199,
+ "step": 300
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.23601180466018787,
+ "learning_rate": 7.830687880438895e-05,
+ "loss": 1.1755,
+ "step": 301
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.24740820934385369,
+ "learning_rate": 7.828225058405864e-05,
+ "loss": 1.2054,
+ "step": 302
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.23065372979111173,
+ "learning_rate": 7.825744846272984e-05,
+ "loss": 1.2066,
+ "step": 303
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.22385077334838213,
+ "learning_rate": 7.823247255306866e-05,
+ "loss": 1.2147,
+ "step": 304
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.42981213948386104,
+ "learning_rate": 7.820732296853074e-05,
+ "loss": 1.2314,
+ "step": 305
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.21122844902751076,
+ "learning_rate": 7.818199982336058e-05,
+ "loss": 1.1462,
+ "step": 306
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.23374869692118933,
+ "learning_rate": 7.815650323259117e-05,
+ "loss": 1.2051,
+ "step": 307
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.21662363795962128,
+ "learning_rate": 7.813083331204332e-05,
+ "loss": 1.1575,
+ "step": 308
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.2088315773384112,
+ "learning_rate": 7.810499017832526e-05,
+ "loss": 1.1316,
+ "step": 309
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.2095238410730976,
+ "learning_rate": 7.807897394883203e-05,
+ "loss": 1.2087,
+ "step": 310
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.22672932127256515,
+ "learning_rate": 7.805278474174499e-05,
+ "loss": 1.2512,
+ "step": 311
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.21873052340922736,
+ "learning_rate": 7.802642267603126e-05,
+ "loss": 1.1909,
+ "step": 312
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.219814521916342,
+ "learning_rate": 7.79998878714432e-05,
+ "loss": 1.1669,
+ "step": 313
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.3049426027257317,
+ "learning_rate": 7.797318044851786e-05,
+ "loss": 1.1797,
+ "step": 314
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.22309435690065985,
+ "learning_rate": 7.794630052857638e-05,
+ "loss": 1.1417,
+ "step": 315
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.3891885169154885,
+ "learning_rate": 7.791924823372354e-05,
+ "loss": 1.2369,
+ "step": 316
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.24780269452456372,
+ "learning_rate": 7.789202368684711e-05,
+ "loss": 1.2521,
+ "step": 317
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.21660460720269362,
+ "learning_rate": 7.786462701161738e-05,
+ "loss": 1.2151,
+ "step": 318
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.23635409466561857,
+ "learning_rate": 7.783705833248649e-05,
+ "loss": 1.2363,
+ "step": 319
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.2616135839903218,
+ "learning_rate": 7.780931777468797e-05,
+ "loss": 1.2428,
+ "step": 320
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.21461059159245083,
+ "learning_rate": 7.77814054642361e-05,
+ "loss": 1.1434,
+ "step": 321
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.25348824286656163,
+ "learning_rate": 7.775332152792539e-05,
+ "loss": 1.2368,
+ "step": 322
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.22275034726331247,
+ "learning_rate": 7.772506609332995e-05,
+ "loss": 1.1827,
+ "step": 323
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.25030821228147526,
+ "learning_rate": 7.769663928880298e-05,
+ "loss": 1.2428,
+ "step": 324
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.22251804398745534,
+ "learning_rate": 7.766804124347608e-05,
+ "loss": 1.1889,
+ "step": 325
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.23381455520411995,
+ "learning_rate": 7.763927208725879e-05,
+ "loss": 1.2115,
+ "step": 326
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.27341902651946226,
+ "learning_rate": 7.761033195083791e-05,
+ "loss": 1.2535,
+ "step": 327
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.24862471659814522,
+ "learning_rate": 7.758122096567694e-05,
+ "loss": 1.2128,
+ "step": 328
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.2251357082045494,
+ "learning_rate": 7.755193926401547e-05,
+ "loss": 1.2334,
+ "step": 329
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.3173274941622932,
+ "learning_rate": 7.752248697886857e-05,
+ "loss": 1.226,
+ "step": 330
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.23056440717672175,
+ "learning_rate": 7.74928642440263e-05,
+ "loss": 1.2339,
+ "step": 331
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2801507500859342,
+ "learning_rate": 7.746307119405286e-05,
+ "loss": 1.287,
+ "step": 332
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2267818430426272,
+ "learning_rate": 7.743310796428622e-05,
+ "loss": 1.1916,
+ "step": 333
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2777329160365585,
+ "learning_rate": 7.74029746908374e-05,
+ "loss": 1.252,
+ "step": 334
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.25289169762353,
+ "learning_rate": 7.737267151058983e-05,
+ "loss": 1.2153,
+ "step": 335
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2424670686901653,
+ "learning_rate": 7.734219856119875e-05,
+ "loss": 1.2227,
+ "step": 336
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.22747092217441645,
+ "learning_rate": 7.731155598109067e-05,
+ "loss": 1.19,
+ "step": 337
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.2307810940100189,
+ "learning_rate": 7.728074390946257e-05,
+ "loss": 1.1818,
+ "step": 338
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.2583402574655623,
+ "learning_rate": 7.724976248628142e-05,
+ "loss": 1.1608,
+ "step": 339
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.22140209760890694,
+ "learning_rate": 7.721861185228347e-05,
+ "loss": 1.1245,
+ "step": 340
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.25859310758244686,
+ "learning_rate": 7.718729214897362e-05,
+ "loss": 1.2247,
+ "step": 341
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.26371179531372124,
+ "learning_rate": 7.715580351862482e-05,
+ "loss": 1.2128,
+ "step": 342
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.26575541302851047,
+ "learning_rate": 7.712414610427733e-05,
+ "loss": 1.2443,
+ "step": 343
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.269978305197599,
+ "learning_rate": 7.709232004973816e-05,
+ "loss": 1.2231,
+ "step": 344
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.26583998705977047,
+ "learning_rate": 7.70603254995804e-05,
+ "loss": 1.2476,
+ "step": 345
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.24256062164066097,
+ "learning_rate": 7.702816259914253e-05,
+ "loss": 1.2901,
+ "step": 346
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.3463123472658915,
+ "learning_rate": 7.699583149452779e-05,
+ "loss": 1.3277,
+ "step": 347
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.2269096590531878,
+ "learning_rate": 7.696333233260345e-05,
+ "loss": 1.2047,
+ "step": 348
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.25136883001050025,
+ "learning_rate": 7.693066526100031e-05,
+ "loss": 1.1619,
+ "step": 349
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.2565112571116145,
+ "learning_rate": 7.68978304281118e-05,
+ "loss": 1.2389,
+ "step": 350
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.22175779550828703,
+ "learning_rate": 7.686482798309349e-05,
+ "loss": 1.2238,
+ "step": 351
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.22588304332216555,
+ "learning_rate": 7.683165807586234e-05,
+ "loss": 1.174,
+ "step": 352
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.24889474296529737,
+ "learning_rate": 7.6798320857096e-05,
+ "loss": 1.2366,
+ "step": 353
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.27339703806525034,
+ "learning_rate": 7.676481647823214e-05,
+ "loss": 1.2356,
+ "step": 354
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.23424666722888365,
+ "learning_rate": 7.673114509146782e-05,
+ "loss": 1.2089,
+ "step": 355
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.27978285392461766,
+ "learning_rate": 7.66973068497587e-05,
+ "loss": 1.2609,
+ "step": 356
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.2509423350138824,
+ "learning_rate": 7.666330190681844e-05,
+ "loss": 1.1777,
+ "step": 357
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.23007730927468031,
+ "learning_rate": 7.662913041711793e-05,
+ "loss": 1.154,
+ "step": 358
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.2438648674953112,
+ "learning_rate": 7.659479253588462e-05,
+ "loss": 1.2257,
+ "step": 359
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.28816093242092233,
+ "learning_rate": 7.65602884191018e-05,
+ "loss": 1.2558,
+ "step": 360
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.24972815300596035,
+ "learning_rate": 7.652561822350793e-05,
+ "loss": 1.2837,
+ "step": 361
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.2543189139697063,
+ "learning_rate": 7.649078210659587e-05,
+ "loss": 1.2193,
+ "step": 362
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.2237937956718952,
+ "learning_rate": 7.645578022661224e-05,
+ "loss": 1.2237,
+ "step": 363
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.29742029408787396,
+ "learning_rate": 7.642061274255657e-05,
+ "loss": 1.2116,
+ "step": 364
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2462883147335493,
+ "learning_rate": 7.638527981418075e-05,
+ "loss": 1.1827,
+ "step": 365
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2647802498907096,
+ "learning_rate": 7.634978160198817e-05,
+ "loss": 1.2739,
+ "step": 366
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.22360398779217264,
+ "learning_rate": 7.631411826723306e-05,
+ "loss": 1.2185,
+ "step": 367
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2635048004593543,
+ "learning_rate": 7.627828997191973e-05,
+ "loss": 1.2317,
+ "step": 368
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2764803449917684,
+ "learning_rate": 7.624229687880184e-05,
+ "loss": 1.1923,
+ "step": 369
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.25724943233414527,
+ "learning_rate": 7.620613915138166e-05,
+ "loss": 1.2218,
+ "step": 370
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.2858318045794755,
+ "learning_rate": 7.61698169539093e-05,
+ "loss": 1.1496,
+ "step": 371
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.23547216647460364,
+ "learning_rate": 7.613333045138206e-05,
+ "loss": 1.1905,
+ "step": 372
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.22984814903684375,
+ "learning_rate": 7.609667980954355e-05,
+ "loss": 1.2009,
+ "step": 373
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.2551903754079084,
+ "learning_rate": 7.605986519488301e-05,
+ "loss": 1.2042,
+ "step": 374
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.2508257410125616,
+ "learning_rate": 7.602288677463457e-05,
+ "loss": 1.2468,
+ "step": 375
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.25324577774935964,
+ "learning_rate": 7.598574471677644e-05,
+ "loss": 1.2603,
+ "step": 376
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.35888776531769967,
+ "learning_rate": 7.59484391900302e-05,
+ "loss": 1.1929,
+ "step": 377
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.22048517191014724,
+ "learning_rate": 7.591097036385994e-05,
+ "loss": 1.1783,
+ "step": 378
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.2781160412746083,
+ "learning_rate": 7.587333840847162e-05,
+ "loss": 1.3397,
+ "step": 379
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.24033046830332258,
+ "learning_rate": 7.583554349481222e-05,
+ "loss": 1.2436,
+ "step": 380
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.26413762380260003,
+ "learning_rate": 7.579758579456893e-05,
+ "loss": 1.1917,
+ "step": 381
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.2390937887338632,
+ "learning_rate": 7.575946548016847e-05,
+ "loss": 1.2186,
+ "step": 382
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.25131263043429275,
+ "learning_rate": 7.572118272477622e-05,
+ "loss": 1.2538,
+ "step": 383
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.223974104870702,
+ "learning_rate": 7.568273770229546e-05,
+ "loss": 1.2165,
+ "step": 384
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.25840356830252875,
+ "learning_rate": 7.564413058736663e-05,
+ "loss": 1.1848,
+ "step": 385
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.2723156683076603,
+ "learning_rate": 7.560536155536641e-05,
+ "loss": 1.1982,
+ "step": 386
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.265687427976889,
+ "learning_rate": 7.556643078240708e-05,
+ "loss": 1.231,
+ "step": 387
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.25152762080976077,
+ "learning_rate": 7.552733844533562e-05,
+ "loss": 1.1974,
+ "step": 388
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.2366049485053541,
+ "learning_rate": 7.548808472173292e-05,
+ "loss": 1.3119,
+ "step": 389
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.22092196577077122,
+ "learning_rate": 7.5448669789913e-05,
+ "loss": 1.195,
+ "step": 390
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.22667521540462374,
+ "learning_rate": 7.540909382892217e-05,
+ "loss": 1.1431,
+ "step": 391
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.25432207282646513,
+ "learning_rate": 7.536935701853823e-05,
+ "loss": 1.2173,
+ "step": 392
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.29950506457923864,
+ "learning_rate": 7.53294595392697e-05,
+ "loss": 1.1962,
+ "step": 393
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.24735689607229913,
+ "learning_rate": 7.528940157235487e-05,
+ "loss": 1.2053,
+ "step": 394
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.24394198607459663,
+ "learning_rate": 7.524918329976114e-05,
+ "loss": 1.1979,
+ "step": 395
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.2630369372689188,
+ "learning_rate": 7.520880490418409e-05,
+ "loss": 1.2111,
+ "step": 396
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.26275028416291457,
+ "learning_rate": 7.516826656904664e-05,
+ "loss": 1.2133,
+ "step": 397
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.23938074620956928,
+ "learning_rate": 7.512756847849831e-05,
+ "loss": 1.1355,
+ "step": 398
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.3724960610098138,
+ "learning_rate": 7.508671081741428e-05,
+ "loss": 1.2572,
+ "step": 399
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.24161685847894723,
+ "learning_rate": 7.504569377139462e-05,
+ "loss": 1.1706,
+ "step": 400
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.26121591322670523,
+ "learning_rate": 7.50045175267634e-05,
+ "loss": 1.2135,
+ "step": 401
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2465579498164775,
+ "learning_rate": 7.496318227056788e-05,
+ "loss": 1.1641,
+ "step": 402
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2556288696122787,
+ "learning_rate": 7.492168819057767e-05,
+ "loss": 1.2939,
+ "step": 403
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.261481216336303,
+ "learning_rate": 7.488003547528382e-05,
+ "loss": 1.2026,
+ "step": 404
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2389415135676362,
+ "learning_rate": 7.483822431389799e-05,
+ "loss": 1.2131,
+ "step": 405
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2559201956627192,
+ "learning_rate": 7.479625489635162e-05,
+ "loss": 1.1246,
+ "step": 406
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.27127932491822604,
+ "learning_rate": 7.475412741329504e-05,
+ "loss": 1.2429,
+ "step": 407
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.27006004008695594,
+ "learning_rate": 7.47118420560966e-05,
+ "loss": 1.2388,
+ "step": 408
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.23716823297200537,
+ "learning_rate": 7.466939901684182e-05,
+ "loss": 1.1264,
+ "step": 409
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.2885373898669248,
+ "learning_rate": 7.462679848833252e-05,
+ "loss": 1.2786,
+ "step": 410
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.49215227598639927,
+ "learning_rate": 7.458404066408588e-05,
+ "loss": 1.2386,
+ "step": 411
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.24235735604947403,
+ "learning_rate": 7.454112573833368e-05,
+ "loss": 1.1423,
+ "step": 412
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.2584614748054343,
+ "learning_rate": 7.449805390602127e-05,
+ "loss": 1.2669,
+ "step": 413
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.23806123085998873,
+ "learning_rate": 7.445482536280684e-05,
+ "loss": 1.1763,
+ "step": 414
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.24459517607786851,
+ "learning_rate": 7.441144030506043e-05,
+ "loss": 1.198,
+ "step": 415
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.25801616402700395,
+ "learning_rate": 7.436789892986304e-05,
+ "loss": 1.2136,
+ "step": 416
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.2814819942392514,
+ "learning_rate": 7.432420143500578e-05,
+ "loss": 1.2398,
+ "step": 417
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.22134709322606153,
+ "learning_rate": 7.428034801898893e-05,
+ "loss": 1.1592,
+ "step": 418
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.2899677536995633,
+ "learning_rate": 7.42363388810211e-05,
+ "loss": 1.2296,
+ "step": 419
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.24005943230262294,
+ "learning_rate": 7.419217422101822e-05,
+ "loss": 1.2223,
+ "step": 420
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.26417562369496167,
+ "learning_rate": 7.414785423960275e-05,
+ "loss": 1.2261,
+ "step": 421
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.2580815883535521,
+ "learning_rate": 7.410337913810271e-05,
+ "loss": 1.2021,
+ "step": 422
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.25242217589496435,
+ "learning_rate": 7.405874911855071e-05,
+ "loss": 1.239,
+ "step": 423
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.21991733999839932,
+ "learning_rate": 7.401396438368315e-05,
+ "loss": 1.1716,
+ "step": 424
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.40116538322720213,
+ "learning_rate": 7.396902513693924e-05,
+ "loss": 1.2773,
+ "step": 425
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.277333939455099,
+ "learning_rate": 7.392393158246002e-05,
+ "loss": 1.2574,
+ "step": 426
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.27146087746385755,
+ "learning_rate": 7.387868392508756e-05,
+ "loss": 1.2243,
+ "step": 427
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.255881055620786,
+ "learning_rate": 7.38332823703639e-05,
+ "loss": 1.223,
+ "step": 428
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.24807364856677255,
+ "learning_rate": 7.378772712453021e-05,
+ "loss": 1.1985,
+ "step": 429
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.25746257617764423,
+ "learning_rate": 7.37420183945258e-05,
+ "loss": 1.2502,
+ "step": 430
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.28851991049982234,
+ "learning_rate": 7.369615638798722e-05,
+ "loss": 1.2535,
+ "step": 431
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.24113389811604363,
+ "learning_rate": 7.365014131324725e-05,
+ "loss": 1.2227,
+ "step": 432
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.2414465151257969,
+ "learning_rate": 7.360397337933405e-05,
+ "loss": 1.1884,
+ "step": 433
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.2735463134699831,
+ "learning_rate": 7.355765279597011e-05,
+ "loss": 1.2756,
+ "step": 434
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.2588437452987293,
+ "learning_rate": 7.351117977357139e-05,
+ "loss": 1.2108,
+ "step": 435
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.26573294117796553,
+ "learning_rate": 7.346455452324629e-05,
+ "loss": 1.1821,
+ "step": 436
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.2555476577827304,
+ "learning_rate": 7.341777725679473e-05,
+ "loss": 1.1937,
+ "step": 437
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.2867704132108098,
+ "learning_rate": 7.337084818670716e-05,
+ "loss": 1.2272,
+ "step": 438
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.27726678115981157,
+ "learning_rate": 7.332376752616367e-05,
+ "loss": 1.2331,
+ "step": 439
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.26955338021079955,
+ "learning_rate": 7.32765354890329e-05,
+ "loss": 1.1731,
+ "step": 440
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.25250321202536524,
+ "learning_rate": 7.322915228987116e-05,
+ "loss": 1.2653,
+ "step": 441
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.24748844179765395,
+ "learning_rate": 7.318161814392143e-05,
+ "loss": 1.24,
+ "step": 442
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.28177805247356325,
+ "learning_rate": 7.313393326711239e-05,
+ "loss": 1.185,
+ "step": 443
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.24093242000396312,
+ "learning_rate": 7.30860978760574e-05,
+ "loss": 1.1994,
+ "step": 444
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.26277803901457075,
+ "learning_rate": 7.30381121880536e-05,
+ "loss": 1.212,
+ "step": 445
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2506524258682433,
+ "learning_rate": 7.298997642108079e-05,
+ "loss": 1.2421,
+ "step": 446
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2840599700015824,
+ "learning_rate": 7.294169079380061e-05,
+ "loss": 1.1818,
+ "step": 447
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.24892184038117549,
+ "learning_rate": 7.289325552555538e-05,
+ "loss": 1.1916,
+ "step": 448
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2700898428541357,
+ "learning_rate": 7.284467083636722e-05,
+ "loss": 1.2517,
+ "step": 449
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2617848546539419,
+ "learning_rate": 7.279593694693698e-05,
+ "loss": 1.2063,
+ "step": 450
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2698278585334131,
+ "learning_rate": 7.274705407864332e-05,
+ "loss": 1.194,
+ "step": 451
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.23678313024953834,
+ "learning_rate": 7.26980224535416e-05,
+ "loss": 1.2349,
+ "step": 452
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.24851875792002978,
+ "learning_rate": 7.264884229436293e-05,
+ "loss": 1.1758,
+ "step": 453
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.24122080121681125,
+ "learning_rate": 7.259951382451318e-05,
+ "loss": 1.1962,
+ "step": 454
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.22741322959884405,
+ "learning_rate": 7.25500372680719e-05,
+ "loss": 1.1702,
+ "step": 455
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.2297475610861458,
+ "learning_rate": 7.250041284979137e-05,
+ "loss": 1.1466,
+ "step": 456
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.3057605989721467,
+ "learning_rate": 7.245064079509553e-05,
+ "loss": 1.246,
+ "step": 457
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.2719638501597136,
+ "learning_rate": 7.240072133007899e-05,
+ "loss": 1.2184,
+ "step": 458
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.2436807816414479,
+ "learning_rate": 7.235065468150593e-05,
+ "loss": 1.2324,
+ "step": 459
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.23436349430255515,
+ "learning_rate": 7.23004410768092e-05,
+ "loss": 1.1813,
+ "step": 460
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.2398940990211377,
+ "learning_rate": 7.22500807440892e-05,
+ "loss": 1.1924,
+ "step": 461
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.2605716625062531,
+ "learning_rate": 7.219957391211281e-05,
+ "loss": 1.182,
+ "step": 462
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.260462524570941,
+ "learning_rate": 7.214892081031244e-05,
+ "loss": 1.2136,
+ "step": 463
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.21979766512306334,
+ "learning_rate": 7.209812166878491e-05,
+ "loss": 1.2066,
+ "step": 464
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.23324453647530663,
+ "learning_rate": 7.204717671829051e-05,
+ "loss": 1.1657,
+ "step": 465
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.2529434935507481,
+ "learning_rate": 7.199608619025177e-05,
+ "loss": 1.2093,
+ "step": 466
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.25371701891720116,
+ "learning_rate": 7.194485031675265e-05,
+ "loss": 1.2225,
+ "step": 467
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.23272423066292103,
+ "learning_rate": 7.189346933053725e-05,
+ "loss": 1.1721,
+ "step": 468
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.25122928735587546,
+ "learning_rate": 7.184194346500892e-05,
+ "loss": 1.2537,
+ "step": 469
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.2159270875490409,
+ "learning_rate": 7.179027295422913e-05,
+ "loss": 1.197,
+ "step": 470
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.2633111059076544,
+ "learning_rate": 7.173845803291636e-05,
+ "loss": 1.1721,
+ "step": 471
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.30555936322098703,
+ "learning_rate": 7.168649893644517e-05,
+ "loss": 1.3011,
+ "step": 472
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.23492670111453726,
+ "learning_rate": 7.163439590084502e-05,
+ "loss": 1.1601,
+ "step": 473
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.26602734263721806,
+ "learning_rate": 7.158214916279923e-05,
+ "loss": 1.2808,
+ "step": 474
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.3182695007856262,
+ "learning_rate": 7.152975895964386e-05,
+ "loss": 1.2967,
+ "step": 475
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.2785021674736721,
+ "learning_rate": 7.147722552936673e-05,
+ "loss": 1.1789,
+ "step": 476
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.279474303138652,
+ "learning_rate": 7.142454911060627e-05,
+ "loss": 1.2596,
+ "step": 477
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.2556980144910755,
+ "learning_rate": 7.137172994265044e-05,
+ "loss": 1.2426,
+ "step": 478
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.3311256331993533,
+ "learning_rate": 7.131876826543565e-05,
+ "loss": 1.2059,
+ "step": 479
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.26467296197775253,
+ "learning_rate": 7.12656643195457e-05,
+ "loss": 1.2482,
+ "step": 480
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.27444885274652553,
+ "learning_rate": 7.121241834621064e-05,
+ "loss": 1.2528,
+ "step": 481
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.2572283861115396,
+ "learning_rate": 7.115903058730567e-05,
+ "loss": 1.1849,
+ "step": 482
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.2677065778235683,
+ "learning_rate": 7.11055012853501e-05,
+ "loss": 1.2011,
+ "step": 483
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.29470622036742816,
+ "learning_rate": 7.105183068350619e-05,
+ "loss": 1.2398,
+ "step": 484
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.27609230248969197,
+ "learning_rate": 7.099801902557811e-05,
+ "loss": 1.2259,
+ "step": 485
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.24248634168099284,
+ "learning_rate": 7.094406655601073e-05,
+ "loss": 1.2282,
+ "step": 486
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.2765941767688746,
+ "learning_rate": 7.088997351988865e-05,
+ "loss": 1.2319,
+ "step": 487
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.29347776909858947,
+ "learning_rate": 7.083574016293493e-05,
+ "loss": 1.1765,
+ "step": 488
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.285370295424537,
+ "learning_rate": 7.078136673151008e-05,
+ "loss": 1.26,
+ "step": 489
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.29408734903836536,
+ "learning_rate": 7.072685347261093e-05,
+ "loss": 1.226,
+ "step": 490
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.27437470239205813,
+ "learning_rate": 7.067220063386947e-05,
+ "loss": 1.1976,
+ "step": 491
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.2680770258777871,
+ "learning_rate": 7.061740846355176e-05,
+ "loss": 1.1915,
+ "step": 492
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.27200362879502954,
+ "learning_rate": 7.056247721055678e-05,
+ "loss": 1.2002,
+ "step": 493
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.2637811092577037,
+ "learning_rate": 7.050740712441528e-05,
+ "loss": 1.287,
+ "step": 494
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.24657959209271266,
+ "learning_rate": 7.045219845528875e-05,
+ "loss": 1.2284,
+ "step": 495
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.25311992110358666,
+ "learning_rate": 7.039685145396812e-05,
+ "loss": 1.1616,
+ "step": 496
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.2564633694193358,
+ "learning_rate": 7.034136637187275e-05,
+ "loss": 1.2067,
+ "step": 497
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.2446797651174144,
+ "learning_rate": 7.028574346104926e-05,
+ "loss": 1.2284,
+ "step": 498
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.2592751463399255,
+ "learning_rate": 7.022998297417034e-05,
+ "loss": 1.2371,
+ "step": 499
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.2500713943206808,
+ "learning_rate": 7.017408516453365e-05,
+ "loss": 1.1061,
+ "step": 500
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.2812266276040743,
+ "learning_rate": 7.011805028606064e-05,
+ "loss": 1.1949,
+ "step": 501
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.298829667668083,
+ "learning_rate": 7.006187859329544e-05,
+ "loss": 1.2313,
+ "step": 502
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.26518768159745104,
+ "learning_rate": 7.000557034140361e-05,
+ "loss": 1.2246,
+ "step": 503
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.3037280360760458,
+ "learning_rate": 6.994912578617113e-05,
+ "loss": 1.1617,
+ "step": 504
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.2726903109255714,
+ "learning_rate": 6.989254518400309e-05,
+ "loss": 1.2415,
+ "step": 505
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.25568082003046966,
+ "learning_rate": 6.98358287919226e-05,
+ "loss": 1.1817,
+ "step": 506
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.25633294893705044,
+ "learning_rate": 6.97789768675696e-05,
+ "loss": 1.2149,
+ "step": 507
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.28291439435087123,
+ "learning_rate": 6.972198966919972e-05,
+ "loss": 1.1578,
+ "step": 508
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.27195184756655516,
+ "learning_rate": 6.966486745568308e-05,
+ "loss": 1.2355,
+ "step": 509
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.239159568376005,
+ "learning_rate": 6.960761048650312e-05,
+ "loss": 1.1688,
+ "step": 510
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.22961475425949177,
+ "learning_rate": 6.955021902175543e-05,
+ "loss": 1.2094,
+ "step": 511
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.27443773600741117,
+ "learning_rate": 6.949269332214651e-05,
+ "loss": 1.2559,
+ "step": 512
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.26230551832002097,
+ "learning_rate": 6.94350336489927e-05,
+ "loss": 1.2121,
+ "step": 513
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.2716742985303849,
+ "learning_rate": 6.937724026421892e-05,
+ "loss": 1.2444,
+ "step": 514
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.2537850139439542,
+ "learning_rate": 6.931931343035742e-05,
+ "loss": 1.1327,
+ "step": 515
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.28599587967496826,
+ "learning_rate": 6.926125341054676e-05,
+ "loss": 1.2236,
+ "step": 516
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.26780654378470103,
+ "learning_rate": 6.920306046853043e-05,
+ "loss": 1.2295,
+ "step": 517
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.23606296888412015,
+ "learning_rate": 6.914473486865577e-05,
+ "loss": 1.1543,
+ "step": 518
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.34976881174240837,
+ "learning_rate": 6.90862768758727e-05,
+ "loss": 1.2067,
+ "step": 519
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.2481257873494882,
+ "learning_rate": 6.902768675573258e-05,
+ "loss": 1.2188,
+ "step": 520
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.2996395778117021,
+ "learning_rate": 6.896896477438699e-05,
+ "loss": 1.2326,
+ "step": 521
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.8839768816333193,
+ "learning_rate": 6.891011119858643e-05,
+ "loss": 1.2435,
+ "step": 522
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.2851882482058998,
+ "learning_rate": 6.885112629567927e-05,
+ "loss": 1.2644,
+ "step": 523
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.2813663482913699,
+ "learning_rate": 6.879201033361035e-05,
+ "loss": 1.2309,
+ "step": 524
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.3257551560135454,
+ "learning_rate": 6.873276358091996e-05,
+ "loss": 1.2755,
+ "step": 525
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.28930479952494365,
+ "learning_rate": 6.867338630674247e-05,
+ "loss": 1.1962,
+ "step": 526
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.3077462996938649,
+ "learning_rate": 6.861387878080511e-05,
+ "loss": 1.2402,
+ "step": 527
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.2848900193452761,
+ "learning_rate": 6.855424127342688e-05,
+ "loss": 1.2748,
+ "step": 528
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.4765938812802202,
+ "learning_rate": 6.849447405551718e-05,
+ "loss": 1.2226,
+ "step": 529
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.53184473292579,
+ "learning_rate": 6.843457739857467e-05,
+ "loss": 1.2347,
+ "step": 530
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.6416239346492343,
+ "learning_rate": 6.837455157468596e-05,
+ "loss": 1.2429,
+ "step": 531
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.3188092712502773,
+ "learning_rate": 6.831439685652442e-05,
+ "loss": 1.216,
+ "step": 532
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.3527495731006385,
+ "learning_rate": 6.825411351734895e-05,
+ "loss": 1.1682,
+ "step": 533
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 0.29603753744741856,
+ "learning_rate": 6.819370183100274e-05,
+ "loss": 1.1434,
+ "step": 534
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 0.5252450389976622,
+ "learning_rate": 6.813316207191198e-05,
+ "loss": 1.1943,
+ "step": 535
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 0.32999419558659937,
+ "learning_rate": 6.807249451508466e-05,
+ "loss": 1.192,
+ "step": 536
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 0.3650175469778724,
+ "learning_rate": 6.801169943610929e-05,
+ "loss": 1.2141,
+ "step": 537
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 1.0643532150783557,
+ "learning_rate": 6.795077711115368e-05,
+ "loss": 1.2253,
+ "step": 538
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.5041310609130145,
+ "learning_rate": 6.788972781696363e-05,
+ "loss": 1.278,
+ "step": 539
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.5123058164360991,
+ "learning_rate": 6.782855183086177e-05,
+ "loss": 1.2231,
+ "step": 540
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.3533015702394419,
+ "learning_rate": 6.776724943074619e-05,
+ "loss": 1.2072,
+ "step": 541
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.30253964625417207,
+ "learning_rate": 6.770582089508927e-05,
+ "loss": 1.1382,
+ "step": 542
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.348991618828202,
+ "learning_rate": 6.764426650293633e-05,
+ "loss": 1.2079,
+ "step": 543
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.46017440578788743,
+ "learning_rate": 6.758258653390444e-05,
+ "loss": 1.1813,
+ "step": 544
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.31962101755594885,
+ "learning_rate": 6.75207812681811e-05,
+ "loss": 1.1339,
+ "step": 545
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.37092024548285923,
+ "learning_rate": 6.745885098652298e-05,
+ "loss": 1.2591,
+ "step": 546
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.32347106450715835,
+ "learning_rate": 6.739679597025466e-05,
+ "loss": 1.2017,
+ "step": 547
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.39250187112342494,
+ "learning_rate": 6.733461650126733e-05,
+ "loss": 1.0933,
+ "step": 548
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.473522452217324,
+ "learning_rate": 6.727231286201752e-05,
+ "loss": 1.1124,
+ "step": 549
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.4809062179622052,
+ "learning_rate": 6.720988533552582e-05,
+ "loss": 1.1585,
+ "step": 550
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.3529662801059162,
+ "learning_rate": 6.714733420537559e-05,
+ "loss": 1.0501,
+ "step": 551
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.5958247214391118,
+ "learning_rate": 6.708465975571168e-05,
+ "loss": 1.1086,
+ "step": 552
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.5341364205022454,
+ "learning_rate": 6.70218622712391e-05,
+ "loss": 1.0518,
+ "step": 553
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.3601805724462006,
+ "learning_rate": 6.695894203722181e-05,
+ "loss": 1.1779,
+ "step": 554
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.43410190338280613,
+ "learning_rate": 6.68958993394813e-05,
+ "loss": 1.093,
+ "step": 555
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.46217742572873594,
+ "learning_rate": 6.683273446439546e-05,
+ "loss": 1.0117,
+ "step": 556
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.8591682373623357,
+ "learning_rate": 6.676944769889708e-05,
+ "loss": 1.1002,
+ "step": 557
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.7383229487622726,
+ "learning_rate": 6.670603933047272e-05,
+ "loss": 1.0779,
+ "step": 558
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.5965305891207813,
+ "learning_rate": 6.664250964716131e-05,
+ "loss": 1.0889,
+ "step": 559
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.6030858606684543,
+ "learning_rate": 6.657885893755288e-05,
+ "loss": 1.0982,
+ "step": 560
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.4644510682398409,
+ "learning_rate": 6.65150874907872e-05,
+ "loss": 1.1004,
+ "step": 561
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.43943285132452564,
+ "learning_rate": 6.645119559655254e-05,
+ "loss": 1.0536,
+ "step": 562
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.4456395978600012,
+ "learning_rate": 6.638718354508427e-05,
+ "loss": 1.0733,
+ "step": 563
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.3303824433217466,
+ "learning_rate": 6.632305162716365e-05,
+ "loss": 1.0552,
+ "step": 564
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.3617704823170143,
+ "learning_rate": 6.62588001341164e-05,
+ "loss": 1.1092,
+ "step": 565
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.4465013349903427,
+ "learning_rate": 6.619442935781141e-05,
+ "loss": 1.0781,
+ "step": 566
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.48516780613791277,
+ "learning_rate": 6.612993959065947e-05,
+ "loss": 1.0686,
+ "step": 567
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.38867820318536633,
+ "learning_rate": 6.606533112561186e-05,
+ "loss": 1.1215,
+ "step": 568
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.38566119820378336,
+ "learning_rate": 6.600060425615907e-05,
+ "loss": 1.1213,
+ "step": 569
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.35534855445058544,
+ "learning_rate": 6.593575927632947e-05,
+ "loss": 1.0955,
+ "step": 570
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.38124406233349717,
+ "learning_rate": 6.587079648068795e-05,
+ "loss": 1.0659,
+ "step": 571
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.454750160923548,
+ "learning_rate": 6.580571616433457e-05,
+ "loss": 1.1149,
+ "step": 572
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.35353190088025255,
+ "learning_rate": 6.574051862290325e-05,
+ "loss": 1.0388,
+ "step": 573
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.3249395594793626,
+ "learning_rate": 6.567520415256045e-05,
+ "loss": 1.0784,
+ "step": 574
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.40078898818247227,
+ "learning_rate": 6.560977305000375e-05,
+ "loss": 1.0859,
+ "step": 575
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.4115264795060035,
+ "learning_rate": 6.554422561246054e-05,
+ "loss": 1.1828,
+ "step": 576
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.30090229228069215,
+ "learning_rate": 6.54785621376867e-05,
+ "loss": 1.0901,
+ "step": 577
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.28827860350299206,
+ "learning_rate": 6.541278292396523e-05,
+ "loss": 1.0277,
+ "step": 578
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.34690404488996757,
+ "learning_rate": 6.534688827010484e-05,
+ "loss": 1.048,
+ "step": 579
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.29943113556644785,
+ "learning_rate": 6.528087847543867e-05,
+ "loss": 1.0646,
+ "step": 580
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.37318202575874415,
+ "learning_rate": 6.521475383982291e-05,
+ "loss": 1.1091,
+ "step": 581
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.3049663659203959,
+ "learning_rate": 6.51485146636354e-05,
+ "loss": 1.0552,
+ "step": 582
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.3342407867509692,
+ "learning_rate": 6.508216124777431e-05,
+ "loss": 1.2227,
+ "step": 583
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.3348396047855952,
+ "learning_rate": 6.501569389365674e-05,
+ "loss": 1.0861,
+ "step": 584
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.30951429367513383,
+ "learning_rate": 6.494911290321737e-05,
+ "loss": 1.0461,
+ "step": 585
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.33898401361064606,
+ "learning_rate": 6.488241857890711e-05,
+ "loss": 1.0854,
+ "step": 586
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.4901462068263497,
+ "learning_rate": 6.481561122369164e-05,
+ "loss": 1.1012,
+ "step": 587
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.3179574879809652,
+ "learning_rate": 6.474869114105018e-05,
+ "loss": 1.0451,
+ "step": 588
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.32159328915060714,
+ "learning_rate": 6.468165863497395e-05,
+ "loss": 1.0458,
+ "step": 589
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.36462235008537297,
+ "learning_rate": 6.461451400996491e-05,
+ "loss": 1.1247,
+ "step": 590
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.5373862753611778,
+ "learning_rate": 6.454725757103432e-05,
+ "loss": 1.0542,
+ "step": 591
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.3160409270291303,
+ "learning_rate": 6.447988962370133e-05,
+ "loss": 1.0829,
+ "step": 592
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.390452102978435,
+ "learning_rate": 6.441241047399169e-05,
+ "loss": 1.192,
+ "step": 593
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.3802122712014928,
+ "learning_rate": 6.434482042843627e-05,
+ "loss": 1.1153,
+ "step": 594
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.4081584328242501,
+ "learning_rate": 6.427711979406966e-05,
+ "loss": 1.1635,
+ "step": 595
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.3791962989638633,
+ "learning_rate": 6.420930887842889e-05,
+ "loss": 1.1581,
+ "step": 596
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.33239440056484193,
+ "learning_rate": 6.414138798955189e-05,
+ "loss": 1.0926,
+ "step": 597
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.3279881540815014,
+ "learning_rate": 6.407335743597616e-05,
+ "loss": 1.1386,
+ "step": 598
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.30309644763750837,
+ "learning_rate": 6.40052175267374e-05,
+ "loss": 1.0523,
+ "step": 599
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.3349097308403333,
+ "learning_rate": 6.393696857136801e-05,
+ "loss": 1.0815,
+ "step": 600
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.3288227593556618,
+ "learning_rate": 6.386861087989581e-05,
+ "loss": 1.015,
+ "step": 601
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.36685586740843157,
+ "learning_rate": 6.380014476284255e-05,
+ "loss": 1.1232,
+ "step": 602
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.3620977714204643,
+ "learning_rate": 6.373157053122243e-05,
+ "loss": 1.1138,
+ "step": 603
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.3130587018197183,
+ "learning_rate": 6.366288849654091e-05,
+ "loss": 1.1255,
+ "step": 604
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.3602737087072766,
+ "learning_rate": 6.359409897079303e-05,
+ "loss": 1.0282,
+ "step": 605
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.31168852571991945,
+ "learning_rate": 6.352520226646222e-05,
+ "loss": 1.0779,
+ "step": 606
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.3516045580189353,
+ "learning_rate": 6.345619869651871e-05,
+ "loss": 1.1028,
+ "step": 607
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.3231857927563657,
+ "learning_rate": 6.33870885744182e-05,
+ "loss": 1.1202,
+ "step": 608
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.30205205129701157,
+ "learning_rate": 6.331787221410041e-05,
+ "loss": 1.1369,
+ "step": 609
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.3198359813888166,
+ "learning_rate": 6.32485499299877e-05,
+ "loss": 1.1763,
+ "step": 610
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.3128641370321787,
+ "learning_rate": 6.31791220369835e-05,
+ "loss": 1.0223,
+ "step": 611
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.2989105616213649,
+ "learning_rate": 6.31095888504711e-05,
+ "loss": 1.0358,
+ "step": 612
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.3103537906853337,
+ "learning_rate": 6.303995068631203e-05,
+ "loss": 1.1261,
+ "step": 613
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.28598715532508207,
+ "learning_rate": 6.297020786084467e-05,
+ "loss": 1.0629,
+ "step": 614
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.29809789918093255,
+ "learning_rate": 6.290036069088288e-05,
+ "loss": 1.035,
+ "step": 615
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.33765270252261453,
+ "learning_rate": 6.283040949371451e-05,
+ "loss": 1.1221,
+ "step": 616
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.3424617501293415,
+ "learning_rate": 6.276035458709993e-05,
+ "loss": 1.155,
+ "step": 617
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.3799189737987811,
+ "learning_rate": 6.269019628927067e-05,
+ "loss": 1.0701,
+ "step": 618
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.3358898935253196,
+ "learning_rate": 6.261993491892791e-05,
+ "loss": 1.1649,
+ "step": 619
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.31569979424117356,
+ "learning_rate": 6.254957079524099e-05,
+ "loss": 1.0633,
+ "step": 620
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.3002168156888237,
+ "learning_rate": 6.247910423784609e-05,
+ "loss": 1.0846,
+ "step": 621
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.3097238823450595,
+ "learning_rate": 6.24085355668447e-05,
+ "loss": 1.0808,
+ "step": 622
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.3120312761417578,
+ "learning_rate": 6.233786510280212e-05,
+ "loss": 1.0142,
+ "step": 623
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.3335343015064923,
+ "learning_rate": 6.22670931667461e-05,
+ "loss": 1.0674,
+ "step": 624
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.3234062304634526,
+ "learning_rate": 6.219622008016533e-05,
+ "loss": 1.0981,
+ "step": 625
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.32152678786547273,
+ "learning_rate": 6.212524616500798e-05,
+ "loss": 1.0244,
+ "step": 626
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.39031977608147594,
+ "learning_rate": 6.205417174368023e-05,
+ "loss": 1.1205,
+ "step": 627
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.3806189090017157,
+ "learning_rate": 6.198299713904485e-05,
+ "loss": 1.1134,
+ "step": 628
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.2978349276971668,
+ "learning_rate": 6.191172267441967e-05,
+ "loss": 1.0088,
+ "step": 629
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.3190354077382501,
+ "learning_rate": 6.184034867357617e-05,
+ "loss": 1.108,
+ "step": 630
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.32633048665038994,
+ "learning_rate": 6.176887546073797e-05,
+ "loss": 1.0825,
+ "step": 631
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.3428026413020903,
+ "learning_rate": 6.169730336057939e-05,
+ "loss": 1.0765,
+ "step": 632
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.3475737151929015,
+ "learning_rate": 6.162563269822391e-05,
+ "loss": 1.0693,
+ "step": 633
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.3870252154591392,
+ "learning_rate": 6.15538637992428e-05,
+ "loss": 1.1081,
+ "step": 634
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.33597355193652834,
+ "learning_rate": 6.148199698965352e-05,
+ "loss": 1.0893,
+ "step": 635
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.30805894179787247,
+ "learning_rate": 6.141003259591834e-05,
+ "loss": 1.0995,
+ "step": 636
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.3025073882734066,
+ "learning_rate": 6.133797094494281e-05,
+ "loss": 1.0388,
+ "step": 637
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.3524395196391662,
+ "learning_rate": 6.126581236407429e-05,
+ "loss": 1.1196,
+ "step": 638
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.3377646188130345,
+ "learning_rate": 6.119355718110039e-05,
+ "loss": 1.0382,
+ "step": 639
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.35508400659785483,
+ "learning_rate": 6.112120572424763e-05,
+ "loss": 1.1402,
+ "step": 640
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.3454418793700457,
+ "learning_rate": 6.104875832217982e-05,
+ "loss": 1.1032,
+ "step": 641
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.32629806837059866,
+ "learning_rate": 6.097621530399661e-05,
+ "loss": 1.0959,
+ "step": 642
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.3329536837751315,
+ "learning_rate": 6.090357699923202e-05,
+ "loss": 1.0467,
+ "step": 643
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.32302233828349475,
+ "learning_rate": 6.083084373785287e-05,
+ "loss": 1.0858,
+ "step": 644
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.3310358826507611,
+ "learning_rate": 6.075801585025739e-05,
+ "loss": 1.0715,
+ "step": 645
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.319322035854079,
+ "learning_rate": 6.068509366727362e-05,
+ "loss": 1.177,
+ "step": 646
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.3065230667302707,
+ "learning_rate": 6.061207752015797e-05,
+ "loss": 1.0649,
+ "step": 647
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.29926795565748227,
+ "learning_rate": 6.053896774059368e-05,
+ "loss": 1.1325,
+ "step": 648
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.3556069634279046,
+ "learning_rate": 6.046576466068931e-05,
+ "loss": 1.1366,
+ "step": 649
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.3189191131461966,
+ "learning_rate": 6.039246861297727e-05,
+ "loss": 1.0693,
+ "step": 650
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.3347197156648834,
+ "learning_rate": 6.031907993041227e-05,
+ "loss": 1.1009,
+ "step": 651
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.32274156348185445,
+ "learning_rate": 6.0245598946369826e-05,
+ "loss": 1.1675,
+ "step": 652
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.35534089035455224,
+ "learning_rate": 6.017202599464476e-05,
+ "loss": 1.1723,
+ "step": 653
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.3106026578570133,
+ "learning_rate": 6.009836140944965e-05,
+ "loss": 1.0954,
+ "step": 654
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.3309144454564729,
+ "learning_rate": 6.002460552541331e-05,
+ "loss": 1.0209,
+ "step": 655
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.3023619281400003,
+ "learning_rate": 5.9950758677579345e-05,
+ "loss": 1.0363,
+ "step": 656
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.3311182880219704,
+ "learning_rate": 5.987682120140451e-05,
+ "loss": 1.0515,
+ "step": 657
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.33396486010030413,
+ "learning_rate": 5.980279343275729e-05,
+ "loss": 1.1251,
+ "step": 658
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.3465764556678002,
+ "learning_rate": 5.97286757079163e-05,
+ "loss": 1.165,
+ "step": 659
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.304193441363374,
+ "learning_rate": 5.965446836356882e-05,
+ "loss": 1.0228,
+ "step": 660
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.3415149030413082,
+ "learning_rate": 5.9580171736809224e-05,
+ "loss": 1.0742,
+ "step": 661
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.33138658321132064,
+ "learning_rate": 5.950578616513746e-05,
+ "loss": 1.0843,
+ "step": 662
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.30774403421162994,
+ "learning_rate": 5.943131198645752e-05,
+ "loss": 1.065,
+ "step": 663
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.3428877492183819,
+ "learning_rate": 5.9356749539075885e-05,
+ "loss": 1.1101,
+ "step": 664
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.3621290546130101,
+ "learning_rate": 5.928209916170003e-05,
+ "loss": 1.1372,
+ "step": 665
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.3482375945469884,
+ "learning_rate": 5.9207361193436865e-05,
+ "loss": 1.132,
+ "step": 666
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.31754384974068384,
+ "learning_rate": 5.9132535973791156e-05,
+ "loss": 1.148,
+ "step": 667
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.36003834782050365,
+ "learning_rate": 5.9057623842664044e-05,
+ "loss": 1.1099,
+ "step": 668
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.2963701622969662,
+ "learning_rate": 5.8982625140351464e-05,
+ "loss": 1.0755,
+ "step": 669
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.32579569606066516,
+ "learning_rate": 5.8907540207542616e-05,
+ "loss": 1.0809,
+ "step": 670
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.4247563451753457,
+ "learning_rate": 5.8832369385318416e-05,
+ "loss": 1.097,
+ "step": 671
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.33076932102169776,
+ "learning_rate": 5.875711301514992e-05,
+ "loss": 1.1078,
+ "step": 672
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.3609238032332309,
+ "learning_rate": 5.8681771438896815e-05,
+ "loss": 1.1031,
+ "step": 673
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.325159585649425,
+ "learning_rate": 5.860634499880583e-05,
+ "loss": 1.0707,
+ "step": 674
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.4620687271068983,
+ "learning_rate": 5.853083403750922e-05,
+ "loss": 1.1017,
+ "step": 675
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.33485279064365936,
+ "learning_rate": 5.845523889802316e-05,
+ "loss": 1.0989,
+ "step": 676
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.30952573170841513,
+ "learning_rate": 5.8379559923746214e-05,
+ "loss": 1.0393,
+ "step": 677
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.33498605810588283,
+ "learning_rate": 5.830379745845781e-05,
+ "loss": 1.1259,
+ "step": 678
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.35771921163037307,
+ "learning_rate": 5.822795184631659e-05,
+ "loss": 1.0815,
+ "step": 679
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.3329650192347647,
+ "learning_rate": 5.815202343185894e-05,
+ "loss": 1.1344,
+ "step": 680
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3356634465845771,
+ "learning_rate": 5.807601255999736e-05,
+ "loss": 1.1297,
+ "step": 681
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3289442034151235,
+ "learning_rate": 5.7999919576018934e-05,
+ "loss": 1.022,
+ "step": 682
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3207007334784113,
+ "learning_rate": 5.7923744825583745e-05,
+ "loss": 1.0571,
+ "step": 683
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3582460325329284,
+ "learning_rate": 5.7847488654723304e-05,
+ "loss": 1.0778,
+ "step": 684
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3563317666176927,
+ "learning_rate": 5.777115140983899e-05,
+ "loss": 1.1003,
+ "step": 685
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 3.4694912945702105,
+ "learning_rate": 5.769473343770047e-05,
+ "loss": 1.121,
+ "step": 686
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.43002349520483113,
+ "learning_rate": 5.761823508544411e-05,
+ "loss": 1.0765,
+ "step": 687
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.39467783104839754,
+ "learning_rate": 5.754165670057142e-05,
+ "loss": 1.0788,
+ "step": 688
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.39629029674867916,
+ "learning_rate": 5.7464998630947464e-05,
+ "loss": 1.0812,
+ "step": 689
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.3880152093965208,
+ "learning_rate": 5.738826122479929e-05,
+ "loss": 1.1228,
+ "step": 690
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.3777874121959188,
+ "learning_rate": 5.7311444830714324e-05,
+ "loss": 1.0907,
+ "step": 691
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.38004041653523696,
+ "learning_rate": 5.723454979763882e-05,
+ "loss": 1.1263,
+ "step": 692
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.37049672627797636,
+ "learning_rate": 5.7157576474876246e-05,
+ "loss": 1.1438,
+ "step": 693
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.32973606103437614,
+ "learning_rate": 5.7080525212085725e-05,
+ "loss": 1.0553,
+ "step": 694
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.31674639252070325,
+ "learning_rate": 5.700339635928038e-05,
+ "loss": 1.06,
+ "step": 695
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.32282199426553837,
+ "learning_rate": 5.692619026682588e-05,
+ "loss": 1.0841,
+ "step": 696
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.4810882958061859,
+ "learning_rate": 5.684890728543869e-05,
+ "loss": 1.0803,
+ "step": 697
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.3995638550178378,
+ "learning_rate": 5.6771547766184566e-05,
+ "loss": 1.1187,
+ "step": 698
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.35264932960583484,
+ "learning_rate": 5.669411206047699e-05,
+ "loss": 1.0641,
+ "step": 699
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.35240640524733,
+ "learning_rate": 5.661660052007547e-05,
+ "loss": 1.076,
+ "step": 700
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.3540694609860389,
+ "learning_rate": 5.653901349708401e-05,
+ "loss": 1.1369,
+ "step": 701
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.3196055112925304,
+ "learning_rate": 5.646135134394955e-05,
+ "loss": 1.0677,
+ "step": 702
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.4214141007955914,
+ "learning_rate": 5.6383614413460266e-05,
+ "loss": 1.1139,
+ "step": 703
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.3625611311798579,
+ "learning_rate": 5.630580305874402e-05,
+ "loss": 1.1845,
+ "step": 704
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.3425208672181188,
+ "learning_rate": 5.62279176332668e-05,
+ "loss": 1.174,
+ "step": 705
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.3108419862818321,
+ "learning_rate": 5.6149958490830996e-05,
+ "loss": 1.0331,
+ "step": 706
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.3274644181571904,
+ "learning_rate": 5.607192598557394e-05,
+ "loss": 1.0664,
+ "step": 707
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.346218197215145,
+ "learning_rate": 5.599382047196617e-05,
+ "loss": 1.2088,
+ "step": 708
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.328497632267458,
+ "learning_rate": 5.591564230480989e-05,
+ "loss": 1.0287,
+ "step": 709
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.3708173720611468,
+ "learning_rate": 5.583739183923732e-05,
+ "loss": 1.0883,
+ "step": 710
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.3631427403535479,
+ "learning_rate": 5.575906943070915e-05,
+ "loss": 1.1155,
+ "step": 711
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.3305201458598695,
+ "learning_rate": 5.5680675435012834e-05,
+ "loss": 1.0958,
+ "step": 712
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.34978833532083714,
+ "learning_rate": 5.5602210208261036e-05,
+ "loss": 1.1437,
+ "step": 713
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.3510553882510229,
+ "learning_rate": 5.552367410688999e-05,
+ "loss": 1.0941,
+ "step": 714
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.3523747462465078,
+ "learning_rate": 5.544506748765789e-05,
+ "loss": 1.1289,
+ "step": 715
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.38262637783927445,
+ "learning_rate": 5.5366390707643266e-05,
+ "loss": 1.099,
+ "step": 716
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.38620065989073454,
+ "learning_rate": 5.528764412424334e-05,
+ "loss": 1.083,
+ "step": 717
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.3401355276121096,
+ "learning_rate": 5.520882809517245e-05,
+ "loss": 1.028,
+ "step": 718
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.3392061008943934,
+ "learning_rate": 5.512994297846039e-05,
+ "loss": 1.1083,
+ "step": 719
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.34219480421015414,
+ "learning_rate": 5.505098913245077e-05,
+ "loss": 1.1108,
+ "step": 720
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.3275058061553761,
+ "learning_rate": 5.497196691579945e-05,
+ "loss": 1.111,
+ "step": 721
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.36800249746509384,
+ "learning_rate": 5.489287668747283e-05,
+ "loss": 1.1221,
+ "step": 722
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.4129005533101575,
+ "learning_rate": 5.481371880674628e-05,
+ "loss": 1.0966,
+ "step": 723
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.36563906596251655,
+ "learning_rate": 5.4734493633202505e-05,
+ "loss": 1.0927,
+ "step": 724
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.3614650536839971,
+ "learning_rate": 5.465520152672986e-05,
+ "loss": 1.13,
+ "step": 725
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.36419665098633497,
+ "learning_rate": 5.4575842847520765e-05,
+ "loss": 1.1183,
+ "step": 726
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.34490689807258995,
+ "learning_rate": 5.449641795607005e-05,
+ "loss": 1.0919,
+ "step": 727
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.3627643746876298,
+ "learning_rate": 5.441692721317334e-05,
+ "loss": 1.0411,
+ "step": 728
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.323620411949565,
+ "learning_rate": 5.433737097992537e-05,
+ "loss": 1.0725,
+ "step": 729
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3521599501824965,
+ "learning_rate": 5.425774961771838e-05,
+ "loss": 1.0926,
+ "step": 730
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3302390546764222,
+ "learning_rate": 5.417806348824047e-05,
+ "loss": 1.0468,
+ "step": 731
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3833325802616019,
+ "learning_rate": 5.4098312953473956e-05,
+ "loss": 1.1291,
+ "step": 732
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3708621126835512,
+ "learning_rate": 5.401849837569372e-05,
+ "loss": 1.0887,
+ "step": 733
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3625834373416278,
+ "learning_rate": 5.393862011746555e-05,
+ "loss": 1.0981,
+ "step": 734
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3583343965080617,
+ "learning_rate": 5.385867854164451e-05,
+ "loss": 1.1021,
+ "step": 735
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.34598320594096066,
+ "learning_rate": 5.377867401137332e-05,
+ "loss": 1.1376,
+ "step": 736
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.3046382791315433,
+ "learning_rate": 5.369860689008066e-05,
+ "loss": 1.0206,
+ "step": 737
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.34464948380043725,
+ "learning_rate": 5.3618477541479505e-05,
+ "loss": 1.1084,
+ "step": 738
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.3203242519627101,
+ "learning_rate": 5.353828632956557e-05,
+ "loss": 1.0731,
+ "step": 739
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.3431169960355163,
+ "learning_rate": 5.3458033618615516e-05,
+ "loss": 1.091,
+ "step": 740
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.33492074521678705,
+ "learning_rate": 5.337771977318543e-05,
+ "loss": 1.1112,
+ "step": 741
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.32576546585541344,
+ "learning_rate": 5.3297345158109086e-05,
+ "loss": 1.0993,
+ "step": 742
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.3410007245037574,
+ "learning_rate": 5.3216910138496286e-05,
+ "loss": 1.094,
+ "step": 743
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.34891180680896833,
+ "learning_rate": 5.313641507973128e-05,
+ "loss": 1.1331,
+ "step": 744
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.37135766946717214,
+ "learning_rate": 5.3055860347471006e-05,
+ "loss": 1.1,
+ "step": 745
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.3465019415478411,
+ "learning_rate": 5.297524630764349e-05,
+ "loss": 1.1256,
+ "step": 746
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.37035388481626563,
+ "learning_rate": 5.289457332644615e-05,
+ "loss": 1.0366,
+ "step": 747
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.33853883270759155,
+ "learning_rate": 5.281384177034421e-05,
+ "loss": 1.0547,
+ "step": 748
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.364306618627317,
+ "learning_rate": 5.2733052006068897e-05,
+ "loss": 1.0768,
+ "step": 749
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.4021754315731627,
+ "learning_rate": 5.2652204400615916e-05,
+ "loss": 1.1382,
+ "step": 750
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.3332185389039008,
+ "learning_rate": 5.257129932124368e-05,
+ "loss": 1.0815,
+ "step": 751
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.3453105709879854,
+ "learning_rate": 5.249033713547173e-05,
+ "loss": 1.1109,
+ "step": 752
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.3385397539717797,
+ "learning_rate": 5.2409318211078966e-05,
+ "loss": 1.0529,
+ "step": 753
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.33197994450130447,
+ "learning_rate": 5.232824291610206e-05,
+ "loss": 1.0721,
+ "step": 754
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.32836289576124167,
+ "learning_rate": 5.224711161883375e-05,
+ "loss": 1.0459,
+ "step": 755
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.32491620058831744,
+ "learning_rate": 5.216592468782117e-05,
+ "loss": 1.0897,
+ "step": 756
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.3137879047811153,
+ "learning_rate": 5.2084682491864155e-05,
+ "loss": 1.096,
+ "step": 757
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.3356938043023012,
+ "learning_rate": 5.200338540001364e-05,
+ "loss": 1.0827,
+ "step": 758
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.36044340490819055,
+ "learning_rate": 5.192203378156984e-05,
+ "loss": 1.0617,
+ "step": 759
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.34674262047888293,
+ "learning_rate": 5.184062800608077e-05,
+ "loss": 1.1267,
+ "step": 760
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.32469442322149333,
+ "learning_rate": 5.1759168443340375e-05,
+ "loss": 1.1483,
+ "step": 761
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.3290384307774216,
+ "learning_rate": 5.167765546338698e-05,
+ "loss": 1.047,
+ "step": 762
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.31637612188770403,
+ "learning_rate": 5.1596089436501525e-05,
+ "loss": 1.0311,
+ "step": 763
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.3168693829641207,
+ "learning_rate": 5.151447073320597e-05,
+ "loss": 1.1405,
+ "step": 764
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.34322421571238926,
+ "learning_rate": 5.143279972426153e-05,
+ "loss": 1.1428,
+ "step": 765
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.3291030435830325,
+ "learning_rate": 5.1351076780667026e-05,
+ "loss": 1.0473,
+ "step": 766
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.33772039158758044,
+ "learning_rate": 5.1269302273657195e-05,
+ "loss": 1.0909,
+ "step": 767
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.3802031736890876,
+ "learning_rate": 5.118747657470102e-05,
+ "loss": 1.1482,
+ "step": 768
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.3296067628997962,
+ "learning_rate": 5.1105600055500025e-05,
+ "loss": 1.0085,
+ "step": 769
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.3707139982828035,
+ "learning_rate": 5.102367308798658e-05,
+ "loss": 1.0746,
+ "step": 770
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.3378537316757011,
+ "learning_rate": 5.094169604432225e-05,
+ "loss": 1.0482,
+ "step": 771
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.4008417246255145,
+ "learning_rate": 5.085966929689601e-05,
+ "loss": 1.1065,
+ "step": 772
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.3244385106988064,
+ "learning_rate": 5.077759321832271e-05,
+ "loss": 1.0827,
+ "step": 773
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.37228575732812336,
+ "learning_rate": 5.0695468181441215e-05,
+ "loss": 1.1146,
+ "step": 774
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.33761714797540276,
+ "learning_rate": 5.061329455931283e-05,
+ "loss": 1.092,
+ "step": 775
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.3158158390913494,
+ "learning_rate": 5.053107272521955e-05,
+ "loss": 1.1058,
+ "step": 776
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.3691501929738938,
+ "learning_rate": 5.044880305266239e-05,
+ "loss": 1.1599,
+ "step": 777
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.33730914019805525,
+ "learning_rate": 5.0366485915359645e-05,
+ "loss": 1.0615,
+ "step": 778
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.34970059240017,
+ "learning_rate": 5.0284121687245257e-05,
+ "loss": 1.1475,
+ "step": 779
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 0.3374028029407197,
+ "learning_rate": 5.020171074246707e-05,
+ "loss": 1.0926,
+ "step": 780
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 0.3350020681123992,
+ "learning_rate": 5.011925345538514e-05,
+ "loss": 1.1276,
+ "step": 781
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 0.3224228965786606,
+ "learning_rate": 5.003675020057003e-05,
+ "loss": 1.0183,
+ "step": 782
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 0.3357310714740298,
+ "learning_rate": 4.995420135280114e-05,
+ "loss": 1.1114,
+ "step": 783
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 0.3590203255363759,
+ "learning_rate": 4.9871607287064966e-05,
+ "loss": 1.1504,
+ "step": 784
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.33011195419611655,
+ "learning_rate": 4.9788968378553396e-05,
+ "loss": 1.0826,
+ "step": 785
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.31088868195439445,
+ "learning_rate": 4.970628500266207e-05,
+ "loss": 1.0704,
+ "step": 786
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.3144996103179409,
+ "learning_rate": 4.962355753498858e-05,
+ "loss": 1.1403,
+ "step": 787
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.3147269555419068,
+ "learning_rate": 4.954078635133081e-05,
+ "loss": 1.0898,
+ "step": 788
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.3280151747783868,
+ "learning_rate": 4.945797182768524e-05,
+ "loss": 1.1115,
+ "step": 789
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.3551996569232493,
+ "learning_rate": 4.937511434024524e-05,
+ "loss": 1.1731,
+ "step": 790
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 0.343863208057807,
+ "learning_rate": 4.9292214265399336e-05,
+ "loss": 1.0866,
+ "step": 791
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 0.37316699385322466,
+ "learning_rate": 4.920927197972949e-05,
+ "loss": 1.1083,
+ "step": 792
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 0.3635739774067832,
+ "learning_rate": 4.9126287860009453e-05,
+ "loss": 1.1393,
+ "step": 793
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 0.3755910554972886,
+ "learning_rate": 4.9043262283202974e-05,
+ "loss": 1.1624,
+ "step": 794
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 0.3635899120146823,
+ "learning_rate": 4.8960195626462145e-05,
+ "loss": 1.2095,
+ "step": 795
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 0.3642202684342816,
+ "learning_rate": 4.8877088267125664e-05,
+ "loss": 1.1099,
+ "step": 796
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 0.3339946548799316,
+ "learning_rate": 4.879394058271712e-05,
+ "loss": 1.1157,
+ "step": 797
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 0.3457189703100475,
+ "learning_rate": 4.871075295094329e-05,
+ "loss": 1.129,
+ "step": 798
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 0.3550931839691424,
+ "learning_rate": 4.862752574969241e-05,
+ "loss": 1.076,
+ "step": 799
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 0.36139108917966734,
+ "learning_rate": 4.8544259357032475e-05,
+ "loss": 1.1577,
+ "step": 800
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.39569703665247874,
+ "learning_rate": 4.8460954151209486e-05,
+ "loss": 1.0543,
+ "step": 801
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.3879033670170866,
+ "learning_rate": 4.837761051064579e-05,
+ "loss": 1.0688,
+ "step": 802
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.3796846713967255,
+ "learning_rate": 4.8294228813938285e-05,
+ "loss": 0.9911,
+ "step": 803
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.4007831430409375,
+ "learning_rate": 4.8210809439856804e-05,
+ "loss": 1.0126,
+ "step": 804
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.37588078665500885,
+ "learning_rate": 4.8127352767342276e-05,
+ "loss": 0.9302,
+ "step": 805
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.4078509175013281,
+ "learning_rate": 4.8043859175505095e-05,
+ "loss": 0.9982,
+ "step": 806
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.379096046185539,
+ "learning_rate": 4.7960329043623344e-05,
+ "loss": 1.0035,
+ "step": 807
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.3813938568133554,
+ "learning_rate": 4.787676275114111e-05,
+ "loss": 0.9579,
+ "step": 808
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.3686863564511168,
+ "learning_rate": 4.779316067766673e-05,
+ "loss": 1.0105,
+ "step": 809
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.4263940878847523,
+ "learning_rate": 4.770952320297109e-05,
+ "loss": 1.0677,
+ "step": 810
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.37178778374665006,
+ "learning_rate": 4.7625850706985886e-05,
+ "loss": 1.0019,
+ "step": 811
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.36803355429187945,
+ "learning_rate": 4.7542143569801894e-05,
+ "loss": 0.9937,
+ "step": 812
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.3897072472941179,
+ "learning_rate": 4.745840217166725e-05,
+ "loss": 1.0877,
+ "step": 813
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.35571833841716255,
+ "learning_rate": 4.737462689298577e-05,
+ "loss": 1.0015,
+ "step": 814
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.38930229991094323,
+ "learning_rate": 4.7290818114315086e-05,
+ "loss": 1.028,
+ "step": 815
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.411005007105147,
+ "learning_rate": 4.72069762163651e-05,
+ "loss": 1.0068,
+ "step": 816
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.3980240190337736,
+ "learning_rate": 4.7123101579996106e-05,
+ "loss": 0.9919,
+ "step": 817
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.36369517703115467,
+ "learning_rate": 4.7039194586217136e-05,
+ "loss": 0.967,
+ "step": 818
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.38591148840458894,
+ "learning_rate": 4.695525561618418e-05,
+ "loss": 0.9743,
+ "step": 819
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.45873135108949337,
+ "learning_rate": 4.687128505119853e-05,
+ "loss": 1.0516,
+ "step": 820
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.3866330351411308,
+ "learning_rate": 4.6787283272704966e-05,
+ "loss": 0.9939,
+ "step": 821
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.4620340173291326,
+ "learning_rate": 4.670325066229009e-05,
+ "loss": 1.0526,
+ "step": 822
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.38877454299870284,
+ "learning_rate": 4.661918760168052e-05,
+ "loss": 0.9904,
+ "step": 823
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.3880489386116793,
+ "learning_rate": 4.653509447274121e-05,
+ "loss": 0.9623,
+ "step": 824
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.3827392356186151,
+ "learning_rate": 4.6450971657473743e-05,
+ "loss": 1.0772,
+ "step": 825
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.4132814641854327,
+ "learning_rate": 4.63668195380145e-05,
+ "loss": 1.0533,
+ "step": 826
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.3703610182402835,
+ "learning_rate": 4.628263849663301e-05,
+ "loss": 0.9336,
+ "step": 827
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.4152053683299823,
+ "learning_rate": 4.619842891573016e-05,
+ "loss": 0.9801,
+ "step": 828
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.41791059043554274,
+ "learning_rate": 4.6114191177836514e-05,
+ "loss": 1.0617,
+ "step": 829
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.46363896517299136,
+ "learning_rate": 4.6029925665610524e-05,
+ "loss": 0.9687,
+ "step": 830
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.41141959057512445,
+ "learning_rate": 4.59456327618368e-05,
+ "loss": 1.0965,
+ "step": 831
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.3789192764519836,
+ "learning_rate": 4.5861312849424386e-05,
+ "loss": 0.9793,
+ "step": 832
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.4047291581107866,
+ "learning_rate": 4.5776966311405035e-05,
+ "loss": 1.0342,
+ "step": 833
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.4425157400959256,
+ "learning_rate": 4.5692593530931416e-05,
+ "loss": 1.0892,
+ "step": 834
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.3707332144806616,
+ "learning_rate": 4.560819489127545e-05,
+ "loss": 0.9815,
+ "step": 835
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.3897444102572823,
+ "learning_rate": 4.552377077582646e-05,
+ "loss": 0.9884,
+ "step": 836
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.42725787957019346,
+ "learning_rate": 4.543932156808959e-05,
+ "loss": 0.9972,
+ "step": 837
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.40615269781820007,
+ "learning_rate": 4.535484765168386e-05,
+ "loss": 0.9529,
+ "step": 838
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.3505829736050887,
+ "learning_rate": 4.527034941034063e-05,
+ "loss": 0.9492,
+ "step": 839
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.36688064686440497,
+ "learning_rate": 4.51858272279017e-05,
+ "loss": 0.9592,
+ "step": 840
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.4043468777955929,
+ "learning_rate": 4.5101281488317634e-05,
+ "loss": 1.048,
+ "step": 841
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.3811489793242706,
+ "learning_rate": 4.501671257564602e-05,
+ "loss": 1.0138,
+ "step": 842
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.39813004142325986,
+ "learning_rate": 4.49321208740497e-05,
+ "loss": 1.071,
+ "step": 843
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.3809751022095503,
+ "learning_rate": 4.484750676779504e-05,
+ "loss": 1.0351,
+ "step": 844
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.384312178013823,
+ "learning_rate": 4.4762870641250185e-05,
+ "loss": 0.9737,
+ "step": 845
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.40769404907923557,
+ "learning_rate": 4.467821287888331e-05,
+ "loss": 0.9659,
+ "step": 846
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.39594136851937817,
+ "learning_rate": 4.459353386526086e-05,
+ "loss": 0.9405,
+ "step": 847
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.37180161011562185,
+ "learning_rate": 4.450883398504584e-05,
+ "loss": 1.0732,
+ "step": 848
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.3772603623154663,
+ "learning_rate": 4.442411362299602e-05,
+ "loss": 0.9646,
+ "step": 849
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.4346142368506476,
+ "learning_rate": 4.433937316396224e-05,
+ "loss": 0.9572,
+ "step": 850
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.3997258084612474,
+ "learning_rate": 4.425461299288659e-05,
+ "loss": 0.9492,
+ "step": 851
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.41245476865247155,
+ "learning_rate": 4.416983349480073e-05,
+ "loss": 0.8732,
+ "step": 852
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.6761499297939195,
+ "learning_rate": 4.408503505482412e-05,
+ "loss": 1.0425,
+ "step": 853
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.40340979486858985,
+ "learning_rate": 4.400021805816225e-05,
+ "loss": 0.9596,
+ "step": 854
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.43290732392699666,
+ "learning_rate": 4.391538289010493e-05,
+ "loss": 1.0123,
+ "step": 855
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.36878054442190156,
+ "learning_rate": 4.383052993602448e-05,
+ "loss": 0.9448,
+ "step": 856
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.7146145128961262,
+ "learning_rate": 4.374565958137404e-05,
+ "loss": 1.0342,
+ "step": 857
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.44429357586145607,
+ "learning_rate": 4.3660772211685775e-05,
+ "loss": 1.0436,
+ "step": 858
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.4565751973640598,
+ "learning_rate": 4.357586821256918e-05,
+ "loss": 1.0311,
+ "step": 859
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.3919991236654277,
+ "learning_rate": 4.349094796970925e-05,
+ "loss": 1.1401,
+ "step": 860
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.4347441949284011,
+ "learning_rate": 4.3406011868864795e-05,
+ "loss": 1.0252,
+ "step": 861
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.38339976027415407,
+ "learning_rate": 4.3321060295866635e-05,
+ "loss": 1.0536,
+ "step": 862
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.37688790408195166,
+ "learning_rate": 4.32360936366159e-05,
+ "loss": 1.012,
+ "step": 863
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.4317538207582504,
+ "learning_rate": 4.315111227708224e-05,
+ "loss": 1.0505,
+ "step": 864
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.4145324872228796,
+ "learning_rate": 4.306611660330208e-05,
+ "loss": 1.0496,
+ "step": 865
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.416535227064448,
+ "learning_rate": 4.298110700137687e-05,
+ "loss": 0.9628,
+ "step": 866
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.46564356187492717,
+ "learning_rate": 4.2896083857471345e-05,
+ "loss": 1.0016,
+ "step": 867
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.4228980941889828,
+ "learning_rate": 4.281104755781172e-05,
+ "loss": 1.0904,
+ "step": 868
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.4267821214430208,
+ "learning_rate": 4.272599848868402e-05,
+ "loss": 1.0544,
+ "step": 869
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.45763332095792075,
+ "learning_rate": 4.264093703643223e-05,
+ "loss": 1.0686,
+ "step": 870
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.4347555516548761,
+ "learning_rate": 4.255586358745662e-05,
+ "loss": 1.0264,
+ "step": 871
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.3817726381103066,
+ "learning_rate": 4.247077852821194e-05,
+ "loss": 1.0045,
+ "step": 872
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.3882808845457995,
+ "learning_rate": 4.2385682245205685e-05,
+ "loss": 1.0193,
+ "step": 873
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.39410930252966775,
+ "learning_rate": 4.230057512499634e-05,
+ "loss": 0.9832,
+ "step": 874
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.4373094593907156,
+ "learning_rate": 4.221545755419159e-05,
+ "loss": 1.0343,
+ "step": 875
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.4462843721698891,
+ "learning_rate": 4.2130329919446646e-05,
+ "loss": 1.0324,
+ "step": 876
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.4747274247448112,
+ "learning_rate": 4.20451926074624e-05,
+ "loss": 0.9903,
+ "step": 877
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.4157472897596409,
+ "learning_rate": 4.196004600498369e-05,
+ "loss": 0.9266,
+ "step": 878
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.41625958088960685,
+ "learning_rate": 4.1874890498797605e-05,
+ "loss": 0.9658,
+ "step": 879
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.44784944130574333,
+ "learning_rate": 4.178972647573163e-05,
+ "loss": 0.9671,
+ "step": 880
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.4116839177956385,
+ "learning_rate": 4.1704554322651975e-05,
+ "loss": 0.9591,
+ "step": 881
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.4025569857639452,
+ "learning_rate": 4.161937442646176e-05,
+ "loss": 1.0072,
+ "step": 882
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.41518478124763597,
+ "learning_rate": 4.1534187174099285e-05,
+ "loss": 1.0275,
+ "step": 883
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.3987815564664466,
+ "learning_rate": 4.1448992952536275e-05,
+ "loss": 1.0039,
+ "step": 884
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.4270378155679982,
+ "learning_rate": 4.136379214877609e-05,
+ "loss": 1.0369,
+ "step": 885
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.42144733922972777,
+ "learning_rate": 4.127858514985203e-05,
+ "loss": 1.0269,
+ "step": 886
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.4198664438272548,
+ "learning_rate": 4.1193372342825494e-05,
+ "loss": 1.0427,
+ "step": 887
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.3985048256281719,
+ "learning_rate": 4.1108154114784275e-05,
+ "loss": 1.0702,
+ "step": 888
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.605520808292362,
+ "learning_rate": 4.102293085284083e-05,
+ "loss": 0.9749,
+ "step": 889
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.4150515863924052,
+ "learning_rate": 4.0937702944130426e-05,
+ "loss": 1.0231,
+ "step": 890
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.3935997576565283,
+ "learning_rate": 4.085247077580948e-05,
+ "loss": 1.0014,
+ "step": 891
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.399446131403209,
+ "learning_rate": 4.076723473505374e-05,
+ "loss": 0.9602,
+ "step": 892
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.4406024397129952,
+ "learning_rate": 4.068199520905655e-05,
+ "loss": 1.0425,
+ "step": 893
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.4036917571496492,
+ "learning_rate": 4.059675258502709e-05,
+ "loss": 0.973,
+ "step": 894
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.4057196459433299,
+ "learning_rate": 4.05115072501886e-05,
+ "loss": 0.9997,
+ "step": 895
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.4374124954708759,
+ "learning_rate": 4.0426259591776645e-05,
+ "loss": 0.9826,
+ "step": 896
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.4545699371285546,
+ "learning_rate": 4.0341009997037356e-05,
+ "loss": 1.0554,
+ "step": 897
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.4251917031237376,
+ "learning_rate": 4.025575885322563e-05,
+ "loss": 1.0217,
+ "step": 898
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.3857651901893941,
+ "learning_rate": 4.0170506547603427e-05,
+ "loss": 1.0317,
+ "step": 899
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.46323573798490897,
+ "learning_rate": 4.008525346743797e-05,
+ "loss": 1.0398,
+ "step": 900
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.4011541121460918,
+ "learning_rate": 4e-05,
+ "loss": 1.0706,
+ "step": 901
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.46493281221028004,
+ "learning_rate": 3.991474653256204e-05,
+ "loss": 1.0525,
+ "step": 902
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.41683080924539023,
+ "learning_rate": 3.982949345239658e-05,
+ "loss": 1.0905,
+ "step": 903
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.4750350025014512,
+ "learning_rate": 3.974424114677437e-05,
+ "loss": 1.049,
+ "step": 904
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.3867445073614702,
+ "learning_rate": 3.965899000296266e-05,
+ "loss": 0.9624,
+ "step": 905
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.378387661131469,
+ "learning_rate": 3.957374040822335e-05,
+ "loss": 1.0223,
+ "step": 906
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.3905996390559077,
+ "learning_rate": 3.948849274981141e-05,
+ "loss": 1.0315,
+ "step": 907
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.4139717689498189,
+ "learning_rate": 3.940324741497291e-05,
+ "loss": 0.9297,
+ "step": 908
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.39086355684921514,
+ "learning_rate": 3.9318004790943465e-05,
+ "loss": 0.9684,
+ "step": 909
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.4334915643736419,
+ "learning_rate": 3.923276526494627e-05,
+ "loss": 0.996,
+ "step": 910
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.40782018986229496,
+ "learning_rate": 3.9147529224190536e-05,
+ "loss": 1.0875,
+ "step": 911
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.43578702386625723,
+ "learning_rate": 3.906229705586959e-05,
+ "loss": 1.1214,
+ "step": 912
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.414945683409524,
+ "learning_rate": 3.89770691471592e-05,
+ "loss": 1.1037,
+ "step": 913
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.40665801579679106,
+ "learning_rate": 3.889184588521573e-05,
+ "loss": 0.9743,
+ "step": 914
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.4064250611574517,
+ "learning_rate": 3.880662765717453e-05,
+ "loss": 0.8814,
+ "step": 915
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.48023046298843347,
+ "learning_rate": 3.8721414850147985e-05,
+ "loss": 0.9663,
+ "step": 916
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.42358024833566227,
+ "learning_rate": 3.8636207851223924e-05,
+ "loss": 1.0491,
+ "step": 917
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.41522494786195835,
+ "learning_rate": 3.855100704746374e-05,
+ "loss": 1.033,
+ "step": 918
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.40890517696706496,
+ "learning_rate": 3.8465812825900715e-05,
+ "loss": 1.0369,
+ "step": 919
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.4325851866408538,
+ "learning_rate": 3.838062557353825e-05,
+ "loss": 0.9362,
+ "step": 920
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.4185860919050069,
+ "learning_rate": 3.8295445677348025e-05,
+ "loss": 1.026,
+ "step": 921
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.3975762375934804,
+ "learning_rate": 3.8210273524268375e-05,
+ "loss": 1.0412,
+ "step": 922
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.41725298241987474,
+ "learning_rate": 3.8125109501202395e-05,
+ "loss": 1.0004,
+ "step": 923
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.455183913149126,
+ "learning_rate": 3.803995399501632e-05,
+ "loss": 1.0594,
+ "step": 924
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.3993993856483797,
+ "learning_rate": 3.795480739253761e-05,
+ "loss": 0.9761,
+ "step": 925
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.41638796815161494,
+ "learning_rate": 3.786967008055337e-05,
+ "loss": 1.0369,
+ "step": 926
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.40015112695810534,
+ "learning_rate": 3.7784542445808414e-05,
+ "loss": 1.0271,
+ "step": 927
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.3995749494729548,
+ "learning_rate": 3.769942487500368e-05,
+ "loss": 1.0613,
+ "step": 928
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.4073556267037492,
+ "learning_rate": 3.761431775479432e-05,
+ "loss": 1.0528,
+ "step": 929
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.44218148822636044,
+ "learning_rate": 3.752922147178807e-05,
+ "loss": 1.0742,
+ "step": 930
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.4435063485893757,
+ "learning_rate": 3.744413641254339e-05,
+ "loss": 1.0825,
+ "step": 931
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.46841574994107515,
+ "learning_rate": 3.735906296356778e-05,
+ "loss": 1.0471,
+ "step": 932
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.40093716627657294,
+ "learning_rate": 3.727400151131599e-05,
+ "loss": 1.0474,
+ "step": 933
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3866415067997244,
+ "learning_rate": 3.71889524421883e-05,
+ "loss": 1.0209,
+ "step": 934
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.4881546110706673,
+ "learning_rate": 3.710391614252867e-05,
+ "loss": 1.0768,
+ "step": 935
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.4133084639324523,
+ "learning_rate": 3.701889299862314e-05,
+ "loss": 1.0423,
+ "step": 936
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.40523563084001196,
+ "learning_rate": 3.6933883396697936e-05,
+ "loss": 1.005,
+ "step": 937
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.38757352418642405,
+ "learning_rate": 3.684888772291777e-05,
+ "loss": 0.9659,
+ "step": 938
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.421394551890689,
+ "learning_rate": 3.676390636338411e-05,
+ "loss": 1.0454,
+ "step": 939
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.45693070958342186,
+ "learning_rate": 3.667893970413337e-05,
+ "loss": 1.1459,
+ "step": 940
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.4172025376377795,
+ "learning_rate": 3.659398813113522e-05,
+ "loss": 0.9954,
+ "step": 941
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.3871624019510191,
+ "learning_rate": 3.650905203029075e-05,
+ "loss": 1.0441,
+ "step": 942
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.38541342610032325,
+ "learning_rate": 3.642413178743083e-05,
+ "loss": 0.9465,
+ "step": 943
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.4208031670525743,
+ "learning_rate": 3.633922778831423e-05,
+ "loss": 1.0367,
+ "step": 944
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.41867209013040035,
+ "learning_rate": 3.6254340418625975e-05,
+ "loss": 1.0868,
+ "step": 945
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.431758149074127,
+ "learning_rate": 3.6169470063975536e-05,
+ "loss": 1.0689,
+ "step": 946
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.4988803338819952,
+ "learning_rate": 3.608461710989509e-05,
+ "loss": 1.0879,
+ "step": 947
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.4094858411191625,
+ "learning_rate": 3.5999781941837755e-05,
+ "loss": 1.0332,
+ "step": 948
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.3831847195845155,
+ "learning_rate": 3.591496494517589e-05,
+ "loss": 0.9751,
+ "step": 949
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.40535692821947267,
+ "learning_rate": 3.5830166505199284e-05,
+ "loss": 1.0594,
+ "step": 950
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.4875663789389966,
+ "learning_rate": 3.574538700711343e-05,
+ "loss": 0.9749,
+ "step": 951
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.5155923998285772,
+ "learning_rate": 3.566062683603778e-05,
+ "loss": 0.9999,
+ "step": 952
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.5280285947816189,
+ "learning_rate": 3.557588637700399e-05,
+ "loss": 1.1061,
+ "step": 953
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.46573407357796753,
+ "learning_rate": 3.5491166014954174e-05,
+ "loss": 1.102,
+ "step": 954
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.4122542582865379,
+ "learning_rate": 3.540646613473915e-05,
+ "loss": 1.0469,
+ "step": 955
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.41414476980823367,
+ "learning_rate": 3.53217871211167e-05,
+ "loss": 0.9973,
+ "step": 956
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.4030707611608045,
+ "learning_rate": 3.523712935874983e-05,
+ "loss": 0.9796,
+ "step": 957
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.4235313349747291,
+ "learning_rate": 3.5152493232204975e-05,
+ "loss": 1.0601,
+ "step": 958
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.4165235178302652,
+ "learning_rate": 3.5067879125950316e-05,
+ "loss": 1.0358,
+ "step": 959
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.44083984701952955,
+ "learning_rate": 3.4983287424354e-05,
+ "loss": 1.0957,
+ "step": 960
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.3781161039063518,
+ "learning_rate": 3.489871851168238e-05,
+ "loss": 0.9838,
+ "step": 961
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.4095747724038915,
+ "learning_rate": 3.4814172772098314e-05,
+ "loss": 1.014,
+ "step": 962
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.42197119558898466,
+ "learning_rate": 3.472965058965938e-05,
+ "loss": 1.0096,
+ "step": 963
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.4339963388152155,
+ "learning_rate": 3.464515234831615e-05,
+ "loss": 1.0158,
+ "step": 964
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.4284638765548976,
+ "learning_rate": 3.4560678431910424e-05,
+ "loss": 1.1047,
+ "step": 965
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.3935144535755794,
+ "learning_rate": 3.447622922417355e-05,
+ "loss": 0.9925,
+ "step": 966
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.45884343961025,
+ "learning_rate": 3.439180510872457e-05,
+ "loss": 1.0583,
+ "step": 967
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.42439320759788374,
+ "learning_rate": 3.4307406469068604e-05,
+ "loss": 0.9305,
+ "step": 968
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.45770082390324845,
+ "learning_rate": 3.4223033688594985e-05,
+ "loss": 1.054,
+ "step": 969
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.4284786643981094,
+ "learning_rate": 3.4138687150575634e-05,
+ "loss": 0.9409,
+ "step": 970
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.41356124058383237,
+ "learning_rate": 3.4054367238163215e-05,
+ "loss": 1.0739,
+ "step": 971
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.4255832249412624,
+ "learning_rate": 3.3970074334389496e-05,
+ "loss": 1.0764,
+ "step": 972
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.4337695536142702,
+ "learning_rate": 3.388580882216349e-05,
+ "loss": 1.0195,
+ "step": 973
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.41363495650922455,
+ "learning_rate": 3.380157108426985e-05,
+ "loss": 1.0615,
+ "step": 974
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.3950691247686479,
+ "learning_rate": 3.371736150336701e-05,
+ "loss": 1.0283,
+ "step": 975
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.4042823691555822,
+ "learning_rate": 3.3633180461985505e-05,
+ "loss": 1.0309,
+ "step": 976
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.3921158850479399,
+ "learning_rate": 3.354902834252627e-05,
+ "loss": 1.068,
+ "step": 977
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.38349545732725654,
+ "learning_rate": 3.346490552725879e-05,
+ "loss": 1.0886,
+ "step": 978
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.38689221457248724,
+ "learning_rate": 3.33808123983195e-05,
+ "loss": 0.987,
+ "step": 979
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.38660550867425647,
+ "learning_rate": 3.329674933770992e-05,
+ "loss": 1.069,
+ "step": 980
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.3917593746353493,
+ "learning_rate": 3.321271672729504e-05,
+ "loss": 0.9858,
+ "step": 981
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.4292314072827653,
+ "learning_rate": 3.3128714948801474e-05,
+ "loss": 1.0477,
+ "step": 982
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.479414638418211,
+ "learning_rate": 3.3044744383815835e-05,
+ "loss": 1.0763,
+ "step": 983
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.380831894995463,
+ "learning_rate": 3.2960805413782884e-05,
+ "loss": 1.0393,
+ "step": 984
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.42402274703362114,
+ "learning_rate": 3.2876898420003914e-05,
+ "loss": 1.0837,
+ "step": 985
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.4571447203722258,
+ "learning_rate": 3.279302378363491e-05,
+ "loss": 1.0594,
+ "step": 986
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3776673281658531,
+ "learning_rate": 3.270918188568493e-05,
+ "loss": 1.0121,
+ "step": 987
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.4367173448132159,
+ "learning_rate": 3.262537310701425e-05,
+ "loss": 0.9612,
+ "step": 988
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.43679765208840926,
+ "learning_rate": 3.254159782833276e-05,
+ "loss": 1.0565,
+ "step": 989
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.4018151260013493,
+ "learning_rate": 3.2457856430198126e-05,
+ "loss": 0.9975,
+ "step": 990
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.40461959940721076,
+ "learning_rate": 3.237414929301412e-05,
+ "loss": 1.0255,
+ "step": 991
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.41342378541540653,
+ "learning_rate": 3.2290476797028926e-05,
+ "loss": 1.024,
+ "step": 992
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.3926173909201105,
+ "learning_rate": 3.220683932233328e-05,
+ "loss": 1.0877,
+ "step": 993
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.3835623199834992,
+ "learning_rate": 3.21232372488589e-05,
+ "loss": 1.0992,
+ "step": 994
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.39901809497083496,
+ "learning_rate": 3.2039670956376656e-05,
+ "loss": 1.0723,
+ "step": 995
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.3979604537466272,
+ "learning_rate": 3.195614082449492e-05,
+ "loss": 1.0201,
+ "step": 996
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.4057122427176845,
+ "learning_rate": 3.1872647232657723e-05,
+ "loss": 1.0885,
+ "step": 997
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.39747060350754754,
+ "learning_rate": 3.17891905601432e-05,
+ "loss": 1.0544,
+ "step": 998
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.4397658078291558,
+ "learning_rate": 3.1705771186061715e-05,
+ "loss": 1.0998,
+ "step": 999
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.37373547663810053,
+ "learning_rate": 3.162238948935423e-05,
+ "loss": 1.0465,
+ "step": 1000
+ }
+ ],
+ "logging_steps": 1.0,
+ "max_steps": 1638,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 3,
+ "save_steps": 50,
+ "total_flos": 1036715984683008.0,
+ "train_batch_size": 1,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/checkpoint-1000/training_args.bin b/checkpoint-1000/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..8c2dfa20e1da5754719c3d7e300b9b86407f077f
--- /dev/null
+++ b/checkpoint-1000/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3f2f7bd873b9dca108c5ca2e32ea140480fabeed2dec60f702daabd0a44d071e
+size 6776
diff --git a/checkpoint-1000/zero_to_fp32.py b/checkpoint-1000/zero_to_fp32.py
new file mode 100755
index 0000000000000000000000000000000000000000..24cc342e78d1a006c782b3a4cd68d9ce786d8fd8
--- /dev/null
+++ b/checkpoint-1000/zero_to_fp32.py
@@ -0,0 +1,604 @@
+#!/usr/bin/env python
+
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+# This script extracts fp32 consolidated weights from a zero 1, 2 and 3 DeepSpeed checkpoints. It gets
+# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
+# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
+# application.
+#
+# example: python zero_to_fp32.py . pytorch_model.bin
+
+import argparse
+import torch
+import glob
+import math
+import os
+import re
+from collections import OrderedDict
+from dataclasses import dataclass
+
+# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
+# DeepSpeed data structures it has to be available in the current python environment.
+from deepspeed.utils import logger
+from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
+
+
@dataclass
class zero_model_state:
    """Distilled per-rank model state parsed from a *_model_states.pt file.

    Note: the original annotated the dict fields with ``dict()`` — an empty
    dict *instance*, not a type — which is meaningless as a type hint.  Real
    types are used here; dataclass runtime behavior is unchanged since
    annotations are not enforced.
    """
    buffers: dict                  # buffer name -> fp32 tensor
    param_shapes: list             # one {param name: shape} dict per param group
    shared_params: list            # [alias_name, source_name] pairs
    ds_version: str                # deepspeed version string that wrote the checkpoint (may be None)
    frozen_param_shapes: dict      # frozen param name -> shape, or None when no frozen params
    frozen_param_fragments: dict   # frozen param name -> this rank's tensor fragment, or None
+
+
# Module-level debug switch; flipped by the -d/--debug CLI flag in __main__.
debug = 0

# All checkpoint tensors are materialized on CPU so no GPU is needed for the conversion.
device = torch.device('cpu')
+
+
def atoi(text):
    """Return *text* converted to int when it is all digits, else unchanged."""
    if text.isdigit():
        return int(text)
    return text


def natural_keys(text):
    """Key function for human-order ("natural") sorting.

    alist.sort(key=natural_keys) sorts in human order, so "file2" sorts
    before "file10".
    http://nedbatchelder.com/blog/200712/human_sorting.html
    (See Toothy's implementation in the comments)
    """
    return [atoi(chunk) for chunk in re.split(r'(\d+)', text)]
+
+
def get_model_state_file(checkpoint_dir, zero_stage):
    """Return the path of the rank-0 model-states file inside *checkpoint_dir*.

    Args:
        checkpoint_dir: directory holding the DeepSpeed checkpoint files.
        zero_stage: ZeRO stage of the checkpoint; stages 1/2 and stage 3 use
            different file-naming schemes.

    Raises:
        FileNotFoundError: if the directory or the expected file is missing.
        ValueError: for an unrecognized zero_stage.  (The original fell
            through with ``file`` unbound and raised UnboundLocalError.)
    """
    if not os.path.isdir(checkpoint_dir):
        raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")

    # there should be only one file
    if zero_stage <= 2:
        file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
    elif zero_stage == 3:
        file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
    else:
        # same message parse_optim_states() uses for an unexpected stage
        raise ValueError(f"unknown zero stage {zero_stage}")

    if not os.path.exists(file):
        raise FileNotFoundError(f"can't find model states file at '{file}'")

    return file
+
+
def get_checkpoint_files(checkpoint_dir, glob_pattern):
    """Return every file in *checkpoint_dir* matching *glob_pattern*, naturally sorted.

    Raises FileNotFoundError when nothing matches.
    """
    # XXX: need to test that this simple glob rule works for multi-node setup too
    pattern = os.path.join(checkpoint_dir, glob_pattern)
    ckpt_files = sorted(glob.glob(pattern), key=natural_keys)

    if not ckpt_files:
        raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")

    return ckpt_files
+
+
def get_optim_files(checkpoint_dir):
    """Return the per-rank ``*_optim_states.pt`` files, in natural sort order."""
    return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
+
+
def get_model_state_files(checkpoint_dir):
    """Return the per-rank ``*_model_states.pt`` files, in natural sort order."""
    return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
+
+
def parse_model_states(files):
    """Load each ``*_model_states.pt`` file and distill it into a zero_model_state.

    Args:
        files: per-rank model-states file paths, one per rank.

    Returns:
        list of zero_model_state in the same order as *files*.

    Raises:
        ValueError: when a file lacks the buffer-names key and therefore is
            not a DeepSpeed model-states checkpoint.
    """
    zero_model_states = []
    for file in files:
        state_dict = torch.load(file, map_location=device)

        if BUFFER_NAMES not in state_dict:
            raise ValueError(f"{file} is not a model state checkpoint")
        buffer_names = state_dict[BUFFER_NAMES]
        if debug:
            print("Found buffers:", buffer_names)

        # recover just the buffers while restoring them to fp32 if they were saved in fp16
        buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
        # param_shapes is a list with one {param name: shape} dict per param group
        param_shapes = state_dict[PARAM_SHAPES]

        # collect parameters that are included in param_shapes
        # NOTE(review): param_names is built (and extended with frozen params
        # below) but never used in this function — kept as-is from upstream.
        param_names = []
        for s in param_shapes:
            for name in s.keys():
                param_names.append(name)

        # update with frozen parameters
        frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
        if frozen_param_shapes is not None:
            if debug:
                print(f"Found frozen_param_shapes: {frozen_param_shapes}")
            param_names += list(frozen_param_shapes.keys())

        # handle shared params
        # NOTE(review): each entry appears to be an [alias, source] name pair
        # (consumed that way by the zero2/zero3 builders) — confirm upstream.
        shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]

        ds_version = state_dict.get(DS_VERSION, None)

        frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)

        z_model_state = zero_model_state(buffers=buffers,
                                         param_shapes=param_shapes,
                                         shared_params=shared_params,
                                         ds_version=ds_version,
                                         frozen_param_shapes=frozen_param_shapes,
                                         frozen_param_fragments=frozen_param_fragments)
        zero_model_states.append(z_model_state)

    return zero_model_states
+
+
def parse_optim_states(files, ds_checkpoint_dir):
    """Load the per-rank ``*_optim_states.pt`` files and extract the fp32 groups.

    Args:
        files: per-rank optimizer-states file paths (len == dp world size).
        ds_checkpoint_dir: checkpoint directory, used only in error messages.

    Returns:
        ``(zero_stage, world_size, fp32_flat_groups)`` where fp32_flat_groups
        holds one entry per rank: a list of flat tensors (stage 1/2) or a
        single merged flat tensor (stage 3).

    Raises:
        ValueError: when the files are not a zero checkpoint, the file count
            does not match the saved partition count, or the stage is unknown.
    """
    total_files = len(files)
    state_dicts = []
    for f in files:
        state_dict = torch.load(f, map_location=device)
        # immediately discard the potentially huge optimizer state as we only care
        # for the fp32 master weights, and also handle the case where it was
        # already removed by another helper script.
        # (uses the shared constant instead of the hard-coded "optimizer_state_dict"
        # string the rest of this function already keys with)
        state_dict[OPTIMIZER_STATE_DICT].pop(OPTIMIZER_STATE_DICT, None)
        state_dicts.append(state_dict)

    if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
        raise ValueError(f"{files[0]} is not a zero checkpoint")
    zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
    world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]

    # For ZeRO-2 each param group can have different partition_count as data parallelism
    # for expert parameters can be different from data parallelism for non-expert
    # parameters. So we can just use the max of the partition_count to get the dp world_size.
    if isinstance(world_size, list):
        world_size = max(world_size)

    if world_size != total_files:
        raise ValueError(
            f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
            "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
        )

    # the groups are named differently in each stage
    if zero_stage <= 2:
        fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
    elif zero_stage == 3:
        fp32_groups_key = FP32_FLAT_GROUPS
    else:
        raise ValueError(f"unknown zero stage {zero_stage}")

    if zero_stage <= 2:
        fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
    elif zero_stage == 3:
        # if there is more than one param group, there will be multiple flattened tensors - one
        # flattened tensor per group - for simplicity merge them into a single tensor
        #
        # XXX: could make the script more memory efficient for when there are multiple groups - it
        # will require matching the sub-lists of param_shapes for each param group flattened tensor
        fp32_flat_groups = [
            torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
        ]

    return zero_stage, world_size, fp32_flat_groups
+
+
def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
    """
    Returns fp32 state_dict reconstructed from ds checkpoint

    Args:
        - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
        - ``exclude_frozen_parameters``: when True, frozen parameters are left out of the result
    """
    print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")

    optim_files = get_optim_files(ds_checkpoint_dir)
    zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
    print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")

    model_files = get_model_state_files(ds_checkpoint_dir)
    zero_model_states = parse_model_states(model_files)
    print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')

    # parse_optim_states() has already rejected every stage other than 1, 2 or 3,
    # so a two-way dispatch is exhaustive here.
    if zero_stage <= 2:
        builder = _get_fp32_state_dict_from_zero2_checkpoint
    else:
        builder = _get_fp32_state_dict_from_zero3_checkpoint
    return builder(world_size, fp32_flat_groups, zero_model_states, exclude_frozen_parameters)
+
+
def _zero2_merge_frozen_params(state_dict, zero_model_states):
    """Copy frozen (untrained) parameters into *state_dict* for a ZeRO-1/2 checkpoint.

    Frozen params are not partitioned in ZeRO-1/2, so rank 0's fragments are
    used verbatim.  No-op when the checkpoint holds no frozen params.
    """
    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
    if frozen_param_shapes is None or len(frozen_param_shapes) == 0:
        return

    frozen_param_fragments = zero_model_states[0].frozen_param_fragments

    if debug:
        num_elem = sum(s.numel() for s in frozen_param_shapes.values())
        print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

    wanted_params = len(frozen_param_shapes)
    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
    avail_numel = sum(p.numel() for p in frozen_param_fragments.values())
    print(f'Frozen params: Have {avail_numel} numels to process.')
    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params, total_numel = 0, 0
    for name, shape in frozen_param_shapes.items():
        unpartitioned_numel = shape.numel()
        total_params += 1
        total_numel += unpartitioned_numel

        state_dict[name] = frozen_param_fragments[name]

        if debug:
            print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _has_callable(obj, fn):
+ attr = getattr(obj, fn, None)
+ return callable(attr)
+
+
def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    """Slice the trainable params out of the merged flat fp32 groups (ZeRO-1/2).

    Mutates *state_dict* in place, adding one fp32 tensor per trainable param.

    Args:
        state_dict: destination dict being assembled.
        world_size: data-parallel world size (number of ranks).
        fp32_flat_groups: per-rank list of flat fp32 partitions, one per param group.
        zero_model_states: parsed model states; only rank 0's param_shapes are used.

    Raises:
        ValueError: when the consumed element count does not line up with the
            available elements (after ZeRO-2's 2*world_size nccl alignment).
    """
    param_shapes = zero_model_states[0].param_shapes

    # Reconstruction protocol:
    #
    # XXX: document this

    if debug:
        for i in range(world_size):
            for j in range(len(fp32_flat_groups[0])):
                print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")

    # XXX: memory usage doubles here (zero2)
    # concatenate each param group's per-rank partitions into one flat vector
    num_param_groups = len(fp32_flat_groups[0])
    merged_single_partition_of_fp32_groups = []
    for i in range(num_param_groups):
        merged_partitions = [sd[i] for sd in fp32_flat_groups]
        full_single_fp32_vector = torch.cat(merged_partitions, 0)
        merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
    avail_numel = sum(
        [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])

    if debug:
        wanted_params = sum([len(shapes) for shapes in param_shapes])
        wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
        # not asserting if there is a mismatch due to possible padding
        print(f"Have {avail_numel} numels to process.")
        print(f"Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    total_numel = 0
    total_params = 0
    # walk each param group's flat vector, carving out one consecutive slice per param
    for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
        offset = 0
        avail_numel = full_single_fp32_vector.numel()
        for name, shape in shapes.items():

            # shape may be a torch.Size (has .numel) or a plain tuple/list
            unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
            total_numel += unpartitioned_numel
            total_params += 1

            if debug:
                print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
            state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
            offset += unpartitioned_numel

        # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
        # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
        # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
        # live optimizer object, so we are checking that the numbers are within the right range
        align_to = 2 * world_size

        def zero2_align(x):
            return align_to * math.ceil(x / align_to)

        if debug:
            print(f"original offset={offset}, avail_numel={avail_numel}")

        offset = zero2_align(offset)
        avail_numel = zero2_align(avail_numel)

        if debug:
            print(f"aligned offset={offset}, avail_numel={avail_numel}")

        # Sanity check
        if offset != avail_numel:
            raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
+
+
def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                               exclude_frozen_parameters):
    """Assemble the consolidated fp32 state_dict for a ZeRO-1/2 checkpoint."""
    state_dict = OrderedDict()

    # buffers go in first
    buffers = zero_model_states[0].buffers
    state_dict.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    if not exclude_frozen_parameters:
        _zero2_merge_frozen_params(state_dict, zero_model_states)

    _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters: point each alias at its source tensor
    for alias, source in zero_model_states[0].shared_params:
        if source in state_dict:
            state_dict[alias] = state_dict[source]

    return state_dict
+
+
def zero3_partitioned_param_info(unpartitioned_numel, world_size):
    """Return ``(per-rank partition numel, padding numel)`` for a ZeRO-3 param."""
    partitioned_numel = math.ceil(unpartitioned_numel / world_size)
    remainder = unpartitioned_numel % world_size
    if remainder:
        padding_numel = world_size - remainder
    else:
        padding_numel = 0
    return partitioned_numel, padding_numel
+
+
def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
    """Reassemble frozen params from per-rank fragments for a ZeRO-3 checkpoint.

    Every rank holds roughly a 1/world_size fragment of each frozen param; the
    fragments are concatenated, trimmed of trailing padding, and viewed back
    to the original shape.  No-op when the checkpoint holds no frozen params.
    """
    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
    if frozen_param_shapes is None or len(frozen_param_shapes) == 0:
        return

    if debug:
        for i in range(world_size):
            num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
            print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

    wanted_params = len(frozen_param_shapes)
    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
    avail_numel = sum(p.numel() for p in zero_model_states[0].frozen_param_fragments.values()) * world_size
    print(f'Frozen params: Have {avail_numel} numels to process.')
    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params, total_numel = 0, 0
    for name, shape in frozen_param_shapes.items():
        unpartitioned_numel = shape.numel()
        total_params += 1
        total_numel += unpartitioned_numel

        # gather this param's fragment from every rank, glue, drop padding, reshape
        param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
        state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)

        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    """Re-consolidate trainable params from per-rank flat partitions (ZeRO-3).

    In ZeRO-3 every param is sliced across all ranks, so each param is rebuilt
    by taking a ``partitioned_numel``-sized slice from every rank's flat
    tensor at the same running offset, concatenating them, and trimming the
    final-rank padding.  Mutates *state_dict* in place.

    Raises:
        ValueError: when the total consumed elements disagree with the
            available elements across all ranks.
    """
    param_shapes = zero_model_states[0].param_shapes
    # NOTE(review): this avail_numel is recomputed identically below before it
    # is ever read — the first assignment is dead code kept from upstream.
    avail_numel = fp32_flat_groups[0].numel() * world_size
    # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
    # param, re-consolidating each param, while dealing with padding if any

    # merge list of dicts, preserving order
    param_shapes = {k: v for d in param_shapes for k, v in d.items()}

    if debug:
        for i in range(world_size):
            print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")

    wanted_params = len(param_shapes)
    wanted_numel = sum(shape.numel() for shape in param_shapes.values())
    # not asserting if there is a mismatch due to possible padding
    avail_numel = fp32_flat_groups[0].numel() * world_size
    print(f"Trainable params: Have {avail_numel} numels to process.")
    print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    offset = 0
    total_numel = 0
    total_params = 0
    for name, shape in param_shapes.items():

        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel
        total_params += 1

        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

        # XXX: memory usage doubles here
        # slice this param out of every rank's flat tensor at the shared offset,
        # concatenate, drop the trailing padding, and view back to the true shape
        state_dict[name] = torch.cat(
            tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
            0).narrow(0, 0, unpartitioned_numel).view(shape)
        offset += partitioned_numel

    offset *= world_size

    # Sanity check
    if offset != avail_numel:
        raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
+
+
def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                               exclude_frozen_parameters):
    """Assemble the consolidated fp32 state_dict for a ZeRO-3 checkpoint."""
    state_dict = OrderedDict()

    # buffers go in first
    buffers = zero_model_states[0].buffers
    state_dict.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    if not exclude_frozen_parameters:
        _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)

    _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters: point each alias at its source tensor
    for alias, source in zero_model_states[0].shared_params:
        if source in state_dict:
            state_dict[alias] = state_dict[source]

    return state_dict
+
+
def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None, exclude_frozen_parameters=False):
    """
    Convert a ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that
    can be loaded with ``load_state_dict()`` and used for training without DeepSpeed
    or shared with others, for example via a model hub.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
        - ``exclude_frozen_parameters``: exclude frozen parameters

    Returns:
        - pytorch ``state_dict``

    The entire state dict is materialized in CPU RAM; if that does not fit,
    use the offline approach via the ``zero_to_fp32.py`` script that is saved
    with the checkpoint instead.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
        # do the training and checkpoint saving
        state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
        model = model.cpu() # move to cpu
        model.load_state_dict(state_dict)
        # submit to model hub or save the model to share with others

    In this example the ``model`` will no longer be usable in the deepspeed
    context of the same application. i.e. you will need to re-initialize the
    deepspeed engine, since ``model.load_state_dict(state_dict)`` will remove
    all the deepspeed magic from it.

    If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
    """
    if tag is None:
        # the 'latest' file holds the tag of the most recent save, e.g. "global_step1000"
        latest_path = os.path.join(checkpoint_dir, 'latest')
        if not os.path.isfile(latest_path):
            raise ValueError(f"Unable to find 'latest' file at {latest_path}")
        with open(latest_path, 'r') as fd:
            tag = fd.read().strip()

    ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)

    if not os.path.isdir(ds_checkpoint_dir):
        raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")

    return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
+
+
def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None, exclude_frozen_parameters=False):
    """
    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
    loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
        - ``exclude_frozen_parameters``: exclude frozen parameters
    """
    # reconstruction happens fully in CPU RAM; the file is only written at the end
    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag, exclude_frozen_parameters)
    print(f"Saving fp32 state dict to {output_file}")
    torch.save(state_dict, output_file)
+
+
def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
    """
    1. Put the provided model to cpu
    2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
    3. Load it into the provided model

    Args:
        - ``model``: the model object to update
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``

    Returns:
        - ``model``: the same model, now carrying the consolidated fp32 weights

    Make sure you have plenty of CPU memory available before you call this function. If you don't
    have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
    conveniently placed for you in the checkpoint folder.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
        model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
        # submit to model hub or save the model to share with others

    Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context
    of the same application. i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    """
    logger.info(f"Extracting fp32 weights")
    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)

    logger.info(f"Overwriting model with fp32 weights")
    model = model.cpu()
    # strict=False: when frozen params/buffers were excluded the state_dict is partial
    model.load_state_dict(state_dict, strict=False)

    return model
+
+
if __name__ == "__main__":
    # CLI entry point: consolidate a ZeRO checkpoint into one fp32 state_dict file.
    parser = argparse.ArgumentParser()
    parser.add_argument("checkpoint_dir", type=str,
                        help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
    parser.add_argument("output_file", type=str,
                        help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)")
    parser.add_argument("-t", "--tag", type=str, default=None,
                        help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
    parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
    parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
    args = parser.parse_args()

    # flip the module-level debug flag consulted throughout this script
    debug = args.debug

    convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
                                               args.output_file,
                                               tag=args.tag,
                                               exclude_frozen_parameters=args.exclude_frozen_parameters)
diff --git a/checkpoint-1050/README.md b/checkpoint-1050/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..16b1eacdd9353dec380a08ee77ce6ed5ab50f12e
--- /dev/null
+++ b/checkpoint-1050/README.md
@@ -0,0 +1,202 @@
+---
+library_name: peft
+base_model: gotzmann/uni
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+
+### Framework versions
+
+- PEFT 0.10.0
\ No newline at end of file
diff --git a/checkpoint-1050/adapter_config.json b/checkpoint-1050/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..e9287964d2e2ba3ecf1c52d1ac5d36e39722c816
--- /dev/null
+++ b/checkpoint-1050/adapter_config.json
@@ -0,0 +1,31 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "gotzmann/uni",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 128,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 128,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "v_proj",
+ "o_proj",
+ "k_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": true
+}
\ No newline at end of file
diff --git a/checkpoint-1050/adapter_model.safetensors b/checkpoint-1050/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..52a256b8635154e3e1c50914fb9731c141fd6e97
--- /dev/null
+++ b/checkpoint-1050/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:74fa537a65ece7afb48497d0fc7176394772e474c2176bdf26eb2a12c5eb6133
+size 1048664848
diff --git a/checkpoint-1050/global_step1050/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt b/checkpoint-1050/global_step1050/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..f072516904992e6467b803eac8b74bf51da2113d
--- /dev/null
+++ b/checkpoint-1050/global_step1050/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a5d9fa14d2e94404b03a22c39c78e592828c9a5097f79e10ba4542e903b7a1c5
+size 787270042
diff --git a/checkpoint-1050/global_step1050/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt b/checkpoint-1050/global_step1050/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..dc231d33088f1706c848d0925c9931e2af3ca505
--- /dev/null
+++ b/checkpoint-1050/global_step1050/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fba576d371c7e5e476dcd7036e9b13fccea09de427e33b695c9568a2be1f7cf5
+size 787270042
diff --git a/checkpoint-1050/global_step1050/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt b/checkpoint-1050/global_step1050/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..7af46958d6d2d4c9fab3c1a2844775c7d3e5fe91
--- /dev/null
+++ b/checkpoint-1050/global_step1050/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a1334c39b8090974ad0010c20339347ea540835715d396496ed7b1a12c65a9e1
+size 787270042
diff --git a/checkpoint-1050/global_step1050/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt b/checkpoint-1050/global_step1050/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..50b2f75e4a20bbedfd177ccb39c167299428e65b
--- /dev/null
+++ b/checkpoint-1050/global_step1050/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:90bf5319cb9f6c53b43afddef0851bd6b32baf2f58e3aa847dd87dc8f45d3376
+size 787270042
diff --git a/checkpoint-1050/global_step1050/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt b/checkpoint-1050/global_step1050/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..53356fdcb6b53724cd7e1df6126b087fbde6263c
--- /dev/null
+++ b/checkpoint-1050/global_step1050/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:199ef91ca33e98122ae5d9db72923ca01b0d645958c36e49a0a5cfda5db895e3
+size 787270042
diff --git a/checkpoint-1050/global_step1050/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt b/checkpoint-1050/global_step1050/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..27dc590d694566ef99411fdb57cfc4979250d40d
--- /dev/null
+++ b/checkpoint-1050/global_step1050/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d994fb7db41608b2f3eced488c5cd4e9fb33d5a927664ab830e3bdb2ca39b711
+size 787270042
diff --git a/checkpoint-1050/global_step1050/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt b/checkpoint-1050/global_step1050/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..b1ec1ae28902cd9e9920ad9c4041b6db2ffb8a88
--- /dev/null
+++ b/checkpoint-1050/global_step1050/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:99edcf3ebb349e86e46ec91ad1777bf3678b03b4761aff5c7aff143cb43c1a15
+size 787270042
diff --git a/checkpoint-1050/global_step1050/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt b/checkpoint-1050/global_step1050/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..c84a53f2a53e81bc3732e711354bd6edc3b22e72
--- /dev/null
+++ b/checkpoint-1050/global_step1050/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:93429d616accac9d7fa2e84862008e4559616af4790d2826f1dd941869e8442b
+size 787270042
diff --git a/checkpoint-1050/global_step1050/zero_pp_rank_0_mp_rank_00_model_states.pt b/checkpoint-1050/global_step1050/zero_pp_rank_0_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..9bb149842f0c52e742d84b64484866ee97d0db5b
--- /dev/null
+++ b/checkpoint-1050/global_step1050/zero_pp_rank_0_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4b3f51e27f57bf07ab75a22b3be7686c834716ac6bf1e42e58696786749e40eb
+size 653742
diff --git a/checkpoint-1050/global_step1050/zero_pp_rank_1_mp_rank_00_model_states.pt b/checkpoint-1050/global_step1050/zero_pp_rank_1_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..1219ce5aa5f2d8540ecf3486a6ad722b4466a575
--- /dev/null
+++ b/checkpoint-1050/global_step1050/zero_pp_rank_1_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4bc7d961b5a083a83f9206e13485480e5edd384db7a2eccd8f5de849a65b61f5
+size 653742
diff --git a/checkpoint-1050/global_step1050/zero_pp_rank_2_mp_rank_00_model_states.pt b/checkpoint-1050/global_step1050/zero_pp_rank_2_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..a1e506bf194d5bd563e7858c695a0dca57eb5b1a
--- /dev/null
+++ b/checkpoint-1050/global_step1050/zero_pp_rank_2_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:414a5f5d4247b734ac16b08d4c1e5659697300fde198d5056b0505887c25a2b8
+size 653742
diff --git a/checkpoint-1050/global_step1050/zero_pp_rank_3_mp_rank_00_model_states.pt b/checkpoint-1050/global_step1050/zero_pp_rank_3_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..1ecc7612d7f59e3d15454c22604efc92a13b066b
--- /dev/null
+++ b/checkpoint-1050/global_step1050/zero_pp_rank_3_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:be1b2039a29e8f23b7cad84b8eaf0024e18a2fbdcbc41fdb9a652e4b7e95bbba
+size 653742
diff --git a/checkpoint-1050/global_step1050/zero_pp_rank_4_mp_rank_00_model_states.pt b/checkpoint-1050/global_step1050/zero_pp_rank_4_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..112eaa5d108be50233d04f7818f3ff710d29f49b
--- /dev/null
+++ b/checkpoint-1050/global_step1050/zero_pp_rank_4_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:58a942392d335b9d82ec508fea9e99a76ccf8a2ff510cb38c92c0b167fcf67fe
+size 653742
diff --git a/checkpoint-1050/global_step1050/zero_pp_rank_5_mp_rank_00_model_states.pt b/checkpoint-1050/global_step1050/zero_pp_rank_5_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..714f18ffa311c637f8bdce3df1110cbf6384db5e
--- /dev/null
+++ b/checkpoint-1050/global_step1050/zero_pp_rank_5_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0a11536ae6ed3f56eb0983e22371db0076f2a0540b433142639548e46348cf36
+size 653742
diff --git a/checkpoint-1050/global_step1050/zero_pp_rank_6_mp_rank_00_model_states.pt b/checkpoint-1050/global_step1050/zero_pp_rank_6_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..982c2af2865dc7330afdf3a30411d22b48d449ec
--- /dev/null
+++ b/checkpoint-1050/global_step1050/zero_pp_rank_6_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c76a9d81f76ff90f486b98ee2f90924bfa83ac298d3b1a865d2945e241717a6b
+size 653742
diff --git a/checkpoint-1050/global_step1050/zero_pp_rank_7_mp_rank_00_model_states.pt b/checkpoint-1050/global_step1050/zero_pp_rank_7_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..4660be82da4673bca83817646ebc658ec434cc74
--- /dev/null
+++ b/checkpoint-1050/global_step1050/zero_pp_rank_7_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:31c89a7f8a58901e1ef71eb63cc914bb06f34c661e4657336851817a1cfa1ccb
+size 653742
diff --git a/checkpoint-1050/latest b/checkpoint-1050/latest
new file mode 100644
index 0000000000000000000000000000000000000000..9003e5f7e95704409b5d8f438ed7572043c8b9ad
--- /dev/null
+++ b/checkpoint-1050/latest
@@ -0,0 +1 @@
+global_step1050
\ No newline at end of file
diff --git a/checkpoint-1050/rng_state_0.pth b/checkpoint-1050/rng_state_0.pth
new file mode 100644
index 0000000000000000000000000000000000000000..6a74f25da28f01a2e6b66587824ee5f5cc9be737
--- /dev/null
+++ b/checkpoint-1050/rng_state_0.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3ee195ebde9bf012f945f068f133e7fe22fef5450c496607e3ef11cc2034a186
+size 15984
diff --git a/checkpoint-1050/rng_state_1.pth b/checkpoint-1050/rng_state_1.pth
new file mode 100644
index 0000000000000000000000000000000000000000..f44ddc47315653477728c971b4ea191a3df8b92c
--- /dev/null
+++ b/checkpoint-1050/rng_state_1.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bf0fe1a3315d60b197207c5cb249d0ce4f9ce6d7585e696276d9ffbcb5379893
+size 15984
diff --git a/checkpoint-1050/rng_state_2.pth b/checkpoint-1050/rng_state_2.pth
new file mode 100644
index 0000000000000000000000000000000000000000..04636b9eca6484a4339eaa1e3acdf15d42d493b3
--- /dev/null
+++ b/checkpoint-1050/rng_state_2.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:01c5bd6eae04542162b3e94245555bd81312524066bc01d0ebbfc4fd8554240e
+size 15984
diff --git a/checkpoint-1050/rng_state_3.pth b/checkpoint-1050/rng_state_3.pth
new file mode 100644
index 0000000000000000000000000000000000000000..05435e407541728c3159054a4beb6705039a8ddf
--- /dev/null
+++ b/checkpoint-1050/rng_state_3.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:45b74942c68b00d657cfce186b0eeb4aa8f52efa04b114803b605fee8de45972
+size 15984
diff --git a/checkpoint-1050/rng_state_4.pth b/checkpoint-1050/rng_state_4.pth
new file mode 100644
index 0000000000000000000000000000000000000000..94fdf5f2c3e5df27424e6482bf52255531147a23
--- /dev/null
+++ b/checkpoint-1050/rng_state_4.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0cd66dd2ba958fc9929441817d8154abbd929c0aa9cd66ff3171965bdaaf5d78
+size 15984
diff --git a/checkpoint-1050/rng_state_5.pth b/checkpoint-1050/rng_state_5.pth
new file mode 100644
index 0000000000000000000000000000000000000000..da6e37fc011d97a1512e1e746bdd410a738c018a
--- /dev/null
+++ b/checkpoint-1050/rng_state_5.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:89eeedefdd62514d0130acc330a5c08e9774c95d38c60997905cfd65fc54b710
+size 15984
diff --git a/checkpoint-1050/rng_state_6.pth b/checkpoint-1050/rng_state_6.pth
new file mode 100644
index 0000000000000000000000000000000000000000..751fd85c617e15dee9713bc0f0c533af5bd18c8e
--- /dev/null
+++ b/checkpoint-1050/rng_state_6.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f43ced939100082608f57561a10e1888e69210c80675068db530c5815889910e
+size 15984
diff --git a/checkpoint-1050/rng_state_7.pth b/checkpoint-1050/rng_state_7.pth
new file mode 100644
index 0000000000000000000000000000000000000000..4aacf54fa8285b7e199a7cd62f1ee3d8b9beb5e5
--- /dev/null
+++ b/checkpoint-1050/rng_state_7.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0d8d6ee244d99525e7004ae3f02d44ae63082d81fbbab7306f641ac6aeeb736f
+size 15984
diff --git a/checkpoint-1050/scheduler.pt b/checkpoint-1050/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..706e20a94a7e2991f07bfa5aa44e2d6c47b72da7
--- /dev/null
+++ b/checkpoint-1050/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:97feef438f4431a4ad6f82ff4896fbb93af122cfbe57a1b829aa9b6b13da43a5
+size 1064
diff --git a/checkpoint-1050/special_tokens_map.json b/checkpoint-1050/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..72ecfeeb7e14d244c936169d2ed139eeae235ef1
--- /dev/null
+++ b/checkpoint-1050/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "",
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/checkpoint-1050/tokenizer.model b/checkpoint-1050/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..6c00c742ce03c627d6cd5b795984876fa49fa899
--- /dev/null
+++ b/checkpoint-1050/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
diff --git a/checkpoint-1050/tokenizer_config.json b/checkpoint-1050/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..bb5a9f09d8c0f3c32c66fc6118fe5c76c5c6fd90
--- /dev/null
+++ b/checkpoint-1050/tokenizer_config.json
@@ -0,0 +1,45 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "",
+ "chat_template": "{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ '' + '### System:\\n\\n' + system_message }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '\\n\\n### Human:\\n\\n' + content }}{% elif message['role'] == 'assistant' %}{{ '\\n\\n### Assistant:\\n\\n' + content + '' }}{% endif %}{% endfor %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "",
+ "legacy": false,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "split_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "",
+ "use_default_system_prompt": false
+}
diff --git a/checkpoint-1050/trainer_state.json b/checkpoint-1050/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..b72a74def5711737305b1858d0876290508e5a13
--- /dev/null
+++ b/checkpoint-1050/trainer_state.json
@@ -0,0 +1,7371 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 1.0914494741655236,
+ "eval_steps": 500,
+ "global_step": 1050,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.0,
+ "grad_norm": 0.849355824164473,
+ "learning_rate": 4.878048780487805e-07,
+ "loss": 1.3655,
+ "step": 1
+ },
+ {
+ "epoch": 0.0,
+ "grad_norm": 10.01567518957158,
+ "learning_rate": 9.75609756097561e-07,
+ "loss": 1.5767,
+ "step": 2
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.6466000875559635,
+ "learning_rate": 1.4634146341463414e-06,
+ "loss": 1.3913,
+ "step": 3
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.6644565932010504,
+ "learning_rate": 1.951219512195122e-06,
+ "loss": 1.3218,
+ "step": 4
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.571354207588475,
+ "learning_rate": 2.4390243902439027e-06,
+ "loss": 1.3597,
+ "step": 5
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.31036262839244955,
+ "learning_rate": 2.926829268292683e-06,
+ "loss": 1.2832,
+ "step": 6
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.2622135027188184,
+ "learning_rate": 3.414634146341464e-06,
+ "loss": 1.2161,
+ "step": 7
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.296824630261661,
+ "learning_rate": 3.902439024390244e-06,
+ "loss": 1.2985,
+ "step": 8
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.2557267467361569,
+ "learning_rate": 4.390243902439025e-06,
+ "loss": 1.3175,
+ "step": 9
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.23418939513890769,
+ "learning_rate": 4.8780487804878055e-06,
+ "loss": 1.2617,
+ "step": 10
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.2364760983285843,
+ "learning_rate": 5.365853658536586e-06,
+ "loss": 1.3103,
+ "step": 11
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.23893034721889,
+ "learning_rate": 5.853658536585366e-06,
+ "loss": 1.2405,
+ "step": 12
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.25563593295485887,
+ "learning_rate": 6.341463414634147e-06,
+ "loss": 1.2831,
+ "step": 13
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.23239975352661665,
+ "learning_rate": 6.829268292682928e-06,
+ "loss": 1.3125,
+ "step": 14
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.3092813858209507,
+ "learning_rate": 7.317073170731707e-06,
+ "loss": 1.2422,
+ "step": 15
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.282563380367434,
+ "learning_rate": 7.804878048780489e-06,
+ "loss": 1.2453,
+ "step": 16
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.22065680088315018,
+ "learning_rate": 8.292682926829268e-06,
+ "loss": 1.2491,
+ "step": 17
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.22777800877980184,
+ "learning_rate": 8.78048780487805e-06,
+ "loss": 1.2655,
+ "step": 18
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.22145212540177928,
+ "learning_rate": 9.268292682926831e-06,
+ "loss": 1.2413,
+ "step": 19
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.22482351883112714,
+ "learning_rate": 9.756097560975611e-06,
+ "loss": 1.2653,
+ "step": 20
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.20823080508385733,
+ "learning_rate": 1.024390243902439e-05,
+ "loss": 1.2374,
+ "step": 21
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.26025492562935737,
+ "learning_rate": 1.0731707317073172e-05,
+ "loss": 1.2065,
+ "step": 22
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.2150252124176173,
+ "learning_rate": 1.1219512195121953e-05,
+ "loss": 1.2782,
+ "step": 23
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.2505915177425618,
+ "learning_rate": 1.1707317073170731e-05,
+ "loss": 1.2742,
+ "step": 24
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.20129223044786942,
+ "learning_rate": 1.2195121951219513e-05,
+ "loss": 1.3366,
+ "step": 25
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.1973508510397107,
+ "learning_rate": 1.2682926829268294e-05,
+ "loss": 1.2476,
+ "step": 26
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.27103325392437194,
+ "learning_rate": 1.3170731707317076e-05,
+ "loss": 1.2325,
+ "step": 27
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.17954976411006285,
+ "learning_rate": 1.3658536585365855e-05,
+ "loss": 1.2523,
+ "step": 28
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.22216997851088888,
+ "learning_rate": 1.4146341463414635e-05,
+ "loss": 1.3297,
+ "step": 29
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.2071458864548587,
+ "learning_rate": 1.4634146341463415e-05,
+ "loss": 1.2127,
+ "step": 30
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.18039422081622164,
+ "learning_rate": 1.5121951219512196e-05,
+ "loss": 1.2509,
+ "step": 31
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.18631254372974412,
+ "learning_rate": 1.5609756097560978e-05,
+ "loss": 1.2247,
+ "step": 32
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.18843872523649827,
+ "learning_rate": 1.6097560975609757e-05,
+ "loss": 1.195,
+ "step": 33
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.2163847267778325,
+ "learning_rate": 1.6585365853658537e-05,
+ "loss": 1.2179,
+ "step": 34
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.19687688475496104,
+ "learning_rate": 1.7073170731707317e-05,
+ "loss": 1.2763,
+ "step": 35
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.20409643064887947,
+ "learning_rate": 1.75609756097561e-05,
+ "loss": 1.253,
+ "step": 36
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.1879182661759335,
+ "learning_rate": 1.804878048780488e-05,
+ "loss": 1.2586,
+ "step": 37
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.19400648948514373,
+ "learning_rate": 1.8536585365853663e-05,
+ "loss": 1.2154,
+ "step": 38
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.1878879343148452,
+ "learning_rate": 1.902439024390244e-05,
+ "loss": 1.2304,
+ "step": 39
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.17687475469924052,
+ "learning_rate": 1.9512195121951222e-05,
+ "loss": 1.2351,
+ "step": 40
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.18223935625384885,
+ "learning_rate": 2e-05,
+ "loss": 1.2222,
+ "step": 41
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.1943061629408338,
+ "learning_rate": 2.048780487804878e-05,
+ "loss": 1.2044,
+ "step": 42
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.17027514338700078,
+ "learning_rate": 2.0975609756097564e-05,
+ "loss": 1.1548,
+ "step": 43
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.18553769630586192,
+ "learning_rate": 2.1463414634146344e-05,
+ "loss": 1.2721,
+ "step": 44
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.19732826914228765,
+ "learning_rate": 2.1951219512195124e-05,
+ "loss": 1.3097,
+ "step": 45
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.18714230986631472,
+ "learning_rate": 2.2439024390243907e-05,
+ "loss": 1.2662,
+ "step": 46
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.19988987568002223,
+ "learning_rate": 2.2926829268292683e-05,
+ "loss": 1.2904,
+ "step": 47
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.17744650133390918,
+ "learning_rate": 2.3414634146341463e-05,
+ "loss": 1.1825,
+ "step": 48
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.16576734763834533,
+ "learning_rate": 2.3902439024390246e-05,
+ "loss": 1.1858,
+ "step": 49
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.179591794065527,
+ "learning_rate": 2.4390243902439026e-05,
+ "loss": 1.2711,
+ "step": 50
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.17923464471176911,
+ "learning_rate": 2.4878048780487805e-05,
+ "loss": 1.2289,
+ "step": 51
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.18991742907836837,
+ "learning_rate": 2.536585365853659e-05,
+ "loss": 1.3097,
+ "step": 52
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.19849796137254636,
+ "learning_rate": 2.5853658536585368e-05,
+ "loss": 1.2489,
+ "step": 53
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.17452371110976383,
+ "learning_rate": 2.634146341463415e-05,
+ "loss": 1.2461,
+ "step": 54
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.17671022353085036,
+ "learning_rate": 2.682926829268293e-05,
+ "loss": 1.153,
+ "step": 55
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.36820559192096686,
+ "learning_rate": 2.731707317073171e-05,
+ "loss": 1.2431,
+ "step": 56
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.20331468526494198,
+ "learning_rate": 2.7804878048780487e-05,
+ "loss": 1.2575,
+ "step": 57
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.2402486598118377,
+ "learning_rate": 2.829268292682927e-05,
+ "loss": 1.2538,
+ "step": 58
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.2549409484173144,
+ "learning_rate": 2.878048780487805e-05,
+ "loss": 1.2065,
+ "step": 59
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.2053105349872685,
+ "learning_rate": 2.926829268292683e-05,
+ "loss": 1.2094,
+ "step": 60
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.17971910872957886,
+ "learning_rate": 2.9756097560975613e-05,
+ "loss": 1.228,
+ "step": 61
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.1885853654992973,
+ "learning_rate": 3.0243902439024392e-05,
+ "loss": 1.2286,
+ "step": 62
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.1848524571968613,
+ "learning_rate": 3.073170731707317e-05,
+ "loss": 1.2718,
+ "step": 63
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.18734105883548513,
+ "learning_rate": 3.1219512195121955e-05,
+ "loss": 1.2357,
+ "step": 64
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.17774668052121825,
+ "learning_rate": 3.170731707317074e-05,
+ "loss": 1.1509,
+ "step": 65
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.17890968008080646,
+ "learning_rate": 3.2195121951219514e-05,
+ "loss": 1.1924,
+ "step": 66
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.18249273371332375,
+ "learning_rate": 3.268292682926829e-05,
+ "loss": 1.2545,
+ "step": 67
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.21064122671902577,
+ "learning_rate": 3.3170731707317074e-05,
+ "loss": 1.2832,
+ "step": 68
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.1820064171955093,
+ "learning_rate": 3.365853658536586e-05,
+ "loss": 1.2071,
+ "step": 69
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.16996662800553433,
+ "learning_rate": 3.414634146341463e-05,
+ "loss": 1.2073,
+ "step": 70
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.1618669302922445,
+ "learning_rate": 3.4634146341463416e-05,
+ "loss": 1.1289,
+ "step": 71
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.18948744950985544,
+ "learning_rate": 3.51219512195122e-05,
+ "loss": 1.2915,
+ "step": 72
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.18326143691603383,
+ "learning_rate": 3.5609756097560976e-05,
+ "loss": 1.2238,
+ "step": 73
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.17410704510700503,
+ "learning_rate": 3.609756097560976e-05,
+ "loss": 1.1784,
+ "step": 74
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.1983667344995625,
+ "learning_rate": 3.658536585365854e-05,
+ "loss": 1.2452,
+ "step": 75
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.3416310763369357,
+ "learning_rate": 3.7073170731707325e-05,
+ "loss": 1.1972,
+ "step": 76
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.2776466983511955,
+ "learning_rate": 3.75609756097561e-05,
+ "loss": 1.3121,
+ "step": 77
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.20026129636576834,
+ "learning_rate": 3.804878048780488e-05,
+ "loss": 1.2436,
+ "step": 78
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.21064549243917835,
+ "learning_rate": 3.853658536585366e-05,
+ "loss": 1.2064,
+ "step": 79
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.22119482175714267,
+ "learning_rate": 3.9024390243902444e-05,
+ "loss": 1.2715,
+ "step": 80
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.23047133748844142,
+ "learning_rate": 3.951219512195122e-05,
+ "loss": 1.2888,
+ "step": 81
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.18741863156973176,
+ "learning_rate": 4e-05,
+ "loss": 1.248,
+ "step": 82
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.1747859810629604,
+ "learning_rate": 4.0487804878048786e-05,
+ "loss": 1.1683,
+ "step": 83
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.1896944798413341,
+ "learning_rate": 4.097560975609756e-05,
+ "loss": 1.2155,
+ "step": 84
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.18724128114363303,
+ "learning_rate": 4.1463414634146346e-05,
+ "loss": 1.2273,
+ "step": 85
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.17368125504855478,
+ "learning_rate": 4.195121951219513e-05,
+ "loss": 1.224,
+ "step": 86
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.18371141013625703,
+ "learning_rate": 4.2439024390243905e-05,
+ "loss": 1.2294,
+ "step": 87
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.1791029365673714,
+ "learning_rate": 4.292682926829269e-05,
+ "loss": 1.2895,
+ "step": 88
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.20259974283859655,
+ "learning_rate": 4.341463414634147e-05,
+ "loss": 1.1841,
+ "step": 89
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.17457456183272174,
+ "learning_rate": 4.390243902439025e-05,
+ "loss": 1.2357,
+ "step": 90
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.1815824380789748,
+ "learning_rate": 4.439024390243903e-05,
+ "loss": 1.2304,
+ "step": 91
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.17566480599583392,
+ "learning_rate": 4.4878048780487814e-05,
+ "loss": 1.242,
+ "step": 92
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.18422975005984474,
+ "learning_rate": 4.536585365853658e-05,
+ "loss": 1.2177,
+ "step": 93
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.16796781877940678,
+ "learning_rate": 4.5853658536585366e-05,
+ "loss": 1.1482,
+ "step": 94
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.18636131653783305,
+ "learning_rate": 4.634146341463415e-05,
+ "loss": 1.1758,
+ "step": 95
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.1823665700289814,
+ "learning_rate": 4.6829268292682926e-05,
+ "loss": 1.289,
+ "step": 96
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.1719900691262439,
+ "learning_rate": 4.731707317073171e-05,
+ "loss": 1.1626,
+ "step": 97
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.17937994168039778,
+ "learning_rate": 4.780487804878049e-05,
+ "loss": 1.175,
+ "step": 98
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.16631851422106986,
+ "learning_rate": 4.829268292682927e-05,
+ "loss": 1.2177,
+ "step": 99
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.19143696232800309,
+ "learning_rate": 4.878048780487805e-05,
+ "loss": 1.3071,
+ "step": 100
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.17859506638780318,
+ "learning_rate": 4.9268292682926835e-05,
+ "loss": 1.2351,
+ "step": 101
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.18381520321248196,
+ "learning_rate": 4.975609756097561e-05,
+ "loss": 1.2342,
+ "step": 102
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.17968218683773912,
+ "learning_rate": 5.0243902439024394e-05,
+ "loss": 1.2074,
+ "step": 103
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.18139489969339018,
+ "learning_rate": 5.073170731707318e-05,
+ "loss": 1.1558,
+ "step": 104
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.17366624842514394,
+ "learning_rate": 5.121951219512195e-05,
+ "loss": 1.1897,
+ "step": 105
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.16034845455223745,
+ "learning_rate": 5.1707317073170736e-05,
+ "loss": 1.179,
+ "step": 106
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.17583069577827776,
+ "learning_rate": 5.219512195121952e-05,
+ "loss": 1.1856,
+ "step": 107
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.1853758076989552,
+ "learning_rate": 5.26829268292683e-05,
+ "loss": 1.2072,
+ "step": 108
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.19597443965936462,
+ "learning_rate": 5.317073170731708e-05,
+ "loss": 1.2271,
+ "step": 109
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.1899206334098331,
+ "learning_rate": 5.365853658536586e-05,
+ "loss": 1.1961,
+ "step": 110
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.17463763837757018,
+ "learning_rate": 5.4146341463414645e-05,
+ "loss": 1.2049,
+ "step": 111
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.20431371701229986,
+ "learning_rate": 5.463414634146342e-05,
+ "loss": 1.2891,
+ "step": 112
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1814475107638498,
+ "learning_rate": 5.51219512195122e-05,
+ "loss": 1.2346,
+ "step": 113
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1883849423207823,
+ "learning_rate": 5.5609756097560974e-05,
+ "loss": 1.244,
+ "step": 114
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1857258128640568,
+ "learning_rate": 5.609756097560976e-05,
+ "loss": 1.2669,
+ "step": 115
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1740768514118401,
+ "learning_rate": 5.658536585365854e-05,
+ "loss": 1.2414,
+ "step": 116
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1919320335584178,
+ "learning_rate": 5.7073170731707317e-05,
+ "loss": 1.2886,
+ "step": 117
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.18288775167828136,
+ "learning_rate": 5.75609756097561e-05,
+ "loss": 1.1875,
+ "step": 118
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.18208588867750863,
+ "learning_rate": 5.804878048780488e-05,
+ "loss": 1.2388,
+ "step": 119
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.1743260015658331,
+ "learning_rate": 5.853658536585366e-05,
+ "loss": 1.1762,
+ "step": 120
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.17856046291517946,
+ "learning_rate": 5.902439024390244e-05,
+ "loss": 1.2888,
+ "step": 121
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.17493794870966536,
+ "learning_rate": 5.9512195121951225e-05,
+ "loss": 1.2222,
+ "step": 122
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.1909202655203384,
+ "learning_rate": 6.000000000000001e-05,
+ "loss": 1.2414,
+ "step": 123
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.18345819482834988,
+ "learning_rate": 6.0487804878048785e-05,
+ "loss": 1.2756,
+ "step": 124
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.2057069352956621,
+ "learning_rate": 6.097560975609757e-05,
+ "loss": 1.261,
+ "step": 125
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.299775882469108,
+ "learning_rate": 6.146341463414634e-05,
+ "loss": 1.2566,
+ "step": 126
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.1869687633018095,
+ "learning_rate": 6.195121951219513e-05,
+ "loss": 1.3039,
+ "step": 127
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.17747149926197442,
+ "learning_rate": 6.243902439024391e-05,
+ "loss": 1.2524,
+ "step": 128
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.17885157788044242,
+ "learning_rate": 6.29268292682927e-05,
+ "loss": 1.2455,
+ "step": 129
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.17617298187845123,
+ "learning_rate": 6.341463414634148e-05,
+ "loss": 1.2009,
+ "step": 130
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.20164176323497066,
+ "learning_rate": 6.390243902439025e-05,
+ "loss": 1.2634,
+ "step": 131
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.20459903417307612,
+ "learning_rate": 6.439024390243903e-05,
+ "loss": 1.1963,
+ "step": 132
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.1863755486334296,
+ "learning_rate": 6.487804878048781e-05,
+ "loss": 1.2387,
+ "step": 133
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.19265866140295207,
+ "learning_rate": 6.536585365853658e-05,
+ "loss": 1.2688,
+ "step": 134
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.1823425868969493,
+ "learning_rate": 6.585365853658536e-05,
+ "loss": 1.2041,
+ "step": 135
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.2016853266472781,
+ "learning_rate": 6.634146341463415e-05,
+ "loss": 1.1223,
+ "step": 136
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.17282675192463448,
+ "learning_rate": 6.682926829268293e-05,
+ "loss": 1.1879,
+ "step": 137
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.17398811693399288,
+ "learning_rate": 6.731707317073171e-05,
+ "loss": 1.2682,
+ "step": 138
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.18516916965434696,
+ "learning_rate": 6.78048780487805e-05,
+ "loss": 1.1666,
+ "step": 139
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.1852213129647933,
+ "learning_rate": 6.829268292682927e-05,
+ "loss": 1.2501,
+ "step": 140
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.17915948766591883,
+ "learning_rate": 6.878048780487805e-05,
+ "loss": 1.2264,
+ "step": 141
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.21599939417233183,
+ "learning_rate": 6.926829268292683e-05,
+ "loss": 1.2376,
+ "step": 142
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.17839304459521851,
+ "learning_rate": 6.975609756097562e-05,
+ "loss": 1.2353,
+ "step": 143
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.20826913231380875,
+ "learning_rate": 7.02439024390244e-05,
+ "loss": 1.1901,
+ "step": 144
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.20788894913361589,
+ "learning_rate": 7.073170731707318e-05,
+ "loss": 1.2577,
+ "step": 145
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.18420055842301297,
+ "learning_rate": 7.121951219512195e-05,
+ "loss": 1.1393,
+ "step": 146
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.19903048468685589,
+ "learning_rate": 7.170731707317073e-05,
+ "loss": 1.2321,
+ "step": 147
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.19074116314985748,
+ "learning_rate": 7.219512195121952e-05,
+ "loss": 1.1912,
+ "step": 148
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.2353816469403903,
+ "learning_rate": 7.26829268292683e-05,
+ "loss": 1.28,
+ "step": 149
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.21634875684769345,
+ "learning_rate": 7.317073170731708e-05,
+ "loss": 1.3312,
+ "step": 150
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.18290969006743918,
+ "learning_rate": 7.365853658536587e-05,
+ "loss": 1.2214,
+ "step": 151
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.18484243897545208,
+ "learning_rate": 7.414634146341465e-05,
+ "loss": 1.1895,
+ "step": 152
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.21882343112978872,
+ "learning_rate": 7.463414634146342e-05,
+ "loss": 1.2219,
+ "step": 153
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.19868284379241205,
+ "learning_rate": 7.51219512195122e-05,
+ "loss": 1.2176,
+ "step": 154
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.20912516312950613,
+ "learning_rate": 7.560975609756097e-05,
+ "loss": 1.242,
+ "step": 155
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.23811880045549916,
+ "learning_rate": 7.609756097560976e-05,
+ "loss": 1.2838,
+ "step": 156
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.19511077122033713,
+ "learning_rate": 7.658536585365854e-05,
+ "loss": 1.1594,
+ "step": 157
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.20094129399534238,
+ "learning_rate": 7.707317073170732e-05,
+ "loss": 1.2966,
+ "step": 158
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.19366245038292418,
+ "learning_rate": 7.75609756097561e-05,
+ "loss": 1.2246,
+ "step": 159
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.19409570223867306,
+ "learning_rate": 7.804878048780489e-05,
+ "loss": 1.2312,
+ "step": 160
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.2087258457033805,
+ "learning_rate": 7.853658536585366e-05,
+ "loss": 1.2169,
+ "step": 161
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.18765223996270428,
+ "learning_rate": 7.902439024390244e-05,
+ "loss": 1.2383,
+ "step": 162
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.20734180224147242,
+ "learning_rate": 7.951219512195122e-05,
+ "loss": 1.2587,
+ "step": 163
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.24690929540287834,
+ "learning_rate": 8e-05,
+ "loss": 1.1951,
+ "step": 164
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.2003538797619543,
+ "learning_rate": 7.999990914797545e-05,
+ "loss": 1.1982,
+ "step": 165
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.22469075613510484,
+ "learning_rate": 7.99996365923145e-05,
+ "loss": 1.2355,
+ "step": 166
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.21870100788336058,
+ "learning_rate": 7.999918233425526e-05,
+ "loss": 1.1103,
+ "step": 167
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.20939989594131886,
+ "learning_rate": 7.999854637586122e-05,
+ "loss": 1.1966,
+ "step": 168
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.43108211416237796,
+ "learning_rate": 7.999772872002132e-05,
+ "loss": 1.2882,
+ "step": 169
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.27045413432174487,
+ "learning_rate": 7.999672937044984e-05,
+ "loss": 1.2399,
+ "step": 170
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.19700483036740515,
+ "learning_rate": 7.999554833168642e-05,
+ "loss": 1.202,
+ "step": 171
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.3335979493370708,
+ "learning_rate": 7.999418560909604e-05,
+ "loss": 1.1995,
+ "step": 172
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.3165803974474567,
+ "learning_rate": 7.999264120886902e-05,
+ "loss": 1.1569,
+ "step": 173
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.1951699080346223,
+ "learning_rate": 7.999091513802093e-05,
+ "loss": 1.1778,
+ "step": 174
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.2087559121749787,
+ "learning_rate": 7.998900740439265e-05,
+ "loss": 1.1736,
+ "step": 175
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.20345180977460478,
+ "learning_rate": 7.998691801665024e-05,
+ "loss": 1.2281,
+ "step": 176
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.24617644827252333,
+ "learning_rate": 7.998464698428495e-05,
+ "loss": 1.2072,
+ "step": 177
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.2469050959356265,
+ "learning_rate": 7.998219431761318e-05,
+ "loss": 1.2242,
+ "step": 178
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.19529317748460623,
+ "learning_rate": 7.997956002777642e-05,
+ "loss": 1.2567,
+ "step": 179
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.19048389491381376,
+ "learning_rate": 7.99767441267412e-05,
+ "loss": 1.2982,
+ "step": 180
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.2085799116493225,
+ "learning_rate": 7.997374662729904e-05,
+ "loss": 1.1254,
+ "step": 181
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.20636853256378995,
+ "learning_rate": 7.997056754306636e-05,
+ "loss": 1.2435,
+ "step": 182
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.20590016382290252,
+ "learning_rate": 7.99672068884845e-05,
+ "loss": 1.2658,
+ "step": 183
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.1931166169764433,
+ "learning_rate": 7.996366467881955e-05,
+ "loss": 1.1637,
+ "step": 184
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.18873318157988098,
+ "learning_rate": 7.995994093016237e-05,
+ "loss": 1.1335,
+ "step": 185
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.19210254625199108,
+ "learning_rate": 7.995603565942846e-05,
+ "loss": 1.1928,
+ "step": 186
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.2130986479765664,
+ "learning_rate": 7.995194888435792e-05,
+ "loss": 1.2158,
+ "step": 187
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.22003854501814088,
+ "learning_rate": 7.994768062351532e-05,
+ "loss": 1.2288,
+ "step": 188
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.20330803191993058,
+ "learning_rate": 7.994323089628968e-05,
+ "loss": 1.2426,
+ "step": 189
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.20567314642208634,
+ "learning_rate": 7.993859972289434e-05,
+ "loss": 1.2649,
+ "step": 190
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.21556663727342962,
+ "learning_rate": 7.993378712436686e-05,
+ "loss": 1.2545,
+ "step": 191
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.20309165469109888,
+ "learning_rate": 7.992879312256897e-05,
+ "loss": 1.3338,
+ "step": 192
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.19574356669421325,
+ "learning_rate": 7.992361774018641e-05,
+ "loss": 1.278,
+ "step": 193
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.2763613746722313,
+ "learning_rate": 7.991826100072891e-05,
+ "loss": 1.2571,
+ "step": 194
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.19346552479915102,
+ "learning_rate": 7.991272292852996e-05,
+ "loss": 1.2027,
+ "step": 195
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.2281167812123908,
+ "learning_rate": 7.990700354874683e-05,
+ "loss": 1.2586,
+ "step": 196
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.19699013712137542,
+ "learning_rate": 7.990110288736042e-05,
+ "loss": 1.1371,
+ "step": 197
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.21768209981475933,
+ "learning_rate": 7.989502097117503e-05,
+ "loss": 1.2522,
+ "step": 198
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.21335427847754582,
+ "learning_rate": 7.988875782781838e-05,
+ "loss": 1.2437,
+ "step": 199
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.21856710629066897,
+ "learning_rate": 7.988231348574147e-05,
+ "loss": 1.2135,
+ "step": 200
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.20482062658774797,
+ "learning_rate": 7.987568797421836e-05,
+ "loss": 1.1755,
+ "step": 201
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.2017756813960897,
+ "learning_rate": 7.986888132334608e-05,
+ "loss": 1.1699,
+ "step": 202
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.20496443848153809,
+ "learning_rate": 7.986189356404458e-05,
+ "loss": 1.2125,
+ "step": 203
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.2134603800558358,
+ "learning_rate": 7.985472472805643e-05,
+ "loss": 1.2391,
+ "step": 204
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.2364175573420861,
+ "learning_rate": 7.98473748479468e-05,
+ "loss": 1.2384,
+ "step": 205
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.1872419861598724,
+ "learning_rate": 7.983984395710326e-05,
+ "loss": 1.1457,
+ "step": 206
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.28222194007095774,
+ "learning_rate": 7.983213208973566e-05,
+ "loss": 1.2952,
+ "step": 207
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.1916094851162064,
+ "learning_rate": 7.982423928087593e-05,
+ "loss": 1.1763,
+ "step": 208
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.18446245256166657,
+ "learning_rate": 7.981616556637795e-05,
+ "loss": 1.1863,
+ "step": 209
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.195191961022491,
+ "learning_rate": 7.980791098291737e-05,
+ "loss": 1.2036,
+ "step": 210
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.2652439657825496,
+ "learning_rate": 7.979947556799151e-05,
+ "loss": 1.2834,
+ "step": 211
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.24308438957843412,
+ "learning_rate": 7.979085935991906e-05,
+ "loss": 1.234,
+ "step": 212
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.21294701043622016,
+ "learning_rate": 7.978206239784004e-05,
+ "loss": 1.3006,
+ "step": 213
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.25809277041859524,
+ "learning_rate": 7.977308472171553e-05,
+ "loss": 1.2272,
+ "step": 214
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.193463860107294,
+ "learning_rate": 7.976392637232754e-05,
+ "loss": 1.2295,
+ "step": 215
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.2150023760609626,
+ "learning_rate": 7.975458739127877e-05,
+ "loss": 1.2135,
+ "step": 216
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.22590495955605894,
+ "learning_rate": 7.974506782099253e-05,
+ "loss": 1.2532,
+ "step": 217
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.21023744668403702,
+ "learning_rate": 7.973536770471242e-05,
+ "loss": 1.2472,
+ "step": 218
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.2345749799511543,
+ "learning_rate": 7.972548708650218e-05,
+ "loss": 1.1791,
+ "step": 219
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.2158876734005217,
+ "learning_rate": 7.971542601124553e-05,
+ "loss": 1.2483,
+ "step": 220
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.29455339949432446,
+ "learning_rate": 7.970518452464593e-05,
+ "loss": 1.2894,
+ "step": 221
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.23983708730626851,
+ "learning_rate": 7.969476267322636e-05,
+ "loss": 1.271,
+ "step": 222
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.1922400905426158,
+ "learning_rate": 7.968416050432912e-05,
+ "loss": 1.2139,
+ "step": 223
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.2238136844422931,
+ "learning_rate": 7.967337806611568e-05,
+ "loss": 1.2655,
+ "step": 224
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.21230292828267672,
+ "learning_rate": 7.966241540756631e-05,
+ "loss": 1.2406,
+ "step": 225
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.26656119419070456,
+ "learning_rate": 7.965127257848004e-05,
+ "loss": 1.2595,
+ "step": 226
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.22381385502992684,
+ "learning_rate": 7.963994962947426e-05,
+ "loss": 1.1737,
+ "step": 227
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20056702203994298,
+ "learning_rate": 7.962844661198462e-05,
+ "loss": 1.1969,
+ "step": 228
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20148701321526885,
+ "learning_rate": 7.961676357826478e-05,
+ "loss": 1.2151,
+ "step": 229
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20034834807028637,
+ "learning_rate": 7.960490058138604e-05,
+ "loss": 1.1455,
+ "step": 230
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.21050838521846033,
+ "learning_rate": 7.959285767523732e-05,
+ "loss": 1.2223,
+ "step": 231
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20904772138969777,
+ "learning_rate": 7.95806349145247e-05,
+ "loss": 1.2534,
+ "step": 232
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.20307877304792957,
+ "learning_rate": 7.956823235477134e-05,
+ "loss": 1.1352,
+ "step": 233
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.20501105270897094,
+ "learning_rate": 7.95556500523171e-05,
+ "loss": 1.2031,
+ "step": 234
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.19800586972038586,
+ "learning_rate": 7.954288806431838e-05,
+ "loss": 1.2567,
+ "step": 235
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.2175102450594135,
+ "learning_rate": 7.952994644874777e-05,
+ "loss": 1.2538,
+ "step": 236
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.22698189300067595,
+ "learning_rate": 7.951682526439391e-05,
+ "loss": 1.3088,
+ "step": 237
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.19208392014975315,
+ "learning_rate": 7.950352457086109e-05,
+ "loss": 1.2336,
+ "step": 238
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.27004086334319655,
+ "learning_rate": 7.949004442856905e-05,
+ "loss": 1.2012,
+ "step": 239
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.23420974954538043,
+ "learning_rate": 7.947638489875272e-05,
+ "loss": 1.2244,
+ "step": 240
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.20514399124802024,
+ "learning_rate": 7.946254604346186e-05,
+ "loss": 1.2548,
+ "step": 241
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.19334973602372896,
+ "learning_rate": 7.944852792556092e-05,
+ "loss": 1.2104,
+ "step": 242
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.1992640714537956,
+ "learning_rate": 7.943433060872858e-05,
+ "loss": 1.2628,
+ "step": 243
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.203284617090413,
+ "learning_rate": 7.941995415745761e-05,
+ "loss": 1.2002,
+ "step": 244
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.22795306969682058,
+ "learning_rate": 7.94053986370545e-05,
+ "loss": 1.2215,
+ "step": 245
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.20789041346838505,
+ "learning_rate": 7.939066411363915e-05,
+ "loss": 1.0998,
+ "step": 246
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.22354868884742066,
+ "learning_rate": 7.937575065414464e-05,
+ "loss": 1.2564,
+ "step": 247
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.21176392726647736,
+ "learning_rate": 7.936065832631687e-05,
+ "loss": 1.2816,
+ "step": 248
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.19967179557235587,
+ "learning_rate": 7.934538719871427e-05,
+ "loss": 1.1961,
+ "step": 249
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.210819577350627,
+ "learning_rate": 7.932993734070747e-05,
+ "loss": 1.2167,
+ "step": 250
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.21537794551756187,
+ "learning_rate": 7.931430882247903e-05,
+ "loss": 1.2341,
+ "step": 251
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.22850872387256574,
+ "learning_rate": 7.929850171502304e-05,
+ "loss": 1.1686,
+ "step": 252
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.22380366415076383,
+ "learning_rate": 7.928251609014493e-05,
+ "loss": 1.1462,
+ "step": 253
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.22426923149036065,
+ "learning_rate": 7.926635202046102e-05,
+ "loss": 1.1792,
+ "step": 254
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.42082703321103965,
+ "learning_rate": 7.925000957939822e-05,
+ "loss": 1.2718,
+ "step": 255
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.2235432774854074,
+ "learning_rate": 7.92334888411937e-05,
+ "loss": 1.2598,
+ "step": 256
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.281644028934108,
+ "learning_rate": 7.92167898808946e-05,
+ "loss": 1.2205,
+ "step": 257
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.2037705143888748,
+ "learning_rate": 7.919991277435763e-05,
+ "loss": 1.1737,
+ "step": 258
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.20917419230028977,
+ "learning_rate": 7.918285759824879e-05,
+ "loss": 1.2035,
+ "step": 259
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.20510847570635518,
+ "learning_rate": 7.916562443004292e-05,
+ "loss": 1.2135,
+ "step": 260
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.25172483071092466,
+ "learning_rate": 7.914821334802342e-05,
+ "loss": 1.2218,
+ "step": 261
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.21102706700634313,
+ "learning_rate": 7.91306244312819e-05,
+ "loss": 1.1738,
+ "step": 262
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.22626060872645815,
+ "learning_rate": 7.911285775971781e-05,
+ "loss": 1.238,
+ "step": 263
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.22448567539778486,
+ "learning_rate": 7.909491341403805e-05,
+ "loss": 1.2404,
+ "step": 264
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.2019099786139193,
+ "learning_rate": 7.907679147575661e-05,
+ "loss": 1.213,
+ "step": 265
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.24307234839096267,
+ "learning_rate": 7.905849202719422e-05,
+ "loss": 1.2322,
+ "step": 266
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.19801890521743487,
+ "learning_rate": 7.904001515147802e-05,
+ "loss": 1.2448,
+ "step": 267
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.2102742273575385,
+ "learning_rate": 7.902136093254106e-05,
+ "loss": 1.1657,
+ "step": 268
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.2173464476815016,
+ "learning_rate": 7.900252945512201e-05,
+ "loss": 1.2549,
+ "step": 269
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.20957275458699595,
+ "learning_rate": 7.898352080476479e-05,
+ "loss": 1.2536,
+ "step": 270
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.20691966388952363,
+ "learning_rate": 7.896433506781811e-05,
+ "loss": 1.2661,
+ "step": 271
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.2276662275112648,
+ "learning_rate": 7.894497233143509e-05,
+ "loss": 1.2409,
+ "step": 272
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.23854109569301263,
+ "learning_rate": 7.892543268357297e-05,
+ "loss": 1.2681,
+ "step": 273
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.2233864156677627,
+ "learning_rate": 7.890571621299252e-05,
+ "loss": 1.1687,
+ "step": 274
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.20114129147925475,
+ "learning_rate": 7.888582300925787e-05,
+ "loss": 1.2184,
+ "step": 275
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.2154654670569462,
+ "learning_rate": 7.886575316273586e-05,
+ "loss": 1.1982,
+ "step": 276
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.2292982209343639,
+ "learning_rate": 7.884550676459583e-05,
+ "loss": 1.2129,
+ "step": 277
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.21302713135229548,
+ "learning_rate": 7.882508390680908e-05,
+ "loss": 1.1605,
+ "step": 278
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.2123661020671048,
+ "learning_rate": 7.88044846821485e-05,
+ "loss": 1.2308,
+ "step": 279
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.2080577410800404,
+ "learning_rate": 7.878370918418818e-05,
+ "loss": 1.2195,
+ "step": 280
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.19663901881127385,
+ "learning_rate": 7.876275750730289e-05,
+ "loss": 1.1591,
+ "step": 281
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.20534502031312163,
+ "learning_rate": 7.874162974666776e-05,
+ "loss": 1.2664,
+ "step": 282
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.23240445399513837,
+ "learning_rate": 7.872032599825779e-05,
+ "loss": 1.2151,
+ "step": 283
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.2672527316717507,
+ "learning_rate": 7.86988463588474e-05,
+ "loss": 1.2406,
+ "step": 284
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.19893903058743695,
+ "learning_rate": 7.867719092601003e-05,
+ "loss": 1.1291,
+ "step": 285
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.33275268109930917,
+ "learning_rate": 7.865535979811768e-05,
+ "loss": 1.1406,
+ "step": 286
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.2373619455690358,
+ "learning_rate": 7.863335307434045e-05,
+ "loss": 1.2799,
+ "step": 287
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.263235735390858,
+ "learning_rate": 7.861117085464612e-05,
+ "loss": 1.2415,
+ "step": 288
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.25884281780784324,
+ "learning_rate": 7.858881323979965e-05,
+ "loss": 1.3919,
+ "step": 289
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.25426288332255736,
+ "learning_rate": 7.85662803313628e-05,
+ "loss": 1.174,
+ "step": 290
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.26655405527881243,
+ "learning_rate": 7.854357223169356e-05,
+ "loss": 1.2806,
+ "step": 291
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.20909844432349833,
+ "learning_rate": 7.852068904394579e-05,
+ "loss": 1.2627,
+ "step": 292
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.21307115068935759,
+ "learning_rate": 7.849763087206866e-05,
+ "loss": 1.1879,
+ "step": 293
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.25009949471398946,
+ "learning_rate": 7.847439782080628e-05,
+ "loss": 1.2881,
+ "step": 294
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.20960783418679174,
+ "learning_rate": 7.845098999569712e-05,
+ "loss": 1.2723,
+ "step": 295
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.24968832437925104,
+ "learning_rate": 7.842740750307362e-05,
+ "loss": 1.2029,
+ "step": 296
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.22981196585125677,
+ "learning_rate": 7.84036504500616e-05,
+ "loss": 1.1695,
+ "step": 297
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.2320606844751365,
+ "learning_rate": 7.837971894457991e-05,
+ "loss": 1.2317,
+ "step": 298
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.23051459673906124,
+ "learning_rate": 7.835561309533981e-05,
+ "loss": 1.2046,
+ "step": 299
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.2510027231060586,
+ "learning_rate": 7.833133301184457e-05,
+ "loss": 1.199,
+ "step": 300
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.23601180466018787,
+ "learning_rate": 7.830687880438895e-05,
+ "loss": 1.1755,
+ "step": 301
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.24740820934385369,
+ "learning_rate": 7.828225058405864e-05,
+ "loss": 1.2054,
+ "step": 302
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.23065372979111173,
+ "learning_rate": 7.825744846272984e-05,
+ "loss": 1.2066,
+ "step": 303
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.22385077334838213,
+ "learning_rate": 7.823247255306866e-05,
+ "loss": 1.2147,
+ "step": 304
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.42981213948386104,
+ "learning_rate": 7.820732296853074e-05,
+ "loss": 1.2314,
+ "step": 305
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.21122844902751076,
+ "learning_rate": 7.818199982336058e-05,
+ "loss": 1.1462,
+ "step": 306
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.23374869692118933,
+ "learning_rate": 7.815650323259117e-05,
+ "loss": 1.2051,
+ "step": 307
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.21662363795962128,
+ "learning_rate": 7.813083331204332e-05,
+ "loss": 1.1575,
+ "step": 308
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.2088315773384112,
+ "learning_rate": 7.810499017832526e-05,
+ "loss": 1.1316,
+ "step": 309
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.2095238410730976,
+ "learning_rate": 7.807897394883203e-05,
+ "loss": 1.2087,
+ "step": 310
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.22672932127256515,
+ "learning_rate": 7.805278474174499e-05,
+ "loss": 1.2512,
+ "step": 311
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.21873052340922736,
+ "learning_rate": 7.802642267603126e-05,
+ "loss": 1.1909,
+ "step": 312
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.219814521916342,
+ "learning_rate": 7.79998878714432e-05,
+ "loss": 1.1669,
+ "step": 313
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.3049426027257317,
+ "learning_rate": 7.797318044851786e-05,
+ "loss": 1.1797,
+ "step": 314
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.22309435690065985,
+ "learning_rate": 7.794630052857638e-05,
+ "loss": 1.1417,
+ "step": 315
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.3891885169154885,
+ "learning_rate": 7.791924823372354e-05,
+ "loss": 1.2369,
+ "step": 316
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.24780269452456372,
+ "learning_rate": 7.789202368684711e-05,
+ "loss": 1.2521,
+ "step": 317
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.21660460720269362,
+ "learning_rate": 7.786462701161738e-05,
+ "loss": 1.2151,
+ "step": 318
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.23635409466561857,
+ "learning_rate": 7.783705833248649e-05,
+ "loss": 1.2363,
+ "step": 319
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.2616135839903218,
+ "learning_rate": 7.780931777468797e-05,
+ "loss": 1.2428,
+ "step": 320
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.21461059159245083,
+ "learning_rate": 7.77814054642361e-05,
+ "loss": 1.1434,
+ "step": 321
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.25348824286656163,
+ "learning_rate": 7.775332152792539e-05,
+ "loss": 1.2368,
+ "step": 322
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.22275034726331247,
+ "learning_rate": 7.772506609332995e-05,
+ "loss": 1.1827,
+ "step": 323
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.25030821228147526,
+ "learning_rate": 7.769663928880298e-05,
+ "loss": 1.2428,
+ "step": 324
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.22251804398745534,
+ "learning_rate": 7.766804124347608e-05,
+ "loss": 1.1889,
+ "step": 325
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.23381455520411995,
+ "learning_rate": 7.763927208725879e-05,
+ "loss": 1.2115,
+ "step": 326
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.27341902651946226,
+ "learning_rate": 7.761033195083791e-05,
+ "loss": 1.2535,
+ "step": 327
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.24862471659814522,
+ "learning_rate": 7.758122096567694e-05,
+ "loss": 1.2128,
+ "step": 328
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.2251357082045494,
+ "learning_rate": 7.755193926401547e-05,
+ "loss": 1.2334,
+ "step": 329
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.3173274941622932,
+ "learning_rate": 7.752248697886857e-05,
+ "loss": 1.226,
+ "step": 330
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.23056440717672175,
+ "learning_rate": 7.74928642440263e-05,
+ "loss": 1.2339,
+ "step": 331
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2801507500859342,
+ "learning_rate": 7.746307119405286e-05,
+ "loss": 1.287,
+ "step": 332
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2267818430426272,
+ "learning_rate": 7.743310796428622e-05,
+ "loss": 1.1916,
+ "step": 333
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2777329160365585,
+ "learning_rate": 7.74029746908374e-05,
+ "loss": 1.252,
+ "step": 334
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.25289169762353,
+ "learning_rate": 7.737267151058983e-05,
+ "loss": 1.2153,
+ "step": 335
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2424670686901653,
+ "learning_rate": 7.734219856119875e-05,
+ "loss": 1.2227,
+ "step": 336
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.22747092217441645,
+ "learning_rate": 7.731155598109067e-05,
+ "loss": 1.19,
+ "step": 337
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.2307810940100189,
+ "learning_rate": 7.728074390946257e-05,
+ "loss": 1.1818,
+ "step": 338
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.2583402574655623,
+ "learning_rate": 7.724976248628142e-05,
+ "loss": 1.1608,
+ "step": 339
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.22140209760890694,
+ "learning_rate": 7.721861185228347e-05,
+ "loss": 1.1245,
+ "step": 340
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.25859310758244686,
+ "learning_rate": 7.718729214897362e-05,
+ "loss": 1.2247,
+ "step": 341
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.26371179531372124,
+ "learning_rate": 7.715580351862482e-05,
+ "loss": 1.2128,
+ "step": 342
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.26575541302851047,
+ "learning_rate": 7.712414610427733e-05,
+ "loss": 1.2443,
+ "step": 343
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.269978305197599,
+ "learning_rate": 7.709232004973816e-05,
+ "loss": 1.2231,
+ "step": 344
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.26583998705977047,
+ "learning_rate": 7.70603254995804e-05,
+ "loss": 1.2476,
+ "step": 345
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.24256062164066097,
+ "learning_rate": 7.702816259914253e-05,
+ "loss": 1.2901,
+ "step": 346
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.3463123472658915,
+ "learning_rate": 7.699583149452779e-05,
+ "loss": 1.3277,
+ "step": 347
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.2269096590531878,
+ "learning_rate": 7.696333233260345e-05,
+ "loss": 1.2047,
+ "step": 348
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.25136883001050025,
+ "learning_rate": 7.693066526100031e-05,
+ "loss": 1.1619,
+ "step": 349
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.2565112571116145,
+ "learning_rate": 7.68978304281118e-05,
+ "loss": 1.2389,
+ "step": 350
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.22175779550828703,
+ "learning_rate": 7.686482798309349e-05,
+ "loss": 1.2238,
+ "step": 351
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.22588304332216555,
+ "learning_rate": 7.683165807586234e-05,
+ "loss": 1.174,
+ "step": 352
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.24889474296529737,
+ "learning_rate": 7.6798320857096e-05,
+ "loss": 1.2366,
+ "step": 353
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.27339703806525034,
+ "learning_rate": 7.676481647823214e-05,
+ "loss": 1.2356,
+ "step": 354
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.23424666722888365,
+ "learning_rate": 7.673114509146782e-05,
+ "loss": 1.2089,
+ "step": 355
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.27978285392461766,
+ "learning_rate": 7.66973068497587e-05,
+ "loss": 1.2609,
+ "step": 356
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.2509423350138824,
+ "learning_rate": 7.666330190681844e-05,
+ "loss": 1.1777,
+ "step": 357
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.23007730927468031,
+ "learning_rate": 7.662913041711793e-05,
+ "loss": 1.154,
+ "step": 358
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.2438648674953112,
+ "learning_rate": 7.659479253588462e-05,
+ "loss": 1.2257,
+ "step": 359
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.28816093242092233,
+ "learning_rate": 7.65602884191018e-05,
+ "loss": 1.2558,
+ "step": 360
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.24972815300596035,
+ "learning_rate": 7.652561822350793e-05,
+ "loss": 1.2837,
+ "step": 361
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.2543189139697063,
+ "learning_rate": 7.649078210659587e-05,
+ "loss": 1.2193,
+ "step": 362
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.2237937956718952,
+ "learning_rate": 7.645578022661224e-05,
+ "loss": 1.2237,
+ "step": 363
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.29742029408787396,
+ "learning_rate": 7.642061274255657e-05,
+ "loss": 1.2116,
+ "step": 364
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2462883147335493,
+ "learning_rate": 7.638527981418075e-05,
+ "loss": 1.1827,
+ "step": 365
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2647802498907096,
+ "learning_rate": 7.634978160198817e-05,
+ "loss": 1.2739,
+ "step": 366
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.22360398779217264,
+ "learning_rate": 7.631411826723306e-05,
+ "loss": 1.2185,
+ "step": 367
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2635048004593543,
+ "learning_rate": 7.627828997191973e-05,
+ "loss": 1.2317,
+ "step": 368
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2764803449917684,
+ "learning_rate": 7.624229687880184e-05,
+ "loss": 1.1923,
+ "step": 369
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.25724943233414527,
+ "learning_rate": 7.620613915138166e-05,
+ "loss": 1.2218,
+ "step": 370
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.2858318045794755,
+ "learning_rate": 7.61698169539093e-05,
+ "loss": 1.1496,
+ "step": 371
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.23547216647460364,
+ "learning_rate": 7.613333045138206e-05,
+ "loss": 1.1905,
+ "step": 372
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.22984814903684375,
+ "learning_rate": 7.609667980954355e-05,
+ "loss": 1.2009,
+ "step": 373
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.2551903754079084,
+ "learning_rate": 7.605986519488301e-05,
+ "loss": 1.2042,
+ "step": 374
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.2508257410125616,
+ "learning_rate": 7.602288677463457e-05,
+ "loss": 1.2468,
+ "step": 375
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.25324577774935964,
+ "learning_rate": 7.598574471677644e-05,
+ "loss": 1.2603,
+ "step": 376
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.35888776531769967,
+ "learning_rate": 7.59484391900302e-05,
+ "loss": 1.1929,
+ "step": 377
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.22048517191014724,
+ "learning_rate": 7.591097036385994e-05,
+ "loss": 1.1783,
+ "step": 378
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.2781160412746083,
+ "learning_rate": 7.587333840847162e-05,
+ "loss": 1.3397,
+ "step": 379
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.24033046830332258,
+ "learning_rate": 7.583554349481222e-05,
+ "loss": 1.2436,
+ "step": 380
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.26413762380260003,
+ "learning_rate": 7.579758579456893e-05,
+ "loss": 1.1917,
+ "step": 381
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.2390937887338632,
+ "learning_rate": 7.575946548016847e-05,
+ "loss": 1.2186,
+ "step": 382
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.25131263043429275,
+ "learning_rate": 7.572118272477622e-05,
+ "loss": 1.2538,
+ "step": 383
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.223974104870702,
+ "learning_rate": 7.568273770229546e-05,
+ "loss": 1.2165,
+ "step": 384
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.25840356830252875,
+ "learning_rate": 7.564413058736663e-05,
+ "loss": 1.1848,
+ "step": 385
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.2723156683076603,
+ "learning_rate": 7.560536155536641e-05,
+ "loss": 1.1982,
+ "step": 386
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.265687427976889,
+ "learning_rate": 7.556643078240708e-05,
+ "loss": 1.231,
+ "step": 387
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.25152762080976077,
+ "learning_rate": 7.552733844533562e-05,
+ "loss": 1.1974,
+ "step": 388
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.2366049485053541,
+ "learning_rate": 7.548808472173292e-05,
+ "loss": 1.3119,
+ "step": 389
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.22092196577077122,
+ "learning_rate": 7.5448669789913e-05,
+ "loss": 1.195,
+ "step": 390
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.22667521540462374,
+ "learning_rate": 7.540909382892217e-05,
+ "loss": 1.1431,
+ "step": 391
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.25432207282646513,
+ "learning_rate": 7.536935701853823e-05,
+ "loss": 1.2173,
+ "step": 392
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.29950506457923864,
+ "learning_rate": 7.53294595392697e-05,
+ "loss": 1.1962,
+ "step": 393
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.24735689607229913,
+ "learning_rate": 7.528940157235487e-05,
+ "loss": 1.2053,
+ "step": 394
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.24394198607459663,
+ "learning_rate": 7.524918329976114e-05,
+ "loss": 1.1979,
+ "step": 395
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.2630369372689188,
+ "learning_rate": 7.520880490418409e-05,
+ "loss": 1.2111,
+ "step": 396
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.26275028416291457,
+ "learning_rate": 7.516826656904664e-05,
+ "loss": 1.2133,
+ "step": 397
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.23938074620956928,
+ "learning_rate": 7.512756847849831e-05,
+ "loss": 1.1355,
+ "step": 398
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.3724960610098138,
+ "learning_rate": 7.508671081741428e-05,
+ "loss": 1.2572,
+ "step": 399
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.24161685847894723,
+ "learning_rate": 7.504569377139462e-05,
+ "loss": 1.1706,
+ "step": 400
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.26121591322670523,
+ "learning_rate": 7.50045175267634e-05,
+ "loss": 1.2135,
+ "step": 401
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2465579498164775,
+ "learning_rate": 7.496318227056788e-05,
+ "loss": 1.1641,
+ "step": 402
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2556288696122787,
+ "learning_rate": 7.492168819057767e-05,
+ "loss": 1.2939,
+ "step": 403
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.261481216336303,
+ "learning_rate": 7.488003547528382e-05,
+ "loss": 1.2026,
+ "step": 404
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2389415135676362,
+ "learning_rate": 7.483822431389799e-05,
+ "loss": 1.2131,
+ "step": 405
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2559201956627192,
+ "learning_rate": 7.479625489635162e-05,
+ "loss": 1.1246,
+ "step": 406
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.27127932491822604,
+ "learning_rate": 7.475412741329504e-05,
+ "loss": 1.2429,
+ "step": 407
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.27006004008695594,
+ "learning_rate": 7.47118420560966e-05,
+ "loss": 1.2388,
+ "step": 408
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.23716823297200537,
+ "learning_rate": 7.466939901684182e-05,
+ "loss": 1.1264,
+ "step": 409
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.2885373898669248,
+ "learning_rate": 7.462679848833252e-05,
+ "loss": 1.2786,
+ "step": 410
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.49215227598639927,
+ "learning_rate": 7.458404066408588e-05,
+ "loss": 1.2386,
+ "step": 411
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.24235735604947403,
+ "learning_rate": 7.454112573833368e-05,
+ "loss": 1.1423,
+ "step": 412
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.2584614748054343,
+ "learning_rate": 7.449805390602127e-05,
+ "loss": 1.2669,
+ "step": 413
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.23806123085998873,
+ "learning_rate": 7.445482536280684e-05,
+ "loss": 1.1763,
+ "step": 414
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.24459517607786851,
+ "learning_rate": 7.441144030506043e-05,
+ "loss": 1.198,
+ "step": 415
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.25801616402700395,
+ "learning_rate": 7.436789892986304e-05,
+ "loss": 1.2136,
+ "step": 416
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.2814819942392514,
+ "learning_rate": 7.432420143500578e-05,
+ "loss": 1.2398,
+ "step": 417
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.22134709322606153,
+ "learning_rate": 7.428034801898893e-05,
+ "loss": 1.1592,
+ "step": 418
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.2899677536995633,
+ "learning_rate": 7.42363388810211e-05,
+ "loss": 1.2296,
+ "step": 419
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.24005943230262294,
+ "learning_rate": 7.419217422101822e-05,
+ "loss": 1.2223,
+ "step": 420
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.26417562369496167,
+ "learning_rate": 7.414785423960275e-05,
+ "loss": 1.2261,
+ "step": 421
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.2580815883535521,
+ "learning_rate": 7.410337913810271e-05,
+ "loss": 1.2021,
+ "step": 422
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.25242217589496435,
+ "learning_rate": 7.405874911855071e-05,
+ "loss": 1.239,
+ "step": 423
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.21991733999839932,
+ "learning_rate": 7.401396438368315e-05,
+ "loss": 1.1716,
+ "step": 424
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.40116538322720213,
+ "learning_rate": 7.396902513693924e-05,
+ "loss": 1.2773,
+ "step": 425
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.277333939455099,
+ "learning_rate": 7.392393158246002e-05,
+ "loss": 1.2574,
+ "step": 426
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.27146087746385755,
+ "learning_rate": 7.387868392508756e-05,
+ "loss": 1.2243,
+ "step": 427
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.255881055620786,
+ "learning_rate": 7.38332823703639e-05,
+ "loss": 1.223,
+ "step": 428
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.24807364856677255,
+ "learning_rate": 7.378772712453021e-05,
+ "loss": 1.1985,
+ "step": 429
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.25746257617764423,
+ "learning_rate": 7.37420183945258e-05,
+ "loss": 1.2502,
+ "step": 430
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.28851991049982234,
+ "learning_rate": 7.369615638798722e-05,
+ "loss": 1.2535,
+ "step": 431
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.24113389811604363,
+ "learning_rate": 7.365014131324725e-05,
+ "loss": 1.2227,
+ "step": 432
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.2414465151257969,
+ "learning_rate": 7.360397337933405e-05,
+ "loss": 1.1884,
+ "step": 433
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.2735463134699831,
+ "learning_rate": 7.355765279597011e-05,
+ "loss": 1.2756,
+ "step": 434
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.2588437452987293,
+ "learning_rate": 7.351117977357139e-05,
+ "loss": 1.2108,
+ "step": 435
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.26573294117796553,
+ "learning_rate": 7.346455452324629e-05,
+ "loss": 1.1821,
+ "step": 436
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.2555476577827304,
+ "learning_rate": 7.341777725679473e-05,
+ "loss": 1.1937,
+ "step": 437
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.2867704132108098,
+ "learning_rate": 7.337084818670716e-05,
+ "loss": 1.2272,
+ "step": 438
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.27726678115981157,
+ "learning_rate": 7.332376752616367e-05,
+ "loss": 1.2331,
+ "step": 439
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.26955338021079955,
+ "learning_rate": 7.32765354890329e-05,
+ "loss": 1.1731,
+ "step": 440
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.25250321202536524,
+ "learning_rate": 7.322915228987116e-05,
+ "loss": 1.2653,
+ "step": 441
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.24748844179765395,
+ "learning_rate": 7.318161814392143e-05,
+ "loss": 1.24,
+ "step": 442
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.28177805247356325,
+ "learning_rate": 7.313393326711239e-05,
+ "loss": 1.185,
+ "step": 443
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.24093242000396312,
+ "learning_rate": 7.30860978760574e-05,
+ "loss": 1.1994,
+ "step": 444
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.26277803901457075,
+ "learning_rate": 7.30381121880536e-05,
+ "loss": 1.212,
+ "step": 445
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2506524258682433,
+ "learning_rate": 7.298997642108079e-05,
+ "loss": 1.2421,
+ "step": 446
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2840599700015824,
+ "learning_rate": 7.294169079380061e-05,
+ "loss": 1.1818,
+ "step": 447
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.24892184038117549,
+ "learning_rate": 7.289325552555538e-05,
+ "loss": 1.1916,
+ "step": 448
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2700898428541357,
+ "learning_rate": 7.284467083636722e-05,
+ "loss": 1.2517,
+ "step": 449
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2617848546539419,
+ "learning_rate": 7.279593694693698e-05,
+ "loss": 1.2063,
+ "step": 450
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2698278585334131,
+ "learning_rate": 7.274705407864332e-05,
+ "loss": 1.194,
+ "step": 451
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.23678313024953834,
+ "learning_rate": 7.26980224535416e-05,
+ "loss": 1.2349,
+ "step": 452
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.24851875792002978,
+ "learning_rate": 7.264884229436293e-05,
+ "loss": 1.1758,
+ "step": 453
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.24122080121681125,
+ "learning_rate": 7.259951382451318e-05,
+ "loss": 1.1962,
+ "step": 454
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.22741322959884405,
+ "learning_rate": 7.25500372680719e-05,
+ "loss": 1.1702,
+ "step": 455
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.2297475610861458,
+ "learning_rate": 7.250041284979137e-05,
+ "loss": 1.1466,
+ "step": 456
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.3057605989721467,
+ "learning_rate": 7.245064079509553e-05,
+ "loss": 1.246,
+ "step": 457
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.2719638501597136,
+ "learning_rate": 7.240072133007899e-05,
+ "loss": 1.2184,
+ "step": 458
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.2436807816414479,
+ "learning_rate": 7.235065468150593e-05,
+ "loss": 1.2324,
+ "step": 459
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.23436349430255515,
+ "learning_rate": 7.23004410768092e-05,
+ "loss": 1.1813,
+ "step": 460
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.2398940990211377,
+ "learning_rate": 7.22500807440892e-05,
+ "loss": 1.1924,
+ "step": 461
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.2605716625062531,
+ "learning_rate": 7.219957391211281e-05,
+ "loss": 1.182,
+ "step": 462
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.260462524570941,
+ "learning_rate": 7.214892081031244e-05,
+ "loss": 1.2136,
+ "step": 463
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.21979766512306334,
+ "learning_rate": 7.209812166878491e-05,
+ "loss": 1.2066,
+ "step": 464
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.23324453647530663,
+ "learning_rate": 7.204717671829051e-05,
+ "loss": 1.1657,
+ "step": 465
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.2529434935507481,
+ "learning_rate": 7.199608619025177e-05,
+ "loss": 1.2093,
+ "step": 466
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.25371701891720116,
+ "learning_rate": 7.194485031675265e-05,
+ "loss": 1.2225,
+ "step": 467
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.23272423066292103,
+ "learning_rate": 7.189346933053725e-05,
+ "loss": 1.1721,
+ "step": 468
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.25122928735587546,
+ "learning_rate": 7.184194346500892e-05,
+ "loss": 1.2537,
+ "step": 469
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.2159270875490409,
+ "learning_rate": 7.179027295422913e-05,
+ "loss": 1.197,
+ "step": 470
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.2633111059076544,
+ "learning_rate": 7.173845803291636e-05,
+ "loss": 1.1721,
+ "step": 471
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.30555936322098703,
+ "learning_rate": 7.168649893644517e-05,
+ "loss": 1.3011,
+ "step": 472
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.23492670111453726,
+ "learning_rate": 7.163439590084502e-05,
+ "loss": 1.1601,
+ "step": 473
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.26602734263721806,
+ "learning_rate": 7.158214916279923e-05,
+ "loss": 1.2808,
+ "step": 474
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.3182695007856262,
+ "learning_rate": 7.152975895964386e-05,
+ "loss": 1.2967,
+ "step": 475
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.2785021674736721,
+ "learning_rate": 7.147722552936673e-05,
+ "loss": 1.1789,
+ "step": 476
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.279474303138652,
+ "learning_rate": 7.142454911060627e-05,
+ "loss": 1.2596,
+ "step": 477
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.2556980144910755,
+ "learning_rate": 7.137172994265044e-05,
+ "loss": 1.2426,
+ "step": 478
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.3311256331993533,
+ "learning_rate": 7.131876826543565e-05,
+ "loss": 1.2059,
+ "step": 479
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.26467296197775253,
+ "learning_rate": 7.12656643195457e-05,
+ "loss": 1.2482,
+ "step": 480
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.27444885274652553,
+ "learning_rate": 7.121241834621064e-05,
+ "loss": 1.2528,
+ "step": 481
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.2572283861115396,
+ "learning_rate": 7.115903058730567e-05,
+ "loss": 1.1849,
+ "step": 482
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.2677065778235683,
+ "learning_rate": 7.11055012853501e-05,
+ "loss": 1.2011,
+ "step": 483
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.29470622036742816,
+ "learning_rate": 7.105183068350619e-05,
+ "loss": 1.2398,
+ "step": 484
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.27609230248969197,
+ "learning_rate": 7.099801902557811e-05,
+ "loss": 1.2259,
+ "step": 485
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.24248634168099284,
+ "learning_rate": 7.094406655601073e-05,
+ "loss": 1.2282,
+ "step": 486
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.2765941767688746,
+ "learning_rate": 7.088997351988865e-05,
+ "loss": 1.2319,
+ "step": 487
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.29347776909858947,
+ "learning_rate": 7.083574016293493e-05,
+ "loss": 1.1765,
+ "step": 488
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.285370295424537,
+ "learning_rate": 7.078136673151008e-05,
+ "loss": 1.26,
+ "step": 489
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.29408734903836536,
+ "learning_rate": 7.072685347261093e-05,
+ "loss": 1.226,
+ "step": 490
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.27437470239205813,
+ "learning_rate": 7.067220063386947e-05,
+ "loss": 1.1976,
+ "step": 491
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.2680770258777871,
+ "learning_rate": 7.061740846355176e-05,
+ "loss": 1.1915,
+ "step": 492
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.27200362879502954,
+ "learning_rate": 7.056247721055678e-05,
+ "loss": 1.2002,
+ "step": 493
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.2637811092577037,
+ "learning_rate": 7.050740712441528e-05,
+ "loss": 1.287,
+ "step": 494
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.24657959209271266,
+ "learning_rate": 7.045219845528875e-05,
+ "loss": 1.2284,
+ "step": 495
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.25311992110358666,
+ "learning_rate": 7.039685145396812e-05,
+ "loss": 1.1616,
+ "step": 496
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.2564633694193358,
+ "learning_rate": 7.034136637187275e-05,
+ "loss": 1.2067,
+ "step": 497
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.2446797651174144,
+ "learning_rate": 7.028574346104926e-05,
+ "loss": 1.2284,
+ "step": 498
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.2592751463399255,
+ "learning_rate": 7.022998297417034e-05,
+ "loss": 1.2371,
+ "step": 499
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.2500713943206808,
+ "learning_rate": 7.017408516453365e-05,
+ "loss": 1.1061,
+ "step": 500
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.2812266276040743,
+ "learning_rate": 7.011805028606064e-05,
+ "loss": 1.1949,
+ "step": 501
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.298829667668083,
+ "learning_rate": 7.006187859329544e-05,
+ "loss": 1.2313,
+ "step": 502
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.26518768159745104,
+ "learning_rate": 7.000557034140361e-05,
+ "loss": 1.2246,
+ "step": 503
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.3037280360760458,
+ "learning_rate": 6.994912578617113e-05,
+ "loss": 1.1617,
+ "step": 504
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.2726903109255714,
+ "learning_rate": 6.989254518400309e-05,
+ "loss": 1.2415,
+ "step": 505
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.25568082003046966,
+ "learning_rate": 6.98358287919226e-05,
+ "loss": 1.1817,
+ "step": 506
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.25633294893705044,
+ "learning_rate": 6.97789768675696e-05,
+ "loss": 1.2149,
+ "step": 507
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.28291439435087123,
+ "learning_rate": 6.972198966919972e-05,
+ "loss": 1.1578,
+ "step": 508
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.27195184756655516,
+ "learning_rate": 6.966486745568308e-05,
+ "loss": 1.2355,
+ "step": 509
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.239159568376005,
+ "learning_rate": 6.960761048650312e-05,
+ "loss": 1.1688,
+ "step": 510
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.22961475425949177,
+ "learning_rate": 6.955021902175543e-05,
+ "loss": 1.2094,
+ "step": 511
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.27443773600741117,
+ "learning_rate": 6.949269332214651e-05,
+ "loss": 1.2559,
+ "step": 512
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.26230551832002097,
+ "learning_rate": 6.94350336489927e-05,
+ "loss": 1.2121,
+ "step": 513
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.2716742985303849,
+ "learning_rate": 6.937724026421892e-05,
+ "loss": 1.2444,
+ "step": 514
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.2537850139439542,
+ "learning_rate": 6.931931343035742e-05,
+ "loss": 1.1327,
+ "step": 515
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.28599587967496826,
+ "learning_rate": 6.926125341054676e-05,
+ "loss": 1.2236,
+ "step": 516
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.26780654378470103,
+ "learning_rate": 6.920306046853043e-05,
+ "loss": 1.2295,
+ "step": 517
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.23606296888412015,
+ "learning_rate": 6.914473486865577e-05,
+ "loss": 1.1543,
+ "step": 518
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.34976881174240837,
+ "learning_rate": 6.90862768758727e-05,
+ "loss": 1.2067,
+ "step": 519
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.2481257873494882,
+ "learning_rate": 6.902768675573258e-05,
+ "loss": 1.2188,
+ "step": 520
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.2996395778117021,
+ "learning_rate": 6.896896477438699e-05,
+ "loss": 1.2326,
+ "step": 521
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.8839768816333193,
+ "learning_rate": 6.891011119858643e-05,
+ "loss": 1.2435,
+ "step": 522
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.2851882482058998,
+ "learning_rate": 6.885112629567927e-05,
+ "loss": 1.2644,
+ "step": 523
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.2813663482913699,
+ "learning_rate": 6.879201033361035e-05,
+ "loss": 1.2309,
+ "step": 524
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.3257551560135454,
+ "learning_rate": 6.873276358091996e-05,
+ "loss": 1.2755,
+ "step": 525
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.28930479952494365,
+ "learning_rate": 6.867338630674247e-05,
+ "loss": 1.1962,
+ "step": 526
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.3077462996938649,
+ "learning_rate": 6.861387878080511e-05,
+ "loss": 1.2402,
+ "step": 527
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.2848900193452761,
+ "learning_rate": 6.855424127342688e-05,
+ "loss": 1.2748,
+ "step": 528
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.4765938812802202,
+ "learning_rate": 6.849447405551718e-05,
+ "loss": 1.2226,
+ "step": 529
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.53184473292579,
+ "learning_rate": 6.843457739857467e-05,
+ "loss": 1.2347,
+ "step": 530
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.6416239346492343,
+ "learning_rate": 6.837455157468596e-05,
+ "loss": 1.2429,
+ "step": 531
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.3188092712502773,
+ "learning_rate": 6.831439685652442e-05,
+ "loss": 1.216,
+ "step": 532
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.3527495731006385,
+ "learning_rate": 6.825411351734895e-05,
+ "loss": 1.1682,
+ "step": 533
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 0.29603753744741856,
+ "learning_rate": 6.819370183100274e-05,
+ "loss": 1.1434,
+ "step": 534
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 0.5252450389976622,
+ "learning_rate": 6.813316207191198e-05,
+ "loss": 1.1943,
+ "step": 535
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 0.32999419558659937,
+ "learning_rate": 6.807249451508466e-05,
+ "loss": 1.192,
+ "step": 536
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 0.3650175469778724,
+ "learning_rate": 6.801169943610929e-05,
+ "loss": 1.2141,
+ "step": 537
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 1.0643532150783557,
+ "learning_rate": 6.795077711115368e-05,
+ "loss": 1.2253,
+ "step": 538
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.5041310609130145,
+ "learning_rate": 6.788972781696363e-05,
+ "loss": 1.278,
+ "step": 539
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.5123058164360991,
+ "learning_rate": 6.782855183086177e-05,
+ "loss": 1.2231,
+ "step": 540
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.3533015702394419,
+ "learning_rate": 6.776724943074619e-05,
+ "loss": 1.2072,
+ "step": 541
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.30253964625417207,
+ "learning_rate": 6.770582089508927e-05,
+ "loss": 1.1382,
+ "step": 542
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.348991618828202,
+ "learning_rate": 6.764426650293633e-05,
+ "loss": 1.2079,
+ "step": 543
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.46017440578788743,
+ "learning_rate": 6.758258653390444e-05,
+ "loss": 1.1813,
+ "step": 544
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.31962101755594885,
+ "learning_rate": 6.75207812681811e-05,
+ "loss": 1.1339,
+ "step": 545
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.37092024548285923,
+ "learning_rate": 6.745885098652298e-05,
+ "loss": 1.2591,
+ "step": 546
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.32347106450715835,
+ "learning_rate": 6.739679597025466e-05,
+ "loss": 1.2017,
+ "step": 547
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.39250187112342494,
+ "learning_rate": 6.733461650126733e-05,
+ "loss": 1.0933,
+ "step": 548
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.473522452217324,
+ "learning_rate": 6.727231286201752e-05,
+ "loss": 1.1124,
+ "step": 549
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.4809062179622052,
+ "learning_rate": 6.720988533552582e-05,
+ "loss": 1.1585,
+ "step": 550
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.3529662801059162,
+ "learning_rate": 6.714733420537559e-05,
+ "loss": 1.0501,
+ "step": 551
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.5958247214391118,
+ "learning_rate": 6.708465975571168e-05,
+ "loss": 1.1086,
+ "step": 552
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.5341364205022454,
+ "learning_rate": 6.70218622712391e-05,
+ "loss": 1.0518,
+ "step": 553
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.3601805724462006,
+ "learning_rate": 6.695894203722181e-05,
+ "loss": 1.1779,
+ "step": 554
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.43410190338280613,
+ "learning_rate": 6.68958993394813e-05,
+ "loss": 1.093,
+ "step": 555
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.46217742572873594,
+ "learning_rate": 6.683273446439546e-05,
+ "loss": 1.0117,
+ "step": 556
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.8591682373623357,
+ "learning_rate": 6.676944769889708e-05,
+ "loss": 1.1002,
+ "step": 557
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.7383229487622726,
+ "learning_rate": 6.670603933047272e-05,
+ "loss": 1.0779,
+ "step": 558
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.5965305891207813,
+ "learning_rate": 6.664250964716131e-05,
+ "loss": 1.0889,
+ "step": 559
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.6030858606684543,
+ "learning_rate": 6.657885893755288e-05,
+ "loss": 1.0982,
+ "step": 560
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.4644510682398409,
+ "learning_rate": 6.65150874907872e-05,
+ "loss": 1.1004,
+ "step": 561
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.43943285132452564,
+ "learning_rate": 6.645119559655254e-05,
+ "loss": 1.0536,
+ "step": 562
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.4456395978600012,
+ "learning_rate": 6.638718354508427e-05,
+ "loss": 1.0733,
+ "step": 563
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.3303824433217466,
+ "learning_rate": 6.632305162716365e-05,
+ "loss": 1.0552,
+ "step": 564
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.3617704823170143,
+ "learning_rate": 6.62588001341164e-05,
+ "loss": 1.1092,
+ "step": 565
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.4465013349903427,
+ "learning_rate": 6.619442935781141e-05,
+ "loss": 1.0781,
+ "step": 566
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.48516780613791277,
+ "learning_rate": 6.612993959065947e-05,
+ "loss": 1.0686,
+ "step": 567
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.38867820318536633,
+ "learning_rate": 6.606533112561186e-05,
+ "loss": 1.1215,
+ "step": 568
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.38566119820378336,
+ "learning_rate": 6.600060425615907e-05,
+ "loss": 1.1213,
+ "step": 569
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.35534855445058544,
+ "learning_rate": 6.593575927632947e-05,
+ "loss": 1.0955,
+ "step": 570
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.38124406233349717,
+ "learning_rate": 6.587079648068795e-05,
+ "loss": 1.0659,
+ "step": 571
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.454750160923548,
+ "learning_rate": 6.580571616433457e-05,
+ "loss": 1.1149,
+ "step": 572
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.35353190088025255,
+ "learning_rate": 6.574051862290325e-05,
+ "loss": 1.0388,
+ "step": 573
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.3249395594793626,
+ "learning_rate": 6.567520415256045e-05,
+ "loss": 1.0784,
+ "step": 574
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.40078898818247227,
+ "learning_rate": 6.560977305000375e-05,
+ "loss": 1.0859,
+ "step": 575
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.4115264795060035,
+ "learning_rate": 6.554422561246054e-05,
+ "loss": 1.1828,
+ "step": 576
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.30090229228069215,
+ "learning_rate": 6.54785621376867e-05,
+ "loss": 1.0901,
+ "step": 577
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.28827860350299206,
+ "learning_rate": 6.541278292396523e-05,
+ "loss": 1.0277,
+ "step": 578
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.34690404488996757,
+ "learning_rate": 6.534688827010484e-05,
+ "loss": 1.048,
+ "step": 579
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.29943113556644785,
+ "learning_rate": 6.528087847543867e-05,
+ "loss": 1.0646,
+ "step": 580
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.37318202575874415,
+ "learning_rate": 6.521475383982291e-05,
+ "loss": 1.1091,
+ "step": 581
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.3049663659203959,
+ "learning_rate": 6.51485146636354e-05,
+ "loss": 1.0552,
+ "step": 582
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.3342407867509692,
+ "learning_rate": 6.508216124777431e-05,
+ "loss": 1.2227,
+ "step": 583
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.3348396047855952,
+ "learning_rate": 6.501569389365674e-05,
+ "loss": 1.0861,
+ "step": 584
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.30951429367513383,
+ "learning_rate": 6.494911290321737e-05,
+ "loss": 1.0461,
+ "step": 585
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.33898401361064606,
+ "learning_rate": 6.488241857890711e-05,
+ "loss": 1.0854,
+ "step": 586
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.4901462068263497,
+ "learning_rate": 6.481561122369164e-05,
+ "loss": 1.1012,
+ "step": 587
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.3179574879809652,
+ "learning_rate": 6.474869114105018e-05,
+ "loss": 1.0451,
+ "step": 588
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.32159328915060714,
+ "learning_rate": 6.468165863497395e-05,
+ "loss": 1.0458,
+ "step": 589
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.36462235008537297,
+ "learning_rate": 6.461451400996491e-05,
+ "loss": 1.1247,
+ "step": 590
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.5373862753611778,
+ "learning_rate": 6.454725757103432e-05,
+ "loss": 1.0542,
+ "step": 591
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.3160409270291303,
+ "learning_rate": 6.447988962370133e-05,
+ "loss": 1.0829,
+ "step": 592
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.390452102978435,
+ "learning_rate": 6.441241047399169e-05,
+ "loss": 1.192,
+ "step": 593
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.3802122712014928,
+ "learning_rate": 6.434482042843627e-05,
+ "loss": 1.1153,
+ "step": 594
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.4081584328242501,
+ "learning_rate": 6.427711979406966e-05,
+ "loss": 1.1635,
+ "step": 595
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.3791962989638633,
+ "learning_rate": 6.420930887842889e-05,
+ "loss": 1.1581,
+ "step": 596
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.33239440056484193,
+ "learning_rate": 6.414138798955189e-05,
+ "loss": 1.0926,
+ "step": 597
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.3279881540815014,
+ "learning_rate": 6.407335743597616e-05,
+ "loss": 1.1386,
+ "step": 598
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.30309644763750837,
+ "learning_rate": 6.40052175267374e-05,
+ "loss": 1.0523,
+ "step": 599
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.3349097308403333,
+ "learning_rate": 6.393696857136801e-05,
+ "loss": 1.0815,
+ "step": 600
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.3288227593556618,
+ "learning_rate": 6.386861087989581e-05,
+ "loss": 1.015,
+ "step": 601
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.36685586740843157,
+ "learning_rate": 6.380014476284255e-05,
+ "loss": 1.1232,
+ "step": 602
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.3620977714204643,
+ "learning_rate": 6.373157053122243e-05,
+ "loss": 1.1138,
+ "step": 603
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.3130587018197183,
+ "learning_rate": 6.366288849654091e-05,
+ "loss": 1.1255,
+ "step": 604
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.3602737087072766,
+ "learning_rate": 6.359409897079303e-05,
+ "loss": 1.0282,
+ "step": 605
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.31168852571991945,
+ "learning_rate": 6.352520226646222e-05,
+ "loss": 1.0779,
+ "step": 606
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.3516045580189353,
+ "learning_rate": 6.345619869651871e-05,
+ "loss": 1.1028,
+ "step": 607
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.3231857927563657,
+ "learning_rate": 6.33870885744182e-05,
+ "loss": 1.1202,
+ "step": 608
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.30205205129701157,
+ "learning_rate": 6.331787221410041e-05,
+ "loss": 1.1369,
+ "step": 609
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.3198359813888166,
+ "learning_rate": 6.32485499299877e-05,
+ "loss": 1.1763,
+ "step": 610
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.3128641370321787,
+ "learning_rate": 6.31791220369835e-05,
+ "loss": 1.0223,
+ "step": 611
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.2989105616213649,
+ "learning_rate": 6.31095888504711e-05,
+ "loss": 1.0358,
+ "step": 612
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.3103537906853337,
+ "learning_rate": 6.303995068631203e-05,
+ "loss": 1.1261,
+ "step": 613
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.28598715532508207,
+ "learning_rate": 6.297020786084467e-05,
+ "loss": 1.0629,
+ "step": 614
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.29809789918093255,
+ "learning_rate": 6.290036069088288e-05,
+ "loss": 1.035,
+ "step": 615
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.33765270252261453,
+ "learning_rate": 6.283040949371451e-05,
+ "loss": 1.1221,
+ "step": 616
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.3424617501293415,
+ "learning_rate": 6.276035458709993e-05,
+ "loss": 1.155,
+ "step": 617
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.3799189737987811,
+ "learning_rate": 6.269019628927067e-05,
+ "loss": 1.0701,
+ "step": 618
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.3358898935253196,
+ "learning_rate": 6.261993491892791e-05,
+ "loss": 1.1649,
+ "step": 619
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.31569979424117356,
+ "learning_rate": 6.254957079524099e-05,
+ "loss": 1.0633,
+ "step": 620
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.3002168156888237,
+ "learning_rate": 6.247910423784609e-05,
+ "loss": 1.0846,
+ "step": 621
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.3097238823450595,
+ "learning_rate": 6.24085355668447e-05,
+ "loss": 1.0808,
+ "step": 622
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.3120312761417578,
+ "learning_rate": 6.233786510280212e-05,
+ "loss": 1.0142,
+ "step": 623
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.3335343015064923,
+ "learning_rate": 6.22670931667461e-05,
+ "loss": 1.0674,
+ "step": 624
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.3234062304634526,
+ "learning_rate": 6.219622008016533e-05,
+ "loss": 1.0981,
+ "step": 625
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.32152678786547273,
+ "learning_rate": 6.212524616500798e-05,
+ "loss": 1.0244,
+ "step": 626
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.39031977608147594,
+ "learning_rate": 6.205417174368023e-05,
+ "loss": 1.1205,
+ "step": 627
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.3806189090017157,
+ "learning_rate": 6.198299713904485e-05,
+ "loss": 1.1134,
+ "step": 628
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.2978349276971668,
+ "learning_rate": 6.191172267441967e-05,
+ "loss": 1.0088,
+ "step": 629
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.3190354077382501,
+ "learning_rate": 6.184034867357617e-05,
+ "loss": 1.108,
+ "step": 630
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.32633048665038994,
+ "learning_rate": 6.176887546073797e-05,
+ "loss": 1.0825,
+ "step": 631
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.3428026413020903,
+ "learning_rate": 6.169730336057939e-05,
+ "loss": 1.0765,
+ "step": 632
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.3475737151929015,
+ "learning_rate": 6.162563269822391e-05,
+ "loss": 1.0693,
+ "step": 633
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.3870252154591392,
+ "learning_rate": 6.15538637992428e-05,
+ "loss": 1.1081,
+ "step": 634
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.33597355193652834,
+ "learning_rate": 6.148199698965352e-05,
+ "loss": 1.0893,
+ "step": 635
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.30805894179787247,
+ "learning_rate": 6.141003259591834e-05,
+ "loss": 1.0995,
+ "step": 636
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.3025073882734066,
+ "learning_rate": 6.133797094494281e-05,
+ "loss": 1.0388,
+ "step": 637
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.3524395196391662,
+ "learning_rate": 6.126581236407429e-05,
+ "loss": 1.1196,
+ "step": 638
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.3377646188130345,
+ "learning_rate": 6.119355718110039e-05,
+ "loss": 1.0382,
+ "step": 639
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.35508400659785483,
+ "learning_rate": 6.112120572424763e-05,
+ "loss": 1.1402,
+ "step": 640
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.3454418793700457,
+ "learning_rate": 6.104875832217982e-05,
+ "loss": 1.1032,
+ "step": 641
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.32629806837059866,
+ "learning_rate": 6.097621530399661e-05,
+ "loss": 1.0959,
+ "step": 642
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.3329536837751315,
+ "learning_rate": 6.090357699923202e-05,
+ "loss": 1.0467,
+ "step": 643
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.32302233828349475,
+ "learning_rate": 6.083084373785287e-05,
+ "loss": 1.0858,
+ "step": 644
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.3310358826507611,
+ "learning_rate": 6.075801585025739e-05,
+ "loss": 1.0715,
+ "step": 645
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.319322035854079,
+ "learning_rate": 6.068509366727362e-05,
+ "loss": 1.177,
+ "step": 646
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.3065230667302707,
+ "learning_rate": 6.061207752015797e-05,
+ "loss": 1.0649,
+ "step": 647
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.29926795565748227,
+ "learning_rate": 6.053896774059368e-05,
+ "loss": 1.1325,
+ "step": 648
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.3556069634279046,
+ "learning_rate": 6.046576466068931e-05,
+ "loss": 1.1366,
+ "step": 649
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.3189191131461966,
+ "learning_rate": 6.039246861297727e-05,
+ "loss": 1.0693,
+ "step": 650
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.3347197156648834,
+ "learning_rate": 6.031907993041227e-05,
+ "loss": 1.1009,
+ "step": 651
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.32274156348185445,
+ "learning_rate": 6.0245598946369826e-05,
+ "loss": 1.1675,
+ "step": 652
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.35534089035455224,
+ "learning_rate": 6.017202599464476e-05,
+ "loss": 1.1723,
+ "step": 653
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.3106026578570133,
+ "learning_rate": 6.009836140944965e-05,
+ "loss": 1.0954,
+ "step": 654
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.3309144454564729,
+ "learning_rate": 6.002460552541331e-05,
+ "loss": 1.0209,
+ "step": 655
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.3023619281400003,
+ "learning_rate": 5.9950758677579345e-05,
+ "loss": 1.0363,
+ "step": 656
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.3311182880219704,
+ "learning_rate": 5.987682120140451e-05,
+ "loss": 1.0515,
+ "step": 657
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.33396486010030413,
+ "learning_rate": 5.980279343275729e-05,
+ "loss": 1.1251,
+ "step": 658
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.3465764556678002,
+ "learning_rate": 5.97286757079163e-05,
+ "loss": 1.165,
+ "step": 659
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.304193441363374,
+ "learning_rate": 5.965446836356882e-05,
+ "loss": 1.0228,
+ "step": 660
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.3415149030413082,
+ "learning_rate": 5.9580171736809224e-05,
+ "loss": 1.0742,
+ "step": 661
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.33138658321132064,
+ "learning_rate": 5.950578616513746e-05,
+ "loss": 1.0843,
+ "step": 662
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.30774403421162994,
+ "learning_rate": 5.943131198645752e-05,
+ "loss": 1.065,
+ "step": 663
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.3428877492183819,
+ "learning_rate": 5.9356749539075885e-05,
+ "loss": 1.1101,
+ "step": 664
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.3621290546130101,
+ "learning_rate": 5.928209916170003e-05,
+ "loss": 1.1372,
+ "step": 665
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.3482375945469884,
+ "learning_rate": 5.9207361193436865e-05,
+ "loss": 1.132,
+ "step": 666
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.31754384974068384,
+ "learning_rate": 5.9132535973791156e-05,
+ "loss": 1.148,
+ "step": 667
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.36003834782050365,
+ "learning_rate": 5.9057623842664044e-05,
+ "loss": 1.1099,
+ "step": 668
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.2963701622969662,
+ "learning_rate": 5.8982625140351464e-05,
+ "loss": 1.0755,
+ "step": 669
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.32579569606066516,
+ "learning_rate": 5.8907540207542616e-05,
+ "loss": 1.0809,
+ "step": 670
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.4247563451753457,
+ "learning_rate": 5.8832369385318416e-05,
+ "loss": 1.097,
+ "step": 671
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.33076932102169776,
+ "learning_rate": 5.875711301514992e-05,
+ "loss": 1.1078,
+ "step": 672
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.3609238032332309,
+ "learning_rate": 5.8681771438896815e-05,
+ "loss": 1.1031,
+ "step": 673
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.325159585649425,
+ "learning_rate": 5.860634499880583e-05,
+ "loss": 1.0707,
+ "step": 674
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.4620687271068983,
+ "learning_rate": 5.853083403750922e-05,
+ "loss": 1.1017,
+ "step": 675
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.33485279064365936,
+ "learning_rate": 5.845523889802316e-05,
+ "loss": 1.0989,
+ "step": 676
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.30952573170841513,
+ "learning_rate": 5.8379559923746214e-05,
+ "loss": 1.0393,
+ "step": 677
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.33498605810588283,
+ "learning_rate": 5.830379745845781e-05,
+ "loss": 1.1259,
+ "step": 678
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.35771921163037307,
+ "learning_rate": 5.822795184631659e-05,
+ "loss": 1.0815,
+ "step": 679
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.3329650192347647,
+ "learning_rate": 5.815202343185894e-05,
+ "loss": 1.1344,
+ "step": 680
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3356634465845771,
+ "learning_rate": 5.807601255999736e-05,
+ "loss": 1.1297,
+ "step": 681
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3289442034151235,
+ "learning_rate": 5.7999919576018934e-05,
+ "loss": 1.022,
+ "step": 682
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3207007334784113,
+ "learning_rate": 5.7923744825583745e-05,
+ "loss": 1.0571,
+ "step": 683
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3582460325329284,
+ "learning_rate": 5.7847488654723304e-05,
+ "loss": 1.0778,
+ "step": 684
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3563317666176927,
+ "learning_rate": 5.777115140983899e-05,
+ "loss": 1.1003,
+ "step": 685
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 3.4694912945702105,
+ "learning_rate": 5.769473343770047e-05,
+ "loss": 1.121,
+ "step": 686
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.43002349520483113,
+ "learning_rate": 5.761823508544411e-05,
+ "loss": 1.0765,
+ "step": 687
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.39467783104839754,
+ "learning_rate": 5.754165670057142e-05,
+ "loss": 1.0788,
+ "step": 688
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.39629029674867916,
+ "learning_rate": 5.7464998630947464e-05,
+ "loss": 1.0812,
+ "step": 689
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.3880152093965208,
+ "learning_rate": 5.738826122479929e-05,
+ "loss": 1.1228,
+ "step": 690
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.3777874121959188,
+ "learning_rate": 5.7311444830714324e-05,
+ "loss": 1.0907,
+ "step": 691
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.38004041653523696,
+ "learning_rate": 5.723454979763882e-05,
+ "loss": 1.1263,
+ "step": 692
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.37049672627797636,
+ "learning_rate": 5.7157576474876246e-05,
+ "loss": 1.1438,
+ "step": 693
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.32973606103437614,
+ "learning_rate": 5.7080525212085725e-05,
+ "loss": 1.0553,
+ "step": 694
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.31674639252070325,
+ "learning_rate": 5.700339635928038e-05,
+ "loss": 1.06,
+ "step": 695
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.32282199426553837,
+ "learning_rate": 5.692619026682588e-05,
+ "loss": 1.0841,
+ "step": 696
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.4810882958061859,
+ "learning_rate": 5.684890728543869e-05,
+ "loss": 1.0803,
+ "step": 697
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.3995638550178378,
+ "learning_rate": 5.6771547766184566e-05,
+ "loss": 1.1187,
+ "step": 698
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.35264932960583484,
+ "learning_rate": 5.669411206047699e-05,
+ "loss": 1.0641,
+ "step": 699
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.35240640524733,
+ "learning_rate": 5.661660052007547e-05,
+ "loss": 1.076,
+ "step": 700
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.3540694609860389,
+ "learning_rate": 5.653901349708401e-05,
+ "loss": 1.1369,
+ "step": 701
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.3196055112925304,
+ "learning_rate": 5.646135134394955e-05,
+ "loss": 1.0677,
+ "step": 702
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.4214141007955914,
+ "learning_rate": 5.6383614413460266e-05,
+ "loss": 1.1139,
+ "step": 703
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.3625611311798579,
+ "learning_rate": 5.630580305874402e-05,
+ "loss": 1.1845,
+ "step": 704
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.3425208672181188,
+ "learning_rate": 5.62279176332668e-05,
+ "loss": 1.174,
+ "step": 705
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.3108419862818321,
+ "learning_rate": 5.6149958490830996e-05,
+ "loss": 1.0331,
+ "step": 706
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.3274644181571904,
+ "learning_rate": 5.607192598557394e-05,
+ "loss": 1.0664,
+ "step": 707
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.346218197215145,
+ "learning_rate": 5.599382047196617e-05,
+ "loss": 1.2088,
+ "step": 708
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.328497632267458,
+ "learning_rate": 5.591564230480989e-05,
+ "loss": 1.0287,
+ "step": 709
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.3708173720611468,
+ "learning_rate": 5.583739183923732e-05,
+ "loss": 1.0883,
+ "step": 710
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.3631427403535479,
+ "learning_rate": 5.575906943070915e-05,
+ "loss": 1.1155,
+ "step": 711
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.3305201458598695,
+ "learning_rate": 5.5680675435012834e-05,
+ "loss": 1.0958,
+ "step": 712
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.34978833532083714,
+ "learning_rate": 5.5602210208261036e-05,
+ "loss": 1.1437,
+ "step": 713
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.3510553882510229,
+ "learning_rate": 5.552367410688999e-05,
+ "loss": 1.0941,
+ "step": 714
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.3523747462465078,
+ "learning_rate": 5.544506748765789e-05,
+ "loss": 1.1289,
+ "step": 715
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.38262637783927445,
+ "learning_rate": 5.5366390707643266e-05,
+ "loss": 1.099,
+ "step": 716
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.38620065989073454,
+ "learning_rate": 5.528764412424334e-05,
+ "loss": 1.083,
+ "step": 717
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.3401355276121096,
+ "learning_rate": 5.520882809517245e-05,
+ "loss": 1.028,
+ "step": 718
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.3392061008943934,
+ "learning_rate": 5.512994297846039e-05,
+ "loss": 1.1083,
+ "step": 719
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.34219480421015414,
+ "learning_rate": 5.505098913245077e-05,
+ "loss": 1.1108,
+ "step": 720
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.3275058061553761,
+ "learning_rate": 5.497196691579945e-05,
+ "loss": 1.111,
+ "step": 721
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.36800249746509384,
+ "learning_rate": 5.489287668747283e-05,
+ "loss": 1.1221,
+ "step": 722
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.4129005533101575,
+ "learning_rate": 5.481371880674628e-05,
+ "loss": 1.0966,
+ "step": 723
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.36563906596251655,
+ "learning_rate": 5.4734493633202505e-05,
+ "loss": 1.0927,
+ "step": 724
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.3614650536839971,
+ "learning_rate": 5.465520152672986e-05,
+ "loss": 1.13,
+ "step": 725
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.36419665098633497,
+ "learning_rate": 5.4575842847520765e-05,
+ "loss": 1.1183,
+ "step": 726
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.34490689807258995,
+ "learning_rate": 5.449641795607005e-05,
+ "loss": 1.0919,
+ "step": 727
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.3627643746876298,
+ "learning_rate": 5.441692721317334e-05,
+ "loss": 1.0411,
+ "step": 728
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.323620411949565,
+ "learning_rate": 5.433737097992537e-05,
+ "loss": 1.0725,
+ "step": 729
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3521599501824965,
+ "learning_rate": 5.425774961771838e-05,
+ "loss": 1.0926,
+ "step": 730
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3302390546764222,
+ "learning_rate": 5.417806348824047e-05,
+ "loss": 1.0468,
+ "step": 731
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3833325802616019,
+ "learning_rate": 5.4098312953473956e-05,
+ "loss": 1.1291,
+ "step": 732
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3708621126835512,
+ "learning_rate": 5.401849837569372e-05,
+ "loss": 1.0887,
+ "step": 733
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3625834373416278,
+ "learning_rate": 5.393862011746555e-05,
+ "loss": 1.0981,
+ "step": 734
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3583343965080617,
+ "learning_rate": 5.385867854164451e-05,
+ "loss": 1.1021,
+ "step": 735
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.34598320594096066,
+ "learning_rate": 5.377867401137332e-05,
+ "loss": 1.1376,
+ "step": 736
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.3046382791315433,
+ "learning_rate": 5.369860689008066e-05,
+ "loss": 1.0206,
+ "step": 737
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.34464948380043725,
+ "learning_rate": 5.3618477541479505e-05,
+ "loss": 1.1084,
+ "step": 738
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.3203242519627101,
+ "learning_rate": 5.353828632956557e-05,
+ "loss": 1.0731,
+ "step": 739
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.3431169960355163,
+ "learning_rate": 5.3458033618615516e-05,
+ "loss": 1.091,
+ "step": 740
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.33492074521678705,
+ "learning_rate": 5.337771977318543e-05,
+ "loss": 1.1112,
+ "step": 741
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.32576546585541344,
+ "learning_rate": 5.3297345158109086e-05,
+ "loss": 1.0993,
+ "step": 742
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.3410007245037574,
+ "learning_rate": 5.3216910138496286e-05,
+ "loss": 1.094,
+ "step": 743
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.34891180680896833,
+ "learning_rate": 5.313641507973128e-05,
+ "loss": 1.1331,
+ "step": 744
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.37135766946717214,
+ "learning_rate": 5.3055860347471006e-05,
+ "loss": 1.1,
+ "step": 745
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.3465019415478411,
+ "learning_rate": 5.297524630764349e-05,
+ "loss": 1.1256,
+ "step": 746
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.37035388481626563,
+ "learning_rate": 5.289457332644615e-05,
+ "loss": 1.0366,
+ "step": 747
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.33853883270759155,
+ "learning_rate": 5.281384177034421e-05,
+ "loss": 1.0547,
+ "step": 748
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.364306618627317,
+ "learning_rate": 5.2733052006068897e-05,
+ "loss": 1.0768,
+ "step": 749
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.4021754315731627,
+ "learning_rate": 5.2652204400615916e-05,
+ "loss": 1.1382,
+ "step": 750
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.3332185389039008,
+ "learning_rate": 5.257129932124368e-05,
+ "loss": 1.0815,
+ "step": 751
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.3453105709879854,
+ "learning_rate": 5.249033713547173e-05,
+ "loss": 1.1109,
+ "step": 752
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.3385397539717797,
+ "learning_rate": 5.2409318211078966e-05,
+ "loss": 1.0529,
+ "step": 753
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.33197994450130447,
+ "learning_rate": 5.232824291610206e-05,
+ "loss": 1.0721,
+ "step": 754
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.32836289576124167,
+ "learning_rate": 5.224711161883375e-05,
+ "loss": 1.0459,
+ "step": 755
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.32491620058831744,
+ "learning_rate": 5.216592468782117e-05,
+ "loss": 1.0897,
+ "step": 756
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.3137879047811153,
+ "learning_rate": 5.2084682491864155e-05,
+ "loss": 1.096,
+ "step": 757
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.3356938043023012,
+ "learning_rate": 5.200338540001364e-05,
+ "loss": 1.0827,
+ "step": 758
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.36044340490819055,
+ "learning_rate": 5.192203378156984e-05,
+ "loss": 1.0617,
+ "step": 759
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.34674262047888293,
+ "learning_rate": 5.184062800608077e-05,
+ "loss": 1.1267,
+ "step": 760
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.32469442322149333,
+ "learning_rate": 5.1759168443340375e-05,
+ "loss": 1.1483,
+ "step": 761
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.3290384307774216,
+ "learning_rate": 5.167765546338698e-05,
+ "loss": 1.047,
+ "step": 762
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.31637612188770403,
+ "learning_rate": 5.1596089436501525e-05,
+ "loss": 1.0311,
+ "step": 763
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.3168693829641207,
+ "learning_rate": 5.151447073320597e-05,
+ "loss": 1.1405,
+ "step": 764
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.34322421571238926,
+ "learning_rate": 5.143279972426153e-05,
+ "loss": 1.1428,
+ "step": 765
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.3291030435830325,
+ "learning_rate": 5.1351076780667026e-05,
+ "loss": 1.0473,
+ "step": 766
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.33772039158758044,
+ "learning_rate": 5.1269302273657195e-05,
+ "loss": 1.0909,
+ "step": 767
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.3802031736890876,
+ "learning_rate": 5.118747657470102e-05,
+ "loss": 1.1482,
+ "step": 768
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.3296067628997962,
+ "learning_rate": 5.1105600055500025e-05,
+ "loss": 1.0085,
+ "step": 769
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.3707139982828035,
+ "learning_rate": 5.102367308798658e-05,
+ "loss": 1.0746,
+ "step": 770
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.3378537316757011,
+ "learning_rate": 5.094169604432225e-05,
+ "loss": 1.0482,
+ "step": 771
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.4008417246255145,
+ "learning_rate": 5.085966929689601e-05,
+ "loss": 1.1065,
+ "step": 772
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.3244385106988064,
+ "learning_rate": 5.077759321832271e-05,
+ "loss": 1.0827,
+ "step": 773
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.37228575732812336,
+ "learning_rate": 5.0695468181441215e-05,
+ "loss": 1.1146,
+ "step": 774
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.33761714797540276,
+ "learning_rate": 5.061329455931283e-05,
+ "loss": 1.092,
+ "step": 775
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.3158158390913494,
+ "learning_rate": 5.053107272521955e-05,
+ "loss": 1.1058,
+ "step": 776
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.3691501929738938,
+ "learning_rate": 5.044880305266239e-05,
+ "loss": 1.1599,
+ "step": 777
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.33730914019805525,
+ "learning_rate": 5.0366485915359645e-05,
+ "loss": 1.0615,
+ "step": 778
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.34970059240017,
+ "learning_rate": 5.0284121687245257e-05,
+ "loss": 1.1475,
+ "step": 779
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 0.3374028029407197,
+ "learning_rate": 5.020171074246707e-05,
+ "loss": 1.0926,
+ "step": 780
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 0.3350020681123992,
+ "learning_rate": 5.011925345538514e-05,
+ "loss": 1.1276,
+ "step": 781
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 0.3224228965786606,
+ "learning_rate": 5.003675020057003e-05,
+ "loss": 1.0183,
+ "step": 782
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 0.3357310714740298,
+ "learning_rate": 4.995420135280114e-05,
+ "loss": 1.1114,
+ "step": 783
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 0.3590203255363759,
+ "learning_rate": 4.9871607287064966e-05,
+ "loss": 1.1504,
+ "step": 784
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.33011195419611655,
+ "learning_rate": 4.9788968378553396e-05,
+ "loss": 1.0826,
+ "step": 785
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.31088868195439445,
+ "learning_rate": 4.970628500266207e-05,
+ "loss": 1.0704,
+ "step": 786
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.3144996103179409,
+ "learning_rate": 4.962355753498858e-05,
+ "loss": 1.1403,
+ "step": 787
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.3147269555419068,
+ "learning_rate": 4.954078635133081e-05,
+ "loss": 1.0898,
+ "step": 788
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.3280151747783868,
+ "learning_rate": 4.945797182768524e-05,
+ "loss": 1.1115,
+ "step": 789
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.3551996569232493,
+ "learning_rate": 4.937511434024524e-05,
+ "loss": 1.1731,
+ "step": 790
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 0.343863208057807,
+ "learning_rate": 4.9292214265399336e-05,
+ "loss": 1.0866,
+ "step": 791
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 0.37316699385322466,
+ "learning_rate": 4.920927197972949e-05,
+ "loss": 1.1083,
+ "step": 792
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 0.3635739774067832,
+ "learning_rate": 4.9126287860009453e-05,
+ "loss": 1.1393,
+ "step": 793
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 0.3755910554972886,
+ "learning_rate": 4.9043262283202974e-05,
+ "loss": 1.1624,
+ "step": 794
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 0.3635899120146823,
+ "learning_rate": 4.8960195626462145e-05,
+ "loss": 1.2095,
+ "step": 795
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 0.3642202684342816,
+ "learning_rate": 4.8877088267125664e-05,
+ "loss": 1.1099,
+ "step": 796
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 0.3339946548799316,
+ "learning_rate": 4.879394058271712e-05,
+ "loss": 1.1157,
+ "step": 797
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 0.3457189703100475,
+ "learning_rate": 4.871075295094329e-05,
+ "loss": 1.129,
+ "step": 798
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 0.3550931839691424,
+ "learning_rate": 4.862752574969241e-05,
+ "loss": 1.076,
+ "step": 799
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 0.36139108917966734,
+ "learning_rate": 4.8544259357032475e-05,
+ "loss": 1.1577,
+ "step": 800
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.39569703665247874,
+ "learning_rate": 4.8460954151209486e-05,
+ "loss": 1.0543,
+ "step": 801
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.3879033670170866,
+ "learning_rate": 4.837761051064579e-05,
+ "loss": 1.0688,
+ "step": 802
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.3796846713967255,
+ "learning_rate": 4.8294228813938285e-05,
+ "loss": 0.9911,
+ "step": 803
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.4007831430409375,
+ "learning_rate": 4.8210809439856804e-05,
+ "loss": 1.0126,
+ "step": 804
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.37588078665500885,
+ "learning_rate": 4.8127352767342276e-05,
+ "loss": 0.9302,
+ "step": 805
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.4078509175013281,
+ "learning_rate": 4.8043859175505095e-05,
+ "loss": 0.9982,
+ "step": 806
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.379096046185539,
+ "learning_rate": 4.7960329043623344e-05,
+ "loss": 1.0035,
+ "step": 807
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.3813938568133554,
+ "learning_rate": 4.787676275114111e-05,
+ "loss": 0.9579,
+ "step": 808
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.3686863564511168,
+ "learning_rate": 4.779316067766673e-05,
+ "loss": 1.0105,
+ "step": 809
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.4263940878847523,
+ "learning_rate": 4.770952320297109e-05,
+ "loss": 1.0677,
+ "step": 810
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.37178778374665006,
+ "learning_rate": 4.7625850706985886e-05,
+ "loss": 1.0019,
+ "step": 811
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.36803355429187945,
+ "learning_rate": 4.7542143569801894e-05,
+ "loss": 0.9937,
+ "step": 812
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.3897072472941179,
+ "learning_rate": 4.745840217166725e-05,
+ "loss": 1.0877,
+ "step": 813
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.35571833841716255,
+ "learning_rate": 4.737462689298577e-05,
+ "loss": 1.0015,
+ "step": 814
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.38930229991094323,
+ "learning_rate": 4.7290818114315086e-05,
+ "loss": 1.028,
+ "step": 815
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.411005007105147,
+ "learning_rate": 4.72069762163651e-05,
+ "loss": 1.0068,
+ "step": 816
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.3980240190337736,
+ "learning_rate": 4.7123101579996106e-05,
+ "loss": 0.9919,
+ "step": 817
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.36369517703115467,
+ "learning_rate": 4.7039194586217136e-05,
+ "loss": 0.967,
+ "step": 818
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.38591148840458894,
+ "learning_rate": 4.695525561618418e-05,
+ "loss": 0.9743,
+ "step": 819
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.45873135108949337,
+ "learning_rate": 4.687128505119853e-05,
+ "loss": 1.0516,
+ "step": 820
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.3866330351411308,
+ "learning_rate": 4.6787283272704966e-05,
+ "loss": 0.9939,
+ "step": 821
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.4620340173291326,
+ "learning_rate": 4.670325066229009e-05,
+ "loss": 1.0526,
+ "step": 822
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.38877454299870284,
+ "learning_rate": 4.661918760168052e-05,
+ "loss": 0.9904,
+ "step": 823
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.3880489386116793,
+ "learning_rate": 4.653509447274121e-05,
+ "loss": 0.9623,
+ "step": 824
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.3827392356186151,
+ "learning_rate": 4.6450971657473743e-05,
+ "loss": 1.0772,
+ "step": 825
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.4132814641854327,
+ "learning_rate": 4.63668195380145e-05,
+ "loss": 1.0533,
+ "step": 826
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.3703610182402835,
+ "learning_rate": 4.628263849663301e-05,
+ "loss": 0.9336,
+ "step": 827
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.4152053683299823,
+ "learning_rate": 4.619842891573016e-05,
+ "loss": 0.9801,
+ "step": 828
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.41791059043554274,
+ "learning_rate": 4.6114191177836514e-05,
+ "loss": 1.0617,
+ "step": 829
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.46363896517299136,
+ "learning_rate": 4.6029925665610524e-05,
+ "loss": 0.9687,
+ "step": 830
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.41141959057512445,
+ "learning_rate": 4.59456327618368e-05,
+ "loss": 1.0965,
+ "step": 831
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.3789192764519836,
+ "learning_rate": 4.5861312849424386e-05,
+ "loss": 0.9793,
+ "step": 832
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.4047291581107866,
+ "learning_rate": 4.5776966311405035e-05,
+ "loss": 1.0342,
+ "step": 833
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.4425157400959256,
+ "learning_rate": 4.5692593530931416e-05,
+ "loss": 1.0892,
+ "step": 834
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.3707332144806616,
+ "learning_rate": 4.560819489127545e-05,
+ "loss": 0.9815,
+ "step": 835
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.3897444102572823,
+ "learning_rate": 4.552377077582646e-05,
+ "loss": 0.9884,
+ "step": 836
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.42725787957019346,
+ "learning_rate": 4.543932156808959e-05,
+ "loss": 0.9972,
+ "step": 837
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.40615269781820007,
+ "learning_rate": 4.535484765168386e-05,
+ "loss": 0.9529,
+ "step": 838
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.3505829736050887,
+ "learning_rate": 4.527034941034063e-05,
+ "loss": 0.9492,
+ "step": 839
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.36688064686440497,
+ "learning_rate": 4.51858272279017e-05,
+ "loss": 0.9592,
+ "step": 840
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.4043468777955929,
+ "learning_rate": 4.5101281488317634e-05,
+ "loss": 1.048,
+ "step": 841
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.3811489793242706,
+ "learning_rate": 4.501671257564602e-05,
+ "loss": 1.0138,
+ "step": 842
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.39813004142325986,
+ "learning_rate": 4.49321208740497e-05,
+ "loss": 1.071,
+ "step": 843
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.3809751022095503,
+ "learning_rate": 4.484750676779504e-05,
+ "loss": 1.0351,
+ "step": 844
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.384312178013823,
+ "learning_rate": 4.4762870641250185e-05,
+ "loss": 0.9737,
+ "step": 845
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.40769404907923557,
+ "learning_rate": 4.467821287888331e-05,
+ "loss": 0.9659,
+ "step": 846
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.39594136851937817,
+ "learning_rate": 4.459353386526086e-05,
+ "loss": 0.9405,
+ "step": 847
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.37180161011562185,
+ "learning_rate": 4.450883398504584e-05,
+ "loss": 1.0732,
+ "step": 848
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.3772603623154663,
+ "learning_rate": 4.442411362299602e-05,
+ "loss": 0.9646,
+ "step": 849
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.4346142368506476,
+ "learning_rate": 4.433937316396224e-05,
+ "loss": 0.9572,
+ "step": 850
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.3997258084612474,
+ "learning_rate": 4.425461299288659e-05,
+ "loss": 0.9492,
+ "step": 851
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.41245476865247155,
+ "learning_rate": 4.416983349480073e-05,
+ "loss": 0.8732,
+ "step": 852
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.6761499297939195,
+ "learning_rate": 4.408503505482412e-05,
+ "loss": 1.0425,
+ "step": 853
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.40340979486858985,
+ "learning_rate": 4.400021805816225e-05,
+ "loss": 0.9596,
+ "step": 854
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.43290732392699666,
+ "learning_rate": 4.391538289010493e-05,
+ "loss": 1.0123,
+ "step": 855
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.36878054442190156,
+ "learning_rate": 4.383052993602448e-05,
+ "loss": 0.9448,
+ "step": 856
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.7146145128961262,
+ "learning_rate": 4.374565958137404e-05,
+ "loss": 1.0342,
+ "step": 857
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.44429357586145607,
+ "learning_rate": 4.3660772211685775e-05,
+ "loss": 1.0436,
+ "step": 858
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.4565751973640598,
+ "learning_rate": 4.357586821256918e-05,
+ "loss": 1.0311,
+ "step": 859
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.3919991236654277,
+ "learning_rate": 4.349094796970925e-05,
+ "loss": 1.1401,
+ "step": 860
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.4347441949284011,
+ "learning_rate": 4.3406011868864795e-05,
+ "loss": 1.0252,
+ "step": 861
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.38339976027415407,
+ "learning_rate": 4.3321060295866635e-05,
+ "loss": 1.0536,
+ "step": 862
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.37688790408195166,
+ "learning_rate": 4.32360936366159e-05,
+ "loss": 1.012,
+ "step": 863
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.4317538207582504,
+ "learning_rate": 4.315111227708224e-05,
+ "loss": 1.0505,
+ "step": 864
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.4145324872228796,
+ "learning_rate": 4.306611660330208e-05,
+ "loss": 1.0496,
+ "step": 865
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.416535227064448,
+ "learning_rate": 4.298110700137687e-05,
+ "loss": 0.9628,
+ "step": 866
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.46564356187492717,
+ "learning_rate": 4.2896083857471345e-05,
+ "loss": 1.0016,
+ "step": 867
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.4228980941889828,
+ "learning_rate": 4.281104755781172e-05,
+ "loss": 1.0904,
+ "step": 868
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.4267821214430208,
+ "learning_rate": 4.272599848868402e-05,
+ "loss": 1.0544,
+ "step": 869
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.45763332095792075,
+ "learning_rate": 4.264093703643223e-05,
+ "loss": 1.0686,
+ "step": 870
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.4347555516548761,
+ "learning_rate": 4.255586358745662e-05,
+ "loss": 1.0264,
+ "step": 871
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.3817726381103066,
+ "learning_rate": 4.247077852821194e-05,
+ "loss": 1.0045,
+ "step": 872
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.3882808845457995,
+ "learning_rate": 4.2385682245205685e-05,
+ "loss": 1.0193,
+ "step": 873
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.39410930252966775,
+ "learning_rate": 4.230057512499634e-05,
+ "loss": 0.9832,
+ "step": 874
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.4373094593907156,
+ "learning_rate": 4.221545755419159e-05,
+ "loss": 1.0343,
+ "step": 875
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.4462843721698891,
+ "learning_rate": 4.2130329919446646e-05,
+ "loss": 1.0324,
+ "step": 876
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.4747274247448112,
+ "learning_rate": 4.20451926074624e-05,
+ "loss": 0.9903,
+ "step": 877
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.4157472897596409,
+ "learning_rate": 4.196004600498369e-05,
+ "loss": 0.9266,
+ "step": 878
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.41625958088960685,
+ "learning_rate": 4.1874890498797605e-05,
+ "loss": 0.9658,
+ "step": 879
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.44784944130574333,
+ "learning_rate": 4.178972647573163e-05,
+ "loss": 0.9671,
+ "step": 880
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.4116839177956385,
+ "learning_rate": 4.1704554322651975e-05,
+ "loss": 0.9591,
+ "step": 881
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.4025569857639452,
+ "learning_rate": 4.161937442646176e-05,
+ "loss": 1.0072,
+ "step": 882
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.41518478124763597,
+ "learning_rate": 4.1534187174099285e-05,
+ "loss": 1.0275,
+ "step": 883
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.3987815564664466,
+ "learning_rate": 4.1448992952536275e-05,
+ "loss": 1.0039,
+ "step": 884
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.4270378155679982,
+ "learning_rate": 4.136379214877609e-05,
+ "loss": 1.0369,
+ "step": 885
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.42144733922972777,
+ "learning_rate": 4.127858514985203e-05,
+ "loss": 1.0269,
+ "step": 886
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.4198664438272548,
+ "learning_rate": 4.1193372342825494e-05,
+ "loss": 1.0427,
+ "step": 887
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.3985048256281719,
+ "learning_rate": 4.1108154114784275e-05,
+ "loss": 1.0702,
+ "step": 888
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.605520808292362,
+ "learning_rate": 4.102293085284083e-05,
+ "loss": 0.9749,
+ "step": 889
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.4150515863924052,
+ "learning_rate": 4.0937702944130426e-05,
+ "loss": 1.0231,
+ "step": 890
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.3935997576565283,
+ "learning_rate": 4.085247077580948e-05,
+ "loss": 1.0014,
+ "step": 891
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.399446131403209,
+ "learning_rate": 4.076723473505374e-05,
+ "loss": 0.9602,
+ "step": 892
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.4406024397129952,
+ "learning_rate": 4.068199520905655e-05,
+ "loss": 1.0425,
+ "step": 893
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.4036917571496492,
+ "learning_rate": 4.059675258502709e-05,
+ "loss": 0.973,
+ "step": 894
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.4057196459433299,
+ "learning_rate": 4.05115072501886e-05,
+ "loss": 0.9997,
+ "step": 895
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.4374124954708759,
+ "learning_rate": 4.0426259591776645e-05,
+ "loss": 0.9826,
+ "step": 896
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.4545699371285546,
+ "learning_rate": 4.0341009997037356e-05,
+ "loss": 1.0554,
+ "step": 897
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.4251917031237376,
+ "learning_rate": 4.025575885322563e-05,
+ "loss": 1.0217,
+ "step": 898
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.3857651901893941,
+ "learning_rate": 4.0170506547603427e-05,
+ "loss": 1.0317,
+ "step": 899
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.46323573798490897,
+ "learning_rate": 4.008525346743797e-05,
+ "loss": 1.0398,
+ "step": 900
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.4011541121460918,
+ "learning_rate": 4e-05,
+ "loss": 1.0706,
+ "step": 901
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.46493281221028004,
+ "learning_rate": 3.991474653256204e-05,
+ "loss": 1.0525,
+ "step": 902
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.41683080924539023,
+ "learning_rate": 3.982949345239658e-05,
+ "loss": 1.0905,
+ "step": 903
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.4750350025014512,
+ "learning_rate": 3.974424114677437e-05,
+ "loss": 1.049,
+ "step": 904
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.3867445073614702,
+ "learning_rate": 3.965899000296266e-05,
+ "loss": 0.9624,
+ "step": 905
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.378387661131469,
+ "learning_rate": 3.957374040822335e-05,
+ "loss": 1.0223,
+ "step": 906
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.3905996390559077,
+ "learning_rate": 3.948849274981141e-05,
+ "loss": 1.0315,
+ "step": 907
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.4139717689498189,
+ "learning_rate": 3.940324741497291e-05,
+ "loss": 0.9297,
+ "step": 908
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.39086355684921514,
+ "learning_rate": 3.9318004790943465e-05,
+ "loss": 0.9684,
+ "step": 909
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.4334915643736419,
+ "learning_rate": 3.923276526494627e-05,
+ "loss": 0.996,
+ "step": 910
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.40782018986229496,
+ "learning_rate": 3.9147529224190536e-05,
+ "loss": 1.0875,
+ "step": 911
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.43578702386625723,
+ "learning_rate": 3.906229705586959e-05,
+ "loss": 1.1214,
+ "step": 912
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.414945683409524,
+ "learning_rate": 3.89770691471592e-05,
+ "loss": 1.1037,
+ "step": 913
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.40665801579679106,
+ "learning_rate": 3.889184588521573e-05,
+ "loss": 0.9743,
+ "step": 914
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.4064250611574517,
+ "learning_rate": 3.880662765717453e-05,
+ "loss": 0.8814,
+ "step": 915
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.48023046298843347,
+ "learning_rate": 3.8721414850147985e-05,
+ "loss": 0.9663,
+ "step": 916
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.42358024833566227,
+ "learning_rate": 3.8636207851223924e-05,
+ "loss": 1.0491,
+ "step": 917
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.41522494786195835,
+ "learning_rate": 3.855100704746374e-05,
+ "loss": 1.033,
+ "step": 918
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.40890517696706496,
+ "learning_rate": 3.8465812825900715e-05,
+ "loss": 1.0369,
+ "step": 919
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.4325851866408538,
+ "learning_rate": 3.838062557353825e-05,
+ "loss": 0.9362,
+ "step": 920
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.4185860919050069,
+ "learning_rate": 3.8295445677348025e-05,
+ "loss": 1.026,
+ "step": 921
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.3975762375934804,
+ "learning_rate": 3.8210273524268375e-05,
+ "loss": 1.0412,
+ "step": 922
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.41725298241987474,
+ "learning_rate": 3.8125109501202395e-05,
+ "loss": 1.0004,
+ "step": 923
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.455183913149126,
+ "learning_rate": 3.803995399501632e-05,
+ "loss": 1.0594,
+ "step": 924
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.3993993856483797,
+ "learning_rate": 3.795480739253761e-05,
+ "loss": 0.9761,
+ "step": 925
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.41638796815161494,
+ "learning_rate": 3.786967008055337e-05,
+ "loss": 1.0369,
+ "step": 926
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.40015112695810534,
+ "learning_rate": 3.7784542445808414e-05,
+ "loss": 1.0271,
+ "step": 927
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.3995749494729548,
+ "learning_rate": 3.769942487500368e-05,
+ "loss": 1.0613,
+ "step": 928
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.4073556267037492,
+ "learning_rate": 3.761431775479432e-05,
+ "loss": 1.0528,
+ "step": 929
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.44218148822636044,
+ "learning_rate": 3.752922147178807e-05,
+ "loss": 1.0742,
+ "step": 930
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.4435063485893757,
+ "learning_rate": 3.744413641254339e-05,
+ "loss": 1.0825,
+ "step": 931
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.46841574994107515,
+ "learning_rate": 3.735906296356778e-05,
+ "loss": 1.0471,
+ "step": 932
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.40093716627657294,
+ "learning_rate": 3.727400151131599e-05,
+ "loss": 1.0474,
+ "step": 933
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3866415067997244,
+ "learning_rate": 3.71889524421883e-05,
+ "loss": 1.0209,
+ "step": 934
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.4881546110706673,
+ "learning_rate": 3.710391614252867e-05,
+ "loss": 1.0768,
+ "step": 935
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.4133084639324523,
+ "learning_rate": 3.701889299862314e-05,
+ "loss": 1.0423,
+ "step": 936
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.40523563084001196,
+ "learning_rate": 3.6933883396697936e-05,
+ "loss": 1.005,
+ "step": 937
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.38757352418642405,
+ "learning_rate": 3.684888772291777e-05,
+ "loss": 0.9659,
+ "step": 938
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.421394551890689,
+ "learning_rate": 3.676390636338411e-05,
+ "loss": 1.0454,
+ "step": 939
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.45693070958342186,
+ "learning_rate": 3.667893970413337e-05,
+ "loss": 1.1459,
+ "step": 940
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.4172025376377795,
+ "learning_rate": 3.659398813113522e-05,
+ "loss": 0.9954,
+ "step": 941
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.3871624019510191,
+ "learning_rate": 3.650905203029075e-05,
+ "loss": 1.0441,
+ "step": 942
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.38541342610032325,
+ "learning_rate": 3.642413178743083e-05,
+ "loss": 0.9465,
+ "step": 943
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.4208031670525743,
+ "learning_rate": 3.633922778831423e-05,
+ "loss": 1.0367,
+ "step": 944
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.41867209013040035,
+ "learning_rate": 3.6254340418625975e-05,
+ "loss": 1.0868,
+ "step": 945
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.431758149074127,
+ "learning_rate": 3.6169470063975536e-05,
+ "loss": 1.0689,
+ "step": 946
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.4988803338819952,
+ "learning_rate": 3.608461710989509e-05,
+ "loss": 1.0879,
+ "step": 947
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.4094858411191625,
+ "learning_rate": 3.5999781941837755e-05,
+ "loss": 1.0332,
+ "step": 948
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.3831847195845155,
+ "learning_rate": 3.591496494517589e-05,
+ "loss": 0.9751,
+ "step": 949
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.40535692821947267,
+ "learning_rate": 3.5830166505199284e-05,
+ "loss": 1.0594,
+ "step": 950
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.4875663789389966,
+ "learning_rate": 3.574538700711343e-05,
+ "loss": 0.9749,
+ "step": 951
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.5155923998285772,
+ "learning_rate": 3.566062683603778e-05,
+ "loss": 0.9999,
+ "step": 952
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.5280285947816189,
+ "learning_rate": 3.557588637700399e-05,
+ "loss": 1.1061,
+ "step": 953
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.46573407357796753,
+ "learning_rate": 3.5491166014954174e-05,
+ "loss": 1.102,
+ "step": 954
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.4122542582865379,
+ "learning_rate": 3.540646613473915e-05,
+ "loss": 1.0469,
+ "step": 955
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.41414476980823367,
+ "learning_rate": 3.53217871211167e-05,
+ "loss": 0.9973,
+ "step": 956
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.4030707611608045,
+ "learning_rate": 3.523712935874983e-05,
+ "loss": 0.9796,
+ "step": 957
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.4235313349747291,
+ "learning_rate": 3.5152493232204975e-05,
+ "loss": 1.0601,
+ "step": 958
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.4165235178302652,
+ "learning_rate": 3.5067879125950316e-05,
+ "loss": 1.0358,
+ "step": 959
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.44083984701952955,
+ "learning_rate": 3.4983287424354e-05,
+ "loss": 1.0957,
+ "step": 960
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.3781161039063518,
+ "learning_rate": 3.489871851168238e-05,
+ "loss": 0.9838,
+ "step": 961
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.4095747724038915,
+ "learning_rate": 3.4814172772098314e-05,
+ "loss": 1.014,
+ "step": 962
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.42197119558898466,
+ "learning_rate": 3.472965058965938e-05,
+ "loss": 1.0096,
+ "step": 963
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.4339963388152155,
+ "learning_rate": 3.464515234831615e-05,
+ "loss": 1.0158,
+ "step": 964
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.4284638765548976,
+ "learning_rate": 3.4560678431910424e-05,
+ "loss": 1.1047,
+ "step": 965
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.3935144535755794,
+ "learning_rate": 3.447622922417355e-05,
+ "loss": 0.9925,
+ "step": 966
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.45884343961025,
+ "learning_rate": 3.439180510872457e-05,
+ "loss": 1.0583,
+ "step": 967
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.42439320759788374,
+ "learning_rate": 3.4307406469068604e-05,
+ "loss": 0.9305,
+ "step": 968
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.45770082390324845,
+ "learning_rate": 3.4223033688594985e-05,
+ "loss": 1.054,
+ "step": 969
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.4284786643981094,
+ "learning_rate": 3.4138687150575634e-05,
+ "loss": 0.9409,
+ "step": 970
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.41356124058383237,
+ "learning_rate": 3.4054367238163215e-05,
+ "loss": 1.0739,
+ "step": 971
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.4255832249412624,
+ "learning_rate": 3.3970074334389496e-05,
+ "loss": 1.0764,
+ "step": 972
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.4337695536142702,
+ "learning_rate": 3.388580882216349e-05,
+ "loss": 1.0195,
+ "step": 973
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.41363495650922455,
+ "learning_rate": 3.380157108426985e-05,
+ "loss": 1.0615,
+ "step": 974
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.3950691247686479,
+ "learning_rate": 3.371736150336701e-05,
+ "loss": 1.0283,
+ "step": 975
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.4042823691555822,
+ "learning_rate": 3.3633180461985505e-05,
+ "loss": 1.0309,
+ "step": 976
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.3921158850479399,
+ "learning_rate": 3.354902834252627e-05,
+ "loss": 1.068,
+ "step": 977
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.38349545732725654,
+ "learning_rate": 3.346490552725879e-05,
+ "loss": 1.0886,
+ "step": 978
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.38689221457248724,
+ "learning_rate": 3.33808123983195e-05,
+ "loss": 0.987,
+ "step": 979
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.38660550867425647,
+ "learning_rate": 3.329674933770992e-05,
+ "loss": 1.069,
+ "step": 980
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.3917593746353493,
+ "learning_rate": 3.321271672729504e-05,
+ "loss": 0.9858,
+ "step": 981
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.4292314072827653,
+ "learning_rate": 3.3128714948801474e-05,
+ "loss": 1.0477,
+ "step": 982
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.479414638418211,
+ "learning_rate": 3.3044744383815835e-05,
+ "loss": 1.0763,
+ "step": 983
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.380831894995463,
+ "learning_rate": 3.2960805413782884e-05,
+ "loss": 1.0393,
+ "step": 984
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.42402274703362114,
+ "learning_rate": 3.2876898420003914e-05,
+ "loss": 1.0837,
+ "step": 985
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.4571447203722258,
+ "learning_rate": 3.279302378363491e-05,
+ "loss": 1.0594,
+ "step": 986
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3776673281658531,
+ "learning_rate": 3.270918188568493e-05,
+ "loss": 1.0121,
+ "step": 987
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.4367173448132159,
+ "learning_rate": 3.262537310701425e-05,
+ "loss": 0.9612,
+ "step": 988
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.43679765208840926,
+ "learning_rate": 3.254159782833276e-05,
+ "loss": 1.0565,
+ "step": 989
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.4018151260013493,
+ "learning_rate": 3.2457856430198126e-05,
+ "loss": 0.9975,
+ "step": 990
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.40461959940721076,
+ "learning_rate": 3.237414929301412e-05,
+ "loss": 1.0255,
+ "step": 991
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.41342378541540653,
+ "learning_rate": 3.2290476797028926e-05,
+ "loss": 1.024,
+ "step": 992
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.3926173909201105,
+ "learning_rate": 3.220683932233328e-05,
+ "loss": 1.0877,
+ "step": 993
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.3835623199834992,
+ "learning_rate": 3.21232372488589e-05,
+ "loss": 1.0992,
+ "step": 994
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.39901809497083496,
+ "learning_rate": 3.2039670956376656e-05,
+ "loss": 1.0723,
+ "step": 995
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.3979604537466272,
+ "learning_rate": 3.195614082449492e-05,
+ "loss": 1.0201,
+ "step": 996
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.4057122427176845,
+ "learning_rate": 3.1872647232657723e-05,
+ "loss": 1.0885,
+ "step": 997
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.39747060350754754,
+ "learning_rate": 3.17891905601432e-05,
+ "loss": 1.0544,
+ "step": 998
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.4397658078291558,
+ "learning_rate": 3.1705771186061715e-05,
+ "loss": 1.0998,
+ "step": 999
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.37373547663810053,
+ "learning_rate": 3.162238948935423e-05,
+ "loss": 1.0465,
+ "step": 1000
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.4042576001255747,
+ "learning_rate": 3.153904584879052e-05,
+ "loss": 0.9206,
+ "step": 1001
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.4042994886900337,
+ "learning_rate": 3.1455740642967545e-05,
+ "loss": 0.975,
+ "step": 1002
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.4359721725421234,
+ "learning_rate": 3.1372474250307594e-05,
+ "loss": 0.9163,
+ "step": 1003
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.4886423524029179,
+ "learning_rate": 3.128924704905673e-05,
+ "loss": 0.9956,
+ "step": 1004
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.48669990170138744,
+ "learning_rate": 3.1206059417282894e-05,
+ "loss": 0.9874,
+ "step": 1005
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.41954255928633066,
+ "learning_rate": 3.1122911732874356e-05,
+ "loss": 0.8986,
+ "step": 1006
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.43363878644039366,
+ "learning_rate": 3.103980437353787e-05,
+ "loss": 0.9268,
+ "step": 1007
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.5199775120765874,
+ "learning_rate": 3.0956737716797047e-05,
+ "loss": 0.9341,
+ "step": 1008
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.40735757951139595,
+ "learning_rate": 3.087371213999056e-05,
+ "loss": 0.9142,
+ "step": 1009
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.44449027493884186,
+ "learning_rate": 3.079072802027051e-05,
+ "loss": 0.966,
+ "step": 1010
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.46590494286419365,
+ "learning_rate": 3.070778573460068e-05,
+ "loss": 0.8768,
+ "step": 1011
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.45161453051587425,
+ "learning_rate": 3.062488565975476e-05,
+ "loss": 0.9299,
+ "step": 1012
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.5022364894382346,
+ "learning_rate": 3.054202817231477e-05,
+ "loss": 0.9352,
+ "step": 1013
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.46443439138730447,
+ "learning_rate": 3.0459213648669195e-05,
+ "loss": 0.8913,
+ "step": 1014
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.41932307219261455,
+ "learning_rate": 3.0376442465011436e-05,
+ "loss": 0.8968,
+ "step": 1015
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.42445864358441704,
+ "learning_rate": 3.0293714997337927e-05,
+ "loss": 0.8449,
+ "step": 1016
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.4489777773688699,
+ "learning_rate": 3.0211031621446607e-05,
+ "loss": 0.927,
+ "step": 1017
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.45180577504235525,
+ "learning_rate": 3.0128392712935044e-05,
+ "loss": 0.8834,
+ "step": 1018
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.44680469596106914,
+ "learning_rate": 3.0045798647198882e-05,
+ "loss": 1.0176,
+ "step": 1019
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.45747851649657734,
+ "learning_rate": 2.9963249799429986e-05,
+ "loss": 0.9036,
+ "step": 1020
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.5045904501932169,
+ "learning_rate": 2.988074654461489e-05,
+ "loss": 1.0475,
+ "step": 1021
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.47086144833942983,
+ "learning_rate": 2.9798289257532946e-05,
+ "loss": 0.9596,
+ "step": 1022
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.4406706196288816,
+ "learning_rate": 2.9715878312754767e-05,
+ "loss": 1.0054,
+ "step": 1023
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.44584179061175105,
+ "learning_rate": 2.9633514084640365e-05,
+ "loss": 0.8981,
+ "step": 1024
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.462343843042957,
+ "learning_rate": 2.955119694733763e-05,
+ "loss": 0.974,
+ "step": 1025
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.46767265335377156,
+ "learning_rate": 2.946892727478045e-05,
+ "loss": 1.0063,
+ "step": 1026
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.43250194002958803,
+ "learning_rate": 2.9386705440687168e-05,
+ "loss": 0.9332,
+ "step": 1027
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.44391321845917453,
+ "learning_rate": 2.9304531818558795e-05,
+ "loss": 0.8937,
+ "step": 1028
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.45616826414927975,
+ "learning_rate": 2.9222406781677294e-05,
+ "loss": 0.869,
+ "step": 1029
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.5670635396983207,
+ "learning_rate": 2.9140330703103992e-05,
+ "loss": 0.9697,
+ "step": 1030
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.4860829361401993,
+ "learning_rate": 2.905830395567776e-05,
+ "loss": 0.9677,
+ "step": 1031
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.4484206829172443,
+ "learning_rate": 2.8976326912013422e-05,
+ "loss": 0.9582,
+ "step": 1032
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.46728002332884067,
+ "learning_rate": 2.8894399944499974e-05,
+ "loss": 0.9023,
+ "step": 1033
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.48539702863685763,
+ "learning_rate": 2.8812523425299e-05,
+ "loss": 0.9725,
+ "step": 1034
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.42521485032555006,
+ "learning_rate": 2.873069772634281e-05,
+ "loss": 0.9525,
+ "step": 1035
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.4068824768950637,
+ "learning_rate": 2.8648923219332997e-05,
+ "loss": 0.8318,
+ "step": 1036
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.45227216852040214,
+ "learning_rate": 2.856720027573848e-05,
+ "loss": 1.0211,
+ "step": 1037
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.42310911927974604,
+ "learning_rate": 2.8485529266794043e-05,
+ "loss": 0.9422,
+ "step": 1038
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.4494478185683011,
+ "learning_rate": 2.8403910563498482e-05,
+ "loss": 0.9577,
+ "step": 1039
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.517885146963669,
+ "learning_rate": 2.832234453661304e-05,
+ "loss": 0.9551,
+ "step": 1040
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.46117112897797924,
+ "learning_rate": 2.8240831556659635e-05,
+ "loss": 0.9336,
+ "step": 1041
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.4610208217170147,
+ "learning_rate": 2.815937199391924e-05,
+ "loss": 0.926,
+ "step": 1042
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.445775414660019,
+ "learning_rate": 2.807796621843016e-05,
+ "loss": 0.9737,
+ "step": 1043
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.46809555676786746,
+ "learning_rate": 2.799661459998638e-05,
+ "loss": 0.9916,
+ "step": 1044
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.4366867439876077,
+ "learning_rate": 2.7915317508135848e-05,
+ "loss": 0.9549,
+ "step": 1045
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.388979809570948,
+ "learning_rate": 2.7834075312178838e-05,
+ "loss": 0.8967,
+ "step": 1046
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.45918748975994583,
+ "learning_rate": 2.775288838116626e-05,
+ "loss": 1.032,
+ "step": 1047
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.4607131980517622,
+ "learning_rate": 2.767175708389794e-05,
+ "loss": 0.9638,
+ "step": 1048
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.4583573438714022,
+ "learning_rate": 2.759068178892105e-05,
+ "loss": 0.8574,
+ "step": 1049
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.4506028295056863,
+ "learning_rate": 2.750966286452828e-05,
+ "loss": 0.904,
+ "step": 1050
+ }
+ ],
+ "logging_steps": 1.0,
+ "max_steps": 1638,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 3,
+ "save_steps": 50,
+ "total_flos": 1088318406131712.0,
+ "train_batch_size": 1,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/checkpoint-1050/training_args.bin b/checkpoint-1050/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..2a09fdd266ae9810360e648350362ad7d2be5739
--- /dev/null
+++ b/checkpoint-1050/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a909eb0ea0915df22d4edfd0cec39e5fb548124f1fa8377ddc8d0ea5dab0d0cc
+size 6776
diff --git a/checkpoint-1050/zero_to_fp32.py b/checkpoint-1050/zero_to_fp32.py
new file mode 100755
index 0000000000000000000000000000000000000000..24cc342e78d1a006c782b3a4cd68d9ce786d8fd8
--- /dev/null
+++ b/checkpoint-1050/zero_to_fp32.py
@@ -0,0 +1,604 @@
+#!/usr/bin/env python
+
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
# This script extracts fp32 consolidated weights from ZeRO stage 1, 2 and 3 DeepSpeed checkpoints. It gets
+# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
+# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
+# application.
+#
+# example: python zero_to_fp32.py . pytorch_model.bin
+
+import argparse
+import torch
+import glob
+import math
+import os
+import re
+from collections import OrderedDict
+from dataclasses import dataclass
+
+# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
+# DeepSpeed data structures it has to be available in the current python environment.
+from deepspeed.utils import logger
+from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
+
+
@dataclass
class zero_model_state:
    """The pieces of one rank's model-states checkpoint file needed to
    reconstruct a consolidated fp32 state_dict.

    All fields are required (no defaults), same as the original definition.
    """
    # NOTE: upstream annotated several fields with `dict()` *instances*;
    # dataclass only inspects __annotations__, so behavior was unaffected,
    # but plain types are the correct annotation form.
    buffers: dict                # buffer name -> fp32 tensor
    param_shapes: list           # one {param name -> shape} dict per optimizer param group
    shared_params: list          # [alias_name, source_name] pairs
    ds_version: int              # deepspeed version recorded in the checkpoint (may be a version string — annotated int upstream)
    frozen_param_shapes: dict    # frozen param name -> shape, or None when absent
    frozen_param_fragments: dict # frozen param name -> this rank's fragment tensor, or None when absent
+
+
# Global verbosity toggle; overwritten from the --debug CLI flag in __main__.
debug = 0

# load to cpu — all tensors are materialized on the host so no GPU is required
device = torch.device('cpu')
+
+
def atoi(text):
    """Convert *text* to an int when it is all digits, else return it unchanged."""
    if text.isdigit():
        return int(text)
    return text
+
+
def natural_keys(text):
    """Sort key that orders strings in human ("natural") order.

    alist.sort(key=natural_keys) puts e.g. "rank2" before "rank10".
    http://nedbatchelder.com/blog/200712/human_sorting.html
    (See Toothy's implementation in the comments)
    """
    # split keeps the digit runs (capturing group), so "r10x" -> ["r", "10", "x"]
    parts = re.split(r'(\d+)', text)
    return [int(part) if part.isdigit() else part for part in parts]
+
+
def get_model_state_file(checkpoint_dir, zero_stage):
    """Return the path of the single model-states file in *checkpoint_dir*.

    Args:
        checkpoint_dir: directory holding the per-rank checkpoint files
        zero_stage: ZeRO optimization stage the checkpoint was saved with (1, 2 or 3)

    Raises:
        FileNotFoundError: if the directory or the expected file doesn't exist
        ValueError: if *zero_stage* is not a known stage
    """
    if not os.path.isdir(checkpoint_dir):
        raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")

    # there should be only one file
    if zero_stage <= 2:
        file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
    elif zero_stage == 3:
        file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
    else:
        # previously an unrecognized stage fell through and raised an opaque
        # NameError on the unbound `file`; fail with a clear message instead
        # (same wording as parse_optim_states)
        raise ValueError(f"unknown zero stage {zero_stage}")

    if not os.path.exists(file):
        raise FileNotFoundError(f"can't find model states file at '{file}'")

    return file
+
+
def get_checkpoint_files(checkpoint_dir, glob_pattern):
    """Return every file in *checkpoint_dir* matching *glob_pattern*, in natural sort order.

    Raises FileNotFoundError when nothing matches.
    """
    # XXX: need to test that this simple glob rule works for multi-node setup too
    pattern = os.path.join(checkpoint_dir, glob_pattern)
    ckpt_files = sorted(glob.glob(pattern), key=natural_keys)

    if not ckpt_files:
        raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")

    return ckpt_files
+
+
def get_optim_files(checkpoint_dir):
    """Return the naturally-sorted per-rank ``*_optim_states.pt`` files."""
    return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
+
+
def get_model_state_files(checkpoint_dir):
    """Return the naturally-sorted per-rank ``*_model_states.pt`` files."""
    return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
+
+
def parse_model_states(files):
    """Load each per-rank model-states file into a ``zero_model_state``.

    Args:
        files: naturally-sorted list of ``*_model_states.pt`` paths (one per rank)

    Returns:
        list of ``zero_model_state``, in the same order as *files*

    Raises:
        ValueError: if a file lacks the buffer-names key, i.e. is not a
            model-states checkpoint
    """
    zero_model_states = []
    for file in files:
        # NOTE(review): torch.load unpickles; only run on checkpoints you trust
        state_dict = torch.load(file, map_location=device)

        if BUFFER_NAMES not in state_dict:
            raise ValueError(f"{file} is not a model state checkpoint")
        buffer_names = state_dict[BUFFER_NAMES]
        if debug:
            print("Found buffers:", buffer_names)

        # recover just the buffers while restoring them to fp32 if they were saved in fp16
        buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
        param_shapes = state_dict[PARAM_SHAPES]

        # collect parameters that are included in param_shapes
        param_names = []
        for s in param_shapes:
            for name in s.keys():
                param_names.append(name)

        # update with frozen parameters
        frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
        if frozen_param_shapes is not None:
            if debug:
                print(f"Found frozen_param_shapes: {frozen_param_shapes}")
            param_names += list(frozen_param_shapes.keys())

        # handle shared params — stored as {alias: source} mapping; kept as pairs
        shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]

        ds_version = state_dict.get(DS_VERSION, None)

        frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)

        z_model_state = zero_model_state(buffers=buffers,
                                         param_shapes=param_shapes,
                                         shared_params=shared_params,
                                         ds_version=ds_version,
                                         frozen_param_shapes=frozen_param_shapes,
                                         frozen_param_fragments=frozen_param_fragments)
        zero_model_states.append(z_model_state)

    return zero_model_states
+
+
def parse_optim_states(files, ds_checkpoint_dir):
    """Load every per-rank optimizer-states file and extract what the merge needs.

    Args:
        files: naturally-sorted per-rank ``*_optim_states.pt`` paths
        ds_checkpoint_dir: checkpoint folder (used only in error messages)

    Returns:
        ``(zero_stage, world_size, fp32_flat_groups)`` — for stage <= 2 each
        rank contributes a list of flat fp32 partitions (one per param group);
        for stage 3 the groups are concatenated into one flat tensor per rank.

    Raises:
        ValueError: if the files are not a zero checkpoint, the rank count
            doesn't match the partition count, or the stage is unknown
    """

    total_files = len(files)
    state_dicts = []
    for f in files:
        state_dict = torch.load(f, map_location=device)
        # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights
        # and also handle the case where it was already removed by another helper script
        # (NOTE(review): uses the literal key rather than the imported
        # OPTIMIZER_STATE_DICT constant — presumably the same string)
        state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
        state_dicts.append(state_dict)

    if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]:
        raise ValueError(f"{files[0]} is not a zero checkpoint")
    zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
    world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]

    # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
    # parameters can be different from data parallelism for non-expert parameters. So we can just
    # use the max of the partition_count to get the dp world_size.

    if type(world_size) is list:
        world_size = max(world_size)

    if world_size != total_files:
        raise ValueError(
            f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
            "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
        )

    # the groups are named differently in each stage
    if zero_stage <= 2:
        fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
    elif zero_stage == 3:
        fp32_groups_key = FP32_FLAT_GROUPS
    else:
        raise ValueError(f"unknown zero stage {zero_stage}")

    if zero_stage <= 2:
        fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
    elif zero_stage == 3:
        # if there is more than one param group, there will be multiple flattened tensors - one
        # flattened tensor per group - for simplicity merge them into a single tensor
        #
        # XXX: could make the script more memory efficient for when there are multiple groups - it
        # will require matching the sub-lists of param_shapes for each param group flattened tensor

        fp32_flat_groups = [
            torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
        ]

    return zero_stage, world_size, fp32_flat_groups
+
+
def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
    """
    Returns fp32 state_dict reconstructed from ds checkpoint

    Args:
        - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
        - ``exclude_frozen_parameters``: when True, frozen params are left out of the result

    """
    print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")

    # stage and world size come from the optimizer files; they decide which
    # merge strategy below applies
    optim_files = get_optim_files(ds_checkpoint_dir)
    zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
    print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")

    model_files = get_model_state_files(ds_checkpoint_dir)

    zero_model_states = parse_model_states(model_files)
    print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')

    # any other stage would already have raised inside parse_optim_states
    if zero_stage <= 2:
        return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                                          exclude_frozen_parameters)
    elif zero_stage == 3:
        return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                                          exclude_frozen_parameters)
+
+
def _zero2_merge_frozen_params(state_dict, zero_model_states):
    """Copy frozen (non-trained) params into *state_dict* (ZeRO 1/2).

    In stage 1/2 frozen params are not partitioned, so rank 0's fragments are
    used as-is. No-op when the checkpoint has no frozen params.
    """
    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
        return

    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
    frozen_param_fragments = zero_model_states[0].frozen_param_fragments

    if debug:
        num_elem = sum(s.numel() for s in frozen_param_shapes.values())
        print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

    wanted_params = len(frozen_param_shapes)
    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
    avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
    print(f'Frozen params: Have {avail_numel} numels to process.')
    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params = 0
    total_numel = 0
    for name, shape in frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        # fragment already holds the full (unpartitioned) param in stage 1/2
        state_dict[name] = frozen_param_fragments[name]

        if debug:
            print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _has_callable(obj, fn):
+ attr = getattr(obj, fn, None)
+ return callable(attr)
+
+
def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    """Reassemble trainable params from ZeRO-1/2 partitions into *state_dict*.

    Each param group's per-rank flat partitions are concatenated into one
    fp32 vector, which is then sliced per parameter using rank 0's recorded
    shapes. Mutates *state_dict* in place.
    """
    param_shapes = zero_model_states[0].param_shapes

    # Reconstruction protocol:
    #
    # XXX: document this

    if debug:
        for i in range(world_size):
            for j in range(len(fp32_flat_groups[0])):
                print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")

    # XXX: memory usage doubles here (zero2)
    num_param_groups = len(fp32_flat_groups[0])
    merged_single_partition_of_fp32_groups = []
    for i in range(num_param_groups):
        merged_partitions = [sd[i] for sd in fp32_flat_groups]
        full_single_fp32_vector = torch.cat(merged_partitions, 0)
        merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
    avail_numel = sum(
        [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])

    if debug:
        wanted_params = sum([len(shapes) for shapes in param_shapes])
        wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
        # not asserting if there is a mismatch due to possible padding
        print(f"Have {avail_numel} numels to process.")
        print(f"Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    total_numel = 0
    total_params = 0
    for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
        offset = 0
        avail_numel = full_single_fp32_vector.numel()
        for name, shape in shapes.items():

            # shape may be a torch.Size (has .numel) or a plain tuple/list
            unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
            total_numel += unpartitioned_numel
            total_params += 1

            if debug:
                print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
            # consecutive params occupy consecutive slices of the flat vector
            state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
            offset += unpartitioned_numel

        # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
        # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
        # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
        # live optimizer object, so we are checking that the numbers are within the right range
        align_to = 2 * world_size

        def zero2_align(x):
            return align_to * math.ceil(x / align_to)

        if debug:
            print(f"original offset={offset}, avail_numel={avail_numel}")

        offset = zero2_align(offset)
        avail_numel = zero2_align(avail_numel)

        if debug:
            print(f"aligned offset={offset}, avail_numel={avail_numel}")

        # Sanity check
        if offset != avail_numel:
            raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
+
+
def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                               exclude_frozen_parameters):
    """Build the consolidated fp32 state_dict for a ZeRO-1/2 checkpoint.

    Order matters: buffers, then (optionally) frozen params, then trainable
    params, then shared-param aliases resolved last so they can point at any
    of the above.
    """
    state_dict = OrderedDict()

    # buffers
    buffers = zero_model_states[0].buffers
    state_dict.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    if not exclude_frozen_parameters:
        _zero2_merge_frozen_params(state_dict, zero_model_states)

    _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters — alias pair[0] reuses the tensor of pair[1]
    for pair in zero_model_states[0].shared_params:
        if pair[1] in state_dict:
            state_dict[pair[0]] = state_dict[pair[1]]

    return state_dict
+
+
def zero3_partitioned_param_info(unpartitioned_numel, world_size):
    """Return ``(partitioned_numel, padding_numel)`` for a ZeRO-3 param.

    Args:
        unpartitioned_numel: total element count of the full parameter
        world_size: number of ranks the parameter is sharded across

    Returns:
        partitioned_numel: elements held per rank (ceil division)
        padding_numel: trailing pad elements on the last rank, 0 if it divides evenly
    """
    # Integer arithmetic instead of math.ceil(a / b): float division loses
    # precision for numels above 2**53 and would silently return a wrong
    # partition size for very large parameters.
    quotient, remainder = divmod(unpartitioned_numel, world_size)
    if remainder:
        return quotient + 1, world_size - remainder
    return quotient, 0
+
+
def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
    """Reassemble frozen params from ZeRO-3 fragments into *state_dict*.

    Unlike stage 1/2, stage 3 shards frozen params too, so each param is
    rebuilt by concatenating every rank's fragment and trimming the padding.
    No-op when the checkpoint has no frozen params.
    """
    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
        return

    if debug:
        for i in range(world_size):
            num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
            print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
    wanted_params = len(frozen_param_shapes)
    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
    avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
    print(f'Frozen params: Have {avail_numel} numels to process.')
    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params = 0
    total_numel = 0
    for name, shape in zero_model_states[0].frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        # concat every rank's fragment, drop trailing pad, restore the shape
        param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
        state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)

        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    """Reassemble trainable params from ZeRO-3 flat partitions into *state_dict*.

    Each rank holds one flat fp32 tensor; every param is rebuilt by taking
    the same slice from each rank's tensor, concatenating, and trimming the
    per-param padding. Mutates *state_dict* in place.
    """
    param_shapes = zero_model_states[0].param_shapes
    avail_numel = fp32_flat_groups[0].numel() * world_size
    # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
    # param, re-consolidating each param, while dealing with padding if any

    # merge list of dicts, preserving order
    param_shapes = {k: v for d in param_shapes for k, v in d.items()}

    if debug:
        for i in range(world_size):
            print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")

    wanted_params = len(param_shapes)
    wanted_numel = sum(shape.numel() for shape in param_shapes.values())
    # not asserting if there is a mismatch due to possible padding
    # (NOTE(review): recomputes the same avail_numel as above — redundant but harmless)
    avail_numel = fp32_flat_groups[0].numel() * world_size
    print(f"Trainable params: Have {avail_numel} numels to process.")
    print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    offset = 0
    total_numel = 0
    total_params = 0
    for name, shape in param_shapes.items():

        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel
        total_params += 1

        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

        # XXX: memory usage doubles here
        # same slice from every rank, concatenated, then trimmed to the true numel
        state_dict[name] = torch.cat(
            tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
            0).narrow(0, 0, unpartitioned_numel).view(shape)
        offset += partitioned_numel

    # offset advanced per-rank; scale up to compare against the global numel
    offset *= world_size

    # Sanity check
    if offset != avail_numel:
        raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
+
+
def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                               exclude_frozen_parameters):
    """Build the consolidated fp32 state_dict for a ZeRO-3 checkpoint.

    Order matters: buffers, then (optionally) frozen params, then trainable
    params, then shared-param aliases resolved last so they can point at any
    of the above.
    """
    state_dict = OrderedDict()

    # buffers
    buffers = zero_model_states[0].buffers
    state_dict.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    if not exclude_frozen_parameters:
        _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)

    _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters — alias pair[0] reuses the tensor of pair[1]
    for pair in zero_model_states[0].shared_params:
        if pair[1] in state_dict:
            state_dict[pair[0]] = state_dict[pair[1]]

    return state_dict
+
+
def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None, exclude_frozen_parameters=False):
    """
    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
    ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
    via a model hub.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
        - ``exclude_frozen_parameters``: exclude frozen parameters

    Returns:
        - pytorch ``state_dict``

    Raises:
        - ``ValueError``: when no tag is given and no 'latest' file exists
        - ``FileNotFoundError``: when the resolved tag folder doesn't exist

    Note: this approach may not work if your application doesn't have sufficient free CPU memory and
    you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
    the checkpoint.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
        # do the training and checkpoint saving
        state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
        model = model.cpu() # move to cpu
        model.load_state_dict(state_dict)
        # submit to model hub or save the model to share with others

    In this example the ``model`` will no longer be usable in the deepspeed context of the same
    application. i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.

    """
    if tag is None:
        # resolve the tag (e.g. "global_step1100") from the 'latest' marker
        # file DeepSpeed writes at save time
        latest_path = os.path.join(checkpoint_dir, 'latest')
        if os.path.isfile(latest_path):
            with open(latest_path, 'r') as fd:
                tag = fd.read().strip()
        else:
            raise ValueError(f"Unable to find 'latest' file at {latest_path}")

    ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)

    if not os.path.isdir(ds_checkpoint_dir):
        raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")

    return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
+
+
def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None, exclude_frozen_parameters=False):
    """Consolidate a ZeRO 2/3 checkpoint into a single fp32 ``state_dict`` file.

    The output can be loaded with ``torch.load(file)`` + ``load_state_dict()``
    and used for training without DeepSpeed.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
        - ``exclude_frozen_parameters``: exclude frozen parameters
    """

    consolidated_state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag, exclude_frozen_parameters)
    print(f"Saving fp32 state dict to {output_file}")
    torch.save(consolidated_state_dict, output_file)
+
+
def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
    """
    1. Put the provided model to cpu
    2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
    3. Load it into the provided model

    Args:
        - ``model``: the model object to update
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``

    Returns:
        - ``model``: modified model

    Make sure you have plenty of CPU memory available before you call this function. If you don't
    have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
    conveniently placed for you in the checkpoint folder.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
        model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
        # submit to model hub or save the model to share with others

    Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context
    of the same application. i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    """
    logger.info(f"Extracting fp32 weights")
    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)

    logger.info(f"Overwriting model with fp32 weights")
    # NOTE(review): strict=False — missing/unexpected keys are silently
    # ignored; presumably to tolerate keys the consolidation doesn't cover
    model = model.cpu()
    model.load_state_dict(state_dict, strict=False)

    return model
+
+
# CLI entry point: `python zero_to_fp32.py <checkpoint_dir> <output_file>`
if __name__ == "__main__":

    parser = argparse.ArgumentParser()
    parser.add_argument("checkpoint_dir",
                        type=str,
                        help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
    parser.add_argument(
        "output_file",
        type=str,
        help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)")
    parser.add_argument("-t",
                        "--tag",
                        type=str,
                        default=None,
                        help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
    parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
    parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
    args = parser.parse_args()

    # propagate the CLI flag to the module-level verbosity toggle
    debug = args.debug

    convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
                                               args.output_file,
                                               tag=args.tag,
                                               exclude_frozen_parameters=args.exclude_frozen_parameters)
diff --git a/checkpoint-1100/README.md b/checkpoint-1100/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..16b1eacdd9353dec380a08ee77ce6ed5ab50f12e
--- /dev/null
+++ b/checkpoint-1100/README.md
@@ -0,0 +1,202 @@
+---
+library_name: peft
+base_model: gotzmann/uni
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+### Framework versions
+
+- PEFT 0.10.0
\ No newline at end of file
diff --git a/checkpoint-1100/adapter_config.json b/checkpoint-1100/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..17e258368eee5905260715114fa1cc21fde3fe1c
--- /dev/null
+++ b/checkpoint-1100/adapter_config.json
@@ -0,0 +1,31 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "gotzmann/uni",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 128,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 128,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "k_proj",
+ "q_proj",
+ "o_proj",
+ "v_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": true
+}
\ No newline at end of file
diff --git a/checkpoint-1100/adapter_model.safetensors b/checkpoint-1100/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..9d022d74bf3613c8f84c170d4658e83535a3e026
--- /dev/null
+++ b/checkpoint-1100/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:316ac193cc5592bd8d6e50a1701ef5c28729e999a88c35dc9173f4c9569c2273
+size 1048664848
diff --git a/checkpoint-1100/global_step1100/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt b/checkpoint-1100/global_step1100/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..910725281a3c4439a0731165371ac049d1b49e96
--- /dev/null
+++ b/checkpoint-1100/global_step1100/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e9e593f2ba9cb2b98bec0b40c7eb880c3bd54c0a3d63a0cdfd32f94305bd695
+size 787270042
diff --git a/checkpoint-1100/global_step1100/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt b/checkpoint-1100/global_step1100/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..ecbce101f5ff38ab78f0e433e18405528ec4fa1c
--- /dev/null
+++ b/checkpoint-1100/global_step1100/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a4d424ba42629a4cd118ea7b3a16fee22cadbd2dd1766d07d8714f033c1c1636
+size 787270042
diff --git a/checkpoint-1100/global_step1100/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt b/checkpoint-1100/global_step1100/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..d0129ad9a5a9a6f8880f0c80422f9e422108635d
--- /dev/null
+++ b/checkpoint-1100/global_step1100/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b48c1d0ad0c55442d28d51d18e27a901d5a514306d58f92a9e4c42b6559e36ee
+size 787270042
diff --git a/checkpoint-1100/global_step1100/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt b/checkpoint-1100/global_step1100/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..1933e20cd73be2ba6e600f64cd1223e911acb777
--- /dev/null
+++ b/checkpoint-1100/global_step1100/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fc96e99992a02ff0d586173a7e789722264da08bb8c3af1d6a2fa8a7a53a5c47
+size 787270042
diff --git a/checkpoint-1100/global_step1100/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt b/checkpoint-1100/global_step1100/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..ccdd831979de4f3107539660333fc854a69d80d0
--- /dev/null
+++ b/checkpoint-1100/global_step1100/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:984e073e59af9c46df4c004cceda03b60089ac6401f3e66ede86cdb574808f19
+size 787270042
diff --git a/checkpoint-1100/global_step1100/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt b/checkpoint-1100/global_step1100/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..de7b2c23e961cddd677e9485668d222360dbd49e
--- /dev/null
+++ b/checkpoint-1100/global_step1100/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0b08f0fd2c469f967a09b8848ddf371c0e29ad9c940ae18682c6d15846a629af
+size 787270042
diff --git a/checkpoint-1100/global_step1100/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt b/checkpoint-1100/global_step1100/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..5aa8452e224e919bf9c47e59876009b4133c0e07
--- /dev/null
+++ b/checkpoint-1100/global_step1100/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:99744a67797dd2ddd3291f10d48463a00c17a1ebdfbc6629a3de50a73feb53b6
+size 787270042
diff --git a/checkpoint-1100/global_step1100/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt b/checkpoint-1100/global_step1100/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..872a8a8ea67d65fed422f7abb75e6ec18e835d06
--- /dev/null
+++ b/checkpoint-1100/global_step1100/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7cc97a2cbfe7f66897b4eece4afe406cff5351261db7fb7b7c4b444a4a7fcbb4
+size 787270042
diff --git a/checkpoint-1100/global_step1100/zero_pp_rank_0_mp_rank_00_model_states.pt b/checkpoint-1100/global_step1100/zero_pp_rank_0_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..56b1b9a47bbd6bdc455849e47649a03dccb32248
--- /dev/null
+++ b/checkpoint-1100/global_step1100/zero_pp_rank_0_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9c769cbfb9bb02c99ba367fb77ae4600142c10796a5f2981693d9217e84ec52a
+size 653742
diff --git a/checkpoint-1100/global_step1100/zero_pp_rank_1_mp_rank_00_model_states.pt b/checkpoint-1100/global_step1100/zero_pp_rank_1_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..26ea63fae69e18c1fbeb1640836919e8a9290e13
--- /dev/null
+++ b/checkpoint-1100/global_step1100/zero_pp_rank_1_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5285eb24bfe43180e25a0aed0d6135893889228b40d88c09d87172badb155d02
+size 653742
diff --git a/checkpoint-1100/global_step1100/zero_pp_rank_2_mp_rank_00_model_states.pt b/checkpoint-1100/global_step1100/zero_pp_rank_2_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..7c5c53379e0cb424446e8424be9cca7081c51134
--- /dev/null
+++ b/checkpoint-1100/global_step1100/zero_pp_rank_2_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:45d17f3b92f5f0897599a89c9ca82ccd4bcf980ac05427ec47b37774dd0d7836
+size 653742
diff --git a/checkpoint-1100/global_step1100/zero_pp_rank_3_mp_rank_00_model_states.pt b/checkpoint-1100/global_step1100/zero_pp_rank_3_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..e2ed0042a11be1fd9dfddd799b76d5c7a9d5e35a
--- /dev/null
+++ b/checkpoint-1100/global_step1100/zero_pp_rank_3_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e1005b64cba9f84408a432a5465c0821012797187041b73dd62245150d699401
+size 653742
diff --git a/checkpoint-1100/global_step1100/zero_pp_rank_4_mp_rank_00_model_states.pt b/checkpoint-1100/global_step1100/zero_pp_rank_4_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..6abe8550bbc218a84119ae98c6fa9d182ffd3179
--- /dev/null
+++ b/checkpoint-1100/global_step1100/zero_pp_rank_4_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:33fea851aea5540e1e14ef2e33ba8014b1a1f69e8c22311c0cedfd7e359f979d
+size 653742
diff --git a/checkpoint-1100/global_step1100/zero_pp_rank_5_mp_rank_00_model_states.pt b/checkpoint-1100/global_step1100/zero_pp_rank_5_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..0f1fa6c671027baf62c12a8618cf9a646cb0dc21
--- /dev/null
+++ b/checkpoint-1100/global_step1100/zero_pp_rank_5_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e48f64d145ae4602275a2877ecc448efc9ec58f93f8364811a30a93ced0b76ad
+size 653742
diff --git a/checkpoint-1100/global_step1100/zero_pp_rank_6_mp_rank_00_model_states.pt b/checkpoint-1100/global_step1100/zero_pp_rank_6_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..20720bdb5e208aa3129f7ee9ed0da54850f1a41b
--- /dev/null
+++ b/checkpoint-1100/global_step1100/zero_pp_rank_6_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:60582efe7474aa55d4745b11de94f5e294ef1c61df59dc79360753e051374e71
+size 653742
diff --git a/checkpoint-1100/global_step1100/zero_pp_rank_7_mp_rank_00_model_states.pt b/checkpoint-1100/global_step1100/zero_pp_rank_7_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..800a772e5b331ddeb871df43a2c5b72366b05364
--- /dev/null
+++ b/checkpoint-1100/global_step1100/zero_pp_rank_7_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d1f58731d732acd2c0b1bdda0b00be0412c32ccdac00d654324bbb3ad53c323a
+size 653742
diff --git a/checkpoint-1100/latest b/checkpoint-1100/latest
new file mode 100644
index 0000000000000000000000000000000000000000..22cd5c3402316b70299aed2025d7943595f5d495
--- /dev/null
+++ b/checkpoint-1100/latest
@@ -0,0 +1 @@
+global_step1100
\ No newline at end of file
diff --git a/checkpoint-1100/rng_state_0.pth b/checkpoint-1100/rng_state_0.pth
new file mode 100644
index 0000000000000000000000000000000000000000..d4ade713ef57d0535c32a9251c786bc57de03d06
--- /dev/null
+++ b/checkpoint-1100/rng_state_0.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fb1165242405b17b3d6a8186ae61b13dcb1faa5a54320bebd74ef8d71b964bf7
+size 15984
diff --git a/checkpoint-1100/rng_state_1.pth b/checkpoint-1100/rng_state_1.pth
new file mode 100644
index 0000000000000000000000000000000000000000..d91c511b147b4dd17988903c57adcefb6c1f20b0
--- /dev/null
+++ b/checkpoint-1100/rng_state_1.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:562c262916c9997ec644c42fed9655ab28706b74fca20290ca921c4761d6a4b0
+size 15984
diff --git a/checkpoint-1100/rng_state_2.pth b/checkpoint-1100/rng_state_2.pth
new file mode 100644
index 0000000000000000000000000000000000000000..f71e829b3e3570a540263d07783c4e906a78a803
--- /dev/null
+++ b/checkpoint-1100/rng_state_2.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a8d40f8118f513299624ded0a9bcf09778b961635615090409394d4f96f928f6
+size 15984
diff --git a/checkpoint-1100/rng_state_3.pth b/checkpoint-1100/rng_state_3.pth
new file mode 100644
index 0000000000000000000000000000000000000000..be7f0176676a7c526bb10cbb336b2afa89d8841c
--- /dev/null
+++ b/checkpoint-1100/rng_state_3.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b4391f924238a4cb855c4cbdc6d1a14954f785431c75997d05c7a4ee6615dae7
+size 15984
diff --git a/checkpoint-1100/rng_state_4.pth b/checkpoint-1100/rng_state_4.pth
new file mode 100644
index 0000000000000000000000000000000000000000..8dd1a877dd1f03799067fd08739e82b9f2cd2ad3
--- /dev/null
+++ b/checkpoint-1100/rng_state_4.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:be7b19bb9543a16bf9f4cd96466ac581436f63070f5815f3a7ba57980608994f
+size 15984
diff --git a/checkpoint-1100/rng_state_5.pth b/checkpoint-1100/rng_state_5.pth
new file mode 100644
index 0000000000000000000000000000000000000000..dcf1b720014f72a27a09ab9ef8570430a8e3c96d
--- /dev/null
+++ b/checkpoint-1100/rng_state_5.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:97da4a1ede0a3e0f96411cacd5bfdf84d9355198f7aadc9bcb8be41122043f63
+size 15984
diff --git a/checkpoint-1100/rng_state_6.pth b/checkpoint-1100/rng_state_6.pth
new file mode 100644
index 0000000000000000000000000000000000000000..2b58cbeed7b25ef61c6439aced60df473cbaf6d4
--- /dev/null
+++ b/checkpoint-1100/rng_state_6.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:544cb6421b975bd5d2b2360a4e666003794e6197ae654d2ad963cd6572a86ede
+size 15984
diff --git a/checkpoint-1100/rng_state_7.pth b/checkpoint-1100/rng_state_7.pth
new file mode 100644
index 0000000000000000000000000000000000000000..36a7dcefe0e0264868d40586546699306878a454
--- /dev/null
+++ b/checkpoint-1100/rng_state_7.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f8d6eb32a23f3bef6262bbcb2eda724b2fd6f5e579969aa27c71a5971331722b
+size 15984
diff --git a/checkpoint-1100/scheduler.pt b/checkpoint-1100/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..82133a41263df3772d03bb81dbdf70b97ffb13d9
--- /dev/null
+++ b/checkpoint-1100/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c08441fe4d6081d0dce5306ab4664d738c567d94c90b431beb90d45c64769f66
+size 1064
diff --git a/checkpoint-1100/special_tokens_map.json b/checkpoint-1100/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..72ecfeeb7e14d244c936169d2ed139eeae235ef1
--- /dev/null
+++ b/checkpoint-1100/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "",
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/checkpoint-1100/tokenizer.model b/checkpoint-1100/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..6c00c742ce03c627d6cd5b795984876fa49fa899
--- /dev/null
+++ b/checkpoint-1100/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
diff --git a/checkpoint-1100/tokenizer_config.json b/checkpoint-1100/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..bb5a9f09d8c0f3c32c66fc6118fe5c76c5c6fd90
--- /dev/null
+++ b/checkpoint-1100/tokenizer_config.json
@@ -0,0 +1,45 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "",
+ "chat_template": "{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ '' + '### System:\\n\\n' + system_message }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '\\n\\n### Human:\\n\\n' + content }}{% elif message['role'] == 'assistant' %}{{ '\\n\\n### Assistant:\\n\\n' + content + '' }}{% endif %}{% endfor %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "",
+ "legacy": false,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "split_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "",
+ "use_default_system_prompt": false
+}
diff --git a/checkpoint-1100/trainer_state.json b/checkpoint-1100/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..956ff5b8c9b05ef2d9af28dd21efc1598843c2e6
--- /dev/null
+++ b/checkpoint-1100/trainer_state.json
@@ -0,0 +1,7721 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 1.0914494741655236,
+ "eval_steps": 500,
+ "global_step": 1100,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.0,
+ "grad_norm": 0.849355824164473,
+ "learning_rate": 4.878048780487805e-07,
+ "loss": 1.3655,
+ "step": 1
+ },
+ {
+ "epoch": 0.0,
+ "grad_norm": 10.01567518957158,
+ "learning_rate": 9.75609756097561e-07,
+ "loss": 1.5767,
+ "step": 2
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.6466000875559635,
+ "learning_rate": 1.4634146341463414e-06,
+ "loss": 1.3913,
+ "step": 3
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.6644565932010504,
+ "learning_rate": 1.951219512195122e-06,
+ "loss": 1.3218,
+ "step": 4
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.571354207588475,
+ "learning_rate": 2.4390243902439027e-06,
+ "loss": 1.3597,
+ "step": 5
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.31036262839244955,
+ "learning_rate": 2.926829268292683e-06,
+ "loss": 1.2832,
+ "step": 6
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.2622135027188184,
+ "learning_rate": 3.414634146341464e-06,
+ "loss": 1.2161,
+ "step": 7
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.296824630261661,
+ "learning_rate": 3.902439024390244e-06,
+ "loss": 1.2985,
+ "step": 8
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.2557267467361569,
+ "learning_rate": 4.390243902439025e-06,
+ "loss": 1.3175,
+ "step": 9
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.23418939513890769,
+ "learning_rate": 4.8780487804878055e-06,
+ "loss": 1.2617,
+ "step": 10
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.2364760983285843,
+ "learning_rate": 5.365853658536586e-06,
+ "loss": 1.3103,
+ "step": 11
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.23893034721889,
+ "learning_rate": 5.853658536585366e-06,
+ "loss": 1.2405,
+ "step": 12
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.25563593295485887,
+ "learning_rate": 6.341463414634147e-06,
+ "loss": 1.2831,
+ "step": 13
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.23239975352661665,
+ "learning_rate": 6.829268292682928e-06,
+ "loss": 1.3125,
+ "step": 14
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.3092813858209507,
+ "learning_rate": 7.317073170731707e-06,
+ "loss": 1.2422,
+ "step": 15
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.282563380367434,
+ "learning_rate": 7.804878048780489e-06,
+ "loss": 1.2453,
+ "step": 16
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.22065680088315018,
+ "learning_rate": 8.292682926829268e-06,
+ "loss": 1.2491,
+ "step": 17
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.22777800877980184,
+ "learning_rate": 8.78048780487805e-06,
+ "loss": 1.2655,
+ "step": 18
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.22145212540177928,
+ "learning_rate": 9.268292682926831e-06,
+ "loss": 1.2413,
+ "step": 19
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.22482351883112714,
+ "learning_rate": 9.756097560975611e-06,
+ "loss": 1.2653,
+ "step": 20
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.20823080508385733,
+ "learning_rate": 1.024390243902439e-05,
+ "loss": 1.2374,
+ "step": 21
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.26025492562935737,
+ "learning_rate": 1.0731707317073172e-05,
+ "loss": 1.2065,
+ "step": 22
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.2150252124176173,
+ "learning_rate": 1.1219512195121953e-05,
+ "loss": 1.2782,
+ "step": 23
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.2505915177425618,
+ "learning_rate": 1.1707317073170731e-05,
+ "loss": 1.2742,
+ "step": 24
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.20129223044786942,
+ "learning_rate": 1.2195121951219513e-05,
+ "loss": 1.3366,
+ "step": 25
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.1973508510397107,
+ "learning_rate": 1.2682926829268294e-05,
+ "loss": 1.2476,
+ "step": 26
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.27103325392437194,
+ "learning_rate": 1.3170731707317076e-05,
+ "loss": 1.2325,
+ "step": 27
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.17954976411006285,
+ "learning_rate": 1.3658536585365855e-05,
+ "loss": 1.2523,
+ "step": 28
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.22216997851088888,
+ "learning_rate": 1.4146341463414635e-05,
+ "loss": 1.3297,
+ "step": 29
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.2071458864548587,
+ "learning_rate": 1.4634146341463415e-05,
+ "loss": 1.2127,
+ "step": 30
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.18039422081622164,
+ "learning_rate": 1.5121951219512196e-05,
+ "loss": 1.2509,
+ "step": 31
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.18631254372974412,
+ "learning_rate": 1.5609756097560978e-05,
+ "loss": 1.2247,
+ "step": 32
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.18843872523649827,
+ "learning_rate": 1.6097560975609757e-05,
+ "loss": 1.195,
+ "step": 33
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.2163847267778325,
+ "learning_rate": 1.6585365853658537e-05,
+ "loss": 1.2179,
+ "step": 34
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.19687688475496104,
+ "learning_rate": 1.7073170731707317e-05,
+ "loss": 1.2763,
+ "step": 35
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.20409643064887947,
+ "learning_rate": 1.75609756097561e-05,
+ "loss": 1.253,
+ "step": 36
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.1879182661759335,
+ "learning_rate": 1.804878048780488e-05,
+ "loss": 1.2586,
+ "step": 37
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.19400648948514373,
+ "learning_rate": 1.8536585365853663e-05,
+ "loss": 1.2154,
+ "step": 38
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.1878879343148452,
+ "learning_rate": 1.902439024390244e-05,
+ "loss": 1.2304,
+ "step": 39
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.17687475469924052,
+ "learning_rate": 1.9512195121951222e-05,
+ "loss": 1.2351,
+ "step": 40
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.18223935625384885,
+ "learning_rate": 2e-05,
+ "loss": 1.2222,
+ "step": 41
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.1943061629408338,
+ "learning_rate": 2.048780487804878e-05,
+ "loss": 1.2044,
+ "step": 42
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.17027514338700078,
+ "learning_rate": 2.0975609756097564e-05,
+ "loss": 1.1548,
+ "step": 43
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.18553769630586192,
+ "learning_rate": 2.1463414634146344e-05,
+ "loss": 1.2721,
+ "step": 44
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.19732826914228765,
+ "learning_rate": 2.1951219512195124e-05,
+ "loss": 1.3097,
+ "step": 45
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.18714230986631472,
+ "learning_rate": 2.2439024390243907e-05,
+ "loss": 1.2662,
+ "step": 46
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.19988987568002223,
+ "learning_rate": 2.2926829268292683e-05,
+ "loss": 1.2904,
+ "step": 47
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.17744650133390918,
+ "learning_rate": 2.3414634146341463e-05,
+ "loss": 1.1825,
+ "step": 48
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.16576734763834533,
+ "learning_rate": 2.3902439024390246e-05,
+ "loss": 1.1858,
+ "step": 49
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.179591794065527,
+ "learning_rate": 2.4390243902439026e-05,
+ "loss": 1.2711,
+ "step": 50
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.17923464471176911,
+ "learning_rate": 2.4878048780487805e-05,
+ "loss": 1.2289,
+ "step": 51
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.18991742907836837,
+ "learning_rate": 2.536585365853659e-05,
+ "loss": 1.3097,
+ "step": 52
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.19849796137254636,
+ "learning_rate": 2.5853658536585368e-05,
+ "loss": 1.2489,
+ "step": 53
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.17452371110976383,
+ "learning_rate": 2.634146341463415e-05,
+ "loss": 1.2461,
+ "step": 54
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.17671022353085036,
+ "learning_rate": 2.682926829268293e-05,
+ "loss": 1.153,
+ "step": 55
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.36820559192096686,
+ "learning_rate": 2.731707317073171e-05,
+ "loss": 1.2431,
+ "step": 56
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.20331468526494198,
+ "learning_rate": 2.7804878048780487e-05,
+ "loss": 1.2575,
+ "step": 57
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.2402486598118377,
+ "learning_rate": 2.829268292682927e-05,
+ "loss": 1.2538,
+ "step": 58
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.2549409484173144,
+ "learning_rate": 2.878048780487805e-05,
+ "loss": 1.2065,
+ "step": 59
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.2053105349872685,
+ "learning_rate": 2.926829268292683e-05,
+ "loss": 1.2094,
+ "step": 60
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.17971910872957886,
+ "learning_rate": 2.9756097560975613e-05,
+ "loss": 1.228,
+ "step": 61
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.1885853654992973,
+ "learning_rate": 3.0243902439024392e-05,
+ "loss": 1.2286,
+ "step": 62
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.1848524571968613,
+ "learning_rate": 3.073170731707317e-05,
+ "loss": 1.2718,
+ "step": 63
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.18734105883548513,
+ "learning_rate": 3.1219512195121955e-05,
+ "loss": 1.2357,
+ "step": 64
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.17774668052121825,
+ "learning_rate": 3.170731707317074e-05,
+ "loss": 1.1509,
+ "step": 65
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.17890968008080646,
+ "learning_rate": 3.2195121951219514e-05,
+ "loss": 1.1924,
+ "step": 66
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.18249273371332375,
+ "learning_rate": 3.268292682926829e-05,
+ "loss": 1.2545,
+ "step": 67
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.21064122671902577,
+ "learning_rate": 3.3170731707317074e-05,
+ "loss": 1.2832,
+ "step": 68
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.1820064171955093,
+ "learning_rate": 3.365853658536586e-05,
+ "loss": 1.2071,
+ "step": 69
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.16996662800553433,
+ "learning_rate": 3.414634146341463e-05,
+ "loss": 1.2073,
+ "step": 70
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.1618669302922445,
+ "learning_rate": 3.4634146341463416e-05,
+ "loss": 1.1289,
+ "step": 71
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.18948744950985544,
+ "learning_rate": 3.51219512195122e-05,
+ "loss": 1.2915,
+ "step": 72
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.18326143691603383,
+ "learning_rate": 3.5609756097560976e-05,
+ "loss": 1.2238,
+ "step": 73
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.17410704510700503,
+ "learning_rate": 3.609756097560976e-05,
+ "loss": 1.1784,
+ "step": 74
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.1983667344995625,
+ "learning_rate": 3.658536585365854e-05,
+ "loss": 1.2452,
+ "step": 75
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.3416310763369357,
+ "learning_rate": 3.7073170731707325e-05,
+ "loss": 1.1972,
+ "step": 76
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.2776466983511955,
+ "learning_rate": 3.75609756097561e-05,
+ "loss": 1.3121,
+ "step": 77
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.20026129636576834,
+ "learning_rate": 3.804878048780488e-05,
+ "loss": 1.2436,
+ "step": 78
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.21064549243917835,
+ "learning_rate": 3.853658536585366e-05,
+ "loss": 1.2064,
+ "step": 79
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.22119482175714267,
+ "learning_rate": 3.9024390243902444e-05,
+ "loss": 1.2715,
+ "step": 80
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.23047133748844142,
+ "learning_rate": 3.951219512195122e-05,
+ "loss": 1.2888,
+ "step": 81
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.18741863156973176,
+ "learning_rate": 4e-05,
+ "loss": 1.248,
+ "step": 82
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.1747859810629604,
+ "learning_rate": 4.0487804878048786e-05,
+ "loss": 1.1683,
+ "step": 83
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.1896944798413341,
+ "learning_rate": 4.097560975609756e-05,
+ "loss": 1.2155,
+ "step": 84
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.18724128114363303,
+ "learning_rate": 4.1463414634146346e-05,
+ "loss": 1.2273,
+ "step": 85
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.17368125504855478,
+ "learning_rate": 4.195121951219513e-05,
+ "loss": 1.224,
+ "step": 86
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.18371141013625703,
+ "learning_rate": 4.2439024390243905e-05,
+ "loss": 1.2294,
+ "step": 87
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.1791029365673714,
+ "learning_rate": 4.292682926829269e-05,
+ "loss": 1.2895,
+ "step": 88
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.20259974283859655,
+ "learning_rate": 4.341463414634147e-05,
+ "loss": 1.1841,
+ "step": 89
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.17457456183272174,
+ "learning_rate": 4.390243902439025e-05,
+ "loss": 1.2357,
+ "step": 90
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.1815824380789748,
+ "learning_rate": 4.439024390243903e-05,
+ "loss": 1.2304,
+ "step": 91
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.17566480599583392,
+ "learning_rate": 4.4878048780487814e-05,
+ "loss": 1.242,
+ "step": 92
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.18422975005984474,
+ "learning_rate": 4.536585365853658e-05,
+ "loss": 1.2177,
+ "step": 93
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.16796781877940678,
+ "learning_rate": 4.5853658536585366e-05,
+ "loss": 1.1482,
+ "step": 94
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.18636131653783305,
+ "learning_rate": 4.634146341463415e-05,
+ "loss": 1.1758,
+ "step": 95
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.1823665700289814,
+ "learning_rate": 4.6829268292682926e-05,
+ "loss": 1.289,
+ "step": 96
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.1719900691262439,
+ "learning_rate": 4.731707317073171e-05,
+ "loss": 1.1626,
+ "step": 97
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.17937994168039778,
+ "learning_rate": 4.780487804878049e-05,
+ "loss": 1.175,
+ "step": 98
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.16631851422106986,
+ "learning_rate": 4.829268292682927e-05,
+ "loss": 1.2177,
+ "step": 99
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.19143696232800309,
+ "learning_rate": 4.878048780487805e-05,
+ "loss": 1.3071,
+ "step": 100
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.17859506638780318,
+ "learning_rate": 4.9268292682926835e-05,
+ "loss": 1.2351,
+ "step": 101
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.18381520321248196,
+ "learning_rate": 4.975609756097561e-05,
+ "loss": 1.2342,
+ "step": 102
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.17968218683773912,
+ "learning_rate": 5.0243902439024394e-05,
+ "loss": 1.2074,
+ "step": 103
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.18139489969339018,
+ "learning_rate": 5.073170731707318e-05,
+ "loss": 1.1558,
+ "step": 104
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.17366624842514394,
+ "learning_rate": 5.121951219512195e-05,
+ "loss": 1.1897,
+ "step": 105
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.16034845455223745,
+ "learning_rate": 5.1707317073170736e-05,
+ "loss": 1.179,
+ "step": 106
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.17583069577827776,
+ "learning_rate": 5.219512195121952e-05,
+ "loss": 1.1856,
+ "step": 107
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.1853758076989552,
+ "learning_rate": 5.26829268292683e-05,
+ "loss": 1.2072,
+ "step": 108
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.19597443965936462,
+ "learning_rate": 5.317073170731708e-05,
+ "loss": 1.2271,
+ "step": 109
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.1899206334098331,
+ "learning_rate": 5.365853658536586e-05,
+ "loss": 1.1961,
+ "step": 110
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.17463763837757018,
+ "learning_rate": 5.4146341463414645e-05,
+ "loss": 1.2049,
+ "step": 111
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.20431371701229986,
+ "learning_rate": 5.463414634146342e-05,
+ "loss": 1.2891,
+ "step": 112
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1814475107638498,
+ "learning_rate": 5.51219512195122e-05,
+ "loss": 1.2346,
+ "step": 113
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1883849423207823,
+ "learning_rate": 5.5609756097560974e-05,
+ "loss": 1.244,
+ "step": 114
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1857258128640568,
+ "learning_rate": 5.609756097560976e-05,
+ "loss": 1.2669,
+ "step": 115
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1740768514118401,
+ "learning_rate": 5.658536585365854e-05,
+ "loss": 1.2414,
+ "step": 116
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1919320335584178,
+ "learning_rate": 5.7073170731707317e-05,
+ "loss": 1.2886,
+ "step": 117
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.18288775167828136,
+ "learning_rate": 5.75609756097561e-05,
+ "loss": 1.1875,
+ "step": 118
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.18208588867750863,
+ "learning_rate": 5.804878048780488e-05,
+ "loss": 1.2388,
+ "step": 119
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.1743260015658331,
+ "learning_rate": 5.853658536585366e-05,
+ "loss": 1.1762,
+ "step": 120
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.17856046291517946,
+ "learning_rate": 5.902439024390244e-05,
+ "loss": 1.2888,
+ "step": 121
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.17493794870966536,
+ "learning_rate": 5.9512195121951225e-05,
+ "loss": 1.2222,
+ "step": 122
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.1909202655203384,
+ "learning_rate": 6.000000000000001e-05,
+ "loss": 1.2414,
+ "step": 123
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.18345819482834988,
+ "learning_rate": 6.0487804878048785e-05,
+ "loss": 1.2756,
+ "step": 124
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.2057069352956621,
+ "learning_rate": 6.097560975609757e-05,
+ "loss": 1.261,
+ "step": 125
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.299775882469108,
+ "learning_rate": 6.146341463414634e-05,
+ "loss": 1.2566,
+ "step": 126
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.1869687633018095,
+ "learning_rate": 6.195121951219513e-05,
+ "loss": 1.3039,
+ "step": 127
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.17747149926197442,
+ "learning_rate": 6.243902439024391e-05,
+ "loss": 1.2524,
+ "step": 128
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.17885157788044242,
+ "learning_rate": 6.29268292682927e-05,
+ "loss": 1.2455,
+ "step": 129
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.17617298187845123,
+ "learning_rate": 6.341463414634148e-05,
+ "loss": 1.2009,
+ "step": 130
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.20164176323497066,
+ "learning_rate": 6.390243902439025e-05,
+ "loss": 1.2634,
+ "step": 131
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.20459903417307612,
+ "learning_rate": 6.439024390243903e-05,
+ "loss": 1.1963,
+ "step": 132
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.1863755486334296,
+ "learning_rate": 6.487804878048781e-05,
+ "loss": 1.2387,
+ "step": 133
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.19265866140295207,
+ "learning_rate": 6.536585365853658e-05,
+ "loss": 1.2688,
+ "step": 134
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.1823425868969493,
+ "learning_rate": 6.585365853658536e-05,
+ "loss": 1.2041,
+ "step": 135
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.2016853266472781,
+ "learning_rate": 6.634146341463415e-05,
+ "loss": 1.1223,
+ "step": 136
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.17282675192463448,
+ "learning_rate": 6.682926829268293e-05,
+ "loss": 1.1879,
+ "step": 137
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.17398811693399288,
+ "learning_rate": 6.731707317073171e-05,
+ "loss": 1.2682,
+ "step": 138
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.18516916965434696,
+ "learning_rate": 6.78048780487805e-05,
+ "loss": 1.1666,
+ "step": 139
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.1852213129647933,
+ "learning_rate": 6.829268292682927e-05,
+ "loss": 1.2501,
+ "step": 140
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.17915948766591883,
+ "learning_rate": 6.878048780487805e-05,
+ "loss": 1.2264,
+ "step": 141
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.21599939417233183,
+ "learning_rate": 6.926829268292683e-05,
+ "loss": 1.2376,
+ "step": 142
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.17839304459521851,
+ "learning_rate": 6.975609756097562e-05,
+ "loss": 1.2353,
+ "step": 143
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.20826913231380875,
+ "learning_rate": 7.02439024390244e-05,
+ "loss": 1.1901,
+ "step": 144
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.20788894913361589,
+ "learning_rate": 7.073170731707318e-05,
+ "loss": 1.2577,
+ "step": 145
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.18420055842301297,
+ "learning_rate": 7.121951219512195e-05,
+ "loss": 1.1393,
+ "step": 146
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.19903048468685589,
+ "learning_rate": 7.170731707317073e-05,
+ "loss": 1.2321,
+ "step": 147
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.19074116314985748,
+ "learning_rate": 7.219512195121952e-05,
+ "loss": 1.1912,
+ "step": 148
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.2353816469403903,
+ "learning_rate": 7.26829268292683e-05,
+ "loss": 1.28,
+ "step": 149
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.21634875684769345,
+ "learning_rate": 7.317073170731708e-05,
+ "loss": 1.3312,
+ "step": 150
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.18290969006743918,
+ "learning_rate": 7.365853658536587e-05,
+ "loss": 1.2214,
+ "step": 151
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.18484243897545208,
+ "learning_rate": 7.414634146341465e-05,
+ "loss": 1.1895,
+ "step": 152
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.21882343112978872,
+ "learning_rate": 7.463414634146342e-05,
+ "loss": 1.2219,
+ "step": 153
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.19868284379241205,
+ "learning_rate": 7.51219512195122e-05,
+ "loss": 1.2176,
+ "step": 154
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.20912516312950613,
+ "learning_rate": 7.560975609756097e-05,
+ "loss": 1.242,
+ "step": 155
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.23811880045549916,
+ "learning_rate": 7.609756097560976e-05,
+ "loss": 1.2838,
+ "step": 156
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.19511077122033713,
+ "learning_rate": 7.658536585365854e-05,
+ "loss": 1.1594,
+ "step": 157
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.20094129399534238,
+ "learning_rate": 7.707317073170732e-05,
+ "loss": 1.2966,
+ "step": 158
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.19366245038292418,
+ "learning_rate": 7.75609756097561e-05,
+ "loss": 1.2246,
+ "step": 159
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.19409570223867306,
+ "learning_rate": 7.804878048780489e-05,
+ "loss": 1.2312,
+ "step": 160
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.2087258457033805,
+ "learning_rate": 7.853658536585366e-05,
+ "loss": 1.2169,
+ "step": 161
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.18765223996270428,
+ "learning_rate": 7.902439024390244e-05,
+ "loss": 1.2383,
+ "step": 162
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.20734180224147242,
+ "learning_rate": 7.951219512195122e-05,
+ "loss": 1.2587,
+ "step": 163
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.24690929540287834,
+ "learning_rate": 8e-05,
+ "loss": 1.1951,
+ "step": 164
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.2003538797619543,
+ "learning_rate": 7.999990914797545e-05,
+ "loss": 1.1982,
+ "step": 165
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.22469075613510484,
+ "learning_rate": 7.99996365923145e-05,
+ "loss": 1.2355,
+ "step": 166
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.21870100788336058,
+ "learning_rate": 7.999918233425526e-05,
+ "loss": 1.1103,
+ "step": 167
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.20939989594131886,
+ "learning_rate": 7.999854637586122e-05,
+ "loss": 1.1966,
+ "step": 168
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.43108211416237796,
+ "learning_rate": 7.999772872002132e-05,
+ "loss": 1.2882,
+ "step": 169
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.27045413432174487,
+ "learning_rate": 7.999672937044984e-05,
+ "loss": 1.2399,
+ "step": 170
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.19700483036740515,
+ "learning_rate": 7.999554833168642e-05,
+ "loss": 1.202,
+ "step": 171
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.3335979493370708,
+ "learning_rate": 7.999418560909604e-05,
+ "loss": 1.1995,
+ "step": 172
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.3165803974474567,
+ "learning_rate": 7.999264120886902e-05,
+ "loss": 1.1569,
+ "step": 173
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.1951699080346223,
+ "learning_rate": 7.999091513802093e-05,
+ "loss": 1.1778,
+ "step": 174
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.2087559121749787,
+ "learning_rate": 7.998900740439265e-05,
+ "loss": 1.1736,
+ "step": 175
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.20345180977460478,
+ "learning_rate": 7.998691801665024e-05,
+ "loss": 1.2281,
+ "step": 176
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.24617644827252333,
+ "learning_rate": 7.998464698428495e-05,
+ "loss": 1.2072,
+ "step": 177
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.2469050959356265,
+ "learning_rate": 7.998219431761318e-05,
+ "loss": 1.2242,
+ "step": 178
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.19529317748460623,
+ "learning_rate": 7.997956002777642e-05,
+ "loss": 1.2567,
+ "step": 179
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.19048389491381376,
+ "learning_rate": 7.99767441267412e-05,
+ "loss": 1.2982,
+ "step": 180
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.2085799116493225,
+ "learning_rate": 7.997374662729904e-05,
+ "loss": 1.1254,
+ "step": 181
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.20636853256378995,
+ "learning_rate": 7.997056754306636e-05,
+ "loss": 1.2435,
+ "step": 182
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.20590016382290252,
+ "learning_rate": 7.99672068884845e-05,
+ "loss": 1.2658,
+ "step": 183
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.1931166169764433,
+ "learning_rate": 7.996366467881955e-05,
+ "loss": 1.1637,
+ "step": 184
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.18873318157988098,
+ "learning_rate": 7.995994093016237e-05,
+ "loss": 1.1335,
+ "step": 185
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.19210254625199108,
+ "learning_rate": 7.995603565942846e-05,
+ "loss": 1.1928,
+ "step": 186
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.2130986479765664,
+ "learning_rate": 7.995194888435792e-05,
+ "loss": 1.2158,
+ "step": 187
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.22003854501814088,
+ "learning_rate": 7.994768062351532e-05,
+ "loss": 1.2288,
+ "step": 188
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.20330803191993058,
+ "learning_rate": 7.994323089628968e-05,
+ "loss": 1.2426,
+ "step": 189
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.20567314642208634,
+ "learning_rate": 7.993859972289434e-05,
+ "loss": 1.2649,
+ "step": 190
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.21556663727342962,
+ "learning_rate": 7.993378712436686e-05,
+ "loss": 1.2545,
+ "step": 191
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.20309165469109888,
+ "learning_rate": 7.992879312256897e-05,
+ "loss": 1.3338,
+ "step": 192
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.19574356669421325,
+ "learning_rate": 7.992361774018641e-05,
+ "loss": 1.278,
+ "step": 193
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.2763613746722313,
+ "learning_rate": 7.991826100072891e-05,
+ "loss": 1.2571,
+ "step": 194
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.19346552479915102,
+ "learning_rate": 7.991272292852996e-05,
+ "loss": 1.2027,
+ "step": 195
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.2281167812123908,
+ "learning_rate": 7.990700354874683e-05,
+ "loss": 1.2586,
+ "step": 196
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.19699013712137542,
+ "learning_rate": 7.990110288736042e-05,
+ "loss": 1.1371,
+ "step": 197
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.21768209981475933,
+ "learning_rate": 7.989502097117503e-05,
+ "loss": 1.2522,
+ "step": 198
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.21335427847754582,
+ "learning_rate": 7.988875782781838e-05,
+ "loss": 1.2437,
+ "step": 199
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.21856710629066897,
+ "learning_rate": 7.988231348574147e-05,
+ "loss": 1.2135,
+ "step": 200
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.20482062658774797,
+ "learning_rate": 7.987568797421836e-05,
+ "loss": 1.1755,
+ "step": 201
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.2017756813960897,
+ "learning_rate": 7.986888132334608e-05,
+ "loss": 1.1699,
+ "step": 202
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.20496443848153809,
+ "learning_rate": 7.986189356404458e-05,
+ "loss": 1.2125,
+ "step": 203
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.2134603800558358,
+ "learning_rate": 7.985472472805643e-05,
+ "loss": 1.2391,
+ "step": 204
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.2364175573420861,
+ "learning_rate": 7.98473748479468e-05,
+ "loss": 1.2384,
+ "step": 205
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.1872419861598724,
+ "learning_rate": 7.983984395710326e-05,
+ "loss": 1.1457,
+ "step": 206
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.28222194007095774,
+ "learning_rate": 7.983213208973566e-05,
+ "loss": 1.2952,
+ "step": 207
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.1916094851162064,
+ "learning_rate": 7.982423928087593e-05,
+ "loss": 1.1763,
+ "step": 208
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.18446245256166657,
+ "learning_rate": 7.981616556637795e-05,
+ "loss": 1.1863,
+ "step": 209
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.195191961022491,
+ "learning_rate": 7.980791098291737e-05,
+ "loss": 1.2036,
+ "step": 210
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.2652439657825496,
+ "learning_rate": 7.979947556799151e-05,
+ "loss": 1.2834,
+ "step": 211
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.24308438957843412,
+ "learning_rate": 7.979085935991906e-05,
+ "loss": 1.234,
+ "step": 212
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.21294701043622016,
+ "learning_rate": 7.978206239784004e-05,
+ "loss": 1.3006,
+ "step": 213
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.25809277041859524,
+ "learning_rate": 7.977308472171553e-05,
+ "loss": 1.2272,
+ "step": 214
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.193463860107294,
+ "learning_rate": 7.976392637232754e-05,
+ "loss": 1.2295,
+ "step": 215
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.2150023760609626,
+ "learning_rate": 7.975458739127877e-05,
+ "loss": 1.2135,
+ "step": 216
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.22590495955605894,
+ "learning_rate": 7.974506782099253e-05,
+ "loss": 1.2532,
+ "step": 217
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.21023744668403702,
+ "learning_rate": 7.973536770471242e-05,
+ "loss": 1.2472,
+ "step": 218
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.2345749799511543,
+ "learning_rate": 7.972548708650218e-05,
+ "loss": 1.1791,
+ "step": 219
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.2158876734005217,
+ "learning_rate": 7.971542601124553e-05,
+ "loss": 1.2483,
+ "step": 220
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.29455339949432446,
+ "learning_rate": 7.970518452464593e-05,
+ "loss": 1.2894,
+ "step": 221
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.23983708730626851,
+ "learning_rate": 7.969476267322636e-05,
+ "loss": 1.271,
+ "step": 222
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.1922400905426158,
+ "learning_rate": 7.968416050432912e-05,
+ "loss": 1.2139,
+ "step": 223
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.2238136844422931,
+ "learning_rate": 7.967337806611568e-05,
+ "loss": 1.2655,
+ "step": 224
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.21230292828267672,
+ "learning_rate": 7.966241540756631e-05,
+ "loss": 1.2406,
+ "step": 225
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.26656119419070456,
+ "learning_rate": 7.965127257848004e-05,
+ "loss": 1.2595,
+ "step": 226
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.22381385502992684,
+ "learning_rate": 7.963994962947426e-05,
+ "loss": 1.1737,
+ "step": 227
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20056702203994298,
+ "learning_rate": 7.962844661198462e-05,
+ "loss": 1.1969,
+ "step": 228
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20148701321526885,
+ "learning_rate": 7.961676357826478e-05,
+ "loss": 1.2151,
+ "step": 229
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20034834807028637,
+ "learning_rate": 7.960490058138604e-05,
+ "loss": 1.1455,
+ "step": 230
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.21050838521846033,
+ "learning_rate": 7.959285767523732e-05,
+ "loss": 1.2223,
+ "step": 231
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20904772138969777,
+ "learning_rate": 7.95806349145247e-05,
+ "loss": 1.2534,
+ "step": 232
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.20307877304792957,
+ "learning_rate": 7.956823235477134e-05,
+ "loss": 1.1352,
+ "step": 233
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.20501105270897094,
+ "learning_rate": 7.95556500523171e-05,
+ "loss": 1.2031,
+ "step": 234
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.19800586972038586,
+ "learning_rate": 7.954288806431838e-05,
+ "loss": 1.2567,
+ "step": 235
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.2175102450594135,
+ "learning_rate": 7.952994644874777e-05,
+ "loss": 1.2538,
+ "step": 236
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.22698189300067595,
+ "learning_rate": 7.951682526439391e-05,
+ "loss": 1.3088,
+ "step": 237
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.19208392014975315,
+ "learning_rate": 7.950352457086109e-05,
+ "loss": 1.2336,
+ "step": 238
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.27004086334319655,
+ "learning_rate": 7.949004442856905e-05,
+ "loss": 1.2012,
+ "step": 239
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.23420974954538043,
+ "learning_rate": 7.947638489875272e-05,
+ "loss": 1.2244,
+ "step": 240
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.20514399124802024,
+ "learning_rate": 7.946254604346186e-05,
+ "loss": 1.2548,
+ "step": 241
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.19334973602372896,
+ "learning_rate": 7.944852792556092e-05,
+ "loss": 1.2104,
+ "step": 242
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.1992640714537956,
+ "learning_rate": 7.943433060872858e-05,
+ "loss": 1.2628,
+ "step": 243
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.203284617090413,
+ "learning_rate": 7.941995415745761e-05,
+ "loss": 1.2002,
+ "step": 244
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.22795306969682058,
+ "learning_rate": 7.94053986370545e-05,
+ "loss": 1.2215,
+ "step": 245
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.20789041346838505,
+ "learning_rate": 7.939066411363915e-05,
+ "loss": 1.0998,
+ "step": 246
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.22354868884742066,
+ "learning_rate": 7.937575065414464e-05,
+ "loss": 1.2564,
+ "step": 247
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.21176392726647736,
+ "learning_rate": 7.936065832631687e-05,
+ "loss": 1.2816,
+ "step": 248
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.19967179557235587,
+ "learning_rate": 7.934538719871427e-05,
+ "loss": 1.1961,
+ "step": 249
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.210819577350627,
+ "learning_rate": 7.932993734070747e-05,
+ "loss": 1.2167,
+ "step": 250
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.21537794551756187,
+ "learning_rate": 7.931430882247903e-05,
+ "loss": 1.2341,
+ "step": 251
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.22850872387256574,
+ "learning_rate": 7.929850171502304e-05,
+ "loss": 1.1686,
+ "step": 252
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.22380366415076383,
+ "learning_rate": 7.928251609014493e-05,
+ "loss": 1.1462,
+ "step": 253
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.22426923149036065,
+ "learning_rate": 7.926635202046102e-05,
+ "loss": 1.1792,
+ "step": 254
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.42082703321103965,
+ "learning_rate": 7.925000957939822e-05,
+ "loss": 1.2718,
+ "step": 255
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.2235432774854074,
+ "learning_rate": 7.92334888411937e-05,
+ "loss": 1.2598,
+ "step": 256
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.281644028934108,
+ "learning_rate": 7.92167898808946e-05,
+ "loss": 1.2205,
+ "step": 257
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.2037705143888748,
+ "learning_rate": 7.919991277435763e-05,
+ "loss": 1.1737,
+ "step": 258
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.20917419230028977,
+ "learning_rate": 7.918285759824879e-05,
+ "loss": 1.2035,
+ "step": 259
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.20510847570635518,
+ "learning_rate": 7.916562443004292e-05,
+ "loss": 1.2135,
+ "step": 260
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.25172483071092466,
+ "learning_rate": 7.914821334802342e-05,
+ "loss": 1.2218,
+ "step": 261
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.21102706700634313,
+ "learning_rate": 7.91306244312819e-05,
+ "loss": 1.1738,
+ "step": 262
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.22626060872645815,
+ "learning_rate": 7.911285775971781e-05,
+ "loss": 1.238,
+ "step": 263
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.22448567539778486,
+ "learning_rate": 7.909491341403805e-05,
+ "loss": 1.2404,
+ "step": 264
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.2019099786139193,
+ "learning_rate": 7.907679147575661e-05,
+ "loss": 1.213,
+ "step": 265
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.24307234839096267,
+ "learning_rate": 7.905849202719422e-05,
+ "loss": 1.2322,
+ "step": 266
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.19801890521743487,
+ "learning_rate": 7.904001515147802e-05,
+ "loss": 1.2448,
+ "step": 267
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.2102742273575385,
+ "learning_rate": 7.902136093254106e-05,
+ "loss": 1.1657,
+ "step": 268
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.2173464476815016,
+ "learning_rate": 7.900252945512201e-05,
+ "loss": 1.2549,
+ "step": 269
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.20957275458699595,
+ "learning_rate": 7.898352080476479e-05,
+ "loss": 1.2536,
+ "step": 270
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.20691966388952363,
+ "learning_rate": 7.896433506781811e-05,
+ "loss": 1.2661,
+ "step": 271
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.2276662275112648,
+ "learning_rate": 7.894497233143509e-05,
+ "loss": 1.2409,
+ "step": 272
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.23854109569301263,
+ "learning_rate": 7.892543268357297e-05,
+ "loss": 1.2681,
+ "step": 273
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.2233864156677627,
+ "learning_rate": 7.890571621299252e-05,
+ "loss": 1.1687,
+ "step": 274
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.20114129147925475,
+ "learning_rate": 7.888582300925787e-05,
+ "loss": 1.2184,
+ "step": 275
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.2154654670569462,
+ "learning_rate": 7.886575316273586e-05,
+ "loss": 1.1982,
+ "step": 276
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.2292982209343639,
+ "learning_rate": 7.884550676459583e-05,
+ "loss": 1.2129,
+ "step": 277
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.21302713135229548,
+ "learning_rate": 7.882508390680908e-05,
+ "loss": 1.1605,
+ "step": 278
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.2123661020671048,
+ "learning_rate": 7.88044846821485e-05,
+ "loss": 1.2308,
+ "step": 279
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.2080577410800404,
+ "learning_rate": 7.878370918418818e-05,
+ "loss": 1.2195,
+ "step": 280
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.19663901881127385,
+ "learning_rate": 7.876275750730289e-05,
+ "loss": 1.1591,
+ "step": 281
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.20534502031312163,
+ "learning_rate": 7.874162974666776e-05,
+ "loss": 1.2664,
+ "step": 282
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.23240445399513837,
+ "learning_rate": 7.872032599825779e-05,
+ "loss": 1.2151,
+ "step": 283
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.2672527316717507,
+ "learning_rate": 7.86988463588474e-05,
+ "loss": 1.2406,
+ "step": 284
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.19893903058743695,
+ "learning_rate": 7.867719092601003e-05,
+ "loss": 1.1291,
+ "step": 285
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.33275268109930917,
+ "learning_rate": 7.865535979811768e-05,
+ "loss": 1.1406,
+ "step": 286
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.2373619455690358,
+ "learning_rate": 7.863335307434045e-05,
+ "loss": 1.2799,
+ "step": 287
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.263235735390858,
+ "learning_rate": 7.861117085464612e-05,
+ "loss": 1.2415,
+ "step": 288
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.25884281780784324,
+ "learning_rate": 7.858881323979965e-05,
+ "loss": 1.3919,
+ "step": 289
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.25426288332255736,
+ "learning_rate": 7.85662803313628e-05,
+ "loss": 1.174,
+ "step": 290
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.26655405527881243,
+ "learning_rate": 7.854357223169356e-05,
+ "loss": 1.2806,
+ "step": 291
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.20909844432349833,
+ "learning_rate": 7.852068904394579e-05,
+ "loss": 1.2627,
+ "step": 292
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.21307115068935759,
+ "learning_rate": 7.849763087206866e-05,
+ "loss": 1.1879,
+ "step": 293
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.25009949471398946,
+ "learning_rate": 7.847439782080628e-05,
+ "loss": 1.2881,
+ "step": 294
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.20960783418679174,
+ "learning_rate": 7.845098999569712e-05,
+ "loss": 1.2723,
+ "step": 295
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.24968832437925104,
+ "learning_rate": 7.842740750307362e-05,
+ "loss": 1.2029,
+ "step": 296
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.22981196585125677,
+ "learning_rate": 7.84036504500616e-05,
+ "loss": 1.1695,
+ "step": 297
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.2320606844751365,
+ "learning_rate": 7.837971894457991e-05,
+ "loss": 1.2317,
+ "step": 298
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.23051459673906124,
+ "learning_rate": 7.835561309533981e-05,
+ "loss": 1.2046,
+ "step": 299
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.2510027231060586,
+ "learning_rate": 7.833133301184457e-05,
+ "loss": 1.199,
+ "step": 300
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.23601180466018787,
+ "learning_rate": 7.830687880438895e-05,
+ "loss": 1.1755,
+ "step": 301
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.24740820934385369,
+ "learning_rate": 7.828225058405864e-05,
+ "loss": 1.2054,
+ "step": 302
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.23065372979111173,
+ "learning_rate": 7.825744846272984e-05,
+ "loss": 1.2066,
+ "step": 303
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.22385077334838213,
+ "learning_rate": 7.823247255306866e-05,
+ "loss": 1.2147,
+ "step": 304
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.42981213948386104,
+ "learning_rate": 7.820732296853074e-05,
+ "loss": 1.2314,
+ "step": 305
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.21122844902751076,
+ "learning_rate": 7.818199982336058e-05,
+ "loss": 1.1462,
+ "step": 306
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.23374869692118933,
+ "learning_rate": 7.815650323259117e-05,
+ "loss": 1.2051,
+ "step": 307
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.21662363795962128,
+ "learning_rate": 7.813083331204332e-05,
+ "loss": 1.1575,
+ "step": 308
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.2088315773384112,
+ "learning_rate": 7.810499017832526e-05,
+ "loss": 1.1316,
+ "step": 309
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.2095238410730976,
+ "learning_rate": 7.807897394883203e-05,
+ "loss": 1.2087,
+ "step": 310
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.22672932127256515,
+ "learning_rate": 7.805278474174499e-05,
+ "loss": 1.2512,
+ "step": 311
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.21873052340922736,
+ "learning_rate": 7.802642267603126e-05,
+ "loss": 1.1909,
+ "step": 312
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.219814521916342,
+ "learning_rate": 7.79998878714432e-05,
+ "loss": 1.1669,
+ "step": 313
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.3049426027257317,
+ "learning_rate": 7.797318044851786e-05,
+ "loss": 1.1797,
+ "step": 314
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.22309435690065985,
+ "learning_rate": 7.794630052857638e-05,
+ "loss": 1.1417,
+ "step": 315
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.3891885169154885,
+ "learning_rate": 7.791924823372354e-05,
+ "loss": 1.2369,
+ "step": 316
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.24780269452456372,
+ "learning_rate": 7.789202368684711e-05,
+ "loss": 1.2521,
+ "step": 317
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.21660460720269362,
+ "learning_rate": 7.786462701161738e-05,
+ "loss": 1.2151,
+ "step": 318
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.23635409466561857,
+ "learning_rate": 7.783705833248649e-05,
+ "loss": 1.2363,
+ "step": 319
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.2616135839903218,
+ "learning_rate": 7.780931777468797e-05,
+ "loss": 1.2428,
+ "step": 320
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.21461059159245083,
+ "learning_rate": 7.77814054642361e-05,
+ "loss": 1.1434,
+ "step": 321
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.25348824286656163,
+ "learning_rate": 7.775332152792539e-05,
+ "loss": 1.2368,
+ "step": 322
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.22275034726331247,
+ "learning_rate": 7.772506609332995e-05,
+ "loss": 1.1827,
+ "step": 323
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.25030821228147526,
+ "learning_rate": 7.769663928880298e-05,
+ "loss": 1.2428,
+ "step": 324
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.22251804398745534,
+ "learning_rate": 7.766804124347608e-05,
+ "loss": 1.1889,
+ "step": 325
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.23381455520411995,
+ "learning_rate": 7.763927208725879e-05,
+ "loss": 1.2115,
+ "step": 326
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.27341902651946226,
+ "learning_rate": 7.761033195083791e-05,
+ "loss": 1.2535,
+ "step": 327
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.24862471659814522,
+ "learning_rate": 7.758122096567694e-05,
+ "loss": 1.2128,
+ "step": 328
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.2251357082045494,
+ "learning_rate": 7.755193926401547e-05,
+ "loss": 1.2334,
+ "step": 329
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.3173274941622932,
+ "learning_rate": 7.752248697886857e-05,
+ "loss": 1.226,
+ "step": 330
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.23056440717672175,
+ "learning_rate": 7.74928642440263e-05,
+ "loss": 1.2339,
+ "step": 331
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2801507500859342,
+ "learning_rate": 7.746307119405286e-05,
+ "loss": 1.287,
+ "step": 332
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2267818430426272,
+ "learning_rate": 7.743310796428622e-05,
+ "loss": 1.1916,
+ "step": 333
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2777329160365585,
+ "learning_rate": 7.74029746908374e-05,
+ "loss": 1.252,
+ "step": 334
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.25289169762353,
+ "learning_rate": 7.737267151058983e-05,
+ "loss": 1.2153,
+ "step": 335
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2424670686901653,
+ "learning_rate": 7.734219856119875e-05,
+ "loss": 1.2227,
+ "step": 336
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.22747092217441645,
+ "learning_rate": 7.731155598109067e-05,
+ "loss": 1.19,
+ "step": 337
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.2307810940100189,
+ "learning_rate": 7.728074390946257e-05,
+ "loss": 1.1818,
+ "step": 338
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.2583402574655623,
+ "learning_rate": 7.724976248628142e-05,
+ "loss": 1.1608,
+ "step": 339
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.22140209760890694,
+ "learning_rate": 7.721861185228347e-05,
+ "loss": 1.1245,
+ "step": 340
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.25859310758244686,
+ "learning_rate": 7.718729214897362e-05,
+ "loss": 1.2247,
+ "step": 341
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.26371179531372124,
+ "learning_rate": 7.715580351862482e-05,
+ "loss": 1.2128,
+ "step": 342
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.26575541302851047,
+ "learning_rate": 7.712414610427733e-05,
+ "loss": 1.2443,
+ "step": 343
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.269978305197599,
+ "learning_rate": 7.709232004973816e-05,
+ "loss": 1.2231,
+ "step": 344
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.26583998705977047,
+ "learning_rate": 7.70603254995804e-05,
+ "loss": 1.2476,
+ "step": 345
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.24256062164066097,
+ "learning_rate": 7.702816259914253e-05,
+ "loss": 1.2901,
+ "step": 346
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.3463123472658915,
+ "learning_rate": 7.699583149452779e-05,
+ "loss": 1.3277,
+ "step": 347
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.2269096590531878,
+ "learning_rate": 7.696333233260345e-05,
+ "loss": 1.2047,
+ "step": 348
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.25136883001050025,
+ "learning_rate": 7.693066526100031e-05,
+ "loss": 1.1619,
+ "step": 349
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.2565112571116145,
+ "learning_rate": 7.68978304281118e-05,
+ "loss": 1.2389,
+ "step": 350
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.22175779550828703,
+ "learning_rate": 7.686482798309349e-05,
+ "loss": 1.2238,
+ "step": 351
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.22588304332216555,
+ "learning_rate": 7.683165807586234e-05,
+ "loss": 1.174,
+ "step": 352
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.24889474296529737,
+ "learning_rate": 7.6798320857096e-05,
+ "loss": 1.2366,
+ "step": 353
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.27339703806525034,
+ "learning_rate": 7.676481647823214e-05,
+ "loss": 1.2356,
+ "step": 354
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.23424666722888365,
+ "learning_rate": 7.673114509146782e-05,
+ "loss": 1.2089,
+ "step": 355
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.27978285392461766,
+ "learning_rate": 7.66973068497587e-05,
+ "loss": 1.2609,
+ "step": 356
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.2509423350138824,
+ "learning_rate": 7.666330190681844e-05,
+ "loss": 1.1777,
+ "step": 357
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.23007730927468031,
+ "learning_rate": 7.662913041711793e-05,
+ "loss": 1.154,
+ "step": 358
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.2438648674953112,
+ "learning_rate": 7.659479253588462e-05,
+ "loss": 1.2257,
+ "step": 359
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.28816093242092233,
+ "learning_rate": 7.65602884191018e-05,
+ "loss": 1.2558,
+ "step": 360
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.24972815300596035,
+ "learning_rate": 7.652561822350793e-05,
+ "loss": 1.2837,
+ "step": 361
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.2543189139697063,
+ "learning_rate": 7.649078210659587e-05,
+ "loss": 1.2193,
+ "step": 362
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.2237937956718952,
+ "learning_rate": 7.645578022661224e-05,
+ "loss": 1.2237,
+ "step": 363
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.29742029408787396,
+ "learning_rate": 7.642061274255657e-05,
+ "loss": 1.2116,
+ "step": 364
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2462883147335493,
+ "learning_rate": 7.638527981418075e-05,
+ "loss": 1.1827,
+ "step": 365
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2647802498907096,
+ "learning_rate": 7.634978160198817e-05,
+ "loss": 1.2739,
+ "step": 366
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.22360398779217264,
+ "learning_rate": 7.631411826723306e-05,
+ "loss": 1.2185,
+ "step": 367
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2635048004593543,
+ "learning_rate": 7.627828997191973e-05,
+ "loss": 1.2317,
+ "step": 368
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2764803449917684,
+ "learning_rate": 7.624229687880184e-05,
+ "loss": 1.1923,
+ "step": 369
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.25724943233414527,
+ "learning_rate": 7.620613915138166e-05,
+ "loss": 1.2218,
+ "step": 370
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.2858318045794755,
+ "learning_rate": 7.61698169539093e-05,
+ "loss": 1.1496,
+ "step": 371
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.23547216647460364,
+ "learning_rate": 7.613333045138206e-05,
+ "loss": 1.1905,
+ "step": 372
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.22984814903684375,
+ "learning_rate": 7.609667980954355e-05,
+ "loss": 1.2009,
+ "step": 373
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.2551903754079084,
+ "learning_rate": 7.605986519488301e-05,
+ "loss": 1.2042,
+ "step": 374
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.2508257410125616,
+ "learning_rate": 7.602288677463457e-05,
+ "loss": 1.2468,
+ "step": 375
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.25324577774935964,
+ "learning_rate": 7.598574471677644e-05,
+ "loss": 1.2603,
+ "step": 376
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.35888776531769967,
+ "learning_rate": 7.59484391900302e-05,
+ "loss": 1.1929,
+ "step": 377
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.22048517191014724,
+ "learning_rate": 7.591097036385994e-05,
+ "loss": 1.1783,
+ "step": 378
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.2781160412746083,
+ "learning_rate": 7.587333840847162e-05,
+ "loss": 1.3397,
+ "step": 379
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.24033046830332258,
+ "learning_rate": 7.583554349481222e-05,
+ "loss": 1.2436,
+ "step": 380
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.26413762380260003,
+ "learning_rate": 7.579758579456893e-05,
+ "loss": 1.1917,
+ "step": 381
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.2390937887338632,
+ "learning_rate": 7.575946548016847e-05,
+ "loss": 1.2186,
+ "step": 382
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.25131263043429275,
+ "learning_rate": 7.572118272477622e-05,
+ "loss": 1.2538,
+ "step": 383
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.223974104870702,
+ "learning_rate": 7.568273770229546e-05,
+ "loss": 1.2165,
+ "step": 384
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.25840356830252875,
+ "learning_rate": 7.564413058736663e-05,
+ "loss": 1.1848,
+ "step": 385
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.2723156683076603,
+ "learning_rate": 7.560536155536641e-05,
+ "loss": 1.1982,
+ "step": 386
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.265687427976889,
+ "learning_rate": 7.556643078240708e-05,
+ "loss": 1.231,
+ "step": 387
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.25152762080976077,
+ "learning_rate": 7.552733844533562e-05,
+ "loss": 1.1974,
+ "step": 388
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.2366049485053541,
+ "learning_rate": 7.548808472173292e-05,
+ "loss": 1.3119,
+ "step": 389
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.22092196577077122,
+ "learning_rate": 7.5448669789913e-05,
+ "loss": 1.195,
+ "step": 390
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.22667521540462374,
+ "learning_rate": 7.540909382892217e-05,
+ "loss": 1.1431,
+ "step": 391
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.25432207282646513,
+ "learning_rate": 7.536935701853823e-05,
+ "loss": 1.2173,
+ "step": 392
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.29950506457923864,
+ "learning_rate": 7.53294595392697e-05,
+ "loss": 1.1962,
+ "step": 393
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.24735689607229913,
+ "learning_rate": 7.528940157235487e-05,
+ "loss": 1.2053,
+ "step": 394
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.24394198607459663,
+ "learning_rate": 7.524918329976114e-05,
+ "loss": 1.1979,
+ "step": 395
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.2630369372689188,
+ "learning_rate": 7.520880490418409e-05,
+ "loss": 1.2111,
+ "step": 396
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.26275028416291457,
+ "learning_rate": 7.516826656904664e-05,
+ "loss": 1.2133,
+ "step": 397
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.23938074620956928,
+ "learning_rate": 7.512756847849831e-05,
+ "loss": 1.1355,
+ "step": 398
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.3724960610098138,
+ "learning_rate": 7.508671081741428e-05,
+ "loss": 1.2572,
+ "step": 399
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.24161685847894723,
+ "learning_rate": 7.504569377139462e-05,
+ "loss": 1.1706,
+ "step": 400
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.26121591322670523,
+ "learning_rate": 7.50045175267634e-05,
+ "loss": 1.2135,
+ "step": 401
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2465579498164775,
+ "learning_rate": 7.496318227056788e-05,
+ "loss": 1.1641,
+ "step": 402
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2556288696122787,
+ "learning_rate": 7.492168819057767e-05,
+ "loss": 1.2939,
+ "step": 403
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.261481216336303,
+ "learning_rate": 7.488003547528382e-05,
+ "loss": 1.2026,
+ "step": 404
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2389415135676362,
+ "learning_rate": 7.483822431389799e-05,
+ "loss": 1.2131,
+ "step": 405
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2559201956627192,
+ "learning_rate": 7.479625489635162e-05,
+ "loss": 1.1246,
+ "step": 406
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.27127932491822604,
+ "learning_rate": 7.475412741329504e-05,
+ "loss": 1.2429,
+ "step": 407
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.27006004008695594,
+ "learning_rate": 7.47118420560966e-05,
+ "loss": 1.2388,
+ "step": 408
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.23716823297200537,
+ "learning_rate": 7.466939901684182e-05,
+ "loss": 1.1264,
+ "step": 409
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.2885373898669248,
+ "learning_rate": 7.462679848833252e-05,
+ "loss": 1.2786,
+ "step": 410
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.49215227598639927,
+ "learning_rate": 7.458404066408588e-05,
+ "loss": 1.2386,
+ "step": 411
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.24235735604947403,
+ "learning_rate": 7.454112573833368e-05,
+ "loss": 1.1423,
+ "step": 412
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.2584614748054343,
+ "learning_rate": 7.449805390602127e-05,
+ "loss": 1.2669,
+ "step": 413
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.23806123085998873,
+ "learning_rate": 7.445482536280684e-05,
+ "loss": 1.1763,
+ "step": 414
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.24459517607786851,
+ "learning_rate": 7.441144030506043e-05,
+ "loss": 1.198,
+ "step": 415
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.25801616402700395,
+ "learning_rate": 7.436789892986304e-05,
+ "loss": 1.2136,
+ "step": 416
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.2814819942392514,
+ "learning_rate": 7.432420143500578e-05,
+ "loss": 1.2398,
+ "step": 417
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.22134709322606153,
+ "learning_rate": 7.428034801898893e-05,
+ "loss": 1.1592,
+ "step": 418
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.2899677536995633,
+ "learning_rate": 7.42363388810211e-05,
+ "loss": 1.2296,
+ "step": 419
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.24005943230262294,
+ "learning_rate": 7.419217422101822e-05,
+ "loss": 1.2223,
+ "step": 420
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.26417562369496167,
+ "learning_rate": 7.414785423960275e-05,
+ "loss": 1.2261,
+ "step": 421
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.2580815883535521,
+ "learning_rate": 7.410337913810271e-05,
+ "loss": 1.2021,
+ "step": 422
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.25242217589496435,
+ "learning_rate": 7.405874911855071e-05,
+ "loss": 1.239,
+ "step": 423
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.21991733999839932,
+ "learning_rate": 7.401396438368315e-05,
+ "loss": 1.1716,
+ "step": 424
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.40116538322720213,
+ "learning_rate": 7.396902513693924e-05,
+ "loss": 1.2773,
+ "step": 425
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.277333939455099,
+ "learning_rate": 7.392393158246002e-05,
+ "loss": 1.2574,
+ "step": 426
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.27146087746385755,
+ "learning_rate": 7.387868392508756e-05,
+ "loss": 1.2243,
+ "step": 427
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.255881055620786,
+ "learning_rate": 7.38332823703639e-05,
+ "loss": 1.223,
+ "step": 428
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.24807364856677255,
+ "learning_rate": 7.378772712453021e-05,
+ "loss": 1.1985,
+ "step": 429
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.25746257617764423,
+ "learning_rate": 7.37420183945258e-05,
+ "loss": 1.2502,
+ "step": 430
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.28851991049982234,
+ "learning_rate": 7.369615638798722e-05,
+ "loss": 1.2535,
+ "step": 431
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.24113389811604363,
+ "learning_rate": 7.365014131324725e-05,
+ "loss": 1.2227,
+ "step": 432
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.2414465151257969,
+ "learning_rate": 7.360397337933405e-05,
+ "loss": 1.1884,
+ "step": 433
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.2735463134699831,
+ "learning_rate": 7.355765279597011e-05,
+ "loss": 1.2756,
+ "step": 434
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.2588437452987293,
+ "learning_rate": 7.351117977357139e-05,
+ "loss": 1.2108,
+ "step": 435
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.26573294117796553,
+ "learning_rate": 7.346455452324629e-05,
+ "loss": 1.1821,
+ "step": 436
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.2555476577827304,
+ "learning_rate": 7.341777725679473e-05,
+ "loss": 1.1937,
+ "step": 437
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.2867704132108098,
+ "learning_rate": 7.337084818670716e-05,
+ "loss": 1.2272,
+ "step": 438
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.27726678115981157,
+ "learning_rate": 7.332376752616367e-05,
+ "loss": 1.2331,
+ "step": 439
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.26955338021079955,
+ "learning_rate": 7.32765354890329e-05,
+ "loss": 1.1731,
+ "step": 440
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.25250321202536524,
+ "learning_rate": 7.322915228987116e-05,
+ "loss": 1.2653,
+ "step": 441
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.24748844179765395,
+ "learning_rate": 7.318161814392143e-05,
+ "loss": 1.24,
+ "step": 442
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.28177805247356325,
+ "learning_rate": 7.313393326711239e-05,
+ "loss": 1.185,
+ "step": 443
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.24093242000396312,
+ "learning_rate": 7.30860978760574e-05,
+ "loss": 1.1994,
+ "step": 444
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.26277803901457075,
+ "learning_rate": 7.30381121880536e-05,
+ "loss": 1.212,
+ "step": 445
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2506524258682433,
+ "learning_rate": 7.298997642108079e-05,
+ "loss": 1.2421,
+ "step": 446
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2840599700015824,
+ "learning_rate": 7.294169079380061e-05,
+ "loss": 1.1818,
+ "step": 447
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.24892184038117549,
+ "learning_rate": 7.289325552555538e-05,
+ "loss": 1.1916,
+ "step": 448
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2700898428541357,
+ "learning_rate": 7.284467083636722e-05,
+ "loss": 1.2517,
+ "step": 449
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2617848546539419,
+ "learning_rate": 7.279593694693698e-05,
+ "loss": 1.2063,
+ "step": 450
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2698278585334131,
+ "learning_rate": 7.274705407864332e-05,
+ "loss": 1.194,
+ "step": 451
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.23678313024953834,
+ "learning_rate": 7.26980224535416e-05,
+ "loss": 1.2349,
+ "step": 452
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.24851875792002978,
+ "learning_rate": 7.264884229436293e-05,
+ "loss": 1.1758,
+ "step": 453
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.24122080121681125,
+ "learning_rate": 7.259951382451318e-05,
+ "loss": 1.1962,
+ "step": 454
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.22741322959884405,
+ "learning_rate": 7.25500372680719e-05,
+ "loss": 1.1702,
+ "step": 455
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.2297475610861458,
+ "learning_rate": 7.250041284979137e-05,
+ "loss": 1.1466,
+ "step": 456
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.3057605989721467,
+ "learning_rate": 7.245064079509553e-05,
+ "loss": 1.246,
+ "step": 457
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.2719638501597136,
+ "learning_rate": 7.240072133007899e-05,
+ "loss": 1.2184,
+ "step": 458
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.2436807816414479,
+ "learning_rate": 7.235065468150593e-05,
+ "loss": 1.2324,
+ "step": 459
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.23436349430255515,
+ "learning_rate": 7.23004410768092e-05,
+ "loss": 1.1813,
+ "step": 460
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.2398940990211377,
+ "learning_rate": 7.22500807440892e-05,
+ "loss": 1.1924,
+ "step": 461
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.2605716625062531,
+ "learning_rate": 7.219957391211281e-05,
+ "loss": 1.182,
+ "step": 462
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.260462524570941,
+ "learning_rate": 7.214892081031244e-05,
+ "loss": 1.2136,
+ "step": 463
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.21979766512306334,
+ "learning_rate": 7.209812166878491e-05,
+ "loss": 1.2066,
+ "step": 464
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.23324453647530663,
+ "learning_rate": 7.204717671829051e-05,
+ "loss": 1.1657,
+ "step": 465
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.2529434935507481,
+ "learning_rate": 7.199608619025177e-05,
+ "loss": 1.2093,
+ "step": 466
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.25371701891720116,
+ "learning_rate": 7.194485031675265e-05,
+ "loss": 1.2225,
+ "step": 467
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.23272423066292103,
+ "learning_rate": 7.189346933053725e-05,
+ "loss": 1.1721,
+ "step": 468
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.25122928735587546,
+ "learning_rate": 7.184194346500892e-05,
+ "loss": 1.2537,
+ "step": 469
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.2159270875490409,
+ "learning_rate": 7.179027295422913e-05,
+ "loss": 1.197,
+ "step": 470
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.2633111059076544,
+ "learning_rate": 7.173845803291636e-05,
+ "loss": 1.1721,
+ "step": 471
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.30555936322098703,
+ "learning_rate": 7.168649893644517e-05,
+ "loss": 1.3011,
+ "step": 472
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.23492670111453726,
+ "learning_rate": 7.163439590084502e-05,
+ "loss": 1.1601,
+ "step": 473
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.26602734263721806,
+ "learning_rate": 7.158214916279923e-05,
+ "loss": 1.2808,
+ "step": 474
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.3182695007856262,
+ "learning_rate": 7.152975895964386e-05,
+ "loss": 1.2967,
+ "step": 475
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.2785021674736721,
+ "learning_rate": 7.147722552936673e-05,
+ "loss": 1.1789,
+ "step": 476
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.279474303138652,
+ "learning_rate": 7.142454911060627e-05,
+ "loss": 1.2596,
+ "step": 477
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.2556980144910755,
+ "learning_rate": 7.137172994265044e-05,
+ "loss": 1.2426,
+ "step": 478
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.3311256331993533,
+ "learning_rate": 7.131876826543565e-05,
+ "loss": 1.2059,
+ "step": 479
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.26467296197775253,
+ "learning_rate": 7.12656643195457e-05,
+ "loss": 1.2482,
+ "step": 480
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.27444885274652553,
+ "learning_rate": 7.121241834621064e-05,
+ "loss": 1.2528,
+ "step": 481
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.2572283861115396,
+ "learning_rate": 7.115903058730567e-05,
+ "loss": 1.1849,
+ "step": 482
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.2677065778235683,
+ "learning_rate": 7.11055012853501e-05,
+ "loss": 1.2011,
+ "step": 483
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.29470622036742816,
+ "learning_rate": 7.105183068350619e-05,
+ "loss": 1.2398,
+ "step": 484
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.27609230248969197,
+ "learning_rate": 7.099801902557811e-05,
+ "loss": 1.2259,
+ "step": 485
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.24248634168099284,
+ "learning_rate": 7.094406655601073e-05,
+ "loss": 1.2282,
+ "step": 486
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.2765941767688746,
+ "learning_rate": 7.088997351988865e-05,
+ "loss": 1.2319,
+ "step": 487
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.29347776909858947,
+ "learning_rate": 7.083574016293493e-05,
+ "loss": 1.1765,
+ "step": 488
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.285370295424537,
+ "learning_rate": 7.078136673151008e-05,
+ "loss": 1.26,
+ "step": 489
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.29408734903836536,
+ "learning_rate": 7.072685347261093e-05,
+ "loss": 1.226,
+ "step": 490
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.27437470239205813,
+ "learning_rate": 7.067220063386947e-05,
+ "loss": 1.1976,
+ "step": 491
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.2680770258777871,
+ "learning_rate": 7.061740846355176e-05,
+ "loss": 1.1915,
+ "step": 492
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.27200362879502954,
+ "learning_rate": 7.056247721055678e-05,
+ "loss": 1.2002,
+ "step": 493
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.2637811092577037,
+ "learning_rate": 7.050740712441528e-05,
+ "loss": 1.287,
+ "step": 494
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.24657959209271266,
+ "learning_rate": 7.045219845528875e-05,
+ "loss": 1.2284,
+ "step": 495
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.25311992110358666,
+ "learning_rate": 7.039685145396812e-05,
+ "loss": 1.1616,
+ "step": 496
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.2564633694193358,
+ "learning_rate": 7.034136637187275e-05,
+ "loss": 1.2067,
+ "step": 497
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.2446797651174144,
+ "learning_rate": 7.028574346104926e-05,
+ "loss": 1.2284,
+ "step": 498
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.2592751463399255,
+ "learning_rate": 7.022998297417034e-05,
+ "loss": 1.2371,
+ "step": 499
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.2500713943206808,
+ "learning_rate": 7.017408516453365e-05,
+ "loss": 1.1061,
+ "step": 500
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.2812266276040743,
+ "learning_rate": 7.011805028606064e-05,
+ "loss": 1.1949,
+ "step": 501
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.298829667668083,
+ "learning_rate": 7.006187859329544e-05,
+ "loss": 1.2313,
+ "step": 502
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.26518768159745104,
+ "learning_rate": 7.000557034140361e-05,
+ "loss": 1.2246,
+ "step": 503
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.3037280360760458,
+ "learning_rate": 6.994912578617113e-05,
+ "loss": 1.1617,
+ "step": 504
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.2726903109255714,
+ "learning_rate": 6.989254518400309e-05,
+ "loss": 1.2415,
+ "step": 505
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.25568082003046966,
+ "learning_rate": 6.98358287919226e-05,
+ "loss": 1.1817,
+ "step": 506
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.25633294893705044,
+ "learning_rate": 6.97789768675696e-05,
+ "loss": 1.2149,
+ "step": 507
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.28291439435087123,
+ "learning_rate": 6.972198966919972e-05,
+ "loss": 1.1578,
+ "step": 508
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.27195184756655516,
+ "learning_rate": 6.966486745568308e-05,
+ "loss": 1.2355,
+ "step": 509
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.239159568376005,
+ "learning_rate": 6.960761048650312e-05,
+ "loss": 1.1688,
+ "step": 510
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.22961475425949177,
+ "learning_rate": 6.955021902175543e-05,
+ "loss": 1.2094,
+ "step": 511
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.27443773600741117,
+ "learning_rate": 6.949269332214651e-05,
+ "loss": 1.2559,
+ "step": 512
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.26230551832002097,
+ "learning_rate": 6.94350336489927e-05,
+ "loss": 1.2121,
+ "step": 513
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.2716742985303849,
+ "learning_rate": 6.937724026421892e-05,
+ "loss": 1.2444,
+ "step": 514
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.2537850139439542,
+ "learning_rate": 6.931931343035742e-05,
+ "loss": 1.1327,
+ "step": 515
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.28599587967496826,
+ "learning_rate": 6.926125341054676e-05,
+ "loss": 1.2236,
+ "step": 516
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.26780654378470103,
+ "learning_rate": 6.920306046853043e-05,
+ "loss": 1.2295,
+ "step": 517
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.23606296888412015,
+ "learning_rate": 6.914473486865577e-05,
+ "loss": 1.1543,
+ "step": 518
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.34976881174240837,
+ "learning_rate": 6.90862768758727e-05,
+ "loss": 1.2067,
+ "step": 519
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.2481257873494882,
+ "learning_rate": 6.902768675573258e-05,
+ "loss": 1.2188,
+ "step": 520
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.2996395778117021,
+ "learning_rate": 6.896896477438699e-05,
+ "loss": 1.2326,
+ "step": 521
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.8839768816333193,
+ "learning_rate": 6.891011119858643e-05,
+ "loss": 1.2435,
+ "step": 522
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.2851882482058998,
+ "learning_rate": 6.885112629567927e-05,
+ "loss": 1.2644,
+ "step": 523
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.2813663482913699,
+ "learning_rate": 6.879201033361035e-05,
+ "loss": 1.2309,
+ "step": 524
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.3257551560135454,
+ "learning_rate": 6.873276358091996e-05,
+ "loss": 1.2755,
+ "step": 525
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.28930479952494365,
+ "learning_rate": 6.867338630674247e-05,
+ "loss": 1.1962,
+ "step": 526
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.3077462996938649,
+ "learning_rate": 6.861387878080511e-05,
+ "loss": 1.2402,
+ "step": 527
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.2848900193452761,
+ "learning_rate": 6.855424127342688e-05,
+ "loss": 1.2748,
+ "step": 528
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.4765938812802202,
+ "learning_rate": 6.849447405551718e-05,
+ "loss": 1.2226,
+ "step": 529
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.53184473292579,
+ "learning_rate": 6.843457739857467e-05,
+ "loss": 1.2347,
+ "step": 530
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.6416239346492343,
+ "learning_rate": 6.837455157468596e-05,
+ "loss": 1.2429,
+ "step": 531
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.3188092712502773,
+ "learning_rate": 6.831439685652442e-05,
+ "loss": 1.216,
+ "step": 532
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.3527495731006385,
+ "learning_rate": 6.825411351734895e-05,
+ "loss": 1.1682,
+ "step": 533
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 0.29603753744741856,
+ "learning_rate": 6.819370183100274e-05,
+ "loss": 1.1434,
+ "step": 534
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 0.5252450389976622,
+ "learning_rate": 6.813316207191198e-05,
+ "loss": 1.1943,
+ "step": 535
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 0.32999419558659937,
+ "learning_rate": 6.807249451508466e-05,
+ "loss": 1.192,
+ "step": 536
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 0.3650175469778724,
+ "learning_rate": 6.801169943610929e-05,
+ "loss": 1.2141,
+ "step": 537
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 1.0643532150783557,
+ "learning_rate": 6.795077711115368e-05,
+ "loss": 1.2253,
+ "step": 538
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.5041310609130145,
+ "learning_rate": 6.788972781696363e-05,
+ "loss": 1.278,
+ "step": 539
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.5123058164360991,
+ "learning_rate": 6.782855183086177e-05,
+ "loss": 1.2231,
+ "step": 540
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.3533015702394419,
+ "learning_rate": 6.776724943074619e-05,
+ "loss": 1.2072,
+ "step": 541
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.30253964625417207,
+ "learning_rate": 6.770582089508927e-05,
+ "loss": 1.1382,
+ "step": 542
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.348991618828202,
+ "learning_rate": 6.764426650293633e-05,
+ "loss": 1.2079,
+ "step": 543
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.46017440578788743,
+ "learning_rate": 6.758258653390444e-05,
+ "loss": 1.1813,
+ "step": 544
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.31962101755594885,
+ "learning_rate": 6.75207812681811e-05,
+ "loss": 1.1339,
+ "step": 545
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.37092024548285923,
+ "learning_rate": 6.745885098652298e-05,
+ "loss": 1.2591,
+ "step": 546
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.32347106450715835,
+ "learning_rate": 6.739679597025466e-05,
+ "loss": 1.2017,
+ "step": 547
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.39250187112342494,
+ "learning_rate": 6.733461650126733e-05,
+ "loss": 1.0933,
+ "step": 548
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.473522452217324,
+ "learning_rate": 6.727231286201752e-05,
+ "loss": 1.1124,
+ "step": 549
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.4809062179622052,
+ "learning_rate": 6.720988533552582e-05,
+ "loss": 1.1585,
+ "step": 550
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.3529662801059162,
+ "learning_rate": 6.714733420537559e-05,
+ "loss": 1.0501,
+ "step": 551
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.5958247214391118,
+ "learning_rate": 6.708465975571168e-05,
+ "loss": 1.1086,
+ "step": 552
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.5341364205022454,
+ "learning_rate": 6.70218622712391e-05,
+ "loss": 1.0518,
+ "step": 553
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.3601805724462006,
+ "learning_rate": 6.695894203722181e-05,
+ "loss": 1.1779,
+ "step": 554
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.43410190338280613,
+ "learning_rate": 6.68958993394813e-05,
+ "loss": 1.093,
+ "step": 555
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.46217742572873594,
+ "learning_rate": 6.683273446439546e-05,
+ "loss": 1.0117,
+ "step": 556
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.8591682373623357,
+ "learning_rate": 6.676944769889708e-05,
+ "loss": 1.1002,
+ "step": 557
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.7383229487622726,
+ "learning_rate": 6.670603933047272e-05,
+ "loss": 1.0779,
+ "step": 558
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.5965305891207813,
+ "learning_rate": 6.664250964716131e-05,
+ "loss": 1.0889,
+ "step": 559
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.6030858606684543,
+ "learning_rate": 6.657885893755288e-05,
+ "loss": 1.0982,
+ "step": 560
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.4644510682398409,
+ "learning_rate": 6.65150874907872e-05,
+ "loss": 1.1004,
+ "step": 561
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.43943285132452564,
+ "learning_rate": 6.645119559655254e-05,
+ "loss": 1.0536,
+ "step": 562
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.4456395978600012,
+ "learning_rate": 6.638718354508427e-05,
+ "loss": 1.0733,
+ "step": 563
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.3303824433217466,
+ "learning_rate": 6.632305162716365e-05,
+ "loss": 1.0552,
+ "step": 564
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.3617704823170143,
+ "learning_rate": 6.62588001341164e-05,
+ "loss": 1.1092,
+ "step": 565
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.4465013349903427,
+ "learning_rate": 6.619442935781141e-05,
+ "loss": 1.0781,
+ "step": 566
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.48516780613791277,
+ "learning_rate": 6.612993959065947e-05,
+ "loss": 1.0686,
+ "step": 567
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.38867820318536633,
+ "learning_rate": 6.606533112561186e-05,
+ "loss": 1.1215,
+ "step": 568
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.38566119820378336,
+ "learning_rate": 6.600060425615907e-05,
+ "loss": 1.1213,
+ "step": 569
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.35534855445058544,
+ "learning_rate": 6.593575927632947e-05,
+ "loss": 1.0955,
+ "step": 570
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.38124406233349717,
+ "learning_rate": 6.587079648068795e-05,
+ "loss": 1.0659,
+ "step": 571
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.454750160923548,
+ "learning_rate": 6.580571616433457e-05,
+ "loss": 1.1149,
+ "step": 572
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.35353190088025255,
+ "learning_rate": 6.574051862290325e-05,
+ "loss": 1.0388,
+ "step": 573
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.3249395594793626,
+ "learning_rate": 6.567520415256045e-05,
+ "loss": 1.0784,
+ "step": 574
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.40078898818247227,
+ "learning_rate": 6.560977305000375e-05,
+ "loss": 1.0859,
+ "step": 575
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.4115264795060035,
+ "learning_rate": 6.554422561246054e-05,
+ "loss": 1.1828,
+ "step": 576
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.30090229228069215,
+ "learning_rate": 6.54785621376867e-05,
+ "loss": 1.0901,
+ "step": 577
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.28827860350299206,
+ "learning_rate": 6.541278292396523e-05,
+ "loss": 1.0277,
+ "step": 578
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.34690404488996757,
+ "learning_rate": 6.534688827010484e-05,
+ "loss": 1.048,
+ "step": 579
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.29943113556644785,
+ "learning_rate": 6.528087847543867e-05,
+ "loss": 1.0646,
+ "step": 580
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.37318202575874415,
+ "learning_rate": 6.521475383982291e-05,
+ "loss": 1.1091,
+ "step": 581
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.3049663659203959,
+ "learning_rate": 6.51485146636354e-05,
+ "loss": 1.0552,
+ "step": 582
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.3342407867509692,
+ "learning_rate": 6.508216124777431e-05,
+ "loss": 1.2227,
+ "step": 583
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.3348396047855952,
+ "learning_rate": 6.501569389365674e-05,
+ "loss": 1.0861,
+ "step": 584
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.30951429367513383,
+ "learning_rate": 6.494911290321737e-05,
+ "loss": 1.0461,
+ "step": 585
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.33898401361064606,
+ "learning_rate": 6.488241857890711e-05,
+ "loss": 1.0854,
+ "step": 586
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.4901462068263497,
+ "learning_rate": 6.481561122369164e-05,
+ "loss": 1.1012,
+ "step": 587
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.3179574879809652,
+ "learning_rate": 6.474869114105018e-05,
+ "loss": 1.0451,
+ "step": 588
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.32159328915060714,
+ "learning_rate": 6.468165863497395e-05,
+ "loss": 1.0458,
+ "step": 589
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.36462235008537297,
+ "learning_rate": 6.461451400996491e-05,
+ "loss": 1.1247,
+ "step": 590
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.5373862753611778,
+ "learning_rate": 6.454725757103432e-05,
+ "loss": 1.0542,
+ "step": 591
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.3160409270291303,
+ "learning_rate": 6.447988962370133e-05,
+ "loss": 1.0829,
+ "step": 592
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.390452102978435,
+ "learning_rate": 6.441241047399169e-05,
+ "loss": 1.192,
+ "step": 593
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.3802122712014928,
+ "learning_rate": 6.434482042843627e-05,
+ "loss": 1.1153,
+ "step": 594
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.4081584328242501,
+ "learning_rate": 6.427711979406966e-05,
+ "loss": 1.1635,
+ "step": 595
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.3791962989638633,
+ "learning_rate": 6.420930887842889e-05,
+ "loss": 1.1581,
+ "step": 596
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.33239440056484193,
+ "learning_rate": 6.414138798955189e-05,
+ "loss": 1.0926,
+ "step": 597
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.3279881540815014,
+ "learning_rate": 6.407335743597616e-05,
+ "loss": 1.1386,
+ "step": 598
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.30309644763750837,
+ "learning_rate": 6.40052175267374e-05,
+ "loss": 1.0523,
+ "step": 599
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.3349097308403333,
+ "learning_rate": 6.393696857136801e-05,
+ "loss": 1.0815,
+ "step": 600
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.3288227593556618,
+ "learning_rate": 6.386861087989581e-05,
+ "loss": 1.015,
+ "step": 601
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.36685586740843157,
+ "learning_rate": 6.380014476284255e-05,
+ "loss": 1.1232,
+ "step": 602
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.3620977714204643,
+ "learning_rate": 6.373157053122243e-05,
+ "loss": 1.1138,
+ "step": 603
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.3130587018197183,
+ "learning_rate": 6.366288849654091e-05,
+ "loss": 1.1255,
+ "step": 604
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.3602737087072766,
+ "learning_rate": 6.359409897079303e-05,
+ "loss": 1.0282,
+ "step": 605
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.31168852571991945,
+ "learning_rate": 6.352520226646222e-05,
+ "loss": 1.0779,
+ "step": 606
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.3516045580189353,
+ "learning_rate": 6.345619869651871e-05,
+ "loss": 1.1028,
+ "step": 607
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.3231857927563657,
+ "learning_rate": 6.33870885744182e-05,
+ "loss": 1.1202,
+ "step": 608
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.30205205129701157,
+ "learning_rate": 6.331787221410041e-05,
+ "loss": 1.1369,
+ "step": 609
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.3198359813888166,
+ "learning_rate": 6.32485499299877e-05,
+ "loss": 1.1763,
+ "step": 610
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.3128641370321787,
+ "learning_rate": 6.31791220369835e-05,
+ "loss": 1.0223,
+ "step": 611
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.2989105616213649,
+ "learning_rate": 6.31095888504711e-05,
+ "loss": 1.0358,
+ "step": 612
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.3103537906853337,
+ "learning_rate": 6.303995068631203e-05,
+ "loss": 1.1261,
+ "step": 613
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.28598715532508207,
+ "learning_rate": 6.297020786084467e-05,
+ "loss": 1.0629,
+ "step": 614
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.29809789918093255,
+ "learning_rate": 6.290036069088288e-05,
+ "loss": 1.035,
+ "step": 615
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.33765270252261453,
+ "learning_rate": 6.283040949371451e-05,
+ "loss": 1.1221,
+ "step": 616
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.3424617501293415,
+ "learning_rate": 6.276035458709993e-05,
+ "loss": 1.155,
+ "step": 617
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.3799189737987811,
+ "learning_rate": 6.269019628927067e-05,
+ "loss": 1.0701,
+ "step": 618
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.3358898935253196,
+ "learning_rate": 6.261993491892791e-05,
+ "loss": 1.1649,
+ "step": 619
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.31569979424117356,
+ "learning_rate": 6.254957079524099e-05,
+ "loss": 1.0633,
+ "step": 620
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.3002168156888237,
+ "learning_rate": 6.247910423784609e-05,
+ "loss": 1.0846,
+ "step": 621
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.3097238823450595,
+ "learning_rate": 6.24085355668447e-05,
+ "loss": 1.0808,
+ "step": 622
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.3120312761417578,
+ "learning_rate": 6.233786510280212e-05,
+ "loss": 1.0142,
+ "step": 623
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.3335343015064923,
+ "learning_rate": 6.22670931667461e-05,
+ "loss": 1.0674,
+ "step": 624
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.3234062304634526,
+ "learning_rate": 6.219622008016533e-05,
+ "loss": 1.0981,
+ "step": 625
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.32152678786547273,
+ "learning_rate": 6.212524616500798e-05,
+ "loss": 1.0244,
+ "step": 626
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.39031977608147594,
+ "learning_rate": 6.205417174368023e-05,
+ "loss": 1.1205,
+ "step": 627
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.3806189090017157,
+ "learning_rate": 6.198299713904485e-05,
+ "loss": 1.1134,
+ "step": 628
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.2978349276971668,
+ "learning_rate": 6.191172267441967e-05,
+ "loss": 1.0088,
+ "step": 629
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.3190354077382501,
+ "learning_rate": 6.184034867357617e-05,
+ "loss": 1.108,
+ "step": 630
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.32633048665038994,
+ "learning_rate": 6.176887546073797e-05,
+ "loss": 1.0825,
+ "step": 631
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.3428026413020903,
+ "learning_rate": 6.169730336057939e-05,
+ "loss": 1.0765,
+ "step": 632
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.3475737151929015,
+ "learning_rate": 6.162563269822391e-05,
+ "loss": 1.0693,
+ "step": 633
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.3870252154591392,
+ "learning_rate": 6.15538637992428e-05,
+ "loss": 1.1081,
+ "step": 634
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.33597355193652834,
+ "learning_rate": 6.148199698965352e-05,
+ "loss": 1.0893,
+ "step": 635
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.30805894179787247,
+ "learning_rate": 6.141003259591834e-05,
+ "loss": 1.0995,
+ "step": 636
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.3025073882734066,
+ "learning_rate": 6.133797094494281e-05,
+ "loss": 1.0388,
+ "step": 637
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.3524395196391662,
+ "learning_rate": 6.126581236407429e-05,
+ "loss": 1.1196,
+ "step": 638
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.3377646188130345,
+ "learning_rate": 6.119355718110039e-05,
+ "loss": 1.0382,
+ "step": 639
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.35508400659785483,
+ "learning_rate": 6.112120572424763e-05,
+ "loss": 1.1402,
+ "step": 640
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.3454418793700457,
+ "learning_rate": 6.104875832217982e-05,
+ "loss": 1.1032,
+ "step": 641
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.32629806837059866,
+ "learning_rate": 6.097621530399661e-05,
+ "loss": 1.0959,
+ "step": 642
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.3329536837751315,
+ "learning_rate": 6.090357699923202e-05,
+ "loss": 1.0467,
+ "step": 643
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.32302233828349475,
+ "learning_rate": 6.083084373785287e-05,
+ "loss": 1.0858,
+ "step": 644
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.3310358826507611,
+ "learning_rate": 6.075801585025739e-05,
+ "loss": 1.0715,
+ "step": 645
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.319322035854079,
+ "learning_rate": 6.068509366727362e-05,
+ "loss": 1.177,
+ "step": 646
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.3065230667302707,
+ "learning_rate": 6.061207752015797e-05,
+ "loss": 1.0649,
+ "step": 647
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.29926795565748227,
+ "learning_rate": 6.053896774059368e-05,
+ "loss": 1.1325,
+ "step": 648
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.3556069634279046,
+ "learning_rate": 6.046576466068931e-05,
+ "loss": 1.1366,
+ "step": 649
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.3189191131461966,
+ "learning_rate": 6.039246861297727e-05,
+ "loss": 1.0693,
+ "step": 650
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.3347197156648834,
+ "learning_rate": 6.031907993041227e-05,
+ "loss": 1.1009,
+ "step": 651
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.32274156348185445,
+ "learning_rate": 6.0245598946369826e-05,
+ "loss": 1.1675,
+ "step": 652
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.35534089035455224,
+ "learning_rate": 6.017202599464476e-05,
+ "loss": 1.1723,
+ "step": 653
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.3106026578570133,
+ "learning_rate": 6.009836140944965e-05,
+ "loss": 1.0954,
+ "step": 654
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.3309144454564729,
+ "learning_rate": 6.002460552541331e-05,
+ "loss": 1.0209,
+ "step": 655
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.3023619281400003,
+ "learning_rate": 5.9950758677579345e-05,
+ "loss": 1.0363,
+ "step": 656
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.3311182880219704,
+ "learning_rate": 5.987682120140451e-05,
+ "loss": 1.0515,
+ "step": 657
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.33396486010030413,
+ "learning_rate": 5.980279343275729e-05,
+ "loss": 1.1251,
+ "step": 658
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.3465764556678002,
+ "learning_rate": 5.97286757079163e-05,
+ "loss": 1.165,
+ "step": 659
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.304193441363374,
+ "learning_rate": 5.965446836356882e-05,
+ "loss": 1.0228,
+ "step": 660
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.3415149030413082,
+ "learning_rate": 5.9580171736809224e-05,
+ "loss": 1.0742,
+ "step": 661
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.33138658321132064,
+ "learning_rate": 5.950578616513746e-05,
+ "loss": 1.0843,
+ "step": 662
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.30774403421162994,
+ "learning_rate": 5.943131198645752e-05,
+ "loss": 1.065,
+ "step": 663
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.3428877492183819,
+ "learning_rate": 5.9356749539075885e-05,
+ "loss": 1.1101,
+ "step": 664
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.3621290546130101,
+ "learning_rate": 5.928209916170003e-05,
+ "loss": 1.1372,
+ "step": 665
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.3482375945469884,
+ "learning_rate": 5.9207361193436865e-05,
+ "loss": 1.132,
+ "step": 666
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.31754384974068384,
+ "learning_rate": 5.9132535973791156e-05,
+ "loss": 1.148,
+ "step": 667
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.36003834782050365,
+ "learning_rate": 5.9057623842664044e-05,
+ "loss": 1.1099,
+ "step": 668
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.2963701622969662,
+ "learning_rate": 5.8982625140351464e-05,
+ "loss": 1.0755,
+ "step": 669
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.32579569606066516,
+ "learning_rate": 5.8907540207542616e-05,
+ "loss": 1.0809,
+ "step": 670
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.4247563451753457,
+ "learning_rate": 5.8832369385318416e-05,
+ "loss": 1.097,
+ "step": 671
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.33076932102169776,
+ "learning_rate": 5.875711301514992e-05,
+ "loss": 1.1078,
+ "step": 672
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.3609238032332309,
+ "learning_rate": 5.8681771438896815e-05,
+ "loss": 1.1031,
+ "step": 673
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.325159585649425,
+ "learning_rate": 5.860634499880583e-05,
+ "loss": 1.0707,
+ "step": 674
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.4620687271068983,
+ "learning_rate": 5.853083403750922e-05,
+ "loss": 1.1017,
+ "step": 675
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.33485279064365936,
+ "learning_rate": 5.845523889802316e-05,
+ "loss": 1.0989,
+ "step": 676
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.30952573170841513,
+ "learning_rate": 5.8379559923746214e-05,
+ "loss": 1.0393,
+ "step": 677
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.33498605810588283,
+ "learning_rate": 5.830379745845781e-05,
+ "loss": 1.1259,
+ "step": 678
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.35771921163037307,
+ "learning_rate": 5.822795184631659e-05,
+ "loss": 1.0815,
+ "step": 679
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.3329650192347647,
+ "learning_rate": 5.815202343185894e-05,
+ "loss": 1.1344,
+ "step": 680
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3356634465845771,
+ "learning_rate": 5.807601255999736e-05,
+ "loss": 1.1297,
+ "step": 681
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3289442034151235,
+ "learning_rate": 5.7999919576018934e-05,
+ "loss": 1.022,
+ "step": 682
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3207007334784113,
+ "learning_rate": 5.7923744825583745e-05,
+ "loss": 1.0571,
+ "step": 683
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3582460325329284,
+ "learning_rate": 5.7847488654723304e-05,
+ "loss": 1.0778,
+ "step": 684
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3563317666176927,
+ "learning_rate": 5.777115140983899e-05,
+ "loss": 1.1003,
+ "step": 685
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 3.4694912945702105,
+ "learning_rate": 5.769473343770047e-05,
+ "loss": 1.121,
+ "step": 686
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.43002349520483113,
+ "learning_rate": 5.761823508544411e-05,
+ "loss": 1.0765,
+ "step": 687
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.39467783104839754,
+ "learning_rate": 5.754165670057142e-05,
+ "loss": 1.0788,
+ "step": 688
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.39629029674867916,
+ "learning_rate": 5.7464998630947464e-05,
+ "loss": 1.0812,
+ "step": 689
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.3880152093965208,
+ "learning_rate": 5.738826122479929e-05,
+ "loss": 1.1228,
+ "step": 690
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.3777874121959188,
+ "learning_rate": 5.7311444830714324e-05,
+ "loss": 1.0907,
+ "step": 691
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.38004041653523696,
+ "learning_rate": 5.723454979763882e-05,
+ "loss": 1.1263,
+ "step": 692
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.37049672627797636,
+ "learning_rate": 5.7157576474876246e-05,
+ "loss": 1.1438,
+ "step": 693
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.32973606103437614,
+ "learning_rate": 5.7080525212085725e-05,
+ "loss": 1.0553,
+ "step": 694
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.31674639252070325,
+ "learning_rate": 5.700339635928038e-05,
+ "loss": 1.06,
+ "step": 695
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.32282199426553837,
+ "learning_rate": 5.692619026682588e-05,
+ "loss": 1.0841,
+ "step": 696
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.4810882958061859,
+ "learning_rate": 5.684890728543869e-05,
+ "loss": 1.0803,
+ "step": 697
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.3995638550178378,
+ "learning_rate": 5.6771547766184566e-05,
+ "loss": 1.1187,
+ "step": 698
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.35264932960583484,
+ "learning_rate": 5.669411206047699e-05,
+ "loss": 1.0641,
+ "step": 699
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.35240640524733,
+ "learning_rate": 5.661660052007547e-05,
+ "loss": 1.076,
+ "step": 700
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.3540694609860389,
+ "learning_rate": 5.653901349708401e-05,
+ "loss": 1.1369,
+ "step": 701
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.3196055112925304,
+ "learning_rate": 5.646135134394955e-05,
+ "loss": 1.0677,
+ "step": 702
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.4214141007955914,
+ "learning_rate": 5.6383614413460266e-05,
+ "loss": 1.1139,
+ "step": 703
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.3625611311798579,
+ "learning_rate": 5.630580305874402e-05,
+ "loss": 1.1845,
+ "step": 704
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.3425208672181188,
+ "learning_rate": 5.62279176332668e-05,
+ "loss": 1.174,
+ "step": 705
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.3108419862818321,
+ "learning_rate": 5.6149958490830996e-05,
+ "loss": 1.0331,
+ "step": 706
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.3274644181571904,
+ "learning_rate": 5.607192598557394e-05,
+ "loss": 1.0664,
+ "step": 707
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.346218197215145,
+ "learning_rate": 5.599382047196617e-05,
+ "loss": 1.2088,
+ "step": 708
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.328497632267458,
+ "learning_rate": 5.591564230480989e-05,
+ "loss": 1.0287,
+ "step": 709
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.3708173720611468,
+ "learning_rate": 5.583739183923732e-05,
+ "loss": 1.0883,
+ "step": 710
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.3631427403535479,
+ "learning_rate": 5.575906943070915e-05,
+ "loss": 1.1155,
+ "step": 711
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.3305201458598695,
+ "learning_rate": 5.5680675435012834e-05,
+ "loss": 1.0958,
+ "step": 712
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.34978833532083714,
+ "learning_rate": 5.5602210208261036e-05,
+ "loss": 1.1437,
+ "step": 713
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.3510553882510229,
+ "learning_rate": 5.552367410688999e-05,
+ "loss": 1.0941,
+ "step": 714
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.3523747462465078,
+ "learning_rate": 5.544506748765789e-05,
+ "loss": 1.1289,
+ "step": 715
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.38262637783927445,
+ "learning_rate": 5.5366390707643266e-05,
+ "loss": 1.099,
+ "step": 716
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.38620065989073454,
+ "learning_rate": 5.528764412424334e-05,
+ "loss": 1.083,
+ "step": 717
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.3401355276121096,
+ "learning_rate": 5.520882809517245e-05,
+ "loss": 1.028,
+ "step": 718
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.3392061008943934,
+ "learning_rate": 5.512994297846039e-05,
+ "loss": 1.1083,
+ "step": 719
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.34219480421015414,
+ "learning_rate": 5.505098913245077e-05,
+ "loss": 1.1108,
+ "step": 720
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.3275058061553761,
+ "learning_rate": 5.497196691579945e-05,
+ "loss": 1.111,
+ "step": 721
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.36800249746509384,
+ "learning_rate": 5.489287668747283e-05,
+ "loss": 1.1221,
+ "step": 722
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.4129005533101575,
+ "learning_rate": 5.481371880674628e-05,
+ "loss": 1.0966,
+ "step": 723
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.36563906596251655,
+ "learning_rate": 5.4734493633202505e-05,
+ "loss": 1.0927,
+ "step": 724
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.3614650536839971,
+ "learning_rate": 5.465520152672986e-05,
+ "loss": 1.13,
+ "step": 725
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.36419665098633497,
+ "learning_rate": 5.4575842847520765e-05,
+ "loss": 1.1183,
+ "step": 726
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.34490689807258995,
+ "learning_rate": 5.449641795607005e-05,
+ "loss": 1.0919,
+ "step": 727
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.3627643746876298,
+ "learning_rate": 5.441692721317334e-05,
+ "loss": 1.0411,
+ "step": 728
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.323620411949565,
+ "learning_rate": 5.433737097992537e-05,
+ "loss": 1.0725,
+ "step": 729
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3521599501824965,
+ "learning_rate": 5.425774961771838e-05,
+ "loss": 1.0926,
+ "step": 730
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3302390546764222,
+ "learning_rate": 5.417806348824047e-05,
+ "loss": 1.0468,
+ "step": 731
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3833325802616019,
+ "learning_rate": 5.4098312953473956e-05,
+ "loss": 1.1291,
+ "step": 732
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3708621126835512,
+ "learning_rate": 5.401849837569372e-05,
+ "loss": 1.0887,
+ "step": 733
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3625834373416278,
+ "learning_rate": 5.393862011746555e-05,
+ "loss": 1.0981,
+ "step": 734
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3583343965080617,
+ "learning_rate": 5.385867854164451e-05,
+ "loss": 1.1021,
+ "step": 735
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.34598320594096066,
+ "learning_rate": 5.377867401137332e-05,
+ "loss": 1.1376,
+ "step": 736
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.3046382791315433,
+ "learning_rate": 5.369860689008066e-05,
+ "loss": 1.0206,
+ "step": 737
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.34464948380043725,
+ "learning_rate": 5.3618477541479505e-05,
+ "loss": 1.1084,
+ "step": 738
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.3203242519627101,
+ "learning_rate": 5.353828632956557e-05,
+ "loss": 1.0731,
+ "step": 739
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.3431169960355163,
+ "learning_rate": 5.3458033618615516e-05,
+ "loss": 1.091,
+ "step": 740
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.33492074521678705,
+ "learning_rate": 5.337771977318543e-05,
+ "loss": 1.1112,
+ "step": 741
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.32576546585541344,
+ "learning_rate": 5.3297345158109086e-05,
+ "loss": 1.0993,
+ "step": 742
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.3410007245037574,
+ "learning_rate": 5.3216910138496286e-05,
+ "loss": 1.094,
+ "step": 743
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.34891180680896833,
+ "learning_rate": 5.313641507973128e-05,
+ "loss": 1.1331,
+ "step": 744
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.37135766946717214,
+ "learning_rate": 5.3055860347471006e-05,
+ "loss": 1.1,
+ "step": 745
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.3465019415478411,
+ "learning_rate": 5.297524630764349e-05,
+ "loss": 1.1256,
+ "step": 746
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.37035388481626563,
+ "learning_rate": 5.289457332644615e-05,
+ "loss": 1.0366,
+ "step": 747
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.33853883270759155,
+ "learning_rate": 5.281384177034421e-05,
+ "loss": 1.0547,
+ "step": 748
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.364306618627317,
+ "learning_rate": 5.2733052006068897e-05,
+ "loss": 1.0768,
+ "step": 749
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.4021754315731627,
+ "learning_rate": 5.2652204400615916e-05,
+ "loss": 1.1382,
+ "step": 750
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.3332185389039008,
+ "learning_rate": 5.257129932124368e-05,
+ "loss": 1.0815,
+ "step": 751
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.3453105709879854,
+ "learning_rate": 5.249033713547173e-05,
+ "loss": 1.1109,
+ "step": 752
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.3385397539717797,
+ "learning_rate": 5.2409318211078966e-05,
+ "loss": 1.0529,
+ "step": 753
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.33197994450130447,
+ "learning_rate": 5.232824291610206e-05,
+ "loss": 1.0721,
+ "step": 754
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.32836289576124167,
+ "learning_rate": 5.224711161883375e-05,
+ "loss": 1.0459,
+ "step": 755
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.32491620058831744,
+ "learning_rate": 5.216592468782117e-05,
+ "loss": 1.0897,
+ "step": 756
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.3137879047811153,
+ "learning_rate": 5.2084682491864155e-05,
+ "loss": 1.096,
+ "step": 757
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.3356938043023012,
+ "learning_rate": 5.200338540001364e-05,
+ "loss": 1.0827,
+ "step": 758
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.36044340490819055,
+ "learning_rate": 5.192203378156984e-05,
+ "loss": 1.0617,
+ "step": 759
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.34674262047888293,
+ "learning_rate": 5.184062800608077e-05,
+ "loss": 1.1267,
+ "step": 760
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.32469442322149333,
+ "learning_rate": 5.1759168443340375e-05,
+ "loss": 1.1483,
+ "step": 761
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.3290384307774216,
+ "learning_rate": 5.167765546338698e-05,
+ "loss": 1.047,
+ "step": 762
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.31637612188770403,
+ "learning_rate": 5.1596089436501525e-05,
+ "loss": 1.0311,
+ "step": 763
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.3168693829641207,
+ "learning_rate": 5.151447073320597e-05,
+ "loss": 1.1405,
+ "step": 764
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.34322421571238926,
+ "learning_rate": 5.143279972426153e-05,
+ "loss": 1.1428,
+ "step": 765
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.3291030435830325,
+ "learning_rate": 5.1351076780667026e-05,
+ "loss": 1.0473,
+ "step": 766
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.33772039158758044,
+ "learning_rate": 5.1269302273657195e-05,
+ "loss": 1.0909,
+ "step": 767
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.3802031736890876,
+ "learning_rate": 5.118747657470102e-05,
+ "loss": 1.1482,
+ "step": 768
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.3296067628997962,
+ "learning_rate": 5.1105600055500025e-05,
+ "loss": 1.0085,
+ "step": 769
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.3707139982828035,
+ "learning_rate": 5.102367308798658e-05,
+ "loss": 1.0746,
+ "step": 770
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.3378537316757011,
+ "learning_rate": 5.094169604432225e-05,
+ "loss": 1.0482,
+ "step": 771
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.4008417246255145,
+ "learning_rate": 5.085966929689601e-05,
+ "loss": 1.1065,
+ "step": 772
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.3244385106988064,
+ "learning_rate": 5.077759321832271e-05,
+ "loss": 1.0827,
+ "step": 773
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.37228575732812336,
+ "learning_rate": 5.0695468181441215e-05,
+ "loss": 1.1146,
+ "step": 774
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.33761714797540276,
+ "learning_rate": 5.061329455931283e-05,
+ "loss": 1.092,
+ "step": 775
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.3158158390913494,
+ "learning_rate": 5.053107272521955e-05,
+ "loss": 1.1058,
+ "step": 776
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.3691501929738938,
+ "learning_rate": 5.044880305266239e-05,
+ "loss": 1.1599,
+ "step": 777
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.33730914019805525,
+ "learning_rate": 5.0366485915359645e-05,
+ "loss": 1.0615,
+ "step": 778
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.34970059240017,
+ "learning_rate": 5.0284121687245257e-05,
+ "loss": 1.1475,
+ "step": 779
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 0.3374028029407197,
+ "learning_rate": 5.020171074246707e-05,
+ "loss": 1.0926,
+ "step": 780
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 0.3350020681123992,
+ "learning_rate": 5.011925345538514e-05,
+ "loss": 1.1276,
+ "step": 781
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 0.3224228965786606,
+ "learning_rate": 5.003675020057003e-05,
+ "loss": 1.0183,
+ "step": 782
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 0.3357310714740298,
+ "learning_rate": 4.995420135280114e-05,
+ "loss": 1.1114,
+ "step": 783
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 0.3590203255363759,
+ "learning_rate": 4.9871607287064966e-05,
+ "loss": 1.1504,
+ "step": 784
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.33011195419611655,
+ "learning_rate": 4.9788968378553396e-05,
+ "loss": 1.0826,
+ "step": 785
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.31088868195439445,
+ "learning_rate": 4.970628500266207e-05,
+ "loss": 1.0704,
+ "step": 786
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.3144996103179409,
+ "learning_rate": 4.962355753498858e-05,
+ "loss": 1.1403,
+ "step": 787
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.3147269555419068,
+ "learning_rate": 4.954078635133081e-05,
+ "loss": 1.0898,
+ "step": 788
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.3280151747783868,
+ "learning_rate": 4.945797182768524e-05,
+ "loss": 1.1115,
+ "step": 789
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.3551996569232493,
+ "learning_rate": 4.937511434024524e-05,
+ "loss": 1.1731,
+ "step": 790
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 0.343863208057807,
+ "learning_rate": 4.9292214265399336e-05,
+ "loss": 1.0866,
+ "step": 791
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 0.37316699385322466,
+ "learning_rate": 4.920927197972949e-05,
+ "loss": 1.1083,
+ "step": 792
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 0.3635739774067832,
+ "learning_rate": 4.9126287860009453e-05,
+ "loss": 1.1393,
+ "step": 793
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 0.3755910554972886,
+ "learning_rate": 4.9043262283202974e-05,
+ "loss": 1.1624,
+ "step": 794
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 0.3635899120146823,
+ "learning_rate": 4.8960195626462145e-05,
+ "loss": 1.2095,
+ "step": 795
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 0.3642202684342816,
+ "learning_rate": 4.8877088267125664e-05,
+ "loss": 1.1099,
+ "step": 796
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 0.3339946548799316,
+ "learning_rate": 4.879394058271712e-05,
+ "loss": 1.1157,
+ "step": 797
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 0.3457189703100475,
+ "learning_rate": 4.871075295094329e-05,
+ "loss": 1.129,
+ "step": 798
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 0.3550931839691424,
+ "learning_rate": 4.862752574969241e-05,
+ "loss": 1.076,
+ "step": 799
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 0.36139108917966734,
+ "learning_rate": 4.8544259357032475e-05,
+ "loss": 1.1577,
+ "step": 800
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.39569703665247874,
+ "learning_rate": 4.8460954151209486e-05,
+ "loss": 1.0543,
+ "step": 801
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.3879033670170866,
+ "learning_rate": 4.837761051064579e-05,
+ "loss": 1.0688,
+ "step": 802
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.3796846713967255,
+ "learning_rate": 4.8294228813938285e-05,
+ "loss": 0.9911,
+ "step": 803
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.4007831430409375,
+ "learning_rate": 4.8210809439856804e-05,
+ "loss": 1.0126,
+ "step": 804
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.37588078665500885,
+ "learning_rate": 4.8127352767342276e-05,
+ "loss": 0.9302,
+ "step": 805
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.4078509175013281,
+ "learning_rate": 4.8043859175505095e-05,
+ "loss": 0.9982,
+ "step": 806
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.379096046185539,
+ "learning_rate": 4.7960329043623344e-05,
+ "loss": 1.0035,
+ "step": 807
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.3813938568133554,
+ "learning_rate": 4.787676275114111e-05,
+ "loss": 0.9579,
+ "step": 808
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.3686863564511168,
+ "learning_rate": 4.779316067766673e-05,
+ "loss": 1.0105,
+ "step": 809
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.4263940878847523,
+ "learning_rate": 4.770952320297109e-05,
+ "loss": 1.0677,
+ "step": 810
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.37178778374665006,
+ "learning_rate": 4.7625850706985886e-05,
+ "loss": 1.0019,
+ "step": 811
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.36803355429187945,
+ "learning_rate": 4.7542143569801894e-05,
+ "loss": 0.9937,
+ "step": 812
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.3897072472941179,
+ "learning_rate": 4.745840217166725e-05,
+ "loss": 1.0877,
+ "step": 813
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.35571833841716255,
+ "learning_rate": 4.737462689298577e-05,
+ "loss": 1.0015,
+ "step": 814
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.38930229991094323,
+ "learning_rate": 4.7290818114315086e-05,
+ "loss": 1.028,
+ "step": 815
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.411005007105147,
+ "learning_rate": 4.72069762163651e-05,
+ "loss": 1.0068,
+ "step": 816
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.3980240190337736,
+ "learning_rate": 4.7123101579996106e-05,
+ "loss": 0.9919,
+ "step": 817
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.36369517703115467,
+ "learning_rate": 4.7039194586217136e-05,
+ "loss": 0.967,
+ "step": 818
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.38591148840458894,
+ "learning_rate": 4.695525561618418e-05,
+ "loss": 0.9743,
+ "step": 819
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.45873135108949337,
+ "learning_rate": 4.687128505119853e-05,
+ "loss": 1.0516,
+ "step": 820
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.3866330351411308,
+ "learning_rate": 4.6787283272704966e-05,
+ "loss": 0.9939,
+ "step": 821
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.4620340173291326,
+ "learning_rate": 4.670325066229009e-05,
+ "loss": 1.0526,
+ "step": 822
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.38877454299870284,
+ "learning_rate": 4.661918760168052e-05,
+ "loss": 0.9904,
+ "step": 823
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.3880489386116793,
+ "learning_rate": 4.653509447274121e-05,
+ "loss": 0.9623,
+ "step": 824
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.3827392356186151,
+ "learning_rate": 4.6450971657473743e-05,
+ "loss": 1.0772,
+ "step": 825
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.4132814641854327,
+ "learning_rate": 4.63668195380145e-05,
+ "loss": 1.0533,
+ "step": 826
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.3703610182402835,
+ "learning_rate": 4.628263849663301e-05,
+ "loss": 0.9336,
+ "step": 827
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.4152053683299823,
+ "learning_rate": 4.619842891573016e-05,
+ "loss": 0.9801,
+ "step": 828
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.41791059043554274,
+ "learning_rate": 4.6114191177836514e-05,
+ "loss": 1.0617,
+ "step": 829
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.46363896517299136,
+ "learning_rate": 4.6029925665610524e-05,
+ "loss": 0.9687,
+ "step": 830
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.41141959057512445,
+ "learning_rate": 4.59456327618368e-05,
+ "loss": 1.0965,
+ "step": 831
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.3789192764519836,
+ "learning_rate": 4.5861312849424386e-05,
+ "loss": 0.9793,
+ "step": 832
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.4047291581107866,
+ "learning_rate": 4.5776966311405035e-05,
+ "loss": 1.0342,
+ "step": 833
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.4425157400959256,
+ "learning_rate": 4.5692593530931416e-05,
+ "loss": 1.0892,
+ "step": 834
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.3707332144806616,
+ "learning_rate": 4.560819489127545e-05,
+ "loss": 0.9815,
+ "step": 835
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.3897444102572823,
+ "learning_rate": 4.552377077582646e-05,
+ "loss": 0.9884,
+ "step": 836
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.42725787957019346,
+ "learning_rate": 4.543932156808959e-05,
+ "loss": 0.9972,
+ "step": 837
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.40615269781820007,
+ "learning_rate": 4.535484765168386e-05,
+ "loss": 0.9529,
+ "step": 838
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.3505829736050887,
+ "learning_rate": 4.527034941034063e-05,
+ "loss": 0.9492,
+ "step": 839
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.36688064686440497,
+ "learning_rate": 4.51858272279017e-05,
+ "loss": 0.9592,
+ "step": 840
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.4043468777955929,
+ "learning_rate": 4.5101281488317634e-05,
+ "loss": 1.048,
+ "step": 841
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.3811489793242706,
+ "learning_rate": 4.501671257564602e-05,
+ "loss": 1.0138,
+ "step": 842
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.39813004142325986,
+ "learning_rate": 4.49321208740497e-05,
+ "loss": 1.071,
+ "step": 843
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.3809751022095503,
+ "learning_rate": 4.484750676779504e-05,
+ "loss": 1.0351,
+ "step": 844
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.384312178013823,
+ "learning_rate": 4.4762870641250185e-05,
+ "loss": 0.9737,
+ "step": 845
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.40769404907923557,
+ "learning_rate": 4.467821287888331e-05,
+ "loss": 0.9659,
+ "step": 846
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.39594136851937817,
+ "learning_rate": 4.459353386526086e-05,
+ "loss": 0.9405,
+ "step": 847
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.37180161011562185,
+ "learning_rate": 4.450883398504584e-05,
+ "loss": 1.0732,
+ "step": 848
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.3772603623154663,
+ "learning_rate": 4.442411362299602e-05,
+ "loss": 0.9646,
+ "step": 849
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.4346142368506476,
+ "learning_rate": 4.433937316396224e-05,
+ "loss": 0.9572,
+ "step": 850
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.3997258084612474,
+ "learning_rate": 4.425461299288659e-05,
+ "loss": 0.9492,
+ "step": 851
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.41245476865247155,
+ "learning_rate": 4.416983349480073e-05,
+ "loss": 0.8732,
+ "step": 852
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.6761499297939195,
+ "learning_rate": 4.408503505482412e-05,
+ "loss": 1.0425,
+ "step": 853
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.40340979486858985,
+ "learning_rate": 4.400021805816225e-05,
+ "loss": 0.9596,
+ "step": 854
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.43290732392699666,
+ "learning_rate": 4.391538289010493e-05,
+ "loss": 1.0123,
+ "step": 855
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.36878054442190156,
+ "learning_rate": 4.383052993602448e-05,
+ "loss": 0.9448,
+ "step": 856
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.7146145128961262,
+ "learning_rate": 4.374565958137404e-05,
+ "loss": 1.0342,
+ "step": 857
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.44429357586145607,
+ "learning_rate": 4.3660772211685775e-05,
+ "loss": 1.0436,
+ "step": 858
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.4565751973640598,
+ "learning_rate": 4.357586821256918e-05,
+ "loss": 1.0311,
+ "step": 859
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.3919991236654277,
+ "learning_rate": 4.349094796970925e-05,
+ "loss": 1.1401,
+ "step": 860
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.4347441949284011,
+ "learning_rate": 4.3406011868864795e-05,
+ "loss": 1.0252,
+ "step": 861
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.38339976027415407,
+ "learning_rate": 4.3321060295866635e-05,
+ "loss": 1.0536,
+ "step": 862
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.37688790408195166,
+ "learning_rate": 4.32360936366159e-05,
+ "loss": 1.012,
+ "step": 863
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.4317538207582504,
+ "learning_rate": 4.315111227708224e-05,
+ "loss": 1.0505,
+ "step": 864
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.4145324872228796,
+ "learning_rate": 4.306611660330208e-05,
+ "loss": 1.0496,
+ "step": 865
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.416535227064448,
+ "learning_rate": 4.298110700137687e-05,
+ "loss": 0.9628,
+ "step": 866
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.46564356187492717,
+ "learning_rate": 4.2896083857471345e-05,
+ "loss": 1.0016,
+ "step": 867
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.4228980941889828,
+ "learning_rate": 4.281104755781172e-05,
+ "loss": 1.0904,
+ "step": 868
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.4267821214430208,
+ "learning_rate": 4.272599848868402e-05,
+ "loss": 1.0544,
+ "step": 869
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.45763332095792075,
+ "learning_rate": 4.264093703643223e-05,
+ "loss": 1.0686,
+ "step": 870
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.4347555516548761,
+ "learning_rate": 4.255586358745662e-05,
+ "loss": 1.0264,
+ "step": 871
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.3817726381103066,
+ "learning_rate": 4.247077852821194e-05,
+ "loss": 1.0045,
+ "step": 872
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.3882808845457995,
+ "learning_rate": 4.2385682245205685e-05,
+ "loss": 1.0193,
+ "step": 873
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.39410930252966775,
+ "learning_rate": 4.230057512499634e-05,
+ "loss": 0.9832,
+ "step": 874
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.4373094593907156,
+ "learning_rate": 4.221545755419159e-05,
+ "loss": 1.0343,
+ "step": 875
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.4462843721698891,
+ "learning_rate": 4.2130329919446646e-05,
+ "loss": 1.0324,
+ "step": 876
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.4747274247448112,
+ "learning_rate": 4.20451926074624e-05,
+ "loss": 0.9903,
+ "step": 877
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.4157472897596409,
+ "learning_rate": 4.196004600498369e-05,
+ "loss": 0.9266,
+ "step": 878
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.41625958088960685,
+ "learning_rate": 4.1874890498797605e-05,
+ "loss": 0.9658,
+ "step": 879
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.44784944130574333,
+ "learning_rate": 4.178972647573163e-05,
+ "loss": 0.9671,
+ "step": 880
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.4116839177956385,
+ "learning_rate": 4.1704554322651975e-05,
+ "loss": 0.9591,
+ "step": 881
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.4025569857639452,
+ "learning_rate": 4.161937442646176e-05,
+ "loss": 1.0072,
+ "step": 882
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.41518478124763597,
+ "learning_rate": 4.1534187174099285e-05,
+ "loss": 1.0275,
+ "step": 883
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.3987815564664466,
+ "learning_rate": 4.1448992952536275e-05,
+ "loss": 1.0039,
+ "step": 884
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.4270378155679982,
+ "learning_rate": 4.136379214877609e-05,
+ "loss": 1.0369,
+ "step": 885
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.42144733922972777,
+ "learning_rate": 4.127858514985203e-05,
+ "loss": 1.0269,
+ "step": 886
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.4198664438272548,
+ "learning_rate": 4.1193372342825494e-05,
+ "loss": 1.0427,
+ "step": 887
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.3985048256281719,
+ "learning_rate": 4.1108154114784275e-05,
+ "loss": 1.0702,
+ "step": 888
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.605520808292362,
+ "learning_rate": 4.102293085284083e-05,
+ "loss": 0.9749,
+ "step": 889
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.4150515863924052,
+ "learning_rate": 4.0937702944130426e-05,
+ "loss": 1.0231,
+ "step": 890
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.3935997576565283,
+ "learning_rate": 4.085247077580948e-05,
+ "loss": 1.0014,
+ "step": 891
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.399446131403209,
+ "learning_rate": 4.076723473505374e-05,
+ "loss": 0.9602,
+ "step": 892
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.4406024397129952,
+ "learning_rate": 4.068199520905655e-05,
+ "loss": 1.0425,
+ "step": 893
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.4036917571496492,
+ "learning_rate": 4.059675258502709e-05,
+ "loss": 0.973,
+ "step": 894
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.4057196459433299,
+ "learning_rate": 4.05115072501886e-05,
+ "loss": 0.9997,
+ "step": 895
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.4374124954708759,
+ "learning_rate": 4.0426259591776645e-05,
+ "loss": 0.9826,
+ "step": 896
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.4545699371285546,
+ "learning_rate": 4.0341009997037356e-05,
+ "loss": 1.0554,
+ "step": 897
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.4251917031237376,
+ "learning_rate": 4.025575885322563e-05,
+ "loss": 1.0217,
+ "step": 898
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.3857651901893941,
+ "learning_rate": 4.0170506547603427e-05,
+ "loss": 1.0317,
+ "step": 899
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.46323573798490897,
+ "learning_rate": 4.008525346743797e-05,
+ "loss": 1.0398,
+ "step": 900
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.4011541121460918,
+ "learning_rate": 4e-05,
+ "loss": 1.0706,
+ "step": 901
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.46493281221028004,
+ "learning_rate": 3.991474653256204e-05,
+ "loss": 1.0525,
+ "step": 902
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.41683080924539023,
+ "learning_rate": 3.982949345239658e-05,
+ "loss": 1.0905,
+ "step": 903
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.4750350025014512,
+ "learning_rate": 3.974424114677437e-05,
+ "loss": 1.049,
+ "step": 904
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.3867445073614702,
+ "learning_rate": 3.965899000296266e-05,
+ "loss": 0.9624,
+ "step": 905
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.378387661131469,
+ "learning_rate": 3.957374040822335e-05,
+ "loss": 1.0223,
+ "step": 906
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.3905996390559077,
+ "learning_rate": 3.948849274981141e-05,
+ "loss": 1.0315,
+ "step": 907
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.4139717689498189,
+ "learning_rate": 3.940324741497291e-05,
+ "loss": 0.9297,
+ "step": 908
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.39086355684921514,
+ "learning_rate": 3.9318004790943465e-05,
+ "loss": 0.9684,
+ "step": 909
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.4334915643736419,
+ "learning_rate": 3.923276526494627e-05,
+ "loss": 0.996,
+ "step": 910
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.40782018986229496,
+ "learning_rate": 3.9147529224190536e-05,
+ "loss": 1.0875,
+ "step": 911
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.43578702386625723,
+ "learning_rate": 3.906229705586959e-05,
+ "loss": 1.1214,
+ "step": 912
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.414945683409524,
+ "learning_rate": 3.89770691471592e-05,
+ "loss": 1.1037,
+ "step": 913
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.40665801579679106,
+ "learning_rate": 3.889184588521573e-05,
+ "loss": 0.9743,
+ "step": 914
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.4064250611574517,
+ "learning_rate": 3.880662765717453e-05,
+ "loss": 0.8814,
+ "step": 915
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.48023046298843347,
+ "learning_rate": 3.8721414850147985e-05,
+ "loss": 0.9663,
+ "step": 916
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.42358024833566227,
+ "learning_rate": 3.8636207851223924e-05,
+ "loss": 1.0491,
+ "step": 917
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.41522494786195835,
+ "learning_rate": 3.855100704746374e-05,
+ "loss": 1.033,
+ "step": 918
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.40890517696706496,
+ "learning_rate": 3.8465812825900715e-05,
+ "loss": 1.0369,
+ "step": 919
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.4325851866408538,
+ "learning_rate": 3.838062557353825e-05,
+ "loss": 0.9362,
+ "step": 920
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.4185860919050069,
+ "learning_rate": 3.8295445677348025e-05,
+ "loss": 1.026,
+ "step": 921
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.3975762375934804,
+ "learning_rate": 3.8210273524268375e-05,
+ "loss": 1.0412,
+ "step": 922
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.41725298241987474,
+ "learning_rate": 3.8125109501202395e-05,
+ "loss": 1.0004,
+ "step": 923
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.455183913149126,
+ "learning_rate": 3.803995399501632e-05,
+ "loss": 1.0594,
+ "step": 924
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.3993993856483797,
+ "learning_rate": 3.795480739253761e-05,
+ "loss": 0.9761,
+ "step": 925
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.41638796815161494,
+ "learning_rate": 3.786967008055337e-05,
+ "loss": 1.0369,
+ "step": 926
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.40015112695810534,
+ "learning_rate": 3.7784542445808414e-05,
+ "loss": 1.0271,
+ "step": 927
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.3995749494729548,
+ "learning_rate": 3.769942487500368e-05,
+ "loss": 1.0613,
+ "step": 928
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.4073556267037492,
+ "learning_rate": 3.761431775479432e-05,
+ "loss": 1.0528,
+ "step": 929
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.44218148822636044,
+ "learning_rate": 3.752922147178807e-05,
+ "loss": 1.0742,
+ "step": 930
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.4435063485893757,
+ "learning_rate": 3.744413641254339e-05,
+ "loss": 1.0825,
+ "step": 931
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.46841574994107515,
+ "learning_rate": 3.735906296356778e-05,
+ "loss": 1.0471,
+ "step": 932
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.40093716627657294,
+ "learning_rate": 3.727400151131599e-05,
+ "loss": 1.0474,
+ "step": 933
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3866415067997244,
+ "learning_rate": 3.71889524421883e-05,
+ "loss": 1.0209,
+ "step": 934
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.4881546110706673,
+ "learning_rate": 3.710391614252867e-05,
+ "loss": 1.0768,
+ "step": 935
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.4133084639324523,
+ "learning_rate": 3.701889299862314e-05,
+ "loss": 1.0423,
+ "step": 936
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.40523563084001196,
+ "learning_rate": 3.6933883396697936e-05,
+ "loss": 1.005,
+ "step": 937
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.38757352418642405,
+ "learning_rate": 3.684888772291777e-05,
+ "loss": 0.9659,
+ "step": 938
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.421394551890689,
+ "learning_rate": 3.676390636338411e-05,
+ "loss": 1.0454,
+ "step": 939
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.45693070958342186,
+ "learning_rate": 3.667893970413337e-05,
+ "loss": 1.1459,
+ "step": 940
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.4172025376377795,
+ "learning_rate": 3.659398813113522e-05,
+ "loss": 0.9954,
+ "step": 941
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.3871624019510191,
+ "learning_rate": 3.650905203029075e-05,
+ "loss": 1.0441,
+ "step": 942
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.38541342610032325,
+ "learning_rate": 3.642413178743083e-05,
+ "loss": 0.9465,
+ "step": 943
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.4208031670525743,
+ "learning_rate": 3.633922778831423e-05,
+ "loss": 1.0367,
+ "step": 944
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.41867209013040035,
+ "learning_rate": 3.6254340418625975e-05,
+ "loss": 1.0868,
+ "step": 945
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.431758149074127,
+ "learning_rate": 3.6169470063975536e-05,
+ "loss": 1.0689,
+ "step": 946
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.4988803338819952,
+ "learning_rate": 3.608461710989509e-05,
+ "loss": 1.0879,
+ "step": 947
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.4094858411191625,
+ "learning_rate": 3.5999781941837755e-05,
+ "loss": 1.0332,
+ "step": 948
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.3831847195845155,
+ "learning_rate": 3.591496494517589e-05,
+ "loss": 0.9751,
+ "step": 949
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.40535692821947267,
+ "learning_rate": 3.5830166505199284e-05,
+ "loss": 1.0594,
+ "step": 950
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.4875663789389966,
+ "learning_rate": 3.574538700711343e-05,
+ "loss": 0.9749,
+ "step": 951
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.5155923998285772,
+ "learning_rate": 3.566062683603778e-05,
+ "loss": 0.9999,
+ "step": 952
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.5280285947816189,
+ "learning_rate": 3.557588637700399e-05,
+ "loss": 1.1061,
+ "step": 953
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.46573407357796753,
+ "learning_rate": 3.5491166014954174e-05,
+ "loss": 1.102,
+ "step": 954
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.4122542582865379,
+ "learning_rate": 3.540646613473915e-05,
+ "loss": 1.0469,
+ "step": 955
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.41414476980823367,
+ "learning_rate": 3.53217871211167e-05,
+ "loss": 0.9973,
+ "step": 956
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.4030707611608045,
+ "learning_rate": 3.523712935874983e-05,
+ "loss": 0.9796,
+ "step": 957
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.4235313349747291,
+ "learning_rate": 3.5152493232204975e-05,
+ "loss": 1.0601,
+ "step": 958
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.4165235178302652,
+ "learning_rate": 3.5067879125950316e-05,
+ "loss": 1.0358,
+ "step": 959
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.44083984701952955,
+ "learning_rate": 3.4983287424354e-05,
+ "loss": 1.0957,
+ "step": 960
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.3781161039063518,
+ "learning_rate": 3.489871851168238e-05,
+ "loss": 0.9838,
+ "step": 961
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.4095747724038915,
+ "learning_rate": 3.4814172772098314e-05,
+ "loss": 1.014,
+ "step": 962
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.42197119558898466,
+ "learning_rate": 3.472965058965938e-05,
+ "loss": 1.0096,
+ "step": 963
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.4339963388152155,
+ "learning_rate": 3.464515234831615e-05,
+ "loss": 1.0158,
+ "step": 964
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.4284638765548976,
+ "learning_rate": 3.4560678431910424e-05,
+ "loss": 1.1047,
+ "step": 965
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.3935144535755794,
+ "learning_rate": 3.447622922417355e-05,
+ "loss": 0.9925,
+ "step": 966
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.45884343961025,
+ "learning_rate": 3.439180510872457e-05,
+ "loss": 1.0583,
+ "step": 967
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.42439320759788374,
+ "learning_rate": 3.4307406469068604e-05,
+ "loss": 0.9305,
+ "step": 968
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.45770082390324845,
+ "learning_rate": 3.4223033688594985e-05,
+ "loss": 1.054,
+ "step": 969
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.4284786643981094,
+ "learning_rate": 3.4138687150575634e-05,
+ "loss": 0.9409,
+ "step": 970
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.41356124058383237,
+ "learning_rate": 3.4054367238163215e-05,
+ "loss": 1.0739,
+ "step": 971
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.4255832249412624,
+ "learning_rate": 3.3970074334389496e-05,
+ "loss": 1.0764,
+ "step": 972
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.4337695536142702,
+ "learning_rate": 3.388580882216349e-05,
+ "loss": 1.0195,
+ "step": 973
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.41363495650922455,
+ "learning_rate": 3.380157108426985e-05,
+ "loss": 1.0615,
+ "step": 974
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.3950691247686479,
+ "learning_rate": 3.371736150336701e-05,
+ "loss": 1.0283,
+ "step": 975
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.4042823691555822,
+ "learning_rate": 3.3633180461985505e-05,
+ "loss": 1.0309,
+ "step": 976
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.3921158850479399,
+ "learning_rate": 3.354902834252627e-05,
+ "loss": 1.068,
+ "step": 977
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.38349545732725654,
+ "learning_rate": 3.346490552725879e-05,
+ "loss": 1.0886,
+ "step": 978
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.38689221457248724,
+ "learning_rate": 3.33808123983195e-05,
+ "loss": 0.987,
+ "step": 979
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.38660550867425647,
+ "learning_rate": 3.329674933770992e-05,
+ "loss": 1.069,
+ "step": 980
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.3917593746353493,
+ "learning_rate": 3.321271672729504e-05,
+ "loss": 0.9858,
+ "step": 981
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.4292314072827653,
+ "learning_rate": 3.3128714948801474e-05,
+ "loss": 1.0477,
+ "step": 982
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.479414638418211,
+ "learning_rate": 3.3044744383815835e-05,
+ "loss": 1.0763,
+ "step": 983
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.380831894995463,
+ "learning_rate": 3.2960805413782884e-05,
+ "loss": 1.0393,
+ "step": 984
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.42402274703362114,
+ "learning_rate": 3.2876898420003914e-05,
+ "loss": 1.0837,
+ "step": 985
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.4571447203722258,
+ "learning_rate": 3.279302378363491e-05,
+ "loss": 1.0594,
+ "step": 986
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3776673281658531,
+ "learning_rate": 3.270918188568493e-05,
+ "loss": 1.0121,
+ "step": 987
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.4367173448132159,
+ "learning_rate": 3.262537310701425e-05,
+ "loss": 0.9612,
+ "step": 988
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.43679765208840926,
+ "learning_rate": 3.254159782833276e-05,
+ "loss": 1.0565,
+ "step": 989
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.4018151260013493,
+ "learning_rate": 3.2457856430198126e-05,
+ "loss": 0.9975,
+ "step": 990
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.40461959940721076,
+ "learning_rate": 3.237414929301412e-05,
+ "loss": 1.0255,
+ "step": 991
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.41342378541540653,
+ "learning_rate": 3.2290476797028926e-05,
+ "loss": 1.024,
+ "step": 992
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.3926173909201105,
+ "learning_rate": 3.220683932233328e-05,
+ "loss": 1.0877,
+ "step": 993
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.3835623199834992,
+ "learning_rate": 3.21232372488589e-05,
+ "loss": 1.0992,
+ "step": 994
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.39901809497083496,
+ "learning_rate": 3.2039670956376656e-05,
+ "loss": 1.0723,
+ "step": 995
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.3979604537466272,
+ "learning_rate": 3.195614082449492e-05,
+ "loss": 1.0201,
+ "step": 996
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.4057122427176845,
+ "learning_rate": 3.1872647232657723e-05,
+ "loss": 1.0885,
+ "step": 997
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.39747060350754754,
+ "learning_rate": 3.17891905601432e-05,
+ "loss": 1.0544,
+ "step": 998
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.4397658078291558,
+ "learning_rate": 3.1705771186061715e-05,
+ "loss": 1.0998,
+ "step": 999
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.37373547663810053,
+ "learning_rate": 3.162238948935423e-05,
+ "loss": 1.0465,
+ "step": 1000
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.4042576001255747,
+ "learning_rate": 3.153904584879052e-05,
+ "loss": 0.9206,
+ "step": 1001
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.4042994886900337,
+ "learning_rate": 3.1455740642967545e-05,
+ "loss": 0.975,
+ "step": 1002
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.4359721725421234,
+ "learning_rate": 3.1372474250307594e-05,
+ "loss": 0.9163,
+ "step": 1003
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.4886423524029179,
+ "learning_rate": 3.128924704905673e-05,
+ "loss": 0.9956,
+ "step": 1004
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.48669990170138744,
+ "learning_rate": 3.1206059417282894e-05,
+ "loss": 0.9874,
+ "step": 1005
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.41954255928633066,
+ "learning_rate": 3.1122911732874356e-05,
+ "loss": 0.8986,
+ "step": 1006
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.43363878644039366,
+ "learning_rate": 3.103980437353787e-05,
+ "loss": 0.9268,
+ "step": 1007
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.5199775120765874,
+ "learning_rate": 3.0956737716797047e-05,
+ "loss": 0.9341,
+ "step": 1008
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.40735757951139595,
+ "learning_rate": 3.087371213999056e-05,
+ "loss": 0.9142,
+ "step": 1009
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.44449027493884186,
+ "learning_rate": 3.079072802027051e-05,
+ "loss": 0.966,
+ "step": 1010
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.46590494286419365,
+ "learning_rate": 3.070778573460068e-05,
+ "loss": 0.8768,
+ "step": 1011
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.45161453051587425,
+ "learning_rate": 3.062488565975476e-05,
+ "loss": 0.9299,
+ "step": 1012
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.5022364894382346,
+ "learning_rate": 3.054202817231477e-05,
+ "loss": 0.9352,
+ "step": 1013
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.46443439138730447,
+ "learning_rate": 3.0459213648669195e-05,
+ "loss": 0.8913,
+ "step": 1014
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.41932307219261455,
+ "learning_rate": 3.0376442465011436e-05,
+ "loss": 0.8968,
+ "step": 1015
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.42445864358441704,
+ "learning_rate": 3.0293714997337927e-05,
+ "loss": 0.8449,
+ "step": 1016
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.4489777773688699,
+ "learning_rate": 3.0211031621446607e-05,
+ "loss": 0.927,
+ "step": 1017
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.45180577504235525,
+ "learning_rate": 3.0128392712935044e-05,
+ "loss": 0.8834,
+ "step": 1018
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.44680469596106914,
+ "learning_rate": 3.0045798647198882e-05,
+ "loss": 1.0176,
+ "step": 1019
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.45747851649657734,
+ "learning_rate": 2.9963249799429986e-05,
+ "loss": 0.9036,
+ "step": 1020
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.5045904501932169,
+ "learning_rate": 2.988074654461489e-05,
+ "loss": 1.0475,
+ "step": 1021
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.47086144833942983,
+ "learning_rate": 2.9798289257532946e-05,
+ "loss": 0.9596,
+ "step": 1022
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.4406706196288816,
+ "learning_rate": 2.9715878312754767e-05,
+ "loss": 1.0054,
+ "step": 1023
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.44584179061175105,
+ "learning_rate": 2.9633514084640365e-05,
+ "loss": 0.8981,
+ "step": 1024
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.462343843042957,
+ "learning_rate": 2.955119694733763e-05,
+ "loss": 0.974,
+ "step": 1025
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.46767265335377156,
+ "learning_rate": 2.946892727478045e-05,
+ "loss": 1.0063,
+ "step": 1026
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.43250194002958803,
+ "learning_rate": 2.9386705440687168e-05,
+ "loss": 0.9332,
+ "step": 1027
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.44391321845917453,
+ "learning_rate": 2.9304531818558795e-05,
+ "loss": 0.8937,
+ "step": 1028
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.45616826414927975,
+ "learning_rate": 2.9222406781677294e-05,
+ "loss": 0.869,
+ "step": 1029
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.5670635396983207,
+ "learning_rate": 2.9140330703103992e-05,
+ "loss": 0.9697,
+ "step": 1030
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.4860829361401993,
+ "learning_rate": 2.905830395567776e-05,
+ "loss": 0.9677,
+ "step": 1031
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.4484206829172443,
+ "learning_rate": 2.8976326912013422e-05,
+ "loss": 0.9582,
+ "step": 1032
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.46728002332884067,
+ "learning_rate": 2.8894399944499974e-05,
+ "loss": 0.9023,
+ "step": 1033
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.48539702863685763,
+ "learning_rate": 2.8812523425299e-05,
+ "loss": 0.9725,
+ "step": 1034
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.42521485032555006,
+ "learning_rate": 2.873069772634281e-05,
+ "loss": 0.9525,
+ "step": 1035
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.4068824768950637,
+ "learning_rate": 2.8648923219332997e-05,
+ "loss": 0.8318,
+ "step": 1036
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.45227216852040214,
+ "learning_rate": 2.856720027573848e-05,
+ "loss": 1.0211,
+ "step": 1037
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.42310911927974604,
+ "learning_rate": 2.8485529266794043e-05,
+ "loss": 0.9422,
+ "step": 1038
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.4494478185683011,
+ "learning_rate": 2.8403910563498482e-05,
+ "loss": 0.9577,
+ "step": 1039
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.517885146963669,
+ "learning_rate": 2.832234453661304e-05,
+ "loss": 0.9551,
+ "step": 1040
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.46117112897797924,
+ "learning_rate": 2.8240831556659635e-05,
+ "loss": 0.9336,
+ "step": 1041
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.4610208217170147,
+ "learning_rate": 2.815937199391924e-05,
+ "loss": 0.926,
+ "step": 1042
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.445775414660019,
+ "learning_rate": 2.807796621843016e-05,
+ "loss": 0.9737,
+ "step": 1043
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.46809555676786746,
+ "learning_rate": 2.799661459998638e-05,
+ "loss": 0.9916,
+ "step": 1044
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.4366867439876077,
+ "learning_rate": 2.7915317508135848e-05,
+ "loss": 0.9549,
+ "step": 1045
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.388979809570948,
+ "learning_rate": 2.7834075312178838e-05,
+ "loss": 0.8967,
+ "step": 1046
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.45918748975994583,
+ "learning_rate": 2.775288838116626e-05,
+ "loss": 1.032,
+ "step": 1047
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.4607131980517622,
+ "learning_rate": 2.767175708389794e-05,
+ "loss": 0.9638,
+ "step": 1048
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.4583573438714022,
+ "learning_rate": 2.759068178892105e-05,
+ "loss": 0.8574,
+ "step": 1049
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.4506028295056863,
+ "learning_rate": 2.750966286452828e-05,
+ "loss": 0.904,
+ "step": 1050
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.5185176506584814,
+ "learning_rate": 2.7428700678756334e-05,
+ "loss": 0.8967,
+ "step": 1051
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.4898570882755602,
+ "learning_rate": 2.7347795599384097e-05,
+ "loss": 0.9361,
+ "step": 1052
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.4715298019133342,
+ "learning_rate": 2.7266947993931113e-05,
+ "loss": 0.8779,
+ "step": 1053
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.4635241319149805,
+ "learning_rate": 2.7186158229655805e-05,
+ "loss": 0.8891,
+ "step": 1054
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.4623859512294341,
+ "learning_rate": 2.7105426673553855e-05,
+ "loss": 0.8135,
+ "step": 1055
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.4484341139188101,
+ "learning_rate": 2.7024753692356526e-05,
+ "loss": 0.9299,
+ "step": 1056
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.46758873631226766,
+ "learning_rate": 2.694413965252901e-05,
+ "loss": 0.7719,
+ "step": 1057
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.44651575615691763,
+ "learning_rate": 2.686358492026873e-05,
+ "loss": 0.9053,
+ "step": 1058
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.5155154510102731,
+ "learning_rate": 2.6783089861503717e-05,
+ "loss": 1.0115,
+ "step": 1059
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.49165729879997466,
+ "learning_rate": 2.670265484189093e-05,
+ "loss": 1.0023,
+ "step": 1060
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.46460007026601335,
+ "learning_rate": 2.6622280226814582e-05,
+ "loss": 0.8825,
+ "step": 1061
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.46979936842787406,
+ "learning_rate": 2.6541966381384487e-05,
+ "loss": 0.9605,
+ "step": 1062
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.46001266952399505,
+ "learning_rate": 2.6461713670434445e-05,
+ "loss": 0.9462,
+ "step": 1063
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.4965534915567658,
+ "learning_rate": 2.6381522458520498e-05,
+ "loss": 0.8811,
+ "step": 1064
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.49637266648924844,
+ "learning_rate": 2.6301393109919353e-05,
+ "loss": 0.9957,
+ "step": 1065
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.43785076760992014,
+ "learning_rate": 2.6221325988626686e-05,
+ "loss": 0.9486,
+ "step": 1066
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.49005821628705587,
+ "learning_rate": 2.61413214583555e-05,
+ "loss": 1.0479,
+ "step": 1067
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.43997445074451363,
+ "learning_rate": 2.6061379882534466e-05,
+ "loss": 0.91,
+ "step": 1068
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.47582752827416086,
+ "learning_rate": 2.5981501624306296e-05,
+ "loss": 0.9833,
+ "step": 1069
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.475702334553824,
+ "learning_rate": 2.590168704652605e-05,
+ "loss": 0.9387,
+ "step": 1070
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.46797543318073137,
+ "learning_rate": 2.582193651175954e-05,
+ "loss": 0.8675,
+ "step": 1071
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.502869009197904,
+ "learning_rate": 2.5742250382281638e-05,
+ "loss": 0.95,
+ "step": 1072
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.48932775538777384,
+ "learning_rate": 2.5662629020074647e-05,
+ "loss": 0.8834,
+ "step": 1073
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.5117876032138348,
+ "learning_rate": 2.5583072786826678e-05,
+ "loss": 0.9326,
+ "step": 1074
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.45206721861847016,
+ "learning_rate": 2.5503582043929963e-05,
+ "loss": 0.9122,
+ "step": 1075
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.478569067482158,
+ "learning_rate": 2.542415715247926e-05,
+ "loss": 0.8907,
+ "step": 1076
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.4615412716962746,
+ "learning_rate": 2.5344798473270152e-05,
+ "loss": 0.8945,
+ "step": 1077
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.4844154360763313,
+ "learning_rate": 2.526550636679751e-05,
+ "loss": 0.9517,
+ "step": 1078
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.5073794346028544,
+ "learning_rate": 2.5186281193253726e-05,
+ "loss": 0.9625,
+ "step": 1079
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.49073451792300016,
+ "learning_rate": 2.510712331252719e-05,
+ "loss": 0.8753,
+ "step": 1080
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.45076485507525016,
+ "learning_rate": 2.5028033084200566e-05,
+ "loss": 0.8979,
+ "step": 1081
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.4963935920521398,
+ "learning_rate": 2.494901086754923e-05,
+ "loss": 1.0287,
+ "step": 1082
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.4553597545112614,
+ "learning_rate": 2.4870057021539628e-05,
+ "loss": 0.8624,
+ "step": 1083
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.4574238940524242,
+ "learning_rate": 2.4791171904827548e-05,
+ "loss": 0.7923,
+ "step": 1084
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.4771229947789797,
+ "learning_rate": 2.4712355875756666e-05,
+ "loss": 0.8912,
+ "step": 1085
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.6440729243869642,
+ "learning_rate": 2.4633609292356737e-05,
+ "loss": 1.022,
+ "step": 1086
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.47469602978835046,
+ "learning_rate": 2.4554932512342117e-05,
+ "loss": 1.0285,
+ "step": 1087
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.5204144570210799,
+ "learning_rate": 2.4476325893110008e-05,
+ "loss": 1.0108,
+ "step": 1088
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.47985475663425353,
+ "learning_rate": 2.4397789791738974e-05,
+ "loss": 0.8768,
+ "step": 1089
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.4962961122211798,
+ "learning_rate": 2.431932456498717e-05,
+ "loss": 0.9328,
+ "step": 1090
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.5084494441414602,
+ "learning_rate": 2.4240930569290867e-05,
+ "loss": 0.9511,
+ "step": 1091
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.5089598583736452,
+ "learning_rate": 2.416260816076269e-05,
+ "loss": 0.9491,
+ "step": 1092
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.48221222121556606,
+ "learning_rate": 2.408435769519014e-05,
+ "loss": 1.0269,
+ "step": 1093
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.4703359259662912,
+ "learning_rate": 2.4006179528033844e-05,
+ "loss": 0.9321,
+ "step": 1094
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.4790398670509022,
+ "learning_rate": 2.3928074014426077e-05,
+ "loss": 0.9077,
+ "step": 1095
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 2.9832894495461018,
+ "learning_rate": 2.3850041509169007e-05,
+ "loss": 1.0024,
+ "step": 1096
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.5157931440937726,
+ "learning_rate": 2.3772082366733224e-05,
+ "loss": 0.9028,
+ "step": 1097
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.597535205195858,
+ "learning_rate": 2.3694196941255988e-05,
+ "loss": 0.9252,
+ "step": 1098
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.5068463743400509,
+ "learning_rate": 2.361638558653974e-05,
+ "loss": 0.9658,
+ "step": 1099
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.5227973124909935,
+ "learning_rate": 2.3538648656050463e-05,
+ "loss": 0.8794,
+ "step": 1100
+ }
+ ],
+ "logging_steps": 1.0,
+ "max_steps": 1638,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 3,
+ "save_steps": 50,
+ "total_flos": 1139920827580416.0,
+ "train_batch_size": 1,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/checkpoint-1100/training_args.bin b/checkpoint-1100/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..019ba23a7fdbf801e820866ee31c58074364f008
--- /dev/null
+++ b/checkpoint-1100/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bac5e6482c01f264d88bde470e1276960bac33683cfdcf64b2409e1a5fb73c89
+size 6776
diff --git a/checkpoint-1100/zero_to_fp32.py b/checkpoint-1100/zero_to_fp32.py
new file mode 100755
index 0000000000000000000000000000000000000000..24cc342e78d1a006c782b3a4cd68d9ce786d8fd8
--- /dev/null
+++ b/checkpoint-1100/zero_to_fp32.py
@@ -0,0 +1,604 @@
+#!/usr/bin/env python
+
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+# This script extracts fp32 consolidated weights from a zero 1, 2 and 3 DeepSpeed checkpoints. It gets
+# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
+# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
+# application.
+#
+# example: python zero_to_fp32.py . pytorch_model.bin
+
+import argparse
+import torch
+import glob
+import math
+import os
+import re
+from collections import OrderedDict
+from dataclasses import dataclass
+
+# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
+# DeepSpeed data structures it has to be available in the current python environment.
+from deepspeed.utils import logger
+from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
+
+
+@dataclass
+class zero_model_state:
+ buffers: dict()
+ param_shapes: dict()
+ shared_params: list
+ ds_version: int
+ frozen_param_shapes: dict()
+ frozen_param_fragments: dict()
+
+
+debug = 0
+
+# load to cpu
+device = torch.device('cpu')
+
+
+def atoi(text):
+ return int(text) if text.isdigit() else text
+
+
+def natural_keys(text):
+ '''
+ alist.sort(key=natural_keys) sorts in human order
+ http://nedbatchelder.com/blog/200712/human_sorting.html
+ (See Toothy's implementation in the comments)
+ '''
+ return [atoi(c) for c in re.split(r'(\d+)', text)]
+
+
+def get_model_state_file(checkpoint_dir, zero_stage):
+ if not os.path.isdir(checkpoint_dir):
+ raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
+
+ # there should be only one file
+ if zero_stage <= 2:
+ file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
+ elif zero_stage == 3:
+ file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
+
+ if not os.path.exists(file):
+ raise FileNotFoundError(f"can't find model states file at '{file}'")
+
+ return file
+
+
+def get_checkpoint_files(checkpoint_dir, glob_pattern):
+ # XXX: need to test that this simple glob rule works for multi-node setup too
+ ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
+
+ if len(ckpt_files) == 0:
+ raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
+
+ return ckpt_files
+
+
+def get_optim_files(checkpoint_dir):
+ return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
+
+
+def get_model_state_files(checkpoint_dir):
+ return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
+
+
+def parse_model_states(files):
+ zero_model_states = []
+ for file in files:
+ state_dict = torch.load(file, map_location=device)
+
+ if BUFFER_NAMES not in state_dict:
+ raise ValueError(f"{file} is not a model state checkpoint")
+ buffer_names = state_dict[BUFFER_NAMES]
+ if debug:
+ print("Found buffers:", buffer_names)
+
+ # recover just the buffers while restoring them to fp32 if they were saved in fp16
+ buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
+ param_shapes = state_dict[PARAM_SHAPES]
+
+ # collect parameters that are included in param_shapes
+ param_names = []
+ for s in param_shapes:
+ for name in s.keys():
+ param_names.append(name)
+
+ # update with frozen parameters
+ frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
+ if frozen_param_shapes is not None:
+ if debug:
+ print(f"Found frozen_param_shapes: {frozen_param_shapes}")
+ param_names += list(frozen_param_shapes.keys())
+
+ # handle shared params
+ shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
+
+ ds_version = state_dict.get(DS_VERSION, None)
+
+ frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
+
+ z_model_state = zero_model_state(buffers=buffers,
+ param_shapes=param_shapes,
+ shared_params=shared_params,
+ ds_version=ds_version,
+ frozen_param_shapes=frozen_param_shapes,
+ frozen_param_fragments=frozen_param_fragments)
+ zero_model_states.append(z_model_state)
+
+ return zero_model_states
+
+
+def parse_optim_states(files, ds_checkpoint_dir):
+
+ total_files = len(files)
+ state_dicts = []
+ for f in files:
+ state_dict = torch.load(f, map_location=device)
+ # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights
+ # and also handle the case where it was already removed by another helper script
+ state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
+ state_dicts.append(state_dict)
+
+ if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]:
+ raise ValueError(f"{files[0]} is not a zero checkpoint")
+ zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
+ world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
+
+ # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
+ # parameters can be different from data parallelism for non-expert parameters. So we can just
+ # use the max of the partition_count to get the dp world_size.
+
+ if type(world_size) is list:
+ world_size = max(world_size)
+
+ if world_size != total_files:
+ raise ValueError(
+ f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
+ "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
+ )
+
+ # the groups are named differently in each stage
+ if zero_stage <= 2:
+ fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
+ elif zero_stage == 3:
+ fp32_groups_key = FP32_FLAT_GROUPS
+ else:
+ raise ValueError(f"unknown zero stage {zero_stage}")
+
+ if zero_stage <= 2:
+ fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
+ elif zero_stage == 3:
+ # if there is more than one param group, there will be multiple flattened tensors - one
+ # flattened tensor per group - for simplicity merge them into a single tensor
+ #
+ # XXX: could make the script more memory efficient for when there are multiple groups - it
+ # will require matching the sub-lists of param_shapes for each param group flattened tensor
+
+ fp32_flat_groups = [
+ torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
+ ]
+
+ return zero_stage, world_size, fp32_flat_groups
+
+
+def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
+ """
+ Returns fp32 state_dict reconstructed from ds checkpoint
+
+ Args:
+ - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
+
+ """
+ print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
+
+ optim_files = get_optim_files(ds_checkpoint_dir)
+ zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
+ print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
+
+ model_files = get_model_state_files(ds_checkpoint_dir)
+
+ zero_model_states = parse_model_states(model_files)
+ print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
+
+ if zero_stage <= 2:
+ return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters)
+ elif zero_stage == 3:
+ return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters)
+
+
+def _zero2_merge_frozen_params(state_dict, zero_model_states):
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+ return
+
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+ frozen_param_fragments = zero_model_states[0].frozen_param_fragments
+
+ if debug:
+ num_elem = sum(s.numel() for s in frozen_param_shapes.values())
+ print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+ wanted_params = len(frozen_param_shapes)
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+ avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
+ print(f'Frozen params: Have {avail_numel} numels to process.')
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+ total_params = 0
+ total_numel = 0
+ for name, shape in frozen_param_shapes.items():
+ total_params += 1
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+
+ state_dict[name] = frozen_param_fragments[name]
+
+ if debug:
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _has_callable(obj, fn):
+ attr = getattr(obj, fn, None)
+ return callable(attr)
+
+
+def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+ param_shapes = zero_model_states[0].param_shapes
+
+ # Reconstruction protocol:
+ #
+ # XXX: document this
+
+ if debug:
+ for i in range(world_size):
+ for j in range(len(fp32_flat_groups[0])):
+ print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
+
+ # XXX: memory usage doubles here (zero2)
+ num_param_groups = len(fp32_flat_groups[0])
+ merged_single_partition_of_fp32_groups = []
+ for i in range(num_param_groups):
+ merged_partitions = [sd[i] for sd in fp32_flat_groups]
+ full_single_fp32_vector = torch.cat(merged_partitions, 0)
+ merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
+ avail_numel = sum(
+ [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
+
+ if debug:
+ wanted_params = sum([len(shapes) for shapes in param_shapes])
+ wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
+ # not asserting if there is a mismatch due to possible padding
+ print(f"Have {avail_numel} numels to process.")
+ print(f"Need {wanted_numel} numels in {wanted_params} params.")
+
+ # params
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+ # out-of-core computing solution
+ total_numel = 0
+ total_params = 0
+ for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
+ offset = 0
+ avail_numel = full_single_fp32_vector.numel()
+ for name, shape in shapes.items():
+
+ unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
+ total_numel += unpartitioned_numel
+ total_params += 1
+
+ if debug:
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+ state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
+ offset += unpartitioned_numel
+
+ # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
+ # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
+ # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
+ # live optimizer object, so we are checking that the numbers are within the right range
+ align_to = 2 * world_size
+
+ def zero2_align(x):
+ return align_to * math.ceil(x / align_to)
+
+ if debug:
+ print(f"original offset={offset}, avail_numel={avail_numel}")
+
+ offset = zero2_align(offset)
+ avail_numel = zero2_align(avail_numel)
+
+ if debug:
+ print(f"aligned offset={offset}, avail_numel={avail_numel}")
+
+ # Sanity check
+ if offset != avail_numel:
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+ print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters):
+ state_dict = OrderedDict()
+
+ # buffers
+ buffers = zero_model_states[0].buffers
+ state_dict.update(buffers)
+ if debug:
+ print(f"added {len(buffers)} buffers")
+
+ if not exclude_frozen_parameters:
+ _zero2_merge_frozen_params(state_dict, zero_model_states)
+
+ _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+ # recover shared parameters
+ for pair in zero_model_states[0].shared_params:
+ if pair[1] in state_dict:
+ state_dict[pair[0]] = state_dict[pair[1]]
+
+ return state_dict
+
+
+def zero3_partitioned_param_info(unpartitioned_numel, world_size):
+ remainder = unpartitioned_numel % world_size
+ padding_numel = (world_size - remainder) if remainder else 0
+ partitioned_numel = math.ceil(unpartitioned_numel / world_size)
+ return partitioned_numel, padding_numel
+
+
+def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+ return
+
+ if debug:
+ for i in range(world_size):
+ num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
+ print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+ wanted_params = len(frozen_param_shapes)
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+ avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
+ print(f'Frozen params: Have {avail_numel} numels to process.')
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+ total_params = 0
+ total_numel = 0
+ for name, shape in zero_model_states[0].frozen_param_shapes.items():
+ total_params += 1
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+
+ param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
+ state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
+
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+ if debug:
+ print(
+ f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+ )
+
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+ param_shapes = zero_model_states[0].param_shapes
+ avail_numel = fp32_flat_groups[0].numel() * world_size
+ # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
+ # param, re-consolidating each param, while dealing with padding if any
+
+ # merge list of dicts, preserving order
+ param_shapes = {k: v for d in param_shapes for k, v in d.items()}
+
+ if debug:
+ for i in range(world_size):
+ print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
+
+ wanted_params = len(param_shapes)
+ wanted_numel = sum(shape.numel() for shape in param_shapes.values())
+ # not asserting if there is a mismatch due to possible padding
+ avail_numel = fp32_flat_groups[0].numel() * world_size
+ print(f"Trainable params: Have {avail_numel} numels to process.")
+ print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
+
+ # params
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+ # out-of-core computing solution
+ offset = 0
+ total_numel = 0
+ total_params = 0
+ for name, shape in param_shapes.items():
+
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+ total_params += 1
+
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+ if debug:
+ print(
+ f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+ )
+
+ # XXX: memory usage doubles here
+ state_dict[name] = torch.cat(
+ tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
+ 0).narrow(0, 0, unpartitioned_numel).view(shape)
+ offset += partitioned_numel
+
+ offset *= world_size
+
+ # Sanity check
+ if offset != avail_numel:
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+ print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters):
+ state_dict = OrderedDict()
+
+ # buffers
+ buffers = zero_model_states[0].buffers
+ state_dict.update(buffers)
+ if debug:
+ print(f"added {len(buffers)} buffers")
+
+ if not exclude_frozen_parameters:
+ _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
+
+ _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+ # recover shared parameters
+ for pair in zero_model_states[0].shared_params:
+ if pair[1] in state_dict:
+ state_dict[pair[0]] = state_dict[pair[1]]
+
+ return state_dict
+
+
+def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None, exclude_frozen_parameters=False):
+ """
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
+ ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
+ via a model hub.
+
+ Args:
+ - ``checkpoint_dir``: path to the desired checkpoint folder
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
+ - ``exclude_frozen_parameters``: exclude frozen parameters
+
+ Returns:
+ - pytorch ``state_dict``
+
+ Note: this approach may not work if your application doesn't have sufficient free CPU memory and
+ you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
+ the checkpoint.
+
+ A typical usage might be ::
+
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+ # do the training and checkpoint saving
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
+ model = model.cpu() # move to cpu
+ model.load_state_dict(state_dict)
+ # submit to model hub or save the model to share with others
+
+ In this example the ``model`` will no longer be usable in the deepspeed context of the same
+ application. i.e. you will need to re-initialize the deepspeed engine, since
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+ If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
+
+ """
+ if tag is None:
+ latest_path = os.path.join(checkpoint_dir, 'latest')
+ if os.path.isfile(latest_path):
+ with open(latest_path, 'r') as fd:
+ tag = fd.read().strip()
+ else:
+ raise ValueError(f"Unable to find 'latest' file at {latest_path}")
+
+ ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
+
+ if not os.path.isdir(ds_checkpoint_dir):
+ raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
+
+ return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
+
+
+def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None, exclude_frozen_parameters=False):
+ """
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
+ loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
+
+ Args:
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+ - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+ - ``exclude_frozen_parameters``: exclude frozen parameters
+ """
+
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag, exclude_frozen_parameters)
+ print(f"Saving fp32 state dict to {output_file}")
+ torch.save(state_dict, output_file)
+
+
+def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
+ """
+ 1. Put the provided model to cpu
+ 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
+ 3. Load it into the provided model
+
+ Args:
+ - ``model``: the model object to update
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+
+ Returns:
+ - ``model`: modified model
+
+ Make sure you have plenty of CPU memory available before you call this function. If you don't
+ have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
+ conveniently placed for you in the checkpoint folder.
+
+ A typical usage might be ::
+
+ from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
+ model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
+ # submit to model hub or save the model to share with others
+
+ Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context
+ of the same application. i.e. you will need to re-initialize the deepspeed engine, since
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+ """
+ logger.info(f"Extracting fp32 weights")
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+
+ logger.info(f"Overwriting model with fp32 weights")
+ model = model.cpu()
+ model.load_state_dict(state_dict, strict=False)
+
+ return model
+
+
+if __name__ == "__main__":
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument("checkpoint_dir",
+ type=str,
+ help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
+ parser.add_argument(
+ "output_file",
+ type=str,
+ help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)")
+ parser.add_argument("-t",
+ "--tag",
+ type=str,
+ default=None,
+ help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
+ parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
+ parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
+ args = parser.parse_args()
+
+ debug = args.debug
+
+ convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
+ args.output_file,
+ tag=args.tag,
+ exclude_frozen_parameters=args.exclude_frozen_parameters)
diff --git a/checkpoint-450/README.md b/checkpoint-450/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..16b1eacdd9353dec380a08ee77ce6ed5ab50f12e
--- /dev/null
+++ b/checkpoint-450/README.md
@@ -0,0 +1,202 @@
+---
+library_name: peft
+base_model: gotzmann/uni
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+### Framework versions
+
+- PEFT 0.10.0
\ No newline at end of file
diff --git a/checkpoint-450/adapter_config.json b/checkpoint-450/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3cd6dba5d79f7ca21fd4ad465cbbcac1e0960476
--- /dev/null
+++ b/checkpoint-450/adapter_config.json
@@ -0,0 +1,31 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "gotzmann/uni",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 128,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 128,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "v_proj",
+ "k_proj",
+ "q_proj",
+ "o_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": true
+}
\ No newline at end of file
diff --git a/checkpoint-450/adapter_model.safetensors b/checkpoint-450/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..48cc46217d14642e6c5cf96af194d4afe7365fe0
--- /dev/null
+++ b/checkpoint-450/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0189156bd202d07758f9b3af49e74d7a50c78a8e44013b92375c1c3deae1fd8b
+size 1048664848
diff --git a/checkpoint-450/global_step450/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt b/checkpoint-450/global_step450/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..bc77e2dc53f2730674512f300c2657a3b2772ba0
--- /dev/null
+++ b/checkpoint-450/global_step450/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bcb0a7e1c71d757416c1979d9712bf9ae8da6a7dadcf890a182653ef6edd0b9a
+size 787270042
diff --git a/checkpoint-450/global_step450/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt b/checkpoint-450/global_step450/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..934adb07c9cd7a6ae499f7f7b904ee70190b0d6d
--- /dev/null
+++ b/checkpoint-450/global_step450/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3816457b22b729fb8ccc5aea564c58c51f3feb22310620c1690c8ba78dc1f905
+size 787270042
diff --git a/checkpoint-450/global_step450/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt b/checkpoint-450/global_step450/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..9f877de0b9d4f04271bf4209f676857ab5f5caa1
--- /dev/null
+++ b/checkpoint-450/global_step450/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:677fd137e293ab4ba17ecb1a7d00cc2c79a6c8ad5b97bd667b29c6a7d9238930
+size 787270042
diff --git a/checkpoint-450/global_step450/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt b/checkpoint-450/global_step450/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..b7ad2a23667cfab036d2f8d983a86b4f46e9fddb
--- /dev/null
+++ b/checkpoint-450/global_step450/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8045c9f7d95d85a7b56e6473d48be84ad92f75bb0d3de107b55e3abfca20530a
+size 787270042
diff --git a/checkpoint-450/global_step450/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt b/checkpoint-450/global_step450/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..b37a37c078934d0b1a02e8514541456c0d8b8b9f
--- /dev/null
+++ b/checkpoint-450/global_step450/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2d1bd333d9570d6db4d01adf6edd0ce2a0925f28aa998c2d714a62d00ce593b8
+size 787270042
diff --git a/checkpoint-450/global_step450/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt b/checkpoint-450/global_step450/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..11a5ac9c1cff36b08c261dfc5794314d832a51b2
--- /dev/null
+++ b/checkpoint-450/global_step450/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:57809e10728bce5408308872b7988bfd3a973523220ec1c9c250cd0aefb9183d
+size 787270042
diff --git a/checkpoint-450/global_step450/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt b/checkpoint-450/global_step450/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..62b798efbebc4e455df0b773d4cac9b1e823c45a
--- /dev/null
+++ b/checkpoint-450/global_step450/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:170f6bf6c8936d2807f11d74ba597ce08924358b3c7079834a2c6f958f21742a
+size 787270042
diff --git a/checkpoint-450/global_step450/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt b/checkpoint-450/global_step450/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..403021b1f29b98378c0927029aca697a2d95720e
--- /dev/null
+++ b/checkpoint-450/global_step450/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0b639bc02be13a9b0459df37e333bc1b29fa8077acde40bb792dd5d10cd41250
+size 787270042
diff --git a/checkpoint-450/global_step450/zero_pp_rank_0_mp_rank_00_model_states.pt b/checkpoint-450/global_step450/zero_pp_rank_0_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..b81c53b6eb98dd7c493af564306aa7a40a090304
--- /dev/null
+++ b/checkpoint-450/global_step450/zero_pp_rank_0_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:090fe59e869db598283a64e5dc2789748a58152e1018b12e84e6ec9f22f6c1ef
+size 653742
diff --git a/checkpoint-450/global_step450/zero_pp_rank_1_mp_rank_00_model_states.pt b/checkpoint-450/global_step450/zero_pp_rank_1_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..b704b54319eb800c66e48555423a4b30d53f1144
--- /dev/null
+++ b/checkpoint-450/global_step450/zero_pp_rank_1_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a922dbda3030e4c2add850cfb9e11d1eaeeb822edb40340f263f9658f5a5a620
+size 653742
diff --git a/checkpoint-450/global_step450/zero_pp_rank_2_mp_rank_00_model_states.pt b/checkpoint-450/global_step450/zero_pp_rank_2_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..e21ae584e1f8c8042d4196587ad5c8e7d546f800
--- /dev/null
+++ b/checkpoint-450/global_step450/zero_pp_rank_2_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7779c1112419f128a16587b227e6c4b2fb78c16eed4d94fa256dddc4d7eb0116
+size 653742
diff --git a/checkpoint-450/global_step450/zero_pp_rank_3_mp_rank_00_model_states.pt b/checkpoint-450/global_step450/zero_pp_rank_3_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..5746bff4623cf6a788a2eaf54ea1e532efcc6749
--- /dev/null
+++ b/checkpoint-450/global_step450/zero_pp_rank_3_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:96c4349c65cff45d1bc9e710a0a5fcd2fb1fdcf8b86a3872f2e5693e087e4eab
+size 653742
diff --git a/checkpoint-450/global_step450/zero_pp_rank_4_mp_rank_00_model_states.pt b/checkpoint-450/global_step450/zero_pp_rank_4_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..c36f00f85a1f6367fa0237e914023cfb6f339094
--- /dev/null
+++ b/checkpoint-450/global_step450/zero_pp_rank_4_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9fa3fb28afd32f6ce6d88deb1ff883dffea4785b833f53c0bbaec9ce14e7b643
+size 653742
diff --git a/checkpoint-450/global_step450/zero_pp_rank_5_mp_rank_00_model_states.pt b/checkpoint-450/global_step450/zero_pp_rank_5_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..48285cca8a70311956589d91b9ab25b98c2e429b
--- /dev/null
+++ b/checkpoint-450/global_step450/zero_pp_rank_5_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:60ac36ebcb8f6bcefb72bf66bc760c5cc4ead6edddccbb6be2aa84af7b5191e0
+size 653742
diff --git a/checkpoint-450/global_step450/zero_pp_rank_6_mp_rank_00_model_states.pt b/checkpoint-450/global_step450/zero_pp_rank_6_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..69685d0a55c64731170e89c8b4239b38a56a8857
--- /dev/null
+++ b/checkpoint-450/global_step450/zero_pp_rank_6_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f54b4c130357abfe0a3bc252acbb4965805212c35ffb1b66c3f81d2b582307c9
+size 653742
diff --git a/checkpoint-450/global_step450/zero_pp_rank_7_mp_rank_00_model_states.pt b/checkpoint-450/global_step450/zero_pp_rank_7_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..f11cd1a664e4e1cb15f347f01098821fcfbb89f2
--- /dev/null
+++ b/checkpoint-450/global_step450/zero_pp_rank_7_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ac62b78b9cc99f4838aa43e132cb1829fa014d54127a23ef7742cce9d6d33e09
+size 653742
diff --git a/checkpoint-450/latest b/checkpoint-450/latest
new file mode 100644
index 0000000000000000000000000000000000000000..1480cb77d39204b528f1515cd41be7cdbe05e78b
--- /dev/null
+++ b/checkpoint-450/latest
@@ -0,0 +1 @@
+global_step450
\ No newline at end of file
diff --git a/checkpoint-450/rng_state_0.pth b/checkpoint-450/rng_state_0.pth
new file mode 100644
index 0000000000000000000000000000000000000000..b346349ce12dd5a17d4b91ed2a5722bb52550950
--- /dev/null
+++ b/checkpoint-450/rng_state_0.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ad8a35afd8967cbb748405387e44426e43ad127028e826eddc9b67d2ca873c85
+size 15984
diff --git a/checkpoint-450/rng_state_1.pth b/checkpoint-450/rng_state_1.pth
new file mode 100644
index 0000000000000000000000000000000000000000..68f3c6994456cb8d0592a5375d99503c8924b1c4
--- /dev/null
+++ b/checkpoint-450/rng_state_1.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f338ce80d7c441076bfc8c53b84067a0181f5a14e80c13d5acb8150b659f4d73
+size 15984
diff --git a/checkpoint-450/rng_state_2.pth b/checkpoint-450/rng_state_2.pth
new file mode 100644
index 0000000000000000000000000000000000000000..be044f6ceeed587d30e80c2f72d5aa19fdc9947b
--- /dev/null
+++ b/checkpoint-450/rng_state_2.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c9fbc9fa428939be10b46779f0eb5cd833e0da426b1cbdee77b3a55b6952235b
+size 15984
diff --git a/checkpoint-450/rng_state_3.pth b/checkpoint-450/rng_state_3.pth
new file mode 100644
index 0000000000000000000000000000000000000000..fc825249656a9b858782542bd3f4386250f1dfe0
--- /dev/null
+++ b/checkpoint-450/rng_state_3.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ac55dba0b79d5fa4699d239da2f966d52040d576d31234ac8d4632e6956481bc
+size 15984
diff --git a/checkpoint-450/rng_state_4.pth b/checkpoint-450/rng_state_4.pth
new file mode 100644
index 0000000000000000000000000000000000000000..d30f52a44be563c152ae09db6ae934da6da0d3ed
--- /dev/null
+++ b/checkpoint-450/rng_state_4.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af2d0c015100768ffa23faf3b6c2d54ea89eb045603e30e55cd211e06ff34972
+size 15984
diff --git a/checkpoint-450/rng_state_5.pth b/checkpoint-450/rng_state_5.pth
new file mode 100644
index 0000000000000000000000000000000000000000..c8715d27ab23ae545d58039cf949cc44ecc1da5e
--- /dev/null
+++ b/checkpoint-450/rng_state_5.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c60a1b40608e34bc801c8231f97b81c53b5290dfaed1b9cd0ccbeca29574a991
+size 15984
diff --git a/checkpoint-450/rng_state_6.pth b/checkpoint-450/rng_state_6.pth
new file mode 100644
index 0000000000000000000000000000000000000000..1ed791b6ef76eadf0b0c55a5733411771e2ae027
--- /dev/null
+++ b/checkpoint-450/rng_state_6.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3ad6a142a403eb9aafc4a3a9a856bca648fe31fd22d796867baca31fb13656aa
+size 15984
diff --git a/checkpoint-450/rng_state_7.pth b/checkpoint-450/rng_state_7.pth
new file mode 100644
index 0000000000000000000000000000000000000000..800c3bbbc5edf7db01a8316069d439c5fb8d8c30
--- /dev/null
+++ b/checkpoint-450/rng_state_7.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:38bc23a138cc800b22881742c0f3f9a71731a9a7111c6058a0077e6274d21773
+size 15984
diff --git a/checkpoint-450/scheduler.pt b/checkpoint-450/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..e46ab54fb8147fbaae665f86673db3154fa95ca4
--- /dev/null
+++ b/checkpoint-450/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8233715ac83c710af8824b8baf5d0d3ff081dedd7c651ffe5e70c9cee90b4ace
+size 1064
diff --git a/checkpoint-450/special_tokens_map.json b/checkpoint-450/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..72ecfeeb7e14d244c936169d2ed139eeae235ef1
--- /dev/null
+++ b/checkpoint-450/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "",
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/checkpoint-450/tokenizer.model b/checkpoint-450/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..6c00c742ce03c627d6cd5b795984876fa49fa899
--- /dev/null
+++ b/checkpoint-450/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
diff --git a/checkpoint-450/tokenizer_config.json b/checkpoint-450/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..bb5a9f09d8c0f3c32c66fc6118fe5c76c5c6fd90
--- /dev/null
+++ b/checkpoint-450/tokenizer_config.json
@@ -0,0 +1,45 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "",
+ "chat_template": "{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ '' + '### System:\\n\\n' + system_message }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '\\n\\n### Human:\\n\\n' + content }}{% elif message['role'] == 'assistant' %}{{ '\\n\\n### Assistant:\\n\\n' + content + '' }}{% endif %}{% endfor %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "",
+ "legacy": false,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "split_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "",
+ "use_default_system_prompt": false
+}
diff --git a/checkpoint-450/trainer_state.json b/checkpoint-450/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..2573fd6fdd6034ae34ad05bb5f303907770590b6
--- /dev/null
+++ b/checkpoint-450/trainer_state.json
@@ -0,0 +1,3171 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 0.823045267489712,
+ "eval_steps": 500,
+ "global_step": 450,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.0,
+ "grad_norm": 0.849355824164473,
+ "learning_rate": 4.878048780487805e-07,
+ "loss": 1.3655,
+ "step": 1
+ },
+ {
+ "epoch": 0.0,
+ "grad_norm": 10.01567518957158,
+ "learning_rate": 9.75609756097561e-07,
+ "loss": 1.5767,
+ "step": 2
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.6466000875559635,
+ "learning_rate": 1.4634146341463414e-06,
+ "loss": 1.3913,
+ "step": 3
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.6644565932010504,
+ "learning_rate": 1.951219512195122e-06,
+ "loss": 1.3218,
+ "step": 4
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.571354207588475,
+ "learning_rate": 2.4390243902439027e-06,
+ "loss": 1.3597,
+ "step": 5
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.31036262839244955,
+ "learning_rate": 2.926829268292683e-06,
+ "loss": 1.2832,
+ "step": 6
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.2622135027188184,
+ "learning_rate": 3.414634146341464e-06,
+ "loss": 1.2161,
+ "step": 7
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.296824630261661,
+ "learning_rate": 3.902439024390244e-06,
+ "loss": 1.2985,
+ "step": 8
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.2557267467361569,
+ "learning_rate": 4.390243902439025e-06,
+ "loss": 1.3175,
+ "step": 9
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.23418939513890769,
+ "learning_rate": 4.8780487804878055e-06,
+ "loss": 1.2617,
+ "step": 10
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.2364760983285843,
+ "learning_rate": 5.365853658536586e-06,
+ "loss": 1.3103,
+ "step": 11
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.23893034721889,
+ "learning_rate": 5.853658536585366e-06,
+ "loss": 1.2405,
+ "step": 12
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.25563593295485887,
+ "learning_rate": 6.341463414634147e-06,
+ "loss": 1.2831,
+ "step": 13
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.23239975352661665,
+ "learning_rate": 6.829268292682928e-06,
+ "loss": 1.3125,
+ "step": 14
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.3092813858209507,
+ "learning_rate": 7.317073170731707e-06,
+ "loss": 1.2422,
+ "step": 15
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.282563380367434,
+ "learning_rate": 7.804878048780489e-06,
+ "loss": 1.2453,
+ "step": 16
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.22065680088315018,
+ "learning_rate": 8.292682926829268e-06,
+ "loss": 1.2491,
+ "step": 17
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.22777800877980184,
+ "learning_rate": 8.78048780487805e-06,
+ "loss": 1.2655,
+ "step": 18
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.22145212540177928,
+ "learning_rate": 9.268292682926831e-06,
+ "loss": 1.2413,
+ "step": 19
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.22482351883112714,
+ "learning_rate": 9.756097560975611e-06,
+ "loss": 1.2653,
+ "step": 20
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.20823080508385733,
+ "learning_rate": 1.024390243902439e-05,
+ "loss": 1.2374,
+ "step": 21
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.26025492562935737,
+ "learning_rate": 1.0731707317073172e-05,
+ "loss": 1.2065,
+ "step": 22
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.2150252124176173,
+ "learning_rate": 1.1219512195121953e-05,
+ "loss": 1.2782,
+ "step": 23
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.2505915177425618,
+ "learning_rate": 1.1707317073170731e-05,
+ "loss": 1.2742,
+ "step": 24
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.20129223044786942,
+ "learning_rate": 1.2195121951219513e-05,
+ "loss": 1.3366,
+ "step": 25
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.1973508510397107,
+ "learning_rate": 1.2682926829268294e-05,
+ "loss": 1.2476,
+ "step": 26
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.27103325392437194,
+ "learning_rate": 1.3170731707317076e-05,
+ "loss": 1.2325,
+ "step": 27
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.17954976411006285,
+ "learning_rate": 1.3658536585365855e-05,
+ "loss": 1.2523,
+ "step": 28
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.22216997851088888,
+ "learning_rate": 1.4146341463414635e-05,
+ "loss": 1.3297,
+ "step": 29
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.2071458864548587,
+ "learning_rate": 1.4634146341463415e-05,
+ "loss": 1.2127,
+ "step": 30
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.18039422081622164,
+ "learning_rate": 1.5121951219512196e-05,
+ "loss": 1.2509,
+ "step": 31
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.18631254372974412,
+ "learning_rate": 1.5609756097560978e-05,
+ "loss": 1.2247,
+ "step": 32
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.18843872523649827,
+ "learning_rate": 1.6097560975609757e-05,
+ "loss": 1.195,
+ "step": 33
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.2163847267778325,
+ "learning_rate": 1.6585365853658537e-05,
+ "loss": 1.2179,
+ "step": 34
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.19687688475496104,
+ "learning_rate": 1.7073170731707317e-05,
+ "loss": 1.2763,
+ "step": 35
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.20409643064887947,
+ "learning_rate": 1.75609756097561e-05,
+ "loss": 1.253,
+ "step": 36
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.1879182661759335,
+ "learning_rate": 1.804878048780488e-05,
+ "loss": 1.2586,
+ "step": 37
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.19400648948514373,
+ "learning_rate": 1.8536585365853663e-05,
+ "loss": 1.2154,
+ "step": 38
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.1878879343148452,
+ "learning_rate": 1.902439024390244e-05,
+ "loss": 1.2304,
+ "step": 39
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.17687475469924052,
+ "learning_rate": 1.9512195121951222e-05,
+ "loss": 1.2351,
+ "step": 40
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.18223935625384885,
+ "learning_rate": 2e-05,
+ "loss": 1.2222,
+ "step": 41
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.1943061629408338,
+ "learning_rate": 2.048780487804878e-05,
+ "loss": 1.2044,
+ "step": 42
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.17027514338700078,
+ "learning_rate": 2.0975609756097564e-05,
+ "loss": 1.1548,
+ "step": 43
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.18553769630586192,
+ "learning_rate": 2.1463414634146344e-05,
+ "loss": 1.2721,
+ "step": 44
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.19732826914228765,
+ "learning_rate": 2.1951219512195124e-05,
+ "loss": 1.3097,
+ "step": 45
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.18714230986631472,
+ "learning_rate": 2.2439024390243907e-05,
+ "loss": 1.2662,
+ "step": 46
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.19988987568002223,
+ "learning_rate": 2.2926829268292683e-05,
+ "loss": 1.2904,
+ "step": 47
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.17744650133390918,
+ "learning_rate": 2.3414634146341463e-05,
+ "loss": 1.1825,
+ "step": 48
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.16576734763834533,
+ "learning_rate": 2.3902439024390246e-05,
+ "loss": 1.1858,
+ "step": 49
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.179591794065527,
+ "learning_rate": 2.4390243902439026e-05,
+ "loss": 1.2711,
+ "step": 50
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.17923464471176911,
+ "learning_rate": 2.4878048780487805e-05,
+ "loss": 1.2289,
+ "step": 51
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.18991742907836837,
+ "learning_rate": 2.536585365853659e-05,
+ "loss": 1.3097,
+ "step": 52
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.19849796137254636,
+ "learning_rate": 2.5853658536585368e-05,
+ "loss": 1.2489,
+ "step": 53
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.17452371110976383,
+ "learning_rate": 2.634146341463415e-05,
+ "loss": 1.2461,
+ "step": 54
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.17671022353085036,
+ "learning_rate": 2.682926829268293e-05,
+ "loss": 1.153,
+ "step": 55
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.36820559192096686,
+ "learning_rate": 2.731707317073171e-05,
+ "loss": 1.2431,
+ "step": 56
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.20331468526494198,
+ "learning_rate": 2.7804878048780487e-05,
+ "loss": 1.2575,
+ "step": 57
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.2402486598118377,
+ "learning_rate": 2.829268292682927e-05,
+ "loss": 1.2538,
+ "step": 58
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.2549409484173144,
+ "learning_rate": 2.878048780487805e-05,
+ "loss": 1.2065,
+ "step": 59
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.2053105349872685,
+ "learning_rate": 2.926829268292683e-05,
+ "loss": 1.2094,
+ "step": 60
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.17971910872957886,
+ "learning_rate": 2.9756097560975613e-05,
+ "loss": 1.228,
+ "step": 61
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.1885853654992973,
+ "learning_rate": 3.0243902439024392e-05,
+ "loss": 1.2286,
+ "step": 62
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.1848524571968613,
+ "learning_rate": 3.073170731707317e-05,
+ "loss": 1.2718,
+ "step": 63
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.18734105883548513,
+ "learning_rate": 3.1219512195121955e-05,
+ "loss": 1.2357,
+ "step": 64
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.17774668052121825,
+ "learning_rate": 3.170731707317074e-05,
+ "loss": 1.1509,
+ "step": 65
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.17890968008080646,
+ "learning_rate": 3.2195121951219514e-05,
+ "loss": 1.1924,
+ "step": 66
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.18249273371332375,
+ "learning_rate": 3.268292682926829e-05,
+ "loss": 1.2545,
+ "step": 67
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.21064122671902577,
+ "learning_rate": 3.3170731707317074e-05,
+ "loss": 1.2832,
+ "step": 68
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.1820064171955093,
+ "learning_rate": 3.365853658536586e-05,
+ "loss": 1.2071,
+ "step": 69
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.16996662800553433,
+ "learning_rate": 3.414634146341463e-05,
+ "loss": 1.2073,
+ "step": 70
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.1618669302922445,
+ "learning_rate": 3.4634146341463416e-05,
+ "loss": 1.1289,
+ "step": 71
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.18948744950985544,
+ "learning_rate": 3.51219512195122e-05,
+ "loss": 1.2915,
+ "step": 72
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.18326143691603383,
+ "learning_rate": 3.5609756097560976e-05,
+ "loss": 1.2238,
+ "step": 73
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.17410704510700503,
+ "learning_rate": 3.609756097560976e-05,
+ "loss": 1.1784,
+ "step": 74
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.1983667344995625,
+ "learning_rate": 3.658536585365854e-05,
+ "loss": 1.2452,
+ "step": 75
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.3416310763369357,
+ "learning_rate": 3.7073170731707325e-05,
+ "loss": 1.1972,
+ "step": 76
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.2776466983511955,
+ "learning_rate": 3.75609756097561e-05,
+ "loss": 1.3121,
+ "step": 77
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.20026129636576834,
+ "learning_rate": 3.804878048780488e-05,
+ "loss": 1.2436,
+ "step": 78
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.21064549243917835,
+ "learning_rate": 3.853658536585366e-05,
+ "loss": 1.2064,
+ "step": 79
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.22119482175714267,
+ "learning_rate": 3.9024390243902444e-05,
+ "loss": 1.2715,
+ "step": 80
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.23047133748844142,
+ "learning_rate": 3.951219512195122e-05,
+ "loss": 1.2888,
+ "step": 81
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.18741863156973176,
+ "learning_rate": 4e-05,
+ "loss": 1.248,
+ "step": 82
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.1747859810629604,
+ "learning_rate": 4.0487804878048786e-05,
+ "loss": 1.1683,
+ "step": 83
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.1896944798413341,
+ "learning_rate": 4.097560975609756e-05,
+ "loss": 1.2155,
+ "step": 84
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.18724128114363303,
+ "learning_rate": 4.1463414634146346e-05,
+ "loss": 1.2273,
+ "step": 85
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.17368125504855478,
+ "learning_rate": 4.195121951219513e-05,
+ "loss": 1.224,
+ "step": 86
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.18371141013625703,
+ "learning_rate": 4.2439024390243905e-05,
+ "loss": 1.2294,
+ "step": 87
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.1791029365673714,
+ "learning_rate": 4.292682926829269e-05,
+ "loss": 1.2895,
+ "step": 88
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.20259974283859655,
+ "learning_rate": 4.341463414634147e-05,
+ "loss": 1.1841,
+ "step": 89
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.17457456183272174,
+ "learning_rate": 4.390243902439025e-05,
+ "loss": 1.2357,
+ "step": 90
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.1815824380789748,
+ "learning_rate": 4.439024390243903e-05,
+ "loss": 1.2304,
+ "step": 91
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.17566480599583392,
+ "learning_rate": 4.4878048780487814e-05,
+ "loss": 1.242,
+ "step": 92
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.18422975005984474,
+ "learning_rate": 4.536585365853658e-05,
+ "loss": 1.2177,
+ "step": 93
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.16796781877940678,
+ "learning_rate": 4.5853658536585366e-05,
+ "loss": 1.1482,
+ "step": 94
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.18636131653783305,
+ "learning_rate": 4.634146341463415e-05,
+ "loss": 1.1758,
+ "step": 95
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.1823665700289814,
+ "learning_rate": 4.6829268292682926e-05,
+ "loss": 1.289,
+ "step": 96
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.1719900691262439,
+ "learning_rate": 4.731707317073171e-05,
+ "loss": 1.1626,
+ "step": 97
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.17937994168039778,
+ "learning_rate": 4.780487804878049e-05,
+ "loss": 1.175,
+ "step": 98
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.16631851422106986,
+ "learning_rate": 4.829268292682927e-05,
+ "loss": 1.2177,
+ "step": 99
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.19143696232800309,
+ "learning_rate": 4.878048780487805e-05,
+ "loss": 1.3071,
+ "step": 100
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.17859506638780318,
+ "learning_rate": 4.9268292682926835e-05,
+ "loss": 1.2351,
+ "step": 101
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.18381520321248196,
+ "learning_rate": 4.975609756097561e-05,
+ "loss": 1.2342,
+ "step": 102
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.17968218683773912,
+ "learning_rate": 5.0243902439024394e-05,
+ "loss": 1.2074,
+ "step": 103
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.18139489969339018,
+ "learning_rate": 5.073170731707318e-05,
+ "loss": 1.1558,
+ "step": 104
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.17366624842514394,
+ "learning_rate": 5.121951219512195e-05,
+ "loss": 1.1897,
+ "step": 105
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.16034845455223745,
+ "learning_rate": 5.1707317073170736e-05,
+ "loss": 1.179,
+ "step": 106
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.17583069577827776,
+ "learning_rate": 5.219512195121952e-05,
+ "loss": 1.1856,
+ "step": 107
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.1853758076989552,
+ "learning_rate": 5.26829268292683e-05,
+ "loss": 1.2072,
+ "step": 108
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.19597443965936462,
+ "learning_rate": 5.317073170731708e-05,
+ "loss": 1.2271,
+ "step": 109
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.1899206334098331,
+ "learning_rate": 5.365853658536586e-05,
+ "loss": 1.1961,
+ "step": 110
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.17463763837757018,
+ "learning_rate": 5.4146341463414645e-05,
+ "loss": 1.2049,
+ "step": 111
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.20431371701229986,
+ "learning_rate": 5.463414634146342e-05,
+ "loss": 1.2891,
+ "step": 112
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1814475107638498,
+ "learning_rate": 5.51219512195122e-05,
+ "loss": 1.2346,
+ "step": 113
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1883849423207823,
+ "learning_rate": 5.5609756097560974e-05,
+ "loss": 1.244,
+ "step": 114
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1857258128640568,
+ "learning_rate": 5.609756097560976e-05,
+ "loss": 1.2669,
+ "step": 115
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1740768514118401,
+ "learning_rate": 5.658536585365854e-05,
+ "loss": 1.2414,
+ "step": 116
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1919320335584178,
+ "learning_rate": 5.7073170731707317e-05,
+ "loss": 1.2886,
+ "step": 117
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.18288775167828136,
+ "learning_rate": 5.75609756097561e-05,
+ "loss": 1.1875,
+ "step": 118
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.18208588867750863,
+ "learning_rate": 5.804878048780488e-05,
+ "loss": 1.2388,
+ "step": 119
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.1743260015658331,
+ "learning_rate": 5.853658536585366e-05,
+ "loss": 1.1762,
+ "step": 120
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.17856046291517946,
+ "learning_rate": 5.902439024390244e-05,
+ "loss": 1.2888,
+ "step": 121
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.17493794870966536,
+ "learning_rate": 5.9512195121951225e-05,
+ "loss": 1.2222,
+ "step": 122
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.1909202655203384,
+ "learning_rate": 6.000000000000001e-05,
+ "loss": 1.2414,
+ "step": 123
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.18345819482834988,
+ "learning_rate": 6.0487804878048785e-05,
+ "loss": 1.2756,
+ "step": 124
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.2057069352956621,
+ "learning_rate": 6.097560975609757e-05,
+ "loss": 1.261,
+ "step": 125
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.299775882469108,
+ "learning_rate": 6.146341463414634e-05,
+ "loss": 1.2566,
+ "step": 126
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.1869687633018095,
+ "learning_rate": 6.195121951219513e-05,
+ "loss": 1.3039,
+ "step": 127
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.17747149926197442,
+ "learning_rate": 6.243902439024391e-05,
+ "loss": 1.2524,
+ "step": 128
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.17885157788044242,
+ "learning_rate": 6.29268292682927e-05,
+ "loss": 1.2455,
+ "step": 129
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.17617298187845123,
+ "learning_rate": 6.341463414634148e-05,
+ "loss": 1.2009,
+ "step": 130
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.20164176323497066,
+ "learning_rate": 6.390243902439025e-05,
+ "loss": 1.2634,
+ "step": 131
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.20459903417307612,
+ "learning_rate": 6.439024390243903e-05,
+ "loss": 1.1963,
+ "step": 132
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.1863755486334296,
+ "learning_rate": 6.487804878048781e-05,
+ "loss": 1.2387,
+ "step": 133
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.19265866140295207,
+ "learning_rate": 6.536585365853658e-05,
+ "loss": 1.2688,
+ "step": 134
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.1823425868969493,
+ "learning_rate": 6.585365853658536e-05,
+ "loss": 1.2041,
+ "step": 135
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.2016853266472781,
+ "learning_rate": 6.634146341463415e-05,
+ "loss": 1.1223,
+ "step": 136
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.17282675192463448,
+ "learning_rate": 6.682926829268293e-05,
+ "loss": 1.1879,
+ "step": 137
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.17398811693399288,
+ "learning_rate": 6.731707317073171e-05,
+ "loss": 1.2682,
+ "step": 138
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.18516916965434696,
+ "learning_rate": 6.78048780487805e-05,
+ "loss": 1.1666,
+ "step": 139
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.1852213129647933,
+ "learning_rate": 6.829268292682927e-05,
+ "loss": 1.2501,
+ "step": 140
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.17915948766591883,
+ "learning_rate": 6.878048780487805e-05,
+ "loss": 1.2264,
+ "step": 141
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.21599939417233183,
+ "learning_rate": 6.926829268292683e-05,
+ "loss": 1.2376,
+ "step": 142
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.17839304459521851,
+ "learning_rate": 6.975609756097562e-05,
+ "loss": 1.2353,
+ "step": 143
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.20826913231380875,
+ "learning_rate": 7.02439024390244e-05,
+ "loss": 1.1901,
+ "step": 144
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.20788894913361589,
+ "learning_rate": 7.073170731707318e-05,
+ "loss": 1.2577,
+ "step": 145
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.18420055842301297,
+ "learning_rate": 7.121951219512195e-05,
+ "loss": 1.1393,
+ "step": 146
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.19903048468685589,
+ "learning_rate": 7.170731707317073e-05,
+ "loss": 1.2321,
+ "step": 147
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.19074116314985748,
+ "learning_rate": 7.219512195121952e-05,
+ "loss": 1.1912,
+ "step": 148
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.2353816469403903,
+ "learning_rate": 7.26829268292683e-05,
+ "loss": 1.28,
+ "step": 149
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.21634875684769345,
+ "learning_rate": 7.317073170731708e-05,
+ "loss": 1.3312,
+ "step": 150
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.18290969006743918,
+ "learning_rate": 7.365853658536587e-05,
+ "loss": 1.2214,
+ "step": 151
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.18484243897545208,
+ "learning_rate": 7.414634146341465e-05,
+ "loss": 1.1895,
+ "step": 152
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.21882343112978872,
+ "learning_rate": 7.463414634146342e-05,
+ "loss": 1.2219,
+ "step": 153
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.19868284379241205,
+ "learning_rate": 7.51219512195122e-05,
+ "loss": 1.2176,
+ "step": 154
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.20912516312950613,
+ "learning_rate": 7.560975609756097e-05,
+ "loss": 1.242,
+ "step": 155
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.23811880045549916,
+ "learning_rate": 7.609756097560976e-05,
+ "loss": 1.2838,
+ "step": 156
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.19511077122033713,
+ "learning_rate": 7.658536585365854e-05,
+ "loss": 1.1594,
+ "step": 157
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.20094129399534238,
+ "learning_rate": 7.707317073170732e-05,
+ "loss": 1.2966,
+ "step": 158
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.19366245038292418,
+ "learning_rate": 7.75609756097561e-05,
+ "loss": 1.2246,
+ "step": 159
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.19409570223867306,
+ "learning_rate": 7.804878048780489e-05,
+ "loss": 1.2312,
+ "step": 160
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.2087258457033805,
+ "learning_rate": 7.853658536585366e-05,
+ "loss": 1.2169,
+ "step": 161
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.18765223996270428,
+ "learning_rate": 7.902439024390244e-05,
+ "loss": 1.2383,
+ "step": 162
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.20734180224147242,
+ "learning_rate": 7.951219512195122e-05,
+ "loss": 1.2587,
+ "step": 163
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.24690929540287834,
+ "learning_rate": 8e-05,
+ "loss": 1.1951,
+ "step": 164
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.2003538797619543,
+ "learning_rate": 7.999990914797545e-05,
+ "loss": 1.1982,
+ "step": 165
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.22469075613510484,
+ "learning_rate": 7.99996365923145e-05,
+ "loss": 1.2355,
+ "step": 166
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.21870100788336058,
+ "learning_rate": 7.999918233425526e-05,
+ "loss": 1.1103,
+ "step": 167
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.20939989594131886,
+ "learning_rate": 7.999854637586122e-05,
+ "loss": 1.1966,
+ "step": 168
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.43108211416237796,
+ "learning_rate": 7.999772872002132e-05,
+ "loss": 1.2882,
+ "step": 169
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.27045413432174487,
+ "learning_rate": 7.999672937044984e-05,
+ "loss": 1.2399,
+ "step": 170
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.19700483036740515,
+ "learning_rate": 7.999554833168642e-05,
+ "loss": 1.202,
+ "step": 171
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.3335979493370708,
+ "learning_rate": 7.999418560909604e-05,
+ "loss": 1.1995,
+ "step": 172
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.3165803974474567,
+ "learning_rate": 7.999264120886902e-05,
+ "loss": 1.1569,
+ "step": 173
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.1951699080346223,
+ "learning_rate": 7.999091513802093e-05,
+ "loss": 1.1778,
+ "step": 174
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.2087559121749787,
+ "learning_rate": 7.998900740439265e-05,
+ "loss": 1.1736,
+ "step": 175
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.20345180977460478,
+ "learning_rate": 7.998691801665024e-05,
+ "loss": 1.2281,
+ "step": 176
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.24617644827252333,
+ "learning_rate": 7.998464698428495e-05,
+ "loss": 1.2072,
+ "step": 177
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.2469050959356265,
+ "learning_rate": 7.998219431761318e-05,
+ "loss": 1.2242,
+ "step": 178
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.19529317748460623,
+ "learning_rate": 7.997956002777642e-05,
+ "loss": 1.2567,
+ "step": 179
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.19048389491381376,
+ "learning_rate": 7.99767441267412e-05,
+ "loss": 1.2982,
+ "step": 180
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.2085799116493225,
+ "learning_rate": 7.997374662729904e-05,
+ "loss": 1.1254,
+ "step": 181
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.20636853256378995,
+ "learning_rate": 7.997056754306636e-05,
+ "loss": 1.2435,
+ "step": 182
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.20590016382290252,
+ "learning_rate": 7.99672068884845e-05,
+ "loss": 1.2658,
+ "step": 183
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.1931166169764433,
+ "learning_rate": 7.996366467881955e-05,
+ "loss": 1.1637,
+ "step": 184
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.18873318157988098,
+ "learning_rate": 7.995994093016237e-05,
+ "loss": 1.1335,
+ "step": 185
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.19210254625199108,
+ "learning_rate": 7.995603565942846e-05,
+ "loss": 1.1928,
+ "step": 186
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.2130986479765664,
+ "learning_rate": 7.995194888435792e-05,
+ "loss": 1.2158,
+ "step": 187
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.22003854501814088,
+ "learning_rate": 7.994768062351532e-05,
+ "loss": 1.2288,
+ "step": 188
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.20330803191993058,
+ "learning_rate": 7.994323089628968e-05,
+ "loss": 1.2426,
+ "step": 189
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.20567314642208634,
+ "learning_rate": 7.993859972289434e-05,
+ "loss": 1.2649,
+ "step": 190
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.21556663727342962,
+ "learning_rate": 7.993378712436686e-05,
+ "loss": 1.2545,
+ "step": 191
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.20309165469109888,
+ "learning_rate": 7.992879312256897e-05,
+ "loss": 1.3338,
+ "step": 192
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.19574356669421325,
+ "learning_rate": 7.992361774018641e-05,
+ "loss": 1.278,
+ "step": 193
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.2763613746722313,
+ "learning_rate": 7.991826100072891e-05,
+ "loss": 1.2571,
+ "step": 194
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.19346552479915102,
+ "learning_rate": 7.991272292852996e-05,
+ "loss": 1.2027,
+ "step": 195
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.2281167812123908,
+ "learning_rate": 7.990700354874683e-05,
+ "loss": 1.2586,
+ "step": 196
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.19699013712137542,
+ "learning_rate": 7.990110288736042e-05,
+ "loss": 1.1371,
+ "step": 197
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.21768209981475933,
+ "learning_rate": 7.989502097117503e-05,
+ "loss": 1.2522,
+ "step": 198
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.21335427847754582,
+ "learning_rate": 7.988875782781838e-05,
+ "loss": 1.2437,
+ "step": 199
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.21856710629066897,
+ "learning_rate": 7.988231348574147e-05,
+ "loss": 1.2135,
+ "step": 200
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.20482062658774797,
+ "learning_rate": 7.987568797421836e-05,
+ "loss": 1.1755,
+ "step": 201
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.2017756813960897,
+ "learning_rate": 7.986888132334608e-05,
+ "loss": 1.1699,
+ "step": 202
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.20496443848153809,
+ "learning_rate": 7.986189356404458e-05,
+ "loss": 1.2125,
+ "step": 203
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.2134603800558358,
+ "learning_rate": 7.985472472805643e-05,
+ "loss": 1.2391,
+ "step": 204
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.2364175573420861,
+ "learning_rate": 7.98473748479468e-05,
+ "loss": 1.2384,
+ "step": 205
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.1872419861598724,
+ "learning_rate": 7.983984395710326e-05,
+ "loss": 1.1457,
+ "step": 206
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.28222194007095774,
+ "learning_rate": 7.983213208973566e-05,
+ "loss": 1.2952,
+ "step": 207
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.1916094851162064,
+ "learning_rate": 7.982423928087593e-05,
+ "loss": 1.1763,
+ "step": 208
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.18446245256166657,
+ "learning_rate": 7.981616556637795e-05,
+ "loss": 1.1863,
+ "step": 209
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.195191961022491,
+ "learning_rate": 7.980791098291737e-05,
+ "loss": 1.2036,
+ "step": 210
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.2652439657825496,
+ "learning_rate": 7.979947556799151e-05,
+ "loss": 1.2834,
+ "step": 211
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.24308438957843412,
+ "learning_rate": 7.979085935991906e-05,
+ "loss": 1.234,
+ "step": 212
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.21294701043622016,
+ "learning_rate": 7.978206239784004e-05,
+ "loss": 1.3006,
+ "step": 213
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.25809277041859524,
+ "learning_rate": 7.977308472171553e-05,
+ "loss": 1.2272,
+ "step": 214
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.193463860107294,
+ "learning_rate": 7.976392637232754e-05,
+ "loss": 1.2295,
+ "step": 215
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.2150023760609626,
+ "learning_rate": 7.975458739127877e-05,
+ "loss": 1.2135,
+ "step": 216
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.22590495955605894,
+ "learning_rate": 7.974506782099253e-05,
+ "loss": 1.2532,
+ "step": 217
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.21023744668403702,
+ "learning_rate": 7.973536770471242e-05,
+ "loss": 1.2472,
+ "step": 218
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.2345749799511543,
+ "learning_rate": 7.972548708650218e-05,
+ "loss": 1.1791,
+ "step": 219
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.2158876734005217,
+ "learning_rate": 7.971542601124553e-05,
+ "loss": 1.2483,
+ "step": 220
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.29455339949432446,
+ "learning_rate": 7.970518452464593e-05,
+ "loss": 1.2894,
+ "step": 221
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.23983708730626851,
+ "learning_rate": 7.969476267322636e-05,
+ "loss": 1.271,
+ "step": 222
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.1922400905426158,
+ "learning_rate": 7.968416050432912e-05,
+ "loss": 1.2139,
+ "step": 223
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.2238136844422931,
+ "learning_rate": 7.967337806611568e-05,
+ "loss": 1.2655,
+ "step": 224
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.21230292828267672,
+ "learning_rate": 7.966241540756631e-05,
+ "loss": 1.2406,
+ "step": 225
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.26656119419070456,
+ "learning_rate": 7.965127257848004e-05,
+ "loss": 1.2595,
+ "step": 226
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.22381385502992684,
+ "learning_rate": 7.963994962947426e-05,
+ "loss": 1.1737,
+ "step": 227
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20056702203994298,
+ "learning_rate": 7.962844661198462e-05,
+ "loss": 1.1969,
+ "step": 228
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20148701321526885,
+ "learning_rate": 7.961676357826478e-05,
+ "loss": 1.2151,
+ "step": 229
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20034834807028637,
+ "learning_rate": 7.960490058138604e-05,
+ "loss": 1.1455,
+ "step": 230
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.21050838521846033,
+ "learning_rate": 7.959285767523732e-05,
+ "loss": 1.2223,
+ "step": 231
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20904772138969777,
+ "learning_rate": 7.95806349145247e-05,
+ "loss": 1.2534,
+ "step": 232
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.20307877304792957,
+ "learning_rate": 7.956823235477134e-05,
+ "loss": 1.1352,
+ "step": 233
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.20501105270897094,
+ "learning_rate": 7.95556500523171e-05,
+ "loss": 1.2031,
+ "step": 234
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.19800586972038586,
+ "learning_rate": 7.954288806431838e-05,
+ "loss": 1.2567,
+ "step": 235
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.2175102450594135,
+ "learning_rate": 7.952994644874777e-05,
+ "loss": 1.2538,
+ "step": 236
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.22698189300067595,
+ "learning_rate": 7.951682526439391e-05,
+ "loss": 1.3088,
+ "step": 237
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.19208392014975315,
+ "learning_rate": 7.950352457086109e-05,
+ "loss": 1.2336,
+ "step": 238
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.27004086334319655,
+ "learning_rate": 7.949004442856905e-05,
+ "loss": 1.2012,
+ "step": 239
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.23420974954538043,
+ "learning_rate": 7.947638489875272e-05,
+ "loss": 1.2244,
+ "step": 240
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.20514399124802024,
+ "learning_rate": 7.946254604346186e-05,
+ "loss": 1.2548,
+ "step": 241
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.19334973602372896,
+ "learning_rate": 7.944852792556092e-05,
+ "loss": 1.2104,
+ "step": 242
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.1992640714537956,
+ "learning_rate": 7.943433060872858e-05,
+ "loss": 1.2628,
+ "step": 243
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.203284617090413,
+ "learning_rate": 7.941995415745761e-05,
+ "loss": 1.2002,
+ "step": 244
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.22795306969682058,
+ "learning_rate": 7.94053986370545e-05,
+ "loss": 1.2215,
+ "step": 245
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.20789041346838505,
+ "learning_rate": 7.939066411363915e-05,
+ "loss": 1.0998,
+ "step": 246
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.22354868884742066,
+ "learning_rate": 7.937575065414464e-05,
+ "loss": 1.2564,
+ "step": 247
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.21176392726647736,
+ "learning_rate": 7.936065832631687e-05,
+ "loss": 1.2816,
+ "step": 248
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.19967179557235587,
+ "learning_rate": 7.934538719871427e-05,
+ "loss": 1.1961,
+ "step": 249
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.210819577350627,
+ "learning_rate": 7.932993734070747e-05,
+ "loss": 1.2167,
+ "step": 250
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.21537794551756187,
+ "learning_rate": 7.931430882247903e-05,
+ "loss": 1.2341,
+ "step": 251
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.22850872387256574,
+ "learning_rate": 7.929850171502304e-05,
+ "loss": 1.1686,
+ "step": 252
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.22380366415076383,
+ "learning_rate": 7.928251609014493e-05,
+ "loss": 1.1462,
+ "step": 253
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.22426923149036065,
+ "learning_rate": 7.926635202046102e-05,
+ "loss": 1.1792,
+ "step": 254
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.42082703321103965,
+ "learning_rate": 7.925000957939822e-05,
+ "loss": 1.2718,
+ "step": 255
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.2235432774854074,
+ "learning_rate": 7.92334888411937e-05,
+ "loss": 1.2598,
+ "step": 256
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.281644028934108,
+ "learning_rate": 7.92167898808946e-05,
+ "loss": 1.2205,
+ "step": 257
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.2037705143888748,
+ "learning_rate": 7.919991277435763e-05,
+ "loss": 1.1737,
+ "step": 258
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.20917419230028977,
+ "learning_rate": 7.918285759824879e-05,
+ "loss": 1.2035,
+ "step": 259
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.20510847570635518,
+ "learning_rate": 7.916562443004292e-05,
+ "loss": 1.2135,
+ "step": 260
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.25172483071092466,
+ "learning_rate": 7.914821334802342e-05,
+ "loss": 1.2218,
+ "step": 261
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.21102706700634313,
+ "learning_rate": 7.91306244312819e-05,
+ "loss": 1.1738,
+ "step": 262
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.22626060872645815,
+ "learning_rate": 7.911285775971781e-05,
+ "loss": 1.238,
+ "step": 263
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.22448567539778486,
+ "learning_rate": 7.909491341403805e-05,
+ "loss": 1.2404,
+ "step": 264
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.2019099786139193,
+ "learning_rate": 7.907679147575661e-05,
+ "loss": 1.213,
+ "step": 265
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.24307234839096267,
+ "learning_rate": 7.905849202719422e-05,
+ "loss": 1.2322,
+ "step": 266
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.19801890521743487,
+ "learning_rate": 7.904001515147802e-05,
+ "loss": 1.2448,
+ "step": 267
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.2102742273575385,
+ "learning_rate": 7.902136093254106e-05,
+ "loss": 1.1657,
+ "step": 268
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.2173464476815016,
+ "learning_rate": 7.900252945512201e-05,
+ "loss": 1.2549,
+ "step": 269
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.20957275458699595,
+ "learning_rate": 7.898352080476479e-05,
+ "loss": 1.2536,
+ "step": 270
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.20691966388952363,
+ "learning_rate": 7.896433506781811e-05,
+ "loss": 1.2661,
+ "step": 271
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.2276662275112648,
+ "learning_rate": 7.894497233143509e-05,
+ "loss": 1.2409,
+ "step": 272
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.23854109569301263,
+ "learning_rate": 7.892543268357297e-05,
+ "loss": 1.2681,
+ "step": 273
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.2233864156677627,
+ "learning_rate": 7.890571621299252e-05,
+ "loss": 1.1687,
+ "step": 274
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.20114129147925475,
+ "learning_rate": 7.888582300925787e-05,
+ "loss": 1.2184,
+ "step": 275
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.2154654670569462,
+ "learning_rate": 7.886575316273586e-05,
+ "loss": 1.1982,
+ "step": 276
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.2292982209343639,
+ "learning_rate": 7.884550676459583e-05,
+ "loss": 1.2129,
+ "step": 277
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.21302713135229548,
+ "learning_rate": 7.882508390680908e-05,
+ "loss": 1.1605,
+ "step": 278
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.2123661020671048,
+ "learning_rate": 7.88044846821485e-05,
+ "loss": 1.2308,
+ "step": 279
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.2080577410800404,
+ "learning_rate": 7.878370918418818e-05,
+ "loss": 1.2195,
+ "step": 280
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.19663901881127385,
+ "learning_rate": 7.876275750730289e-05,
+ "loss": 1.1591,
+ "step": 281
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.20534502031312163,
+ "learning_rate": 7.874162974666776e-05,
+ "loss": 1.2664,
+ "step": 282
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.23240445399513837,
+ "learning_rate": 7.872032599825779e-05,
+ "loss": 1.2151,
+ "step": 283
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.2672527316717507,
+ "learning_rate": 7.86988463588474e-05,
+ "loss": 1.2406,
+ "step": 284
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.19893903058743695,
+ "learning_rate": 7.867719092601003e-05,
+ "loss": 1.1291,
+ "step": 285
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.33275268109930917,
+ "learning_rate": 7.865535979811768e-05,
+ "loss": 1.1406,
+ "step": 286
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.2373619455690358,
+ "learning_rate": 7.863335307434045e-05,
+ "loss": 1.2799,
+ "step": 287
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.263235735390858,
+ "learning_rate": 7.861117085464612e-05,
+ "loss": 1.2415,
+ "step": 288
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.25884281780784324,
+ "learning_rate": 7.858881323979965e-05,
+ "loss": 1.3919,
+ "step": 289
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.25426288332255736,
+ "learning_rate": 7.85662803313628e-05,
+ "loss": 1.174,
+ "step": 290
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.26655405527881243,
+ "learning_rate": 7.854357223169356e-05,
+ "loss": 1.2806,
+ "step": 291
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.20909844432349833,
+ "learning_rate": 7.852068904394579e-05,
+ "loss": 1.2627,
+ "step": 292
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.21307115068935759,
+ "learning_rate": 7.849763087206866e-05,
+ "loss": 1.1879,
+ "step": 293
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.25009949471398946,
+ "learning_rate": 7.847439782080628e-05,
+ "loss": 1.2881,
+ "step": 294
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.20960783418679174,
+ "learning_rate": 7.845098999569712e-05,
+ "loss": 1.2723,
+ "step": 295
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.24968832437925104,
+ "learning_rate": 7.842740750307362e-05,
+ "loss": 1.2029,
+ "step": 296
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.22981196585125677,
+ "learning_rate": 7.84036504500616e-05,
+ "loss": 1.1695,
+ "step": 297
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.2320606844751365,
+ "learning_rate": 7.837971894457991e-05,
+ "loss": 1.2317,
+ "step": 298
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.23051459673906124,
+ "learning_rate": 7.835561309533981e-05,
+ "loss": 1.2046,
+ "step": 299
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.2510027231060586,
+ "learning_rate": 7.833133301184457e-05,
+ "loss": 1.199,
+ "step": 300
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.23601180466018787,
+ "learning_rate": 7.830687880438895e-05,
+ "loss": 1.1755,
+ "step": 301
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.24740820934385369,
+ "learning_rate": 7.828225058405864e-05,
+ "loss": 1.2054,
+ "step": 302
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.23065372979111173,
+ "learning_rate": 7.825744846272984e-05,
+ "loss": 1.2066,
+ "step": 303
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.22385077334838213,
+ "learning_rate": 7.823247255306866e-05,
+ "loss": 1.2147,
+ "step": 304
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.42981213948386104,
+ "learning_rate": 7.820732296853074e-05,
+ "loss": 1.2314,
+ "step": 305
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.21122844902751076,
+ "learning_rate": 7.818199982336058e-05,
+ "loss": 1.1462,
+ "step": 306
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.23374869692118933,
+ "learning_rate": 7.815650323259117e-05,
+ "loss": 1.2051,
+ "step": 307
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.21662363795962128,
+ "learning_rate": 7.813083331204332e-05,
+ "loss": 1.1575,
+ "step": 308
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.2088315773384112,
+ "learning_rate": 7.810499017832526e-05,
+ "loss": 1.1316,
+ "step": 309
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.2095238410730976,
+ "learning_rate": 7.807897394883203e-05,
+ "loss": 1.2087,
+ "step": 310
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.22672932127256515,
+ "learning_rate": 7.805278474174499e-05,
+ "loss": 1.2512,
+ "step": 311
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.21873052340922736,
+ "learning_rate": 7.802642267603126e-05,
+ "loss": 1.1909,
+ "step": 312
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.219814521916342,
+ "learning_rate": 7.79998878714432e-05,
+ "loss": 1.1669,
+ "step": 313
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.3049426027257317,
+ "learning_rate": 7.797318044851786e-05,
+ "loss": 1.1797,
+ "step": 314
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.22309435690065985,
+ "learning_rate": 7.794630052857638e-05,
+ "loss": 1.1417,
+ "step": 315
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.3891885169154885,
+ "learning_rate": 7.791924823372354e-05,
+ "loss": 1.2369,
+ "step": 316
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.24780269452456372,
+ "learning_rate": 7.789202368684711e-05,
+ "loss": 1.2521,
+ "step": 317
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.21660460720269362,
+ "learning_rate": 7.786462701161738e-05,
+ "loss": 1.2151,
+ "step": 318
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.23635409466561857,
+ "learning_rate": 7.783705833248649e-05,
+ "loss": 1.2363,
+ "step": 319
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.2616135839903218,
+ "learning_rate": 7.780931777468797e-05,
+ "loss": 1.2428,
+ "step": 320
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.21461059159245083,
+ "learning_rate": 7.77814054642361e-05,
+ "loss": 1.1434,
+ "step": 321
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.25348824286656163,
+ "learning_rate": 7.775332152792539e-05,
+ "loss": 1.2368,
+ "step": 322
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.22275034726331247,
+ "learning_rate": 7.772506609332995e-05,
+ "loss": 1.1827,
+ "step": 323
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.25030821228147526,
+ "learning_rate": 7.769663928880298e-05,
+ "loss": 1.2428,
+ "step": 324
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.22251804398745534,
+ "learning_rate": 7.766804124347608e-05,
+ "loss": 1.1889,
+ "step": 325
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.23381455520411995,
+ "learning_rate": 7.763927208725879e-05,
+ "loss": 1.2115,
+ "step": 326
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.27341902651946226,
+ "learning_rate": 7.761033195083791e-05,
+ "loss": 1.2535,
+ "step": 327
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.24862471659814522,
+ "learning_rate": 7.758122096567694e-05,
+ "loss": 1.2128,
+ "step": 328
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.2251357082045494,
+ "learning_rate": 7.755193926401547e-05,
+ "loss": 1.2334,
+ "step": 329
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.3173274941622932,
+ "learning_rate": 7.752248697886857e-05,
+ "loss": 1.226,
+ "step": 330
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.23056440717672175,
+ "learning_rate": 7.74928642440263e-05,
+ "loss": 1.2339,
+ "step": 331
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2801507500859342,
+ "learning_rate": 7.746307119405286e-05,
+ "loss": 1.287,
+ "step": 332
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2267818430426272,
+ "learning_rate": 7.743310796428622e-05,
+ "loss": 1.1916,
+ "step": 333
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2777329160365585,
+ "learning_rate": 7.74029746908374e-05,
+ "loss": 1.252,
+ "step": 334
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.25289169762353,
+ "learning_rate": 7.737267151058983e-05,
+ "loss": 1.2153,
+ "step": 335
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2424670686901653,
+ "learning_rate": 7.734219856119875e-05,
+ "loss": 1.2227,
+ "step": 336
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.22747092217441645,
+ "learning_rate": 7.731155598109067e-05,
+ "loss": 1.19,
+ "step": 337
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.2307810940100189,
+ "learning_rate": 7.728074390946257e-05,
+ "loss": 1.1818,
+ "step": 338
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.2583402574655623,
+ "learning_rate": 7.724976248628142e-05,
+ "loss": 1.1608,
+ "step": 339
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.22140209760890694,
+ "learning_rate": 7.721861185228347e-05,
+ "loss": 1.1245,
+ "step": 340
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.25859310758244686,
+ "learning_rate": 7.718729214897362e-05,
+ "loss": 1.2247,
+ "step": 341
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.26371179531372124,
+ "learning_rate": 7.715580351862482e-05,
+ "loss": 1.2128,
+ "step": 342
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.26575541302851047,
+ "learning_rate": 7.712414610427733e-05,
+ "loss": 1.2443,
+ "step": 343
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.269978305197599,
+ "learning_rate": 7.709232004973816e-05,
+ "loss": 1.2231,
+ "step": 344
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.26583998705977047,
+ "learning_rate": 7.70603254995804e-05,
+ "loss": 1.2476,
+ "step": 345
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.24256062164066097,
+ "learning_rate": 7.702816259914253e-05,
+ "loss": 1.2901,
+ "step": 346
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.3463123472658915,
+ "learning_rate": 7.699583149452779e-05,
+ "loss": 1.3277,
+ "step": 347
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.2269096590531878,
+ "learning_rate": 7.696333233260345e-05,
+ "loss": 1.2047,
+ "step": 348
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.25136883001050025,
+ "learning_rate": 7.693066526100031e-05,
+ "loss": 1.1619,
+ "step": 349
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.2565112571116145,
+ "learning_rate": 7.68978304281118e-05,
+ "loss": 1.2389,
+ "step": 350
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.22175779550828703,
+ "learning_rate": 7.686482798309349e-05,
+ "loss": 1.2238,
+ "step": 351
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.22588304332216555,
+ "learning_rate": 7.683165807586234e-05,
+ "loss": 1.174,
+ "step": 352
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.24889474296529737,
+ "learning_rate": 7.6798320857096e-05,
+ "loss": 1.2366,
+ "step": 353
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.27339703806525034,
+ "learning_rate": 7.676481647823214e-05,
+ "loss": 1.2356,
+ "step": 354
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.23424666722888365,
+ "learning_rate": 7.673114509146782e-05,
+ "loss": 1.2089,
+ "step": 355
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.27978285392461766,
+ "learning_rate": 7.66973068497587e-05,
+ "loss": 1.2609,
+ "step": 356
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.2509423350138824,
+ "learning_rate": 7.666330190681844e-05,
+ "loss": 1.1777,
+ "step": 357
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.23007730927468031,
+ "learning_rate": 7.662913041711793e-05,
+ "loss": 1.154,
+ "step": 358
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.2438648674953112,
+ "learning_rate": 7.659479253588462e-05,
+ "loss": 1.2257,
+ "step": 359
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.28816093242092233,
+ "learning_rate": 7.65602884191018e-05,
+ "loss": 1.2558,
+ "step": 360
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.24972815300596035,
+ "learning_rate": 7.652561822350793e-05,
+ "loss": 1.2837,
+ "step": 361
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.2543189139697063,
+ "learning_rate": 7.649078210659587e-05,
+ "loss": 1.2193,
+ "step": 362
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.2237937956718952,
+ "learning_rate": 7.645578022661224e-05,
+ "loss": 1.2237,
+ "step": 363
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.29742029408787396,
+ "learning_rate": 7.642061274255657e-05,
+ "loss": 1.2116,
+ "step": 364
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2462883147335493,
+ "learning_rate": 7.638527981418075e-05,
+ "loss": 1.1827,
+ "step": 365
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2647802498907096,
+ "learning_rate": 7.634978160198817e-05,
+ "loss": 1.2739,
+ "step": 366
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.22360398779217264,
+ "learning_rate": 7.631411826723306e-05,
+ "loss": 1.2185,
+ "step": 367
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2635048004593543,
+ "learning_rate": 7.627828997191973e-05,
+ "loss": 1.2317,
+ "step": 368
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2764803449917684,
+ "learning_rate": 7.624229687880184e-05,
+ "loss": 1.1923,
+ "step": 369
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.25724943233414527,
+ "learning_rate": 7.620613915138166e-05,
+ "loss": 1.2218,
+ "step": 370
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.2858318045794755,
+ "learning_rate": 7.61698169539093e-05,
+ "loss": 1.1496,
+ "step": 371
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.23547216647460364,
+ "learning_rate": 7.613333045138206e-05,
+ "loss": 1.1905,
+ "step": 372
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.22984814903684375,
+ "learning_rate": 7.609667980954355e-05,
+ "loss": 1.2009,
+ "step": 373
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.2551903754079084,
+ "learning_rate": 7.605986519488301e-05,
+ "loss": 1.2042,
+ "step": 374
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.2508257410125616,
+ "learning_rate": 7.602288677463457e-05,
+ "loss": 1.2468,
+ "step": 375
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.25324577774935964,
+ "learning_rate": 7.598574471677644e-05,
+ "loss": 1.2603,
+ "step": 376
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.35888776531769967,
+ "learning_rate": 7.59484391900302e-05,
+ "loss": 1.1929,
+ "step": 377
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.22048517191014724,
+ "learning_rate": 7.591097036385994e-05,
+ "loss": 1.1783,
+ "step": 378
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.2781160412746083,
+ "learning_rate": 7.587333840847162e-05,
+ "loss": 1.3397,
+ "step": 379
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.24033046830332258,
+ "learning_rate": 7.583554349481222e-05,
+ "loss": 1.2436,
+ "step": 380
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.26413762380260003,
+ "learning_rate": 7.579758579456893e-05,
+ "loss": 1.1917,
+ "step": 381
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.2390937887338632,
+ "learning_rate": 7.575946548016847e-05,
+ "loss": 1.2186,
+ "step": 382
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.25131263043429275,
+ "learning_rate": 7.572118272477622e-05,
+ "loss": 1.2538,
+ "step": 383
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.223974104870702,
+ "learning_rate": 7.568273770229546e-05,
+ "loss": 1.2165,
+ "step": 384
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.25840356830252875,
+ "learning_rate": 7.564413058736663e-05,
+ "loss": 1.1848,
+ "step": 385
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.2723156683076603,
+ "learning_rate": 7.560536155536641e-05,
+ "loss": 1.1982,
+ "step": 386
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.265687427976889,
+ "learning_rate": 7.556643078240708e-05,
+ "loss": 1.231,
+ "step": 387
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.25152762080976077,
+ "learning_rate": 7.552733844533562e-05,
+ "loss": 1.1974,
+ "step": 388
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.2366049485053541,
+ "learning_rate": 7.548808472173292e-05,
+ "loss": 1.3119,
+ "step": 389
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.22092196577077122,
+ "learning_rate": 7.5448669789913e-05,
+ "loss": 1.195,
+ "step": 390
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.22667521540462374,
+ "learning_rate": 7.540909382892217e-05,
+ "loss": 1.1431,
+ "step": 391
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.25432207282646513,
+ "learning_rate": 7.536935701853823e-05,
+ "loss": 1.2173,
+ "step": 392
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.29950506457923864,
+ "learning_rate": 7.53294595392697e-05,
+ "loss": 1.1962,
+ "step": 393
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.24735689607229913,
+ "learning_rate": 7.528940157235487e-05,
+ "loss": 1.2053,
+ "step": 394
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.24394198607459663,
+ "learning_rate": 7.524918329976114e-05,
+ "loss": 1.1979,
+ "step": 395
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.2630369372689188,
+ "learning_rate": 7.520880490418409e-05,
+ "loss": 1.2111,
+ "step": 396
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.26275028416291457,
+ "learning_rate": 7.516826656904664e-05,
+ "loss": 1.2133,
+ "step": 397
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.23938074620956928,
+ "learning_rate": 7.512756847849831e-05,
+ "loss": 1.1355,
+ "step": 398
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.3724960610098138,
+ "learning_rate": 7.508671081741428e-05,
+ "loss": 1.2572,
+ "step": 399
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.24161685847894723,
+ "learning_rate": 7.504569377139462e-05,
+ "loss": 1.1706,
+ "step": 400
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.26121591322670523,
+ "learning_rate": 7.50045175267634e-05,
+ "loss": 1.2135,
+ "step": 401
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2465579498164775,
+ "learning_rate": 7.496318227056788e-05,
+ "loss": 1.1641,
+ "step": 402
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2556288696122787,
+ "learning_rate": 7.492168819057767e-05,
+ "loss": 1.2939,
+ "step": 403
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.261481216336303,
+ "learning_rate": 7.488003547528382e-05,
+ "loss": 1.2026,
+ "step": 404
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2389415135676362,
+ "learning_rate": 7.483822431389799e-05,
+ "loss": 1.2131,
+ "step": 405
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2559201956627192,
+ "learning_rate": 7.479625489635162e-05,
+ "loss": 1.1246,
+ "step": 406
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.27127932491822604,
+ "learning_rate": 7.475412741329504e-05,
+ "loss": 1.2429,
+ "step": 407
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.27006004008695594,
+ "learning_rate": 7.47118420560966e-05,
+ "loss": 1.2388,
+ "step": 408
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.23716823297200537,
+ "learning_rate": 7.466939901684182e-05,
+ "loss": 1.1264,
+ "step": 409
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.2885373898669248,
+ "learning_rate": 7.462679848833252e-05,
+ "loss": 1.2786,
+ "step": 410
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.49215227598639927,
+ "learning_rate": 7.458404066408588e-05,
+ "loss": 1.2386,
+ "step": 411
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.24235735604947403,
+ "learning_rate": 7.454112573833368e-05,
+ "loss": 1.1423,
+ "step": 412
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.2584614748054343,
+ "learning_rate": 7.449805390602127e-05,
+ "loss": 1.2669,
+ "step": 413
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.23806123085998873,
+ "learning_rate": 7.445482536280684e-05,
+ "loss": 1.1763,
+ "step": 414
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.24459517607786851,
+ "learning_rate": 7.441144030506043e-05,
+ "loss": 1.198,
+ "step": 415
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.25801616402700395,
+ "learning_rate": 7.436789892986304e-05,
+ "loss": 1.2136,
+ "step": 416
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.2814819942392514,
+ "learning_rate": 7.432420143500578e-05,
+ "loss": 1.2398,
+ "step": 417
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.22134709322606153,
+ "learning_rate": 7.428034801898893e-05,
+ "loss": 1.1592,
+ "step": 418
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.2899677536995633,
+ "learning_rate": 7.42363388810211e-05,
+ "loss": 1.2296,
+ "step": 419
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.24005943230262294,
+ "learning_rate": 7.419217422101822e-05,
+ "loss": 1.2223,
+ "step": 420
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.26417562369496167,
+ "learning_rate": 7.414785423960275e-05,
+ "loss": 1.2261,
+ "step": 421
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.2580815883535521,
+ "learning_rate": 7.410337913810271e-05,
+ "loss": 1.2021,
+ "step": 422
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.25242217589496435,
+ "learning_rate": 7.405874911855071e-05,
+ "loss": 1.239,
+ "step": 423
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.21991733999839932,
+ "learning_rate": 7.401396438368315e-05,
+ "loss": 1.1716,
+ "step": 424
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.40116538322720213,
+ "learning_rate": 7.396902513693924e-05,
+ "loss": 1.2773,
+ "step": 425
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.277333939455099,
+ "learning_rate": 7.392393158246002e-05,
+ "loss": 1.2574,
+ "step": 426
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.27146087746385755,
+ "learning_rate": 7.387868392508756e-05,
+ "loss": 1.2243,
+ "step": 427
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.255881055620786,
+ "learning_rate": 7.38332823703639e-05,
+ "loss": 1.223,
+ "step": 428
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.24807364856677255,
+ "learning_rate": 7.378772712453021e-05,
+ "loss": 1.1985,
+ "step": 429
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.25746257617764423,
+ "learning_rate": 7.37420183945258e-05,
+ "loss": 1.2502,
+ "step": 430
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.28851991049982234,
+ "learning_rate": 7.369615638798722e-05,
+ "loss": 1.2535,
+ "step": 431
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.24113389811604363,
+ "learning_rate": 7.365014131324725e-05,
+ "loss": 1.2227,
+ "step": 432
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.2414465151257969,
+ "learning_rate": 7.360397337933405e-05,
+ "loss": 1.1884,
+ "step": 433
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.2735463134699831,
+ "learning_rate": 7.355765279597011e-05,
+ "loss": 1.2756,
+ "step": 434
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.2588437452987293,
+ "learning_rate": 7.351117977357139e-05,
+ "loss": 1.2108,
+ "step": 435
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.26573294117796553,
+ "learning_rate": 7.346455452324629e-05,
+ "loss": 1.1821,
+ "step": 436
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.2555476577827304,
+ "learning_rate": 7.341777725679473e-05,
+ "loss": 1.1937,
+ "step": 437
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.2867704132108098,
+ "learning_rate": 7.337084818670716e-05,
+ "loss": 1.2272,
+ "step": 438
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.27726678115981157,
+ "learning_rate": 7.332376752616367e-05,
+ "loss": 1.2331,
+ "step": 439
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.26955338021079955,
+ "learning_rate": 7.32765354890329e-05,
+ "loss": 1.1731,
+ "step": 440
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.25250321202536524,
+ "learning_rate": 7.322915228987116e-05,
+ "loss": 1.2653,
+ "step": 441
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.24748844179765395,
+ "learning_rate": 7.318161814392143e-05,
+ "loss": 1.24,
+ "step": 442
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.28177805247356325,
+ "learning_rate": 7.313393326711239e-05,
+ "loss": 1.185,
+ "step": 443
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.24093242000396312,
+ "learning_rate": 7.30860978760574e-05,
+ "loss": 1.1994,
+ "step": 444
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.26277803901457075,
+ "learning_rate": 7.30381121880536e-05,
+ "loss": 1.212,
+ "step": 445
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2506524258682433,
+ "learning_rate": 7.298997642108079e-05,
+ "loss": 1.2421,
+ "step": 446
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2840599700015824,
+ "learning_rate": 7.294169079380061e-05,
+ "loss": 1.1818,
+ "step": 447
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.24892184038117549,
+ "learning_rate": 7.289325552555538e-05,
+ "loss": 1.1916,
+ "step": 448
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2700898428541357,
+ "learning_rate": 7.284467083636722e-05,
+ "loss": 1.2517,
+ "step": 449
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2617848546539419,
+ "learning_rate": 7.279593694693698e-05,
+ "loss": 1.2063,
+ "step": 450
+ }
+ ],
+ "logging_steps": 1.0,
+ "max_steps": 1638,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 3,
+ "save_steps": 50,
+ "total_flos": 466496262242304.0,
+ "train_batch_size": 1,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/checkpoint-450/training_args.bin b/checkpoint-450/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..c5d2416a3b70bb5260978ec9996f00154a724ba7
--- /dev/null
+++ b/checkpoint-450/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b22e8f9d51a16d03a2c506fa3d1eafa8f4b1ae992992c2086a4d435ffd97387e
+size 6712
diff --git a/checkpoint-450/zero_to_fp32.py b/checkpoint-450/zero_to_fp32.py
new file mode 100755
index 0000000000000000000000000000000000000000..24cc342e78d1a006c782b3a4cd68d9ce786d8fd8
--- /dev/null
+++ b/checkpoint-450/zero_to_fp32.py
@@ -0,0 +1,604 @@
+#!/usr/bin/env python
+
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+# This script extracts fp32 consolidated weights from a zero 1, 2 and 3 DeepSpeed checkpoints. It gets
+# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
+# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
+# application.
+#
+# example: python zero_to_fp32.py . pytorch_model.bin
+
+import argparse
+import torch
+import glob
+import math
+import os
+import re
+from collections import OrderedDict
+from dataclasses import dataclass
+
+# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
+# DeepSpeed data structures it has to be available in the current python environment.
+from deepspeed.utils import logger
+from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
+
+
@dataclass
class zero_model_state:
    """Per-rank model-state record distilled from a *_model_states.pt file.

    Fix: the original annotated fields with ``dict()`` — an *empty dict
    instance*, not a type — and typed ``ds_version`` as ``int`` even though it
    is populated via ``state_dict.get(DS_VERSION, None)``.
    """
    buffers: dict                 # fp32 buffers, name -> tensor
    param_shapes: list            # list of {param_name: shape} dicts, one per param group
    shared_params: list           # [alias_name, source_name] pairs
    ds_version: object            # version recorded in the checkpoint, or None if absent
    frozen_param_shapes: dict     # name -> shape for frozen params; None when there are none
    frozen_param_fragments: dict  # name -> saved fragment for frozen params; None when there are none
+
+
# Verbosity switch for the helpers below; overwritten with args.debug when the
# script is run from the command line.
debug = 0

# load to cpu — the conversion must work on a machine without GPUs, and the
# consolidated fp32 weights can be very large
device = torch.device('cpu')
+
+
def atoi(text):
    """Return *text* as an int when it is all digits, otherwise unchanged."""
    if text.isdigit():
        return int(text)
    return text
+
+
def natural_keys(text):
    """Sort key for human ("natural") ordering, e.g. step2 < step10.

    alist.sort(key=natural_keys) sorts in human order
    http://nedbatchelder.com/blog/200712/human_sorting.html
    (See Toothy's implementation in the comments)
    """
    keys = []
    for chunk in re.split(r'(\d+)', text):
        keys.append(atoi(chunk))
    return keys
+
+
def get_model_state_file(checkpoint_dir, zero_stage):
    """Return rank 0's model-states file inside *checkpoint_dir*.

    Args:
        checkpoint_dir: the tag folder of a DeepSpeed checkpoint.
        zero_stage: ZeRO stage recorded in the checkpoint (1, 2 or 3); the
            file naming scheme differs between stage <= 2 and stage 3.

    Returns:
        Absolute/joined path of the model-states file.

    Raises:
        FileNotFoundError: directory or expected file missing.
        ValueError: unrecognized *zero_stage* (the original code fell through
            and crashed with UnboundLocalError in that case).
    """
    if not os.path.isdir(checkpoint_dir):
        raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")

    # there should be only one file
    if zero_stage <= 2:
        file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
    elif zero_stage == 3:
        file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
    else:
        raise ValueError(f"unknown zero stage {zero_stage}")

    if not os.path.exists(file):
        raise FileNotFoundError(f"can't find model states file at '{file}'")

    return file
+
+
def get_checkpoint_files(checkpoint_dir, glob_pattern):
    """Return the files matching *glob_pattern* under *checkpoint_dir*,
    naturally sorted (step2 before step10).

    Raises FileNotFoundError when no file matches.
    """
    # XXX: need to test that this simple glob rule works for multi-node setup too
    pattern = os.path.join(checkpoint_dir, glob_pattern)
    ckpt_files = sorted(glob.glob(pattern), key=natural_keys)

    if not ckpt_files:
        raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")

    return ckpt_files
+
+
def get_optim_files(checkpoint_dir):
    """Return the per-rank '*_optim_states.pt' files, in natural sort order."""
    return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
+
+
def get_model_state_files(checkpoint_dir):
    """Return the per-rank '*_model_states.pt' files, in natural sort order."""
    return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
+
+
def parse_model_states(files):
    """Distill each rank's *_model_states.pt file into a zero_model_state.

    Args:
        files: paths to the per-rank model-states checkpoints.

    Returns:
        list of zero_model_state records, in the same order as *files*.

    Raises:
        ValueError: when a file lacks the buffer-names entry, i.e. it is not
            a model-states checkpoint.
    """
    zero_model_states = []
    for file in files:
        state_dict = torch.load(file, map_location=device)

        if BUFFER_NAMES not in state_dict:
            raise ValueError(f"{file} is not a model state checkpoint")
        buffer_names = state_dict[BUFFER_NAMES]
        if debug:
            print("Found buffers:", buffer_names)

        # recover just the buffers while restoring them to fp32 if they were saved in fp16
        buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
        param_shapes = state_dict[PARAM_SHAPES]

        # collect parameters that are included in param_shapes
        # NOTE(review): param_names is built up here but never used below —
        # looks vestigial; confirm before removing
        param_names = []
        for s in param_shapes:
            for name in s.keys():
                param_names.append(name)

        # update with frozen parameters
        frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
        if frozen_param_shapes is not None:
            if debug:
                print(f"Found frozen_param_shapes: {frozen_param_shapes}")
            param_names += list(frozen_param_shapes.keys())

        # handle shared params
        shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]

        ds_version = state_dict.get(DS_VERSION, None)

        frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)

        z_model_state = zero_model_state(buffers=buffers,
                                         param_shapes=param_shapes,
                                         shared_params=shared_params,
                                         ds_version=ds_version,
                                         frozen_param_shapes=frozen_param_shapes,
                                         frozen_param_fragments=frozen_param_fragments)
        zero_model_states.append(z_model_state)

    return zero_model_states
+
+
def parse_optim_states(files, ds_checkpoint_dir):
    """Load the per-rank '*_optim_states.pt' files and extract what is needed
    to rebuild the fp32 weights.

    Args:
        files: optimizer-state checkpoint paths, one per rank.
        ds_checkpoint_dir: checkpoint tag folder (used only in error messages).

    Returns:
        (zero_stage, world_size, fp32_flat_groups) where fp32_flat_groups has
        one entry per rank: a list of flat tensors (one per param group) for
        stage 1/2, or a single merged flat tensor for stage 3.

    Raises:
        ValueError: not a zero checkpoint, unknown zero stage, or world_size
            disagreeing with the number of files found.
    """

    total_files = len(files)
    state_dicts = []
    for f in files:
        state_dict = torch.load(f, map_location=device)
        # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights
        # and also handle the case where it was already removed by another helper script
        state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
        state_dicts.append(state_dict)

    if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]:
        raise ValueError(f"{files[0]} is not a zero checkpoint")
    zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
    world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]

    # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
    # parameters can be different from data parallelism for non-expert parameters. So we can just
    # use the max of the partition_count to get the dp world_size.

    if type(world_size) is list:
        world_size = max(world_size)

    if world_size != total_files:
        raise ValueError(
            f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
            "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
        )

    # the groups are named differently in each stage
    if zero_stage <= 2:
        fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
    elif zero_stage == 3:
        fp32_groups_key = FP32_FLAT_GROUPS
    else:
        raise ValueError(f"unknown zero stage {zero_stage}")

    if zero_stage <= 2:
        fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
    elif zero_stage == 3:
        # if there is more than one param group, there will be multiple flattened tensors - one
        # flattened tensor per group - for simplicity merge them into a single tensor
        #
        # XXX: could make the script more memory efficient for when there are multiple groups - it
        # will require matching the sub-lists of param_shapes for each param group flattened tensor

        fp32_flat_groups = [
            torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
        ]

    return zero_stage, world_size, fp32_flat_groups
+
+
def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
    """
    Returns fp32 state_dict reconstructed from ds checkpoint

    Args:
        - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
        - ``exclude_frozen_parameters``: when True, frozen parameters are left out of the result

    """
    print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")

    optim_files = get_optim_files(ds_checkpoint_dir)
    zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
    print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")

    model_files = get_model_state_files(ds_checkpoint_dir)

    zero_model_states = parse_model_states(model_files)
    print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')

    # parse_optim_states has already rejected stages outside 1-3, so these two
    # branches are exhaustive here
    if zero_stage <= 2:
        return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                                          exclude_frozen_parameters)
    elif zero_stage == 3:
        return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                                          exclude_frozen_parameters)
+
+
def _zero2_merge_frozen_params(state_dict, zero_model_states):
    """Copy frozen (non-trainable) params into *state_dict*, in place.

    Rank 0's saved fragments are stored directly — no concatenation across
    ranks is performed for frozen params in the stage 1/2 path. No-op when
    the checkpoint has no frozen params.
    """
    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
        return

    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
    frozen_param_fragments = zero_model_states[0].frozen_param_fragments

    if debug:
        num_elem = sum(s.numel() for s in frozen_param_shapes.values())
        print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

    wanted_params = len(frozen_param_shapes)
    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
    avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
    print(f'Frozen params: Have {avail_numel} numels to process.')
    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params = 0
    total_numel = 0
    for name, shape in frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        # fragment is used as-is; its shape is assumed to already match —
        # note no .view(shape) here, unlike the zero3 path
        state_dict[name] = frozen_param_fragments[name]

        if debug:
            print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _has_callable(obj, fn):
+ attr = getattr(obj, fn, None)
+ return callable(attr)
+
+
def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    """Rebuild full fp32 trainable params from ZeRO-1/2 shards into *state_dict*.

    Each rank holds a contiguous partition of every param group's flat fp32
    buffer; concatenating the per-rank partitions restores each group's full
    flat buffer, which is then sliced back into individual parameters.

    Raises:
        ValueError: when the consumed element count disagrees with the
            available elements beyond the expected alignment padding.
    """
    param_shapes = zero_model_states[0].param_shapes

    # Reconstruction protocol:
    #
    # XXX: document this

    if debug:
        for i in range(world_size):
            for j in range(len(fp32_flat_groups[0])):
                print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")

    # XXX: memory usage doubles here (zero2)
    num_param_groups = len(fp32_flat_groups[0])
    merged_single_partition_of_fp32_groups = []
    for i in range(num_param_groups):
        merged_partitions = [sd[i] for sd in fp32_flat_groups]
        full_single_fp32_vector = torch.cat(merged_partitions, 0)
        merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
    avail_numel = sum(
        [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])

    if debug:
        wanted_params = sum([len(shapes) for shapes in param_shapes])
        wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
        # not asserting if there is a mismatch due to possible padding
        print(f"Have {avail_numel} numels to process.")
        print(f"Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    total_numel = 0
    total_params = 0
    for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
        offset = 0
        avail_numel = full_single_fp32_vector.numel()
        for name, shape in shapes.items():

            # shape may be a torch.Size (has .numel) or a plain tuple/list
            unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
            total_numel += unpartitioned_numel
            total_params += 1

            if debug:
                print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
            # slice this param out of the group's flat buffer and restore its shape
            state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
            offset += unpartitioned_numel

        # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
        # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
        # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
        # live optimizer object, so we are checking that the numbers are within the right range
        align_to = 2 * world_size

        def zero2_align(x):
            return align_to * math.ceil(x / align_to)

        if debug:
            print(f"original offset={offset}, avail_numel={avail_numel}")

        offset = zero2_align(offset)
        avail_numel = zero2_align(avail_numel)

        if debug:
            print(f"aligned offset={offset}, avail_numel={avail_numel}")

        # Sanity check
        if offset != avail_numel:
            raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
+
+
def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                               exclude_frozen_parameters):
    """Assemble the consolidated fp32 state_dict from ZeRO-1/2 shards.

    Order: buffers first, then frozen params (unless excluded), then trainable
    params, and finally shared-parameter aliases are re-pointed.
    """
    state_dict = OrderedDict()
    rank0 = zero_model_states[0]

    # restore buffers from rank 0
    buffers = rank0.buffers
    state_dict.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    if not exclude_frozen_parameters:
        _zero2_merge_frozen_params(state_dict, zero_model_states)

    _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # re-link shared parameters to the tensors reconstructed above
    for alias, source in rank0.shared_params:
        if source in state_dict:
            state_dict[alias] = state_dict[source]

    return state_dict
+
+
def zero3_partitioned_param_info(unpartitioned_numel, world_size):
    """Return (per-rank partition size, tail padding) for a ZeRO-3 param."""
    partitioned_numel = math.ceil(unpartitioned_numel / world_size)
    leftover = unpartitioned_numel % world_size
    padding_numel = 0 if leftover == 0 else world_size - leftover
    return partitioned_numel, padding_numel
+
+
def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
    """Reconstruct frozen (non-trainable) fp32 params from ZeRO-3 shards into
    *state_dict*, in place.

    Every rank holds a flat fragment of each frozen param; the fragments are
    concatenated, trimmed of tail padding and reshaped. No-op when the
    checkpoint has no frozen params.
    """
    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
        return

    if debug:
        for i in range(world_size):
            num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
            print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
    wanted_params = len(frozen_param_shapes)
    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
    avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
    print(f'Frozen params: Have {avail_numel} numels to process.')
    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params = 0
    total_numel = 0
    for name, shape in zero_model_states[0].frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        # concatenate every rank's fragment, drop tail padding, restore shape
        param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
        state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)

        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    """Reconstruct full fp32 trainable params from ZeRO-3 shards into
    *state_dict*, in place.

    In ZeRO-3 every rank holds a flat 1-D shard of each parameter, so the
    partitions must be zipped back together at each parameter boundary while
    dropping any alignment padding.

    Fix: the original computed ``avail_numel`` twice with the identical
    expression; the first assignment was dead (overwritten before any use)
    and has been removed.

    Raises:
        ValueError: when the consumed element count does not match what the
            flat shards contain (corrupt or mismatched checkpoint).
    """
    param_shapes = zero_model_states[0].param_shapes

    # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
    # param, re-consolidating each param, while dealing with padding if any

    # merge list of dicts, preserving order
    param_shapes = {k: v for d in param_shapes for k, v in d.items()}

    if debug:
        for i in range(world_size):
            print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")

    wanted_params = len(param_shapes)
    wanted_numel = sum(shape.numel() for shape in param_shapes.values())
    # not asserting if there is a mismatch due to possible padding
    avail_numel = fp32_flat_groups[0].numel() * world_size
    print(f"Trainable params: Have {avail_numel} numels to process.")
    print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    offset = 0
    total_numel = 0
    total_params = 0
    for name, shape in param_shapes.items():

        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel
        total_params += 1

        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

        # XXX: memory usage doubles here
        # take the same slice from every rank's flat tensor, concatenate,
        # then trim the tail padding and restore the original shape
        state_dict[name] = torch.cat(
            tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
            0).narrow(0, 0, unpartitioned_numel).view(shape)
        offset += partitioned_numel

    offset *= world_size

    # Sanity check
    if offset != avail_numel:
        raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
+
+
def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                               exclude_frozen_parameters):
    """Assemble the consolidated fp32 state_dict from ZeRO-3 shards.

    Order: buffers first, then frozen params (unless excluded), then trainable
    params, and finally shared-parameter aliases are re-pointed.
    """
    state_dict = OrderedDict()
    rank0 = zero_model_states[0]

    # restore buffers from rank 0
    buffers = rank0.buffers
    state_dict.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    if not exclude_frozen_parameters:
        _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)

    _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # re-link shared parameters to the tensors reconstructed above
    for alias, source in rank0.shared_params:
        if source in state_dict:
            state_dict[alias] = state_dict[source]

    return state_dict
+
+
def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None, exclude_frozen_parameters=False):
    """
    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
    ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
    via a model hub.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
        - ``exclude_frozen_parameters``: exclude frozen parameters

    Returns:
        - pytorch ``state_dict``

    Note: this approach may not work if your application doesn't have sufficient free CPU memory and
    you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
    the checkpoint.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
        # do the training and checkpoint saving
        state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
        model = model.cpu() # move to cpu
        model.load_state_dict(state_dict)
        # submit to model hub or save the model to share with others

    In this example the ``model`` will no longer be usable in the deepspeed context of the same
    application. i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.

    """
    if tag is None:
        # resolve the tag from the 'latest' marker file deepspeed writes next
        # to the per-step checkpoint folders
        latest_path = os.path.join(checkpoint_dir, 'latest')
        if not os.path.isfile(latest_path):
            raise ValueError(f"Unable to find 'latest' file at {latest_path}")
        with open(latest_path, 'r') as fd:
            tag = fd.read().strip()

    ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
    if not os.path.isdir(ds_checkpoint_dir):
        raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")

    return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
+
+
def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None, exclude_frozen_parameters=False):
    """
    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
    loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
        - ``exclude_frozen_parameters``: exclude frozen parameters
    """

    fp32_sd = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag, exclude_frozen_parameters)
    print(f"Saving fp32 state dict to {output_file}")
    torch.save(fp32_sd, output_file)
+
+
def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
    """
    1. Put the provided model to cpu
    2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
    3. Load it into the provided model

    Args:
        - ``model``: the model object to update
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``

    Returns:
        - ``model``: modified model

    Make sure you have plenty of CPU memory available before you call this function. If you don't
    have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
    conveniently placed for you in the checkpoint folder.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
        model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
        # submit to model hub or save the model to share with others

    Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context
    of the same application. i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    """
    # plain strings instead of argument-less f-strings for logger calls
    # (fixes the useless f-prefix; logging formats lazily when given args)
    logger.info("Extracting fp32 weights")
    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)

    logger.info("Overwriting model with fp32 weights")
    model = model.cpu()
    # strict=False: the consolidated dict may omit entries the model defines
    # (e.g. excluded frozen params)
    model.load_state_dict(state_dict, strict=False)

    return model
+
+
+if __name__ == "__main__":
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument("checkpoint_dir",
+ type=str,
+ help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
+ parser.add_argument(
+ "output_file",
+ type=str,
+ help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)")
+ parser.add_argument("-t",
+ "--tag",
+ type=str,
+ default=None,
+ help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
+ parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
+ parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
+ args = parser.parse_args()
+
+ debug = args.debug
+
+ convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
+ args.output_file,
+ tag=args.tag,
+ exclude_frozen_parameters=args.exclude_frozen_parameters)
diff --git a/checkpoint-500/README.md b/checkpoint-500/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..16b1eacdd9353dec380a08ee77ce6ed5ab50f12e
--- /dev/null
+++ b/checkpoint-500/README.md
@@ -0,0 +1,202 @@
+---
+library_name: peft
+base_model: gotzmann/uni
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+### Framework versions
+
+- PEFT 0.10.0
\ No newline at end of file
diff --git a/checkpoint-500/adapter_config.json b/checkpoint-500/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3cd6dba5d79f7ca21fd4ad465cbbcac1e0960476
--- /dev/null
+++ b/checkpoint-500/adapter_config.json
@@ -0,0 +1,31 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "gotzmann/uni",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 128,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 128,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "v_proj",
+ "k_proj",
+ "q_proj",
+ "o_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": true
+}
\ No newline at end of file
diff --git a/checkpoint-500/adapter_model.safetensors b/checkpoint-500/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..25998e6401d2eddee182d853ec7076e001d867bb
--- /dev/null
+++ b/checkpoint-500/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:83758c1ddea613ef778179c38abd9da6838eb7dd974e77e3e56c84640f4107f0
+size 1048664848
diff --git a/checkpoint-500/global_step500/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt b/checkpoint-500/global_step500/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..30cde85576578f264f48c87e8ab62f94960e448c
--- /dev/null
+++ b/checkpoint-500/global_step500/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0d14f988a761a7ce00710b0b629c9fcd09cb201de111af5c7cedd4518b0c683a
+size 787270042
diff --git a/checkpoint-500/global_step500/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt b/checkpoint-500/global_step500/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..951371adaa80900bff0bc86f90eb2a70f070ff54
--- /dev/null
+++ b/checkpoint-500/global_step500/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b7a9b590ff45a757f25e2c576d88c5a1ab260ae0dab9a61ec933d171371ea21b
+size 787270042
diff --git a/checkpoint-500/global_step500/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt b/checkpoint-500/global_step500/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..6c0fe2b0b6bc490c50cb4e776266717393ff1a17
--- /dev/null
+++ b/checkpoint-500/global_step500/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b29865cf6ac925303112462d79c8f920ffa70517aaa32fc92c33d8ce11660e8a
+size 787270042
diff --git a/checkpoint-500/global_step500/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt b/checkpoint-500/global_step500/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..b028efe4d32ea387151a6229c3e499005b4aa565
--- /dev/null
+++ b/checkpoint-500/global_step500/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6eac043d481e9c249f1d38e77b3ee662b36e55fa35a81f4e7b572b8958e513ff
+size 787270042
diff --git a/checkpoint-500/global_step500/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt b/checkpoint-500/global_step500/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..23a39c058d10522a1fb90b9ebaeb0d0dee4afcda
--- /dev/null
+++ b/checkpoint-500/global_step500/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:722060c492e959244200c0c3fd8e64d5653e040fca5655c0b737386cf39eea26
+size 787270042
diff --git a/checkpoint-500/global_step500/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt b/checkpoint-500/global_step500/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..c04d178900a3db573531a14582cdad8abf9384f4
--- /dev/null
+++ b/checkpoint-500/global_step500/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9eb253220e37a6cef37c54b6a5828f27e5144b24016dc7eb58d892f5a0af94aa
+size 787270042
diff --git a/checkpoint-500/global_step500/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt b/checkpoint-500/global_step500/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..086ae6788796fb71d5d42a67a710abc19a63fafb
--- /dev/null
+++ b/checkpoint-500/global_step500/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f11fc7f8012e1e361f24c6cd0adffb2642a0d691544be4bddcdbf2d5fb7a7c76
+size 787270042
diff --git a/checkpoint-500/global_step500/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt b/checkpoint-500/global_step500/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..e998dff96600e95d931a97ccc30f41214d965eb8
--- /dev/null
+++ b/checkpoint-500/global_step500/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cc078293f190630f7e77c6b3f2fe73669f2057b6ce982490fe425ad19fd0ccf7
+size 787270042
diff --git a/checkpoint-500/global_step500/zero_pp_rank_0_mp_rank_00_model_states.pt b/checkpoint-500/global_step500/zero_pp_rank_0_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..7b89211b0c52111571e2de9f61fdcca7c60d3a4b
--- /dev/null
+++ b/checkpoint-500/global_step500/zero_pp_rank_0_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bc93ec286c6a45468a2ec8a2733f7c25526cf847e51bafb4eb0596094a5e9f49
+size 653742
diff --git a/checkpoint-500/global_step500/zero_pp_rank_1_mp_rank_00_model_states.pt b/checkpoint-500/global_step500/zero_pp_rank_1_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..66f8e3a20049977242bae7db50de408317a9be5e
--- /dev/null
+++ b/checkpoint-500/global_step500/zero_pp_rank_1_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cd5e50551ae370d21a908003a7bb17c2fb644cf991279d1a990036afc0525251
+size 653742
diff --git a/checkpoint-500/global_step500/zero_pp_rank_2_mp_rank_00_model_states.pt b/checkpoint-500/global_step500/zero_pp_rank_2_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..76f4775785d2987e539fd855932130a4bfb9d610
--- /dev/null
+++ b/checkpoint-500/global_step500/zero_pp_rank_2_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:87eb5b17005b0580a82f8e1669ba240a18471d321a5944410c3ec48ea7a847fb
+size 653742
diff --git a/checkpoint-500/global_step500/zero_pp_rank_3_mp_rank_00_model_states.pt b/checkpoint-500/global_step500/zero_pp_rank_3_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..71627c723bead832d2456242b1cf1c563213e7d2
--- /dev/null
+++ b/checkpoint-500/global_step500/zero_pp_rank_3_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a2842b4114ea3bb10c42198fdfc0a2958005d39a235d39988f95eaa7e90a93ca
+size 653742
diff --git a/checkpoint-500/global_step500/zero_pp_rank_4_mp_rank_00_model_states.pt b/checkpoint-500/global_step500/zero_pp_rank_4_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..2fd51663fe1eadce5468ea6c2682f132dec70e1d
--- /dev/null
+++ b/checkpoint-500/global_step500/zero_pp_rank_4_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c68448b2d7ebaf4c38271b8c9cd7a48f1906f2ef23e3ddbeef5c39545508ac00
+size 653742
diff --git a/checkpoint-500/global_step500/zero_pp_rank_5_mp_rank_00_model_states.pt b/checkpoint-500/global_step500/zero_pp_rank_5_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..3570b4d435301b46f73b5abb029637ae66b1cd6f
--- /dev/null
+++ b/checkpoint-500/global_step500/zero_pp_rank_5_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ca270f4813e13d2c89663a5b1a70ec46408b87096075443f24a10c95b0c2863d
+size 653742
diff --git a/checkpoint-500/global_step500/zero_pp_rank_6_mp_rank_00_model_states.pt b/checkpoint-500/global_step500/zero_pp_rank_6_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..97eac0f6180f2a455a1a67ce25aa79cec19e2f1b
--- /dev/null
+++ b/checkpoint-500/global_step500/zero_pp_rank_6_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f8eef63270ad1d645a07b1a0b5a0bf3cfaba8785a660af7a96d7f3e5156363e3
+size 653742
diff --git a/checkpoint-500/global_step500/zero_pp_rank_7_mp_rank_00_model_states.pt b/checkpoint-500/global_step500/zero_pp_rank_7_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..eecfad8f6db4dbe91d5524150ea9641b7926d820
--- /dev/null
+++ b/checkpoint-500/global_step500/zero_pp_rank_7_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:50e64b923ed1ba99bbf51c5d8eb3a3448a03037421db1c1a6f65839ee6e8c258
+size 653742
diff --git a/checkpoint-500/latest b/checkpoint-500/latest
new file mode 100644
index 0000000000000000000000000000000000000000..f0b47ce15fff9a01b2a416a473b2148085048a50
--- /dev/null
+++ b/checkpoint-500/latest
@@ -0,0 +1 @@
+global_step500
\ No newline at end of file
diff --git a/checkpoint-500/rng_state_0.pth b/checkpoint-500/rng_state_0.pth
new file mode 100644
index 0000000000000000000000000000000000000000..b346349ce12dd5a17d4b91ed2a5722bb52550950
--- /dev/null
+++ b/checkpoint-500/rng_state_0.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ad8a35afd8967cbb748405387e44426e43ad127028e826eddc9b67d2ca873c85
+size 15984
diff --git a/checkpoint-500/rng_state_1.pth b/checkpoint-500/rng_state_1.pth
new file mode 100644
index 0000000000000000000000000000000000000000..68f3c6994456cb8d0592a5375d99503c8924b1c4
--- /dev/null
+++ b/checkpoint-500/rng_state_1.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f338ce80d7c441076bfc8c53b84067a0181f5a14e80c13d5acb8150b659f4d73
+size 15984
diff --git a/checkpoint-500/rng_state_2.pth b/checkpoint-500/rng_state_2.pth
new file mode 100644
index 0000000000000000000000000000000000000000..be044f6ceeed587d30e80c2f72d5aa19fdc9947b
--- /dev/null
+++ b/checkpoint-500/rng_state_2.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c9fbc9fa428939be10b46779f0eb5cd833e0da426b1cbdee77b3a55b6952235b
+size 15984
diff --git a/checkpoint-500/rng_state_3.pth b/checkpoint-500/rng_state_3.pth
new file mode 100644
index 0000000000000000000000000000000000000000..fc825249656a9b858782542bd3f4386250f1dfe0
--- /dev/null
+++ b/checkpoint-500/rng_state_3.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ac55dba0b79d5fa4699d239da2f966d52040d576d31234ac8d4632e6956481bc
+size 15984
diff --git a/checkpoint-500/rng_state_4.pth b/checkpoint-500/rng_state_4.pth
new file mode 100644
index 0000000000000000000000000000000000000000..d30f52a44be563c152ae09db6ae934da6da0d3ed
--- /dev/null
+++ b/checkpoint-500/rng_state_4.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af2d0c015100768ffa23faf3b6c2d54ea89eb045603e30e55cd211e06ff34972
+size 15984
diff --git a/checkpoint-500/rng_state_5.pth b/checkpoint-500/rng_state_5.pth
new file mode 100644
index 0000000000000000000000000000000000000000..c8715d27ab23ae545d58039cf949cc44ecc1da5e
--- /dev/null
+++ b/checkpoint-500/rng_state_5.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c60a1b40608e34bc801c8231f97b81c53b5290dfaed1b9cd0ccbeca29574a991
+size 15984
diff --git a/checkpoint-500/rng_state_6.pth b/checkpoint-500/rng_state_6.pth
new file mode 100644
index 0000000000000000000000000000000000000000..1ed791b6ef76eadf0b0c55a5733411771e2ae027
--- /dev/null
+++ b/checkpoint-500/rng_state_6.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3ad6a142a403eb9aafc4a3a9a856bca648fe31fd22d796867baca31fb13656aa
+size 15984
diff --git a/checkpoint-500/rng_state_7.pth b/checkpoint-500/rng_state_7.pth
new file mode 100644
index 0000000000000000000000000000000000000000..800c3bbbc5edf7db01a8316069d439c5fb8d8c30
--- /dev/null
+++ b/checkpoint-500/rng_state_7.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:38bc23a138cc800b22881742c0f3f9a71731a9a7111c6058a0077e6274d21773
+size 15984
diff --git a/checkpoint-500/scheduler.pt b/checkpoint-500/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..92346318f4ff3265cc7c4b0b86c905c695421b97
--- /dev/null
+++ b/checkpoint-500/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f864fbe1f67c6816a27efc600470a00e4227c0e65abc1cef188658f5ca2a8f8b
+size 1064
diff --git a/checkpoint-500/special_tokens_map.json b/checkpoint-500/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..72ecfeeb7e14d244c936169d2ed139eeae235ef1
--- /dev/null
+++ b/checkpoint-500/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "",
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/checkpoint-500/tokenizer.model b/checkpoint-500/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..6c00c742ce03c627d6cd5b795984876fa49fa899
--- /dev/null
+++ b/checkpoint-500/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
diff --git a/checkpoint-500/tokenizer_config.json b/checkpoint-500/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..bb5a9f09d8c0f3c32c66fc6118fe5c76c5c6fd90
--- /dev/null
+++ b/checkpoint-500/tokenizer_config.json
@@ -0,0 +1,45 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "",
+ "chat_template": "{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ '' + '### System:\\n\\n' + system_message }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '\\n\\n### Human:\\n\\n' + content }}{% elif message['role'] == 'assistant' %}{{ '\\n\\n### Assistant:\\n\\n' + content + '' }}{% endif %}{% endfor %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "",
+ "legacy": false,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "split_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "",
+ "use_default_system_prompt": false
+}
diff --git a/checkpoint-500/trainer_state.json b/checkpoint-500/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..979a3c8490cb57b1be16d7fd6fdd2b11c86916d1
--- /dev/null
+++ b/checkpoint-500/trainer_state.json
@@ -0,0 +1,3521 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 0.9144947416552355,
+ "eval_steps": 500,
+ "global_step": 500,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.0,
+ "grad_norm": 0.849355824164473,
+ "learning_rate": 4.878048780487805e-07,
+ "loss": 1.3655,
+ "step": 1
+ },
+ {
+ "epoch": 0.0,
+ "grad_norm": 10.01567518957158,
+ "learning_rate": 9.75609756097561e-07,
+ "loss": 1.5767,
+ "step": 2
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.6466000875559635,
+ "learning_rate": 1.4634146341463414e-06,
+ "loss": 1.3913,
+ "step": 3
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.6644565932010504,
+ "learning_rate": 1.951219512195122e-06,
+ "loss": 1.3218,
+ "step": 4
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.571354207588475,
+ "learning_rate": 2.4390243902439027e-06,
+ "loss": 1.3597,
+ "step": 5
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.31036262839244955,
+ "learning_rate": 2.926829268292683e-06,
+ "loss": 1.2832,
+ "step": 6
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.2622135027188184,
+ "learning_rate": 3.414634146341464e-06,
+ "loss": 1.2161,
+ "step": 7
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.296824630261661,
+ "learning_rate": 3.902439024390244e-06,
+ "loss": 1.2985,
+ "step": 8
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.2557267467361569,
+ "learning_rate": 4.390243902439025e-06,
+ "loss": 1.3175,
+ "step": 9
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.23418939513890769,
+ "learning_rate": 4.8780487804878055e-06,
+ "loss": 1.2617,
+ "step": 10
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.2364760983285843,
+ "learning_rate": 5.365853658536586e-06,
+ "loss": 1.3103,
+ "step": 11
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.23893034721889,
+ "learning_rate": 5.853658536585366e-06,
+ "loss": 1.2405,
+ "step": 12
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.25563593295485887,
+ "learning_rate": 6.341463414634147e-06,
+ "loss": 1.2831,
+ "step": 13
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.23239975352661665,
+ "learning_rate": 6.829268292682928e-06,
+ "loss": 1.3125,
+ "step": 14
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.3092813858209507,
+ "learning_rate": 7.317073170731707e-06,
+ "loss": 1.2422,
+ "step": 15
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.282563380367434,
+ "learning_rate": 7.804878048780489e-06,
+ "loss": 1.2453,
+ "step": 16
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.22065680088315018,
+ "learning_rate": 8.292682926829268e-06,
+ "loss": 1.2491,
+ "step": 17
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.22777800877980184,
+ "learning_rate": 8.78048780487805e-06,
+ "loss": 1.2655,
+ "step": 18
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.22145212540177928,
+ "learning_rate": 9.268292682926831e-06,
+ "loss": 1.2413,
+ "step": 19
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.22482351883112714,
+ "learning_rate": 9.756097560975611e-06,
+ "loss": 1.2653,
+ "step": 20
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.20823080508385733,
+ "learning_rate": 1.024390243902439e-05,
+ "loss": 1.2374,
+ "step": 21
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.26025492562935737,
+ "learning_rate": 1.0731707317073172e-05,
+ "loss": 1.2065,
+ "step": 22
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.2150252124176173,
+ "learning_rate": 1.1219512195121953e-05,
+ "loss": 1.2782,
+ "step": 23
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.2505915177425618,
+ "learning_rate": 1.1707317073170731e-05,
+ "loss": 1.2742,
+ "step": 24
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.20129223044786942,
+ "learning_rate": 1.2195121951219513e-05,
+ "loss": 1.3366,
+ "step": 25
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.1973508510397107,
+ "learning_rate": 1.2682926829268294e-05,
+ "loss": 1.2476,
+ "step": 26
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.27103325392437194,
+ "learning_rate": 1.3170731707317076e-05,
+ "loss": 1.2325,
+ "step": 27
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.17954976411006285,
+ "learning_rate": 1.3658536585365855e-05,
+ "loss": 1.2523,
+ "step": 28
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.22216997851088888,
+ "learning_rate": 1.4146341463414635e-05,
+ "loss": 1.3297,
+ "step": 29
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.2071458864548587,
+ "learning_rate": 1.4634146341463415e-05,
+ "loss": 1.2127,
+ "step": 30
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.18039422081622164,
+ "learning_rate": 1.5121951219512196e-05,
+ "loss": 1.2509,
+ "step": 31
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.18631254372974412,
+ "learning_rate": 1.5609756097560978e-05,
+ "loss": 1.2247,
+ "step": 32
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.18843872523649827,
+ "learning_rate": 1.6097560975609757e-05,
+ "loss": 1.195,
+ "step": 33
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.2163847267778325,
+ "learning_rate": 1.6585365853658537e-05,
+ "loss": 1.2179,
+ "step": 34
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.19687688475496104,
+ "learning_rate": 1.7073170731707317e-05,
+ "loss": 1.2763,
+ "step": 35
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.20409643064887947,
+ "learning_rate": 1.75609756097561e-05,
+ "loss": 1.253,
+ "step": 36
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.1879182661759335,
+ "learning_rate": 1.804878048780488e-05,
+ "loss": 1.2586,
+ "step": 37
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.19400648948514373,
+ "learning_rate": 1.8536585365853663e-05,
+ "loss": 1.2154,
+ "step": 38
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.1878879343148452,
+ "learning_rate": 1.902439024390244e-05,
+ "loss": 1.2304,
+ "step": 39
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.17687475469924052,
+ "learning_rate": 1.9512195121951222e-05,
+ "loss": 1.2351,
+ "step": 40
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.18223935625384885,
+ "learning_rate": 2e-05,
+ "loss": 1.2222,
+ "step": 41
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.1943061629408338,
+ "learning_rate": 2.048780487804878e-05,
+ "loss": 1.2044,
+ "step": 42
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.17027514338700078,
+ "learning_rate": 2.0975609756097564e-05,
+ "loss": 1.1548,
+ "step": 43
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.18553769630586192,
+ "learning_rate": 2.1463414634146344e-05,
+ "loss": 1.2721,
+ "step": 44
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.19732826914228765,
+ "learning_rate": 2.1951219512195124e-05,
+ "loss": 1.3097,
+ "step": 45
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.18714230986631472,
+ "learning_rate": 2.2439024390243907e-05,
+ "loss": 1.2662,
+ "step": 46
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.19988987568002223,
+ "learning_rate": 2.2926829268292683e-05,
+ "loss": 1.2904,
+ "step": 47
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.17744650133390918,
+ "learning_rate": 2.3414634146341463e-05,
+ "loss": 1.1825,
+ "step": 48
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.16576734763834533,
+ "learning_rate": 2.3902439024390246e-05,
+ "loss": 1.1858,
+ "step": 49
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.179591794065527,
+ "learning_rate": 2.4390243902439026e-05,
+ "loss": 1.2711,
+ "step": 50
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.17923464471176911,
+ "learning_rate": 2.4878048780487805e-05,
+ "loss": 1.2289,
+ "step": 51
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.18991742907836837,
+ "learning_rate": 2.536585365853659e-05,
+ "loss": 1.3097,
+ "step": 52
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.19849796137254636,
+ "learning_rate": 2.5853658536585368e-05,
+ "loss": 1.2489,
+ "step": 53
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.17452371110976383,
+ "learning_rate": 2.634146341463415e-05,
+ "loss": 1.2461,
+ "step": 54
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.17671022353085036,
+ "learning_rate": 2.682926829268293e-05,
+ "loss": 1.153,
+ "step": 55
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.36820559192096686,
+ "learning_rate": 2.731707317073171e-05,
+ "loss": 1.2431,
+ "step": 56
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.20331468526494198,
+ "learning_rate": 2.7804878048780487e-05,
+ "loss": 1.2575,
+ "step": 57
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.2402486598118377,
+ "learning_rate": 2.829268292682927e-05,
+ "loss": 1.2538,
+ "step": 58
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.2549409484173144,
+ "learning_rate": 2.878048780487805e-05,
+ "loss": 1.2065,
+ "step": 59
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.2053105349872685,
+ "learning_rate": 2.926829268292683e-05,
+ "loss": 1.2094,
+ "step": 60
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.17971910872957886,
+ "learning_rate": 2.9756097560975613e-05,
+ "loss": 1.228,
+ "step": 61
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.1885853654992973,
+ "learning_rate": 3.0243902439024392e-05,
+ "loss": 1.2286,
+ "step": 62
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.1848524571968613,
+ "learning_rate": 3.073170731707317e-05,
+ "loss": 1.2718,
+ "step": 63
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.18734105883548513,
+ "learning_rate": 3.1219512195121955e-05,
+ "loss": 1.2357,
+ "step": 64
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.17774668052121825,
+ "learning_rate": 3.170731707317074e-05,
+ "loss": 1.1509,
+ "step": 65
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.17890968008080646,
+ "learning_rate": 3.2195121951219514e-05,
+ "loss": 1.1924,
+ "step": 66
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.18249273371332375,
+ "learning_rate": 3.268292682926829e-05,
+ "loss": 1.2545,
+ "step": 67
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.21064122671902577,
+ "learning_rate": 3.3170731707317074e-05,
+ "loss": 1.2832,
+ "step": 68
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.1820064171955093,
+ "learning_rate": 3.365853658536586e-05,
+ "loss": 1.2071,
+ "step": 69
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.16996662800553433,
+ "learning_rate": 3.414634146341463e-05,
+ "loss": 1.2073,
+ "step": 70
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.1618669302922445,
+ "learning_rate": 3.4634146341463416e-05,
+ "loss": 1.1289,
+ "step": 71
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.18948744950985544,
+ "learning_rate": 3.51219512195122e-05,
+ "loss": 1.2915,
+ "step": 72
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.18326143691603383,
+ "learning_rate": 3.5609756097560976e-05,
+ "loss": 1.2238,
+ "step": 73
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.17410704510700503,
+ "learning_rate": 3.609756097560976e-05,
+ "loss": 1.1784,
+ "step": 74
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.1983667344995625,
+ "learning_rate": 3.658536585365854e-05,
+ "loss": 1.2452,
+ "step": 75
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.3416310763369357,
+ "learning_rate": 3.7073170731707325e-05,
+ "loss": 1.1972,
+ "step": 76
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.2776466983511955,
+ "learning_rate": 3.75609756097561e-05,
+ "loss": 1.3121,
+ "step": 77
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.20026129636576834,
+ "learning_rate": 3.804878048780488e-05,
+ "loss": 1.2436,
+ "step": 78
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.21064549243917835,
+ "learning_rate": 3.853658536585366e-05,
+ "loss": 1.2064,
+ "step": 79
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.22119482175714267,
+ "learning_rate": 3.9024390243902444e-05,
+ "loss": 1.2715,
+ "step": 80
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.23047133748844142,
+ "learning_rate": 3.951219512195122e-05,
+ "loss": 1.2888,
+ "step": 81
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.18741863156973176,
+ "learning_rate": 4e-05,
+ "loss": 1.248,
+ "step": 82
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.1747859810629604,
+ "learning_rate": 4.0487804878048786e-05,
+ "loss": 1.1683,
+ "step": 83
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.1896944798413341,
+ "learning_rate": 4.097560975609756e-05,
+ "loss": 1.2155,
+ "step": 84
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.18724128114363303,
+ "learning_rate": 4.1463414634146346e-05,
+ "loss": 1.2273,
+ "step": 85
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.17368125504855478,
+ "learning_rate": 4.195121951219513e-05,
+ "loss": 1.224,
+ "step": 86
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.18371141013625703,
+ "learning_rate": 4.2439024390243905e-05,
+ "loss": 1.2294,
+ "step": 87
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.1791029365673714,
+ "learning_rate": 4.292682926829269e-05,
+ "loss": 1.2895,
+ "step": 88
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.20259974283859655,
+ "learning_rate": 4.341463414634147e-05,
+ "loss": 1.1841,
+ "step": 89
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.17457456183272174,
+ "learning_rate": 4.390243902439025e-05,
+ "loss": 1.2357,
+ "step": 90
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.1815824380789748,
+ "learning_rate": 4.439024390243903e-05,
+ "loss": 1.2304,
+ "step": 91
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.17566480599583392,
+ "learning_rate": 4.4878048780487814e-05,
+ "loss": 1.242,
+ "step": 92
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.18422975005984474,
+ "learning_rate": 4.536585365853658e-05,
+ "loss": 1.2177,
+ "step": 93
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.16796781877940678,
+ "learning_rate": 4.5853658536585366e-05,
+ "loss": 1.1482,
+ "step": 94
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.18636131653783305,
+ "learning_rate": 4.634146341463415e-05,
+ "loss": 1.1758,
+ "step": 95
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.1823665700289814,
+ "learning_rate": 4.6829268292682926e-05,
+ "loss": 1.289,
+ "step": 96
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.1719900691262439,
+ "learning_rate": 4.731707317073171e-05,
+ "loss": 1.1626,
+ "step": 97
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.17937994168039778,
+ "learning_rate": 4.780487804878049e-05,
+ "loss": 1.175,
+ "step": 98
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.16631851422106986,
+ "learning_rate": 4.829268292682927e-05,
+ "loss": 1.2177,
+ "step": 99
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.19143696232800309,
+ "learning_rate": 4.878048780487805e-05,
+ "loss": 1.3071,
+ "step": 100
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.17859506638780318,
+ "learning_rate": 4.9268292682926835e-05,
+ "loss": 1.2351,
+ "step": 101
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.18381520321248196,
+ "learning_rate": 4.975609756097561e-05,
+ "loss": 1.2342,
+ "step": 102
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.17968218683773912,
+ "learning_rate": 5.0243902439024394e-05,
+ "loss": 1.2074,
+ "step": 103
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.18139489969339018,
+ "learning_rate": 5.073170731707318e-05,
+ "loss": 1.1558,
+ "step": 104
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.17366624842514394,
+ "learning_rate": 5.121951219512195e-05,
+ "loss": 1.1897,
+ "step": 105
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.16034845455223745,
+ "learning_rate": 5.1707317073170736e-05,
+ "loss": 1.179,
+ "step": 106
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.17583069577827776,
+ "learning_rate": 5.219512195121952e-05,
+ "loss": 1.1856,
+ "step": 107
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.1853758076989552,
+ "learning_rate": 5.26829268292683e-05,
+ "loss": 1.2072,
+ "step": 108
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.19597443965936462,
+ "learning_rate": 5.317073170731708e-05,
+ "loss": 1.2271,
+ "step": 109
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.1899206334098331,
+ "learning_rate": 5.365853658536586e-05,
+ "loss": 1.1961,
+ "step": 110
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.17463763837757018,
+ "learning_rate": 5.4146341463414645e-05,
+ "loss": 1.2049,
+ "step": 111
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.20431371701229986,
+ "learning_rate": 5.463414634146342e-05,
+ "loss": 1.2891,
+ "step": 112
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1814475107638498,
+ "learning_rate": 5.51219512195122e-05,
+ "loss": 1.2346,
+ "step": 113
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1883849423207823,
+ "learning_rate": 5.5609756097560974e-05,
+ "loss": 1.244,
+ "step": 114
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1857258128640568,
+ "learning_rate": 5.609756097560976e-05,
+ "loss": 1.2669,
+ "step": 115
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1740768514118401,
+ "learning_rate": 5.658536585365854e-05,
+ "loss": 1.2414,
+ "step": 116
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1919320335584178,
+ "learning_rate": 5.7073170731707317e-05,
+ "loss": 1.2886,
+ "step": 117
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.18288775167828136,
+ "learning_rate": 5.75609756097561e-05,
+ "loss": 1.1875,
+ "step": 118
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.18208588867750863,
+ "learning_rate": 5.804878048780488e-05,
+ "loss": 1.2388,
+ "step": 119
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.1743260015658331,
+ "learning_rate": 5.853658536585366e-05,
+ "loss": 1.1762,
+ "step": 120
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.17856046291517946,
+ "learning_rate": 5.902439024390244e-05,
+ "loss": 1.2888,
+ "step": 121
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.17493794870966536,
+ "learning_rate": 5.9512195121951225e-05,
+ "loss": 1.2222,
+ "step": 122
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.1909202655203384,
+ "learning_rate": 6.000000000000001e-05,
+ "loss": 1.2414,
+ "step": 123
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.18345819482834988,
+ "learning_rate": 6.0487804878048785e-05,
+ "loss": 1.2756,
+ "step": 124
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.2057069352956621,
+ "learning_rate": 6.097560975609757e-05,
+ "loss": 1.261,
+ "step": 125
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.299775882469108,
+ "learning_rate": 6.146341463414634e-05,
+ "loss": 1.2566,
+ "step": 126
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.1869687633018095,
+ "learning_rate": 6.195121951219513e-05,
+ "loss": 1.3039,
+ "step": 127
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.17747149926197442,
+ "learning_rate": 6.243902439024391e-05,
+ "loss": 1.2524,
+ "step": 128
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.17885157788044242,
+ "learning_rate": 6.29268292682927e-05,
+ "loss": 1.2455,
+ "step": 129
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.17617298187845123,
+ "learning_rate": 6.341463414634148e-05,
+ "loss": 1.2009,
+ "step": 130
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.20164176323497066,
+ "learning_rate": 6.390243902439025e-05,
+ "loss": 1.2634,
+ "step": 131
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.20459903417307612,
+ "learning_rate": 6.439024390243903e-05,
+ "loss": 1.1963,
+ "step": 132
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.1863755486334296,
+ "learning_rate": 6.487804878048781e-05,
+ "loss": 1.2387,
+ "step": 133
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.19265866140295207,
+ "learning_rate": 6.536585365853658e-05,
+ "loss": 1.2688,
+ "step": 134
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.1823425868969493,
+ "learning_rate": 6.585365853658536e-05,
+ "loss": 1.2041,
+ "step": 135
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.2016853266472781,
+ "learning_rate": 6.634146341463415e-05,
+ "loss": 1.1223,
+ "step": 136
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.17282675192463448,
+ "learning_rate": 6.682926829268293e-05,
+ "loss": 1.1879,
+ "step": 137
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.17398811693399288,
+ "learning_rate": 6.731707317073171e-05,
+ "loss": 1.2682,
+ "step": 138
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.18516916965434696,
+ "learning_rate": 6.78048780487805e-05,
+ "loss": 1.1666,
+ "step": 139
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.1852213129647933,
+ "learning_rate": 6.829268292682927e-05,
+ "loss": 1.2501,
+ "step": 140
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.17915948766591883,
+ "learning_rate": 6.878048780487805e-05,
+ "loss": 1.2264,
+ "step": 141
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.21599939417233183,
+ "learning_rate": 6.926829268292683e-05,
+ "loss": 1.2376,
+ "step": 142
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.17839304459521851,
+ "learning_rate": 6.975609756097562e-05,
+ "loss": 1.2353,
+ "step": 143
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.20826913231380875,
+ "learning_rate": 7.02439024390244e-05,
+ "loss": 1.1901,
+ "step": 144
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.20788894913361589,
+ "learning_rate": 7.073170731707318e-05,
+ "loss": 1.2577,
+ "step": 145
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.18420055842301297,
+ "learning_rate": 7.121951219512195e-05,
+ "loss": 1.1393,
+ "step": 146
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.19903048468685589,
+ "learning_rate": 7.170731707317073e-05,
+ "loss": 1.2321,
+ "step": 147
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.19074116314985748,
+ "learning_rate": 7.219512195121952e-05,
+ "loss": 1.1912,
+ "step": 148
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.2353816469403903,
+ "learning_rate": 7.26829268292683e-05,
+ "loss": 1.28,
+ "step": 149
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.21634875684769345,
+ "learning_rate": 7.317073170731708e-05,
+ "loss": 1.3312,
+ "step": 150
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.18290969006743918,
+ "learning_rate": 7.365853658536587e-05,
+ "loss": 1.2214,
+ "step": 151
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.18484243897545208,
+ "learning_rate": 7.414634146341465e-05,
+ "loss": 1.1895,
+ "step": 152
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.21882343112978872,
+ "learning_rate": 7.463414634146342e-05,
+ "loss": 1.2219,
+ "step": 153
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.19868284379241205,
+ "learning_rate": 7.51219512195122e-05,
+ "loss": 1.2176,
+ "step": 154
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.20912516312950613,
+ "learning_rate": 7.560975609756097e-05,
+ "loss": 1.242,
+ "step": 155
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.23811880045549916,
+ "learning_rate": 7.609756097560976e-05,
+ "loss": 1.2838,
+ "step": 156
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.19511077122033713,
+ "learning_rate": 7.658536585365854e-05,
+ "loss": 1.1594,
+ "step": 157
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.20094129399534238,
+ "learning_rate": 7.707317073170732e-05,
+ "loss": 1.2966,
+ "step": 158
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.19366245038292418,
+ "learning_rate": 7.75609756097561e-05,
+ "loss": 1.2246,
+ "step": 159
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.19409570223867306,
+ "learning_rate": 7.804878048780489e-05,
+ "loss": 1.2312,
+ "step": 160
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.2087258457033805,
+ "learning_rate": 7.853658536585366e-05,
+ "loss": 1.2169,
+ "step": 161
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.18765223996270428,
+ "learning_rate": 7.902439024390244e-05,
+ "loss": 1.2383,
+ "step": 162
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.20734180224147242,
+ "learning_rate": 7.951219512195122e-05,
+ "loss": 1.2587,
+ "step": 163
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.24690929540287834,
+ "learning_rate": 8e-05,
+ "loss": 1.1951,
+ "step": 164
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.2003538797619543,
+ "learning_rate": 7.999990914797545e-05,
+ "loss": 1.1982,
+ "step": 165
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.22469075613510484,
+ "learning_rate": 7.99996365923145e-05,
+ "loss": 1.2355,
+ "step": 166
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.21870100788336058,
+ "learning_rate": 7.999918233425526e-05,
+ "loss": 1.1103,
+ "step": 167
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.20939989594131886,
+ "learning_rate": 7.999854637586122e-05,
+ "loss": 1.1966,
+ "step": 168
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.43108211416237796,
+ "learning_rate": 7.999772872002132e-05,
+ "loss": 1.2882,
+ "step": 169
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.27045413432174487,
+ "learning_rate": 7.999672937044984e-05,
+ "loss": 1.2399,
+ "step": 170
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.19700483036740515,
+ "learning_rate": 7.999554833168642e-05,
+ "loss": 1.202,
+ "step": 171
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.3335979493370708,
+ "learning_rate": 7.999418560909604e-05,
+ "loss": 1.1995,
+ "step": 172
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.3165803974474567,
+ "learning_rate": 7.999264120886902e-05,
+ "loss": 1.1569,
+ "step": 173
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.1951699080346223,
+ "learning_rate": 7.999091513802093e-05,
+ "loss": 1.1778,
+ "step": 174
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.2087559121749787,
+ "learning_rate": 7.998900740439265e-05,
+ "loss": 1.1736,
+ "step": 175
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.20345180977460478,
+ "learning_rate": 7.998691801665024e-05,
+ "loss": 1.2281,
+ "step": 176
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.24617644827252333,
+ "learning_rate": 7.998464698428495e-05,
+ "loss": 1.2072,
+ "step": 177
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.2469050959356265,
+ "learning_rate": 7.998219431761318e-05,
+ "loss": 1.2242,
+ "step": 178
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.19529317748460623,
+ "learning_rate": 7.997956002777642e-05,
+ "loss": 1.2567,
+ "step": 179
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.19048389491381376,
+ "learning_rate": 7.99767441267412e-05,
+ "loss": 1.2982,
+ "step": 180
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.2085799116493225,
+ "learning_rate": 7.997374662729904e-05,
+ "loss": 1.1254,
+ "step": 181
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.20636853256378995,
+ "learning_rate": 7.997056754306636e-05,
+ "loss": 1.2435,
+ "step": 182
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.20590016382290252,
+ "learning_rate": 7.99672068884845e-05,
+ "loss": 1.2658,
+ "step": 183
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.1931166169764433,
+ "learning_rate": 7.996366467881955e-05,
+ "loss": 1.1637,
+ "step": 184
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.18873318157988098,
+ "learning_rate": 7.995994093016237e-05,
+ "loss": 1.1335,
+ "step": 185
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.19210254625199108,
+ "learning_rate": 7.995603565942846e-05,
+ "loss": 1.1928,
+ "step": 186
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.2130986479765664,
+ "learning_rate": 7.995194888435792e-05,
+ "loss": 1.2158,
+ "step": 187
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.22003854501814088,
+ "learning_rate": 7.994768062351532e-05,
+ "loss": 1.2288,
+ "step": 188
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.20330803191993058,
+ "learning_rate": 7.994323089628968e-05,
+ "loss": 1.2426,
+ "step": 189
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.20567314642208634,
+ "learning_rate": 7.993859972289434e-05,
+ "loss": 1.2649,
+ "step": 190
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.21556663727342962,
+ "learning_rate": 7.993378712436686e-05,
+ "loss": 1.2545,
+ "step": 191
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.20309165469109888,
+ "learning_rate": 7.992879312256897e-05,
+ "loss": 1.3338,
+ "step": 192
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.19574356669421325,
+ "learning_rate": 7.992361774018641e-05,
+ "loss": 1.278,
+ "step": 193
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.2763613746722313,
+ "learning_rate": 7.991826100072891e-05,
+ "loss": 1.2571,
+ "step": 194
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.19346552479915102,
+ "learning_rate": 7.991272292852996e-05,
+ "loss": 1.2027,
+ "step": 195
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.2281167812123908,
+ "learning_rate": 7.990700354874683e-05,
+ "loss": 1.2586,
+ "step": 196
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.19699013712137542,
+ "learning_rate": 7.990110288736042e-05,
+ "loss": 1.1371,
+ "step": 197
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.21768209981475933,
+ "learning_rate": 7.989502097117503e-05,
+ "loss": 1.2522,
+ "step": 198
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.21335427847754582,
+ "learning_rate": 7.988875782781838e-05,
+ "loss": 1.2437,
+ "step": 199
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.21856710629066897,
+ "learning_rate": 7.988231348574147e-05,
+ "loss": 1.2135,
+ "step": 200
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.20482062658774797,
+ "learning_rate": 7.987568797421836e-05,
+ "loss": 1.1755,
+ "step": 201
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.2017756813960897,
+ "learning_rate": 7.986888132334608e-05,
+ "loss": 1.1699,
+ "step": 202
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.20496443848153809,
+ "learning_rate": 7.986189356404458e-05,
+ "loss": 1.2125,
+ "step": 203
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.2134603800558358,
+ "learning_rate": 7.985472472805643e-05,
+ "loss": 1.2391,
+ "step": 204
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.2364175573420861,
+ "learning_rate": 7.98473748479468e-05,
+ "loss": 1.2384,
+ "step": 205
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.1872419861598724,
+ "learning_rate": 7.983984395710326e-05,
+ "loss": 1.1457,
+ "step": 206
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.28222194007095774,
+ "learning_rate": 7.983213208973566e-05,
+ "loss": 1.2952,
+ "step": 207
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.1916094851162064,
+ "learning_rate": 7.982423928087593e-05,
+ "loss": 1.1763,
+ "step": 208
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.18446245256166657,
+ "learning_rate": 7.981616556637795e-05,
+ "loss": 1.1863,
+ "step": 209
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.195191961022491,
+ "learning_rate": 7.980791098291737e-05,
+ "loss": 1.2036,
+ "step": 210
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.2652439657825496,
+ "learning_rate": 7.979947556799151e-05,
+ "loss": 1.2834,
+ "step": 211
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.24308438957843412,
+ "learning_rate": 7.979085935991906e-05,
+ "loss": 1.234,
+ "step": 212
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.21294701043622016,
+ "learning_rate": 7.978206239784004e-05,
+ "loss": 1.3006,
+ "step": 213
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.25809277041859524,
+ "learning_rate": 7.977308472171553e-05,
+ "loss": 1.2272,
+ "step": 214
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.193463860107294,
+ "learning_rate": 7.976392637232754e-05,
+ "loss": 1.2295,
+ "step": 215
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.2150023760609626,
+ "learning_rate": 7.975458739127877e-05,
+ "loss": 1.2135,
+ "step": 216
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.22590495955605894,
+ "learning_rate": 7.974506782099253e-05,
+ "loss": 1.2532,
+ "step": 217
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.21023744668403702,
+ "learning_rate": 7.973536770471242e-05,
+ "loss": 1.2472,
+ "step": 218
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.2345749799511543,
+ "learning_rate": 7.972548708650218e-05,
+ "loss": 1.1791,
+ "step": 219
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.2158876734005217,
+ "learning_rate": 7.971542601124553e-05,
+ "loss": 1.2483,
+ "step": 220
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.29455339949432446,
+ "learning_rate": 7.970518452464593e-05,
+ "loss": 1.2894,
+ "step": 221
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.23983708730626851,
+ "learning_rate": 7.969476267322636e-05,
+ "loss": 1.271,
+ "step": 222
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.1922400905426158,
+ "learning_rate": 7.968416050432912e-05,
+ "loss": 1.2139,
+ "step": 223
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.2238136844422931,
+ "learning_rate": 7.967337806611568e-05,
+ "loss": 1.2655,
+ "step": 224
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.21230292828267672,
+ "learning_rate": 7.966241540756631e-05,
+ "loss": 1.2406,
+ "step": 225
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.26656119419070456,
+ "learning_rate": 7.965127257848004e-05,
+ "loss": 1.2595,
+ "step": 226
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.22381385502992684,
+ "learning_rate": 7.963994962947426e-05,
+ "loss": 1.1737,
+ "step": 227
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20056702203994298,
+ "learning_rate": 7.962844661198462e-05,
+ "loss": 1.1969,
+ "step": 228
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20148701321526885,
+ "learning_rate": 7.961676357826478e-05,
+ "loss": 1.2151,
+ "step": 229
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20034834807028637,
+ "learning_rate": 7.960490058138604e-05,
+ "loss": 1.1455,
+ "step": 230
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.21050838521846033,
+ "learning_rate": 7.959285767523732e-05,
+ "loss": 1.2223,
+ "step": 231
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20904772138969777,
+ "learning_rate": 7.95806349145247e-05,
+ "loss": 1.2534,
+ "step": 232
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.20307877304792957,
+ "learning_rate": 7.956823235477134e-05,
+ "loss": 1.1352,
+ "step": 233
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.20501105270897094,
+ "learning_rate": 7.95556500523171e-05,
+ "loss": 1.2031,
+ "step": 234
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.19800586972038586,
+ "learning_rate": 7.954288806431838e-05,
+ "loss": 1.2567,
+ "step": 235
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.2175102450594135,
+ "learning_rate": 7.952994644874777e-05,
+ "loss": 1.2538,
+ "step": 236
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.22698189300067595,
+ "learning_rate": 7.951682526439391e-05,
+ "loss": 1.3088,
+ "step": 237
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.19208392014975315,
+ "learning_rate": 7.950352457086109e-05,
+ "loss": 1.2336,
+ "step": 238
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.27004086334319655,
+ "learning_rate": 7.949004442856905e-05,
+ "loss": 1.2012,
+ "step": 239
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.23420974954538043,
+ "learning_rate": 7.947638489875272e-05,
+ "loss": 1.2244,
+ "step": 240
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.20514399124802024,
+ "learning_rate": 7.946254604346186e-05,
+ "loss": 1.2548,
+ "step": 241
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.19334973602372896,
+ "learning_rate": 7.944852792556092e-05,
+ "loss": 1.2104,
+ "step": 242
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.1992640714537956,
+ "learning_rate": 7.943433060872858e-05,
+ "loss": 1.2628,
+ "step": 243
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.203284617090413,
+ "learning_rate": 7.941995415745761e-05,
+ "loss": 1.2002,
+ "step": 244
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.22795306969682058,
+ "learning_rate": 7.94053986370545e-05,
+ "loss": 1.2215,
+ "step": 245
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.20789041346838505,
+ "learning_rate": 7.939066411363915e-05,
+ "loss": 1.0998,
+ "step": 246
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.22354868884742066,
+ "learning_rate": 7.937575065414464e-05,
+ "loss": 1.2564,
+ "step": 247
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.21176392726647736,
+ "learning_rate": 7.936065832631687e-05,
+ "loss": 1.2816,
+ "step": 248
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.19967179557235587,
+ "learning_rate": 7.934538719871427e-05,
+ "loss": 1.1961,
+ "step": 249
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.210819577350627,
+ "learning_rate": 7.932993734070747e-05,
+ "loss": 1.2167,
+ "step": 250
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.21537794551756187,
+ "learning_rate": 7.931430882247903e-05,
+ "loss": 1.2341,
+ "step": 251
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.22850872387256574,
+ "learning_rate": 7.929850171502304e-05,
+ "loss": 1.1686,
+ "step": 252
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.22380366415076383,
+ "learning_rate": 7.928251609014493e-05,
+ "loss": 1.1462,
+ "step": 253
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.22426923149036065,
+ "learning_rate": 7.926635202046102e-05,
+ "loss": 1.1792,
+ "step": 254
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.42082703321103965,
+ "learning_rate": 7.925000957939822e-05,
+ "loss": 1.2718,
+ "step": 255
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.2235432774854074,
+ "learning_rate": 7.92334888411937e-05,
+ "loss": 1.2598,
+ "step": 256
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.281644028934108,
+ "learning_rate": 7.92167898808946e-05,
+ "loss": 1.2205,
+ "step": 257
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.2037705143888748,
+ "learning_rate": 7.919991277435763e-05,
+ "loss": 1.1737,
+ "step": 258
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.20917419230028977,
+ "learning_rate": 7.918285759824879e-05,
+ "loss": 1.2035,
+ "step": 259
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.20510847570635518,
+ "learning_rate": 7.916562443004292e-05,
+ "loss": 1.2135,
+ "step": 260
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.25172483071092466,
+ "learning_rate": 7.914821334802342e-05,
+ "loss": 1.2218,
+ "step": 261
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.21102706700634313,
+ "learning_rate": 7.91306244312819e-05,
+ "loss": 1.1738,
+ "step": 262
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.22626060872645815,
+ "learning_rate": 7.911285775971781e-05,
+ "loss": 1.238,
+ "step": 263
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.22448567539778486,
+ "learning_rate": 7.909491341403805e-05,
+ "loss": 1.2404,
+ "step": 264
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.2019099786139193,
+ "learning_rate": 7.907679147575661e-05,
+ "loss": 1.213,
+ "step": 265
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.24307234839096267,
+ "learning_rate": 7.905849202719422e-05,
+ "loss": 1.2322,
+ "step": 266
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.19801890521743487,
+ "learning_rate": 7.904001515147802e-05,
+ "loss": 1.2448,
+ "step": 267
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.2102742273575385,
+ "learning_rate": 7.902136093254106e-05,
+ "loss": 1.1657,
+ "step": 268
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.2173464476815016,
+ "learning_rate": 7.900252945512201e-05,
+ "loss": 1.2549,
+ "step": 269
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.20957275458699595,
+ "learning_rate": 7.898352080476479e-05,
+ "loss": 1.2536,
+ "step": 270
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.20691966388952363,
+ "learning_rate": 7.896433506781811e-05,
+ "loss": 1.2661,
+ "step": 271
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.2276662275112648,
+ "learning_rate": 7.894497233143509e-05,
+ "loss": 1.2409,
+ "step": 272
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.23854109569301263,
+ "learning_rate": 7.892543268357297e-05,
+ "loss": 1.2681,
+ "step": 273
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.2233864156677627,
+ "learning_rate": 7.890571621299252e-05,
+ "loss": 1.1687,
+ "step": 274
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.20114129147925475,
+ "learning_rate": 7.888582300925787e-05,
+ "loss": 1.2184,
+ "step": 275
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.2154654670569462,
+ "learning_rate": 7.886575316273586e-05,
+ "loss": 1.1982,
+ "step": 276
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.2292982209343639,
+ "learning_rate": 7.884550676459583e-05,
+ "loss": 1.2129,
+ "step": 277
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.21302713135229548,
+ "learning_rate": 7.882508390680908e-05,
+ "loss": 1.1605,
+ "step": 278
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.2123661020671048,
+ "learning_rate": 7.88044846821485e-05,
+ "loss": 1.2308,
+ "step": 279
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.2080577410800404,
+ "learning_rate": 7.878370918418818e-05,
+ "loss": 1.2195,
+ "step": 280
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.19663901881127385,
+ "learning_rate": 7.876275750730289e-05,
+ "loss": 1.1591,
+ "step": 281
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.20534502031312163,
+ "learning_rate": 7.874162974666776e-05,
+ "loss": 1.2664,
+ "step": 282
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.23240445399513837,
+ "learning_rate": 7.872032599825779e-05,
+ "loss": 1.2151,
+ "step": 283
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.2672527316717507,
+ "learning_rate": 7.86988463588474e-05,
+ "loss": 1.2406,
+ "step": 284
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.19893903058743695,
+ "learning_rate": 7.867719092601003e-05,
+ "loss": 1.1291,
+ "step": 285
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.33275268109930917,
+ "learning_rate": 7.865535979811768e-05,
+ "loss": 1.1406,
+ "step": 286
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.2373619455690358,
+ "learning_rate": 7.863335307434045e-05,
+ "loss": 1.2799,
+ "step": 287
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.263235735390858,
+ "learning_rate": 7.861117085464612e-05,
+ "loss": 1.2415,
+ "step": 288
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.25884281780784324,
+ "learning_rate": 7.858881323979965e-05,
+ "loss": 1.3919,
+ "step": 289
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.25426288332255736,
+ "learning_rate": 7.85662803313628e-05,
+ "loss": 1.174,
+ "step": 290
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.26655405527881243,
+ "learning_rate": 7.854357223169356e-05,
+ "loss": 1.2806,
+ "step": 291
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.20909844432349833,
+ "learning_rate": 7.852068904394579e-05,
+ "loss": 1.2627,
+ "step": 292
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.21307115068935759,
+ "learning_rate": 7.849763087206866e-05,
+ "loss": 1.1879,
+ "step": 293
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.25009949471398946,
+ "learning_rate": 7.847439782080628e-05,
+ "loss": 1.2881,
+ "step": 294
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.20960783418679174,
+ "learning_rate": 7.845098999569712e-05,
+ "loss": 1.2723,
+ "step": 295
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.24968832437925104,
+ "learning_rate": 7.842740750307362e-05,
+ "loss": 1.2029,
+ "step": 296
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.22981196585125677,
+ "learning_rate": 7.84036504500616e-05,
+ "loss": 1.1695,
+ "step": 297
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.2320606844751365,
+ "learning_rate": 7.837971894457991e-05,
+ "loss": 1.2317,
+ "step": 298
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.23051459673906124,
+ "learning_rate": 7.835561309533981e-05,
+ "loss": 1.2046,
+ "step": 299
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.2510027231060586,
+ "learning_rate": 7.833133301184457e-05,
+ "loss": 1.199,
+ "step": 300
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.23601180466018787,
+ "learning_rate": 7.830687880438895e-05,
+ "loss": 1.1755,
+ "step": 301
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.24740820934385369,
+ "learning_rate": 7.828225058405864e-05,
+ "loss": 1.2054,
+ "step": 302
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.23065372979111173,
+ "learning_rate": 7.825744846272984e-05,
+ "loss": 1.2066,
+ "step": 303
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.22385077334838213,
+ "learning_rate": 7.823247255306866e-05,
+ "loss": 1.2147,
+ "step": 304
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.42981213948386104,
+ "learning_rate": 7.820732296853074e-05,
+ "loss": 1.2314,
+ "step": 305
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.21122844902751076,
+ "learning_rate": 7.818199982336058e-05,
+ "loss": 1.1462,
+ "step": 306
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.23374869692118933,
+ "learning_rate": 7.815650323259117e-05,
+ "loss": 1.2051,
+ "step": 307
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.21662363795962128,
+ "learning_rate": 7.813083331204332e-05,
+ "loss": 1.1575,
+ "step": 308
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.2088315773384112,
+ "learning_rate": 7.810499017832526e-05,
+ "loss": 1.1316,
+ "step": 309
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.2095238410730976,
+ "learning_rate": 7.807897394883203e-05,
+ "loss": 1.2087,
+ "step": 310
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.22672932127256515,
+ "learning_rate": 7.805278474174499e-05,
+ "loss": 1.2512,
+ "step": 311
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.21873052340922736,
+ "learning_rate": 7.802642267603126e-05,
+ "loss": 1.1909,
+ "step": 312
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.219814521916342,
+ "learning_rate": 7.79998878714432e-05,
+ "loss": 1.1669,
+ "step": 313
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.3049426027257317,
+ "learning_rate": 7.797318044851786e-05,
+ "loss": 1.1797,
+ "step": 314
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.22309435690065985,
+ "learning_rate": 7.794630052857638e-05,
+ "loss": 1.1417,
+ "step": 315
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.3891885169154885,
+ "learning_rate": 7.791924823372354e-05,
+ "loss": 1.2369,
+ "step": 316
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.24780269452456372,
+ "learning_rate": 7.789202368684711e-05,
+ "loss": 1.2521,
+ "step": 317
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.21660460720269362,
+ "learning_rate": 7.786462701161738e-05,
+ "loss": 1.2151,
+ "step": 318
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.23635409466561857,
+ "learning_rate": 7.783705833248649e-05,
+ "loss": 1.2363,
+ "step": 319
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.2616135839903218,
+ "learning_rate": 7.780931777468797e-05,
+ "loss": 1.2428,
+ "step": 320
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.21461059159245083,
+ "learning_rate": 7.77814054642361e-05,
+ "loss": 1.1434,
+ "step": 321
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.25348824286656163,
+ "learning_rate": 7.775332152792539e-05,
+ "loss": 1.2368,
+ "step": 322
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.22275034726331247,
+ "learning_rate": 7.772506609332995e-05,
+ "loss": 1.1827,
+ "step": 323
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.25030821228147526,
+ "learning_rate": 7.769663928880298e-05,
+ "loss": 1.2428,
+ "step": 324
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.22251804398745534,
+ "learning_rate": 7.766804124347608e-05,
+ "loss": 1.1889,
+ "step": 325
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.23381455520411995,
+ "learning_rate": 7.763927208725879e-05,
+ "loss": 1.2115,
+ "step": 326
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.27341902651946226,
+ "learning_rate": 7.761033195083791e-05,
+ "loss": 1.2535,
+ "step": 327
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.24862471659814522,
+ "learning_rate": 7.758122096567694e-05,
+ "loss": 1.2128,
+ "step": 328
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.2251357082045494,
+ "learning_rate": 7.755193926401547e-05,
+ "loss": 1.2334,
+ "step": 329
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.3173274941622932,
+ "learning_rate": 7.752248697886857e-05,
+ "loss": 1.226,
+ "step": 330
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.23056440717672175,
+ "learning_rate": 7.74928642440263e-05,
+ "loss": 1.2339,
+ "step": 331
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2801507500859342,
+ "learning_rate": 7.746307119405286e-05,
+ "loss": 1.287,
+ "step": 332
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2267818430426272,
+ "learning_rate": 7.743310796428622e-05,
+ "loss": 1.1916,
+ "step": 333
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2777329160365585,
+ "learning_rate": 7.74029746908374e-05,
+ "loss": 1.252,
+ "step": 334
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.25289169762353,
+ "learning_rate": 7.737267151058983e-05,
+ "loss": 1.2153,
+ "step": 335
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2424670686901653,
+ "learning_rate": 7.734219856119875e-05,
+ "loss": 1.2227,
+ "step": 336
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.22747092217441645,
+ "learning_rate": 7.731155598109067e-05,
+ "loss": 1.19,
+ "step": 337
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.2307810940100189,
+ "learning_rate": 7.728074390946257e-05,
+ "loss": 1.1818,
+ "step": 338
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.2583402574655623,
+ "learning_rate": 7.724976248628142e-05,
+ "loss": 1.1608,
+ "step": 339
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.22140209760890694,
+ "learning_rate": 7.721861185228347e-05,
+ "loss": 1.1245,
+ "step": 340
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.25859310758244686,
+ "learning_rate": 7.718729214897362e-05,
+ "loss": 1.2247,
+ "step": 341
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.26371179531372124,
+ "learning_rate": 7.715580351862482e-05,
+ "loss": 1.2128,
+ "step": 342
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.26575541302851047,
+ "learning_rate": 7.712414610427733e-05,
+ "loss": 1.2443,
+ "step": 343
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.269978305197599,
+ "learning_rate": 7.709232004973816e-05,
+ "loss": 1.2231,
+ "step": 344
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.26583998705977047,
+ "learning_rate": 7.70603254995804e-05,
+ "loss": 1.2476,
+ "step": 345
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.24256062164066097,
+ "learning_rate": 7.702816259914253e-05,
+ "loss": 1.2901,
+ "step": 346
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.3463123472658915,
+ "learning_rate": 7.699583149452779e-05,
+ "loss": 1.3277,
+ "step": 347
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.2269096590531878,
+ "learning_rate": 7.696333233260345e-05,
+ "loss": 1.2047,
+ "step": 348
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.25136883001050025,
+ "learning_rate": 7.693066526100031e-05,
+ "loss": 1.1619,
+ "step": 349
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.2565112571116145,
+ "learning_rate": 7.68978304281118e-05,
+ "loss": 1.2389,
+ "step": 350
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.22175779550828703,
+ "learning_rate": 7.686482798309349e-05,
+ "loss": 1.2238,
+ "step": 351
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.22588304332216555,
+ "learning_rate": 7.683165807586234e-05,
+ "loss": 1.174,
+ "step": 352
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.24889474296529737,
+ "learning_rate": 7.6798320857096e-05,
+ "loss": 1.2366,
+ "step": 353
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.27339703806525034,
+ "learning_rate": 7.676481647823214e-05,
+ "loss": 1.2356,
+ "step": 354
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.23424666722888365,
+ "learning_rate": 7.673114509146782e-05,
+ "loss": 1.2089,
+ "step": 355
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.27978285392461766,
+ "learning_rate": 7.66973068497587e-05,
+ "loss": 1.2609,
+ "step": 356
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.2509423350138824,
+ "learning_rate": 7.666330190681844e-05,
+ "loss": 1.1777,
+ "step": 357
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.23007730927468031,
+ "learning_rate": 7.662913041711793e-05,
+ "loss": 1.154,
+ "step": 358
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.2438648674953112,
+ "learning_rate": 7.659479253588462e-05,
+ "loss": 1.2257,
+ "step": 359
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.28816093242092233,
+ "learning_rate": 7.65602884191018e-05,
+ "loss": 1.2558,
+ "step": 360
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.24972815300596035,
+ "learning_rate": 7.652561822350793e-05,
+ "loss": 1.2837,
+ "step": 361
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.2543189139697063,
+ "learning_rate": 7.649078210659587e-05,
+ "loss": 1.2193,
+ "step": 362
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.2237937956718952,
+ "learning_rate": 7.645578022661224e-05,
+ "loss": 1.2237,
+ "step": 363
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.29742029408787396,
+ "learning_rate": 7.642061274255657e-05,
+ "loss": 1.2116,
+ "step": 364
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2462883147335493,
+ "learning_rate": 7.638527981418075e-05,
+ "loss": 1.1827,
+ "step": 365
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2647802498907096,
+ "learning_rate": 7.634978160198817e-05,
+ "loss": 1.2739,
+ "step": 366
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.22360398779217264,
+ "learning_rate": 7.631411826723306e-05,
+ "loss": 1.2185,
+ "step": 367
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2635048004593543,
+ "learning_rate": 7.627828997191973e-05,
+ "loss": 1.2317,
+ "step": 368
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2764803449917684,
+ "learning_rate": 7.624229687880184e-05,
+ "loss": 1.1923,
+ "step": 369
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.25724943233414527,
+ "learning_rate": 7.620613915138166e-05,
+ "loss": 1.2218,
+ "step": 370
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.2858318045794755,
+ "learning_rate": 7.61698169539093e-05,
+ "loss": 1.1496,
+ "step": 371
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.23547216647460364,
+ "learning_rate": 7.613333045138206e-05,
+ "loss": 1.1905,
+ "step": 372
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.22984814903684375,
+ "learning_rate": 7.609667980954355e-05,
+ "loss": 1.2009,
+ "step": 373
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.2551903754079084,
+ "learning_rate": 7.605986519488301e-05,
+ "loss": 1.2042,
+ "step": 374
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.2508257410125616,
+ "learning_rate": 7.602288677463457e-05,
+ "loss": 1.2468,
+ "step": 375
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.25324577774935964,
+ "learning_rate": 7.598574471677644e-05,
+ "loss": 1.2603,
+ "step": 376
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.35888776531769967,
+ "learning_rate": 7.59484391900302e-05,
+ "loss": 1.1929,
+ "step": 377
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.22048517191014724,
+ "learning_rate": 7.591097036385994e-05,
+ "loss": 1.1783,
+ "step": 378
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.2781160412746083,
+ "learning_rate": 7.587333840847162e-05,
+ "loss": 1.3397,
+ "step": 379
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.24033046830332258,
+ "learning_rate": 7.583554349481222e-05,
+ "loss": 1.2436,
+ "step": 380
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.26413762380260003,
+ "learning_rate": 7.579758579456893e-05,
+ "loss": 1.1917,
+ "step": 381
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.2390937887338632,
+ "learning_rate": 7.575946548016847e-05,
+ "loss": 1.2186,
+ "step": 382
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.25131263043429275,
+ "learning_rate": 7.572118272477622e-05,
+ "loss": 1.2538,
+ "step": 383
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.223974104870702,
+ "learning_rate": 7.568273770229546e-05,
+ "loss": 1.2165,
+ "step": 384
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.25840356830252875,
+ "learning_rate": 7.564413058736663e-05,
+ "loss": 1.1848,
+ "step": 385
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.2723156683076603,
+ "learning_rate": 7.560536155536641e-05,
+ "loss": 1.1982,
+ "step": 386
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.265687427976889,
+ "learning_rate": 7.556643078240708e-05,
+ "loss": 1.231,
+ "step": 387
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.25152762080976077,
+ "learning_rate": 7.552733844533562e-05,
+ "loss": 1.1974,
+ "step": 388
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.2366049485053541,
+ "learning_rate": 7.548808472173292e-05,
+ "loss": 1.3119,
+ "step": 389
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.22092196577077122,
+ "learning_rate": 7.5448669789913e-05,
+ "loss": 1.195,
+ "step": 390
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.22667521540462374,
+ "learning_rate": 7.540909382892217e-05,
+ "loss": 1.1431,
+ "step": 391
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.25432207282646513,
+ "learning_rate": 7.536935701853823e-05,
+ "loss": 1.2173,
+ "step": 392
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.29950506457923864,
+ "learning_rate": 7.53294595392697e-05,
+ "loss": 1.1962,
+ "step": 393
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.24735689607229913,
+ "learning_rate": 7.528940157235487e-05,
+ "loss": 1.2053,
+ "step": 394
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.24394198607459663,
+ "learning_rate": 7.524918329976114e-05,
+ "loss": 1.1979,
+ "step": 395
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.2630369372689188,
+ "learning_rate": 7.520880490418409e-05,
+ "loss": 1.2111,
+ "step": 396
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.26275028416291457,
+ "learning_rate": 7.516826656904664e-05,
+ "loss": 1.2133,
+ "step": 397
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.23938074620956928,
+ "learning_rate": 7.512756847849831e-05,
+ "loss": 1.1355,
+ "step": 398
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.3724960610098138,
+ "learning_rate": 7.508671081741428e-05,
+ "loss": 1.2572,
+ "step": 399
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.24161685847894723,
+ "learning_rate": 7.504569377139462e-05,
+ "loss": 1.1706,
+ "step": 400
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.26121591322670523,
+ "learning_rate": 7.50045175267634e-05,
+ "loss": 1.2135,
+ "step": 401
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2465579498164775,
+ "learning_rate": 7.496318227056788e-05,
+ "loss": 1.1641,
+ "step": 402
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2556288696122787,
+ "learning_rate": 7.492168819057767e-05,
+ "loss": 1.2939,
+ "step": 403
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.261481216336303,
+ "learning_rate": 7.488003547528382e-05,
+ "loss": 1.2026,
+ "step": 404
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2389415135676362,
+ "learning_rate": 7.483822431389799e-05,
+ "loss": 1.2131,
+ "step": 405
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2559201956627192,
+ "learning_rate": 7.479625489635162e-05,
+ "loss": 1.1246,
+ "step": 406
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.27127932491822604,
+ "learning_rate": 7.475412741329504e-05,
+ "loss": 1.2429,
+ "step": 407
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.27006004008695594,
+ "learning_rate": 7.47118420560966e-05,
+ "loss": 1.2388,
+ "step": 408
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.23716823297200537,
+ "learning_rate": 7.466939901684182e-05,
+ "loss": 1.1264,
+ "step": 409
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.2885373898669248,
+ "learning_rate": 7.462679848833252e-05,
+ "loss": 1.2786,
+ "step": 410
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.49215227598639927,
+ "learning_rate": 7.458404066408588e-05,
+ "loss": 1.2386,
+ "step": 411
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.24235735604947403,
+ "learning_rate": 7.454112573833368e-05,
+ "loss": 1.1423,
+ "step": 412
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.2584614748054343,
+ "learning_rate": 7.449805390602127e-05,
+ "loss": 1.2669,
+ "step": 413
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.23806123085998873,
+ "learning_rate": 7.445482536280684e-05,
+ "loss": 1.1763,
+ "step": 414
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.24459517607786851,
+ "learning_rate": 7.441144030506043e-05,
+ "loss": 1.198,
+ "step": 415
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.25801616402700395,
+ "learning_rate": 7.436789892986304e-05,
+ "loss": 1.2136,
+ "step": 416
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.2814819942392514,
+ "learning_rate": 7.432420143500578e-05,
+ "loss": 1.2398,
+ "step": 417
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.22134709322606153,
+ "learning_rate": 7.428034801898893e-05,
+ "loss": 1.1592,
+ "step": 418
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.2899677536995633,
+ "learning_rate": 7.42363388810211e-05,
+ "loss": 1.2296,
+ "step": 419
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.24005943230262294,
+ "learning_rate": 7.419217422101822e-05,
+ "loss": 1.2223,
+ "step": 420
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.26417562369496167,
+ "learning_rate": 7.414785423960275e-05,
+ "loss": 1.2261,
+ "step": 421
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.2580815883535521,
+ "learning_rate": 7.410337913810271e-05,
+ "loss": 1.2021,
+ "step": 422
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.25242217589496435,
+ "learning_rate": 7.405874911855071e-05,
+ "loss": 1.239,
+ "step": 423
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.21991733999839932,
+ "learning_rate": 7.401396438368315e-05,
+ "loss": 1.1716,
+ "step": 424
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.40116538322720213,
+ "learning_rate": 7.396902513693924e-05,
+ "loss": 1.2773,
+ "step": 425
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.277333939455099,
+ "learning_rate": 7.392393158246002e-05,
+ "loss": 1.2574,
+ "step": 426
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.27146087746385755,
+ "learning_rate": 7.387868392508756e-05,
+ "loss": 1.2243,
+ "step": 427
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.255881055620786,
+ "learning_rate": 7.38332823703639e-05,
+ "loss": 1.223,
+ "step": 428
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.24807364856677255,
+ "learning_rate": 7.378772712453021e-05,
+ "loss": 1.1985,
+ "step": 429
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.25746257617764423,
+ "learning_rate": 7.37420183945258e-05,
+ "loss": 1.2502,
+ "step": 430
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.28851991049982234,
+ "learning_rate": 7.369615638798722e-05,
+ "loss": 1.2535,
+ "step": 431
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.24113389811604363,
+ "learning_rate": 7.365014131324725e-05,
+ "loss": 1.2227,
+ "step": 432
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.2414465151257969,
+ "learning_rate": 7.360397337933405e-05,
+ "loss": 1.1884,
+ "step": 433
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.2735463134699831,
+ "learning_rate": 7.355765279597011e-05,
+ "loss": 1.2756,
+ "step": 434
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.2588437452987293,
+ "learning_rate": 7.351117977357139e-05,
+ "loss": 1.2108,
+ "step": 435
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.26573294117796553,
+ "learning_rate": 7.346455452324629e-05,
+ "loss": 1.1821,
+ "step": 436
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.2555476577827304,
+ "learning_rate": 7.341777725679473e-05,
+ "loss": 1.1937,
+ "step": 437
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.2867704132108098,
+ "learning_rate": 7.337084818670716e-05,
+ "loss": 1.2272,
+ "step": 438
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.27726678115981157,
+ "learning_rate": 7.332376752616367e-05,
+ "loss": 1.2331,
+ "step": 439
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.26955338021079955,
+ "learning_rate": 7.32765354890329e-05,
+ "loss": 1.1731,
+ "step": 440
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.25250321202536524,
+ "learning_rate": 7.322915228987116e-05,
+ "loss": 1.2653,
+ "step": 441
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.24748844179765395,
+ "learning_rate": 7.318161814392143e-05,
+ "loss": 1.24,
+ "step": 442
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.28177805247356325,
+ "learning_rate": 7.313393326711239e-05,
+ "loss": 1.185,
+ "step": 443
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.24093242000396312,
+ "learning_rate": 7.30860978760574e-05,
+ "loss": 1.1994,
+ "step": 444
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.26277803901457075,
+ "learning_rate": 7.30381121880536e-05,
+ "loss": 1.212,
+ "step": 445
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2506524258682433,
+ "learning_rate": 7.298997642108079e-05,
+ "loss": 1.2421,
+ "step": 446
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2840599700015824,
+ "learning_rate": 7.294169079380061e-05,
+ "loss": 1.1818,
+ "step": 447
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.24892184038117549,
+ "learning_rate": 7.289325552555538e-05,
+ "loss": 1.1916,
+ "step": 448
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2700898428541357,
+ "learning_rate": 7.284467083636722e-05,
+ "loss": 1.2517,
+ "step": 449
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2617848546539419,
+ "learning_rate": 7.279593694693698e-05,
+ "loss": 1.2063,
+ "step": 450
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2698278585334131,
+ "learning_rate": 7.274705407864332e-05,
+ "loss": 1.194,
+ "step": 451
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.23678313024953834,
+ "learning_rate": 7.26980224535416e-05,
+ "loss": 1.2349,
+ "step": 452
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.24851875792002978,
+ "learning_rate": 7.264884229436293e-05,
+ "loss": 1.1758,
+ "step": 453
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.24122080121681125,
+ "learning_rate": 7.259951382451318e-05,
+ "loss": 1.1962,
+ "step": 454
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.22741322959884405,
+ "learning_rate": 7.25500372680719e-05,
+ "loss": 1.1702,
+ "step": 455
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.2297475610861458,
+ "learning_rate": 7.250041284979137e-05,
+ "loss": 1.1466,
+ "step": 456
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.3057605989721467,
+ "learning_rate": 7.245064079509553e-05,
+ "loss": 1.246,
+ "step": 457
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.2719638501597136,
+ "learning_rate": 7.240072133007899e-05,
+ "loss": 1.2184,
+ "step": 458
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.2436807816414479,
+ "learning_rate": 7.235065468150593e-05,
+ "loss": 1.2324,
+ "step": 459
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.23436349430255515,
+ "learning_rate": 7.23004410768092e-05,
+ "loss": 1.1813,
+ "step": 460
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.2398940990211377,
+ "learning_rate": 7.22500807440892e-05,
+ "loss": 1.1924,
+ "step": 461
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.2605716625062531,
+ "learning_rate": 7.219957391211281e-05,
+ "loss": 1.182,
+ "step": 462
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.260462524570941,
+ "learning_rate": 7.214892081031244e-05,
+ "loss": 1.2136,
+ "step": 463
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.21979766512306334,
+ "learning_rate": 7.209812166878491e-05,
+ "loss": 1.2066,
+ "step": 464
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.23324453647530663,
+ "learning_rate": 7.204717671829051e-05,
+ "loss": 1.1657,
+ "step": 465
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.2529434935507481,
+ "learning_rate": 7.199608619025177e-05,
+ "loss": 1.2093,
+ "step": 466
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.25371701891720116,
+ "learning_rate": 7.194485031675265e-05,
+ "loss": 1.2225,
+ "step": 467
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.23272423066292103,
+ "learning_rate": 7.189346933053725e-05,
+ "loss": 1.1721,
+ "step": 468
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.25122928735587546,
+ "learning_rate": 7.184194346500892e-05,
+ "loss": 1.2537,
+ "step": 469
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.2159270875490409,
+ "learning_rate": 7.179027295422913e-05,
+ "loss": 1.197,
+ "step": 470
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.2633111059076544,
+ "learning_rate": 7.173845803291636e-05,
+ "loss": 1.1721,
+ "step": 471
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.30555936322098703,
+ "learning_rate": 7.168649893644517e-05,
+ "loss": 1.3011,
+ "step": 472
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.23492670111453726,
+ "learning_rate": 7.163439590084502e-05,
+ "loss": 1.1601,
+ "step": 473
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.26602734263721806,
+ "learning_rate": 7.158214916279923e-05,
+ "loss": 1.2808,
+ "step": 474
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.3182695007856262,
+ "learning_rate": 7.152975895964386e-05,
+ "loss": 1.2967,
+ "step": 475
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.2785021674736721,
+ "learning_rate": 7.147722552936673e-05,
+ "loss": 1.1789,
+ "step": 476
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.279474303138652,
+ "learning_rate": 7.142454911060627e-05,
+ "loss": 1.2596,
+ "step": 477
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.2556980144910755,
+ "learning_rate": 7.137172994265044e-05,
+ "loss": 1.2426,
+ "step": 478
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.3311256331993533,
+ "learning_rate": 7.131876826543565e-05,
+ "loss": 1.2059,
+ "step": 479
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.26467296197775253,
+ "learning_rate": 7.12656643195457e-05,
+ "loss": 1.2482,
+ "step": 480
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.27444885274652553,
+ "learning_rate": 7.121241834621064e-05,
+ "loss": 1.2528,
+ "step": 481
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.2572283861115396,
+ "learning_rate": 7.115903058730567e-05,
+ "loss": 1.1849,
+ "step": 482
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.2677065778235683,
+ "learning_rate": 7.11055012853501e-05,
+ "loss": 1.2011,
+ "step": 483
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.29470622036742816,
+ "learning_rate": 7.105183068350619e-05,
+ "loss": 1.2398,
+ "step": 484
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.27609230248969197,
+ "learning_rate": 7.099801902557811e-05,
+ "loss": 1.2259,
+ "step": 485
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.24248634168099284,
+ "learning_rate": 7.094406655601073e-05,
+ "loss": 1.2282,
+ "step": 486
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.2765941767688746,
+ "learning_rate": 7.088997351988865e-05,
+ "loss": 1.2319,
+ "step": 487
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.29347776909858947,
+ "learning_rate": 7.083574016293493e-05,
+ "loss": 1.1765,
+ "step": 488
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.285370295424537,
+ "learning_rate": 7.078136673151008e-05,
+ "loss": 1.26,
+ "step": 489
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.29408734903836536,
+ "learning_rate": 7.072685347261093e-05,
+ "loss": 1.226,
+ "step": 490
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.27437470239205813,
+ "learning_rate": 7.067220063386947e-05,
+ "loss": 1.1976,
+ "step": 491
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.2680770258777871,
+ "learning_rate": 7.061740846355176e-05,
+ "loss": 1.1915,
+ "step": 492
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.27200362879502954,
+ "learning_rate": 7.056247721055678e-05,
+ "loss": 1.2002,
+ "step": 493
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.2637811092577037,
+ "learning_rate": 7.050740712441528e-05,
+ "loss": 1.287,
+ "step": 494
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.24657959209271266,
+ "learning_rate": 7.045219845528875e-05,
+ "loss": 1.2284,
+ "step": 495
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.25311992110358666,
+ "learning_rate": 7.039685145396812e-05,
+ "loss": 1.1616,
+ "step": 496
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.2564633694193358,
+ "learning_rate": 7.034136637187275e-05,
+ "loss": 1.2067,
+ "step": 497
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.2446797651174144,
+ "learning_rate": 7.028574346104926e-05,
+ "loss": 1.2284,
+ "step": 498
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.2592751463399255,
+ "learning_rate": 7.022998297417034e-05,
+ "loss": 1.2371,
+ "step": 499
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.2500713943206808,
+ "learning_rate": 7.017408516453365e-05,
+ "loss": 1.1061,
+ "step": 500
+ }
+ ],
+ "logging_steps": 1.0,
+ "max_steps": 1638,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 3,
+ "save_steps": 50,
+ "total_flos": 518357992341504.0,
+ "train_batch_size": 1,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/checkpoint-500/training_args.bin b/checkpoint-500/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..c5d2416a3b70bb5260978ec9996f00154a724ba7
--- /dev/null
+++ b/checkpoint-500/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b22e8f9d51a16d03a2c506fa3d1eafa8f4b1ae992992c2086a4d435ffd97387e
+size 6712
diff --git a/checkpoint-500/zero_to_fp32.py b/checkpoint-500/zero_to_fp32.py
new file mode 100755
index 0000000000000000000000000000000000000000..24cc342e78d1a006c782b3a4cd68d9ce786d8fd8
--- /dev/null
+++ b/checkpoint-500/zero_to_fp32.py
@@ -0,0 +1,604 @@
+#!/usr/bin/env python
+
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+# This script extracts fp32 consolidated weights from a zero 1, 2 and 3 DeepSpeed checkpoints. It gets
+# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
+# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
+# application.
+#
+# example: python zero_to_fp32.py . pytorch_model.bin
+
+import argparse
+import torch
+import glob
+import math
+import os
+import re
+from collections import OrderedDict
+from dataclasses import dataclass
+
+# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
+# DeepSpeed data structures it has to be available in the current python environment.
+from deepspeed.utils import logger
+from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
+
+
+@dataclass
+class zero_model_state:
+ buffers: dict()
+ param_shapes: dict()
+ shared_params: list
+ ds_version: int
+ frozen_param_shapes: dict()
+ frozen_param_fragments: dict()
+
+
+debug = 0
+
+# load to cpu
+device = torch.device('cpu')
+
+
+def atoi(text):
+ return int(text) if text.isdigit() else text
+
+
+def natural_keys(text):
+ '''
+ alist.sort(key=natural_keys) sorts in human order
+ http://nedbatchelder.com/blog/200712/human_sorting.html
+ (See Toothy's implementation in the comments)
+ '''
+ return [atoi(c) for c in re.split(r'(\d+)', text)]
+
+
+def get_model_state_file(checkpoint_dir, zero_stage):
+ if not os.path.isdir(checkpoint_dir):
+ raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
+
+ # there should be only one file
+ if zero_stage <= 2:
+ file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
+ elif zero_stage == 3:
+ file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
+
+ if not os.path.exists(file):
+ raise FileNotFoundError(f"can't find model states file at '{file}'")
+
+ return file
+
+
+def get_checkpoint_files(checkpoint_dir, glob_pattern):
+ # XXX: need to test that this simple glob rule works for multi-node setup too
+ ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
+
+ if len(ckpt_files) == 0:
+ raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
+
+ return ckpt_files
+
+
+def get_optim_files(checkpoint_dir):
+ return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
+
+
+def get_model_state_files(checkpoint_dir):
+ return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
+
+
+def parse_model_states(files):
+ zero_model_states = []
+ for file in files:
+ state_dict = torch.load(file, map_location=device)
+
+ if BUFFER_NAMES not in state_dict:
+ raise ValueError(f"{file} is not a model state checkpoint")
+ buffer_names = state_dict[BUFFER_NAMES]
+ if debug:
+ print("Found buffers:", buffer_names)
+
+ # recover just the buffers while restoring them to fp32 if they were saved in fp16
+ buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
+ param_shapes = state_dict[PARAM_SHAPES]
+
+ # collect parameters that are included in param_shapes
+ param_names = []
+ for s in param_shapes:
+ for name in s.keys():
+ param_names.append(name)
+
+ # update with frozen parameters
+ frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
+ if frozen_param_shapes is not None:
+ if debug:
+ print(f"Found frozen_param_shapes: {frozen_param_shapes}")
+ param_names += list(frozen_param_shapes.keys())
+
+ # handle shared params
+ shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
+
+ ds_version = state_dict.get(DS_VERSION, None)
+
+ frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
+
+ z_model_state = zero_model_state(buffers=buffers,
+ param_shapes=param_shapes,
+ shared_params=shared_params,
+ ds_version=ds_version,
+ frozen_param_shapes=frozen_param_shapes,
+ frozen_param_fragments=frozen_param_fragments)
+ zero_model_states.append(z_model_state)
+
+ return zero_model_states
+
+
+def parse_optim_states(files, ds_checkpoint_dir):
+
+ total_files = len(files)
+ state_dicts = []
+ for f in files:
+ state_dict = torch.load(f, map_location=device)
+ # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights
+ # and also handle the case where it was already removed by another helper script
+ state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
+ state_dicts.append(state_dict)
+
+ if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]:
+ raise ValueError(f"{files[0]} is not a zero checkpoint")
+ zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
+ world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
+
+ # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
+ # parameters can be different from data parallelism for non-expert parameters. So we can just
+ # use the max of the partition_count to get the dp world_size.
+
+ if type(world_size) is list:
+ world_size = max(world_size)
+
+ if world_size != total_files:
+ raise ValueError(
+ f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
+ "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
+ )
+
+ # the groups are named differently in each stage
+ if zero_stage <= 2:
+ fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
+ elif zero_stage == 3:
+ fp32_groups_key = FP32_FLAT_GROUPS
+ else:
+ raise ValueError(f"unknown zero stage {zero_stage}")
+
+ if zero_stage <= 2:
+ fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
+ elif zero_stage == 3:
+ # if there is more than one param group, there will be multiple flattened tensors - one
+ # flattened tensor per group - for simplicity merge them into a single tensor
+ #
+ # XXX: could make the script more memory efficient for when there are multiple groups - it
+ # will require matching the sub-lists of param_shapes for each param group flattened tensor
+
+ fp32_flat_groups = [
+ torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
+ ]
+
+ return zero_stage, world_size, fp32_flat_groups
+
+
+def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
+ """
+ Returns fp32 state_dict reconstructed from ds checkpoint
+
+ Args:
+ - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
+
+ """
+ print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
+
+ optim_files = get_optim_files(ds_checkpoint_dir)
+ zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
+ print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
+
+ model_files = get_model_state_files(ds_checkpoint_dir)
+
+ zero_model_states = parse_model_states(model_files)
+ print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
+
+ if zero_stage <= 2:
+ return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters)
+ elif zero_stage == 3:
+ return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters)
+
+
+def _zero2_merge_frozen_params(state_dict, zero_model_states):
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+ return
+
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+ frozen_param_fragments = zero_model_states[0].frozen_param_fragments
+
+ if debug:
+ num_elem = sum(s.numel() for s in frozen_param_shapes.values())
+ print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+ wanted_params = len(frozen_param_shapes)
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+ avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
+ print(f'Frozen params: Have {avail_numel} numels to process.')
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+ total_params = 0
+ total_numel = 0
+ for name, shape in frozen_param_shapes.items():
+ total_params += 1
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+
+ state_dict[name] = frozen_param_fragments[name]
+
+ if debug:
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _has_callable(obj, fn):
+ attr = getattr(obj, fn, None)
+ return callable(attr)
+
+
+def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+ param_shapes = zero_model_states[0].param_shapes
+
+ # Reconstruction protocol:
+ #
+ # XXX: document this
+
+ if debug:
+ for i in range(world_size):
+ for j in range(len(fp32_flat_groups[0])):
+ print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
+
+ # XXX: memory usage doubles here (zero2)
+ num_param_groups = len(fp32_flat_groups[0])
+ merged_single_partition_of_fp32_groups = []
+ for i in range(num_param_groups):
+ merged_partitions = [sd[i] for sd in fp32_flat_groups]
+ full_single_fp32_vector = torch.cat(merged_partitions, 0)
+ merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
+ avail_numel = sum(
+ [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
+
+ if debug:
+ wanted_params = sum([len(shapes) for shapes in param_shapes])
+ wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
+ # not asserting if there is a mismatch due to possible padding
+ print(f"Have {avail_numel} numels to process.")
+ print(f"Need {wanted_numel} numels in {wanted_params} params.")
+
+ # params
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+ # out-of-core computing solution
+ total_numel = 0
+ total_params = 0
+ for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
+ offset = 0
+ avail_numel = full_single_fp32_vector.numel()
+ for name, shape in shapes.items():
+
+ unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
+ total_numel += unpartitioned_numel
+ total_params += 1
+
+ if debug:
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+ state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
+ offset += unpartitioned_numel
+
+ # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
+ # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
+ # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
+ # live optimizer object, so we are checking that the numbers are within the right range
+ align_to = 2 * world_size
+
+ def zero2_align(x):
+ return align_to * math.ceil(x / align_to)
+
+ if debug:
+ print(f"original offset={offset}, avail_numel={avail_numel}")
+
+ offset = zero2_align(offset)
+ avail_numel = zero2_align(avail_numel)
+
+ if debug:
+ print(f"aligned offset={offset}, avail_numel={avail_numel}")
+
+ # Sanity check
+ if offset != avail_numel:
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+ print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters):
+ state_dict = OrderedDict()
+
+ # buffers
+ buffers = zero_model_states[0].buffers
+ state_dict.update(buffers)
+ if debug:
+ print(f"added {len(buffers)} buffers")
+
+ if not exclude_frozen_parameters:
+ _zero2_merge_frozen_params(state_dict, zero_model_states)
+
+ _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+ # recover shared parameters
+ for pair in zero_model_states[0].shared_params:
+ if pair[1] in state_dict:
+ state_dict[pair[0]] = state_dict[pair[1]]
+
+ return state_dict
+
+
+def zero3_partitioned_param_info(unpartitioned_numel, world_size):
+ remainder = unpartitioned_numel % world_size
+ padding_numel = (world_size - remainder) if remainder else 0
+ partitioned_numel = math.ceil(unpartitioned_numel / world_size)
+ return partitioned_numel, padding_numel
+
+
+def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+ return
+
+ if debug:
+ for i in range(world_size):
+ num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
+ print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+ wanted_params = len(frozen_param_shapes)
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+ avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
+ print(f'Frozen params: Have {avail_numel} numels to process.')
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+ total_params = 0
+ total_numel = 0
+ for name, shape in zero_model_states[0].frozen_param_shapes.items():
+ total_params += 1
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+
+ param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
+ state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
+
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+ if debug:
+ print(
+ f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+ )
+
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+ param_shapes = zero_model_states[0].param_shapes
+ avail_numel = fp32_flat_groups[0].numel() * world_size
+ # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
+ # param, re-consolidating each param, while dealing with padding if any
+
+ # merge list of dicts, preserving order
+ param_shapes = {k: v for d in param_shapes for k, v in d.items()}
+
+ if debug:
+ for i in range(world_size):
+ print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
+
+ wanted_params = len(param_shapes)
+ wanted_numel = sum(shape.numel() for shape in param_shapes.values())
+ # not asserting if there is a mismatch due to possible padding
+ avail_numel = fp32_flat_groups[0].numel() * world_size
+ print(f"Trainable params: Have {avail_numel} numels to process.")
+ print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
+
+ # params
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+ # out-of-core computing solution
+ offset = 0
+ total_numel = 0
+ total_params = 0
+ for name, shape in param_shapes.items():
+
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+ total_params += 1
+
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+ if debug:
+ print(
+ f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+ )
+
+ # XXX: memory usage doubles here
+ state_dict[name] = torch.cat(
+ tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
+ 0).narrow(0, 0, unpartitioned_numel).view(shape)
+ offset += partitioned_numel
+
+ offset *= world_size
+
+ # Sanity check
+ if offset != avail_numel:
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+ print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters):
+ state_dict = OrderedDict()
+
+ # buffers
+ buffers = zero_model_states[0].buffers
+ state_dict.update(buffers)
+ if debug:
+ print(f"added {len(buffers)} buffers")
+
+ if not exclude_frozen_parameters:
+ _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
+
+ _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+ # recover shared parameters
+ for pair in zero_model_states[0].shared_params:
+ if pair[1] in state_dict:
+ state_dict[pair[0]] = state_dict[pair[1]]
+
+ return state_dict
+
+
+def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None, exclude_frozen_parameters=False):
+ """
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
+ ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
+ via a model hub.
+
+ Args:
+ - ``checkpoint_dir``: path to the desired checkpoint folder
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
+ - ``exclude_frozen_parameters``: exclude frozen parameters
+
+ Returns:
+ - pytorch ``state_dict``
+
+ Note: this approach may not work if your application doesn't have sufficient free CPU memory and
+ you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
+ the checkpoint.
+
+ A typical usage might be ::
+
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+ # do the training and checkpoint saving
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
+ model = model.cpu() # move to cpu
+ model.load_state_dict(state_dict)
+ # submit to model hub or save the model to share with others
+
+ In this example the ``model`` will no longer be usable in the deepspeed context of the same
+ application. i.e. you will need to re-initialize the deepspeed engine, since
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+ If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
+
+ """
+ if tag is None:
+ latest_path = os.path.join(checkpoint_dir, 'latest')
+ if os.path.isfile(latest_path):
+ with open(latest_path, 'r') as fd:
+ tag = fd.read().strip()
+ else:
+ raise ValueError(f"Unable to find 'latest' file at {latest_path}")
+
+ ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
+
+ if not os.path.isdir(ds_checkpoint_dir):
+ raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
+
+ return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
+
+
+def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None, exclude_frozen_parameters=False):
+ """
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
+ loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
+
+ Args:
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+ - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+ - ``exclude_frozen_parameters``: exclude frozen parameters
+ """
+
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag, exclude_frozen_parameters)
+ print(f"Saving fp32 state dict to {output_file}")
+ torch.save(state_dict, output_file)
+
+
+def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
+ """
+ 1. Put the provided model to cpu
+ 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
+ 3. Load it into the provided model
+
+ Args:
+ - ``model``: the model object to update
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+
+ Returns:
+ - ``model`: modified model
+
+ Make sure you have plenty of CPU memory available before you call this function. If you don't
+ have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
+ conveniently placed for you in the checkpoint folder.
+
+ A typical usage might be ::
+
+ from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
+ model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
+ # submit to model hub or save the model to share with others
+
+ Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context
+ of the same application. i.e. you will need to re-initialize the deepspeed engine, since
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+ """
+ logger.info(f"Extracting fp32 weights")
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+
+ logger.info(f"Overwriting model with fp32 weights")
+ model = model.cpu()
+ model.load_state_dict(state_dict, strict=False)
+
+ return model
+
+
+if __name__ == "__main__":
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument("checkpoint_dir",
+ type=str,
+ help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
+ parser.add_argument(
+ "output_file",
+ type=str,
+ help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)")
+ parser.add_argument("-t",
+ "--tag",
+ type=str,
+ default=None,
+ help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
+ parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
+ parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
+ args = parser.parse_args()
+
+ debug = args.debug
+
+ convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
+ args.output_file,
+ tag=args.tag,
+ exclude_frozen_parameters=args.exclude_frozen_parameters)
diff --git a/checkpoint-550/README.md b/checkpoint-550/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..16b1eacdd9353dec380a08ee77ce6ed5ab50f12e
--- /dev/null
+++ b/checkpoint-550/README.md
@@ -0,0 +1,202 @@
+---
+library_name: peft
+base_model: gotzmann/uni
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+### Framework versions
+
+- PEFT 0.10.0
\ No newline at end of file
diff --git a/checkpoint-550/adapter_config.json b/checkpoint-550/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3cd6dba5d79f7ca21fd4ad465cbbcac1e0960476
--- /dev/null
+++ b/checkpoint-550/adapter_config.json
@@ -0,0 +1,31 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "gotzmann/uni",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 128,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 128,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "v_proj",
+ "k_proj",
+ "q_proj",
+ "o_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": true
+}
\ No newline at end of file
diff --git a/checkpoint-550/adapter_model.safetensors b/checkpoint-550/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..32e1954bb1c649a08bf8ea73cff1f174ddf0dec4
--- /dev/null
+++ b/checkpoint-550/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8c75b39d45cca7d206d977667bc6f8c9c1466310d7e45d2b24582dd05c09d3db
+size 1048664848
diff --git a/checkpoint-550/global_step550/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt b/checkpoint-550/global_step550/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..f39a5d0af0a7ea2a0629a459487b48598c4e557d
--- /dev/null
+++ b/checkpoint-550/global_step550/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c9bdb8e9714818d43e310ae5399805ac80d7ae6269611e90f9dd23963ed30838
+size 787270042
diff --git a/checkpoint-550/global_step550/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt b/checkpoint-550/global_step550/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..a45ae9086bd58f91c4bd3de98e1620f82546bd96
--- /dev/null
+++ b/checkpoint-550/global_step550/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e0443646b7690f9304417958fb5a414b64baa76379152e0bd516ede2965a97e9
+size 787270042
diff --git a/checkpoint-550/global_step550/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt b/checkpoint-550/global_step550/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..20dcedc5e8c8745891d355d6f378e2ebe0bee10b
--- /dev/null
+++ b/checkpoint-550/global_step550/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:afc4a95e25b4f44f2ff1bd820133c35ecf8aa5ec9050f73dfb4fad574492bedf
+size 787270042
diff --git a/checkpoint-550/global_step550/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt b/checkpoint-550/global_step550/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..b3b2e351f6d2c7e59d7afc9750152b8132f0f618
--- /dev/null
+++ b/checkpoint-550/global_step550/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:992e4ad48b550c9b90f88b6618771e8506ca25504d1fcd4e6eed468584bded16
+size 787270042
diff --git a/checkpoint-550/global_step550/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt b/checkpoint-550/global_step550/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..22b535e2f0e78cfbed91c70f3186b86314b0b443
--- /dev/null
+++ b/checkpoint-550/global_step550/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6e9ef5d326419cdae9d907d5de6e3f99eb044ac2cfbb99807e487457d17fd8ca
+size 787270042
diff --git a/checkpoint-550/global_step550/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt b/checkpoint-550/global_step550/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..1f3ea5f34070ed1b88839aacea0ac9d236af9ff6
--- /dev/null
+++ b/checkpoint-550/global_step550/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4b99e005d2dba98532c74b684aceb1c3a39f3e07367a7e13c1200dec9743c797
+size 787270042
diff --git a/checkpoint-550/global_step550/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt b/checkpoint-550/global_step550/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..ea803c68de7912923a9a4de8756d3c25af7dcd9b
--- /dev/null
+++ b/checkpoint-550/global_step550/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c269f1869bf87711b60c46947dccc4cd8681ea1928ef78648755f7f6e4038bec
+size 787270042
diff --git a/checkpoint-550/global_step550/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt b/checkpoint-550/global_step550/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..94ba0996ed37a37b32c5677b71d859f850668300
--- /dev/null
+++ b/checkpoint-550/global_step550/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e4d47a45eba702ef60e7bc54a1cd392064cdc931b040e08d10c82aaefabf8729
+size 787270042
diff --git a/checkpoint-550/global_step550/zero_pp_rank_0_mp_rank_00_model_states.pt b/checkpoint-550/global_step550/zero_pp_rank_0_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..95dea22e460e244a4bd3bdcd488d690e16af38f8
--- /dev/null
+++ b/checkpoint-550/global_step550/zero_pp_rank_0_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:447754814077d8171e639808eaec7f09a69b51aaa33d141f25fc81cac7eb9680
+size 653742
diff --git a/checkpoint-550/global_step550/zero_pp_rank_1_mp_rank_00_model_states.pt b/checkpoint-550/global_step550/zero_pp_rank_1_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..73bfdcc01b8ed843ca4c6087e88e721e12b7c260
--- /dev/null
+++ b/checkpoint-550/global_step550/zero_pp_rank_1_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:96179433ffa678357050191f70db8b1315e460028f80e7047d21a0e3353e0244
+size 653742
diff --git a/checkpoint-550/global_step550/zero_pp_rank_2_mp_rank_00_model_states.pt b/checkpoint-550/global_step550/zero_pp_rank_2_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..efb5294b9ff666cc57531d42fba0b54f2fe93715
--- /dev/null
+++ b/checkpoint-550/global_step550/zero_pp_rank_2_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1f1fd925d4de9ef7d7d59ca38c304f25d64f0234d3f4daba18404e56c8d0f2de
+size 653742
diff --git a/checkpoint-550/global_step550/zero_pp_rank_3_mp_rank_00_model_states.pt b/checkpoint-550/global_step550/zero_pp_rank_3_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..599cf68642f398ddbfba68b01d8778b09e49f71b
--- /dev/null
+++ b/checkpoint-550/global_step550/zero_pp_rank_3_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:da3108e83cd4db592cb1e8a8cd99164b398f2ba3aee6e7e26083d5ebc5b43ebe
+size 653742
diff --git a/checkpoint-550/global_step550/zero_pp_rank_4_mp_rank_00_model_states.pt b/checkpoint-550/global_step550/zero_pp_rank_4_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..5863f4f0d1ffc776e22c872b2d435f5fe5e3b94e
--- /dev/null
+++ b/checkpoint-550/global_step550/zero_pp_rank_4_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b884d28b156e88efe0190688ca8b0c7d7a3a1eca22d067534d3bd88f8732d592
+size 653742
diff --git a/checkpoint-550/global_step550/zero_pp_rank_5_mp_rank_00_model_states.pt b/checkpoint-550/global_step550/zero_pp_rank_5_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..e0b71021f810289c008896de21ea62e594b08b56
--- /dev/null
+++ b/checkpoint-550/global_step550/zero_pp_rank_5_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:924dc44d269e8843dc537a87bca260b55d6843a6ef03a6a40e699d6b251fabf3
+size 653742
diff --git a/checkpoint-550/global_step550/zero_pp_rank_6_mp_rank_00_model_states.pt b/checkpoint-550/global_step550/zero_pp_rank_6_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..89d7e71fcabfc2848909f4b712f402481d021154
--- /dev/null
+++ b/checkpoint-550/global_step550/zero_pp_rank_6_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2f0a2ba01f539e197fda6de338350c7aafecb693b871f33faa40c627a772b6ce
+size 653742
diff --git a/checkpoint-550/global_step550/zero_pp_rank_7_mp_rank_00_model_states.pt b/checkpoint-550/global_step550/zero_pp_rank_7_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..a7ee13bd1ecdb8b4a1696dae6d8fdb272170e7a7
--- /dev/null
+++ b/checkpoint-550/global_step550/zero_pp_rank_7_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a18f4449b3c2850948d30da549ad064e9d34b2f009393c309722cb30f55e4d97
+size 653742
diff --git a/checkpoint-550/latest b/checkpoint-550/latest
new file mode 100644
index 0000000000000000000000000000000000000000..1606c8674d0d1cc86edce34c7f47c11b57f13e09
--- /dev/null
+++ b/checkpoint-550/latest
@@ -0,0 +1 @@
+global_step550
\ No newline at end of file
diff --git a/checkpoint-550/rng_state_0.pth b/checkpoint-550/rng_state_0.pth
new file mode 100644
index 0000000000000000000000000000000000000000..4e5b7e2ec90fdb824c8932464c1d9068330655a7
--- /dev/null
+++ b/checkpoint-550/rng_state_0.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:36d2a2034ebb05cb71c510897f2795b31164e50f17b270bc25d2be3ad9a17b22
+size 15984
diff --git a/checkpoint-550/rng_state_1.pth b/checkpoint-550/rng_state_1.pth
new file mode 100644
index 0000000000000000000000000000000000000000..7d8d7722fc72cab6d492b76cb99c8177dcc47544
--- /dev/null
+++ b/checkpoint-550/rng_state_1.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:060dfdb1c49102cbdc8868a6031e68787601b4ccd782f3fb9b137e20c1fd2c7a
+size 15984
diff --git a/checkpoint-550/rng_state_2.pth b/checkpoint-550/rng_state_2.pth
new file mode 100644
index 0000000000000000000000000000000000000000..3c9f84eff30cfa9ea1feedaf262d61fb12e4cba7
--- /dev/null
+++ b/checkpoint-550/rng_state_2.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af01895cb66e616591f2e4baa8dcd8151530eab133c73571ccb31c74f35422ce
+size 15984
diff --git a/checkpoint-550/rng_state_3.pth b/checkpoint-550/rng_state_3.pth
new file mode 100644
index 0000000000000000000000000000000000000000..6eebfb928f8e91eff0ea1645a20b5aa4465c705b
--- /dev/null
+++ b/checkpoint-550/rng_state_3.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:677921992b1e0cef3aee776f245975003d22f51d9bd6ed20f248ded1deb72fa9
+size 15984
diff --git a/checkpoint-550/rng_state_4.pth b/checkpoint-550/rng_state_4.pth
new file mode 100644
index 0000000000000000000000000000000000000000..0866030a266c6d003cc378a9418a723f69e8ab99
--- /dev/null
+++ b/checkpoint-550/rng_state_4.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d69353c629541c690c5471f8ec05fdab2bfecf3d37afaa436bc45939da6db68f
+size 15984
diff --git a/checkpoint-550/rng_state_5.pth b/checkpoint-550/rng_state_5.pth
new file mode 100644
index 0000000000000000000000000000000000000000..554638d77107f832d7aa51c61645ee2d6c48a36d
--- /dev/null
+++ b/checkpoint-550/rng_state_5.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8e40ba6668cc03c9162c68a933d164bf38ae2d196a9a6fec03ae615491201185
+size 15984
diff --git a/checkpoint-550/rng_state_6.pth b/checkpoint-550/rng_state_6.pth
new file mode 100644
index 0000000000000000000000000000000000000000..964331b65172a1bcac03e4673415fa787f724268
--- /dev/null
+++ b/checkpoint-550/rng_state_6.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:870968fea834e24b2e099cf3e4fe1e3fb8caf38d8f8e5b790d7d47386d4d05f5
+size 15984
diff --git a/checkpoint-550/rng_state_7.pth b/checkpoint-550/rng_state_7.pth
new file mode 100644
index 0000000000000000000000000000000000000000..cd4754d65217d0f9d1f2d3334397df7a8a079652
--- /dev/null
+++ b/checkpoint-550/rng_state_7.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e9e19618bee7c6ef43256fea25abe19bca88535eb1e7dc213cde8929ae4e8180
+size 15984
diff --git a/checkpoint-550/scheduler.pt b/checkpoint-550/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..ee995c910a44d9ae50344b80840b95e1f818da04
--- /dev/null
+++ b/checkpoint-550/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e0e7272088a458885697bbc37f90655326bd1ad16b6e603840724e6ba896f59f
+size 1064
diff --git a/checkpoint-550/special_tokens_map.json b/checkpoint-550/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..72ecfeeb7e14d244c936169d2ed139eeae235ef1
--- /dev/null
+++ b/checkpoint-550/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "",
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/checkpoint-550/tokenizer.model b/checkpoint-550/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..6c00c742ce03c627d6cd5b795984876fa49fa899
--- /dev/null
+++ b/checkpoint-550/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
diff --git a/checkpoint-550/tokenizer_config.json b/checkpoint-550/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..bb5a9f09d8c0f3c32c66fc6118fe5c76c5c6fd90
--- /dev/null
+++ b/checkpoint-550/tokenizer_config.json
@@ -0,0 +1,45 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "",
+ "chat_template": "{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ '' + '### System:\\n\\n' + system_message }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '\\n\\n### Human:\\n\\n' + content }}{% elif message['role'] == 'assistant' %}{{ '\\n\\n### Assistant:\\n\\n' + content + '' }}{% endif %}{% endfor %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "",
+ "legacy": false,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "split_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "",
+ "use_default_system_prompt": false
+}
diff --git a/checkpoint-550/trainer_state.json b/checkpoint-550/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..3d8108b8aa12bbcb297f8976c18b1a479ebe992c
--- /dev/null
+++ b/checkpoint-550/trainer_state.json
@@ -0,0 +1,3871 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 1.005944215820759,
+ "eval_steps": 500,
+ "global_step": 550,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.0,
+ "grad_norm": 0.849355824164473,
+ "learning_rate": 4.878048780487805e-07,
+ "loss": 1.3655,
+ "step": 1
+ },
+ {
+ "epoch": 0.0,
+ "grad_norm": 10.01567518957158,
+ "learning_rate": 9.75609756097561e-07,
+ "loss": 1.5767,
+ "step": 2
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.6466000875559635,
+ "learning_rate": 1.4634146341463414e-06,
+ "loss": 1.3913,
+ "step": 3
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.6644565932010504,
+ "learning_rate": 1.951219512195122e-06,
+ "loss": 1.3218,
+ "step": 4
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.571354207588475,
+ "learning_rate": 2.4390243902439027e-06,
+ "loss": 1.3597,
+ "step": 5
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.31036262839244955,
+ "learning_rate": 2.926829268292683e-06,
+ "loss": 1.2832,
+ "step": 6
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.2622135027188184,
+ "learning_rate": 3.414634146341464e-06,
+ "loss": 1.2161,
+ "step": 7
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.296824630261661,
+ "learning_rate": 3.902439024390244e-06,
+ "loss": 1.2985,
+ "step": 8
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.2557267467361569,
+ "learning_rate": 4.390243902439025e-06,
+ "loss": 1.3175,
+ "step": 9
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.23418939513890769,
+ "learning_rate": 4.8780487804878055e-06,
+ "loss": 1.2617,
+ "step": 10
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.2364760983285843,
+ "learning_rate": 5.365853658536586e-06,
+ "loss": 1.3103,
+ "step": 11
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.23893034721889,
+ "learning_rate": 5.853658536585366e-06,
+ "loss": 1.2405,
+ "step": 12
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.25563593295485887,
+ "learning_rate": 6.341463414634147e-06,
+ "loss": 1.2831,
+ "step": 13
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.23239975352661665,
+ "learning_rate": 6.829268292682928e-06,
+ "loss": 1.3125,
+ "step": 14
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.3092813858209507,
+ "learning_rate": 7.317073170731707e-06,
+ "loss": 1.2422,
+ "step": 15
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.282563380367434,
+ "learning_rate": 7.804878048780489e-06,
+ "loss": 1.2453,
+ "step": 16
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.22065680088315018,
+ "learning_rate": 8.292682926829268e-06,
+ "loss": 1.2491,
+ "step": 17
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.22777800877980184,
+ "learning_rate": 8.78048780487805e-06,
+ "loss": 1.2655,
+ "step": 18
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.22145212540177928,
+ "learning_rate": 9.268292682926831e-06,
+ "loss": 1.2413,
+ "step": 19
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.22482351883112714,
+ "learning_rate": 9.756097560975611e-06,
+ "loss": 1.2653,
+ "step": 20
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.20823080508385733,
+ "learning_rate": 1.024390243902439e-05,
+ "loss": 1.2374,
+ "step": 21
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.26025492562935737,
+ "learning_rate": 1.0731707317073172e-05,
+ "loss": 1.2065,
+ "step": 22
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.2150252124176173,
+ "learning_rate": 1.1219512195121953e-05,
+ "loss": 1.2782,
+ "step": 23
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.2505915177425618,
+ "learning_rate": 1.1707317073170731e-05,
+ "loss": 1.2742,
+ "step": 24
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.20129223044786942,
+ "learning_rate": 1.2195121951219513e-05,
+ "loss": 1.3366,
+ "step": 25
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.1973508510397107,
+ "learning_rate": 1.2682926829268294e-05,
+ "loss": 1.2476,
+ "step": 26
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.27103325392437194,
+ "learning_rate": 1.3170731707317076e-05,
+ "loss": 1.2325,
+ "step": 27
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.17954976411006285,
+ "learning_rate": 1.3658536585365855e-05,
+ "loss": 1.2523,
+ "step": 28
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.22216997851088888,
+ "learning_rate": 1.4146341463414635e-05,
+ "loss": 1.3297,
+ "step": 29
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.2071458864548587,
+ "learning_rate": 1.4634146341463415e-05,
+ "loss": 1.2127,
+ "step": 30
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.18039422081622164,
+ "learning_rate": 1.5121951219512196e-05,
+ "loss": 1.2509,
+ "step": 31
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.18631254372974412,
+ "learning_rate": 1.5609756097560978e-05,
+ "loss": 1.2247,
+ "step": 32
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.18843872523649827,
+ "learning_rate": 1.6097560975609757e-05,
+ "loss": 1.195,
+ "step": 33
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.2163847267778325,
+ "learning_rate": 1.6585365853658537e-05,
+ "loss": 1.2179,
+ "step": 34
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.19687688475496104,
+ "learning_rate": 1.7073170731707317e-05,
+ "loss": 1.2763,
+ "step": 35
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.20409643064887947,
+ "learning_rate": 1.75609756097561e-05,
+ "loss": 1.253,
+ "step": 36
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.1879182661759335,
+ "learning_rate": 1.804878048780488e-05,
+ "loss": 1.2586,
+ "step": 37
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.19400648948514373,
+ "learning_rate": 1.8536585365853663e-05,
+ "loss": 1.2154,
+ "step": 38
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.1878879343148452,
+ "learning_rate": 1.902439024390244e-05,
+ "loss": 1.2304,
+ "step": 39
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.17687475469924052,
+ "learning_rate": 1.9512195121951222e-05,
+ "loss": 1.2351,
+ "step": 40
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.18223935625384885,
+ "learning_rate": 2e-05,
+ "loss": 1.2222,
+ "step": 41
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.1943061629408338,
+ "learning_rate": 2.048780487804878e-05,
+ "loss": 1.2044,
+ "step": 42
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.17027514338700078,
+ "learning_rate": 2.0975609756097564e-05,
+ "loss": 1.1548,
+ "step": 43
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.18553769630586192,
+ "learning_rate": 2.1463414634146344e-05,
+ "loss": 1.2721,
+ "step": 44
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.19732826914228765,
+ "learning_rate": 2.1951219512195124e-05,
+ "loss": 1.3097,
+ "step": 45
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.18714230986631472,
+ "learning_rate": 2.2439024390243907e-05,
+ "loss": 1.2662,
+ "step": 46
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.19988987568002223,
+ "learning_rate": 2.2926829268292683e-05,
+ "loss": 1.2904,
+ "step": 47
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.17744650133390918,
+ "learning_rate": 2.3414634146341463e-05,
+ "loss": 1.1825,
+ "step": 48
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.16576734763834533,
+ "learning_rate": 2.3902439024390246e-05,
+ "loss": 1.1858,
+ "step": 49
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.179591794065527,
+ "learning_rate": 2.4390243902439026e-05,
+ "loss": 1.2711,
+ "step": 50
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.17923464471176911,
+ "learning_rate": 2.4878048780487805e-05,
+ "loss": 1.2289,
+ "step": 51
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.18991742907836837,
+ "learning_rate": 2.536585365853659e-05,
+ "loss": 1.3097,
+ "step": 52
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.19849796137254636,
+ "learning_rate": 2.5853658536585368e-05,
+ "loss": 1.2489,
+ "step": 53
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.17452371110976383,
+ "learning_rate": 2.634146341463415e-05,
+ "loss": 1.2461,
+ "step": 54
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.17671022353085036,
+ "learning_rate": 2.682926829268293e-05,
+ "loss": 1.153,
+ "step": 55
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.36820559192096686,
+ "learning_rate": 2.731707317073171e-05,
+ "loss": 1.2431,
+ "step": 56
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.20331468526494198,
+ "learning_rate": 2.7804878048780487e-05,
+ "loss": 1.2575,
+ "step": 57
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.2402486598118377,
+ "learning_rate": 2.829268292682927e-05,
+ "loss": 1.2538,
+ "step": 58
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.2549409484173144,
+ "learning_rate": 2.878048780487805e-05,
+ "loss": 1.2065,
+ "step": 59
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.2053105349872685,
+ "learning_rate": 2.926829268292683e-05,
+ "loss": 1.2094,
+ "step": 60
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.17971910872957886,
+ "learning_rate": 2.9756097560975613e-05,
+ "loss": 1.228,
+ "step": 61
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.1885853654992973,
+ "learning_rate": 3.0243902439024392e-05,
+ "loss": 1.2286,
+ "step": 62
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.1848524571968613,
+ "learning_rate": 3.073170731707317e-05,
+ "loss": 1.2718,
+ "step": 63
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.18734105883548513,
+ "learning_rate": 3.1219512195121955e-05,
+ "loss": 1.2357,
+ "step": 64
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.17774668052121825,
+ "learning_rate": 3.170731707317074e-05,
+ "loss": 1.1509,
+ "step": 65
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.17890968008080646,
+ "learning_rate": 3.2195121951219514e-05,
+ "loss": 1.1924,
+ "step": 66
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.18249273371332375,
+ "learning_rate": 3.268292682926829e-05,
+ "loss": 1.2545,
+ "step": 67
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.21064122671902577,
+ "learning_rate": 3.3170731707317074e-05,
+ "loss": 1.2832,
+ "step": 68
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.1820064171955093,
+ "learning_rate": 3.365853658536586e-05,
+ "loss": 1.2071,
+ "step": 69
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.16996662800553433,
+ "learning_rate": 3.414634146341463e-05,
+ "loss": 1.2073,
+ "step": 70
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.1618669302922445,
+ "learning_rate": 3.4634146341463416e-05,
+ "loss": 1.1289,
+ "step": 71
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.18948744950985544,
+ "learning_rate": 3.51219512195122e-05,
+ "loss": 1.2915,
+ "step": 72
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.18326143691603383,
+ "learning_rate": 3.5609756097560976e-05,
+ "loss": 1.2238,
+ "step": 73
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.17410704510700503,
+ "learning_rate": 3.609756097560976e-05,
+ "loss": 1.1784,
+ "step": 74
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.1983667344995625,
+ "learning_rate": 3.658536585365854e-05,
+ "loss": 1.2452,
+ "step": 75
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.3416310763369357,
+ "learning_rate": 3.7073170731707325e-05,
+ "loss": 1.1972,
+ "step": 76
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.2776466983511955,
+ "learning_rate": 3.75609756097561e-05,
+ "loss": 1.3121,
+ "step": 77
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.20026129636576834,
+ "learning_rate": 3.804878048780488e-05,
+ "loss": 1.2436,
+ "step": 78
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.21064549243917835,
+ "learning_rate": 3.853658536585366e-05,
+ "loss": 1.2064,
+ "step": 79
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.22119482175714267,
+ "learning_rate": 3.9024390243902444e-05,
+ "loss": 1.2715,
+ "step": 80
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.23047133748844142,
+ "learning_rate": 3.951219512195122e-05,
+ "loss": 1.2888,
+ "step": 81
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.18741863156973176,
+ "learning_rate": 4e-05,
+ "loss": 1.248,
+ "step": 82
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.1747859810629604,
+ "learning_rate": 4.0487804878048786e-05,
+ "loss": 1.1683,
+ "step": 83
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.1896944798413341,
+ "learning_rate": 4.097560975609756e-05,
+ "loss": 1.2155,
+ "step": 84
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.18724128114363303,
+ "learning_rate": 4.1463414634146346e-05,
+ "loss": 1.2273,
+ "step": 85
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.17368125504855478,
+ "learning_rate": 4.195121951219513e-05,
+ "loss": 1.224,
+ "step": 86
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.18371141013625703,
+ "learning_rate": 4.2439024390243905e-05,
+ "loss": 1.2294,
+ "step": 87
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.1791029365673714,
+ "learning_rate": 4.292682926829269e-05,
+ "loss": 1.2895,
+ "step": 88
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.20259974283859655,
+ "learning_rate": 4.341463414634147e-05,
+ "loss": 1.1841,
+ "step": 89
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.17457456183272174,
+ "learning_rate": 4.390243902439025e-05,
+ "loss": 1.2357,
+ "step": 90
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.1815824380789748,
+ "learning_rate": 4.439024390243903e-05,
+ "loss": 1.2304,
+ "step": 91
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.17566480599583392,
+ "learning_rate": 4.4878048780487814e-05,
+ "loss": 1.242,
+ "step": 92
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.18422975005984474,
+ "learning_rate": 4.536585365853658e-05,
+ "loss": 1.2177,
+ "step": 93
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.16796781877940678,
+ "learning_rate": 4.5853658536585366e-05,
+ "loss": 1.1482,
+ "step": 94
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.18636131653783305,
+ "learning_rate": 4.634146341463415e-05,
+ "loss": 1.1758,
+ "step": 95
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.1823665700289814,
+ "learning_rate": 4.6829268292682926e-05,
+ "loss": 1.289,
+ "step": 96
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.1719900691262439,
+ "learning_rate": 4.731707317073171e-05,
+ "loss": 1.1626,
+ "step": 97
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.17937994168039778,
+ "learning_rate": 4.780487804878049e-05,
+ "loss": 1.175,
+ "step": 98
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.16631851422106986,
+ "learning_rate": 4.829268292682927e-05,
+ "loss": 1.2177,
+ "step": 99
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.19143696232800309,
+ "learning_rate": 4.878048780487805e-05,
+ "loss": 1.3071,
+ "step": 100
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.17859506638780318,
+ "learning_rate": 4.9268292682926835e-05,
+ "loss": 1.2351,
+ "step": 101
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.18381520321248196,
+ "learning_rate": 4.975609756097561e-05,
+ "loss": 1.2342,
+ "step": 102
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.17968218683773912,
+ "learning_rate": 5.0243902439024394e-05,
+ "loss": 1.2074,
+ "step": 103
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.18139489969339018,
+ "learning_rate": 5.073170731707318e-05,
+ "loss": 1.1558,
+ "step": 104
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.17366624842514394,
+ "learning_rate": 5.121951219512195e-05,
+ "loss": 1.1897,
+ "step": 105
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.16034845455223745,
+ "learning_rate": 5.1707317073170736e-05,
+ "loss": 1.179,
+ "step": 106
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.17583069577827776,
+ "learning_rate": 5.219512195121952e-05,
+ "loss": 1.1856,
+ "step": 107
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.1853758076989552,
+ "learning_rate": 5.26829268292683e-05,
+ "loss": 1.2072,
+ "step": 108
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.19597443965936462,
+ "learning_rate": 5.317073170731708e-05,
+ "loss": 1.2271,
+ "step": 109
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.1899206334098331,
+ "learning_rate": 5.365853658536586e-05,
+ "loss": 1.1961,
+ "step": 110
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.17463763837757018,
+ "learning_rate": 5.4146341463414645e-05,
+ "loss": 1.2049,
+ "step": 111
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.20431371701229986,
+ "learning_rate": 5.463414634146342e-05,
+ "loss": 1.2891,
+ "step": 112
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1814475107638498,
+ "learning_rate": 5.51219512195122e-05,
+ "loss": 1.2346,
+ "step": 113
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1883849423207823,
+ "learning_rate": 5.5609756097560974e-05,
+ "loss": 1.244,
+ "step": 114
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1857258128640568,
+ "learning_rate": 5.609756097560976e-05,
+ "loss": 1.2669,
+ "step": 115
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1740768514118401,
+ "learning_rate": 5.658536585365854e-05,
+ "loss": 1.2414,
+ "step": 116
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1919320335584178,
+ "learning_rate": 5.7073170731707317e-05,
+ "loss": 1.2886,
+ "step": 117
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.18288775167828136,
+ "learning_rate": 5.75609756097561e-05,
+ "loss": 1.1875,
+ "step": 118
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.18208588867750863,
+ "learning_rate": 5.804878048780488e-05,
+ "loss": 1.2388,
+ "step": 119
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.1743260015658331,
+ "learning_rate": 5.853658536585366e-05,
+ "loss": 1.1762,
+ "step": 120
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.17856046291517946,
+ "learning_rate": 5.902439024390244e-05,
+ "loss": 1.2888,
+ "step": 121
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.17493794870966536,
+ "learning_rate": 5.9512195121951225e-05,
+ "loss": 1.2222,
+ "step": 122
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.1909202655203384,
+ "learning_rate": 6.000000000000001e-05,
+ "loss": 1.2414,
+ "step": 123
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.18345819482834988,
+ "learning_rate": 6.0487804878048785e-05,
+ "loss": 1.2756,
+ "step": 124
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.2057069352956621,
+ "learning_rate": 6.097560975609757e-05,
+ "loss": 1.261,
+ "step": 125
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.299775882469108,
+ "learning_rate": 6.146341463414634e-05,
+ "loss": 1.2566,
+ "step": 126
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.1869687633018095,
+ "learning_rate": 6.195121951219513e-05,
+ "loss": 1.3039,
+ "step": 127
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.17747149926197442,
+ "learning_rate": 6.243902439024391e-05,
+ "loss": 1.2524,
+ "step": 128
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.17885157788044242,
+ "learning_rate": 6.29268292682927e-05,
+ "loss": 1.2455,
+ "step": 129
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.17617298187845123,
+ "learning_rate": 6.341463414634148e-05,
+ "loss": 1.2009,
+ "step": 130
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.20164176323497066,
+ "learning_rate": 6.390243902439025e-05,
+ "loss": 1.2634,
+ "step": 131
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.20459903417307612,
+ "learning_rate": 6.439024390243903e-05,
+ "loss": 1.1963,
+ "step": 132
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.1863755486334296,
+ "learning_rate": 6.487804878048781e-05,
+ "loss": 1.2387,
+ "step": 133
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.19265866140295207,
+ "learning_rate": 6.536585365853658e-05,
+ "loss": 1.2688,
+ "step": 134
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.1823425868969493,
+ "learning_rate": 6.585365853658536e-05,
+ "loss": 1.2041,
+ "step": 135
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.2016853266472781,
+ "learning_rate": 6.634146341463415e-05,
+ "loss": 1.1223,
+ "step": 136
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.17282675192463448,
+ "learning_rate": 6.682926829268293e-05,
+ "loss": 1.1879,
+ "step": 137
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.17398811693399288,
+ "learning_rate": 6.731707317073171e-05,
+ "loss": 1.2682,
+ "step": 138
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.18516916965434696,
+ "learning_rate": 6.78048780487805e-05,
+ "loss": 1.1666,
+ "step": 139
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.1852213129647933,
+ "learning_rate": 6.829268292682927e-05,
+ "loss": 1.2501,
+ "step": 140
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.17915948766591883,
+ "learning_rate": 6.878048780487805e-05,
+ "loss": 1.2264,
+ "step": 141
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.21599939417233183,
+ "learning_rate": 6.926829268292683e-05,
+ "loss": 1.2376,
+ "step": 142
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.17839304459521851,
+ "learning_rate": 6.975609756097562e-05,
+ "loss": 1.2353,
+ "step": 143
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.20826913231380875,
+ "learning_rate": 7.02439024390244e-05,
+ "loss": 1.1901,
+ "step": 144
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.20788894913361589,
+ "learning_rate": 7.073170731707318e-05,
+ "loss": 1.2577,
+ "step": 145
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.18420055842301297,
+ "learning_rate": 7.121951219512195e-05,
+ "loss": 1.1393,
+ "step": 146
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.19903048468685589,
+ "learning_rate": 7.170731707317073e-05,
+ "loss": 1.2321,
+ "step": 147
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.19074116314985748,
+ "learning_rate": 7.219512195121952e-05,
+ "loss": 1.1912,
+ "step": 148
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.2353816469403903,
+ "learning_rate": 7.26829268292683e-05,
+ "loss": 1.28,
+ "step": 149
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.21634875684769345,
+ "learning_rate": 7.317073170731708e-05,
+ "loss": 1.3312,
+ "step": 150
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.18290969006743918,
+ "learning_rate": 7.365853658536587e-05,
+ "loss": 1.2214,
+ "step": 151
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.18484243897545208,
+ "learning_rate": 7.414634146341465e-05,
+ "loss": 1.1895,
+ "step": 152
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.21882343112978872,
+ "learning_rate": 7.463414634146342e-05,
+ "loss": 1.2219,
+ "step": 153
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.19868284379241205,
+ "learning_rate": 7.51219512195122e-05,
+ "loss": 1.2176,
+ "step": 154
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.20912516312950613,
+ "learning_rate": 7.560975609756097e-05,
+ "loss": 1.242,
+ "step": 155
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.23811880045549916,
+ "learning_rate": 7.609756097560976e-05,
+ "loss": 1.2838,
+ "step": 156
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.19511077122033713,
+ "learning_rate": 7.658536585365854e-05,
+ "loss": 1.1594,
+ "step": 157
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.20094129399534238,
+ "learning_rate": 7.707317073170732e-05,
+ "loss": 1.2966,
+ "step": 158
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.19366245038292418,
+ "learning_rate": 7.75609756097561e-05,
+ "loss": 1.2246,
+ "step": 159
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.19409570223867306,
+ "learning_rate": 7.804878048780489e-05,
+ "loss": 1.2312,
+ "step": 160
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.2087258457033805,
+ "learning_rate": 7.853658536585366e-05,
+ "loss": 1.2169,
+ "step": 161
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.18765223996270428,
+ "learning_rate": 7.902439024390244e-05,
+ "loss": 1.2383,
+ "step": 162
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.20734180224147242,
+ "learning_rate": 7.951219512195122e-05,
+ "loss": 1.2587,
+ "step": 163
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.24690929540287834,
+ "learning_rate": 8e-05,
+ "loss": 1.1951,
+ "step": 164
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.2003538797619543,
+ "learning_rate": 7.999990914797545e-05,
+ "loss": 1.1982,
+ "step": 165
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.22469075613510484,
+ "learning_rate": 7.99996365923145e-05,
+ "loss": 1.2355,
+ "step": 166
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.21870100788336058,
+ "learning_rate": 7.999918233425526e-05,
+ "loss": 1.1103,
+ "step": 167
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.20939989594131886,
+ "learning_rate": 7.999854637586122e-05,
+ "loss": 1.1966,
+ "step": 168
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.43108211416237796,
+ "learning_rate": 7.999772872002132e-05,
+ "loss": 1.2882,
+ "step": 169
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.27045413432174487,
+ "learning_rate": 7.999672937044984e-05,
+ "loss": 1.2399,
+ "step": 170
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.19700483036740515,
+ "learning_rate": 7.999554833168642e-05,
+ "loss": 1.202,
+ "step": 171
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.3335979493370708,
+ "learning_rate": 7.999418560909604e-05,
+ "loss": 1.1995,
+ "step": 172
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.3165803974474567,
+ "learning_rate": 7.999264120886902e-05,
+ "loss": 1.1569,
+ "step": 173
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.1951699080346223,
+ "learning_rate": 7.999091513802093e-05,
+ "loss": 1.1778,
+ "step": 174
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.2087559121749787,
+ "learning_rate": 7.998900740439265e-05,
+ "loss": 1.1736,
+ "step": 175
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.20345180977460478,
+ "learning_rate": 7.998691801665024e-05,
+ "loss": 1.2281,
+ "step": 176
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.24617644827252333,
+ "learning_rate": 7.998464698428495e-05,
+ "loss": 1.2072,
+ "step": 177
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.2469050959356265,
+ "learning_rate": 7.998219431761318e-05,
+ "loss": 1.2242,
+ "step": 178
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.19529317748460623,
+ "learning_rate": 7.997956002777642e-05,
+ "loss": 1.2567,
+ "step": 179
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.19048389491381376,
+ "learning_rate": 7.99767441267412e-05,
+ "loss": 1.2982,
+ "step": 180
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.2085799116493225,
+ "learning_rate": 7.997374662729904e-05,
+ "loss": 1.1254,
+ "step": 181
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.20636853256378995,
+ "learning_rate": 7.997056754306636e-05,
+ "loss": 1.2435,
+ "step": 182
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.20590016382290252,
+ "learning_rate": 7.99672068884845e-05,
+ "loss": 1.2658,
+ "step": 183
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.1931166169764433,
+ "learning_rate": 7.996366467881955e-05,
+ "loss": 1.1637,
+ "step": 184
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.18873318157988098,
+ "learning_rate": 7.995994093016237e-05,
+ "loss": 1.1335,
+ "step": 185
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.19210254625199108,
+ "learning_rate": 7.995603565942846e-05,
+ "loss": 1.1928,
+ "step": 186
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.2130986479765664,
+ "learning_rate": 7.995194888435792e-05,
+ "loss": 1.2158,
+ "step": 187
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.22003854501814088,
+ "learning_rate": 7.994768062351532e-05,
+ "loss": 1.2288,
+ "step": 188
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.20330803191993058,
+ "learning_rate": 7.994323089628968e-05,
+ "loss": 1.2426,
+ "step": 189
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.20567314642208634,
+ "learning_rate": 7.993859972289434e-05,
+ "loss": 1.2649,
+ "step": 190
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.21556663727342962,
+ "learning_rate": 7.993378712436686e-05,
+ "loss": 1.2545,
+ "step": 191
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.20309165469109888,
+ "learning_rate": 7.992879312256897e-05,
+ "loss": 1.3338,
+ "step": 192
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.19574356669421325,
+ "learning_rate": 7.992361774018641e-05,
+ "loss": 1.278,
+ "step": 193
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.2763613746722313,
+ "learning_rate": 7.991826100072891e-05,
+ "loss": 1.2571,
+ "step": 194
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.19346552479915102,
+ "learning_rate": 7.991272292852996e-05,
+ "loss": 1.2027,
+ "step": 195
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.2281167812123908,
+ "learning_rate": 7.990700354874683e-05,
+ "loss": 1.2586,
+ "step": 196
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.19699013712137542,
+ "learning_rate": 7.990110288736042e-05,
+ "loss": 1.1371,
+ "step": 197
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.21768209981475933,
+ "learning_rate": 7.989502097117503e-05,
+ "loss": 1.2522,
+ "step": 198
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.21335427847754582,
+ "learning_rate": 7.988875782781838e-05,
+ "loss": 1.2437,
+ "step": 199
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.21856710629066897,
+ "learning_rate": 7.988231348574147e-05,
+ "loss": 1.2135,
+ "step": 200
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.20482062658774797,
+ "learning_rate": 7.987568797421836e-05,
+ "loss": 1.1755,
+ "step": 201
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.2017756813960897,
+ "learning_rate": 7.986888132334608e-05,
+ "loss": 1.1699,
+ "step": 202
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.20496443848153809,
+ "learning_rate": 7.986189356404458e-05,
+ "loss": 1.2125,
+ "step": 203
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.2134603800558358,
+ "learning_rate": 7.985472472805643e-05,
+ "loss": 1.2391,
+ "step": 204
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.2364175573420861,
+ "learning_rate": 7.98473748479468e-05,
+ "loss": 1.2384,
+ "step": 205
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.1872419861598724,
+ "learning_rate": 7.983984395710326e-05,
+ "loss": 1.1457,
+ "step": 206
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.28222194007095774,
+ "learning_rate": 7.983213208973566e-05,
+ "loss": 1.2952,
+ "step": 207
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.1916094851162064,
+ "learning_rate": 7.982423928087593e-05,
+ "loss": 1.1763,
+ "step": 208
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.18446245256166657,
+ "learning_rate": 7.981616556637795e-05,
+ "loss": 1.1863,
+ "step": 209
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.195191961022491,
+ "learning_rate": 7.980791098291737e-05,
+ "loss": 1.2036,
+ "step": 210
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.2652439657825496,
+ "learning_rate": 7.979947556799151e-05,
+ "loss": 1.2834,
+ "step": 211
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.24308438957843412,
+ "learning_rate": 7.979085935991906e-05,
+ "loss": 1.234,
+ "step": 212
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.21294701043622016,
+ "learning_rate": 7.978206239784004e-05,
+ "loss": 1.3006,
+ "step": 213
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.25809277041859524,
+ "learning_rate": 7.977308472171553e-05,
+ "loss": 1.2272,
+ "step": 214
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.193463860107294,
+ "learning_rate": 7.976392637232754e-05,
+ "loss": 1.2295,
+ "step": 215
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.2150023760609626,
+ "learning_rate": 7.975458739127877e-05,
+ "loss": 1.2135,
+ "step": 216
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.22590495955605894,
+ "learning_rate": 7.974506782099253e-05,
+ "loss": 1.2532,
+ "step": 217
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.21023744668403702,
+ "learning_rate": 7.973536770471242e-05,
+ "loss": 1.2472,
+ "step": 218
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.2345749799511543,
+ "learning_rate": 7.972548708650218e-05,
+ "loss": 1.1791,
+ "step": 219
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.2158876734005217,
+ "learning_rate": 7.971542601124553e-05,
+ "loss": 1.2483,
+ "step": 220
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.29455339949432446,
+ "learning_rate": 7.970518452464593e-05,
+ "loss": 1.2894,
+ "step": 221
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.23983708730626851,
+ "learning_rate": 7.969476267322636e-05,
+ "loss": 1.271,
+ "step": 222
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.1922400905426158,
+ "learning_rate": 7.968416050432912e-05,
+ "loss": 1.2139,
+ "step": 223
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.2238136844422931,
+ "learning_rate": 7.967337806611568e-05,
+ "loss": 1.2655,
+ "step": 224
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.21230292828267672,
+ "learning_rate": 7.966241540756631e-05,
+ "loss": 1.2406,
+ "step": 225
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.26656119419070456,
+ "learning_rate": 7.965127257848004e-05,
+ "loss": 1.2595,
+ "step": 226
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.22381385502992684,
+ "learning_rate": 7.963994962947426e-05,
+ "loss": 1.1737,
+ "step": 227
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20056702203994298,
+ "learning_rate": 7.962844661198462e-05,
+ "loss": 1.1969,
+ "step": 228
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20148701321526885,
+ "learning_rate": 7.961676357826478e-05,
+ "loss": 1.2151,
+ "step": 229
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20034834807028637,
+ "learning_rate": 7.960490058138604e-05,
+ "loss": 1.1455,
+ "step": 230
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.21050838521846033,
+ "learning_rate": 7.959285767523732e-05,
+ "loss": 1.2223,
+ "step": 231
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20904772138969777,
+ "learning_rate": 7.95806349145247e-05,
+ "loss": 1.2534,
+ "step": 232
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.20307877304792957,
+ "learning_rate": 7.956823235477134e-05,
+ "loss": 1.1352,
+ "step": 233
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.20501105270897094,
+ "learning_rate": 7.95556500523171e-05,
+ "loss": 1.2031,
+ "step": 234
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.19800586972038586,
+ "learning_rate": 7.954288806431838e-05,
+ "loss": 1.2567,
+ "step": 235
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.2175102450594135,
+ "learning_rate": 7.952994644874777e-05,
+ "loss": 1.2538,
+ "step": 236
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.22698189300067595,
+ "learning_rate": 7.951682526439391e-05,
+ "loss": 1.3088,
+ "step": 237
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.19208392014975315,
+ "learning_rate": 7.950352457086109e-05,
+ "loss": 1.2336,
+ "step": 238
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.27004086334319655,
+ "learning_rate": 7.949004442856905e-05,
+ "loss": 1.2012,
+ "step": 239
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.23420974954538043,
+ "learning_rate": 7.947638489875272e-05,
+ "loss": 1.2244,
+ "step": 240
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.20514399124802024,
+ "learning_rate": 7.946254604346186e-05,
+ "loss": 1.2548,
+ "step": 241
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.19334973602372896,
+ "learning_rate": 7.944852792556092e-05,
+ "loss": 1.2104,
+ "step": 242
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.1992640714537956,
+ "learning_rate": 7.943433060872858e-05,
+ "loss": 1.2628,
+ "step": 243
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.203284617090413,
+ "learning_rate": 7.941995415745761e-05,
+ "loss": 1.2002,
+ "step": 244
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.22795306969682058,
+ "learning_rate": 7.94053986370545e-05,
+ "loss": 1.2215,
+ "step": 245
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.20789041346838505,
+ "learning_rate": 7.939066411363915e-05,
+ "loss": 1.0998,
+ "step": 246
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.22354868884742066,
+ "learning_rate": 7.937575065414464e-05,
+ "loss": 1.2564,
+ "step": 247
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.21176392726647736,
+ "learning_rate": 7.936065832631687e-05,
+ "loss": 1.2816,
+ "step": 248
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.19967179557235587,
+ "learning_rate": 7.934538719871427e-05,
+ "loss": 1.1961,
+ "step": 249
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.210819577350627,
+ "learning_rate": 7.932993734070747e-05,
+ "loss": 1.2167,
+ "step": 250
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.21537794551756187,
+ "learning_rate": 7.931430882247903e-05,
+ "loss": 1.2341,
+ "step": 251
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.22850872387256574,
+ "learning_rate": 7.929850171502304e-05,
+ "loss": 1.1686,
+ "step": 252
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.22380366415076383,
+ "learning_rate": 7.928251609014493e-05,
+ "loss": 1.1462,
+ "step": 253
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.22426923149036065,
+ "learning_rate": 7.926635202046102e-05,
+ "loss": 1.1792,
+ "step": 254
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.42082703321103965,
+ "learning_rate": 7.925000957939822e-05,
+ "loss": 1.2718,
+ "step": 255
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.2235432774854074,
+ "learning_rate": 7.92334888411937e-05,
+ "loss": 1.2598,
+ "step": 256
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.281644028934108,
+ "learning_rate": 7.92167898808946e-05,
+ "loss": 1.2205,
+ "step": 257
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.2037705143888748,
+ "learning_rate": 7.919991277435763e-05,
+ "loss": 1.1737,
+ "step": 258
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.20917419230028977,
+ "learning_rate": 7.918285759824879e-05,
+ "loss": 1.2035,
+ "step": 259
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.20510847570635518,
+ "learning_rate": 7.916562443004292e-05,
+ "loss": 1.2135,
+ "step": 260
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.25172483071092466,
+ "learning_rate": 7.914821334802342e-05,
+ "loss": 1.2218,
+ "step": 261
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.21102706700634313,
+ "learning_rate": 7.91306244312819e-05,
+ "loss": 1.1738,
+ "step": 262
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.22626060872645815,
+ "learning_rate": 7.911285775971781e-05,
+ "loss": 1.238,
+ "step": 263
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.22448567539778486,
+ "learning_rate": 7.909491341403805e-05,
+ "loss": 1.2404,
+ "step": 264
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.2019099786139193,
+ "learning_rate": 7.907679147575661e-05,
+ "loss": 1.213,
+ "step": 265
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.24307234839096267,
+ "learning_rate": 7.905849202719422e-05,
+ "loss": 1.2322,
+ "step": 266
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.19801890521743487,
+ "learning_rate": 7.904001515147802e-05,
+ "loss": 1.2448,
+ "step": 267
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.2102742273575385,
+ "learning_rate": 7.902136093254106e-05,
+ "loss": 1.1657,
+ "step": 268
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.2173464476815016,
+ "learning_rate": 7.900252945512201e-05,
+ "loss": 1.2549,
+ "step": 269
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.20957275458699595,
+ "learning_rate": 7.898352080476479e-05,
+ "loss": 1.2536,
+ "step": 270
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.20691966388952363,
+ "learning_rate": 7.896433506781811e-05,
+ "loss": 1.2661,
+ "step": 271
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.2276662275112648,
+ "learning_rate": 7.894497233143509e-05,
+ "loss": 1.2409,
+ "step": 272
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.23854109569301263,
+ "learning_rate": 7.892543268357297e-05,
+ "loss": 1.2681,
+ "step": 273
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.2233864156677627,
+ "learning_rate": 7.890571621299252e-05,
+ "loss": 1.1687,
+ "step": 274
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.20114129147925475,
+ "learning_rate": 7.888582300925787e-05,
+ "loss": 1.2184,
+ "step": 275
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.2154654670569462,
+ "learning_rate": 7.886575316273586e-05,
+ "loss": 1.1982,
+ "step": 276
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.2292982209343639,
+ "learning_rate": 7.884550676459583e-05,
+ "loss": 1.2129,
+ "step": 277
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.21302713135229548,
+ "learning_rate": 7.882508390680908e-05,
+ "loss": 1.1605,
+ "step": 278
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.2123661020671048,
+ "learning_rate": 7.88044846821485e-05,
+ "loss": 1.2308,
+ "step": 279
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.2080577410800404,
+ "learning_rate": 7.878370918418818e-05,
+ "loss": 1.2195,
+ "step": 280
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.19663901881127385,
+ "learning_rate": 7.876275750730289e-05,
+ "loss": 1.1591,
+ "step": 281
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.20534502031312163,
+ "learning_rate": 7.874162974666776e-05,
+ "loss": 1.2664,
+ "step": 282
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.23240445399513837,
+ "learning_rate": 7.872032599825779e-05,
+ "loss": 1.2151,
+ "step": 283
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.2672527316717507,
+ "learning_rate": 7.86988463588474e-05,
+ "loss": 1.2406,
+ "step": 284
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.19893903058743695,
+ "learning_rate": 7.867719092601003e-05,
+ "loss": 1.1291,
+ "step": 285
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.33275268109930917,
+ "learning_rate": 7.865535979811768e-05,
+ "loss": 1.1406,
+ "step": 286
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.2373619455690358,
+ "learning_rate": 7.863335307434045e-05,
+ "loss": 1.2799,
+ "step": 287
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.263235735390858,
+ "learning_rate": 7.861117085464612e-05,
+ "loss": 1.2415,
+ "step": 288
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.25884281780784324,
+ "learning_rate": 7.858881323979965e-05,
+ "loss": 1.3919,
+ "step": 289
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.25426288332255736,
+ "learning_rate": 7.85662803313628e-05,
+ "loss": 1.174,
+ "step": 290
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.26655405527881243,
+ "learning_rate": 7.854357223169356e-05,
+ "loss": 1.2806,
+ "step": 291
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.20909844432349833,
+ "learning_rate": 7.852068904394579e-05,
+ "loss": 1.2627,
+ "step": 292
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.21307115068935759,
+ "learning_rate": 7.849763087206866e-05,
+ "loss": 1.1879,
+ "step": 293
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.25009949471398946,
+ "learning_rate": 7.847439782080628e-05,
+ "loss": 1.2881,
+ "step": 294
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.20960783418679174,
+ "learning_rate": 7.845098999569712e-05,
+ "loss": 1.2723,
+ "step": 295
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.24968832437925104,
+ "learning_rate": 7.842740750307362e-05,
+ "loss": 1.2029,
+ "step": 296
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.22981196585125677,
+ "learning_rate": 7.84036504500616e-05,
+ "loss": 1.1695,
+ "step": 297
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.2320606844751365,
+ "learning_rate": 7.837971894457991e-05,
+ "loss": 1.2317,
+ "step": 298
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.23051459673906124,
+ "learning_rate": 7.835561309533981e-05,
+ "loss": 1.2046,
+ "step": 299
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.2510027231060586,
+ "learning_rate": 7.833133301184457e-05,
+ "loss": 1.199,
+ "step": 300
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.23601180466018787,
+ "learning_rate": 7.830687880438895e-05,
+ "loss": 1.1755,
+ "step": 301
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.24740820934385369,
+ "learning_rate": 7.828225058405864e-05,
+ "loss": 1.2054,
+ "step": 302
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.23065372979111173,
+ "learning_rate": 7.825744846272984e-05,
+ "loss": 1.2066,
+ "step": 303
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.22385077334838213,
+ "learning_rate": 7.823247255306866e-05,
+ "loss": 1.2147,
+ "step": 304
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.42981213948386104,
+ "learning_rate": 7.820732296853074e-05,
+ "loss": 1.2314,
+ "step": 305
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.21122844902751076,
+ "learning_rate": 7.818199982336058e-05,
+ "loss": 1.1462,
+ "step": 306
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.23374869692118933,
+ "learning_rate": 7.815650323259117e-05,
+ "loss": 1.2051,
+ "step": 307
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.21662363795962128,
+ "learning_rate": 7.813083331204332e-05,
+ "loss": 1.1575,
+ "step": 308
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.2088315773384112,
+ "learning_rate": 7.810499017832526e-05,
+ "loss": 1.1316,
+ "step": 309
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.2095238410730976,
+ "learning_rate": 7.807897394883203e-05,
+ "loss": 1.2087,
+ "step": 310
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.22672932127256515,
+ "learning_rate": 7.805278474174499e-05,
+ "loss": 1.2512,
+ "step": 311
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.21873052340922736,
+ "learning_rate": 7.802642267603126e-05,
+ "loss": 1.1909,
+ "step": 312
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.219814521916342,
+ "learning_rate": 7.79998878714432e-05,
+ "loss": 1.1669,
+ "step": 313
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.3049426027257317,
+ "learning_rate": 7.797318044851786e-05,
+ "loss": 1.1797,
+ "step": 314
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.22309435690065985,
+ "learning_rate": 7.794630052857638e-05,
+ "loss": 1.1417,
+ "step": 315
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.3891885169154885,
+ "learning_rate": 7.791924823372354e-05,
+ "loss": 1.2369,
+ "step": 316
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.24780269452456372,
+ "learning_rate": 7.789202368684711e-05,
+ "loss": 1.2521,
+ "step": 317
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.21660460720269362,
+ "learning_rate": 7.786462701161738e-05,
+ "loss": 1.2151,
+ "step": 318
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.23635409466561857,
+ "learning_rate": 7.783705833248649e-05,
+ "loss": 1.2363,
+ "step": 319
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.2616135839903218,
+ "learning_rate": 7.780931777468797e-05,
+ "loss": 1.2428,
+ "step": 320
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.21461059159245083,
+ "learning_rate": 7.77814054642361e-05,
+ "loss": 1.1434,
+ "step": 321
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.25348824286656163,
+ "learning_rate": 7.775332152792539e-05,
+ "loss": 1.2368,
+ "step": 322
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.22275034726331247,
+ "learning_rate": 7.772506609332995e-05,
+ "loss": 1.1827,
+ "step": 323
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.25030821228147526,
+ "learning_rate": 7.769663928880298e-05,
+ "loss": 1.2428,
+ "step": 324
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.22251804398745534,
+ "learning_rate": 7.766804124347608e-05,
+ "loss": 1.1889,
+ "step": 325
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.23381455520411995,
+ "learning_rate": 7.763927208725879e-05,
+ "loss": 1.2115,
+ "step": 326
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.27341902651946226,
+ "learning_rate": 7.761033195083791e-05,
+ "loss": 1.2535,
+ "step": 327
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.24862471659814522,
+ "learning_rate": 7.758122096567694e-05,
+ "loss": 1.2128,
+ "step": 328
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.2251357082045494,
+ "learning_rate": 7.755193926401547e-05,
+ "loss": 1.2334,
+ "step": 329
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.3173274941622932,
+ "learning_rate": 7.752248697886857e-05,
+ "loss": 1.226,
+ "step": 330
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.23056440717672175,
+ "learning_rate": 7.74928642440263e-05,
+ "loss": 1.2339,
+ "step": 331
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2801507500859342,
+ "learning_rate": 7.746307119405286e-05,
+ "loss": 1.287,
+ "step": 332
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2267818430426272,
+ "learning_rate": 7.743310796428622e-05,
+ "loss": 1.1916,
+ "step": 333
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2777329160365585,
+ "learning_rate": 7.74029746908374e-05,
+ "loss": 1.252,
+ "step": 334
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.25289169762353,
+ "learning_rate": 7.737267151058983e-05,
+ "loss": 1.2153,
+ "step": 335
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2424670686901653,
+ "learning_rate": 7.734219856119875e-05,
+ "loss": 1.2227,
+ "step": 336
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.22747092217441645,
+ "learning_rate": 7.731155598109067e-05,
+ "loss": 1.19,
+ "step": 337
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.2307810940100189,
+ "learning_rate": 7.728074390946257e-05,
+ "loss": 1.1818,
+ "step": 338
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.2583402574655623,
+ "learning_rate": 7.724976248628142e-05,
+ "loss": 1.1608,
+ "step": 339
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.22140209760890694,
+ "learning_rate": 7.721861185228347e-05,
+ "loss": 1.1245,
+ "step": 340
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.25859310758244686,
+ "learning_rate": 7.718729214897362e-05,
+ "loss": 1.2247,
+ "step": 341
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.26371179531372124,
+ "learning_rate": 7.715580351862482e-05,
+ "loss": 1.2128,
+ "step": 342
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.26575541302851047,
+ "learning_rate": 7.712414610427733e-05,
+ "loss": 1.2443,
+ "step": 343
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.269978305197599,
+ "learning_rate": 7.709232004973816e-05,
+ "loss": 1.2231,
+ "step": 344
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.26583998705977047,
+ "learning_rate": 7.70603254995804e-05,
+ "loss": 1.2476,
+ "step": 345
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.24256062164066097,
+ "learning_rate": 7.702816259914253e-05,
+ "loss": 1.2901,
+ "step": 346
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.3463123472658915,
+ "learning_rate": 7.699583149452779e-05,
+ "loss": 1.3277,
+ "step": 347
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.2269096590531878,
+ "learning_rate": 7.696333233260345e-05,
+ "loss": 1.2047,
+ "step": 348
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.25136883001050025,
+ "learning_rate": 7.693066526100031e-05,
+ "loss": 1.1619,
+ "step": 349
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.2565112571116145,
+ "learning_rate": 7.68978304281118e-05,
+ "loss": 1.2389,
+ "step": 350
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.22175779550828703,
+ "learning_rate": 7.686482798309349e-05,
+ "loss": 1.2238,
+ "step": 351
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.22588304332216555,
+ "learning_rate": 7.683165807586234e-05,
+ "loss": 1.174,
+ "step": 352
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.24889474296529737,
+ "learning_rate": 7.6798320857096e-05,
+ "loss": 1.2366,
+ "step": 353
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.27339703806525034,
+ "learning_rate": 7.676481647823214e-05,
+ "loss": 1.2356,
+ "step": 354
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.23424666722888365,
+ "learning_rate": 7.673114509146782e-05,
+ "loss": 1.2089,
+ "step": 355
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.27978285392461766,
+ "learning_rate": 7.66973068497587e-05,
+ "loss": 1.2609,
+ "step": 356
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.2509423350138824,
+ "learning_rate": 7.666330190681844e-05,
+ "loss": 1.1777,
+ "step": 357
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.23007730927468031,
+ "learning_rate": 7.662913041711793e-05,
+ "loss": 1.154,
+ "step": 358
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.2438648674953112,
+ "learning_rate": 7.659479253588462e-05,
+ "loss": 1.2257,
+ "step": 359
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.28816093242092233,
+ "learning_rate": 7.65602884191018e-05,
+ "loss": 1.2558,
+ "step": 360
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.24972815300596035,
+ "learning_rate": 7.652561822350793e-05,
+ "loss": 1.2837,
+ "step": 361
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.2543189139697063,
+ "learning_rate": 7.649078210659587e-05,
+ "loss": 1.2193,
+ "step": 362
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.2237937956718952,
+ "learning_rate": 7.645578022661224e-05,
+ "loss": 1.2237,
+ "step": 363
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.29742029408787396,
+ "learning_rate": 7.642061274255657e-05,
+ "loss": 1.2116,
+ "step": 364
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2462883147335493,
+ "learning_rate": 7.638527981418075e-05,
+ "loss": 1.1827,
+ "step": 365
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2647802498907096,
+ "learning_rate": 7.634978160198817e-05,
+ "loss": 1.2739,
+ "step": 366
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.22360398779217264,
+ "learning_rate": 7.631411826723306e-05,
+ "loss": 1.2185,
+ "step": 367
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2635048004593543,
+ "learning_rate": 7.627828997191973e-05,
+ "loss": 1.2317,
+ "step": 368
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2764803449917684,
+ "learning_rate": 7.624229687880184e-05,
+ "loss": 1.1923,
+ "step": 369
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.25724943233414527,
+ "learning_rate": 7.620613915138166e-05,
+ "loss": 1.2218,
+ "step": 370
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.2858318045794755,
+ "learning_rate": 7.61698169539093e-05,
+ "loss": 1.1496,
+ "step": 371
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.23547216647460364,
+ "learning_rate": 7.613333045138206e-05,
+ "loss": 1.1905,
+ "step": 372
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.22984814903684375,
+ "learning_rate": 7.609667980954355e-05,
+ "loss": 1.2009,
+ "step": 373
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.2551903754079084,
+ "learning_rate": 7.605986519488301e-05,
+ "loss": 1.2042,
+ "step": 374
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.2508257410125616,
+ "learning_rate": 7.602288677463457e-05,
+ "loss": 1.2468,
+ "step": 375
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.25324577774935964,
+ "learning_rate": 7.598574471677644e-05,
+ "loss": 1.2603,
+ "step": 376
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.35888776531769967,
+ "learning_rate": 7.59484391900302e-05,
+ "loss": 1.1929,
+ "step": 377
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.22048517191014724,
+ "learning_rate": 7.591097036385994e-05,
+ "loss": 1.1783,
+ "step": 378
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.2781160412746083,
+ "learning_rate": 7.587333840847162e-05,
+ "loss": 1.3397,
+ "step": 379
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.24033046830332258,
+ "learning_rate": 7.583554349481222e-05,
+ "loss": 1.2436,
+ "step": 380
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.26413762380260003,
+ "learning_rate": 7.579758579456893e-05,
+ "loss": 1.1917,
+ "step": 381
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.2390937887338632,
+ "learning_rate": 7.575946548016847e-05,
+ "loss": 1.2186,
+ "step": 382
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.25131263043429275,
+ "learning_rate": 7.572118272477622e-05,
+ "loss": 1.2538,
+ "step": 383
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.223974104870702,
+ "learning_rate": 7.568273770229546e-05,
+ "loss": 1.2165,
+ "step": 384
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.25840356830252875,
+ "learning_rate": 7.564413058736663e-05,
+ "loss": 1.1848,
+ "step": 385
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.2723156683076603,
+ "learning_rate": 7.560536155536641e-05,
+ "loss": 1.1982,
+ "step": 386
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.265687427976889,
+ "learning_rate": 7.556643078240708e-05,
+ "loss": 1.231,
+ "step": 387
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.25152762080976077,
+ "learning_rate": 7.552733844533562e-05,
+ "loss": 1.1974,
+ "step": 388
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.2366049485053541,
+ "learning_rate": 7.548808472173292e-05,
+ "loss": 1.3119,
+ "step": 389
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.22092196577077122,
+ "learning_rate": 7.5448669789913e-05,
+ "loss": 1.195,
+ "step": 390
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.22667521540462374,
+ "learning_rate": 7.540909382892217e-05,
+ "loss": 1.1431,
+ "step": 391
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.25432207282646513,
+ "learning_rate": 7.536935701853823e-05,
+ "loss": 1.2173,
+ "step": 392
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.29950506457923864,
+ "learning_rate": 7.53294595392697e-05,
+ "loss": 1.1962,
+ "step": 393
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.24735689607229913,
+ "learning_rate": 7.528940157235487e-05,
+ "loss": 1.2053,
+ "step": 394
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.24394198607459663,
+ "learning_rate": 7.524918329976114e-05,
+ "loss": 1.1979,
+ "step": 395
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.2630369372689188,
+ "learning_rate": 7.520880490418409e-05,
+ "loss": 1.2111,
+ "step": 396
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.26275028416291457,
+ "learning_rate": 7.516826656904664e-05,
+ "loss": 1.2133,
+ "step": 397
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.23938074620956928,
+ "learning_rate": 7.512756847849831e-05,
+ "loss": 1.1355,
+ "step": 398
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.3724960610098138,
+ "learning_rate": 7.508671081741428e-05,
+ "loss": 1.2572,
+ "step": 399
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.24161685847894723,
+ "learning_rate": 7.504569377139462e-05,
+ "loss": 1.1706,
+ "step": 400
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.26121591322670523,
+ "learning_rate": 7.50045175267634e-05,
+ "loss": 1.2135,
+ "step": 401
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2465579498164775,
+ "learning_rate": 7.496318227056788e-05,
+ "loss": 1.1641,
+ "step": 402
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2556288696122787,
+ "learning_rate": 7.492168819057767e-05,
+ "loss": 1.2939,
+ "step": 403
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.261481216336303,
+ "learning_rate": 7.488003547528382e-05,
+ "loss": 1.2026,
+ "step": 404
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2389415135676362,
+ "learning_rate": 7.483822431389799e-05,
+ "loss": 1.2131,
+ "step": 405
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2559201956627192,
+ "learning_rate": 7.479625489635162e-05,
+ "loss": 1.1246,
+ "step": 406
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.27127932491822604,
+ "learning_rate": 7.475412741329504e-05,
+ "loss": 1.2429,
+ "step": 407
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.27006004008695594,
+ "learning_rate": 7.47118420560966e-05,
+ "loss": 1.2388,
+ "step": 408
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.23716823297200537,
+ "learning_rate": 7.466939901684182e-05,
+ "loss": 1.1264,
+ "step": 409
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.2885373898669248,
+ "learning_rate": 7.462679848833252e-05,
+ "loss": 1.2786,
+ "step": 410
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.49215227598639927,
+ "learning_rate": 7.458404066408588e-05,
+ "loss": 1.2386,
+ "step": 411
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.24235735604947403,
+ "learning_rate": 7.454112573833368e-05,
+ "loss": 1.1423,
+ "step": 412
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.2584614748054343,
+ "learning_rate": 7.449805390602127e-05,
+ "loss": 1.2669,
+ "step": 413
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.23806123085998873,
+ "learning_rate": 7.445482536280684e-05,
+ "loss": 1.1763,
+ "step": 414
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.24459517607786851,
+ "learning_rate": 7.441144030506043e-05,
+ "loss": 1.198,
+ "step": 415
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.25801616402700395,
+ "learning_rate": 7.436789892986304e-05,
+ "loss": 1.2136,
+ "step": 416
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.2814819942392514,
+ "learning_rate": 7.432420143500578e-05,
+ "loss": 1.2398,
+ "step": 417
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.22134709322606153,
+ "learning_rate": 7.428034801898893e-05,
+ "loss": 1.1592,
+ "step": 418
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.2899677536995633,
+ "learning_rate": 7.42363388810211e-05,
+ "loss": 1.2296,
+ "step": 419
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.24005943230262294,
+ "learning_rate": 7.419217422101822e-05,
+ "loss": 1.2223,
+ "step": 420
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.26417562369496167,
+ "learning_rate": 7.414785423960275e-05,
+ "loss": 1.2261,
+ "step": 421
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.2580815883535521,
+ "learning_rate": 7.410337913810271e-05,
+ "loss": 1.2021,
+ "step": 422
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.25242217589496435,
+ "learning_rate": 7.405874911855071e-05,
+ "loss": 1.239,
+ "step": 423
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.21991733999839932,
+ "learning_rate": 7.401396438368315e-05,
+ "loss": 1.1716,
+ "step": 424
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.40116538322720213,
+ "learning_rate": 7.396902513693924e-05,
+ "loss": 1.2773,
+ "step": 425
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.277333939455099,
+ "learning_rate": 7.392393158246002e-05,
+ "loss": 1.2574,
+ "step": 426
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.27146087746385755,
+ "learning_rate": 7.387868392508756e-05,
+ "loss": 1.2243,
+ "step": 427
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.255881055620786,
+ "learning_rate": 7.38332823703639e-05,
+ "loss": 1.223,
+ "step": 428
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.24807364856677255,
+ "learning_rate": 7.378772712453021e-05,
+ "loss": 1.1985,
+ "step": 429
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.25746257617764423,
+ "learning_rate": 7.37420183945258e-05,
+ "loss": 1.2502,
+ "step": 430
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.28851991049982234,
+ "learning_rate": 7.369615638798722e-05,
+ "loss": 1.2535,
+ "step": 431
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.24113389811604363,
+ "learning_rate": 7.365014131324725e-05,
+ "loss": 1.2227,
+ "step": 432
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.2414465151257969,
+ "learning_rate": 7.360397337933405e-05,
+ "loss": 1.1884,
+ "step": 433
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.2735463134699831,
+ "learning_rate": 7.355765279597011e-05,
+ "loss": 1.2756,
+ "step": 434
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.2588437452987293,
+ "learning_rate": 7.351117977357139e-05,
+ "loss": 1.2108,
+ "step": 435
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.26573294117796553,
+ "learning_rate": 7.346455452324629e-05,
+ "loss": 1.1821,
+ "step": 436
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.2555476577827304,
+ "learning_rate": 7.341777725679473e-05,
+ "loss": 1.1937,
+ "step": 437
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.2867704132108098,
+ "learning_rate": 7.337084818670716e-05,
+ "loss": 1.2272,
+ "step": 438
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.27726678115981157,
+ "learning_rate": 7.332376752616367e-05,
+ "loss": 1.2331,
+ "step": 439
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.26955338021079955,
+ "learning_rate": 7.32765354890329e-05,
+ "loss": 1.1731,
+ "step": 440
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.25250321202536524,
+ "learning_rate": 7.322915228987116e-05,
+ "loss": 1.2653,
+ "step": 441
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.24748844179765395,
+ "learning_rate": 7.318161814392143e-05,
+ "loss": 1.24,
+ "step": 442
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.28177805247356325,
+ "learning_rate": 7.313393326711239e-05,
+ "loss": 1.185,
+ "step": 443
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.24093242000396312,
+ "learning_rate": 7.30860978760574e-05,
+ "loss": 1.1994,
+ "step": 444
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.26277803901457075,
+ "learning_rate": 7.30381121880536e-05,
+ "loss": 1.212,
+ "step": 445
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2506524258682433,
+ "learning_rate": 7.298997642108079e-05,
+ "loss": 1.2421,
+ "step": 446
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2840599700015824,
+ "learning_rate": 7.294169079380061e-05,
+ "loss": 1.1818,
+ "step": 447
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.24892184038117549,
+ "learning_rate": 7.289325552555538e-05,
+ "loss": 1.1916,
+ "step": 448
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2700898428541357,
+ "learning_rate": 7.284467083636722e-05,
+ "loss": 1.2517,
+ "step": 449
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2617848546539419,
+ "learning_rate": 7.279593694693698e-05,
+ "loss": 1.2063,
+ "step": 450
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2698278585334131,
+ "learning_rate": 7.274705407864332e-05,
+ "loss": 1.194,
+ "step": 451
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.23678313024953834,
+ "learning_rate": 7.26980224535416e-05,
+ "loss": 1.2349,
+ "step": 452
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.24851875792002978,
+ "learning_rate": 7.264884229436293e-05,
+ "loss": 1.1758,
+ "step": 453
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.24122080121681125,
+ "learning_rate": 7.259951382451318e-05,
+ "loss": 1.1962,
+ "step": 454
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.22741322959884405,
+ "learning_rate": 7.25500372680719e-05,
+ "loss": 1.1702,
+ "step": 455
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.2297475610861458,
+ "learning_rate": 7.250041284979137e-05,
+ "loss": 1.1466,
+ "step": 456
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.3057605989721467,
+ "learning_rate": 7.245064079509553e-05,
+ "loss": 1.246,
+ "step": 457
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.2719638501597136,
+ "learning_rate": 7.240072133007899e-05,
+ "loss": 1.2184,
+ "step": 458
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.2436807816414479,
+ "learning_rate": 7.235065468150593e-05,
+ "loss": 1.2324,
+ "step": 459
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.23436349430255515,
+ "learning_rate": 7.23004410768092e-05,
+ "loss": 1.1813,
+ "step": 460
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.2398940990211377,
+ "learning_rate": 7.22500807440892e-05,
+ "loss": 1.1924,
+ "step": 461
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.2605716625062531,
+ "learning_rate": 7.219957391211281e-05,
+ "loss": 1.182,
+ "step": 462
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.260462524570941,
+ "learning_rate": 7.214892081031244e-05,
+ "loss": 1.2136,
+ "step": 463
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.21979766512306334,
+ "learning_rate": 7.209812166878491e-05,
+ "loss": 1.2066,
+ "step": 464
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.23324453647530663,
+ "learning_rate": 7.204717671829051e-05,
+ "loss": 1.1657,
+ "step": 465
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.2529434935507481,
+ "learning_rate": 7.199608619025177e-05,
+ "loss": 1.2093,
+ "step": 466
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.25371701891720116,
+ "learning_rate": 7.194485031675265e-05,
+ "loss": 1.2225,
+ "step": 467
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.23272423066292103,
+ "learning_rate": 7.189346933053725e-05,
+ "loss": 1.1721,
+ "step": 468
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.25122928735587546,
+ "learning_rate": 7.184194346500892e-05,
+ "loss": 1.2537,
+ "step": 469
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.2159270875490409,
+ "learning_rate": 7.179027295422913e-05,
+ "loss": 1.197,
+ "step": 470
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.2633111059076544,
+ "learning_rate": 7.173845803291636e-05,
+ "loss": 1.1721,
+ "step": 471
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.30555936322098703,
+ "learning_rate": 7.168649893644517e-05,
+ "loss": 1.3011,
+ "step": 472
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.23492670111453726,
+ "learning_rate": 7.163439590084502e-05,
+ "loss": 1.1601,
+ "step": 473
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.26602734263721806,
+ "learning_rate": 7.158214916279923e-05,
+ "loss": 1.2808,
+ "step": 474
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.3182695007856262,
+ "learning_rate": 7.152975895964386e-05,
+ "loss": 1.2967,
+ "step": 475
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.2785021674736721,
+ "learning_rate": 7.147722552936673e-05,
+ "loss": 1.1789,
+ "step": 476
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.279474303138652,
+ "learning_rate": 7.142454911060627e-05,
+ "loss": 1.2596,
+ "step": 477
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.2556980144910755,
+ "learning_rate": 7.137172994265044e-05,
+ "loss": 1.2426,
+ "step": 478
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.3311256331993533,
+ "learning_rate": 7.131876826543565e-05,
+ "loss": 1.2059,
+ "step": 479
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.26467296197775253,
+ "learning_rate": 7.12656643195457e-05,
+ "loss": 1.2482,
+ "step": 480
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.27444885274652553,
+ "learning_rate": 7.121241834621064e-05,
+ "loss": 1.2528,
+ "step": 481
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.2572283861115396,
+ "learning_rate": 7.115903058730567e-05,
+ "loss": 1.1849,
+ "step": 482
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.2677065778235683,
+ "learning_rate": 7.11055012853501e-05,
+ "loss": 1.2011,
+ "step": 483
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.29470622036742816,
+ "learning_rate": 7.105183068350619e-05,
+ "loss": 1.2398,
+ "step": 484
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.27609230248969197,
+ "learning_rate": 7.099801902557811e-05,
+ "loss": 1.2259,
+ "step": 485
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.24248634168099284,
+ "learning_rate": 7.094406655601073e-05,
+ "loss": 1.2282,
+ "step": 486
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.2765941767688746,
+ "learning_rate": 7.088997351988865e-05,
+ "loss": 1.2319,
+ "step": 487
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.29347776909858947,
+ "learning_rate": 7.083574016293493e-05,
+ "loss": 1.1765,
+ "step": 488
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.285370295424537,
+ "learning_rate": 7.078136673151008e-05,
+ "loss": 1.26,
+ "step": 489
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.29408734903836536,
+ "learning_rate": 7.072685347261093e-05,
+ "loss": 1.226,
+ "step": 490
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.27437470239205813,
+ "learning_rate": 7.067220063386947e-05,
+ "loss": 1.1976,
+ "step": 491
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.2680770258777871,
+ "learning_rate": 7.061740846355176e-05,
+ "loss": 1.1915,
+ "step": 492
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.27200362879502954,
+ "learning_rate": 7.056247721055678e-05,
+ "loss": 1.2002,
+ "step": 493
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.2637811092577037,
+ "learning_rate": 7.050740712441528e-05,
+ "loss": 1.287,
+ "step": 494
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.24657959209271266,
+ "learning_rate": 7.045219845528875e-05,
+ "loss": 1.2284,
+ "step": 495
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.25311992110358666,
+ "learning_rate": 7.039685145396812e-05,
+ "loss": 1.1616,
+ "step": 496
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.2564633694193358,
+ "learning_rate": 7.034136637187275e-05,
+ "loss": 1.2067,
+ "step": 497
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.2446797651174144,
+ "learning_rate": 7.028574346104926e-05,
+ "loss": 1.2284,
+ "step": 498
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.2592751463399255,
+ "learning_rate": 7.022998297417034e-05,
+ "loss": 1.2371,
+ "step": 499
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.2500713943206808,
+ "learning_rate": 7.017408516453365e-05,
+ "loss": 1.1061,
+ "step": 500
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.2812266276040743,
+ "learning_rate": 7.011805028606064e-05,
+ "loss": 1.1949,
+ "step": 501
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.298829667668083,
+ "learning_rate": 7.006187859329544e-05,
+ "loss": 1.2313,
+ "step": 502
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.26518768159745104,
+ "learning_rate": 7.000557034140361e-05,
+ "loss": 1.2246,
+ "step": 503
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.3037280360760458,
+ "learning_rate": 6.994912578617113e-05,
+ "loss": 1.1617,
+ "step": 504
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.2726903109255714,
+ "learning_rate": 6.989254518400309e-05,
+ "loss": 1.2415,
+ "step": 505
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.25568082003046966,
+ "learning_rate": 6.98358287919226e-05,
+ "loss": 1.1817,
+ "step": 506
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.25633294893705044,
+ "learning_rate": 6.97789768675696e-05,
+ "loss": 1.2149,
+ "step": 507
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.28291439435087123,
+ "learning_rate": 6.972198966919972e-05,
+ "loss": 1.1578,
+ "step": 508
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.27195184756655516,
+ "learning_rate": 6.966486745568308e-05,
+ "loss": 1.2355,
+ "step": 509
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.239159568376005,
+ "learning_rate": 6.960761048650312e-05,
+ "loss": 1.1688,
+ "step": 510
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.22961475425949177,
+ "learning_rate": 6.955021902175543e-05,
+ "loss": 1.2094,
+ "step": 511
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.27443773600741117,
+ "learning_rate": 6.949269332214651e-05,
+ "loss": 1.2559,
+ "step": 512
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.26230551832002097,
+ "learning_rate": 6.94350336489927e-05,
+ "loss": 1.2121,
+ "step": 513
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.2716742985303849,
+ "learning_rate": 6.937724026421892e-05,
+ "loss": 1.2444,
+ "step": 514
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.2537850139439542,
+ "learning_rate": 6.931931343035742e-05,
+ "loss": 1.1327,
+ "step": 515
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.28599587967496826,
+ "learning_rate": 6.926125341054676e-05,
+ "loss": 1.2236,
+ "step": 516
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.26780654378470103,
+ "learning_rate": 6.920306046853043e-05,
+ "loss": 1.2295,
+ "step": 517
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.23606296888412015,
+ "learning_rate": 6.914473486865577e-05,
+ "loss": 1.1543,
+ "step": 518
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.34976881174240837,
+ "learning_rate": 6.90862768758727e-05,
+ "loss": 1.2067,
+ "step": 519
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.2481257873494882,
+ "learning_rate": 6.902768675573258e-05,
+ "loss": 1.2188,
+ "step": 520
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.2996395778117021,
+ "learning_rate": 6.896896477438699e-05,
+ "loss": 1.2326,
+ "step": 521
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.8839768816333193,
+ "learning_rate": 6.891011119858643e-05,
+ "loss": 1.2435,
+ "step": 522
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.2851882482058998,
+ "learning_rate": 6.885112629567927e-05,
+ "loss": 1.2644,
+ "step": 523
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.2813663482913699,
+ "learning_rate": 6.879201033361035e-05,
+ "loss": 1.2309,
+ "step": 524
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.3257551560135454,
+ "learning_rate": 6.873276358091996e-05,
+ "loss": 1.2755,
+ "step": 525
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.28930479952494365,
+ "learning_rate": 6.867338630674247e-05,
+ "loss": 1.1962,
+ "step": 526
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.3077462996938649,
+ "learning_rate": 6.861387878080511e-05,
+ "loss": 1.2402,
+ "step": 527
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.2848900193452761,
+ "learning_rate": 6.855424127342688e-05,
+ "loss": 1.2748,
+ "step": 528
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.4765938812802202,
+ "learning_rate": 6.849447405551718e-05,
+ "loss": 1.2226,
+ "step": 529
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.53184473292579,
+ "learning_rate": 6.843457739857467e-05,
+ "loss": 1.2347,
+ "step": 530
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.6416239346492343,
+ "learning_rate": 6.837455157468596e-05,
+ "loss": 1.2429,
+ "step": 531
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.3188092712502773,
+ "learning_rate": 6.831439685652442e-05,
+ "loss": 1.216,
+ "step": 532
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.3527495731006385,
+ "learning_rate": 6.825411351734895e-05,
+ "loss": 1.1682,
+ "step": 533
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 0.29603753744741856,
+ "learning_rate": 6.819370183100274e-05,
+ "loss": 1.1434,
+ "step": 534
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 0.5252450389976622,
+ "learning_rate": 6.813316207191198e-05,
+ "loss": 1.1943,
+ "step": 535
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 0.32999419558659937,
+ "learning_rate": 6.807249451508466e-05,
+ "loss": 1.192,
+ "step": 536
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 0.3650175469778724,
+ "learning_rate": 6.801169943610929e-05,
+ "loss": 1.2141,
+ "step": 537
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 1.0643532150783557,
+ "learning_rate": 6.795077711115368e-05,
+ "loss": 1.2253,
+ "step": 538
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.5041310609130145,
+ "learning_rate": 6.788972781696363e-05,
+ "loss": 1.278,
+ "step": 539
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.5123058164360991,
+ "learning_rate": 6.782855183086177e-05,
+ "loss": 1.2231,
+ "step": 540
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.3533015702394419,
+ "learning_rate": 6.776724943074619e-05,
+ "loss": 1.2072,
+ "step": 541
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.30253964625417207,
+ "learning_rate": 6.770582089508927e-05,
+ "loss": 1.1382,
+ "step": 542
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.348991618828202,
+ "learning_rate": 6.764426650293633e-05,
+ "loss": 1.2079,
+ "step": 543
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.46017440578788743,
+ "learning_rate": 6.758258653390444e-05,
+ "loss": 1.1813,
+ "step": 544
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.31962101755594885,
+ "learning_rate": 6.75207812681811e-05,
+ "loss": 1.1339,
+ "step": 545
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.37092024548285923,
+ "learning_rate": 6.745885098652298e-05,
+ "loss": 1.2591,
+ "step": 546
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.32347106450715835,
+ "learning_rate": 6.739679597025466e-05,
+ "loss": 1.2017,
+ "step": 547
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.39250187112342494,
+ "learning_rate": 6.733461650126733e-05,
+ "loss": 1.0933,
+ "step": 548
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.473522452217324,
+ "learning_rate": 6.727231286201752e-05,
+ "loss": 1.1124,
+ "step": 549
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.4809062179622052,
+ "learning_rate": 6.720988533552582e-05,
+ "loss": 1.1585,
+ "step": 550
+ }
+ ],
+ "logging_steps": 1.0,
+ "max_steps": 1638,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 3,
+ "save_steps": 50,
+ "total_flos": 570219722440704.0,
+ "train_batch_size": 1,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/checkpoint-550/training_args.bin b/checkpoint-550/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..c5d2416a3b70bb5260978ec9996f00154a724ba7
--- /dev/null
+++ b/checkpoint-550/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b22e8f9d51a16d03a2c506fa3d1eafa8f4b1ae992992c2086a4d435ffd97387e
+size 6712
diff --git a/checkpoint-550/zero_to_fp32.py b/checkpoint-550/zero_to_fp32.py
new file mode 100755
index 0000000000000000000000000000000000000000..24cc342e78d1a006c782b3a4cd68d9ce786d8fd8
--- /dev/null
+++ b/checkpoint-550/zero_to_fp32.py
@@ -0,0 +1,604 @@
+#!/usr/bin/env python
+
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+# This script extracts fp32 consolidated weights from a zero 1, 2 and 3 DeepSpeed checkpoints. It gets
+# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
+# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
+# application.
+#
+# example: python zero_to_fp32.py . pytorch_model.bin
+
+import argparse
+import torch
+import glob
+import math
+import os
+import re
+from collections import OrderedDict
+from dataclasses import dataclass
+
+# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
+# DeepSpeed data structures it has to be available in the current python environment.
+from deepspeed.utils import logger
+from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
+
+
+@dataclass
+class zero_model_state:
+ buffers: dict()
+ param_shapes: dict()
+ shared_params: list
+ ds_version: int
+ frozen_param_shapes: dict()
+ frozen_param_fragments: dict()
+
+
+debug = 0
+
+# load to cpu
+device = torch.device('cpu')
+
+
+def atoi(text):
+ return int(text) if text.isdigit() else text
+
+
+def natural_keys(text):
+ '''
+ alist.sort(key=natural_keys) sorts in human order
+ http://nedbatchelder.com/blog/200712/human_sorting.html
+ (See Toothy's implementation in the comments)
+ '''
+ return [atoi(c) for c in re.split(r'(\d+)', text)]
+
+
+def get_model_state_file(checkpoint_dir, zero_stage):
+ if not os.path.isdir(checkpoint_dir):
+ raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
+
+ # there should be only one file
+ if zero_stage <= 2:
+ file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
+ elif zero_stage == 3:
+ file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
+
+ if not os.path.exists(file):
+ raise FileNotFoundError(f"can't find model states file at '{file}'")
+
+ return file
+
+
+def get_checkpoint_files(checkpoint_dir, glob_pattern):
+ # XXX: need to test that this simple glob rule works for multi-node setup too
+ ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
+
+ if len(ckpt_files) == 0:
+ raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
+
+ return ckpt_files
+
+
+def get_optim_files(checkpoint_dir):
+ return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
+
+
+def get_model_state_files(checkpoint_dir):
+ return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
+
+
+def parse_model_states(files):
+ zero_model_states = []
+ for file in files:
+ state_dict = torch.load(file, map_location=device)
+
+ if BUFFER_NAMES not in state_dict:
+ raise ValueError(f"{file} is not a model state checkpoint")
+ buffer_names = state_dict[BUFFER_NAMES]
+ if debug:
+ print("Found buffers:", buffer_names)
+
+ # recover just the buffers while restoring them to fp32 if they were saved in fp16
+ buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
+ param_shapes = state_dict[PARAM_SHAPES]
+
+ # collect parameters that are included in param_shapes
+ param_names = []
+ for s in param_shapes:
+ for name in s.keys():
+ param_names.append(name)
+
+ # update with frozen parameters
+ frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
+ if frozen_param_shapes is not None:
+ if debug:
+ print(f"Found frozen_param_shapes: {frozen_param_shapes}")
+ param_names += list(frozen_param_shapes.keys())
+
+ # handle shared params
+ shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
+
+ ds_version = state_dict.get(DS_VERSION, None)
+
+ frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
+
+ z_model_state = zero_model_state(buffers=buffers,
+ param_shapes=param_shapes,
+ shared_params=shared_params,
+ ds_version=ds_version,
+ frozen_param_shapes=frozen_param_shapes,
+ frozen_param_fragments=frozen_param_fragments)
+ zero_model_states.append(z_model_state)
+
+ return zero_model_states
+
+
+def parse_optim_states(files, ds_checkpoint_dir):
+
+ total_files = len(files)
+ state_dicts = []
+ for f in files:
+ state_dict = torch.load(f, map_location=device)
+ # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights
+ # and also handle the case where it was already removed by another helper script
+ state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
+ state_dicts.append(state_dict)
+
+ if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]:
+ raise ValueError(f"{files[0]} is not a zero checkpoint")
+ zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
+ world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
+
+ # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
+ # parameters can be different from data parallelism for non-expert parameters. So we can just
+ # use the max of the partition_count to get the dp world_size.
+
+ if type(world_size) is list:
+ world_size = max(world_size)
+
+ if world_size != total_files:
+ raise ValueError(
+ f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
+ "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
+ )
+
+ # the groups are named differently in each stage
+ if zero_stage <= 2:
+ fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
+ elif zero_stage == 3:
+ fp32_groups_key = FP32_FLAT_GROUPS
+ else:
+ raise ValueError(f"unknown zero stage {zero_stage}")
+
+ if zero_stage <= 2:
+ fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
+ elif zero_stage == 3:
+ # if there is more than one param group, there will be multiple flattened tensors - one
+ # flattened tensor per group - for simplicity merge them into a single tensor
+ #
+ # XXX: could make the script more memory efficient for when there are multiple groups - it
+ # will require matching the sub-lists of param_shapes for each param group flattened tensor
+
+ fp32_flat_groups = [
+ torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
+ ]
+
+ return zero_stage, world_size, fp32_flat_groups
+
+
+def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
+ """
+ Returns fp32 state_dict reconstructed from ds checkpoint
+
+ Args:
+ - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
+
+ """
+ print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
+
+ optim_files = get_optim_files(ds_checkpoint_dir)
+ zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
+ print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
+
+ model_files = get_model_state_files(ds_checkpoint_dir)
+
+ zero_model_states = parse_model_states(model_files)
+ print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
+
+ if zero_stage <= 2:
+ return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters)
+ elif zero_stage == 3:
+ return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters)
+
+
+def _zero2_merge_frozen_params(state_dict, zero_model_states):
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+ return
+
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+ frozen_param_fragments = zero_model_states[0].frozen_param_fragments
+
+ if debug:
+ num_elem = sum(s.numel() for s in frozen_param_shapes.values())
+ print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+ wanted_params = len(frozen_param_shapes)
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+ avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
+ print(f'Frozen params: Have {avail_numel} numels to process.')
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+ total_params = 0
+ total_numel = 0
+ for name, shape in frozen_param_shapes.items():
+ total_params += 1
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+
+ state_dict[name] = frozen_param_fragments[name]
+
+ if debug:
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _has_callable(obj, fn):
+ attr = getattr(obj, fn, None)
+ return callable(attr)
+
+
+def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+ param_shapes = zero_model_states[0].param_shapes
+
+ # Reconstruction protocol:
+ #
+ # XXX: document this
+
+ if debug:
+ for i in range(world_size):
+ for j in range(len(fp32_flat_groups[0])):
+ print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
+
+ # XXX: memory usage doubles here (zero2)
+ num_param_groups = len(fp32_flat_groups[0])
+ merged_single_partition_of_fp32_groups = []
+ for i in range(num_param_groups):
+ merged_partitions = [sd[i] for sd in fp32_flat_groups]
+ full_single_fp32_vector = torch.cat(merged_partitions, 0)
+ merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
+ avail_numel = sum(
+ [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
+
+ if debug:
+ wanted_params = sum([len(shapes) for shapes in param_shapes])
+ wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
+ # not asserting if there is a mismatch due to possible padding
+ print(f"Have {avail_numel} numels to process.")
+ print(f"Need {wanted_numel} numels in {wanted_params} params.")
+
+ # params
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+ # out-of-core computing solution
+ total_numel = 0
+ total_params = 0
+ for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
+ offset = 0
+ avail_numel = full_single_fp32_vector.numel()
+ for name, shape in shapes.items():
+
+ unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
+ total_numel += unpartitioned_numel
+ total_params += 1
+
+ if debug:
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+ state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
+ offset += unpartitioned_numel
+
+ # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
+ # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
+ # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
+ # live optimizer object, so we are checking that the numbers are within the right range
+ align_to = 2 * world_size
+
+ def zero2_align(x):
+ return align_to * math.ceil(x / align_to)
+
+ if debug:
+ print(f"original offset={offset}, avail_numel={avail_numel}")
+
+ offset = zero2_align(offset)
+ avail_numel = zero2_align(avail_numel)
+
+ if debug:
+ print(f"aligned offset={offset}, avail_numel={avail_numel}")
+
+ # Sanity check
+ if offset != avail_numel:
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+ print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters):
+ state_dict = OrderedDict()
+
+ # buffers
+ buffers = zero_model_states[0].buffers
+ state_dict.update(buffers)
+ if debug:
+ print(f"added {len(buffers)} buffers")
+
+ if not exclude_frozen_parameters:
+ _zero2_merge_frozen_params(state_dict, zero_model_states)
+
+ _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+ # recover shared parameters
+ for pair in zero_model_states[0].shared_params:
+ if pair[1] in state_dict:
+ state_dict[pair[0]] = state_dict[pair[1]]
+
+ return state_dict
+
+
+def zero3_partitioned_param_info(unpartitioned_numel, world_size):
+ remainder = unpartitioned_numel % world_size
+ padding_numel = (world_size - remainder) if remainder else 0
+ partitioned_numel = math.ceil(unpartitioned_numel / world_size)
+ return partitioned_numel, padding_numel
+
+
+def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+ return
+
+ if debug:
+ for i in range(world_size):
+ num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
+ print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+ wanted_params = len(frozen_param_shapes)
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+ avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
+ print(f'Frozen params: Have {avail_numel} numels to process.')
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+ total_params = 0
+ total_numel = 0
+ for name, shape in zero_model_states[0].frozen_param_shapes.items():
+ total_params += 1
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+
+ param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
+ state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
+
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+ if debug:
+ print(
+ f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+ )
+
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+ param_shapes = zero_model_states[0].param_shapes
+ avail_numel = fp32_flat_groups[0].numel() * world_size
+ # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
+ # param, re-consolidating each param, while dealing with padding if any
+
+ # merge list of dicts, preserving order
+ param_shapes = {k: v for d in param_shapes for k, v in d.items()}
+
+ if debug:
+ for i in range(world_size):
+ print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
+
+ wanted_params = len(param_shapes)
+ wanted_numel = sum(shape.numel() for shape in param_shapes.values())
+ # not asserting if there is a mismatch due to possible padding
+ avail_numel = fp32_flat_groups[0].numel() * world_size
+ print(f"Trainable params: Have {avail_numel} numels to process.")
+ print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
+
+ # params
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+ # out-of-core computing solution
+ offset = 0
+ total_numel = 0
+ total_params = 0
+ for name, shape in param_shapes.items():
+
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+ total_params += 1
+
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+ if debug:
+ print(
+ f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+ )
+
+ # XXX: memory usage doubles here
+ state_dict[name] = torch.cat(
+ tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
+ 0).narrow(0, 0, unpartitioned_numel).view(shape)
+ offset += partitioned_numel
+
+ offset *= world_size
+
+ # Sanity check
+ if offset != avail_numel:
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+ print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters):
+ state_dict = OrderedDict()
+
+ # buffers
+ buffers = zero_model_states[0].buffers
+ state_dict.update(buffers)
+ if debug:
+ print(f"added {len(buffers)} buffers")
+
+ if not exclude_frozen_parameters:
+ _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
+
+ _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+ # recover shared parameters
+ for pair in zero_model_states[0].shared_params:
+ if pair[1] in state_dict:
+ state_dict[pair[0]] = state_dict[pair[1]]
+
+ return state_dict
+
+
+def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None, exclude_frozen_parameters=False):
+ """
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
+ ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
+ via a model hub.
+
+ Args:
+ - ``checkpoint_dir``: path to the desired checkpoint folder
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
+ - ``exclude_frozen_parameters``: exclude frozen parameters
+
+ Returns:
+ - pytorch ``state_dict``
+
+ Note: this approach may not work if your application doesn't have sufficient free CPU memory and
+ you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
+ the checkpoint.
+
+ A typical usage might be ::
+
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+ # do the training and checkpoint saving
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
+ model = model.cpu() # move to cpu
+ model.load_state_dict(state_dict)
+ # submit to model hub or save the model to share with others
+
+ In this example the ``model`` will no longer be usable in the deepspeed context of the same
+ application. i.e. you will need to re-initialize the deepspeed engine, since
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+ If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
+
+ """
+ if tag is None:
+ latest_path = os.path.join(checkpoint_dir, 'latest')
+ if os.path.isfile(latest_path):
+ with open(latest_path, 'r') as fd:
+ tag = fd.read().strip()
+ else:
+ raise ValueError(f"Unable to find 'latest' file at {latest_path}")
+
+ ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
+
+ if not os.path.isdir(ds_checkpoint_dir):
+ raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
+
+ return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
+
+
+def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None, exclude_frozen_parameters=False):
+ """
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
+ loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
+
+ Args:
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+ - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+ - ``exclude_frozen_parameters``: exclude frozen parameters
+ """
+
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag, exclude_frozen_parameters)
+ print(f"Saving fp32 state dict to {output_file}")
+ torch.save(state_dict, output_file)
+
+
+def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
+ """
+ 1. Put the provided model to cpu
+ 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
+ 3. Load it into the provided model
+
+ Args:
+ - ``model``: the model object to update
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+
+ Returns:
+ - ``model`: modified model
+
+ Make sure you have plenty of CPU memory available before you call this function. If you don't
+ have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
+ conveniently placed for you in the checkpoint folder.
+
+ A typical usage might be ::
+
+ from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
+ model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
+ # submit to model hub or save the model to share with others
+
+ Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context
+ of the same application. i.e. you will need to re-initialize the deepspeed engine, since
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+ """
+ logger.info(f"Extracting fp32 weights")
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+
+ logger.info(f"Overwriting model with fp32 weights")
+ model = model.cpu()
+ model.load_state_dict(state_dict, strict=False)
+
+ return model
+
+
+if __name__ == "__main__":
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument("checkpoint_dir",
+ type=str,
+ help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
+ parser.add_argument(
+ "output_file",
+ type=str,
+ help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)")
+ parser.add_argument("-t",
+ "--tag",
+ type=str,
+ default=None,
+ help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
+ parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
+ parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
+ args = parser.parse_args()
+
+ debug = args.debug
+
+ convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
+ args.output_file,
+ tag=args.tag,
+ exclude_frozen_parameters=args.exclude_frozen_parameters)
diff --git a/checkpoint-600/README.md b/checkpoint-600/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..16b1eacdd9353dec380a08ee77ce6ed5ab50f12e
--- /dev/null
+++ b/checkpoint-600/README.md
@@ -0,0 +1,202 @@
+---
+library_name: peft
+base_model: gotzmann/uni
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+### Framework versions
+
+- PEFT 0.10.0
\ No newline at end of file
diff --git a/checkpoint-600/adapter_config.json b/checkpoint-600/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3cd6dba5d79f7ca21fd4ad465cbbcac1e0960476
--- /dev/null
+++ b/checkpoint-600/adapter_config.json
@@ -0,0 +1,31 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "gotzmann/uni",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 128,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 128,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "v_proj",
+ "k_proj",
+ "q_proj",
+ "o_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": true
+}
\ No newline at end of file
diff --git a/checkpoint-600/adapter_model.safetensors b/checkpoint-600/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..00ef5b34c7f75ce63bd1ab43eba0fb951610c198
--- /dev/null
+++ b/checkpoint-600/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cb297e9519efcd7ab54e95cd64b61847178435f7c7a6642c0652f43ae35aa26c
+size 1048664848
diff --git a/checkpoint-600/global_step600/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt b/checkpoint-600/global_step600/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..7023dc633db2a0cfae7a2d0ca0db132574a681c1
--- /dev/null
+++ b/checkpoint-600/global_step600/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:daeed6817a52847b6785965455a6a15eea57c4c3f7b5c1d035202bd339ac01aa
+size 787270042
diff --git a/checkpoint-600/global_step600/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt b/checkpoint-600/global_step600/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..56397218fc74e5af1abfd22837de5c8130c21785
--- /dev/null
+++ b/checkpoint-600/global_step600/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:085252fbe925832c9a5c5baa1d5c2a225a822143477e003587bb8d6d593445da
+size 787270042
diff --git a/checkpoint-600/global_step600/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt b/checkpoint-600/global_step600/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..72584de54afdf8e52b6be39c84c9771d8e81cdf9
--- /dev/null
+++ b/checkpoint-600/global_step600/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8c1974b5b430b34e0c4964eff3ebf140668ec51f5693994ddc330df75c29ada0
+size 787270042
diff --git a/checkpoint-600/global_step600/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt b/checkpoint-600/global_step600/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..abec2f2b3b33d3b329d16cdb92ff651fd9301363
--- /dev/null
+++ b/checkpoint-600/global_step600/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8f4be8a150d37f7afb036dded8b756296292589581e3bcdbb6d80e2e43992cce
+size 787270042
diff --git a/checkpoint-600/global_step600/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt b/checkpoint-600/global_step600/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..cb3f8c58437980f0fcd84aa5ec73104c4920acbd
--- /dev/null
+++ b/checkpoint-600/global_step600/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7849b2d8ab9ef87c7b1c3b693e9aba355550030bbbb6fb2b2b9a36bcfe23073f
+size 787270042
diff --git a/checkpoint-600/global_step600/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt b/checkpoint-600/global_step600/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..1a76ccf8db17da6de0e2365e6ee5d1052bb52b8b
--- /dev/null
+++ b/checkpoint-600/global_step600/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ec1659e11ad3c6a1cfc6acdad2e71d862ede7a846bf76295aef5cb7ea7722af7
+size 787270042
diff --git a/checkpoint-600/global_step600/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt b/checkpoint-600/global_step600/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..119bc348311672b4625013ca7b81510c8b482101
--- /dev/null
+++ b/checkpoint-600/global_step600/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:10ff0aeba9ef61ecf988f2448c881a742db7dde7b78faa460dfd424a5549fb3a
+size 787270042
diff --git a/checkpoint-600/global_step600/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt b/checkpoint-600/global_step600/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..291253731f84c4c7cf1b34a9297441f87fd417d8
--- /dev/null
+++ b/checkpoint-600/global_step600/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9fe648c37b5e0dab4a6387a25b61ce0fbf1ce696a3f8fdbbf6004fe1dedd8b15
+size 787270042
diff --git a/checkpoint-600/global_step600/zero_pp_rank_0_mp_rank_00_model_states.pt b/checkpoint-600/global_step600/zero_pp_rank_0_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..b0700bada730a121c64947397b7fd736206a55bd
--- /dev/null
+++ b/checkpoint-600/global_step600/zero_pp_rank_0_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ade19297ad74c4436df14f6ff0bbbdf6b3b6e8f902768d5a6ec8fdbb9ca7ddf6
+size 653742
diff --git a/checkpoint-600/global_step600/zero_pp_rank_1_mp_rank_00_model_states.pt b/checkpoint-600/global_step600/zero_pp_rank_1_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..8becc70b7945368127b629c1de0adb0a65a0cc12
--- /dev/null
+++ b/checkpoint-600/global_step600/zero_pp_rank_1_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:94f74c568df86e5e544de53354ad2f324e1a6b8137e4be21d31c40fb1ed3e37c
+size 653742
diff --git a/checkpoint-600/global_step600/zero_pp_rank_2_mp_rank_00_model_states.pt b/checkpoint-600/global_step600/zero_pp_rank_2_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..65e978d97fffa7f7d694ffd688f0717ba5f76ecb
--- /dev/null
+++ b/checkpoint-600/global_step600/zero_pp_rank_2_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7cc460d9a4ee1728057abe7d0b24cc1916fc623a84e2bd17492f81de07e2a604
+size 653742
diff --git a/checkpoint-600/global_step600/zero_pp_rank_3_mp_rank_00_model_states.pt b/checkpoint-600/global_step600/zero_pp_rank_3_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..efda9fd64665fe84134fe01aaf555b078df6cff0
--- /dev/null
+++ b/checkpoint-600/global_step600/zero_pp_rank_3_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7112e5a6d3e6832046289f5a5e7a568fe717e312eda184c2d837559711f8a0b3
+size 653742
diff --git a/checkpoint-600/global_step600/zero_pp_rank_4_mp_rank_00_model_states.pt b/checkpoint-600/global_step600/zero_pp_rank_4_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..967831a68e7b2b25beb65d0dc585f50ed5534b83
--- /dev/null
+++ b/checkpoint-600/global_step600/zero_pp_rank_4_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:85a6aeeb1ee3bdb8e84c808249460b9dcbd4cb755be163aa3e1864825cf67ee2
+size 653742
diff --git a/checkpoint-600/global_step600/zero_pp_rank_5_mp_rank_00_model_states.pt b/checkpoint-600/global_step600/zero_pp_rank_5_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..7e098d9cd120716f3ddd0e1f993a0370e3227ad3
--- /dev/null
+++ b/checkpoint-600/global_step600/zero_pp_rank_5_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:72c5926bc2ef060a8067a89a75d566967f25eb34a4f1d2c603701b9bdb27b7a8
+size 653742
diff --git a/checkpoint-600/global_step600/zero_pp_rank_6_mp_rank_00_model_states.pt b/checkpoint-600/global_step600/zero_pp_rank_6_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..41a1ede5829046a80cf400c7a3e546bb60700045
--- /dev/null
+++ b/checkpoint-600/global_step600/zero_pp_rank_6_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2aad8cf76a774f0a37ab26f6a2c0513015b27658e3cef347da37dbcc6d56ad6f
+size 653742
diff --git a/checkpoint-600/global_step600/zero_pp_rank_7_mp_rank_00_model_states.pt b/checkpoint-600/global_step600/zero_pp_rank_7_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..4140915f67888daf6d80b922b6ee5d8f4ace4f44
--- /dev/null
+++ b/checkpoint-600/global_step600/zero_pp_rank_7_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a2efd459e97a86e45a83990e4707bc026470c35737b755d2ea70551b0cbdea6c
+size 653742
diff --git a/checkpoint-600/latest b/checkpoint-600/latest
new file mode 100644
index 0000000000000000000000000000000000000000..12cae1adf3af8546b4141c6f62261c8e99839a54
--- /dev/null
+++ b/checkpoint-600/latest
@@ -0,0 +1 @@
+global_step600
\ No newline at end of file
diff --git a/checkpoint-600/rng_state_0.pth b/checkpoint-600/rng_state_0.pth
new file mode 100644
index 0000000000000000000000000000000000000000..4e5b7e2ec90fdb824c8932464c1d9068330655a7
--- /dev/null
+++ b/checkpoint-600/rng_state_0.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:36d2a2034ebb05cb71c510897f2795b31164e50f17b270bc25d2be3ad9a17b22
+size 15984
diff --git a/checkpoint-600/rng_state_1.pth b/checkpoint-600/rng_state_1.pth
new file mode 100644
index 0000000000000000000000000000000000000000..7d8d7722fc72cab6d492b76cb99c8177dcc47544
--- /dev/null
+++ b/checkpoint-600/rng_state_1.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:060dfdb1c49102cbdc8868a6031e68787601b4ccd782f3fb9b137e20c1fd2c7a
+size 15984
diff --git a/checkpoint-600/rng_state_2.pth b/checkpoint-600/rng_state_2.pth
new file mode 100644
index 0000000000000000000000000000000000000000..3c9f84eff30cfa9ea1feedaf262d61fb12e4cba7
--- /dev/null
+++ b/checkpoint-600/rng_state_2.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af01895cb66e616591f2e4baa8dcd8151530eab133c73571ccb31c74f35422ce
+size 15984
diff --git a/checkpoint-600/rng_state_3.pth b/checkpoint-600/rng_state_3.pth
new file mode 100644
index 0000000000000000000000000000000000000000..6eebfb928f8e91eff0ea1645a20b5aa4465c705b
--- /dev/null
+++ b/checkpoint-600/rng_state_3.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:677921992b1e0cef3aee776f245975003d22f51d9bd6ed20f248ded1deb72fa9
+size 15984
diff --git a/checkpoint-600/rng_state_4.pth b/checkpoint-600/rng_state_4.pth
new file mode 100644
index 0000000000000000000000000000000000000000..0866030a266c6d003cc378a9418a723f69e8ab99
--- /dev/null
+++ b/checkpoint-600/rng_state_4.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d69353c629541c690c5471f8ec05fdab2bfecf3d37afaa436bc45939da6db68f
+size 15984
diff --git a/checkpoint-600/rng_state_5.pth b/checkpoint-600/rng_state_5.pth
new file mode 100644
index 0000000000000000000000000000000000000000..554638d77107f832d7aa51c61645ee2d6c48a36d
--- /dev/null
+++ b/checkpoint-600/rng_state_5.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8e40ba6668cc03c9162c68a933d164bf38ae2d196a9a6fec03ae615491201185
+size 15984
diff --git a/checkpoint-600/rng_state_6.pth b/checkpoint-600/rng_state_6.pth
new file mode 100644
index 0000000000000000000000000000000000000000..964331b65172a1bcac03e4673415fa787f724268
--- /dev/null
+++ b/checkpoint-600/rng_state_6.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:870968fea834e24b2e099cf3e4fe1e3fb8caf38d8f8e5b790d7d47386d4d05f5
+size 15984
diff --git a/checkpoint-600/rng_state_7.pth b/checkpoint-600/rng_state_7.pth
new file mode 100644
index 0000000000000000000000000000000000000000..cd4754d65217d0f9d1f2d3334397df7a8a079652
--- /dev/null
+++ b/checkpoint-600/rng_state_7.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e9e19618bee7c6ef43256fea25abe19bca88535eb1e7dc213cde8929ae4e8180
+size 15984
diff --git a/checkpoint-600/scheduler.pt b/checkpoint-600/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..5e77672ea908f9d30363be50ef230174a2d8afc1
--- /dev/null
+++ b/checkpoint-600/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:89fa8d447eeca80a7f7254134cf50626f0ba8da3de27a47f21151e30f33f4960
+size 1064
diff --git a/checkpoint-600/special_tokens_map.json b/checkpoint-600/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..72ecfeeb7e14d244c936169d2ed139eeae235ef1
--- /dev/null
+++ b/checkpoint-600/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "",
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/checkpoint-600/tokenizer.model b/checkpoint-600/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..6c00c742ce03c627d6cd5b795984876fa49fa899
--- /dev/null
+++ b/checkpoint-600/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
diff --git a/checkpoint-600/tokenizer_config.json b/checkpoint-600/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..bb5a9f09d8c0f3c32c66fc6118fe5c76c5c6fd90
--- /dev/null
+++ b/checkpoint-600/tokenizer_config.json
@@ -0,0 +1,45 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "",
+ "chat_template": "{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ '' + '### System:\\n\\n' + system_message }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '\\n\\n### Human:\\n\\n' + content }}{% elif message['role'] == 'assistant' %}{{ '\\n\\n### Assistant:\\n\\n' + content + '' }}{% endif %}{% endfor %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "",
+ "legacy": false,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "split_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "",
+ "use_default_system_prompt": false
+}
diff --git a/checkpoint-600/trainer_state.json b/checkpoint-600/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..fc81282be29d70d8028b799bd2261064cee77c34
--- /dev/null
+++ b/checkpoint-600/trainer_state.json
@@ -0,0 +1,4221 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 1.0973936899862826,
+ "eval_steps": 500,
+ "global_step": 600,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.0,
+ "grad_norm": 0.849355824164473,
+ "learning_rate": 4.878048780487805e-07,
+ "loss": 1.3655,
+ "step": 1
+ },
+ {
+ "epoch": 0.0,
+ "grad_norm": 10.01567518957158,
+ "learning_rate": 9.75609756097561e-07,
+ "loss": 1.5767,
+ "step": 2
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.6466000875559635,
+ "learning_rate": 1.4634146341463414e-06,
+ "loss": 1.3913,
+ "step": 3
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.6644565932010504,
+ "learning_rate": 1.951219512195122e-06,
+ "loss": 1.3218,
+ "step": 4
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.571354207588475,
+ "learning_rate": 2.4390243902439027e-06,
+ "loss": 1.3597,
+ "step": 5
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.31036262839244955,
+ "learning_rate": 2.926829268292683e-06,
+ "loss": 1.2832,
+ "step": 6
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.2622135027188184,
+ "learning_rate": 3.414634146341464e-06,
+ "loss": 1.2161,
+ "step": 7
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.296824630261661,
+ "learning_rate": 3.902439024390244e-06,
+ "loss": 1.2985,
+ "step": 8
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.2557267467361569,
+ "learning_rate": 4.390243902439025e-06,
+ "loss": 1.3175,
+ "step": 9
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.23418939513890769,
+ "learning_rate": 4.8780487804878055e-06,
+ "loss": 1.2617,
+ "step": 10
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.2364760983285843,
+ "learning_rate": 5.365853658536586e-06,
+ "loss": 1.3103,
+ "step": 11
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.23893034721889,
+ "learning_rate": 5.853658536585366e-06,
+ "loss": 1.2405,
+ "step": 12
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.25563593295485887,
+ "learning_rate": 6.341463414634147e-06,
+ "loss": 1.2831,
+ "step": 13
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.23239975352661665,
+ "learning_rate": 6.829268292682928e-06,
+ "loss": 1.3125,
+ "step": 14
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.3092813858209507,
+ "learning_rate": 7.317073170731707e-06,
+ "loss": 1.2422,
+ "step": 15
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.282563380367434,
+ "learning_rate": 7.804878048780489e-06,
+ "loss": 1.2453,
+ "step": 16
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.22065680088315018,
+ "learning_rate": 8.292682926829268e-06,
+ "loss": 1.2491,
+ "step": 17
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.22777800877980184,
+ "learning_rate": 8.78048780487805e-06,
+ "loss": 1.2655,
+ "step": 18
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.22145212540177928,
+ "learning_rate": 9.268292682926831e-06,
+ "loss": 1.2413,
+ "step": 19
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.22482351883112714,
+ "learning_rate": 9.756097560975611e-06,
+ "loss": 1.2653,
+ "step": 20
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.20823080508385733,
+ "learning_rate": 1.024390243902439e-05,
+ "loss": 1.2374,
+ "step": 21
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.26025492562935737,
+ "learning_rate": 1.0731707317073172e-05,
+ "loss": 1.2065,
+ "step": 22
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.2150252124176173,
+ "learning_rate": 1.1219512195121953e-05,
+ "loss": 1.2782,
+ "step": 23
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.2505915177425618,
+ "learning_rate": 1.1707317073170731e-05,
+ "loss": 1.2742,
+ "step": 24
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.20129223044786942,
+ "learning_rate": 1.2195121951219513e-05,
+ "loss": 1.3366,
+ "step": 25
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.1973508510397107,
+ "learning_rate": 1.2682926829268294e-05,
+ "loss": 1.2476,
+ "step": 26
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.27103325392437194,
+ "learning_rate": 1.3170731707317076e-05,
+ "loss": 1.2325,
+ "step": 27
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.17954976411006285,
+ "learning_rate": 1.3658536585365855e-05,
+ "loss": 1.2523,
+ "step": 28
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.22216997851088888,
+ "learning_rate": 1.4146341463414635e-05,
+ "loss": 1.3297,
+ "step": 29
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.2071458864548587,
+ "learning_rate": 1.4634146341463415e-05,
+ "loss": 1.2127,
+ "step": 30
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.18039422081622164,
+ "learning_rate": 1.5121951219512196e-05,
+ "loss": 1.2509,
+ "step": 31
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.18631254372974412,
+ "learning_rate": 1.5609756097560978e-05,
+ "loss": 1.2247,
+ "step": 32
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.18843872523649827,
+ "learning_rate": 1.6097560975609757e-05,
+ "loss": 1.195,
+ "step": 33
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.2163847267778325,
+ "learning_rate": 1.6585365853658537e-05,
+ "loss": 1.2179,
+ "step": 34
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.19687688475496104,
+ "learning_rate": 1.7073170731707317e-05,
+ "loss": 1.2763,
+ "step": 35
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.20409643064887947,
+ "learning_rate": 1.75609756097561e-05,
+ "loss": 1.253,
+ "step": 36
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.1879182661759335,
+ "learning_rate": 1.804878048780488e-05,
+ "loss": 1.2586,
+ "step": 37
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.19400648948514373,
+ "learning_rate": 1.8536585365853663e-05,
+ "loss": 1.2154,
+ "step": 38
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.1878879343148452,
+ "learning_rate": 1.902439024390244e-05,
+ "loss": 1.2304,
+ "step": 39
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.17687475469924052,
+ "learning_rate": 1.9512195121951222e-05,
+ "loss": 1.2351,
+ "step": 40
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.18223935625384885,
+ "learning_rate": 2e-05,
+ "loss": 1.2222,
+ "step": 41
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.1943061629408338,
+ "learning_rate": 2.048780487804878e-05,
+ "loss": 1.2044,
+ "step": 42
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.17027514338700078,
+ "learning_rate": 2.0975609756097564e-05,
+ "loss": 1.1548,
+ "step": 43
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.18553769630586192,
+ "learning_rate": 2.1463414634146344e-05,
+ "loss": 1.2721,
+ "step": 44
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.19732826914228765,
+ "learning_rate": 2.1951219512195124e-05,
+ "loss": 1.3097,
+ "step": 45
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.18714230986631472,
+ "learning_rate": 2.2439024390243907e-05,
+ "loss": 1.2662,
+ "step": 46
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.19988987568002223,
+ "learning_rate": 2.2926829268292683e-05,
+ "loss": 1.2904,
+ "step": 47
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.17744650133390918,
+ "learning_rate": 2.3414634146341463e-05,
+ "loss": 1.1825,
+ "step": 48
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.16576734763834533,
+ "learning_rate": 2.3902439024390246e-05,
+ "loss": 1.1858,
+ "step": 49
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.179591794065527,
+ "learning_rate": 2.4390243902439026e-05,
+ "loss": 1.2711,
+ "step": 50
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.17923464471176911,
+ "learning_rate": 2.4878048780487805e-05,
+ "loss": 1.2289,
+ "step": 51
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.18991742907836837,
+ "learning_rate": 2.536585365853659e-05,
+ "loss": 1.3097,
+ "step": 52
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.19849796137254636,
+ "learning_rate": 2.5853658536585368e-05,
+ "loss": 1.2489,
+ "step": 53
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.17452371110976383,
+ "learning_rate": 2.634146341463415e-05,
+ "loss": 1.2461,
+ "step": 54
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.17671022353085036,
+ "learning_rate": 2.682926829268293e-05,
+ "loss": 1.153,
+ "step": 55
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.36820559192096686,
+ "learning_rate": 2.731707317073171e-05,
+ "loss": 1.2431,
+ "step": 56
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.20331468526494198,
+ "learning_rate": 2.7804878048780487e-05,
+ "loss": 1.2575,
+ "step": 57
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.2402486598118377,
+ "learning_rate": 2.829268292682927e-05,
+ "loss": 1.2538,
+ "step": 58
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.2549409484173144,
+ "learning_rate": 2.878048780487805e-05,
+ "loss": 1.2065,
+ "step": 59
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.2053105349872685,
+ "learning_rate": 2.926829268292683e-05,
+ "loss": 1.2094,
+ "step": 60
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.17971910872957886,
+ "learning_rate": 2.9756097560975613e-05,
+ "loss": 1.228,
+ "step": 61
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.1885853654992973,
+ "learning_rate": 3.0243902439024392e-05,
+ "loss": 1.2286,
+ "step": 62
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.1848524571968613,
+ "learning_rate": 3.073170731707317e-05,
+ "loss": 1.2718,
+ "step": 63
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.18734105883548513,
+ "learning_rate": 3.1219512195121955e-05,
+ "loss": 1.2357,
+ "step": 64
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.17774668052121825,
+ "learning_rate": 3.170731707317074e-05,
+ "loss": 1.1509,
+ "step": 65
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.17890968008080646,
+ "learning_rate": 3.2195121951219514e-05,
+ "loss": 1.1924,
+ "step": 66
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.18249273371332375,
+ "learning_rate": 3.268292682926829e-05,
+ "loss": 1.2545,
+ "step": 67
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.21064122671902577,
+ "learning_rate": 3.3170731707317074e-05,
+ "loss": 1.2832,
+ "step": 68
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.1820064171955093,
+ "learning_rate": 3.365853658536586e-05,
+ "loss": 1.2071,
+ "step": 69
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.16996662800553433,
+ "learning_rate": 3.414634146341463e-05,
+ "loss": 1.2073,
+ "step": 70
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.1618669302922445,
+ "learning_rate": 3.4634146341463416e-05,
+ "loss": 1.1289,
+ "step": 71
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.18948744950985544,
+ "learning_rate": 3.51219512195122e-05,
+ "loss": 1.2915,
+ "step": 72
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.18326143691603383,
+ "learning_rate": 3.5609756097560976e-05,
+ "loss": 1.2238,
+ "step": 73
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.17410704510700503,
+ "learning_rate": 3.609756097560976e-05,
+ "loss": 1.1784,
+ "step": 74
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.1983667344995625,
+ "learning_rate": 3.658536585365854e-05,
+ "loss": 1.2452,
+ "step": 75
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.3416310763369357,
+ "learning_rate": 3.7073170731707325e-05,
+ "loss": 1.1972,
+ "step": 76
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.2776466983511955,
+ "learning_rate": 3.75609756097561e-05,
+ "loss": 1.3121,
+ "step": 77
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.20026129636576834,
+ "learning_rate": 3.804878048780488e-05,
+ "loss": 1.2436,
+ "step": 78
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.21064549243917835,
+ "learning_rate": 3.853658536585366e-05,
+ "loss": 1.2064,
+ "step": 79
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.22119482175714267,
+ "learning_rate": 3.9024390243902444e-05,
+ "loss": 1.2715,
+ "step": 80
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.23047133748844142,
+ "learning_rate": 3.951219512195122e-05,
+ "loss": 1.2888,
+ "step": 81
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.18741863156973176,
+ "learning_rate": 4e-05,
+ "loss": 1.248,
+ "step": 82
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.1747859810629604,
+ "learning_rate": 4.0487804878048786e-05,
+ "loss": 1.1683,
+ "step": 83
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.1896944798413341,
+ "learning_rate": 4.097560975609756e-05,
+ "loss": 1.2155,
+ "step": 84
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.18724128114363303,
+ "learning_rate": 4.1463414634146346e-05,
+ "loss": 1.2273,
+ "step": 85
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.17368125504855478,
+ "learning_rate": 4.195121951219513e-05,
+ "loss": 1.224,
+ "step": 86
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.18371141013625703,
+ "learning_rate": 4.2439024390243905e-05,
+ "loss": 1.2294,
+ "step": 87
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.1791029365673714,
+ "learning_rate": 4.292682926829269e-05,
+ "loss": 1.2895,
+ "step": 88
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.20259974283859655,
+ "learning_rate": 4.341463414634147e-05,
+ "loss": 1.1841,
+ "step": 89
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.17457456183272174,
+ "learning_rate": 4.390243902439025e-05,
+ "loss": 1.2357,
+ "step": 90
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.1815824380789748,
+ "learning_rate": 4.439024390243903e-05,
+ "loss": 1.2304,
+ "step": 91
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.17566480599583392,
+ "learning_rate": 4.4878048780487814e-05,
+ "loss": 1.242,
+ "step": 92
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.18422975005984474,
+ "learning_rate": 4.536585365853658e-05,
+ "loss": 1.2177,
+ "step": 93
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.16796781877940678,
+ "learning_rate": 4.5853658536585366e-05,
+ "loss": 1.1482,
+ "step": 94
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.18636131653783305,
+ "learning_rate": 4.634146341463415e-05,
+ "loss": 1.1758,
+ "step": 95
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.1823665700289814,
+ "learning_rate": 4.6829268292682926e-05,
+ "loss": 1.289,
+ "step": 96
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.1719900691262439,
+ "learning_rate": 4.731707317073171e-05,
+ "loss": 1.1626,
+ "step": 97
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.17937994168039778,
+ "learning_rate": 4.780487804878049e-05,
+ "loss": 1.175,
+ "step": 98
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.16631851422106986,
+ "learning_rate": 4.829268292682927e-05,
+ "loss": 1.2177,
+ "step": 99
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.19143696232800309,
+ "learning_rate": 4.878048780487805e-05,
+ "loss": 1.3071,
+ "step": 100
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.17859506638780318,
+ "learning_rate": 4.9268292682926835e-05,
+ "loss": 1.2351,
+ "step": 101
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.18381520321248196,
+ "learning_rate": 4.975609756097561e-05,
+ "loss": 1.2342,
+ "step": 102
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.17968218683773912,
+ "learning_rate": 5.0243902439024394e-05,
+ "loss": 1.2074,
+ "step": 103
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.18139489969339018,
+ "learning_rate": 5.073170731707318e-05,
+ "loss": 1.1558,
+ "step": 104
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.17366624842514394,
+ "learning_rate": 5.121951219512195e-05,
+ "loss": 1.1897,
+ "step": 105
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.16034845455223745,
+ "learning_rate": 5.1707317073170736e-05,
+ "loss": 1.179,
+ "step": 106
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.17583069577827776,
+ "learning_rate": 5.219512195121952e-05,
+ "loss": 1.1856,
+ "step": 107
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.1853758076989552,
+ "learning_rate": 5.26829268292683e-05,
+ "loss": 1.2072,
+ "step": 108
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.19597443965936462,
+ "learning_rate": 5.317073170731708e-05,
+ "loss": 1.2271,
+ "step": 109
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.1899206334098331,
+ "learning_rate": 5.365853658536586e-05,
+ "loss": 1.1961,
+ "step": 110
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.17463763837757018,
+ "learning_rate": 5.4146341463414645e-05,
+ "loss": 1.2049,
+ "step": 111
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.20431371701229986,
+ "learning_rate": 5.463414634146342e-05,
+ "loss": 1.2891,
+ "step": 112
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1814475107638498,
+ "learning_rate": 5.51219512195122e-05,
+ "loss": 1.2346,
+ "step": 113
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1883849423207823,
+ "learning_rate": 5.5609756097560974e-05,
+ "loss": 1.244,
+ "step": 114
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1857258128640568,
+ "learning_rate": 5.609756097560976e-05,
+ "loss": 1.2669,
+ "step": 115
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1740768514118401,
+ "learning_rate": 5.658536585365854e-05,
+ "loss": 1.2414,
+ "step": 116
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1919320335584178,
+ "learning_rate": 5.7073170731707317e-05,
+ "loss": 1.2886,
+ "step": 117
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.18288775167828136,
+ "learning_rate": 5.75609756097561e-05,
+ "loss": 1.1875,
+ "step": 118
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.18208588867750863,
+ "learning_rate": 5.804878048780488e-05,
+ "loss": 1.2388,
+ "step": 119
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.1743260015658331,
+ "learning_rate": 5.853658536585366e-05,
+ "loss": 1.1762,
+ "step": 120
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.17856046291517946,
+ "learning_rate": 5.902439024390244e-05,
+ "loss": 1.2888,
+ "step": 121
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.17493794870966536,
+ "learning_rate": 5.9512195121951225e-05,
+ "loss": 1.2222,
+ "step": 122
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.1909202655203384,
+ "learning_rate": 6.000000000000001e-05,
+ "loss": 1.2414,
+ "step": 123
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.18345819482834988,
+ "learning_rate": 6.0487804878048785e-05,
+ "loss": 1.2756,
+ "step": 124
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.2057069352956621,
+ "learning_rate": 6.097560975609757e-05,
+ "loss": 1.261,
+ "step": 125
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.299775882469108,
+ "learning_rate": 6.146341463414634e-05,
+ "loss": 1.2566,
+ "step": 126
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.1869687633018095,
+ "learning_rate": 6.195121951219513e-05,
+ "loss": 1.3039,
+ "step": 127
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.17747149926197442,
+ "learning_rate": 6.243902439024391e-05,
+ "loss": 1.2524,
+ "step": 128
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.17885157788044242,
+ "learning_rate": 6.29268292682927e-05,
+ "loss": 1.2455,
+ "step": 129
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.17617298187845123,
+ "learning_rate": 6.341463414634148e-05,
+ "loss": 1.2009,
+ "step": 130
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.20164176323497066,
+ "learning_rate": 6.390243902439025e-05,
+ "loss": 1.2634,
+ "step": 131
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.20459903417307612,
+ "learning_rate": 6.439024390243903e-05,
+ "loss": 1.1963,
+ "step": 132
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.1863755486334296,
+ "learning_rate": 6.487804878048781e-05,
+ "loss": 1.2387,
+ "step": 133
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.19265866140295207,
+ "learning_rate": 6.536585365853658e-05,
+ "loss": 1.2688,
+ "step": 134
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.1823425868969493,
+ "learning_rate": 6.585365853658536e-05,
+ "loss": 1.2041,
+ "step": 135
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.2016853266472781,
+ "learning_rate": 6.634146341463415e-05,
+ "loss": 1.1223,
+ "step": 136
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.17282675192463448,
+ "learning_rate": 6.682926829268293e-05,
+ "loss": 1.1879,
+ "step": 137
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.17398811693399288,
+ "learning_rate": 6.731707317073171e-05,
+ "loss": 1.2682,
+ "step": 138
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.18516916965434696,
+ "learning_rate": 6.78048780487805e-05,
+ "loss": 1.1666,
+ "step": 139
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.1852213129647933,
+ "learning_rate": 6.829268292682927e-05,
+ "loss": 1.2501,
+ "step": 140
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.17915948766591883,
+ "learning_rate": 6.878048780487805e-05,
+ "loss": 1.2264,
+ "step": 141
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.21599939417233183,
+ "learning_rate": 6.926829268292683e-05,
+ "loss": 1.2376,
+ "step": 142
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.17839304459521851,
+ "learning_rate": 6.975609756097562e-05,
+ "loss": 1.2353,
+ "step": 143
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.20826913231380875,
+ "learning_rate": 7.02439024390244e-05,
+ "loss": 1.1901,
+ "step": 144
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.20788894913361589,
+ "learning_rate": 7.073170731707318e-05,
+ "loss": 1.2577,
+ "step": 145
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.18420055842301297,
+ "learning_rate": 7.121951219512195e-05,
+ "loss": 1.1393,
+ "step": 146
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.19903048468685589,
+ "learning_rate": 7.170731707317073e-05,
+ "loss": 1.2321,
+ "step": 147
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.19074116314985748,
+ "learning_rate": 7.219512195121952e-05,
+ "loss": 1.1912,
+ "step": 148
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.2353816469403903,
+ "learning_rate": 7.26829268292683e-05,
+ "loss": 1.28,
+ "step": 149
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.21634875684769345,
+ "learning_rate": 7.317073170731708e-05,
+ "loss": 1.3312,
+ "step": 150
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.18290969006743918,
+ "learning_rate": 7.365853658536587e-05,
+ "loss": 1.2214,
+ "step": 151
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.18484243897545208,
+ "learning_rate": 7.414634146341465e-05,
+ "loss": 1.1895,
+ "step": 152
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.21882343112978872,
+ "learning_rate": 7.463414634146342e-05,
+ "loss": 1.2219,
+ "step": 153
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.19868284379241205,
+ "learning_rate": 7.51219512195122e-05,
+ "loss": 1.2176,
+ "step": 154
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.20912516312950613,
+ "learning_rate": 7.560975609756097e-05,
+ "loss": 1.242,
+ "step": 155
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.23811880045549916,
+ "learning_rate": 7.609756097560976e-05,
+ "loss": 1.2838,
+ "step": 156
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.19511077122033713,
+ "learning_rate": 7.658536585365854e-05,
+ "loss": 1.1594,
+ "step": 157
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.20094129399534238,
+ "learning_rate": 7.707317073170732e-05,
+ "loss": 1.2966,
+ "step": 158
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.19366245038292418,
+ "learning_rate": 7.75609756097561e-05,
+ "loss": 1.2246,
+ "step": 159
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.19409570223867306,
+ "learning_rate": 7.804878048780489e-05,
+ "loss": 1.2312,
+ "step": 160
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.2087258457033805,
+ "learning_rate": 7.853658536585366e-05,
+ "loss": 1.2169,
+ "step": 161
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.18765223996270428,
+ "learning_rate": 7.902439024390244e-05,
+ "loss": 1.2383,
+ "step": 162
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.20734180224147242,
+ "learning_rate": 7.951219512195122e-05,
+ "loss": 1.2587,
+ "step": 163
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.24690929540287834,
+ "learning_rate": 8e-05,
+ "loss": 1.1951,
+ "step": 164
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.2003538797619543,
+ "learning_rate": 7.999990914797545e-05,
+ "loss": 1.1982,
+ "step": 165
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.22469075613510484,
+ "learning_rate": 7.99996365923145e-05,
+ "loss": 1.2355,
+ "step": 166
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.21870100788336058,
+ "learning_rate": 7.999918233425526e-05,
+ "loss": 1.1103,
+ "step": 167
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.20939989594131886,
+ "learning_rate": 7.999854637586122e-05,
+ "loss": 1.1966,
+ "step": 168
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.43108211416237796,
+ "learning_rate": 7.999772872002132e-05,
+ "loss": 1.2882,
+ "step": 169
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.27045413432174487,
+ "learning_rate": 7.999672937044984e-05,
+ "loss": 1.2399,
+ "step": 170
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.19700483036740515,
+ "learning_rate": 7.999554833168642e-05,
+ "loss": 1.202,
+ "step": 171
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.3335979493370708,
+ "learning_rate": 7.999418560909604e-05,
+ "loss": 1.1995,
+ "step": 172
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.3165803974474567,
+ "learning_rate": 7.999264120886902e-05,
+ "loss": 1.1569,
+ "step": 173
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.1951699080346223,
+ "learning_rate": 7.999091513802093e-05,
+ "loss": 1.1778,
+ "step": 174
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.2087559121749787,
+ "learning_rate": 7.998900740439265e-05,
+ "loss": 1.1736,
+ "step": 175
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.20345180977460478,
+ "learning_rate": 7.998691801665024e-05,
+ "loss": 1.2281,
+ "step": 176
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.24617644827252333,
+ "learning_rate": 7.998464698428495e-05,
+ "loss": 1.2072,
+ "step": 177
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.2469050959356265,
+ "learning_rate": 7.998219431761318e-05,
+ "loss": 1.2242,
+ "step": 178
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.19529317748460623,
+ "learning_rate": 7.997956002777642e-05,
+ "loss": 1.2567,
+ "step": 179
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.19048389491381376,
+ "learning_rate": 7.99767441267412e-05,
+ "loss": 1.2982,
+ "step": 180
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.2085799116493225,
+ "learning_rate": 7.997374662729904e-05,
+ "loss": 1.1254,
+ "step": 181
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.20636853256378995,
+ "learning_rate": 7.997056754306636e-05,
+ "loss": 1.2435,
+ "step": 182
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.20590016382290252,
+ "learning_rate": 7.99672068884845e-05,
+ "loss": 1.2658,
+ "step": 183
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.1931166169764433,
+ "learning_rate": 7.996366467881955e-05,
+ "loss": 1.1637,
+ "step": 184
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.18873318157988098,
+ "learning_rate": 7.995994093016237e-05,
+ "loss": 1.1335,
+ "step": 185
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.19210254625199108,
+ "learning_rate": 7.995603565942846e-05,
+ "loss": 1.1928,
+ "step": 186
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.2130986479765664,
+ "learning_rate": 7.995194888435792e-05,
+ "loss": 1.2158,
+ "step": 187
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.22003854501814088,
+ "learning_rate": 7.994768062351532e-05,
+ "loss": 1.2288,
+ "step": 188
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.20330803191993058,
+ "learning_rate": 7.994323089628968e-05,
+ "loss": 1.2426,
+ "step": 189
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.20567314642208634,
+ "learning_rate": 7.993859972289434e-05,
+ "loss": 1.2649,
+ "step": 190
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.21556663727342962,
+ "learning_rate": 7.993378712436686e-05,
+ "loss": 1.2545,
+ "step": 191
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.20309165469109888,
+ "learning_rate": 7.992879312256897e-05,
+ "loss": 1.3338,
+ "step": 192
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.19574356669421325,
+ "learning_rate": 7.992361774018641e-05,
+ "loss": 1.278,
+ "step": 193
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.2763613746722313,
+ "learning_rate": 7.991826100072891e-05,
+ "loss": 1.2571,
+ "step": 194
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.19346552479915102,
+ "learning_rate": 7.991272292852996e-05,
+ "loss": 1.2027,
+ "step": 195
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.2281167812123908,
+ "learning_rate": 7.990700354874683e-05,
+ "loss": 1.2586,
+ "step": 196
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.19699013712137542,
+ "learning_rate": 7.990110288736042e-05,
+ "loss": 1.1371,
+ "step": 197
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.21768209981475933,
+ "learning_rate": 7.989502097117503e-05,
+ "loss": 1.2522,
+ "step": 198
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.21335427847754582,
+ "learning_rate": 7.988875782781838e-05,
+ "loss": 1.2437,
+ "step": 199
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.21856710629066897,
+ "learning_rate": 7.988231348574147e-05,
+ "loss": 1.2135,
+ "step": 200
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.20482062658774797,
+ "learning_rate": 7.987568797421836e-05,
+ "loss": 1.1755,
+ "step": 201
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.2017756813960897,
+ "learning_rate": 7.986888132334608e-05,
+ "loss": 1.1699,
+ "step": 202
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.20496443848153809,
+ "learning_rate": 7.986189356404458e-05,
+ "loss": 1.2125,
+ "step": 203
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.2134603800558358,
+ "learning_rate": 7.985472472805643e-05,
+ "loss": 1.2391,
+ "step": 204
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.2364175573420861,
+ "learning_rate": 7.98473748479468e-05,
+ "loss": 1.2384,
+ "step": 205
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.1872419861598724,
+ "learning_rate": 7.983984395710326e-05,
+ "loss": 1.1457,
+ "step": 206
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.28222194007095774,
+ "learning_rate": 7.983213208973566e-05,
+ "loss": 1.2952,
+ "step": 207
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.1916094851162064,
+ "learning_rate": 7.982423928087593e-05,
+ "loss": 1.1763,
+ "step": 208
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.18446245256166657,
+ "learning_rate": 7.981616556637795e-05,
+ "loss": 1.1863,
+ "step": 209
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.195191961022491,
+ "learning_rate": 7.980791098291737e-05,
+ "loss": 1.2036,
+ "step": 210
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.2652439657825496,
+ "learning_rate": 7.979947556799151e-05,
+ "loss": 1.2834,
+ "step": 211
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.24308438957843412,
+ "learning_rate": 7.979085935991906e-05,
+ "loss": 1.234,
+ "step": 212
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.21294701043622016,
+ "learning_rate": 7.978206239784004e-05,
+ "loss": 1.3006,
+ "step": 213
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.25809277041859524,
+ "learning_rate": 7.977308472171553e-05,
+ "loss": 1.2272,
+ "step": 214
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.193463860107294,
+ "learning_rate": 7.976392637232754e-05,
+ "loss": 1.2295,
+ "step": 215
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.2150023760609626,
+ "learning_rate": 7.975458739127877e-05,
+ "loss": 1.2135,
+ "step": 216
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.22590495955605894,
+ "learning_rate": 7.974506782099253e-05,
+ "loss": 1.2532,
+ "step": 217
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.21023744668403702,
+ "learning_rate": 7.973536770471242e-05,
+ "loss": 1.2472,
+ "step": 218
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.2345749799511543,
+ "learning_rate": 7.972548708650218e-05,
+ "loss": 1.1791,
+ "step": 219
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.2158876734005217,
+ "learning_rate": 7.971542601124553e-05,
+ "loss": 1.2483,
+ "step": 220
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.29455339949432446,
+ "learning_rate": 7.970518452464593e-05,
+ "loss": 1.2894,
+ "step": 221
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.23983708730626851,
+ "learning_rate": 7.969476267322636e-05,
+ "loss": 1.271,
+ "step": 222
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.1922400905426158,
+ "learning_rate": 7.968416050432912e-05,
+ "loss": 1.2139,
+ "step": 223
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.2238136844422931,
+ "learning_rate": 7.967337806611568e-05,
+ "loss": 1.2655,
+ "step": 224
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.21230292828267672,
+ "learning_rate": 7.966241540756631e-05,
+ "loss": 1.2406,
+ "step": 225
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.26656119419070456,
+ "learning_rate": 7.965127257848004e-05,
+ "loss": 1.2595,
+ "step": 226
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.22381385502992684,
+ "learning_rate": 7.963994962947426e-05,
+ "loss": 1.1737,
+ "step": 227
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20056702203994298,
+ "learning_rate": 7.962844661198462e-05,
+ "loss": 1.1969,
+ "step": 228
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20148701321526885,
+ "learning_rate": 7.961676357826478e-05,
+ "loss": 1.2151,
+ "step": 229
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20034834807028637,
+ "learning_rate": 7.960490058138604e-05,
+ "loss": 1.1455,
+ "step": 230
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.21050838521846033,
+ "learning_rate": 7.959285767523732e-05,
+ "loss": 1.2223,
+ "step": 231
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20904772138969777,
+ "learning_rate": 7.95806349145247e-05,
+ "loss": 1.2534,
+ "step": 232
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.20307877304792957,
+ "learning_rate": 7.956823235477134e-05,
+ "loss": 1.1352,
+ "step": 233
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.20501105270897094,
+ "learning_rate": 7.95556500523171e-05,
+ "loss": 1.2031,
+ "step": 234
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.19800586972038586,
+ "learning_rate": 7.954288806431838e-05,
+ "loss": 1.2567,
+ "step": 235
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.2175102450594135,
+ "learning_rate": 7.952994644874777e-05,
+ "loss": 1.2538,
+ "step": 236
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.22698189300067595,
+ "learning_rate": 7.951682526439391e-05,
+ "loss": 1.3088,
+ "step": 237
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.19208392014975315,
+ "learning_rate": 7.950352457086109e-05,
+ "loss": 1.2336,
+ "step": 238
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.27004086334319655,
+ "learning_rate": 7.949004442856905e-05,
+ "loss": 1.2012,
+ "step": 239
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.23420974954538043,
+ "learning_rate": 7.947638489875272e-05,
+ "loss": 1.2244,
+ "step": 240
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.20514399124802024,
+ "learning_rate": 7.946254604346186e-05,
+ "loss": 1.2548,
+ "step": 241
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.19334973602372896,
+ "learning_rate": 7.944852792556092e-05,
+ "loss": 1.2104,
+ "step": 242
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.1992640714537956,
+ "learning_rate": 7.943433060872858e-05,
+ "loss": 1.2628,
+ "step": 243
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.203284617090413,
+ "learning_rate": 7.941995415745761e-05,
+ "loss": 1.2002,
+ "step": 244
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.22795306969682058,
+ "learning_rate": 7.94053986370545e-05,
+ "loss": 1.2215,
+ "step": 245
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.20789041346838505,
+ "learning_rate": 7.939066411363915e-05,
+ "loss": 1.0998,
+ "step": 246
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.22354868884742066,
+ "learning_rate": 7.937575065414464e-05,
+ "loss": 1.2564,
+ "step": 247
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.21176392726647736,
+ "learning_rate": 7.936065832631687e-05,
+ "loss": 1.2816,
+ "step": 248
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.19967179557235587,
+ "learning_rate": 7.934538719871427e-05,
+ "loss": 1.1961,
+ "step": 249
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.210819577350627,
+ "learning_rate": 7.932993734070747e-05,
+ "loss": 1.2167,
+ "step": 250
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.21537794551756187,
+ "learning_rate": 7.931430882247903e-05,
+ "loss": 1.2341,
+ "step": 251
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.22850872387256574,
+ "learning_rate": 7.929850171502304e-05,
+ "loss": 1.1686,
+ "step": 252
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.22380366415076383,
+ "learning_rate": 7.928251609014493e-05,
+ "loss": 1.1462,
+ "step": 253
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.22426923149036065,
+ "learning_rate": 7.926635202046102e-05,
+ "loss": 1.1792,
+ "step": 254
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.42082703321103965,
+ "learning_rate": 7.925000957939822e-05,
+ "loss": 1.2718,
+ "step": 255
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.2235432774854074,
+ "learning_rate": 7.92334888411937e-05,
+ "loss": 1.2598,
+ "step": 256
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.281644028934108,
+ "learning_rate": 7.92167898808946e-05,
+ "loss": 1.2205,
+ "step": 257
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.2037705143888748,
+ "learning_rate": 7.919991277435763e-05,
+ "loss": 1.1737,
+ "step": 258
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.20917419230028977,
+ "learning_rate": 7.918285759824879e-05,
+ "loss": 1.2035,
+ "step": 259
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.20510847570635518,
+ "learning_rate": 7.916562443004292e-05,
+ "loss": 1.2135,
+ "step": 260
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.25172483071092466,
+ "learning_rate": 7.914821334802342e-05,
+ "loss": 1.2218,
+ "step": 261
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.21102706700634313,
+ "learning_rate": 7.91306244312819e-05,
+ "loss": 1.1738,
+ "step": 262
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.22626060872645815,
+ "learning_rate": 7.911285775971781e-05,
+ "loss": 1.238,
+ "step": 263
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.22448567539778486,
+ "learning_rate": 7.909491341403805e-05,
+ "loss": 1.2404,
+ "step": 264
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.2019099786139193,
+ "learning_rate": 7.907679147575661e-05,
+ "loss": 1.213,
+ "step": 265
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.24307234839096267,
+ "learning_rate": 7.905849202719422e-05,
+ "loss": 1.2322,
+ "step": 266
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.19801890521743487,
+ "learning_rate": 7.904001515147802e-05,
+ "loss": 1.2448,
+ "step": 267
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.2102742273575385,
+ "learning_rate": 7.902136093254106e-05,
+ "loss": 1.1657,
+ "step": 268
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.2173464476815016,
+ "learning_rate": 7.900252945512201e-05,
+ "loss": 1.2549,
+ "step": 269
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.20957275458699595,
+ "learning_rate": 7.898352080476479e-05,
+ "loss": 1.2536,
+ "step": 270
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.20691966388952363,
+ "learning_rate": 7.896433506781811e-05,
+ "loss": 1.2661,
+ "step": 271
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.2276662275112648,
+ "learning_rate": 7.894497233143509e-05,
+ "loss": 1.2409,
+ "step": 272
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.23854109569301263,
+ "learning_rate": 7.892543268357297e-05,
+ "loss": 1.2681,
+ "step": 273
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.2233864156677627,
+ "learning_rate": 7.890571621299252e-05,
+ "loss": 1.1687,
+ "step": 274
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.20114129147925475,
+ "learning_rate": 7.888582300925787e-05,
+ "loss": 1.2184,
+ "step": 275
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.2154654670569462,
+ "learning_rate": 7.886575316273586e-05,
+ "loss": 1.1982,
+ "step": 276
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.2292982209343639,
+ "learning_rate": 7.884550676459583e-05,
+ "loss": 1.2129,
+ "step": 277
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.21302713135229548,
+ "learning_rate": 7.882508390680908e-05,
+ "loss": 1.1605,
+ "step": 278
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.2123661020671048,
+ "learning_rate": 7.88044846821485e-05,
+ "loss": 1.2308,
+ "step": 279
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.2080577410800404,
+ "learning_rate": 7.878370918418818e-05,
+ "loss": 1.2195,
+ "step": 280
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.19663901881127385,
+ "learning_rate": 7.876275750730289e-05,
+ "loss": 1.1591,
+ "step": 281
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.20534502031312163,
+ "learning_rate": 7.874162974666776e-05,
+ "loss": 1.2664,
+ "step": 282
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.23240445399513837,
+ "learning_rate": 7.872032599825779e-05,
+ "loss": 1.2151,
+ "step": 283
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.2672527316717507,
+ "learning_rate": 7.86988463588474e-05,
+ "loss": 1.2406,
+ "step": 284
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.19893903058743695,
+ "learning_rate": 7.867719092601003e-05,
+ "loss": 1.1291,
+ "step": 285
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.33275268109930917,
+ "learning_rate": 7.865535979811768e-05,
+ "loss": 1.1406,
+ "step": 286
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.2373619455690358,
+ "learning_rate": 7.863335307434045e-05,
+ "loss": 1.2799,
+ "step": 287
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.263235735390858,
+ "learning_rate": 7.861117085464612e-05,
+ "loss": 1.2415,
+ "step": 288
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.25884281780784324,
+ "learning_rate": 7.858881323979965e-05,
+ "loss": 1.3919,
+ "step": 289
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.25426288332255736,
+ "learning_rate": 7.85662803313628e-05,
+ "loss": 1.174,
+ "step": 290
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.26655405527881243,
+ "learning_rate": 7.854357223169356e-05,
+ "loss": 1.2806,
+ "step": 291
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.20909844432349833,
+ "learning_rate": 7.852068904394579e-05,
+ "loss": 1.2627,
+ "step": 292
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.21307115068935759,
+ "learning_rate": 7.849763087206866e-05,
+ "loss": 1.1879,
+ "step": 293
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.25009949471398946,
+ "learning_rate": 7.847439782080628e-05,
+ "loss": 1.2881,
+ "step": 294
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.20960783418679174,
+ "learning_rate": 7.845098999569712e-05,
+ "loss": 1.2723,
+ "step": 295
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.24968832437925104,
+ "learning_rate": 7.842740750307362e-05,
+ "loss": 1.2029,
+ "step": 296
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.22981196585125677,
+ "learning_rate": 7.84036504500616e-05,
+ "loss": 1.1695,
+ "step": 297
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.2320606844751365,
+ "learning_rate": 7.837971894457991e-05,
+ "loss": 1.2317,
+ "step": 298
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.23051459673906124,
+ "learning_rate": 7.835561309533981e-05,
+ "loss": 1.2046,
+ "step": 299
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.2510027231060586,
+ "learning_rate": 7.833133301184457e-05,
+ "loss": 1.199,
+ "step": 300
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.23601180466018787,
+ "learning_rate": 7.830687880438895e-05,
+ "loss": 1.1755,
+ "step": 301
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.24740820934385369,
+ "learning_rate": 7.828225058405864e-05,
+ "loss": 1.2054,
+ "step": 302
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.23065372979111173,
+ "learning_rate": 7.825744846272984e-05,
+ "loss": 1.2066,
+ "step": 303
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.22385077334838213,
+ "learning_rate": 7.823247255306866e-05,
+ "loss": 1.2147,
+ "step": 304
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.42981213948386104,
+ "learning_rate": 7.820732296853074e-05,
+ "loss": 1.2314,
+ "step": 305
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.21122844902751076,
+ "learning_rate": 7.818199982336058e-05,
+ "loss": 1.1462,
+ "step": 306
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.23374869692118933,
+ "learning_rate": 7.815650323259117e-05,
+ "loss": 1.2051,
+ "step": 307
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.21662363795962128,
+ "learning_rate": 7.813083331204332e-05,
+ "loss": 1.1575,
+ "step": 308
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.2088315773384112,
+ "learning_rate": 7.810499017832526e-05,
+ "loss": 1.1316,
+ "step": 309
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.2095238410730976,
+ "learning_rate": 7.807897394883203e-05,
+ "loss": 1.2087,
+ "step": 310
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.22672932127256515,
+ "learning_rate": 7.805278474174499e-05,
+ "loss": 1.2512,
+ "step": 311
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.21873052340922736,
+ "learning_rate": 7.802642267603126e-05,
+ "loss": 1.1909,
+ "step": 312
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.219814521916342,
+ "learning_rate": 7.79998878714432e-05,
+ "loss": 1.1669,
+ "step": 313
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.3049426027257317,
+ "learning_rate": 7.797318044851786e-05,
+ "loss": 1.1797,
+ "step": 314
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.22309435690065985,
+ "learning_rate": 7.794630052857638e-05,
+ "loss": 1.1417,
+ "step": 315
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.3891885169154885,
+ "learning_rate": 7.791924823372354e-05,
+ "loss": 1.2369,
+ "step": 316
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.24780269452456372,
+ "learning_rate": 7.789202368684711e-05,
+ "loss": 1.2521,
+ "step": 317
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.21660460720269362,
+ "learning_rate": 7.786462701161738e-05,
+ "loss": 1.2151,
+ "step": 318
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.23635409466561857,
+ "learning_rate": 7.783705833248649e-05,
+ "loss": 1.2363,
+ "step": 319
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.2616135839903218,
+ "learning_rate": 7.780931777468797e-05,
+ "loss": 1.2428,
+ "step": 320
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.21461059159245083,
+ "learning_rate": 7.77814054642361e-05,
+ "loss": 1.1434,
+ "step": 321
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.25348824286656163,
+ "learning_rate": 7.775332152792539e-05,
+ "loss": 1.2368,
+ "step": 322
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.22275034726331247,
+ "learning_rate": 7.772506609332995e-05,
+ "loss": 1.1827,
+ "step": 323
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.25030821228147526,
+ "learning_rate": 7.769663928880298e-05,
+ "loss": 1.2428,
+ "step": 324
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.22251804398745534,
+ "learning_rate": 7.766804124347608e-05,
+ "loss": 1.1889,
+ "step": 325
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.23381455520411995,
+ "learning_rate": 7.763927208725879e-05,
+ "loss": 1.2115,
+ "step": 326
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.27341902651946226,
+ "learning_rate": 7.761033195083791e-05,
+ "loss": 1.2535,
+ "step": 327
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.24862471659814522,
+ "learning_rate": 7.758122096567694e-05,
+ "loss": 1.2128,
+ "step": 328
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.2251357082045494,
+ "learning_rate": 7.755193926401547e-05,
+ "loss": 1.2334,
+ "step": 329
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.3173274941622932,
+ "learning_rate": 7.752248697886857e-05,
+ "loss": 1.226,
+ "step": 330
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.23056440717672175,
+ "learning_rate": 7.74928642440263e-05,
+ "loss": 1.2339,
+ "step": 331
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2801507500859342,
+ "learning_rate": 7.746307119405286e-05,
+ "loss": 1.287,
+ "step": 332
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2267818430426272,
+ "learning_rate": 7.743310796428622e-05,
+ "loss": 1.1916,
+ "step": 333
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2777329160365585,
+ "learning_rate": 7.74029746908374e-05,
+ "loss": 1.252,
+ "step": 334
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.25289169762353,
+ "learning_rate": 7.737267151058983e-05,
+ "loss": 1.2153,
+ "step": 335
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2424670686901653,
+ "learning_rate": 7.734219856119875e-05,
+ "loss": 1.2227,
+ "step": 336
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.22747092217441645,
+ "learning_rate": 7.731155598109067e-05,
+ "loss": 1.19,
+ "step": 337
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.2307810940100189,
+ "learning_rate": 7.728074390946257e-05,
+ "loss": 1.1818,
+ "step": 338
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.2583402574655623,
+ "learning_rate": 7.724976248628142e-05,
+ "loss": 1.1608,
+ "step": 339
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.22140209760890694,
+ "learning_rate": 7.721861185228347e-05,
+ "loss": 1.1245,
+ "step": 340
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.25859310758244686,
+ "learning_rate": 7.718729214897362e-05,
+ "loss": 1.2247,
+ "step": 341
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.26371179531372124,
+ "learning_rate": 7.715580351862482e-05,
+ "loss": 1.2128,
+ "step": 342
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.26575541302851047,
+ "learning_rate": 7.712414610427733e-05,
+ "loss": 1.2443,
+ "step": 343
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.269978305197599,
+ "learning_rate": 7.709232004973816e-05,
+ "loss": 1.2231,
+ "step": 344
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.26583998705977047,
+ "learning_rate": 7.70603254995804e-05,
+ "loss": 1.2476,
+ "step": 345
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.24256062164066097,
+ "learning_rate": 7.702816259914253e-05,
+ "loss": 1.2901,
+ "step": 346
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.3463123472658915,
+ "learning_rate": 7.699583149452779e-05,
+ "loss": 1.3277,
+ "step": 347
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.2269096590531878,
+ "learning_rate": 7.696333233260345e-05,
+ "loss": 1.2047,
+ "step": 348
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.25136883001050025,
+ "learning_rate": 7.693066526100031e-05,
+ "loss": 1.1619,
+ "step": 349
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.2565112571116145,
+ "learning_rate": 7.68978304281118e-05,
+ "loss": 1.2389,
+ "step": 350
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.22175779550828703,
+ "learning_rate": 7.686482798309349e-05,
+ "loss": 1.2238,
+ "step": 351
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.22588304332216555,
+ "learning_rate": 7.683165807586234e-05,
+ "loss": 1.174,
+ "step": 352
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.24889474296529737,
+ "learning_rate": 7.6798320857096e-05,
+ "loss": 1.2366,
+ "step": 353
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.27339703806525034,
+ "learning_rate": 7.676481647823214e-05,
+ "loss": 1.2356,
+ "step": 354
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.23424666722888365,
+ "learning_rate": 7.673114509146782e-05,
+ "loss": 1.2089,
+ "step": 355
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.27978285392461766,
+ "learning_rate": 7.66973068497587e-05,
+ "loss": 1.2609,
+ "step": 356
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.2509423350138824,
+ "learning_rate": 7.666330190681844e-05,
+ "loss": 1.1777,
+ "step": 357
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.23007730927468031,
+ "learning_rate": 7.662913041711793e-05,
+ "loss": 1.154,
+ "step": 358
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.2438648674953112,
+ "learning_rate": 7.659479253588462e-05,
+ "loss": 1.2257,
+ "step": 359
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.28816093242092233,
+ "learning_rate": 7.65602884191018e-05,
+ "loss": 1.2558,
+ "step": 360
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.24972815300596035,
+ "learning_rate": 7.652561822350793e-05,
+ "loss": 1.2837,
+ "step": 361
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.2543189139697063,
+ "learning_rate": 7.649078210659587e-05,
+ "loss": 1.2193,
+ "step": 362
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.2237937956718952,
+ "learning_rate": 7.645578022661224e-05,
+ "loss": 1.2237,
+ "step": 363
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.29742029408787396,
+ "learning_rate": 7.642061274255657e-05,
+ "loss": 1.2116,
+ "step": 364
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2462883147335493,
+ "learning_rate": 7.638527981418075e-05,
+ "loss": 1.1827,
+ "step": 365
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2647802498907096,
+ "learning_rate": 7.634978160198817e-05,
+ "loss": 1.2739,
+ "step": 366
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.22360398779217264,
+ "learning_rate": 7.631411826723306e-05,
+ "loss": 1.2185,
+ "step": 367
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2635048004593543,
+ "learning_rate": 7.627828997191973e-05,
+ "loss": 1.2317,
+ "step": 368
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2764803449917684,
+ "learning_rate": 7.624229687880184e-05,
+ "loss": 1.1923,
+ "step": 369
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.25724943233414527,
+ "learning_rate": 7.620613915138166e-05,
+ "loss": 1.2218,
+ "step": 370
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.2858318045794755,
+ "learning_rate": 7.61698169539093e-05,
+ "loss": 1.1496,
+ "step": 371
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.23547216647460364,
+ "learning_rate": 7.613333045138206e-05,
+ "loss": 1.1905,
+ "step": 372
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.22984814903684375,
+ "learning_rate": 7.609667980954355e-05,
+ "loss": 1.2009,
+ "step": 373
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.2551903754079084,
+ "learning_rate": 7.605986519488301e-05,
+ "loss": 1.2042,
+ "step": 374
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.2508257410125616,
+ "learning_rate": 7.602288677463457e-05,
+ "loss": 1.2468,
+ "step": 375
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.25324577774935964,
+ "learning_rate": 7.598574471677644e-05,
+ "loss": 1.2603,
+ "step": 376
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.35888776531769967,
+ "learning_rate": 7.59484391900302e-05,
+ "loss": 1.1929,
+ "step": 377
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.22048517191014724,
+ "learning_rate": 7.591097036385994e-05,
+ "loss": 1.1783,
+ "step": 378
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.2781160412746083,
+ "learning_rate": 7.587333840847162e-05,
+ "loss": 1.3397,
+ "step": 379
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.24033046830332258,
+ "learning_rate": 7.583554349481222e-05,
+ "loss": 1.2436,
+ "step": 380
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.26413762380260003,
+ "learning_rate": 7.579758579456893e-05,
+ "loss": 1.1917,
+ "step": 381
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.2390937887338632,
+ "learning_rate": 7.575946548016847e-05,
+ "loss": 1.2186,
+ "step": 382
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.25131263043429275,
+ "learning_rate": 7.572118272477622e-05,
+ "loss": 1.2538,
+ "step": 383
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.223974104870702,
+ "learning_rate": 7.568273770229546e-05,
+ "loss": 1.2165,
+ "step": 384
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.25840356830252875,
+ "learning_rate": 7.564413058736663e-05,
+ "loss": 1.1848,
+ "step": 385
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.2723156683076603,
+ "learning_rate": 7.560536155536641e-05,
+ "loss": 1.1982,
+ "step": 386
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.265687427976889,
+ "learning_rate": 7.556643078240708e-05,
+ "loss": 1.231,
+ "step": 387
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.25152762080976077,
+ "learning_rate": 7.552733844533562e-05,
+ "loss": 1.1974,
+ "step": 388
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.2366049485053541,
+ "learning_rate": 7.548808472173292e-05,
+ "loss": 1.3119,
+ "step": 389
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.22092196577077122,
+ "learning_rate": 7.5448669789913e-05,
+ "loss": 1.195,
+ "step": 390
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.22667521540462374,
+ "learning_rate": 7.540909382892217e-05,
+ "loss": 1.1431,
+ "step": 391
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.25432207282646513,
+ "learning_rate": 7.536935701853823e-05,
+ "loss": 1.2173,
+ "step": 392
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.29950506457923864,
+ "learning_rate": 7.53294595392697e-05,
+ "loss": 1.1962,
+ "step": 393
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.24735689607229913,
+ "learning_rate": 7.528940157235487e-05,
+ "loss": 1.2053,
+ "step": 394
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.24394198607459663,
+ "learning_rate": 7.524918329976114e-05,
+ "loss": 1.1979,
+ "step": 395
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.2630369372689188,
+ "learning_rate": 7.520880490418409e-05,
+ "loss": 1.2111,
+ "step": 396
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.26275028416291457,
+ "learning_rate": 7.516826656904664e-05,
+ "loss": 1.2133,
+ "step": 397
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.23938074620956928,
+ "learning_rate": 7.512756847849831e-05,
+ "loss": 1.1355,
+ "step": 398
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.3724960610098138,
+ "learning_rate": 7.508671081741428e-05,
+ "loss": 1.2572,
+ "step": 399
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.24161685847894723,
+ "learning_rate": 7.504569377139462e-05,
+ "loss": 1.1706,
+ "step": 400
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.26121591322670523,
+ "learning_rate": 7.50045175267634e-05,
+ "loss": 1.2135,
+ "step": 401
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2465579498164775,
+ "learning_rate": 7.496318227056788e-05,
+ "loss": 1.1641,
+ "step": 402
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2556288696122787,
+ "learning_rate": 7.492168819057767e-05,
+ "loss": 1.2939,
+ "step": 403
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.261481216336303,
+ "learning_rate": 7.488003547528382e-05,
+ "loss": 1.2026,
+ "step": 404
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2389415135676362,
+ "learning_rate": 7.483822431389799e-05,
+ "loss": 1.2131,
+ "step": 405
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2559201956627192,
+ "learning_rate": 7.479625489635162e-05,
+ "loss": 1.1246,
+ "step": 406
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.27127932491822604,
+ "learning_rate": 7.475412741329504e-05,
+ "loss": 1.2429,
+ "step": 407
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.27006004008695594,
+ "learning_rate": 7.47118420560966e-05,
+ "loss": 1.2388,
+ "step": 408
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.23716823297200537,
+ "learning_rate": 7.466939901684182e-05,
+ "loss": 1.1264,
+ "step": 409
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.2885373898669248,
+ "learning_rate": 7.462679848833252e-05,
+ "loss": 1.2786,
+ "step": 410
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.49215227598639927,
+ "learning_rate": 7.458404066408588e-05,
+ "loss": 1.2386,
+ "step": 411
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.24235735604947403,
+ "learning_rate": 7.454112573833368e-05,
+ "loss": 1.1423,
+ "step": 412
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.2584614748054343,
+ "learning_rate": 7.449805390602127e-05,
+ "loss": 1.2669,
+ "step": 413
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.23806123085998873,
+ "learning_rate": 7.445482536280684e-05,
+ "loss": 1.1763,
+ "step": 414
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.24459517607786851,
+ "learning_rate": 7.441144030506043e-05,
+ "loss": 1.198,
+ "step": 415
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.25801616402700395,
+ "learning_rate": 7.436789892986304e-05,
+ "loss": 1.2136,
+ "step": 416
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.2814819942392514,
+ "learning_rate": 7.432420143500578e-05,
+ "loss": 1.2398,
+ "step": 417
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.22134709322606153,
+ "learning_rate": 7.428034801898893e-05,
+ "loss": 1.1592,
+ "step": 418
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.2899677536995633,
+ "learning_rate": 7.42363388810211e-05,
+ "loss": 1.2296,
+ "step": 419
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.24005943230262294,
+ "learning_rate": 7.419217422101822e-05,
+ "loss": 1.2223,
+ "step": 420
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.26417562369496167,
+ "learning_rate": 7.414785423960275e-05,
+ "loss": 1.2261,
+ "step": 421
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.2580815883535521,
+ "learning_rate": 7.410337913810271e-05,
+ "loss": 1.2021,
+ "step": 422
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.25242217589496435,
+ "learning_rate": 7.405874911855071e-05,
+ "loss": 1.239,
+ "step": 423
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.21991733999839932,
+ "learning_rate": 7.401396438368315e-05,
+ "loss": 1.1716,
+ "step": 424
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.40116538322720213,
+ "learning_rate": 7.396902513693924e-05,
+ "loss": 1.2773,
+ "step": 425
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.277333939455099,
+ "learning_rate": 7.392393158246002e-05,
+ "loss": 1.2574,
+ "step": 426
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.27146087746385755,
+ "learning_rate": 7.387868392508756e-05,
+ "loss": 1.2243,
+ "step": 427
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.255881055620786,
+ "learning_rate": 7.38332823703639e-05,
+ "loss": 1.223,
+ "step": 428
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.24807364856677255,
+ "learning_rate": 7.378772712453021e-05,
+ "loss": 1.1985,
+ "step": 429
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.25746257617764423,
+ "learning_rate": 7.37420183945258e-05,
+ "loss": 1.2502,
+ "step": 430
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.28851991049982234,
+ "learning_rate": 7.369615638798722e-05,
+ "loss": 1.2535,
+ "step": 431
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.24113389811604363,
+ "learning_rate": 7.365014131324725e-05,
+ "loss": 1.2227,
+ "step": 432
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.2414465151257969,
+ "learning_rate": 7.360397337933405e-05,
+ "loss": 1.1884,
+ "step": 433
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.2735463134699831,
+ "learning_rate": 7.355765279597011e-05,
+ "loss": 1.2756,
+ "step": 434
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.2588437452987293,
+ "learning_rate": 7.351117977357139e-05,
+ "loss": 1.2108,
+ "step": 435
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.26573294117796553,
+ "learning_rate": 7.346455452324629e-05,
+ "loss": 1.1821,
+ "step": 436
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.2555476577827304,
+ "learning_rate": 7.341777725679473e-05,
+ "loss": 1.1937,
+ "step": 437
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.2867704132108098,
+ "learning_rate": 7.337084818670716e-05,
+ "loss": 1.2272,
+ "step": 438
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.27726678115981157,
+ "learning_rate": 7.332376752616367e-05,
+ "loss": 1.2331,
+ "step": 439
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.26955338021079955,
+ "learning_rate": 7.32765354890329e-05,
+ "loss": 1.1731,
+ "step": 440
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.25250321202536524,
+ "learning_rate": 7.322915228987116e-05,
+ "loss": 1.2653,
+ "step": 441
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.24748844179765395,
+ "learning_rate": 7.318161814392143e-05,
+ "loss": 1.24,
+ "step": 442
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.28177805247356325,
+ "learning_rate": 7.313393326711239e-05,
+ "loss": 1.185,
+ "step": 443
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.24093242000396312,
+ "learning_rate": 7.30860978760574e-05,
+ "loss": 1.1994,
+ "step": 444
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.26277803901457075,
+ "learning_rate": 7.30381121880536e-05,
+ "loss": 1.212,
+ "step": 445
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2506524258682433,
+ "learning_rate": 7.298997642108079e-05,
+ "loss": 1.2421,
+ "step": 446
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2840599700015824,
+ "learning_rate": 7.294169079380061e-05,
+ "loss": 1.1818,
+ "step": 447
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.24892184038117549,
+ "learning_rate": 7.289325552555538e-05,
+ "loss": 1.1916,
+ "step": 448
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2700898428541357,
+ "learning_rate": 7.284467083636722e-05,
+ "loss": 1.2517,
+ "step": 449
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2617848546539419,
+ "learning_rate": 7.279593694693698e-05,
+ "loss": 1.2063,
+ "step": 450
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2698278585334131,
+ "learning_rate": 7.274705407864332e-05,
+ "loss": 1.194,
+ "step": 451
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.23678313024953834,
+ "learning_rate": 7.26980224535416e-05,
+ "loss": 1.2349,
+ "step": 452
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.24851875792002978,
+ "learning_rate": 7.264884229436293e-05,
+ "loss": 1.1758,
+ "step": 453
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.24122080121681125,
+ "learning_rate": 7.259951382451318e-05,
+ "loss": 1.1962,
+ "step": 454
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.22741322959884405,
+ "learning_rate": 7.25500372680719e-05,
+ "loss": 1.1702,
+ "step": 455
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.2297475610861458,
+ "learning_rate": 7.250041284979137e-05,
+ "loss": 1.1466,
+ "step": 456
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.3057605989721467,
+ "learning_rate": 7.245064079509553e-05,
+ "loss": 1.246,
+ "step": 457
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.2719638501597136,
+ "learning_rate": 7.240072133007899e-05,
+ "loss": 1.2184,
+ "step": 458
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.2436807816414479,
+ "learning_rate": 7.235065468150593e-05,
+ "loss": 1.2324,
+ "step": 459
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.23436349430255515,
+ "learning_rate": 7.23004410768092e-05,
+ "loss": 1.1813,
+ "step": 460
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.2398940990211377,
+ "learning_rate": 7.22500807440892e-05,
+ "loss": 1.1924,
+ "step": 461
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.2605716625062531,
+ "learning_rate": 7.219957391211281e-05,
+ "loss": 1.182,
+ "step": 462
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.260462524570941,
+ "learning_rate": 7.214892081031244e-05,
+ "loss": 1.2136,
+ "step": 463
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.21979766512306334,
+ "learning_rate": 7.209812166878491e-05,
+ "loss": 1.2066,
+ "step": 464
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.23324453647530663,
+ "learning_rate": 7.204717671829051e-05,
+ "loss": 1.1657,
+ "step": 465
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.2529434935507481,
+ "learning_rate": 7.199608619025177e-05,
+ "loss": 1.2093,
+ "step": 466
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.25371701891720116,
+ "learning_rate": 7.194485031675265e-05,
+ "loss": 1.2225,
+ "step": 467
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.23272423066292103,
+ "learning_rate": 7.189346933053725e-05,
+ "loss": 1.1721,
+ "step": 468
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.25122928735587546,
+ "learning_rate": 7.184194346500892e-05,
+ "loss": 1.2537,
+ "step": 469
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.2159270875490409,
+ "learning_rate": 7.179027295422913e-05,
+ "loss": 1.197,
+ "step": 470
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.2633111059076544,
+ "learning_rate": 7.173845803291636e-05,
+ "loss": 1.1721,
+ "step": 471
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.30555936322098703,
+ "learning_rate": 7.168649893644517e-05,
+ "loss": 1.3011,
+ "step": 472
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.23492670111453726,
+ "learning_rate": 7.163439590084502e-05,
+ "loss": 1.1601,
+ "step": 473
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.26602734263721806,
+ "learning_rate": 7.158214916279923e-05,
+ "loss": 1.2808,
+ "step": 474
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.3182695007856262,
+ "learning_rate": 7.152975895964386e-05,
+ "loss": 1.2967,
+ "step": 475
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.2785021674736721,
+ "learning_rate": 7.147722552936673e-05,
+ "loss": 1.1789,
+ "step": 476
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.279474303138652,
+ "learning_rate": 7.142454911060627e-05,
+ "loss": 1.2596,
+ "step": 477
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.2556980144910755,
+ "learning_rate": 7.137172994265044e-05,
+ "loss": 1.2426,
+ "step": 478
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.3311256331993533,
+ "learning_rate": 7.131876826543565e-05,
+ "loss": 1.2059,
+ "step": 479
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.26467296197775253,
+ "learning_rate": 7.12656643195457e-05,
+ "loss": 1.2482,
+ "step": 480
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.27444885274652553,
+ "learning_rate": 7.121241834621064e-05,
+ "loss": 1.2528,
+ "step": 481
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.2572283861115396,
+ "learning_rate": 7.115903058730567e-05,
+ "loss": 1.1849,
+ "step": 482
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.2677065778235683,
+ "learning_rate": 7.11055012853501e-05,
+ "loss": 1.2011,
+ "step": 483
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.29470622036742816,
+ "learning_rate": 7.105183068350619e-05,
+ "loss": 1.2398,
+ "step": 484
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.27609230248969197,
+ "learning_rate": 7.099801902557811e-05,
+ "loss": 1.2259,
+ "step": 485
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.24248634168099284,
+ "learning_rate": 7.094406655601073e-05,
+ "loss": 1.2282,
+ "step": 486
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.2765941767688746,
+ "learning_rate": 7.088997351988865e-05,
+ "loss": 1.2319,
+ "step": 487
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.29347776909858947,
+ "learning_rate": 7.083574016293493e-05,
+ "loss": 1.1765,
+ "step": 488
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.285370295424537,
+ "learning_rate": 7.078136673151008e-05,
+ "loss": 1.26,
+ "step": 489
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.29408734903836536,
+ "learning_rate": 7.072685347261093e-05,
+ "loss": 1.226,
+ "step": 490
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.27437470239205813,
+ "learning_rate": 7.067220063386947e-05,
+ "loss": 1.1976,
+ "step": 491
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.2680770258777871,
+ "learning_rate": 7.061740846355176e-05,
+ "loss": 1.1915,
+ "step": 492
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.27200362879502954,
+ "learning_rate": 7.056247721055678e-05,
+ "loss": 1.2002,
+ "step": 493
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.2637811092577037,
+ "learning_rate": 7.050740712441528e-05,
+ "loss": 1.287,
+ "step": 494
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.24657959209271266,
+ "learning_rate": 7.045219845528875e-05,
+ "loss": 1.2284,
+ "step": 495
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.25311992110358666,
+ "learning_rate": 7.039685145396812e-05,
+ "loss": 1.1616,
+ "step": 496
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.2564633694193358,
+ "learning_rate": 7.034136637187275e-05,
+ "loss": 1.2067,
+ "step": 497
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.2446797651174144,
+ "learning_rate": 7.028574346104926e-05,
+ "loss": 1.2284,
+ "step": 498
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.2592751463399255,
+ "learning_rate": 7.022998297417034e-05,
+ "loss": 1.2371,
+ "step": 499
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.2500713943206808,
+ "learning_rate": 7.017408516453365e-05,
+ "loss": 1.1061,
+ "step": 500
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.2812266276040743,
+ "learning_rate": 7.011805028606064e-05,
+ "loss": 1.1949,
+ "step": 501
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.298829667668083,
+ "learning_rate": 7.006187859329544e-05,
+ "loss": 1.2313,
+ "step": 502
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.26518768159745104,
+ "learning_rate": 7.000557034140361e-05,
+ "loss": 1.2246,
+ "step": 503
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.3037280360760458,
+ "learning_rate": 6.994912578617113e-05,
+ "loss": 1.1617,
+ "step": 504
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.2726903109255714,
+ "learning_rate": 6.989254518400309e-05,
+ "loss": 1.2415,
+ "step": 505
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.25568082003046966,
+ "learning_rate": 6.98358287919226e-05,
+ "loss": 1.1817,
+ "step": 506
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.25633294893705044,
+ "learning_rate": 6.97789768675696e-05,
+ "loss": 1.2149,
+ "step": 507
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.28291439435087123,
+ "learning_rate": 6.972198966919972e-05,
+ "loss": 1.1578,
+ "step": 508
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.27195184756655516,
+ "learning_rate": 6.966486745568308e-05,
+ "loss": 1.2355,
+ "step": 509
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.239159568376005,
+ "learning_rate": 6.960761048650312e-05,
+ "loss": 1.1688,
+ "step": 510
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.22961475425949177,
+ "learning_rate": 6.955021902175543e-05,
+ "loss": 1.2094,
+ "step": 511
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.27443773600741117,
+ "learning_rate": 6.949269332214651e-05,
+ "loss": 1.2559,
+ "step": 512
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.26230551832002097,
+ "learning_rate": 6.94350336489927e-05,
+ "loss": 1.2121,
+ "step": 513
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.2716742985303849,
+ "learning_rate": 6.937724026421892e-05,
+ "loss": 1.2444,
+ "step": 514
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.2537850139439542,
+ "learning_rate": 6.931931343035742e-05,
+ "loss": 1.1327,
+ "step": 515
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.28599587967496826,
+ "learning_rate": 6.926125341054676e-05,
+ "loss": 1.2236,
+ "step": 516
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.26780654378470103,
+ "learning_rate": 6.920306046853043e-05,
+ "loss": 1.2295,
+ "step": 517
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.23606296888412015,
+ "learning_rate": 6.914473486865577e-05,
+ "loss": 1.1543,
+ "step": 518
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.34976881174240837,
+ "learning_rate": 6.90862768758727e-05,
+ "loss": 1.2067,
+ "step": 519
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.2481257873494882,
+ "learning_rate": 6.902768675573258e-05,
+ "loss": 1.2188,
+ "step": 520
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.2996395778117021,
+ "learning_rate": 6.896896477438699e-05,
+ "loss": 1.2326,
+ "step": 521
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.8839768816333193,
+ "learning_rate": 6.891011119858643e-05,
+ "loss": 1.2435,
+ "step": 522
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.2851882482058998,
+ "learning_rate": 6.885112629567927e-05,
+ "loss": 1.2644,
+ "step": 523
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.2813663482913699,
+ "learning_rate": 6.879201033361035e-05,
+ "loss": 1.2309,
+ "step": 524
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.3257551560135454,
+ "learning_rate": 6.873276358091996e-05,
+ "loss": 1.2755,
+ "step": 525
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.28930479952494365,
+ "learning_rate": 6.867338630674247e-05,
+ "loss": 1.1962,
+ "step": 526
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.3077462996938649,
+ "learning_rate": 6.861387878080511e-05,
+ "loss": 1.2402,
+ "step": 527
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.2848900193452761,
+ "learning_rate": 6.855424127342688e-05,
+ "loss": 1.2748,
+ "step": 528
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.4765938812802202,
+ "learning_rate": 6.849447405551718e-05,
+ "loss": 1.2226,
+ "step": 529
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.53184473292579,
+ "learning_rate": 6.843457739857467e-05,
+ "loss": 1.2347,
+ "step": 530
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.6416239346492343,
+ "learning_rate": 6.837455157468596e-05,
+ "loss": 1.2429,
+ "step": 531
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.3188092712502773,
+ "learning_rate": 6.831439685652442e-05,
+ "loss": 1.216,
+ "step": 532
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.3527495731006385,
+ "learning_rate": 6.825411351734895e-05,
+ "loss": 1.1682,
+ "step": 533
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 0.29603753744741856,
+ "learning_rate": 6.819370183100274e-05,
+ "loss": 1.1434,
+ "step": 534
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 0.5252450389976622,
+ "learning_rate": 6.813316207191198e-05,
+ "loss": 1.1943,
+ "step": 535
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 0.32999419558659937,
+ "learning_rate": 6.807249451508466e-05,
+ "loss": 1.192,
+ "step": 536
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 0.3650175469778724,
+ "learning_rate": 6.801169943610929e-05,
+ "loss": 1.2141,
+ "step": 537
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 1.0643532150783557,
+ "learning_rate": 6.795077711115368e-05,
+ "loss": 1.2253,
+ "step": 538
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.5041310609130145,
+ "learning_rate": 6.788972781696363e-05,
+ "loss": 1.278,
+ "step": 539
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.5123058164360991,
+ "learning_rate": 6.782855183086177e-05,
+ "loss": 1.2231,
+ "step": 540
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.3533015702394419,
+ "learning_rate": 6.776724943074619e-05,
+ "loss": 1.2072,
+ "step": 541
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.30253964625417207,
+ "learning_rate": 6.770582089508927e-05,
+ "loss": 1.1382,
+ "step": 542
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.348991618828202,
+ "learning_rate": 6.764426650293633e-05,
+ "loss": 1.2079,
+ "step": 543
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.46017440578788743,
+ "learning_rate": 6.758258653390444e-05,
+ "loss": 1.1813,
+ "step": 544
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.31962101755594885,
+ "learning_rate": 6.75207812681811e-05,
+ "loss": 1.1339,
+ "step": 545
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.37092024548285923,
+ "learning_rate": 6.745885098652298e-05,
+ "loss": 1.2591,
+ "step": 546
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.32347106450715835,
+ "learning_rate": 6.739679597025466e-05,
+ "loss": 1.2017,
+ "step": 547
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.39250187112342494,
+ "learning_rate": 6.733461650126733e-05,
+ "loss": 1.0933,
+ "step": 548
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.473522452217324,
+ "learning_rate": 6.727231286201752e-05,
+ "loss": 1.1124,
+ "step": 549
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.4809062179622052,
+ "learning_rate": 6.720988533552582e-05,
+ "loss": 1.1585,
+ "step": 550
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.3529662801059162,
+ "learning_rate": 6.714733420537559e-05,
+ "loss": 1.0501,
+ "step": 551
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.5958247214391118,
+ "learning_rate": 6.708465975571168e-05,
+ "loss": 1.1086,
+ "step": 552
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.5341364205022454,
+ "learning_rate": 6.70218622712391e-05,
+ "loss": 1.0518,
+ "step": 553
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.3601805724462006,
+ "learning_rate": 6.695894203722181e-05,
+ "loss": 1.1779,
+ "step": 554
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.43410190338280613,
+ "learning_rate": 6.68958993394813e-05,
+ "loss": 1.093,
+ "step": 555
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.46217742572873594,
+ "learning_rate": 6.683273446439546e-05,
+ "loss": 1.0117,
+ "step": 556
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.8591682373623357,
+ "learning_rate": 6.676944769889708e-05,
+ "loss": 1.1002,
+ "step": 557
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.7383229487622726,
+ "learning_rate": 6.670603933047272e-05,
+ "loss": 1.0779,
+ "step": 558
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.5965305891207813,
+ "learning_rate": 6.664250964716131e-05,
+ "loss": 1.0889,
+ "step": 559
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.6030858606684543,
+ "learning_rate": 6.657885893755288e-05,
+ "loss": 1.0982,
+ "step": 560
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.4644510682398409,
+ "learning_rate": 6.65150874907872e-05,
+ "loss": 1.1004,
+ "step": 561
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.43943285132452564,
+ "learning_rate": 6.645119559655254e-05,
+ "loss": 1.0536,
+ "step": 562
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.4456395978600012,
+ "learning_rate": 6.638718354508427e-05,
+ "loss": 1.0733,
+ "step": 563
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.3303824433217466,
+ "learning_rate": 6.632305162716365e-05,
+ "loss": 1.0552,
+ "step": 564
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.3617704823170143,
+ "learning_rate": 6.62588001341164e-05,
+ "loss": 1.1092,
+ "step": 565
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.4465013349903427,
+ "learning_rate": 6.619442935781141e-05,
+ "loss": 1.0781,
+ "step": 566
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.48516780613791277,
+ "learning_rate": 6.612993959065947e-05,
+ "loss": 1.0686,
+ "step": 567
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.38867820318536633,
+ "learning_rate": 6.606533112561186e-05,
+ "loss": 1.1215,
+ "step": 568
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.38566119820378336,
+ "learning_rate": 6.600060425615907e-05,
+ "loss": 1.1213,
+ "step": 569
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.35534855445058544,
+ "learning_rate": 6.593575927632947e-05,
+ "loss": 1.0955,
+ "step": 570
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.38124406233349717,
+ "learning_rate": 6.587079648068795e-05,
+ "loss": 1.0659,
+ "step": 571
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.454750160923548,
+ "learning_rate": 6.580571616433457e-05,
+ "loss": 1.1149,
+ "step": 572
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.35353190088025255,
+ "learning_rate": 6.574051862290325e-05,
+ "loss": 1.0388,
+ "step": 573
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.3249395594793626,
+ "learning_rate": 6.567520415256045e-05,
+ "loss": 1.0784,
+ "step": 574
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.40078898818247227,
+ "learning_rate": 6.560977305000375e-05,
+ "loss": 1.0859,
+ "step": 575
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.4115264795060035,
+ "learning_rate": 6.554422561246054e-05,
+ "loss": 1.1828,
+ "step": 576
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.30090229228069215,
+ "learning_rate": 6.54785621376867e-05,
+ "loss": 1.0901,
+ "step": 577
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.28827860350299206,
+ "learning_rate": 6.541278292396523e-05,
+ "loss": 1.0277,
+ "step": 578
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.34690404488996757,
+ "learning_rate": 6.534688827010484e-05,
+ "loss": 1.048,
+ "step": 579
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.29943113556644785,
+ "learning_rate": 6.528087847543867e-05,
+ "loss": 1.0646,
+ "step": 580
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.37318202575874415,
+ "learning_rate": 6.521475383982291e-05,
+ "loss": 1.1091,
+ "step": 581
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.3049663659203959,
+ "learning_rate": 6.51485146636354e-05,
+ "loss": 1.0552,
+ "step": 582
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.3342407867509692,
+ "learning_rate": 6.508216124777431e-05,
+ "loss": 1.2227,
+ "step": 583
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.3348396047855952,
+ "learning_rate": 6.501569389365674e-05,
+ "loss": 1.0861,
+ "step": 584
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.30951429367513383,
+ "learning_rate": 6.494911290321737e-05,
+ "loss": 1.0461,
+ "step": 585
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.33898401361064606,
+ "learning_rate": 6.488241857890711e-05,
+ "loss": 1.0854,
+ "step": 586
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.4901462068263497,
+ "learning_rate": 6.481561122369164e-05,
+ "loss": 1.1012,
+ "step": 587
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.3179574879809652,
+ "learning_rate": 6.474869114105018e-05,
+ "loss": 1.0451,
+ "step": 588
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.32159328915060714,
+ "learning_rate": 6.468165863497395e-05,
+ "loss": 1.0458,
+ "step": 589
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.36462235008537297,
+ "learning_rate": 6.461451400996491e-05,
+ "loss": 1.1247,
+ "step": 590
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.5373862753611778,
+ "learning_rate": 6.454725757103432e-05,
+ "loss": 1.0542,
+ "step": 591
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.3160409270291303,
+ "learning_rate": 6.447988962370133e-05,
+ "loss": 1.0829,
+ "step": 592
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.390452102978435,
+ "learning_rate": 6.441241047399169e-05,
+ "loss": 1.192,
+ "step": 593
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.3802122712014928,
+ "learning_rate": 6.434482042843627e-05,
+ "loss": 1.1153,
+ "step": 594
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.4081584328242501,
+ "learning_rate": 6.427711979406966e-05,
+ "loss": 1.1635,
+ "step": 595
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.3791962989638633,
+ "learning_rate": 6.420930887842889e-05,
+ "loss": 1.1581,
+ "step": 596
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.33239440056484193,
+ "learning_rate": 6.414138798955189e-05,
+ "loss": 1.0926,
+ "step": 597
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.3279881540815014,
+ "learning_rate": 6.407335743597616e-05,
+ "loss": 1.1386,
+ "step": 598
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.30309644763750837,
+ "learning_rate": 6.40052175267374e-05,
+ "loss": 1.0523,
+ "step": 599
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.3349097308403333,
+ "learning_rate": 6.393696857136801e-05,
+ "loss": 1.0815,
+ "step": 600
+ }
+ ],
+ "logging_steps": 1.0,
+ "max_steps": 1638,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 3,
+ "save_steps": 50,
+ "total_flos": 622081452539904.0,
+ "train_batch_size": 1,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/checkpoint-600/training_args.bin b/checkpoint-600/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..c5d2416a3b70bb5260978ec9996f00154a724ba7
--- /dev/null
+++ b/checkpoint-600/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b22e8f9d51a16d03a2c506fa3d1eafa8f4b1ae992992c2086a4d435ffd97387e
+size 6712
diff --git a/checkpoint-600/zero_to_fp32.py b/checkpoint-600/zero_to_fp32.py
new file mode 100755
index 0000000000000000000000000000000000000000..24cc342e78d1a006c782b3a4cd68d9ce786d8fd8
--- /dev/null
+++ b/checkpoint-600/zero_to_fp32.py
@@ -0,0 +1,604 @@
+#!/usr/bin/env python
+
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+# This script extracts fp32 consolidated weights from a zero 1, 2 and 3 DeepSpeed checkpoints. It gets
+# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
+# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
+# application.
+#
+# example: python zero_to_fp32.py . pytorch_model.bin
+
+import argparse
+import torch
+import glob
+import math
+import os
+import re
+from collections import OrderedDict
+from dataclasses import dataclass
+
+# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
+# DeepSpeed data structures it has to be available in the current python environment.
+from deepspeed.utils import logger
+from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
+
+
@dataclass
class zero_model_state:
    """Per-rank model state distilled from a ``*_model_states.pt`` file.

    Populated by ``parse_model_states``; the ``frozen_*`` fields are ``None``
    when the checkpoint contains no frozen parameters.
    """
    # NOTE: upstream annotated these fields with ``dict()`` — a *call* that
    # evaluates to an empty dict instance used as the annotation. Use the
    # types themselves; dataclass behavior is otherwise unchanged.
    buffers: dict                # buffer name -> fp32 tensor
    param_shapes: dict           # per param-group mapping: param name -> shape
    shared_params: list          # [alias_name, source_name] pairs
    ds_version: int              # NOTE(review): in practice holds the deepspeed version from the checkpoint; may be a str
    frozen_param_shapes: dict    # frozen param name -> shape, or None
    frozen_param_fragments: dict # frozen param name -> this rank's fragment, or None
+
+
# Module-level debug switch; flipped by the -d/--debug CLI flag in __main__.
debug = 0

# Load everything to CPU — reconstruction happens entirely in host memory.
device = torch.device('cpu')
+
+
def atoi(text):
    """Convert *text* to ``int`` when it is all digits, else return it unchanged."""
    if text.isdigit():
        return int(text)
    return text
+
+
def natural_keys(text):
    """Sort key producing human ("natural") ordering, so that e.g.
    ``step_9`` sorts before ``step_10``.

    alist.sort(key=natural_keys) sorts in human order
    http://nedbatchelder.com/blog/200712/human_sorting.html
    (See Toothy's implementation in the comments)
    """
    chunks = re.split(r'(\d+)', text)
    return [int(chunk) if chunk.isdigit() else chunk for chunk in chunks]
+
+
def get_model_state_file(checkpoint_dir, zero_stage):
    """Return the path of the single model-states file for this checkpoint.

    Args:
        checkpoint_dir: folder holding the per-rank checkpoint files
        zero_stage: ZeRO stage the checkpoint was saved with (1, 2 or 3)

    Raises:
        FileNotFoundError: if the directory or the expected file is missing
        ValueError: if *zero_stage* is not a recognized stage
    """
    if not os.path.isdir(checkpoint_dir):
        raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")

    # there should be only one file
    if zero_stage <= 2:
        file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
    elif zero_stage == 3:
        file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
    else:
        # previously an unrecognized stage fell through to the os.path.exists
        # check below with `file` unbound, crashing with UnboundLocalError
        raise ValueError(f"unknown zero stage {zero_stage}")

    if not os.path.exists(file):
        raise FileNotFoundError(f"can't find model states file at '{file}'")

    return file
+
+
def get_checkpoint_files(checkpoint_dir, glob_pattern):
    """Return the files in *checkpoint_dir* matching *glob_pattern*,
    sorted in natural (human) order; raise if none are found."""
    # XXX: need to test that this simple glob rule works for multi-node setup too
    pattern = os.path.join(checkpoint_dir, glob_pattern)
    ckpt_files = sorted(glob.glob(pattern), key=natural_keys)

    if not ckpt_files:
        raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")

    return ckpt_files
+
+
def get_optim_files(checkpoint_dir):
    """Return the naturally-sorted per-rank ``*_optim_states.pt`` files."""
    pattern = "*_optim_states.pt"
    return get_checkpoint_files(checkpoint_dir, pattern)
+
+
def get_model_state_files(checkpoint_dir):
    """Return the naturally-sorted per-rank ``*_model_states.pt`` files."""
    pattern = "*_model_states.pt"
    return get_checkpoint_files(checkpoint_dir, pattern)
+
+
def parse_model_states(files):
    """Load each per-rank ``*_model_states.pt`` file and distill it into a
    ``zero_model_state``.

    Args:
        files: per-rank model-states file paths, one per data-parallel rank

    Returns:
        list of ``zero_model_state`` in the same order as *files*

    Raises:
        ValueError: if a file lacks the buffer-names key, i.e. is not a
            model-states checkpoint
    """
    zero_model_states = []
    for file in files:
        state_dict = torch.load(file, map_location=device)

        if BUFFER_NAMES not in state_dict:
            raise ValueError(f"{file} is not a model state checkpoint")
        buffer_names = state_dict[BUFFER_NAMES]
        if debug:
            print("Found buffers:", buffer_names)

        # recover just the buffers while restoring them to fp32 if they were saved in fp16
        buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
        param_shapes = state_dict[PARAM_SHAPES]

        # collect parameters that are included in param_shapes
        # NOTE(review): param_names is built but never used below — presumably
        # kept for debugging; confirm before removing
        param_names = []
        for s in param_shapes:
            for name in s.keys():
                param_names.append(name)

        # update with frozen parameters
        frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
        if frozen_param_shapes is not None:
            if debug:
                print(f"Found frozen_param_shapes: {frozen_param_shapes}")
            param_names += list(frozen_param_shapes.keys())

        # handle shared params
        shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]

        ds_version = state_dict.get(DS_VERSION, None)

        frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)

        z_model_state = zero_model_state(buffers=buffers,
                                         param_shapes=param_shapes,
                                         shared_params=shared_params,
                                         ds_version=ds_version,
                                         frozen_param_shapes=frozen_param_shapes,
                                         frozen_param_fragments=frozen_param_fragments)
        zero_model_states.append(z_model_state)

    return zero_model_states
+
+
def parse_optim_states(files, ds_checkpoint_dir):
    """Load the per-rank ``*_optim_states.pt`` files and extract what the
    fp32 reconstruction needs.

    Args:
        files: per-rank optimizer-states file paths
        ds_checkpoint_dir: checkpoint folder (used only for error messages)

    Returns:
        ``(zero_stage, world_size, fp32_flat_groups)`` where
        ``fp32_flat_groups[rank]`` holds that rank's fp32 master-weight
        partition(s)

    Raises:
        ValueError: if the files are not a ZeRO checkpoint, the file count
            doesn't match the recorded world size, or the stage is unknown
    """

    total_files = len(files)
    state_dicts = []
    for f in files:
        state_dict = torch.load(f, map_location=device)
        # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights
        # and also handle the case where it was already removed by another helper script
        state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
        state_dicts.append(state_dict)

    if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]:
        raise ValueError(f"{files[0]} is not a zero checkpoint")
    zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
    world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]

    # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
    # parameters can be different from data parallelism for non-expert parameters. So we can just
    # use the max of the partition_count to get the dp world_size.

    if type(world_size) is list:
        world_size = max(world_size)

    if world_size != total_files:
        raise ValueError(
            f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
            "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
        )

    # the groups are named differently in each stage
    if zero_stage <= 2:
        fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
    elif zero_stage == 3:
        fp32_groups_key = FP32_FLAT_GROUPS
    else:
        raise ValueError(f"unknown zero stage {zero_stage}")

    if zero_stage <= 2:
        # stage 1/2: keep the per-rank list of per-group partitions as-is
        fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
    elif zero_stage == 3:
        # if there is more than one param group, there will be multiple flattened tensors - one
        # flattened tensor per group - for simplicity merge them into a single tensor
        #
        # XXX: could make the script more memory efficient for when there are multiple groups - it
        # will require matching the sub-lists of param_shapes for each param group flattened tensor

        fp32_flat_groups = [
            torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
        ]

    return zero_stage, world_size, fp32_flat_groups
+
+
def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
    """
    Returns fp32 state_dict reconstructed from ds checkpoint

    Args:
        - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
        - ``exclude_frozen_parameters``: when True, frozen parameters are left
          out of the reconstructed state_dict

    """
    print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")

    optim_files = get_optim_files(ds_checkpoint_dir)
    zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
    print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")

    model_files = get_model_state_files(ds_checkpoint_dir)

    zero_model_states = parse_model_states(model_files)
    print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')

    # dispatch on stage; parse_optim_states has already rejected unknown stages
    if zero_stage <= 2:
        return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                                          exclude_frozen_parameters)
    elif zero_stage == 3:
        return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                                          exclude_frozen_parameters)
+
+
def _zero2_merge_frozen_params(state_dict, zero_model_states):
    """Copy frozen parameters into *state_dict* for a ZeRO-1/2 checkpoint.

    Frozen params are not sharded in stage 1/2 — rank 0's fragments are the
    full tensors, so they are copied over verbatim. No-op when the checkpoint
    has no frozen parameters.
    """
    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
        return

    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
    frozen_param_fragments = zero_model_states[0].frozen_param_fragments

    if debug:
        num_elem = sum(s.numel() for s in frozen_param_shapes.values())
        print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

    wanted_params = len(frozen_param_shapes)
    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
    avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
    print(f'Frozen params: Have {avail_numel} numels to process.')
    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params = 0
    total_numel = 0
    for name, shape in frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        # rank 0's fragment already holds the whole tensor in stage 1/2
        state_dict[name] = frozen_param_fragments[name]

        if debug:
            print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _has_callable(obj, fn):
+ attr = getattr(obj, fn, None)
+ return callable(attr)
+
+
def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    """Reassemble the trainable parameters of a ZeRO-1/2 checkpoint into
    *state_dict*.

    Each rank holds a contiguous partition of every param group's flattened
    fp32 master weights; the partitions are concatenated per group and then
    sliced back into individual parameters using the recorded shapes.
    """
    param_shapes = zero_model_states[0].param_shapes

    # Reconstruction protocol:
    #
    # XXX: document this

    if debug:
        for i in range(world_size):
            for j in range(len(fp32_flat_groups[0])):
                print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")

    # XXX: memory usage doubles here (zero2)
    num_param_groups = len(fp32_flat_groups[0])
    merged_single_partition_of_fp32_groups = []
    for i in range(num_param_groups):
        # concatenate every rank's partition of group i into one flat tensor
        merged_partitions = [sd[i] for sd in fp32_flat_groups]
        full_single_fp32_vector = torch.cat(merged_partitions, 0)
        merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
    avail_numel = sum(
        [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])

    if debug:
        wanted_params = sum([len(shapes) for shapes in param_shapes])
        wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
        # not asserting if there is a mismatch due to possible padding
        print(f"Have {avail_numel} numels to process.")
        print(f"Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    total_numel = 0
    total_params = 0
    for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
        offset = 0
        avail_numel = full_single_fp32_vector.numel()
        for name, shape in shapes.items():

            # shapes may be torch.Size (has .numel) or plain tuples
            unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
            total_numel += unpartitioned_numel
            total_params += 1

            if debug:
                print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
            state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
            offset += unpartitioned_numel

        # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
        # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
        # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
        # live optimizer object, so we are checking that the numbers are within the right range
        align_to = 2 * world_size

        def zero2_align(x):
            return align_to * math.ceil(x / align_to)

        if debug:
            print(f"original offset={offset}, avail_numel={avail_numel}")

        offset = zero2_align(offset)
        avail_numel = zero2_align(avail_numel)

        if debug:
            print(f"aligned offset={offset}, avail_numel={avail_numel}")

        # Sanity check
        if offset != avail_numel:
            raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
+
+
def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                               exclude_frozen_parameters):
    """Build the consolidated fp32 state_dict for a ZeRO-1/2 checkpoint:
    buffers, then frozen params (unless excluded), then trainable params,
    finally re-aliasing shared parameters."""
    state_dict = OrderedDict()

    # buffers
    buffers = zero_model_states[0].buffers
    state_dict.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    if not exclude_frozen_parameters:
        _zero2_merge_frozen_params(state_dict, zero_model_states)

    _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters: pair is [alias_name, source_name]
    for pair in zero_model_states[0].shared_params:
        if pair[1] in state_dict:
            state_dict[pair[0]] = state_dict[pair[1]]

    return state_dict
+
+
def zero3_partitioned_param_info(unpartitioned_numel, world_size):
    """Return ``(partitioned_numel, padding_numel)`` for a parameter of
    *unpartitioned_numel* elements sharded evenly across *world_size* ranks.

    ``partitioned_numel`` is the per-rank slice size (last rank is padded by
    ``padding_numel`` zeros when the total doesn't divide evenly).
    """
    partitioned_numel = math.ceil(unpartitioned_numel / world_size)
    leftover = unpartitioned_numel % world_size
    padding_numel = world_size - leftover if leftover else 0
    return partitioned_numel, padding_numel
+
+
def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
    """Reassemble frozen parameters of a ZeRO-3 checkpoint into *state_dict*.

    In stage 3 frozen params are sharded across ranks, so each param is the
    concatenation of every rank's fragment, trimmed of alignment padding.
    No-op when the checkpoint has no frozen parameters.
    """
    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
        return

    if debug:
        for i in range(world_size):
            num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
            print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
    wanted_params = len(frozen_param_shapes)
    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
    avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
    print(f'Frozen params: Have {avail_numel} numels to process.')
    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params = 0
    total_numel = 0
    for name, shape in zero_model_states[0].frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        # concatenate every rank's fragment, drop trailing padding, restore shape
        param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
        state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)

        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    """Reassemble the trainable parameters of a ZeRO-3 checkpoint into
    *state_dict*.

    Every rank holds one flat fp32 tensor; each parameter occupies the same
    [offset, offset+partitioned_numel) window in every rank's tensor, so the
    per-rank slices are concatenated, trimmed of padding, and reshaped.
    """
    param_shapes = zero_model_states[0].param_shapes
    avail_numel = fp32_flat_groups[0].numel() * world_size
    # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
    # param, re-consolidating each param, while dealing with padding if any

    # merge list of dicts, preserving order
    param_shapes = {k: v for d in param_shapes for k, v in d.items()}

    if debug:
        for i in range(world_size):
            print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")

    wanted_params = len(param_shapes)
    wanted_numel = sum(shape.numel() for shape in param_shapes.values())
    # not asserting if there is a mismatch due to possible padding
    avail_numel = fp32_flat_groups[0].numel() * world_size
    print(f"Trainable params: Have {avail_numel} numels to process.")
    print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    offset = 0
    total_numel = 0
    total_params = 0
    for name, shape in param_shapes.items():

        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel
        total_params += 1

        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

        # XXX: memory usage doubles here
        state_dict[name] = torch.cat(
            tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
            0).narrow(0, 0, unpartitioned_numel).view(shape)
        offset += partitioned_numel

    # offset advanced per-rank; scale to compare against the global numel
    offset *= world_size

    # Sanity check
    if offset != avail_numel:
        raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
+
+
def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                               exclude_frozen_parameters):
    """Build the consolidated fp32 state_dict for a ZeRO-3 checkpoint:
    buffers, then frozen params (unless excluded), then trainable params,
    finally re-aliasing shared parameters."""
    state_dict = OrderedDict()

    # buffers
    buffers = zero_model_states[0].buffers
    state_dict.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    if not exclude_frozen_parameters:
        _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)

    _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters: pair is [alias_name, source_name]
    for pair in zero_model_states[0].shared_params:
        if pair[1] in state_dict:
            state_dict[pair[0]] = state_dict[pair[1]]

    return state_dict
+
+
def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None, exclude_frozen_parameters=False):
    """
    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
    ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
    via a model hub.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
        - ``exclude_frozen_parameters``: exclude frozen parameters

    Returns:
        - pytorch ``state_dict``

    Note: this approach may not work if your application doesn't have sufficient free CPU memory and
    you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
    the checkpoint.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
        # do the training and checkpoint saving
        state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
        model = model.cpu() # move to cpu
        model.load_state_dict(state_dict)
        # submit to model hub or save the model to share with others

    In this example the ``model`` will no longer be usable in the deepspeed context of the same
    application. i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.

    """
    # resolve the tag from the 'latest' file that DeepSpeed writes at save time
    if tag is None:
        latest_path = os.path.join(checkpoint_dir, 'latest')
        if os.path.isfile(latest_path):
            with open(latest_path, 'r') as fd:
                tag = fd.read().strip()
        else:
            raise ValueError(f"Unable to find 'latest' file at {latest_path}")

    # the actual rank files live in the tag-named subfolder, e.g. global_step750/
    ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)

    if not os.path.isdir(ds_checkpoint_dir):
        raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")

    return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
+
+
def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None, exclude_frozen_parameters=False):
    """
    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
    loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
        - ``exclude_frozen_parameters``: exclude frozen parameters
    """

    # reconstruct in memory, then persist in one torch.save call
    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag, exclude_frozen_parameters)
    print(f"Saving fp32 state dict to {output_file}")
    torch.save(state_dict, output_file)
+
+
def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
    """
    1. Put the provided model to cpu
    2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
    3. Load it into the provided model

    Args:
        - ``model``: the model object to update
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``

    Returns:
        - ``model``: modified model

    Make sure you have plenty of CPU memory available before you call this function. If you don't
    have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
    conveniently placed for you in the checkpoint folder.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
        model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
        # submit to model hub or save the model to share with others

    Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context
    of the same application. i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    """
    logger.info(f"Extracting fp32 weights")
    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)

    logger.info(f"Overwriting model with fp32 weights")
    model = model.cpu()
    # NOTE(review): strict=False tolerates keys missing from the reconstructed
    # dict (e.g. excluded frozen params) — confirm this is the intended contract
    model.load_state_dict(state_dict, strict=False)

    return model
+
+
if __name__ == "__main__":

    # CLI entry point: consolidate a ZeRO checkpoint into a single fp32
    # state_dict file, e.g. ``python zero_to_fp32.py . pytorch_model.bin``.
    parser = argparse.ArgumentParser()
    parser.add_argument("checkpoint_dir",
                        type=str,
                        help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
    parser.add_argument(
        "output_file",
        type=str,
        help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)")
    parser.add_argument("-t",
                        "--tag",
                        type=str,
                        default=None,
                        help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
    parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
    parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
    args = parser.parse_args()

    # flip the module-level flag so all helpers emit their diagnostics
    debug = args.debug

    convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
                                               args.output_file,
                                               tag=args.tag,
                                               exclude_frozen_parameters=args.exclude_frozen_parameters)
diff --git a/checkpoint-750/README.md b/checkpoint-750/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..16b1eacdd9353dec380a08ee77ce6ed5ab50f12e
--- /dev/null
+++ b/checkpoint-750/README.md
@@ -0,0 +1,202 @@
+---
+library_name: peft
+base_model: gotzmann/uni
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+### Framework versions
+
+- PEFT 0.10.0
\ No newline at end of file
diff --git a/checkpoint-750/adapter_config.json b/checkpoint-750/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3cd6dba5d79f7ca21fd4ad465cbbcac1e0960476
--- /dev/null
+++ b/checkpoint-750/adapter_config.json
@@ -0,0 +1,31 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "gotzmann/uni",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 128,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 128,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "v_proj",
+ "k_proj",
+ "q_proj",
+ "o_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": true
+}
\ No newline at end of file
diff --git a/checkpoint-750/adapter_model.safetensors b/checkpoint-750/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..a13b064da6a9ec698c137a73eba9a668c95aabc9
--- /dev/null
+++ b/checkpoint-750/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d08962f3cc64278013e1154366e9ca7f6847a52987d6f5be68a01f40a5344d8d
+size 1048664848
diff --git a/checkpoint-750/global_step750/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt b/checkpoint-750/global_step750/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..156285fe76bb84fc9b9ea5f08ee13307ef2c66cd
--- /dev/null
+++ b/checkpoint-750/global_step750/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a711fabb31e0d2159d23671070ef18972c8c1a55fb06f148b6ce0b9b54052fd3
+size 787270042
diff --git a/checkpoint-750/global_step750/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt b/checkpoint-750/global_step750/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..f9276383cba2ca5acfcd136cfe7a3872ff4948e2
--- /dev/null
+++ b/checkpoint-750/global_step750/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:02b8656caf74761fc753d0398eb4b8d4b2a1f284d77cdaa713da66b088d8f494
+size 787270042
diff --git a/checkpoint-750/global_step750/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt b/checkpoint-750/global_step750/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..88dfa87da72326795ed42294dd3bee13d0f3d8df
--- /dev/null
+++ b/checkpoint-750/global_step750/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fafcd8b6b974b21163df6a4fd91c78e4a79519595f2412c66fe9a38e2c350725
+size 787270042
diff --git a/checkpoint-750/global_step750/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt b/checkpoint-750/global_step750/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..2f86f2cb93a4004efc85a4b24747a0aadb1481c4
--- /dev/null
+++ b/checkpoint-750/global_step750/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dbb11855fbd7a091f1085b2057cb49b96a8726a48bde0fe69c8778480ab8941f
+size 787270042
diff --git a/checkpoint-750/global_step750/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt b/checkpoint-750/global_step750/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..c3af1c86414aec1cb392f6c6571273a5fd8c2f19
--- /dev/null
+++ b/checkpoint-750/global_step750/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c2a49eb0fe6cfeb58c9fbfa4393a239434908146331a56f42ef6041bed7d719b
+size 787270042
diff --git a/checkpoint-750/global_step750/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt b/checkpoint-750/global_step750/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..9062c6bd1e9f96d605e6b761c22e07a47746d8dc
--- /dev/null
+++ b/checkpoint-750/global_step750/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:970f21d22af841bcb7d7b1d57d8f2000c8b7cf698551b3b69757fbdc337dd112
+size 787270042
diff --git a/checkpoint-750/global_step750/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt b/checkpoint-750/global_step750/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..41acff84ea4aae21964cd74e14e741cbc4f86bb9
--- /dev/null
+++ b/checkpoint-750/global_step750/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8ed9432117d78258c483a7bc8af52ce435685949414278321041bb30cdf9b138
+size 787270042
diff --git a/checkpoint-750/global_step750/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt b/checkpoint-750/global_step750/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..9331c8c8a96f0203f54c20facc1391c2bfd22413
--- /dev/null
+++ b/checkpoint-750/global_step750/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dcf390b7531b58dd58a8c9fa8dbc722d316d46661a99d4cbe21bdb1a913b9183
+size 787270042
diff --git a/checkpoint-750/global_step750/zero_pp_rank_0_mp_rank_00_model_states.pt b/checkpoint-750/global_step750/zero_pp_rank_0_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..fe79240f6b4a706b86c019a7a4009f1657ec5974
--- /dev/null
+++ b/checkpoint-750/global_step750/zero_pp_rank_0_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e1c3767dfc19806a88ba86871e990625153fd9b5e05127f3cfcccc700f6c07e6
+size 653742
diff --git a/checkpoint-750/global_step750/zero_pp_rank_1_mp_rank_00_model_states.pt b/checkpoint-750/global_step750/zero_pp_rank_1_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..177f90f14ef5e987a3d5886470afa80c620a80eb
--- /dev/null
+++ b/checkpoint-750/global_step750/zero_pp_rank_1_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b7dc078f6c51e1ac0798eb3cfcd60e50ad6f34a4faa986ca780e2f9d3f0fd776
+size 653742
diff --git a/checkpoint-750/global_step750/zero_pp_rank_2_mp_rank_00_model_states.pt b/checkpoint-750/global_step750/zero_pp_rank_2_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..5b7a513ff6a7c6051c59d443e8bbab412aef475a
--- /dev/null
+++ b/checkpoint-750/global_step750/zero_pp_rank_2_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a367ca87901d820a9554c8d7296018a6d28f33d21c8e7d087ac59bba94204cc7
+size 653742
diff --git a/checkpoint-750/global_step750/zero_pp_rank_3_mp_rank_00_model_states.pt b/checkpoint-750/global_step750/zero_pp_rank_3_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..8af29a8256c90fc06047690be799aafd6621a12a
--- /dev/null
+++ b/checkpoint-750/global_step750/zero_pp_rank_3_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:762d9a703f46f0ce600c1edf3107a2ca634ac1c6664925f5a3190c51698dc801
+size 653742
diff --git a/checkpoint-750/global_step750/zero_pp_rank_4_mp_rank_00_model_states.pt b/checkpoint-750/global_step750/zero_pp_rank_4_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..f4c0e663c450df3de6ae5ad4769eb1940d725be3
--- /dev/null
+++ b/checkpoint-750/global_step750/zero_pp_rank_4_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:69ab1708ce4fae1650ea631de1ef6e460926fa6a787cdd7d989e35749a754dca
+size 653742
diff --git a/checkpoint-750/global_step750/zero_pp_rank_5_mp_rank_00_model_states.pt b/checkpoint-750/global_step750/zero_pp_rank_5_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..6f613e029841b0f414d01e85f4b066e85314643f
--- /dev/null
+++ b/checkpoint-750/global_step750/zero_pp_rank_5_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0f81695549d4f5801aa7104af821ca3c18fca257c62a20eb1e9b65a69462930b
+size 653742
diff --git a/checkpoint-750/global_step750/zero_pp_rank_6_mp_rank_00_model_states.pt b/checkpoint-750/global_step750/zero_pp_rank_6_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..005adfd9b83cf9ff36d3072c52b7df7c0337d32a
--- /dev/null
+++ b/checkpoint-750/global_step750/zero_pp_rank_6_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1791542f5e2c71a6c3fdaba8ef916dcbe1edce8292a656307dc9f5298bb3fde4
+size 653742
diff --git a/checkpoint-750/global_step750/zero_pp_rank_7_mp_rank_00_model_states.pt b/checkpoint-750/global_step750/zero_pp_rank_7_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..815e479cc697e2a18a6369b7d0ca607810e9c88d
--- /dev/null
+++ b/checkpoint-750/global_step750/zero_pp_rank_7_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:da14438c3212dca3b622deb8f8508e816c58644265ee0ff0ca9152efc9b9d8e6
+size 653742
diff --git a/checkpoint-750/latest b/checkpoint-750/latest
new file mode 100644
index 0000000000000000000000000000000000000000..f443e084e0e73b2cb9226c3b73c42d443059068f
--- /dev/null
+++ b/checkpoint-750/latest
@@ -0,0 +1 @@
+global_step750
\ No newline at end of file
diff --git a/checkpoint-750/rng_state_0.pth b/checkpoint-750/rng_state_0.pth
new file mode 100644
index 0000000000000000000000000000000000000000..4e5b7e2ec90fdb824c8932464c1d9068330655a7
--- /dev/null
+++ b/checkpoint-750/rng_state_0.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:36d2a2034ebb05cb71c510897f2795b31164e50f17b270bc25d2be3ad9a17b22
+size 15984
diff --git a/checkpoint-750/rng_state_1.pth b/checkpoint-750/rng_state_1.pth
new file mode 100644
index 0000000000000000000000000000000000000000..7d8d7722fc72cab6d492b76cb99c8177dcc47544
--- /dev/null
+++ b/checkpoint-750/rng_state_1.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:060dfdb1c49102cbdc8868a6031e68787601b4ccd782f3fb9b137e20c1fd2c7a
+size 15984
diff --git a/checkpoint-750/rng_state_2.pth b/checkpoint-750/rng_state_2.pth
new file mode 100644
index 0000000000000000000000000000000000000000..3c9f84eff30cfa9ea1feedaf262d61fb12e4cba7
--- /dev/null
+++ b/checkpoint-750/rng_state_2.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af01895cb66e616591f2e4baa8dcd8151530eab133c73571ccb31c74f35422ce
+size 15984
diff --git a/checkpoint-750/rng_state_3.pth b/checkpoint-750/rng_state_3.pth
new file mode 100644
index 0000000000000000000000000000000000000000..6eebfb928f8e91eff0ea1645a20b5aa4465c705b
--- /dev/null
+++ b/checkpoint-750/rng_state_3.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:677921992b1e0cef3aee776f245975003d22f51d9bd6ed20f248ded1deb72fa9
+size 15984
diff --git a/checkpoint-750/rng_state_4.pth b/checkpoint-750/rng_state_4.pth
new file mode 100644
index 0000000000000000000000000000000000000000..0866030a266c6d003cc378a9418a723f69e8ab99
--- /dev/null
+++ b/checkpoint-750/rng_state_4.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d69353c629541c690c5471f8ec05fdab2bfecf3d37afaa436bc45939da6db68f
+size 15984
diff --git a/checkpoint-750/rng_state_5.pth b/checkpoint-750/rng_state_5.pth
new file mode 100644
index 0000000000000000000000000000000000000000..554638d77107f832d7aa51c61645ee2d6c48a36d
--- /dev/null
+++ b/checkpoint-750/rng_state_5.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8e40ba6668cc03c9162c68a933d164bf38ae2d196a9a6fec03ae615491201185
+size 15984
diff --git a/checkpoint-750/rng_state_6.pth b/checkpoint-750/rng_state_6.pth
new file mode 100644
index 0000000000000000000000000000000000000000..964331b65172a1bcac03e4673415fa787f724268
--- /dev/null
+++ b/checkpoint-750/rng_state_6.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:870968fea834e24b2e099cf3e4fe1e3fb8caf38d8f8e5b790d7d47386d4d05f5
+size 15984
diff --git a/checkpoint-750/rng_state_7.pth b/checkpoint-750/rng_state_7.pth
new file mode 100644
index 0000000000000000000000000000000000000000..cd4754d65217d0f9d1f2d3334397df7a8a079652
--- /dev/null
+++ b/checkpoint-750/rng_state_7.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e9e19618bee7c6ef43256fea25abe19bca88535eb1e7dc213cde8929ae4e8180
+size 15984
diff --git a/checkpoint-750/scheduler.pt b/checkpoint-750/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..41ff0494d20d190c25da992ece6b8b358028d564
--- /dev/null
+++ b/checkpoint-750/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:59e12f88a62db407aa272bd77b60965bec97929153b681326f419d8de8cda662
+size 1064
diff --git a/checkpoint-750/special_tokens_map.json b/checkpoint-750/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..72ecfeeb7e14d244c936169d2ed139eeae235ef1
--- /dev/null
+++ b/checkpoint-750/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "",
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/checkpoint-750/tokenizer.model b/checkpoint-750/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..6c00c742ce03c627d6cd5b795984876fa49fa899
--- /dev/null
+++ b/checkpoint-750/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
diff --git a/checkpoint-750/tokenizer_config.json b/checkpoint-750/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..bb5a9f09d8c0f3c32c66fc6118fe5c76c5c6fd90
--- /dev/null
+++ b/checkpoint-750/tokenizer_config.json
@@ -0,0 +1,45 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "",
+ "chat_template": "{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ '' + '### System:\\n\\n' + system_message }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '\\n\\n### Human:\\n\\n' + content }}{% elif message['role'] == 'assistant' %}{{ '\\n\\n### Assistant:\\n\\n' + content + '' }}{% endif %}{% endfor %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "",
+ "legacy": false,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "split_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "",
+ "use_default_system_prompt": false
+}
diff --git a/checkpoint-750/trainer_state.json b/checkpoint-750/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..449168623e2d504d23331bacd9d0ed25aaf509c7
--- /dev/null
+++ b/checkpoint-750/trainer_state.json
@@ -0,0 +1,5271 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 1.3717421124828533,
+ "eval_steps": 500,
+ "global_step": 750,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.0,
+ "grad_norm": 0.849355824164473,
+ "learning_rate": 4.878048780487805e-07,
+ "loss": 1.3655,
+ "step": 1
+ },
+ {
+ "epoch": 0.0,
+ "grad_norm": 10.01567518957158,
+ "learning_rate": 9.75609756097561e-07,
+ "loss": 1.5767,
+ "step": 2
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.6466000875559635,
+ "learning_rate": 1.4634146341463414e-06,
+ "loss": 1.3913,
+ "step": 3
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.6644565932010504,
+ "learning_rate": 1.951219512195122e-06,
+ "loss": 1.3218,
+ "step": 4
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.571354207588475,
+ "learning_rate": 2.4390243902439027e-06,
+ "loss": 1.3597,
+ "step": 5
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.31036262839244955,
+ "learning_rate": 2.926829268292683e-06,
+ "loss": 1.2832,
+ "step": 6
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.2622135027188184,
+ "learning_rate": 3.414634146341464e-06,
+ "loss": 1.2161,
+ "step": 7
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.296824630261661,
+ "learning_rate": 3.902439024390244e-06,
+ "loss": 1.2985,
+ "step": 8
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.2557267467361569,
+ "learning_rate": 4.390243902439025e-06,
+ "loss": 1.3175,
+ "step": 9
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.23418939513890769,
+ "learning_rate": 4.8780487804878055e-06,
+ "loss": 1.2617,
+ "step": 10
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.2364760983285843,
+ "learning_rate": 5.365853658536586e-06,
+ "loss": 1.3103,
+ "step": 11
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.23893034721889,
+ "learning_rate": 5.853658536585366e-06,
+ "loss": 1.2405,
+ "step": 12
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.25563593295485887,
+ "learning_rate": 6.341463414634147e-06,
+ "loss": 1.2831,
+ "step": 13
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.23239975352661665,
+ "learning_rate": 6.829268292682928e-06,
+ "loss": 1.3125,
+ "step": 14
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.3092813858209507,
+ "learning_rate": 7.317073170731707e-06,
+ "loss": 1.2422,
+ "step": 15
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.282563380367434,
+ "learning_rate": 7.804878048780489e-06,
+ "loss": 1.2453,
+ "step": 16
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.22065680088315018,
+ "learning_rate": 8.292682926829268e-06,
+ "loss": 1.2491,
+ "step": 17
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.22777800877980184,
+ "learning_rate": 8.78048780487805e-06,
+ "loss": 1.2655,
+ "step": 18
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.22145212540177928,
+ "learning_rate": 9.268292682926831e-06,
+ "loss": 1.2413,
+ "step": 19
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.22482351883112714,
+ "learning_rate": 9.756097560975611e-06,
+ "loss": 1.2653,
+ "step": 20
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.20823080508385733,
+ "learning_rate": 1.024390243902439e-05,
+ "loss": 1.2374,
+ "step": 21
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.26025492562935737,
+ "learning_rate": 1.0731707317073172e-05,
+ "loss": 1.2065,
+ "step": 22
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.2150252124176173,
+ "learning_rate": 1.1219512195121953e-05,
+ "loss": 1.2782,
+ "step": 23
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.2505915177425618,
+ "learning_rate": 1.1707317073170731e-05,
+ "loss": 1.2742,
+ "step": 24
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.20129223044786942,
+ "learning_rate": 1.2195121951219513e-05,
+ "loss": 1.3366,
+ "step": 25
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.1973508510397107,
+ "learning_rate": 1.2682926829268294e-05,
+ "loss": 1.2476,
+ "step": 26
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.27103325392437194,
+ "learning_rate": 1.3170731707317076e-05,
+ "loss": 1.2325,
+ "step": 27
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.17954976411006285,
+ "learning_rate": 1.3658536585365855e-05,
+ "loss": 1.2523,
+ "step": 28
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.22216997851088888,
+ "learning_rate": 1.4146341463414635e-05,
+ "loss": 1.3297,
+ "step": 29
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.2071458864548587,
+ "learning_rate": 1.4634146341463415e-05,
+ "loss": 1.2127,
+ "step": 30
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.18039422081622164,
+ "learning_rate": 1.5121951219512196e-05,
+ "loss": 1.2509,
+ "step": 31
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.18631254372974412,
+ "learning_rate": 1.5609756097560978e-05,
+ "loss": 1.2247,
+ "step": 32
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.18843872523649827,
+ "learning_rate": 1.6097560975609757e-05,
+ "loss": 1.195,
+ "step": 33
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.2163847267778325,
+ "learning_rate": 1.6585365853658537e-05,
+ "loss": 1.2179,
+ "step": 34
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.19687688475496104,
+ "learning_rate": 1.7073170731707317e-05,
+ "loss": 1.2763,
+ "step": 35
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.20409643064887947,
+ "learning_rate": 1.75609756097561e-05,
+ "loss": 1.253,
+ "step": 36
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.1879182661759335,
+ "learning_rate": 1.804878048780488e-05,
+ "loss": 1.2586,
+ "step": 37
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.19400648948514373,
+ "learning_rate": 1.8536585365853663e-05,
+ "loss": 1.2154,
+ "step": 38
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.1878879343148452,
+ "learning_rate": 1.902439024390244e-05,
+ "loss": 1.2304,
+ "step": 39
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.17687475469924052,
+ "learning_rate": 1.9512195121951222e-05,
+ "loss": 1.2351,
+ "step": 40
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.18223935625384885,
+ "learning_rate": 2e-05,
+ "loss": 1.2222,
+ "step": 41
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.1943061629408338,
+ "learning_rate": 2.048780487804878e-05,
+ "loss": 1.2044,
+ "step": 42
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.17027514338700078,
+ "learning_rate": 2.0975609756097564e-05,
+ "loss": 1.1548,
+ "step": 43
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.18553769630586192,
+ "learning_rate": 2.1463414634146344e-05,
+ "loss": 1.2721,
+ "step": 44
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.19732826914228765,
+ "learning_rate": 2.1951219512195124e-05,
+ "loss": 1.3097,
+ "step": 45
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.18714230986631472,
+ "learning_rate": 2.2439024390243907e-05,
+ "loss": 1.2662,
+ "step": 46
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.19988987568002223,
+ "learning_rate": 2.2926829268292683e-05,
+ "loss": 1.2904,
+ "step": 47
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.17744650133390918,
+ "learning_rate": 2.3414634146341463e-05,
+ "loss": 1.1825,
+ "step": 48
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.16576734763834533,
+ "learning_rate": 2.3902439024390246e-05,
+ "loss": 1.1858,
+ "step": 49
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.179591794065527,
+ "learning_rate": 2.4390243902439026e-05,
+ "loss": 1.2711,
+ "step": 50
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.17923464471176911,
+ "learning_rate": 2.4878048780487805e-05,
+ "loss": 1.2289,
+ "step": 51
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.18991742907836837,
+ "learning_rate": 2.536585365853659e-05,
+ "loss": 1.3097,
+ "step": 52
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.19849796137254636,
+ "learning_rate": 2.5853658536585368e-05,
+ "loss": 1.2489,
+ "step": 53
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.17452371110976383,
+ "learning_rate": 2.634146341463415e-05,
+ "loss": 1.2461,
+ "step": 54
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.17671022353085036,
+ "learning_rate": 2.682926829268293e-05,
+ "loss": 1.153,
+ "step": 55
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.36820559192096686,
+ "learning_rate": 2.731707317073171e-05,
+ "loss": 1.2431,
+ "step": 56
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.20331468526494198,
+ "learning_rate": 2.7804878048780487e-05,
+ "loss": 1.2575,
+ "step": 57
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.2402486598118377,
+ "learning_rate": 2.829268292682927e-05,
+ "loss": 1.2538,
+ "step": 58
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.2549409484173144,
+ "learning_rate": 2.878048780487805e-05,
+ "loss": 1.2065,
+ "step": 59
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.2053105349872685,
+ "learning_rate": 2.926829268292683e-05,
+ "loss": 1.2094,
+ "step": 60
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.17971910872957886,
+ "learning_rate": 2.9756097560975613e-05,
+ "loss": 1.228,
+ "step": 61
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.1885853654992973,
+ "learning_rate": 3.0243902439024392e-05,
+ "loss": 1.2286,
+ "step": 62
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.1848524571968613,
+ "learning_rate": 3.073170731707317e-05,
+ "loss": 1.2718,
+ "step": 63
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.18734105883548513,
+ "learning_rate": 3.1219512195121955e-05,
+ "loss": 1.2357,
+ "step": 64
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.17774668052121825,
+ "learning_rate": 3.170731707317074e-05,
+ "loss": 1.1509,
+ "step": 65
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.17890968008080646,
+ "learning_rate": 3.2195121951219514e-05,
+ "loss": 1.1924,
+ "step": 66
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.18249273371332375,
+ "learning_rate": 3.268292682926829e-05,
+ "loss": 1.2545,
+ "step": 67
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.21064122671902577,
+ "learning_rate": 3.3170731707317074e-05,
+ "loss": 1.2832,
+ "step": 68
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.1820064171955093,
+ "learning_rate": 3.365853658536586e-05,
+ "loss": 1.2071,
+ "step": 69
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.16996662800553433,
+ "learning_rate": 3.414634146341463e-05,
+ "loss": 1.2073,
+ "step": 70
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.1618669302922445,
+ "learning_rate": 3.4634146341463416e-05,
+ "loss": 1.1289,
+ "step": 71
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.18948744950985544,
+ "learning_rate": 3.51219512195122e-05,
+ "loss": 1.2915,
+ "step": 72
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.18326143691603383,
+ "learning_rate": 3.5609756097560976e-05,
+ "loss": 1.2238,
+ "step": 73
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.17410704510700503,
+ "learning_rate": 3.609756097560976e-05,
+ "loss": 1.1784,
+ "step": 74
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.1983667344995625,
+ "learning_rate": 3.658536585365854e-05,
+ "loss": 1.2452,
+ "step": 75
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.3416310763369357,
+ "learning_rate": 3.7073170731707325e-05,
+ "loss": 1.1972,
+ "step": 76
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.2776466983511955,
+ "learning_rate": 3.75609756097561e-05,
+ "loss": 1.3121,
+ "step": 77
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.20026129636576834,
+ "learning_rate": 3.804878048780488e-05,
+ "loss": 1.2436,
+ "step": 78
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.21064549243917835,
+ "learning_rate": 3.853658536585366e-05,
+ "loss": 1.2064,
+ "step": 79
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.22119482175714267,
+ "learning_rate": 3.9024390243902444e-05,
+ "loss": 1.2715,
+ "step": 80
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.23047133748844142,
+ "learning_rate": 3.951219512195122e-05,
+ "loss": 1.2888,
+ "step": 81
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.18741863156973176,
+ "learning_rate": 4e-05,
+ "loss": 1.248,
+ "step": 82
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.1747859810629604,
+ "learning_rate": 4.0487804878048786e-05,
+ "loss": 1.1683,
+ "step": 83
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.1896944798413341,
+ "learning_rate": 4.097560975609756e-05,
+ "loss": 1.2155,
+ "step": 84
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.18724128114363303,
+ "learning_rate": 4.1463414634146346e-05,
+ "loss": 1.2273,
+ "step": 85
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.17368125504855478,
+ "learning_rate": 4.195121951219513e-05,
+ "loss": 1.224,
+ "step": 86
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.18371141013625703,
+ "learning_rate": 4.2439024390243905e-05,
+ "loss": 1.2294,
+ "step": 87
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.1791029365673714,
+ "learning_rate": 4.292682926829269e-05,
+ "loss": 1.2895,
+ "step": 88
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.20259974283859655,
+ "learning_rate": 4.341463414634147e-05,
+ "loss": 1.1841,
+ "step": 89
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.17457456183272174,
+ "learning_rate": 4.390243902439025e-05,
+ "loss": 1.2357,
+ "step": 90
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.1815824380789748,
+ "learning_rate": 4.439024390243903e-05,
+ "loss": 1.2304,
+ "step": 91
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.17566480599583392,
+ "learning_rate": 4.4878048780487814e-05,
+ "loss": 1.242,
+ "step": 92
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.18422975005984474,
+ "learning_rate": 4.536585365853658e-05,
+ "loss": 1.2177,
+ "step": 93
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.16796781877940678,
+ "learning_rate": 4.5853658536585366e-05,
+ "loss": 1.1482,
+ "step": 94
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.18636131653783305,
+ "learning_rate": 4.634146341463415e-05,
+ "loss": 1.1758,
+ "step": 95
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.1823665700289814,
+ "learning_rate": 4.6829268292682926e-05,
+ "loss": 1.289,
+ "step": 96
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.1719900691262439,
+ "learning_rate": 4.731707317073171e-05,
+ "loss": 1.1626,
+ "step": 97
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.17937994168039778,
+ "learning_rate": 4.780487804878049e-05,
+ "loss": 1.175,
+ "step": 98
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.16631851422106986,
+ "learning_rate": 4.829268292682927e-05,
+ "loss": 1.2177,
+ "step": 99
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.19143696232800309,
+ "learning_rate": 4.878048780487805e-05,
+ "loss": 1.3071,
+ "step": 100
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.17859506638780318,
+ "learning_rate": 4.9268292682926835e-05,
+ "loss": 1.2351,
+ "step": 101
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.18381520321248196,
+ "learning_rate": 4.975609756097561e-05,
+ "loss": 1.2342,
+ "step": 102
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.17968218683773912,
+ "learning_rate": 5.0243902439024394e-05,
+ "loss": 1.2074,
+ "step": 103
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.18139489969339018,
+ "learning_rate": 5.073170731707318e-05,
+ "loss": 1.1558,
+ "step": 104
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.17366624842514394,
+ "learning_rate": 5.121951219512195e-05,
+ "loss": 1.1897,
+ "step": 105
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.16034845455223745,
+ "learning_rate": 5.1707317073170736e-05,
+ "loss": 1.179,
+ "step": 106
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.17583069577827776,
+ "learning_rate": 5.219512195121952e-05,
+ "loss": 1.1856,
+ "step": 107
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.1853758076989552,
+ "learning_rate": 5.26829268292683e-05,
+ "loss": 1.2072,
+ "step": 108
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.19597443965936462,
+ "learning_rate": 5.317073170731708e-05,
+ "loss": 1.2271,
+ "step": 109
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.1899206334098331,
+ "learning_rate": 5.365853658536586e-05,
+ "loss": 1.1961,
+ "step": 110
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.17463763837757018,
+ "learning_rate": 5.4146341463414645e-05,
+ "loss": 1.2049,
+ "step": 111
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.20431371701229986,
+ "learning_rate": 5.463414634146342e-05,
+ "loss": 1.2891,
+ "step": 112
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1814475107638498,
+ "learning_rate": 5.51219512195122e-05,
+ "loss": 1.2346,
+ "step": 113
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1883849423207823,
+ "learning_rate": 5.5609756097560974e-05,
+ "loss": 1.244,
+ "step": 114
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1857258128640568,
+ "learning_rate": 5.609756097560976e-05,
+ "loss": 1.2669,
+ "step": 115
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1740768514118401,
+ "learning_rate": 5.658536585365854e-05,
+ "loss": 1.2414,
+ "step": 116
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1919320335584178,
+ "learning_rate": 5.7073170731707317e-05,
+ "loss": 1.2886,
+ "step": 117
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.18288775167828136,
+ "learning_rate": 5.75609756097561e-05,
+ "loss": 1.1875,
+ "step": 118
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.18208588867750863,
+ "learning_rate": 5.804878048780488e-05,
+ "loss": 1.2388,
+ "step": 119
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.1743260015658331,
+ "learning_rate": 5.853658536585366e-05,
+ "loss": 1.1762,
+ "step": 120
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.17856046291517946,
+ "learning_rate": 5.902439024390244e-05,
+ "loss": 1.2888,
+ "step": 121
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.17493794870966536,
+ "learning_rate": 5.9512195121951225e-05,
+ "loss": 1.2222,
+ "step": 122
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.1909202655203384,
+ "learning_rate": 6.000000000000001e-05,
+ "loss": 1.2414,
+ "step": 123
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.18345819482834988,
+ "learning_rate": 6.0487804878048785e-05,
+ "loss": 1.2756,
+ "step": 124
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.2057069352956621,
+ "learning_rate": 6.097560975609757e-05,
+ "loss": 1.261,
+ "step": 125
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.299775882469108,
+ "learning_rate": 6.146341463414634e-05,
+ "loss": 1.2566,
+ "step": 126
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.1869687633018095,
+ "learning_rate": 6.195121951219513e-05,
+ "loss": 1.3039,
+ "step": 127
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.17747149926197442,
+ "learning_rate": 6.243902439024391e-05,
+ "loss": 1.2524,
+ "step": 128
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.17885157788044242,
+ "learning_rate": 6.29268292682927e-05,
+ "loss": 1.2455,
+ "step": 129
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.17617298187845123,
+ "learning_rate": 6.341463414634148e-05,
+ "loss": 1.2009,
+ "step": 130
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.20164176323497066,
+ "learning_rate": 6.390243902439025e-05,
+ "loss": 1.2634,
+ "step": 131
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.20459903417307612,
+ "learning_rate": 6.439024390243903e-05,
+ "loss": 1.1963,
+ "step": 132
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.1863755486334296,
+ "learning_rate": 6.487804878048781e-05,
+ "loss": 1.2387,
+ "step": 133
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.19265866140295207,
+ "learning_rate": 6.536585365853658e-05,
+ "loss": 1.2688,
+ "step": 134
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.1823425868969493,
+ "learning_rate": 6.585365853658536e-05,
+ "loss": 1.2041,
+ "step": 135
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.2016853266472781,
+ "learning_rate": 6.634146341463415e-05,
+ "loss": 1.1223,
+ "step": 136
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.17282675192463448,
+ "learning_rate": 6.682926829268293e-05,
+ "loss": 1.1879,
+ "step": 137
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.17398811693399288,
+ "learning_rate": 6.731707317073171e-05,
+ "loss": 1.2682,
+ "step": 138
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.18516916965434696,
+ "learning_rate": 6.78048780487805e-05,
+ "loss": 1.1666,
+ "step": 139
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.1852213129647933,
+ "learning_rate": 6.829268292682927e-05,
+ "loss": 1.2501,
+ "step": 140
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.17915948766591883,
+ "learning_rate": 6.878048780487805e-05,
+ "loss": 1.2264,
+ "step": 141
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.21599939417233183,
+ "learning_rate": 6.926829268292683e-05,
+ "loss": 1.2376,
+ "step": 142
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.17839304459521851,
+ "learning_rate": 6.975609756097562e-05,
+ "loss": 1.2353,
+ "step": 143
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.20826913231380875,
+ "learning_rate": 7.02439024390244e-05,
+ "loss": 1.1901,
+ "step": 144
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.20788894913361589,
+ "learning_rate": 7.073170731707318e-05,
+ "loss": 1.2577,
+ "step": 145
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.18420055842301297,
+ "learning_rate": 7.121951219512195e-05,
+ "loss": 1.1393,
+ "step": 146
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.19903048468685589,
+ "learning_rate": 7.170731707317073e-05,
+ "loss": 1.2321,
+ "step": 147
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.19074116314985748,
+ "learning_rate": 7.219512195121952e-05,
+ "loss": 1.1912,
+ "step": 148
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.2353816469403903,
+ "learning_rate": 7.26829268292683e-05,
+ "loss": 1.28,
+ "step": 149
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.21634875684769345,
+ "learning_rate": 7.317073170731708e-05,
+ "loss": 1.3312,
+ "step": 150
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.18290969006743918,
+ "learning_rate": 7.365853658536587e-05,
+ "loss": 1.2214,
+ "step": 151
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.18484243897545208,
+ "learning_rate": 7.414634146341465e-05,
+ "loss": 1.1895,
+ "step": 152
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.21882343112978872,
+ "learning_rate": 7.463414634146342e-05,
+ "loss": 1.2219,
+ "step": 153
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.19868284379241205,
+ "learning_rate": 7.51219512195122e-05,
+ "loss": 1.2176,
+ "step": 154
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.20912516312950613,
+ "learning_rate": 7.560975609756097e-05,
+ "loss": 1.242,
+ "step": 155
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.23811880045549916,
+ "learning_rate": 7.609756097560976e-05,
+ "loss": 1.2838,
+ "step": 156
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.19511077122033713,
+ "learning_rate": 7.658536585365854e-05,
+ "loss": 1.1594,
+ "step": 157
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.20094129399534238,
+ "learning_rate": 7.707317073170732e-05,
+ "loss": 1.2966,
+ "step": 158
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.19366245038292418,
+ "learning_rate": 7.75609756097561e-05,
+ "loss": 1.2246,
+ "step": 159
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.19409570223867306,
+ "learning_rate": 7.804878048780489e-05,
+ "loss": 1.2312,
+ "step": 160
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.2087258457033805,
+ "learning_rate": 7.853658536585366e-05,
+ "loss": 1.2169,
+ "step": 161
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.18765223996270428,
+ "learning_rate": 7.902439024390244e-05,
+ "loss": 1.2383,
+ "step": 162
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.20734180224147242,
+ "learning_rate": 7.951219512195122e-05,
+ "loss": 1.2587,
+ "step": 163
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.24690929540287834,
+ "learning_rate": 8e-05,
+ "loss": 1.1951,
+ "step": 164
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.2003538797619543,
+ "learning_rate": 7.999990914797545e-05,
+ "loss": 1.1982,
+ "step": 165
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.22469075613510484,
+ "learning_rate": 7.99996365923145e-05,
+ "loss": 1.2355,
+ "step": 166
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.21870100788336058,
+ "learning_rate": 7.999918233425526e-05,
+ "loss": 1.1103,
+ "step": 167
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.20939989594131886,
+ "learning_rate": 7.999854637586122e-05,
+ "loss": 1.1966,
+ "step": 168
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.43108211416237796,
+ "learning_rate": 7.999772872002132e-05,
+ "loss": 1.2882,
+ "step": 169
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.27045413432174487,
+ "learning_rate": 7.999672937044984e-05,
+ "loss": 1.2399,
+ "step": 170
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.19700483036740515,
+ "learning_rate": 7.999554833168642e-05,
+ "loss": 1.202,
+ "step": 171
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.3335979493370708,
+ "learning_rate": 7.999418560909604e-05,
+ "loss": 1.1995,
+ "step": 172
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.3165803974474567,
+ "learning_rate": 7.999264120886902e-05,
+ "loss": 1.1569,
+ "step": 173
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.1951699080346223,
+ "learning_rate": 7.999091513802093e-05,
+ "loss": 1.1778,
+ "step": 174
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.2087559121749787,
+ "learning_rate": 7.998900740439265e-05,
+ "loss": 1.1736,
+ "step": 175
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.20345180977460478,
+ "learning_rate": 7.998691801665024e-05,
+ "loss": 1.2281,
+ "step": 176
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.24617644827252333,
+ "learning_rate": 7.998464698428495e-05,
+ "loss": 1.2072,
+ "step": 177
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.2469050959356265,
+ "learning_rate": 7.998219431761318e-05,
+ "loss": 1.2242,
+ "step": 178
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.19529317748460623,
+ "learning_rate": 7.997956002777642e-05,
+ "loss": 1.2567,
+ "step": 179
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.19048389491381376,
+ "learning_rate": 7.99767441267412e-05,
+ "loss": 1.2982,
+ "step": 180
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.2085799116493225,
+ "learning_rate": 7.997374662729904e-05,
+ "loss": 1.1254,
+ "step": 181
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.20636853256378995,
+ "learning_rate": 7.997056754306636e-05,
+ "loss": 1.2435,
+ "step": 182
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.20590016382290252,
+ "learning_rate": 7.99672068884845e-05,
+ "loss": 1.2658,
+ "step": 183
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.1931166169764433,
+ "learning_rate": 7.996366467881955e-05,
+ "loss": 1.1637,
+ "step": 184
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.18873318157988098,
+ "learning_rate": 7.995994093016237e-05,
+ "loss": 1.1335,
+ "step": 185
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.19210254625199108,
+ "learning_rate": 7.995603565942846e-05,
+ "loss": 1.1928,
+ "step": 186
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.2130986479765664,
+ "learning_rate": 7.995194888435792e-05,
+ "loss": 1.2158,
+ "step": 187
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.22003854501814088,
+ "learning_rate": 7.994768062351532e-05,
+ "loss": 1.2288,
+ "step": 188
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.20330803191993058,
+ "learning_rate": 7.994323089628968e-05,
+ "loss": 1.2426,
+ "step": 189
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.20567314642208634,
+ "learning_rate": 7.993859972289434e-05,
+ "loss": 1.2649,
+ "step": 190
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.21556663727342962,
+ "learning_rate": 7.993378712436686e-05,
+ "loss": 1.2545,
+ "step": 191
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.20309165469109888,
+ "learning_rate": 7.992879312256897e-05,
+ "loss": 1.3338,
+ "step": 192
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.19574356669421325,
+ "learning_rate": 7.992361774018641e-05,
+ "loss": 1.278,
+ "step": 193
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.2763613746722313,
+ "learning_rate": 7.991826100072891e-05,
+ "loss": 1.2571,
+ "step": 194
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.19346552479915102,
+ "learning_rate": 7.991272292852996e-05,
+ "loss": 1.2027,
+ "step": 195
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.2281167812123908,
+ "learning_rate": 7.990700354874683e-05,
+ "loss": 1.2586,
+ "step": 196
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.19699013712137542,
+ "learning_rate": 7.990110288736042e-05,
+ "loss": 1.1371,
+ "step": 197
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.21768209981475933,
+ "learning_rate": 7.989502097117503e-05,
+ "loss": 1.2522,
+ "step": 198
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.21335427847754582,
+ "learning_rate": 7.988875782781838e-05,
+ "loss": 1.2437,
+ "step": 199
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.21856710629066897,
+ "learning_rate": 7.988231348574147e-05,
+ "loss": 1.2135,
+ "step": 200
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.20482062658774797,
+ "learning_rate": 7.987568797421836e-05,
+ "loss": 1.1755,
+ "step": 201
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.2017756813960897,
+ "learning_rate": 7.986888132334608e-05,
+ "loss": 1.1699,
+ "step": 202
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.20496443848153809,
+ "learning_rate": 7.986189356404458e-05,
+ "loss": 1.2125,
+ "step": 203
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.2134603800558358,
+ "learning_rate": 7.985472472805643e-05,
+ "loss": 1.2391,
+ "step": 204
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.2364175573420861,
+ "learning_rate": 7.98473748479468e-05,
+ "loss": 1.2384,
+ "step": 205
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.1872419861598724,
+ "learning_rate": 7.983984395710326e-05,
+ "loss": 1.1457,
+ "step": 206
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.28222194007095774,
+ "learning_rate": 7.983213208973566e-05,
+ "loss": 1.2952,
+ "step": 207
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.1916094851162064,
+ "learning_rate": 7.982423928087593e-05,
+ "loss": 1.1763,
+ "step": 208
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.18446245256166657,
+ "learning_rate": 7.981616556637795e-05,
+ "loss": 1.1863,
+ "step": 209
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.195191961022491,
+ "learning_rate": 7.980791098291737e-05,
+ "loss": 1.2036,
+ "step": 210
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.2652439657825496,
+ "learning_rate": 7.979947556799151e-05,
+ "loss": 1.2834,
+ "step": 211
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.24308438957843412,
+ "learning_rate": 7.979085935991906e-05,
+ "loss": 1.234,
+ "step": 212
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.21294701043622016,
+ "learning_rate": 7.978206239784004e-05,
+ "loss": 1.3006,
+ "step": 213
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.25809277041859524,
+ "learning_rate": 7.977308472171553e-05,
+ "loss": 1.2272,
+ "step": 214
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.193463860107294,
+ "learning_rate": 7.976392637232754e-05,
+ "loss": 1.2295,
+ "step": 215
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.2150023760609626,
+ "learning_rate": 7.975458739127877e-05,
+ "loss": 1.2135,
+ "step": 216
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.22590495955605894,
+ "learning_rate": 7.974506782099253e-05,
+ "loss": 1.2532,
+ "step": 217
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.21023744668403702,
+ "learning_rate": 7.973536770471242e-05,
+ "loss": 1.2472,
+ "step": 218
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.2345749799511543,
+ "learning_rate": 7.972548708650218e-05,
+ "loss": 1.1791,
+ "step": 219
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.2158876734005217,
+ "learning_rate": 7.971542601124553e-05,
+ "loss": 1.2483,
+ "step": 220
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.29455339949432446,
+ "learning_rate": 7.970518452464593e-05,
+ "loss": 1.2894,
+ "step": 221
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.23983708730626851,
+ "learning_rate": 7.969476267322636e-05,
+ "loss": 1.271,
+ "step": 222
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.1922400905426158,
+ "learning_rate": 7.968416050432912e-05,
+ "loss": 1.2139,
+ "step": 223
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.2238136844422931,
+ "learning_rate": 7.967337806611568e-05,
+ "loss": 1.2655,
+ "step": 224
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.21230292828267672,
+ "learning_rate": 7.966241540756631e-05,
+ "loss": 1.2406,
+ "step": 225
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.26656119419070456,
+ "learning_rate": 7.965127257848004e-05,
+ "loss": 1.2595,
+ "step": 226
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.22381385502992684,
+ "learning_rate": 7.963994962947426e-05,
+ "loss": 1.1737,
+ "step": 227
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20056702203994298,
+ "learning_rate": 7.962844661198462e-05,
+ "loss": 1.1969,
+ "step": 228
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20148701321526885,
+ "learning_rate": 7.961676357826478e-05,
+ "loss": 1.2151,
+ "step": 229
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20034834807028637,
+ "learning_rate": 7.960490058138604e-05,
+ "loss": 1.1455,
+ "step": 230
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.21050838521846033,
+ "learning_rate": 7.959285767523732e-05,
+ "loss": 1.2223,
+ "step": 231
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20904772138969777,
+ "learning_rate": 7.95806349145247e-05,
+ "loss": 1.2534,
+ "step": 232
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.20307877304792957,
+ "learning_rate": 7.956823235477134e-05,
+ "loss": 1.1352,
+ "step": 233
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.20501105270897094,
+ "learning_rate": 7.95556500523171e-05,
+ "loss": 1.2031,
+ "step": 234
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.19800586972038586,
+ "learning_rate": 7.954288806431838e-05,
+ "loss": 1.2567,
+ "step": 235
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.2175102450594135,
+ "learning_rate": 7.952994644874777e-05,
+ "loss": 1.2538,
+ "step": 236
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.22698189300067595,
+ "learning_rate": 7.951682526439391e-05,
+ "loss": 1.3088,
+ "step": 237
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.19208392014975315,
+ "learning_rate": 7.950352457086109e-05,
+ "loss": 1.2336,
+ "step": 238
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.27004086334319655,
+ "learning_rate": 7.949004442856905e-05,
+ "loss": 1.2012,
+ "step": 239
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.23420974954538043,
+ "learning_rate": 7.947638489875272e-05,
+ "loss": 1.2244,
+ "step": 240
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.20514399124802024,
+ "learning_rate": 7.946254604346186e-05,
+ "loss": 1.2548,
+ "step": 241
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.19334973602372896,
+ "learning_rate": 7.944852792556092e-05,
+ "loss": 1.2104,
+ "step": 242
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.1992640714537956,
+ "learning_rate": 7.943433060872858e-05,
+ "loss": 1.2628,
+ "step": 243
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.203284617090413,
+ "learning_rate": 7.941995415745761e-05,
+ "loss": 1.2002,
+ "step": 244
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.22795306969682058,
+ "learning_rate": 7.94053986370545e-05,
+ "loss": 1.2215,
+ "step": 245
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.20789041346838505,
+ "learning_rate": 7.939066411363915e-05,
+ "loss": 1.0998,
+ "step": 246
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.22354868884742066,
+ "learning_rate": 7.937575065414464e-05,
+ "loss": 1.2564,
+ "step": 247
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.21176392726647736,
+ "learning_rate": 7.936065832631687e-05,
+ "loss": 1.2816,
+ "step": 248
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.19967179557235587,
+ "learning_rate": 7.934538719871427e-05,
+ "loss": 1.1961,
+ "step": 249
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.210819577350627,
+ "learning_rate": 7.932993734070747e-05,
+ "loss": 1.2167,
+ "step": 250
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.21537794551756187,
+ "learning_rate": 7.931430882247903e-05,
+ "loss": 1.2341,
+ "step": 251
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.22850872387256574,
+ "learning_rate": 7.929850171502304e-05,
+ "loss": 1.1686,
+ "step": 252
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.22380366415076383,
+ "learning_rate": 7.928251609014493e-05,
+ "loss": 1.1462,
+ "step": 253
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.22426923149036065,
+ "learning_rate": 7.926635202046102e-05,
+ "loss": 1.1792,
+ "step": 254
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.42082703321103965,
+ "learning_rate": 7.925000957939822e-05,
+ "loss": 1.2718,
+ "step": 255
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.2235432774854074,
+ "learning_rate": 7.92334888411937e-05,
+ "loss": 1.2598,
+ "step": 256
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.281644028934108,
+ "learning_rate": 7.92167898808946e-05,
+ "loss": 1.2205,
+ "step": 257
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.2037705143888748,
+ "learning_rate": 7.919991277435763e-05,
+ "loss": 1.1737,
+ "step": 258
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.20917419230028977,
+ "learning_rate": 7.918285759824879e-05,
+ "loss": 1.2035,
+ "step": 259
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.20510847570635518,
+ "learning_rate": 7.916562443004292e-05,
+ "loss": 1.2135,
+ "step": 260
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.25172483071092466,
+ "learning_rate": 7.914821334802342e-05,
+ "loss": 1.2218,
+ "step": 261
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.21102706700634313,
+ "learning_rate": 7.91306244312819e-05,
+ "loss": 1.1738,
+ "step": 262
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.22626060872645815,
+ "learning_rate": 7.911285775971781e-05,
+ "loss": 1.238,
+ "step": 263
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.22448567539778486,
+ "learning_rate": 7.909491341403805e-05,
+ "loss": 1.2404,
+ "step": 264
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.2019099786139193,
+ "learning_rate": 7.907679147575661e-05,
+ "loss": 1.213,
+ "step": 265
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.24307234839096267,
+ "learning_rate": 7.905849202719422e-05,
+ "loss": 1.2322,
+ "step": 266
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.19801890521743487,
+ "learning_rate": 7.904001515147802e-05,
+ "loss": 1.2448,
+ "step": 267
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.2102742273575385,
+ "learning_rate": 7.902136093254106e-05,
+ "loss": 1.1657,
+ "step": 268
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.2173464476815016,
+ "learning_rate": 7.900252945512201e-05,
+ "loss": 1.2549,
+ "step": 269
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.20957275458699595,
+ "learning_rate": 7.898352080476479e-05,
+ "loss": 1.2536,
+ "step": 270
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.20691966388952363,
+ "learning_rate": 7.896433506781811e-05,
+ "loss": 1.2661,
+ "step": 271
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.2276662275112648,
+ "learning_rate": 7.894497233143509e-05,
+ "loss": 1.2409,
+ "step": 272
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.23854109569301263,
+ "learning_rate": 7.892543268357297e-05,
+ "loss": 1.2681,
+ "step": 273
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.2233864156677627,
+ "learning_rate": 7.890571621299252e-05,
+ "loss": 1.1687,
+ "step": 274
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.20114129147925475,
+ "learning_rate": 7.888582300925787e-05,
+ "loss": 1.2184,
+ "step": 275
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.2154654670569462,
+ "learning_rate": 7.886575316273586e-05,
+ "loss": 1.1982,
+ "step": 276
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.2292982209343639,
+ "learning_rate": 7.884550676459583e-05,
+ "loss": 1.2129,
+ "step": 277
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.21302713135229548,
+ "learning_rate": 7.882508390680908e-05,
+ "loss": 1.1605,
+ "step": 278
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.2123661020671048,
+ "learning_rate": 7.88044846821485e-05,
+ "loss": 1.2308,
+ "step": 279
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.2080577410800404,
+ "learning_rate": 7.878370918418818e-05,
+ "loss": 1.2195,
+ "step": 280
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.19663901881127385,
+ "learning_rate": 7.876275750730289e-05,
+ "loss": 1.1591,
+ "step": 281
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.20534502031312163,
+ "learning_rate": 7.874162974666776e-05,
+ "loss": 1.2664,
+ "step": 282
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.23240445399513837,
+ "learning_rate": 7.872032599825779e-05,
+ "loss": 1.2151,
+ "step": 283
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.2672527316717507,
+ "learning_rate": 7.86988463588474e-05,
+ "loss": 1.2406,
+ "step": 284
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.19893903058743695,
+ "learning_rate": 7.867719092601003e-05,
+ "loss": 1.1291,
+ "step": 285
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.33275268109930917,
+ "learning_rate": 7.865535979811768e-05,
+ "loss": 1.1406,
+ "step": 286
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.2373619455690358,
+ "learning_rate": 7.863335307434045e-05,
+ "loss": 1.2799,
+ "step": 287
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.263235735390858,
+ "learning_rate": 7.861117085464612e-05,
+ "loss": 1.2415,
+ "step": 288
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.25884281780784324,
+ "learning_rate": 7.858881323979965e-05,
+ "loss": 1.3919,
+ "step": 289
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.25426288332255736,
+ "learning_rate": 7.85662803313628e-05,
+ "loss": 1.174,
+ "step": 290
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.26655405527881243,
+ "learning_rate": 7.854357223169356e-05,
+ "loss": 1.2806,
+ "step": 291
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.20909844432349833,
+ "learning_rate": 7.852068904394579e-05,
+ "loss": 1.2627,
+ "step": 292
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.21307115068935759,
+ "learning_rate": 7.849763087206866e-05,
+ "loss": 1.1879,
+ "step": 293
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.25009949471398946,
+ "learning_rate": 7.847439782080628e-05,
+ "loss": 1.2881,
+ "step": 294
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.20960783418679174,
+ "learning_rate": 7.845098999569712e-05,
+ "loss": 1.2723,
+ "step": 295
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.24968832437925104,
+ "learning_rate": 7.842740750307362e-05,
+ "loss": 1.2029,
+ "step": 296
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.22981196585125677,
+ "learning_rate": 7.84036504500616e-05,
+ "loss": 1.1695,
+ "step": 297
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.2320606844751365,
+ "learning_rate": 7.837971894457991e-05,
+ "loss": 1.2317,
+ "step": 298
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.23051459673906124,
+ "learning_rate": 7.835561309533981e-05,
+ "loss": 1.2046,
+ "step": 299
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.2510027231060586,
+ "learning_rate": 7.833133301184457e-05,
+ "loss": 1.199,
+ "step": 300
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.23601180466018787,
+ "learning_rate": 7.830687880438895e-05,
+ "loss": 1.1755,
+ "step": 301
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.24740820934385369,
+ "learning_rate": 7.828225058405864e-05,
+ "loss": 1.2054,
+ "step": 302
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.23065372979111173,
+ "learning_rate": 7.825744846272984e-05,
+ "loss": 1.2066,
+ "step": 303
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.22385077334838213,
+ "learning_rate": 7.823247255306866e-05,
+ "loss": 1.2147,
+ "step": 304
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.42981213948386104,
+ "learning_rate": 7.820732296853074e-05,
+ "loss": 1.2314,
+ "step": 305
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.21122844902751076,
+ "learning_rate": 7.818199982336058e-05,
+ "loss": 1.1462,
+ "step": 306
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.23374869692118933,
+ "learning_rate": 7.815650323259117e-05,
+ "loss": 1.2051,
+ "step": 307
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.21662363795962128,
+ "learning_rate": 7.813083331204332e-05,
+ "loss": 1.1575,
+ "step": 308
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.2088315773384112,
+ "learning_rate": 7.810499017832526e-05,
+ "loss": 1.1316,
+ "step": 309
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.2095238410730976,
+ "learning_rate": 7.807897394883203e-05,
+ "loss": 1.2087,
+ "step": 310
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.22672932127256515,
+ "learning_rate": 7.805278474174499e-05,
+ "loss": 1.2512,
+ "step": 311
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.21873052340922736,
+ "learning_rate": 7.802642267603126e-05,
+ "loss": 1.1909,
+ "step": 312
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.219814521916342,
+ "learning_rate": 7.79998878714432e-05,
+ "loss": 1.1669,
+ "step": 313
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.3049426027257317,
+ "learning_rate": 7.797318044851786e-05,
+ "loss": 1.1797,
+ "step": 314
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.22309435690065985,
+ "learning_rate": 7.794630052857638e-05,
+ "loss": 1.1417,
+ "step": 315
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.3891885169154885,
+ "learning_rate": 7.791924823372354e-05,
+ "loss": 1.2369,
+ "step": 316
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.24780269452456372,
+ "learning_rate": 7.789202368684711e-05,
+ "loss": 1.2521,
+ "step": 317
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.21660460720269362,
+ "learning_rate": 7.786462701161738e-05,
+ "loss": 1.2151,
+ "step": 318
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.23635409466561857,
+ "learning_rate": 7.783705833248649e-05,
+ "loss": 1.2363,
+ "step": 319
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.2616135839903218,
+ "learning_rate": 7.780931777468797e-05,
+ "loss": 1.2428,
+ "step": 320
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.21461059159245083,
+ "learning_rate": 7.77814054642361e-05,
+ "loss": 1.1434,
+ "step": 321
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.25348824286656163,
+ "learning_rate": 7.775332152792539e-05,
+ "loss": 1.2368,
+ "step": 322
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.22275034726331247,
+ "learning_rate": 7.772506609332995e-05,
+ "loss": 1.1827,
+ "step": 323
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.25030821228147526,
+ "learning_rate": 7.769663928880298e-05,
+ "loss": 1.2428,
+ "step": 324
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.22251804398745534,
+ "learning_rate": 7.766804124347608e-05,
+ "loss": 1.1889,
+ "step": 325
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.23381455520411995,
+ "learning_rate": 7.763927208725879e-05,
+ "loss": 1.2115,
+ "step": 326
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.27341902651946226,
+ "learning_rate": 7.761033195083791e-05,
+ "loss": 1.2535,
+ "step": 327
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.24862471659814522,
+ "learning_rate": 7.758122096567694e-05,
+ "loss": 1.2128,
+ "step": 328
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.2251357082045494,
+ "learning_rate": 7.755193926401547e-05,
+ "loss": 1.2334,
+ "step": 329
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.3173274941622932,
+ "learning_rate": 7.752248697886857e-05,
+ "loss": 1.226,
+ "step": 330
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.23056440717672175,
+ "learning_rate": 7.74928642440263e-05,
+ "loss": 1.2339,
+ "step": 331
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2801507500859342,
+ "learning_rate": 7.746307119405286e-05,
+ "loss": 1.287,
+ "step": 332
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2267818430426272,
+ "learning_rate": 7.743310796428622e-05,
+ "loss": 1.1916,
+ "step": 333
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2777329160365585,
+ "learning_rate": 7.74029746908374e-05,
+ "loss": 1.252,
+ "step": 334
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.25289169762353,
+ "learning_rate": 7.737267151058983e-05,
+ "loss": 1.2153,
+ "step": 335
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2424670686901653,
+ "learning_rate": 7.734219856119875e-05,
+ "loss": 1.2227,
+ "step": 336
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.22747092217441645,
+ "learning_rate": 7.731155598109067e-05,
+ "loss": 1.19,
+ "step": 337
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.2307810940100189,
+ "learning_rate": 7.728074390946257e-05,
+ "loss": 1.1818,
+ "step": 338
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.2583402574655623,
+ "learning_rate": 7.724976248628142e-05,
+ "loss": 1.1608,
+ "step": 339
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.22140209760890694,
+ "learning_rate": 7.721861185228347e-05,
+ "loss": 1.1245,
+ "step": 340
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.25859310758244686,
+ "learning_rate": 7.718729214897362e-05,
+ "loss": 1.2247,
+ "step": 341
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.26371179531372124,
+ "learning_rate": 7.715580351862482e-05,
+ "loss": 1.2128,
+ "step": 342
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.26575541302851047,
+ "learning_rate": 7.712414610427733e-05,
+ "loss": 1.2443,
+ "step": 343
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.269978305197599,
+ "learning_rate": 7.709232004973816e-05,
+ "loss": 1.2231,
+ "step": 344
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.26583998705977047,
+ "learning_rate": 7.70603254995804e-05,
+ "loss": 1.2476,
+ "step": 345
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.24256062164066097,
+ "learning_rate": 7.702816259914253e-05,
+ "loss": 1.2901,
+ "step": 346
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.3463123472658915,
+ "learning_rate": 7.699583149452779e-05,
+ "loss": 1.3277,
+ "step": 347
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.2269096590531878,
+ "learning_rate": 7.696333233260345e-05,
+ "loss": 1.2047,
+ "step": 348
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.25136883001050025,
+ "learning_rate": 7.693066526100031e-05,
+ "loss": 1.1619,
+ "step": 349
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.2565112571116145,
+ "learning_rate": 7.68978304281118e-05,
+ "loss": 1.2389,
+ "step": 350
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.22175779550828703,
+ "learning_rate": 7.686482798309349e-05,
+ "loss": 1.2238,
+ "step": 351
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.22588304332216555,
+ "learning_rate": 7.683165807586234e-05,
+ "loss": 1.174,
+ "step": 352
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.24889474296529737,
+ "learning_rate": 7.6798320857096e-05,
+ "loss": 1.2366,
+ "step": 353
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.27339703806525034,
+ "learning_rate": 7.676481647823214e-05,
+ "loss": 1.2356,
+ "step": 354
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.23424666722888365,
+ "learning_rate": 7.673114509146782e-05,
+ "loss": 1.2089,
+ "step": 355
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.27978285392461766,
+ "learning_rate": 7.66973068497587e-05,
+ "loss": 1.2609,
+ "step": 356
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.2509423350138824,
+ "learning_rate": 7.666330190681844e-05,
+ "loss": 1.1777,
+ "step": 357
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.23007730927468031,
+ "learning_rate": 7.662913041711793e-05,
+ "loss": 1.154,
+ "step": 358
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.2438648674953112,
+ "learning_rate": 7.659479253588462e-05,
+ "loss": 1.2257,
+ "step": 359
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.28816093242092233,
+ "learning_rate": 7.65602884191018e-05,
+ "loss": 1.2558,
+ "step": 360
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.24972815300596035,
+ "learning_rate": 7.652561822350793e-05,
+ "loss": 1.2837,
+ "step": 361
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.2543189139697063,
+ "learning_rate": 7.649078210659587e-05,
+ "loss": 1.2193,
+ "step": 362
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.2237937956718952,
+ "learning_rate": 7.645578022661224e-05,
+ "loss": 1.2237,
+ "step": 363
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.29742029408787396,
+ "learning_rate": 7.642061274255657e-05,
+ "loss": 1.2116,
+ "step": 364
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2462883147335493,
+ "learning_rate": 7.638527981418075e-05,
+ "loss": 1.1827,
+ "step": 365
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2647802498907096,
+ "learning_rate": 7.634978160198817e-05,
+ "loss": 1.2739,
+ "step": 366
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.22360398779217264,
+ "learning_rate": 7.631411826723306e-05,
+ "loss": 1.2185,
+ "step": 367
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2635048004593543,
+ "learning_rate": 7.627828997191973e-05,
+ "loss": 1.2317,
+ "step": 368
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2764803449917684,
+ "learning_rate": 7.624229687880184e-05,
+ "loss": 1.1923,
+ "step": 369
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.25724943233414527,
+ "learning_rate": 7.620613915138166e-05,
+ "loss": 1.2218,
+ "step": 370
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.2858318045794755,
+ "learning_rate": 7.61698169539093e-05,
+ "loss": 1.1496,
+ "step": 371
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.23547216647460364,
+ "learning_rate": 7.613333045138206e-05,
+ "loss": 1.1905,
+ "step": 372
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.22984814903684375,
+ "learning_rate": 7.609667980954355e-05,
+ "loss": 1.2009,
+ "step": 373
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.2551903754079084,
+ "learning_rate": 7.605986519488301e-05,
+ "loss": 1.2042,
+ "step": 374
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.2508257410125616,
+ "learning_rate": 7.602288677463457e-05,
+ "loss": 1.2468,
+ "step": 375
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.25324577774935964,
+ "learning_rate": 7.598574471677644e-05,
+ "loss": 1.2603,
+ "step": 376
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.35888776531769967,
+ "learning_rate": 7.59484391900302e-05,
+ "loss": 1.1929,
+ "step": 377
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.22048517191014724,
+ "learning_rate": 7.591097036385994e-05,
+ "loss": 1.1783,
+ "step": 378
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.2781160412746083,
+ "learning_rate": 7.587333840847162e-05,
+ "loss": 1.3397,
+ "step": 379
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.24033046830332258,
+ "learning_rate": 7.583554349481222e-05,
+ "loss": 1.2436,
+ "step": 380
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.26413762380260003,
+ "learning_rate": 7.579758579456893e-05,
+ "loss": 1.1917,
+ "step": 381
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.2390937887338632,
+ "learning_rate": 7.575946548016847e-05,
+ "loss": 1.2186,
+ "step": 382
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.25131263043429275,
+ "learning_rate": 7.572118272477622e-05,
+ "loss": 1.2538,
+ "step": 383
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.223974104870702,
+ "learning_rate": 7.568273770229546e-05,
+ "loss": 1.2165,
+ "step": 384
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.25840356830252875,
+ "learning_rate": 7.564413058736663e-05,
+ "loss": 1.1848,
+ "step": 385
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.2723156683076603,
+ "learning_rate": 7.560536155536641e-05,
+ "loss": 1.1982,
+ "step": 386
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.265687427976889,
+ "learning_rate": 7.556643078240708e-05,
+ "loss": 1.231,
+ "step": 387
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.25152762080976077,
+ "learning_rate": 7.552733844533562e-05,
+ "loss": 1.1974,
+ "step": 388
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.2366049485053541,
+ "learning_rate": 7.548808472173292e-05,
+ "loss": 1.3119,
+ "step": 389
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.22092196577077122,
+ "learning_rate": 7.5448669789913e-05,
+ "loss": 1.195,
+ "step": 390
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.22667521540462374,
+ "learning_rate": 7.540909382892217e-05,
+ "loss": 1.1431,
+ "step": 391
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.25432207282646513,
+ "learning_rate": 7.536935701853823e-05,
+ "loss": 1.2173,
+ "step": 392
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.29950506457923864,
+ "learning_rate": 7.53294595392697e-05,
+ "loss": 1.1962,
+ "step": 393
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.24735689607229913,
+ "learning_rate": 7.528940157235487e-05,
+ "loss": 1.2053,
+ "step": 394
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.24394198607459663,
+ "learning_rate": 7.524918329976114e-05,
+ "loss": 1.1979,
+ "step": 395
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.2630369372689188,
+ "learning_rate": 7.520880490418409e-05,
+ "loss": 1.2111,
+ "step": 396
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.26275028416291457,
+ "learning_rate": 7.516826656904664e-05,
+ "loss": 1.2133,
+ "step": 397
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.23938074620956928,
+ "learning_rate": 7.512756847849831e-05,
+ "loss": 1.1355,
+ "step": 398
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.3724960610098138,
+ "learning_rate": 7.508671081741428e-05,
+ "loss": 1.2572,
+ "step": 399
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.24161685847894723,
+ "learning_rate": 7.504569377139462e-05,
+ "loss": 1.1706,
+ "step": 400
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.26121591322670523,
+ "learning_rate": 7.50045175267634e-05,
+ "loss": 1.2135,
+ "step": 401
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2465579498164775,
+ "learning_rate": 7.496318227056788e-05,
+ "loss": 1.1641,
+ "step": 402
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2556288696122787,
+ "learning_rate": 7.492168819057767e-05,
+ "loss": 1.2939,
+ "step": 403
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.261481216336303,
+ "learning_rate": 7.488003547528382e-05,
+ "loss": 1.2026,
+ "step": 404
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2389415135676362,
+ "learning_rate": 7.483822431389799e-05,
+ "loss": 1.2131,
+ "step": 405
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2559201956627192,
+ "learning_rate": 7.479625489635162e-05,
+ "loss": 1.1246,
+ "step": 406
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.27127932491822604,
+ "learning_rate": 7.475412741329504e-05,
+ "loss": 1.2429,
+ "step": 407
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.27006004008695594,
+ "learning_rate": 7.47118420560966e-05,
+ "loss": 1.2388,
+ "step": 408
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.23716823297200537,
+ "learning_rate": 7.466939901684182e-05,
+ "loss": 1.1264,
+ "step": 409
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.2885373898669248,
+ "learning_rate": 7.462679848833252e-05,
+ "loss": 1.2786,
+ "step": 410
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.49215227598639927,
+ "learning_rate": 7.458404066408588e-05,
+ "loss": 1.2386,
+ "step": 411
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.24235735604947403,
+ "learning_rate": 7.454112573833368e-05,
+ "loss": 1.1423,
+ "step": 412
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.2584614748054343,
+ "learning_rate": 7.449805390602127e-05,
+ "loss": 1.2669,
+ "step": 413
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.23806123085998873,
+ "learning_rate": 7.445482536280684e-05,
+ "loss": 1.1763,
+ "step": 414
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.24459517607786851,
+ "learning_rate": 7.441144030506043e-05,
+ "loss": 1.198,
+ "step": 415
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.25801616402700395,
+ "learning_rate": 7.436789892986304e-05,
+ "loss": 1.2136,
+ "step": 416
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.2814819942392514,
+ "learning_rate": 7.432420143500578e-05,
+ "loss": 1.2398,
+ "step": 417
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.22134709322606153,
+ "learning_rate": 7.428034801898893e-05,
+ "loss": 1.1592,
+ "step": 418
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.2899677536995633,
+ "learning_rate": 7.42363388810211e-05,
+ "loss": 1.2296,
+ "step": 419
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.24005943230262294,
+ "learning_rate": 7.419217422101822e-05,
+ "loss": 1.2223,
+ "step": 420
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.26417562369496167,
+ "learning_rate": 7.414785423960275e-05,
+ "loss": 1.2261,
+ "step": 421
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.2580815883535521,
+ "learning_rate": 7.410337913810271e-05,
+ "loss": 1.2021,
+ "step": 422
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.25242217589496435,
+ "learning_rate": 7.405874911855071e-05,
+ "loss": 1.239,
+ "step": 423
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.21991733999839932,
+ "learning_rate": 7.401396438368315e-05,
+ "loss": 1.1716,
+ "step": 424
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.40116538322720213,
+ "learning_rate": 7.396902513693924e-05,
+ "loss": 1.2773,
+ "step": 425
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.277333939455099,
+ "learning_rate": 7.392393158246002e-05,
+ "loss": 1.2574,
+ "step": 426
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.27146087746385755,
+ "learning_rate": 7.387868392508756e-05,
+ "loss": 1.2243,
+ "step": 427
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.255881055620786,
+ "learning_rate": 7.38332823703639e-05,
+ "loss": 1.223,
+ "step": 428
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.24807364856677255,
+ "learning_rate": 7.378772712453021e-05,
+ "loss": 1.1985,
+ "step": 429
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.25746257617764423,
+ "learning_rate": 7.37420183945258e-05,
+ "loss": 1.2502,
+ "step": 430
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.28851991049982234,
+ "learning_rate": 7.369615638798722e-05,
+ "loss": 1.2535,
+ "step": 431
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.24113389811604363,
+ "learning_rate": 7.365014131324725e-05,
+ "loss": 1.2227,
+ "step": 432
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.2414465151257969,
+ "learning_rate": 7.360397337933405e-05,
+ "loss": 1.1884,
+ "step": 433
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.2735463134699831,
+ "learning_rate": 7.355765279597011e-05,
+ "loss": 1.2756,
+ "step": 434
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.2588437452987293,
+ "learning_rate": 7.351117977357139e-05,
+ "loss": 1.2108,
+ "step": 435
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.26573294117796553,
+ "learning_rate": 7.346455452324629e-05,
+ "loss": 1.1821,
+ "step": 436
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.2555476577827304,
+ "learning_rate": 7.341777725679473e-05,
+ "loss": 1.1937,
+ "step": 437
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.2867704132108098,
+ "learning_rate": 7.337084818670716e-05,
+ "loss": 1.2272,
+ "step": 438
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.27726678115981157,
+ "learning_rate": 7.332376752616367e-05,
+ "loss": 1.2331,
+ "step": 439
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.26955338021079955,
+ "learning_rate": 7.32765354890329e-05,
+ "loss": 1.1731,
+ "step": 440
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.25250321202536524,
+ "learning_rate": 7.322915228987116e-05,
+ "loss": 1.2653,
+ "step": 441
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.24748844179765395,
+ "learning_rate": 7.318161814392143e-05,
+ "loss": 1.24,
+ "step": 442
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.28177805247356325,
+ "learning_rate": 7.313393326711239e-05,
+ "loss": 1.185,
+ "step": 443
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.24093242000396312,
+ "learning_rate": 7.30860978760574e-05,
+ "loss": 1.1994,
+ "step": 444
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.26277803901457075,
+ "learning_rate": 7.30381121880536e-05,
+ "loss": 1.212,
+ "step": 445
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2506524258682433,
+ "learning_rate": 7.298997642108079e-05,
+ "loss": 1.2421,
+ "step": 446
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2840599700015824,
+ "learning_rate": 7.294169079380061e-05,
+ "loss": 1.1818,
+ "step": 447
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.24892184038117549,
+ "learning_rate": 7.289325552555538e-05,
+ "loss": 1.1916,
+ "step": 448
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2700898428541357,
+ "learning_rate": 7.284467083636722e-05,
+ "loss": 1.2517,
+ "step": 449
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2617848546539419,
+ "learning_rate": 7.279593694693698e-05,
+ "loss": 1.2063,
+ "step": 450
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2698278585334131,
+ "learning_rate": 7.274705407864332e-05,
+ "loss": 1.194,
+ "step": 451
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.23678313024953834,
+ "learning_rate": 7.26980224535416e-05,
+ "loss": 1.2349,
+ "step": 452
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.24851875792002978,
+ "learning_rate": 7.264884229436293e-05,
+ "loss": 1.1758,
+ "step": 453
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.24122080121681125,
+ "learning_rate": 7.259951382451318e-05,
+ "loss": 1.1962,
+ "step": 454
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.22741322959884405,
+ "learning_rate": 7.25500372680719e-05,
+ "loss": 1.1702,
+ "step": 455
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.2297475610861458,
+ "learning_rate": 7.250041284979137e-05,
+ "loss": 1.1466,
+ "step": 456
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.3057605989721467,
+ "learning_rate": 7.245064079509553e-05,
+ "loss": 1.246,
+ "step": 457
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.2719638501597136,
+ "learning_rate": 7.240072133007899e-05,
+ "loss": 1.2184,
+ "step": 458
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.2436807816414479,
+ "learning_rate": 7.235065468150593e-05,
+ "loss": 1.2324,
+ "step": 459
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.23436349430255515,
+ "learning_rate": 7.23004410768092e-05,
+ "loss": 1.1813,
+ "step": 460
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.2398940990211377,
+ "learning_rate": 7.22500807440892e-05,
+ "loss": 1.1924,
+ "step": 461
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.2605716625062531,
+ "learning_rate": 7.219957391211281e-05,
+ "loss": 1.182,
+ "step": 462
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.260462524570941,
+ "learning_rate": 7.214892081031244e-05,
+ "loss": 1.2136,
+ "step": 463
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.21979766512306334,
+ "learning_rate": 7.209812166878491e-05,
+ "loss": 1.2066,
+ "step": 464
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.23324453647530663,
+ "learning_rate": 7.204717671829051e-05,
+ "loss": 1.1657,
+ "step": 465
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.2529434935507481,
+ "learning_rate": 7.199608619025177e-05,
+ "loss": 1.2093,
+ "step": 466
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.25371701891720116,
+ "learning_rate": 7.194485031675265e-05,
+ "loss": 1.2225,
+ "step": 467
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.23272423066292103,
+ "learning_rate": 7.189346933053725e-05,
+ "loss": 1.1721,
+ "step": 468
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.25122928735587546,
+ "learning_rate": 7.184194346500892e-05,
+ "loss": 1.2537,
+ "step": 469
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.2159270875490409,
+ "learning_rate": 7.179027295422913e-05,
+ "loss": 1.197,
+ "step": 470
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.2633111059076544,
+ "learning_rate": 7.173845803291636e-05,
+ "loss": 1.1721,
+ "step": 471
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.30555936322098703,
+ "learning_rate": 7.168649893644517e-05,
+ "loss": 1.3011,
+ "step": 472
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.23492670111453726,
+ "learning_rate": 7.163439590084502e-05,
+ "loss": 1.1601,
+ "step": 473
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.26602734263721806,
+ "learning_rate": 7.158214916279923e-05,
+ "loss": 1.2808,
+ "step": 474
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.3182695007856262,
+ "learning_rate": 7.152975895964386e-05,
+ "loss": 1.2967,
+ "step": 475
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.2785021674736721,
+ "learning_rate": 7.147722552936673e-05,
+ "loss": 1.1789,
+ "step": 476
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.279474303138652,
+ "learning_rate": 7.142454911060627e-05,
+ "loss": 1.2596,
+ "step": 477
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.2556980144910755,
+ "learning_rate": 7.137172994265044e-05,
+ "loss": 1.2426,
+ "step": 478
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.3311256331993533,
+ "learning_rate": 7.131876826543565e-05,
+ "loss": 1.2059,
+ "step": 479
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.26467296197775253,
+ "learning_rate": 7.12656643195457e-05,
+ "loss": 1.2482,
+ "step": 480
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.27444885274652553,
+ "learning_rate": 7.121241834621064e-05,
+ "loss": 1.2528,
+ "step": 481
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.2572283861115396,
+ "learning_rate": 7.115903058730567e-05,
+ "loss": 1.1849,
+ "step": 482
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.2677065778235683,
+ "learning_rate": 7.11055012853501e-05,
+ "loss": 1.2011,
+ "step": 483
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.29470622036742816,
+ "learning_rate": 7.105183068350619e-05,
+ "loss": 1.2398,
+ "step": 484
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.27609230248969197,
+ "learning_rate": 7.099801902557811e-05,
+ "loss": 1.2259,
+ "step": 485
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.24248634168099284,
+ "learning_rate": 7.094406655601073e-05,
+ "loss": 1.2282,
+ "step": 486
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.2765941767688746,
+ "learning_rate": 7.088997351988865e-05,
+ "loss": 1.2319,
+ "step": 487
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.29347776909858947,
+ "learning_rate": 7.083574016293493e-05,
+ "loss": 1.1765,
+ "step": 488
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.285370295424537,
+ "learning_rate": 7.078136673151008e-05,
+ "loss": 1.26,
+ "step": 489
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.29408734903836536,
+ "learning_rate": 7.072685347261093e-05,
+ "loss": 1.226,
+ "step": 490
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.27437470239205813,
+ "learning_rate": 7.067220063386947e-05,
+ "loss": 1.1976,
+ "step": 491
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.2680770258777871,
+ "learning_rate": 7.061740846355176e-05,
+ "loss": 1.1915,
+ "step": 492
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.27200362879502954,
+ "learning_rate": 7.056247721055678e-05,
+ "loss": 1.2002,
+ "step": 493
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.2637811092577037,
+ "learning_rate": 7.050740712441528e-05,
+ "loss": 1.287,
+ "step": 494
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.24657959209271266,
+ "learning_rate": 7.045219845528875e-05,
+ "loss": 1.2284,
+ "step": 495
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.25311992110358666,
+ "learning_rate": 7.039685145396812e-05,
+ "loss": 1.1616,
+ "step": 496
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.2564633694193358,
+ "learning_rate": 7.034136637187275e-05,
+ "loss": 1.2067,
+ "step": 497
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.2446797651174144,
+ "learning_rate": 7.028574346104926e-05,
+ "loss": 1.2284,
+ "step": 498
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.2592751463399255,
+ "learning_rate": 7.022998297417034e-05,
+ "loss": 1.2371,
+ "step": 499
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.2500713943206808,
+ "learning_rate": 7.017408516453365e-05,
+ "loss": 1.1061,
+ "step": 500
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.2812266276040743,
+ "learning_rate": 7.011805028606064e-05,
+ "loss": 1.1949,
+ "step": 501
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.298829667668083,
+ "learning_rate": 7.006187859329544e-05,
+ "loss": 1.2313,
+ "step": 502
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.26518768159745104,
+ "learning_rate": 7.000557034140361e-05,
+ "loss": 1.2246,
+ "step": 503
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.3037280360760458,
+ "learning_rate": 6.994912578617113e-05,
+ "loss": 1.1617,
+ "step": 504
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.2726903109255714,
+ "learning_rate": 6.989254518400309e-05,
+ "loss": 1.2415,
+ "step": 505
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.25568082003046966,
+ "learning_rate": 6.98358287919226e-05,
+ "loss": 1.1817,
+ "step": 506
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.25633294893705044,
+ "learning_rate": 6.97789768675696e-05,
+ "loss": 1.2149,
+ "step": 507
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.28291439435087123,
+ "learning_rate": 6.972198966919972e-05,
+ "loss": 1.1578,
+ "step": 508
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.27195184756655516,
+ "learning_rate": 6.966486745568308e-05,
+ "loss": 1.2355,
+ "step": 509
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.239159568376005,
+ "learning_rate": 6.960761048650312e-05,
+ "loss": 1.1688,
+ "step": 510
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.22961475425949177,
+ "learning_rate": 6.955021902175543e-05,
+ "loss": 1.2094,
+ "step": 511
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.27443773600741117,
+ "learning_rate": 6.949269332214651e-05,
+ "loss": 1.2559,
+ "step": 512
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.26230551832002097,
+ "learning_rate": 6.94350336489927e-05,
+ "loss": 1.2121,
+ "step": 513
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.2716742985303849,
+ "learning_rate": 6.937724026421892e-05,
+ "loss": 1.2444,
+ "step": 514
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.2537850139439542,
+ "learning_rate": 6.931931343035742e-05,
+ "loss": 1.1327,
+ "step": 515
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.28599587967496826,
+ "learning_rate": 6.926125341054676e-05,
+ "loss": 1.2236,
+ "step": 516
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.26780654378470103,
+ "learning_rate": 6.920306046853043e-05,
+ "loss": 1.2295,
+ "step": 517
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.23606296888412015,
+ "learning_rate": 6.914473486865577e-05,
+ "loss": 1.1543,
+ "step": 518
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.34976881174240837,
+ "learning_rate": 6.90862768758727e-05,
+ "loss": 1.2067,
+ "step": 519
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.2481257873494882,
+ "learning_rate": 6.902768675573258e-05,
+ "loss": 1.2188,
+ "step": 520
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.2996395778117021,
+ "learning_rate": 6.896896477438699e-05,
+ "loss": 1.2326,
+ "step": 521
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.8839768816333193,
+ "learning_rate": 6.891011119858643e-05,
+ "loss": 1.2435,
+ "step": 522
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.2851882482058998,
+ "learning_rate": 6.885112629567927e-05,
+ "loss": 1.2644,
+ "step": 523
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.2813663482913699,
+ "learning_rate": 6.879201033361035e-05,
+ "loss": 1.2309,
+ "step": 524
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.3257551560135454,
+ "learning_rate": 6.873276358091996e-05,
+ "loss": 1.2755,
+ "step": 525
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.28930479952494365,
+ "learning_rate": 6.867338630674247e-05,
+ "loss": 1.1962,
+ "step": 526
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.3077462996938649,
+ "learning_rate": 6.861387878080511e-05,
+ "loss": 1.2402,
+ "step": 527
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.2848900193452761,
+ "learning_rate": 6.855424127342688e-05,
+ "loss": 1.2748,
+ "step": 528
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.4765938812802202,
+ "learning_rate": 6.849447405551718e-05,
+ "loss": 1.2226,
+ "step": 529
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.53184473292579,
+ "learning_rate": 6.843457739857467e-05,
+ "loss": 1.2347,
+ "step": 530
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.6416239346492343,
+ "learning_rate": 6.837455157468596e-05,
+ "loss": 1.2429,
+ "step": 531
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.3188092712502773,
+ "learning_rate": 6.831439685652442e-05,
+ "loss": 1.216,
+ "step": 532
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.3527495731006385,
+ "learning_rate": 6.825411351734895e-05,
+ "loss": 1.1682,
+ "step": 533
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 0.29603753744741856,
+ "learning_rate": 6.819370183100274e-05,
+ "loss": 1.1434,
+ "step": 534
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 0.5252450389976622,
+ "learning_rate": 6.813316207191198e-05,
+ "loss": 1.1943,
+ "step": 535
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 0.32999419558659937,
+ "learning_rate": 6.807249451508466e-05,
+ "loss": 1.192,
+ "step": 536
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 0.3650175469778724,
+ "learning_rate": 6.801169943610929e-05,
+ "loss": 1.2141,
+ "step": 537
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 1.0643532150783557,
+ "learning_rate": 6.795077711115368e-05,
+ "loss": 1.2253,
+ "step": 538
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.5041310609130145,
+ "learning_rate": 6.788972781696363e-05,
+ "loss": 1.278,
+ "step": 539
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.5123058164360991,
+ "learning_rate": 6.782855183086177e-05,
+ "loss": 1.2231,
+ "step": 540
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.3533015702394419,
+ "learning_rate": 6.776724943074619e-05,
+ "loss": 1.2072,
+ "step": 541
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.30253964625417207,
+ "learning_rate": 6.770582089508927e-05,
+ "loss": 1.1382,
+ "step": 542
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.348991618828202,
+ "learning_rate": 6.764426650293633e-05,
+ "loss": 1.2079,
+ "step": 543
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.46017440578788743,
+ "learning_rate": 6.758258653390444e-05,
+ "loss": 1.1813,
+ "step": 544
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.31962101755594885,
+ "learning_rate": 6.75207812681811e-05,
+ "loss": 1.1339,
+ "step": 545
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.37092024548285923,
+ "learning_rate": 6.745885098652298e-05,
+ "loss": 1.2591,
+ "step": 546
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.32347106450715835,
+ "learning_rate": 6.739679597025466e-05,
+ "loss": 1.2017,
+ "step": 547
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.39250187112342494,
+ "learning_rate": 6.733461650126733e-05,
+ "loss": 1.0933,
+ "step": 548
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.473522452217324,
+ "learning_rate": 6.727231286201752e-05,
+ "loss": 1.1124,
+ "step": 549
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.4809062179622052,
+ "learning_rate": 6.720988533552582e-05,
+ "loss": 1.1585,
+ "step": 550
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.3529662801059162,
+ "learning_rate": 6.714733420537559e-05,
+ "loss": 1.0501,
+ "step": 551
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.5958247214391118,
+ "learning_rate": 6.708465975571168e-05,
+ "loss": 1.1086,
+ "step": 552
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.5341364205022454,
+ "learning_rate": 6.70218622712391e-05,
+ "loss": 1.0518,
+ "step": 553
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.3601805724462006,
+ "learning_rate": 6.695894203722181e-05,
+ "loss": 1.1779,
+ "step": 554
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.43410190338280613,
+ "learning_rate": 6.68958993394813e-05,
+ "loss": 1.093,
+ "step": 555
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.46217742572873594,
+ "learning_rate": 6.683273446439546e-05,
+ "loss": 1.0117,
+ "step": 556
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.8591682373623357,
+ "learning_rate": 6.676944769889708e-05,
+ "loss": 1.1002,
+ "step": 557
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.7383229487622726,
+ "learning_rate": 6.670603933047272e-05,
+ "loss": 1.0779,
+ "step": 558
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.5965305891207813,
+ "learning_rate": 6.664250964716131e-05,
+ "loss": 1.0889,
+ "step": 559
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.6030858606684543,
+ "learning_rate": 6.657885893755288e-05,
+ "loss": 1.0982,
+ "step": 560
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.4644510682398409,
+ "learning_rate": 6.65150874907872e-05,
+ "loss": 1.1004,
+ "step": 561
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.43943285132452564,
+ "learning_rate": 6.645119559655254e-05,
+ "loss": 1.0536,
+ "step": 562
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.4456395978600012,
+ "learning_rate": 6.638718354508427e-05,
+ "loss": 1.0733,
+ "step": 563
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.3303824433217466,
+ "learning_rate": 6.632305162716365e-05,
+ "loss": 1.0552,
+ "step": 564
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.3617704823170143,
+ "learning_rate": 6.62588001341164e-05,
+ "loss": 1.1092,
+ "step": 565
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.4465013349903427,
+ "learning_rate": 6.619442935781141e-05,
+ "loss": 1.0781,
+ "step": 566
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.48516780613791277,
+ "learning_rate": 6.612993959065947e-05,
+ "loss": 1.0686,
+ "step": 567
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.38867820318536633,
+ "learning_rate": 6.606533112561186e-05,
+ "loss": 1.1215,
+ "step": 568
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.38566119820378336,
+ "learning_rate": 6.600060425615907e-05,
+ "loss": 1.1213,
+ "step": 569
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.35534855445058544,
+ "learning_rate": 6.593575927632947e-05,
+ "loss": 1.0955,
+ "step": 570
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.38124406233349717,
+ "learning_rate": 6.587079648068795e-05,
+ "loss": 1.0659,
+ "step": 571
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.454750160923548,
+ "learning_rate": 6.580571616433457e-05,
+ "loss": 1.1149,
+ "step": 572
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.35353190088025255,
+ "learning_rate": 6.574051862290325e-05,
+ "loss": 1.0388,
+ "step": 573
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.3249395594793626,
+ "learning_rate": 6.567520415256045e-05,
+ "loss": 1.0784,
+ "step": 574
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.40078898818247227,
+ "learning_rate": 6.560977305000375e-05,
+ "loss": 1.0859,
+ "step": 575
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.4115264795060035,
+ "learning_rate": 6.554422561246054e-05,
+ "loss": 1.1828,
+ "step": 576
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.30090229228069215,
+ "learning_rate": 6.54785621376867e-05,
+ "loss": 1.0901,
+ "step": 577
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.28827860350299206,
+ "learning_rate": 6.541278292396523e-05,
+ "loss": 1.0277,
+ "step": 578
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.34690404488996757,
+ "learning_rate": 6.534688827010484e-05,
+ "loss": 1.048,
+ "step": 579
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.29943113556644785,
+ "learning_rate": 6.528087847543867e-05,
+ "loss": 1.0646,
+ "step": 580
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.37318202575874415,
+ "learning_rate": 6.521475383982291e-05,
+ "loss": 1.1091,
+ "step": 581
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.3049663659203959,
+ "learning_rate": 6.51485146636354e-05,
+ "loss": 1.0552,
+ "step": 582
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.3342407867509692,
+ "learning_rate": 6.508216124777431e-05,
+ "loss": 1.2227,
+ "step": 583
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.3348396047855952,
+ "learning_rate": 6.501569389365674e-05,
+ "loss": 1.0861,
+ "step": 584
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.30951429367513383,
+ "learning_rate": 6.494911290321737e-05,
+ "loss": 1.0461,
+ "step": 585
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.33898401361064606,
+ "learning_rate": 6.488241857890711e-05,
+ "loss": 1.0854,
+ "step": 586
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.4901462068263497,
+ "learning_rate": 6.481561122369164e-05,
+ "loss": 1.1012,
+ "step": 587
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.3179574879809652,
+ "learning_rate": 6.474869114105018e-05,
+ "loss": 1.0451,
+ "step": 588
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.32159328915060714,
+ "learning_rate": 6.468165863497395e-05,
+ "loss": 1.0458,
+ "step": 589
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.36462235008537297,
+ "learning_rate": 6.461451400996491e-05,
+ "loss": 1.1247,
+ "step": 590
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.5373862753611778,
+ "learning_rate": 6.454725757103432e-05,
+ "loss": 1.0542,
+ "step": 591
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.3160409270291303,
+ "learning_rate": 6.447988962370133e-05,
+ "loss": 1.0829,
+ "step": 592
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.390452102978435,
+ "learning_rate": 6.441241047399169e-05,
+ "loss": 1.192,
+ "step": 593
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.3802122712014928,
+ "learning_rate": 6.434482042843627e-05,
+ "loss": 1.1153,
+ "step": 594
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.4081584328242501,
+ "learning_rate": 6.427711979406966e-05,
+ "loss": 1.1635,
+ "step": 595
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.3791962989638633,
+ "learning_rate": 6.420930887842889e-05,
+ "loss": 1.1581,
+ "step": 596
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.33239440056484193,
+ "learning_rate": 6.414138798955189e-05,
+ "loss": 1.0926,
+ "step": 597
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.3279881540815014,
+ "learning_rate": 6.407335743597616e-05,
+ "loss": 1.1386,
+ "step": 598
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.30309644763750837,
+ "learning_rate": 6.40052175267374e-05,
+ "loss": 1.0523,
+ "step": 599
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.3349097308403333,
+ "learning_rate": 6.393696857136801e-05,
+ "loss": 1.0815,
+ "step": 600
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.3288227593556618,
+ "learning_rate": 6.386861087989581e-05,
+ "loss": 1.015,
+ "step": 601
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.36685586740843157,
+ "learning_rate": 6.380014476284255e-05,
+ "loss": 1.1232,
+ "step": 602
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.3620977714204643,
+ "learning_rate": 6.373157053122243e-05,
+ "loss": 1.1138,
+ "step": 603
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.3130587018197183,
+ "learning_rate": 6.366288849654091e-05,
+ "loss": 1.1255,
+ "step": 604
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.3602737087072766,
+ "learning_rate": 6.359409897079303e-05,
+ "loss": 1.0282,
+ "step": 605
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.31168852571991945,
+ "learning_rate": 6.352520226646222e-05,
+ "loss": 1.0779,
+ "step": 606
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.3516045580189353,
+ "learning_rate": 6.345619869651871e-05,
+ "loss": 1.1028,
+ "step": 607
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.3231857927563657,
+ "learning_rate": 6.33870885744182e-05,
+ "loss": 1.1202,
+ "step": 608
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.30205205129701157,
+ "learning_rate": 6.331787221410041e-05,
+ "loss": 1.1369,
+ "step": 609
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.3198359813888166,
+ "learning_rate": 6.32485499299877e-05,
+ "loss": 1.1763,
+ "step": 610
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.3128641370321787,
+ "learning_rate": 6.31791220369835e-05,
+ "loss": 1.0223,
+ "step": 611
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.2989105616213649,
+ "learning_rate": 6.31095888504711e-05,
+ "loss": 1.0358,
+ "step": 612
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.3103537906853337,
+ "learning_rate": 6.303995068631203e-05,
+ "loss": 1.1261,
+ "step": 613
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.28598715532508207,
+ "learning_rate": 6.297020786084467e-05,
+ "loss": 1.0629,
+ "step": 614
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.29809789918093255,
+ "learning_rate": 6.290036069088288e-05,
+ "loss": 1.035,
+ "step": 615
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.33765270252261453,
+ "learning_rate": 6.283040949371451e-05,
+ "loss": 1.1221,
+ "step": 616
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.3424617501293415,
+ "learning_rate": 6.276035458709993e-05,
+ "loss": 1.155,
+ "step": 617
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.3799189737987811,
+ "learning_rate": 6.269019628927067e-05,
+ "loss": 1.0701,
+ "step": 618
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.3358898935253196,
+ "learning_rate": 6.261993491892791e-05,
+ "loss": 1.1649,
+ "step": 619
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.31569979424117356,
+ "learning_rate": 6.254957079524099e-05,
+ "loss": 1.0633,
+ "step": 620
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.3002168156888237,
+ "learning_rate": 6.247910423784609e-05,
+ "loss": 1.0846,
+ "step": 621
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.3097238823450595,
+ "learning_rate": 6.24085355668447e-05,
+ "loss": 1.0808,
+ "step": 622
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.3120312761417578,
+ "learning_rate": 6.233786510280212e-05,
+ "loss": 1.0142,
+ "step": 623
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.3335343015064923,
+ "learning_rate": 6.22670931667461e-05,
+ "loss": 1.0674,
+ "step": 624
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.3234062304634526,
+ "learning_rate": 6.219622008016533e-05,
+ "loss": 1.0981,
+ "step": 625
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.32152678786547273,
+ "learning_rate": 6.212524616500798e-05,
+ "loss": 1.0244,
+ "step": 626
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.39031977608147594,
+ "learning_rate": 6.205417174368023e-05,
+ "loss": 1.1205,
+ "step": 627
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.3806189090017157,
+ "learning_rate": 6.198299713904485e-05,
+ "loss": 1.1134,
+ "step": 628
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.2978349276971668,
+ "learning_rate": 6.191172267441967e-05,
+ "loss": 1.0088,
+ "step": 629
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.3190354077382501,
+ "learning_rate": 6.184034867357617e-05,
+ "loss": 1.108,
+ "step": 630
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.32633048665038994,
+ "learning_rate": 6.176887546073797e-05,
+ "loss": 1.0825,
+ "step": 631
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.3428026413020903,
+ "learning_rate": 6.169730336057939e-05,
+ "loss": 1.0765,
+ "step": 632
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.3475737151929015,
+ "learning_rate": 6.162563269822391e-05,
+ "loss": 1.0693,
+ "step": 633
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.3870252154591392,
+ "learning_rate": 6.15538637992428e-05,
+ "loss": 1.1081,
+ "step": 634
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.33597355193652834,
+ "learning_rate": 6.148199698965352e-05,
+ "loss": 1.0893,
+ "step": 635
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.30805894179787247,
+ "learning_rate": 6.141003259591834e-05,
+ "loss": 1.0995,
+ "step": 636
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.3025073882734066,
+ "learning_rate": 6.133797094494281e-05,
+ "loss": 1.0388,
+ "step": 637
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.3524395196391662,
+ "learning_rate": 6.126581236407429e-05,
+ "loss": 1.1196,
+ "step": 638
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.3377646188130345,
+ "learning_rate": 6.119355718110039e-05,
+ "loss": 1.0382,
+ "step": 639
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.35508400659785483,
+ "learning_rate": 6.112120572424763e-05,
+ "loss": 1.1402,
+ "step": 640
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.3454418793700457,
+ "learning_rate": 6.104875832217982e-05,
+ "loss": 1.1032,
+ "step": 641
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.32629806837059866,
+ "learning_rate": 6.097621530399661e-05,
+ "loss": 1.0959,
+ "step": 642
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.3329536837751315,
+ "learning_rate": 6.090357699923202e-05,
+ "loss": 1.0467,
+ "step": 643
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.32302233828349475,
+ "learning_rate": 6.083084373785287e-05,
+ "loss": 1.0858,
+ "step": 644
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.3310358826507611,
+ "learning_rate": 6.075801585025739e-05,
+ "loss": 1.0715,
+ "step": 645
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.319322035854079,
+ "learning_rate": 6.068509366727362e-05,
+ "loss": 1.177,
+ "step": 646
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.3065230667302707,
+ "learning_rate": 6.061207752015797e-05,
+ "loss": 1.0649,
+ "step": 647
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.29926795565748227,
+ "learning_rate": 6.053896774059368e-05,
+ "loss": 1.1325,
+ "step": 648
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.3556069634279046,
+ "learning_rate": 6.046576466068931e-05,
+ "loss": 1.1366,
+ "step": 649
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.3189191131461966,
+ "learning_rate": 6.039246861297727e-05,
+ "loss": 1.0693,
+ "step": 650
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.3347197156648834,
+ "learning_rate": 6.031907993041227e-05,
+ "loss": 1.1009,
+ "step": 651
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.32274156348185445,
+ "learning_rate": 6.0245598946369826e-05,
+ "loss": 1.1675,
+ "step": 652
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.35534089035455224,
+ "learning_rate": 6.017202599464476e-05,
+ "loss": 1.1723,
+ "step": 653
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.3106026578570133,
+ "learning_rate": 6.009836140944965e-05,
+ "loss": 1.0954,
+ "step": 654
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.3309144454564729,
+ "learning_rate": 6.002460552541331e-05,
+ "loss": 1.0209,
+ "step": 655
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.3023619281400003,
+ "learning_rate": 5.9950758677579345e-05,
+ "loss": 1.0363,
+ "step": 656
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.3311182880219704,
+ "learning_rate": 5.987682120140451e-05,
+ "loss": 1.0515,
+ "step": 657
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.33396486010030413,
+ "learning_rate": 5.980279343275729e-05,
+ "loss": 1.1251,
+ "step": 658
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.3465764556678002,
+ "learning_rate": 5.97286757079163e-05,
+ "loss": 1.165,
+ "step": 659
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.304193441363374,
+ "learning_rate": 5.965446836356882e-05,
+ "loss": 1.0228,
+ "step": 660
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.3415149030413082,
+ "learning_rate": 5.9580171736809224e-05,
+ "loss": 1.0742,
+ "step": 661
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.33138658321132064,
+ "learning_rate": 5.950578616513746e-05,
+ "loss": 1.0843,
+ "step": 662
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.30774403421162994,
+ "learning_rate": 5.943131198645752e-05,
+ "loss": 1.065,
+ "step": 663
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.3428877492183819,
+ "learning_rate": 5.9356749539075885e-05,
+ "loss": 1.1101,
+ "step": 664
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.3621290546130101,
+ "learning_rate": 5.928209916170003e-05,
+ "loss": 1.1372,
+ "step": 665
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.3482375945469884,
+ "learning_rate": 5.9207361193436865e-05,
+ "loss": 1.132,
+ "step": 666
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.31754384974068384,
+ "learning_rate": 5.9132535973791156e-05,
+ "loss": 1.148,
+ "step": 667
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.36003834782050365,
+ "learning_rate": 5.9057623842664044e-05,
+ "loss": 1.1099,
+ "step": 668
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.2963701622969662,
+ "learning_rate": 5.8982625140351464e-05,
+ "loss": 1.0755,
+ "step": 669
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.32579569606066516,
+ "learning_rate": 5.8907540207542616e-05,
+ "loss": 1.0809,
+ "step": 670
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.4247563451753457,
+ "learning_rate": 5.8832369385318416e-05,
+ "loss": 1.097,
+ "step": 671
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.33076932102169776,
+ "learning_rate": 5.875711301514992e-05,
+ "loss": 1.1078,
+ "step": 672
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.3609238032332309,
+ "learning_rate": 5.8681771438896815e-05,
+ "loss": 1.1031,
+ "step": 673
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.325159585649425,
+ "learning_rate": 5.860634499880583e-05,
+ "loss": 1.0707,
+ "step": 674
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.4620687271068983,
+ "learning_rate": 5.853083403750922e-05,
+ "loss": 1.1017,
+ "step": 675
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.33485279064365936,
+ "learning_rate": 5.845523889802316e-05,
+ "loss": 1.0989,
+ "step": 676
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.30952573170841513,
+ "learning_rate": 5.8379559923746214e-05,
+ "loss": 1.0393,
+ "step": 677
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.33498605810588283,
+ "learning_rate": 5.830379745845781e-05,
+ "loss": 1.1259,
+ "step": 678
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.35771921163037307,
+ "learning_rate": 5.822795184631659e-05,
+ "loss": 1.0815,
+ "step": 679
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.3329650192347647,
+ "learning_rate": 5.815202343185894e-05,
+ "loss": 1.1344,
+ "step": 680
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3356634465845771,
+ "learning_rate": 5.807601255999736e-05,
+ "loss": 1.1297,
+ "step": 681
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3289442034151235,
+ "learning_rate": 5.7999919576018934e-05,
+ "loss": 1.022,
+ "step": 682
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3207007334784113,
+ "learning_rate": 5.7923744825583745e-05,
+ "loss": 1.0571,
+ "step": 683
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3582460325329284,
+ "learning_rate": 5.7847488654723304e-05,
+ "loss": 1.0778,
+ "step": 684
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3563317666176927,
+ "learning_rate": 5.777115140983899e-05,
+ "loss": 1.1003,
+ "step": 685
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 3.4694912945702105,
+ "learning_rate": 5.769473343770047e-05,
+ "loss": 1.121,
+ "step": 686
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.43002349520483113,
+ "learning_rate": 5.761823508544411e-05,
+ "loss": 1.0765,
+ "step": 687
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.39467783104839754,
+ "learning_rate": 5.754165670057142e-05,
+ "loss": 1.0788,
+ "step": 688
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.39629029674867916,
+ "learning_rate": 5.7464998630947464e-05,
+ "loss": 1.0812,
+ "step": 689
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.3880152093965208,
+ "learning_rate": 5.738826122479929e-05,
+ "loss": 1.1228,
+ "step": 690
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.3777874121959188,
+ "learning_rate": 5.7311444830714324e-05,
+ "loss": 1.0907,
+ "step": 691
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.38004041653523696,
+ "learning_rate": 5.723454979763882e-05,
+ "loss": 1.1263,
+ "step": 692
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.37049672627797636,
+ "learning_rate": 5.7157576474876246e-05,
+ "loss": 1.1438,
+ "step": 693
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.32973606103437614,
+ "learning_rate": 5.7080525212085725e-05,
+ "loss": 1.0553,
+ "step": 694
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.31674639252070325,
+ "learning_rate": 5.700339635928038e-05,
+ "loss": 1.06,
+ "step": 695
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.32282199426553837,
+ "learning_rate": 5.692619026682588e-05,
+ "loss": 1.0841,
+ "step": 696
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.4810882958061859,
+ "learning_rate": 5.684890728543869e-05,
+ "loss": 1.0803,
+ "step": 697
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.3995638550178378,
+ "learning_rate": 5.6771547766184566e-05,
+ "loss": 1.1187,
+ "step": 698
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.35264932960583484,
+ "learning_rate": 5.669411206047699e-05,
+ "loss": 1.0641,
+ "step": 699
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.35240640524733,
+ "learning_rate": 5.661660052007547e-05,
+ "loss": 1.076,
+ "step": 700
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.3540694609860389,
+ "learning_rate": 5.653901349708401e-05,
+ "loss": 1.1369,
+ "step": 701
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.3196055112925304,
+ "learning_rate": 5.646135134394955e-05,
+ "loss": 1.0677,
+ "step": 702
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.4214141007955914,
+ "learning_rate": 5.6383614413460266e-05,
+ "loss": 1.1139,
+ "step": 703
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.3625611311798579,
+ "learning_rate": 5.630580305874402e-05,
+ "loss": 1.1845,
+ "step": 704
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.3425208672181188,
+ "learning_rate": 5.62279176332668e-05,
+ "loss": 1.174,
+ "step": 705
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.3108419862818321,
+ "learning_rate": 5.6149958490830996e-05,
+ "loss": 1.0331,
+ "step": 706
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.3274644181571904,
+ "learning_rate": 5.607192598557394e-05,
+ "loss": 1.0664,
+ "step": 707
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.346218197215145,
+ "learning_rate": 5.599382047196617e-05,
+ "loss": 1.2088,
+ "step": 708
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.328497632267458,
+ "learning_rate": 5.591564230480989e-05,
+ "loss": 1.0287,
+ "step": 709
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.3708173720611468,
+ "learning_rate": 5.583739183923732e-05,
+ "loss": 1.0883,
+ "step": 710
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.3631427403535479,
+ "learning_rate": 5.575906943070915e-05,
+ "loss": 1.1155,
+ "step": 711
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.3305201458598695,
+ "learning_rate": 5.5680675435012834e-05,
+ "loss": 1.0958,
+ "step": 712
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.34978833532083714,
+ "learning_rate": 5.5602210208261036e-05,
+ "loss": 1.1437,
+ "step": 713
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.3510553882510229,
+ "learning_rate": 5.552367410688999e-05,
+ "loss": 1.0941,
+ "step": 714
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.3523747462465078,
+ "learning_rate": 5.544506748765789e-05,
+ "loss": 1.1289,
+ "step": 715
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.38262637783927445,
+ "learning_rate": 5.5366390707643266e-05,
+ "loss": 1.099,
+ "step": 716
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.38620065989073454,
+ "learning_rate": 5.528764412424334e-05,
+ "loss": 1.083,
+ "step": 717
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.3401355276121096,
+ "learning_rate": 5.520882809517245e-05,
+ "loss": 1.028,
+ "step": 718
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.3392061008943934,
+ "learning_rate": 5.512994297846039e-05,
+ "loss": 1.1083,
+ "step": 719
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.34219480421015414,
+ "learning_rate": 5.505098913245077e-05,
+ "loss": 1.1108,
+ "step": 720
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.3275058061553761,
+ "learning_rate": 5.497196691579945e-05,
+ "loss": 1.111,
+ "step": 721
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.36800249746509384,
+ "learning_rate": 5.489287668747283e-05,
+ "loss": 1.1221,
+ "step": 722
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.4129005533101575,
+ "learning_rate": 5.481371880674628e-05,
+ "loss": 1.0966,
+ "step": 723
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.36563906596251655,
+ "learning_rate": 5.4734493633202505e-05,
+ "loss": 1.0927,
+ "step": 724
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.3614650536839971,
+ "learning_rate": 5.465520152672986e-05,
+ "loss": 1.13,
+ "step": 725
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.36419665098633497,
+ "learning_rate": 5.4575842847520765e-05,
+ "loss": 1.1183,
+ "step": 726
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.34490689807258995,
+ "learning_rate": 5.449641795607005e-05,
+ "loss": 1.0919,
+ "step": 727
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.3627643746876298,
+ "learning_rate": 5.441692721317334e-05,
+ "loss": 1.0411,
+ "step": 728
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.323620411949565,
+ "learning_rate": 5.433737097992537e-05,
+ "loss": 1.0725,
+ "step": 729
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3521599501824965,
+ "learning_rate": 5.425774961771838e-05,
+ "loss": 1.0926,
+ "step": 730
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3302390546764222,
+ "learning_rate": 5.417806348824047e-05,
+ "loss": 1.0468,
+ "step": 731
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3833325802616019,
+ "learning_rate": 5.4098312953473956e-05,
+ "loss": 1.1291,
+ "step": 732
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3708621126835512,
+ "learning_rate": 5.401849837569372e-05,
+ "loss": 1.0887,
+ "step": 733
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3625834373416278,
+ "learning_rate": 5.393862011746555e-05,
+ "loss": 1.0981,
+ "step": 734
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3583343965080617,
+ "learning_rate": 5.385867854164451e-05,
+ "loss": 1.1021,
+ "step": 735
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.34598320594096066,
+ "learning_rate": 5.377867401137332e-05,
+ "loss": 1.1376,
+ "step": 736
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.3046382791315433,
+ "learning_rate": 5.369860689008066e-05,
+ "loss": 1.0206,
+ "step": 737
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.34464948380043725,
+ "learning_rate": 5.3618477541479505e-05,
+ "loss": 1.1084,
+ "step": 738
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.3203242519627101,
+ "learning_rate": 5.353828632956557e-05,
+ "loss": 1.0731,
+ "step": 739
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.3431169960355163,
+ "learning_rate": 5.3458033618615516e-05,
+ "loss": 1.091,
+ "step": 740
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.33492074521678705,
+ "learning_rate": 5.337771977318543e-05,
+ "loss": 1.1112,
+ "step": 741
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.32576546585541344,
+ "learning_rate": 5.3297345158109086e-05,
+ "loss": 1.0993,
+ "step": 742
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.3410007245037574,
+ "learning_rate": 5.3216910138496286e-05,
+ "loss": 1.094,
+ "step": 743
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.34891180680896833,
+ "learning_rate": 5.313641507973128e-05,
+ "loss": 1.1331,
+ "step": 744
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.37135766946717214,
+ "learning_rate": 5.3055860347471006e-05,
+ "loss": 1.1,
+ "step": 745
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.3465019415478411,
+ "learning_rate": 5.297524630764349e-05,
+ "loss": 1.1256,
+ "step": 746
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.37035388481626563,
+ "learning_rate": 5.289457332644615e-05,
+ "loss": 1.0366,
+ "step": 747
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.33853883270759155,
+ "learning_rate": 5.281384177034421e-05,
+ "loss": 1.0547,
+ "step": 748
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.364306618627317,
+ "learning_rate": 5.2733052006068897e-05,
+ "loss": 1.0768,
+ "step": 749
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.4021754315731627,
+ "learning_rate": 5.2652204400615916e-05,
+ "loss": 1.1382,
+ "step": 750
+ }
+ ],
+ "logging_steps": 1.0,
+ "max_steps": 1638,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 3,
+ "save_steps": 50,
+ "total_flos": 777666642837504.0,
+ "train_batch_size": 1,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/checkpoint-750/training_args.bin b/checkpoint-750/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..c5d2416a3b70bb5260978ec9996f00154a724ba7
--- /dev/null
+++ b/checkpoint-750/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b22e8f9d51a16d03a2c506fa3d1eafa8f4b1ae992992c2086a4d435ffd97387e
+size 6712
diff --git a/checkpoint-750/zero_to_fp32.py b/checkpoint-750/zero_to_fp32.py
new file mode 100755
index 0000000000000000000000000000000000000000..24cc342e78d1a006c782b3a4cd68d9ce786d8fd8
--- /dev/null
+++ b/checkpoint-750/zero_to_fp32.py
@@ -0,0 +1,604 @@
+#!/usr/bin/env python
+
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+# This script extracts fp32 consolidated weights from a zero 1, 2 and 3 DeepSpeed checkpoints. It gets
+# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
+# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
+# application.
+#
+# example: python zero_to_fp32.py . pytorch_model.bin
+
+import argparse
+import torch
+import glob
+import math
+import os
+import re
+from collections import OrderedDict
+from dataclasses import dataclass
+
+# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
+# DeepSpeed data structures it has to be available in the current python environment.
+from deepspeed.utils import logger
+from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
+
+
@dataclass
class zero_model_state:
    """Distilled contents of one rank's *_model_states.pt checkpoint file.

    Fields are annotated with types (the previous annotations used ``dict()``,
    i.e. an empty-dict *instance*, where the type ``dict`` was intended).
    """
    buffers: dict  # fp32 module buffers keyed by name
    param_shapes: dict  # per param-group {name: shape} mappings
    shared_params: list  # [alias_name, source_name] pairs
    ds_version: int  # deepspeed version recorded in the checkpoint (may be None)
    frozen_param_shapes: dict  # {name: shape} for frozen params, or None
    frozen_param_fragments: dict  # {name: tensor fragment} for frozen params, or None
+
+
+debug = 0
+
+# load to cpu
+device = torch.device('cpu')
+
+
def atoi(text):
    """Return *text* as an int when it is all digits, otherwise unchanged."""
    if text.isdigit():
        return int(text)
    return text


def natural_keys(text):
    '''
    alist.sort(key=natural_keys) sorts in human order
    http://nedbatchelder.com/blog/200712/human_sorting.html
    (See Toothy's implementation in the comments)
    '''
    pieces = re.split(r'(\d+)', text)
    return [atoi(piece) for piece in pieces]
+
+
def get_model_state_file(checkpoint_dir, zero_stage):
    """Return the path of the single model-states file in *checkpoint_dir*.

    Args:
        checkpoint_dir: directory holding the DeepSpeed checkpoint shards
        zero_stage: ZeRO stage the checkpoint was written with (1, 2 or 3)

    Raises:
        FileNotFoundError: the directory or the expected file is missing
        ValueError: unsupported *zero_stage* (previously this fell through
            and crashed with an UnboundLocalError on ``file``)
    """
    if not os.path.isdir(checkpoint_dir):
        raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")

    # there should be only one file
    if zero_stage <= 2:
        file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
    elif zero_stage == 3:
        file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
    else:
        raise ValueError(f"unknown zero stage {zero_stage}")

    if not os.path.exists(file):
        raise FileNotFoundError(f"can't find model states file at '{file}'")

    return file
+
+
def get_checkpoint_files(checkpoint_dir, glob_pattern):
    """Return all files in *checkpoint_dir* matching *glob_pattern*, in natural (human) order."""
    # XXX: need to test that this simple glob rule works for multi-node setup too
    pattern = os.path.join(checkpoint_dir, glob_pattern)
    ckpt_files = sorted(glob.glob(pattern), key=natural_keys)

    if not ckpt_files:
        raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")

    return ckpt_files
+
+
def get_optim_files(checkpoint_dir):
    """All per-rank optimizer-state shards in *checkpoint_dir*, naturally sorted."""
    pattern = "*_optim_states.pt"
    return get_checkpoint_files(checkpoint_dir, pattern)


def get_model_state_files(checkpoint_dir):
    """All per-rank model-state shards in *checkpoint_dir*, naturally sorted."""
    pattern = "*_model_states.pt"
    return get_checkpoint_files(checkpoint_dir, pattern)
+
+
def parse_model_states(files):
    """Load each *_model_states.pt shard and distill it into a zero_model_state.

    Args:
        files: list of model-state checkpoint paths, one per rank

    Returns:
        list of ``zero_model_state``, one per input file

    Raises:
        ValueError: a file is not a model-state checkpoint (missing buffer names)

    Note: the previous version also accumulated a ``param_names`` list that was
    never used; that dead code has been removed.
    """
    zero_model_states = []
    for file in files:
        state_dict = torch.load(file, map_location=device)

        if BUFFER_NAMES not in state_dict:
            raise ValueError(f"{file} is not a model state checkpoint")
        buffer_names = state_dict[BUFFER_NAMES]
        if debug:
            print("Found buffers:", buffer_names)

        # recover just the buffers while restoring them to fp32 if they were saved in fp16
        buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
        param_shapes = state_dict[PARAM_SHAPES]

        # frozen parameters, if any, are stored separately from the trainable ones
        frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
        if frozen_param_shapes is not None and debug:
            print(f"Found frozen_param_shapes: {frozen_param_shapes}")

        # handle shared params
        shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]

        ds_version = state_dict.get(DS_VERSION, None)

        frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)

        z_model_state = zero_model_state(buffers=buffers,
                                         param_shapes=param_shapes,
                                         shared_params=shared_params,
                                         ds_version=ds_version,
                                         frozen_param_shapes=frozen_param_shapes,
                                         frozen_param_fragments=frozen_param_fragments)
        zero_model_states.append(z_model_state)

    return zero_model_states
+
+
def parse_optim_states(files, ds_checkpoint_dir):
    """Load all *_optim_states.pt shards and extract the fp32 flat weight groups.

    Args:
        files: per-rank optimizer-state paths (one per data-parallel rank)
        ds_checkpoint_dir: checkpoint directory (used only in error messages)

    Returns:
        (zero_stage, world_size, fp32_flat_groups): one entry per rank in
        fp32_flat_groups — for stages 1-2 a list of per-group partitions, for
        stage 3 a single flattened tensor (groups concatenated).

    Raises:
        ValueError: not a zero checkpoint, shard count mismatch, or unknown stage
    """
    total_files = len(files)
    state_dicts = []
    for f in files:
        state_dict = torch.load(f, map_location=device)
        # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights
        # and also handle the case where it was already removed by another helper script
        state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
        state_dicts.append(state_dict)

    if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
        raise ValueError(f"{files[0]} is not a zero checkpoint")
    zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
    world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]

    # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
    # parameters can be different from data parallelism for non-expert parameters. So we can just
    # use the max of the partition_count to get the dp world_size.
    if isinstance(world_size, list):
        world_size = max(world_size)

    if world_size != total_files:
        raise ValueError(
            f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
            "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
        )

    # the groups are named differently in each stage
    if zero_stage <= 2:
        fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
        fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
    elif zero_stage == 3:
        fp32_groups_key = FP32_FLAT_GROUPS
        # if there is more than one param group, there will be multiple flattened tensors - one
        # flattened tensor per group - for simplicity merge them into a single tensor
        #
        # XXX: could make the script more memory efficient for when there are multiple groups - it
        # will require matching the sub-lists of param_shapes for each param group flattened tensor
        fp32_flat_groups = [
            torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
        ]
    else:
        raise ValueError(f"unknown zero stage {zero_stage}")

    return zero_stage, world_size, fp32_flat_groups
+
+
def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
    """
    Returns fp32 state_dict reconstructed from ds checkpoint

    Args:
        - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)

    """
    print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")

    optim_files = get_optim_files(ds_checkpoint_dir)
    zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
    print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")

    zero_model_states = parse_model_states(get_model_state_files(ds_checkpoint_dir))
    print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')

    # stages 1 and 2 share a layout; stage 3 shards every param across ranks
    if zero_stage <= 2:
        return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                                          exclude_frozen_parameters)
    elif zero_stage == 3:
        return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                                          exclude_frozen_parameters)
+
+
def _zero2_merge_frozen_params(state_dict, zero_model_states):
    """Copy frozen (non-trained) params into *state_dict* for a zero-1/2 checkpoint.

    Frozen params are not partitioned in these stages, so rank 0's fragments
    are used verbatim.
    """
    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
    if frozen_param_shapes is None or len(frozen_param_shapes) == 0:
        return

    frozen_param_fragments = zero_model_states[0].frozen_param_fragments

    if debug:
        num_elem = sum(s.numel() for s in frozen_param_shapes.values())
        print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

    wanted_params = len(frozen_param_shapes)
    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
    avail_numel = sum(p.numel() for p in frozen_param_fragments.values())
    print(f'Frozen params: Have {avail_numel} numels to process.')
    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params, total_numel = 0, 0
    for name, shape in frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        # the fragment already holds the full tensor in zero-1/2
        state_dict[name] = frozen_param_fragments[name]

        if debug:
            print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _has_callable(obj, fn):
+ attr = getattr(obj, fn, None)
+ return callable(attr)
+
+
def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    """Reconstruct the trainable fp32 params of a zero-1/2 checkpoint into *state_dict*.

    Each rank holds a contiguous partition of every param group's flattened
    fp32 master weights; per group, the partitions are concatenated and then
    sliced back into individual params using rank 0's param_shapes.
    """
    param_shapes = zero_model_states[0].param_shapes

    # Reconstruction protocol:
    #
    # XXX: document this

    if debug:
        for i in range(world_size):
            for j in range(len(fp32_flat_groups[0])):
                print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")

    # XXX: memory usage doubles here (zero2)
    num_param_groups = len(fp32_flat_groups[0])
    merged_single_partition_of_fp32_groups = []
    for i in range(num_param_groups):
        # gather group i's partition from every rank and glue them end to end
        merged_partitions = [sd[i] for sd in fp32_flat_groups]
        full_single_fp32_vector = torch.cat(merged_partitions, 0)
        merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
    avail_numel = sum(
        [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])

    if debug:
        wanted_params = sum([len(shapes) for shapes in param_shapes])
        wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
        # not asserting if there is a mismatch due to possible padding
        print(f"Have {avail_numel} numels to process.")
        print(f"Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    total_numel = 0
    total_params = 0
    for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
        offset = 0
        avail_numel = full_single_fp32_vector.numel()
        for name, shape in shapes.items():

            # shape may be a torch.Size (has .numel) or a plain tuple/list
            unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
            total_numel += unpartitioned_numel
            total_params += 1

            if debug:
                print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
            # carve the next param out of the group's flat vector (narrow = zero-copy view)
            state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
            offset += unpartitioned_numel

        # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
        # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
        # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
        # live optimizer object, so we are checking that the numbers are within the right range
        align_to = 2 * world_size

        def zero2_align(x):
            return align_to * math.ceil(x / align_to)

        if debug:
            print(f"original offset={offset}, avail_numel={avail_numel}")

        offset = zero2_align(offset)
        avail_numel = zero2_align(avail_numel)

        if debug:
            print(f"aligned offset={offset}, avail_numel={avail_numel}")

        # Sanity check
        if offset != avail_numel:
            raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
+
+
def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                               exclude_frozen_parameters):
    """Assemble the full fp32 state_dict for a zero-1/2 checkpoint.

    Order: buffers first, then frozen params (unless excluded), then trainable
    params, and finally aliases for shared params.
    """
    rank0 = zero_model_states[0]
    state_dict = OrderedDict()

    # buffers
    buffers = rank0.buffers
    state_dict.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    if not exclude_frozen_parameters:
        _zero2_merge_frozen_params(state_dict, zero_model_states)

    _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters: alias each shared name to its source tensor
    for alias_name, source_name in rank0.shared_params:
        if source_name in state_dict:
            state_dict[alias_name] = state_dict[source_name]

    return state_dict
+
+
def zero3_partitioned_param_info(unpartitioned_numel, world_size):
    """Return (per-rank partition numel, trailing padding numel) for a zero-3 param."""
    partitioned_numel = math.ceil(unpartitioned_numel / world_size)
    remainder = unpartitioned_numel % world_size
    padding_numel = 0 if remainder == 0 else world_size - remainder
    return partitioned_numel, padding_numel
+
+
def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
    """Reassemble frozen (non-trained) params of a zero-3 checkpoint into *state_dict*.

    In zero-3 even frozen params are sharded across ranks, so each param is
    rebuilt by concatenating its per-rank fragments and trimming the padding.
    """
    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
    if frozen_param_shapes is None or len(frozen_param_shapes) == 0:
        return

    if debug:
        for i in range(world_size):
            num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
            print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

    wanted_params = len(frozen_param_shapes)
    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
    avail_numel = sum(p.numel() for p in zero_model_states[0].frozen_param_fragments.values()) * world_size
    print(f'Frozen params: Have {avail_numel} numels to process.')
    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params, total_numel = 0, 0
    for name, shape in frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        # stitch the per-rank fragments back together, then drop trailing padding
        param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
        state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)

        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    """Reconstruct the trainable fp32 params of a zero-3 checkpoint into *state_dict*.

    Each param occupies ``partitioned_numel`` contiguous elements at the same
    running offset in every rank's flat fp32 tensor; the per-rank slices are
    concatenated, trimmed of padding and reshaped.

    Note: the previous version computed ``avail_numel`` twice and assigned
    ``param_shapes`` twice (the first value was immediately overwritten);
    the dead assignments have been removed.
    """
    # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
    # param, re-consolidating each param, while dealing with padding if any

    # merge list of dicts, preserving order
    param_shapes = {k: v for d in zero_model_states[0].param_shapes for k, v in d.items()}

    if debug:
        for i in range(world_size):
            print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")

    wanted_params = len(param_shapes)
    wanted_numel = sum(shape.numel() for shape in param_shapes.values())
    # not asserting if there is a mismatch due to possible padding
    avail_numel = fp32_flat_groups[0].numel() * world_size
    print(f"Trainable params: Have {avail_numel} numels to process.")
    print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    offset = 0
    total_numel = 0
    total_params = 0
    for name, shape in param_shapes.items():
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel
        total_params += 1

        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

        # XXX: memory usage doubles here
        state_dict[name] = torch.cat(
            tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
            0).narrow(0, 0, unpartitioned_numel).view(shape)
        offset += partitioned_numel

    # offset counted per-rank elements; scale to total elements for the check
    offset *= world_size

    # Sanity check
    if offset != avail_numel:
        raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+                                               exclude_frozen_parameters):
+    """Assemble a consolidated fp32 state_dict from per-rank ZeRO-3 states.
+
+    Assembly order: model buffers first, then (unless excluded) frozen
+    parameters, then trainable parameters; finally shared-parameter aliases
+    are restored by pointing each alias at its reconstructed source tensor.
+
+    Returns:
+        OrderedDict mapping parameter/buffer names to full fp32 tensors.
+    """
+    state_dict = OrderedDict()
+
+    # buffers
+    buffers = zero_model_states[0].buffers
+    state_dict.update(buffers)
+    if debug:
+        print(f"added {len(buffers)} buffers")
+
+    if not exclude_frozen_parameters:
+        _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
+
+    _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+    # recover shared parameters
+    for pair in zero_model_states[0].shared_params:
+        # pair == (alias_name, source_name); alias reuses the source tensor
+        if pair[1] in state_dict:
+            state_dict[pair[0]] = state_dict[pair[1]]
+
+    return state_dict
+
+
+def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None, exclude_frozen_parameters=False):
+    """
+    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
+    ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
+    via a model hub.
+
+    Args:
+        - ``checkpoint_dir``: path to the desired checkpoint folder
+        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
+        - ``exclude_frozen_parameters``: exclude frozen parameters
+
+    Returns:
+        - pytorch ``state_dict``
+
+    Note: this approach may not work if your application doesn't have sufficient free CPU memory and
+    you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
+    the checkpoint.
+
+    A typical usage might be ::
+
+        from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+        # do the training and checkpoint saving
+        state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
+        model = model.cpu() # move to cpu
+        model.load_state_dict(state_dict)
+        # submit to model hub or save the model to share with others
+
+    In this example the ``model`` will no longer be usable in the deepspeed context of the same
+    application. i.e. you will need to re-initialize the deepspeed engine, since
+    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+    If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
+
+    """
+    if tag is None:
+        # The 'latest' file in the checkpoint dir records the most recent tag
+        # (e.g. "global_step800"); resolve it when the caller gave none.
+        latest_path = os.path.join(checkpoint_dir, 'latest')
+        if os.path.isfile(latest_path):
+            with open(latest_path, 'r') as fd:
+                tag = fd.read().strip()
+        else:
+            raise ValueError(f"Unable to find 'latest' file at {latest_path}")
+
+    # The per-rank shard files live inside the tag-named subfolder.
+    ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
+
+    if not os.path.isdir(ds_checkpoint_dir):
+        raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
+
+    return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
+
+
+def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None, exclude_frozen_parameters=False):
+    """
+    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
+    loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
+
+    Args:
+        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+        - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
+        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+        - ``exclude_frozen_parameters``: exclude frozen parameters
+    """
+
+    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag, exclude_frozen_parameters)
+    print(f"Saving fp32 state dict to {output_file}")
+    # Persist the consolidated dict as one file loadable via torch.load().
+    torch.save(state_dict, output_file)
+
+
+def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
+    """
+    1. Put the provided model to cpu
+    2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
+    3. Load it into the provided model
+
+    Args:
+        - ``model``: the model object to update
+        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+
+    Returns:
+        - ``model``: modified model
+
+    Make sure you have plenty of CPU memory available before you call this function. If you don't
+    have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
+    conveniently placed for you in the checkpoint folder.
+
+    A typical usage might be ::
+
+        from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
+        model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
+        # submit to model hub or save the model to share with others
+
+    Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context
+    of the same application. i.e. you will need to re-initialize the deepspeed engine, since
+    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+    """
+    logger.info(f"Extracting fp32 weights")
+    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+
+    logger.info(f"Overwriting model with fp32 weights")
+    model = model.cpu()
+    # NOTE(review): strict=False tolerates missing/unexpected keys —
+    # presumably to allow frozen or excluded params to be absent; verify.
+    model.load_state_dict(state_dict, strict=False)
+
+    return model
+
+
+if __name__ == "__main__":
+
+    # CLI entry point: consolidate a DeepSpeed ZeRO checkpoint into a single
+    # fp32 state_dict file.
+    parser = argparse.ArgumentParser()
+    parser.add_argument("checkpoint_dir",
+                        type=str,
+                        help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
+    parser.add_argument(
+        "output_file",
+        type=str,
+        help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)")
+    parser.add_argument("-t",
+                        "--tag",
+                        type=str,
+                        default=None,
+                        help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
+    parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
+    parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
+    args = parser.parse_args()
+
+    # Expose the CLI debug flag to the helper functions via the module global.
+    debug = args.debug
+
+    convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
+                                               args.output_file,
+                                               tag=args.tag,
+                                               exclude_frozen_parameters=args.exclude_frozen_parameters)
diff --git a/checkpoint-800/README.md b/checkpoint-800/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..16b1eacdd9353dec380a08ee77ce6ed5ab50f12e
--- /dev/null
+++ b/checkpoint-800/README.md
@@ -0,0 +1,202 @@
+---
+library_name: peft
+base_model: gotzmann/uni
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+### Framework versions
+
+- PEFT 0.10.0
\ No newline at end of file
diff --git a/checkpoint-800/adapter_config.json b/checkpoint-800/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3cd6dba5d79f7ca21fd4ad465cbbcac1e0960476
--- /dev/null
+++ b/checkpoint-800/adapter_config.json
@@ -0,0 +1,31 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "gotzmann/uni",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 128,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 128,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "v_proj",
+ "k_proj",
+ "q_proj",
+ "o_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": true
+}
\ No newline at end of file
diff --git a/checkpoint-800/adapter_model.safetensors b/checkpoint-800/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..7b809fa170dcf5c1fe34a2567be2a0ca08b1fad4
--- /dev/null
+++ b/checkpoint-800/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:957309eb8fa80fd16965595c1dc6a97c9d8900131039bb55d965e13653175a9d
+size 1048664848
diff --git a/checkpoint-800/global_step800/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt b/checkpoint-800/global_step800/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..e714c3aa6e55b3db5dd59e64f6d144c157a1e7aa
--- /dev/null
+++ b/checkpoint-800/global_step800/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c55c954a6d1a20b8fbfe142a705e1bb1ac59400f37f3fee03c0dbed4d8d3832f
+size 787270042
diff --git a/checkpoint-800/global_step800/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt b/checkpoint-800/global_step800/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..e990ad16e430b3995cf0928d5fd1dc370fdfb28b
--- /dev/null
+++ b/checkpoint-800/global_step800/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c9b4722a1d11c57d0721b138b81650ec52f6e7aefb835a66cfc5d29f2d60a794
+size 787270042
diff --git a/checkpoint-800/global_step800/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt b/checkpoint-800/global_step800/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..ce01e60191d4eb91eed67fcdeada29c1500c1415
--- /dev/null
+++ b/checkpoint-800/global_step800/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:48f5e2d23e98ffb6690a97e988075882dc973e879d8d4a8408403adf6dd85355
+size 787270042
diff --git a/checkpoint-800/global_step800/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt b/checkpoint-800/global_step800/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..fa8ad470cd371e12d220443d3f268d7ea9a2a322
--- /dev/null
+++ b/checkpoint-800/global_step800/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2950e806e878460d1e46b94f05543dd19cf944318887735bc0cbca1e07e0ba58
+size 787270042
diff --git a/checkpoint-800/global_step800/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt b/checkpoint-800/global_step800/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..2e74300dda38ec1c331372108eec284b90e148a2
--- /dev/null
+++ b/checkpoint-800/global_step800/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:279bbcae36e6e4ee14fee00c972840eaf4b1a59370003cef1c74026c31c378ff
+size 787270042
diff --git a/checkpoint-800/global_step800/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt b/checkpoint-800/global_step800/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..87593dc746f6b784f2fd57d6c1322fbc79323e94
--- /dev/null
+++ b/checkpoint-800/global_step800/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1aa9b1733767642452406caf7e6205632eb28e244cc44660fe32442be16a48b6
+size 787270042
diff --git a/checkpoint-800/global_step800/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt b/checkpoint-800/global_step800/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..180241eba094120819bef8ea4134e63de403681f
--- /dev/null
+++ b/checkpoint-800/global_step800/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3e890fac1e94ff55df91ae0554dc5c6f7520d375caab14aac7f60b135601cad2
+size 787270042
diff --git a/checkpoint-800/global_step800/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt b/checkpoint-800/global_step800/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..1f3142cbf64f6387bbb75e6fd9c665ec778785a8
--- /dev/null
+++ b/checkpoint-800/global_step800/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9b09008e7481896f3e820578250007c53aa36ce5b02f98beef749b855be4783b
+size 787270042
diff --git a/checkpoint-800/global_step800/zero_pp_rank_0_mp_rank_00_model_states.pt b/checkpoint-800/global_step800/zero_pp_rank_0_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..9b11d0d44bbae33e5bc118045048e8845a542c64
--- /dev/null
+++ b/checkpoint-800/global_step800/zero_pp_rank_0_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c577fddf8e24a7ffa762f5719315f724538aa93576ecfcbf0032d25212a3ce4a
+size 653742
diff --git a/checkpoint-800/global_step800/zero_pp_rank_1_mp_rank_00_model_states.pt b/checkpoint-800/global_step800/zero_pp_rank_1_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..2e4cd0e5ea0b57263d79c4bf375f9da70b304278
--- /dev/null
+++ b/checkpoint-800/global_step800/zero_pp_rank_1_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:20b0c382515475979998f4d222f0b235c61aaa493c54354fb5551eeac507f756
+size 653742
diff --git a/checkpoint-800/global_step800/zero_pp_rank_2_mp_rank_00_model_states.pt b/checkpoint-800/global_step800/zero_pp_rank_2_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..1513b1cdd8ff350ab853d1624ca6573d1192d585
--- /dev/null
+++ b/checkpoint-800/global_step800/zero_pp_rank_2_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8e6cca9ad652cb5a3e1a28b18d9545a9750a3004db04fd38e1ef2ee5eccfb562
+size 653742
diff --git a/checkpoint-800/global_step800/zero_pp_rank_3_mp_rank_00_model_states.pt b/checkpoint-800/global_step800/zero_pp_rank_3_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..ac86b1cc4e75211cd593546784ef5b815018c809
--- /dev/null
+++ b/checkpoint-800/global_step800/zero_pp_rank_3_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3c1b5dcaa027488d8d6bc110cd54cfb85ddf3527cd94aa9429f04aebc6fca723
+size 653742
diff --git a/checkpoint-800/global_step800/zero_pp_rank_4_mp_rank_00_model_states.pt b/checkpoint-800/global_step800/zero_pp_rank_4_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..fdf666fe4e3517fb692d423d5887d312a29dcac6
--- /dev/null
+++ b/checkpoint-800/global_step800/zero_pp_rank_4_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dc878f32f7ef72294ca96f298fa1c7fcfbf2b6e425304525a7e2fa197bbd5d55
+size 653742
diff --git a/checkpoint-800/global_step800/zero_pp_rank_5_mp_rank_00_model_states.pt b/checkpoint-800/global_step800/zero_pp_rank_5_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..245a928f1cc458a8716ad046bb0d58abd995ed61
--- /dev/null
+++ b/checkpoint-800/global_step800/zero_pp_rank_5_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:350a480253a8dcec7cbf69b13ae08b32017dd165f7e112f0c1999a7c50e1cdeb
+size 653742
diff --git a/checkpoint-800/global_step800/zero_pp_rank_6_mp_rank_00_model_states.pt b/checkpoint-800/global_step800/zero_pp_rank_6_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..f05c3303bcd651cc9fe267280eb2f663de2479e8
--- /dev/null
+++ b/checkpoint-800/global_step800/zero_pp_rank_6_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ccd1e8726e8abaa96f95daf2437a82f4c3b6295defc800b6d6ada66800c7ad59
+size 653742
diff --git a/checkpoint-800/global_step800/zero_pp_rank_7_mp_rank_00_model_states.pt b/checkpoint-800/global_step800/zero_pp_rank_7_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..8229754e8cf24d583b62eb943576567db0283a1f
--- /dev/null
+++ b/checkpoint-800/global_step800/zero_pp_rank_7_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4233f2bed13c6017800b385c409efe11bc60406831614dc2aff33f60e00c00f4
+size 653742
diff --git a/checkpoint-800/latest b/checkpoint-800/latest
new file mode 100644
index 0000000000000000000000000000000000000000..57729c0be88118cbd582c8c68b4149cee821f0b4
--- /dev/null
+++ b/checkpoint-800/latest
@@ -0,0 +1 @@
+global_step800
\ No newline at end of file
diff --git a/checkpoint-800/rng_state_0.pth b/checkpoint-800/rng_state_0.pth
new file mode 100644
index 0000000000000000000000000000000000000000..4e5b7e2ec90fdb824c8932464c1d9068330655a7
--- /dev/null
+++ b/checkpoint-800/rng_state_0.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:36d2a2034ebb05cb71c510897f2795b31164e50f17b270bc25d2be3ad9a17b22
+size 15984
diff --git a/checkpoint-800/rng_state_1.pth b/checkpoint-800/rng_state_1.pth
new file mode 100644
index 0000000000000000000000000000000000000000..7d8d7722fc72cab6d492b76cb99c8177dcc47544
--- /dev/null
+++ b/checkpoint-800/rng_state_1.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:060dfdb1c49102cbdc8868a6031e68787601b4ccd782f3fb9b137e20c1fd2c7a
+size 15984
diff --git a/checkpoint-800/rng_state_2.pth b/checkpoint-800/rng_state_2.pth
new file mode 100644
index 0000000000000000000000000000000000000000..3c9f84eff30cfa9ea1feedaf262d61fb12e4cba7
--- /dev/null
+++ b/checkpoint-800/rng_state_2.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af01895cb66e616591f2e4baa8dcd8151530eab133c73571ccb31c74f35422ce
+size 15984
diff --git a/checkpoint-800/rng_state_3.pth b/checkpoint-800/rng_state_3.pth
new file mode 100644
index 0000000000000000000000000000000000000000..6eebfb928f8e91eff0ea1645a20b5aa4465c705b
--- /dev/null
+++ b/checkpoint-800/rng_state_3.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:677921992b1e0cef3aee776f245975003d22f51d9bd6ed20f248ded1deb72fa9
+size 15984
diff --git a/checkpoint-800/rng_state_4.pth b/checkpoint-800/rng_state_4.pth
new file mode 100644
index 0000000000000000000000000000000000000000..0866030a266c6d003cc378a9418a723f69e8ab99
--- /dev/null
+++ b/checkpoint-800/rng_state_4.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d69353c629541c690c5471f8ec05fdab2bfecf3d37afaa436bc45939da6db68f
+size 15984
diff --git a/checkpoint-800/rng_state_5.pth b/checkpoint-800/rng_state_5.pth
new file mode 100644
index 0000000000000000000000000000000000000000..554638d77107f832d7aa51c61645ee2d6c48a36d
--- /dev/null
+++ b/checkpoint-800/rng_state_5.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8e40ba6668cc03c9162c68a933d164bf38ae2d196a9a6fec03ae615491201185
+size 15984
diff --git a/checkpoint-800/rng_state_6.pth b/checkpoint-800/rng_state_6.pth
new file mode 100644
index 0000000000000000000000000000000000000000..964331b65172a1bcac03e4673415fa787f724268
--- /dev/null
+++ b/checkpoint-800/rng_state_6.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:870968fea834e24b2e099cf3e4fe1e3fb8caf38d8f8e5b790d7d47386d4d05f5
+size 15984
diff --git a/checkpoint-800/rng_state_7.pth b/checkpoint-800/rng_state_7.pth
new file mode 100644
index 0000000000000000000000000000000000000000..cd4754d65217d0f9d1f2d3334397df7a8a079652
--- /dev/null
+++ b/checkpoint-800/rng_state_7.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e9e19618bee7c6ef43256fea25abe19bca88535eb1e7dc213cde8929ae4e8180
+size 15984
diff --git a/checkpoint-800/scheduler.pt b/checkpoint-800/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..c8f56f167203bea14f45dcb6f31e429f6898e67a
--- /dev/null
+++ b/checkpoint-800/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9bf60fed2d5b3076be79b7c1fa2a43fe5a2f69188a45ae24f4bfafe186448b99
+size 1064
diff --git a/checkpoint-800/special_tokens_map.json b/checkpoint-800/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..72ecfeeb7e14d244c936169d2ed139eeae235ef1
--- /dev/null
+++ b/checkpoint-800/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "",
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/checkpoint-800/tokenizer.model b/checkpoint-800/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..6c00c742ce03c627d6cd5b795984876fa49fa899
--- /dev/null
+++ b/checkpoint-800/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
diff --git a/checkpoint-800/tokenizer_config.json b/checkpoint-800/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..bb5a9f09d8c0f3c32c66fc6118fe5c76c5c6fd90
--- /dev/null
+++ b/checkpoint-800/tokenizer_config.json
@@ -0,0 +1,45 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "",
+ "chat_template": "{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ '' + '### System:\\n\\n' + system_message }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '\\n\\n### Human:\\n\\n' + content }}{% elif message['role'] == 'assistant' %}{{ '\\n\\n### Assistant:\\n\\n' + content + '' }}{% endif %}{% endfor %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "",
+ "legacy": false,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "split_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "",
+ "use_default_system_prompt": false
+}
diff --git a/checkpoint-800/trainer_state.json b/checkpoint-800/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..a03f0713c43233653146668b3dbe1287e237e421
--- /dev/null
+++ b/checkpoint-800/trainer_state.json
@@ -0,0 +1,5621 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 1.4631915866483767,
+ "eval_steps": 500,
+ "global_step": 800,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.0,
+ "grad_norm": 0.849355824164473,
+ "learning_rate": 4.878048780487805e-07,
+ "loss": 1.3655,
+ "step": 1
+ },
+ {
+ "epoch": 0.0,
+ "grad_norm": 10.01567518957158,
+ "learning_rate": 9.75609756097561e-07,
+ "loss": 1.5767,
+ "step": 2
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.6466000875559635,
+ "learning_rate": 1.4634146341463414e-06,
+ "loss": 1.3913,
+ "step": 3
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.6644565932010504,
+ "learning_rate": 1.951219512195122e-06,
+ "loss": 1.3218,
+ "step": 4
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.571354207588475,
+ "learning_rate": 2.4390243902439027e-06,
+ "loss": 1.3597,
+ "step": 5
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.31036262839244955,
+ "learning_rate": 2.926829268292683e-06,
+ "loss": 1.2832,
+ "step": 6
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.2622135027188184,
+ "learning_rate": 3.414634146341464e-06,
+ "loss": 1.2161,
+ "step": 7
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.296824630261661,
+ "learning_rate": 3.902439024390244e-06,
+ "loss": 1.2985,
+ "step": 8
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.2557267467361569,
+ "learning_rate": 4.390243902439025e-06,
+ "loss": 1.3175,
+ "step": 9
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.23418939513890769,
+ "learning_rate": 4.8780487804878055e-06,
+ "loss": 1.2617,
+ "step": 10
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.2364760983285843,
+ "learning_rate": 5.365853658536586e-06,
+ "loss": 1.3103,
+ "step": 11
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.23893034721889,
+ "learning_rate": 5.853658536585366e-06,
+ "loss": 1.2405,
+ "step": 12
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.25563593295485887,
+ "learning_rate": 6.341463414634147e-06,
+ "loss": 1.2831,
+ "step": 13
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.23239975352661665,
+ "learning_rate": 6.829268292682928e-06,
+ "loss": 1.3125,
+ "step": 14
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.3092813858209507,
+ "learning_rate": 7.317073170731707e-06,
+ "loss": 1.2422,
+ "step": 15
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.282563380367434,
+ "learning_rate": 7.804878048780489e-06,
+ "loss": 1.2453,
+ "step": 16
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.22065680088315018,
+ "learning_rate": 8.292682926829268e-06,
+ "loss": 1.2491,
+ "step": 17
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.22777800877980184,
+ "learning_rate": 8.78048780487805e-06,
+ "loss": 1.2655,
+ "step": 18
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.22145212540177928,
+ "learning_rate": 9.268292682926831e-06,
+ "loss": 1.2413,
+ "step": 19
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.22482351883112714,
+ "learning_rate": 9.756097560975611e-06,
+ "loss": 1.2653,
+ "step": 20
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.20823080508385733,
+ "learning_rate": 1.024390243902439e-05,
+ "loss": 1.2374,
+ "step": 21
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.26025492562935737,
+ "learning_rate": 1.0731707317073172e-05,
+ "loss": 1.2065,
+ "step": 22
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.2150252124176173,
+ "learning_rate": 1.1219512195121953e-05,
+ "loss": 1.2782,
+ "step": 23
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.2505915177425618,
+ "learning_rate": 1.1707317073170731e-05,
+ "loss": 1.2742,
+ "step": 24
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.20129223044786942,
+ "learning_rate": 1.2195121951219513e-05,
+ "loss": 1.3366,
+ "step": 25
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.1973508510397107,
+ "learning_rate": 1.2682926829268294e-05,
+ "loss": 1.2476,
+ "step": 26
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.27103325392437194,
+ "learning_rate": 1.3170731707317076e-05,
+ "loss": 1.2325,
+ "step": 27
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.17954976411006285,
+ "learning_rate": 1.3658536585365855e-05,
+ "loss": 1.2523,
+ "step": 28
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.22216997851088888,
+ "learning_rate": 1.4146341463414635e-05,
+ "loss": 1.3297,
+ "step": 29
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.2071458864548587,
+ "learning_rate": 1.4634146341463415e-05,
+ "loss": 1.2127,
+ "step": 30
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.18039422081622164,
+ "learning_rate": 1.5121951219512196e-05,
+ "loss": 1.2509,
+ "step": 31
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.18631254372974412,
+ "learning_rate": 1.5609756097560978e-05,
+ "loss": 1.2247,
+ "step": 32
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.18843872523649827,
+ "learning_rate": 1.6097560975609757e-05,
+ "loss": 1.195,
+ "step": 33
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.2163847267778325,
+ "learning_rate": 1.6585365853658537e-05,
+ "loss": 1.2179,
+ "step": 34
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.19687688475496104,
+ "learning_rate": 1.7073170731707317e-05,
+ "loss": 1.2763,
+ "step": 35
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.20409643064887947,
+ "learning_rate": 1.75609756097561e-05,
+ "loss": 1.253,
+ "step": 36
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.1879182661759335,
+ "learning_rate": 1.804878048780488e-05,
+ "loss": 1.2586,
+ "step": 37
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.19400648948514373,
+ "learning_rate": 1.8536585365853663e-05,
+ "loss": 1.2154,
+ "step": 38
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.1878879343148452,
+ "learning_rate": 1.902439024390244e-05,
+ "loss": 1.2304,
+ "step": 39
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.17687475469924052,
+ "learning_rate": 1.9512195121951222e-05,
+ "loss": 1.2351,
+ "step": 40
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.18223935625384885,
+ "learning_rate": 2e-05,
+ "loss": 1.2222,
+ "step": 41
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.1943061629408338,
+ "learning_rate": 2.048780487804878e-05,
+ "loss": 1.2044,
+ "step": 42
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.17027514338700078,
+ "learning_rate": 2.0975609756097564e-05,
+ "loss": 1.1548,
+ "step": 43
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.18553769630586192,
+ "learning_rate": 2.1463414634146344e-05,
+ "loss": 1.2721,
+ "step": 44
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.19732826914228765,
+ "learning_rate": 2.1951219512195124e-05,
+ "loss": 1.3097,
+ "step": 45
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.18714230986631472,
+ "learning_rate": 2.2439024390243907e-05,
+ "loss": 1.2662,
+ "step": 46
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.19988987568002223,
+ "learning_rate": 2.2926829268292683e-05,
+ "loss": 1.2904,
+ "step": 47
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.17744650133390918,
+ "learning_rate": 2.3414634146341463e-05,
+ "loss": 1.1825,
+ "step": 48
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.16576734763834533,
+ "learning_rate": 2.3902439024390246e-05,
+ "loss": 1.1858,
+ "step": 49
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.179591794065527,
+ "learning_rate": 2.4390243902439026e-05,
+ "loss": 1.2711,
+ "step": 50
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.17923464471176911,
+ "learning_rate": 2.4878048780487805e-05,
+ "loss": 1.2289,
+ "step": 51
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.18991742907836837,
+ "learning_rate": 2.536585365853659e-05,
+ "loss": 1.3097,
+ "step": 52
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.19849796137254636,
+ "learning_rate": 2.5853658536585368e-05,
+ "loss": 1.2489,
+ "step": 53
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.17452371110976383,
+ "learning_rate": 2.634146341463415e-05,
+ "loss": 1.2461,
+ "step": 54
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.17671022353085036,
+ "learning_rate": 2.682926829268293e-05,
+ "loss": 1.153,
+ "step": 55
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.36820559192096686,
+ "learning_rate": 2.731707317073171e-05,
+ "loss": 1.2431,
+ "step": 56
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.20331468526494198,
+ "learning_rate": 2.7804878048780487e-05,
+ "loss": 1.2575,
+ "step": 57
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.2402486598118377,
+ "learning_rate": 2.829268292682927e-05,
+ "loss": 1.2538,
+ "step": 58
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.2549409484173144,
+ "learning_rate": 2.878048780487805e-05,
+ "loss": 1.2065,
+ "step": 59
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.2053105349872685,
+ "learning_rate": 2.926829268292683e-05,
+ "loss": 1.2094,
+ "step": 60
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.17971910872957886,
+ "learning_rate": 2.9756097560975613e-05,
+ "loss": 1.228,
+ "step": 61
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.1885853654992973,
+ "learning_rate": 3.0243902439024392e-05,
+ "loss": 1.2286,
+ "step": 62
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.1848524571968613,
+ "learning_rate": 3.073170731707317e-05,
+ "loss": 1.2718,
+ "step": 63
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.18734105883548513,
+ "learning_rate": 3.1219512195121955e-05,
+ "loss": 1.2357,
+ "step": 64
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.17774668052121825,
+ "learning_rate": 3.170731707317074e-05,
+ "loss": 1.1509,
+ "step": 65
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.17890968008080646,
+ "learning_rate": 3.2195121951219514e-05,
+ "loss": 1.1924,
+ "step": 66
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.18249273371332375,
+ "learning_rate": 3.268292682926829e-05,
+ "loss": 1.2545,
+ "step": 67
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.21064122671902577,
+ "learning_rate": 3.3170731707317074e-05,
+ "loss": 1.2832,
+ "step": 68
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.1820064171955093,
+ "learning_rate": 3.365853658536586e-05,
+ "loss": 1.2071,
+ "step": 69
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.16996662800553433,
+ "learning_rate": 3.414634146341463e-05,
+ "loss": 1.2073,
+ "step": 70
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.1618669302922445,
+ "learning_rate": 3.4634146341463416e-05,
+ "loss": 1.1289,
+ "step": 71
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.18948744950985544,
+ "learning_rate": 3.51219512195122e-05,
+ "loss": 1.2915,
+ "step": 72
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.18326143691603383,
+ "learning_rate": 3.5609756097560976e-05,
+ "loss": 1.2238,
+ "step": 73
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.17410704510700503,
+ "learning_rate": 3.609756097560976e-05,
+ "loss": 1.1784,
+ "step": 74
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.1983667344995625,
+ "learning_rate": 3.658536585365854e-05,
+ "loss": 1.2452,
+ "step": 75
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.3416310763369357,
+ "learning_rate": 3.7073170731707325e-05,
+ "loss": 1.1972,
+ "step": 76
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.2776466983511955,
+ "learning_rate": 3.75609756097561e-05,
+ "loss": 1.3121,
+ "step": 77
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.20026129636576834,
+ "learning_rate": 3.804878048780488e-05,
+ "loss": 1.2436,
+ "step": 78
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.21064549243917835,
+ "learning_rate": 3.853658536585366e-05,
+ "loss": 1.2064,
+ "step": 79
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.22119482175714267,
+ "learning_rate": 3.9024390243902444e-05,
+ "loss": 1.2715,
+ "step": 80
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.23047133748844142,
+ "learning_rate": 3.951219512195122e-05,
+ "loss": 1.2888,
+ "step": 81
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.18741863156973176,
+ "learning_rate": 4e-05,
+ "loss": 1.248,
+ "step": 82
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.1747859810629604,
+ "learning_rate": 4.0487804878048786e-05,
+ "loss": 1.1683,
+ "step": 83
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.1896944798413341,
+ "learning_rate": 4.097560975609756e-05,
+ "loss": 1.2155,
+ "step": 84
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.18724128114363303,
+ "learning_rate": 4.1463414634146346e-05,
+ "loss": 1.2273,
+ "step": 85
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.17368125504855478,
+ "learning_rate": 4.195121951219513e-05,
+ "loss": 1.224,
+ "step": 86
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.18371141013625703,
+ "learning_rate": 4.2439024390243905e-05,
+ "loss": 1.2294,
+ "step": 87
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.1791029365673714,
+ "learning_rate": 4.292682926829269e-05,
+ "loss": 1.2895,
+ "step": 88
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.20259974283859655,
+ "learning_rate": 4.341463414634147e-05,
+ "loss": 1.1841,
+ "step": 89
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.17457456183272174,
+ "learning_rate": 4.390243902439025e-05,
+ "loss": 1.2357,
+ "step": 90
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.1815824380789748,
+ "learning_rate": 4.439024390243903e-05,
+ "loss": 1.2304,
+ "step": 91
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.17566480599583392,
+ "learning_rate": 4.4878048780487814e-05,
+ "loss": 1.242,
+ "step": 92
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.18422975005984474,
+ "learning_rate": 4.536585365853658e-05,
+ "loss": 1.2177,
+ "step": 93
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.16796781877940678,
+ "learning_rate": 4.5853658536585366e-05,
+ "loss": 1.1482,
+ "step": 94
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.18636131653783305,
+ "learning_rate": 4.634146341463415e-05,
+ "loss": 1.1758,
+ "step": 95
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.1823665700289814,
+ "learning_rate": 4.6829268292682926e-05,
+ "loss": 1.289,
+ "step": 96
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.1719900691262439,
+ "learning_rate": 4.731707317073171e-05,
+ "loss": 1.1626,
+ "step": 97
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.17937994168039778,
+ "learning_rate": 4.780487804878049e-05,
+ "loss": 1.175,
+ "step": 98
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.16631851422106986,
+ "learning_rate": 4.829268292682927e-05,
+ "loss": 1.2177,
+ "step": 99
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.19143696232800309,
+ "learning_rate": 4.878048780487805e-05,
+ "loss": 1.3071,
+ "step": 100
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.17859506638780318,
+ "learning_rate": 4.9268292682926835e-05,
+ "loss": 1.2351,
+ "step": 101
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.18381520321248196,
+ "learning_rate": 4.975609756097561e-05,
+ "loss": 1.2342,
+ "step": 102
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.17968218683773912,
+ "learning_rate": 5.0243902439024394e-05,
+ "loss": 1.2074,
+ "step": 103
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.18139489969339018,
+ "learning_rate": 5.073170731707318e-05,
+ "loss": 1.1558,
+ "step": 104
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.17366624842514394,
+ "learning_rate": 5.121951219512195e-05,
+ "loss": 1.1897,
+ "step": 105
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.16034845455223745,
+ "learning_rate": 5.1707317073170736e-05,
+ "loss": 1.179,
+ "step": 106
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.17583069577827776,
+ "learning_rate": 5.219512195121952e-05,
+ "loss": 1.1856,
+ "step": 107
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.1853758076989552,
+ "learning_rate": 5.26829268292683e-05,
+ "loss": 1.2072,
+ "step": 108
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.19597443965936462,
+ "learning_rate": 5.317073170731708e-05,
+ "loss": 1.2271,
+ "step": 109
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.1899206334098331,
+ "learning_rate": 5.365853658536586e-05,
+ "loss": 1.1961,
+ "step": 110
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.17463763837757018,
+ "learning_rate": 5.4146341463414645e-05,
+ "loss": 1.2049,
+ "step": 111
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.20431371701229986,
+ "learning_rate": 5.463414634146342e-05,
+ "loss": 1.2891,
+ "step": 112
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1814475107638498,
+ "learning_rate": 5.51219512195122e-05,
+ "loss": 1.2346,
+ "step": 113
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1883849423207823,
+ "learning_rate": 5.5609756097560974e-05,
+ "loss": 1.244,
+ "step": 114
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1857258128640568,
+ "learning_rate": 5.609756097560976e-05,
+ "loss": 1.2669,
+ "step": 115
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1740768514118401,
+ "learning_rate": 5.658536585365854e-05,
+ "loss": 1.2414,
+ "step": 116
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1919320335584178,
+ "learning_rate": 5.7073170731707317e-05,
+ "loss": 1.2886,
+ "step": 117
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.18288775167828136,
+ "learning_rate": 5.75609756097561e-05,
+ "loss": 1.1875,
+ "step": 118
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.18208588867750863,
+ "learning_rate": 5.804878048780488e-05,
+ "loss": 1.2388,
+ "step": 119
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.1743260015658331,
+ "learning_rate": 5.853658536585366e-05,
+ "loss": 1.1762,
+ "step": 120
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.17856046291517946,
+ "learning_rate": 5.902439024390244e-05,
+ "loss": 1.2888,
+ "step": 121
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.17493794870966536,
+ "learning_rate": 5.9512195121951225e-05,
+ "loss": 1.2222,
+ "step": 122
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.1909202655203384,
+ "learning_rate": 6.000000000000001e-05,
+ "loss": 1.2414,
+ "step": 123
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.18345819482834988,
+ "learning_rate": 6.0487804878048785e-05,
+ "loss": 1.2756,
+ "step": 124
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.2057069352956621,
+ "learning_rate": 6.097560975609757e-05,
+ "loss": 1.261,
+ "step": 125
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.299775882469108,
+ "learning_rate": 6.146341463414634e-05,
+ "loss": 1.2566,
+ "step": 126
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.1869687633018095,
+ "learning_rate": 6.195121951219513e-05,
+ "loss": 1.3039,
+ "step": 127
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.17747149926197442,
+ "learning_rate": 6.243902439024391e-05,
+ "loss": 1.2524,
+ "step": 128
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.17885157788044242,
+ "learning_rate": 6.29268292682927e-05,
+ "loss": 1.2455,
+ "step": 129
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.17617298187845123,
+ "learning_rate": 6.341463414634148e-05,
+ "loss": 1.2009,
+ "step": 130
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.20164176323497066,
+ "learning_rate": 6.390243902439025e-05,
+ "loss": 1.2634,
+ "step": 131
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.20459903417307612,
+ "learning_rate": 6.439024390243903e-05,
+ "loss": 1.1963,
+ "step": 132
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.1863755486334296,
+ "learning_rate": 6.487804878048781e-05,
+ "loss": 1.2387,
+ "step": 133
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.19265866140295207,
+ "learning_rate": 6.536585365853658e-05,
+ "loss": 1.2688,
+ "step": 134
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.1823425868969493,
+ "learning_rate": 6.585365853658536e-05,
+ "loss": 1.2041,
+ "step": 135
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.2016853266472781,
+ "learning_rate": 6.634146341463415e-05,
+ "loss": 1.1223,
+ "step": 136
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.17282675192463448,
+ "learning_rate": 6.682926829268293e-05,
+ "loss": 1.1879,
+ "step": 137
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.17398811693399288,
+ "learning_rate": 6.731707317073171e-05,
+ "loss": 1.2682,
+ "step": 138
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.18516916965434696,
+ "learning_rate": 6.78048780487805e-05,
+ "loss": 1.1666,
+ "step": 139
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.1852213129647933,
+ "learning_rate": 6.829268292682927e-05,
+ "loss": 1.2501,
+ "step": 140
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.17915948766591883,
+ "learning_rate": 6.878048780487805e-05,
+ "loss": 1.2264,
+ "step": 141
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.21599939417233183,
+ "learning_rate": 6.926829268292683e-05,
+ "loss": 1.2376,
+ "step": 142
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.17839304459521851,
+ "learning_rate": 6.975609756097562e-05,
+ "loss": 1.2353,
+ "step": 143
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.20826913231380875,
+ "learning_rate": 7.02439024390244e-05,
+ "loss": 1.1901,
+ "step": 144
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.20788894913361589,
+ "learning_rate": 7.073170731707318e-05,
+ "loss": 1.2577,
+ "step": 145
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.18420055842301297,
+ "learning_rate": 7.121951219512195e-05,
+ "loss": 1.1393,
+ "step": 146
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.19903048468685589,
+ "learning_rate": 7.170731707317073e-05,
+ "loss": 1.2321,
+ "step": 147
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.19074116314985748,
+ "learning_rate": 7.219512195121952e-05,
+ "loss": 1.1912,
+ "step": 148
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.2353816469403903,
+ "learning_rate": 7.26829268292683e-05,
+ "loss": 1.28,
+ "step": 149
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.21634875684769345,
+ "learning_rate": 7.317073170731708e-05,
+ "loss": 1.3312,
+ "step": 150
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.18290969006743918,
+ "learning_rate": 7.365853658536587e-05,
+ "loss": 1.2214,
+ "step": 151
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.18484243897545208,
+ "learning_rate": 7.414634146341465e-05,
+ "loss": 1.1895,
+ "step": 152
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.21882343112978872,
+ "learning_rate": 7.463414634146342e-05,
+ "loss": 1.2219,
+ "step": 153
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.19868284379241205,
+ "learning_rate": 7.51219512195122e-05,
+ "loss": 1.2176,
+ "step": 154
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.20912516312950613,
+ "learning_rate": 7.560975609756097e-05,
+ "loss": 1.242,
+ "step": 155
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.23811880045549916,
+ "learning_rate": 7.609756097560976e-05,
+ "loss": 1.2838,
+ "step": 156
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.19511077122033713,
+ "learning_rate": 7.658536585365854e-05,
+ "loss": 1.1594,
+ "step": 157
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.20094129399534238,
+ "learning_rate": 7.707317073170732e-05,
+ "loss": 1.2966,
+ "step": 158
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.19366245038292418,
+ "learning_rate": 7.75609756097561e-05,
+ "loss": 1.2246,
+ "step": 159
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.19409570223867306,
+ "learning_rate": 7.804878048780489e-05,
+ "loss": 1.2312,
+ "step": 160
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.2087258457033805,
+ "learning_rate": 7.853658536585366e-05,
+ "loss": 1.2169,
+ "step": 161
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.18765223996270428,
+ "learning_rate": 7.902439024390244e-05,
+ "loss": 1.2383,
+ "step": 162
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.20734180224147242,
+ "learning_rate": 7.951219512195122e-05,
+ "loss": 1.2587,
+ "step": 163
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.24690929540287834,
+ "learning_rate": 8e-05,
+ "loss": 1.1951,
+ "step": 164
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.2003538797619543,
+ "learning_rate": 7.999990914797545e-05,
+ "loss": 1.1982,
+ "step": 165
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.22469075613510484,
+ "learning_rate": 7.99996365923145e-05,
+ "loss": 1.2355,
+ "step": 166
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.21870100788336058,
+ "learning_rate": 7.999918233425526e-05,
+ "loss": 1.1103,
+ "step": 167
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.20939989594131886,
+ "learning_rate": 7.999854637586122e-05,
+ "loss": 1.1966,
+ "step": 168
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.43108211416237796,
+ "learning_rate": 7.999772872002132e-05,
+ "loss": 1.2882,
+ "step": 169
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.27045413432174487,
+ "learning_rate": 7.999672937044984e-05,
+ "loss": 1.2399,
+ "step": 170
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.19700483036740515,
+ "learning_rate": 7.999554833168642e-05,
+ "loss": 1.202,
+ "step": 171
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.3335979493370708,
+ "learning_rate": 7.999418560909604e-05,
+ "loss": 1.1995,
+ "step": 172
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.3165803974474567,
+ "learning_rate": 7.999264120886902e-05,
+ "loss": 1.1569,
+ "step": 173
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.1951699080346223,
+ "learning_rate": 7.999091513802093e-05,
+ "loss": 1.1778,
+ "step": 174
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.2087559121749787,
+ "learning_rate": 7.998900740439265e-05,
+ "loss": 1.1736,
+ "step": 175
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.20345180977460478,
+ "learning_rate": 7.998691801665024e-05,
+ "loss": 1.2281,
+ "step": 176
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.24617644827252333,
+ "learning_rate": 7.998464698428495e-05,
+ "loss": 1.2072,
+ "step": 177
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.2469050959356265,
+ "learning_rate": 7.998219431761318e-05,
+ "loss": 1.2242,
+ "step": 178
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.19529317748460623,
+ "learning_rate": 7.997956002777642e-05,
+ "loss": 1.2567,
+ "step": 179
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.19048389491381376,
+ "learning_rate": 7.99767441267412e-05,
+ "loss": 1.2982,
+ "step": 180
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.2085799116493225,
+ "learning_rate": 7.997374662729904e-05,
+ "loss": 1.1254,
+ "step": 181
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.20636853256378995,
+ "learning_rate": 7.997056754306636e-05,
+ "loss": 1.2435,
+ "step": 182
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.20590016382290252,
+ "learning_rate": 7.99672068884845e-05,
+ "loss": 1.2658,
+ "step": 183
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.1931166169764433,
+ "learning_rate": 7.996366467881955e-05,
+ "loss": 1.1637,
+ "step": 184
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.18873318157988098,
+ "learning_rate": 7.995994093016237e-05,
+ "loss": 1.1335,
+ "step": 185
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.19210254625199108,
+ "learning_rate": 7.995603565942846e-05,
+ "loss": 1.1928,
+ "step": 186
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.2130986479765664,
+ "learning_rate": 7.995194888435792e-05,
+ "loss": 1.2158,
+ "step": 187
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.22003854501814088,
+ "learning_rate": 7.994768062351532e-05,
+ "loss": 1.2288,
+ "step": 188
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.20330803191993058,
+ "learning_rate": 7.994323089628968e-05,
+ "loss": 1.2426,
+ "step": 189
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.20567314642208634,
+ "learning_rate": 7.993859972289434e-05,
+ "loss": 1.2649,
+ "step": 190
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.21556663727342962,
+ "learning_rate": 7.993378712436686e-05,
+ "loss": 1.2545,
+ "step": 191
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.20309165469109888,
+ "learning_rate": 7.992879312256897e-05,
+ "loss": 1.3338,
+ "step": 192
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.19574356669421325,
+ "learning_rate": 7.992361774018641e-05,
+ "loss": 1.278,
+ "step": 193
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.2763613746722313,
+ "learning_rate": 7.991826100072891e-05,
+ "loss": 1.2571,
+ "step": 194
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.19346552479915102,
+ "learning_rate": 7.991272292852996e-05,
+ "loss": 1.2027,
+ "step": 195
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.2281167812123908,
+ "learning_rate": 7.990700354874683e-05,
+ "loss": 1.2586,
+ "step": 196
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.19699013712137542,
+ "learning_rate": 7.990110288736042e-05,
+ "loss": 1.1371,
+ "step": 197
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.21768209981475933,
+ "learning_rate": 7.989502097117503e-05,
+ "loss": 1.2522,
+ "step": 198
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.21335427847754582,
+ "learning_rate": 7.988875782781838e-05,
+ "loss": 1.2437,
+ "step": 199
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.21856710629066897,
+ "learning_rate": 7.988231348574147e-05,
+ "loss": 1.2135,
+ "step": 200
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.20482062658774797,
+ "learning_rate": 7.987568797421836e-05,
+ "loss": 1.1755,
+ "step": 201
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.2017756813960897,
+ "learning_rate": 7.986888132334608e-05,
+ "loss": 1.1699,
+ "step": 202
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.20496443848153809,
+ "learning_rate": 7.986189356404458e-05,
+ "loss": 1.2125,
+ "step": 203
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.2134603800558358,
+ "learning_rate": 7.985472472805643e-05,
+ "loss": 1.2391,
+ "step": 204
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.2364175573420861,
+ "learning_rate": 7.98473748479468e-05,
+ "loss": 1.2384,
+ "step": 205
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.1872419861598724,
+ "learning_rate": 7.983984395710326e-05,
+ "loss": 1.1457,
+ "step": 206
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.28222194007095774,
+ "learning_rate": 7.983213208973566e-05,
+ "loss": 1.2952,
+ "step": 207
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.1916094851162064,
+ "learning_rate": 7.982423928087593e-05,
+ "loss": 1.1763,
+ "step": 208
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.18446245256166657,
+ "learning_rate": 7.981616556637795e-05,
+ "loss": 1.1863,
+ "step": 209
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.195191961022491,
+ "learning_rate": 7.980791098291737e-05,
+ "loss": 1.2036,
+ "step": 210
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.2652439657825496,
+ "learning_rate": 7.979947556799151e-05,
+ "loss": 1.2834,
+ "step": 211
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.24308438957843412,
+ "learning_rate": 7.979085935991906e-05,
+ "loss": 1.234,
+ "step": 212
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.21294701043622016,
+ "learning_rate": 7.978206239784004e-05,
+ "loss": 1.3006,
+ "step": 213
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.25809277041859524,
+ "learning_rate": 7.977308472171553e-05,
+ "loss": 1.2272,
+ "step": 214
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.193463860107294,
+ "learning_rate": 7.976392637232754e-05,
+ "loss": 1.2295,
+ "step": 215
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.2150023760609626,
+ "learning_rate": 7.975458739127877e-05,
+ "loss": 1.2135,
+ "step": 216
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.22590495955605894,
+ "learning_rate": 7.974506782099253e-05,
+ "loss": 1.2532,
+ "step": 217
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.21023744668403702,
+ "learning_rate": 7.973536770471242e-05,
+ "loss": 1.2472,
+ "step": 218
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.2345749799511543,
+ "learning_rate": 7.972548708650218e-05,
+ "loss": 1.1791,
+ "step": 219
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.2158876734005217,
+ "learning_rate": 7.971542601124553e-05,
+ "loss": 1.2483,
+ "step": 220
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.29455339949432446,
+ "learning_rate": 7.970518452464593e-05,
+ "loss": 1.2894,
+ "step": 221
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.23983708730626851,
+ "learning_rate": 7.969476267322636e-05,
+ "loss": 1.271,
+ "step": 222
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.1922400905426158,
+ "learning_rate": 7.968416050432912e-05,
+ "loss": 1.2139,
+ "step": 223
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.2238136844422931,
+ "learning_rate": 7.967337806611568e-05,
+ "loss": 1.2655,
+ "step": 224
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.21230292828267672,
+ "learning_rate": 7.966241540756631e-05,
+ "loss": 1.2406,
+ "step": 225
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.26656119419070456,
+ "learning_rate": 7.965127257848004e-05,
+ "loss": 1.2595,
+ "step": 226
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.22381385502992684,
+ "learning_rate": 7.963994962947426e-05,
+ "loss": 1.1737,
+ "step": 227
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20056702203994298,
+ "learning_rate": 7.962844661198462e-05,
+ "loss": 1.1969,
+ "step": 228
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20148701321526885,
+ "learning_rate": 7.961676357826478e-05,
+ "loss": 1.2151,
+ "step": 229
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20034834807028637,
+ "learning_rate": 7.960490058138604e-05,
+ "loss": 1.1455,
+ "step": 230
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.21050838521846033,
+ "learning_rate": 7.959285767523732e-05,
+ "loss": 1.2223,
+ "step": 231
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20904772138969777,
+ "learning_rate": 7.95806349145247e-05,
+ "loss": 1.2534,
+ "step": 232
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.20307877304792957,
+ "learning_rate": 7.956823235477134e-05,
+ "loss": 1.1352,
+ "step": 233
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.20501105270897094,
+ "learning_rate": 7.95556500523171e-05,
+ "loss": 1.2031,
+ "step": 234
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.19800586972038586,
+ "learning_rate": 7.954288806431838e-05,
+ "loss": 1.2567,
+ "step": 235
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.2175102450594135,
+ "learning_rate": 7.952994644874777e-05,
+ "loss": 1.2538,
+ "step": 236
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.22698189300067595,
+ "learning_rate": 7.951682526439391e-05,
+ "loss": 1.3088,
+ "step": 237
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.19208392014975315,
+ "learning_rate": 7.950352457086109e-05,
+ "loss": 1.2336,
+ "step": 238
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.27004086334319655,
+ "learning_rate": 7.949004442856905e-05,
+ "loss": 1.2012,
+ "step": 239
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.23420974954538043,
+ "learning_rate": 7.947638489875272e-05,
+ "loss": 1.2244,
+ "step": 240
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.20514399124802024,
+ "learning_rate": 7.946254604346186e-05,
+ "loss": 1.2548,
+ "step": 241
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.19334973602372896,
+ "learning_rate": 7.944852792556092e-05,
+ "loss": 1.2104,
+ "step": 242
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.1992640714537956,
+ "learning_rate": 7.943433060872858e-05,
+ "loss": 1.2628,
+ "step": 243
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.203284617090413,
+ "learning_rate": 7.941995415745761e-05,
+ "loss": 1.2002,
+ "step": 244
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.22795306969682058,
+ "learning_rate": 7.94053986370545e-05,
+ "loss": 1.2215,
+ "step": 245
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.20789041346838505,
+ "learning_rate": 7.939066411363915e-05,
+ "loss": 1.0998,
+ "step": 246
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.22354868884742066,
+ "learning_rate": 7.937575065414464e-05,
+ "loss": 1.2564,
+ "step": 247
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.21176392726647736,
+ "learning_rate": 7.936065832631687e-05,
+ "loss": 1.2816,
+ "step": 248
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.19967179557235587,
+ "learning_rate": 7.934538719871427e-05,
+ "loss": 1.1961,
+ "step": 249
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.210819577350627,
+ "learning_rate": 7.932993734070747e-05,
+ "loss": 1.2167,
+ "step": 250
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.21537794551756187,
+ "learning_rate": 7.931430882247903e-05,
+ "loss": 1.2341,
+ "step": 251
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.22850872387256574,
+ "learning_rate": 7.929850171502304e-05,
+ "loss": 1.1686,
+ "step": 252
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.22380366415076383,
+ "learning_rate": 7.928251609014493e-05,
+ "loss": 1.1462,
+ "step": 253
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.22426923149036065,
+ "learning_rate": 7.926635202046102e-05,
+ "loss": 1.1792,
+ "step": 254
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.42082703321103965,
+ "learning_rate": 7.925000957939822e-05,
+ "loss": 1.2718,
+ "step": 255
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.2235432774854074,
+ "learning_rate": 7.92334888411937e-05,
+ "loss": 1.2598,
+ "step": 256
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.281644028934108,
+ "learning_rate": 7.92167898808946e-05,
+ "loss": 1.2205,
+ "step": 257
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.2037705143888748,
+ "learning_rate": 7.919991277435763e-05,
+ "loss": 1.1737,
+ "step": 258
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.20917419230028977,
+ "learning_rate": 7.918285759824879e-05,
+ "loss": 1.2035,
+ "step": 259
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.20510847570635518,
+ "learning_rate": 7.916562443004292e-05,
+ "loss": 1.2135,
+ "step": 260
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.25172483071092466,
+ "learning_rate": 7.914821334802342e-05,
+ "loss": 1.2218,
+ "step": 261
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.21102706700634313,
+ "learning_rate": 7.91306244312819e-05,
+ "loss": 1.1738,
+ "step": 262
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.22626060872645815,
+ "learning_rate": 7.911285775971781e-05,
+ "loss": 1.238,
+ "step": 263
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.22448567539778486,
+ "learning_rate": 7.909491341403805e-05,
+ "loss": 1.2404,
+ "step": 264
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.2019099786139193,
+ "learning_rate": 7.907679147575661e-05,
+ "loss": 1.213,
+ "step": 265
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.24307234839096267,
+ "learning_rate": 7.905849202719422e-05,
+ "loss": 1.2322,
+ "step": 266
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.19801890521743487,
+ "learning_rate": 7.904001515147802e-05,
+ "loss": 1.2448,
+ "step": 267
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.2102742273575385,
+ "learning_rate": 7.902136093254106e-05,
+ "loss": 1.1657,
+ "step": 268
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.2173464476815016,
+ "learning_rate": 7.900252945512201e-05,
+ "loss": 1.2549,
+ "step": 269
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.20957275458699595,
+ "learning_rate": 7.898352080476479e-05,
+ "loss": 1.2536,
+ "step": 270
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.20691966388952363,
+ "learning_rate": 7.896433506781811e-05,
+ "loss": 1.2661,
+ "step": 271
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.2276662275112648,
+ "learning_rate": 7.894497233143509e-05,
+ "loss": 1.2409,
+ "step": 272
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.23854109569301263,
+ "learning_rate": 7.892543268357297e-05,
+ "loss": 1.2681,
+ "step": 273
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.2233864156677627,
+ "learning_rate": 7.890571621299252e-05,
+ "loss": 1.1687,
+ "step": 274
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.20114129147925475,
+ "learning_rate": 7.888582300925787e-05,
+ "loss": 1.2184,
+ "step": 275
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.2154654670569462,
+ "learning_rate": 7.886575316273586e-05,
+ "loss": 1.1982,
+ "step": 276
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.2292982209343639,
+ "learning_rate": 7.884550676459583e-05,
+ "loss": 1.2129,
+ "step": 277
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.21302713135229548,
+ "learning_rate": 7.882508390680908e-05,
+ "loss": 1.1605,
+ "step": 278
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.2123661020671048,
+ "learning_rate": 7.88044846821485e-05,
+ "loss": 1.2308,
+ "step": 279
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.2080577410800404,
+ "learning_rate": 7.878370918418818e-05,
+ "loss": 1.2195,
+ "step": 280
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.19663901881127385,
+ "learning_rate": 7.876275750730289e-05,
+ "loss": 1.1591,
+ "step": 281
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.20534502031312163,
+ "learning_rate": 7.874162974666776e-05,
+ "loss": 1.2664,
+ "step": 282
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.23240445399513837,
+ "learning_rate": 7.872032599825779e-05,
+ "loss": 1.2151,
+ "step": 283
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.2672527316717507,
+ "learning_rate": 7.86988463588474e-05,
+ "loss": 1.2406,
+ "step": 284
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.19893903058743695,
+ "learning_rate": 7.867719092601003e-05,
+ "loss": 1.1291,
+ "step": 285
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.33275268109930917,
+ "learning_rate": 7.865535979811768e-05,
+ "loss": 1.1406,
+ "step": 286
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.2373619455690358,
+ "learning_rate": 7.863335307434045e-05,
+ "loss": 1.2799,
+ "step": 287
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.263235735390858,
+ "learning_rate": 7.861117085464612e-05,
+ "loss": 1.2415,
+ "step": 288
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.25884281780784324,
+ "learning_rate": 7.858881323979965e-05,
+ "loss": 1.3919,
+ "step": 289
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.25426288332255736,
+ "learning_rate": 7.85662803313628e-05,
+ "loss": 1.174,
+ "step": 290
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.26655405527881243,
+ "learning_rate": 7.854357223169356e-05,
+ "loss": 1.2806,
+ "step": 291
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.20909844432349833,
+ "learning_rate": 7.852068904394579e-05,
+ "loss": 1.2627,
+ "step": 292
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.21307115068935759,
+ "learning_rate": 7.849763087206866e-05,
+ "loss": 1.1879,
+ "step": 293
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.25009949471398946,
+ "learning_rate": 7.847439782080628e-05,
+ "loss": 1.2881,
+ "step": 294
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.20960783418679174,
+ "learning_rate": 7.845098999569712e-05,
+ "loss": 1.2723,
+ "step": 295
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.24968832437925104,
+ "learning_rate": 7.842740750307362e-05,
+ "loss": 1.2029,
+ "step": 296
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.22981196585125677,
+ "learning_rate": 7.84036504500616e-05,
+ "loss": 1.1695,
+ "step": 297
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.2320606844751365,
+ "learning_rate": 7.837971894457991e-05,
+ "loss": 1.2317,
+ "step": 298
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.23051459673906124,
+ "learning_rate": 7.835561309533981e-05,
+ "loss": 1.2046,
+ "step": 299
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.2510027231060586,
+ "learning_rate": 7.833133301184457e-05,
+ "loss": 1.199,
+ "step": 300
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.23601180466018787,
+ "learning_rate": 7.830687880438895e-05,
+ "loss": 1.1755,
+ "step": 301
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.24740820934385369,
+ "learning_rate": 7.828225058405864e-05,
+ "loss": 1.2054,
+ "step": 302
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.23065372979111173,
+ "learning_rate": 7.825744846272984e-05,
+ "loss": 1.2066,
+ "step": 303
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.22385077334838213,
+ "learning_rate": 7.823247255306866e-05,
+ "loss": 1.2147,
+ "step": 304
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.42981213948386104,
+ "learning_rate": 7.820732296853074e-05,
+ "loss": 1.2314,
+ "step": 305
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.21122844902751076,
+ "learning_rate": 7.818199982336058e-05,
+ "loss": 1.1462,
+ "step": 306
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.23374869692118933,
+ "learning_rate": 7.815650323259117e-05,
+ "loss": 1.2051,
+ "step": 307
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.21662363795962128,
+ "learning_rate": 7.813083331204332e-05,
+ "loss": 1.1575,
+ "step": 308
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.2088315773384112,
+ "learning_rate": 7.810499017832526e-05,
+ "loss": 1.1316,
+ "step": 309
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.2095238410730976,
+ "learning_rate": 7.807897394883203e-05,
+ "loss": 1.2087,
+ "step": 310
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.22672932127256515,
+ "learning_rate": 7.805278474174499e-05,
+ "loss": 1.2512,
+ "step": 311
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.21873052340922736,
+ "learning_rate": 7.802642267603126e-05,
+ "loss": 1.1909,
+ "step": 312
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.219814521916342,
+ "learning_rate": 7.79998878714432e-05,
+ "loss": 1.1669,
+ "step": 313
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.3049426027257317,
+ "learning_rate": 7.797318044851786e-05,
+ "loss": 1.1797,
+ "step": 314
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.22309435690065985,
+ "learning_rate": 7.794630052857638e-05,
+ "loss": 1.1417,
+ "step": 315
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.3891885169154885,
+ "learning_rate": 7.791924823372354e-05,
+ "loss": 1.2369,
+ "step": 316
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.24780269452456372,
+ "learning_rate": 7.789202368684711e-05,
+ "loss": 1.2521,
+ "step": 317
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.21660460720269362,
+ "learning_rate": 7.786462701161738e-05,
+ "loss": 1.2151,
+ "step": 318
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.23635409466561857,
+ "learning_rate": 7.783705833248649e-05,
+ "loss": 1.2363,
+ "step": 319
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.2616135839903218,
+ "learning_rate": 7.780931777468797e-05,
+ "loss": 1.2428,
+ "step": 320
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.21461059159245083,
+ "learning_rate": 7.77814054642361e-05,
+ "loss": 1.1434,
+ "step": 321
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.25348824286656163,
+ "learning_rate": 7.775332152792539e-05,
+ "loss": 1.2368,
+ "step": 322
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.22275034726331247,
+ "learning_rate": 7.772506609332995e-05,
+ "loss": 1.1827,
+ "step": 323
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.25030821228147526,
+ "learning_rate": 7.769663928880298e-05,
+ "loss": 1.2428,
+ "step": 324
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.22251804398745534,
+ "learning_rate": 7.766804124347608e-05,
+ "loss": 1.1889,
+ "step": 325
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.23381455520411995,
+ "learning_rate": 7.763927208725879e-05,
+ "loss": 1.2115,
+ "step": 326
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.27341902651946226,
+ "learning_rate": 7.761033195083791e-05,
+ "loss": 1.2535,
+ "step": 327
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.24862471659814522,
+ "learning_rate": 7.758122096567694e-05,
+ "loss": 1.2128,
+ "step": 328
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.2251357082045494,
+ "learning_rate": 7.755193926401547e-05,
+ "loss": 1.2334,
+ "step": 329
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.3173274941622932,
+ "learning_rate": 7.752248697886857e-05,
+ "loss": 1.226,
+ "step": 330
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.23056440717672175,
+ "learning_rate": 7.74928642440263e-05,
+ "loss": 1.2339,
+ "step": 331
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2801507500859342,
+ "learning_rate": 7.746307119405286e-05,
+ "loss": 1.287,
+ "step": 332
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2267818430426272,
+ "learning_rate": 7.743310796428622e-05,
+ "loss": 1.1916,
+ "step": 333
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2777329160365585,
+ "learning_rate": 7.74029746908374e-05,
+ "loss": 1.252,
+ "step": 334
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.25289169762353,
+ "learning_rate": 7.737267151058983e-05,
+ "loss": 1.2153,
+ "step": 335
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2424670686901653,
+ "learning_rate": 7.734219856119875e-05,
+ "loss": 1.2227,
+ "step": 336
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.22747092217441645,
+ "learning_rate": 7.731155598109067e-05,
+ "loss": 1.19,
+ "step": 337
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.2307810940100189,
+ "learning_rate": 7.728074390946257e-05,
+ "loss": 1.1818,
+ "step": 338
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.2583402574655623,
+ "learning_rate": 7.724976248628142e-05,
+ "loss": 1.1608,
+ "step": 339
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.22140209760890694,
+ "learning_rate": 7.721861185228347e-05,
+ "loss": 1.1245,
+ "step": 340
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.25859310758244686,
+ "learning_rate": 7.718729214897362e-05,
+ "loss": 1.2247,
+ "step": 341
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.26371179531372124,
+ "learning_rate": 7.715580351862482e-05,
+ "loss": 1.2128,
+ "step": 342
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.26575541302851047,
+ "learning_rate": 7.712414610427733e-05,
+ "loss": 1.2443,
+ "step": 343
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.269978305197599,
+ "learning_rate": 7.709232004973816e-05,
+ "loss": 1.2231,
+ "step": 344
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.26583998705977047,
+ "learning_rate": 7.70603254995804e-05,
+ "loss": 1.2476,
+ "step": 345
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.24256062164066097,
+ "learning_rate": 7.702816259914253e-05,
+ "loss": 1.2901,
+ "step": 346
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.3463123472658915,
+ "learning_rate": 7.699583149452779e-05,
+ "loss": 1.3277,
+ "step": 347
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.2269096590531878,
+ "learning_rate": 7.696333233260345e-05,
+ "loss": 1.2047,
+ "step": 348
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.25136883001050025,
+ "learning_rate": 7.693066526100031e-05,
+ "loss": 1.1619,
+ "step": 349
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.2565112571116145,
+ "learning_rate": 7.68978304281118e-05,
+ "loss": 1.2389,
+ "step": 350
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.22175779550828703,
+ "learning_rate": 7.686482798309349e-05,
+ "loss": 1.2238,
+ "step": 351
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.22588304332216555,
+ "learning_rate": 7.683165807586234e-05,
+ "loss": 1.174,
+ "step": 352
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.24889474296529737,
+ "learning_rate": 7.6798320857096e-05,
+ "loss": 1.2366,
+ "step": 353
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.27339703806525034,
+ "learning_rate": 7.676481647823214e-05,
+ "loss": 1.2356,
+ "step": 354
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.23424666722888365,
+ "learning_rate": 7.673114509146782e-05,
+ "loss": 1.2089,
+ "step": 355
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.27978285392461766,
+ "learning_rate": 7.66973068497587e-05,
+ "loss": 1.2609,
+ "step": 356
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.2509423350138824,
+ "learning_rate": 7.666330190681844e-05,
+ "loss": 1.1777,
+ "step": 357
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.23007730927468031,
+ "learning_rate": 7.662913041711793e-05,
+ "loss": 1.154,
+ "step": 358
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.2438648674953112,
+ "learning_rate": 7.659479253588462e-05,
+ "loss": 1.2257,
+ "step": 359
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.28816093242092233,
+ "learning_rate": 7.65602884191018e-05,
+ "loss": 1.2558,
+ "step": 360
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.24972815300596035,
+ "learning_rate": 7.652561822350793e-05,
+ "loss": 1.2837,
+ "step": 361
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.2543189139697063,
+ "learning_rate": 7.649078210659587e-05,
+ "loss": 1.2193,
+ "step": 362
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.2237937956718952,
+ "learning_rate": 7.645578022661224e-05,
+ "loss": 1.2237,
+ "step": 363
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.29742029408787396,
+ "learning_rate": 7.642061274255657e-05,
+ "loss": 1.2116,
+ "step": 364
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2462883147335493,
+ "learning_rate": 7.638527981418075e-05,
+ "loss": 1.1827,
+ "step": 365
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2647802498907096,
+ "learning_rate": 7.634978160198817e-05,
+ "loss": 1.2739,
+ "step": 366
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.22360398779217264,
+ "learning_rate": 7.631411826723306e-05,
+ "loss": 1.2185,
+ "step": 367
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2635048004593543,
+ "learning_rate": 7.627828997191973e-05,
+ "loss": 1.2317,
+ "step": 368
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2764803449917684,
+ "learning_rate": 7.624229687880184e-05,
+ "loss": 1.1923,
+ "step": 369
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.25724943233414527,
+ "learning_rate": 7.620613915138166e-05,
+ "loss": 1.2218,
+ "step": 370
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.2858318045794755,
+ "learning_rate": 7.61698169539093e-05,
+ "loss": 1.1496,
+ "step": 371
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.23547216647460364,
+ "learning_rate": 7.613333045138206e-05,
+ "loss": 1.1905,
+ "step": 372
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.22984814903684375,
+ "learning_rate": 7.609667980954355e-05,
+ "loss": 1.2009,
+ "step": 373
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.2551903754079084,
+ "learning_rate": 7.605986519488301e-05,
+ "loss": 1.2042,
+ "step": 374
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.2508257410125616,
+ "learning_rate": 7.602288677463457e-05,
+ "loss": 1.2468,
+ "step": 375
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.25324577774935964,
+ "learning_rate": 7.598574471677644e-05,
+ "loss": 1.2603,
+ "step": 376
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.35888776531769967,
+ "learning_rate": 7.59484391900302e-05,
+ "loss": 1.1929,
+ "step": 377
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.22048517191014724,
+ "learning_rate": 7.591097036385994e-05,
+ "loss": 1.1783,
+ "step": 378
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.2781160412746083,
+ "learning_rate": 7.587333840847162e-05,
+ "loss": 1.3397,
+ "step": 379
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.24033046830332258,
+ "learning_rate": 7.583554349481222e-05,
+ "loss": 1.2436,
+ "step": 380
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.26413762380260003,
+ "learning_rate": 7.579758579456893e-05,
+ "loss": 1.1917,
+ "step": 381
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.2390937887338632,
+ "learning_rate": 7.575946548016847e-05,
+ "loss": 1.2186,
+ "step": 382
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.25131263043429275,
+ "learning_rate": 7.572118272477622e-05,
+ "loss": 1.2538,
+ "step": 383
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.223974104870702,
+ "learning_rate": 7.568273770229546e-05,
+ "loss": 1.2165,
+ "step": 384
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.25840356830252875,
+ "learning_rate": 7.564413058736663e-05,
+ "loss": 1.1848,
+ "step": 385
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.2723156683076603,
+ "learning_rate": 7.560536155536641e-05,
+ "loss": 1.1982,
+ "step": 386
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.265687427976889,
+ "learning_rate": 7.556643078240708e-05,
+ "loss": 1.231,
+ "step": 387
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.25152762080976077,
+ "learning_rate": 7.552733844533562e-05,
+ "loss": 1.1974,
+ "step": 388
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.2366049485053541,
+ "learning_rate": 7.548808472173292e-05,
+ "loss": 1.3119,
+ "step": 389
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.22092196577077122,
+ "learning_rate": 7.5448669789913e-05,
+ "loss": 1.195,
+ "step": 390
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.22667521540462374,
+ "learning_rate": 7.540909382892217e-05,
+ "loss": 1.1431,
+ "step": 391
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.25432207282646513,
+ "learning_rate": 7.536935701853823e-05,
+ "loss": 1.2173,
+ "step": 392
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.29950506457923864,
+ "learning_rate": 7.53294595392697e-05,
+ "loss": 1.1962,
+ "step": 393
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.24735689607229913,
+ "learning_rate": 7.528940157235487e-05,
+ "loss": 1.2053,
+ "step": 394
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.24394198607459663,
+ "learning_rate": 7.524918329976114e-05,
+ "loss": 1.1979,
+ "step": 395
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.2630369372689188,
+ "learning_rate": 7.520880490418409e-05,
+ "loss": 1.2111,
+ "step": 396
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.26275028416291457,
+ "learning_rate": 7.516826656904664e-05,
+ "loss": 1.2133,
+ "step": 397
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.23938074620956928,
+ "learning_rate": 7.512756847849831e-05,
+ "loss": 1.1355,
+ "step": 398
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.3724960610098138,
+ "learning_rate": 7.508671081741428e-05,
+ "loss": 1.2572,
+ "step": 399
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.24161685847894723,
+ "learning_rate": 7.504569377139462e-05,
+ "loss": 1.1706,
+ "step": 400
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.26121591322670523,
+ "learning_rate": 7.50045175267634e-05,
+ "loss": 1.2135,
+ "step": 401
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2465579498164775,
+ "learning_rate": 7.496318227056788e-05,
+ "loss": 1.1641,
+ "step": 402
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2556288696122787,
+ "learning_rate": 7.492168819057767e-05,
+ "loss": 1.2939,
+ "step": 403
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.261481216336303,
+ "learning_rate": 7.488003547528382e-05,
+ "loss": 1.2026,
+ "step": 404
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2389415135676362,
+ "learning_rate": 7.483822431389799e-05,
+ "loss": 1.2131,
+ "step": 405
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2559201956627192,
+ "learning_rate": 7.479625489635162e-05,
+ "loss": 1.1246,
+ "step": 406
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.27127932491822604,
+ "learning_rate": 7.475412741329504e-05,
+ "loss": 1.2429,
+ "step": 407
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.27006004008695594,
+ "learning_rate": 7.47118420560966e-05,
+ "loss": 1.2388,
+ "step": 408
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.23716823297200537,
+ "learning_rate": 7.466939901684182e-05,
+ "loss": 1.1264,
+ "step": 409
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.2885373898669248,
+ "learning_rate": 7.462679848833252e-05,
+ "loss": 1.2786,
+ "step": 410
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.49215227598639927,
+ "learning_rate": 7.458404066408588e-05,
+ "loss": 1.2386,
+ "step": 411
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.24235735604947403,
+ "learning_rate": 7.454112573833368e-05,
+ "loss": 1.1423,
+ "step": 412
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.2584614748054343,
+ "learning_rate": 7.449805390602127e-05,
+ "loss": 1.2669,
+ "step": 413
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.23806123085998873,
+ "learning_rate": 7.445482536280684e-05,
+ "loss": 1.1763,
+ "step": 414
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.24459517607786851,
+ "learning_rate": 7.441144030506043e-05,
+ "loss": 1.198,
+ "step": 415
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.25801616402700395,
+ "learning_rate": 7.436789892986304e-05,
+ "loss": 1.2136,
+ "step": 416
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.2814819942392514,
+ "learning_rate": 7.432420143500578e-05,
+ "loss": 1.2398,
+ "step": 417
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.22134709322606153,
+ "learning_rate": 7.428034801898893e-05,
+ "loss": 1.1592,
+ "step": 418
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.2899677536995633,
+ "learning_rate": 7.42363388810211e-05,
+ "loss": 1.2296,
+ "step": 419
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.24005943230262294,
+ "learning_rate": 7.419217422101822e-05,
+ "loss": 1.2223,
+ "step": 420
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.26417562369496167,
+ "learning_rate": 7.414785423960275e-05,
+ "loss": 1.2261,
+ "step": 421
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.2580815883535521,
+ "learning_rate": 7.410337913810271e-05,
+ "loss": 1.2021,
+ "step": 422
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.25242217589496435,
+ "learning_rate": 7.405874911855071e-05,
+ "loss": 1.239,
+ "step": 423
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.21991733999839932,
+ "learning_rate": 7.401396438368315e-05,
+ "loss": 1.1716,
+ "step": 424
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.40116538322720213,
+ "learning_rate": 7.396902513693924e-05,
+ "loss": 1.2773,
+ "step": 425
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.277333939455099,
+ "learning_rate": 7.392393158246002e-05,
+ "loss": 1.2574,
+ "step": 426
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.27146087746385755,
+ "learning_rate": 7.387868392508756e-05,
+ "loss": 1.2243,
+ "step": 427
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.255881055620786,
+ "learning_rate": 7.38332823703639e-05,
+ "loss": 1.223,
+ "step": 428
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.24807364856677255,
+ "learning_rate": 7.378772712453021e-05,
+ "loss": 1.1985,
+ "step": 429
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.25746257617764423,
+ "learning_rate": 7.37420183945258e-05,
+ "loss": 1.2502,
+ "step": 430
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.28851991049982234,
+ "learning_rate": 7.369615638798722e-05,
+ "loss": 1.2535,
+ "step": 431
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.24113389811604363,
+ "learning_rate": 7.365014131324725e-05,
+ "loss": 1.2227,
+ "step": 432
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.2414465151257969,
+ "learning_rate": 7.360397337933405e-05,
+ "loss": 1.1884,
+ "step": 433
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.2735463134699831,
+ "learning_rate": 7.355765279597011e-05,
+ "loss": 1.2756,
+ "step": 434
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.2588437452987293,
+ "learning_rate": 7.351117977357139e-05,
+ "loss": 1.2108,
+ "step": 435
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.26573294117796553,
+ "learning_rate": 7.346455452324629e-05,
+ "loss": 1.1821,
+ "step": 436
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.2555476577827304,
+ "learning_rate": 7.341777725679473e-05,
+ "loss": 1.1937,
+ "step": 437
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.2867704132108098,
+ "learning_rate": 7.337084818670716e-05,
+ "loss": 1.2272,
+ "step": 438
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.27726678115981157,
+ "learning_rate": 7.332376752616367e-05,
+ "loss": 1.2331,
+ "step": 439
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.26955338021079955,
+ "learning_rate": 7.32765354890329e-05,
+ "loss": 1.1731,
+ "step": 440
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.25250321202536524,
+ "learning_rate": 7.322915228987116e-05,
+ "loss": 1.2653,
+ "step": 441
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.24748844179765395,
+ "learning_rate": 7.318161814392143e-05,
+ "loss": 1.24,
+ "step": 442
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.28177805247356325,
+ "learning_rate": 7.313393326711239e-05,
+ "loss": 1.185,
+ "step": 443
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.24093242000396312,
+ "learning_rate": 7.30860978760574e-05,
+ "loss": 1.1994,
+ "step": 444
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.26277803901457075,
+ "learning_rate": 7.30381121880536e-05,
+ "loss": 1.212,
+ "step": 445
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2506524258682433,
+ "learning_rate": 7.298997642108079e-05,
+ "loss": 1.2421,
+ "step": 446
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2840599700015824,
+ "learning_rate": 7.294169079380061e-05,
+ "loss": 1.1818,
+ "step": 447
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.24892184038117549,
+ "learning_rate": 7.289325552555538e-05,
+ "loss": 1.1916,
+ "step": 448
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2700898428541357,
+ "learning_rate": 7.284467083636722e-05,
+ "loss": 1.2517,
+ "step": 449
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2617848546539419,
+ "learning_rate": 7.279593694693698e-05,
+ "loss": 1.2063,
+ "step": 450
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2698278585334131,
+ "learning_rate": 7.274705407864332e-05,
+ "loss": 1.194,
+ "step": 451
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.23678313024953834,
+ "learning_rate": 7.26980224535416e-05,
+ "loss": 1.2349,
+ "step": 452
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.24851875792002978,
+ "learning_rate": 7.264884229436293e-05,
+ "loss": 1.1758,
+ "step": 453
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.24122080121681125,
+ "learning_rate": 7.259951382451318e-05,
+ "loss": 1.1962,
+ "step": 454
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.22741322959884405,
+ "learning_rate": 7.25500372680719e-05,
+ "loss": 1.1702,
+ "step": 455
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.2297475610861458,
+ "learning_rate": 7.250041284979137e-05,
+ "loss": 1.1466,
+ "step": 456
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.3057605989721467,
+ "learning_rate": 7.245064079509553e-05,
+ "loss": 1.246,
+ "step": 457
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.2719638501597136,
+ "learning_rate": 7.240072133007899e-05,
+ "loss": 1.2184,
+ "step": 458
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.2436807816414479,
+ "learning_rate": 7.235065468150593e-05,
+ "loss": 1.2324,
+ "step": 459
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.23436349430255515,
+ "learning_rate": 7.23004410768092e-05,
+ "loss": 1.1813,
+ "step": 460
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.2398940990211377,
+ "learning_rate": 7.22500807440892e-05,
+ "loss": 1.1924,
+ "step": 461
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.2605716625062531,
+ "learning_rate": 7.219957391211281e-05,
+ "loss": 1.182,
+ "step": 462
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.260462524570941,
+ "learning_rate": 7.214892081031244e-05,
+ "loss": 1.2136,
+ "step": 463
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.21979766512306334,
+ "learning_rate": 7.209812166878491e-05,
+ "loss": 1.2066,
+ "step": 464
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.23324453647530663,
+ "learning_rate": 7.204717671829051e-05,
+ "loss": 1.1657,
+ "step": 465
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.2529434935507481,
+ "learning_rate": 7.199608619025177e-05,
+ "loss": 1.2093,
+ "step": 466
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.25371701891720116,
+ "learning_rate": 7.194485031675265e-05,
+ "loss": 1.2225,
+ "step": 467
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.23272423066292103,
+ "learning_rate": 7.189346933053725e-05,
+ "loss": 1.1721,
+ "step": 468
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.25122928735587546,
+ "learning_rate": 7.184194346500892e-05,
+ "loss": 1.2537,
+ "step": 469
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.2159270875490409,
+ "learning_rate": 7.179027295422913e-05,
+ "loss": 1.197,
+ "step": 470
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.2633111059076544,
+ "learning_rate": 7.173845803291636e-05,
+ "loss": 1.1721,
+ "step": 471
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.30555936322098703,
+ "learning_rate": 7.168649893644517e-05,
+ "loss": 1.3011,
+ "step": 472
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.23492670111453726,
+ "learning_rate": 7.163439590084502e-05,
+ "loss": 1.1601,
+ "step": 473
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.26602734263721806,
+ "learning_rate": 7.158214916279923e-05,
+ "loss": 1.2808,
+ "step": 474
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.3182695007856262,
+ "learning_rate": 7.152975895964386e-05,
+ "loss": 1.2967,
+ "step": 475
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.2785021674736721,
+ "learning_rate": 7.147722552936673e-05,
+ "loss": 1.1789,
+ "step": 476
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.279474303138652,
+ "learning_rate": 7.142454911060627e-05,
+ "loss": 1.2596,
+ "step": 477
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.2556980144910755,
+ "learning_rate": 7.137172994265044e-05,
+ "loss": 1.2426,
+ "step": 478
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.3311256331993533,
+ "learning_rate": 7.131876826543565e-05,
+ "loss": 1.2059,
+ "step": 479
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.26467296197775253,
+ "learning_rate": 7.12656643195457e-05,
+ "loss": 1.2482,
+ "step": 480
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.27444885274652553,
+ "learning_rate": 7.121241834621064e-05,
+ "loss": 1.2528,
+ "step": 481
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.2572283861115396,
+ "learning_rate": 7.115903058730567e-05,
+ "loss": 1.1849,
+ "step": 482
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.2677065778235683,
+ "learning_rate": 7.11055012853501e-05,
+ "loss": 1.2011,
+ "step": 483
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.29470622036742816,
+ "learning_rate": 7.105183068350619e-05,
+ "loss": 1.2398,
+ "step": 484
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.27609230248969197,
+ "learning_rate": 7.099801902557811e-05,
+ "loss": 1.2259,
+ "step": 485
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.24248634168099284,
+ "learning_rate": 7.094406655601073e-05,
+ "loss": 1.2282,
+ "step": 486
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.2765941767688746,
+ "learning_rate": 7.088997351988865e-05,
+ "loss": 1.2319,
+ "step": 487
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.29347776909858947,
+ "learning_rate": 7.083574016293493e-05,
+ "loss": 1.1765,
+ "step": 488
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.285370295424537,
+ "learning_rate": 7.078136673151008e-05,
+ "loss": 1.26,
+ "step": 489
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.29408734903836536,
+ "learning_rate": 7.072685347261093e-05,
+ "loss": 1.226,
+ "step": 490
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.27437470239205813,
+ "learning_rate": 7.067220063386947e-05,
+ "loss": 1.1976,
+ "step": 491
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.2680770258777871,
+ "learning_rate": 7.061740846355176e-05,
+ "loss": 1.1915,
+ "step": 492
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.27200362879502954,
+ "learning_rate": 7.056247721055678e-05,
+ "loss": 1.2002,
+ "step": 493
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.2637811092577037,
+ "learning_rate": 7.050740712441528e-05,
+ "loss": 1.287,
+ "step": 494
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.24657959209271266,
+ "learning_rate": 7.045219845528875e-05,
+ "loss": 1.2284,
+ "step": 495
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.25311992110358666,
+ "learning_rate": 7.039685145396812e-05,
+ "loss": 1.1616,
+ "step": 496
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.2564633694193358,
+ "learning_rate": 7.034136637187275e-05,
+ "loss": 1.2067,
+ "step": 497
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.2446797651174144,
+ "learning_rate": 7.028574346104926e-05,
+ "loss": 1.2284,
+ "step": 498
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.2592751463399255,
+ "learning_rate": 7.022998297417034e-05,
+ "loss": 1.2371,
+ "step": 499
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.2500713943206808,
+ "learning_rate": 7.017408516453365e-05,
+ "loss": 1.1061,
+ "step": 500
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.2812266276040743,
+ "learning_rate": 7.011805028606064e-05,
+ "loss": 1.1949,
+ "step": 501
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.298829667668083,
+ "learning_rate": 7.006187859329544e-05,
+ "loss": 1.2313,
+ "step": 502
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.26518768159745104,
+ "learning_rate": 7.000557034140361e-05,
+ "loss": 1.2246,
+ "step": 503
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.3037280360760458,
+ "learning_rate": 6.994912578617113e-05,
+ "loss": 1.1617,
+ "step": 504
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.2726903109255714,
+ "learning_rate": 6.989254518400309e-05,
+ "loss": 1.2415,
+ "step": 505
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.25568082003046966,
+ "learning_rate": 6.98358287919226e-05,
+ "loss": 1.1817,
+ "step": 506
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.25633294893705044,
+ "learning_rate": 6.97789768675696e-05,
+ "loss": 1.2149,
+ "step": 507
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.28291439435087123,
+ "learning_rate": 6.972198966919972e-05,
+ "loss": 1.1578,
+ "step": 508
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.27195184756655516,
+ "learning_rate": 6.966486745568308e-05,
+ "loss": 1.2355,
+ "step": 509
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.239159568376005,
+ "learning_rate": 6.960761048650312e-05,
+ "loss": 1.1688,
+ "step": 510
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.22961475425949177,
+ "learning_rate": 6.955021902175543e-05,
+ "loss": 1.2094,
+ "step": 511
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.27443773600741117,
+ "learning_rate": 6.949269332214651e-05,
+ "loss": 1.2559,
+ "step": 512
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.26230551832002097,
+ "learning_rate": 6.94350336489927e-05,
+ "loss": 1.2121,
+ "step": 513
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.2716742985303849,
+ "learning_rate": 6.937724026421892e-05,
+ "loss": 1.2444,
+ "step": 514
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.2537850139439542,
+ "learning_rate": 6.931931343035742e-05,
+ "loss": 1.1327,
+ "step": 515
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.28599587967496826,
+ "learning_rate": 6.926125341054676e-05,
+ "loss": 1.2236,
+ "step": 516
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.26780654378470103,
+ "learning_rate": 6.920306046853043e-05,
+ "loss": 1.2295,
+ "step": 517
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.23606296888412015,
+ "learning_rate": 6.914473486865577e-05,
+ "loss": 1.1543,
+ "step": 518
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.34976881174240837,
+ "learning_rate": 6.90862768758727e-05,
+ "loss": 1.2067,
+ "step": 519
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.2481257873494882,
+ "learning_rate": 6.902768675573258e-05,
+ "loss": 1.2188,
+ "step": 520
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.2996395778117021,
+ "learning_rate": 6.896896477438699e-05,
+ "loss": 1.2326,
+ "step": 521
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.8839768816333193,
+ "learning_rate": 6.891011119858643e-05,
+ "loss": 1.2435,
+ "step": 522
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.2851882482058998,
+ "learning_rate": 6.885112629567927e-05,
+ "loss": 1.2644,
+ "step": 523
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.2813663482913699,
+ "learning_rate": 6.879201033361035e-05,
+ "loss": 1.2309,
+ "step": 524
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.3257551560135454,
+ "learning_rate": 6.873276358091996e-05,
+ "loss": 1.2755,
+ "step": 525
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.28930479952494365,
+ "learning_rate": 6.867338630674247e-05,
+ "loss": 1.1962,
+ "step": 526
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.3077462996938649,
+ "learning_rate": 6.861387878080511e-05,
+ "loss": 1.2402,
+ "step": 527
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.2848900193452761,
+ "learning_rate": 6.855424127342688e-05,
+ "loss": 1.2748,
+ "step": 528
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.4765938812802202,
+ "learning_rate": 6.849447405551718e-05,
+ "loss": 1.2226,
+ "step": 529
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.53184473292579,
+ "learning_rate": 6.843457739857467e-05,
+ "loss": 1.2347,
+ "step": 530
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.6416239346492343,
+ "learning_rate": 6.837455157468596e-05,
+ "loss": 1.2429,
+ "step": 531
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.3188092712502773,
+ "learning_rate": 6.831439685652442e-05,
+ "loss": 1.216,
+ "step": 532
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.3527495731006385,
+ "learning_rate": 6.825411351734895e-05,
+ "loss": 1.1682,
+ "step": 533
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 0.29603753744741856,
+ "learning_rate": 6.819370183100274e-05,
+ "loss": 1.1434,
+ "step": 534
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 0.5252450389976622,
+ "learning_rate": 6.813316207191198e-05,
+ "loss": 1.1943,
+ "step": 535
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 0.32999419558659937,
+ "learning_rate": 6.807249451508466e-05,
+ "loss": 1.192,
+ "step": 536
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 0.3650175469778724,
+ "learning_rate": 6.801169943610929e-05,
+ "loss": 1.2141,
+ "step": 537
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 1.0643532150783557,
+ "learning_rate": 6.795077711115368e-05,
+ "loss": 1.2253,
+ "step": 538
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.5041310609130145,
+ "learning_rate": 6.788972781696363e-05,
+ "loss": 1.278,
+ "step": 539
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.5123058164360991,
+ "learning_rate": 6.782855183086177e-05,
+ "loss": 1.2231,
+ "step": 540
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.3533015702394419,
+ "learning_rate": 6.776724943074619e-05,
+ "loss": 1.2072,
+ "step": 541
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.30253964625417207,
+ "learning_rate": 6.770582089508927e-05,
+ "loss": 1.1382,
+ "step": 542
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.348991618828202,
+ "learning_rate": 6.764426650293633e-05,
+ "loss": 1.2079,
+ "step": 543
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.46017440578788743,
+ "learning_rate": 6.758258653390444e-05,
+ "loss": 1.1813,
+ "step": 544
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.31962101755594885,
+ "learning_rate": 6.75207812681811e-05,
+ "loss": 1.1339,
+ "step": 545
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.37092024548285923,
+ "learning_rate": 6.745885098652298e-05,
+ "loss": 1.2591,
+ "step": 546
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.32347106450715835,
+ "learning_rate": 6.739679597025466e-05,
+ "loss": 1.2017,
+ "step": 547
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.39250187112342494,
+ "learning_rate": 6.733461650126733e-05,
+ "loss": 1.0933,
+ "step": 548
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.473522452217324,
+ "learning_rate": 6.727231286201752e-05,
+ "loss": 1.1124,
+ "step": 549
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.4809062179622052,
+ "learning_rate": 6.720988533552582e-05,
+ "loss": 1.1585,
+ "step": 550
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.3529662801059162,
+ "learning_rate": 6.714733420537559e-05,
+ "loss": 1.0501,
+ "step": 551
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.5958247214391118,
+ "learning_rate": 6.708465975571168e-05,
+ "loss": 1.1086,
+ "step": 552
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.5341364205022454,
+ "learning_rate": 6.70218622712391e-05,
+ "loss": 1.0518,
+ "step": 553
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.3601805724462006,
+ "learning_rate": 6.695894203722181e-05,
+ "loss": 1.1779,
+ "step": 554
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.43410190338280613,
+ "learning_rate": 6.68958993394813e-05,
+ "loss": 1.093,
+ "step": 555
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.46217742572873594,
+ "learning_rate": 6.683273446439546e-05,
+ "loss": 1.0117,
+ "step": 556
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.8591682373623357,
+ "learning_rate": 6.676944769889708e-05,
+ "loss": 1.1002,
+ "step": 557
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.7383229487622726,
+ "learning_rate": 6.670603933047272e-05,
+ "loss": 1.0779,
+ "step": 558
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.5965305891207813,
+ "learning_rate": 6.664250964716131e-05,
+ "loss": 1.0889,
+ "step": 559
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.6030858606684543,
+ "learning_rate": 6.657885893755288e-05,
+ "loss": 1.0982,
+ "step": 560
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.4644510682398409,
+ "learning_rate": 6.65150874907872e-05,
+ "loss": 1.1004,
+ "step": 561
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.43943285132452564,
+ "learning_rate": 6.645119559655254e-05,
+ "loss": 1.0536,
+ "step": 562
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.4456395978600012,
+ "learning_rate": 6.638718354508427e-05,
+ "loss": 1.0733,
+ "step": 563
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.3303824433217466,
+ "learning_rate": 6.632305162716365e-05,
+ "loss": 1.0552,
+ "step": 564
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.3617704823170143,
+ "learning_rate": 6.62588001341164e-05,
+ "loss": 1.1092,
+ "step": 565
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.4465013349903427,
+ "learning_rate": 6.619442935781141e-05,
+ "loss": 1.0781,
+ "step": 566
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.48516780613791277,
+ "learning_rate": 6.612993959065947e-05,
+ "loss": 1.0686,
+ "step": 567
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.38867820318536633,
+ "learning_rate": 6.606533112561186e-05,
+ "loss": 1.1215,
+ "step": 568
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.38566119820378336,
+ "learning_rate": 6.600060425615907e-05,
+ "loss": 1.1213,
+ "step": 569
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.35534855445058544,
+ "learning_rate": 6.593575927632947e-05,
+ "loss": 1.0955,
+ "step": 570
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.38124406233349717,
+ "learning_rate": 6.587079648068795e-05,
+ "loss": 1.0659,
+ "step": 571
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.454750160923548,
+ "learning_rate": 6.580571616433457e-05,
+ "loss": 1.1149,
+ "step": 572
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.35353190088025255,
+ "learning_rate": 6.574051862290325e-05,
+ "loss": 1.0388,
+ "step": 573
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.3249395594793626,
+ "learning_rate": 6.567520415256045e-05,
+ "loss": 1.0784,
+ "step": 574
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.40078898818247227,
+ "learning_rate": 6.560977305000375e-05,
+ "loss": 1.0859,
+ "step": 575
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.4115264795060035,
+ "learning_rate": 6.554422561246054e-05,
+ "loss": 1.1828,
+ "step": 576
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.30090229228069215,
+ "learning_rate": 6.54785621376867e-05,
+ "loss": 1.0901,
+ "step": 577
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.28827860350299206,
+ "learning_rate": 6.541278292396523e-05,
+ "loss": 1.0277,
+ "step": 578
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.34690404488996757,
+ "learning_rate": 6.534688827010484e-05,
+ "loss": 1.048,
+ "step": 579
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.29943113556644785,
+ "learning_rate": 6.528087847543867e-05,
+ "loss": 1.0646,
+ "step": 580
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.37318202575874415,
+ "learning_rate": 6.521475383982291e-05,
+ "loss": 1.1091,
+ "step": 581
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.3049663659203959,
+ "learning_rate": 6.51485146636354e-05,
+ "loss": 1.0552,
+ "step": 582
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.3342407867509692,
+ "learning_rate": 6.508216124777431e-05,
+ "loss": 1.2227,
+ "step": 583
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.3348396047855952,
+ "learning_rate": 6.501569389365674e-05,
+ "loss": 1.0861,
+ "step": 584
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.30951429367513383,
+ "learning_rate": 6.494911290321737e-05,
+ "loss": 1.0461,
+ "step": 585
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.33898401361064606,
+ "learning_rate": 6.488241857890711e-05,
+ "loss": 1.0854,
+ "step": 586
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.4901462068263497,
+ "learning_rate": 6.481561122369164e-05,
+ "loss": 1.1012,
+ "step": 587
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.3179574879809652,
+ "learning_rate": 6.474869114105018e-05,
+ "loss": 1.0451,
+ "step": 588
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.32159328915060714,
+ "learning_rate": 6.468165863497395e-05,
+ "loss": 1.0458,
+ "step": 589
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.36462235008537297,
+ "learning_rate": 6.461451400996491e-05,
+ "loss": 1.1247,
+ "step": 590
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.5373862753611778,
+ "learning_rate": 6.454725757103432e-05,
+ "loss": 1.0542,
+ "step": 591
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.3160409270291303,
+ "learning_rate": 6.447988962370133e-05,
+ "loss": 1.0829,
+ "step": 592
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.390452102978435,
+ "learning_rate": 6.441241047399169e-05,
+ "loss": 1.192,
+ "step": 593
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.3802122712014928,
+ "learning_rate": 6.434482042843627e-05,
+ "loss": 1.1153,
+ "step": 594
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.4081584328242501,
+ "learning_rate": 6.427711979406966e-05,
+ "loss": 1.1635,
+ "step": 595
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.3791962989638633,
+ "learning_rate": 6.420930887842889e-05,
+ "loss": 1.1581,
+ "step": 596
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.33239440056484193,
+ "learning_rate": 6.414138798955189e-05,
+ "loss": 1.0926,
+ "step": 597
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.3279881540815014,
+ "learning_rate": 6.407335743597616e-05,
+ "loss": 1.1386,
+ "step": 598
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.30309644763750837,
+ "learning_rate": 6.40052175267374e-05,
+ "loss": 1.0523,
+ "step": 599
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.3349097308403333,
+ "learning_rate": 6.393696857136801e-05,
+ "loss": 1.0815,
+ "step": 600
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.3288227593556618,
+ "learning_rate": 6.386861087989581e-05,
+ "loss": 1.015,
+ "step": 601
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.36685586740843157,
+ "learning_rate": 6.380014476284255e-05,
+ "loss": 1.1232,
+ "step": 602
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.3620977714204643,
+ "learning_rate": 6.373157053122243e-05,
+ "loss": 1.1138,
+ "step": 603
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.3130587018197183,
+ "learning_rate": 6.366288849654091e-05,
+ "loss": 1.1255,
+ "step": 604
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.3602737087072766,
+ "learning_rate": 6.359409897079303e-05,
+ "loss": 1.0282,
+ "step": 605
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.31168852571991945,
+ "learning_rate": 6.352520226646222e-05,
+ "loss": 1.0779,
+ "step": 606
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.3516045580189353,
+ "learning_rate": 6.345619869651871e-05,
+ "loss": 1.1028,
+ "step": 607
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.3231857927563657,
+ "learning_rate": 6.33870885744182e-05,
+ "loss": 1.1202,
+ "step": 608
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.30205205129701157,
+ "learning_rate": 6.331787221410041e-05,
+ "loss": 1.1369,
+ "step": 609
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.3198359813888166,
+ "learning_rate": 6.32485499299877e-05,
+ "loss": 1.1763,
+ "step": 610
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.3128641370321787,
+ "learning_rate": 6.31791220369835e-05,
+ "loss": 1.0223,
+ "step": 611
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.2989105616213649,
+ "learning_rate": 6.31095888504711e-05,
+ "loss": 1.0358,
+ "step": 612
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.3103537906853337,
+ "learning_rate": 6.303995068631203e-05,
+ "loss": 1.1261,
+ "step": 613
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.28598715532508207,
+ "learning_rate": 6.297020786084467e-05,
+ "loss": 1.0629,
+ "step": 614
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.29809789918093255,
+ "learning_rate": 6.290036069088288e-05,
+ "loss": 1.035,
+ "step": 615
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.33765270252261453,
+ "learning_rate": 6.283040949371451e-05,
+ "loss": 1.1221,
+ "step": 616
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.3424617501293415,
+ "learning_rate": 6.276035458709993e-05,
+ "loss": 1.155,
+ "step": 617
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.3799189737987811,
+ "learning_rate": 6.269019628927067e-05,
+ "loss": 1.0701,
+ "step": 618
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.3358898935253196,
+ "learning_rate": 6.261993491892791e-05,
+ "loss": 1.1649,
+ "step": 619
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.31569979424117356,
+ "learning_rate": 6.254957079524099e-05,
+ "loss": 1.0633,
+ "step": 620
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.3002168156888237,
+ "learning_rate": 6.247910423784609e-05,
+ "loss": 1.0846,
+ "step": 621
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.3097238823450595,
+ "learning_rate": 6.24085355668447e-05,
+ "loss": 1.0808,
+ "step": 622
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.3120312761417578,
+ "learning_rate": 6.233786510280212e-05,
+ "loss": 1.0142,
+ "step": 623
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.3335343015064923,
+ "learning_rate": 6.22670931667461e-05,
+ "loss": 1.0674,
+ "step": 624
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.3234062304634526,
+ "learning_rate": 6.219622008016533e-05,
+ "loss": 1.0981,
+ "step": 625
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.32152678786547273,
+ "learning_rate": 6.212524616500798e-05,
+ "loss": 1.0244,
+ "step": 626
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.39031977608147594,
+ "learning_rate": 6.205417174368023e-05,
+ "loss": 1.1205,
+ "step": 627
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.3806189090017157,
+ "learning_rate": 6.198299713904485e-05,
+ "loss": 1.1134,
+ "step": 628
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.2978349276971668,
+ "learning_rate": 6.191172267441967e-05,
+ "loss": 1.0088,
+ "step": 629
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.3190354077382501,
+ "learning_rate": 6.184034867357617e-05,
+ "loss": 1.108,
+ "step": 630
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.32633048665038994,
+ "learning_rate": 6.176887546073797e-05,
+ "loss": 1.0825,
+ "step": 631
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.3428026413020903,
+ "learning_rate": 6.169730336057939e-05,
+ "loss": 1.0765,
+ "step": 632
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.3475737151929015,
+ "learning_rate": 6.162563269822391e-05,
+ "loss": 1.0693,
+ "step": 633
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.3870252154591392,
+ "learning_rate": 6.15538637992428e-05,
+ "loss": 1.1081,
+ "step": 634
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.33597355193652834,
+ "learning_rate": 6.148199698965352e-05,
+ "loss": 1.0893,
+ "step": 635
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.30805894179787247,
+ "learning_rate": 6.141003259591834e-05,
+ "loss": 1.0995,
+ "step": 636
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.3025073882734066,
+ "learning_rate": 6.133797094494281e-05,
+ "loss": 1.0388,
+ "step": 637
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.3524395196391662,
+ "learning_rate": 6.126581236407429e-05,
+ "loss": 1.1196,
+ "step": 638
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.3377646188130345,
+ "learning_rate": 6.119355718110039e-05,
+ "loss": 1.0382,
+ "step": 639
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.35508400659785483,
+ "learning_rate": 6.112120572424763e-05,
+ "loss": 1.1402,
+ "step": 640
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.3454418793700457,
+ "learning_rate": 6.104875832217982e-05,
+ "loss": 1.1032,
+ "step": 641
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.32629806837059866,
+ "learning_rate": 6.097621530399661e-05,
+ "loss": 1.0959,
+ "step": 642
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.3329536837751315,
+ "learning_rate": 6.090357699923202e-05,
+ "loss": 1.0467,
+ "step": 643
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.32302233828349475,
+ "learning_rate": 6.083084373785287e-05,
+ "loss": 1.0858,
+ "step": 644
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.3310358826507611,
+ "learning_rate": 6.075801585025739e-05,
+ "loss": 1.0715,
+ "step": 645
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.319322035854079,
+ "learning_rate": 6.068509366727362e-05,
+ "loss": 1.177,
+ "step": 646
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.3065230667302707,
+ "learning_rate": 6.061207752015797e-05,
+ "loss": 1.0649,
+ "step": 647
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.29926795565748227,
+ "learning_rate": 6.053896774059368e-05,
+ "loss": 1.1325,
+ "step": 648
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.3556069634279046,
+ "learning_rate": 6.046576466068931e-05,
+ "loss": 1.1366,
+ "step": 649
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.3189191131461966,
+ "learning_rate": 6.039246861297727e-05,
+ "loss": 1.0693,
+ "step": 650
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.3347197156648834,
+ "learning_rate": 6.031907993041227e-05,
+ "loss": 1.1009,
+ "step": 651
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.32274156348185445,
+ "learning_rate": 6.0245598946369826e-05,
+ "loss": 1.1675,
+ "step": 652
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.35534089035455224,
+ "learning_rate": 6.017202599464476e-05,
+ "loss": 1.1723,
+ "step": 653
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.3106026578570133,
+ "learning_rate": 6.009836140944965e-05,
+ "loss": 1.0954,
+ "step": 654
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.3309144454564729,
+ "learning_rate": 6.002460552541331e-05,
+ "loss": 1.0209,
+ "step": 655
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.3023619281400003,
+ "learning_rate": 5.9950758677579345e-05,
+ "loss": 1.0363,
+ "step": 656
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.3311182880219704,
+ "learning_rate": 5.987682120140451e-05,
+ "loss": 1.0515,
+ "step": 657
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.33396486010030413,
+ "learning_rate": 5.980279343275729e-05,
+ "loss": 1.1251,
+ "step": 658
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.3465764556678002,
+ "learning_rate": 5.97286757079163e-05,
+ "loss": 1.165,
+ "step": 659
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.304193441363374,
+ "learning_rate": 5.965446836356882e-05,
+ "loss": 1.0228,
+ "step": 660
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.3415149030413082,
+ "learning_rate": 5.9580171736809224e-05,
+ "loss": 1.0742,
+ "step": 661
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.33138658321132064,
+ "learning_rate": 5.950578616513746e-05,
+ "loss": 1.0843,
+ "step": 662
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.30774403421162994,
+ "learning_rate": 5.943131198645752e-05,
+ "loss": 1.065,
+ "step": 663
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.3428877492183819,
+ "learning_rate": 5.9356749539075885e-05,
+ "loss": 1.1101,
+ "step": 664
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.3621290546130101,
+ "learning_rate": 5.928209916170003e-05,
+ "loss": 1.1372,
+ "step": 665
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.3482375945469884,
+ "learning_rate": 5.9207361193436865e-05,
+ "loss": 1.132,
+ "step": 666
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.31754384974068384,
+ "learning_rate": 5.9132535973791156e-05,
+ "loss": 1.148,
+ "step": 667
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.36003834782050365,
+ "learning_rate": 5.9057623842664044e-05,
+ "loss": 1.1099,
+ "step": 668
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.2963701622969662,
+ "learning_rate": 5.8982625140351464e-05,
+ "loss": 1.0755,
+ "step": 669
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.32579569606066516,
+ "learning_rate": 5.8907540207542616e-05,
+ "loss": 1.0809,
+ "step": 670
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.4247563451753457,
+ "learning_rate": 5.8832369385318416e-05,
+ "loss": 1.097,
+ "step": 671
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.33076932102169776,
+ "learning_rate": 5.875711301514992e-05,
+ "loss": 1.1078,
+ "step": 672
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.3609238032332309,
+ "learning_rate": 5.8681771438896815e-05,
+ "loss": 1.1031,
+ "step": 673
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.325159585649425,
+ "learning_rate": 5.860634499880583e-05,
+ "loss": 1.0707,
+ "step": 674
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.4620687271068983,
+ "learning_rate": 5.853083403750922e-05,
+ "loss": 1.1017,
+ "step": 675
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.33485279064365936,
+ "learning_rate": 5.845523889802316e-05,
+ "loss": 1.0989,
+ "step": 676
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.30952573170841513,
+ "learning_rate": 5.8379559923746214e-05,
+ "loss": 1.0393,
+ "step": 677
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.33498605810588283,
+ "learning_rate": 5.830379745845781e-05,
+ "loss": 1.1259,
+ "step": 678
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.35771921163037307,
+ "learning_rate": 5.822795184631659e-05,
+ "loss": 1.0815,
+ "step": 679
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.3329650192347647,
+ "learning_rate": 5.815202343185894e-05,
+ "loss": 1.1344,
+ "step": 680
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3356634465845771,
+ "learning_rate": 5.807601255999736e-05,
+ "loss": 1.1297,
+ "step": 681
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3289442034151235,
+ "learning_rate": 5.7999919576018934e-05,
+ "loss": 1.022,
+ "step": 682
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3207007334784113,
+ "learning_rate": 5.7923744825583745e-05,
+ "loss": 1.0571,
+ "step": 683
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3582460325329284,
+ "learning_rate": 5.7847488654723304e-05,
+ "loss": 1.0778,
+ "step": 684
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3563317666176927,
+ "learning_rate": 5.777115140983899e-05,
+ "loss": 1.1003,
+ "step": 685
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 3.4694912945702105,
+ "learning_rate": 5.769473343770047e-05,
+ "loss": 1.121,
+ "step": 686
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.43002349520483113,
+ "learning_rate": 5.761823508544411e-05,
+ "loss": 1.0765,
+ "step": 687
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.39467783104839754,
+ "learning_rate": 5.754165670057142e-05,
+ "loss": 1.0788,
+ "step": 688
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.39629029674867916,
+ "learning_rate": 5.7464998630947464e-05,
+ "loss": 1.0812,
+ "step": 689
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.3880152093965208,
+ "learning_rate": 5.738826122479929e-05,
+ "loss": 1.1228,
+ "step": 690
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.3777874121959188,
+ "learning_rate": 5.7311444830714324e-05,
+ "loss": 1.0907,
+ "step": 691
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.38004041653523696,
+ "learning_rate": 5.723454979763882e-05,
+ "loss": 1.1263,
+ "step": 692
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.37049672627797636,
+ "learning_rate": 5.7157576474876246e-05,
+ "loss": 1.1438,
+ "step": 693
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.32973606103437614,
+ "learning_rate": 5.7080525212085725e-05,
+ "loss": 1.0553,
+ "step": 694
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.31674639252070325,
+ "learning_rate": 5.700339635928038e-05,
+ "loss": 1.06,
+ "step": 695
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.32282199426553837,
+ "learning_rate": 5.692619026682588e-05,
+ "loss": 1.0841,
+ "step": 696
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.4810882958061859,
+ "learning_rate": 5.684890728543869e-05,
+ "loss": 1.0803,
+ "step": 697
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.3995638550178378,
+ "learning_rate": 5.6771547766184566e-05,
+ "loss": 1.1187,
+ "step": 698
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.35264932960583484,
+ "learning_rate": 5.669411206047699e-05,
+ "loss": 1.0641,
+ "step": 699
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.35240640524733,
+ "learning_rate": 5.661660052007547e-05,
+ "loss": 1.076,
+ "step": 700
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.3540694609860389,
+ "learning_rate": 5.653901349708401e-05,
+ "loss": 1.1369,
+ "step": 701
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.3196055112925304,
+ "learning_rate": 5.646135134394955e-05,
+ "loss": 1.0677,
+ "step": 702
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.4214141007955914,
+ "learning_rate": 5.6383614413460266e-05,
+ "loss": 1.1139,
+ "step": 703
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.3625611311798579,
+ "learning_rate": 5.630580305874402e-05,
+ "loss": 1.1845,
+ "step": 704
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.3425208672181188,
+ "learning_rate": 5.62279176332668e-05,
+ "loss": 1.174,
+ "step": 705
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.3108419862818321,
+ "learning_rate": 5.6149958490830996e-05,
+ "loss": 1.0331,
+ "step": 706
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.3274644181571904,
+ "learning_rate": 5.607192598557394e-05,
+ "loss": 1.0664,
+ "step": 707
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.346218197215145,
+ "learning_rate": 5.599382047196617e-05,
+ "loss": 1.2088,
+ "step": 708
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.328497632267458,
+ "learning_rate": 5.591564230480989e-05,
+ "loss": 1.0287,
+ "step": 709
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.3708173720611468,
+ "learning_rate": 5.583739183923732e-05,
+ "loss": 1.0883,
+ "step": 710
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.3631427403535479,
+ "learning_rate": 5.575906943070915e-05,
+ "loss": 1.1155,
+ "step": 711
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.3305201458598695,
+ "learning_rate": 5.5680675435012834e-05,
+ "loss": 1.0958,
+ "step": 712
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.34978833532083714,
+ "learning_rate": 5.5602210208261036e-05,
+ "loss": 1.1437,
+ "step": 713
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.3510553882510229,
+ "learning_rate": 5.552367410688999e-05,
+ "loss": 1.0941,
+ "step": 714
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.3523747462465078,
+ "learning_rate": 5.544506748765789e-05,
+ "loss": 1.1289,
+ "step": 715
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.38262637783927445,
+ "learning_rate": 5.5366390707643266e-05,
+ "loss": 1.099,
+ "step": 716
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.38620065989073454,
+ "learning_rate": 5.528764412424334e-05,
+ "loss": 1.083,
+ "step": 717
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.3401355276121096,
+ "learning_rate": 5.520882809517245e-05,
+ "loss": 1.028,
+ "step": 718
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.3392061008943934,
+ "learning_rate": 5.512994297846039e-05,
+ "loss": 1.1083,
+ "step": 719
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.34219480421015414,
+ "learning_rate": 5.505098913245077e-05,
+ "loss": 1.1108,
+ "step": 720
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.3275058061553761,
+ "learning_rate": 5.497196691579945e-05,
+ "loss": 1.111,
+ "step": 721
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.36800249746509384,
+ "learning_rate": 5.489287668747283e-05,
+ "loss": 1.1221,
+ "step": 722
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.4129005533101575,
+ "learning_rate": 5.481371880674628e-05,
+ "loss": 1.0966,
+ "step": 723
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.36563906596251655,
+ "learning_rate": 5.4734493633202505e-05,
+ "loss": 1.0927,
+ "step": 724
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.3614650536839971,
+ "learning_rate": 5.465520152672986e-05,
+ "loss": 1.13,
+ "step": 725
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.36419665098633497,
+ "learning_rate": 5.4575842847520765e-05,
+ "loss": 1.1183,
+ "step": 726
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.34490689807258995,
+ "learning_rate": 5.449641795607005e-05,
+ "loss": 1.0919,
+ "step": 727
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.3627643746876298,
+ "learning_rate": 5.441692721317334e-05,
+ "loss": 1.0411,
+ "step": 728
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.323620411949565,
+ "learning_rate": 5.433737097992537e-05,
+ "loss": 1.0725,
+ "step": 729
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3521599501824965,
+ "learning_rate": 5.425774961771838e-05,
+ "loss": 1.0926,
+ "step": 730
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3302390546764222,
+ "learning_rate": 5.417806348824047e-05,
+ "loss": 1.0468,
+ "step": 731
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3833325802616019,
+ "learning_rate": 5.4098312953473956e-05,
+ "loss": 1.1291,
+ "step": 732
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3708621126835512,
+ "learning_rate": 5.401849837569372e-05,
+ "loss": 1.0887,
+ "step": 733
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3625834373416278,
+ "learning_rate": 5.393862011746555e-05,
+ "loss": 1.0981,
+ "step": 734
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3583343965080617,
+ "learning_rate": 5.385867854164451e-05,
+ "loss": 1.1021,
+ "step": 735
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.34598320594096066,
+ "learning_rate": 5.377867401137332e-05,
+ "loss": 1.1376,
+ "step": 736
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.3046382791315433,
+ "learning_rate": 5.369860689008066e-05,
+ "loss": 1.0206,
+ "step": 737
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.34464948380043725,
+ "learning_rate": 5.3618477541479505e-05,
+ "loss": 1.1084,
+ "step": 738
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.3203242519627101,
+ "learning_rate": 5.353828632956557e-05,
+ "loss": 1.0731,
+ "step": 739
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.3431169960355163,
+ "learning_rate": 5.3458033618615516e-05,
+ "loss": 1.091,
+ "step": 740
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.33492074521678705,
+ "learning_rate": 5.337771977318543e-05,
+ "loss": 1.1112,
+ "step": 741
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.32576546585541344,
+ "learning_rate": 5.3297345158109086e-05,
+ "loss": 1.0993,
+ "step": 742
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.3410007245037574,
+ "learning_rate": 5.3216910138496286e-05,
+ "loss": 1.094,
+ "step": 743
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.34891180680896833,
+ "learning_rate": 5.313641507973128e-05,
+ "loss": 1.1331,
+ "step": 744
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.37135766946717214,
+ "learning_rate": 5.3055860347471006e-05,
+ "loss": 1.1,
+ "step": 745
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.3465019415478411,
+ "learning_rate": 5.297524630764349e-05,
+ "loss": 1.1256,
+ "step": 746
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.37035388481626563,
+ "learning_rate": 5.289457332644615e-05,
+ "loss": 1.0366,
+ "step": 747
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.33853883270759155,
+ "learning_rate": 5.281384177034421e-05,
+ "loss": 1.0547,
+ "step": 748
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.364306618627317,
+ "learning_rate": 5.2733052006068897e-05,
+ "loss": 1.0768,
+ "step": 749
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.4021754315731627,
+ "learning_rate": 5.2652204400615916e-05,
+ "loss": 1.1382,
+ "step": 750
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.3332185389039008,
+ "learning_rate": 5.257129932124368e-05,
+ "loss": 1.0815,
+ "step": 751
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.3453105709879854,
+ "learning_rate": 5.249033713547173e-05,
+ "loss": 1.1109,
+ "step": 752
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.3385397539717797,
+ "learning_rate": 5.2409318211078966e-05,
+ "loss": 1.0529,
+ "step": 753
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.33197994450130447,
+ "learning_rate": 5.232824291610206e-05,
+ "loss": 1.0721,
+ "step": 754
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.32836289576124167,
+ "learning_rate": 5.224711161883375e-05,
+ "loss": 1.0459,
+ "step": 755
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.32491620058831744,
+ "learning_rate": 5.216592468782117e-05,
+ "loss": 1.0897,
+ "step": 756
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.3137879047811153,
+ "learning_rate": 5.2084682491864155e-05,
+ "loss": 1.096,
+ "step": 757
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.3356938043023012,
+ "learning_rate": 5.200338540001364e-05,
+ "loss": 1.0827,
+ "step": 758
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.36044340490819055,
+ "learning_rate": 5.192203378156984e-05,
+ "loss": 1.0617,
+ "step": 759
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.34674262047888293,
+ "learning_rate": 5.184062800608077e-05,
+ "loss": 1.1267,
+ "step": 760
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.32469442322149333,
+ "learning_rate": 5.1759168443340375e-05,
+ "loss": 1.1483,
+ "step": 761
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.3290384307774216,
+ "learning_rate": 5.167765546338698e-05,
+ "loss": 1.047,
+ "step": 762
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.31637612188770403,
+ "learning_rate": 5.1596089436501525e-05,
+ "loss": 1.0311,
+ "step": 763
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.3168693829641207,
+ "learning_rate": 5.151447073320597e-05,
+ "loss": 1.1405,
+ "step": 764
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.34322421571238926,
+ "learning_rate": 5.143279972426153e-05,
+ "loss": 1.1428,
+ "step": 765
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.3291030435830325,
+ "learning_rate": 5.1351076780667026e-05,
+ "loss": 1.0473,
+ "step": 766
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.33772039158758044,
+ "learning_rate": 5.1269302273657195e-05,
+ "loss": 1.0909,
+ "step": 767
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.3802031736890876,
+ "learning_rate": 5.118747657470102e-05,
+ "loss": 1.1482,
+ "step": 768
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.3296067628997962,
+ "learning_rate": 5.1105600055500025e-05,
+ "loss": 1.0085,
+ "step": 769
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.3707139982828035,
+ "learning_rate": 5.102367308798658e-05,
+ "loss": 1.0746,
+ "step": 770
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.3378537316757011,
+ "learning_rate": 5.094169604432225e-05,
+ "loss": 1.0482,
+ "step": 771
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.4008417246255145,
+ "learning_rate": 5.085966929689601e-05,
+ "loss": 1.1065,
+ "step": 772
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.3244385106988064,
+ "learning_rate": 5.077759321832271e-05,
+ "loss": 1.0827,
+ "step": 773
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.37228575732812336,
+ "learning_rate": 5.0695468181441215e-05,
+ "loss": 1.1146,
+ "step": 774
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.33761714797540276,
+ "learning_rate": 5.061329455931283e-05,
+ "loss": 1.092,
+ "step": 775
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.3158158390913494,
+ "learning_rate": 5.053107272521955e-05,
+ "loss": 1.1058,
+ "step": 776
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.3691501929738938,
+ "learning_rate": 5.044880305266239e-05,
+ "loss": 1.1599,
+ "step": 777
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.33730914019805525,
+ "learning_rate": 5.0366485915359645e-05,
+ "loss": 1.0615,
+ "step": 778
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.34970059240017,
+ "learning_rate": 5.0284121687245257e-05,
+ "loss": 1.1475,
+ "step": 779
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 0.3374028029407197,
+ "learning_rate": 5.020171074246707e-05,
+ "loss": 1.0926,
+ "step": 780
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 0.3350020681123992,
+ "learning_rate": 5.011925345538514e-05,
+ "loss": 1.1276,
+ "step": 781
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 0.3224228965786606,
+ "learning_rate": 5.003675020057003e-05,
+ "loss": 1.0183,
+ "step": 782
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 0.3357310714740298,
+ "learning_rate": 4.995420135280114e-05,
+ "loss": 1.1114,
+ "step": 783
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 0.3590203255363759,
+ "learning_rate": 4.9871607287064966e-05,
+ "loss": 1.1504,
+ "step": 784
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.33011195419611655,
+ "learning_rate": 4.9788968378553396e-05,
+ "loss": 1.0826,
+ "step": 785
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.31088868195439445,
+ "learning_rate": 4.970628500266207e-05,
+ "loss": 1.0704,
+ "step": 786
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.3144996103179409,
+ "learning_rate": 4.962355753498858e-05,
+ "loss": 1.1403,
+ "step": 787
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.3147269555419068,
+ "learning_rate": 4.954078635133081e-05,
+ "loss": 1.0898,
+ "step": 788
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.3280151747783868,
+ "learning_rate": 4.945797182768524e-05,
+ "loss": 1.1115,
+ "step": 789
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.3551996569232493,
+ "learning_rate": 4.937511434024524e-05,
+ "loss": 1.1731,
+ "step": 790
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 0.343863208057807,
+ "learning_rate": 4.9292214265399336e-05,
+ "loss": 1.0866,
+ "step": 791
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 0.37316699385322466,
+ "learning_rate": 4.920927197972949e-05,
+ "loss": 1.1083,
+ "step": 792
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 0.3635739774067832,
+ "learning_rate": 4.9126287860009453e-05,
+ "loss": 1.1393,
+ "step": 793
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 0.3755910554972886,
+ "learning_rate": 4.9043262283202974e-05,
+ "loss": 1.1624,
+ "step": 794
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 0.3635899120146823,
+ "learning_rate": 4.8960195626462145e-05,
+ "loss": 1.2095,
+ "step": 795
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 0.3642202684342816,
+ "learning_rate": 4.8877088267125664e-05,
+ "loss": 1.1099,
+ "step": 796
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 0.3339946548799316,
+ "learning_rate": 4.879394058271712e-05,
+ "loss": 1.1157,
+ "step": 797
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 0.3457189703100475,
+ "learning_rate": 4.871075295094329e-05,
+ "loss": 1.129,
+ "step": 798
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 0.3550931839691424,
+ "learning_rate": 4.862752574969241e-05,
+ "loss": 1.076,
+ "step": 799
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 0.36139108917966734,
+ "learning_rate": 4.8544259357032475e-05,
+ "loss": 1.1577,
+ "step": 800
+ }
+ ],
+ "logging_steps": 1.0,
+ "max_steps": 1638,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 3,
+ "save_steps": 50,
+ "total_flos": 829528372936704.0,
+ "train_batch_size": 1,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/checkpoint-800/training_args.bin b/checkpoint-800/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..c5d2416a3b70bb5260978ec9996f00154a724ba7
--- /dev/null
+++ b/checkpoint-800/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b22e8f9d51a16d03a2c506fa3d1eafa8f4b1ae992992c2086a4d435ffd97387e
+size 6712
diff --git a/checkpoint-800/zero_to_fp32.py b/checkpoint-800/zero_to_fp32.py
new file mode 100755
index 0000000000000000000000000000000000000000..24cc342e78d1a006c782b3a4cd68d9ce786d8fd8
--- /dev/null
+++ b/checkpoint-800/zero_to_fp32.py
@@ -0,0 +1,604 @@
+#!/usr/bin/env python
+
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+# This script extracts fp32 consolidated weights from ZeRO 1, 2 and 3 DeepSpeed checkpoints. It gets
+# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
+# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
+# application.
+#
+# example: python zero_to_fp32.py . pytorch_model.bin
+
+import argparse
+import torch
+import glob
+import math
+import os
+import re
+from collections import OrderedDict
+from dataclasses import dataclass
+
+# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
+# DeepSpeed data structures it has to be available in the current python environment.
+from deepspeed.utils import logger
+from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
+
+
@dataclass
class zero_model_state:
    """Per-rank slice of a DeepSpeed model-states checkpoint needed for fp32 reconstruction.

    Fix: the original annotated fields with ``dict()`` — an empty *dict instance* —
    rather than the ``dict`` type. Dataclasses don't enforce annotations, so behavior
    is unchanged, but the instance-annotations confuse type checkers and readers.
    """
    # fp32 copies of the module buffers, keyed by buffer name
    buffers: dict
    # per param group: {param_name: shape} mappings (list of dicts in practice — TODO confirm)
    param_shapes: dict
    # [alias_name, source_name] pairs for tied/shared parameters
    shared_params: list
    # NOTE(review): populated with the DS_VERSION string from the checkpoint
    # (may be None), despite the historical ``int`` annotation kept here.
    ds_version: int
    # {param_name: shape} for frozen params, or None when there are none
    frozen_param_shapes: dict
    # {param_name: tensor fragment} for frozen params, or None
    frozen_param_fragments: dict
+
+
# Verbosity flag for the reconstruction functions below: set truthy to print
# per-parameter shapes/numels while merging partitions.
debug = 0

# load to cpu
device = torch.device('cpu')
+
+
def atoi(text):
    """Return ``int(text)`` when *text* is purely digits, otherwise *text* unchanged."""
    if text.isdigit():
        return int(text)
    return text
+
+
def natural_keys(text):
    """Sort key for human ("natural") ordering, e.g. "file2" before "file10".

    Based on http://nedbatchelder.com/blog/200712/human_sorting.html
    (see Toothy's implementation in the comments): split the string on digit
    runs and convert those runs to ints so numeric segments compare numerically.
    """
    return [int(chunk) if chunk.isdigit() else chunk for chunk in re.split(r'(\d+)', text)]
+
+
def get_model_state_file(checkpoint_dir, zero_stage):
    """Return the path of the single rank-0 model-states file for the given ZeRO stage.

    Args:
        checkpoint_dir: directory holding the DeepSpeed checkpoint files.
        zero_stage: ZeRO optimization stage the checkpoint was saved with (1, 2 or 3).

    Returns:
        Path to the model states file.

    Raises:
        FileNotFoundError: if the directory or the expected file doesn't exist.
        ValueError: for an unknown zero stage. (The original fell through both
            branches and crashed with an UnboundLocalError on ``file``.)
    """
    if not os.path.isdir(checkpoint_dir):
        raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")

    # there should be only one file
    if zero_stage <= 2:
        file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
    elif zero_stage == 3:
        file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
    else:
        # message mirrors parse_optim_states for consistency
        raise ValueError(f"unknown zero stage {zero_stage}")

    if not os.path.exists(file):
        raise FileNotFoundError(f"can't find model states file at '{file}'")

    return file
+
+
def get_checkpoint_files(checkpoint_dir, glob_pattern):
    """Return the files under *checkpoint_dir* matching *glob_pattern*, naturally sorted.

    Raises:
        FileNotFoundError: when no file matches the pattern.
    """
    # XXX: need to test that this simple glob rule works for multi-node setup too
    pattern = os.path.join(checkpoint_dir, glob_pattern)
    ckpt_files = sorted(glob.glob(pattern), key=natural_keys)

    if not ckpt_files:
        raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")

    return ckpt_files
+
+
def get_optim_files(checkpoint_dir):
    """Return the naturally-sorted `*_optim_states.pt` files under *checkpoint_dir*."""
    return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
+
+
def get_model_state_files(checkpoint_dir):
    """Return the naturally-sorted `*_model_states.pt` files under *checkpoint_dir*."""
    return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
+
+
def parse_model_states(files):
    """Load the per-rank model-states files and distill what fp32 reconstruction needs.

    Args:
        files: paths to `*_model_states.pt` checkpoint files, one per rank.

    Returns:
        list[zero_model_state]: one entry per file with buffers (restored to
        fp32), param shapes, shared-param pairs, ds version and frozen-param info.

    Raises:
        ValueError: if a file lacks the buffer-names key, i.e. it is not a
            DeepSpeed model-states checkpoint.
    """
    zero_model_states = []
    for file in files:
        state_dict = torch.load(file, map_location=device)

        if BUFFER_NAMES not in state_dict:
            raise ValueError(f"{file} is not a model state checkpoint")
        buffer_names = state_dict[BUFFER_NAMES]
        if debug:
            print("Found buffers:", buffer_names)

        # recover just the buffers while restoring them to fp32 if they were saved in fp16
        buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
        param_shapes = state_dict[PARAM_SHAPES]

        # collect parameters that are included in param_shapes
        param_names = []
        for s in param_shapes:
            for name in s.keys():
                param_names.append(name)

        # update with frozen parameters
        frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
        if frozen_param_shapes is not None:
            if debug:
                print(f"Found frozen_param_shapes: {frozen_param_shapes}")
            param_names += list(frozen_param_shapes.keys())

        # NOTE(review): param_names is assembled above but never used afterwards
        # in this function — it looks like leftover scaffolding.

        # handle shared params
        shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]

        ds_version = state_dict.get(DS_VERSION, None)

        frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)

        z_model_state = zero_model_state(buffers=buffers,
                                         param_shapes=param_shapes,
                                         shared_params=shared_params,
                                         ds_version=ds_version,
                                         frozen_param_shapes=frozen_param_shapes,
                                         frozen_param_fragments=frozen_param_fragments)
        zero_model_states.append(z_model_state)

    return zero_model_states
+
+
def parse_optim_states(files, ds_checkpoint_dir):
    """Load the per-rank optimizer-states files and extract the fp32 master weights.

    Args:
        files: paths to `*_optim_states.pt` files, one per rank.
        ds_checkpoint_dir: checkpoint folder (used only for error messages).

    Returns:
        Tuple ``(zero_stage, world_size, fp32_flat_groups)``. ``fp32_flat_groups``
        is indexed by rank; per rank it is a list of flat fp32 tensors for
        stage 1/2, or a single concatenated flat tensor for stage 3.

    Raises:
        ValueError: if a file is not a ZeRO checkpoint, the file count doesn't
            match the saved world size, or the stage is unknown.
    """

    total_files = len(files)
    state_dicts = []
    for f in files:
        state_dict = torch.load(f, map_location=device)
        # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights
        # and also handle the case where it was already removed by another helper script
        state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
        state_dicts.append(state_dict)

    if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]:
        raise ValueError(f"{files[0]} is not a zero checkpoint")
    zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
    world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]

    # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
    # parameters can be different from data parallelism for non-expert parameters. So we can just
    # use the max of the partition_count to get the dp world_size.

    if type(world_size) is list:
        world_size = max(world_size)

    if world_size != total_files:
        raise ValueError(
            f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
            "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
        )

    # the groups are named differently in each stage
    if zero_stage <= 2:
        fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
    elif zero_stage == 3:
        fp32_groups_key = FP32_FLAT_GROUPS
    else:
        raise ValueError(f"unknown zero stage {zero_stage}")

    if zero_stage <= 2:
        fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
    elif zero_stage == 3:
        # if there is more than one param group, there will be multiple flattened tensors - one
        # flattened tensor per group - for simplicity merge them into a single tensor
        #
        # XXX: could make the script more memory efficient for when there are multiple groups - it
        # will require matching the sub-lists of param_shapes for each param group flattened tensor

        fp32_flat_groups = [
            torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
        ]

    return zero_stage, world_size, fp32_flat_groups
+
+
def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
    """
    Returns fp32 state_dict reconstructed from ds checkpoint

    Args:
        - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
        - ``exclude_frozen_parameters``: when True, frozen parameters are left out of the result

    """
    print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")

    # the optimizer files hold the fp32 master weights and determine stage/world size
    optim_files = get_optim_files(ds_checkpoint_dir)
    zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
    print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")

    model_files = get_model_state_files(ds_checkpoint_dir)

    zero_model_states = parse_model_states(model_files)
    print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')

    # dispatch on the detected stage; stages 1 and 2 share the same on-disk layout.
    # NOTE(review): an unknown stage would fall through and return None, but
    # parse_optim_states already raised for stages other than 1/2/3 above.
    if zero_stage <= 2:
        return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                                          exclude_frozen_parameters)
    elif zero_stage == 3:
        return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                                          exclude_frozen_parameters)
+
+
def _zero2_merge_frozen_params(state_dict, zero_model_states):
    """Copy frozen (non-trained) parameters into *state_dict* for a ZeRO-1/2 checkpoint.

    In stage 1/2 frozen params are not partitioned, so rank 0's fragments are
    used verbatim. No-op when the checkpoint has no frozen params.
    """
    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
        return

    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
    frozen_param_fragments = zero_model_states[0].frozen_param_fragments

    if debug:
        num_elem = sum(s.numel() for s in frozen_param_shapes.values())
        print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

    wanted_params = len(frozen_param_shapes)
    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
    avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
    print(f'Frozen params: Have {avail_numel} numels to process.')
    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params = 0
    total_numel = 0
    for name, shape in frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        # fragments are already full tensors in stage 1/2 — no narrowing needed
        state_dict[name] = frozen_param_fragments[name]

        if debug:
            print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _has_callable(obj, fn):
+ attr = getattr(obj, fn, None)
+ return callable(attr)
+
+
def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    """Reassemble trainable params from ZeRO-1/2 per-rank flat fp32 partitions.

    Each param group's per-rank partitions are concatenated into one full flat
    vector, then individual params are sliced out by their recorded shapes and
    stored into *state_dict*.

    Raises:
        ValueError: if, after stage-2 alignment, the consumed element count
            doesn't match what the flat vectors provide.
    """
    param_shapes = zero_model_states[0].param_shapes

    # Reconstruction protocol:
    #
    # XXX: document this

    if debug:
        for i in range(world_size):
            for j in range(len(fp32_flat_groups[0])):
                print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")

    # XXX: memory usage doubles here (zero2)
    num_param_groups = len(fp32_flat_groups[0])
    merged_single_partition_of_fp32_groups = []
    for i in range(num_param_groups):
        # gather this group's partition from every rank and fuse into one flat vector
        merged_partitions = [sd[i] for sd in fp32_flat_groups]
        full_single_fp32_vector = torch.cat(merged_partitions, 0)
        merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
    avail_numel = sum(
        [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])

    if debug:
        wanted_params = sum([len(shapes) for shapes in param_shapes])
        wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
        # not asserting if there is a mismatch due to possible padding
        print(f"Have {avail_numel} numels to process.")
        print(f"Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    total_numel = 0
    total_params = 0
    for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
        offset = 0
        avail_numel = full_single_fp32_vector.numel()
        for name, shape in shapes.items():

            # shape may be a tensor-like with .numel() or a plain tuple/list of dims
            unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
            total_numel += unpartitioned_numel
            total_params += 1

            if debug:
                print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
            # slice this param's elements out of the flat vector and restore its shape
            state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
            offset += unpartitioned_numel

        # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
        # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
        # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
        # live optimizer object, so we are checking that the numbers are within the right range
        align_to = 2 * world_size

        def zero2_align(x):
            return align_to * math.ceil(x / align_to)

        if debug:
            print(f"original offset={offset}, avail_numel={avail_numel}")

        offset = zero2_align(offset)
        avail_numel = zero2_align(avail_numel)

        if debug:
            print(f"aligned offset={offset}, avail_numel={avail_numel}")

        # Sanity check
        if offset != avail_numel:
            raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
+
+
def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                               exclude_frozen_parameters):
    """Assemble the consolidated fp32 state_dict for a ZeRO-1/2 checkpoint."""
    state_dict = OrderedDict()

    # buffers were already restored to fp32 by parse_model_states
    buffers = zero_model_states[0].buffers
    state_dict.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    if not exclude_frozen_parameters:
        _zero2_merge_frozen_params(state_dict, zero_model_states)

    _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters: each [alias, source] pair aliases source's tensor
    for alias, source in zero_model_states[0].shared_params:
        if source in state_dict:
            state_dict[alias] = state_dict[source]

    return state_dict
+
+
def zero3_partitioned_param_info(unpartitioned_numel, world_size):
    """Return (per-rank partition size, padding element count) for a ZeRO-3 param.

    ZeRO-3 splits a flattened param evenly across ranks, padding the last
    partition when the element count is not divisible by *world_size*.
    """
    partitioned_numel = math.ceil(unpartitioned_numel / world_size)
    leftover = unpartitioned_numel % world_size
    padding_numel = 0 if leftover == 0 else world_size - leftover
    return partitioned_numel, padding_numel
+
+
def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
    """Reassemble frozen (non-trained) params from ZeRO-3 per-rank fragments.

    Unlike stage 1/2, stage-3 frozen params are partitioned across ranks, so
    each param is rebuilt by concatenating every rank's fragment and trimming
    the partitioning padding. No-op when there are no frozen params.
    """
    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
        return

    if debug:
        for i in range(world_size):
            num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
            print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
    wanted_params = len(frozen_param_shapes)
    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
    # every rank holds a same-sized fragment, hence the * world_size
    avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
    print(f'Frozen params: Have {avail_numel} numels to process.')
    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params = 0
    total_numel = 0
    for name, shape in zero_model_states[0].frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        # concatenate all rank fragments, drop the tail padding, restore the shape
        param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
        state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)

        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    """Reassemble trainable params from ZeRO-3 per-rank flat fp32 tensors.

    Walks the params in checkpoint order; for each one, takes the next
    ``partitioned_numel`` slice from every rank's flat tensor, concatenates
    them, trims the padding and restores the original shape.

    Raises:
        ValueError: if the total consumed elements don't match what the flat
            tensors provide.
    """
    param_shapes = zero_model_states[0].param_shapes
    # NOTE(review): avail_numel is computed here and again identically below —
    # the first assignment is dead.
    avail_numel = fp32_flat_groups[0].numel() * world_size
    # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
    # param, re-consolidating each param, while dealing with padding if any

    # merge list of dicts, preserving order
    param_shapes = {k: v for d in param_shapes for k, v in d.items()}

    if debug:
        for i in range(world_size):
            print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")

    wanted_params = len(param_shapes)
    wanted_numel = sum(shape.numel() for shape in param_shapes.values())
    # not asserting if there is a mismatch due to possible padding
    avail_numel = fp32_flat_groups[0].numel() * world_size
    print(f"Trainable params: Have {avail_numel} numels to process.")
    print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    offset = 0
    total_numel = 0
    total_params = 0
    for name, shape in param_shapes.items():

        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel
        total_params += 1

        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

        # XXX: memory usage doubles here
        # take this param's slice from every rank, fuse, trim padding, reshape
        state_dict[name] = torch.cat(
            tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
            0).narrow(0, 0, unpartitioned_numel).view(shape)
        offset += partitioned_numel

    # offset tracked per-rank elements; scale to total elements for the check below
    offset *= world_size

    # Sanity check
    if offset != avail_numel:
        raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
+
+
def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                               exclude_frozen_parameters):
    """Assemble the consolidated fp32 state_dict for a ZeRO-3 checkpoint."""
    state_dict = OrderedDict()

    # buffers were already restored to fp32 by parse_model_states
    buffers = zero_model_states[0].buffers
    state_dict.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    if not exclude_frozen_parameters:
        _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)

    _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters: each [alias, source] pair aliases source's tensor
    for alias, source in zero_model_states[0].shared_params:
        if source in state_dict:
            state_dict[alias] = state_dict[source]

    return state_dict
+
+
def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None, exclude_frozen_parameters=False):
    """Reconstruct a single fp32 consolidated state_dict from a ZeRO 2/3 checkpoint.

    The returned dict can be loaded with ``load_state_dict()`` and used for
    training without DeepSpeed, or shared with others (e.g. via a model hub).

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
        - ``exclude_frozen_parameters``: exclude frozen parameters

    Returns:
        - pytorch ``state_dict``

    Note: the whole state_dict is materialized in CPU RAM; if your application
    lacks sufficient free memory, use the offline ``zero_to_fp32.py`` script
    saved alongside the checkpoint instead.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
        # do the training and checkpoint saving
        state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
        model = model.cpu() # move to cpu
        model.load_state_dict(state_dict)
        # submit to model hub or save the model to share with others

    After ``load_state_dict(state_dict)`` the model is stripped of its DeepSpeed
    wrapping, so the deepspeed engine must be re-initialized to keep using it in
    the same application. If you want it all done for you, use
    ``load_state_dict_from_zero_checkpoint`` instead.
    """
    # resolve the tag from the 'latest' marker file when not given explicitly
    if tag is None:
        latest_path = os.path.join(checkpoint_dir, 'latest')
        if not os.path.isfile(latest_path):
            raise ValueError(f"Unable to find 'latest' file at {latest_path}")
        with open(latest_path, 'r') as fd:
            tag = fd.read().strip()

    ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
    if not os.path.isdir(ds_checkpoint_dir):
        raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")

    return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
+
+
def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None, exclude_frozen_parameters=False):
    """Consolidate a ZeRO 2 or 3 checkpoint into one fp32 ``state_dict`` file on disk.

    The resulting file can be loaded with ``torch.load(file)`` + ``load_state_dict()``
    and used for training without DeepSpeed.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
        - ``exclude_frozen_parameters``: exclude frozen parameters
    """
    consolidated = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag, exclude_frozen_parameters)
    print(f"Saving fp32 state dict to {output_file}")
    torch.save(consolidated, output_file)
+
+
def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
    """Consolidate a ZeRO 2/3 checkpoint to fp32 and load it into ``model``.

    Steps performed:

    1. Put the provided model to cpu
    2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
    3. Load it into the provided model

    Args:
        - ``model``: the model object to update
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``

    Returns:
        - ``model``: modified model

    Make sure you have plenty of CPU memory available before you call this function.
    If you don't have enough, use the ``zero_to_fp32.py`` utility to do the
    conversion. You will find it conveniently placed for you in the checkpoint folder.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
        model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
        # submit to model hub or save the model to share with others

    Note, that once this was run, the ``model`` will no longer be usable in the
    deepspeed context of the same application. i.e. you will need to re-initialize
    the deepspeed engine, since ``model.load_state_dict(state_dict)`` will remove
    all the deepspeed magic from it.

    """
    logger.info("Extracting fp32 weights")
    fp32_state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)

    logger.info("Overwriting model with fp32 weights")
    model = model.cpu()
    # strict=False: the consolidated dict may omit buffers/params the wrapper added
    model.load_state_dict(fp32_state_dict, strict=False)

    return model
+
+
if __name__ == "__main__":
    # Command-line front-end: consolidate a DeepSpeed ZeRO checkpoint into a
    # single fp32 pytorch state_dict file.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "checkpoint_dir",
        type=str,
        help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
    parser.add_argument(
        "output_file",
        type=str,
        help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)")
    parser.add_argument(
        "-t", "--tag",
        type=str,
        default=None,
        help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
    parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
    parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
    args = parser.parse_args()

    # Module-level flag consumed by the conversion helpers for verbose output.
    debug = args.debug

    convert_zero_checkpoint_to_fp32_state_dict(
        args.checkpoint_dir,
        args.output_file,
        tag=args.tag,
        exclude_frozen_parameters=args.exclude_frozen_parameters)
diff --git a/checkpoint-850/README.md b/checkpoint-850/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..16b1eacdd9353dec380a08ee77ce6ed5ab50f12e
--- /dev/null
+++ b/checkpoint-850/README.md
@@ -0,0 +1,202 @@
+---
+library_name: peft
+base_model: gotzmann/uni
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+
+### Framework versions
+
+- PEFT 0.10.0
\ No newline at end of file
diff --git a/checkpoint-850/adapter_config.json b/checkpoint-850/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..832188d72d81e59dd2b5259e86f371199b441aca
--- /dev/null
+++ b/checkpoint-850/adapter_config.json
@@ -0,0 +1,31 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "gotzmann/uni",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 128,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 128,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "o_proj",
+ "k_proj",
+ "q_proj",
+ "v_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": true
+}
\ No newline at end of file
diff --git a/checkpoint-850/adapter_model.safetensors b/checkpoint-850/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..f4477cadfe1a6896201937acdd0279c467421ba7
--- /dev/null
+++ b/checkpoint-850/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0a24db9b562bcecc0a16772cf5beb9986aa18dcec3d9e061d2564a990a2defff
+size 1048664848
diff --git a/checkpoint-850/global_step850/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt b/checkpoint-850/global_step850/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..5821fc3be3ff89dc4437b7fdebfcddc2203888e9
--- /dev/null
+++ b/checkpoint-850/global_step850/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b094a344753b2dd9453905dee7ba19ab5579b0fd97f0736748401cdcb725c03f
+size 787270042
diff --git a/checkpoint-850/global_step850/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt b/checkpoint-850/global_step850/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..ef6882de0e60be4e0d25f14c77bdd62b034c0cee
--- /dev/null
+++ b/checkpoint-850/global_step850/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:15512858904ed80ac96751a57ab6d1389b6fa1cf23f083e510effe23f73449d7
+size 787270042
diff --git a/checkpoint-850/global_step850/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt b/checkpoint-850/global_step850/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..5d044be17aaeec22100ef3c56a0b0a0779a1dc2b
--- /dev/null
+++ b/checkpoint-850/global_step850/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ce1d8368380f8f11bbb04be5eba4f6fd44e9113dd75241a10f1ea2b9bb769d87
+size 787270042
diff --git a/checkpoint-850/global_step850/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt b/checkpoint-850/global_step850/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..f1d1d4f607353077270163839626297786db503e
--- /dev/null
+++ b/checkpoint-850/global_step850/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:96f3d36c71dbaa47b45b1414fb351580f28c4a259b4aad5b89d16b2869aa49cd
+size 787270042
diff --git a/checkpoint-850/global_step850/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt b/checkpoint-850/global_step850/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..406e99433fa65969d7852fee72b071b1f4061198
--- /dev/null
+++ b/checkpoint-850/global_step850/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:996fd2d3d1da74eddc9a3e03696b24aaa98ca32e74df017291e45418e74e2a1d
+size 787270042
diff --git a/checkpoint-850/global_step850/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt b/checkpoint-850/global_step850/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..9ead6f116901a184f51c2e45c35538045d1676e0
--- /dev/null
+++ b/checkpoint-850/global_step850/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:770f29434ee8dc9d35ad14a8f1dcb202a82b34a3ef164b74d721c539b4bfc572
+size 787270042
diff --git a/checkpoint-850/global_step850/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt b/checkpoint-850/global_step850/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..9c9f2303c2f5d2e2dfe75d08276c8dfcf725bf98
--- /dev/null
+++ b/checkpoint-850/global_step850/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4f796c99ad999667f8709b536277d74fb2dfc4b29aec43974e3f82d358707106
+size 787270042
diff --git a/checkpoint-850/global_step850/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt b/checkpoint-850/global_step850/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..66cb5089fc15b2de83ca2f0850468aec1806e3fe
--- /dev/null
+++ b/checkpoint-850/global_step850/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b5b595656aec082e53ab6112580cda2d0864a32ef3a04dc92e011ce170ffc134
+size 787270042
diff --git a/checkpoint-850/global_step850/zero_pp_rank_0_mp_rank_00_model_states.pt b/checkpoint-850/global_step850/zero_pp_rank_0_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..4266102da870765feb8c6f4931dd09173cc4a635
--- /dev/null
+++ b/checkpoint-850/global_step850/zero_pp_rank_0_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3c4636383f7f0687a09f235b47de5324f37846c740b1f4d010d36adad52ee512
+size 653742
diff --git a/checkpoint-850/global_step850/zero_pp_rank_1_mp_rank_00_model_states.pt b/checkpoint-850/global_step850/zero_pp_rank_1_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..11a7e399cbaeea9b1d0221d94e9a5aeef34ef063
--- /dev/null
+++ b/checkpoint-850/global_step850/zero_pp_rank_1_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3059c7b370b27cc5e627a87a08c4b19e7cd3639b7a839379299724a3ab0d9361
+size 653742
diff --git a/checkpoint-850/global_step850/zero_pp_rank_2_mp_rank_00_model_states.pt b/checkpoint-850/global_step850/zero_pp_rank_2_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..eeb138a36fcfd340418b4d32cb974f13b7697694
--- /dev/null
+++ b/checkpoint-850/global_step850/zero_pp_rank_2_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c711e98fb08c163203dc87b78751464a86ead4b2fd198f8789cc169707d4777d
+size 653742
diff --git a/checkpoint-850/global_step850/zero_pp_rank_3_mp_rank_00_model_states.pt b/checkpoint-850/global_step850/zero_pp_rank_3_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..25032ea96c6490ce3d796172a32126864cdf416d
--- /dev/null
+++ b/checkpoint-850/global_step850/zero_pp_rank_3_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f780e605edce29c8a4c8cabf71af90b8ab02bc1a9ff4d499a57f7bc8bddf73ae
+size 653742
diff --git a/checkpoint-850/global_step850/zero_pp_rank_4_mp_rank_00_model_states.pt b/checkpoint-850/global_step850/zero_pp_rank_4_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..21daac9e3b0da385f2b5d5c010418cf2ac2c471b
--- /dev/null
+++ b/checkpoint-850/global_step850/zero_pp_rank_4_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7a80187552ff311ab5f3292c94b850ebeab169f3a8accf72fb9d556dd3895e7a
+size 653742
diff --git a/checkpoint-850/global_step850/zero_pp_rank_5_mp_rank_00_model_states.pt b/checkpoint-850/global_step850/zero_pp_rank_5_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..11c0875a0ca935cb9c9f02b372af124165e9bbfe
--- /dev/null
+++ b/checkpoint-850/global_step850/zero_pp_rank_5_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2879bde5625128d1ce9d91bb79a25eb3ea12549772c9df8f8c4c89025fffd9dc
+size 653742
diff --git a/checkpoint-850/global_step850/zero_pp_rank_6_mp_rank_00_model_states.pt b/checkpoint-850/global_step850/zero_pp_rank_6_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..7e9d7004a780769cfd567a5bf98afc7f08c661ea
--- /dev/null
+++ b/checkpoint-850/global_step850/zero_pp_rank_6_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2ad50c5a7d9b587b16f3ae5dfcd43af013ddb103d983b5c5638fc3329d553c91
+size 653742
diff --git a/checkpoint-850/global_step850/zero_pp_rank_7_mp_rank_00_model_states.pt b/checkpoint-850/global_step850/zero_pp_rank_7_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..e65614fbda6c94141248f18d46b6f71f66a72809
--- /dev/null
+++ b/checkpoint-850/global_step850/zero_pp_rank_7_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0fb0b6e0853703d42cf555fd9864d4db4c2b6d04f08be900fe24218069f955a9
+size 653742
diff --git a/checkpoint-850/latest b/checkpoint-850/latest
new file mode 100644
index 0000000000000000000000000000000000000000..3691022819d6dd7dd441445e8bf742e36ca808cd
--- /dev/null
+++ b/checkpoint-850/latest
@@ -0,0 +1 @@
+global_step850
\ No newline at end of file
diff --git a/checkpoint-850/rng_state_0.pth b/checkpoint-850/rng_state_0.pth
new file mode 100644
index 0000000000000000000000000000000000000000..9dd2a62da4ca83b3b986d96dbf0eaeb82207ca93
--- /dev/null
+++ b/checkpoint-850/rng_state_0.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a0628a9017696045a3a29e9eaffc71e9262d855716e773c0c3be760a1fe85bc8
+size 15984
diff --git a/checkpoint-850/rng_state_1.pth b/checkpoint-850/rng_state_1.pth
new file mode 100644
index 0000000000000000000000000000000000000000..1ba5f3aba4388a582cd47f7f9e57cd5879b1cbd2
--- /dev/null
+++ b/checkpoint-850/rng_state_1.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:df342004a4d8e3626bf2a9f689fde7c8bfd6d995e14931f5496eda1f456cb6f2
+size 15984
diff --git a/checkpoint-850/rng_state_2.pth b/checkpoint-850/rng_state_2.pth
new file mode 100644
index 0000000000000000000000000000000000000000..27b0f7845c2b9530c3e6ed3ce232ff4e86b86122
--- /dev/null
+++ b/checkpoint-850/rng_state_2.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f02096eb4e8850b91490e80e4a042e2e60f71bd2abc6a269d62c271649cb77d2
+size 15984
diff --git a/checkpoint-850/rng_state_3.pth b/checkpoint-850/rng_state_3.pth
new file mode 100644
index 0000000000000000000000000000000000000000..fcfb583fc43c6dd4395671708744cfd18c419970
--- /dev/null
+++ b/checkpoint-850/rng_state_3.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:326c778d3d0e7e3d5665fa0a9ecd92986609c430da08b41611d6c05dc19815a8
+size 15984
diff --git a/checkpoint-850/rng_state_4.pth b/checkpoint-850/rng_state_4.pth
new file mode 100644
index 0000000000000000000000000000000000000000..7a8c64b1f15ac655b2be2a42fe61cabe2a877704
--- /dev/null
+++ b/checkpoint-850/rng_state_4.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d978dcb0c34e022ee6750e9d86814b8c82e4965d7e07662f35f06eeac12938f3
+size 15984
diff --git a/checkpoint-850/rng_state_5.pth b/checkpoint-850/rng_state_5.pth
new file mode 100644
index 0000000000000000000000000000000000000000..262e8187e6caeca12ef3b0aa923b12afd697e03d
--- /dev/null
+++ b/checkpoint-850/rng_state_5.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:01e83399aed1d9d173c3e07b2efa8530c956b62b2b68394c2ed0d43bd8bba9d1
+size 15984
diff --git a/checkpoint-850/rng_state_6.pth b/checkpoint-850/rng_state_6.pth
new file mode 100644
index 0000000000000000000000000000000000000000..72f794e31f8d3e0c63972e5076e1ed90c52087ba
--- /dev/null
+++ b/checkpoint-850/rng_state_6.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:606ab3ca92e3d20c327c69fdcce7f7e39bec2f2c3538b036088b255f917e3ba4
+size 15984
diff --git a/checkpoint-850/rng_state_7.pth b/checkpoint-850/rng_state_7.pth
new file mode 100644
index 0000000000000000000000000000000000000000..244e7fdaa1cef2e82bd4e16afb10f32f68318bcc
--- /dev/null
+++ b/checkpoint-850/rng_state_7.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1276a987dd22c9093fec58921ba19f340a28f18bff635cc01324e09a3c37ac3a
+size 15984
diff --git a/checkpoint-850/scheduler.pt b/checkpoint-850/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..58ac1ab9bfba8e8a2e6e6e316e2f5c7c070cb178
--- /dev/null
+++ b/checkpoint-850/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3c4b4462e080a5c39faf9317093c20bf1f40a2d57c50836da1f781d634a5c527
+size 1064
diff --git a/checkpoint-850/special_tokens_map.json b/checkpoint-850/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..72ecfeeb7e14d244c936169d2ed139eeae235ef1
--- /dev/null
+++ b/checkpoint-850/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "",
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/checkpoint-850/tokenizer.model b/checkpoint-850/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..6c00c742ce03c627d6cd5b795984876fa49fa899
--- /dev/null
+++ b/checkpoint-850/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
diff --git a/checkpoint-850/tokenizer_config.json b/checkpoint-850/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..bb5a9f09d8c0f3c32c66fc6118fe5c76c5c6fd90
--- /dev/null
+++ b/checkpoint-850/tokenizer_config.json
@@ -0,0 +1,45 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "",
+ "chat_template": "{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ '' + '### System:\\n\\n' + system_message }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '\\n\\n### Human:\\n\\n' + content }}{% elif message['role'] == 'assistant' %}{{ '\\n\\n### Assistant:\\n\\n' + content + '' }}{% endif %}{% endfor %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "",
+ "legacy": false,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "split_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "",
+ "use_default_system_prompt": false
+}
diff --git a/checkpoint-850/trainer_state.json b/checkpoint-850/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..00833dcaff44143877e57dd222998795b26c9cd7
--- /dev/null
+++ b/checkpoint-850/trainer_state.json
@@ -0,0 +1,5971 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 1.0914494741655236,
+ "eval_steps": 500,
+ "global_step": 850,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.0,
+ "grad_norm": 0.849355824164473,
+ "learning_rate": 4.878048780487805e-07,
+ "loss": 1.3655,
+ "step": 1
+ },
+ {
+ "epoch": 0.0,
+ "grad_norm": 10.01567518957158,
+ "learning_rate": 9.75609756097561e-07,
+ "loss": 1.5767,
+ "step": 2
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.6466000875559635,
+ "learning_rate": 1.4634146341463414e-06,
+ "loss": 1.3913,
+ "step": 3
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.6644565932010504,
+ "learning_rate": 1.951219512195122e-06,
+ "loss": 1.3218,
+ "step": 4
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.571354207588475,
+ "learning_rate": 2.4390243902439027e-06,
+ "loss": 1.3597,
+ "step": 5
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.31036262839244955,
+ "learning_rate": 2.926829268292683e-06,
+ "loss": 1.2832,
+ "step": 6
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.2622135027188184,
+ "learning_rate": 3.414634146341464e-06,
+ "loss": 1.2161,
+ "step": 7
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.296824630261661,
+ "learning_rate": 3.902439024390244e-06,
+ "loss": 1.2985,
+ "step": 8
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.2557267467361569,
+ "learning_rate": 4.390243902439025e-06,
+ "loss": 1.3175,
+ "step": 9
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.23418939513890769,
+ "learning_rate": 4.8780487804878055e-06,
+ "loss": 1.2617,
+ "step": 10
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.2364760983285843,
+ "learning_rate": 5.365853658536586e-06,
+ "loss": 1.3103,
+ "step": 11
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.23893034721889,
+ "learning_rate": 5.853658536585366e-06,
+ "loss": 1.2405,
+ "step": 12
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.25563593295485887,
+ "learning_rate": 6.341463414634147e-06,
+ "loss": 1.2831,
+ "step": 13
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.23239975352661665,
+ "learning_rate": 6.829268292682928e-06,
+ "loss": 1.3125,
+ "step": 14
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.3092813858209507,
+ "learning_rate": 7.317073170731707e-06,
+ "loss": 1.2422,
+ "step": 15
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.282563380367434,
+ "learning_rate": 7.804878048780489e-06,
+ "loss": 1.2453,
+ "step": 16
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.22065680088315018,
+ "learning_rate": 8.292682926829268e-06,
+ "loss": 1.2491,
+ "step": 17
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.22777800877980184,
+ "learning_rate": 8.78048780487805e-06,
+ "loss": 1.2655,
+ "step": 18
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.22145212540177928,
+ "learning_rate": 9.268292682926831e-06,
+ "loss": 1.2413,
+ "step": 19
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.22482351883112714,
+ "learning_rate": 9.756097560975611e-06,
+ "loss": 1.2653,
+ "step": 20
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.20823080508385733,
+ "learning_rate": 1.024390243902439e-05,
+ "loss": 1.2374,
+ "step": 21
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.26025492562935737,
+ "learning_rate": 1.0731707317073172e-05,
+ "loss": 1.2065,
+ "step": 22
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.2150252124176173,
+ "learning_rate": 1.1219512195121953e-05,
+ "loss": 1.2782,
+ "step": 23
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.2505915177425618,
+ "learning_rate": 1.1707317073170731e-05,
+ "loss": 1.2742,
+ "step": 24
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.20129223044786942,
+ "learning_rate": 1.2195121951219513e-05,
+ "loss": 1.3366,
+ "step": 25
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.1973508510397107,
+ "learning_rate": 1.2682926829268294e-05,
+ "loss": 1.2476,
+ "step": 26
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.27103325392437194,
+ "learning_rate": 1.3170731707317076e-05,
+ "loss": 1.2325,
+ "step": 27
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.17954976411006285,
+ "learning_rate": 1.3658536585365855e-05,
+ "loss": 1.2523,
+ "step": 28
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.22216997851088888,
+ "learning_rate": 1.4146341463414635e-05,
+ "loss": 1.3297,
+ "step": 29
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.2071458864548587,
+ "learning_rate": 1.4634146341463415e-05,
+ "loss": 1.2127,
+ "step": 30
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.18039422081622164,
+ "learning_rate": 1.5121951219512196e-05,
+ "loss": 1.2509,
+ "step": 31
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.18631254372974412,
+ "learning_rate": 1.5609756097560978e-05,
+ "loss": 1.2247,
+ "step": 32
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.18843872523649827,
+ "learning_rate": 1.6097560975609757e-05,
+ "loss": 1.195,
+ "step": 33
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.2163847267778325,
+ "learning_rate": 1.6585365853658537e-05,
+ "loss": 1.2179,
+ "step": 34
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.19687688475496104,
+ "learning_rate": 1.7073170731707317e-05,
+ "loss": 1.2763,
+ "step": 35
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.20409643064887947,
+ "learning_rate": 1.75609756097561e-05,
+ "loss": 1.253,
+ "step": 36
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.1879182661759335,
+ "learning_rate": 1.804878048780488e-05,
+ "loss": 1.2586,
+ "step": 37
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.19400648948514373,
+ "learning_rate": 1.8536585365853663e-05,
+ "loss": 1.2154,
+ "step": 38
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.1878879343148452,
+ "learning_rate": 1.902439024390244e-05,
+ "loss": 1.2304,
+ "step": 39
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.17687475469924052,
+ "learning_rate": 1.9512195121951222e-05,
+ "loss": 1.2351,
+ "step": 40
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.18223935625384885,
+ "learning_rate": 2e-05,
+ "loss": 1.2222,
+ "step": 41
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.1943061629408338,
+ "learning_rate": 2.048780487804878e-05,
+ "loss": 1.2044,
+ "step": 42
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.17027514338700078,
+ "learning_rate": 2.0975609756097564e-05,
+ "loss": 1.1548,
+ "step": 43
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.18553769630586192,
+ "learning_rate": 2.1463414634146344e-05,
+ "loss": 1.2721,
+ "step": 44
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.19732826914228765,
+ "learning_rate": 2.1951219512195124e-05,
+ "loss": 1.3097,
+ "step": 45
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.18714230986631472,
+ "learning_rate": 2.2439024390243907e-05,
+ "loss": 1.2662,
+ "step": 46
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.19988987568002223,
+ "learning_rate": 2.2926829268292683e-05,
+ "loss": 1.2904,
+ "step": 47
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.17744650133390918,
+ "learning_rate": 2.3414634146341463e-05,
+ "loss": 1.1825,
+ "step": 48
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.16576734763834533,
+ "learning_rate": 2.3902439024390246e-05,
+ "loss": 1.1858,
+ "step": 49
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.179591794065527,
+ "learning_rate": 2.4390243902439026e-05,
+ "loss": 1.2711,
+ "step": 50
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.17923464471176911,
+ "learning_rate": 2.4878048780487805e-05,
+ "loss": 1.2289,
+ "step": 51
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.18991742907836837,
+ "learning_rate": 2.536585365853659e-05,
+ "loss": 1.3097,
+ "step": 52
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.19849796137254636,
+ "learning_rate": 2.5853658536585368e-05,
+ "loss": 1.2489,
+ "step": 53
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.17452371110976383,
+ "learning_rate": 2.634146341463415e-05,
+ "loss": 1.2461,
+ "step": 54
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.17671022353085036,
+ "learning_rate": 2.682926829268293e-05,
+ "loss": 1.153,
+ "step": 55
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.36820559192096686,
+ "learning_rate": 2.731707317073171e-05,
+ "loss": 1.2431,
+ "step": 56
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.20331468526494198,
+ "learning_rate": 2.7804878048780487e-05,
+ "loss": 1.2575,
+ "step": 57
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.2402486598118377,
+ "learning_rate": 2.829268292682927e-05,
+ "loss": 1.2538,
+ "step": 58
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.2549409484173144,
+ "learning_rate": 2.878048780487805e-05,
+ "loss": 1.2065,
+ "step": 59
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.2053105349872685,
+ "learning_rate": 2.926829268292683e-05,
+ "loss": 1.2094,
+ "step": 60
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.17971910872957886,
+ "learning_rate": 2.9756097560975613e-05,
+ "loss": 1.228,
+ "step": 61
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.1885853654992973,
+ "learning_rate": 3.0243902439024392e-05,
+ "loss": 1.2286,
+ "step": 62
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.1848524571968613,
+ "learning_rate": 3.073170731707317e-05,
+ "loss": 1.2718,
+ "step": 63
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.18734105883548513,
+ "learning_rate": 3.1219512195121955e-05,
+ "loss": 1.2357,
+ "step": 64
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.17774668052121825,
+ "learning_rate": 3.170731707317074e-05,
+ "loss": 1.1509,
+ "step": 65
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.17890968008080646,
+ "learning_rate": 3.2195121951219514e-05,
+ "loss": 1.1924,
+ "step": 66
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.18249273371332375,
+ "learning_rate": 3.268292682926829e-05,
+ "loss": 1.2545,
+ "step": 67
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.21064122671902577,
+ "learning_rate": 3.3170731707317074e-05,
+ "loss": 1.2832,
+ "step": 68
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.1820064171955093,
+ "learning_rate": 3.365853658536586e-05,
+ "loss": 1.2071,
+ "step": 69
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.16996662800553433,
+ "learning_rate": 3.414634146341463e-05,
+ "loss": 1.2073,
+ "step": 70
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.1618669302922445,
+ "learning_rate": 3.4634146341463416e-05,
+ "loss": 1.1289,
+ "step": 71
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.18948744950985544,
+ "learning_rate": 3.51219512195122e-05,
+ "loss": 1.2915,
+ "step": 72
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.18326143691603383,
+ "learning_rate": 3.5609756097560976e-05,
+ "loss": 1.2238,
+ "step": 73
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.17410704510700503,
+ "learning_rate": 3.609756097560976e-05,
+ "loss": 1.1784,
+ "step": 74
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.1983667344995625,
+ "learning_rate": 3.658536585365854e-05,
+ "loss": 1.2452,
+ "step": 75
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.3416310763369357,
+ "learning_rate": 3.7073170731707325e-05,
+ "loss": 1.1972,
+ "step": 76
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.2776466983511955,
+ "learning_rate": 3.75609756097561e-05,
+ "loss": 1.3121,
+ "step": 77
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.20026129636576834,
+ "learning_rate": 3.804878048780488e-05,
+ "loss": 1.2436,
+ "step": 78
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.21064549243917835,
+ "learning_rate": 3.853658536585366e-05,
+ "loss": 1.2064,
+ "step": 79
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.22119482175714267,
+ "learning_rate": 3.9024390243902444e-05,
+ "loss": 1.2715,
+ "step": 80
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.23047133748844142,
+ "learning_rate": 3.951219512195122e-05,
+ "loss": 1.2888,
+ "step": 81
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.18741863156973176,
+ "learning_rate": 4e-05,
+ "loss": 1.248,
+ "step": 82
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.1747859810629604,
+ "learning_rate": 4.0487804878048786e-05,
+ "loss": 1.1683,
+ "step": 83
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.1896944798413341,
+ "learning_rate": 4.097560975609756e-05,
+ "loss": 1.2155,
+ "step": 84
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.18724128114363303,
+ "learning_rate": 4.1463414634146346e-05,
+ "loss": 1.2273,
+ "step": 85
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.17368125504855478,
+ "learning_rate": 4.195121951219513e-05,
+ "loss": 1.224,
+ "step": 86
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.18371141013625703,
+ "learning_rate": 4.2439024390243905e-05,
+ "loss": 1.2294,
+ "step": 87
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.1791029365673714,
+ "learning_rate": 4.292682926829269e-05,
+ "loss": 1.2895,
+ "step": 88
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.20259974283859655,
+ "learning_rate": 4.341463414634147e-05,
+ "loss": 1.1841,
+ "step": 89
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.17457456183272174,
+ "learning_rate": 4.390243902439025e-05,
+ "loss": 1.2357,
+ "step": 90
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.1815824380789748,
+ "learning_rate": 4.439024390243903e-05,
+ "loss": 1.2304,
+ "step": 91
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.17566480599583392,
+ "learning_rate": 4.4878048780487814e-05,
+ "loss": 1.242,
+ "step": 92
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.18422975005984474,
+ "learning_rate": 4.536585365853658e-05,
+ "loss": 1.2177,
+ "step": 93
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.16796781877940678,
+ "learning_rate": 4.5853658536585366e-05,
+ "loss": 1.1482,
+ "step": 94
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.18636131653783305,
+ "learning_rate": 4.634146341463415e-05,
+ "loss": 1.1758,
+ "step": 95
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.1823665700289814,
+ "learning_rate": 4.6829268292682926e-05,
+ "loss": 1.289,
+ "step": 96
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.1719900691262439,
+ "learning_rate": 4.731707317073171e-05,
+ "loss": 1.1626,
+ "step": 97
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.17937994168039778,
+ "learning_rate": 4.780487804878049e-05,
+ "loss": 1.175,
+ "step": 98
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.16631851422106986,
+ "learning_rate": 4.829268292682927e-05,
+ "loss": 1.2177,
+ "step": 99
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.19143696232800309,
+ "learning_rate": 4.878048780487805e-05,
+ "loss": 1.3071,
+ "step": 100
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.17859506638780318,
+ "learning_rate": 4.9268292682926835e-05,
+ "loss": 1.2351,
+ "step": 101
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.18381520321248196,
+ "learning_rate": 4.975609756097561e-05,
+ "loss": 1.2342,
+ "step": 102
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.17968218683773912,
+ "learning_rate": 5.0243902439024394e-05,
+ "loss": 1.2074,
+ "step": 103
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.18139489969339018,
+ "learning_rate": 5.073170731707318e-05,
+ "loss": 1.1558,
+ "step": 104
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.17366624842514394,
+ "learning_rate": 5.121951219512195e-05,
+ "loss": 1.1897,
+ "step": 105
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.16034845455223745,
+ "learning_rate": 5.1707317073170736e-05,
+ "loss": 1.179,
+ "step": 106
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.17583069577827776,
+ "learning_rate": 5.219512195121952e-05,
+ "loss": 1.1856,
+ "step": 107
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.1853758076989552,
+ "learning_rate": 5.26829268292683e-05,
+ "loss": 1.2072,
+ "step": 108
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.19597443965936462,
+ "learning_rate": 5.317073170731708e-05,
+ "loss": 1.2271,
+ "step": 109
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.1899206334098331,
+ "learning_rate": 5.365853658536586e-05,
+ "loss": 1.1961,
+ "step": 110
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.17463763837757018,
+ "learning_rate": 5.4146341463414645e-05,
+ "loss": 1.2049,
+ "step": 111
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.20431371701229986,
+ "learning_rate": 5.463414634146342e-05,
+ "loss": 1.2891,
+ "step": 112
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1814475107638498,
+ "learning_rate": 5.51219512195122e-05,
+ "loss": 1.2346,
+ "step": 113
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1883849423207823,
+ "learning_rate": 5.5609756097560974e-05,
+ "loss": 1.244,
+ "step": 114
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1857258128640568,
+ "learning_rate": 5.609756097560976e-05,
+ "loss": 1.2669,
+ "step": 115
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1740768514118401,
+ "learning_rate": 5.658536585365854e-05,
+ "loss": 1.2414,
+ "step": 116
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.1919320335584178,
+ "learning_rate": 5.7073170731707317e-05,
+ "loss": 1.2886,
+ "step": 117
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.18288775167828136,
+ "learning_rate": 5.75609756097561e-05,
+ "loss": 1.1875,
+ "step": 118
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.18208588867750863,
+ "learning_rate": 5.804878048780488e-05,
+ "loss": 1.2388,
+ "step": 119
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.1743260015658331,
+ "learning_rate": 5.853658536585366e-05,
+ "loss": 1.1762,
+ "step": 120
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.17856046291517946,
+ "learning_rate": 5.902439024390244e-05,
+ "loss": 1.2888,
+ "step": 121
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.17493794870966536,
+ "learning_rate": 5.9512195121951225e-05,
+ "loss": 1.2222,
+ "step": 122
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.1909202655203384,
+ "learning_rate": 6.000000000000001e-05,
+ "loss": 1.2414,
+ "step": 123
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.18345819482834988,
+ "learning_rate": 6.0487804878048785e-05,
+ "loss": 1.2756,
+ "step": 124
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.2057069352956621,
+ "learning_rate": 6.097560975609757e-05,
+ "loss": 1.261,
+ "step": 125
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.299775882469108,
+ "learning_rate": 6.146341463414634e-05,
+ "loss": 1.2566,
+ "step": 126
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.1869687633018095,
+ "learning_rate": 6.195121951219513e-05,
+ "loss": 1.3039,
+ "step": 127
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.17747149926197442,
+ "learning_rate": 6.243902439024391e-05,
+ "loss": 1.2524,
+ "step": 128
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.17885157788044242,
+ "learning_rate": 6.29268292682927e-05,
+ "loss": 1.2455,
+ "step": 129
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.17617298187845123,
+ "learning_rate": 6.341463414634148e-05,
+ "loss": 1.2009,
+ "step": 130
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.20164176323497066,
+ "learning_rate": 6.390243902439025e-05,
+ "loss": 1.2634,
+ "step": 131
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.20459903417307612,
+ "learning_rate": 6.439024390243903e-05,
+ "loss": 1.1963,
+ "step": 132
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.1863755486334296,
+ "learning_rate": 6.487804878048781e-05,
+ "loss": 1.2387,
+ "step": 133
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.19265866140295207,
+ "learning_rate": 6.536585365853658e-05,
+ "loss": 1.2688,
+ "step": 134
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.1823425868969493,
+ "learning_rate": 6.585365853658536e-05,
+ "loss": 1.2041,
+ "step": 135
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.2016853266472781,
+ "learning_rate": 6.634146341463415e-05,
+ "loss": 1.1223,
+ "step": 136
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.17282675192463448,
+ "learning_rate": 6.682926829268293e-05,
+ "loss": 1.1879,
+ "step": 137
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.17398811693399288,
+ "learning_rate": 6.731707317073171e-05,
+ "loss": 1.2682,
+ "step": 138
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.18516916965434696,
+ "learning_rate": 6.78048780487805e-05,
+ "loss": 1.1666,
+ "step": 139
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.1852213129647933,
+ "learning_rate": 6.829268292682927e-05,
+ "loss": 1.2501,
+ "step": 140
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.17915948766591883,
+ "learning_rate": 6.878048780487805e-05,
+ "loss": 1.2264,
+ "step": 141
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.21599939417233183,
+ "learning_rate": 6.926829268292683e-05,
+ "loss": 1.2376,
+ "step": 142
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.17839304459521851,
+ "learning_rate": 6.975609756097562e-05,
+ "loss": 1.2353,
+ "step": 143
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.20826913231380875,
+ "learning_rate": 7.02439024390244e-05,
+ "loss": 1.1901,
+ "step": 144
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.20788894913361589,
+ "learning_rate": 7.073170731707318e-05,
+ "loss": 1.2577,
+ "step": 145
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.18420055842301297,
+ "learning_rate": 7.121951219512195e-05,
+ "loss": 1.1393,
+ "step": 146
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.19903048468685589,
+ "learning_rate": 7.170731707317073e-05,
+ "loss": 1.2321,
+ "step": 147
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.19074116314985748,
+ "learning_rate": 7.219512195121952e-05,
+ "loss": 1.1912,
+ "step": 148
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.2353816469403903,
+ "learning_rate": 7.26829268292683e-05,
+ "loss": 1.28,
+ "step": 149
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.21634875684769345,
+ "learning_rate": 7.317073170731708e-05,
+ "loss": 1.3312,
+ "step": 150
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.18290969006743918,
+ "learning_rate": 7.365853658536587e-05,
+ "loss": 1.2214,
+ "step": 151
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.18484243897545208,
+ "learning_rate": 7.414634146341465e-05,
+ "loss": 1.1895,
+ "step": 152
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.21882343112978872,
+ "learning_rate": 7.463414634146342e-05,
+ "loss": 1.2219,
+ "step": 153
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.19868284379241205,
+ "learning_rate": 7.51219512195122e-05,
+ "loss": 1.2176,
+ "step": 154
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.20912516312950613,
+ "learning_rate": 7.560975609756097e-05,
+ "loss": 1.242,
+ "step": 155
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.23811880045549916,
+ "learning_rate": 7.609756097560976e-05,
+ "loss": 1.2838,
+ "step": 156
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.19511077122033713,
+ "learning_rate": 7.658536585365854e-05,
+ "loss": 1.1594,
+ "step": 157
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.20094129399534238,
+ "learning_rate": 7.707317073170732e-05,
+ "loss": 1.2966,
+ "step": 158
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.19366245038292418,
+ "learning_rate": 7.75609756097561e-05,
+ "loss": 1.2246,
+ "step": 159
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.19409570223867306,
+ "learning_rate": 7.804878048780489e-05,
+ "loss": 1.2312,
+ "step": 160
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.2087258457033805,
+ "learning_rate": 7.853658536585366e-05,
+ "loss": 1.2169,
+ "step": 161
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.18765223996270428,
+ "learning_rate": 7.902439024390244e-05,
+ "loss": 1.2383,
+ "step": 162
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.20734180224147242,
+ "learning_rate": 7.951219512195122e-05,
+ "loss": 1.2587,
+ "step": 163
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.24690929540287834,
+ "learning_rate": 8e-05,
+ "loss": 1.1951,
+ "step": 164
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.2003538797619543,
+ "learning_rate": 7.999990914797545e-05,
+ "loss": 1.1982,
+ "step": 165
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.22469075613510484,
+ "learning_rate": 7.99996365923145e-05,
+ "loss": 1.2355,
+ "step": 166
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.21870100788336058,
+ "learning_rate": 7.999918233425526e-05,
+ "loss": 1.1103,
+ "step": 167
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.20939989594131886,
+ "learning_rate": 7.999854637586122e-05,
+ "loss": 1.1966,
+ "step": 168
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.43108211416237796,
+ "learning_rate": 7.999772872002132e-05,
+ "loss": 1.2882,
+ "step": 169
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.27045413432174487,
+ "learning_rate": 7.999672937044984e-05,
+ "loss": 1.2399,
+ "step": 170
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.19700483036740515,
+ "learning_rate": 7.999554833168642e-05,
+ "loss": 1.202,
+ "step": 171
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.3335979493370708,
+ "learning_rate": 7.999418560909604e-05,
+ "loss": 1.1995,
+ "step": 172
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.3165803974474567,
+ "learning_rate": 7.999264120886902e-05,
+ "loss": 1.1569,
+ "step": 173
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.1951699080346223,
+ "learning_rate": 7.999091513802093e-05,
+ "loss": 1.1778,
+ "step": 174
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.2087559121749787,
+ "learning_rate": 7.998900740439265e-05,
+ "loss": 1.1736,
+ "step": 175
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.20345180977460478,
+ "learning_rate": 7.998691801665024e-05,
+ "loss": 1.2281,
+ "step": 176
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.24617644827252333,
+ "learning_rate": 7.998464698428495e-05,
+ "loss": 1.2072,
+ "step": 177
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.2469050959356265,
+ "learning_rate": 7.998219431761318e-05,
+ "loss": 1.2242,
+ "step": 178
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.19529317748460623,
+ "learning_rate": 7.997956002777642e-05,
+ "loss": 1.2567,
+ "step": 179
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.19048389491381376,
+ "learning_rate": 7.99767441267412e-05,
+ "loss": 1.2982,
+ "step": 180
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.2085799116493225,
+ "learning_rate": 7.997374662729904e-05,
+ "loss": 1.1254,
+ "step": 181
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.20636853256378995,
+ "learning_rate": 7.997056754306636e-05,
+ "loss": 1.2435,
+ "step": 182
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.20590016382290252,
+ "learning_rate": 7.99672068884845e-05,
+ "loss": 1.2658,
+ "step": 183
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.1931166169764433,
+ "learning_rate": 7.996366467881955e-05,
+ "loss": 1.1637,
+ "step": 184
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.18873318157988098,
+ "learning_rate": 7.995994093016237e-05,
+ "loss": 1.1335,
+ "step": 185
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.19210254625199108,
+ "learning_rate": 7.995603565942846e-05,
+ "loss": 1.1928,
+ "step": 186
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.2130986479765664,
+ "learning_rate": 7.995194888435792e-05,
+ "loss": 1.2158,
+ "step": 187
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.22003854501814088,
+ "learning_rate": 7.994768062351532e-05,
+ "loss": 1.2288,
+ "step": 188
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.20330803191993058,
+ "learning_rate": 7.994323089628968e-05,
+ "loss": 1.2426,
+ "step": 189
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.20567314642208634,
+ "learning_rate": 7.993859972289434e-05,
+ "loss": 1.2649,
+ "step": 190
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.21556663727342962,
+ "learning_rate": 7.993378712436686e-05,
+ "loss": 1.2545,
+ "step": 191
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.20309165469109888,
+ "learning_rate": 7.992879312256897e-05,
+ "loss": 1.3338,
+ "step": 192
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.19574356669421325,
+ "learning_rate": 7.992361774018641e-05,
+ "loss": 1.278,
+ "step": 193
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.2763613746722313,
+ "learning_rate": 7.991826100072891e-05,
+ "loss": 1.2571,
+ "step": 194
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.19346552479915102,
+ "learning_rate": 7.991272292852996e-05,
+ "loss": 1.2027,
+ "step": 195
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.2281167812123908,
+ "learning_rate": 7.990700354874683e-05,
+ "loss": 1.2586,
+ "step": 196
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.19699013712137542,
+ "learning_rate": 7.990110288736042e-05,
+ "loss": 1.1371,
+ "step": 197
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.21768209981475933,
+ "learning_rate": 7.989502097117503e-05,
+ "loss": 1.2522,
+ "step": 198
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.21335427847754582,
+ "learning_rate": 7.988875782781838e-05,
+ "loss": 1.2437,
+ "step": 199
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.21856710629066897,
+ "learning_rate": 7.988231348574147e-05,
+ "loss": 1.2135,
+ "step": 200
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.20482062658774797,
+ "learning_rate": 7.987568797421836e-05,
+ "loss": 1.1755,
+ "step": 201
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.2017756813960897,
+ "learning_rate": 7.986888132334608e-05,
+ "loss": 1.1699,
+ "step": 202
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.20496443848153809,
+ "learning_rate": 7.986189356404458e-05,
+ "loss": 1.2125,
+ "step": 203
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.2134603800558358,
+ "learning_rate": 7.985472472805643e-05,
+ "loss": 1.2391,
+ "step": 204
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.2364175573420861,
+ "learning_rate": 7.98473748479468e-05,
+ "loss": 1.2384,
+ "step": 205
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.1872419861598724,
+ "learning_rate": 7.983984395710326e-05,
+ "loss": 1.1457,
+ "step": 206
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.28222194007095774,
+ "learning_rate": 7.983213208973566e-05,
+ "loss": 1.2952,
+ "step": 207
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.1916094851162064,
+ "learning_rate": 7.982423928087593e-05,
+ "loss": 1.1763,
+ "step": 208
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.18446245256166657,
+ "learning_rate": 7.981616556637795e-05,
+ "loss": 1.1863,
+ "step": 209
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.195191961022491,
+ "learning_rate": 7.980791098291737e-05,
+ "loss": 1.2036,
+ "step": 210
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.2652439657825496,
+ "learning_rate": 7.979947556799151e-05,
+ "loss": 1.2834,
+ "step": 211
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.24308438957843412,
+ "learning_rate": 7.979085935991906e-05,
+ "loss": 1.234,
+ "step": 212
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.21294701043622016,
+ "learning_rate": 7.978206239784004e-05,
+ "loss": 1.3006,
+ "step": 213
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.25809277041859524,
+ "learning_rate": 7.977308472171553e-05,
+ "loss": 1.2272,
+ "step": 214
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.193463860107294,
+ "learning_rate": 7.976392637232754e-05,
+ "loss": 1.2295,
+ "step": 215
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.2150023760609626,
+ "learning_rate": 7.975458739127877e-05,
+ "loss": 1.2135,
+ "step": 216
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.22590495955605894,
+ "learning_rate": 7.974506782099253e-05,
+ "loss": 1.2532,
+ "step": 217
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.21023744668403702,
+ "learning_rate": 7.973536770471242e-05,
+ "loss": 1.2472,
+ "step": 218
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.2345749799511543,
+ "learning_rate": 7.972548708650218e-05,
+ "loss": 1.1791,
+ "step": 219
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.2158876734005217,
+ "learning_rate": 7.971542601124553e-05,
+ "loss": 1.2483,
+ "step": 220
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.29455339949432446,
+ "learning_rate": 7.970518452464593e-05,
+ "loss": 1.2894,
+ "step": 221
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.23983708730626851,
+ "learning_rate": 7.969476267322636e-05,
+ "loss": 1.271,
+ "step": 222
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.1922400905426158,
+ "learning_rate": 7.968416050432912e-05,
+ "loss": 1.2139,
+ "step": 223
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.2238136844422931,
+ "learning_rate": 7.967337806611568e-05,
+ "loss": 1.2655,
+ "step": 224
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.21230292828267672,
+ "learning_rate": 7.966241540756631e-05,
+ "loss": 1.2406,
+ "step": 225
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.26656119419070456,
+ "learning_rate": 7.965127257848004e-05,
+ "loss": 1.2595,
+ "step": 226
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.22381385502992684,
+ "learning_rate": 7.963994962947426e-05,
+ "loss": 1.1737,
+ "step": 227
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20056702203994298,
+ "learning_rate": 7.962844661198462e-05,
+ "loss": 1.1969,
+ "step": 228
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20148701321526885,
+ "learning_rate": 7.961676357826478e-05,
+ "loss": 1.2151,
+ "step": 229
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20034834807028637,
+ "learning_rate": 7.960490058138604e-05,
+ "loss": 1.1455,
+ "step": 230
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.21050838521846033,
+ "learning_rate": 7.959285767523732e-05,
+ "loss": 1.2223,
+ "step": 231
+ },
+ {
+ "epoch": 0.42,
+ "grad_norm": 0.20904772138969777,
+ "learning_rate": 7.95806349145247e-05,
+ "loss": 1.2534,
+ "step": 232
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.20307877304792957,
+ "learning_rate": 7.956823235477134e-05,
+ "loss": 1.1352,
+ "step": 233
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.20501105270897094,
+ "learning_rate": 7.95556500523171e-05,
+ "loss": 1.2031,
+ "step": 234
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.19800586972038586,
+ "learning_rate": 7.954288806431838e-05,
+ "loss": 1.2567,
+ "step": 235
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.2175102450594135,
+ "learning_rate": 7.952994644874777e-05,
+ "loss": 1.2538,
+ "step": 236
+ },
+ {
+ "epoch": 0.43,
+ "grad_norm": 0.22698189300067595,
+ "learning_rate": 7.951682526439391e-05,
+ "loss": 1.3088,
+ "step": 237
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.19208392014975315,
+ "learning_rate": 7.950352457086109e-05,
+ "loss": 1.2336,
+ "step": 238
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.27004086334319655,
+ "learning_rate": 7.949004442856905e-05,
+ "loss": 1.2012,
+ "step": 239
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.23420974954538043,
+ "learning_rate": 7.947638489875272e-05,
+ "loss": 1.2244,
+ "step": 240
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.20514399124802024,
+ "learning_rate": 7.946254604346186e-05,
+ "loss": 1.2548,
+ "step": 241
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.19334973602372896,
+ "learning_rate": 7.944852792556092e-05,
+ "loss": 1.2104,
+ "step": 242
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.1992640714537956,
+ "learning_rate": 7.943433060872858e-05,
+ "loss": 1.2628,
+ "step": 243
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.203284617090413,
+ "learning_rate": 7.941995415745761e-05,
+ "loss": 1.2002,
+ "step": 244
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.22795306969682058,
+ "learning_rate": 7.94053986370545e-05,
+ "loss": 1.2215,
+ "step": 245
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.20789041346838505,
+ "learning_rate": 7.939066411363915e-05,
+ "loss": 1.0998,
+ "step": 246
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.22354868884742066,
+ "learning_rate": 7.937575065414464e-05,
+ "loss": 1.2564,
+ "step": 247
+ },
+ {
+ "epoch": 0.45,
+ "grad_norm": 0.21176392726647736,
+ "learning_rate": 7.936065832631687e-05,
+ "loss": 1.2816,
+ "step": 248
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.19967179557235587,
+ "learning_rate": 7.934538719871427e-05,
+ "loss": 1.1961,
+ "step": 249
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.210819577350627,
+ "learning_rate": 7.932993734070747e-05,
+ "loss": 1.2167,
+ "step": 250
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.21537794551756187,
+ "learning_rate": 7.931430882247903e-05,
+ "loss": 1.2341,
+ "step": 251
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.22850872387256574,
+ "learning_rate": 7.929850171502304e-05,
+ "loss": 1.1686,
+ "step": 252
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.22380366415076383,
+ "learning_rate": 7.928251609014493e-05,
+ "loss": 1.1462,
+ "step": 253
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.22426923149036065,
+ "learning_rate": 7.926635202046102e-05,
+ "loss": 1.1792,
+ "step": 254
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.42082703321103965,
+ "learning_rate": 7.925000957939822e-05,
+ "loss": 1.2718,
+ "step": 255
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.2235432774854074,
+ "learning_rate": 7.92334888411937e-05,
+ "loss": 1.2598,
+ "step": 256
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.281644028934108,
+ "learning_rate": 7.92167898808946e-05,
+ "loss": 1.2205,
+ "step": 257
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.2037705143888748,
+ "learning_rate": 7.919991277435763e-05,
+ "loss": 1.1737,
+ "step": 258
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 0.20917419230028977,
+ "learning_rate": 7.918285759824879e-05,
+ "loss": 1.2035,
+ "step": 259
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.20510847570635518,
+ "learning_rate": 7.916562443004292e-05,
+ "loss": 1.2135,
+ "step": 260
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.25172483071092466,
+ "learning_rate": 7.914821334802342e-05,
+ "loss": 1.2218,
+ "step": 261
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.21102706700634313,
+ "learning_rate": 7.91306244312819e-05,
+ "loss": 1.1738,
+ "step": 262
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.22626060872645815,
+ "learning_rate": 7.911285775971781e-05,
+ "loss": 1.238,
+ "step": 263
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.22448567539778486,
+ "learning_rate": 7.909491341403805e-05,
+ "loss": 1.2404,
+ "step": 264
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 0.2019099786139193,
+ "learning_rate": 7.907679147575661e-05,
+ "loss": 1.213,
+ "step": 265
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.24307234839096267,
+ "learning_rate": 7.905849202719422e-05,
+ "loss": 1.2322,
+ "step": 266
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.19801890521743487,
+ "learning_rate": 7.904001515147802e-05,
+ "loss": 1.2448,
+ "step": 267
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.2102742273575385,
+ "learning_rate": 7.902136093254106e-05,
+ "loss": 1.1657,
+ "step": 268
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.2173464476815016,
+ "learning_rate": 7.900252945512201e-05,
+ "loss": 1.2549,
+ "step": 269
+ },
+ {
+ "epoch": 0.49,
+ "grad_norm": 0.20957275458699595,
+ "learning_rate": 7.898352080476479e-05,
+ "loss": 1.2536,
+ "step": 270
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.20691966388952363,
+ "learning_rate": 7.896433506781811e-05,
+ "loss": 1.2661,
+ "step": 271
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.2276662275112648,
+ "learning_rate": 7.894497233143509e-05,
+ "loss": 1.2409,
+ "step": 272
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.23854109569301263,
+ "learning_rate": 7.892543268357297e-05,
+ "loss": 1.2681,
+ "step": 273
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.2233864156677627,
+ "learning_rate": 7.890571621299252e-05,
+ "loss": 1.1687,
+ "step": 274
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.20114129147925475,
+ "learning_rate": 7.888582300925787e-05,
+ "loss": 1.2184,
+ "step": 275
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.2154654670569462,
+ "learning_rate": 7.886575316273586e-05,
+ "loss": 1.1982,
+ "step": 276
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.2292982209343639,
+ "learning_rate": 7.884550676459583e-05,
+ "loss": 1.2129,
+ "step": 277
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.21302713135229548,
+ "learning_rate": 7.882508390680908e-05,
+ "loss": 1.1605,
+ "step": 278
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.2123661020671048,
+ "learning_rate": 7.88044846821485e-05,
+ "loss": 1.2308,
+ "step": 279
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.2080577410800404,
+ "learning_rate": 7.878370918418818e-05,
+ "loss": 1.2195,
+ "step": 280
+ },
+ {
+ "epoch": 0.51,
+ "grad_norm": 0.19663901881127385,
+ "learning_rate": 7.876275750730289e-05,
+ "loss": 1.1591,
+ "step": 281
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.20534502031312163,
+ "learning_rate": 7.874162974666776e-05,
+ "loss": 1.2664,
+ "step": 282
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.23240445399513837,
+ "learning_rate": 7.872032599825779e-05,
+ "loss": 1.2151,
+ "step": 283
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.2672527316717507,
+ "learning_rate": 7.86988463588474e-05,
+ "loss": 1.2406,
+ "step": 284
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.19893903058743695,
+ "learning_rate": 7.867719092601003e-05,
+ "loss": 1.1291,
+ "step": 285
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.33275268109930917,
+ "learning_rate": 7.865535979811768e-05,
+ "loss": 1.1406,
+ "step": 286
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.2373619455690358,
+ "learning_rate": 7.863335307434045e-05,
+ "loss": 1.2799,
+ "step": 287
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.263235735390858,
+ "learning_rate": 7.861117085464612e-05,
+ "loss": 1.2415,
+ "step": 288
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.25884281780784324,
+ "learning_rate": 7.858881323979965e-05,
+ "loss": 1.3919,
+ "step": 289
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.25426288332255736,
+ "learning_rate": 7.85662803313628e-05,
+ "loss": 1.174,
+ "step": 290
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.26655405527881243,
+ "learning_rate": 7.854357223169356e-05,
+ "loss": 1.2806,
+ "step": 291
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 0.20909844432349833,
+ "learning_rate": 7.852068904394579e-05,
+ "loss": 1.2627,
+ "step": 292
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.21307115068935759,
+ "learning_rate": 7.849763087206866e-05,
+ "loss": 1.1879,
+ "step": 293
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.25009949471398946,
+ "learning_rate": 7.847439782080628e-05,
+ "loss": 1.2881,
+ "step": 294
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.20960783418679174,
+ "learning_rate": 7.845098999569712e-05,
+ "loss": 1.2723,
+ "step": 295
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.24968832437925104,
+ "learning_rate": 7.842740750307362e-05,
+ "loss": 1.2029,
+ "step": 296
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.22981196585125677,
+ "learning_rate": 7.84036504500616e-05,
+ "loss": 1.1695,
+ "step": 297
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.2320606844751365,
+ "learning_rate": 7.837971894457991e-05,
+ "loss": 1.2317,
+ "step": 298
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.23051459673906124,
+ "learning_rate": 7.835561309533981e-05,
+ "loss": 1.2046,
+ "step": 299
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.2510027231060586,
+ "learning_rate": 7.833133301184457e-05,
+ "loss": 1.199,
+ "step": 300
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.23601180466018787,
+ "learning_rate": 7.830687880438895e-05,
+ "loss": 1.1755,
+ "step": 301
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.24740820934385369,
+ "learning_rate": 7.828225058405864e-05,
+ "loss": 1.2054,
+ "step": 302
+ },
+ {
+ "epoch": 0.55,
+ "grad_norm": 0.23065372979111173,
+ "learning_rate": 7.825744846272984e-05,
+ "loss": 1.2066,
+ "step": 303
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.22385077334838213,
+ "learning_rate": 7.823247255306866e-05,
+ "loss": 1.2147,
+ "step": 304
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.42981213948386104,
+ "learning_rate": 7.820732296853074e-05,
+ "loss": 1.2314,
+ "step": 305
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.21122844902751076,
+ "learning_rate": 7.818199982336058e-05,
+ "loss": 1.1462,
+ "step": 306
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.23374869692118933,
+ "learning_rate": 7.815650323259117e-05,
+ "loss": 1.2051,
+ "step": 307
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.21662363795962128,
+ "learning_rate": 7.813083331204332e-05,
+ "loss": 1.1575,
+ "step": 308
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.2088315773384112,
+ "learning_rate": 7.810499017832526e-05,
+ "loss": 1.1316,
+ "step": 309
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.2095238410730976,
+ "learning_rate": 7.807897394883203e-05,
+ "loss": 1.2087,
+ "step": 310
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.22672932127256515,
+ "learning_rate": 7.805278474174499e-05,
+ "loss": 1.2512,
+ "step": 311
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.21873052340922736,
+ "learning_rate": 7.802642267603126e-05,
+ "loss": 1.1909,
+ "step": 312
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.219814521916342,
+ "learning_rate": 7.79998878714432e-05,
+ "loss": 1.1669,
+ "step": 313
+ },
+ {
+ "epoch": 0.57,
+ "grad_norm": 0.3049426027257317,
+ "learning_rate": 7.797318044851786e-05,
+ "loss": 1.1797,
+ "step": 314
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.22309435690065985,
+ "learning_rate": 7.794630052857638e-05,
+ "loss": 1.1417,
+ "step": 315
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.3891885169154885,
+ "learning_rate": 7.791924823372354e-05,
+ "loss": 1.2369,
+ "step": 316
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.24780269452456372,
+ "learning_rate": 7.789202368684711e-05,
+ "loss": 1.2521,
+ "step": 317
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.21660460720269362,
+ "learning_rate": 7.786462701161738e-05,
+ "loss": 1.2151,
+ "step": 318
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.23635409466561857,
+ "learning_rate": 7.783705833248649e-05,
+ "loss": 1.2363,
+ "step": 319
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.2616135839903218,
+ "learning_rate": 7.780931777468797e-05,
+ "loss": 1.2428,
+ "step": 320
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.21461059159245083,
+ "learning_rate": 7.77814054642361e-05,
+ "loss": 1.1434,
+ "step": 321
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.25348824286656163,
+ "learning_rate": 7.775332152792539e-05,
+ "loss": 1.2368,
+ "step": 322
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.22275034726331247,
+ "learning_rate": 7.772506609332995e-05,
+ "loss": 1.1827,
+ "step": 323
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.25030821228147526,
+ "learning_rate": 7.769663928880298e-05,
+ "loss": 1.2428,
+ "step": 324
+ },
+ {
+ "epoch": 0.59,
+ "grad_norm": 0.22251804398745534,
+ "learning_rate": 7.766804124347608e-05,
+ "loss": 1.1889,
+ "step": 325
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.23381455520411995,
+ "learning_rate": 7.763927208725879e-05,
+ "loss": 1.2115,
+ "step": 326
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.27341902651946226,
+ "learning_rate": 7.761033195083791e-05,
+ "loss": 1.2535,
+ "step": 327
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.24862471659814522,
+ "learning_rate": 7.758122096567694e-05,
+ "loss": 1.2128,
+ "step": 328
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.2251357082045494,
+ "learning_rate": 7.755193926401547e-05,
+ "loss": 1.2334,
+ "step": 329
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.3173274941622932,
+ "learning_rate": 7.752248697886857e-05,
+ "loss": 1.226,
+ "step": 330
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.23056440717672175,
+ "learning_rate": 7.74928642440263e-05,
+ "loss": 1.2339,
+ "step": 331
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2801507500859342,
+ "learning_rate": 7.746307119405286e-05,
+ "loss": 1.287,
+ "step": 332
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2267818430426272,
+ "learning_rate": 7.743310796428622e-05,
+ "loss": 1.1916,
+ "step": 333
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2777329160365585,
+ "learning_rate": 7.74029746908374e-05,
+ "loss": 1.252,
+ "step": 334
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.25289169762353,
+ "learning_rate": 7.737267151058983e-05,
+ "loss": 1.2153,
+ "step": 335
+ },
+ {
+ "epoch": 0.61,
+ "grad_norm": 0.2424670686901653,
+ "learning_rate": 7.734219856119875e-05,
+ "loss": 1.2227,
+ "step": 336
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.22747092217441645,
+ "learning_rate": 7.731155598109067e-05,
+ "loss": 1.19,
+ "step": 337
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.2307810940100189,
+ "learning_rate": 7.728074390946257e-05,
+ "loss": 1.1818,
+ "step": 338
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.2583402574655623,
+ "learning_rate": 7.724976248628142e-05,
+ "loss": 1.1608,
+ "step": 339
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.22140209760890694,
+ "learning_rate": 7.721861185228347e-05,
+ "loss": 1.1245,
+ "step": 340
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.25859310758244686,
+ "learning_rate": 7.718729214897362e-05,
+ "loss": 1.2247,
+ "step": 341
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.26371179531372124,
+ "learning_rate": 7.715580351862482e-05,
+ "loss": 1.2128,
+ "step": 342
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.26575541302851047,
+ "learning_rate": 7.712414610427733e-05,
+ "loss": 1.2443,
+ "step": 343
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.269978305197599,
+ "learning_rate": 7.709232004973816e-05,
+ "loss": 1.2231,
+ "step": 344
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.26583998705977047,
+ "learning_rate": 7.70603254995804e-05,
+ "loss": 1.2476,
+ "step": 345
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.24256062164066097,
+ "learning_rate": 7.702816259914253e-05,
+ "loss": 1.2901,
+ "step": 346
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.3463123472658915,
+ "learning_rate": 7.699583149452779e-05,
+ "loss": 1.3277,
+ "step": 347
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.2269096590531878,
+ "learning_rate": 7.696333233260345e-05,
+ "loss": 1.2047,
+ "step": 348
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.25136883001050025,
+ "learning_rate": 7.693066526100031e-05,
+ "loss": 1.1619,
+ "step": 349
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.2565112571116145,
+ "learning_rate": 7.68978304281118e-05,
+ "loss": 1.2389,
+ "step": 350
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.22175779550828703,
+ "learning_rate": 7.686482798309349e-05,
+ "loss": 1.2238,
+ "step": 351
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.22588304332216555,
+ "learning_rate": 7.683165807586234e-05,
+ "loss": 1.174,
+ "step": 352
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.24889474296529737,
+ "learning_rate": 7.6798320857096e-05,
+ "loss": 1.2366,
+ "step": 353
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.27339703806525034,
+ "learning_rate": 7.676481647823214e-05,
+ "loss": 1.2356,
+ "step": 354
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.23424666722888365,
+ "learning_rate": 7.673114509146782e-05,
+ "loss": 1.2089,
+ "step": 355
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.27978285392461766,
+ "learning_rate": 7.66973068497587e-05,
+ "loss": 1.2609,
+ "step": 356
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.2509423350138824,
+ "learning_rate": 7.666330190681844e-05,
+ "loss": 1.1777,
+ "step": 357
+ },
+ {
+ "epoch": 0.65,
+ "grad_norm": 0.23007730927468031,
+ "learning_rate": 7.662913041711793e-05,
+ "loss": 1.154,
+ "step": 358
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.2438648674953112,
+ "learning_rate": 7.659479253588462e-05,
+ "loss": 1.2257,
+ "step": 359
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.28816093242092233,
+ "learning_rate": 7.65602884191018e-05,
+ "loss": 1.2558,
+ "step": 360
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.24972815300596035,
+ "learning_rate": 7.652561822350793e-05,
+ "loss": 1.2837,
+ "step": 361
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.2543189139697063,
+ "learning_rate": 7.649078210659587e-05,
+ "loss": 1.2193,
+ "step": 362
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.2237937956718952,
+ "learning_rate": 7.645578022661224e-05,
+ "loss": 1.2237,
+ "step": 363
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.29742029408787396,
+ "learning_rate": 7.642061274255657e-05,
+ "loss": 1.2116,
+ "step": 364
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2462883147335493,
+ "learning_rate": 7.638527981418075e-05,
+ "loss": 1.1827,
+ "step": 365
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2647802498907096,
+ "learning_rate": 7.634978160198817e-05,
+ "loss": 1.2739,
+ "step": 366
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.22360398779217264,
+ "learning_rate": 7.631411826723306e-05,
+ "loss": 1.2185,
+ "step": 367
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2635048004593543,
+ "learning_rate": 7.627828997191973e-05,
+ "loss": 1.2317,
+ "step": 368
+ },
+ {
+ "epoch": 0.67,
+ "grad_norm": 0.2764803449917684,
+ "learning_rate": 7.624229687880184e-05,
+ "loss": 1.1923,
+ "step": 369
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.25724943233414527,
+ "learning_rate": 7.620613915138166e-05,
+ "loss": 1.2218,
+ "step": 370
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.2858318045794755,
+ "learning_rate": 7.61698169539093e-05,
+ "loss": 1.1496,
+ "step": 371
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.23547216647460364,
+ "learning_rate": 7.613333045138206e-05,
+ "loss": 1.1905,
+ "step": 372
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.22984814903684375,
+ "learning_rate": 7.609667980954355e-05,
+ "loss": 1.2009,
+ "step": 373
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.2551903754079084,
+ "learning_rate": 7.605986519488301e-05,
+ "loss": 1.2042,
+ "step": 374
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.2508257410125616,
+ "learning_rate": 7.602288677463457e-05,
+ "loss": 1.2468,
+ "step": 375
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.25324577774935964,
+ "learning_rate": 7.598574471677644e-05,
+ "loss": 1.2603,
+ "step": 376
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.35888776531769967,
+ "learning_rate": 7.59484391900302e-05,
+ "loss": 1.1929,
+ "step": 377
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.22048517191014724,
+ "learning_rate": 7.591097036385994e-05,
+ "loss": 1.1783,
+ "step": 378
+ },
+ {
+ "epoch": 0.69,
+ "grad_norm": 0.2781160412746083,
+ "learning_rate": 7.587333840847162e-05,
+ "loss": 1.3397,
+ "step": 379
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.24033046830332258,
+ "learning_rate": 7.583554349481222e-05,
+ "loss": 1.2436,
+ "step": 380
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.26413762380260003,
+ "learning_rate": 7.579758579456893e-05,
+ "loss": 1.1917,
+ "step": 381
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.2390937887338632,
+ "learning_rate": 7.575946548016847e-05,
+ "loss": 1.2186,
+ "step": 382
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.25131263043429275,
+ "learning_rate": 7.572118272477622e-05,
+ "loss": 1.2538,
+ "step": 383
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.223974104870702,
+ "learning_rate": 7.568273770229546e-05,
+ "loss": 1.2165,
+ "step": 384
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.25840356830252875,
+ "learning_rate": 7.564413058736663e-05,
+ "loss": 1.1848,
+ "step": 385
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.2723156683076603,
+ "learning_rate": 7.560536155536641e-05,
+ "loss": 1.1982,
+ "step": 386
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.265687427976889,
+ "learning_rate": 7.556643078240708e-05,
+ "loss": 1.231,
+ "step": 387
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.25152762080976077,
+ "learning_rate": 7.552733844533562e-05,
+ "loss": 1.1974,
+ "step": 388
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.2366049485053541,
+ "learning_rate": 7.548808472173292e-05,
+ "loss": 1.3119,
+ "step": 389
+ },
+ {
+ "epoch": 0.71,
+ "grad_norm": 0.22092196577077122,
+ "learning_rate": 7.5448669789913e-05,
+ "loss": 1.195,
+ "step": 390
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.22667521540462374,
+ "learning_rate": 7.540909382892217e-05,
+ "loss": 1.1431,
+ "step": 391
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.25432207282646513,
+ "learning_rate": 7.536935701853823e-05,
+ "loss": 1.2173,
+ "step": 392
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.29950506457923864,
+ "learning_rate": 7.53294595392697e-05,
+ "loss": 1.1962,
+ "step": 393
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.24735689607229913,
+ "learning_rate": 7.528940157235487e-05,
+ "loss": 1.2053,
+ "step": 394
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.24394198607459663,
+ "learning_rate": 7.524918329976114e-05,
+ "loss": 1.1979,
+ "step": 395
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.2630369372689188,
+ "learning_rate": 7.520880490418409e-05,
+ "loss": 1.2111,
+ "step": 396
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.26275028416291457,
+ "learning_rate": 7.516826656904664e-05,
+ "loss": 1.2133,
+ "step": 397
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.23938074620956928,
+ "learning_rate": 7.512756847849831e-05,
+ "loss": 1.1355,
+ "step": 398
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.3724960610098138,
+ "learning_rate": 7.508671081741428e-05,
+ "loss": 1.2572,
+ "step": 399
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.24161685847894723,
+ "learning_rate": 7.504569377139462e-05,
+ "loss": 1.1706,
+ "step": 400
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 0.26121591322670523,
+ "learning_rate": 7.50045175267634e-05,
+ "loss": 1.2135,
+ "step": 401
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2465579498164775,
+ "learning_rate": 7.496318227056788e-05,
+ "loss": 1.1641,
+ "step": 402
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2556288696122787,
+ "learning_rate": 7.492168819057767e-05,
+ "loss": 1.2939,
+ "step": 403
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.261481216336303,
+ "learning_rate": 7.488003547528382e-05,
+ "loss": 1.2026,
+ "step": 404
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2389415135676362,
+ "learning_rate": 7.483822431389799e-05,
+ "loss": 1.2131,
+ "step": 405
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.2559201956627192,
+ "learning_rate": 7.479625489635162e-05,
+ "loss": 1.1246,
+ "step": 406
+ },
+ {
+ "epoch": 0.74,
+ "grad_norm": 0.27127932491822604,
+ "learning_rate": 7.475412741329504e-05,
+ "loss": 1.2429,
+ "step": 407
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.27006004008695594,
+ "learning_rate": 7.47118420560966e-05,
+ "loss": 1.2388,
+ "step": 408
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.23716823297200537,
+ "learning_rate": 7.466939901684182e-05,
+ "loss": 1.1264,
+ "step": 409
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.2885373898669248,
+ "learning_rate": 7.462679848833252e-05,
+ "loss": 1.2786,
+ "step": 410
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.49215227598639927,
+ "learning_rate": 7.458404066408588e-05,
+ "loss": 1.2386,
+ "step": 411
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.24235735604947403,
+ "learning_rate": 7.454112573833368e-05,
+ "loss": 1.1423,
+ "step": 412
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.2584614748054343,
+ "learning_rate": 7.449805390602127e-05,
+ "loss": 1.2669,
+ "step": 413
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.23806123085998873,
+ "learning_rate": 7.445482536280684e-05,
+ "loss": 1.1763,
+ "step": 414
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.24459517607786851,
+ "learning_rate": 7.441144030506043e-05,
+ "loss": 1.198,
+ "step": 415
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.25801616402700395,
+ "learning_rate": 7.436789892986304e-05,
+ "loss": 1.2136,
+ "step": 416
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.2814819942392514,
+ "learning_rate": 7.432420143500578e-05,
+ "loss": 1.2398,
+ "step": 417
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.22134709322606153,
+ "learning_rate": 7.428034801898893e-05,
+ "loss": 1.1592,
+ "step": 418
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.2899677536995633,
+ "learning_rate": 7.42363388810211e-05,
+ "loss": 1.2296,
+ "step": 419
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.24005943230262294,
+ "learning_rate": 7.419217422101822e-05,
+ "loss": 1.2223,
+ "step": 420
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.26417562369496167,
+ "learning_rate": 7.414785423960275e-05,
+ "loss": 1.2261,
+ "step": 421
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.2580815883535521,
+ "learning_rate": 7.410337913810271e-05,
+ "loss": 1.2021,
+ "step": 422
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 0.25242217589496435,
+ "learning_rate": 7.405874911855071e-05,
+ "loss": 1.239,
+ "step": 423
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.21991733999839932,
+ "learning_rate": 7.401396438368315e-05,
+ "loss": 1.1716,
+ "step": 424
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.40116538322720213,
+ "learning_rate": 7.396902513693924e-05,
+ "loss": 1.2773,
+ "step": 425
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.277333939455099,
+ "learning_rate": 7.392393158246002e-05,
+ "loss": 1.2574,
+ "step": 426
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.27146087746385755,
+ "learning_rate": 7.387868392508756e-05,
+ "loss": 1.2243,
+ "step": 427
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.255881055620786,
+ "learning_rate": 7.38332823703639e-05,
+ "loss": 1.223,
+ "step": 428
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 0.24807364856677255,
+ "learning_rate": 7.378772712453021e-05,
+ "loss": 1.1985,
+ "step": 429
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.25746257617764423,
+ "learning_rate": 7.37420183945258e-05,
+ "loss": 1.2502,
+ "step": 430
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.28851991049982234,
+ "learning_rate": 7.369615638798722e-05,
+ "loss": 1.2535,
+ "step": 431
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.24113389811604363,
+ "learning_rate": 7.365014131324725e-05,
+ "loss": 1.2227,
+ "step": 432
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.2414465151257969,
+ "learning_rate": 7.360397337933405e-05,
+ "loss": 1.1884,
+ "step": 433
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.2735463134699831,
+ "learning_rate": 7.355765279597011e-05,
+ "loss": 1.2756,
+ "step": 434
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.2588437452987293,
+ "learning_rate": 7.351117977357139e-05,
+ "loss": 1.2108,
+ "step": 435
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.26573294117796553,
+ "learning_rate": 7.346455452324629e-05,
+ "loss": 1.1821,
+ "step": 436
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.2555476577827304,
+ "learning_rate": 7.341777725679473e-05,
+ "loss": 1.1937,
+ "step": 437
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.2867704132108098,
+ "learning_rate": 7.337084818670716e-05,
+ "loss": 1.2272,
+ "step": 438
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.27726678115981157,
+ "learning_rate": 7.332376752616367e-05,
+ "loss": 1.2331,
+ "step": 439
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.26955338021079955,
+ "learning_rate": 7.32765354890329e-05,
+ "loss": 1.1731,
+ "step": 440
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.25250321202536524,
+ "learning_rate": 7.322915228987116e-05,
+ "loss": 1.2653,
+ "step": 441
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.24748844179765395,
+ "learning_rate": 7.318161814392143e-05,
+ "loss": 1.24,
+ "step": 442
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.28177805247356325,
+ "learning_rate": 7.313393326711239e-05,
+ "loss": 1.185,
+ "step": 443
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.24093242000396312,
+ "learning_rate": 7.30860978760574e-05,
+ "loss": 1.1994,
+ "step": 444
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 0.26277803901457075,
+ "learning_rate": 7.30381121880536e-05,
+ "loss": 1.212,
+ "step": 445
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2506524258682433,
+ "learning_rate": 7.298997642108079e-05,
+ "loss": 1.2421,
+ "step": 446
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2840599700015824,
+ "learning_rate": 7.294169079380061e-05,
+ "loss": 1.1818,
+ "step": 447
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.24892184038117549,
+ "learning_rate": 7.289325552555538e-05,
+ "loss": 1.1916,
+ "step": 448
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2700898428541357,
+ "learning_rate": 7.284467083636722e-05,
+ "loss": 1.2517,
+ "step": 449
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2617848546539419,
+ "learning_rate": 7.279593694693698e-05,
+ "loss": 1.2063,
+ "step": 450
+ },
+ {
+ "epoch": 0.82,
+ "grad_norm": 0.2698278585334131,
+ "learning_rate": 7.274705407864332e-05,
+ "loss": 1.194,
+ "step": 451
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.23678313024953834,
+ "learning_rate": 7.26980224535416e-05,
+ "loss": 1.2349,
+ "step": 452
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.24851875792002978,
+ "learning_rate": 7.264884229436293e-05,
+ "loss": 1.1758,
+ "step": 453
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.24122080121681125,
+ "learning_rate": 7.259951382451318e-05,
+ "loss": 1.1962,
+ "step": 454
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.22741322959884405,
+ "learning_rate": 7.25500372680719e-05,
+ "loss": 1.1702,
+ "step": 455
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 0.2297475610861458,
+ "learning_rate": 7.250041284979137e-05,
+ "loss": 1.1466,
+ "step": 456
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.3057605989721467,
+ "learning_rate": 7.245064079509553e-05,
+ "loss": 1.246,
+ "step": 457
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.2719638501597136,
+ "learning_rate": 7.240072133007899e-05,
+ "loss": 1.2184,
+ "step": 458
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.2436807816414479,
+ "learning_rate": 7.235065468150593e-05,
+ "loss": 1.2324,
+ "step": 459
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.23436349430255515,
+ "learning_rate": 7.23004410768092e-05,
+ "loss": 1.1813,
+ "step": 460
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.2398940990211377,
+ "learning_rate": 7.22500807440892e-05,
+ "loss": 1.1924,
+ "step": 461
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.2605716625062531,
+ "learning_rate": 7.219957391211281e-05,
+ "loss": 1.182,
+ "step": 462
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.260462524570941,
+ "learning_rate": 7.214892081031244e-05,
+ "loss": 1.2136,
+ "step": 463
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.21979766512306334,
+ "learning_rate": 7.209812166878491e-05,
+ "loss": 1.2066,
+ "step": 464
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.23324453647530663,
+ "learning_rate": 7.204717671829051e-05,
+ "loss": 1.1657,
+ "step": 465
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.2529434935507481,
+ "learning_rate": 7.199608619025177e-05,
+ "loss": 1.2093,
+ "step": 466
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 0.25371701891720116,
+ "learning_rate": 7.194485031675265e-05,
+ "loss": 1.2225,
+ "step": 467
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.23272423066292103,
+ "learning_rate": 7.189346933053725e-05,
+ "loss": 1.1721,
+ "step": 468
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.25122928735587546,
+ "learning_rate": 7.184194346500892e-05,
+ "loss": 1.2537,
+ "step": 469
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.2159270875490409,
+ "learning_rate": 7.179027295422913e-05,
+ "loss": 1.197,
+ "step": 470
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.2633111059076544,
+ "learning_rate": 7.173845803291636e-05,
+ "loss": 1.1721,
+ "step": 471
+ },
+ {
+ "epoch": 0.86,
+ "grad_norm": 0.30555936322098703,
+ "learning_rate": 7.168649893644517e-05,
+ "loss": 1.3011,
+ "step": 472
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.23492670111453726,
+ "learning_rate": 7.163439590084502e-05,
+ "loss": 1.1601,
+ "step": 473
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.26602734263721806,
+ "learning_rate": 7.158214916279923e-05,
+ "loss": 1.2808,
+ "step": 474
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.3182695007856262,
+ "learning_rate": 7.152975895964386e-05,
+ "loss": 1.2967,
+ "step": 475
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.2785021674736721,
+ "learning_rate": 7.147722552936673e-05,
+ "loss": 1.1789,
+ "step": 476
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.279474303138652,
+ "learning_rate": 7.142454911060627e-05,
+ "loss": 1.2596,
+ "step": 477
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 0.2556980144910755,
+ "learning_rate": 7.137172994265044e-05,
+ "loss": 1.2426,
+ "step": 478
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.3311256331993533,
+ "learning_rate": 7.131876826543565e-05,
+ "loss": 1.2059,
+ "step": 479
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.26467296197775253,
+ "learning_rate": 7.12656643195457e-05,
+ "loss": 1.2482,
+ "step": 480
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.27444885274652553,
+ "learning_rate": 7.121241834621064e-05,
+ "loss": 1.2528,
+ "step": 481
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.2572283861115396,
+ "learning_rate": 7.115903058730567e-05,
+ "loss": 1.1849,
+ "step": 482
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.2677065778235683,
+ "learning_rate": 7.11055012853501e-05,
+ "loss": 1.2011,
+ "step": 483
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.29470622036742816,
+ "learning_rate": 7.105183068350619e-05,
+ "loss": 1.2398,
+ "step": 484
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.27609230248969197,
+ "learning_rate": 7.099801902557811e-05,
+ "loss": 1.2259,
+ "step": 485
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.24248634168099284,
+ "learning_rate": 7.094406655601073e-05,
+ "loss": 1.2282,
+ "step": 486
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.2765941767688746,
+ "learning_rate": 7.088997351988865e-05,
+ "loss": 1.2319,
+ "step": 487
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.29347776909858947,
+ "learning_rate": 7.083574016293493e-05,
+ "loss": 1.1765,
+ "step": 488
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.285370295424537,
+ "learning_rate": 7.078136673151008e-05,
+ "loss": 1.26,
+ "step": 489
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.29408734903836536,
+ "learning_rate": 7.072685347261093e-05,
+ "loss": 1.226,
+ "step": 490
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.27437470239205813,
+ "learning_rate": 7.067220063386947e-05,
+ "loss": 1.1976,
+ "step": 491
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.2680770258777871,
+ "learning_rate": 7.061740846355176e-05,
+ "loss": 1.1915,
+ "step": 492
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.27200362879502954,
+ "learning_rate": 7.056247721055678e-05,
+ "loss": 1.2002,
+ "step": 493
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.2637811092577037,
+ "learning_rate": 7.050740712441528e-05,
+ "loss": 1.287,
+ "step": 494
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.24657959209271266,
+ "learning_rate": 7.045219845528875e-05,
+ "loss": 1.2284,
+ "step": 495
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.25311992110358666,
+ "learning_rate": 7.039685145396812e-05,
+ "loss": 1.1616,
+ "step": 496
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.2564633694193358,
+ "learning_rate": 7.034136637187275e-05,
+ "loss": 1.2067,
+ "step": 497
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.2446797651174144,
+ "learning_rate": 7.028574346104926e-05,
+ "loss": 1.2284,
+ "step": 498
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.2592751463399255,
+ "learning_rate": 7.022998297417034e-05,
+ "loss": 1.2371,
+ "step": 499
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.2500713943206808,
+ "learning_rate": 7.017408516453365e-05,
+ "loss": 1.1061,
+ "step": 500
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.2812266276040743,
+ "learning_rate": 7.011805028606064e-05,
+ "loss": 1.1949,
+ "step": 501
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.298829667668083,
+ "learning_rate": 7.006187859329544e-05,
+ "loss": 1.2313,
+ "step": 502
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.26518768159745104,
+ "learning_rate": 7.000557034140361e-05,
+ "loss": 1.2246,
+ "step": 503
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.3037280360760458,
+ "learning_rate": 6.994912578617113e-05,
+ "loss": 1.1617,
+ "step": 504
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.2726903109255714,
+ "learning_rate": 6.989254518400309e-05,
+ "loss": 1.2415,
+ "step": 505
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.25568082003046966,
+ "learning_rate": 6.98358287919226e-05,
+ "loss": 1.1817,
+ "step": 506
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.25633294893705044,
+ "learning_rate": 6.97789768675696e-05,
+ "loss": 1.2149,
+ "step": 507
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.28291439435087123,
+ "learning_rate": 6.972198966919972e-05,
+ "loss": 1.1578,
+ "step": 508
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.27195184756655516,
+ "learning_rate": 6.966486745568308e-05,
+ "loss": 1.2355,
+ "step": 509
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.239159568376005,
+ "learning_rate": 6.960761048650312e-05,
+ "loss": 1.1688,
+ "step": 510
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.22961475425949177,
+ "learning_rate": 6.955021902175543e-05,
+ "loss": 1.2094,
+ "step": 511
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.27443773600741117,
+ "learning_rate": 6.949269332214651e-05,
+ "loss": 1.2559,
+ "step": 512
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.26230551832002097,
+ "learning_rate": 6.94350336489927e-05,
+ "loss": 1.2121,
+ "step": 513
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.2716742985303849,
+ "learning_rate": 6.937724026421892e-05,
+ "loss": 1.2444,
+ "step": 514
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.2537850139439542,
+ "learning_rate": 6.931931343035742e-05,
+ "loss": 1.1327,
+ "step": 515
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 0.28599587967496826,
+ "learning_rate": 6.926125341054676e-05,
+ "loss": 1.2236,
+ "step": 516
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.26780654378470103,
+ "learning_rate": 6.920306046853043e-05,
+ "loss": 1.2295,
+ "step": 517
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.23606296888412015,
+ "learning_rate": 6.914473486865577e-05,
+ "loss": 1.1543,
+ "step": 518
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.34976881174240837,
+ "learning_rate": 6.90862768758727e-05,
+ "loss": 1.2067,
+ "step": 519
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.2481257873494882,
+ "learning_rate": 6.902768675573258e-05,
+ "loss": 1.2188,
+ "step": 520
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.2996395778117021,
+ "learning_rate": 6.896896477438699e-05,
+ "loss": 1.2326,
+ "step": 521
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.8839768816333193,
+ "learning_rate": 6.891011119858643e-05,
+ "loss": 1.2435,
+ "step": 522
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.2851882482058998,
+ "learning_rate": 6.885112629567927e-05,
+ "loss": 1.2644,
+ "step": 523
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.2813663482913699,
+ "learning_rate": 6.879201033361035e-05,
+ "loss": 1.2309,
+ "step": 524
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.3257551560135454,
+ "learning_rate": 6.873276358091996e-05,
+ "loss": 1.2755,
+ "step": 525
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.28930479952494365,
+ "learning_rate": 6.867338630674247e-05,
+ "loss": 1.1962,
+ "step": 526
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.3077462996938649,
+ "learning_rate": 6.861387878080511e-05,
+ "loss": 1.2402,
+ "step": 527
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.2848900193452761,
+ "learning_rate": 6.855424127342688e-05,
+ "loss": 1.2748,
+ "step": 528
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.4765938812802202,
+ "learning_rate": 6.849447405551718e-05,
+ "loss": 1.2226,
+ "step": 529
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.53184473292579,
+ "learning_rate": 6.843457739857467e-05,
+ "loss": 1.2347,
+ "step": 530
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.6416239346492343,
+ "learning_rate": 6.837455157468596e-05,
+ "loss": 1.2429,
+ "step": 531
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.3188092712502773,
+ "learning_rate": 6.831439685652442e-05,
+ "loss": 1.216,
+ "step": 532
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.3527495731006385,
+ "learning_rate": 6.825411351734895e-05,
+ "loss": 1.1682,
+ "step": 533
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 0.29603753744741856,
+ "learning_rate": 6.819370183100274e-05,
+ "loss": 1.1434,
+ "step": 534
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 0.5252450389976622,
+ "learning_rate": 6.813316207191198e-05,
+ "loss": 1.1943,
+ "step": 535
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 0.32999419558659937,
+ "learning_rate": 6.807249451508466e-05,
+ "loss": 1.192,
+ "step": 536
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 0.3650175469778724,
+ "learning_rate": 6.801169943610929e-05,
+ "loss": 1.2141,
+ "step": 537
+ },
+ {
+ "epoch": 0.98,
+ "grad_norm": 1.0643532150783557,
+ "learning_rate": 6.795077711115368e-05,
+ "loss": 1.2253,
+ "step": 538
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.5041310609130145,
+ "learning_rate": 6.788972781696363e-05,
+ "loss": 1.278,
+ "step": 539
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.5123058164360991,
+ "learning_rate": 6.782855183086177e-05,
+ "loss": 1.2231,
+ "step": 540
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.3533015702394419,
+ "learning_rate": 6.776724943074619e-05,
+ "loss": 1.2072,
+ "step": 541
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.30253964625417207,
+ "learning_rate": 6.770582089508927e-05,
+ "loss": 1.1382,
+ "step": 542
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.348991618828202,
+ "learning_rate": 6.764426650293633e-05,
+ "loss": 1.2079,
+ "step": 543
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.46017440578788743,
+ "learning_rate": 6.758258653390444e-05,
+ "loss": 1.1813,
+ "step": 544
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.31962101755594885,
+ "learning_rate": 6.75207812681811e-05,
+ "loss": 1.1339,
+ "step": 545
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.37092024548285923,
+ "learning_rate": 6.745885098652298e-05,
+ "loss": 1.2591,
+ "step": 546
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.32347106450715835,
+ "learning_rate": 6.739679597025466e-05,
+ "loss": 1.2017,
+ "step": 547
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.39250187112342494,
+ "learning_rate": 6.733461650126733e-05,
+ "loss": 1.0933,
+ "step": 548
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.473522452217324,
+ "learning_rate": 6.727231286201752e-05,
+ "loss": 1.1124,
+ "step": 549
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.4809062179622052,
+ "learning_rate": 6.720988533552582e-05,
+ "loss": 1.1585,
+ "step": 550
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.3529662801059162,
+ "learning_rate": 6.714733420537559e-05,
+ "loss": 1.0501,
+ "step": 551
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.5958247214391118,
+ "learning_rate": 6.708465975571168e-05,
+ "loss": 1.1086,
+ "step": 552
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.5341364205022454,
+ "learning_rate": 6.70218622712391e-05,
+ "loss": 1.0518,
+ "step": 553
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.3601805724462006,
+ "learning_rate": 6.695894203722181e-05,
+ "loss": 1.1779,
+ "step": 554
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.43410190338280613,
+ "learning_rate": 6.68958993394813e-05,
+ "loss": 1.093,
+ "step": 555
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.46217742572873594,
+ "learning_rate": 6.683273446439546e-05,
+ "loss": 1.0117,
+ "step": 556
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.8591682373623357,
+ "learning_rate": 6.676944769889708e-05,
+ "loss": 1.1002,
+ "step": 557
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.7383229487622726,
+ "learning_rate": 6.670603933047272e-05,
+ "loss": 1.0779,
+ "step": 558
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.5965305891207813,
+ "learning_rate": 6.664250964716131e-05,
+ "loss": 1.0889,
+ "step": 559
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.6030858606684543,
+ "learning_rate": 6.657885893755288e-05,
+ "loss": 1.0982,
+ "step": 560
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.4644510682398409,
+ "learning_rate": 6.65150874907872e-05,
+ "loss": 1.1004,
+ "step": 561
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.43943285132452564,
+ "learning_rate": 6.645119559655254e-05,
+ "loss": 1.0536,
+ "step": 562
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.4456395978600012,
+ "learning_rate": 6.638718354508427e-05,
+ "loss": 1.0733,
+ "step": 563
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.3303824433217466,
+ "learning_rate": 6.632305162716365e-05,
+ "loss": 1.0552,
+ "step": 564
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.3617704823170143,
+ "learning_rate": 6.62588001341164e-05,
+ "loss": 1.1092,
+ "step": 565
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.4465013349903427,
+ "learning_rate": 6.619442935781141e-05,
+ "loss": 1.0781,
+ "step": 566
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.48516780613791277,
+ "learning_rate": 6.612993959065947e-05,
+ "loss": 1.0686,
+ "step": 567
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.38867820318536633,
+ "learning_rate": 6.606533112561186e-05,
+ "loss": 1.1215,
+ "step": 568
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.38566119820378336,
+ "learning_rate": 6.600060425615907e-05,
+ "loss": 1.1213,
+ "step": 569
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.35534855445058544,
+ "learning_rate": 6.593575927632947e-05,
+ "loss": 1.0955,
+ "step": 570
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.38124406233349717,
+ "learning_rate": 6.587079648068795e-05,
+ "loss": 1.0659,
+ "step": 571
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.454750160923548,
+ "learning_rate": 6.580571616433457e-05,
+ "loss": 1.1149,
+ "step": 572
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.35353190088025255,
+ "learning_rate": 6.574051862290325e-05,
+ "loss": 1.0388,
+ "step": 573
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.3249395594793626,
+ "learning_rate": 6.567520415256045e-05,
+ "loss": 1.0784,
+ "step": 574
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.40078898818247227,
+ "learning_rate": 6.560977305000375e-05,
+ "loss": 1.0859,
+ "step": 575
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.4115264795060035,
+ "learning_rate": 6.554422561246054e-05,
+ "loss": 1.1828,
+ "step": 576
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.30090229228069215,
+ "learning_rate": 6.54785621376867e-05,
+ "loss": 1.0901,
+ "step": 577
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.28827860350299206,
+ "learning_rate": 6.541278292396523e-05,
+ "loss": 1.0277,
+ "step": 578
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.34690404488996757,
+ "learning_rate": 6.534688827010484e-05,
+ "loss": 1.048,
+ "step": 579
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.29943113556644785,
+ "learning_rate": 6.528087847543867e-05,
+ "loss": 1.0646,
+ "step": 580
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.37318202575874415,
+ "learning_rate": 6.521475383982291e-05,
+ "loss": 1.1091,
+ "step": 581
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.3049663659203959,
+ "learning_rate": 6.51485146636354e-05,
+ "loss": 1.0552,
+ "step": 582
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.3342407867509692,
+ "learning_rate": 6.508216124777431e-05,
+ "loss": 1.2227,
+ "step": 583
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.3348396047855952,
+ "learning_rate": 6.501569389365674e-05,
+ "loss": 1.0861,
+ "step": 584
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.30951429367513383,
+ "learning_rate": 6.494911290321737e-05,
+ "loss": 1.0461,
+ "step": 585
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.33898401361064606,
+ "learning_rate": 6.488241857890711e-05,
+ "loss": 1.0854,
+ "step": 586
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.4901462068263497,
+ "learning_rate": 6.481561122369164e-05,
+ "loss": 1.1012,
+ "step": 587
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.3179574879809652,
+ "learning_rate": 6.474869114105018e-05,
+ "loss": 1.0451,
+ "step": 588
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.32159328915060714,
+ "learning_rate": 6.468165863497395e-05,
+ "loss": 1.0458,
+ "step": 589
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.36462235008537297,
+ "learning_rate": 6.461451400996491e-05,
+ "loss": 1.1247,
+ "step": 590
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.5373862753611778,
+ "learning_rate": 6.454725757103432e-05,
+ "loss": 1.0542,
+ "step": 591
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.3160409270291303,
+ "learning_rate": 6.447988962370133e-05,
+ "loss": 1.0829,
+ "step": 592
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.390452102978435,
+ "learning_rate": 6.441241047399169e-05,
+ "loss": 1.192,
+ "step": 593
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.3802122712014928,
+ "learning_rate": 6.434482042843627e-05,
+ "loss": 1.1153,
+ "step": 594
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.4081584328242501,
+ "learning_rate": 6.427711979406966e-05,
+ "loss": 1.1635,
+ "step": 595
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.3791962989638633,
+ "learning_rate": 6.420930887842889e-05,
+ "loss": 1.1581,
+ "step": 596
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.33239440056484193,
+ "learning_rate": 6.414138798955189e-05,
+ "loss": 1.0926,
+ "step": 597
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.3279881540815014,
+ "learning_rate": 6.407335743597616e-05,
+ "loss": 1.1386,
+ "step": 598
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.30309644763750837,
+ "learning_rate": 6.40052175267374e-05,
+ "loss": 1.0523,
+ "step": 599
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.3349097308403333,
+ "learning_rate": 6.393696857136801e-05,
+ "loss": 1.0815,
+ "step": 600
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.3288227593556618,
+ "learning_rate": 6.386861087989581e-05,
+ "loss": 1.015,
+ "step": 601
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.36685586740843157,
+ "learning_rate": 6.380014476284255e-05,
+ "loss": 1.1232,
+ "step": 602
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.3620977714204643,
+ "learning_rate": 6.373157053122243e-05,
+ "loss": 1.1138,
+ "step": 603
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.3130587018197183,
+ "learning_rate": 6.366288849654091e-05,
+ "loss": 1.1255,
+ "step": 604
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.3602737087072766,
+ "learning_rate": 6.359409897079303e-05,
+ "loss": 1.0282,
+ "step": 605
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.31168852571991945,
+ "learning_rate": 6.352520226646222e-05,
+ "loss": 1.0779,
+ "step": 606
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.3516045580189353,
+ "learning_rate": 6.345619869651871e-05,
+ "loss": 1.1028,
+ "step": 607
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.3231857927563657,
+ "learning_rate": 6.33870885744182e-05,
+ "loss": 1.1202,
+ "step": 608
+ },
+ {
+ "epoch": 1.11,
+ "grad_norm": 0.30205205129701157,
+ "learning_rate": 6.331787221410041e-05,
+ "loss": 1.1369,
+ "step": 609
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.3198359813888166,
+ "learning_rate": 6.32485499299877e-05,
+ "loss": 1.1763,
+ "step": 610
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.3128641370321787,
+ "learning_rate": 6.31791220369835e-05,
+ "loss": 1.0223,
+ "step": 611
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.2989105616213649,
+ "learning_rate": 6.31095888504711e-05,
+ "loss": 1.0358,
+ "step": 612
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.3103537906853337,
+ "learning_rate": 6.303995068631203e-05,
+ "loss": 1.1261,
+ "step": 613
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.28598715532508207,
+ "learning_rate": 6.297020786084467e-05,
+ "loss": 1.0629,
+ "step": 614
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.29809789918093255,
+ "learning_rate": 6.290036069088288e-05,
+ "loss": 1.035,
+ "step": 615
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.33765270252261453,
+ "learning_rate": 6.283040949371451e-05,
+ "loss": 1.1221,
+ "step": 616
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.3424617501293415,
+ "learning_rate": 6.276035458709993e-05,
+ "loss": 1.155,
+ "step": 617
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.3799189737987811,
+ "learning_rate": 6.269019628927067e-05,
+ "loss": 1.0701,
+ "step": 618
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.3358898935253196,
+ "learning_rate": 6.261993491892791e-05,
+ "loss": 1.1649,
+ "step": 619
+ },
+ {
+ "epoch": 1.13,
+ "grad_norm": 0.31569979424117356,
+ "learning_rate": 6.254957079524099e-05,
+ "loss": 1.0633,
+ "step": 620
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.3002168156888237,
+ "learning_rate": 6.247910423784609e-05,
+ "loss": 1.0846,
+ "step": 621
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.3097238823450595,
+ "learning_rate": 6.24085355668447e-05,
+ "loss": 1.0808,
+ "step": 622
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.3120312761417578,
+ "learning_rate": 6.233786510280212e-05,
+ "loss": 1.0142,
+ "step": 623
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.3335343015064923,
+ "learning_rate": 6.22670931667461e-05,
+ "loss": 1.0674,
+ "step": 624
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.3234062304634526,
+ "learning_rate": 6.219622008016533e-05,
+ "loss": 1.0981,
+ "step": 625
+ },
+ {
+ "epoch": 1.14,
+ "grad_norm": 0.32152678786547273,
+ "learning_rate": 6.212524616500798e-05,
+ "loss": 1.0244,
+ "step": 626
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.39031977608147594,
+ "learning_rate": 6.205417174368023e-05,
+ "loss": 1.1205,
+ "step": 627
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.3806189090017157,
+ "learning_rate": 6.198299713904485e-05,
+ "loss": 1.1134,
+ "step": 628
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.2978349276971668,
+ "learning_rate": 6.191172267441967e-05,
+ "loss": 1.0088,
+ "step": 629
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.3190354077382501,
+ "learning_rate": 6.184034867357617e-05,
+ "loss": 1.108,
+ "step": 630
+ },
+ {
+ "epoch": 1.15,
+ "grad_norm": 0.32633048665038994,
+ "learning_rate": 6.176887546073797e-05,
+ "loss": 1.0825,
+ "step": 631
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.3428026413020903,
+ "learning_rate": 6.169730336057939e-05,
+ "loss": 1.0765,
+ "step": 632
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.3475737151929015,
+ "learning_rate": 6.162563269822391e-05,
+ "loss": 1.0693,
+ "step": 633
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.3870252154591392,
+ "learning_rate": 6.15538637992428e-05,
+ "loss": 1.1081,
+ "step": 634
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.33597355193652834,
+ "learning_rate": 6.148199698965352e-05,
+ "loss": 1.0893,
+ "step": 635
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.30805894179787247,
+ "learning_rate": 6.141003259591834e-05,
+ "loss": 1.0995,
+ "step": 636
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.3025073882734066,
+ "learning_rate": 6.133797094494281e-05,
+ "loss": 1.0388,
+ "step": 637
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.3524395196391662,
+ "learning_rate": 6.126581236407429e-05,
+ "loss": 1.1196,
+ "step": 638
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.3377646188130345,
+ "learning_rate": 6.119355718110039e-05,
+ "loss": 1.0382,
+ "step": 639
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.35508400659785483,
+ "learning_rate": 6.112120572424763e-05,
+ "loss": 1.1402,
+ "step": 640
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.3454418793700457,
+ "learning_rate": 6.104875832217982e-05,
+ "loss": 1.1032,
+ "step": 641
+ },
+ {
+ "epoch": 1.17,
+ "grad_norm": 0.32629806837059866,
+ "learning_rate": 6.097621530399661e-05,
+ "loss": 1.0959,
+ "step": 642
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.3329536837751315,
+ "learning_rate": 6.090357699923202e-05,
+ "loss": 1.0467,
+ "step": 643
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.32302233828349475,
+ "learning_rate": 6.083084373785287e-05,
+ "loss": 1.0858,
+ "step": 644
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.3310358826507611,
+ "learning_rate": 6.075801585025739e-05,
+ "loss": 1.0715,
+ "step": 645
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.319322035854079,
+ "learning_rate": 6.068509366727362e-05,
+ "loss": 1.177,
+ "step": 646
+ },
+ {
+ "epoch": 1.18,
+ "grad_norm": 0.3065230667302707,
+ "learning_rate": 6.061207752015797e-05,
+ "loss": 1.0649,
+ "step": 647
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.29926795565748227,
+ "learning_rate": 6.053896774059368e-05,
+ "loss": 1.1325,
+ "step": 648
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.3556069634279046,
+ "learning_rate": 6.046576466068931e-05,
+ "loss": 1.1366,
+ "step": 649
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.3189191131461966,
+ "learning_rate": 6.039246861297727e-05,
+ "loss": 1.0693,
+ "step": 650
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.3347197156648834,
+ "learning_rate": 6.031907993041227e-05,
+ "loss": 1.1009,
+ "step": 651
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.32274156348185445,
+ "learning_rate": 6.0245598946369826e-05,
+ "loss": 1.1675,
+ "step": 652
+ },
+ {
+ "epoch": 1.19,
+ "grad_norm": 0.35534089035455224,
+ "learning_rate": 6.017202599464476e-05,
+ "loss": 1.1723,
+ "step": 653
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.3106026578570133,
+ "learning_rate": 6.009836140944965e-05,
+ "loss": 1.0954,
+ "step": 654
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.3309144454564729,
+ "learning_rate": 6.002460552541331e-05,
+ "loss": 1.0209,
+ "step": 655
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.3023619281400003,
+ "learning_rate": 5.9950758677579345e-05,
+ "loss": 1.0363,
+ "step": 656
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.3311182880219704,
+ "learning_rate": 5.987682120140451e-05,
+ "loss": 1.0515,
+ "step": 657
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.33396486010030413,
+ "learning_rate": 5.980279343275729e-05,
+ "loss": 1.1251,
+ "step": 658
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.3465764556678002,
+ "learning_rate": 5.97286757079163e-05,
+ "loss": 1.165,
+ "step": 659
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.304193441363374,
+ "learning_rate": 5.965446836356882e-05,
+ "loss": 1.0228,
+ "step": 660
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.3415149030413082,
+ "learning_rate": 5.9580171736809224e-05,
+ "loss": 1.0742,
+ "step": 661
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.33138658321132064,
+ "learning_rate": 5.950578616513746e-05,
+ "loss": 1.0843,
+ "step": 662
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.30774403421162994,
+ "learning_rate": 5.943131198645752e-05,
+ "loss": 1.065,
+ "step": 663
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 0.3428877492183819,
+ "learning_rate": 5.9356749539075885e-05,
+ "loss": 1.1101,
+ "step": 664
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.3621290546130101,
+ "learning_rate": 5.928209916170003e-05,
+ "loss": 1.1372,
+ "step": 665
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.3482375945469884,
+ "learning_rate": 5.9207361193436865e-05,
+ "loss": 1.132,
+ "step": 666
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.31754384974068384,
+ "learning_rate": 5.9132535973791156e-05,
+ "loss": 1.148,
+ "step": 667
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.36003834782050365,
+ "learning_rate": 5.9057623842664044e-05,
+ "loss": 1.1099,
+ "step": 668
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 0.2963701622969662,
+ "learning_rate": 5.8982625140351464e-05,
+ "loss": 1.0755,
+ "step": 669
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.32579569606066516,
+ "learning_rate": 5.8907540207542616e-05,
+ "loss": 1.0809,
+ "step": 670
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.4247563451753457,
+ "learning_rate": 5.8832369385318416e-05,
+ "loss": 1.097,
+ "step": 671
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.33076932102169776,
+ "learning_rate": 5.875711301514992e-05,
+ "loss": 1.1078,
+ "step": 672
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.3609238032332309,
+ "learning_rate": 5.8681771438896815e-05,
+ "loss": 1.1031,
+ "step": 673
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.325159585649425,
+ "learning_rate": 5.860634499880583e-05,
+ "loss": 1.0707,
+ "step": 674
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 0.4620687271068983,
+ "learning_rate": 5.853083403750922e-05,
+ "loss": 1.1017,
+ "step": 675
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.33485279064365936,
+ "learning_rate": 5.845523889802316e-05,
+ "loss": 1.0989,
+ "step": 676
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.30952573170841513,
+ "learning_rate": 5.8379559923746214e-05,
+ "loss": 1.0393,
+ "step": 677
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.33498605810588283,
+ "learning_rate": 5.830379745845781e-05,
+ "loss": 1.1259,
+ "step": 678
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.35771921163037307,
+ "learning_rate": 5.822795184631659e-05,
+ "loss": 1.0815,
+ "step": 679
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.3329650192347647,
+ "learning_rate": 5.815202343185894e-05,
+ "loss": 1.1344,
+ "step": 680
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3356634465845771,
+ "learning_rate": 5.807601255999736e-05,
+ "loss": 1.1297,
+ "step": 681
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3289442034151235,
+ "learning_rate": 5.7999919576018934e-05,
+ "loss": 1.022,
+ "step": 682
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3207007334784113,
+ "learning_rate": 5.7923744825583745e-05,
+ "loss": 1.0571,
+ "step": 683
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3582460325329284,
+ "learning_rate": 5.7847488654723304e-05,
+ "loss": 1.0778,
+ "step": 684
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.3563317666176927,
+ "learning_rate": 5.777115140983899e-05,
+ "loss": 1.1003,
+ "step": 685
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 3.4694912945702105,
+ "learning_rate": 5.769473343770047e-05,
+ "loss": 1.121,
+ "step": 686
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.43002349520483113,
+ "learning_rate": 5.761823508544411e-05,
+ "loss": 1.0765,
+ "step": 687
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.39467783104839754,
+ "learning_rate": 5.754165670057142e-05,
+ "loss": 1.0788,
+ "step": 688
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.39629029674867916,
+ "learning_rate": 5.7464998630947464e-05,
+ "loss": 1.0812,
+ "step": 689
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.3880152093965208,
+ "learning_rate": 5.738826122479929e-05,
+ "loss": 1.1228,
+ "step": 690
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 0.3777874121959188,
+ "learning_rate": 5.7311444830714324e-05,
+ "loss": 1.0907,
+ "step": 691
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.38004041653523696,
+ "learning_rate": 5.723454979763882e-05,
+ "loss": 1.1263,
+ "step": 692
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.37049672627797636,
+ "learning_rate": 5.7157576474876246e-05,
+ "loss": 1.1438,
+ "step": 693
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.32973606103437614,
+ "learning_rate": 5.7080525212085725e-05,
+ "loss": 1.0553,
+ "step": 694
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.31674639252070325,
+ "learning_rate": 5.700339635928038e-05,
+ "loss": 1.06,
+ "step": 695
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.32282199426553837,
+ "learning_rate": 5.692619026682588e-05,
+ "loss": 1.0841,
+ "step": 696
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 0.4810882958061859,
+ "learning_rate": 5.684890728543869e-05,
+ "loss": 1.0803,
+ "step": 697
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.3995638550178378,
+ "learning_rate": 5.6771547766184566e-05,
+ "loss": 1.1187,
+ "step": 698
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.35264932960583484,
+ "learning_rate": 5.669411206047699e-05,
+ "loss": 1.0641,
+ "step": 699
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.35240640524733,
+ "learning_rate": 5.661660052007547e-05,
+ "loss": 1.076,
+ "step": 700
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.3540694609860389,
+ "learning_rate": 5.653901349708401e-05,
+ "loss": 1.1369,
+ "step": 701
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.3196055112925304,
+ "learning_rate": 5.646135134394955e-05,
+ "loss": 1.0677,
+ "step": 702
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.4214141007955914,
+ "learning_rate": 5.6383614413460266e-05,
+ "loss": 1.1139,
+ "step": 703
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.3625611311798579,
+ "learning_rate": 5.630580305874402e-05,
+ "loss": 1.1845,
+ "step": 704
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.3425208672181188,
+ "learning_rate": 5.62279176332668e-05,
+ "loss": 1.174,
+ "step": 705
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.3108419862818321,
+ "learning_rate": 5.6149958490830996e-05,
+ "loss": 1.0331,
+ "step": 706
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.3274644181571904,
+ "learning_rate": 5.607192598557394e-05,
+ "loss": 1.0664,
+ "step": 707
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 0.346218197215145,
+ "learning_rate": 5.599382047196617e-05,
+ "loss": 1.2088,
+ "step": 708
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.328497632267458,
+ "learning_rate": 5.591564230480989e-05,
+ "loss": 1.0287,
+ "step": 709
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.3708173720611468,
+ "learning_rate": 5.583739183923732e-05,
+ "loss": 1.0883,
+ "step": 710
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.3631427403535479,
+ "learning_rate": 5.575906943070915e-05,
+ "loss": 1.1155,
+ "step": 711
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.3305201458598695,
+ "learning_rate": 5.5680675435012834e-05,
+ "loss": 1.0958,
+ "step": 712
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.34978833532083714,
+ "learning_rate": 5.5602210208261036e-05,
+ "loss": 1.1437,
+ "step": 713
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.3510553882510229,
+ "learning_rate": 5.552367410688999e-05,
+ "loss": 1.0941,
+ "step": 714
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.3523747462465078,
+ "learning_rate": 5.544506748765789e-05,
+ "loss": 1.1289,
+ "step": 715
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.38262637783927445,
+ "learning_rate": 5.5366390707643266e-05,
+ "loss": 1.099,
+ "step": 716
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.38620065989073454,
+ "learning_rate": 5.528764412424334e-05,
+ "loss": 1.083,
+ "step": 717
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 0.3401355276121096,
+ "learning_rate": 5.520882809517245e-05,
+ "loss": 1.028,
+ "step": 718
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.3392061008943934,
+ "learning_rate": 5.512994297846039e-05,
+ "loss": 1.1083,
+ "step": 719
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.34219480421015414,
+ "learning_rate": 5.505098913245077e-05,
+ "loss": 1.1108,
+ "step": 720
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.3275058061553761,
+ "learning_rate": 5.497196691579945e-05,
+ "loss": 1.111,
+ "step": 721
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.36800249746509384,
+ "learning_rate": 5.489287668747283e-05,
+ "loss": 1.1221,
+ "step": 722
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.4129005533101575,
+ "learning_rate": 5.481371880674628e-05,
+ "loss": 1.0966,
+ "step": 723
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.36563906596251655,
+ "learning_rate": 5.4734493633202505e-05,
+ "loss": 1.0927,
+ "step": 724
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.3614650536839971,
+ "learning_rate": 5.465520152672986e-05,
+ "loss": 1.13,
+ "step": 725
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.36419665098633497,
+ "learning_rate": 5.4575842847520765e-05,
+ "loss": 1.1183,
+ "step": 726
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.34490689807258995,
+ "learning_rate": 5.449641795607005e-05,
+ "loss": 1.0919,
+ "step": 727
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.3627643746876298,
+ "learning_rate": 5.441692721317334e-05,
+ "loss": 1.0411,
+ "step": 728
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 0.323620411949565,
+ "learning_rate": 5.433737097992537e-05,
+ "loss": 1.0725,
+ "step": 729
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3521599501824965,
+ "learning_rate": 5.425774961771838e-05,
+ "loss": 1.0926,
+ "step": 730
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3302390546764222,
+ "learning_rate": 5.417806348824047e-05,
+ "loss": 1.0468,
+ "step": 731
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3833325802616019,
+ "learning_rate": 5.4098312953473956e-05,
+ "loss": 1.1291,
+ "step": 732
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3708621126835512,
+ "learning_rate": 5.401849837569372e-05,
+ "loss": 1.0887,
+ "step": 733
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3625834373416278,
+ "learning_rate": 5.393862011746555e-05,
+ "loss": 1.0981,
+ "step": 734
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 0.3583343965080617,
+ "learning_rate": 5.385867854164451e-05,
+ "loss": 1.1021,
+ "step": 735
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.34598320594096066,
+ "learning_rate": 5.377867401137332e-05,
+ "loss": 1.1376,
+ "step": 736
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.3046382791315433,
+ "learning_rate": 5.369860689008066e-05,
+ "loss": 1.0206,
+ "step": 737
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.34464948380043725,
+ "learning_rate": 5.3618477541479505e-05,
+ "loss": 1.1084,
+ "step": 738
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.3203242519627101,
+ "learning_rate": 5.353828632956557e-05,
+ "loss": 1.0731,
+ "step": 739
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 0.3431169960355163,
+ "learning_rate": 5.3458033618615516e-05,
+ "loss": 1.091,
+ "step": 740
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.33492074521678705,
+ "learning_rate": 5.337771977318543e-05,
+ "loss": 1.1112,
+ "step": 741
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.32576546585541344,
+ "learning_rate": 5.3297345158109086e-05,
+ "loss": 1.0993,
+ "step": 742
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.3410007245037574,
+ "learning_rate": 5.3216910138496286e-05,
+ "loss": 1.094,
+ "step": 743
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.34891180680896833,
+ "learning_rate": 5.313641507973128e-05,
+ "loss": 1.1331,
+ "step": 744
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.37135766946717214,
+ "learning_rate": 5.3055860347471006e-05,
+ "loss": 1.1,
+ "step": 745
+ },
+ {
+ "epoch": 1.36,
+ "grad_norm": 0.3465019415478411,
+ "learning_rate": 5.297524630764349e-05,
+ "loss": 1.1256,
+ "step": 746
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.37035388481626563,
+ "learning_rate": 5.289457332644615e-05,
+ "loss": 1.0366,
+ "step": 747
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.33853883270759155,
+ "learning_rate": 5.281384177034421e-05,
+ "loss": 1.0547,
+ "step": 748
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.364306618627317,
+ "learning_rate": 5.2733052006068897e-05,
+ "loss": 1.0768,
+ "step": 749
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.4021754315731627,
+ "learning_rate": 5.2652204400615916e-05,
+ "loss": 1.1382,
+ "step": 750
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 0.3332185389039008,
+ "learning_rate": 5.257129932124368e-05,
+ "loss": 1.0815,
+ "step": 751
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.3453105709879854,
+ "learning_rate": 5.249033713547173e-05,
+ "loss": 1.1109,
+ "step": 752
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.3385397539717797,
+ "learning_rate": 5.2409318211078966e-05,
+ "loss": 1.0529,
+ "step": 753
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.33197994450130447,
+ "learning_rate": 5.232824291610206e-05,
+ "loss": 1.0721,
+ "step": 754
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.32836289576124167,
+ "learning_rate": 5.224711161883375e-05,
+ "loss": 1.0459,
+ "step": 755
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.32491620058831744,
+ "learning_rate": 5.216592468782117e-05,
+ "loss": 1.0897,
+ "step": 756
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 0.3137879047811153,
+ "learning_rate": 5.2084682491864155e-05,
+ "loss": 1.096,
+ "step": 757
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.3356938043023012,
+ "learning_rate": 5.200338540001364e-05,
+ "loss": 1.0827,
+ "step": 758
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.36044340490819055,
+ "learning_rate": 5.192203378156984e-05,
+ "loss": 1.0617,
+ "step": 759
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.34674262047888293,
+ "learning_rate": 5.184062800608077e-05,
+ "loss": 1.1267,
+ "step": 760
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.32469442322149333,
+ "learning_rate": 5.1759168443340375e-05,
+ "loss": 1.1483,
+ "step": 761
+ },
+ {
+ "epoch": 1.39,
+ "grad_norm": 0.3290384307774216,
+ "learning_rate": 5.167765546338698e-05,
+ "loss": 1.047,
+ "step": 762
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.31637612188770403,
+ "learning_rate": 5.1596089436501525e-05,
+ "loss": 1.0311,
+ "step": 763
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.3168693829641207,
+ "learning_rate": 5.151447073320597e-05,
+ "loss": 1.1405,
+ "step": 764
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.34322421571238926,
+ "learning_rate": 5.143279972426153e-05,
+ "loss": 1.1428,
+ "step": 765
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.3291030435830325,
+ "learning_rate": 5.1351076780667026e-05,
+ "loss": 1.0473,
+ "step": 766
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.33772039158758044,
+ "learning_rate": 5.1269302273657195e-05,
+ "loss": 1.0909,
+ "step": 767
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.3802031736890876,
+ "learning_rate": 5.118747657470102e-05,
+ "loss": 1.1482,
+ "step": 768
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.3296067628997962,
+ "learning_rate": 5.1105600055500025e-05,
+ "loss": 1.0085,
+ "step": 769
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.3707139982828035,
+ "learning_rate": 5.102367308798658e-05,
+ "loss": 1.0746,
+ "step": 770
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.3378537316757011,
+ "learning_rate": 5.094169604432225e-05,
+ "loss": 1.0482,
+ "step": 771
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.4008417246255145,
+ "learning_rate": 5.085966929689601e-05,
+ "loss": 1.1065,
+ "step": 772
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 0.3244385106988064,
+ "learning_rate": 5.077759321832271e-05,
+ "loss": 1.0827,
+ "step": 773
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.37228575732812336,
+ "learning_rate": 5.0695468181441215e-05,
+ "loss": 1.1146,
+ "step": 774
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.33761714797540276,
+ "learning_rate": 5.061329455931283e-05,
+ "loss": 1.092,
+ "step": 775
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.3158158390913494,
+ "learning_rate": 5.053107272521955e-05,
+ "loss": 1.1058,
+ "step": 776
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.3691501929738938,
+ "learning_rate": 5.044880305266239e-05,
+ "loss": 1.1599,
+ "step": 777
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.33730914019805525,
+ "learning_rate": 5.0366485915359645e-05,
+ "loss": 1.0615,
+ "step": 778
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 0.34970059240017,
+ "learning_rate": 5.0284121687245257e-05,
+ "loss": 1.1475,
+ "step": 779
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 0.3374028029407197,
+ "learning_rate": 5.020171074246707e-05,
+ "loss": 1.0926,
+ "step": 780
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 0.3350020681123992,
+ "learning_rate": 5.011925345538514e-05,
+ "loss": 1.1276,
+ "step": 781
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 0.3224228965786606,
+ "learning_rate": 5.003675020057003e-05,
+ "loss": 1.0183,
+ "step": 782
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 0.3357310714740298,
+ "learning_rate": 4.995420135280114e-05,
+ "loss": 1.1114,
+ "step": 783
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 0.3590203255363759,
+ "learning_rate": 4.9871607287064966e-05,
+ "loss": 1.1504,
+ "step": 784
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.33011195419611655,
+ "learning_rate": 4.9788968378553396e-05,
+ "loss": 1.0826,
+ "step": 785
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.31088868195439445,
+ "learning_rate": 4.970628500266207e-05,
+ "loss": 1.0704,
+ "step": 786
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.3144996103179409,
+ "learning_rate": 4.962355753498858e-05,
+ "loss": 1.1403,
+ "step": 787
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.3147269555419068,
+ "learning_rate": 4.954078635133081e-05,
+ "loss": 1.0898,
+ "step": 788
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.3280151747783868,
+ "learning_rate": 4.945797182768524e-05,
+ "loss": 1.1115,
+ "step": 789
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.3551996569232493,
+ "learning_rate": 4.937511434024524e-05,
+ "loss": 1.1731,
+ "step": 790
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 0.343863208057807,
+ "learning_rate": 4.9292214265399336e-05,
+ "loss": 1.0866,
+ "step": 791
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 0.37316699385322466,
+ "learning_rate": 4.920927197972949e-05,
+ "loss": 1.1083,
+ "step": 792
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 0.3635739774067832,
+ "learning_rate": 4.9126287860009453e-05,
+ "loss": 1.1393,
+ "step": 793
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 0.3755910554972886,
+ "learning_rate": 4.9043262283202974e-05,
+ "loss": 1.1624,
+ "step": 794
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 0.3635899120146823,
+ "learning_rate": 4.8960195626462145e-05,
+ "loss": 1.2095,
+ "step": 795
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 0.3642202684342816,
+ "learning_rate": 4.8877088267125664e-05,
+ "loss": 1.1099,
+ "step": 796
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 0.3339946548799316,
+ "learning_rate": 4.879394058271712e-05,
+ "loss": 1.1157,
+ "step": 797
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 0.3457189703100475,
+ "learning_rate": 4.871075295094329e-05,
+ "loss": 1.129,
+ "step": 798
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 0.3550931839691424,
+ "learning_rate": 4.862752574969241e-05,
+ "loss": 1.076,
+ "step": 799
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 0.36139108917966734,
+ "learning_rate": 4.8544259357032475e-05,
+ "loss": 1.1577,
+ "step": 800
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.39569703665247874,
+ "learning_rate": 4.8460954151209486e-05,
+ "loss": 1.0543,
+ "step": 801
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.3879033670170866,
+ "learning_rate": 4.837761051064579e-05,
+ "loss": 1.0688,
+ "step": 802
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.3796846713967255,
+ "learning_rate": 4.8294228813938285e-05,
+ "loss": 0.9911,
+ "step": 803
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.4007831430409375,
+ "learning_rate": 4.8210809439856804e-05,
+ "loss": 1.0126,
+ "step": 804
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.37588078665500885,
+ "learning_rate": 4.8127352767342276e-05,
+ "loss": 0.9302,
+ "step": 805
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.4078509175013281,
+ "learning_rate": 4.8043859175505095e-05,
+ "loss": 0.9982,
+ "step": 806
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.379096046185539,
+ "learning_rate": 4.7960329043623344e-05,
+ "loss": 1.0035,
+ "step": 807
+ },
+ {
+ "epoch": 1.01,
+ "grad_norm": 0.3813938568133554,
+ "learning_rate": 4.787676275114111e-05,
+ "loss": 0.9579,
+ "step": 808
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.3686863564511168,
+ "learning_rate": 4.779316067766673e-05,
+ "loss": 1.0105,
+ "step": 809
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.4263940878847523,
+ "learning_rate": 4.770952320297109e-05,
+ "loss": 1.0677,
+ "step": 810
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.37178778374665006,
+ "learning_rate": 4.7625850706985886e-05,
+ "loss": 1.0019,
+ "step": 811
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.36803355429187945,
+ "learning_rate": 4.7542143569801894e-05,
+ "loss": 0.9937,
+ "step": 812
+ },
+ {
+ "epoch": 1.02,
+ "grad_norm": 0.3897072472941179,
+ "learning_rate": 4.745840217166725e-05,
+ "loss": 1.0877,
+ "step": 813
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.35571833841716255,
+ "learning_rate": 4.737462689298577e-05,
+ "loss": 1.0015,
+ "step": 814
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.38930229991094323,
+ "learning_rate": 4.7290818114315086e-05,
+ "loss": 1.028,
+ "step": 815
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.411005007105147,
+ "learning_rate": 4.72069762163651e-05,
+ "loss": 1.0068,
+ "step": 816
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.3980240190337736,
+ "learning_rate": 4.7123101579996106e-05,
+ "loss": 0.9919,
+ "step": 817
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.36369517703115467,
+ "learning_rate": 4.7039194586217136e-05,
+ "loss": 0.967,
+ "step": 818
+ },
+ {
+ "epoch": 1.03,
+ "grad_norm": 0.38591148840458894,
+ "learning_rate": 4.695525561618418e-05,
+ "loss": 0.9743,
+ "step": 819
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.45873135108949337,
+ "learning_rate": 4.687128505119853e-05,
+ "loss": 1.0516,
+ "step": 820
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.3866330351411308,
+ "learning_rate": 4.6787283272704966e-05,
+ "loss": 0.9939,
+ "step": 821
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.4620340173291326,
+ "learning_rate": 4.670325066229009e-05,
+ "loss": 1.0526,
+ "step": 822
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.38877454299870284,
+ "learning_rate": 4.661918760168052e-05,
+ "loss": 0.9904,
+ "step": 823
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.3880489386116793,
+ "learning_rate": 4.653509447274121e-05,
+ "loss": 0.9623,
+ "step": 824
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.3827392356186151,
+ "learning_rate": 4.6450971657473743e-05,
+ "loss": 1.0772,
+ "step": 825
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.4132814641854327,
+ "learning_rate": 4.63668195380145e-05,
+ "loss": 1.0533,
+ "step": 826
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.3703610182402835,
+ "learning_rate": 4.628263849663301e-05,
+ "loss": 0.9336,
+ "step": 827
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.4152053683299823,
+ "learning_rate": 4.619842891573016e-05,
+ "loss": 0.9801,
+ "step": 828
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.41791059043554274,
+ "learning_rate": 4.6114191177836514e-05,
+ "loss": 1.0617,
+ "step": 829
+ },
+ {
+ "epoch": 1.05,
+ "grad_norm": 0.46363896517299136,
+ "learning_rate": 4.6029925665610524e-05,
+ "loss": 0.9687,
+ "step": 830
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.41141959057512445,
+ "learning_rate": 4.59456327618368e-05,
+ "loss": 1.0965,
+ "step": 831
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.3789192764519836,
+ "learning_rate": 4.5861312849424386e-05,
+ "loss": 0.9793,
+ "step": 832
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.4047291581107866,
+ "learning_rate": 4.5776966311405035e-05,
+ "loss": 1.0342,
+ "step": 833
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.4425157400959256,
+ "learning_rate": 4.5692593530931416e-05,
+ "loss": 1.0892,
+ "step": 834
+ },
+ {
+ "epoch": 1.06,
+ "grad_norm": 0.3707332144806616,
+ "learning_rate": 4.560819489127545e-05,
+ "loss": 0.9815,
+ "step": 835
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.3897444102572823,
+ "learning_rate": 4.552377077582646e-05,
+ "loss": 0.9884,
+ "step": 836
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.42725787957019346,
+ "learning_rate": 4.543932156808959e-05,
+ "loss": 0.9972,
+ "step": 837
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.40615269781820007,
+ "learning_rate": 4.535484765168386e-05,
+ "loss": 0.9529,
+ "step": 838
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.3505829736050887,
+ "learning_rate": 4.527034941034063e-05,
+ "loss": 0.9492,
+ "step": 839
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.36688064686440497,
+ "learning_rate": 4.51858272279017e-05,
+ "loss": 0.9592,
+ "step": 840
+ },
+ {
+ "epoch": 1.07,
+ "grad_norm": 0.4043468777955929,
+ "learning_rate": 4.5101281488317634e-05,
+ "loss": 1.048,
+ "step": 841
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.3811489793242706,
+ "learning_rate": 4.501671257564602e-05,
+ "loss": 1.0138,
+ "step": 842
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.39813004142325986,
+ "learning_rate": 4.49321208740497e-05,
+ "loss": 1.071,
+ "step": 843
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.3809751022095503,
+ "learning_rate": 4.484750676779504e-05,
+ "loss": 1.0351,
+ "step": 844
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.384312178013823,
+ "learning_rate": 4.4762870641250185e-05,
+ "loss": 0.9737,
+ "step": 845
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.40769404907923557,
+ "learning_rate": 4.467821287888331e-05,
+ "loss": 0.9659,
+ "step": 846
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.39594136851937817,
+ "learning_rate": 4.459353386526086e-05,
+ "loss": 0.9405,
+ "step": 847
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.37180161011562185,
+ "learning_rate": 4.450883398504584e-05,
+ "loss": 1.0732,
+ "step": 848
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.3772603623154663,
+ "learning_rate": 4.442411362299602e-05,
+ "loss": 0.9646,
+ "step": 849
+ },
+ {
+ "epoch": 1.09,
+ "grad_norm": 0.4346142368506476,
+ "learning_rate": 4.433937316396224e-05,
+ "loss": 0.9572,
+ "step": 850
+ }
+ ],
+ "logging_steps": 1.0,
+ "max_steps": 1638,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 3,
+ "save_steps": 50,
+ "total_flos": 881130794385408.0,
+ "train_batch_size": 1,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/checkpoint-850/training_args.bin b/checkpoint-850/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..8c2dfa20e1da5754719c3d7e300b9b86407f077f
--- /dev/null
+++ b/checkpoint-850/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3f2f7bd873b9dca108c5ca2e32ea140480fabeed2dec60f702daabd0a44d071e
+size 6776
diff --git a/checkpoint-850/zero_to_fp32.py b/checkpoint-850/zero_to_fp32.py
new file mode 100755
index 0000000000000000000000000000000000000000..24cc342e78d1a006c782b3a4cd68d9ce786d8fd8
--- /dev/null
+++ b/checkpoint-850/zero_to_fp32.py
@@ -0,0 +1,604 @@
+#!/usr/bin/env python
+
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+# This script extracts fp32 consolidated weights from a zero 1, 2 and 3 DeepSpeed checkpoints. It gets
+# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
+# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
+# application.
+#
+# example: python zero_to_fp32.py . pytorch_model.bin
+
+import argparse
+import torch
+import glob
+import math
+import os
+import re
+from collections import OrderedDict
+from dataclasses import dataclass
+
+# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
+# DeepSpeed data structures it has to be available in the current python environment.
+from deepspeed.utils import logger
+from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
+
+
@dataclass
class zero_model_state:
    """Per-rank model state distilled from a DeepSpeed ``*_model_states.pt`` file.

    Populated by ``parse_model_states``; consumed by the zero-2/zero-3 merge
    helpers below.
    """
    # NOTE: the original annotated these fields with dict()/list *calls*,
    # which evaluate to throwaway objects; proper type names are used instead.
    buffers: dict                  # buffer name -> fp32 tensor
    param_shapes: list             # per param-group dict: param name -> shape
    shared_params: list            # [alias_name, source_name] pairs
    frozen_param_shapes: dict      # may be None when nothing is frozen
    frozen_param_fragments: dict   # may be None when nothing is frozen
    # NOTE(review): at runtime this holds the deepspeed version read from the
    # checkpoint (likely a string such as "0.12.3"); original annotated int.
    ds_version: int

    # keep the original field order for positional construction compatibility
    def __init__(self, buffers, param_shapes, shared_params, ds_version, frozen_param_shapes, frozen_param_fragments):
        self.buffers = buffers
        self.param_shapes = param_shapes
        self.shared_params = shared_params
        self.ds_version = ds_version
        self.frozen_param_shapes = frozen_param_shapes
        self.frozen_param_fragments = frozen_param_fragments
+
+
# verbosity flag: set to 1 to print detailed per-param reconstruction progress
debug = 0

# load to cpu
device = torch.device('cpu')
+
+
def atoi(text):
    """Return ``int(text)`` when *text* is purely digits, otherwise *text* unchanged."""
    if text.isdigit():
        return int(text)
    return text
+
+
def natural_keys(text):
    """Sort key for human ("natural") ordering: digit runs compare numerically.

    alist.sort(key=natural_keys) sorts in human order
    http://nedbatchelder.com/blog/200712/human_sorting.html
    (See Toothy's implementation in the comments)
    """
    keys = []
    for chunk in re.split(r'(\d+)', text):
        # digit runs become ints so "10" sorts after "2"
        keys.append(int(chunk) if chunk.isdigit() else chunk)
    return keys
+
+
def get_model_state_file(checkpoint_dir, zero_stage):
    """Return the path of the single model-states file inside *checkpoint_dir*.

    ZeRO stage 1/2 saves it as ``mp_rank_00_model_states.pt``; stage 3 as
    ``zero_pp_rank_0_mp_rank_00_model_states.pt``.

    Raises:
        FileNotFoundError: if the directory or the expected file is missing.
        ValueError: if *zero_stage* is not 1, 2 or 3.
    """
    if not os.path.isdir(checkpoint_dir):
        raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")

    # there should be only one file
    if zero_stage <= 2:
        file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
    elif zero_stage == 3:
        file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
    else:
        # previously any other stage crashed with UnboundLocalError on `file`;
        # fail explicitly, matching the error style of parse_optim_states
        raise ValueError(f"unknown zero stage {zero_stage}")

    if not os.path.exists(file):
        raise FileNotFoundError(f"can't find model states file at '{file}'")

    return file
+
+
def get_checkpoint_files(checkpoint_dir, glob_pattern):
    """Return files under *checkpoint_dir* matching *glob_pattern*, naturally sorted.

    Raises FileNotFoundError when nothing matches.
    """
    # XXX: need to test that this simple glob rule works for multi-node setup too
    matches = glob.glob(os.path.join(checkpoint_dir, glob_pattern))
    ckpt_files = sorted(matches, key=natural_keys)

    if not ckpt_files:
        raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")

    return ckpt_files
+
+
def get_optim_files(checkpoint_dir):
    """Return the naturally-sorted ``*_optim_states.pt`` files under *checkpoint_dir*."""
    pattern = "*_optim_states.pt"
    return get_checkpoint_files(checkpoint_dir, pattern)
+
+
def get_model_state_files(checkpoint_dir):
    """Return the naturally-sorted ``*_model_states.pt`` files under *checkpoint_dir*."""
    pattern = "*_model_states.pt"
    return get_checkpoint_files(checkpoint_dir, pattern)
+
+
def parse_model_states(files):
    """Load each ``*_model_states.pt`` file into a ``zero_model_state``.

    Args:
        files: paths of the per-rank model-state checkpoint files.

    Returns:
        list of ``zero_model_state``, one per file, holding fp32 buffers,
        param-shape metadata, shared/frozen param info and the recorded
        deepspeed version.

    Raises:
        ValueError: if a file lacks the buffer-names key and is therefore not
            a model-state checkpoint.
    """
    zero_model_states = []
    for file in files:
        state_dict = torch.load(file, map_location=device)

        if BUFFER_NAMES not in state_dict:
            raise ValueError(f"{file} is not a model state checkpoint")
        buffer_names = state_dict[BUFFER_NAMES]
        if debug:
            print("Found buffers:", buffer_names)

        # recover just the buffers while restoring them to fp32 if they were saved in fp16
        buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
        param_shapes = state_dict[PARAM_SHAPES]

        # collect parameters that are included in param_shapes
        param_names = []
        for s in param_shapes:
            for name in s.keys():
                param_names.append(name)

        # update with frozen parameters
        frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
        if frozen_param_shapes is not None:
            if debug:
                print(f"Found frozen_param_shapes: {frozen_param_shapes}")
            param_names += list(frozen_param_shapes.keys())

        # handle shared params
        shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]

        # frozen-param keys are absent in older checkpoints, hence the .get defaults
        ds_version = state_dict.get(DS_VERSION, None)

        frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)

        z_model_state = zero_model_state(buffers=buffers,
                                         param_shapes=param_shapes,
                                         shared_params=shared_params,
                                         ds_version=ds_version,
                                         frozen_param_shapes=frozen_param_shapes,
                                         frozen_param_fragments=frozen_param_fragments)
        zero_model_states.append(z_model_state)

    return zero_model_states
+
+
def parse_optim_states(files, ds_checkpoint_dir):
    """Load the ``*_optim_states.pt`` files and extract the fp32 flat partitions.

    Args:
        files: per-rank optimizer-state checkpoint paths.
        ds_checkpoint_dir: checkpoint folder (only used in error messages).

    Returns:
        ``(zero_stage, world_size, fp32_flat_groups)`` — for zero-1/2 each
        rank contributes a list of per-group partitions; for zero-3 each rank
        contributes a single flat tensor (groups concatenated).

    Raises:
        ValueError: if the files are not zero checkpoints, the stage is
            unknown, or the file count does not match the recorded world size.
    """

    total_files = len(files)
    state_dicts = []
    for f in files:
        state_dict = torch.load(f, map_location=device)
        # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights
        # and also handle the case where it was already removed by another helper script
        state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
        state_dicts.append(state_dict)

    if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]:
        raise ValueError(f"{files[0]} is not a zero checkpoint")
    zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
    world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]

    # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
    # parameters can be different from data parallelism for non-expert parameters. So we can just
    # use the max of the partition_count to get the dp world_size.

    if type(world_size) is list:
        world_size = max(world_size)

    if world_size != total_files:
        raise ValueError(
            f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
            "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
        )

    # the groups are named differently in each stage
    if zero_stage <= 2:
        fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
    elif zero_stage == 3:
        fp32_groups_key = FP32_FLAT_GROUPS
    else:
        raise ValueError(f"unknown zero stage {zero_stage}")

    if zero_stage <= 2:
        fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
    elif zero_stage == 3:
        # if there is more than one param group, there will be multiple flattened tensors - one
        # flattened tensor per group - for simplicity merge them into a single tensor
        #
        # XXX: could make the script more memory efficient for when there are multiple groups - it
        # will require matching the sub-lists of param_shapes for each param group flattened tensor

        fp32_flat_groups = [
            torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
        ]

    return zero_stage, world_size, fp32_flat_groups
+
+
def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
    """
    Returns fp32 state_dict reconstructed from ds checkpoint

    Args:
        - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
        - ``exclude_frozen_parameters``: when True, frozen (non-trainable) params are left out

    """
    print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")

    optim_files = get_optim_files(ds_checkpoint_dir)
    zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
    print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")

    model_files = get_model_state_files(ds_checkpoint_dir)

    zero_model_states = parse_model_states(model_files)
    print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')

    # stage 1/2 and stage 3 lay out the flat fp32 partitions differently,
    # so reconstruction is dispatched to a stage-specific merger
    if zero_stage <= 2:
        return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                                          exclude_frozen_parameters)
    elif zero_stage == 3:
        return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                                          exclude_frozen_parameters)
+
+
def _zero2_merge_frozen_params(state_dict, zero_model_states):
    """Copy frozen (non-trainable) params into *state_dict* for zero-1/2.

    Rank 0's fragments are used as-is (no concatenation across ranks here).
    No-op when the checkpoint recorded no frozen params.
    """
    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
        return

    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
    frozen_param_fragments = zero_model_states[0].frozen_param_fragments

    if debug:
        num_elem = sum(s.numel() for s in frozen_param_shapes.values())
        print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

    wanted_params = len(frozen_param_shapes)
    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
    avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
    print(f'Frozen params: Have {avail_numel} numels to process.')
    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params = 0
    total_numel = 0
    for name, shape in frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        state_dict[name] = frozen_param_fragments[name]

        if debug:
            print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _has_callable(obj, fn):
+ attr = getattr(obj, fn, None)
+ return callable(attr)
+
+
def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    """Rebuild full fp32 trainable params from per-rank flat partitions (zero-1/2).

    Each param group's per-rank partitions are concatenated into one flat
    vector, which is then sliced back into individual params according to
    ``param_shapes``. Results are written into *state_dict* in place.

    Raises:
        ValueError: if the consumed element count disagrees with the
            (alignment-adjusted) available element count.
    """
    param_shapes = zero_model_states[0].param_shapes

    # Reconstruction protocol:
    #
    # XXX: document this

    if debug:
        for i in range(world_size):
            for j in range(len(fp32_flat_groups[0])):
                print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")

    # XXX: memory usage doubles here (zero2)
    num_param_groups = len(fp32_flat_groups[0])
    merged_single_partition_of_fp32_groups = []
    for i in range(num_param_groups):
        merged_partitions = [sd[i] for sd in fp32_flat_groups]
        full_single_fp32_vector = torch.cat(merged_partitions, 0)
        merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
    avail_numel = sum(
        [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])

    if debug:
        wanted_params = sum([len(shapes) for shapes in param_shapes])
        wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
        # not asserting if there is a mismatch due to possible padding
        print(f"Have {avail_numel} numels to process.")
        print(f"Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    total_numel = 0
    total_params = 0
    for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
        offset = 0
        avail_numel = full_single_fp32_vector.numel()
        for name, shape in shapes.items():

            unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
            total_numel += unpartitioned_numel
            total_params += 1

            if debug:
                print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
            state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
            offset += unpartitioned_numel

        # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
        # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
        # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
        # live optimizer object, so we are checking that the numbers are within the right range
        align_to = 2 * world_size

        def zero2_align(x):
            return align_to * math.ceil(x / align_to)

        if debug:
            print(f"original offset={offset}, avail_numel={avail_numel}")

        offset = zero2_align(offset)
        avail_numel = zero2_align(avail_numel)

        if debug:
            print(f"aligned offset={offset}, avail_numel={avail_numel}")

        # Sanity check
        if offset != avail_numel:
            raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
+
+
def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                               exclude_frozen_parameters):
    """Assemble the consolidated fp32 state_dict for a zero-1/2 checkpoint.

    Merges buffers, (optionally) frozen params and trainable params, then
    re-links shared params so aliases reference the same reconstructed tensor.
    """
    state_dict = OrderedDict()

    # buffers
    buffers = zero_model_states[0].buffers
    state_dict.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    if not exclude_frozen_parameters:
        _zero2_merge_frozen_params(state_dict, zero_model_states)

    _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters
    for pair in zero_model_states[0].shared_params:
        if pair[1] in state_dict:
            state_dict[pair[0]] = state_dict[pair[1]]

    return state_dict
+
+
def zero3_partitioned_param_info(unpartitioned_numel, world_size):
    """Return ``(per_rank_partition_numel, padding_numel_on_last_rank)`` for zero-3."""
    leftover = unpartitioned_numel % world_size
    padding_numel = 0 if leftover == 0 else world_size - leftover
    partitioned_numel = math.ceil(unpartitioned_numel / world_size)
    return partitioned_numel, padding_numel
+
+
def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
    """Reassemble frozen (non-trainable) params into *state_dict* for zero-3.

    Unlike zero-1/2, frozen params are partitioned across ranks here, so the
    per-rank fragments are concatenated and the end padding dropped.
    No-op when the checkpoint recorded no frozen params.
    """
    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
        return

    if debug:
        for i in range(world_size):
            num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
            print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
    wanted_params = len(frozen_param_shapes)
    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
    avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
    print(f'Frozen params: Have {avail_numel} numels to process.')
    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params = 0
    total_numel = 0
    for name, shape in zero_model_states[0].frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        # concatenate the per-rank fragments, then narrow away the alignment padding
        param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
        state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)

        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    """Reassemble trainable fp32 params from per-rank flat tensors (zero-3).

    Each param lives as ``partitioned_numel`` consecutive elements in every
    rank's flat tensor; the per-rank slices are zipped back together and the
    padding trimmed. Results are written into *state_dict* in place.

    Raises:
        ValueError: if the consumed element count disagrees with the
            available element count.
    """
    param_shapes = zero_model_states[0].param_shapes
    avail_numel = fp32_flat_groups[0].numel() * world_size
    # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
    # param, re-consolidating each param, while dealing with padding if any

    # merge list of dicts, preserving order
    param_shapes = {k: v for d in param_shapes for k, v in d.items()}

    if debug:
        for i in range(world_size):
            print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")

    wanted_params = len(param_shapes)
    wanted_numel = sum(shape.numel() for shape in param_shapes.values())
    # not asserting if there is a mismatch due to possible padding
    avail_numel = fp32_flat_groups[0].numel() * world_size
    print(f"Trainable params: Have {avail_numel} numels to process.")
    print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    offset = 0
    total_numel = 0
    total_params = 0
    for name, shape in param_shapes.items():

        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel
        total_params += 1

        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

        # XXX: memory usage doubles here
        state_dict[name] = torch.cat(
            tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
            0).narrow(0, 0, unpartitioned_numel).view(shape)
        offset += partitioned_numel

    offset *= world_size

    # Sanity check
    if offset != avail_numel:
        raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
+
+
def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                               exclude_frozen_parameters):
    """Assemble the consolidated fp32 state_dict for a zero-3 checkpoint.

    Merges buffers, (optionally) frozen params and trainable params, then
    re-links shared params so aliases reference the same reconstructed tensor.
    """
    state_dict = OrderedDict()

    # buffers
    buffers = zero_model_states[0].buffers
    state_dict.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    if not exclude_frozen_parameters:
        _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)

    _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters
    for pair in zero_model_states[0].shared_params:
        if pair[1] in state_dict:
            state_dict[pair[0]] = state_dict[pair[1]]

    return state_dict
+
+
def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None, exclude_frozen_parameters=False):
    """
    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
    ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
    via a model hub.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
        - ``exclude_frozen_parameters``: exclude frozen parameters

    Returns:
        - pytorch ``state_dict``

    Note: this approach may not work if your application doesn't have sufficient free CPU memory and
    you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
    the checkpoint.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
        # do the training and checkpoint saving
        state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
        model = model.cpu() # move to cpu
        model.load_state_dict(state_dict)
        # submit to model hub or save the model to share with others

    In this example the ``model`` will no longer be usable in the deepspeed context of the same
    application. i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.

    """
    if tag is None:
        # DeepSpeed records the most recent tag (e.g. ``global_step850``) in a
        # plain-text file named 'latest' inside the checkpoint folder
        latest_path = os.path.join(checkpoint_dir, 'latest')
        if os.path.isfile(latest_path):
            with open(latest_path, 'r') as fd:
                tag = fd.read().strip()
        else:
            raise ValueError(f"Unable to find 'latest' file at {latest_path}")

    ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)

    if not os.path.isdir(ds_checkpoint_dir):
        raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")

    return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
+
+
def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None, exclude_frozen_parameters=False):
    """
    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
    loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
        - ``exclude_frozen_parameters``: exclude frozen parameters
    """

    # reconstruct in memory, then persist with torch.save for torch.load consumers
    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag, exclude_frozen_parameters)
    print(f"Saving fp32 state dict to {output_file}")
    torch.save(state_dict, output_file)
+
+
def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
    """
    1. Put the provided model to cpu
    2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
    3. Load it into the provided model

    Args:
        - ``model``: the model object to update
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``

    Returns:
        - ``model``: modified model

    Make sure you have plenty of CPU memory available before you call this function. If you don't
    have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
    conveniently placed for you in the checkpoint folder.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
        model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
        # submit to model hub or save the model to share with others

    Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context
    of the same application. i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    """
    logger.info("Extracting fp32 weights")
    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)

    logger.info("Overwriting model with fp32 weights")
    model = model.cpu()
    # strict=False: the consolidated checkpoint may omit parameters (e.g. frozen
    # ones excluded at save time), so missing keys are tolerated rather than fatal.
    model.load_state_dict(state_dict, strict=False)

    return model
+
+
if __name__ == "__main__":

    # CLI driver: consolidate a ZeRO checkpoint folder into one fp32 state-dict file.
    cli = argparse.ArgumentParser()
    cli.add_argument("checkpoint_dir",
                     type=str,
                     help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
    cli.add_argument(
        "output_file",
        type=str,
        help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)")
    cli.add_argument("-t",
                     "--tag",
                     type=str,
                     default=None,
                     help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
    cli.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
    cli.add_argument("-d", "--debug", action='store_true', help="enable debug")
    args = cli.parse_args()

    # NOTE(review): this rebinds the module-level ``debug`` flag consulted by the
    # extraction helpers earlier in the file — keep it at module scope.
    debug = args.debug

    convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
                                               args.output_file,
                                               tag=args.tag,
                                               exclude_frozen_parameters=args.exclude_frozen_parameters)