diff --git a/adapter_config.json b/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..35553142128a486f364ea05de87725530372c975
--- /dev/null
+++ b/adapter_config.json
@@ -0,0 +1,42 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "/mnt/phwfile/datafrontier/chenyang/data/models/Eagle-X5-7B",
+ "bias": "none",
+ "corda_config": null,
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 128,
+ "lora_bias": false,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "qalora_group_size": 16,
+ "r": 64,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "down_proj",
+ "gate_proj",
+ "o_proj",
+ "k_proj",
+ "v_proj",
+ "up_proj"
+ ],
+ "target_parameters": null,
+ "task_type": "CAUSAL_LM",
+ "trainable_token_indices": null,
+ "use_dora": false,
+ "use_qalora": false,
+ "use_rslora": false
+}
\ No newline at end of file
diff --git a/adapter_model.safetensors b/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..0f52d876c630ec8c70c2146dbd88e4f82d528364
--- /dev/null
+++ b/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2f3d8836d63a7b223d474b46eca6e48ebf21c4231acf477ac23d86b0770a0bc1
+size 357679240
diff --git a/checkpoint-100/README.md b/checkpoint-100/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..7f664a3ae5272e4eb0e149c4adcd2f8c4eb45f00
--- /dev/null
+++ b/checkpoint-100/README.md
@@ -0,0 +1,207 @@
+---
+base_model: /mnt/phwfile/datafrontier/chenyang/data/models/Eagle-X5-7B
+library_name: peft
+pipeline_tag: text-generation
+tags:
+- base_model:adapter:/mnt/phwfile/datafrontier/chenyang/data/models/Eagle-X5-7B
+- lora
+- transformers
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+### Framework versions
+
+- PEFT 0.17.1
\ No newline at end of file
diff --git a/checkpoint-100/adapter_config.json b/checkpoint-100/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..35553142128a486f364ea05de87725530372c975
--- /dev/null
+++ b/checkpoint-100/adapter_config.json
@@ -0,0 +1,42 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "/mnt/phwfile/datafrontier/chenyang/data/models/Eagle-X5-7B",
+ "bias": "none",
+ "corda_config": null,
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 128,
+ "lora_bias": false,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "qalora_group_size": 16,
+ "r": 64,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "down_proj",
+ "gate_proj",
+ "o_proj",
+ "k_proj",
+ "v_proj",
+ "up_proj"
+ ],
+ "target_parameters": null,
+ "task_type": "CAUSAL_LM",
+ "trainable_token_indices": null,
+ "use_dora": false,
+ "use_qalora": false,
+ "use_rslora": false
+}
\ No newline at end of file
diff --git a/checkpoint-100/adapter_model.safetensors b/checkpoint-100/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..4581d545bc229a131b428983958c7abd234241fe
--- /dev/null
+++ b/checkpoint-100/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d4c4195dbe71a0946c57150b52e6f41bb8b51253e2f01bc9bff79fbbafb8a1d5
+size 357679240
diff --git a/checkpoint-100/global_step100/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt b/checkpoint-100/global_step100/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..b7315c2b37ee65cb04579706526784ccf4af13bc
--- /dev/null
+++ b/checkpoint-100/global_step100/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c2540c371c784e7f4ec73382dee70e0b9345d5e9fce56b0e09f05bf4019a38a9
+size 340551088
diff --git a/checkpoint-100/global_step100/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt b/checkpoint-100/global_step100/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..7346b6f3a8884dca640d3820916e2301128899d3
--- /dev/null
+++ b/checkpoint-100/global_step100/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fb55dc415c81c6e89b96972e30aa701ac6475e7a543808142be1ae52a3c48871
+size 340550896
diff --git a/checkpoint-100/global_step100/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt b/checkpoint-100/global_step100/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..9d55a9a46750106bd02023af9ef9c3a6276b2eb2
--- /dev/null
+++ b/checkpoint-100/global_step100/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:069eff2bc45865450618e8a56a67e8e2b282f75ccf8134c3471329008db6480b
+size 340550832
diff --git a/checkpoint-100/global_step100/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt b/checkpoint-100/global_step100/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..1638a78df97545b700ebe385573cf6f949db38f0
--- /dev/null
+++ b/checkpoint-100/global_step100/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:72fb24023c6418d72dd9f097af94601b78bf123be18284b6480a0d8da48e86e0
+size 340551216
diff --git a/checkpoint-100/global_step100/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt b/checkpoint-100/global_step100/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..8768060d495e2e7ea692cadd096f0dd89e25173d
--- /dev/null
+++ b/checkpoint-100/global_step100/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8c7943ab571c6df46bbf4a086415154176f42fcb9553b68e2d9e7a4fb9ad7311
+size 340551088
diff --git a/checkpoint-100/global_step100/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt b/checkpoint-100/global_step100/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..4c2f62bfafba9ef8ba9cb60678426ef4529b9114
--- /dev/null
+++ b/checkpoint-100/global_step100/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:882aaee5d595704472eaa96086fdf296fe2fb23ead7ea7ea704c181fad65a4df
+size 340574832
diff --git a/checkpoint-100/global_step100/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt b/checkpoint-100/global_step100/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..2fec2005a53723bcc25c57687444be804cf42290
--- /dev/null
+++ b/checkpoint-100/global_step100/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b1a022748162dcb5ae5febe60b05eb44bbfce6392a7063032b49324bc2a4dc40
+size 340561584
diff --git a/checkpoint-100/global_step100/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt b/checkpoint-100/global_step100/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..b2d0a9f6449bd7cdfc1731cd58817f095fcf5c9c
--- /dev/null
+++ b/checkpoint-100/global_step100/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d38dfb12952fc14e9dc3eb2cb402964808830dd4ce95df41a853dc881ccb6447
+size 340542512
diff --git a/checkpoint-100/global_step100/mp_rank_00_model_states.pt b/checkpoint-100/global_step100/mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..1446b503ffa1518501680fb9976c957466e97b5e
--- /dev/null
+++ b/checkpoint-100/global_step100/mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bfa17c7fb02721bf9eb943a673afdc1811760325497c97b450258322d62bd636
+size 456033832
diff --git a/checkpoint-100/latest b/checkpoint-100/latest
new file mode 100644
index 0000000000000000000000000000000000000000..744ae7dbad571b6f37ec6c7066549494261bb59e
--- /dev/null
+++ b/checkpoint-100/latest
@@ -0,0 +1 @@
+global_step100
\ No newline at end of file
diff --git a/checkpoint-100/rng_state_0.pth b/checkpoint-100/rng_state_0.pth
new file mode 100644
index 0000000000000000000000000000000000000000..71ec01ff649e0c8aa6c6113c5a5a19c3577f9331
--- /dev/null
+++ b/checkpoint-100/rng_state_0.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8201b91357aac77529bab468b8e17607764f5c92705e4d93a0ccfb6b59f4b90f
+size 15984
diff --git a/checkpoint-100/rng_state_1.pth b/checkpoint-100/rng_state_1.pth
new file mode 100644
index 0000000000000000000000000000000000000000..ef9421d1ef49becff27195fd59b098737a6dcf3f
--- /dev/null
+++ b/checkpoint-100/rng_state_1.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8e10ecefe0414af47758ce2df442cc1a4bd90c62d172740588a1e4721dccce57
+size 15984
diff --git a/checkpoint-100/rng_state_2.pth b/checkpoint-100/rng_state_2.pth
new file mode 100644
index 0000000000000000000000000000000000000000..42776aa433e2dd773633cd25e89f437b63911da6
--- /dev/null
+++ b/checkpoint-100/rng_state_2.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f3d6f268d9e0911516f31bd644f608b9e9f1af47231ae13cf78f4495447a0f88
+size 15984
diff --git a/checkpoint-100/rng_state_3.pth b/checkpoint-100/rng_state_3.pth
new file mode 100644
index 0000000000000000000000000000000000000000..ce048219e0a920a243a90e7fcba40f2b4aa120f4
--- /dev/null
+++ b/checkpoint-100/rng_state_3.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:55f55de3df61463fabae4461ddab493e2b337d696e7c75f4f160e358ee6c2230
+size 15984
diff --git a/checkpoint-100/rng_state_4.pth b/checkpoint-100/rng_state_4.pth
new file mode 100644
index 0000000000000000000000000000000000000000..32e7819b32d7df15f7a5b5040a49a2c4f21ab8dc
--- /dev/null
+++ b/checkpoint-100/rng_state_4.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3b48e262494a5827cd6008605e549a6977d9e4be42e90235aee4e29b57ad0b48
+size 15984
diff --git a/checkpoint-100/rng_state_5.pth b/checkpoint-100/rng_state_5.pth
new file mode 100644
index 0000000000000000000000000000000000000000..982777fd4a7feb9844a97ed4adbc715ed248d84c
--- /dev/null
+++ b/checkpoint-100/rng_state_5.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ab1dc970cc4773115110638482b52d63db0151aa09729fc15c0063d58c7d4760
+size 15984
diff --git a/checkpoint-100/rng_state_6.pth b/checkpoint-100/rng_state_6.pth
new file mode 100644
index 0000000000000000000000000000000000000000..99056b770887d35b7bdcaf153323cd472deff0b4
--- /dev/null
+++ b/checkpoint-100/rng_state_6.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0aff83d443730569c3a2641b8b1c05c005b3690d05f093e81bb3fc7bb9096d6a
+size 15984
diff --git a/checkpoint-100/rng_state_7.pth b/checkpoint-100/rng_state_7.pth
new file mode 100644
index 0000000000000000000000000000000000000000..f52f146c29515778d5b8e3a2c7267fa1b558f4dc
--- /dev/null
+++ b/checkpoint-100/rng_state_7.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:634311fc2e3bdfabf26cbb8ad12d7b1e83f2d3990e6a9574a5f2c580fcaee86f
+size 15984
diff --git a/checkpoint-100/scheduler.pt b/checkpoint-100/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..b9571dd895ffde673fe9e056f16f6c37d315d894
--- /dev/null
+++ b/checkpoint-100/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:67dd3e2a4bcfc946c6a325fbcc9dbc410cac1229fc3dcbf329bdb1ba8ac4618d
+size 1064
diff --git a/checkpoint-100/special_tokens_map.json b/checkpoint-100/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..14761dcf1466dc232bd41de9c21d4c617b15755e
--- /dev/null
+++ b/checkpoint-100/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "",
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/checkpoint-100/tokenizer.model b/checkpoint-100/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..6c00c742ce03c627d6cd5b795984876fa49fa899
--- /dev/null
+++ b/checkpoint-100/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
diff --git a/checkpoint-100/tokenizer_config.json b/checkpoint-100/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..26c65df1bf794f101c1dd54c908180dc0d880fe3
--- /dev/null
+++ b/checkpoint-100/tokenizer_config.json
@@ -0,0 +1,43 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": true,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "",
+ "legacy": false,
+ "model_max_length": 2048,
+ "pad_token": "",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "",
+ "use_default_system_prompt": false
+}
diff --git a/checkpoint-100/trainer_state.json b/checkpoint-100/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..4641be6f431d5c4c9d0a42695ce978318848e9dd
--- /dev/null
+++ b/checkpoint-100/trainer_state.json
@@ -0,0 +1,733 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 1.3333333333333333,
+ "eval_steps": 500,
+ "global_step": 100,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.013333333333333334,
+ "grad_norm": 1.0028682947158813,
+ "learning_rate": 2.2222222222222223e-05,
+ "loss": 1.43,
+ "step": 1
+ },
+ {
+ "epoch": 0.02666666666666667,
+ "grad_norm": 1.6397583484649658,
+ "learning_rate": 4.4444444444444447e-05,
+ "loss": 1.5374,
+ "step": 2
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 1.2198785543441772,
+ "learning_rate": 6.666666666666667e-05,
+ "loss": 1.5106,
+ "step": 3
+ },
+ {
+ "epoch": 0.05333333333333334,
+ "grad_norm": 0.8468486070632935,
+ "learning_rate": 8.888888888888889e-05,
+ "loss": 1.447,
+ "step": 4
+ },
+ {
+ "epoch": 0.06666666666666667,
+ "grad_norm": 0.9268608689308167,
+ "learning_rate": 0.00011111111111111112,
+ "loss": 1.4743,
+ "step": 5
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 1.0168085098266602,
+ "learning_rate": 0.00013333333333333334,
+ "loss": 1.3245,
+ "step": 6
+ },
+ {
+ "epoch": 0.09333333333333334,
+ "grad_norm": 0.6773934960365295,
+ "learning_rate": 0.00015555555555555556,
+ "loss": 1.3738,
+ "step": 7
+ },
+ {
+ "epoch": 0.10666666666666667,
+ "grad_norm": 0.6985631585121155,
+ "learning_rate": 0.00017777777777777779,
+ "loss": 1.3951,
+ "step": 8
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 1.0221399068832397,
+ "learning_rate": 0.0002,
+ "loss": 1.3246,
+ "step": 9
+ },
+ {
+ "epoch": 0.13333333333333333,
+ "grad_norm": 0.6119747161865234,
+ "learning_rate": 0.00019999417253661235,
+ "loss": 1.2951,
+ "step": 10
+ },
+ {
+ "epoch": 0.14666666666666667,
+ "grad_norm": 0.6660990118980408,
+ "learning_rate": 0.00019997669082563597,
+ "loss": 1.2529,
+ "step": 11
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.5874819755554199,
+ "learning_rate": 0.00019994755690455152,
+ "loss": 1.2252,
+ "step": 12
+ },
+ {
+ "epoch": 0.17333333333333334,
+ "grad_norm": 0.4818006157875061,
+ "learning_rate": 0.00019990677416889608,
+ "loss": 1.1708,
+ "step": 13
+ },
+ {
+ "epoch": 0.18666666666666668,
+ "grad_norm": 0.652045488357544,
+ "learning_rate": 0.0001998543473718677,
+ "loss": 0.9865,
+ "step": 14
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.5517733693122864,
+ "learning_rate": 0.00019979028262377118,
+ "loss": 1.181,
+ "step": 15
+ },
+ {
+ "epoch": 0.21333333333333335,
+ "grad_norm": 0.47720542550086975,
+ "learning_rate": 0.00019971458739130598,
+ "loss": 1.0633,
+ "step": 16
+ },
+ {
+ "epoch": 0.22666666666666666,
+ "grad_norm": 0.7947096228599548,
+ "learning_rate": 0.000199627270496696,
+ "loss": 1.1458,
+ "step": 17
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.5301257371902466,
+ "learning_rate": 0.0001995283421166614,
+ "loss": 1.2245,
+ "step": 18
+ },
+ {
+ "epoch": 0.25333333333333335,
+ "grad_norm": 0.9473271369934082,
+ "learning_rate": 0.00019941781378123244,
+ "loss": 1.0859,
+ "step": 19
+ },
+ {
+ "epoch": 0.26666666666666666,
+ "grad_norm": 1.1834161281585693,
+ "learning_rate": 0.00019929569837240564,
+ "loss": 1.1808,
+ "step": 20
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.6784033179283142,
+ "learning_rate": 0.00019916201012264254,
+ "loss": 1.202,
+ "step": 21
+ },
+ {
+ "epoch": 0.29333333333333333,
+ "grad_norm": 0.7274785041809082,
+ "learning_rate": 0.00019901676461321068,
+ "loss": 1.2242,
+ "step": 22
+ },
+ {
+ "epoch": 0.30666666666666664,
+ "grad_norm": 0.7520783543586731,
+ "learning_rate": 0.00019885997877236788,
+ "loss": 1.173,
+ "step": 23
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.8218541145324707,
+ "learning_rate": 0.00019869167087338907,
+ "loss": 1.0723,
+ "step": 24
+ },
+ {
+ "epoch": 0.3333333333333333,
+ "grad_norm": 0.6361420154571533,
+ "learning_rate": 0.00019851186053243666,
+ "loss": 1.1607,
+ "step": 25
+ },
+ {
+ "epoch": 0.3466666666666667,
+ "grad_norm": 0.6401374936103821,
+ "learning_rate": 0.00019832056870627417,
+ "loss": 1.1398,
+ "step": 26
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 1.0995603799819946,
+ "learning_rate": 0.0001981178176898239,
+ "loss": 0.9475,
+ "step": 27
+ },
+ {
+ "epoch": 0.37333333333333335,
+ "grad_norm": 0.5122275948524475,
+ "learning_rate": 0.00019790363111356837,
+ "loss": 1.0125,
+ "step": 28
+ },
+ {
+ "epoch": 0.38666666666666666,
+ "grad_norm": 0.9316695332527161,
+ "learning_rate": 0.00019767803394079615,
+ "loss": 1.0554,
+ "step": 29
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.6831843256950378,
+ "learning_rate": 0.00019744105246469263,
+ "loss": 1.027,
+ "step": 30
+ },
+ {
+ "epoch": 0.41333333333333333,
+ "grad_norm": 0.5529218912124634,
+ "learning_rate": 0.0001971927143052752,
+ "loss": 1.0218,
+ "step": 31
+ },
+ {
+ "epoch": 0.4266666666666667,
+ "grad_norm": 0.7164443135261536,
+ "learning_rate": 0.00019693304840617457,
+ "loss": 1.0901,
+ "step": 32
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.7430665493011475,
+ "learning_rate": 0.00019666208503126112,
+ "loss": 1.125,
+ "step": 33
+ },
+ {
+ "epoch": 0.4533333333333333,
+ "grad_norm": 0.7198122143745422,
+ "learning_rate": 0.00019637985576111778,
+ "loss": 0.9926,
+ "step": 34
+ },
+ {
+ "epoch": 0.4666666666666667,
+ "grad_norm": 0.6613873839378357,
+ "learning_rate": 0.0001960863934893594,
+ "loss": 1.1925,
+ "step": 35
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 1.2440484762191772,
+ "learning_rate": 0.00019578173241879872,
+ "loss": 0.8948,
+ "step": 36
+ },
+ {
+ "epoch": 0.49333333333333335,
+ "grad_norm": 0.6995570659637451,
+ "learning_rate": 0.00019546590805746052,
+ "loss": 0.9525,
+ "step": 37
+ },
+ {
+ "epoch": 0.5066666666666667,
+ "grad_norm": 0.6873968243598938,
+ "learning_rate": 0.00019513895721444286,
+ "loss": 0.9966,
+ "step": 38
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.5394595861434937,
+ "learning_rate": 0.00019480091799562704,
+ "loss": 1.0787,
+ "step": 39
+ },
+ {
+ "epoch": 0.5333333333333333,
+ "grad_norm": 0.9196312427520752,
+ "learning_rate": 0.00019445182979923654,
+ "loss": 1.0559,
+ "step": 40
+ },
+ {
+ "epoch": 0.5466666666666666,
+ "grad_norm": 2.5954103469848633,
+ "learning_rate": 0.000194091733311245,
+ "loss": 0.9638,
+ "step": 41
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 1.244681715965271,
+ "learning_rate": 0.00019372067050063438,
+ "loss": 0.9325,
+ "step": 42
+ },
+ {
+ "epoch": 0.5733333333333334,
+ "grad_norm": 0.613715410232544,
+ "learning_rate": 0.0001933386846145036,
+ "loss": 0.9428,
+ "step": 43
+ },
+ {
+ "epoch": 0.5866666666666667,
+ "grad_norm": 0.9604235291481018,
+ "learning_rate": 0.00019294582017302797,
+ "loss": 0.9486,
+ "step": 44
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.7591239809989929,
+ "learning_rate": 0.00019254212296427044,
+ "loss": 1.0955,
+ "step": 45
+ },
+ {
+ "epoch": 0.6133333333333333,
+ "grad_norm": 0.5218423008918762,
+ "learning_rate": 0.0001921276400388451,
+ "loss": 1.0368,
+ "step": 46
+ },
+ {
+ "epoch": 0.6266666666666667,
+ "grad_norm": 0.5670755505561829,
+ "learning_rate": 0.00019170241970443343,
+ "loss": 0.9854,
+ "step": 47
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.8089922070503235,
+ "learning_rate": 0.00019126651152015403,
+ "loss": 1.0396,
+ "step": 48
+ },
+ {
+ "epoch": 0.6533333333333333,
+ "grad_norm": 0.6459051966667175,
+ "learning_rate": 0.00019081996629078657,
+ "loss": 1.0156,
+ "step": 49
+ },
+ {
+ "epoch": 0.6666666666666666,
+ "grad_norm": 1.1918997764587402,
+ "learning_rate": 0.00019036283606085053,
+ "loss": 1.1542,
+ "step": 50
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.6263722777366638,
+ "learning_rate": 0.00018989517410853955,
+ "loss": 1.1471,
+ "step": 51
+ },
+ {
+ "epoch": 0.6933333333333334,
+ "grad_norm": 0.534618616104126,
+ "learning_rate": 0.00018941703493951164,
+ "loss": 0.9055,
+ "step": 52
+ },
+ {
+ "epoch": 0.7066666666666667,
+ "grad_norm": 0.529036819934845,
+ "learning_rate": 0.00018892847428053693,
+ "loss": 0.9896,
+ "step": 53
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.716572105884552,
+ "learning_rate": 0.00018842954907300236,
+ "loss": 1.0204,
+ "step": 54
+ },
+ {
+ "epoch": 0.7333333333333333,
+ "grad_norm": 0.832662045955658,
+ "learning_rate": 0.00018792031746627563,
+ "loss": 1.0263,
+ "step": 55
+ },
+ {
+ "epoch": 0.7466666666666667,
+ "grad_norm": 0.5659884810447693,
+ "learning_rate": 0.0001874008388109276,
+ "loss": 1.0529,
+ "step": 56
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.4971260726451874,
+ "learning_rate": 0.00018687117365181512,
+ "loss": 1.0109,
+ "step": 57
+ },
+ {
+ "epoch": 0.7733333333333333,
+ "grad_norm": 0.5997689962387085,
+ "learning_rate": 0.00018633138372102468,
+ "loss": 1.0363,
+ "step": 58
+ },
+ {
+ "epoch": 0.7866666666666666,
+ "grad_norm": 0.42450904846191406,
+ "learning_rate": 0.00018578153193067745,
+ "loss": 1.0156,
+ "step": 59
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 1.0025880336761475,
+ "learning_rate": 0.00018522168236559695,
+ "loss": 0.96,
+ "step": 60
+ },
+ {
+ "epoch": 0.8133333333333334,
+ "grad_norm": 0.47225672006607056,
+ "learning_rate": 0.00018465190027584005,
+ "loss": 1.0402,
+ "step": 61
+ },
+ {
+ "epoch": 0.8266666666666667,
+ "grad_norm": 0.6419042348861694,
+ "learning_rate": 0.00018407225206909208,
+ "loss": 0.9727,
+ "step": 62
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.9594618082046509,
+ "learning_rate": 0.00018348280530292713,
+ "loss": 0.986,
+ "step": 63
+ },
+ {
+ "epoch": 0.8533333333333334,
+ "grad_norm": 0.5415605902671814,
+ "learning_rate": 0.00018288362867693414,
+ "loss": 1.019,
+ "step": 64
+ },
+ {
+ "epoch": 0.8666666666666667,
+ "grad_norm": 0.9662086367607117,
+ "learning_rate": 0.00018227479202471015,
+ "loss": 0.9531,
+ "step": 65
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.7523136734962463,
+ "learning_rate": 0.0001816563663057211,
+ "loss": 1.0383,
+ "step": 66
+ },
+ {
+ "epoch": 0.8933333333333333,
+ "grad_norm": 0.7249945998191833,
+ "learning_rate": 0.00018102842359703176,
+ "loss": 1.1131,
+ "step": 67
+ },
+ {
+ "epoch": 0.9066666666666666,
+ "grad_norm": 0.4781404435634613,
+ "learning_rate": 0.000180391037084905,
+ "loss": 0.9701,
+ "step": 68
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.5435504913330078,
+ "learning_rate": 0.00017974428105627208,
+ "loss": 1.2701,
+ "step": 69
+ },
+ {
+ "epoch": 0.9333333333333333,
+ "grad_norm": 0.48021838068962097,
+ "learning_rate": 0.00017908823089007457,
+ "loss": 0.9212,
+ "step": 70
+ },
+ {
+ "epoch": 0.9466666666666667,
+ "grad_norm": 0.7063950300216675,
+ "learning_rate": 0.00017842296304847893,
+ "loss": 1.0076,
+ "step": 71
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.5694530606269836,
+ "learning_rate": 0.00017774855506796496,
+ "loss": 1.0417,
+ "step": 72
+ },
+ {
+ "epoch": 0.9733333333333334,
+ "grad_norm": 0.6120775938034058,
+ "learning_rate": 0.00017706508555028893,
+ "loss": 1.0501,
+ "step": 73
+ },
+ {
+ "epoch": 0.9866666666666667,
+ "grad_norm": 0.5728889107704163,
+ "learning_rate": 0.0001763726341533227,
+ "loss": 1.0024,
+ "step": 74
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.8452621102333069,
+ "learning_rate": 0.00017567128158176953,
+ "loss": 1.0966,
+ "step": 75
+ },
+ {
+ "epoch": 1.0133333333333334,
+ "grad_norm": 0.5769606828689575,
+ "learning_rate": 0.0001749611095777581,
+ "loss": 0.8877,
+ "step": 76
+ },
+ {
+ "epoch": 1.0266666666666666,
+ "grad_norm": 0.9046480059623718,
+ "learning_rate": 0.00017424220091131535,
+ "loss": 0.752,
+ "step": 77
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.4488053023815155,
+ "learning_rate": 0.00017351463937072004,
+ "loss": 0.7588,
+ "step": 78
+ },
+ {
+ "epoch": 1.0533333333333332,
+ "grad_norm": 0.41479629278182983,
+ "learning_rate": 0.00017277850975273696,
+ "loss": 0.779,
+ "step": 79
+ },
+ {
+ "epoch": 1.0666666666666667,
+ "grad_norm": 0.7192550301551819,
+ "learning_rate": 0.000172033897852734,
+ "loss": 0.7809,
+ "step": 80
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.7553783655166626,
+ "learning_rate": 0.00017128089045468294,
+ "loss": 0.7421,
+ "step": 81
+ },
+ {
+ "epoch": 1.0933333333333333,
+ "grad_norm": 0.5650737881660461,
+ "learning_rate": 0.0001705195753210446,
+ "loss": 0.7044,
+ "step": 82
+ },
+ {
+ "epoch": 1.1066666666666667,
+ "grad_norm": 0.6692880988121033,
+ "learning_rate": 0.0001697500411825403,
+ "loss": 0.8415,
+ "step": 83
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.6710836291313171,
+ "learning_rate": 0.00016897237772781044,
+ "loss": 0.7247,
+ "step": 84
+ },
+ {
+ "epoch": 1.1333333333333333,
+ "grad_norm": 0.5887194275856018,
+ "learning_rate": 0.0001681866755929612,
+ "loss": 0.7947,
+ "step": 85
+ },
+ {
+ "epoch": 1.1466666666666667,
+ "grad_norm": 0.5538906455039978,
+ "learning_rate": 0.00016739302635100108,
+ "loss": 0.7337,
+ "step": 86
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.6018480062484741,
+ "learning_rate": 0.00016659152250116812,
+ "loss": 0.6801,
+ "step": 87
+ },
+ {
+ "epoch": 1.1733333333333333,
+ "grad_norm": 0.548251748085022,
+ "learning_rate": 0.00016578225745814907,
+ "loss": 0.6898,
+ "step": 88
+ },
+ {
+ "epoch": 1.1866666666666668,
+ "grad_norm": 0.49416568875312805,
+ "learning_rate": 0.00016496532554119214,
+ "loss": 0.719,
+ "step": 89
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.6101306676864624,
+ "learning_rate": 0.000164140821963114,
+ "loss": 0.7917,
+ "step": 90
+ },
+ {
+ "epoch": 1.2133333333333334,
+ "grad_norm": 0.588020920753479,
+ "learning_rate": 0.000163308842819203,
+ "loss": 0.8035,
+ "step": 91
+ },
+ {
+ "epoch": 1.2266666666666666,
+ "grad_norm": 0.5376534461975098,
+ "learning_rate": 0.00016246948507601914,
+ "loss": 0.7816,
+ "step": 92
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.549625813961029,
+ "learning_rate": 0.00016162284656009274,
+ "loss": 0.7245,
+ "step": 93
+ },
+ {
+ "epoch": 1.2533333333333334,
+ "grad_norm": 0.6054933667182922,
+ "learning_rate": 0.0001607690259465229,
+ "loss": 0.7484,
+ "step": 94
+ },
+ {
+ "epoch": 1.2666666666666666,
+ "grad_norm": 0.5613316297531128,
+ "learning_rate": 0.00015990812274747692,
+ "loss": 0.7369,
+ "step": 95
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.5984250903129578,
+ "learning_rate": 0.00015904023730059228,
+ "loss": 0.891,
+ "step": 96
+ },
+ {
+ "epoch": 1.2933333333333334,
+ "grad_norm": 0.6490495204925537,
+ "learning_rate": 0.00015816547075728226,
+ "loss": 0.775,
+ "step": 97
+ },
+ {
+ "epoch": 1.3066666666666666,
+ "grad_norm": 0.6675053834915161,
+ "learning_rate": 0.000157283925070947,
+ "loss": 0.7565,
+ "step": 98
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.5397291779518127,
+ "learning_rate": 0.00015639570298509064,
+ "loss": 0.6685,
+ "step": 99
+ },
+ {
+ "epoch": 1.3333333333333333,
+ "grad_norm": 0.5830179452896118,
+ "learning_rate": 0.000155500908021347,
+ "loss": 0.8132,
+ "step": 100
+ }
+ ],
+ "logging_steps": 1.0,
+ "max_steps": 300,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 4,
+ "save_steps": 50,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 2.904617129292595e+16,
+ "train_batch_size": 2,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/checkpoint-100/training_args.bin b/checkpoint-100/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..26de66c830692f3d22f375473f4f43447eefb78a
--- /dev/null
+++ b/checkpoint-100/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:54335a849ae8d377cae5839a736f4196087c7894666ca9b9f10dd899e0ada95c
+size 6904
diff --git a/checkpoint-100/zero_to_fp32.py b/checkpoint-100/zero_to_fp32.py
new file mode 100644
index 0000000000000000000000000000000000000000..5995d6e6f04e43b989587aa9022a3aef0c66d694
--- /dev/null
+++ b/checkpoint-100/zero_to_fp32.py
@@ -0,0 +1,760 @@
+#!/usr/bin/env python
+
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+# This script extracts fp32 consolidated weights from a zero 1, 2 and 3 DeepSpeed checkpoints. It gets
+# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
+# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
+# application.
+#
+# example:
+# python zero_to_fp32.py . output_dir/
+# or
+# python zero_to_fp32.py . output_dir/ --safe_serialization
+
+import argparse
+import torch
+import glob
+import math
+import os
+import re
+import gc
+import json
+import numpy as np
+from tqdm import tqdm
+from collections import OrderedDict
+from dataclasses import dataclass
+
+# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
+# DeepSpeed data structures it has to be available in the current python environment.
+from deepspeed.utils import logger
+from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
+
+
+@dataclass
+class zero_model_state:
+ buffers: dict()
+ param_shapes: dict()
+ shared_params: list
+ ds_version: int
+ frozen_param_shapes: dict()
+ frozen_param_fragments: dict()
+
+
+debug = 0
+
+# load to cpu
+device = torch.device('cpu')
+
+
+def atoi(text):
+ return int(text) if text.isdigit() else text
+
+
+def natural_keys(text):
+ '''
+ alist.sort(key=natural_keys) sorts in human order
+ http://nedbatchelder.com/blog/200712/human_sorting.html
+ (See Toothy's implementation in the comments)
+ '''
+ return [atoi(c) for c in re.split(r'(\d+)', text)]
+
+
+def get_model_state_file(checkpoint_dir, zero_stage):
+ if not os.path.isdir(checkpoint_dir):
+ raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
+
+ # there should be only one file
+ if zero_stage <= 2:
+ file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
+ elif zero_stage == 3:
+ file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
+
+ if not os.path.exists(file):
+ raise FileNotFoundError(f"can't find model states file at '{file}'")
+
+ return file
+
+
+def get_checkpoint_files(checkpoint_dir, glob_pattern):
+ # XXX: need to test that this simple glob rule works for multi-node setup too
+ ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
+
+ if len(ckpt_files) == 0:
+ raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
+
+ return ckpt_files
+
+
+def get_optim_files(checkpoint_dir):
+ return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
+
+
+def get_model_state_files(checkpoint_dir):
+ return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
+
+
+def parse_model_states(files):
+ zero_model_states = []
+ for file in files:
+ state_dict = torch.load(file, map_location=device, weights_only=False)
+
+ if BUFFER_NAMES not in state_dict:
+ raise ValueError(f"{file} is not a model state checkpoint")
+ buffer_names = state_dict[BUFFER_NAMES]
+ if debug:
+ print("Found buffers:", buffer_names)
+
+ # recover just the buffers while restoring them to fp32 if they were saved in fp16
+ buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
+ param_shapes = state_dict[PARAM_SHAPES]
+
+ # collect parameters that are included in param_shapes
+ param_names = []
+ for s in param_shapes:
+ for name in s.keys():
+ param_names.append(name)
+
+ # update with frozen parameters
+ frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
+ if frozen_param_shapes is not None:
+ if debug:
+ print(f"Found frozen_param_shapes: {frozen_param_shapes}")
+ param_names += list(frozen_param_shapes.keys())
+
+ # handle shared params
+ shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
+
+ ds_version = state_dict.get(DS_VERSION, None)
+
+ frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
+
+ z_model_state = zero_model_state(buffers=buffers,
+ param_shapes=param_shapes,
+ shared_params=shared_params,
+ ds_version=ds_version,
+ frozen_param_shapes=frozen_param_shapes,
+ frozen_param_fragments=frozen_param_fragments)
+ zero_model_states.append(z_model_state)
+
+ return zero_model_states
+
+
+def parse_optim_states(files, ds_checkpoint_dir):
+ total_files = len(files)
+ state_dicts = []
+ for f in tqdm(files, desc='Loading checkpoint shards'):
+ state_dict = torch.load(f, map_location=device, mmap=True, weights_only=False)
+ # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights
+ # and also handle the case where it was already removed by another helper script
+ state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
+ state_dicts.append(state_dict)
+
+ if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
+ raise ValueError(f"{files[0]} is not a zero checkpoint")
+ zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
+ world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
+
+ # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
+ # parameters can be different from data parallelism for non-expert parameters. So we can just
+ # use the max of the partition_count to get the dp world_size.
+
+ if type(world_size) is list:
+ world_size = max(world_size)
+
+ if world_size != total_files:
+ raise ValueError(
+ f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
+ "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
+ )
+
+ # the groups are named differently in each stage
+ if zero_stage <= 2:
+ fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
+ elif zero_stage == 3:
+ fp32_groups_key = FP32_FLAT_GROUPS
+ else:
+ raise ValueError(f"unknown zero stage {zero_stage}")
+
+ fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
+ return zero_stage, world_size, fp32_flat_groups
+
+
+def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
+ """
+ Returns fp32 state_dict reconstructed from ds checkpoint
+
+ Args:
+ - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
+
+ """
+ print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
+
+ optim_files = get_optim_files(ds_checkpoint_dir)
+ zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
+ print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
+
+ model_files = get_model_state_files(ds_checkpoint_dir)
+
+ zero_model_states = parse_model_states(model_files)
+ print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
+
+ if zero_stage <= 2:
+ return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters)
+ elif zero_stage == 3:
+ return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters)
+
+
+def _zero2_merge_frozen_params(state_dict, zero_model_states):
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+ return
+
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+ frozen_param_fragments = zero_model_states[0].frozen_param_fragments
+
+ if debug:
+ num_elem = sum(s.numel() for s in frozen_param_shapes.values())
+ print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+ wanted_params = len(frozen_param_shapes)
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+ avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
+ print(f'Frozen params: Have {avail_numel} numels to process.')
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+ total_params = 0
+ total_numel = 0
+ for name, shape in frozen_param_shapes.items():
+ total_params += 1
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+
+ state_dict[name] = frozen_param_fragments[name]
+
+ if debug:
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _has_callable(obj, fn):
+ attr = getattr(obj, fn, None)
+ return callable(attr)
+
+
+def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+ param_shapes = zero_model_states[0].param_shapes
+
+ # Reconstruction protocol:
+ #
+ # XXX: document this
+
+ if debug:
+ for i in range(world_size):
+ for j in range(len(fp32_flat_groups[0])):
+ print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
+
+ # XXX: memory usage doubles here (zero2)
+ num_param_groups = len(fp32_flat_groups[0])
+ merged_single_partition_of_fp32_groups = []
+ for i in range(num_param_groups):
+ merged_partitions = [sd[i] for sd in fp32_flat_groups]
+ full_single_fp32_vector = torch.cat(merged_partitions, 0)
+ merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
+ avail_numel = sum(
+ [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
+
+ if debug:
+ wanted_params = sum([len(shapes) for shapes in param_shapes])
+ wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
+ # not asserting if there is a mismatch due to possible padding
+ print(f"Have {avail_numel} numels to process.")
+ print(f"Need {wanted_numel} numels in {wanted_params} params.")
+
+ # params
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+ # out-of-core computing solution
+ total_numel = 0
+ total_params = 0
+ for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
+ offset = 0
+ avail_numel = full_single_fp32_vector.numel()
+ for name, shape in shapes.items():
+
+ unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
+ total_numel += unpartitioned_numel
+ total_params += 1
+
+ if debug:
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+ state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
+ offset += unpartitioned_numel
+
+ # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
+ # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
+ # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
+ # live optimizer object, so we are checking that the numbers are within the right range
+ align_to = 2 * world_size
+
+ def zero2_align(x):
+ return align_to * math.ceil(x / align_to)
+
+ if debug:
+ print(f"original offset={offset}, avail_numel={avail_numel}")
+
+ offset = zero2_align(offset)
+ avail_numel = zero2_align(avail_numel)
+
+ if debug:
+ print(f"aligned offset={offset}, avail_numel={avail_numel}")
+
+ # Sanity check
+ if offset != avail_numel:
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+ print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters):
+ state_dict = OrderedDict()
+
+ # buffers
+ buffers = zero_model_states[0].buffers
+ state_dict.update(buffers)
+ if debug:
+ print(f"added {len(buffers)} buffers")
+
+ if not exclude_frozen_parameters:
+ _zero2_merge_frozen_params(state_dict, zero_model_states)
+
+ _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+ # recover shared parameters
+ for pair in zero_model_states[0].shared_params:
+ if pair[1] in state_dict:
+ state_dict[pair[0]] = state_dict[pair[1]]
+
+ return state_dict
+
+
+def zero3_partitioned_param_info(unpartitioned_numel, world_size):
+ remainder = unpartitioned_numel % world_size
+ padding_numel = (world_size - remainder) if remainder else 0
+ partitioned_numel = math.ceil(unpartitioned_numel / world_size)
+ return partitioned_numel, padding_numel
+
+
+def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+ return
+
+ if debug:
+ for i in range(world_size):
+ num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
+ print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+ wanted_params = len(frozen_param_shapes)
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+ avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
+ print(f'Frozen params: Have {avail_numel} numels to process.')
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+ total_params = 0
+ total_numel = 0
+ for name, shape in zero_model_states[0].frozen_param_shapes.items():
+ total_params += 1
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+
+ param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
+ state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
+
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+ if debug:
+ print(
+ f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+ )
+
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+class GatheredTensor:
+ """
+ A pseudo tensor that collects partitioned weights.
+ It is more memory efficient when there are multiple groups.
+ """
+
+ def __init__(self, flat_groups, flat_groups_offset, offset, partitioned_numel, shape):
+ self.flat_groups = flat_groups
+ self.flat_groups_offset = flat_groups_offset
+ self.offset = offset
+ self.partitioned_numel = partitioned_numel
+ self.shape = shape
+ self.dtype = self.flat_groups[0][0].dtype
+
+ def contiguous(self):
+ """
+ Merge partitioned weights from flat_groups into a single tensor.
+ """
+ end_idx = self.offset + self.partitioned_numel
+ world_size = len(self.flat_groups)
+ pad_flat_param_chunks = []
+
+ for rank_i in range(world_size):
+ # for each rank, we need to collect weights from related group/groups
+ flat_groups_at_rank_i = self.flat_groups[rank_i]
+ start_group_id = None
+ end_group_id = None
+ for group_id in range(len(self.flat_groups_offset)):
+ if self.flat_groups_offset[group_id] <= self.offset < self.flat_groups_offset[group_id + 1]:
+ start_group_id = group_id
+ if self.flat_groups_offset[group_id] < end_idx <= self.flat_groups_offset[group_id + 1]:
+ end_group_id = group_id
+ break
+ # collect weights from related group/groups
+ for group_id in range(start_group_id, end_group_id + 1):
+ flat_tensor = flat_groups_at_rank_i[group_id]
+ start_offset = self.offset - self.flat_groups_offset[group_id]
+ end_offset = min(end_idx, self.flat_groups_offset[group_id + 1]) - self.flat_groups_offset[group_id]
+ pad_flat_param_chunks.append(flat_tensor[start_offset:end_offset])
+
+ # collect weights from all ranks
+ pad_flat_param = torch.cat(pad_flat_param_chunks, dim=0)
+ param = pad_flat_param[:self.shape.numel()].view(self.shape).contiguous()
+ return param
+
+
+def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+ param_shapes = zero_model_states[0].param_shapes
+ avail_numel = sum([flat_group.numel() for flat_group in fp32_flat_groups[0]]) * world_size
+
+ # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
+ # param, re-consolidating each param, while dealing with padding if any
+
+ # merge list of dicts, preserving order
+ param_shapes = {k: v for d in param_shapes for k, v in d.items()}
+
+ if debug:
+ for i in range(world_size):
+ print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
+
+ wanted_params = len(param_shapes)
+ wanted_numel = sum(shape.numel() for shape in param_shapes.values())
+ # not asserting if there is a mismatch due to possible padding
+ avail_numel = fp32_flat_groups[0].numel() * world_size
+ print(f"Trainable params: Have {avail_numel} numels to process.")
+ print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
+
+ # params
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+ # out-of-core computing solution
+ offset = 0
+ total_numel = 0
+ total_params = 0
+ flat_groups_offset = [0] + list(np.cumsum([flat_tensor.numel() for flat_tensor in fp32_flat_groups[0]]))
+ for name, shape in tqdm(param_shapes.items(), desc='Gathering sharded weights'):
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+ total_params += 1
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+ if debug:
+ print(
+ f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+ )
+
+ # memory efficient tensor
+ tensor = GatheredTensor(fp32_flat_groups, flat_groups_offset, offset, partitioned_numel, shape)
+ state_dict[name] = tensor
+ offset += partitioned_numel
+
+ offset *= world_size
+
+ # Sanity check
+ if offset != avail_numel:
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+ print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters):
+ state_dict = OrderedDict()
+
+ # buffers
+ buffers = zero_model_states[0].buffers
+ state_dict.update(buffers)
+ if debug:
+ print(f"added {len(buffers)} buffers")
+
+ if not exclude_frozen_parameters:
+ _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
+
+ _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+ # recover shared parameters
+ for pair in zero_model_states[0].shared_params:
+ if pair[1] in state_dict:
+ state_dict[pair[0]] = state_dict[pair[1]]
+
+ return state_dict
+
+
+def to_torch_tensor(state_dict, return_empty_tensor=False):
+ """
+ Convert state_dict of GatheredTensor to torch tensor
+ """
+ torch_state_dict = {}
+ converted_tensors = {}
+ for name, tensor in state_dict.items():
+ tensor_id = id(tensor)
+ if tensor_id in converted_tensors: # shared tensors
+ shared_tensor = torch_state_dict[converted_tensors[tensor_id]]
+ torch_state_dict[name] = shared_tensor
+ else:
+ converted_tensors[tensor_id] = name
+ if return_empty_tensor:
+ torch_state_dict[name] = torch.empty(tensor.shape, dtype=tensor.dtype)
+ else:
+ torch_state_dict[name] = tensor.contiguous()
+ return torch_state_dict
+
+
+def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
+ tag=None,
+ exclude_frozen_parameters=False,
+ lazy_mode=False):
+ """
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
+ ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
+ via a model hub.
+
+ Args:
+ - ``checkpoint_dir``: path to the desired checkpoint folder
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
+ - ``exclude_frozen_parameters``: exclude frozen parameters
+ - ``lazy_mode``: get state_dict in lazy mode. It returns a dict of pseudo tensor instead of torch tensor, which is more memory efficient.
+ Convert the pseudo tensor to torch tensor by ``.contiguous()``
+
+ Returns:
+ - pytorch ``state_dict``
+
+ A typical usage might be ::
+
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+ # do the training and checkpoint saving
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
+ model = model.cpu() # move to cpu
+ model.load_state_dict(state_dict)
+ # submit to model hub or save the model to share with others
+
+ In this example the ``model`` will no longer be usable in the deepspeed context of the same
+ application. i.e. you will need to re-initialize the deepspeed engine, since
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+ If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
+
+ Note: the above usage may not work if your application doesn't have sufficient free CPU memory.
+ You may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
+ the checkpoint. Or you can load state_dict in lazy mode ::
+
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, lazy_mode=True) # not on cpu
+ for name, lazy_tensor in state_dict.items():
+ tensor = lazy_tensor.contiguous() # to cpu
+ print(name, tensor)
+ # del tensor to release memory if it is no longer in use
+ """
+ if tag is None:
+ latest_path = os.path.join(checkpoint_dir, 'latest')
+ if os.path.isfile(latest_path):
+ with open(latest_path, 'r') as fd:
+ tag = fd.read().strip()
+ else:
+ raise ValueError(f"Unable to find 'latest' file at {latest_path}")
+
+ ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
+
+ if not os.path.isdir(ds_checkpoint_dir):
+ raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
+
+ state_dict = _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
+ if lazy_mode:
+ return state_dict
+ else:
+ return to_torch_tensor(state_dict)
+
+
+def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir,
+ output_dir,
+ max_shard_size="5GB",
+ safe_serialization=False,
+ tag=None,
+ exclude_frozen_parameters=False):
+ """
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
+ loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
+
+ Args:
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+ - ``output_dir``: directory to the pytorch fp32 state_dict output files
+ - ``max_shard_size``: the maximum size for a checkpoint before being sharded, default value is 5GB
+ - ``safe_serialization``: whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+ - ``exclude_frozen_parameters``: exclude frozen parameters
+ """
+
+ # Dependency pre-check
+ if safe_serialization:
+ try:
+ from safetensors.torch import save_file
+ except ImportError:
+ print('If you want to use `safe_serialization`, please `pip install safetensors`')
+ raise
+ if max_shard_size is not None:
+ try:
+ from huggingface_hub import split_torch_state_dict_into_shards
+ except ImportError:
+ print('If you want to use `max_shard_size`, please `pip install huggingface_hub`')
+ raise
+
+ # Convert zero checkpoint to state_dict
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
+ tag,
+ exclude_frozen_parameters,
+ lazy_mode=True)
+
+ # Shard the model if it is too big.
+ weights_name = "model.safetensors" if safe_serialization else "pytorch_model.bin"
+ if max_shard_size is not None:
+ filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors")
+ # a memory-efficient approach for sharding
+ empty_state_dict = to_torch_tensor(state_dict, return_empty_tensor=True)
+ state_dict_split = split_torch_state_dict_into_shards(empty_state_dict,
+ filename_pattern=filename_pattern,
+ max_shard_size=max_shard_size)
+ else:
+ from collections import namedtuple
+ StateDictSplit = namedtuple("StateDictSplit", ["is_sharded", "filename_to_tensors"])
+ state_dict_split = StateDictSplit(is_sharded=False,
+ filename_to_tensors={weights_name: list(state_dict.keys())})
+
+ # Save the model by shard
+ os.makedirs(output_dir, exist_ok=True)
+ filename_to_tensors = state_dict_split.filename_to_tensors.items()
+ for shard_file, tensors in tqdm(filename_to_tensors, desc="Saving checkpoint shards"):
+ shard_state_dict = {tensor_name: state_dict[tensor_name] for tensor_name in tensors}
+ shard_state_dict = to_torch_tensor(shard_state_dict)
+ output_path = os.path.join(output_dir, shard_file)
+ if safe_serialization:
+ save_file(shard_state_dict, output_path, metadata={"format": "pt"})
+ else:
+ torch.save(shard_state_dict, output_path)
+ # release the memory of current shard
+ for tensor_name in list(shard_state_dict.keys()):
+ del state_dict[tensor_name]
+ del shard_state_dict[tensor_name]
+ del shard_state_dict
+ gc.collect()
+
+ # Save index if sharded
+ if state_dict_split.is_sharded:
+ index = {
+ "metadata": state_dict_split.metadata,
+ "weight_map": state_dict_split.tensor_to_filename,
+ }
+ save_index_file = "model.safetensors.index.json" if safe_serialization else "pytorch_model.bin.index.json"
+ save_index_file = os.path.join(output_dir, save_index_file)
+ with open(save_index_file, "w", encoding="utf-8") as f:
+ content = json.dumps(index, indent=2, sort_keys=True) + "\n"
+ f.write(content)
+
+
+def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
+ """
+ 1. Put the provided model to cpu
+ 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
+ 3. Load it into the provided model
+
+ Args:
+ - ``model``: the model object to update
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+
+ Returns:
+ - ``model``: modified model
+
+ Make sure you have plenty of CPU memory available before you call this function. If you don't
+ have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
+ conveniently placed for you in the checkpoint folder.
+
+ A typical usage might be ::
+
+ from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
+ model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
+ # submit to model hub or save the model to share with others
+
+ Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context
+ of the same application. i.e. you will need to re-initialize the deepspeed engine, since
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+ """
+ logger.info("Extracting fp32 weights")
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+
+ logger.info("Overwriting model with fp32 weights")
+ model = model.cpu()
+ model.load_state_dict(state_dict, strict=False)
+
+ return model
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("checkpoint_dir",
+ type=str,
+ help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
+ parser.add_argument("output_dir",
+ type=str,
+ help="directory to the pytorch fp32 state_dict output files"
+ "(e.g. path/checkpoint-12-output/)")
+ parser.add_argument(
+ "--max_shard_size",
+ type=str,
+ default="5GB",
+ help="The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size"
+ "lower than this size. If expressed as a string, needs to be digits followed by a unit (like `5MB`"
+ "We default it to 5GB in order for models to be able to run easily on free-tier google colab instances"
+ "without CPU OOM issues.")
+ parser.add_argument(
+ "--safe_serialization",
+ default=False,
+ action='store_true',
+ help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).")
+ parser.add_argument("-t",
+ "--tag",
+ type=str,
+ default=None,
+ help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
+ parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
+ parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
+ args = parser.parse_args()
+
+ debug = args.debug
+
+ convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
+ args.output_dir,
+ max_shard_size=args.max_shard_size,
+ safe_serialization=args.safe_serialization,
+ tag=args.tag,
+ exclude_frozen_parameters=args.exclude_frozen_parameters)
diff --git a/checkpoint-150/README.md b/checkpoint-150/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..7f664a3ae5272e4eb0e149c4adcd2f8c4eb45f00
--- /dev/null
+++ b/checkpoint-150/README.md
@@ -0,0 +1,207 @@
+---
+base_model: /mnt/phwfile/datafrontier/chenyang/data/models/Eagle-X5-7B
+library_name: peft
+pipeline_tag: text-generation
+tags:
+- base_model:adapter:/mnt/phwfile/datafrontier/chenyang/data/models/Eagle-X5-7B
+- lora
+- transformers
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+### Framework versions
+
+- PEFT 0.17.1
\ No newline at end of file
diff --git a/checkpoint-150/adapter_config.json b/checkpoint-150/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..35553142128a486f364ea05de87725530372c975
--- /dev/null
+++ b/checkpoint-150/adapter_config.json
@@ -0,0 +1,42 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "/mnt/phwfile/datafrontier/chenyang/data/models/Eagle-X5-7B",
+ "bias": "none",
+ "corda_config": null,
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 128,
+ "lora_bias": false,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "qalora_group_size": 16,
+ "r": 64,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "down_proj",
+ "gate_proj",
+ "o_proj",
+ "k_proj",
+ "v_proj",
+ "up_proj"
+ ],
+ "target_parameters": null,
+ "task_type": "CAUSAL_LM",
+ "trainable_token_indices": null,
+ "use_dora": false,
+ "use_qalora": false,
+ "use_rslora": false
+}
\ No newline at end of file
diff --git a/checkpoint-150/adapter_model.safetensors b/checkpoint-150/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..dfb1f53380071f32838846376eab532a5163c214
--- /dev/null
+++ b/checkpoint-150/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0cff569fccc330603ba7e3e056044a89e8d9aa5d1aefddd83cb1076accebb3a1
+size 357679240
diff --git a/checkpoint-150/global_step150/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt b/checkpoint-150/global_step150/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..b459377bf7be532ecec670c8a83e4c431fb9c180
--- /dev/null
+++ b/checkpoint-150/global_step150/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ad10d9c0e5fa6a7ff7698cd5ed180d9c4beb9913d6553c7af8c354d0a031dffd
+size 340551088
diff --git a/checkpoint-150/global_step150/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt b/checkpoint-150/global_step150/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..3ef1ebed65758126a0f8ef8be3bf66d3b96e1654
--- /dev/null
+++ b/checkpoint-150/global_step150/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ffec7f49bc27546fb8d89450428e4eb35eda050d7e746b4b56f188f9c93e4749
+size 340550896
diff --git a/checkpoint-150/global_step150/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt b/checkpoint-150/global_step150/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..a4b9e33ed75c6d4c018cb1e1037dcfa8acb1e46c
--- /dev/null
+++ b/checkpoint-150/global_step150/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:374162972107657e905fdc3f79c7cc216fa42a20fd4783808efc4d05b3f4a65e
+size 340550832
diff --git a/checkpoint-150/global_step150/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt b/checkpoint-150/global_step150/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..4f41f29421dadfda3f46f4ed636b1c09312ebaa2
--- /dev/null
+++ b/checkpoint-150/global_step150/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:413326fdf18c69c0d3fe0a42c3853c3e2903b17f43f1eff44063eb1c6a55c91a
+size 340551216
diff --git a/checkpoint-150/global_step150/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt b/checkpoint-150/global_step150/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..69de4493986e8342d97a2fad2e68d2f532df9fbd
--- /dev/null
+++ b/checkpoint-150/global_step150/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:67691177471f1db26268c57549e7de3c0e3a8c594639aae7f2ca7cb67ead5146
+size 340551088
diff --git a/checkpoint-150/global_step150/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt b/checkpoint-150/global_step150/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..2aaa5cba4fd4e39201040b4733bf7b492a022546
--- /dev/null
+++ b/checkpoint-150/global_step150/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:52c316499ef51fb55929556b8b13538dd64261338accece7f936e7d4a364b7b7
+size 340574832
diff --git a/checkpoint-150/global_step150/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt b/checkpoint-150/global_step150/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..45aba9d8cfd5b89aac99c199bf31b1508d67c8f9
--- /dev/null
+++ b/checkpoint-150/global_step150/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:62ac89a7b1c2b4c8b16a7af4463d29e2f6faeef5b29e29abbdedbff5471bf997
+size 340561584
diff --git a/checkpoint-150/global_step150/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt b/checkpoint-150/global_step150/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..6f31c8f0d3633aa0dbf9621ab1dce4490a1c303b
--- /dev/null
+++ b/checkpoint-150/global_step150/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0cfacfe3fa4f9f1128ea79725115b851fbe3f2fcb22627b21005d8ec64d255a3
+size 340542512
diff --git a/checkpoint-150/global_step150/mp_rank_00_model_states.pt b/checkpoint-150/global_step150/mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..231ad2eb85e727be470f75a715f4726335d1dde7
--- /dev/null
+++ b/checkpoint-150/global_step150/mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:940565dc256828af4ef3cccf87697d1eec925e8350b312376864334f4248b495
+size 456033832
diff --git a/checkpoint-150/latest b/checkpoint-150/latest
new file mode 100644
index 0000000000000000000000000000000000000000..daf5be2c4861b36c6659b05fae8c31547db7f579
--- /dev/null
+++ b/checkpoint-150/latest
@@ -0,0 +1 @@
+global_step150
\ No newline at end of file
diff --git a/checkpoint-150/rng_state_0.pth b/checkpoint-150/rng_state_0.pth
new file mode 100644
index 0000000000000000000000000000000000000000..8f5d780b4c8e1f0f5b3edd72600bf763df2abfc5
--- /dev/null
+++ b/checkpoint-150/rng_state_0.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:72e856b75792d055582543fbfc010b7fa2c0d3468d6703b1614d4597965d6e97
+size 15984
diff --git a/checkpoint-150/rng_state_1.pth b/checkpoint-150/rng_state_1.pth
new file mode 100644
index 0000000000000000000000000000000000000000..75ea67165781f6d68ea56bae6e6fd126286b79ec
--- /dev/null
+++ b/checkpoint-150/rng_state_1.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:73d22c18b2cc7869b85de54d900cd63b3d71626b2444ececc92288437891ee11
+size 15984
diff --git a/checkpoint-150/rng_state_2.pth b/checkpoint-150/rng_state_2.pth
new file mode 100644
index 0000000000000000000000000000000000000000..8d39162096b9c984b51dbf5b00c874b72ad8d3db
--- /dev/null
+++ b/checkpoint-150/rng_state_2.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a787b019a543574c268d3bb21ffa38b25447a066932015b9d2bf9544cc1f8f7f
+size 15984
diff --git a/checkpoint-150/rng_state_3.pth b/checkpoint-150/rng_state_3.pth
new file mode 100644
index 0000000000000000000000000000000000000000..362bde6fc702885e02543fcc4b3a966e43c2397a
--- /dev/null
+++ b/checkpoint-150/rng_state_3.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e432d3cc376fa1e9a64651a018f1b1e2772f511592fc94d7704685d2a4d8f675
+size 15984
diff --git a/checkpoint-150/rng_state_4.pth b/checkpoint-150/rng_state_4.pth
new file mode 100644
index 0000000000000000000000000000000000000000..3be5037d3ec09a55a170874f127d9b7078b3b873
--- /dev/null
+++ b/checkpoint-150/rng_state_4.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d31ee9d950235b086cc3f24a6930421c44a62fa406c3963f317feb3c1c9603a1
+size 15984
diff --git a/checkpoint-150/rng_state_5.pth b/checkpoint-150/rng_state_5.pth
new file mode 100644
index 0000000000000000000000000000000000000000..fe8bdaafcc77096a5020f26d70532a6976722a56
--- /dev/null
+++ b/checkpoint-150/rng_state_5.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cdf136a33a2fd52b89376a3693adb05b5344fe85d0a089694b29b26b08840602
+size 15984
diff --git a/checkpoint-150/rng_state_6.pth b/checkpoint-150/rng_state_6.pth
new file mode 100644
index 0000000000000000000000000000000000000000..07425da9e16c5fa27b126f34e5f16cedac50bc37
--- /dev/null
+++ b/checkpoint-150/rng_state_6.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1d4ec421ddb9a38c0416480e948aabbe4aaff6dc26d7b5a88d1b73206ce44bb3
+size 15984
diff --git a/checkpoint-150/rng_state_7.pth b/checkpoint-150/rng_state_7.pth
new file mode 100644
index 0000000000000000000000000000000000000000..cb02726c31170048e10a20e284806c3986af58ed
--- /dev/null
+++ b/checkpoint-150/rng_state_7.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4fbc7a6c9e0c390c0a97ede98c3263d98fc3f1261f11c0e4b791548e62d28973
+size 15984
diff --git a/checkpoint-150/scheduler.pt b/checkpoint-150/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..4bd5b89716004f13ce2ecc6d084ffe7d5e562a8f
--- /dev/null
+++ b/checkpoint-150/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af6383d8980c88385bae66872c7fd182075b693c2d308a714bd98989eef461fd
+size 1064
diff --git a/checkpoint-150/special_tokens_map.json b/checkpoint-150/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..14761dcf1466dc232bd41de9c21d4c617b15755e
--- /dev/null
+++ b/checkpoint-150/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "",
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/checkpoint-150/tokenizer.model b/checkpoint-150/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..6c00c742ce03c627d6cd5b795984876fa49fa899
--- /dev/null
+++ b/checkpoint-150/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
diff --git a/checkpoint-150/tokenizer_config.json b/checkpoint-150/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..26c65df1bf794f101c1dd54c908180dc0d880fe3
--- /dev/null
+++ b/checkpoint-150/tokenizer_config.json
@@ -0,0 +1,43 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": true,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "",
+ "legacy": false,
+ "model_max_length": 2048,
+ "pad_token": "",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "",
+ "use_default_system_prompt": false
+}
diff --git a/checkpoint-150/trainer_state.json b/checkpoint-150/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..12dc2f88e5cbff74e97acc96f9954fe409944cf0
--- /dev/null
+++ b/checkpoint-150/trainer_state.json
@@ -0,0 +1,1083 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 2.0,
+ "eval_steps": 500,
+ "global_step": 150,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.013333333333333334,
+ "grad_norm": 1.0028682947158813,
+ "learning_rate": 2.2222222222222223e-05,
+ "loss": 1.43,
+ "step": 1
+ },
+ {
+ "epoch": 0.02666666666666667,
+ "grad_norm": 1.6397583484649658,
+ "learning_rate": 4.4444444444444447e-05,
+ "loss": 1.5374,
+ "step": 2
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 1.2198785543441772,
+ "learning_rate": 6.666666666666667e-05,
+ "loss": 1.5106,
+ "step": 3
+ },
+ {
+ "epoch": 0.05333333333333334,
+ "grad_norm": 0.8468486070632935,
+ "learning_rate": 8.888888888888889e-05,
+ "loss": 1.447,
+ "step": 4
+ },
+ {
+ "epoch": 0.06666666666666667,
+ "grad_norm": 0.9268608689308167,
+ "learning_rate": 0.00011111111111111112,
+ "loss": 1.4743,
+ "step": 5
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 1.0168085098266602,
+ "learning_rate": 0.00013333333333333334,
+ "loss": 1.3245,
+ "step": 6
+ },
+ {
+ "epoch": 0.09333333333333334,
+ "grad_norm": 0.6773934960365295,
+ "learning_rate": 0.00015555555555555556,
+ "loss": 1.3738,
+ "step": 7
+ },
+ {
+ "epoch": 0.10666666666666667,
+ "grad_norm": 0.6985631585121155,
+ "learning_rate": 0.00017777777777777779,
+ "loss": 1.3951,
+ "step": 8
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 1.0221399068832397,
+ "learning_rate": 0.0002,
+ "loss": 1.3246,
+ "step": 9
+ },
+ {
+ "epoch": 0.13333333333333333,
+ "grad_norm": 0.6119747161865234,
+ "learning_rate": 0.00019999417253661235,
+ "loss": 1.2951,
+ "step": 10
+ },
+ {
+ "epoch": 0.14666666666666667,
+ "grad_norm": 0.6660990118980408,
+ "learning_rate": 0.00019997669082563597,
+ "loss": 1.2529,
+ "step": 11
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.5874819755554199,
+ "learning_rate": 0.00019994755690455152,
+ "loss": 1.2252,
+ "step": 12
+ },
+ {
+ "epoch": 0.17333333333333334,
+ "grad_norm": 0.4818006157875061,
+ "learning_rate": 0.00019990677416889608,
+ "loss": 1.1708,
+ "step": 13
+ },
+ {
+ "epoch": 0.18666666666666668,
+ "grad_norm": 0.652045488357544,
+ "learning_rate": 0.0001998543473718677,
+ "loss": 0.9865,
+ "step": 14
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.5517733693122864,
+ "learning_rate": 0.00019979028262377118,
+ "loss": 1.181,
+ "step": 15
+ },
+ {
+ "epoch": 0.21333333333333335,
+ "grad_norm": 0.47720542550086975,
+ "learning_rate": 0.00019971458739130598,
+ "loss": 1.0633,
+ "step": 16
+ },
+ {
+ "epoch": 0.22666666666666666,
+ "grad_norm": 0.7947096228599548,
+ "learning_rate": 0.000199627270496696,
+ "loss": 1.1458,
+ "step": 17
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.5301257371902466,
+ "learning_rate": 0.0001995283421166614,
+ "loss": 1.2245,
+ "step": 18
+ },
+ {
+ "epoch": 0.25333333333333335,
+ "grad_norm": 0.9473271369934082,
+ "learning_rate": 0.00019941781378123244,
+ "loss": 1.0859,
+ "step": 19
+ },
+ {
+ "epoch": 0.26666666666666666,
+ "grad_norm": 1.1834161281585693,
+ "learning_rate": 0.00019929569837240564,
+ "loss": 1.1808,
+ "step": 20
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.6784033179283142,
+ "learning_rate": 0.00019916201012264254,
+ "loss": 1.202,
+ "step": 21
+ },
+ {
+ "epoch": 0.29333333333333333,
+ "grad_norm": 0.7274785041809082,
+ "learning_rate": 0.00019901676461321068,
+ "loss": 1.2242,
+ "step": 22
+ },
+ {
+ "epoch": 0.30666666666666664,
+ "grad_norm": 0.7520783543586731,
+ "learning_rate": 0.00019885997877236788,
+ "loss": 1.173,
+ "step": 23
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.8218541145324707,
+ "learning_rate": 0.00019869167087338907,
+ "loss": 1.0723,
+ "step": 24
+ },
+ {
+ "epoch": 0.3333333333333333,
+ "grad_norm": 0.6361420154571533,
+ "learning_rate": 0.00019851186053243666,
+ "loss": 1.1607,
+ "step": 25
+ },
+ {
+ "epoch": 0.3466666666666667,
+ "grad_norm": 0.6401374936103821,
+ "learning_rate": 0.00019832056870627417,
+ "loss": 1.1398,
+ "step": 26
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 1.0995603799819946,
+ "learning_rate": 0.0001981178176898239,
+ "loss": 0.9475,
+ "step": 27
+ },
+ {
+ "epoch": 0.37333333333333335,
+ "grad_norm": 0.5122275948524475,
+ "learning_rate": 0.00019790363111356837,
+ "loss": 1.0125,
+ "step": 28
+ },
+ {
+ "epoch": 0.38666666666666666,
+ "grad_norm": 0.9316695332527161,
+ "learning_rate": 0.00019767803394079615,
+ "loss": 1.0554,
+ "step": 29
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.6831843256950378,
+ "learning_rate": 0.00019744105246469263,
+ "loss": 1.027,
+ "step": 30
+ },
+ {
+ "epoch": 0.41333333333333333,
+ "grad_norm": 0.5529218912124634,
+ "learning_rate": 0.0001971927143052752,
+ "loss": 1.0218,
+ "step": 31
+ },
+ {
+ "epoch": 0.4266666666666667,
+ "grad_norm": 0.7164443135261536,
+ "learning_rate": 0.00019693304840617457,
+ "loss": 1.0901,
+ "step": 32
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.7430665493011475,
+ "learning_rate": 0.00019666208503126112,
+ "loss": 1.125,
+ "step": 33
+ },
+ {
+ "epoch": 0.4533333333333333,
+ "grad_norm": 0.7198122143745422,
+ "learning_rate": 0.00019637985576111778,
+ "loss": 0.9926,
+ "step": 34
+ },
+ {
+ "epoch": 0.4666666666666667,
+ "grad_norm": 0.6613873839378357,
+ "learning_rate": 0.0001960863934893594,
+ "loss": 1.1925,
+ "step": 35
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 1.2440484762191772,
+ "learning_rate": 0.00019578173241879872,
+ "loss": 0.8948,
+ "step": 36
+ },
+ {
+ "epoch": 0.49333333333333335,
+ "grad_norm": 0.6995570659637451,
+ "learning_rate": 0.00019546590805746052,
+ "loss": 0.9525,
+ "step": 37
+ },
+ {
+ "epoch": 0.5066666666666667,
+ "grad_norm": 0.6873968243598938,
+ "learning_rate": 0.00019513895721444286,
+ "loss": 0.9966,
+ "step": 38
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.5394595861434937,
+ "learning_rate": 0.00019480091799562704,
+ "loss": 1.0787,
+ "step": 39
+ },
+ {
+ "epoch": 0.5333333333333333,
+ "grad_norm": 0.9196312427520752,
+ "learning_rate": 0.00019445182979923654,
+ "loss": 1.0559,
+ "step": 40
+ },
+ {
+ "epoch": 0.5466666666666666,
+ "grad_norm": 2.5954103469848633,
+ "learning_rate": 0.000194091733311245,
+ "loss": 0.9638,
+ "step": 41
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 1.244681715965271,
+ "learning_rate": 0.00019372067050063438,
+ "loss": 0.9325,
+ "step": 42
+ },
+ {
+ "epoch": 0.5733333333333334,
+ "grad_norm": 0.613715410232544,
+ "learning_rate": 0.0001933386846145036,
+ "loss": 0.9428,
+ "step": 43
+ },
+ {
+ "epoch": 0.5866666666666667,
+ "grad_norm": 0.9604235291481018,
+ "learning_rate": 0.00019294582017302797,
+ "loss": 0.9486,
+ "step": 44
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.7591239809989929,
+ "learning_rate": 0.00019254212296427044,
+ "loss": 1.0955,
+ "step": 45
+ },
+ {
+ "epoch": 0.6133333333333333,
+ "grad_norm": 0.5218423008918762,
+ "learning_rate": 0.0001921276400388451,
+ "loss": 1.0368,
+ "step": 46
+ },
+ {
+ "epoch": 0.6266666666666667,
+ "grad_norm": 0.5670755505561829,
+ "learning_rate": 0.00019170241970443343,
+ "loss": 0.9854,
+ "step": 47
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.8089922070503235,
+ "learning_rate": 0.00019126651152015403,
+ "loss": 1.0396,
+ "step": 48
+ },
+ {
+ "epoch": 0.6533333333333333,
+ "grad_norm": 0.6459051966667175,
+ "learning_rate": 0.00019081996629078657,
+ "loss": 1.0156,
+ "step": 49
+ },
+ {
+ "epoch": 0.6666666666666666,
+ "grad_norm": 1.1918997764587402,
+ "learning_rate": 0.00019036283606085053,
+ "loss": 1.1542,
+ "step": 50
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.6263722777366638,
+ "learning_rate": 0.00018989517410853955,
+ "loss": 1.1471,
+ "step": 51
+ },
+ {
+ "epoch": 0.6933333333333334,
+ "grad_norm": 0.534618616104126,
+ "learning_rate": 0.00018941703493951164,
+ "loss": 0.9055,
+ "step": 52
+ },
+ {
+ "epoch": 0.7066666666666667,
+ "grad_norm": 0.529036819934845,
+ "learning_rate": 0.00018892847428053693,
+ "loss": 0.9896,
+ "step": 53
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.716572105884552,
+ "learning_rate": 0.00018842954907300236,
+ "loss": 1.0204,
+ "step": 54
+ },
+ {
+ "epoch": 0.7333333333333333,
+ "grad_norm": 0.832662045955658,
+ "learning_rate": 0.00018792031746627563,
+ "loss": 1.0263,
+ "step": 55
+ },
+ {
+ "epoch": 0.7466666666666667,
+ "grad_norm": 0.5659884810447693,
+ "learning_rate": 0.0001874008388109276,
+ "loss": 1.0529,
+ "step": 56
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.4971260726451874,
+ "learning_rate": 0.00018687117365181512,
+ "loss": 1.0109,
+ "step": 57
+ },
+ {
+ "epoch": 0.7733333333333333,
+ "grad_norm": 0.5997689962387085,
+ "learning_rate": 0.00018633138372102468,
+ "loss": 1.0363,
+ "step": 58
+ },
+ {
+ "epoch": 0.7866666666666666,
+ "grad_norm": 0.42450904846191406,
+ "learning_rate": 0.00018578153193067745,
+ "loss": 1.0156,
+ "step": 59
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 1.0025880336761475,
+ "learning_rate": 0.00018522168236559695,
+ "loss": 0.96,
+ "step": 60
+ },
+ {
+ "epoch": 0.8133333333333334,
+ "grad_norm": 0.47225672006607056,
+ "learning_rate": 0.00018465190027584005,
+ "loss": 1.0402,
+ "step": 61
+ },
+ {
+ "epoch": 0.8266666666666667,
+ "grad_norm": 0.6419042348861694,
+ "learning_rate": 0.00018407225206909208,
+ "loss": 0.9727,
+ "step": 62
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.9594618082046509,
+ "learning_rate": 0.00018348280530292713,
+ "loss": 0.986,
+ "step": 63
+ },
+ {
+ "epoch": 0.8533333333333334,
+ "grad_norm": 0.5415605902671814,
+ "learning_rate": 0.00018288362867693414,
+ "loss": 1.019,
+ "step": 64
+ },
+ {
+ "epoch": 0.8666666666666667,
+ "grad_norm": 0.9662086367607117,
+ "learning_rate": 0.00018227479202471015,
+ "loss": 0.9531,
+ "step": 65
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.7523136734962463,
+ "learning_rate": 0.0001816563663057211,
+ "loss": 1.0383,
+ "step": 66
+ },
+ {
+ "epoch": 0.8933333333333333,
+ "grad_norm": 0.7249945998191833,
+ "learning_rate": 0.00018102842359703176,
+ "loss": 1.1131,
+ "step": 67
+ },
+ {
+ "epoch": 0.9066666666666666,
+ "grad_norm": 0.4781404435634613,
+ "learning_rate": 0.000180391037084905,
+ "loss": 0.9701,
+ "step": 68
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.5435504913330078,
+ "learning_rate": 0.00017974428105627208,
+ "loss": 1.2701,
+ "step": 69
+ },
+ {
+ "epoch": 0.9333333333333333,
+ "grad_norm": 0.48021838068962097,
+ "learning_rate": 0.00017908823089007457,
+ "loss": 0.9212,
+ "step": 70
+ },
+ {
+ "epoch": 0.9466666666666667,
+ "grad_norm": 0.7063950300216675,
+ "learning_rate": 0.00017842296304847893,
+ "loss": 1.0076,
+ "step": 71
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.5694530606269836,
+ "learning_rate": 0.00017774855506796496,
+ "loss": 1.0417,
+ "step": 72
+ },
+ {
+ "epoch": 0.9733333333333334,
+ "grad_norm": 0.6120775938034058,
+ "learning_rate": 0.00017706508555028893,
+ "loss": 1.0501,
+ "step": 73
+ },
+ {
+ "epoch": 0.9866666666666667,
+ "grad_norm": 0.5728889107704163,
+ "learning_rate": 0.0001763726341533227,
+ "loss": 1.0024,
+ "step": 74
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.8452621102333069,
+ "learning_rate": 0.00017567128158176953,
+ "loss": 1.0966,
+ "step": 75
+ },
+ {
+ "epoch": 1.0133333333333334,
+ "grad_norm": 0.5769606828689575,
+ "learning_rate": 0.0001749611095777581,
+ "loss": 0.8877,
+ "step": 76
+ },
+ {
+ "epoch": 1.0266666666666666,
+ "grad_norm": 0.9046480059623718,
+ "learning_rate": 0.00017424220091131535,
+ "loss": 0.752,
+ "step": 77
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.4488053023815155,
+ "learning_rate": 0.00017351463937072004,
+ "loss": 0.7588,
+ "step": 78
+ },
+ {
+ "epoch": 1.0533333333333332,
+ "grad_norm": 0.41479629278182983,
+ "learning_rate": 0.00017277850975273696,
+ "loss": 0.779,
+ "step": 79
+ },
+ {
+ "epoch": 1.0666666666666667,
+ "grad_norm": 0.7192550301551819,
+ "learning_rate": 0.000172033897852734,
+ "loss": 0.7809,
+ "step": 80
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.7553783655166626,
+ "learning_rate": 0.00017128089045468294,
+ "loss": 0.7421,
+ "step": 81
+ },
+ {
+ "epoch": 1.0933333333333333,
+ "grad_norm": 0.5650737881660461,
+ "learning_rate": 0.0001705195753210446,
+ "loss": 0.7044,
+ "step": 82
+ },
+ {
+ "epoch": 1.1066666666666667,
+ "grad_norm": 0.6692880988121033,
+ "learning_rate": 0.0001697500411825403,
+ "loss": 0.8415,
+ "step": 83
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.6710836291313171,
+ "learning_rate": 0.00016897237772781044,
+ "loss": 0.7247,
+ "step": 84
+ },
+ {
+ "epoch": 1.1333333333333333,
+ "grad_norm": 0.5887194275856018,
+ "learning_rate": 0.0001681866755929612,
+ "loss": 0.7947,
+ "step": 85
+ },
+ {
+ "epoch": 1.1466666666666667,
+ "grad_norm": 0.5538906455039978,
+ "learning_rate": 0.00016739302635100108,
+ "loss": 0.7337,
+ "step": 86
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.6018480062484741,
+ "learning_rate": 0.00016659152250116812,
+ "loss": 0.6801,
+ "step": 87
+ },
+ {
+ "epoch": 1.1733333333333333,
+ "grad_norm": 0.548251748085022,
+ "learning_rate": 0.00016578225745814907,
+ "loss": 0.6898,
+ "step": 88
+ },
+ {
+ "epoch": 1.1866666666666668,
+ "grad_norm": 0.49416568875312805,
+ "learning_rate": 0.00016496532554119214,
+ "loss": 0.719,
+ "step": 89
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.6101306676864624,
+ "learning_rate": 0.000164140821963114,
+ "loss": 0.7917,
+ "step": 90
+ },
+ {
+ "epoch": 1.2133333333333334,
+ "grad_norm": 0.588020920753479,
+ "learning_rate": 0.000163308842819203,
+ "loss": 0.8035,
+ "step": 91
+ },
+ {
+ "epoch": 1.2266666666666666,
+ "grad_norm": 0.5376534461975098,
+ "learning_rate": 0.00016246948507601914,
+ "loss": 0.7816,
+ "step": 92
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.549625813961029,
+ "learning_rate": 0.00016162284656009274,
+ "loss": 0.7245,
+ "step": 93
+ },
+ {
+ "epoch": 1.2533333333333334,
+ "grad_norm": 0.6054933667182922,
+ "learning_rate": 0.0001607690259465229,
+ "loss": 0.7484,
+ "step": 94
+ },
+ {
+ "epoch": 1.2666666666666666,
+ "grad_norm": 0.5613316297531128,
+ "learning_rate": 0.00015990812274747692,
+ "loss": 0.7369,
+ "step": 95
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.5984250903129578,
+ "learning_rate": 0.00015904023730059228,
+ "loss": 0.891,
+ "step": 96
+ },
+ {
+ "epoch": 1.2933333333333334,
+ "grad_norm": 0.6490495204925537,
+ "learning_rate": 0.00015816547075728226,
+ "loss": 0.775,
+ "step": 97
+ },
+ {
+ "epoch": 1.3066666666666666,
+ "grad_norm": 0.6675053834915161,
+ "learning_rate": 0.000157283925070947,
+ "loss": 0.7565,
+ "step": 98
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.5397291779518127,
+ "learning_rate": 0.00015639570298509064,
+ "loss": 0.6685,
+ "step": 99
+ },
+ {
+ "epoch": 1.3333333333333333,
+ "grad_norm": 0.5830179452896118,
+ "learning_rate": 0.000155500908021347,
+ "loss": 0.8132,
+ "step": 100
+ },
+ {
+ "epoch": 1.3466666666666667,
+ "grad_norm": 0.511802077293396,
+ "learning_rate": 0.00015459964446741382,
+ "loss": 0.7664,
+ "step": 101
+ },
+ {
+ "epoch": 1.3599999999999999,
+ "grad_norm": 1.350032091140747,
+ "learning_rate": 0.0001536920173648984,
+ "loss": 0.8179,
+ "step": 102
+ },
+ {
+ "epoch": 1.3733333333333333,
+ "grad_norm": 0.7308780550956726,
+ "learning_rate": 0.00015277813249707487,
+ "loss": 0.8401,
+ "step": 103
+ },
+ {
+ "epoch": 1.3866666666666667,
+ "grad_norm": 0.5292226076126099,
+ "learning_rate": 0.0001518580963765555,
+ "loss": 0.6367,
+ "step": 104
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.6958481073379517,
+ "learning_rate": 0.00015093201623287631,
+ "loss": 0.7173,
+ "step": 105
+ },
+ {
+ "epoch": 1.4133333333333333,
+ "grad_norm": 0.7024071216583252,
+ "learning_rate": 0.00015000000000000001,
+ "loss": 0.5604,
+ "step": 106
+ },
+ {
+ "epoch": 1.4266666666666667,
+ "grad_norm": 0.5597444772720337,
+ "learning_rate": 0.00014906215630373606,
+ "loss": 0.6767,
+ "step": 107
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.6003674864768982,
+ "learning_rate": 0.00014811859444908052,
+ "loss": 0.8149,
+ "step": 108
+ },
+ {
+ "epoch": 1.4533333333333334,
+ "grad_norm": 0.5815126895904541,
+ "learning_rate": 0.00014716942440747664,
+ "loss": 0.7801,
+ "step": 109
+ },
+ {
+ "epoch": 1.4666666666666668,
+ "grad_norm": 0.7836669683456421,
+ "learning_rate": 0.0001462147568039977,
+ "loss": 0.8452,
+ "step": 110
+ },
+ {
+ "epoch": 1.48,
+ "grad_norm": 0.8783419132232666,
+ "learning_rate": 0.00014525470290445392,
+ "loss": 0.8257,
+ "step": 111
+ },
+ {
+ "epoch": 1.4933333333333334,
+ "grad_norm": 0.46948131918907166,
+ "learning_rate": 0.00014428937460242417,
+ "loss": 0.7481,
+ "step": 112
+ },
+ {
+ "epoch": 1.5066666666666668,
+ "grad_norm": 0.5725980401039124,
+ "learning_rate": 0.00014331888440621533,
+ "loss": 0.8267,
+ "step": 113
+ },
+ {
+ "epoch": 1.52,
+ "grad_norm": 0.4418632686138153,
+ "learning_rate": 0.00014234334542574906,
+ "loss": 0.7631,
+ "step": 114
+ },
+ {
+ "epoch": 1.5333333333333332,
+ "grad_norm": 0.6430942416191101,
+ "learning_rate": 0.00014136287135937915,
+ "loss": 0.8817,
+ "step": 115
+ },
+ {
+ "epoch": 1.5466666666666666,
+ "grad_norm": 0.5670009255409241,
+ "learning_rate": 0.00014037757648064018,
+ "loss": 0.5991,
+ "step": 116
+ },
+ {
+ "epoch": 1.56,
+ "grad_norm": 0.5407504439353943,
+ "learning_rate": 0.00013938757562492873,
+ "loss": 0.6898,
+ "step": 117
+ },
+ {
+ "epoch": 1.5733333333333333,
+ "grad_norm": 0.5176808834075928,
+ "learning_rate": 0.00013839298417611963,
+ "loss": 0.731,
+ "step": 118
+ },
+ {
+ "epoch": 1.5866666666666667,
+ "grad_norm": 0.9752798080444336,
+ "learning_rate": 0.00013739391805311793,
+ "loss": 0.6736,
+ "step": 119
+ },
+ {
+ "epoch": 1.6,
+ "grad_norm": 0.7100059390068054,
+ "learning_rate": 0.00013639049369634876,
+ "loss": 0.7369,
+ "step": 120
+ },
+ {
+ "epoch": 1.6133333333333333,
+ "grad_norm": 0.6285961270332336,
+ "learning_rate": 0.0001353828280541861,
+ "loss": 0.721,
+ "step": 121
+ },
+ {
+ "epoch": 1.6266666666666667,
+ "grad_norm": 0.5981026291847229,
+ "learning_rate": 0.00013437103856932264,
+ "loss": 0.8094,
+ "step": 122
+ },
+ {
+ "epoch": 1.6400000000000001,
+ "grad_norm": 0.6587502360343933,
+ "learning_rate": 0.00013335524316508208,
+ "loss": 0.7646,
+ "step": 123
+ },
+ {
+ "epoch": 1.6533333333333333,
+ "grad_norm": 0.5544253587722778,
+ "learning_rate": 0.00013233556023167485,
+ "loss": 0.7165,
+ "step": 124
+ },
+ {
+ "epoch": 1.6666666666666665,
+ "grad_norm": 0.6012857556343079,
+ "learning_rate": 0.00013131210861240026,
+ "loss": 0.8104,
+ "step": 125
+ },
+ {
+ "epoch": 1.6800000000000002,
+ "grad_norm": 0.5157524347305298,
+ "learning_rate": 0.00013028500758979506,
+ "loss": 0.8585,
+ "step": 126
+ },
+ {
+ "epoch": 1.6933333333333334,
+ "grad_norm": 0.4888676702976227,
+ "learning_rate": 0.00012925437687173142,
+ "loss": 0.6579,
+ "step": 127
+ },
+ {
+ "epoch": 1.7066666666666666,
+ "grad_norm": 0.5127140879631042,
+ "learning_rate": 0.00012822033657746478,
+ "loss": 0.7432,
+ "step": 128
+ },
+ {
+ "epoch": 1.72,
+ "grad_norm": 0.6154641509056091,
+ "learning_rate": 0.0001271830072236343,
+ "loss": 0.7322,
+ "step": 129
+ },
+ {
+ "epoch": 1.7333333333333334,
+ "grad_norm": 0.5081548690795898,
+ "learning_rate": 0.00012614250971021657,
+ "loss": 0.7547,
+ "step": 130
+ },
+ {
+ "epoch": 1.7466666666666666,
+ "grad_norm": 0.6808217763900757,
+ "learning_rate": 0.00012509896530643488,
+ "loss": 0.6855,
+ "step": 131
+ },
+ {
+ "epoch": 1.76,
+ "grad_norm": 0.8672941327095032,
+ "learning_rate": 0.00012405249563662537,
+ "loss": 0.6332,
+ "step": 132
+ },
+ {
+ "epoch": 1.7733333333333334,
+ "grad_norm": 0.6130337119102478,
+ "learning_rate": 0.00012300322266606178,
+ "loss": 0.7453,
+ "step": 133
+ },
+ {
+ "epoch": 1.7866666666666666,
+ "grad_norm": 0.739959180355072,
+ "learning_rate": 0.00012195126868674051,
+ "loss": 0.687,
+ "step": 134
+ },
+ {
+ "epoch": 1.8,
+ "grad_norm": 0.5801121592521667,
+ "learning_rate": 0.00012089675630312754,
+ "loss": 0.7059,
+ "step": 135
+ },
+ {
+ "epoch": 1.8133333333333335,
+ "grad_norm": 0.5766938328742981,
+ "learning_rate": 0.000119839808417869,
+ "loss": 0.737,
+ "step": 136
+ },
+ {
+ "epoch": 1.8266666666666667,
+ "grad_norm": 0.6705268621444702,
+ "learning_rate": 0.00011878054821746703,
+ "loss": 0.7358,
+ "step": 137
+ },
+ {
+ "epoch": 1.8399999999999999,
+ "grad_norm": 0.7814889550209045,
+ "learning_rate": 0.0001177190991579223,
+ "loss": 0.7021,
+ "step": 138
+ },
+ {
+ "epoch": 1.8533333333333335,
+ "grad_norm": 0.6991515755653381,
+ "learning_rate": 0.00011665558495034546,
+ "loss": 0.7325,
+ "step": 139
+ },
+ {
+ "epoch": 1.8666666666666667,
+ "grad_norm": 0.8299288749694824,
+ "learning_rate": 0.00011559012954653865,
+ "loss": 0.7128,
+ "step": 140
+ },
+ {
+ "epoch": 1.88,
+ "grad_norm": 0.7293754816055298,
+ "learning_rate": 0.00011452285712454904,
+ "loss": 0.5813,
+ "step": 141
+ },
+ {
+ "epoch": 1.8933333333333333,
+ "grad_norm": 0.6560428738594055,
+ "learning_rate": 0.00011345389207419588,
+ "loss": 0.6352,
+ "step": 142
+ },
+ {
+ "epoch": 1.9066666666666667,
+ "grad_norm": 0.54889976978302,
+ "learning_rate": 0.00011238335898257304,
+ "loss": 0.7372,
+ "step": 143
+ },
+ {
+ "epoch": 1.92,
+ "grad_norm": 0.5890987515449524,
+ "learning_rate": 0.00011131138261952845,
+ "loss": 0.6402,
+ "step": 144
+ },
+ {
+ "epoch": 1.9333333333333333,
+ "grad_norm": 0.8450446128845215,
+ "learning_rate": 0.00011023808792312227,
+ "loss": 0.7152,
+ "step": 145
+ },
+ {
+ "epoch": 1.9466666666666668,
+ "grad_norm": 0.7649719715118408,
+ "learning_rate": 0.0001091635999850655,
+ "loss": 0.6831,
+ "step": 146
+ },
+ {
+ "epoch": 1.96,
+ "grad_norm": 0.6236613988876343,
+ "learning_rate": 0.00010808804403614043,
+ "loss": 0.6671,
+ "step": 147
+ },
+ {
+ "epoch": 1.9733333333333334,
+ "grad_norm": 0.6295299530029297,
+ "learning_rate": 0.00010701154543160541,
+ "loss": 0.8226,
+ "step": 148
+ },
+ {
+ "epoch": 1.9866666666666668,
+ "grad_norm": 0.641965389251709,
+ "learning_rate": 0.00010593422963658452,
+ "loss": 0.6567,
+ "step": 149
+ },
+ {
+ "epoch": 2.0,
+ "grad_norm": 0.779958188533783,
+ "learning_rate": 0.00010485622221144484,
+ "loss": 0.6346,
+ "step": 150
+ }
+ ],
+ "logging_steps": 1.0,
+ "max_steps": 300,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 4,
+ "save_steps": 50,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 4.29731846511657e+16,
+ "train_batch_size": 2,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/checkpoint-150/training_args.bin b/checkpoint-150/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..26de66c830692f3d22f375473f4f43447eefb78a
--- /dev/null
+++ b/checkpoint-150/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:54335a849ae8d377cae5839a736f4196087c7894666ca9b9f10dd899e0ada95c
+size 6904
diff --git a/checkpoint-150/zero_to_fp32.py b/checkpoint-150/zero_to_fp32.py
new file mode 100644
index 0000000000000000000000000000000000000000..5995d6e6f04e43b989587aa9022a3aef0c66d694
--- /dev/null
+++ b/checkpoint-150/zero_to_fp32.py
@@ -0,0 +1,760 @@
+#!/usr/bin/env python
+
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+# This script extracts fp32 consolidated weights from a zero 1, 2 and 3 DeepSpeed checkpoints. It gets
+# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
+# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
+# application.
+#
+# example:
+# python zero_to_fp32.py . output_dir/
+# or
+# python zero_to_fp32.py . output_dir/ --safe_serialization
+
+import argparse
+import torch
+import glob
+import math
+import os
+import re
+import gc
+import json
+import numpy as np
+from tqdm import tqdm
+from collections import OrderedDict
+from dataclasses import dataclass
+
+# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
+# DeepSpeed data structures it has to be available in the current python environment.
+from deepspeed.utils import logger
+from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
+
+
+@dataclass
+class zero_model_state:
+ buffers: dict()
+ param_shapes: dict()
+ shared_params: list
+ ds_version: int
+ frozen_param_shapes: dict()
+ frozen_param_fragments: dict()
+
+
+debug = 0
+
+# load to cpu
+device = torch.device('cpu')
+
+
+def atoi(text):
+ return int(text) if text.isdigit() else text
+
+
+def natural_keys(text):
+ '''
+ alist.sort(key=natural_keys) sorts in human order
+ http://nedbatchelder.com/blog/200712/human_sorting.html
+ (See Toothy's implementation in the comments)
+ '''
+ return [atoi(c) for c in re.split(r'(\d+)', text)]
+
+
+def get_model_state_file(checkpoint_dir, zero_stage):
+ if not os.path.isdir(checkpoint_dir):
+ raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
+
+ # there should be only one file
+ if zero_stage <= 2:
+ file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
+ elif zero_stage == 3:
+ file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
+
+ if not os.path.exists(file):
+ raise FileNotFoundError(f"can't find model states file at '{file}'")
+
+ return file
+
+
+def get_checkpoint_files(checkpoint_dir, glob_pattern):
+ # XXX: need to test that this simple glob rule works for multi-node setup too
+ ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
+
+ if len(ckpt_files) == 0:
+ raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
+
+ return ckpt_files
+
+
+def get_optim_files(checkpoint_dir):
+ return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
+
+
+def get_model_state_files(checkpoint_dir):
+ return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
+
+
+def parse_model_states(files):
+ zero_model_states = []
+ for file in files:
+ state_dict = torch.load(file, map_location=device, weights_only=False)
+
+ if BUFFER_NAMES not in state_dict:
+ raise ValueError(f"{file} is not a model state checkpoint")
+ buffer_names = state_dict[BUFFER_NAMES]
+ if debug:
+ print("Found buffers:", buffer_names)
+
+ # recover just the buffers while restoring them to fp32 if they were saved in fp16
+ buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
+ param_shapes = state_dict[PARAM_SHAPES]
+
+ # collect parameters that are included in param_shapes
+ param_names = []
+ for s in param_shapes:
+ for name in s.keys():
+ param_names.append(name)
+
+ # update with frozen parameters
+ frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
+ if frozen_param_shapes is not None:
+ if debug:
+ print(f"Found frozen_param_shapes: {frozen_param_shapes}")
+ param_names += list(frozen_param_shapes.keys())
+
+ # handle shared params
+ shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
+
+ ds_version = state_dict.get(DS_VERSION, None)
+
+ frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
+
+ z_model_state = zero_model_state(buffers=buffers,
+ param_shapes=param_shapes,
+ shared_params=shared_params,
+ ds_version=ds_version,
+ frozen_param_shapes=frozen_param_shapes,
+ frozen_param_fragments=frozen_param_fragments)
+ zero_model_states.append(z_model_state)
+
+ return zero_model_states
+
+
+def parse_optim_states(files, ds_checkpoint_dir):
+ total_files = len(files)
+ state_dicts = []
+ for f in tqdm(files, desc='Loading checkpoint shards'):
+ state_dict = torch.load(f, map_location=device, mmap=True, weights_only=False)
+ # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights
+ # and also handle the case where it was already removed by another helper script
+ state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
+ state_dicts.append(state_dict)
+
+ if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
+ raise ValueError(f"{files[0]} is not a zero checkpoint")
+ zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
+ world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
+
+ # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
+ # parameters can be different from data parallelism for non-expert parameters. So we can just
+ # use the max of the partition_count to get the dp world_size.
+
+ if type(world_size) is list:
+ world_size = max(world_size)
+
+ if world_size != total_files:
+ raise ValueError(
+ f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
+ "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
+ )
+
+ # the groups are named differently in each stage
+ if zero_stage <= 2:
+ fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
+ elif zero_stage == 3:
+ fp32_groups_key = FP32_FLAT_GROUPS
+ else:
+ raise ValueError(f"unknown zero stage {zero_stage}")
+
+ fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
+ return zero_stage, world_size, fp32_flat_groups
+
+
+def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
+ """
+ Returns fp32 state_dict reconstructed from ds checkpoint
+
+ Args:
+ - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
+
+ """
+ print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
+
+ optim_files = get_optim_files(ds_checkpoint_dir)
+ zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
+ print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
+
+ model_files = get_model_state_files(ds_checkpoint_dir)
+
+ zero_model_states = parse_model_states(model_files)
+ print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
+
+ if zero_stage <= 2:
+ return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters)
+ elif zero_stage == 3:
+ return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters)
+
+
+def _zero2_merge_frozen_params(state_dict, zero_model_states):
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+ return
+
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+ frozen_param_fragments = zero_model_states[0].frozen_param_fragments
+
+ if debug:
+ num_elem = sum(s.numel() for s in frozen_param_shapes.values())
+ print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+ wanted_params = len(frozen_param_shapes)
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+ avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
+ print(f'Frozen params: Have {avail_numel} numels to process.')
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+ total_params = 0
+ total_numel = 0
+ for name, shape in frozen_param_shapes.items():
+ total_params += 1
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+
+ state_dict[name] = frozen_param_fragments[name]
+
+ if debug:
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _has_callable(obj, fn):
+ attr = getattr(obj, fn, None)
+ return callable(attr)
+
+
+def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+ param_shapes = zero_model_states[0].param_shapes
+
+ # Reconstruction protocol:
+ #
+ # XXX: document this
+
+ if debug:
+ for i in range(world_size):
+ for j in range(len(fp32_flat_groups[0])):
+ print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
+
+ # XXX: memory usage doubles here (zero2)
+ num_param_groups = len(fp32_flat_groups[0])
+ merged_single_partition_of_fp32_groups = []
+ for i in range(num_param_groups):
+ merged_partitions = [sd[i] for sd in fp32_flat_groups]
+ full_single_fp32_vector = torch.cat(merged_partitions, 0)
+ merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
+ avail_numel = sum(
+ [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
+
+ if debug:
+ wanted_params = sum([len(shapes) for shapes in param_shapes])
+ wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
+ # not asserting if there is a mismatch due to possible padding
+ print(f"Have {avail_numel} numels to process.")
+ print(f"Need {wanted_numel} numels in {wanted_params} params.")
+
+ # params
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+ # out-of-core computing solution
+ total_numel = 0
+ total_params = 0
+ for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
+ offset = 0
+ avail_numel = full_single_fp32_vector.numel()
+ for name, shape in shapes.items():
+
+ unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
+ total_numel += unpartitioned_numel
+ total_params += 1
+
+ if debug:
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+ state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
+ offset += unpartitioned_numel
+
+ # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
+ # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
+ # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
+ # live optimizer object, so we are checking that the numbers are within the right range
+ align_to = 2 * world_size
+
+ def zero2_align(x):
+ return align_to * math.ceil(x / align_to)
+
+ if debug:
+ print(f"original offset={offset}, avail_numel={avail_numel}")
+
+ offset = zero2_align(offset)
+ avail_numel = zero2_align(avail_numel)
+
+ if debug:
+ print(f"aligned offset={offset}, avail_numel={avail_numel}")
+
+ # Sanity check
+ if offset != avail_numel:
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+ print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters):
+ state_dict = OrderedDict()
+
+ # buffers
+ buffers = zero_model_states[0].buffers
+ state_dict.update(buffers)
+ if debug:
+ print(f"added {len(buffers)} buffers")
+
+ if not exclude_frozen_parameters:
+ _zero2_merge_frozen_params(state_dict, zero_model_states)
+
+ _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+ # recover shared parameters
+ for pair in zero_model_states[0].shared_params:
+ if pair[1] in state_dict:
+ state_dict[pair[0]] = state_dict[pair[1]]
+
+ return state_dict
+
+
+def zero3_partitioned_param_info(unpartitioned_numel, world_size):
+ remainder = unpartitioned_numel % world_size
+ padding_numel = (world_size - remainder) if remainder else 0
+ partitioned_numel = math.ceil(unpartitioned_numel / world_size)
+ return partitioned_numel, padding_numel
+
+
+def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+ return
+
+ if debug:
+ for i in range(world_size):
+ num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
+ print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+ wanted_params = len(frozen_param_shapes)
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+ avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
+ print(f'Frozen params: Have {avail_numel} numels to process.')
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+ total_params = 0
+ total_numel = 0
+ for name, shape in zero_model_states[0].frozen_param_shapes.items():
+ total_params += 1
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+
+ param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
+ state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
+
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+ if debug:
+ print(
+ f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+ )
+
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+class GatheredTensor:
+ """
+ A pseudo tensor that collects partitioned weights.
+ It is more memory efficient when there are multiple groups.
+ """
+
+ def __init__(self, flat_groups, flat_groups_offset, offset, partitioned_numel, shape):
+ self.flat_groups = flat_groups
+ self.flat_groups_offset = flat_groups_offset
+ self.offset = offset
+ self.partitioned_numel = partitioned_numel
+ self.shape = shape
+ self.dtype = self.flat_groups[0][0].dtype
+
+ def contiguous(self):
+ """
+ Merge partitioned weights from flat_groups into a single tensor.
+ """
+ end_idx = self.offset + self.partitioned_numel
+ world_size = len(self.flat_groups)
+ pad_flat_param_chunks = []
+
+ for rank_i in range(world_size):
+ # for each rank, we need to collect weights from related group/groups
+ flat_groups_at_rank_i = self.flat_groups[rank_i]
+ start_group_id = None
+ end_group_id = None
+ for group_id in range(len(self.flat_groups_offset)):
+ if self.flat_groups_offset[group_id] <= self.offset < self.flat_groups_offset[group_id + 1]:
+ start_group_id = group_id
+ if self.flat_groups_offset[group_id] < end_idx <= self.flat_groups_offset[group_id + 1]:
+ end_group_id = group_id
+ break
+ # collect weights from related group/groups
+ for group_id in range(start_group_id, end_group_id + 1):
+ flat_tensor = flat_groups_at_rank_i[group_id]
+ start_offset = self.offset - self.flat_groups_offset[group_id]
+ end_offset = min(end_idx, self.flat_groups_offset[group_id + 1]) - self.flat_groups_offset[group_id]
+ pad_flat_param_chunks.append(flat_tensor[start_offset:end_offset])
+
+ # collect weights from all ranks
+ pad_flat_param = torch.cat(pad_flat_param_chunks, dim=0)
+ param = pad_flat_param[:self.shape.numel()].view(self.shape).contiguous()
+ return param
+
+
+def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+ param_shapes = zero_model_states[0].param_shapes
+ avail_numel = sum([flat_group.numel() for flat_group in fp32_flat_groups[0]]) * world_size
+
+ # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
+ # param, re-consolidating each param, while dealing with padding if any
+
+ # merge list of dicts, preserving order
+ param_shapes = {k: v for d in param_shapes for k, v in d.items()}
+
+ if debug:
+ for i in range(world_size):
+ print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
+
+ wanted_params = len(param_shapes)
+ wanted_numel = sum(shape.numel() for shape in param_shapes.values())
+ # not asserting if there is a mismatch due to possible padding
+ avail_numel = fp32_flat_groups[0].numel() * world_size
+ print(f"Trainable params: Have {avail_numel} numels to process.")
+ print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
+
+ # params
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+ # out-of-core computing solution
+ offset = 0
+ total_numel = 0
+ total_params = 0
+ flat_groups_offset = [0] + list(np.cumsum([flat_tensor.numel() for flat_tensor in fp32_flat_groups[0]]))
+ for name, shape in tqdm(param_shapes.items(), desc='Gathering sharded weights'):
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+ total_params += 1
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+ if debug:
+ print(
+ f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+ )
+
+ # memory efficient tensor
+ tensor = GatheredTensor(fp32_flat_groups, flat_groups_offset, offset, partitioned_numel, shape)
+ state_dict[name] = tensor
+ offset += partitioned_numel
+
+ offset *= world_size
+
+ # Sanity check
+ if offset != avail_numel:
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+ print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters):
+ state_dict = OrderedDict()
+
+ # buffers
+ buffers = zero_model_states[0].buffers
+ state_dict.update(buffers)
+ if debug:
+ print(f"added {len(buffers)} buffers")
+
+ if not exclude_frozen_parameters:
+ _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
+
+ _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+ # recover shared parameters
+ for pair in zero_model_states[0].shared_params:
+ if pair[1] in state_dict:
+ state_dict[pair[0]] = state_dict[pair[1]]
+
+ return state_dict
+
+
+def to_torch_tensor(state_dict, return_empty_tensor=False):
+ """
+ Convert state_dict of GatheredTensor to torch tensor
+ """
+ torch_state_dict = {}
+ converted_tensors = {}
+ for name, tensor in state_dict.items():
+ tensor_id = id(tensor)
+ if tensor_id in converted_tensors: # shared tensors
+ shared_tensor = torch_state_dict[converted_tensors[tensor_id]]
+ torch_state_dict[name] = shared_tensor
+ else:
+ converted_tensors[tensor_id] = name
+ if return_empty_tensor:
+ torch_state_dict[name] = torch.empty(tensor.shape, dtype=tensor.dtype)
+ else:
+ torch_state_dict[name] = tensor.contiguous()
+ return torch_state_dict
+
+
+def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
+ tag=None,
+ exclude_frozen_parameters=False,
+ lazy_mode=False):
+ """
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
+ ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
+ via a model hub.
+
+ Args:
+ - ``checkpoint_dir``: path to the desired checkpoint folder
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
+ - ``exclude_frozen_parameters``: exclude frozen parameters
+ - ``lazy_mode``: get state_dict in lazy mode. It returns a dict of pesduo tensor instead of torch tensor, which is more memory efficient.
+ Convert the pesduo tensor to torch tensor by ``.contiguous()``
+
+ Returns:
+ - pytorch ``state_dict``
+
+ A typical usage might be ::
+
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+ # do the training and checkpoint saving
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
+ model = model.cpu() # move to cpu
+ model.load_state_dict(state_dict)
+ # submit to model hub or save the model to share with others
+
+ In this example the ``model`` will no longer be usable in the deepspeed context of the same
+ application. i.e. you will need to re-initialize the deepspeed engine, since
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+ If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
+
+ Note: the above usage may not work if your application doesn't have sufficient free CPU memory.
+ You may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
+ the checkpoint. Or you can load state_dict in lazy mode ::
+
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, lazy_mode=True) # not on cpu
+ for name, lazy_tensor in state_dict.item():
+ tensor = lazy_tensor.contiguous() # to cpu
+ print(name, tensor)
+ # del tensor to release memory if it no longer in use
+ """
+ if tag is None:
+ latest_path = os.path.join(checkpoint_dir, 'latest')
+ if os.path.isfile(latest_path):
+ with open(latest_path, 'r') as fd:
+ tag = fd.read().strip()
+ else:
+ raise ValueError(f"Unable to find 'latest' file at {latest_path}")
+
+ ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
+
+ if not os.path.isdir(ds_checkpoint_dir):
+ raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
+
+ state_dict = _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
+ if lazy_mode:
+ return state_dict
+ else:
+ return to_torch_tensor(state_dict)
+
+
+def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir,
+ output_dir,
+ max_shard_size="5GB",
+ safe_serialization=False,
+ tag=None,
+ exclude_frozen_parameters=False):
+ """
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
+ loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
+
+ Args:
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+ - ``output_dir``: directory to the pytorch fp32 state_dict output files
+ - ``max_shard_size``: the maximum size for a checkpoint before being sharded, default value is 5GB
+ - ``safe_serialization``: whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+ - ``exclude_frozen_parameters``: exclude frozen parameters
+ """
+
+ # Dependency pre-check
+ if safe_serialization:
+ try:
+ from safetensors.torch import save_file
+ except ImportError:
+ print('If you want to use `safe_serialization`, please `pip install safetensors`')
+ raise
+ if max_shard_size is not None:
+ try:
+ from huggingface_hub import split_torch_state_dict_into_shards
+ except ImportError:
+ print('If you want to use `max_shard_size`, please `pip install huggingface_hub`')
+ raise
+
+ # Convert zero checkpoint to state_dict
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
+ tag,
+ exclude_frozen_parameters,
+ lazy_mode=True)
+
+ # Shard the model if it is too big.
+ weights_name = "model.safetensors" if safe_serialization else "pytorch_model.bin"
+ if max_shard_size is not None:
+ filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors")
+ # an memory-efficient approach for sharding
+ empty_state_dict = to_torch_tensor(state_dict, return_empty_tensor=True)
+ state_dict_split = split_torch_state_dict_into_shards(empty_state_dict,
+ filename_pattern=filename_pattern,
+ max_shard_size=max_shard_size)
+ else:
+ from collections import namedtuple
+ StateDictSplit = namedtuple("StateDictSplit", ["is_sharded", "filename_to_tensors"])
+ state_dict_split = StateDictSplit(is_sharded=False,
+ filename_to_tensors={weights_name: list(state_dict.keys())})
+
+ # Save the model by shard
+ os.makedirs(output_dir, exist_ok=True)
+ filename_to_tensors = state_dict_split.filename_to_tensors.items()
+ for shard_file, tensors in tqdm(filename_to_tensors, desc="Saving checkpoint shards"):
+ shard_state_dict = {tensor_name: state_dict[tensor_name] for tensor_name in tensors}
+ shard_state_dict = to_torch_tensor(shard_state_dict)
+ output_path = os.path.join(output_dir, shard_file)
+ if safe_serialization:
+ save_file(shard_state_dict, output_path, metadata={"format": "pt"})
+ else:
+ torch.save(shard_state_dict, output_path)
+ # release the memory of current shard
+ for tensor_name in list(shard_state_dict.keys()):
+ del state_dict[tensor_name]
+ del shard_state_dict[tensor_name]
+ del shard_state_dict
+ gc.collect()
+
+ # Save index if sharded
+ if state_dict_split.is_sharded:
+ index = {
+ "metadata": state_dict_split.metadata,
+ "weight_map": state_dict_split.tensor_to_filename,
+ }
+ save_index_file = "model.safetensors.index.json" if safe_serialization else "pytorch_model.bin.index.json"
+ save_index_file = os.path.join(output_dir, save_index_file)
+ with open(save_index_file, "w", encoding="utf-8") as f:
+ content = json.dumps(index, indent=2, sort_keys=True) + "\n"
+ f.write(content)
+
+
+def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
+ """
+ 1. Put the provided model to cpu
+ 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
+ 3. Load it into the provided model
+
+ Args:
+ - ``model``: the model object to update
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+
+ Returns:
+ - ``model`: modified model
+
+ Make sure you have plenty of CPU memory available before you call this function. If you don't
+ have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
+ conveniently placed for you in the checkpoint folder.
+
+ A typical usage might be ::
+
+ from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
+ model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
+ # submit to model hub or save the model to share with others
+
+ Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context
+ of the same application. i.e. you will need to re-initialize the deepspeed engine, since
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+ """
+ logger.info("Extracting fp32 weights")
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+
+ logger.info("Overwriting model with fp32 weights")
+ model = model.cpu()
+ model.load_state_dict(state_dict, strict=False)
+
+ return model
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("checkpoint_dir",
+ type=str,
+ help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
+ parser.add_argument("output_dir",
+ type=str,
+ help="directory to the pytorch fp32 state_dict output files"
+ "(e.g. path/checkpoint-12-output/)")
+ parser.add_argument(
+ "--max_shard_size",
+ type=str,
+ default="5GB",
+ help="The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size"
+ "lower than this size. If expressed as a string, needs to be digits followed by a unit (like `5MB`"
+ "We default it to 5GB in order for models to be able to run easily on free-tier google colab instances"
+ "without CPU OOM issues.")
+ parser.add_argument(
+ "--safe_serialization",
+ default=False,
+ action='store_true',
+ help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).")
+ parser.add_argument("-t",
+ "--tag",
+ type=str,
+ default=None,
+ help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
+ parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
+ parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
+ args = parser.parse_args()
+
+ debug = args.debug
+
+ convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
+ args.output_dir,
+ max_shard_size=args.max_shard_size,
+ safe_serialization=args.safe_serialization,
+ tag=args.tag,
+ exclude_frozen_parameters=args.exclude_frozen_parameters)
diff --git a/checkpoint-200/README.md b/checkpoint-200/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..7f664a3ae5272e4eb0e149c4adcd2f8c4eb45f00
--- /dev/null
+++ b/checkpoint-200/README.md
@@ -0,0 +1,207 @@
+---
+base_model: /mnt/phwfile/datafrontier/chenyang/data/models/Eagle-X5-7B
+library_name: peft
+pipeline_tag: text-generation
+tags:
+- base_model:adapter:/mnt/phwfile/datafrontier/chenyang/data/models/Eagle-X5-7B
+- lora
+- transformers
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+### Framework versions
+
+- PEFT 0.17.1
\ No newline at end of file
diff --git a/checkpoint-200/adapter_config.json b/checkpoint-200/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..35553142128a486f364ea05de87725530372c975
--- /dev/null
+++ b/checkpoint-200/adapter_config.json
@@ -0,0 +1,42 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "/mnt/phwfile/datafrontier/chenyang/data/models/Eagle-X5-7B",
+ "bias": "none",
+ "corda_config": null,
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 128,
+ "lora_bias": false,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "qalora_group_size": 16,
+ "r": 64,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "down_proj",
+ "gate_proj",
+ "o_proj",
+ "k_proj",
+ "v_proj",
+ "up_proj"
+ ],
+ "target_parameters": null,
+ "task_type": "CAUSAL_LM",
+ "trainable_token_indices": null,
+ "use_dora": false,
+ "use_qalora": false,
+ "use_rslora": false
+}
\ No newline at end of file
diff --git a/checkpoint-200/adapter_model.safetensors b/checkpoint-200/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..b2b600afc96d2185d8c88b05f050294714bf2615
--- /dev/null
+++ b/checkpoint-200/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8aeddda7a5430a0d1651844b166ba8ddfecd7afb186b9166d8a52df0e23941bc
+size 357679240
diff --git a/checkpoint-200/global_step200/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt b/checkpoint-200/global_step200/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..061bf87810aa00bcafa4bba555e7bd469332c054
--- /dev/null
+++ b/checkpoint-200/global_step200/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d3a5e89bd3a8517912530cde7b99bf691eca15162dadf953662d142216be4be7
+size 340551088
diff --git a/checkpoint-200/global_step200/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt b/checkpoint-200/global_step200/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..c4eed7705e440dbf324f405dbf7a651746c1efb2
--- /dev/null
+++ b/checkpoint-200/global_step200/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4f00939386ed6b3109adb952b65521f136ac45083109889b3bbffdca6f7bd251
+size 340550896
diff --git a/checkpoint-200/global_step200/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt b/checkpoint-200/global_step200/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..35fb0d8af9aa8c71cc4c61d9a9fba10f0df65b83
--- /dev/null
+++ b/checkpoint-200/global_step200/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a131275c0730160f19b2134fbf221620bceca51977a3545d9f45e54328cfedeb
+size 340550832
diff --git a/checkpoint-200/global_step200/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt b/checkpoint-200/global_step200/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..8a3440e6b501472b48ca8a317ba9e0f75f8b7302
--- /dev/null
+++ b/checkpoint-200/global_step200/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0e5ddfcb8de66d39e575ccb49f3ecb3649ff89eb94553c0a97ef21b37e7d6bcb
+size 340551216
diff --git a/checkpoint-200/global_step200/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt b/checkpoint-200/global_step200/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..35c5b759ecef4dbd5d52c2e3bda637a96b20141c
--- /dev/null
+++ b/checkpoint-200/global_step200/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4ddc246dc7170f1f277840ea9a66b99bb161ebf9492d39bec680b2b13788157a
+size 340551088
diff --git a/checkpoint-200/global_step200/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt b/checkpoint-200/global_step200/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..f0c3ae1d34a9e6de0a1321ddce2023b217bdb079
--- /dev/null
+++ b/checkpoint-200/global_step200/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:82f981f49514dc30f85959afa5e4cbe8d1a9c73e1cab44292d09ab99653e7ad9
+size 340574832
diff --git a/checkpoint-200/global_step200/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt b/checkpoint-200/global_step200/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..b01d9791e4e91ed31ff19ec81e26ca00fd446769
--- /dev/null
+++ b/checkpoint-200/global_step200/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:99dd9da32aa759d2891df3cd3b8a18ff38a4fc1f61f8cb1895dc226b6722af3e
+size 340561584
diff --git a/checkpoint-200/global_step200/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt b/checkpoint-200/global_step200/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..8831fcef47d048c20b822913de438b4f477fcae2
--- /dev/null
+++ b/checkpoint-200/global_step200/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:adae006e1874c9b8c06bf479cb550a889acc618f1db01eafcdc98c968b1869e6
+size 340542512
diff --git a/checkpoint-200/global_step200/mp_rank_00_model_states.pt b/checkpoint-200/global_step200/mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..746faf2573c9f427211064f1e2d91e5cb5fbc5e6
--- /dev/null
+++ b/checkpoint-200/global_step200/mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:13afbf9502c24a20dfcdd7279f33db543ed67088ae8a59e481afec1f23238f78
+size 456033832
diff --git a/checkpoint-200/latest b/checkpoint-200/latest
new file mode 100644
index 0000000000000000000000000000000000000000..753e24e10f3a2489150f458205cf759fd8b6081f
--- /dev/null
+++ b/checkpoint-200/latest
@@ -0,0 +1 @@
+global_step200
\ No newline at end of file
diff --git a/checkpoint-200/rng_state_0.pth b/checkpoint-200/rng_state_0.pth
new file mode 100644
index 0000000000000000000000000000000000000000..b3412ca4cd8c764c6b9b305056cab69068ef39c0
--- /dev/null
+++ b/checkpoint-200/rng_state_0.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3cddb983d68c67be419b5956b120512bcf7e312c6a9b7cfc7ab25c71a4836b68
+size 15984
diff --git a/checkpoint-200/rng_state_1.pth b/checkpoint-200/rng_state_1.pth
new file mode 100644
index 0000000000000000000000000000000000000000..db92f88f608b761263c38594df49f80b8672546c
--- /dev/null
+++ b/checkpoint-200/rng_state_1.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d17ef74f9bf39b04739ae4d4e0ce42a88a374b2450a1ded094b97f4849ecf642
+size 15984
diff --git a/checkpoint-200/rng_state_2.pth b/checkpoint-200/rng_state_2.pth
new file mode 100644
index 0000000000000000000000000000000000000000..67bc5f39c70d48e59d7b7a05aef7dcb91cbfdd1a
--- /dev/null
+++ b/checkpoint-200/rng_state_2.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9519fb75bfd475d17c874f206c529c4daf85e00f355dd4a93eda09ce431bfa1e
+size 15984
diff --git a/checkpoint-200/rng_state_3.pth b/checkpoint-200/rng_state_3.pth
new file mode 100644
index 0000000000000000000000000000000000000000..692dd0fc4787b77dca910b590e707d6560a5a9f9
--- /dev/null
+++ b/checkpoint-200/rng_state_3.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ffc089f193a4cd8e1048db8dccfab33e535ab41945cb4f71fa20ebdaf8725afa
+size 15984
diff --git a/checkpoint-200/rng_state_4.pth b/checkpoint-200/rng_state_4.pth
new file mode 100644
index 0000000000000000000000000000000000000000..83f3b72e32d694b5e87a865d1dcdb2fb7eb653e6
--- /dev/null
+++ b/checkpoint-200/rng_state_4.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ab9a68e20d550a69dd7b24e5068268f83797f2534ce68ac89fc7dd9d40c27bc3
+size 15984
diff --git a/checkpoint-200/rng_state_5.pth b/checkpoint-200/rng_state_5.pth
new file mode 100644
index 0000000000000000000000000000000000000000..dc3ef8f063257bc4650b6b371f472a270f9011d8
--- /dev/null
+++ b/checkpoint-200/rng_state_5.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:60953c9a8d481a7d740d34cfeabd70de9da2dbccbb1acdbed32e21870b758005
+size 15984
diff --git a/checkpoint-200/rng_state_6.pth b/checkpoint-200/rng_state_6.pth
new file mode 100644
index 0000000000000000000000000000000000000000..43841f9f8a3a212c57ced0ec0e8eecd0a910ed34
--- /dev/null
+++ b/checkpoint-200/rng_state_6.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e013ca57f5ed10dc71c747c0d6e2b5c740126462fcbf2ff119d1e23ea9e84429
+size 15984
diff --git a/checkpoint-200/rng_state_7.pth b/checkpoint-200/rng_state_7.pth
new file mode 100644
index 0000000000000000000000000000000000000000..cca804ab10924518e1b454189f7b980dc703b56e
--- /dev/null
+++ b/checkpoint-200/rng_state_7.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a606b21e70f2d7bad8c2afead30d22c24acba0ed2283d9202c9a59d708a41ebf
+size 15984
diff --git a/checkpoint-200/scheduler.pt b/checkpoint-200/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..61104efea155848e065f640211df663d591723a5
--- /dev/null
+++ b/checkpoint-200/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4f68349d952aa8e01bbd66c36920ebd196c5f137316087752ec1ba84475b3df9
+size 1064
diff --git a/checkpoint-200/special_tokens_map.json b/checkpoint-200/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..14761dcf1466dc232bd41de9c21d4c617b15755e
--- /dev/null
+++ b/checkpoint-200/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "<unk>",
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/checkpoint-200/tokenizer.model b/checkpoint-200/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..6c00c742ce03c627d6cd5b795984876fa49fa899
--- /dev/null
+++ b/checkpoint-200/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
diff --git a/checkpoint-200/tokenizer_config.json b/checkpoint-200/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..26c65df1bf794f101c1dd54c908180dc0d880fe3
--- /dev/null
+++ b/checkpoint-200/tokenizer_config.json
@@ -0,0 +1,43 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": true,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "<s>",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "</s>",
+ "legacy": false,
+ "model_max_length": 2048,
+ "pad_token": "<unk>",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "<unk>",
+ "use_default_system_prompt": false
+}
diff --git a/checkpoint-200/trainer_state.json b/checkpoint-200/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..6d79bbd0b94994fd2a0ce4448bff3690e5d402de
--- /dev/null
+++ b/checkpoint-200/trainer_state.json
@@ -0,0 +1,1433 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 2.6666666666666665,
+ "eval_steps": 500,
+ "global_step": 200,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.013333333333333334,
+ "grad_norm": 1.0028682947158813,
+ "learning_rate": 2.2222222222222223e-05,
+ "loss": 1.43,
+ "step": 1
+ },
+ {
+ "epoch": 0.02666666666666667,
+ "grad_norm": 1.6397583484649658,
+ "learning_rate": 4.4444444444444447e-05,
+ "loss": 1.5374,
+ "step": 2
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 1.2198785543441772,
+ "learning_rate": 6.666666666666667e-05,
+ "loss": 1.5106,
+ "step": 3
+ },
+ {
+ "epoch": 0.05333333333333334,
+ "grad_norm": 0.8468486070632935,
+ "learning_rate": 8.888888888888889e-05,
+ "loss": 1.447,
+ "step": 4
+ },
+ {
+ "epoch": 0.06666666666666667,
+ "grad_norm": 0.9268608689308167,
+ "learning_rate": 0.00011111111111111112,
+ "loss": 1.4743,
+ "step": 5
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 1.0168085098266602,
+ "learning_rate": 0.00013333333333333334,
+ "loss": 1.3245,
+ "step": 6
+ },
+ {
+ "epoch": 0.09333333333333334,
+ "grad_norm": 0.6773934960365295,
+ "learning_rate": 0.00015555555555555556,
+ "loss": 1.3738,
+ "step": 7
+ },
+ {
+ "epoch": 0.10666666666666667,
+ "grad_norm": 0.6985631585121155,
+ "learning_rate": 0.00017777777777777779,
+ "loss": 1.3951,
+ "step": 8
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 1.0221399068832397,
+ "learning_rate": 0.0002,
+ "loss": 1.3246,
+ "step": 9
+ },
+ {
+ "epoch": 0.13333333333333333,
+ "grad_norm": 0.6119747161865234,
+ "learning_rate": 0.00019999417253661235,
+ "loss": 1.2951,
+ "step": 10
+ },
+ {
+ "epoch": 0.14666666666666667,
+ "grad_norm": 0.6660990118980408,
+ "learning_rate": 0.00019997669082563597,
+ "loss": 1.2529,
+ "step": 11
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.5874819755554199,
+ "learning_rate": 0.00019994755690455152,
+ "loss": 1.2252,
+ "step": 12
+ },
+ {
+ "epoch": 0.17333333333333334,
+ "grad_norm": 0.4818006157875061,
+ "learning_rate": 0.00019990677416889608,
+ "loss": 1.1708,
+ "step": 13
+ },
+ {
+ "epoch": 0.18666666666666668,
+ "grad_norm": 0.652045488357544,
+ "learning_rate": 0.0001998543473718677,
+ "loss": 0.9865,
+ "step": 14
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.5517733693122864,
+ "learning_rate": 0.00019979028262377118,
+ "loss": 1.181,
+ "step": 15
+ },
+ {
+ "epoch": 0.21333333333333335,
+ "grad_norm": 0.47720542550086975,
+ "learning_rate": 0.00019971458739130598,
+ "loss": 1.0633,
+ "step": 16
+ },
+ {
+ "epoch": 0.22666666666666666,
+ "grad_norm": 0.7947096228599548,
+ "learning_rate": 0.000199627270496696,
+ "loss": 1.1458,
+ "step": 17
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.5301257371902466,
+ "learning_rate": 0.0001995283421166614,
+ "loss": 1.2245,
+ "step": 18
+ },
+ {
+ "epoch": 0.25333333333333335,
+ "grad_norm": 0.9473271369934082,
+ "learning_rate": 0.00019941781378123244,
+ "loss": 1.0859,
+ "step": 19
+ },
+ {
+ "epoch": 0.26666666666666666,
+ "grad_norm": 1.1834161281585693,
+ "learning_rate": 0.00019929569837240564,
+ "loss": 1.1808,
+ "step": 20
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.6784033179283142,
+ "learning_rate": 0.00019916201012264254,
+ "loss": 1.202,
+ "step": 21
+ },
+ {
+ "epoch": 0.29333333333333333,
+ "grad_norm": 0.7274785041809082,
+ "learning_rate": 0.00019901676461321068,
+ "loss": 1.2242,
+ "step": 22
+ },
+ {
+ "epoch": 0.30666666666666664,
+ "grad_norm": 0.7520783543586731,
+ "learning_rate": 0.00019885997877236788,
+ "loss": 1.173,
+ "step": 23
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.8218541145324707,
+ "learning_rate": 0.00019869167087338907,
+ "loss": 1.0723,
+ "step": 24
+ },
+ {
+ "epoch": 0.3333333333333333,
+ "grad_norm": 0.6361420154571533,
+ "learning_rate": 0.00019851186053243666,
+ "loss": 1.1607,
+ "step": 25
+ },
+ {
+ "epoch": 0.3466666666666667,
+ "grad_norm": 0.6401374936103821,
+ "learning_rate": 0.00019832056870627417,
+ "loss": 1.1398,
+ "step": 26
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 1.0995603799819946,
+ "learning_rate": 0.0001981178176898239,
+ "loss": 0.9475,
+ "step": 27
+ },
+ {
+ "epoch": 0.37333333333333335,
+ "grad_norm": 0.5122275948524475,
+ "learning_rate": 0.00019790363111356837,
+ "loss": 1.0125,
+ "step": 28
+ },
+ {
+ "epoch": 0.38666666666666666,
+ "grad_norm": 0.9316695332527161,
+ "learning_rate": 0.00019767803394079615,
+ "loss": 1.0554,
+ "step": 29
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.6831843256950378,
+ "learning_rate": 0.00019744105246469263,
+ "loss": 1.027,
+ "step": 30
+ },
+ {
+ "epoch": 0.41333333333333333,
+ "grad_norm": 0.5529218912124634,
+ "learning_rate": 0.0001971927143052752,
+ "loss": 1.0218,
+ "step": 31
+ },
+ {
+ "epoch": 0.4266666666666667,
+ "grad_norm": 0.7164443135261536,
+ "learning_rate": 0.00019693304840617457,
+ "loss": 1.0901,
+ "step": 32
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.7430665493011475,
+ "learning_rate": 0.00019666208503126112,
+ "loss": 1.125,
+ "step": 33
+ },
+ {
+ "epoch": 0.4533333333333333,
+ "grad_norm": 0.7198122143745422,
+ "learning_rate": 0.00019637985576111778,
+ "loss": 0.9926,
+ "step": 34
+ },
+ {
+ "epoch": 0.4666666666666667,
+ "grad_norm": 0.6613873839378357,
+ "learning_rate": 0.0001960863934893594,
+ "loss": 1.1925,
+ "step": 35
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 1.2440484762191772,
+ "learning_rate": 0.00019578173241879872,
+ "loss": 0.8948,
+ "step": 36
+ },
+ {
+ "epoch": 0.49333333333333335,
+ "grad_norm": 0.6995570659637451,
+ "learning_rate": 0.00019546590805746052,
+ "loss": 0.9525,
+ "step": 37
+ },
+ {
+ "epoch": 0.5066666666666667,
+ "grad_norm": 0.6873968243598938,
+ "learning_rate": 0.00019513895721444286,
+ "loss": 0.9966,
+ "step": 38
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.5394595861434937,
+ "learning_rate": 0.00019480091799562704,
+ "loss": 1.0787,
+ "step": 39
+ },
+ {
+ "epoch": 0.5333333333333333,
+ "grad_norm": 0.9196312427520752,
+ "learning_rate": 0.00019445182979923654,
+ "loss": 1.0559,
+ "step": 40
+ },
+ {
+ "epoch": 0.5466666666666666,
+ "grad_norm": 2.5954103469848633,
+ "learning_rate": 0.000194091733311245,
+ "loss": 0.9638,
+ "step": 41
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 1.244681715965271,
+ "learning_rate": 0.00019372067050063438,
+ "loss": 0.9325,
+ "step": 42
+ },
+ {
+ "epoch": 0.5733333333333334,
+ "grad_norm": 0.613715410232544,
+ "learning_rate": 0.0001933386846145036,
+ "loss": 0.9428,
+ "step": 43
+ },
+ {
+ "epoch": 0.5866666666666667,
+ "grad_norm": 0.9604235291481018,
+ "learning_rate": 0.00019294582017302797,
+ "loss": 0.9486,
+ "step": 44
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.7591239809989929,
+ "learning_rate": 0.00019254212296427044,
+ "loss": 1.0955,
+ "step": 45
+ },
+ {
+ "epoch": 0.6133333333333333,
+ "grad_norm": 0.5218423008918762,
+ "learning_rate": 0.0001921276400388451,
+ "loss": 1.0368,
+ "step": 46
+ },
+ {
+ "epoch": 0.6266666666666667,
+ "grad_norm": 0.5670755505561829,
+ "learning_rate": 0.00019170241970443343,
+ "loss": 0.9854,
+ "step": 47
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.8089922070503235,
+ "learning_rate": 0.00019126651152015403,
+ "loss": 1.0396,
+ "step": 48
+ },
+ {
+ "epoch": 0.6533333333333333,
+ "grad_norm": 0.6459051966667175,
+ "learning_rate": 0.00019081996629078657,
+ "loss": 1.0156,
+ "step": 49
+ },
+ {
+ "epoch": 0.6666666666666666,
+ "grad_norm": 1.1918997764587402,
+ "learning_rate": 0.00019036283606085053,
+ "loss": 1.1542,
+ "step": 50
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.6263722777366638,
+ "learning_rate": 0.00018989517410853955,
+ "loss": 1.1471,
+ "step": 51
+ },
+ {
+ "epoch": 0.6933333333333334,
+ "grad_norm": 0.534618616104126,
+ "learning_rate": 0.00018941703493951164,
+ "loss": 0.9055,
+ "step": 52
+ },
+ {
+ "epoch": 0.7066666666666667,
+ "grad_norm": 0.529036819934845,
+ "learning_rate": 0.00018892847428053693,
+ "loss": 0.9896,
+ "step": 53
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.716572105884552,
+ "learning_rate": 0.00018842954907300236,
+ "loss": 1.0204,
+ "step": 54
+ },
+ {
+ "epoch": 0.7333333333333333,
+ "grad_norm": 0.832662045955658,
+ "learning_rate": 0.00018792031746627563,
+ "loss": 1.0263,
+ "step": 55
+ },
+ {
+ "epoch": 0.7466666666666667,
+ "grad_norm": 0.5659884810447693,
+ "learning_rate": 0.0001874008388109276,
+ "loss": 1.0529,
+ "step": 56
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.4971260726451874,
+ "learning_rate": 0.00018687117365181512,
+ "loss": 1.0109,
+ "step": 57
+ },
+ {
+ "epoch": 0.7733333333333333,
+ "grad_norm": 0.5997689962387085,
+ "learning_rate": 0.00018633138372102468,
+ "loss": 1.0363,
+ "step": 58
+ },
+ {
+ "epoch": 0.7866666666666666,
+ "grad_norm": 0.42450904846191406,
+ "learning_rate": 0.00018578153193067745,
+ "loss": 1.0156,
+ "step": 59
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 1.0025880336761475,
+ "learning_rate": 0.00018522168236559695,
+ "loss": 0.96,
+ "step": 60
+ },
+ {
+ "epoch": 0.8133333333333334,
+ "grad_norm": 0.47225672006607056,
+ "learning_rate": 0.00018465190027584005,
+ "loss": 1.0402,
+ "step": 61
+ },
+ {
+ "epoch": 0.8266666666666667,
+ "grad_norm": 0.6419042348861694,
+ "learning_rate": 0.00018407225206909208,
+ "loss": 0.9727,
+ "step": 62
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.9594618082046509,
+ "learning_rate": 0.00018348280530292713,
+ "loss": 0.986,
+ "step": 63
+ },
+ {
+ "epoch": 0.8533333333333334,
+ "grad_norm": 0.5415605902671814,
+ "learning_rate": 0.00018288362867693414,
+ "loss": 1.019,
+ "step": 64
+ },
+ {
+ "epoch": 0.8666666666666667,
+ "grad_norm": 0.9662086367607117,
+ "learning_rate": 0.00018227479202471015,
+ "loss": 0.9531,
+ "step": 65
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.7523136734962463,
+ "learning_rate": 0.0001816563663057211,
+ "loss": 1.0383,
+ "step": 66
+ },
+ {
+ "epoch": 0.8933333333333333,
+ "grad_norm": 0.7249945998191833,
+ "learning_rate": 0.00018102842359703176,
+ "loss": 1.1131,
+ "step": 67
+ },
+ {
+ "epoch": 0.9066666666666666,
+ "grad_norm": 0.4781404435634613,
+ "learning_rate": 0.000180391037084905,
+ "loss": 0.9701,
+ "step": 68
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.5435504913330078,
+ "learning_rate": 0.00017974428105627208,
+ "loss": 1.2701,
+ "step": 69
+ },
+ {
+ "epoch": 0.9333333333333333,
+ "grad_norm": 0.48021838068962097,
+ "learning_rate": 0.00017908823089007457,
+ "loss": 0.9212,
+ "step": 70
+ },
+ {
+ "epoch": 0.9466666666666667,
+ "grad_norm": 0.7063950300216675,
+ "learning_rate": 0.00017842296304847893,
+ "loss": 1.0076,
+ "step": 71
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.5694530606269836,
+ "learning_rate": 0.00017774855506796496,
+ "loss": 1.0417,
+ "step": 72
+ },
+ {
+ "epoch": 0.9733333333333334,
+ "grad_norm": 0.6120775938034058,
+ "learning_rate": 0.00017706508555028893,
+ "loss": 1.0501,
+ "step": 73
+ },
+ {
+ "epoch": 0.9866666666666667,
+ "grad_norm": 0.5728889107704163,
+ "learning_rate": 0.0001763726341533227,
+ "loss": 1.0024,
+ "step": 74
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.8452621102333069,
+ "learning_rate": 0.00017567128158176953,
+ "loss": 1.0966,
+ "step": 75
+ },
+ {
+ "epoch": 1.0133333333333334,
+ "grad_norm": 0.5769606828689575,
+ "learning_rate": 0.0001749611095777581,
+ "loss": 0.8877,
+ "step": 76
+ },
+ {
+ "epoch": 1.0266666666666666,
+ "grad_norm": 0.9046480059623718,
+ "learning_rate": 0.00017424220091131535,
+ "loss": 0.752,
+ "step": 77
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.4488053023815155,
+ "learning_rate": 0.00017351463937072004,
+ "loss": 0.7588,
+ "step": 78
+ },
+ {
+ "epoch": 1.0533333333333332,
+ "grad_norm": 0.41479629278182983,
+ "learning_rate": 0.00017277850975273696,
+ "loss": 0.779,
+ "step": 79
+ },
+ {
+ "epoch": 1.0666666666666667,
+ "grad_norm": 0.7192550301551819,
+ "learning_rate": 0.000172033897852734,
+ "loss": 0.7809,
+ "step": 80
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.7553783655166626,
+ "learning_rate": 0.00017128089045468294,
+ "loss": 0.7421,
+ "step": 81
+ },
+ {
+ "epoch": 1.0933333333333333,
+ "grad_norm": 0.5650737881660461,
+ "learning_rate": 0.0001705195753210446,
+ "loss": 0.7044,
+ "step": 82
+ },
+ {
+ "epoch": 1.1066666666666667,
+ "grad_norm": 0.6692880988121033,
+ "learning_rate": 0.0001697500411825403,
+ "loss": 0.8415,
+ "step": 83
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.6710836291313171,
+ "learning_rate": 0.00016897237772781044,
+ "loss": 0.7247,
+ "step": 84
+ },
+ {
+ "epoch": 1.1333333333333333,
+ "grad_norm": 0.5887194275856018,
+ "learning_rate": 0.0001681866755929612,
+ "loss": 0.7947,
+ "step": 85
+ },
+ {
+ "epoch": 1.1466666666666667,
+ "grad_norm": 0.5538906455039978,
+ "learning_rate": 0.00016739302635100108,
+ "loss": 0.7337,
+ "step": 86
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.6018480062484741,
+ "learning_rate": 0.00016659152250116812,
+ "loss": 0.6801,
+ "step": 87
+ },
+ {
+ "epoch": 1.1733333333333333,
+ "grad_norm": 0.548251748085022,
+ "learning_rate": 0.00016578225745814907,
+ "loss": 0.6898,
+ "step": 88
+ },
+ {
+ "epoch": 1.1866666666666668,
+ "grad_norm": 0.49416568875312805,
+ "learning_rate": 0.00016496532554119214,
+ "loss": 0.719,
+ "step": 89
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.6101306676864624,
+ "learning_rate": 0.000164140821963114,
+ "loss": 0.7917,
+ "step": 90
+ },
+ {
+ "epoch": 1.2133333333333334,
+ "grad_norm": 0.588020920753479,
+ "learning_rate": 0.000163308842819203,
+ "loss": 0.8035,
+ "step": 91
+ },
+ {
+ "epoch": 1.2266666666666666,
+ "grad_norm": 0.5376534461975098,
+ "learning_rate": 0.00016246948507601914,
+ "loss": 0.7816,
+ "step": 92
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.549625813961029,
+ "learning_rate": 0.00016162284656009274,
+ "loss": 0.7245,
+ "step": 93
+ },
+ {
+ "epoch": 1.2533333333333334,
+ "grad_norm": 0.6054933667182922,
+ "learning_rate": 0.0001607690259465229,
+ "loss": 0.7484,
+ "step": 94
+ },
+ {
+ "epoch": 1.2666666666666666,
+ "grad_norm": 0.5613316297531128,
+ "learning_rate": 0.00015990812274747692,
+ "loss": 0.7369,
+ "step": 95
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.5984250903129578,
+ "learning_rate": 0.00015904023730059228,
+ "loss": 0.891,
+ "step": 96
+ },
+ {
+ "epoch": 1.2933333333333334,
+ "grad_norm": 0.6490495204925537,
+ "learning_rate": 0.00015816547075728226,
+ "loss": 0.775,
+ "step": 97
+ },
+ {
+ "epoch": 1.3066666666666666,
+ "grad_norm": 0.6675053834915161,
+ "learning_rate": 0.000157283925070947,
+ "loss": 0.7565,
+ "step": 98
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.5397291779518127,
+ "learning_rate": 0.00015639570298509064,
+ "loss": 0.6685,
+ "step": 99
+ },
+ {
+ "epoch": 1.3333333333333333,
+ "grad_norm": 0.5830179452896118,
+ "learning_rate": 0.000155500908021347,
+ "loss": 0.8132,
+ "step": 100
+ },
+ {
+ "epoch": 1.3466666666666667,
+ "grad_norm": 0.511802077293396,
+ "learning_rate": 0.00015459964446741382,
+ "loss": 0.7664,
+ "step": 101
+ },
+ {
+ "epoch": 1.3599999999999999,
+ "grad_norm": 1.350032091140747,
+ "learning_rate": 0.0001536920173648984,
+ "loss": 0.8179,
+ "step": 102
+ },
+ {
+ "epoch": 1.3733333333333333,
+ "grad_norm": 0.7308780550956726,
+ "learning_rate": 0.00015277813249707487,
+ "loss": 0.8401,
+ "step": 103
+ },
+ {
+ "epoch": 1.3866666666666667,
+ "grad_norm": 0.5292226076126099,
+ "learning_rate": 0.0001518580963765555,
+ "loss": 0.6367,
+ "step": 104
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.6958481073379517,
+ "learning_rate": 0.00015093201623287631,
+ "loss": 0.7173,
+ "step": 105
+ },
+ {
+ "epoch": 1.4133333333333333,
+ "grad_norm": 0.7024071216583252,
+ "learning_rate": 0.00015000000000000001,
+ "loss": 0.5604,
+ "step": 106
+ },
+ {
+ "epoch": 1.4266666666666667,
+ "grad_norm": 0.5597444772720337,
+ "learning_rate": 0.00014906215630373606,
+ "loss": 0.6767,
+ "step": 107
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.6003674864768982,
+ "learning_rate": 0.00014811859444908052,
+ "loss": 0.8149,
+ "step": 108
+ },
+ {
+ "epoch": 1.4533333333333334,
+ "grad_norm": 0.5815126895904541,
+ "learning_rate": 0.00014716942440747664,
+ "loss": 0.7801,
+ "step": 109
+ },
+ {
+ "epoch": 1.4666666666666668,
+ "grad_norm": 0.7836669683456421,
+ "learning_rate": 0.0001462147568039977,
+ "loss": 0.8452,
+ "step": 110
+ },
+ {
+ "epoch": 1.48,
+ "grad_norm": 0.8783419132232666,
+ "learning_rate": 0.00014525470290445392,
+ "loss": 0.8257,
+ "step": 111
+ },
+ {
+ "epoch": 1.4933333333333334,
+ "grad_norm": 0.46948131918907166,
+ "learning_rate": 0.00014428937460242417,
+ "loss": 0.7481,
+ "step": 112
+ },
+ {
+ "epoch": 1.5066666666666668,
+ "grad_norm": 0.5725980401039124,
+ "learning_rate": 0.00014331888440621533,
+ "loss": 0.8267,
+ "step": 113
+ },
+ {
+ "epoch": 1.52,
+ "grad_norm": 0.4418632686138153,
+ "learning_rate": 0.00014234334542574906,
+ "loss": 0.7631,
+ "step": 114
+ },
+ {
+ "epoch": 1.5333333333333332,
+ "grad_norm": 0.6430942416191101,
+ "learning_rate": 0.00014136287135937915,
+ "loss": 0.8817,
+ "step": 115
+ },
+ {
+ "epoch": 1.5466666666666666,
+ "grad_norm": 0.5670009255409241,
+ "learning_rate": 0.00014037757648064018,
+ "loss": 0.5991,
+ "step": 116
+ },
+ {
+ "epoch": 1.56,
+ "grad_norm": 0.5407504439353943,
+ "learning_rate": 0.00013938757562492873,
+ "loss": 0.6898,
+ "step": 117
+ },
+ {
+ "epoch": 1.5733333333333333,
+ "grad_norm": 0.5176808834075928,
+ "learning_rate": 0.00013839298417611963,
+ "loss": 0.731,
+ "step": 118
+ },
+ {
+ "epoch": 1.5866666666666667,
+ "grad_norm": 0.9752798080444336,
+ "learning_rate": 0.00013739391805311793,
+ "loss": 0.6736,
+ "step": 119
+ },
+ {
+ "epoch": 1.6,
+ "grad_norm": 0.7100059390068054,
+ "learning_rate": 0.00013639049369634876,
+ "loss": 0.7369,
+ "step": 120
+ },
+ {
+ "epoch": 1.6133333333333333,
+ "grad_norm": 0.6285961270332336,
+ "learning_rate": 0.0001353828280541861,
+ "loss": 0.721,
+ "step": 121
+ },
+ {
+ "epoch": 1.6266666666666667,
+ "grad_norm": 0.5981026291847229,
+ "learning_rate": 0.00013437103856932264,
+ "loss": 0.8094,
+ "step": 122
+ },
+ {
+ "epoch": 1.6400000000000001,
+ "grad_norm": 0.6587502360343933,
+ "learning_rate": 0.00013335524316508208,
+ "loss": 0.7646,
+ "step": 123
+ },
+ {
+ "epoch": 1.6533333333333333,
+ "grad_norm": 0.5544253587722778,
+ "learning_rate": 0.00013233556023167485,
+ "loss": 0.7165,
+ "step": 124
+ },
+ {
+ "epoch": 1.6666666666666665,
+ "grad_norm": 0.6012857556343079,
+ "learning_rate": 0.00013131210861240026,
+ "loss": 0.8104,
+ "step": 125
+ },
+ {
+ "epoch": 1.6800000000000002,
+ "grad_norm": 0.5157524347305298,
+ "learning_rate": 0.00013028500758979506,
+ "loss": 0.8585,
+ "step": 126
+ },
+ {
+ "epoch": 1.6933333333333334,
+ "grad_norm": 0.4888676702976227,
+ "learning_rate": 0.00012925437687173142,
+ "loss": 0.6579,
+ "step": 127
+ },
+ {
+ "epoch": 1.7066666666666666,
+ "grad_norm": 0.5127140879631042,
+ "learning_rate": 0.00012822033657746478,
+ "loss": 0.7432,
+ "step": 128
+ },
+ {
+ "epoch": 1.72,
+ "grad_norm": 0.6154641509056091,
+ "learning_rate": 0.0001271830072236343,
+ "loss": 0.7322,
+ "step": 129
+ },
+ {
+ "epoch": 1.7333333333333334,
+ "grad_norm": 0.5081548690795898,
+ "learning_rate": 0.00012614250971021657,
+ "loss": 0.7547,
+ "step": 130
+ },
+ {
+ "epoch": 1.7466666666666666,
+ "grad_norm": 0.6808217763900757,
+ "learning_rate": 0.00012509896530643488,
+ "loss": 0.6855,
+ "step": 131
+ },
+ {
+ "epoch": 1.76,
+ "grad_norm": 0.8672941327095032,
+ "learning_rate": 0.00012405249563662537,
+ "loss": 0.6332,
+ "step": 132
+ },
+ {
+ "epoch": 1.7733333333333334,
+ "grad_norm": 0.6130337119102478,
+ "learning_rate": 0.00012300322266606178,
+ "loss": 0.7453,
+ "step": 133
+ },
+ {
+ "epoch": 1.7866666666666666,
+ "grad_norm": 0.739959180355072,
+ "learning_rate": 0.00012195126868674051,
+ "loss": 0.687,
+ "step": 134
+ },
+ {
+ "epoch": 1.8,
+ "grad_norm": 0.5801121592521667,
+ "learning_rate": 0.00012089675630312754,
+ "loss": 0.7059,
+ "step": 135
+ },
+ {
+ "epoch": 1.8133333333333335,
+ "grad_norm": 0.5766938328742981,
+ "learning_rate": 0.000119839808417869,
+ "loss": 0.737,
+ "step": 136
+ },
+ {
+ "epoch": 1.8266666666666667,
+ "grad_norm": 0.6705268621444702,
+ "learning_rate": 0.00011878054821746703,
+ "loss": 0.7358,
+ "step": 137
+ },
+ {
+ "epoch": 1.8399999999999999,
+ "grad_norm": 0.7814889550209045,
+ "learning_rate": 0.0001177190991579223,
+ "loss": 0.7021,
+ "step": 138
+ },
+ {
+ "epoch": 1.8533333333333335,
+ "grad_norm": 0.6991515755653381,
+ "learning_rate": 0.00011665558495034546,
+ "loss": 0.7325,
+ "step": 139
+ },
+ {
+ "epoch": 1.8666666666666667,
+ "grad_norm": 0.8299288749694824,
+ "learning_rate": 0.00011559012954653865,
+ "loss": 0.7128,
+ "step": 140
+ },
+ {
+ "epoch": 1.88,
+ "grad_norm": 0.7293754816055298,
+ "learning_rate": 0.00011452285712454904,
+ "loss": 0.5813,
+ "step": 141
+ },
+ {
+ "epoch": 1.8933333333333333,
+ "grad_norm": 0.6560428738594055,
+ "learning_rate": 0.00011345389207419588,
+ "loss": 0.6352,
+ "step": 142
+ },
+ {
+ "epoch": 1.9066666666666667,
+ "grad_norm": 0.54889976978302,
+ "learning_rate": 0.00011238335898257304,
+ "loss": 0.7372,
+ "step": 143
+ },
+ {
+ "epoch": 1.92,
+ "grad_norm": 0.5890987515449524,
+ "learning_rate": 0.00011131138261952845,
+ "loss": 0.6402,
+ "step": 144
+ },
+ {
+ "epoch": 1.9333333333333333,
+ "grad_norm": 0.8450446128845215,
+ "learning_rate": 0.00011023808792312227,
+ "loss": 0.7152,
+ "step": 145
+ },
+ {
+ "epoch": 1.9466666666666668,
+ "grad_norm": 0.7649719715118408,
+ "learning_rate": 0.0001091635999850655,
+ "loss": 0.6831,
+ "step": 146
+ },
+ {
+ "epoch": 1.96,
+ "grad_norm": 0.6236613988876343,
+ "learning_rate": 0.00010808804403614043,
+ "loss": 0.6671,
+ "step": 147
+ },
+ {
+ "epoch": 1.9733333333333334,
+ "grad_norm": 0.6295299530029297,
+ "learning_rate": 0.00010701154543160541,
+ "loss": 0.8226,
+ "step": 148
+ },
+ {
+ "epoch": 1.9866666666666668,
+ "grad_norm": 0.641965389251709,
+ "learning_rate": 0.00010593422963658452,
+ "loss": 0.6567,
+ "step": 149
+ },
+ {
+ "epoch": 2.0,
+ "grad_norm": 0.779958188533783,
+ "learning_rate": 0.00010485622221144484,
+ "loss": 0.6346,
+ "step": 150
+ },
+ {
+ "epoch": 2.013333333333333,
+ "grad_norm": 0.6322675347328186,
+ "learning_rate": 0.00010377764879716234,
+ "loss": 0.5576,
+ "step": 151
+ },
+ {
+ "epoch": 2.026666666666667,
+ "grad_norm": 0.7052869200706482,
+ "learning_rate": 0.00010269863510067872,
+ "loss": 0.4362,
+ "step": 152
+ },
+ {
+ "epoch": 2.04,
+ "grad_norm": 0.4991523027420044,
+ "learning_rate": 0.00010161930688025017,
+ "loss": 0.4478,
+ "step": 153
+ },
+ {
+ "epoch": 2.0533333333333332,
+ "grad_norm": 0.4096013903617859,
+ "learning_rate": 0.00010053978993079045,
+ "loss": 0.5258,
+ "step": 154
+ },
+ {
+ "epoch": 2.066666666666667,
+ "grad_norm": 0.4861268103122711,
+ "learning_rate": 9.946021006920959e-05,
+ "loss": 0.5085,
+ "step": 155
+ },
+ {
+ "epoch": 2.08,
+ "grad_norm": 0.5277904272079468,
+ "learning_rate": 9.838069311974986e-05,
+ "loss": 0.4549,
+ "step": 156
+ },
+ {
+ "epoch": 2.0933333333333333,
+ "grad_norm": 0.640762209892273,
+ "learning_rate": 9.730136489932133e-05,
+ "loss": 0.5168,
+ "step": 157
+ },
+ {
+ "epoch": 2.1066666666666665,
+ "grad_norm": 0.6294235587120056,
+ "learning_rate": 9.622235120283769e-05,
+ "loss": 0.3587,
+ "step": 158
+ },
+ {
+ "epoch": 2.12,
+ "grad_norm": 0.6455483436584473,
+ "learning_rate": 9.514377778855521e-05,
+ "loss": 0.5013,
+ "step": 159
+ },
+ {
+ "epoch": 2.1333333333333333,
+ "grad_norm": 0.7052115201950073,
+ "learning_rate": 9.406577036341548e-05,
+ "loss": 0.458,
+ "step": 160
+ },
+ {
+ "epoch": 2.1466666666666665,
+ "grad_norm": 0.6517965197563171,
+ "learning_rate": 9.298845456839459e-05,
+ "loss": 0.4427,
+ "step": 161
+ },
+ {
+ "epoch": 2.16,
+ "grad_norm": 1.0105723142623901,
+ "learning_rate": 9.19119559638596e-05,
+ "loss": 0.5482,
+ "step": 162
+ },
+ {
+ "epoch": 2.1733333333333333,
+ "grad_norm": 0.6122080683708191,
+ "learning_rate": 9.083640001493454e-05,
+ "loss": 0.3708,
+ "step": 163
+ },
+ {
+ "epoch": 2.1866666666666665,
+ "grad_norm": 0.7772620320320129,
+ "learning_rate": 8.976191207687775e-05,
+ "loss": 0.4423,
+ "step": 164
+ },
+ {
+ "epoch": 2.2,
+ "grad_norm": 0.5697551965713501,
+ "learning_rate": 8.868861738047158e-05,
+ "loss": 0.4544,
+ "step": 165
+ },
+ {
+ "epoch": 2.2133333333333334,
+ "grad_norm": 0.6369841694831848,
+ "learning_rate": 8.7616641017427e-05,
+ "loss": 0.4458,
+ "step": 166
+ },
+ {
+ "epoch": 2.2266666666666666,
+ "grad_norm": 0.6867621541023254,
+ "learning_rate": 8.654610792580415e-05,
+ "loss": 0.3971,
+ "step": 167
+ },
+ {
+ "epoch": 2.24,
+ "grad_norm": 0.6379404067993164,
+ "learning_rate": 8.5477142875451e-05,
+ "loss": 0.409,
+ "step": 168
+ },
+ {
+ "epoch": 2.2533333333333334,
+ "grad_norm": 0.5854802131652832,
+ "learning_rate": 8.440987045346134e-05,
+ "loss": 0.4489,
+ "step": 169
+ },
+ {
+ "epoch": 2.2666666666666666,
+ "grad_norm": 0.6577348113059998,
+ "learning_rate": 8.334441504965455e-05,
+ "loss": 0.5224,
+ "step": 170
+ },
+ {
+ "epoch": 2.2800000000000002,
+ "grad_norm": 0.6020484566688538,
+ "learning_rate": 8.228090084207774e-05,
+ "loss": 0.3876,
+ "step": 171
+ },
+ {
+ "epoch": 2.2933333333333334,
+ "grad_norm": 0.5832955241203308,
+ "learning_rate": 8.1219451782533e-05,
+ "loss": 0.4096,
+ "step": 172
+ },
+ {
+ "epoch": 2.3066666666666666,
+ "grad_norm": 0.6917557716369629,
+ "learning_rate": 8.016019158213101e-05,
+ "loss": 0.3827,
+ "step": 173
+ },
+ {
+ "epoch": 2.32,
+ "grad_norm": 0.6793459057807922,
+ "learning_rate": 7.91032436968725e-05,
+ "loss": 0.3801,
+ "step": 174
+ },
+ {
+ "epoch": 2.3333333333333335,
+ "grad_norm": 0.5776801705360413,
+ "learning_rate": 7.804873131325954e-05,
+ "loss": 0.4567,
+ "step": 175
+ },
+ {
+ "epoch": 2.3466666666666667,
+ "grad_norm": 0.7591734528541565,
+ "learning_rate": 7.699677733393826e-05,
+ "loss": 0.4957,
+ "step": 176
+ },
+ {
+ "epoch": 2.36,
+ "grad_norm": 0.8160498142242432,
+ "learning_rate": 7.594750436337467e-05,
+ "loss": 0.4485,
+ "step": 177
+ },
+ {
+ "epoch": 2.3733333333333335,
+ "grad_norm": 0.5776082873344421,
+ "learning_rate": 7.490103469356513e-05,
+ "loss": 0.5212,
+ "step": 178
+ },
+ {
+ "epoch": 2.3866666666666667,
+ "grad_norm": 0.6858205795288086,
+ "learning_rate": 7.385749028978346e-05,
+ "loss": 0.3985,
+ "step": 179
+ },
+ {
+ "epoch": 2.4,
+ "grad_norm": 0.7106760740280151,
+ "learning_rate": 7.281699277636572e-05,
+ "loss": 0.447,
+ "step": 180
+ },
+ {
+ "epoch": 2.413333333333333,
+ "grad_norm": 0.5468612313270569,
+ "learning_rate": 7.177966342253524e-05,
+ "loss": 0.3429,
+ "step": 181
+ },
+ {
+ "epoch": 2.4266666666666667,
+ "grad_norm": 0.6179500222206116,
+ "learning_rate": 7.07456231282686e-05,
+ "loss": 0.4873,
+ "step": 182
+ },
+ {
+ "epoch": 2.44,
+ "grad_norm": 0.677168607711792,
+ "learning_rate": 6.971499241020495e-05,
+ "loss": 0.5149,
+ "step": 183
+ },
+ {
+ "epoch": 2.453333333333333,
+ "grad_norm": 0.7949701547622681,
+ "learning_rate": 6.868789138759976e-05,
+ "loss": 0.5183,
+ "step": 184
+ },
+ {
+ "epoch": 2.466666666666667,
+ "grad_norm": 0.631366491317749,
+ "learning_rate": 6.766443976832517e-05,
+ "loss": 0.3915,
+ "step": 185
+ },
+ {
+ "epoch": 2.48,
+ "grad_norm": 0.6317359805107117,
+ "learning_rate": 6.664475683491796e-05,
+ "loss": 0.4246,
+ "step": 186
+ },
+ {
+ "epoch": 2.493333333333333,
+ "grad_norm": 0.7377456426620483,
+ "learning_rate": 6.562896143067734e-05,
+ "loss": 0.3572,
+ "step": 187
+ },
+ {
+ "epoch": 2.506666666666667,
+ "grad_norm": 0.7473776340484619,
+ "learning_rate": 6.461717194581393e-05,
+ "loss": 0.4051,
+ "step": 188
+ },
+ {
+ "epoch": 2.52,
+ "grad_norm": 0.6155073046684265,
+ "learning_rate": 6.360950630365126e-05,
+ "loss": 0.4465,
+ "step": 189
+ },
+ {
+ "epoch": 2.533333333333333,
+ "grad_norm": 0.599367082118988,
+ "learning_rate": 6.260608194688206e-05,
+ "loss": 0.4863,
+ "step": 190
+ },
+ {
+ "epoch": 2.546666666666667,
+ "grad_norm": 0.6318126320838928,
+ "learning_rate": 6.160701582388038e-05,
+ "loss": 0.5023,
+ "step": 191
+ },
+ {
+ "epoch": 2.56,
+ "grad_norm": 0.6112634539604187,
+ "learning_rate": 6.061242437507131e-05,
+ "loss": 0.5643,
+ "step": 192
+ },
+ {
+ "epoch": 2.5733333333333333,
+ "grad_norm": 0.9118645787239075,
+ "learning_rate": 5.962242351935985e-05,
+ "loss": 0.4692,
+ "step": 193
+ },
+ {
+ "epoch": 2.586666666666667,
+ "grad_norm": 0.7344533801078796,
+ "learning_rate": 5.863712864062089e-05,
+ "loss": 0.4355,
+ "step": 194
+ },
+ {
+ "epoch": 2.6,
+ "grad_norm": 0.6159957051277161,
+ "learning_rate": 5.765665457425102e-05,
+ "loss": 0.5576,
+ "step": 195
+ },
+ {
+ "epoch": 2.6133333333333333,
+ "grad_norm": 0.632001519203186,
+ "learning_rate": 5.668111559378471e-05,
+ "loss": 0.5418,
+ "step": 196
+ },
+ {
+ "epoch": 2.626666666666667,
+ "grad_norm": 0.7217976450920105,
+ "learning_rate": 5.571062539757581e-05,
+ "loss": 0.5315,
+ "step": 197
+ },
+ {
+ "epoch": 2.64,
+ "grad_norm": 0.5802445411682129,
+ "learning_rate": 5.474529709554612e-05,
+ "loss": 0.5183,
+ "step": 198
+ },
+ {
+ "epoch": 2.6533333333333333,
+ "grad_norm": 0.8810819983482361,
+ "learning_rate": 5.378524319600231e-05,
+ "loss": 0.5234,
+ "step": 199
+ },
+ {
+ "epoch": 2.6666666666666665,
+ "grad_norm": 0.8704924583435059,
+ "learning_rate": 5.283057559252341e-05,
+ "loss": 0.4652,
+ "step": 200
+ }
+ ],
+ "logging_steps": 1.0,
+ "max_steps": 300,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 4,
+ "save_steps": 50,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 5.724202694030131e+16,
+ "train_batch_size": 2,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/checkpoint-200/training_args.bin b/checkpoint-200/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..26de66c830692f3d22f375473f4f43447eefb78a
--- /dev/null
+++ b/checkpoint-200/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:54335a849ae8d377cae5839a736f4196087c7894666ca9b9f10dd899e0ada95c
+size 6904
diff --git a/checkpoint-200/zero_to_fp32.py b/checkpoint-200/zero_to_fp32.py
new file mode 100644
index 0000000000000000000000000000000000000000..5995d6e6f04e43b989587aa9022a3aef0c66d694
--- /dev/null
+++ b/checkpoint-200/zero_to_fp32.py
@@ -0,0 +1,760 @@
+#!/usr/bin/env python
+
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+# This script extracts fp32 consolidated weights from a zero 1, 2 and 3 DeepSpeed checkpoints. It gets
+# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
+# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
+# application.
+#
+# example:
+# python zero_to_fp32.py . output_dir/
+# or
+# python zero_to_fp32.py . output_dir/ --safe_serialization
+
+import argparse
+import torch
+import glob
+import math
+import os
+import re
+import gc
+import json
+import numpy as np
+from tqdm import tqdm
+from collections import OrderedDict
+from dataclasses import dataclass
+
+# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
+# DeepSpeed data structures it has to be available in the current python environment.
+from deepspeed.utils import logger
+from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
+
+
+@dataclass
+class zero_model_state:
+ buffers: dict()
+ param_shapes: dict()
+ shared_params: list
+ ds_version: int
+ frozen_param_shapes: dict()
+ frozen_param_fragments: dict()
+
+
+debug = 0
+
+# load to cpu
+device = torch.device('cpu')
+
+
+def atoi(text):
+ return int(text) if text.isdigit() else text
+
+
+def natural_keys(text):
+ '''
+ alist.sort(key=natural_keys) sorts in human order
+ http://nedbatchelder.com/blog/200712/human_sorting.html
+ (See Toothy's implementation in the comments)
+ '''
+ return [atoi(c) for c in re.split(r'(\d+)', text)]
+
+
+def get_model_state_file(checkpoint_dir, zero_stage):
+ if not os.path.isdir(checkpoint_dir):
+ raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
+
+ # there should be only one file
+ if zero_stage <= 2:
+ file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
+ elif zero_stage == 3:
+ file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
+
+ if not os.path.exists(file):
+ raise FileNotFoundError(f"can't find model states file at '{file}'")
+
+ return file
+
+
+def get_checkpoint_files(checkpoint_dir, glob_pattern):
+ # XXX: need to test that this simple glob rule works for multi-node setup too
+ ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
+
+ if len(ckpt_files) == 0:
+ raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
+
+ return ckpt_files
+
+
+def get_optim_files(checkpoint_dir):
+ return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
+
+
+def get_model_state_files(checkpoint_dir):
+ return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
+
+
+def parse_model_states(files):
+ zero_model_states = []
+ for file in files:
+ state_dict = torch.load(file, map_location=device, weights_only=False)
+
+ if BUFFER_NAMES not in state_dict:
+ raise ValueError(f"{file} is not a model state checkpoint")
+ buffer_names = state_dict[BUFFER_NAMES]
+ if debug:
+ print("Found buffers:", buffer_names)
+
+ # recover just the buffers while restoring them to fp32 if they were saved in fp16
+ buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
+ param_shapes = state_dict[PARAM_SHAPES]
+
+ # collect parameters that are included in param_shapes
+ param_names = []
+ for s in param_shapes:
+ for name in s.keys():
+ param_names.append(name)
+
+ # update with frozen parameters
+ frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
+ if frozen_param_shapes is not None:
+ if debug:
+ print(f"Found frozen_param_shapes: {frozen_param_shapes}")
+ param_names += list(frozen_param_shapes.keys())
+
+ # handle shared params
+ shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
+
+ ds_version = state_dict.get(DS_VERSION, None)
+
+ frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
+
+ z_model_state = zero_model_state(buffers=buffers,
+ param_shapes=param_shapes,
+ shared_params=shared_params,
+ ds_version=ds_version,
+ frozen_param_shapes=frozen_param_shapes,
+ frozen_param_fragments=frozen_param_fragments)
+ zero_model_states.append(z_model_state)
+
+ return zero_model_states
+
+
+def parse_optim_states(files, ds_checkpoint_dir):
+ total_files = len(files)
+ state_dicts = []
+ for f in tqdm(files, desc='Loading checkpoint shards'):
+ state_dict = torch.load(f, map_location=device, mmap=True, weights_only=False)
+ # immediately discard the two potentially huge optimizer states, as we only care about the fp32 master weights
+ # and also handle the case where it was already removed by another helper script
+ state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
+ state_dicts.append(state_dict)
+
+ if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
+ raise ValueError(f"{files[0]} is not a zero checkpoint")
+ zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
+ world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
+
+ # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
+ # parameters can be different from data parallelism for non-expert parameters. So we can just
+ # use the max of the partition_count to get the dp world_size.
+
+ if type(world_size) is list:
+ world_size = max(world_size)
+
+ if world_size != total_files:
+ raise ValueError(
+ f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
+ "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
+ )
+
+ # the groups are named differently in each stage
+ if zero_stage <= 2:
+ fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
+ elif zero_stage == 3:
+ fp32_groups_key = FP32_FLAT_GROUPS
+ else:
+ raise ValueError(f"unknown zero stage {zero_stage}")
+
+ fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
+ return zero_stage, world_size, fp32_flat_groups
+
+
+def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
+ """
+ Returns fp32 state_dict reconstructed from ds checkpoint
+
+ Args:
+ - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
+
+ """
+ print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
+
+ optim_files = get_optim_files(ds_checkpoint_dir)
+ zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
+ print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
+
+ model_files = get_model_state_files(ds_checkpoint_dir)
+
+ zero_model_states = parse_model_states(model_files)
+ print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
+
+ if zero_stage <= 2:
+ return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters)
+ elif zero_stage == 3:
+ return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters)
+
+
+def _zero2_merge_frozen_params(state_dict, zero_model_states):
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+ return
+
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+ frozen_param_fragments = zero_model_states[0].frozen_param_fragments
+
+ if debug:
+ num_elem = sum(s.numel() for s in frozen_param_shapes.values())
+ print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+ wanted_params = len(frozen_param_shapes)
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+ avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
+ print(f'Frozen params: Have {avail_numel} numels to process.')
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+ total_params = 0
+ total_numel = 0
+ for name, shape in frozen_param_shapes.items():
+ total_params += 1
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+
+ state_dict[name] = frozen_param_fragments[name]
+
+ if debug:
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _has_callable(obj, fn):
+ attr = getattr(obj, fn, None)
+ return callable(attr)
+
+
+def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+ param_shapes = zero_model_states[0].param_shapes
+
+ # Reconstruction protocol:
+ #
+ # XXX: document this
+
+ if debug:
+ for i in range(world_size):
+ for j in range(len(fp32_flat_groups[0])):
+ print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
+
+ # XXX: memory usage doubles here (zero2)
+ num_param_groups = len(fp32_flat_groups[0])
+ merged_single_partition_of_fp32_groups = []
+ for i in range(num_param_groups):
+ merged_partitions = [sd[i] for sd in fp32_flat_groups]
+ full_single_fp32_vector = torch.cat(merged_partitions, 0)
+ merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
+ avail_numel = sum(
+ [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
+
+ if debug:
+ wanted_params = sum([len(shapes) for shapes in param_shapes])
+ wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
+ # not asserting if there is a mismatch due to possible padding
+ print(f"Have {avail_numel} numels to process.")
+ print(f"Need {wanted_numel} numels in {wanted_params} params.")
+
+ # params
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+ # out-of-core computing solution
+ total_numel = 0
+ total_params = 0
+ for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
+ offset = 0
+ avail_numel = full_single_fp32_vector.numel()
+ for name, shape in shapes.items():
+
+ unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
+ total_numel += unpartitioned_numel
+ total_params += 1
+
+ if debug:
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+ state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
+ offset += unpartitioned_numel
+
+ # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
+ # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
+ # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
+ # live optimizer object, so we are checking that the numbers are within the right range
+ align_to = 2 * world_size
+
+ def zero2_align(x):
+ return align_to * math.ceil(x / align_to)
+
+ if debug:
+ print(f"original offset={offset}, avail_numel={avail_numel}")
+
+ offset = zero2_align(offset)
+ avail_numel = zero2_align(avail_numel)
+
+ if debug:
+ print(f"aligned offset={offset}, avail_numel={avail_numel}")
+
+ # Sanity check
+ if offset != avail_numel:
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+ print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters):
+ state_dict = OrderedDict()
+
+ # buffers
+ buffers = zero_model_states[0].buffers
+ state_dict.update(buffers)
+ if debug:
+ print(f"added {len(buffers)} buffers")
+
+ if not exclude_frozen_parameters:
+ _zero2_merge_frozen_params(state_dict, zero_model_states)
+
+ _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+ # recover shared parameters
+ for pair in zero_model_states[0].shared_params:
+ if pair[1] in state_dict:
+ state_dict[pair[0]] = state_dict[pair[1]]
+
+ return state_dict
+
+
+def zero3_partitioned_param_info(unpartitioned_numel, world_size):
+ remainder = unpartitioned_numel % world_size
+ padding_numel = (world_size - remainder) if remainder else 0
+ partitioned_numel = math.ceil(unpartitioned_numel / world_size)
+ return partitioned_numel, padding_numel
+
+
+def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+ return
+
+ if debug:
+ for i in range(world_size):
+ num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
+ print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+ wanted_params = len(frozen_param_shapes)
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+ avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
+ print(f'Frozen params: Have {avail_numel} numels to process.')
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+ total_params = 0
+ total_numel = 0
+ for name, shape in zero_model_states[0].frozen_param_shapes.items():
+ total_params += 1
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+
+ param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
+ state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
+
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+ if debug:
+ print(
+ f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+ )
+
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+class GatheredTensor:
+ """
+ A pseudo tensor that collects partitioned weights.
+ It is more memory efficient when there are multiple groups.
+ """
+
+ def __init__(self, flat_groups, flat_groups_offset, offset, partitioned_numel, shape):
+ self.flat_groups = flat_groups
+ self.flat_groups_offset = flat_groups_offset
+ self.offset = offset
+ self.partitioned_numel = partitioned_numel
+ self.shape = shape
+ self.dtype = self.flat_groups[0][0].dtype
+
+ def contiguous(self):
+ """
+ Merge partitioned weights from flat_groups into a single tensor.
+ """
+ end_idx = self.offset + self.partitioned_numel
+ world_size = len(self.flat_groups)
+ pad_flat_param_chunks = []
+
+ for rank_i in range(world_size):
+ # for each rank, we need to collect weights from related group/groups
+ flat_groups_at_rank_i = self.flat_groups[rank_i]
+ start_group_id = None
+ end_group_id = None
+ for group_id in range(len(self.flat_groups_offset)):
+ if self.flat_groups_offset[group_id] <= self.offset < self.flat_groups_offset[group_id + 1]:
+ start_group_id = group_id
+ if self.flat_groups_offset[group_id] < end_idx <= self.flat_groups_offset[group_id + 1]:
+ end_group_id = group_id
+ break
+ # collect weights from related group/groups
+ for group_id in range(start_group_id, end_group_id + 1):
+ flat_tensor = flat_groups_at_rank_i[group_id]
+ start_offset = self.offset - self.flat_groups_offset[group_id]
+ end_offset = min(end_idx, self.flat_groups_offset[group_id + 1]) - self.flat_groups_offset[group_id]
+ pad_flat_param_chunks.append(flat_tensor[start_offset:end_offset])
+
+ # collect weights from all ranks
+ pad_flat_param = torch.cat(pad_flat_param_chunks, dim=0)
+ param = pad_flat_param[:self.shape.numel()].view(self.shape).contiguous()
+ return param
+
+
+def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+ param_shapes = zero_model_states[0].param_shapes
+ avail_numel = sum([flat_group.numel() for flat_group in fp32_flat_groups[0]]) * world_size
+
+ # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
+ # param, re-consolidating each param, while dealing with padding if any
+
+ # merge list of dicts, preserving order
+ param_shapes = {k: v for d in param_shapes for k, v in d.items()}
+
+ if debug:
+ for i in range(world_size):
+ print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
+
+ wanted_params = len(param_shapes)
+ wanted_numel = sum(shape.numel() for shape in param_shapes.values())
+ # not asserting if there is a mismatch due to possible padding
+ avail_numel = fp32_flat_groups[0].numel() * world_size
+ print(f"Trainable params: Have {avail_numel} numels to process.")
+ print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
+
+ # params
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+ # out-of-core computing solution
+ offset = 0
+ total_numel = 0
+ total_params = 0
+ flat_groups_offset = [0] + list(np.cumsum([flat_tensor.numel() for flat_tensor in fp32_flat_groups[0]]))
+ for name, shape in tqdm(param_shapes.items(), desc='Gathering sharded weights'):
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+ total_params += 1
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+ if debug:
+ print(
+ f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+ )
+
+ # memory efficient tensor
+ tensor = GatheredTensor(fp32_flat_groups, flat_groups_offset, offset, partitioned_numel, shape)
+ state_dict[name] = tensor
+ offset += partitioned_numel
+
+ offset *= world_size
+
+ # Sanity check
+ if offset != avail_numel:
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+ print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters):
+ state_dict = OrderedDict()
+
+ # buffers
+ buffers = zero_model_states[0].buffers
+ state_dict.update(buffers)
+ if debug:
+ print(f"added {len(buffers)} buffers")
+
+ if not exclude_frozen_parameters:
+ _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
+
+ _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+ # recover shared parameters
+ for pair in zero_model_states[0].shared_params:
+ if pair[1] in state_dict:
+ state_dict[pair[0]] = state_dict[pair[1]]
+
+ return state_dict
+
+
+def to_torch_tensor(state_dict, return_empty_tensor=False):
+ """
+ Convert state_dict of GatheredTensor to torch tensor
+ """
+ torch_state_dict = {}
+ converted_tensors = {}
+ for name, tensor in state_dict.items():
+ tensor_id = id(tensor)
+ if tensor_id in converted_tensors: # shared tensors
+ shared_tensor = torch_state_dict[converted_tensors[tensor_id]]
+ torch_state_dict[name] = shared_tensor
+ else:
+ converted_tensors[tensor_id] = name
+ if return_empty_tensor:
+ torch_state_dict[name] = torch.empty(tensor.shape, dtype=tensor.dtype)
+ else:
+ torch_state_dict[name] = tensor.contiguous()
+ return torch_state_dict
+
+
+def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
+ tag=None,
+ exclude_frozen_parameters=False,
+ lazy_mode=False):
+ """
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
+ ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
+ via a model hub.
+
+ Args:
+ - ``checkpoint_dir``: path to the desired checkpoint folder
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
+ - ``exclude_frozen_parameters``: exclude frozen parameters
+ - ``lazy_mode``: get state_dict in lazy mode. It returns a dict of pseudo tensor instead of torch tensor, which is more memory efficient.
+ Convert the pseudo tensor to torch tensor by ``.contiguous()``
+
+ Returns:
+ - pytorch ``state_dict``
+
+ A typical usage might be ::
+
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+ # do the training and checkpoint saving
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
+ model = model.cpu() # move to cpu
+ model.load_state_dict(state_dict)
+ # submit to model hub or save the model to share with others
+
+ In this example the ``model`` will no longer be usable in the deepspeed context of the same
+ application. i.e. you will need to re-initialize the deepspeed engine, since
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+ If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
+
+ Note: the above usage may not work if your application doesn't have sufficient free CPU memory.
+ You may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
+ the checkpoint. Or you can load state_dict in lazy mode ::
+
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, lazy_mode=True) # not on cpu
+ for name, lazy_tensor in state_dict.item():
+ tensor = lazy_tensor.contiguous() # to cpu
+ print(name, tensor)
+ # del tensor to release memory if it no longer in use
+ """
+ if tag is None:
+ latest_path = os.path.join(checkpoint_dir, 'latest')
+ if os.path.isfile(latest_path):
+ with open(latest_path, 'r') as fd:
+ tag = fd.read().strip()
+ else:
+ raise ValueError(f"Unable to find 'latest' file at {latest_path}")
+
+ ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
+
+ if not os.path.isdir(ds_checkpoint_dir):
+ raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
+
+ state_dict = _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
+ if lazy_mode:
+ return state_dict
+ else:
+ return to_torch_tensor(state_dict)
+
+
+def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir,
+                                               output_dir,
+                                               max_shard_size="5GB",
+                                               safe_serialization=False,
+                                               tag=None,
+                                               exclude_frozen_parameters=False):
+    """
+    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
+    loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
+
+    Args:
+        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+        - ``output_dir``: directory to the pytorch fp32 state_dict output files
+        - ``max_shard_size``: the maximum size for a checkpoint before being sharded, default value is 5GB
+        - ``safe_serialization``: whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
+        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+        - ``exclude_frozen_parameters``: exclude frozen parameters
+    """
+
+    # Dependency pre-check: fail early with an actionable install hint before
+    # doing any of the expensive checkpoint loading below.
+    if safe_serialization:
+        try:
+            from safetensors.torch import save_file
+        except ImportError:
+            print('If you want to use `safe_serialization`, please `pip install safetensors`')
+            raise
+    if max_shard_size is not None:
+        try:
+            from huggingface_hub import split_torch_state_dict_into_shards
+        except ImportError:
+            print('If you want to use `max_shard_size`, please `pip install huggingface_hub`')
+            raise
+
+    # Convert zero checkpoint to state_dict. lazy_mode=True keeps entries as
+    # pseudo tensors so only one shard's worth is materialized at a time.
+    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
+                                                          tag,
+                                                          exclude_frozen_parameters,
+                                                          lazy_mode=True)
+
+    # Shard the model if it is too big.
+    weights_name = "model.safetensors" if safe_serialization else "pytorch_model.bin"
+    if max_shard_size is not None:
+        filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors")
+        # a memory-efficient approach for sharding: plan the shard layout from
+        # empty tensors so no real tensor data needs to be resident yet
+        empty_state_dict = to_torch_tensor(state_dict, return_empty_tensor=True)
+        state_dict_split = split_torch_state_dict_into_shards(empty_state_dict,
+                                                              filename_pattern=filename_pattern,
+                                                              max_shard_size=max_shard_size)
+    else:
+        # No sharding requested: mimic the split result with a single file
+        from collections import namedtuple
+        StateDictSplit = namedtuple("StateDictSplit", ["is_sharded", "filename_to_tensors"])
+        state_dict_split = StateDictSplit(is_sharded=False,
+                                          filename_to_tensors={weights_name: list(state_dict.keys())})
+
+    # Save the model by shard
+    os.makedirs(output_dir, exist_ok=True)
+    filename_to_tensors = state_dict_split.filename_to_tensors.items()
+    for shard_file, tensors in tqdm(filename_to_tensors, desc="Saving checkpoint shards"):
+        shard_state_dict = {tensor_name: state_dict[tensor_name] for tensor_name in tensors}
+        # Materialize just this shard's tensors as real torch tensors on CPU
+        shard_state_dict = to_torch_tensor(shard_state_dict)
+        output_path = os.path.join(output_dir, shard_file)
+        if safe_serialization:
+            save_file(shard_state_dict, output_path, metadata={"format": "pt"})
+        else:
+            torch.save(shard_state_dict, output_path)
+        # release the memory of current shard
+        for tensor_name in list(shard_state_dict.keys()):
+            del state_dict[tensor_name]
+            del shard_state_dict[tensor_name]
+        del shard_state_dict
+        gc.collect()
+
+    # Save index if sharded
+    if state_dict_split.is_sharded:
+        index = {
+            "metadata": state_dict_split.metadata,
+            "weight_map": state_dict_split.tensor_to_filename,
+        }
+        save_index_file = "model.safetensors.index.json" if safe_serialization else "pytorch_model.bin.index.json"
+        save_index_file = os.path.join(output_dir, save_index_file)
+        with open(save_index_file, "w", encoding="utf-8") as f:
+            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
+            f.write(content)
+
+
+def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
+    """
+    1. Put the provided model to cpu
+    2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
+    3. Load it into the provided model
+
+    Args:
+        - ``model``: the model object to update
+        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+
+    Returns:
+        - ``model``: modified model
+
+    Make sure you have plenty of CPU memory available before you call this function. If you don't
+    have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
+    conveniently placed for you in the checkpoint folder.
+
+    A typical usage might be ::
+
+        from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
+        model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
+        # submit to model hub or save the model to share with others
+
+    Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context
+    of the same application. i.e. you will need to re-initialize the deepspeed engine, since
+    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+    """
+    logger.info("Extracting fp32 weights")
+    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+
+    logger.info("Overwriting model with fp32 weights")
+    model = model.cpu()
+    # strict=False: the consolidated dict may omit frozen/excluded parameters
+    model.load_state_dict(state_dict, strict=False)
+
+    return model
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("checkpoint_dir",
+ type=str,
+ help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
+ parser.add_argument("output_dir",
+ type=str,
+ help="directory to the pytorch fp32 state_dict output files"
+ "(e.g. path/checkpoint-12-output/)")
+ parser.add_argument(
+ "--max_shard_size",
+ type=str,
+ default="5GB",
+ help="The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size"
+ "lower than this size. If expressed as a string, needs to be digits followed by a unit (like `5MB`"
+ "We default it to 5GB in order for models to be able to run easily on free-tier google colab instances"
+ "without CPU OOM issues.")
+ parser.add_argument(
+ "--safe_serialization",
+ default=False,
+ action='store_true',
+ help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).")
+ parser.add_argument("-t",
+ "--tag",
+ type=str,
+ default=None,
+ help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
+ parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
+ parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
+ args = parser.parse_args()
+
+ debug = args.debug
+
+ convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
+ args.output_dir,
+ max_shard_size=args.max_shard_size,
+ safe_serialization=args.safe_serialization,
+ tag=args.tag,
+ exclude_frozen_parameters=args.exclude_frozen_parameters)
diff --git a/checkpoint-250/README.md b/checkpoint-250/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..7f664a3ae5272e4eb0e149c4adcd2f8c4eb45f00
--- /dev/null
+++ b/checkpoint-250/README.md
@@ -0,0 +1,207 @@
+---
+base_model: /mnt/phwfile/datafrontier/chenyang/data/models/Eagle-X5-7B
+library_name: peft
+pipeline_tag: text-generation
+tags:
+- base_model:adapter:/mnt/phwfile/datafrontier/chenyang/data/models/Eagle-X5-7B
+- lora
+- transformers
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+
+### Framework versions
+
+- PEFT 0.17.1
\ No newline at end of file
diff --git a/checkpoint-250/adapter_config.json b/checkpoint-250/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..35553142128a486f364ea05de87725530372c975
--- /dev/null
+++ b/checkpoint-250/adapter_config.json
@@ -0,0 +1,42 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "/mnt/phwfile/datafrontier/chenyang/data/models/Eagle-X5-7B",
+ "bias": "none",
+ "corda_config": null,
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 128,
+ "lora_bias": false,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "qalora_group_size": 16,
+ "r": 64,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "down_proj",
+ "gate_proj",
+ "o_proj",
+ "k_proj",
+ "v_proj",
+ "up_proj"
+ ],
+ "target_parameters": null,
+ "task_type": "CAUSAL_LM",
+ "trainable_token_indices": null,
+ "use_dora": false,
+ "use_qalora": false,
+ "use_rslora": false
+}
\ No newline at end of file
diff --git a/checkpoint-250/adapter_model.safetensors b/checkpoint-250/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..30aa6c95baa66da2ecdb14e339697a7930e93574
--- /dev/null
+++ b/checkpoint-250/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:83a6b01208efc6df7581206bde5159318b1849c4001f916893738d3eba82f4b3
+size 357679240
diff --git a/checkpoint-250/global_step250/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt b/checkpoint-250/global_step250/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..6084350f32e96f7e7ad733be4a4553c168ec3198
--- /dev/null
+++ b/checkpoint-250/global_step250/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f4f9868236f2109582fe4453ddb9081e0005ae9dcc6576994c9152d75b60c251
+size 340551088
diff --git a/checkpoint-250/global_step250/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt b/checkpoint-250/global_step250/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..a998768552f94d996424b46a1a042c0adf734bf2
--- /dev/null
+++ b/checkpoint-250/global_step250/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f8230c8a4aff45eb598fac80fc231372f8469992c1bca6e35f23d6cdc762ba07
+size 340550896
diff --git a/checkpoint-250/global_step250/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt b/checkpoint-250/global_step250/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..7b448e35ef08e0d9a7080baf348136b5545d4169
--- /dev/null
+++ b/checkpoint-250/global_step250/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:41c3ec37f18bf528c0c43dd045259514fb7bbbbd35cc7e14680aeeec07dc6a0d
+size 340550832
diff --git a/checkpoint-250/global_step250/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt b/checkpoint-250/global_step250/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..88643049bdc565a5bf8f4c0f75ed1a229ca23ec1
--- /dev/null
+++ b/checkpoint-250/global_step250/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:120adb03f207a7b1f009e29e61c4bf70d86ab7d945455d3e754d3b7e103b635b
+size 340551216
diff --git a/checkpoint-250/global_step250/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt b/checkpoint-250/global_step250/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..174ba430db196afac004c1121e8805eb927a11ee
--- /dev/null
+++ b/checkpoint-250/global_step250/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:62aa4cac9c28eedccce64b0d997a3c1503f27aed9943d00ccd64456e45bb4fec
+size 340551088
diff --git a/checkpoint-250/global_step250/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt b/checkpoint-250/global_step250/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..7b18cbcb5abc47f5d8b76eb9fd2bf2dfd75c7af8
--- /dev/null
+++ b/checkpoint-250/global_step250/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f8b90a55b81167d58c55584e15909f03587c0c517519d9b5e3371989cca4cef8
+size 340574832
diff --git a/checkpoint-250/global_step250/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt b/checkpoint-250/global_step250/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..a6859db883d6de4a246312e85ed105ab13fde180
--- /dev/null
+++ b/checkpoint-250/global_step250/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ed999d24896cd2195a76a8804b232e7d7d1edbb3a604ad3e1b7a2d7e6b86145c
+size 340561584
diff --git a/checkpoint-250/global_step250/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt b/checkpoint-250/global_step250/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..5166891a2d512546ff4aaafff49c6eeac93c5764
--- /dev/null
+++ b/checkpoint-250/global_step250/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fc0c7ba39c05f006c761655453e30db6eda768298ff1dcaa517bfef0ee90d9dd
+size 340542512
diff --git a/checkpoint-250/global_step250/mp_rank_00_model_states.pt b/checkpoint-250/global_step250/mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..fdcc4ea95a5c7d8072723fbfc496993403489f38
--- /dev/null
+++ b/checkpoint-250/global_step250/mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3211969860399fd1fb44ffc221014bde148d0942e6a0ae23678a23567db2ad9a
+size 456033832
diff --git a/checkpoint-250/latest b/checkpoint-250/latest
new file mode 100644
index 0000000000000000000000000000000000000000..87449ff1a854ba4a77ea33fbc24adaed3311d6b1
--- /dev/null
+++ b/checkpoint-250/latest
@@ -0,0 +1 @@
+global_step250
\ No newline at end of file
diff --git a/checkpoint-250/rng_state_0.pth b/checkpoint-250/rng_state_0.pth
new file mode 100644
index 0000000000000000000000000000000000000000..98eec2235fea425e10a073033a027c80c238cf46
--- /dev/null
+++ b/checkpoint-250/rng_state_0.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:04ca7c301d0d42006224a99f764f85bf24d13d4b7e9bdbac5d05b0926e60f447
+size 15984
diff --git a/checkpoint-250/rng_state_1.pth b/checkpoint-250/rng_state_1.pth
new file mode 100644
index 0000000000000000000000000000000000000000..e6050a63be4f039df95d30c8d3ae87c78b86d78f
--- /dev/null
+++ b/checkpoint-250/rng_state_1.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c3a2acf952414d18eac8ac429a8e945d13da4d128ff5d30031a62b4a2386f9e9
+size 15984
diff --git a/checkpoint-250/rng_state_2.pth b/checkpoint-250/rng_state_2.pth
new file mode 100644
index 0000000000000000000000000000000000000000..c315c125975ee1b3c9c17f9dea0332900b0652b1
--- /dev/null
+++ b/checkpoint-250/rng_state_2.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:261370b13e69b510249ae1ed89a93215a5bbc8bf533c1adfd7e68734e8fd58f8
+size 15984
diff --git a/checkpoint-250/rng_state_3.pth b/checkpoint-250/rng_state_3.pth
new file mode 100644
index 0000000000000000000000000000000000000000..35192bd7acd0f364d5d02aa1a698deb45897b120
--- /dev/null
+++ b/checkpoint-250/rng_state_3.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2285634dca1052fc1fde899edd9d795d6eed52107343fb3c8ea525351ad44006
+size 15984
diff --git a/checkpoint-250/rng_state_4.pth b/checkpoint-250/rng_state_4.pth
new file mode 100644
index 0000000000000000000000000000000000000000..d004289ee9fb7a1d9707198c4a2d98f7a12ee407
--- /dev/null
+++ b/checkpoint-250/rng_state_4.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7cb32e061aea0ed5863abfd0612ca27030894826ef0187477fa99c36d614f7e1
+size 15984
diff --git a/checkpoint-250/rng_state_5.pth b/checkpoint-250/rng_state_5.pth
new file mode 100644
index 0000000000000000000000000000000000000000..e7e528383e831c25a221289a965a47c1134988e2
--- /dev/null
+++ b/checkpoint-250/rng_state_5.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7fc72f3619c3d611898c48bc1b7ef7db5d12bc09b5e4949b5864c00dfcc90eaf
+size 15984
diff --git a/checkpoint-250/rng_state_6.pth b/checkpoint-250/rng_state_6.pth
new file mode 100644
index 0000000000000000000000000000000000000000..0e2da5f54e6e31e8c534ba3d6899b8eb5d6578a5
--- /dev/null
+++ b/checkpoint-250/rng_state_6.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cca5278e902b50129a0c9c455ed2c71b13ebc6bfccc27afb14fb8753a9a4ce47
+size 15984
diff --git a/checkpoint-250/rng_state_7.pth b/checkpoint-250/rng_state_7.pth
new file mode 100644
index 0000000000000000000000000000000000000000..40f6eae46ead4fc90bc5eb40bcd1bbeeec28c3d9
--- /dev/null
+++ b/checkpoint-250/rng_state_7.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:400285c392473e0db1b02a52cccbee4cea57d08644b4c16ff978e6216e01b0d3
+size 15984
diff --git a/checkpoint-250/scheduler.pt b/checkpoint-250/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..35c1877c919f3d2ab9760bfd8d72541d65f83274
--- /dev/null
+++ b/checkpoint-250/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b0150a02c94f82b53717b7236efec20aa55d126d95258ddca77bc6b4bee97156
+size 1064
diff --git a/checkpoint-250/special_tokens_map.json b/checkpoint-250/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..14761dcf1466dc232bd41de9c21d4c617b15755e
--- /dev/null
+++ b/checkpoint-250/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+  "bos_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "<unk>",
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
diff --git a/checkpoint-250/tokenizer.model b/checkpoint-250/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..6c00c742ce03c627d6cd5b795984876fa49fa899
--- /dev/null
+++ b/checkpoint-250/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
diff --git a/checkpoint-250/tokenizer_config.json b/checkpoint-250/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..26c65df1bf794f101c1dd54c908180dc0d880fe3
--- /dev/null
+++ b/checkpoint-250/tokenizer_config.json
@@ -0,0 +1,43 @@
+{
+  "add_bos_token": true,
+  "add_eos_token": false,
+  "add_prefix_space": true,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "</s>",
+  "legacy": false,
+  "model_max_length": 2048,
+  "pad_token": "<unk>",
+  "padding_side": "right",
+  "sp_model_kwargs": {},
+  "spaces_between_special_tokens": false,
+  "tokenizer_class": "LlamaTokenizer",
+  "unk_token": "<unk>",
+  "use_default_system_prompt": false
+}
diff --git a/checkpoint-250/trainer_state.json b/checkpoint-250/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..5ce2ef6e8bd858642361982540dafbb1a4fe9ef1
--- /dev/null
+++ b/checkpoint-250/trainer_state.json
@@ -0,0 +1,1783 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 3.3333333333333335,
+ "eval_steps": 500,
+ "global_step": 250,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.013333333333333334,
+ "grad_norm": 1.0028682947158813,
+ "learning_rate": 2.2222222222222223e-05,
+ "loss": 1.43,
+ "step": 1
+ },
+ {
+ "epoch": 0.02666666666666667,
+ "grad_norm": 1.6397583484649658,
+ "learning_rate": 4.4444444444444447e-05,
+ "loss": 1.5374,
+ "step": 2
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 1.2198785543441772,
+ "learning_rate": 6.666666666666667e-05,
+ "loss": 1.5106,
+ "step": 3
+ },
+ {
+ "epoch": 0.05333333333333334,
+ "grad_norm": 0.8468486070632935,
+ "learning_rate": 8.888888888888889e-05,
+ "loss": 1.447,
+ "step": 4
+ },
+ {
+ "epoch": 0.06666666666666667,
+ "grad_norm": 0.9268608689308167,
+ "learning_rate": 0.00011111111111111112,
+ "loss": 1.4743,
+ "step": 5
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 1.0168085098266602,
+ "learning_rate": 0.00013333333333333334,
+ "loss": 1.3245,
+ "step": 6
+ },
+ {
+ "epoch": 0.09333333333333334,
+ "grad_norm": 0.6773934960365295,
+ "learning_rate": 0.00015555555555555556,
+ "loss": 1.3738,
+ "step": 7
+ },
+ {
+ "epoch": 0.10666666666666667,
+ "grad_norm": 0.6985631585121155,
+ "learning_rate": 0.00017777777777777779,
+ "loss": 1.3951,
+ "step": 8
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 1.0221399068832397,
+ "learning_rate": 0.0002,
+ "loss": 1.3246,
+ "step": 9
+ },
+ {
+ "epoch": 0.13333333333333333,
+ "grad_norm": 0.6119747161865234,
+ "learning_rate": 0.00019999417253661235,
+ "loss": 1.2951,
+ "step": 10
+ },
+ {
+ "epoch": 0.14666666666666667,
+ "grad_norm": 0.6660990118980408,
+ "learning_rate": 0.00019997669082563597,
+ "loss": 1.2529,
+ "step": 11
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.5874819755554199,
+ "learning_rate": 0.00019994755690455152,
+ "loss": 1.2252,
+ "step": 12
+ },
+ {
+ "epoch": 0.17333333333333334,
+ "grad_norm": 0.4818006157875061,
+ "learning_rate": 0.00019990677416889608,
+ "loss": 1.1708,
+ "step": 13
+ },
+ {
+ "epoch": 0.18666666666666668,
+ "grad_norm": 0.652045488357544,
+ "learning_rate": 0.0001998543473718677,
+ "loss": 0.9865,
+ "step": 14
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.5517733693122864,
+ "learning_rate": 0.00019979028262377118,
+ "loss": 1.181,
+ "step": 15
+ },
+ {
+ "epoch": 0.21333333333333335,
+ "grad_norm": 0.47720542550086975,
+ "learning_rate": 0.00019971458739130598,
+ "loss": 1.0633,
+ "step": 16
+ },
+ {
+ "epoch": 0.22666666666666666,
+ "grad_norm": 0.7947096228599548,
+ "learning_rate": 0.000199627270496696,
+ "loss": 1.1458,
+ "step": 17
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.5301257371902466,
+ "learning_rate": 0.0001995283421166614,
+ "loss": 1.2245,
+ "step": 18
+ },
+ {
+ "epoch": 0.25333333333333335,
+ "grad_norm": 0.9473271369934082,
+ "learning_rate": 0.00019941781378123244,
+ "loss": 1.0859,
+ "step": 19
+ },
+ {
+ "epoch": 0.26666666666666666,
+ "grad_norm": 1.1834161281585693,
+ "learning_rate": 0.00019929569837240564,
+ "loss": 1.1808,
+ "step": 20
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.6784033179283142,
+ "learning_rate": 0.00019916201012264254,
+ "loss": 1.202,
+ "step": 21
+ },
+ {
+ "epoch": 0.29333333333333333,
+ "grad_norm": 0.7274785041809082,
+ "learning_rate": 0.00019901676461321068,
+ "loss": 1.2242,
+ "step": 22
+ },
+ {
+ "epoch": 0.30666666666666664,
+ "grad_norm": 0.7520783543586731,
+ "learning_rate": 0.00019885997877236788,
+ "loss": 1.173,
+ "step": 23
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.8218541145324707,
+ "learning_rate": 0.00019869167087338907,
+ "loss": 1.0723,
+ "step": 24
+ },
+ {
+ "epoch": 0.3333333333333333,
+ "grad_norm": 0.6361420154571533,
+ "learning_rate": 0.00019851186053243666,
+ "loss": 1.1607,
+ "step": 25
+ },
+ {
+ "epoch": 0.3466666666666667,
+ "grad_norm": 0.6401374936103821,
+ "learning_rate": 0.00019832056870627417,
+ "loss": 1.1398,
+ "step": 26
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 1.0995603799819946,
+ "learning_rate": 0.0001981178176898239,
+ "loss": 0.9475,
+ "step": 27
+ },
+ {
+ "epoch": 0.37333333333333335,
+ "grad_norm": 0.5122275948524475,
+ "learning_rate": 0.00019790363111356837,
+ "loss": 1.0125,
+ "step": 28
+ },
+ {
+ "epoch": 0.38666666666666666,
+ "grad_norm": 0.9316695332527161,
+ "learning_rate": 0.00019767803394079615,
+ "loss": 1.0554,
+ "step": 29
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.6831843256950378,
+ "learning_rate": 0.00019744105246469263,
+ "loss": 1.027,
+ "step": 30
+ },
+ {
+ "epoch": 0.41333333333333333,
+ "grad_norm": 0.5529218912124634,
+ "learning_rate": 0.0001971927143052752,
+ "loss": 1.0218,
+ "step": 31
+ },
+ {
+ "epoch": 0.4266666666666667,
+ "grad_norm": 0.7164443135261536,
+ "learning_rate": 0.00019693304840617457,
+ "loss": 1.0901,
+ "step": 32
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.7430665493011475,
+ "learning_rate": 0.00019666208503126112,
+ "loss": 1.125,
+ "step": 33
+ },
+ {
+ "epoch": 0.4533333333333333,
+ "grad_norm": 0.7198122143745422,
+ "learning_rate": 0.00019637985576111778,
+ "loss": 0.9926,
+ "step": 34
+ },
+ {
+ "epoch": 0.4666666666666667,
+ "grad_norm": 0.6613873839378357,
+ "learning_rate": 0.0001960863934893594,
+ "loss": 1.1925,
+ "step": 35
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 1.2440484762191772,
+ "learning_rate": 0.00019578173241879872,
+ "loss": 0.8948,
+ "step": 36
+ },
+ {
+ "epoch": 0.49333333333333335,
+ "grad_norm": 0.6995570659637451,
+ "learning_rate": 0.00019546590805746052,
+ "loss": 0.9525,
+ "step": 37
+ },
+ {
+ "epoch": 0.5066666666666667,
+ "grad_norm": 0.6873968243598938,
+ "learning_rate": 0.00019513895721444286,
+ "loss": 0.9966,
+ "step": 38
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.5394595861434937,
+ "learning_rate": 0.00019480091799562704,
+ "loss": 1.0787,
+ "step": 39
+ },
+ {
+ "epoch": 0.5333333333333333,
+ "grad_norm": 0.9196312427520752,
+ "learning_rate": 0.00019445182979923654,
+ "loss": 1.0559,
+ "step": 40
+ },
+ {
+ "epoch": 0.5466666666666666,
+ "grad_norm": 2.5954103469848633,
+ "learning_rate": 0.000194091733311245,
+ "loss": 0.9638,
+ "step": 41
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 1.244681715965271,
+ "learning_rate": 0.00019372067050063438,
+ "loss": 0.9325,
+ "step": 42
+ },
+ {
+ "epoch": 0.5733333333333334,
+ "grad_norm": 0.613715410232544,
+ "learning_rate": 0.0001933386846145036,
+ "loss": 0.9428,
+ "step": 43
+ },
+ {
+ "epoch": 0.5866666666666667,
+ "grad_norm": 0.9604235291481018,
+ "learning_rate": 0.00019294582017302797,
+ "loss": 0.9486,
+ "step": 44
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.7591239809989929,
+ "learning_rate": 0.00019254212296427044,
+ "loss": 1.0955,
+ "step": 45
+ },
+ {
+ "epoch": 0.6133333333333333,
+ "grad_norm": 0.5218423008918762,
+ "learning_rate": 0.0001921276400388451,
+ "loss": 1.0368,
+ "step": 46
+ },
+ {
+ "epoch": 0.6266666666666667,
+ "grad_norm": 0.5670755505561829,
+ "learning_rate": 0.00019170241970443343,
+ "loss": 0.9854,
+ "step": 47
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.8089922070503235,
+ "learning_rate": 0.00019126651152015403,
+ "loss": 1.0396,
+ "step": 48
+ },
+ {
+ "epoch": 0.6533333333333333,
+ "grad_norm": 0.6459051966667175,
+ "learning_rate": 0.00019081996629078657,
+ "loss": 1.0156,
+ "step": 49
+ },
+ {
+ "epoch": 0.6666666666666666,
+ "grad_norm": 1.1918997764587402,
+ "learning_rate": 0.00019036283606085053,
+ "loss": 1.1542,
+ "step": 50
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.6263722777366638,
+ "learning_rate": 0.00018989517410853955,
+ "loss": 1.1471,
+ "step": 51
+ },
+ {
+ "epoch": 0.6933333333333334,
+ "grad_norm": 0.534618616104126,
+ "learning_rate": 0.00018941703493951164,
+ "loss": 0.9055,
+ "step": 52
+ },
+ {
+ "epoch": 0.7066666666666667,
+ "grad_norm": 0.529036819934845,
+ "learning_rate": 0.00018892847428053693,
+ "loss": 0.9896,
+ "step": 53
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.716572105884552,
+ "learning_rate": 0.00018842954907300236,
+ "loss": 1.0204,
+ "step": 54
+ },
+ {
+ "epoch": 0.7333333333333333,
+ "grad_norm": 0.832662045955658,
+ "learning_rate": 0.00018792031746627563,
+ "loss": 1.0263,
+ "step": 55
+ },
+ {
+ "epoch": 0.7466666666666667,
+ "grad_norm": 0.5659884810447693,
+ "learning_rate": 0.0001874008388109276,
+ "loss": 1.0529,
+ "step": 56
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.4971260726451874,
+ "learning_rate": 0.00018687117365181512,
+ "loss": 1.0109,
+ "step": 57
+ },
+ {
+ "epoch": 0.7733333333333333,
+ "grad_norm": 0.5997689962387085,
+ "learning_rate": 0.00018633138372102468,
+ "loss": 1.0363,
+ "step": 58
+ },
+ {
+ "epoch": 0.7866666666666666,
+ "grad_norm": 0.42450904846191406,
+ "learning_rate": 0.00018578153193067745,
+ "loss": 1.0156,
+ "step": 59
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 1.0025880336761475,
+ "learning_rate": 0.00018522168236559695,
+ "loss": 0.96,
+ "step": 60
+ },
+ {
+ "epoch": 0.8133333333333334,
+ "grad_norm": 0.47225672006607056,
+ "learning_rate": 0.00018465190027584005,
+ "loss": 1.0402,
+ "step": 61
+ },
+ {
+ "epoch": 0.8266666666666667,
+ "grad_norm": 0.6419042348861694,
+ "learning_rate": 0.00018407225206909208,
+ "loss": 0.9727,
+ "step": 62
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.9594618082046509,
+ "learning_rate": 0.00018348280530292713,
+ "loss": 0.986,
+ "step": 63
+ },
+ {
+ "epoch": 0.8533333333333334,
+ "grad_norm": 0.5415605902671814,
+ "learning_rate": 0.00018288362867693414,
+ "loss": 1.019,
+ "step": 64
+ },
+ {
+ "epoch": 0.8666666666666667,
+ "grad_norm": 0.9662086367607117,
+ "learning_rate": 0.00018227479202471015,
+ "loss": 0.9531,
+ "step": 65
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.7523136734962463,
+ "learning_rate": 0.0001816563663057211,
+ "loss": 1.0383,
+ "step": 66
+ },
+ {
+ "epoch": 0.8933333333333333,
+ "grad_norm": 0.7249945998191833,
+ "learning_rate": 0.00018102842359703176,
+ "loss": 1.1131,
+ "step": 67
+ },
+ {
+ "epoch": 0.9066666666666666,
+ "grad_norm": 0.4781404435634613,
+ "learning_rate": 0.000180391037084905,
+ "loss": 0.9701,
+ "step": 68
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.5435504913330078,
+ "learning_rate": 0.00017974428105627208,
+ "loss": 1.2701,
+ "step": 69
+ },
+ {
+ "epoch": 0.9333333333333333,
+ "grad_norm": 0.48021838068962097,
+ "learning_rate": 0.00017908823089007457,
+ "loss": 0.9212,
+ "step": 70
+ },
+ {
+ "epoch": 0.9466666666666667,
+ "grad_norm": 0.7063950300216675,
+ "learning_rate": 0.00017842296304847893,
+ "loss": 1.0076,
+ "step": 71
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.5694530606269836,
+ "learning_rate": 0.00017774855506796496,
+ "loss": 1.0417,
+ "step": 72
+ },
+ {
+ "epoch": 0.9733333333333334,
+ "grad_norm": 0.6120775938034058,
+ "learning_rate": 0.00017706508555028893,
+ "loss": 1.0501,
+ "step": 73
+ },
+ {
+ "epoch": 0.9866666666666667,
+ "grad_norm": 0.5728889107704163,
+ "learning_rate": 0.0001763726341533227,
+ "loss": 1.0024,
+ "step": 74
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.8452621102333069,
+ "learning_rate": 0.00017567128158176953,
+ "loss": 1.0966,
+ "step": 75
+ },
+ {
+ "epoch": 1.0133333333333334,
+ "grad_norm": 0.5769606828689575,
+ "learning_rate": 0.0001749611095777581,
+ "loss": 0.8877,
+ "step": 76
+ },
+ {
+ "epoch": 1.0266666666666666,
+ "grad_norm": 0.9046480059623718,
+ "learning_rate": 0.00017424220091131535,
+ "loss": 0.752,
+ "step": 77
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.4488053023815155,
+ "learning_rate": 0.00017351463937072004,
+ "loss": 0.7588,
+ "step": 78
+ },
+ {
+ "epoch": 1.0533333333333332,
+ "grad_norm": 0.41479629278182983,
+ "learning_rate": 0.00017277850975273696,
+ "loss": 0.779,
+ "step": 79
+ },
+ {
+ "epoch": 1.0666666666666667,
+ "grad_norm": 0.7192550301551819,
+ "learning_rate": 0.000172033897852734,
+ "loss": 0.7809,
+ "step": 80
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.7553783655166626,
+ "learning_rate": 0.00017128089045468294,
+ "loss": 0.7421,
+ "step": 81
+ },
+ {
+ "epoch": 1.0933333333333333,
+ "grad_norm": 0.5650737881660461,
+ "learning_rate": 0.0001705195753210446,
+ "loss": 0.7044,
+ "step": 82
+ },
+ {
+ "epoch": 1.1066666666666667,
+ "grad_norm": 0.6692880988121033,
+ "learning_rate": 0.0001697500411825403,
+ "loss": 0.8415,
+ "step": 83
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.6710836291313171,
+ "learning_rate": 0.00016897237772781044,
+ "loss": 0.7247,
+ "step": 84
+ },
+ {
+ "epoch": 1.1333333333333333,
+ "grad_norm": 0.5887194275856018,
+ "learning_rate": 0.0001681866755929612,
+ "loss": 0.7947,
+ "step": 85
+ },
+ {
+ "epoch": 1.1466666666666667,
+ "grad_norm": 0.5538906455039978,
+ "learning_rate": 0.00016739302635100108,
+ "loss": 0.7337,
+ "step": 86
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.6018480062484741,
+ "learning_rate": 0.00016659152250116812,
+ "loss": 0.6801,
+ "step": 87
+ },
+ {
+ "epoch": 1.1733333333333333,
+ "grad_norm": 0.548251748085022,
+ "learning_rate": 0.00016578225745814907,
+ "loss": 0.6898,
+ "step": 88
+ },
+ {
+ "epoch": 1.1866666666666668,
+ "grad_norm": 0.49416568875312805,
+ "learning_rate": 0.00016496532554119214,
+ "loss": 0.719,
+ "step": 89
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.6101306676864624,
+ "learning_rate": 0.000164140821963114,
+ "loss": 0.7917,
+ "step": 90
+ },
+ {
+ "epoch": 1.2133333333333334,
+ "grad_norm": 0.588020920753479,
+ "learning_rate": 0.000163308842819203,
+ "loss": 0.8035,
+ "step": 91
+ },
+ {
+ "epoch": 1.2266666666666666,
+ "grad_norm": 0.5376534461975098,
+ "learning_rate": 0.00016246948507601914,
+ "loss": 0.7816,
+ "step": 92
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.549625813961029,
+ "learning_rate": 0.00016162284656009274,
+ "loss": 0.7245,
+ "step": 93
+ },
+ {
+ "epoch": 1.2533333333333334,
+ "grad_norm": 0.6054933667182922,
+ "learning_rate": 0.0001607690259465229,
+ "loss": 0.7484,
+ "step": 94
+ },
+ {
+ "epoch": 1.2666666666666666,
+ "grad_norm": 0.5613316297531128,
+ "learning_rate": 0.00015990812274747692,
+ "loss": 0.7369,
+ "step": 95
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.5984250903129578,
+ "learning_rate": 0.00015904023730059228,
+ "loss": 0.891,
+ "step": 96
+ },
+ {
+ "epoch": 1.2933333333333334,
+ "grad_norm": 0.6490495204925537,
+ "learning_rate": 0.00015816547075728226,
+ "loss": 0.775,
+ "step": 97
+ },
+ {
+ "epoch": 1.3066666666666666,
+ "grad_norm": 0.6675053834915161,
+ "learning_rate": 0.000157283925070947,
+ "loss": 0.7565,
+ "step": 98
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.5397291779518127,
+ "learning_rate": 0.00015639570298509064,
+ "loss": 0.6685,
+ "step": 99
+ },
+ {
+ "epoch": 1.3333333333333333,
+ "grad_norm": 0.5830179452896118,
+ "learning_rate": 0.000155500908021347,
+ "loss": 0.8132,
+ "step": 100
+ },
+ {
+ "epoch": 1.3466666666666667,
+ "grad_norm": 0.511802077293396,
+ "learning_rate": 0.00015459964446741382,
+ "loss": 0.7664,
+ "step": 101
+ },
+ {
+ "epoch": 1.3599999999999999,
+ "grad_norm": 1.350032091140747,
+ "learning_rate": 0.0001536920173648984,
+ "loss": 0.8179,
+ "step": 102
+ },
+ {
+ "epoch": 1.3733333333333333,
+ "grad_norm": 0.7308780550956726,
+ "learning_rate": 0.00015277813249707487,
+ "loss": 0.8401,
+ "step": 103
+ },
+ {
+ "epoch": 1.3866666666666667,
+ "grad_norm": 0.5292226076126099,
+ "learning_rate": 0.0001518580963765555,
+ "loss": 0.6367,
+ "step": 104
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.6958481073379517,
+ "learning_rate": 0.00015093201623287631,
+ "loss": 0.7173,
+ "step": 105
+ },
+ {
+ "epoch": 1.4133333333333333,
+ "grad_norm": 0.7024071216583252,
+ "learning_rate": 0.00015000000000000001,
+ "loss": 0.5604,
+ "step": 106
+ },
+ {
+ "epoch": 1.4266666666666667,
+ "grad_norm": 0.5597444772720337,
+ "learning_rate": 0.00014906215630373606,
+ "loss": 0.6767,
+ "step": 107
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.6003674864768982,
+ "learning_rate": 0.00014811859444908052,
+ "loss": 0.8149,
+ "step": 108
+ },
+ {
+ "epoch": 1.4533333333333334,
+ "grad_norm": 0.5815126895904541,
+ "learning_rate": 0.00014716942440747664,
+ "loss": 0.7801,
+ "step": 109
+ },
+ {
+ "epoch": 1.4666666666666668,
+ "grad_norm": 0.7836669683456421,
+ "learning_rate": 0.0001462147568039977,
+ "loss": 0.8452,
+ "step": 110
+ },
+ {
+ "epoch": 1.48,
+ "grad_norm": 0.8783419132232666,
+ "learning_rate": 0.00014525470290445392,
+ "loss": 0.8257,
+ "step": 111
+ },
+ {
+ "epoch": 1.4933333333333334,
+ "grad_norm": 0.46948131918907166,
+ "learning_rate": 0.00014428937460242417,
+ "loss": 0.7481,
+ "step": 112
+ },
+ {
+ "epoch": 1.5066666666666668,
+ "grad_norm": 0.5725980401039124,
+ "learning_rate": 0.00014331888440621533,
+ "loss": 0.8267,
+ "step": 113
+ },
+ {
+ "epoch": 1.52,
+ "grad_norm": 0.4418632686138153,
+ "learning_rate": 0.00014234334542574906,
+ "loss": 0.7631,
+ "step": 114
+ },
+ {
+ "epoch": 1.5333333333333332,
+ "grad_norm": 0.6430942416191101,
+ "learning_rate": 0.00014136287135937915,
+ "loss": 0.8817,
+ "step": 115
+ },
+ {
+ "epoch": 1.5466666666666666,
+ "grad_norm": 0.5670009255409241,
+ "learning_rate": 0.00014037757648064018,
+ "loss": 0.5991,
+ "step": 116
+ },
+ {
+ "epoch": 1.56,
+ "grad_norm": 0.5407504439353943,
+ "learning_rate": 0.00013938757562492873,
+ "loss": 0.6898,
+ "step": 117
+ },
+ {
+ "epoch": 1.5733333333333333,
+ "grad_norm": 0.5176808834075928,
+ "learning_rate": 0.00013839298417611963,
+ "loss": 0.731,
+ "step": 118
+ },
+ {
+ "epoch": 1.5866666666666667,
+ "grad_norm": 0.9752798080444336,
+ "learning_rate": 0.00013739391805311793,
+ "loss": 0.6736,
+ "step": 119
+ },
+ {
+ "epoch": 1.6,
+ "grad_norm": 0.7100059390068054,
+ "learning_rate": 0.00013639049369634876,
+ "loss": 0.7369,
+ "step": 120
+ },
+ {
+ "epoch": 1.6133333333333333,
+ "grad_norm": 0.6285961270332336,
+ "learning_rate": 0.0001353828280541861,
+ "loss": 0.721,
+ "step": 121
+ },
+ {
+ "epoch": 1.6266666666666667,
+ "grad_norm": 0.5981026291847229,
+ "learning_rate": 0.00013437103856932264,
+ "loss": 0.8094,
+ "step": 122
+ },
+ {
+ "epoch": 1.6400000000000001,
+ "grad_norm": 0.6587502360343933,
+ "learning_rate": 0.00013335524316508208,
+ "loss": 0.7646,
+ "step": 123
+ },
+ {
+ "epoch": 1.6533333333333333,
+ "grad_norm": 0.5544253587722778,
+ "learning_rate": 0.00013233556023167485,
+ "loss": 0.7165,
+ "step": 124
+ },
+ {
+ "epoch": 1.6666666666666665,
+ "grad_norm": 0.6012857556343079,
+ "learning_rate": 0.00013131210861240026,
+ "loss": 0.8104,
+ "step": 125
+ },
+ {
+ "epoch": 1.6800000000000002,
+ "grad_norm": 0.5157524347305298,
+ "learning_rate": 0.00013028500758979506,
+ "loss": 0.8585,
+ "step": 126
+ },
+ {
+ "epoch": 1.6933333333333334,
+ "grad_norm": 0.4888676702976227,
+ "learning_rate": 0.00012925437687173142,
+ "loss": 0.6579,
+ "step": 127
+ },
+ {
+ "epoch": 1.7066666666666666,
+ "grad_norm": 0.5127140879631042,
+ "learning_rate": 0.00012822033657746478,
+ "loss": 0.7432,
+ "step": 128
+ },
+ {
+ "epoch": 1.72,
+ "grad_norm": 0.6154641509056091,
+ "learning_rate": 0.0001271830072236343,
+ "loss": 0.7322,
+ "step": 129
+ },
+ {
+ "epoch": 1.7333333333333334,
+ "grad_norm": 0.5081548690795898,
+ "learning_rate": 0.00012614250971021657,
+ "loss": 0.7547,
+ "step": 130
+ },
+ {
+ "epoch": 1.7466666666666666,
+ "grad_norm": 0.6808217763900757,
+ "learning_rate": 0.00012509896530643488,
+ "loss": 0.6855,
+ "step": 131
+ },
+ {
+ "epoch": 1.76,
+ "grad_norm": 0.8672941327095032,
+ "learning_rate": 0.00012405249563662537,
+ "loss": 0.6332,
+ "step": 132
+ },
+ {
+ "epoch": 1.7733333333333334,
+ "grad_norm": 0.6130337119102478,
+ "learning_rate": 0.00012300322266606178,
+ "loss": 0.7453,
+ "step": 133
+ },
+ {
+ "epoch": 1.7866666666666666,
+ "grad_norm": 0.739959180355072,
+ "learning_rate": 0.00012195126868674051,
+ "loss": 0.687,
+ "step": 134
+ },
+ {
+ "epoch": 1.8,
+ "grad_norm": 0.5801121592521667,
+ "learning_rate": 0.00012089675630312754,
+ "loss": 0.7059,
+ "step": 135
+ },
+ {
+ "epoch": 1.8133333333333335,
+ "grad_norm": 0.5766938328742981,
+ "learning_rate": 0.000119839808417869,
+ "loss": 0.737,
+ "step": 136
+ },
+ {
+ "epoch": 1.8266666666666667,
+ "grad_norm": 0.6705268621444702,
+ "learning_rate": 0.00011878054821746703,
+ "loss": 0.7358,
+ "step": 137
+ },
+ {
+ "epoch": 1.8399999999999999,
+ "grad_norm": 0.7814889550209045,
+ "learning_rate": 0.0001177190991579223,
+ "loss": 0.7021,
+ "step": 138
+ },
+ {
+ "epoch": 1.8533333333333335,
+ "grad_norm": 0.6991515755653381,
+ "learning_rate": 0.00011665558495034546,
+ "loss": 0.7325,
+ "step": 139
+ },
+ {
+ "epoch": 1.8666666666666667,
+ "grad_norm": 0.8299288749694824,
+ "learning_rate": 0.00011559012954653865,
+ "loss": 0.7128,
+ "step": 140
+ },
+ {
+ "epoch": 1.88,
+ "grad_norm": 0.7293754816055298,
+ "learning_rate": 0.00011452285712454904,
+ "loss": 0.5813,
+ "step": 141
+ },
+ {
+ "epoch": 1.8933333333333333,
+ "grad_norm": 0.6560428738594055,
+ "learning_rate": 0.00011345389207419588,
+ "loss": 0.6352,
+ "step": 142
+ },
+ {
+ "epoch": 1.9066666666666667,
+ "grad_norm": 0.54889976978302,
+ "learning_rate": 0.00011238335898257304,
+ "loss": 0.7372,
+ "step": 143
+ },
+ {
+ "epoch": 1.92,
+ "grad_norm": 0.5890987515449524,
+ "learning_rate": 0.00011131138261952845,
+ "loss": 0.6402,
+ "step": 144
+ },
+ {
+ "epoch": 1.9333333333333333,
+ "grad_norm": 0.8450446128845215,
+ "learning_rate": 0.00011023808792312227,
+ "loss": 0.7152,
+ "step": 145
+ },
+ {
+ "epoch": 1.9466666666666668,
+ "grad_norm": 0.7649719715118408,
+ "learning_rate": 0.0001091635999850655,
+ "loss": 0.6831,
+ "step": 146
+ },
+ {
+ "epoch": 1.96,
+ "grad_norm": 0.6236613988876343,
+ "learning_rate": 0.00010808804403614043,
+ "loss": 0.6671,
+ "step": 147
+ },
+ {
+ "epoch": 1.9733333333333334,
+ "grad_norm": 0.6295299530029297,
+ "learning_rate": 0.00010701154543160541,
+ "loss": 0.8226,
+ "step": 148
+ },
+ {
+ "epoch": 1.9866666666666668,
+ "grad_norm": 0.641965389251709,
+ "learning_rate": 0.00010593422963658452,
+ "loss": 0.6567,
+ "step": 149
+ },
+ {
+ "epoch": 2.0,
+ "grad_norm": 0.779958188533783,
+ "learning_rate": 0.00010485622221144484,
+ "loss": 0.6346,
+ "step": 150
+ },
+ {
+ "epoch": 2.013333333333333,
+ "grad_norm": 0.6322675347328186,
+ "learning_rate": 0.00010377764879716234,
+ "loss": 0.5576,
+ "step": 151
+ },
+ {
+ "epoch": 2.026666666666667,
+ "grad_norm": 0.7052869200706482,
+ "learning_rate": 0.00010269863510067872,
+ "loss": 0.4362,
+ "step": 152
+ },
+ {
+ "epoch": 2.04,
+ "grad_norm": 0.4991523027420044,
+ "learning_rate": 0.00010161930688025017,
+ "loss": 0.4478,
+ "step": 153
+ },
+ {
+ "epoch": 2.0533333333333332,
+ "grad_norm": 0.4096013903617859,
+ "learning_rate": 0.00010053978993079045,
+ "loss": 0.5258,
+ "step": 154
+ },
+ {
+ "epoch": 2.066666666666667,
+ "grad_norm": 0.4861268103122711,
+ "learning_rate": 9.946021006920959e-05,
+ "loss": 0.5085,
+ "step": 155
+ },
+ {
+ "epoch": 2.08,
+ "grad_norm": 0.5277904272079468,
+ "learning_rate": 9.838069311974986e-05,
+ "loss": 0.4549,
+ "step": 156
+ },
+ {
+ "epoch": 2.0933333333333333,
+ "grad_norm": 0.640762209892273,
+ "learning_rate": 9.730136489932133e-05,
+ "loss": 0.5168,
+ "step": 157
+ },
+ {
+ "epoch": 2.1066666666666665,
+ "grad_norm": 0.6294235587120056,
+ "learning_rate": 9.622235120283769e-05,
+ "loss": 0.3587,
+ "step": 158
+ },
+ {
+ "epoch": 2.12,
+ "grad_norm": 0.6455483436584473,
+ "learning_rate": 9.514377778855521e-05,
+ "loss": 0.5013,
+ "step": 159
+ },
+ {
+ "epoch": 2.1333333333333333,
+ "grad_norm": 0.7052115201950073,
+ "learning_rate": 9.406577036341548e-05,
+ "loss": 0.458,
+ "step": 160
+ },
+ {
+ "epoch": 2.1466666666666665,
+ "grad_norm": 0.6517965197563171,
+ "learning_rate": 9.298845456839459e-05,
+ "loss": 0.4427,
+ "step": 161
+ },
+ {
+ "epoch": 2.16,
+ "grad_norm": 1.0105723142623901,
+ "learning_rate": 9.19119559638596e-05,
+ "loss": 0.5482,
+ "step": 162
+ },
+ {
+ "epoch": 2.1733333333333333,
+ "grad_norm": 0.6122080683708191,
+ "learning_rate": 9.083640001493454e-05,
+ "loss": 0.3708,
+ "step": 163
+ },
+ {
+ "epoch": 2.1866666666666665,
+ "grad_norm": 0.7772620320320129,
+ "learning_rate": 8.976191207687775e-05,
+ "loss": 0.4423,
+ "step": 164
+ },
+ {
+ "epoch": 2.2,
+ "grad_norm": 0.5697551965713501,
+ "learning_rate": 8.868861738047158e-05,
+ "loss": 0.4544,
+ "step": 165
+ },
+ {
+ "epoch": 2.2133333333333334,
+ "grad_norm": 0.6369841694831848,
+ "learning_rate": 8.7616641017427e-05,
+ "loss": 0.4458,
+ "step": 166
+ },
+ {
+ "epoch": 2.2266666666666666,
+ "grad_norm": 0.6867621541023254,
+ "learning_rate": 8.654610792580415e-05,
+ "loss": 0.3971,
+ "step": 167
+ },
+ {
+ "epoch": 2.24,
+ "grad_norm": 0.6379404067993164,
+ "learning_rate": 8.5477142875451e-05,
+ "loss": 0.409,
+ "step": 168
+ },
+ {
+ "epoch": 2.2533333333333334,
+ "grad_norm": 0.5854802131652832,
+ "learning_rate": 8.440987045346134e-05,
+ "loss": 0.4489,
+ "step": 169
+ },
+ {
+ "epoch": 2.2666666666666666,
+ "grad_norm": 0.6577348113059998,
+ "learning_rate": 8.334441504965455e-05,
+ "loss": 0.5224,
+ "step": 170
+ },
+ {
+ "epoch": 2.2800000000000002,
+ "grad_norm": 0.6020484566688538,
+ "learning_rate": 8.228090084207774e-05,
+ "loss": 0.3876,
+ "step": 171
+ },
+ {
+ "epoch": 2.2933333333333334,
+ "grad_norm": 0.5832955241203308,
+ "learning_rate": 8.1219451782533e-05,
+ "loss": 0.4096,
+ "step": 172
+ },
+ {
+ "epoch": 2.3066666666666666,
+ "grad_norm": 0.6917557716369629,
+ "learning_rate": 8.016019158213101e-05,
+ "loss": 0.3827,
+ "step": 173
+ },
+ {
+ "epoch": 2.32,
+ "grad_norm": 0.6793459057807922,
+ "learning_rate": 7.91032436968725e-05,
+ "loss": 0.3801,
+ "step": 174
+ },
+ {
+ "epoch": 2.3333333333333335,
+ "grad_norm": 0.5776801705360413,
+ "learning_rate": 7.804873131325954e-05,
+ "loss": 0.4567,
+ "step": 175
+ },
+ {
+ "epoch": 2.3466666666666667,
+ "grad_norm": 0.7591734528541565,
+ "learning_rate": 7.699677733393826e-05,
+ "loss": 0.4957,
+ "step": 176
+ },
+ {
+ "epoch": 2.36,
+ "grad_norm": 0.8160498142242432,
+ "learning_rate": 7.594750436337467e-05,
+ "loss": 0.4485,
+ "step": 177
+ },
+ {
+ "epoch": 2.3733333333333335,
+ "grad_norm": 0.5776082873344421,
+ "learning_rate": 7.490103469356513e-05,
+ "loss": 0.5212,
+ "step": 178
+ },
+ {
+ "epoch": 2.3866666666666667,
+ "grad_norm": 0.6858205795288086,
+ "learning_rate": 7.385749028978346e-05,
+ "loss": 0.3985,
+ "step": 179
+ },
+ {
+ "epoch": 2.4,
+ "grad_norm": 0.7106760740280151,
+ "learning_rate": 7.281699277636572e-05,
+ "loss": 0.447,
+ "step": 180
+ },
+ {
+ "epoch": 2.413333333333333,
+ "grad_norm": 0.5468612313270569,
+ "learning_rate": 7.177966342253524e-05,
+ "loss": 0.3429,
+ "step": 181
+ },
+ {
+ "epoch": 2.4266666666666667,
+ "grad_norm": 0.6179500222206116,
+ "learning_rate": 7.07456231282686e-05,
+ "loss": 0.4873,
+ "step": 182
+ },
+ {
+ "epoch": 2.44,
+ "grad_norm": 0.677168607711792,
+ "learning_rate": 6.971499241020495e-05,
+ "loss": 0.5149,
+ "step": 183
+ },
+ {
+ "epoch": 2.453333333333333,
+ "grad_norm": 0.7949701547622681,
+ "learning_rate": 6.868789138759976e-05,
+ "loss": 0.5183,
+ "step": 184
+ },
+ {
+ "epoch": 2.466666666666667,
+ "grad_norm": 0.631366491317749,
+ "learning_rate": 6.766443976832517e-05,
+ "loss": 0.3915,
+ "step": 185
+ },
+ {
+ "epoch": 2.48,
+ "grad_norm": 0.6317359805107117,
+ "learning_rate": 6.664475683491796e-05,
+ "loss": 0.4246,
+ "step": 186
+ },
+ {
+ "epoch": 2.493333333333333,
+ "grad_norm": 0.7377456426620483,
+ "learning_rate": 6.562896143067734e-05,
+ "loss": 0.3572,
+ "step": 187
+ },
+ {
+ "epoch": 2.506666666666667,
+ "grad_norm": 0.7473776340484619,
+ "learning_rate": 6.461717194581393e-05,
+ "loss": 0.4051,
+ "step": 188
+ },
+ {
+ "epoch": 2.52,
+ "grad_norm": 0.6155073046684265,
+ "learning_rate": 6.360950630365126e-05,
+ "loss": 0.4465,
+ "step": 189
+ },
+ {
+ "epoch": 2.533333333333333,
+ "grad_norm": 0.599367082118988,
+ "learning_rate": 6.260608194688206e-05,
+ "loss": 0.4863,
+ "step": 190
+ },
+ {
+ "epoch": 2.546666666666667,
+ "grad_norm": 0.6318126320838928,
+ "learning_rate": 6.160701582388038e-05,
+ "loss": 0.5023,
+ "step": 191
+ },
+ {
+ "epoch": 2.56,
+ "grad_norm": 0.6112634539604187,
+ "learning_rate": 6.061242437507131e-05,
+ "loss": 0.5643,
+ "step": 192
+ },
+ {
+ "epoch": 2.5733333333333333,
+ "grad_norm": 0.9118645787239075,
+ "learning_rate": 5.962242351935985e-05,
+ "loss": 0.4692,
+ "step": 193
+ },
+ {
+ "epoch": 2.586666666666667,
+ "grad_norm": 0.7344533801078796,
+ "learning_rate": 5.863712864062089e-05,
+ "loss": 0.4355,
+ "step": 194
+ },
+ {
+ "epoch": 2.6,
+ "grad_norm": 0.6159957051277161,
+ "learning_rate": 5.765665457425102e-05,
+ "loss": 0.5576,
+ "step": 195
+ },
+ {
+ "epoch": 2.6133333333333333,
+ "grad_norm": 0.632001519203186,
+ "learning_rate": 5.668111559378471e-05,
+ "loss": 0.5418,
+ "step": 196
+ },
+ {
+ "epoch": 2.626666666666667,
+ "grad_norm": 0.7217976450920105,
+ "learning_rate": 5.571062539757581e-05,
+ "loss": 0.5315,
+ "step": 197
+ },
+ {
+ "epoch": 2.64,
+ "grad_norm": 0.5802445411682129,
+ "learning_rate": 5.474529709554612e-05,
+ "loss": 0.5183,
+ "step": 198
+ },
+ {
+ "epoch": 2.6533333333333333,
+ "grad_norm": 0.8810819983482361,
+ "learning_rate": 5.378524319600231e-05,
+ "loss": 0.5234,
+ "step": 199
+ },
+ {
+ "epoch": 2.6666666666666665,
+ "grad_norm": 0.8704924583435059,
+ "learning_rate": 5.283057559252341e-05,
+ "loss": 0.4652,
+ "step": 200
+ },
+ {
+ "epoch": 2.68,
+ "grad_norm": 0.6765443682670593,
+ "learning_rate": 5.1881405550919493e-05,
+ "loss": 0.4305,
+ "step": 201
+ },
+ {
+ "epoch": 2.6933333333333334,
+ "grad_norm": 0.7338111400604248,
+ "learning_rate": 5.0937843696263966e-05,
+ "loss": 0.5201,
+ "step": 202
+ },
+ {
+ "epoch": 2.7066666666666666,
+ "grad_norm": 0.8355885744094849,
+ "learning_rate": 5.000000000000002e-05,
+ "loss": 0.3281,
+ "step": 203
+ },
+ {
+ "epoch": 2.7199999999999998,
+ "grad_norm": 0.6787794828414917,
+ "learning_rate": 4.9067983767123736e-05,
+ "loss": 0.5057,
+ "step": 204
+ },
+ {
+ "epoch": 2.7333333333333334,
+ "grad_norm": 0.6520538330078125,
+ "learning_rate": 4.814190362344454e-05,
+ "loss": 0.3988,
+ "step": 205
+ },
+ {
+ "epoch": 2.7466666666666666,
+ "grad_norm": 0.9957300424575806,
+ "learning_rate": 4.722186750292511e-05,
+ "loss": 0.4347,
+ "step": 206
+ },
+ {
+ "epoch": 2.76,
+ "grad_norm": 0.6702403426170349,
+ "learning_rate": 4.630798263510162e-05,
+ "loss": 0.4431,
+ "step": 207
+ },
+ {
+ "epoch": 2.7733333333333334,
+ "grad_norm": 0.6874341368675232,
+ "learning_rate": 4.540035553258619e-05,
+ "loss": 0.5117,
+ "step": 208
+ },
+ {
+ "epoch": 2.7866666666666666,
+ "grad_norm": 0.5381680727005005,
+ "learning_rate": 4.449909197865303e-05,
+ "loss": 0.4461,
+ "step": 209
+ },
+ {
+ "epoch": 2.8,
+ "grad_norm": 0.5413621068000793,
+ "learning_rate": 4.360429701490934e-05,
+ "loss": 0.5286,
+ "step": 210
+ },
+ {
+ "epoch": 2.8133333333333335,
+ "grad_norm": 0.5521364808082581,
+ "learning_rate": 4.271607492905303e-05,
+ "loss": 0.4846,
+ "step": 211
+ },
+ {
+ "epoch": 2.8266666666666667,
+ "grad_norm": 0.5857036709785461,
+ "learning_rate": 4.183452924271776e-05,
+ "loss": 0.4799,
+ "step": 212
+ },
+ {
+ "epoch": 2.84,
+ "grad_norm": 0.7210860848426819,
+ "learning_rate": 4.0959762699407766e-05,
+ "loss": 0.3214,
+ "step": 213
+ },
+ {
+ "epoch": 2.8533333333333335,
+ "grad_norm": 0.6596788763999939,
+ "learning_rate": 4.009187725252309e-05,
+ "loss": 0.4818,
+ "step": 214
+ },
+ {
+ "epoch": 2.8666666666666667,
+ "grad_norm": 0.6683171987533569,
+ "learning_rate": 3.9230974053477086e-05,
+ "loss": 0.4854,
+ "step": 215
+ },
+ {
+ "epoch": 2.88,
+ "grad_norm": 0.6668866872787476,
+ "learning_rate": 3.8377153439907266e-05,
+ "loss": 0.3712,
+ "step": 216
+ },
+ {
+ "epoch": 2.8933333333333335,
+ "grad_norm": 0.8019754886627197,
+ "learning_rate": 3.7530514923980884e-05,
+ "loss": 0.3826,
+ "step": 217
+ },
+ {
+ "epoch": 2.9066666666666667,
+ "grad_norm": 0.7723852396011353,
+ "learning_rate": 3.669115718079702e-05,
+ "loss": 0.3923,
+ "step": 218
+ },
+ {
+ "epoch": 2.92,
+ "grad_norm": 0.8932898044586182,
+ "learning_rate": 3.585917803688603e-05,
+ "loss": 0.4372,
+ "step": 219
+ },
+ {
+ "epoch": 2.9333333333333336,
+ "grad_norm": 0.8380217552185059,
+ "learning_rate": 3.503467445880789e-05,
+ "loss": 0.6057,
+ "step": 220
+ },
+ {
+ "epoch": 2.9466666666666668,
+ "grad_norm": 0.7885947227478027,
+ "learning_rate": 3.421774254185096e-05,
+ "loss": 0.4252,
+ "step": 221
+ },
+ {
+ "epoch": 2.96,
+ "grad_norm": 0.6929410099983215,
+ "learning_rate": 3.340847749883191e-05,
+ "loss": 0.4339,
+ "step": 222
+ },
+ {
+ "epoch": 2.9733333333333336,
+ "grad_norm": 0.5754182934761047,
+ "learning_rate": 3.2606973648998915e-05,
+ "loss": 0.5381,
+ "step": 223
+ },
+ {
+ "epoch": 2.986666666666667,
+ "grad_norm": 0.5466946363449097,
+ "learning_rate": 3.1813324407038825e-05,
+ "loss": 0.4699,
+ "step": 224
+ },
+ {
+ "epoch": 3.0,
+ "grad_norm": 0.6496762037277222,
+ "learning_rate": 3.102762227218957e-05,
+ "loss": 0.4392,
+ "step": 225
+ },
+ {
+ "epoch": 3.013333333333333,
+ "grad_norm": 0.5232837200164795,
+ "learning_rate": 3.0249958817459722e-05,
+ "loss": 0.3204,
+ "step": 226
+ },
+ {
+ "epoch": 3.026666666666667,
+ "grad_norm": 0.6648370623588562,
+ "learning_rate": 2.9480424678955443e-05,
+ "loss": 0.3054,
+ "step": 227
+ },
+ {
+ "epoch": 3.04,
+ "grad_norm": 0.5386692881584167,
+ "learning_rate": 2.8719109545317103e-05,
+ "loss": 0.3565,
+ "step": 228
+ },
+ {
+ "epoch": 3.0533333333333332,
+ "grad_norm": 0.5926947593688965,
+ "learning_rate": 2.7966102147265994e-05,
+ "loss": 0.298,
+ "step": 229
+ },
+ {
+ "epoch": 3.066666666666667,
+ "grad_norm": 0.5218151807785034,
+ "learning_rate": 2.722149024726307e-05,
+ "loss": 0.242,
+ "step": 230
+ },
+ {
+ "epoch": 3.08,
+ "grad_norm": 0.5256300568580627,
+ "learning_rate": 2.6485360629279987e-05,
+ "loss": 0.3497,
+ "step": 231
+ },
+ {
+ "epoch": 3.0933333333333333,
+ "grad_norm": 0.5321183204650879,
+ "learning_rate": 2.5757799088684654e-05,
+ "loss": 0.2814,
+ "step": 232
+ },
+ {
+ "epoch": 3.1066666666666665,
+ "grad_norm": 0.4942474663257599,
+ "learning_rate": 2.5038890422241958e-05,
+ "loss": 0.3361,
+ "step": 233
+ },
+ {
+ "epoch": 3.12,
+ "grad_norm": 0.6570059657096863,
+ "learning_rate": 2.432871841823047e-05,
+ "loss": 0.2742,
+ "step": 234
+ },
+ {
+ "epoch": 3.1333333333333333,
+ "grad_norm": 0.6714842319488525,
+ "learning_rate": 2.3627365846677306e-05,
+ "loss": 0.3261,
+ "step": 235
+ },
+ {
+ "epoch": 3.1466666666666665,
+ "grad_norm": 0.68682861328125,
+ "learning_rate": 2.2934914449711087e-05,
+ "loss": 0.2931,
+ "step": 236
+ },
+ {
+ "epoch": 3.16,
+ "grad_norm": 0.519067645072937,
+ "learning_rate": 2.2251444932035094e-05,
+ "loss": 0.323,
+ "step": 237
+ },
+ {
+ "epoch": 3.1733333333333333,
+ "grad_norm": 0.5991199612617493,
+ "learning_rate": 2.157703695152109e-05,
+ "loss": 0.2603,
+ "step": 238
+ },
+ {
+ "epoch": 3.1866666666666665,
+ "grad_norm": 0.7177437543869019,
+ "learning_rate": 2.091176910992545e-05,
+ "loss": 0.3499,
+ "step": 239
+ },
+ {
+ "epoch": 3.2,
+ "grad_norm": 0.6380268335342407,
+ "learning_rate": 2.025571894372794e-05,
+ "loss": 0.3064,
+ "step": 240
+ },
+ {
+ "epoch": 3.2133333333333334,
+ "grad_norm": 0.5707582235336304,
+ "learning_rate": 1.9608962915094996e-05,
+ "loss": 0.2747,
+ "step": 241
+ },
+ {
+ "epoch": 3.2266666666666666,
+ "grad_norm": 0.6281158328056335,
+ "learning_rate": 1.897157640296825e-05,
+ "loss": 0.2461,
+ "step": 242
+ },
+ {
+ "epoch": 3.24,
+ "grad_norm": 0.6357036232948303,
+ "learning_rate": 1.8343633694278895e-05,
+ "loss": 0.2846,
+ "step": 243
+ },
+ {
+ "epoch": 3.2533333333333334,
+ "grad_norm": 0.7706279754638672,
+ "learning_rate": 1.772520797528988e-05,
+ "loss": 0.2931,
+ "step": 244
+ },
+ {
+ "epoch": 3.2666666666666666,
+ "grad_norm": 0.5687737464904785,
+ "learning_rate": 1.7116371323065883e-05,
+ "loss": 0.2162,
+ "step": 245
+ },
+ {
+ "epoch": 3.2800000000000002,
+ "grad_norm": 0.5638925433158875,
+ "learning_rate": 1.65171946970729e-05,
+ "loss": 0.2219,
+ "step": 246
+ },
+ {
+ "epoch": 3.2933333333333334,
+ "grad_norm": 0.6213463544845581,
+ "learning_rate": 1.592774793090792e-05,
+ "loss": 0.3329,
+ "step": 247
+ },
+ {
+ "epoch": 3.3066666666666666,
+ "grad_norm": 0.6423382759094238,
+ "learning_rate": 1.534809972415998e-05,
+ "loss": 0.225,
+ "step": 248
+ },
+ {
+ "epoch": 3.32,
+ "grad_norm": 0.818946361541748,
+ "learning_rate": 1.4778317634403083e-05,
+ "loss": 0.2252,
+ "step": 249
+ },
+ {
+ "epoch": 3.3333333333333335,
+ "grad_norm": 0.5952211022377014,
+ "learning_rate": 1.4218468069322578e-05,
+ "loss": 0.2295,
+ "step": 250
+ }
+ ],
+ "logging_steps": 1.0,
+ "max_steps": 300,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 4,
+ "save_steps": 50,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 7.190061088610714e+16,
+ "train_batch_size": 2,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/checkpoint-250/training_args.bin b/checkpoint-250/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..26de66c830692f3d22f375473f4f43447eefb78a
--- /dev/null
+++ b/checkpoint-250/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:54335a849ae8d377cae5839a736f4196087c7894666ca9b9f10dd899e0ada95c
+size 6904
diff --git a/checkpoint-250/zero_to_fp32.py b/checkpoint-250/zero_to_fp32.py
new file mode 100644
index 0000000000000000000000000000000000000000..5995d6e6f04e43b989587aa9022a3aef0c66d694
--- /dev/null
+++ b/checkpoint-250/zero_to_fp32.py
@@ -0,0 +1,760 @@
+#!/usr/bin/env python
+
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+ # This script extracts fp32 consolidated weights from ZeRO 1, 2 and 3 DeepSpeed checkpoints. It gets
+# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
+# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
+# application.
+#
+# example:
+# python zero_to_fp32.py . output_dir/
+# or
+# python zero_to_fp32.py . output_dir/ --safe_serialization
+
+import argparse
+import torch
+import glob
+import math
+import os
+import re
+import gc
+import json
+import numpy as np
+from tqdm import tqdm
+from collections import OrderedDict
+from dataclasses import dataclass
+
+# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
+# DeepSpeed data structures it has to be available in the current python environment.
+from deepspeed.utils import logger
+from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
+
+
+@dataclass
+class zero_model_state:
+ buffers: dict()
+ param_shapes: dict()
+ shared_params: list
+ ds_version: int
+ frozen_param_shapes: dict()
+ frozen_param_fragments: dict()
+
+
+debug = 0
+
+# load to cpu
+device = torch.device('cpu')
+
+
+def atoi(text):
+ return int(text) if text.isdigit() else text
+
+
+def natural_keys(text):
+ '''
+ alist.sort(key=natural_keys) sorts in human order
+ http://nedbatchelder.com/blog/200712/human_sorting.html
+ (See Toothy's implementation in the comments)
+ '''
+ return [atoi(c) for c in re.split(r'(\d+)', text)]
+
+
+def get_model_state_file(checkpoint_dir, zero_stage):
+ if not os.path.isdir(checkpoint_dir):
+ raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
+
+ # there should be only one file
+ if zero_stage <= 2:
+ file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
+ elif zero_stage == 3:
+ file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
+
+ if not os.path.exists(file):
+ raise FileNotFoundError(f"can't find model states file at '{file}'")
+
+ return file
+
+
+def get_checkpoint_files(checkpoint_dir, glob_pattern):
+ # XXX: need to test that this simple glob rule works for multi-node setup too
+ ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
+
+ if len(ckpt_files) == 0:
+ raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
+
+ return ckpt_files
+
+
+def get_optim_files(checkpoint_dir):
+ return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
+
+
+def get_model_state_files(checkpoint_dir):
+ return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
+
+
+def parse_model_states(files):
+ zero_model_states = []
+ for file in files:
+ state_dict = torch.load(file, map_location=device, weights_only=False)
+
+ if BUFFER_NAMES not in state_dict:
+ raise ValueError(f"{file} is not a model state checkpoint")
+ buffer_names = state_dict[BUFFER_NAMES]
+ if debug:
+ print("Found buffers:", buffer_names)
+
+ # recover just the buffers while restoring them to fp32 if they were saved in fp16
+ buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
+ param_shapes = state_dict[PARAM_SHAPES]
+
+ # collect parameters that are included in param_shapes
+ param_names = []
+ for s in param_shapes:
+ for name in s.keys():
+ param_names.append(name)
+
+ # update with frozen parameters
+ frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
+ if frozen_param_shapes is not None:
+ if debug:
+ print(f"Found frozen_param_shapes: {frozen_param_shapes}")
+ param_names += list(frozen_param_shapes.keys())
+
+ # handle shared params
+ shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
+
+ ds_version = state_dict.get(DS_VERSION, None)
+
+ frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
+
+ z_model_state = zero_model_state(buffers=buffers,
+ param_shapes=param_shapes,
+ shared_params=shared_params,
+ ds_version=ds_version,
+ frozen_param_shapes=frozen_param_shapes,
+ frozen_param_fragments=frozen_param_fragments)
+ zero_model_states.append(z_model_state)
+
+ return zero_model_states
+
+
+def parse_optim_states(files, ds_checkpoint_dir):
+ total_files = len(files)
+ state_dicts = []
+ for f in tqdm(files, desc='Loading checkpoint shards'):
+ state_dict = torch.load(f, map_location=device, mmap=True, weights_only=False)
+ # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights
+ # and also handle the case where it was already removed by another helper script
+ state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
+ state_dicts.append(state_dict)
+
+ if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
+ raise ValueError(f"{files[0]} is not a zero checkpoint")
+ zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
+ world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
+
+ # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
+ # parameters can be different from data parallelism for non-expert parameters. So we can just
+ # use the max of the partition_count to get the dp world_size.
+
+ if type(world_size) is list:
+ world_size = max(world_size)
+
+ if world_size != total_files:
+ raise ValueError(
+ f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
+ "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
+ )
+
+ # the groups are named differently in each stage
+ if zero_stage <= 2:
+ fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
+ elif zero_stage == 3:
+ fp32_groups_key = FP32_FLAT_GROUPS
+ else:
+ raise ValueError(f"unknown zero stage {zero_stage}")
+
+ fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
+ return zero_stage, world_size, fp32_flat_groups
+
+
+def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
+ """
+ Returns fp32 state_dict reconstructed from ds checkpoint
+
+ Args:
+ - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
+
+ """
+ print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
+
+ optim_files = get_optim_files(ds_checkpoint_dir)
+ zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
+ print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
+
+ model_files = get_model_state_files(ds_checkpoint_dir)
+
+ zero_model_states = parse_model_states(model_files)
+ print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
+
+ if zero_stage <= 2:
+ return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters)
+ elif zero_stage == 3:
+ return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters)
+
+
+def _zero2_merge_frozen_params(state_dict, zero_model_states):
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+ return
+
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+ frozen_param_fragments = zero_model_states[0].frozen_param_fragments
+
+ if debug:
+ num_elem = sum(s.numel() for s in frozen_param_shapes.values())
+ print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+ wanted_params = len(frozen_param_shapes)
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+ avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
+ print(f'Frozen params: Have {avail_numel} numels to process.')
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+ total_params = 0
+ total_numel = 0
+ for name, shape in frozen_param_shapes.items():
+ total_params += 1
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+
+ state_dict[name] = frozen_param_fragments[name]
+
+ if debug:
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _has_callable(obj, fn):
+ attr = getattr(obj, fn, None)
+ return callable(attr)
+
+
+def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+ param_shapes = zero_model_states[0].param_shapes
+
+ # Reconstruction protocol:
+ #
+ # XXX: document this
+
+ if debug:
+ for i in range(world_size):
+ for j in range(len(fp32_flat_groups[0])):
+ print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
+
+ # XXX: memory usage doubles here (zero2)
+ num_param_groups = len(fp32_flat_groups[0])
+ merged_single_partition_of_fp32_groups = []
+ for i in range(num_param_groups):
+ merged_partitions = [sd[i] for sd in fp32_flat_groups]
+ full_single_fp32_vector = torch.cat(merged_partitions, 0)
+ merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
+ avail_numel = sum(
+ [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
+
+ if debug:
+ wanted_params = sum([len(shapes) for shapes in param_shapes])
+ wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
+ # not asserting if there is a mismatch due to possible padding
+ print(f"Have {avail_numel} numels to process.")
+ print(f"Need {wanted_numel} numels in {wanted_params} params.")
+
+ # params
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+ # out-of-core computing solution
+ total_numel = 0
+ total_params = 0
+ for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
+ offset = 0
+ avail_numel = full_single_fp32_vector.numel()
+ for name, shape in shapes.items():
+
+ unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
+ total_numel += unpartitioned_numel
+ total_params += 1
+
+ if debug:
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+ state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
+ offset += unpartitioned_numel
+
+ # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
+ # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
+ # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
+ # live optimizer object, so we are checking that the numbers are within the right range
+ align_to = 2 * world_size
+
+ def zero2_align(x):
+ return align_to * math.ceil(x / align_to)
+
+ if debug:
+ print(f"original offset={offset}, avail_numel={avail_numel}")
+
+ offset = zero2_align(offset)
+ avail_numel = zero2_align(avail_numel)
+
+ if debug:
+ print(f"aligned offset={offset}, avail_numel={avail_numel}")
+
+ # Sanity check
+ if offset != avail_numel:
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+ print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters):
+ state_dict = OrderedDict()
+
+ # buffers
+ buffers = zero_model_states[0].buffers
+ state_dict.update(buffers)
+ if debug:
+ print(f"added {len(buffers)} buffers")
+
+ if not exclude_frozen_parameters:
+ _zero2_merge_frozen_params(state_dict, zero_model_states)
+
+ _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+ # recover shared parameters
+ for pair in zero_model_states[0].shared_params:
+ if pair[1] in state_dict:
+ state_dict[pair[0]] = state_dict[pair[1]]
+
+ return state_dict
+
+
+def zero3_partitioned_param_info(unpartitioned_numel, world_size):
+ remainder = unpartitioned_numel % world_size
+ padding_numel = (world_size - remainder) if remainder else 0
+ partitioned_numel = math.ceil(unpartitioned_numel / world_size)
+ return partitioned_numel, padding_numel
+
+
+def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+ return
+
+ if debug:
+ for i in range(world_size):
+ num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
+ print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+ wanted_params = len(frozen_param_shapes)
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+ avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
+ print(f'Frozen params: Have {avail_numel} numels to process.')
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+ total_params = 0
+ total_numel = 0
+ for name, shape in zero_model_states[0].frozen_param_shapes.items():
+ total_params += 1
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+
+ param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
+ state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
+
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+ if debug:
+ print(
+ f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+ )
+
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+class GatheredTensor:
+ """
+ A pseudo tensor that collects partitioned weights.
+ It is more memory efficient when there are multiple groups.
+ """
+
+ def __init__(self, flat_groups, flat_groups_offset, offset, partitioned_numel, shape):
+ self.flat_groups = flat_groups
+ self.flat_groups_offset = flat_groups_offset
+ self.offset = offset
+ self.partitioned_numel = partitioned_numel
+ self.shape = shape
+ self.dtype = self.flat_groups[0][0].dtype
+
+ def contiguous(self):
+ """
+ Merge partitioned weights from flat_groups into a single tensor.
+ """
+ end_idx = self.offset + self.partitioned_numel
+ world_size = len(self.flat_groups)
+ pad_flat_param_chunks = []
+
+ for rank_i in range(world_size):
+ # for each rank, we need to collect weights from related group/groups
+ flat_groups_at_rank_i = self.flat_groups[rank_i]
+ start_group_id = None
+ end_group_id = None
+ for group_id in range(len(self.flat_groups_offset)):
+ if self.flat_groups_offset[group_id] <= self.offset < self.flat_groups_offset[group_id + 1]:
+ start_group_id = group_id
+ if self.flat_groups_offset[group_id] < end_idx <= self.flat_groups_offset[group_id + 1]:
+ end_group_id = group_id
+ break
+ # collect weights from related group/groups
+ for group_id in range(start_group_id, end_group_id + 1):
+ flat_tensor = flat_groups_at_rank_i[group_id]
+ start_offset = self.offset - self.flat_groups_offset[group_id]
+ end_offset = min(end_idx, self.flat_groups_offset[group_id + 1]) - self.flat_groups_offset[group_id]
+ pad_flat_param_chunks.append(flat_tensor[start_offset:end_offset])
+
+ # collect weights from all ranks
+ pad_flat_param = torch.cat(pad_flat_param_chunks, dim=0)
+ param = pad_flat_param[:self.shape.numel()].view(self.shape).contiguous()
+ return param
+
+
+def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+ param_shapes = zero_model_states[0].param_shapes
+ avail_numel = sum([flat_group.numel() for flat_group in fp32_flat_groups[0]]) * world_size
+
+ # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
+ # param, re-consolidating each param, while dealing with padding if any
+
+ # merge list of dicts, preserving order
+ param_shapes = {k: v for d in param_shapes for k, v in d.items()}
+
+ if debug:
+ for i in range(world_size):
+ print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
+
+ wanted_params = len(param_shapes)
+ wanted_numel = sum(shape.numel() for shape in param_shapes.values())
+ # not asserting if there is a mismatch due to possible padding
+ avail_numel = fp32_flat_groups[0].numel() * world_size
+ print(f"Trainable params: Have {avail_numel} numels to process.")
+ print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
+
+ # params
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+ # out-of-core computing solution
+ offset = 0
+ total_numel = 0
+ total_params = 0
+ flat_groups_offset = [0] + list(np.cumsum([flat_tensor.numel() for flat_tensor in fp32_flat_groups[0]]))
+ for name, shape in tqdm(param_shapes.items(), desc='Gathering sharded weights'):
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+ total_params += 1
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+ if debug:
+ print(
+ f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+ )
+
+ # memory efficient tensor
+ tensor = GatheredTensor(fp32_flat_groups, flat_groups_offset, offset, partitioned_numel, shape)
+ state_dict[name] = tensor
+ offset += partitioned_numel
+
+ offset *= world_size
+
+ # Sanity check
+ if offset != avail_numel:
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+ print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters):
+ state_dict = OrderedDict()
+
+ # buffers
+ buffers = zero_model_states[0].buffers
+ state_dict.update(buffers)
+ if debug:
+ print(f"added {len(buffers)} buffers")
+
+ if not exclude_frozen_parameters:
+ _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
+
+ _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+ # recover shared parameters
+ for pair in zero_model_states[0].shared_params:
+ if pair[1] in state_dict:
+ state_dict[pair[0]] = state_dict[pair[1]]
+
+ return state_dict
+
+
+def to_torch_tensor(state_dict, return_empty_tensor=False):
+ """
+ Convert state_dict of GatheredTensor to torch tensor
+ """
+ torch_state_dict = {}
+ converted_tensors = {}
+ for name, tensor in state_dict.items():
+ tensor_id = id(tensor)
+ if tensor_id in converted_tensors: # shared tensors
+ shared_tensor = torch_state_dict[converted_tensors[tensor_id]]
+ torch_state_dict[name] = shared_tensor
+ else:
+ converted_tensors[tensor_id] = name
+ if return_empty_tensor:
+ torch_state_dict[name] = torch.empty(tensor.shape, dtype=tensor.dtype)
+ else:
+ torch_state_dict[name] = tensor.contiguous()
+ return torch_state_dict
+
+
+def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
+ tag=None,
+ exclude_frozen_parameters=False,
+ lazy_mode=False):
+ """
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
+ ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
+ via a model hub.
+
+ Args:
+ - ``checkpoint_dir``: path to the desired checkpoint folder
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
+ - ``exclude_frozen_parameters``: exclude frozen parameters
+ - ``lazy_mode``: get state_dict in lazy mode. It returns a dict of pseudo tensors instead of torch tensors, which is more memory efficient.
+ Convert a pseudo tensor to a torch tensor by ``.contiguous()``
+
+ Returns:
+ - pytorch ``state_dict``
+
+ A typical usage might be ::
+
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+ # do the training and checkpoint saving
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
+ model = model.cpu() # move to cpu
+ model.load_state_dict(state_dict)
+ # submit to model hub or save the model to share with others
+
+ In this example the ``model`` will no longer be usable in the deepspeed context of the same
+ application. i.e. you will need to re-initialize the deepspeed engine, since
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+ If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
+
+ Note: the above usage may not work if your application doesn't have sufficient free CPU memory.
+ You may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
+ the checkpoint. Or you can load state_dict in lazy mode ::
+
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, lazy_mode=True) # not on cpu
+ for name, lazy_tensor in state_dict.items():
+ tensor = lazy_tensor.contiguous() # to cpu
+ print(name, tensor)
+ # del tensor to release memory if it is no longer in use
+ """
+ if tag is None:
+ latest_path = os.path.join(checkpoint_dir, 'latest')
+ if os.path.isfile(latest_path):
+ with open(latest_path, 'r') as fd:
+ tag = fd.read().strip()
+ else:
+ raise ValueError(f"Unable to find 'latest' file at {latest_path}")
+
+ ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
+
+ if not os.path.isdir(ds_checkpoint_dir):
+ raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
+
+ state_dict = _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
+ if lazy_mode:
+ return state_dict
+ else:
+ return to_torch_tensor(state_dict)
+
+
+def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir,
+ output_dir,
+ max_shard_size="5GB",
+ safe_serialization=False,
+ tag=None,
+ exclude_frozen_parameters=False):
+ """
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
+ loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
+
+ Args:
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+ - ``output_dir``: directory to the pytorch fp32 state_dict output files
+ - ``max_shard_size``: the maximum size for a checkpoint before being sharded, default value is 5GB
+ - ``safe_serialization``: whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+ - ``exclude_frozen_parameters``: exclude frozen parameters
+ """
+
+ # Dependency pre-check
+ if safe_serialization:
+ try:
+ from safetensors.torch import save_file
+ except ImportError:
+ print('If you want to use `safe_serialization`, please `pip install safetensors`')
+ raise
+ if max_shard_size is not None:
+ try:
+ from huggingface_hub import split_torch_state_dict_into_shards
+ except ImportError:
+ print('If you want to use `max_shard_size`, please `pip install huggingface_hub`')
+ raise
+
+ # Convert zero checkpoint to state_dict
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
+ tag,
+ exclude_frozen_parameters,
+ lazy_mode=True)
+
+ # Shard the model if it is too big.
+ weights_name = "model.safetensors" if safe_serialization else "pytorch_model.bin"
+ if max_shard_size is not None:
+ filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors")
+ # a memory-efficient approach for sharding
+ empty_state_dict = to_torch_tensor(state_dict, return_empty_tensor=True)
+ state_dict_split = split_torch_state_dict_into_shards(empty_state_dict,
+ filename_pattern=filename_pattern,
+ max_shard_size=max_shard_size)
+ else:
+ from collections import namedtuple
+ StateDictSplit = namedtuple("StateDictSplit", ["is_sharded", "filename_to_tensors"])
+ state_dict_split = StateDictSplit(is_sharded=False,
+ filename_to_tensors={weights_name: list(state_dict.keys())})
+
+ # Save the model by shard
+ os.makedirs(output_dir, exist_ok=True)
+ filename_to_tensors = state_dict_split.filename_to_tensors.items()
+ for shard_file, tensors in tqdm(filename_to_tensors, desc="Saving checkpoint shards"):
+ shard_state_dict = {tensor_name: state_dict[tensor_name] for tensor_name in tensors}
+ shard_state_dict = to_torch_tensor(shard_state_dict)
+ output_path = os.path.join(output_dir, shard_file)
+ if safe_serialization:
+ save_file(shard_state_dict, output_path, metadata={"format": "pt"})
+ else:
+ torch.save(shard_state_dict, output_path)
+ # release the memory of current shard
+ for tensor_name in list(shard_state_dict.keys()):
+ del state_dict[tensor_name]
+ del shard_state_dict[tensor_name]
+ del shard_state_dict
+ gc.collect()
+
+ # Save index if sharded
+ if state_dict_split.is_sharded:
+ index = {
+ "metadata": state_dict_split.metadata,
+ "weight_map": state_dict_split.tensor_to_filename,
+ }
+ save_index_file = "model.safetensors.index.json" if safe_serialization else "pytorch_model.bin.index.json"
+ save_index_file = os.path.join(output_dir, save_index_file)
+ with open(save_index_file, "w", encoding="utf-8") as f:
+ content = json.dumps(index, indent=2, sort_keys=True) + "\n"
+ f.write(content)
+
+
+def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
+ """
+ 1. Put the provided model to cpu
+ 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
+ 3. Load it into the provided model
+
+ Args:
+ - ``model``: the model object to update
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+
+ Returns:
+ - ``model``: modified model
+
+ Make sure you have plenty of CPU memory available before you call this function. If you don't
+ have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
+ conveniently placed for you in the checkpoint folder.
+
+ A typical usage might be ::
+
+ from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
+ model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
+ # submit to model hub or save the model to share with others
+
+ Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context
+ of the same application. i.e. you will need to re-initialize the deepspeed engine, since
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+ """
+ logger.info("Extracting fp32 weights")
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+
+ logger.info("Overwriting model with fp32 weights")
+ model = model.cpu()
+ model.load_state_dict(state_dict, strict=False)
+
+ return model
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("checkpoint_dir",
+ type=str,
+ help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
+ parser.add_argument("output_dir",
+ type=str,
+ help="directory to the pytorch fp32 state_dict output files"
+ "(e.g. path/checkpoint-12-output/)")
+ parser.add_argument(
+ "--max_shard_size",
+ type=str,
+ default="5GB",
+ help="The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size"
+ "lower than this size. If expressed as a string, needs to be digits followed by a unit (like `5MB`"
+ "We default it to 5GB in order for models to be able to run easily on free-tier google colab instances"
+ "without CPU OOM issues.")
+ parser.add_argument(
+ "--safe_serialization",
+ default=False,
+ action='store_true',
+ help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).")
+ parser.add_argument("-t",
+ "--tag",
+ type=str,
+ default=None,
+ help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
+ parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
+ parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
+ args = parser.parse_args()
+
+ debug = args.debug
+
+ convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
+ args.output_dir,
+ max_shard_size=args.max_shard_size,
+ safe_serialization=args.safe_serialization,
+ tag=args.tag,
+ exclude_frozen_parameters=args.exclude_frozen_parameters)
diff --git a/checkpoint-300/README.md b/checkpoint-300/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..7f664a3ae5272e4eb0e149c4adcd2f8c4eb45f00
--- /dev/null
+++ b/checkpoint-300/README.md
@@ -0,0 +1,207 @@
+---
+base_model: /mnt/phwfile/datafrontier/chenyang/data/models/Eagle-X5-7B
+library_name: peft
+pipeline_tag: text-generation
+tags:
+- base_model:adapter:/mnt/phwfile/datafrontier/chenyang/data/models/Eagle-X5-7B
+- lora
+- transformers
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+### Framework versions
+
+- PEFT 0.17.1
\ No newline at end of file
diff --git a/checkpoint-300/adapter_config.json b/checkpoint-300/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..35553142128a486f364ea05de87725530372c975
--- /dev/null
+++ b/checkpoint-300/adapter_config.json
@@ -0,0 +1,42 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "/mnt/phwfile/datafrontier/chenyang/data/models/Eagle-X5-7B",
+ "bias": "none",
+ "corda_config": null,
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 128,
+ "lora_bias": false,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "qalora_group_size": 16,
+ "r": 64,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "down_proj",
+ "gate_proj",
+ "o_proj",
+ "k_proj",
+ "v_proj",
+ "up_proj"
+ ],
+ "target_parameters": null,
+ "task_type": "CAUSAL_LM",
+ "trainable_token_indices": null,
+ "use_dora": false,
+ "use_qalora": false,
+ "use_rslora": false
+}
\ No newline at end of file
diff --git a/checkpoint-300/adapter_model.safetensors b/checkpoint-300/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..0f52d876c630ec8c70c2146dbd88e4f82d528364
--- /dev/null
+++ b/checkpoint-300/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2f3d8836d63a7b223d474b46eca6e48ebf21c4231acf477ac23d86b0770a0bc1
+size 357679240
diff --git a/checkpoint-300/global_step300/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt b/checkpoint-300/global_step300/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..1be99e45cc48665666d42557630b6000370037d3
--- /dev/null
+++ b/checkpoint-300/global_step300/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ae8bdb68e426be27d52bc40f6c7734506d56561d4f28c3155cc99d64aaed3f0c
+size 340551088
diff --git a/checkpoint-300/global_step300/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt b/checkpoint-300/global_step300/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..9ccedb38d0a4811c3767ac12a7b6f013f6f52c99
--- /dev/null
+++ b/checkpoint-300/global_step300/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af699be44475a77b83de6d2e860ff573f7d0da66f34c1097ccd8cd1541570412
+size 340550896
diff --git a/checkpoint-300/global_step300/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt b/checkpoint-300/global_step300/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..5b83e5d48e79a667f5fc6215c4f4085e77ed55c7
--- /dev/null
+++ b/checkpoint-300/global_step300/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:979c6578785f5aa4e910f421226eaed4f8a4ed3c5d3631701ffa8df4496fb37f
+size 340550832
diff --git a/checkpoint-300/global_step300/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt b/checkpoint-300/global_step300/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..24136c9e10afc400c8bfd755ec976729a9a3cb0e
--- /dev/null
+++ b/checkpoint-300/global_step300/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c491d1e92788769be26e8749f08978b8d1f026b77fa0f4b081fb412c7bb19c69
+size 340551216
diff --git a/checkpoint-300/global_step300/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt b/checkpoint-300/global_step300/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..7adedf02324762b9a29dfa6b173299631dc8c5ef
--- /dev/null
+++ b/checkpoint-300/global_step300/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f43d5ef9448f65336c8d52f63907ee1addfbbca8837de01a06b25ab91affb384
+size 340551088
diff --git a/checkpoint-300/global_step300/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt b/checkpoint-300/global_step300/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..0429ecc9c55644f174fc312a8be9a1d7d22244db
--- /dev/null
+++ b/checkpoint-300/global_step300/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:15b46d5b9193198687615f40923e80d30241f13237a3407849020b926339ef33
+size 340574832
diff --git a/checkpoint-300/global_step300/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt b/checkpoint-300/global_step300/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..119105bea1c2af5be4ce9de2f8b1401d00cf73a9
--- /dev/null
+++ b/checkpoint-300/global_step300/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b100579c5559f5442bfd856aff0ff32fb605b4d60b22a0d924d806eef61bfcf9
+size 340561584
diff --git a/checkpoint-300/global_step300/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt b/checkpoint-300/global_step300/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..586c2c87011866fc6e8de3ca7570eab87df76800
--- /dev/null
+++ b/checkpoint-300/global_step300/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:079469ea3870985f7b2d7d8dde8a47454bd2e8ebca34a38014a08d75f669e8c1
+size 340542512
diff --git a/checkpoint-300/global_step300/mp_rank_00_model_states.pt b/checkpoint-300/global_step300/mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..6f06a23d604417f14a88bc583f4c64095fc37fda
--- /dev/null
+++ b/checkpoint-300/global_step300/mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7846d0da81fcc8db7740fdd6e706c9fd1c9095cf914f886c73ae2d77d1aa307b
+size 456033832
diff --git a/checkpoint-300/latest b/checkpoint-300/latest
new file mode 100644
index 0000000000000000000000000000000000000000..6761b575fffac7f1984044dcb6446b3a51da04c8
--- /dev/null
+++ b/checkpoint-300/latest
@@ -0,0 +1 @@
+global_step300
\ No newline at end of file
diff --git a/checkpoint-300/rng_state_0.pth b/checkpoint-300/rng_state_0.pth
new file mode 100644
index 0000000000000000000000000000000000000000..25ea808ceefef8734bffcd6bae86d74483044b14
--- /dev/null
+++ b/checkpoint-300/rng_state_0.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5f4b040a1a2bc6d5ca04dc31738e13493abf50ad66c6600292fe15b57ba6e6f0
+size 15984
diff --git a/checkpoint-300/rng_state_1.pth b/checkpoint-300/rng_state_1.pth
new file mode 100644
index 0000000000000000000000000000000000000000..a62aacd2438129100470b219d333462f4fb42d2b
--- /dev/null
+++ b/checkpoint-300/rng_state_1.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:777f6a9a9358856e73cce63e054817d06a51f84431629d7455d30e90ef27739c
+size 15984
diff --git a/checkpoint-300/rng_state_2.pth b/checkpoint-300/rng_state_2.pth
new file mode 100644
index 0000000000000000000000000000000000000000..73e46c2ed582d576c0a21a8c71efb30b5e0116f2
--- /dev/null
+++ b/checkpoint-300/rng_state_2.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dff9cb7f1444dacd48c361accf8529ec77c5ea69b1a427a13bf10e9be98c401d
+size 15984
diff --git a/checkpoint-300/rng_state_3.pth b/checkpoint-300/rng_state_3.pth
new file mode 100644
index 0000000000000000000000000000000000000000..0bb5cff5dcc0bf2a657253f93d853a9a160403e0
--- /dev/null
+++ b/checkpoint-300/rng_state_3.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:50a7f7cda1e602e5ac6d919b01c0313a412b2aa0b6a7428a8a5f258c8a9d0944
+size 15984
diff --git a/checkpoint-300/rng_state_4.pth b/checkpoint-300/rng_state_4.pth
new file mode 100644
index 0000000000000000000000000000000000000000..5176dde55eae42ed2b4d42723e5c9bc35adebe8c
--- /dev/null
+++ b/checkpoint-300/rng_state_4.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a4a301937e30a7b2a6c263ec30be34a88b824d9516c6fa76751ece4dfcec1c93
+size 15984
diff --git a/checkpoint-300/rng_state_5.pth b/checkpoint-300/rng_state_5.pth
new file mode 100644
index 0000000000000000000000000000000000000000..2ef76b612e3ed92025e7957523511d558ccbffd3
--- /dev/null
+++ b/checkpoint-300/rng_state_5.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:64535b1867e827c5433ced2f1315edbd0b18cb26197fc06a45ea9efc2a95589b
+size 15984
diff --git a/checkpoint-300/rng_state_6.pth b/checkpoint-300/rng_state_6.pth
new file mode 100644
index 0000000000000000000000000000000000000000..9b36c2ab387d4527b9f40668ff7e01b05aed2181
--- /dev/null
+++ b/checkpoint-300/rng_state_6.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6fb1dddf6bcc410c1fff1c56ae81f727c9abdb3789cb23ca6d0a2ba0e0bed636
+size 15984
diff --git a/checkpoint-300/rng_state_7.pth b/checkpoint-300/rng_state_7.pth
new file mode 100644
index 0000000000000000000000000000000000000000..8d74ea9ccc8de68be95f1f86cef2b72dd3b6f6fc
--- /dev/null
+++ b/checkpoint-300/rng_state_7.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8db883fc31bf7d15e64a3d8f8f4746f73fbb33b2510b00740ec9b43ad6c7ef8c
+size 15984
diff --git a/checkpoint-300/scheduler.pt b/checkpoint-300/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..13986fd8613abca87ab3b0a37fae4692e08907d1
--- /dev/null
+++ b/checkpoint-300/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:74485e67705dc36efbfb69b1e54f842e1ff07894d01bb0e36d6d2526a318b300
+size 1064
diff --git a/checkpoint-300/special_tokens_map.json b/checkpoint-300/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..14761dcf1466dc232bd41de9c21d4c617b15755e
--- /dev/null
+++ b/checkpoint-300/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "<unk>",
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/checkpoint-300/tokenizer.model b/checkpoint-300/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..6c00c742ce03c627d6cd5b795984876fa49fa899
--- /dev/null
+++ b/checkpoint-300/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
diff --git a/checkpoint-300/tokenizer_config.json b/checkpoint-300/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..26c65df1bf794f101c1dd54c908180dc0d880fe3
--- /dev/null
+++ b/checkpoint-300/tokenizer_config.json
@@ -0,0 +1,43 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": true,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "<s>",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "</s>",
+ "legacy": false,
+ "model_max_length": 2048,
+ "pad_token": "<unk>",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "<unk>",
+ "use_default_system_prompt": false
+}
diff --git a/checkpoint-300/trainer_state.json b/checkpoint-300/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..c7c9d531aaaa3b43a4ad7867e057a3bd017eb97c
--- /dev/null
+++ b/checkpoint-300/trainer_state.json
@@ -0,0 +1,2133 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 4.0,
+ "eval_steps": 500,
+ "global_step": 300,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.013333333333333334,
+ "grad_norm": 1.0028682947158813,
+ "learning_rate": 2.2222222222222223e-05,
+ "loss": 1.43,
+ "step": 1
+ },
+ {
+ "epoch": 0.02666666666666667,
+ "grad_norm": 1.6397583484649658,
+ "learning_rate": 4.4444444444444447e-05,
+ "loss": 1.5374,
+ "step": 2
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 1.2198785543441772,
+ "learning_rate": 6.666666666666667e-05,
+ "loss": 1.5106,
+ "step": 3
+ },
+ {
+ "epoch": 0.05333333333333334,
+ "grad_norm": 0.8468486070632935,
+ "learning_rate": 8.888888888888889e-05,
+ "loss": 1.447,
+ "step": 4
+ },
+ {
+ "epoch": 0.06666666666666667,
+ "grad_norm": 0.9268608689308167,
+ "learning_rate": 0.00011111111111111112,
+ "loss": 1.4743,
+ "step": 5
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 1.0168085098266602,
+ "learning_rate": 0.00013333333333333334,
+ "loss": 1.3245,
+ "step": 6
+ },
+ {
+ "epoch": 0.09333333333333334,
+ "grad_norm": 0.6773934960365295,
+ "learning_rate": 0.00015555555555555556,
+ "loss": 1.3738,
+ "step": 7
+ },
+ {
+ "epoch": 0.10666666666666667,
+ "grad_norm": 0.6985631585121155,
+ "learning_rate": 0.00017777777777777779,
+ "loss": 1.3951,
+ "step": 8
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 1.0221399068832397,
+ "learning_rate": 0.0002,
+ "loss": 1.3246,
+ "step": 9
+ },
+ {
+ "epoch": 0.13333333333333333,
+ "grad_norm": 0.6119747161865234,
+ "learning_rate": 0.00019999417253661235,
+ "loss": 1.2951,
+ "step": 10
+ },
+ {
+ "epoch": 0.14666666666666667,
+ "grad_norm": 0.6660990118980408,
+ "learning_rate": 0.00019997669082563597,
+ "loss": 1.2529,
+ "step": 11
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.5874819755554199,
+ "learning_rate": 0.00019994755690455152,
+ "loss": 1.2252,
+ "step": 12
+ },
+ {
+ "epoch": 0.17333333333333334,
+ "grad_norm": 0.4818006157875061,
+ "learning_rate": 0.00019990677416889608,
+ "loss": 1.1708,
+ "step": 13
+ },
+ {
+ "epoch": 0.18666666666666668,
+ "grad_norm": 0.652045488357544,
+ "learning_rate": 0.0001998543473718677,
+ "loss": 0.9865,
+ "step": 14
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.5517733693122864,
+ "learning_rate": 0.00019979028262377118,
+ "loss": 1.181,
+ "step": 15
+ },
+ {
+ "epoch": 0.21333333333333335,
+ "grad_norm": 0.47720542550086975,
+ "learning_rate": 0.00019971458739130598,
+ "loss": 1.0633,
+ "step": 16
+ },
+ {
+ "epoch": 0.22666666666666666,
+ "grad_norm": 0.7947096228599548,
+ "learning_rate": 0.000199627270496696,
+ "loss": 1.1458,
+ "step": 17
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.5301257371902466,
+ "learning_rate": 0.0001995283421166614,
+ "loss": 1.2245,
+ "step": 18
+ },
+ {
+ "epoch": 0.25333333333333335,
+ "grad_norm": 0.9473271369934082,
+ "learning_rate": 0.00019941781378123244,
+ "loss": 1.0859,
+ "step": 19
+ },
+ {
+ "epoch": 0.26666666666666666,
+ "grad_norm": 1.1834161281585693,
+ "learning_rate": 0.00019929569837240564,
+ "loss": 1.1808,
+ "step": 20
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.6784033179283142,
+ "learning_rate": 0.00019916201012264254,
+ "loss": 1.202,
+ "step": 21
+ },
+ {
+ "epoch": 0.29333333333333333,
+ "grad_norm": 0.7274785041809082,
+ "learning_rate": 0.00019901676461321068,
+ "loss": 1.2242,
+ "step": 22
+ },
+ {
+ "epoch": 0.30666666666666664,
+ "grad_norm": 0.7520783543586731,
+ "learning_rate": 0.00019885997877236788,
+ "loss": 1.173,
+ "step": 23
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.8218541145324707,
+ "learning_rate": 0.00019869167087338907,
+ "loss": 1.0723,
+ "step": 24
+ },
+ {
+ "epoch": 0.3333333333333333,
+ "grad_norm": 0.6361420154571533,
+ "learning_rate": 0.00019851186053243666,
+ "loss": 1.1607,
+ "step": 25
+ },
+ {
+ "epoch": 0.3466666666666667,
+ "grad_norm": 0.6401374936103821,
+ "learning_rate": 0.00019832056870627417,
+ "loss": 1.1398,
+ "step": 26
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 1.0995603799819946,
+ "learning_rate": 0.0001981178176898239,
+ "loss": 0.9475,
+ "step": 27
+ },
+ {
+ "epoch": 0.37333333333333335,
+ "grad_norm": 0.5122275948524475,
+ "learning_rate": 0.00019790363111356837,
+ "loss": 1.0125,
+ "step": 28
+ },
+ {
+ "epoch": 0.38666666666666666,
+ "grad_norm": 0.9316695332527161,
+ "learning_rate": 0.00019767803394079615,
+ "loss": 1.0554,
+ "step": 29
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.6831843256950378,
+ "learning_rate": 0.00019744105246469263,
+ "loss": 1.027,
+ "step": 30
+ },
+ {
+ "epoch": 0.41333333333333333,
+ "grad_norm": 0.5529218912124634,
+ "learning_rate": 0.0001971927143052752,
+ "loss": 1.0218,
+ "step": 31
+ },
+ {
+ "epoch": 0.4266666666666667,
+ "grad_norm": 0.7164443135261536,
+ "learning_rate": 0.00019693304840617457,
+ "loss": 1.0901,
+ "step": 32
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.7430665493011475,
+ "learning_rate": 0.00019666208503126112,
+ "loss": 1.125,
+ "step": 33
+ },
+ {
+ "epoch": 0.4533333333333333,
+ "grad_norm": 0.7198122143745422,
+ "learning_rate": 0.00019637985576111778,
+ "loss": 0.9926,
+ "step": 34
+ },
+ {
+ "epoch": 0.4666666666666667,
+ "grad_norm": 0.6613873839378357,
+ "learning_rate": 0.0001960863934893594,
+ "loss": 1.1925,
+ "step": 35
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 1.2440484762191772,
+ "learning_rate": 0.00019578173241879872,
+ "loss": 0.8948,
+ "step": 36
+ },
+ {
+ "epoch": 0.49333333333333335,
+ "grad_norm": 0.6995570659637451,
+ "learning_rate": 0.00019546590805746052,
+ "loss": 0.9525,
+ "step": 37
+ },
+ {
+ "epoch": 0.5066666666666667,
+ "grad_norm": 0.6873968243598938,
+ "learning_rate": 0.00019513895721444286,
+ "loss": 0.9966,
+ "step": 38
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.5394595861434937,
+ "learning_rate": 0.00019480091799562704,
+ "loss": 1.0787,
+ "step": 39
+ },
+ {
+ "epoch": 0.5333333333333333,
+ "grad_norm": 0.9196312427520752,
+ "learning_rate": 0.00019445182979923654,
+ "loss": 1.0559,
+ "step": 40
+ },
+ {
+ "epoch": 0.5466666666666666,
+ "grad_norm": 2.5954103469848633,
+ "learning_rate": 0.000194091733311245,
+ "loss": 0.9638,
+ "step": 41
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 1.244681715965271,
+ "learning_rate": 0.00019372067050063438,
+ "loss": 0.9325,
+ "step": 42
+ },
+ {
+ "epoch": 0.5733333333333334,
+ "grad_norm": 0.613715410232544,
+ "learning_rate": 0.0001933386846145036,
+ "loss": 0.9428,
+ "step": 43
+ },
+ {
+ "epoch": 0.5866666666666667,
+ "grad_norm": 0.9604235291481018,
+ "learning_rate": 0.00019294582017302797,
+ "loss": 0.9486,
+ "step": 44
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.7591239809989929,
+ "learning_rate": 0.00019254212296427044,
+ "loss": 1.0955,
+ "step": 45
+ },
+ {
+ "epoch": 0.6133333333333333,
+ "grad_norm": 0.5218423008918762,
+ "learning_rate": 0.0001921276400388451,
+ "loss": 1.0368,
+ "step": 46
+ },
+ {
+ "epoch": 0.6266666666666667,
+ "grad_norm": 0.5670755505561829,
+ "learning_rate": 0.00019170241970443343,
+ "loss": 0.9854,
+ "step": 47
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.8089922070503235,
+ "learning_rate": 0.00019126651152015403,
+ "loss": 1.0396,
+ "step": 48
+ },
+ {
+ "epoch": 0.6533333333333333,
+ "grad_norm": 0.6459051966667175,
+ "learning_rate": 0.00019081996629078657,
+ "loss": 1.0156,
+ "step": 49
+ },
+ {
+ "epoch": 0.6666666666666666,
+ "grad_norm": 1.1918997764587402,
+ "learning_rate": 0.00019036283606085053,
+ "loss": 1.1542,
+ "step": 50
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.6263722777366638,
+ "learning_rate": 0.00018989517410853955,
+ "loss": 1.1471,
+ "step": 51
+ },
+ {
+ "epoch": 0.6933333333333334,
+ "grad_norm": 0.534618616104126,
+ "learning_rate": 0.00018941703493951164,
+ "loss": 0.9055,
+ "step": 52
+ },
+ {
+ "epoch": 0.7066666666666667,
+ "grad_norm": 0.529036819934845,
+ "learning_rate": 0.00018892847428053693,
+ "loss": 0.9896,
+ "step": 53
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.716572105884552,
+ "learning_rate": 0.00018842954907300236,
+ "loss": 1.0204,
+ "step": 54
+ },
+ {
+ "epoch": 0.7333333333333333,
+ "grad_norm": 0.832662045955658,
+ "learning_rate": 0.00018792031746627563,
+ "loss": 1.0263,
+ "step": 55
+ },
+ {
+ "epoch": 0.7466666666666667,
+ "grad_norm": 0.5659884810447693,
+ "learning_rate": 0.0001874008388109276,
+ "loss": 1.0529,
+ "step": 56
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.4971260726451874,
+ "learning_rate": 0.00018687117365181512,
+ "loss": 1.0109,
+ "step": 57
+ },
+ {
+ "epoch": 0.7733333333333333,
+ "grad_norm": 0.5997689962387085,
+ "learning_rate": 0.00018633138372102468,
+ "loss": 1.0363,
+ "step": 58
+ },
+ {
+ "epoch": 0.7866666666666666,
+ "grad_norm": 0.42450904846191406,
+ "learning_rate": 0.00018578153193067745,
+ "loss": 1.0156,
+ "step": 59
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 1.0025880336761475,
+ "learning_rate": 0.00018522168236559695,
+ "loss": 0.96,
+ "step": 60
+ },
+ {
+ "epoch": 0.8133333333333334,
+ "grad_norm": 0.47225672006607056,
+ "learning_rate": 0.00018465190027584005,
+ "loss": 1.0402,
+ "step": 61
+ },
+ {
+ "epoch": 0.8266666666666667,
+ "grad_norm": 0.6419042348861694,
+ "learning_rate": 0.00018407225206909208,
+ "loss": 0.9727,
+ "step": 62
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.9594618082046509,
+ "learning_rate": 0.00018348280530292713,
+ "loss": 0.986,
+ "step": 63
+ },
+ {
+ "epoch": 0.8533333333333334,
+ "grad_norm": 0.5415605902671814,
+ "learning_rate": 0.00018288362867693414,
+ "loss": 1.019,
+ "step": 64
+ },
+ {
+ "epoch": 0.8666666666666667,
+ "grad_norm": 0.9662086367607117,
+ "learning_rate": 0.00018227479202471015,
+ "loss": 0.9531,
+ "step": 65
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.7523136734962463,
+ "learning_rate": 0.0001816563663057211,
+ "loss": 1.0383,
+ "step": 66
+ },
+ {
+ "epoch": 0.8933333333333333,
+ "grad_norm": 0.7249945998191833,
+ "learning_rate": 0.00018102842359703176,
+ "loss": 1.1131,
+ "step": 67
+ },
+ {
+ "epoch": 0.9066666666666666,
+ "grad_norm": 0.4781404435634613,
+ "learning_rate": 0.000180391037084905,
+ "loss": 0.9701,
+ "step": 68
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.5435504913330078,
+ "learning_rate": 0.00017974428105627208,
+ "loss": 1.2701,
+ "step": 69
+ },
+ {
+ "epoch": 0.9333333333333333,
+ "grad_norm": 0.48021838068962097,
+ "learning_rate": 0.00017908823089007457,
+ "loss": 0.9212,
+ "step": 70
+ },
+ {
+ "epoch": 0.9466666666666667,
+ "grad_norm": 0.7063950300216675,
+ "learning_rate": 0.00017842296304847893,
+ "loss": 1.0076,
+ "step": 71
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.5694530606269836,
+ "learning_rate": 0.00017774855506796496,
+ "loss": 1.0417,
+ "step": 72
+ },
+ {
+ "epoch": 0.9733333333333334,
+ "grad_norm": 0.6120775938034058,
+ "learning_rate": 0.00017706508555028893,
+ "loss": 1.0501,
+ "step": 73
+ },
+ {
+ "epoch": 0.9866666666666667,
+ "grad_norm": 0.5728889107704163,
+ "learning_rate": 0.0001763726341533227,
+ "loss": 1.0024,
+ "step": 74
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.8452621102333069,
+ "learning_rate": 0.00017567128158176953,
+ "loss": 1.0966,
+ "step": 75
+ },
+ {
+ "epoch": 1.0133333333333334,
+ "grad_norm": 0.5769606828689575,
+ "learning_rate": 0.0001749611095777581,
+ "loss": 0.8877,
+ "step": 76
+ },
+ {
+ "epoch": 1.0266666666666666,
+ "grad_norm": 0.9046480059623718,
+ "learning_rate": 0.00017424220091131535,
+ "loss": 0.752,
+ "step": 77
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.4488053023815155,
+ "learning_rate": 0.00017351463937072004,
+ "loss": 0.7588,
+ "step": 78
+ },
+ {
+ "epoch": 1.0533333333333332,
+ "grad_norm": 0.41479629278182983,
+ "learning_rate": 0.00017277850975273696,
+ "loss": 0.779,
+ "step": 79
+ },
+ {
+ "epoch": 1.0666666666666667,
+ "grad_norm": 0.7192550301551819,
+ "learning_rate": 0.000172033897852734,
+ "loss": 0.7809,
+ "step": 80
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.7553783655166626,
+ "learning_rate": 0.00017128089045468294,
+ "loss": 0.7421,
+ "step": 81
+ },
+ {
+ "epoch": 1.0933333333333333,
+ "grad_norm": 0.5650737881660461,
+ "learning_rate": 0.0001705195753210446,
+ "loss": 0.7044,
+ "step": 82
+ },
+ {
+ "epoch": 1.1066666666666667,
+ "grad_norm": 0.6692880988121033,
+ "learning_rate": 0.0001697500411825403,
+ "loss": 0.8415,
+ "step": 83
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.6710836291313171,
+ "learning_rate": 0.00016897237772781044,
+ "loss": 0.7247,
+ "step": 84
+ },
+ {
+ "epoch": 1.1333333333333333,
+ "grad_norm": 0.5887194275856018,
+ "learning_rate": 0.0001681866755929612,
+ "loss": 0.7947,
+ "step": 85
+ },
+ {
+ "epoch": 1.1466666666666667,
+ "grad_norm": 0.5538906455039978,
+ "learning_rate": 0.00016739302635100108,
+ "loss": 0.7337,
+ "step": 86
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.6018480062484741,
+ "learning_rate": 0.00016659152250116812,
+ "loss": 0.6801,
+ "step": 87
+ },
+ {
+ "epoch": 1.1733333333333333,
+ "grad_norm": 0.548251748085022,
+ "learning_rate": 0.00016578225745814907,
+ "loss": 0.6898,
+ "step": 88
+ },
+ {
+ "epoch": 1.1866666666666668,
+ "grad_norm": 0.49416568875312805,
+ "learning_rate": 0.00016496532554119214,
+ "loss": 0.719,
+ "step": 89
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.6101306676864624,
+ "learning_rate": 0.000164140821963114,
+ "loss": 0.7917,
+ "step": 90
+ },
+ {
+ "epoch": 1.2133333333333334,
+ "grad_norm": 0.588020920753479,
+ "learning_rate": 0.000163308842819203,
+ "loss": 0.8035,
+ "step": 91
+ },
+ {
+ "epoch": 1.2266666666666666,
+ "grad_norm": 0.5376534461975098,
+ "learning_rate": 0.00016246948507601914,
+ "loss": 0.7816,
+ "step": 92
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.549625813961029,
+ "learning_rate": 0.00016162284656009274,
+ "loss": 0.7245,
+ "step": 93
+ },
+ {
+ "epoch": 1.2533333333333334,
+ "grad_norm": 0.6054933667182922,
+ "learning_rate": 0.0001607690259465229,
+ "loss": 0.7484,
+ "step": 94
+ },
+ {
+ "epoch": 1.2666666666666666,
+ "grad_norm": 0.5613316297531128,
+ "learning_rate": 0.00015990812274747692,
+ "loss": 0.7369,
+ "step": 95
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.5984250903129578,
+ "learning_rate": 0.00015904023730059228,
+ "loss": 0.891,
+ "step": 96
+ },
+ {
+ "epoch": 1.2933333333333334,
+ "grad_norm": 0.6490495204925537,
+ "learning_rate": 0.00015816547075728226,
+ "loss": 0.775,
+ "step": 97
+ },
+ {
+ "epoch": 1.3066666666666666,
+ "grad_norm": 0.6675053834915161,
+ "learning_rate": 0.000157283925070947,
+ "loss": 0.7565,
+ "step": 98
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.5397291779518127,
+ "learning_rate": 0.00015639570298509064,
+ "loss": 0.6685,
+ "step": 99
+ },
+ {
+ "epoch": 1.3333333333333333,
+ "grad_norm": 0.5830179452896118,
+ "learning_rate": 0.000155500908021347,
+ "loss": 0.8132,
+ "step": 100
+ },
+ {
+ "epoch": 1.3466666666666667,
+ "grad_norm": 0.511802077293396,
+ "learning_rate": 0.00015459964446741382,
+ "loss": 0.7664,
+ "step": 101
+ },
+ {
+ "epoch": 1.3599999999999999,
+ "grad_norm": 1.350032091140747,
+ "learning_rate": 0.0001536920173648984,
+ "loss": 0.8179,
+ "step": 102
+ },
+ {
+ "epoch": 1.3733333333333333,
+ "grad_norm": 0.7308780550956726,
+ "learning_rate": 0.00015277813249707487,
+ "loss": 0.8401,
+ "step": 103
+ },
+ {
+ "epoch": 1.3866666666666667,
+ "grad_norm": 0.5292226076126099,
+ "learning_rate": 0.0001518580963765555,
+ "loss": 0.6367,
+ "step": 104
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.6958481073379517,
+ "learning_rate": 0.00015093201623287631,
+ "loss": 0.7173,
+ "step": 105
+ },
+ {
+ "epoch": 1.4133333333333333,
+ "grad_norm": 0.7024071216583252,
+ "learning_rate": 0.00015000000000000001,
+ "loss": 0.5604,
+ "step": 106
+ },
+ {
+ "epoch": 1.4266666666666667,
+ "grad_norm": 0.5597444772720337,
+ "learning_rate": 0.00014906215630373606,
+ "loss": 0.6767,
+ "step": 107
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.6003674864768982,
+ "learning_rate": 0.00014811859444908052,
+ "loss": 0.8149,
+ "step": 108
+ },
+ {
+ "epoch": 1.4533333333333334,
+ "grad_norm": 0.5815126895904541,
+ "learning_rate": 0.00014716942440747664,
+ "loss": 0.7801,
+ "step": 109
+ },
+ {
+ "epoch": 1.4666666666666668,
+ "grad_norm": 0.7836669683456421,
+ "learning_rate": 0.0001462147568039977,
+ "loss": 0.8452,
+ "step": 110
+ },
+ {
+ "epoch": 1.48,
+ "grad_norm": 0.8783419132232666,
+ "learning_rate": 0.00014525470290445392,
+ "loss": 0.8257,
+ "step": 111
+ },
+ {
+ "epoch": 1.4933333333333334,
+ "grad_norm": 0.46948131918907166,
+ "learning_rate": 0.00014428937460242417,
+ "loss": 0.7481,
+ "step": 112
+ },
+ {
+ "epoch": 1.5066666666666668,
+ "grad_norm": 0.5725980401039124,
+ "learning_rate": 0.00014331888440621533,
+ "loss": 0.8267,
+ "step": 113
+ },
+ {
+ "epoch": 1.52,
+ "grad_norm": 0.4418632686138153,
+ "learning_rate": 0.00014234334542574906,
+ "loss": 0.7631,
+ "step": 114
+ },
+ {
+ "epoch": 1.5333333333333332,
+ "grad_norm": 0.6430942416191101,
+ "learning_rate": 0.00014136287135937915,
+ "loss": 0.8817,
+ "step": 115
+ },
+ {
+ "epoch": 1.5466666666666666,
+ "grad_norm": 0.5670009255409241,
+ "learning_rate": 0.00014037757648064018,
+ "loss": 0.5991,
+ "step": 116
+ },
+ {
+ "epoch": 1.56,
+ "grad_norm": 0.5407504439353943,
+ "learning_rate": 0.00013938757562492873,
+ "loss": 0.6898,
+ "step": 117
+ },
+ {
+ "epoch": 1.5733333333333333,
+ "grad_norm": 0.5176808834075928,
+ "learning_rate": 0.00013839298417611963,
+ "loss": 0.731,
+ "step": 118
+ },
+ {
+ "epoch": 1.5866666666666667,
+ "grad_norm": 0.9752798080444336,
+ "learning_rate": 0.00013739391805311793,
+ "loss": 0.6736,
+ "step": 119
+ },
+ {
+ "epoch": 1.6,
+ "grad_norm": 0.7100059390068054,
+ "learning_rate": 0.00013639049369634876,
+ "loss": 0.7369,
+ "step": 120
+ },
+ {
+ "epoch": 1.6133333333333333,
+ "grad_norm": 0.6285961270332336,
+ "learning_rate": 0.0001353828280541861,
+ "loss": 0.721,
+ "step": 121
+ },
+ {
+ "epoch": 1.6266666666666667,
+ "grad_norm": 0.5981026291847229,
+ "learning_rate": 0.00013437103856932264,
+ "loss": 0.8094,
+ "step": 122
+ },
+ {
+ "epoch": 1.6400000000000001,
+ "grad_norm": 0.6587502360343933,
+ "learning_rate": 0.00013335524316508208,
+ "loss": 0.7646,
+ "step": 123
+ },
+ {
+ "epoch": 1.6533333333333333,
+ "grad_norm": 0.5544253587722778,
+ "learning_rate": 0.00013233556023167485,
+ "loss": 0.7165,
+ "step": 124
+ },
+ {
+ "epoch": 1.6666666666666665,
+ "grad_norm": 0.6012857556343079,
+ "learning_rate": 0.00013131210861240026,
+ "loss": 0.8104,
+ "step": 125
+ },
+ {
+ "epoch": 1.6800000000000002,
+ "grad_norm": 0.5157524347305298,
+ "learning_rate": 0.00013028500758979506,
+ "loss": 0.8585,
+ "step": 126
+ },
+ {
+ "epoch": 1.6933333333333334,
+ "grad_norm": 0.4888676702976227,
+ "learning_rate": 0.00012925437687173142,
+ "loss": 0.6579,
+ "step": 127
+ },
+ {
+ "epoch": 1.7066666666666666,
+ "grad_norm": 0.5127140879631042,
+ "learning_rate": 0.00012822033657746478,
+ "loss": 0.7432,
+ "step": 128
+ },
+ {
+ "epoch": 1.72,
+ "grad_norm": 0.6154641509056091,
+ "learning_rate": 0.0001271830072236343,
+ "loss": 0.7322,
+ "step": 129
+ },
+ {
+ "epoch": 1.7333333333333334,
+ "grad_norm": 0.5081548690795898,
+ "learning_rate": 0.00012614250971021657,
+ "loss": 0.7547,
+ "step": 130
+ },
+ {
+ "epoch": 1.7466666666666666,
+ "grad_norm": 0.6808217763900757,
+ "learning_rate": 0.00012509896530643488,
+ "loss": 0.6855,
+ "step": 131
+ },
+ {
+ "epoch": 1.76,
+ "grad_norm": 0.8672941327095032,
+ "learning_rate": 0.00012405249563662537,
+ "loss": 0.6332,
+ "step": 132
+ },
+ {
+ "epoch": 1.7733333333333334,
+ "grad_norm": 0.6130337119102478,
+ "learning_rate": 0.00012300322266606178,
+ "loss": 0.7453,
+ "step": 133
+ },
+ {
+ "epoch": 1.7866666666666666,
+ "grad_norm": 0.739959180355072,
+ "learning_rate": 0.00012195126868674051,
+ "loss": 0.687,
+ "step": 134
+ },
+ {
+ "epoch": 1.8,
+ "grad_norm": 0.5801121592521667,
+ "learning_rate": 0.00012089675630312754,
+ "loss": 0.7059,
+ "step": 135
+ },
+ {
+ "epoch": 1.8133333333333335,
+ "grad_norm": 0.5766938328742981,
+ "learning_rate": 0.000119839808417869,
+ "loss": 0.737,
+ "step": 136
+ },
+ {
+ "epoch": 1.8266666666666667,
+ "grad_norm": 0.6705268621444702,
+ "learning_rate": 0.00011878054821746703,
+ "loss": 0.7358,
+ "step": 137
+ },
+ {
+ "epoch": 1.8399999999999999,
+ "grad_norm": 0.7814889550209045,
+ "learning_rate": 0.0001177190991579223,
+ "loss": 0.7021,
+ "step": 138
+ },
+ {
+ "epoch": 1.8533333333333335,
+ "grad_norm": 0.6991515755653381,
+ "learning_rate": 0.00011665558495034546,
+ "loss": 0.7325,
+ "step": 139
+ },
+ {
+ "epoch": 1.8666666666666667,
+ "grad_norm": 0.8299288749694824,
+ "learning_rate": 0.00011559012954653865,
+ "loss": 0.7128,
+ "step": 140
+ },
+ {
+ "epoch": 1.88,
+ "grad_norm": 0.7293754816055298,
+ "learning_rate": 0.00011452285712454904,
+ "loss": 0.5813,
+ "step": 141
+ },
+ {
+ "epoch": 1.8933333333333333,
+ "grad_norm": 0.6560428738594055,
+ "learning_rate": 0.00011345389207419588,
+ "loss": 0.6352,
+ "step": 142
+ },
+ {
+ "epoch": 1.9066666666666667,
+ "grad_norm": 0.54889976978302,
+ "learning_rate": 0.00011238335898257304,
+ "loss": 0.7372,
+ "step": 143
+ },
+ {
+ "epoch": 1.92,
+ "grad_norm": 0.5890987515449524,
+ "learning_rate": 0.00011131138261952845,
+ "loss": 0.6402,
+ "step": 144
+ },
+ {
+ "epoch": 1.9333333333333333,
+ "grad_norm": 0.8450446128845215,
+ "learning_rate": 0.00011023808792312227,
+ "loss": 0.7152,
+ "step": 145
+ },
+ {
+ "epoch": 1.9466666666666668,
+ "grad_norm": 0.7649719715118408,
+ "learning_rate": 0.0001091635999850655,
+ "loss": 0.6831,
+ "step": 146
+ },
+ {
+ "epoch": 1.96,
+ "grad_norm": 0.6236613988876343,
+ "learning_rate": 0.00010808804403614043,
+ "loss": 0.6671,
+ "step": 147
+ },
+ {
+ "epoch": 1.9733333333333334,
+ "grad_norm": 0.6295299530029297,
+ "learning_rate": 0.00010701154543160541,
+ "loss": 0.8226,
+ "step": 148
+ },
+ {
+ "epoch": 1.9866666666666668,
+ "grad_norm": 0.641965389251709,
+ "learning_rate": 0.00010593422963658452,
+ "loss": 0.6567,
+ "step": 149
+ },
+ {
+ "epoch": 2.0,
+ "grad_norm": 0.779958188533783,
+ "learning_rate": 0.00010485622221144484,
+ "loss": 0.6346,
+ "step": 150
+ },
+ {
+ "epoch": 2.013333333333333,
+ "grad_norm": 0.6322675347328186,
+ "learning_rate": 0.00010377764879716234,
+ "loss": 0.5576,
+ "step": 151
+ },
+ {
+ "epoch": 2.026666666666667,
+ "grad_norm": 0.7052869200706482,
+ "learning_rate": 0.00010269863510067872,
+ "loss": 0.4362,
+ "step": 152
+ },
+ {
+ "epoch": 2.04,
+ "grad_norm": 0.4991523027420044,
+ "learning_rate": 0.00010161930688025017,
+ "loss": 0.4478,
+ "step": 153
+ },
+ {
+ "epoch": 2.0533333333333332,
+ "grad_norm": 0.4096013903617859,
+ "learning_rate": 0.00010053978993079045,
+ "loss": 0.5258,
+ "step": 154
+ },
+ {
+ "epoch": 2.066666666666667,
+ "grad_norm": 0.4861268103122711,
+ "learning_rate": 9.946021006920959e-05,
+ "loss": 0.5085,
+ "step": 155
+ },
+ {
+ "epoch": 2.08,
+ "grad_norm": 0.5277904272079468,
+ "learning_rate": 9.838069311974986e-05,
+ "loss": 0.4549,
+ "step": 156
+ },
+ {
+ "epoch": 2.0933333333333333,
+ "grad_norm": 0.640762209892273,
+ "learning_rate": 9.730136489932133e-05,
+ "loss": 0.5168,
+ "step": 157
+ },
+ {
+ "epoch": 2.1066666666666665,
+ "grad_norm": 0.6294235587120056,
+ "learning_rate": 9.622235120283769e-05,
+ "loss": 0.3587,
+ "step": 158
+ },
+ {
+ "epoch": 2.12,
+ "grad_norm": 0.6455483436584473,
+ "learning_rate": 9.514377778855521e-05,
+ "loss": 0.5013,
+ "step": 159
+ },
+ {
+ "epoch": 2.1333333333333333,
+ "grad_norm": 0.7052115201950073,
+ "learning_rate": 9.406577036341548e-05,
+ "loss": 0.458,
+ "step": 160
+ },
+ {
+ "epoch": 2.1466666666666665,
+ "grad_norm": 0.6517965197563171,
+ "learning_rate": 9.298845456839459e-05,
+ "loss": 0.4427,
+ "step": 161
+ },
+ {
+ "epoch": 2.16,
+ "grad_norm": 1.0105723142623901,
+ "learning_rate": 9.19119559638596e-05,
+ "loss": 0.5482,
+ "step": 162
+ },
+ {
+ "epoch": 2.1733333333333333,
+ "grad_norm": 0.6122080683708191,
+ "learning_rate": 9.083640001493454e-05,
+ "loss": 0.3708,
+ "step": 163
+ },
+ {
+ "epoch": 2.1866666666666665,
+ "grad_norm": 0.7772620320320129,
+ "learning_rate": 8.976191207687775e-05,
+ "loss": 0.4423,
+ "step": 164
+ },
+ {
+ "epoch": 2.2,
+ "grad_norm": 0.5697551965713501,
+ "learning_rate": 8.868861738047158e-05,
+ "loss": 0.4544,
+ "step": 165
+ },
+ {
+ "epoch": 2.2133333333333334,
+ "grad_norm": 0.6369841694831848,
+ "learning_rate": 8.7616641017427e-05,
+ "loss": 0.4458,
+ "step": 166
+ },
+ {
+ "epoch": 2.2266666666666666,
+ "grad_norm": 0.6867621541023254,
+ "learning_rate": 8.654610792580415e-05,
+ "loss": 0.3971,
+ "step": 167
+ },
+ {
+ "epoch": 2.24,
+ "grad_norm": 0.6379404067993164,
+ "learning_rate": 8.5477142875451e-05,
+ "loss": 0.409,
+ "step": 168
+ },
+ {
+ "epoch": 2.2533333333333334,
+ "grad_norm": 0.5854802131652832,
+ "learning_rate": 8.440987045346134e-05,
+ "loss": 0.4489,
+ "step": 169
+ },
+ {
+ "epoch": 2.2666666666666666,
+ "grad_norm": 0.6577348113059998,
+ "learning_rate": 8.334441504965455e-05,
+ "loss": 0.5224,
+ "step": 170
+ },
+ {
+ "epoch": 2.2800000000000002,
+ "grad_norm": 0.6020484566688538,
+ "learning_rate": 8.228090084207774e-05,
+ "loss": 0.3876,
+ "step": 171
+ },
+ {
+ "epoch": 2.2933333333333334,
+ "grad_norm": 0.5832955241203308,
+ "learning_rate": 8.1219451782533e-05,
+ "loss": 0.4096,
+ "step": 172
+ },
+ {
+ "epoch": 2.3066666666666666,
+ "grad_norm": 0.6917557716369629,
+ "learning_rate": 8.016019158213101e-05,
+ "loss": 0.3827,
+ "step": 173
+ },
+ {
+ "epoch": 2.32,
+ "grad_norm": 0.6793459057807922,
+ "learning_rate": 7.91032436968725e-05,
+ "loss": 0.3801,
+ "step": 174
+ },
+ {
+ "epoch": 2.3333333333333335,
+ "grad_norm": 0.5776801705360413,
+ "learning_rate": 7.804873131325954e-05,
+ "loss": 0.4567,
+ "step": 175
+ },
+ {
+ "epoch": 2.3466666666666667,
+ "grad_norm": 0.7591734528541565,
+ "learning_rate": 7.699677733393826e-05,
+ "loss": 0.4957,
+ "step": 176
+ },
+ {
+ "epoch": 2.36,
+ "grad_norm": 0.8160498142242432,
+ "learning_rate": 7.594750436337467e-05,
+ "loss": 0.4485,
+ "step": 177
+ },
+ {
+ "epoch": 2.3733333333333335,
+ "grad_norm": 0.5776082873344421,
+ "learning_rate": 7.490103469356513e-05,
+ "loss": 0.5212,
+ "step": 178
+ },
+ {
+ "epoch": 2.3866666666666667,
+ "grad_norm": 0.6858205795288086,
+ "learning_rate": 7.385749028978346e-05,
+ "loss": 0.3985,
+ "step": 179
+ },
+ {
+ "epoch": 2.4,
+ "grad_norm": 0.7106760740280151,
+ "learning_rate": 7.281699277636572e-05,
+ "loss": 0.447,
+ "step": 180
+ },
+ {
+ "epoch": 2.413333333333333,
+ "grad_norm": 0.5468612313270569,
+ "learning_rate": 7.177966342253524e-05,
+ "loss": 0.3429,
+ "step": 181
+ },
+ {
+ "epoch": 2.4266666666666667,
+ "grad_norm": 0.6179500222206116,
+ "learning_rate": 7.07456231282686e-05,
+ "loss": 0.4873,
+ "step": 182
+ },
+ {
+ "epoch": 2.44,
+ "grad_norm": 0.677168607711792,
+ "learning_rate": 6.971499241020495e-05,
+ "loss": 0.5149,
+ "step": 183
+ },
+ {
+ "epoch": 2.453333333333333,
+ "grad_norm": 0.7949701547622681,
+ "learning_rate": 6.868789138759976e-05,
+ "loss": 0.5183,
+ "step": 184
+ },
+ {
+ "epoch": 2.466666666666667,
+ "grad_norm": 0.631366491317749,
+ "learning_rate": 6.766443976832517e-05,
+ "loss": 0.3915,
+ "step": 185
+ },
+ {
+ "epoch": 2.48,
+ "grad_norm": 0.6317359805107117,
+ "learning_rate": 6.664475683491796e-05,
+ "loss": 0.4246,
+ "step": 186
+ },
+ {
+ "epoch": 2.493333333333333,
+ "grad_norm": 0.7377456426620483,
+ "learning_rate": 6.562896143067734e-05,
+ "loss": 0.3572,
+ "step": 187
+ },
+ {
+ "epoch": 2.506666666666667,
+ "grad_norm": 0.7473776340484619,
+ "learning_rate": 6.461717194581393e-05,
+ "loss": 0.4051,
+ "step": 188
+ },
+ {
+ "epoch": 2.52,
+ "grad_norm": 0.6155073046684265,
+ "learning_rate": 6.360950630365126e-05,
+ "loss": 0.4465,
+ "step": 189
+ },
+ {
+ "epoch": 2.533333333333333,
+ "grad_norm": 0.599367082118988,
+ "learning_rate": 6.260608194688206e-05,
+ "loss": 0.4863,
+ "step": 190
+ },
+ {
+ "epoch": 2.546666666666667,
+ "grad_norm": 0.6318126320838928,
+ "learning_rate": 6.160701582388038e-05,
+ "loss": 0.5023,
+ "step": 191
+ },
+ {
+ "epoch": 2.56,
+ "grad_norm": 0.6112634539604187,
+ "learning_rate": 6.061242437507131e-05,
+ "loss": 0.5643,
+ "step": 192
+ },
+ {
+ "epoch": 2.5733333333333333,
+ "grad_norm": 0.9118645787239075,
+ "learning_rate": 5.962242351935985e-05,
+ "loss": 0.4692,
+ "step": 193
+ },
+ {
+ "epoch": 2.586666666666667,
+ "grad_norm": 0.7344533801078796,
+ "learning_rate": 5.863712864062089e-05,
+ "loss": 0.4355,
+ "step": 194
+ },
+ {
+ "epoch": 2.6,
+ "grad_norm": 0.6159957051277161,
+ "learning_rate": 5.765665457425102e-05,
+ "loss": 0.5576,
+ "step": 195
+ },
+ {
+ "epoch": 2.6133333333333333,
+ "grad_norm": 0.632001519203186,
+ "learning_rate": 5.668111559378471e-05,
+ "loss": 0.5418,
+ "step": 196
+ },
+ {
+ "epoch": 2.626666666666667,
+ "grad_norm": 0.7217976450920105,
+ "learning_rate": 5.571062539757581e-05,
+ "loss": 0.5315,
+ "step": 197
+ },
+ {
+ "epoch": 2.64,
+ "grad_norm": 0.5802445411682129,
+ "learning_rate": 5.474529709554612e-05,
+ "loss": 0.5183,
+ "step": 198
+ },
+ {
+ "epoch": 2.6533333333333333,
+ "grad_norm": 0.8810819983482361,
+ "learning_rate": 5.378524319600231e-05,
+ "loss": 0.5234,
+ "step": 199
+ },
+ {
+ "epoch": 2.6666666666666665,
+ "grad_norm": 0.8704924583435059,
+ "learning_rate": 5.283057559252341e-05,
+ "loss": 0.4652,
+ "step": 200
+ },
+ {
+ "epoch": 2.68,
+ "grad_norm": 0.6765443682670593,
+ "learning_rate": 5.1881405550919493e-05,
+ "loss": 0.4305,
+ "step": 201
+ },
+ {
+ "epoch": 2.6933333333333334,
+ "grad_norm": 0.7338111400604248,
+ "learning_rate": 5.0937843696263966e-05,
+ "loss": 0.5201,
+ "step": 202
+ },
+ {
+ "epoch": 2.7066666666666666,
+ "grad_norm": 0.8355885744094849,
+ "learning_rate": 5.000000000000002e-05,
+ "loss": 0.3281,
+ "step": 203
+ },
+ {
+ "epoch": 2.7199999999999998,
+ "grad_norm": 0.6787794828414917,
+ "learning_rate": 4.9067983767123736e-05,
+ "loss": 0.5057,
+ "step": 204
+ },
+ {
+ "epoch": 2.7333333333333334,
+ "grad_norm": 0.6520538330078125,
+ "learning_rate": 4.814190362344454e-05,
+ "loss": 0.3988,
+ "step": 205
+ },
+ {
+ "epoch": 2.7466666666666666,
+ "grad_norm": 0.9957300424575806,
+ "learning_rate": 4.722186750292511e-05,
+ "loss": 0.4347,
+ "step": 206
+ },
+ {
+ "epoch": 2.76,
+ "grad_norm": 0.6702403426170349,
+ "learning_rate": 4.630798263510162e-05,
+ "loss": 0.4431,
+ "step": 207
+ },
+ {
+ "epoch": 2.7733333333333334,
+ "grad_norm": 0.6874341368675232,
+ "learning_rate": 4.540035553258619e-05,
+ "loss": 0.5117,
+ "step": 208
+ },
+ {
+ "epoch": 2.7866666666666666,
+ "grad_norm": 0.5381680727005005,
+ "learning_rate": 4.449909197865303e-05,
+ "loss": 0.4461,
+ "step": 209
+ },
+ {
+ "epoch": 2.8,
+ "grad_norm": 0.5413621068000793,
+ "learning_rate": 4.360429701490934e-05,
+ "loss": 0.5286,
+ "step": 210
+ },
+ {
+ "epoch": 2.8133333333333335,
+ "grad_norm": 0.5521364808082581,
+ "learning_rate": 4.271607492905303e-05,
+ "loss": 0.4846,
+ "step": 211
+ },
+ {
+ "epoch": 2.8266666666666667,
+ "grad_norm": 0.5857036709785461,
+ "learning_rate": 4.183452924271776e-05,
+ "loss": 0.4799,
+ "step": 212
+ },
+ {
+ "epoch": 2.84,
+ "grad_norm": 0.7210860848426819,
+ "learning_rate": 4.0959762699407766e-05,
+ "loss": 0.3214,
+ "step": 213
+ },
+ {
+ "epoch": 2.8533333333333335,
+ "grad_norm": 0.6596788763999939,
+ "learning_rate": 4.009187725252309e-05,
+ "loss": 0.4818,
+ "step": 214
+ },
+ {
+ "epoch": 2.8666666666666667,
+ "grad_norm": 0.6683171987533569,
+ "learning_rate": 3.9230974053477086e-05,
+ "loss": 0.4854,
+ "step": 215
+ },
+ {
+ "epoch": 2.88,
+ "grad_norm": 0.6668866872787476,
+ "learning_rate": 3.8377153439907266e-05,
+ "loss": 0.3712,
+ "step": 216
+ },
+ {
+ "epoch": 2.8933333333333335,
+ "grad_norm": 0.8019754886627197,
+ "learning_rate": 3.7530514923980884e-05,
+ "loss": 0.3826,
+ "step": 217
+ },
+ {
+ "epoch": 2.9066666666666667,
+ "grad_norm": 0.7723852396011353,
+ "learning_rate": 3.669115718079702e-05,
+ "loss": 0.3923,
+ "step": 218
+ },
+ {
+ "epoch": 2.92,
+ "grad_norm": 0.8932898044586182,
+ "learning_rate": 3.585917803688603e-05,
+ "loss": 0.4372,
+ "step": 219
+ },
+ {
+ "epoch": 2.9333333333333336,
+ "grad_norm": 0.8380217552185059,
+ "learning_rate": 3.503467445880789e-05,
+ "loss": 0.6057,
+ "step": 220
+ },
+ {
+ "epoch": 2.9466666666666668,
+ "grad_norm": 0.7885947227478027,
+ "learning_rate": 3.421774254185096e-05,
+ "loss": 0.4252,
+ "step": 221
+ },
+ {
+ "epoch": 2.96,
+ "grad_norm": 0.6929410099983215,
+ "learning_rate": 3.340847749883191e-05,
+ "loss": 0.4339,
+ "step": 222
+ },
+ {
+ "epoch": 2.9733333333333336,
+ "grad_norm": 0.5754182934761047,
+ "learning_rate": 3.2606973648998915e-05,
+ "loss": 0.5381,
+ "step": 223
+ },
+ {
+ "epoch": 2.986666666666667,
+ "grad_norm": 0.5466946363449097,
+ "learning_rate": 3.1813324407038825e-05,
+ "loss": 0.4699,
+ "step": 224
+ },
+ {
+ "epoch": 3.0,
+ "grad_norm": 0.6496762037277222,
+ "learning_rate": 3.102762227218957e-05,
+ "loss": 0.4392,
+ "step": 225
+ },
+ {
+ "epoch": 3.013333333333333,
+ "grad_norm": 0.5232837200164795,
+ "learning_rate": 3.0249958817459722e-05,
+ "loss": 0.3204,
+ "step": 226
+ },
+ {
+ "epoch": 3.026666666666667,
+ "grad_norm": 0.6648370623588562,
+ "learning_rate": 2.9480424678955443e-05,
+ "loss": 0.3054,
+ "step": 227
+ },
+ {
+ "epoch": 3.04,
+ "grad_norm": 0.5386692881584167,
+ "learning_rate": 2.8719109545317103e-05,
+ "loss": 0.3565,
+ "step": 228
+ },
+ {
+ "epoch": 3.0533333333333332,
+ "grad_norm": 0.5926947593688965,
+ "learning_rate": 2.7966102147265994e-05,
+ "loss": 0.298,
+ "step": 229
+ },
+ {
+ "epoch": 3.066666666666667,
+ "grad_norm": 0.5218151807785034,
+ "learning_rate": 2.722149024726307e-05,
+ "loss": 0.242,
+ "step": 230
+ },
+ {
+ "epoch": 3.08,
+ "grad_norm": 0.5256300568580627,
+ "learning_rate": 2.6485360629279987e-05,
+ "loss": 0.3497,
+ "step": 231
+ },
+ {
+ "epoch": 3.0933333333333333,
+ "grad_norm": 0.5321183204650879,
+ "learning_rate": 2.5757799088684654e-05,
+ "loss": 0.2814,
+ "step": 232
+ },
+ {
+ "epoch": 3.1066666666666665,
+ "grad_norm": 0.4942474663257599,
+ "learning_rate": 2.5038890422241958e-05,
+ "loss": 0.3361,
+ "step": 233
+ },
+ {
+ "epoch": 3.12,
+ "grad_norm": 0.6570059657096863,
+ "learning_rate": 2.432871841823047e-05,
+ "loss": 0.2742,
+ "step": 234
+ },
+ {
+ "epoch": 3.1333333333333333,
+ "grad_norm": 0.6714842319488525,
+ "learning_rate": 2.3627365846677306e-05,
+ "loss": 0.3261,
+ "step": 235
+ },
+ {
+ "epoch": 3.1466666666666665,
+ "grad_norm": 0.68682861328125,
+ "learning_rate": 2.2934914449711087e-05,
+ "loss": 0.2931,
+ "step": 236
+ },
+ {
+ "epoch": 3.16,
+ "grad_norm": 0.519067645072937,
+ "learning_rate": 2.2251444932035094e-05,
+ "loss": 0.323,
+ "step": 237
+ },
+ {
+ "epoch": 3.1733333333333333,
+ "grad_norm": 0.5991199612617493,
+ "learning_rate": 2.157703695152109e-05,
+ "loss": 0.2603,
+ "step": 238
+ },
+ {
+ "epoch": 3.1866666666666665,
+ "grad_norm": 0.7177437543869019,
+ "learning_rate": 2.091176910992545e-05,
+ "loss": 0.3499,
+ "step": 239
+ },
+ {
+ "epoch": 3.2,
+ "grad_norm": 0.6380268335342407,
+ "learning_rate": 2.025571894372794e-05,
+ "loss": 0.3064,
+ "step": 240
+ },
+ {
+ "epoch": 3.2133333333333334,
+ "grad_norm": 0.5707582235336304,
+ "learning_rate": 1.9608962915094996e-05,
+ "loss": 0.2747,
+ "step": 241
+ },
+ {
+ "epoch": 3.2266666666666666,
+ "grad_norm": 0.6281158328056335,
+ "learning_rate": 1.897157640296825e-05,
+ "loss": 0.2461,
+ "step": 242
+ },
+ {
+ "epoch": 3.24,
+ "grad_norm": 0.6357036232948303,
+ "learning_rate": 1.8343633694278895e-05,
+ "loss": 0.2846,
+ "step": 243
+ },
+ {
+ "epoch": 3.2533333333333334,
+ "grad_norm": 0.7706279754638672,
+ "learning_rate": 1.772520797528988e-05,
+ "loss": 0.2931,
+ "step": 244
+ },
+ {
+ "epoch": 3.2666666666666666,
+ "grad_norm": 0.5687737464904785,
+ "learning_rate": 1.7116371323065883e-05,
+ "loss": 0.2162,
+ "step": 245
+ },
+ {
+ "epoch": 3.2800000000000002,
+ "grad_norm": 0.5638925433158875,
+ "learning_rate": 1.65171946970729e-05,
+ "loss": 0.2219,
+ "step": 246
+ },
+ {
+ "epoch": 3.2933333333333334,
+ "grad_norm": 0.6213463544845581,
+ "learning_rate": 1.592774793090792e-05,
+ "loss": 0.3329,
+ "step": 247
+ },
+ {
+ "epoch": 3.3066666666666666,
+ "grad_norm": 0.6423382759094238,
+ "learning_rate": 1.534809972415998e-05,
+ "loss": 0.225,
+ "step": 248
+ },
+ {
+ "epoch": 3.32,
+ "grad_norm": 0.818946361541748,
+ "learning_rate": 1.4778317634403083e-05,
+ "loss": 0.2252,
+ "step": 249
+ },
+ {
+ "epoch": 3.3333333333333335,
+ "grad_norm": 0.5952211022377014,
+ "learning_rate": 1.4218468069322578e-05,
+ "loss": 0.2295,
+ "step": 250
+ },
+ {
+ "epoch": 3.3466666666666667,
+ "grad_norm": 0.5699911117553711,
+ "learning_rate": 1.3668616278975343e-05,
+ "loss": 0.3132,
+ "step": 251
+ },
+ {
+ "epoch": 3.36,
+ "grad_norm": 0.7674257755279541,
+ "learning_rate": 1.3128826348184887e-05,
+ "loss": 0.3487,
+ "step": 252
+ },
+ {
+ "epoch": 3.3733333333333335,
+ "grad_norm": 0.6047098636627197,
+ "learning_rate": 1.2599161189072427e-05,
+ "loss": 0.1991,
+ "step": 253
+ },
+ {
+ "epoch": 3.3866666666666667,
+ "grad_norm": 0.6673244833946228,
+ "learning_rate": 1.2079682533724379e-05,
+ "loss": 0.2233,
+ "step": 254
+ },
+ {
+ "epoch": 3.4,
+ "grad_norm": 0.7468060851097107,
+ "learning_rate": 1.1570450926997655e-05,
+ "loss": 0.2155,
+ "step": 255
+ },
+ {
+ "epoch": 3.413333333333333,
+ "grad_norm": 0.6119690537452698,
+ "learning_rate": 1.1071525719463095e-05,
+ "loss": 0.1624,
+ "step": 256
+ },
+ {
+ "epoch": 3.4266666666666667,
+ "grad_norm": 0.5702852606773376,
+ "learning_rate": 1.0582965060488359e-05,
+ "loss": 0.3134,
+ "step": 257
+ },
+ {
+ "epoch": 3.44,
+ "grad_norm": 0.9024640321731567,
+ "learning_rate": 1.010482589146048e-05,
+ "loss": 0.3316,
+ "step": 258
+ },
+ {
+ "epoch": 3.453333333333333,
+ "grad_norm": 0.6158623695373535,
+ "learning_rate": 9.637163939149485e-06,
+ "loss": 0.2637,
+ "step": 259
+ },
+ {
+ "epoch": 3.466666666666667,
+ "grad_norm": 0.5979247689247131,
+ "learning_rate": 9.180033709213454e-06,
+ "loss": 0.3148,
+ "step": 260
+ },
+ {
+ "epoch": 3.48,
+ "grad_norm": 0.5976862907409668,
+ "learning_rate": 8.733488479845997e-06,
+ "loss": 0.293,
+ "step": 261
+ },
+ {
+ "epoch": 3.493333333333333,
+ "grad_norm": 0.6761628985404968,
+ "learning_rate": 8.297580295566575e-06,
+ "loss": 0.2675,
+ "step": 262
+ },
+ {
+ "epoch": 3.506666666666667,
+ "grad_norm": 0.7228958606719971,
+ "learning_rate": 7.872359961154906e-06,
+ "loss": 0.3601,
+ "step": 263
+ },
+ {
+ "epoch": 3.52,
+ "grad_norm": 0.6675010919570923,
+ "learning_rate": 7.457877035729588e-06,
+ "loss": 0.2551,
+ "step": 264
+ },
+ {
+ "epoch": 3.533333333333333,
+ "grad_norm": 0.6560245752334595,
+ "learning_rate": 7.054179826972074e-06,
+ "loss": 0.1799,
+ "step": 265
+ },
+ {
+ "epoch": 3.546666666666667,
+ "grad_norm": 0.6431569457054138,
+ "learning_rate": 6.661315385496425e-06,
+ "loss": 0.2183,
+ "step": 266
+ },
+ {
+ "epoch": 3.56,
+ "grad_norm": 0.7804158329963684,
+ "learning_rate": 6.2793294993656494e-06,
+ "loss": 0.2438,
+ "step": 267
+ },
+ {
+ "epoch": 3.5733333333333333,
+ "grad_norm": 0.8433220982551575,
+ "learning_rate": 5.908266688755049e-06,
+ "loss": 0.2806,
+ "step": 268
+ },
+ {
+ "epoch": 3.586666666666667,
+ "grad_norm": 0.7814007997512817,
+ "learning_rate": 5.54817020076347e-06,
+ "loss": 0.2841,
+ "step": 269
+ },
+ {
+ "epoch": 3.6,
+ "grad_norm": 0.5926189422607422,
+ "learning_rate": 5.199082004372957e-06,
+ "loss": 0.2349,
+ "step": 270
+ },
+ {
+ "epoch": 3.6133333333333333,
+ "grad_norm": 0.7108579277992249,
+ "learning_rate": 4.861042785557146e-06,
+ "loss": 0.2771,
+ "step": 271
+ },
+ {
+ "epoch": 3.626666666666667,
+ "grad_norm": 0.8635604381561279,
+ "learning_rate": 4.534091942539475e-06,
+ "loss": 0.3572,
+ "step": 272
+ },
+ {
+ "epoch": 3.64,
+ "grad_norm": 0.7461998462677002,
+ "learning_rate": 4.2182675812012965e-06,
+ "loss": 0.3266,
+ "step": 273
+ },
+ {
+ "epoch": 3.6533333333333333,
+ "grad_norm": 0.6848060488700867,
+ "learning_rate": 3.913606510640644e-06,
+ "loss": 0.2782,
+ "step": 274
+ },
+ {
+ "epoch": 3.6666666666666665,
+ "grad_norm": 0.5404589176177979,
+ "learning_rate": 3.620144238882206e-06,
+ "loss": 0.2365,
+ "step": 275
+ },
+ {
+ "epoch": 3.68,
+ "grad_norm": 0.8409443497657776,
+ "learning_rate": 3.3379149687388867e-06,
+ "loss": 0.243,
+ "step": 276
+ },
+ {
+ "epoch": 3.6933333333333334,
+ "grad_norm": 0.6383155584335327,
+ "learning_rate": 3.06695159382544e-06,
+ "loss": 0.3358,
+ "step": 277
+ },
+ {
+ "epoch": 3.7066666666666666,
+ "grad_norm": 0.790045976638794,
+ "learning_rate": 2.8072856947248037e-06,
+ "loss": 0.2835,
+ "step": 278
+ },
+ {
+ "epoch": 3.7199999999999998,
+ "grad_norm": 0.6109788417816162,
+ "learning_rate": 2.5589475353073988e-06,
+ "loss": 0.3113,
+ "step": 279
+ },
+ {
+ "epoch": 3.7333333333333334,
+ "grad_norm": 0.5038023591041565,
+ "learning_rate": 2.3219660592038285e-06,
+ "loss": 0.1902,
+ "step": 280
+ },
+ {
+ "epoch": 3.7466666666666666,
+ "grad_norm": 0.5871134996414185,
+ "learning_rate": 2.0963688864316323e-06,
+ "loss": 0.3928,
+ "step": 281
+ },
+ {
+ "epoch": 3.76,
+ "grad_norm": 0.5895046591758728,
+ "learning_rate": 1.882182310176095e-06,
+ "loss": 0.2864,
+ "step": 282
+ },
+ {
+ "epoch": 3.7733333333333334,
+ "grad_norm": 1.456066608428955,
+ "learning_rate": 1.6794312937258417e-06,
+ "loss": 0.2669,
+ "step": 283
+ },
+ {
+ "epoch": 3.7866666666666666,
+ "grad_norm": 0.6652315855026245,
+ "learning_rate": 1.488139467563354e-06,
+ "loss": 0.2859,
+ "step": 284
+ },
+ {
+ "epoch": 3.8,
+ "grad_norm": 0.7242224812507629,
+ "learning_rate": 1.30832912661093e-06,
+ "loss": 0.1955,
+ "step": 285
+ },
+ {
+ "epoch": 3.8133333333333335,
+ "grad_norm": 0.5488805174827576,
+ "learning_rate": 1.1400212276321376e-06,
+ "loss": 0.2772,
+ "step": 286
+ },
+ {
+ "epoch": 3.8266666666666667,
+ "grad_norm": 0.9780620336532593,
+ "learning_rate": 9.832353867893386e-07,
+ "loss": 0.2286,
+ "step": 287
+ },
+ {
+ "epoch": 3.84,
+ "grad_norm": 0.6662778258323669,
+ "learning_rate": 8.379898773574924e-07,
+ "loss": 0.2704,
+ "step": 288
+ },
+ {
+ "epoch": 3.8533333333333335,
+ "grad_norm": 0.6265354156494141,
+ "learning_rate": 7.043016275943615e-07,
+ "loss": 0.2296,
+ "step": 289
+ },
+ {
+ "epoch": 3.8666666666666667,
+ "grad_norm": 0.6008942127227783,
+ "learning_rate": 5.821862187675775e-07,
+ "loss": 0.2374,
+ "step": 290
+ },
+ {
+ "epoch": 3.88,
+ "grad_norm": 0.7946692109107971,
+ "learning_rate": 4.7165788333860536e-07,
+ "loss": 0.1566,
+ "step": 291
+ },
+ {
+ "epoch": 3.8933333333333335,
+ "grad_norm": 0.7700130939483643,
+ "learning_rate": 3.727295033040035e-07,
+ "loss": 0.2748,
+ "step": 292
+ },
+ {
+ "epoch": 3.9066666666666667,
+ "grad_norm": 0.671159029006958,
+ "learning_rate": 2.854126086940356e-07,
+ "loss": 0.2345,
+ "step": 293
+ },
+ {
+ "epoch": 3.92,
+ "grad_norm": 0.591444194316864,
+ "learning_rate": 2.0971737622883515e-07,
+ "loss": 0.2364,
+ "step": 294
+ },
+ {
+ "epoch": 3.9333333333333336,
+ "grad_norm": 0.6508089900016785,
+ "learning_rate": 1.4565262813230894e-07,
+ "loss": 0.2455,
+ "step": 295
+ },
+ {
+ "epoch": 3.9466666666666668,
+ "grad_norm": 0.8111832737922668,
+ "learning_rate": 9.32258311039269e-08,
+ "loss": 0.2412,
+ "step": 296
+ },
+ {
+ "epoch": 3.96,
+ "grad_norm": 0.6243981122970581,
+ "learning_rate": 5.2443095448506674e-08,
+ "loss": 0.2991,
+ "step": 297
+ },
+ {
+ "epoch": 3.9733333333333336,
+ "grad_norm": 0.8096401691436768,
+ "learning_rate": 2.3309174364027907e-08,
+ "loss": 0.2848,
+ "step": 298
+ },
+ {
+ "epoch": 3.986666666666667,
+ "grad_norm": 0.6315991878509521,
+ "learning_rate": 5.827463387653165e-09,
+ "loss": 0.3048,
+ "step": 299
+ },
+ {
+ "epoch": 4.0,
+ "grad_norm": 0.6686562895774841,
+ "learning_rate": 0.0,
+ "loss": 0.266,
+ "step": 300
+ }
+ ],
+ "logging_steps": 1.0,
+ "max_steps": 300,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 4,
+ "save_steps": 50,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": true
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 8.62109265035264e+16,
+ "train_batch_size": 2,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/checkpoint-300/training_args.bin b/checkpoint-300/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..26de66c830692f3d22f375473f4f43447eefb78a
--- /dev/null
+++ b/checkpoint-300/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:54335a849ae8d377cae5839a736f4196087c7894666ca9b9f10dd899e0ada95c
+size 6904
diff --git a/checkpoint-300/zero_to_fp32.py b/checkpoint-300/zero_to_fp32.py
new file mode 100644
index 0000000000000000000000000000000000000000..5995d6e6f04e43b989587aa9022a3aef0c66d694
--- /dev/null
+++ b/checkpoint-300/zero_to_fp32.py
@@ -0,0 +1,760 @@
+#!/usr/bin/env python
+
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+# This script extracts fp32 consolidated weights from ZeRO 1, 2 and 3 DeepSpeed checkpoints. It gets
+# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
+# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
+# application.
+#
+# example:
+# python zero_to_fp32.py . output_dir/
+# or
+# python zero_to_fp32.py . output_dir/ --safe_serialization
+
+import argparse
+import torch
+import glob
+import math
+import os
+import re
+import gc
+import json
+import numpy as np
+from tqdm import tqdm
+from collections import OrderedDict
+from dataclasses import dataclass
+
+# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
+# DeepSpeed data structures it has to be available in the current python environment.
+from deepspeed.utils import logger
+from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
+
+
+@dataclass
+class zero_model_state:
+ buffers: dict()
+ param_shapes: dict()
+ shared_params: list
+ ds_version: int
+ frozen_param_shapes: dict()
+ frozen_param_fragments: dict()
+
+
+debug = 0
+
+# load to cpu
+device = torch.device('cpu')
+
+
+def atoi(text):
+ return int(text) if text.isdigit() else text
+
+
+def natural_keys(text):
+ '''
+ alist.sort(key=natural_keys) sorts in human order
+ http://nedbatchelder.com/blog/200712/human_sorting.html
+ (See Toothy's implementation in the comments)
+ '''
+ return [atoi(c) for c in re.split(r'(\d+)', text)]
+
+
+def get_model_state_file(checkpoint_dir, zero_stage):
+ if not os.path.isdir(checkpoint_dir):
+ raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
+
+ # there should be only one file
+ if zero_stage <= 2:
+ file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
+ elif zero_stage == 3:
+ file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
+
+ if not os.path.exists(file):
+ raise FileNotFoundError(f"can't find model states file at '{file}'")
+
+ return file
+
+
+def get_checkpoint_files(checkpoint_dir, glob_pattern):
+ # XXX: need to test that this simple glob rule works for multi-node setup too
+ ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
+
+ if len(ckpt_files) == 0:
+ raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
+
+ return ckpt_files
+
+
+def get_optim_files(checkpoint_dir):
+ return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
+
+
+def get_model_state_files(checkpoint_dir):
+ return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
+
+
+def parse_model_states(files):
+ zero_model_states = []
+ for file in files:
+ state_dict = torch.load(file, map_location=device, weights_only=False)
+
+ if BUFFER_NAMES not in state_dict:
+ raise ValueError(f"{file} is not a model state checkpoint")
+ buffer_names = state_dict[BUFFER_NAMES]
+ if debug:
+ print("Found buffers:", buffer_names)
+
+ # recover just the buffers while restoring them to fp32 if they were saved in fp16
+ buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
+ param_shapes = state_dict[PARAM_SHAPES]
+
+ # collect parameters that are included in param_shapes
+ param_names = []
+ for s in param_shapes:
+ for name in s.keys():
+ param_names.append(name)
+
+ # update with frozen parameters
+ frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
+ if frozen_param_shapes is not None:
+ if debug:
+ print(f"Found frozen_param_shapes: {frozen_param_shapes}")
+ param_names += list(frozen_param_shapes.keys())
+
+ # handle shared params
+ shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
+
+ ds_version = state_dict.get(DS_VERSION, None)
+
+ frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
+
+ z_model_state = zero_model_state(buffers=buffers,
+ param_shapes=param_shapes,
+ shared_params=shared_params,
+ ds_version=ds_version,
+ frozen_param_shapes=frozen_param_shapes,
+ frozen_param_fragments=frozen_param_fragments)
+ zero_model_states.append(z_model_state)
+
+ return zero_model_states
+
+
+def parse_optim_states(files, ds_checkpoint_dir):
+ total_files = len(files)
+ state_dicts = []
+ for f in tqdm(files, desc='Loading checkpoint shards'):
+ state_dict = torch.load(f, map_location=device, mmap=True, weights_only=False)
+ # immediately discard the two potentially huge optimizer states as we only care for fp32 master weights
+ # and also handle the case where it was already removed by another helper script
+ state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
+ state_dicts.append(state_dict)
+
+ if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
+ raise ValueError(f"{files[0]} is not a zero checkpoint")
+ zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
+ world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
+
+ # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
+ # parameters can be different from data parallelism for non-expert parameters. So we can just
+ # use the max of the partition_count to get the dp world_size.
+
+ if type(world_size) is list:
+ world_size = max(world_size)
+
+ if world_size != total_files:
+ raise ValueError(
+ f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
+ "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
+ )
+
+ # the groups are named differently in each stage
+ if zero_stage <= 2:
+ fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
+ elif zero_stage == 3:
+ fp32_groups_key = FP32_FLAT_GROUPS
+ else:
+ raise ValueError(f"unknown zero stage {zero_stage}")
+
+ fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
+ return zero_stage, world_size, fp32_flat_groups
+
+
+def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
+ """
+ Returns fp32 state_dict reconstructed from ds checkpoint
+
+ Args:
+ - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
+
+ """
+ print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
+
+ optim_files = get_optim_files(ds_checkpoint_dir)
+ zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
+ print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
+
+ model_files = get_model_state_files(ds_checkpoint_dir)
+
+ zero_model_states = parse_model_states(model_files)
+ print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
+
+ if zero_stage <= 2:
+ return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters)
+ elif zero_stage == 3:
+ return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters)
+
+
+def _zero2_merge_frozen_params(state_dict, zero_model_states):
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+ return
+
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+ frozen_param_fragments = zero_model_states[0].frozen_param_fragments
+
+ if debug:
+ num_elem = sum(s.numel() for s in frozen_param_shapes.values())
+ print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+ wanted_params = len(frozen_param_shapes)
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+ avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
+ print(f'Frozen params: Have {avail_numel} numels to process.')
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+ total_params = 0
+ total_numel = 0
+ for name, shape in frozen_param_shapes.items():
+ total_params += 1
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+
+ state_dict[name] = frozen_param_fragments[name]
+
+ if debug:
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _has_callable(obj, fn):
+ attr = getattr(obj, fn, None)
+ return callable(attr)
+
+
+def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+ param_shapes = zero_model_states[0].param_shapes
+
+ # Reconstruction protocol:
+ #
+ # XXX: document this
+
+ if debug:
+ for i in range(world_size):
+ for j in range(len(fp32_flat_groups[0])):
+ print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
+
+ # XXX: memory usage doubles here (zero2)
+ num_param_groups = len(fp32_flat_groups[0])
+ merged_single_partition_of_fp32_groups = []
+ for i in range(num_param_groups):
+ merged_partitions = [sd[i] for sd in fp32_flat_groups]
+ full_single_fp32_vector = torch.cat(merged_partitions, 0)
+ merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
+ avail_numel = sum(
+ [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
+
+ if debug:
+ wanted_params = sum([len(shapes) for shapes in param_shapes])
+ wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
+ # not asserting if there is a mismatch due to possible padding
+ print(f"Have {avail_numel} numels to process.")
+ print(f"Need {wanted_numel} numels in {wanted_params} params.")
+
+ # params
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+ # out-of-core computing solution
+ total_numel = 0
+ total_params = 0
+ for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
+ offset = 0
+ avail_numel = full_single_fp32_vector.numel()
+ for name, shape in shapes.items():
+
+ unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
+ total_numel += unpartitioned_numel
+ total_params += 1
+
+ if debug:
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+ state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
+ offset += unpartitioned_numel
+
+ # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
+ # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
+ # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
+ # live optimizer object, so we are checking that the numbers are within the right range
+ align_to = 2 * world_size
+
+ def zero2_align(x):
+ return align_to * math.ceil(x / align_to)
+
+ if debug:
+ print(f"original offset={offset}, avail_numel={avail_numel}")
+
+ offset = zero2_align(offset)
+ avail_numel = zero2_align(avail_numel)
+
+ if debug:
+ print(f"aligned offset={offset}, avail_numel={avail_numel}")
+
+ # Sanity check
+ if offset != avail_numel:
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+ print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters):
+ state_dict = OrderedDict()
+
+ # buffers
+ buffers = zero_model_states[0].buffers
+ state_dict.update(buffers)
+ if debug:
+ print(f"added {len(buffers)} buffers")
+
+ if not exclude_frozen_parameters:
+ _zero2_merge_frozen_params(state_dict, zero_model_states)
+
+ _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+ # recover shared parameters
+ for pair in zero_model_states[0].shared_params:
+ if pair[1] in state_dict:
+ state_dict[pair[0]] = state_dict[pair[1]]
+
+ return state_dict
+
+
+def zero3_partitioned_param_info(unpartitioned_numel, world_size):
+ remainder = unpartitioned_numel % world_size
+ padding_numel = (world_size - remainder) if remainder else 0
+ partitioned_numel = math.ceil(unpartitioned_numel / world_size)
+ return partitioned_numel, padding_numel
+
+
+def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+ return
+
+ if debug:
+ for i in range(world_size):
+ num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
+ print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+ wanted_params = len(frozen_param_shapes)
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+ avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
+ print(f'Frozen params: Have {avail_numel} numels to process.')
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+ total_params = 0
+ total_numel = 0
+ for name, shape in zero_model_states[0].frozen_param_shapes.items():
+ total_params += 1
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+
+ param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
+ state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
+
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+ if debug:
+ print(
+ f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+ )
+
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+class GatheredTensor:
+ """
+ A pseudo tensor that collects partitioned weights.
+ It is more memory efficient when there are multiple groups.
+ """
+
+ def __init__(self, flat_groups, flat_groups_offset, offset, partitioned_numel, shape):
+ self.flat_groups = flat_groups
+ self.flat_groups_offset = flat_groups_offset
+ self.offset = offset
+ self.partitioned_numel = partitioned_numel
+ self.shape = shape
+ self.dtype = self.flat_groups[0][0].dtype
+
+ def contiguous(self):
+ """
+ Merge partitioned weights from flat_groups into a single tensor.
+ """
+ end_idx = self.offset + self.partitioned_numel
+ world_size = len(self.flat_groups)
+ pad_flat_param_chunks = []
+
+ for rank_i in range(world_size):
+ # for each rank, we need to collect weights from related group/groups
+ flat_groups_at_rank_i = self.flat_groups[rank_i]
+ start_group_id = None
+ end_group_id = None
+ for group_id in range(len(self.flat_groups_offset)):
+ if self.flat_groups_offset[group_id] <= self.offset < self.flat_groups_offset[group_id + 1]:
+ start_group_id = group_id
+ if self.flat_groups_offset[group_id] < end_idx <= self.flat_groups_offset[group_id + 1]:
+ end_group_id = group_id
+ break
+ # collect weights from related group/groups
+ for group_id in range(start_group_id, end_group_id + 1):
+ flat_tensor = flat_groups_at_rank_i[group_id]
+ start_offset = self.offset - self.flat_groups_offset[group_id]
+ end_offset = min(end_idx, self.flat_groups_offset[group_id + 1]) - self.flat_groups_offset[group_id]
+ pad_flat_param_chunks.append(flat_tensor[start_offset:end_offset])
+
+ # collect weights from all ranks
+ pad_flat_param = torch.cat(pad_flat_param_chunks, dim=0)
+ param = pad_flat_param[:self.shape.numel()].view(self.shape).contiguous()
+ return param
+
+
+def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+ param_shapes = zero_model_states[0].param_shapes
+ avail_numel = sum([flat_group.numel() for flat_group in fp32_flat_groups[0]]) * world_size
+
+ # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
+ # param, re-consolidating each param, while dealing with padding if any
+
+ # merge list of dicts, preserving order
+ param_shapes = {k: v for d in param_shapes for k, v in d.items()}
+
+ if debug:
+ for i in range(world_size):
+ print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
+
+ wanted_params = len(param_shapes)
+ wanted_numel = sum(shape.numel() for shape in param_shapes.values())
+ # not asserting if there is a mismatch due to possible padding
+ avail_numel = fp32_flat_groups[0].numel() * world_size
+ print(f"Trainable params: Have {avail_numel} numels to process.")
+ print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
+
+ # params
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+ # out-of-core computing solution
+ offset = 0
+ total_numel = 0
+ total_params = 0
+ flat_groups_offset = [0] + list(np.cumsum([flat_tensor.numel() for flat_tensor in fp32_flat_groups[0]]))
+ for name, shape in tqdm(param_shapes.items(), desc='Gathering sharded weights'):
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+ total_params += 1
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+ if debug:
+ print(
+ f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+ )
+
+ # memory efficient tensor
+ tensor = GatheredTensor(fp32_flat_groups, flat_groups_offset, offset, partitioned_numel, shape)
+ state_dict[name] = tensor
+ offset += partitioned_numel
+
+ offset *= world_size
+
+ # Sanity check
+ if offset != avail_numel:
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+ print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters):
+ state_dict = OrderedDict()
+
+ # buffers
+ buffers = zero_model_states[0].buffers
+ state_dict.update(buffers)
+ if debug:
+ print(f"added {len(buffers)} buffers")
+
+ if not exclude_frozen_parameters:
+ _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
+
+ _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+ # recover shared parameters
+ for pair in zero_model_states[0].shared_params:
+ if pair[1] in state_dict:
+ state_dict[pair[0]] = state_dict[pair[1]]
+
+ return state_dict
+
+
+def to_torch_tensor(state_dict, return_empty_tensor=False):
+ """
+ Convert state_dict of GatheredTensor to torch tensor
+ """
+ torch_state_dict = {}
+ converted_tensors = {}
+ for name, tensor in state_dict.items():
+ tensor_id = id(tensor)
+ if tensor_id in converted_tensors: # shared tensors
+ shared_tensor = torch_state_dict[converted_tensors[tensor_id]]
+ torch_state_dict[name] = shared_tensor
+ else:
+ converted_tensors[tensor_id] = name
+ if return_empty_tensor:
+ torch_state_dict[name] = torch.empty(tensor.shape, dtype=tensor.dtype)
+ else:
+ torch_state_dict[name] = tensor.contiguous()
+ return torch_state_dict
+
+
+def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
+ tag=None,
+ exclude_frozen_parameters=False,
+ lazy_mode=False):
+ """
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
+ ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
+ via a model hub.
+
+ Args:
+ - ``checkpoint_dir``: path to the desired checkpoint folder
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
+ - ``exclude_frozen_parameters``: exclude frozen parameters
+ - ``lazy_mode``: get state_dict in lazy mode. It returns a dict of pseudo tensors instead of torch tensors, which is more memory efficient.
+ Convert a pseudo tensor to a torch tensor by ``.contiguous()``
+
+ Returns:
+ - pytorch ``state_dict``
+
+ A typical usage might be ::
+
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+ # do the training and checkpoint saving
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
+ model = model.cpu() # move to cpu
+ model.load_state_dict(state_dict)
+ # submit to model hub or save the model to share with others
+
+ In this example the ``model`` will no longer be usable in the deepspeed context of the same
+ application. i.e. you will need to re-initialize the deepspeed engine, since
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+ If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
+
+ Note: the above usage may not work if your application doesn't have sufficient free CPU memory.
+ You may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
+ the checkpoint. Or you can load state_dict in lazy mode ::
+
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, lazy_mode=True) # not on cpu
+ for name, lazy_tensor in state_dict.items():
+ tensor = lazy_tensor.contiguous() # to cpu
+ print(name, tensor)
+ # del tensor to release memory if it is no longer in use
+ """
+ if tag is None:
+ latest_path = os.path.join(checkpoint_dir, 'latest')
+ if os.path.isfile(latest_path):
+ with open(latest_path, 'r') as fd:
+ tag = fd.read().strip()
+ else:
+ raise ValueError(f"Unable to find 'latest' file at {latest_path}")
+
+ ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
+
+ if not os.path.isdir(ds_checkpoint_dir):
+ raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
+
+ state_dict = _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
+ if lazy_mode:
+ return state_dict
+ else:
+ return to_torch_tensor(state_dict)
+
+
+def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir,
+ output_dir,
+ max_shard_size="5GB",
+ safe_serialization=False,
+ tag=None,
+ exclude_frozen_parameters=False):
+ """
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
+ loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
+
+ Args:
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+ - ``output_dir``: directory to the pytorch fp32 state_dict output files
+ - ``max_shard_size``: the maximum size for a checkpoint before being sharded, default value is 5GB
+ - ``safe_serialization``: whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+ - ``exclude_frozen_parameters``: exclude frozen parameters
+ """
+
+ # Dependency pre-check
+ if safe_serialization:
+ try:
+ from safetensors.torch import save_file
+ except ImportError:
+ print('If you want to use `safe_serialization`, please `pip install safetensors`')
+ raise
+ if max_shard_size is not None:
+ try:
+ from huggingface_hub import split_torch_state_dict_into_shards
+ except ImportError:
+ print('If you want to use `max_shard_size`, please `pip install huggingface_hub`')
+ raise
+
+ # Convert zero checkpoint to state_dict
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
+ tag,
+ exclude_frozen_parameters,
+ lazy_mode=True)
+
+ # Shard the model if it is too big.
+ weights_name = "model.safetensors" if safe_serialization else "pytorch_model.bin"
+ if max_shard_size is not None:
+ filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors")
+ # a memory-efficient approach for sharding
+ empty_state_dict = to_torch_tensor(state_dict, return_empty_tensor=True)
+ state_dict_split = split_torch_state_dict_into_shards(empty_state_dict,
+ filename_pattern=filename_pattern,
+ max_shard_size=max_shard_size)
+ else:
+ from collections import namedtuple
+ StateDictSplit = namedtuple("StateDictSplit", ["is_sharded", "filename_to_tensors"])
+ state_dict_split = StateDictSplit(is_sharded=False,
+ filename_to_tensors={weights_name: list(state_dict.keys())})
+
+ # Save the model by shard
+ os.makedirs(output_dir, exist_ok=True)
+ filename_to_tensors = state_dict_split.filename_to_tensors.items()
+ for shard_file, tensors in tqdm(filename_to_tensors, desc="Saving checkpoint shards"):
+ shard_state_dict = {tensor_name: state_dict[tensor_name] for tensor_name in tensors}
+ shard_state_dict = to_torch_tensor(shard_state_dict)
+ output_path = os.path.join(output_dir, shard_file)
+ if safe_serialization:
+ save_file(shard_state_dict, output_path, metadata={"format": "pt"})
+ else:
+ torch.save(shard_state_dict, output_path)
+ # release the memory of current shard
+ for tensor_name in list(shard_state_dict.keys()):
+ del state_dict[tensor_name]
+ del shard_state_dict[tensor_name]
+ del shard_state_dict
+ gc.collect()
+
+ # Save index if sharded
+ if state_dict_split.is_sharded:
+ index = {
+ "metadata": state_dict_split.metadata,
+ "weight_map": state_dict_split.tensor_to_filename,
+ }
+ save_index_file = "model.safetensors.index.json" if safe_serialization else "pytorch_model.bin.index.json"
+ save_index_file = os.path.join(output_dir, save_index_file)
+ with open(save_index_file, "w", encoding="utf-8") as f:
+ content = json.dumps(index, indent=2, sort_keys=True) + "\n"
+ f.write(content)
+
+
+def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
+ """
+ 1. Put the provided model to cpu
+ 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
+ 3. Load it into the provided model
+
+ Args:
+ - ``model``: the model object to update
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+
+ Returns:
+ - ``model``: modified model
+
+ Make sure you have plenty of CPU memory available before you call this function. If you don't
+ have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
+ conveniently placed for you in the checkpoint folder.
+
+ A typical usage might be ::
+
+ from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
+ model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
+ # submit to model hub or save the model to share with others
+
+ Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context
+ of the same application. i.e. you will need to re-initialize the deepspeed engine, since
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+ """
+ logger.info("Extracting fp32 weights")
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+
+ logger.info("Overwriting model with fp32 weights")
+ model = model.cpu()
+ model.load_state_dict(state_dict, strict=False)
+
+ return model
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("checkpoint_dir",
+ type=str,
+ help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
+ parser.add_argument("output_dir",
+ type=str,
+ help="directory to the pytorch fp32 state_dict output files"
+ "(e.g. path/checkpoint-12-output/)")
+ parser.add_argument(
+ "--max_shard_size",
+ type=str,
+ default="5GB",
+ help="The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size"
+ "lower than this size. If expressed as a string, needs to be digits followed by a unit (like `5MB`"
+ "We default it to 5GB in order for models to be able to run easily on free-tier google colab instances"
+ "without CPU OOM issues.")
+ parser.add_argument(
+ "--safe_serialization",
+ default=False,
+ action='store_true',
+ help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).")
+ parser.add_argument("-t",
+ "--tag",
+ type=str,
+ default=None,
+ help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
+ parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
+ parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
+ args = parser.parse_args()
+
+ debug = args.debug
+
+ convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
+ args.output_dir,
+ max_shard_size=args.max_shard_size,
+ safe_serialization=args.safe_serialization,
+ tag=args.tag,
+ exclude_frozen_parameters=args.exclude_frozen_parameters)
diff --git a/special_tokens_map.json b/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..14761dcf1466dc232bd41de9c21d4c617b15755e
--- /dev/null
+++ b/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "",
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/tokenizer.model b/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..6c00c742ce03c627d6cd5b795984876fa49fa899
--- /dev/null
+++ b/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
diff --git a/tokenizer_config.json b/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..26c65df1bf794f101c1dd54c908180dc0d880fe3
--- /dev/null
+++ b/tokenizer_config.json
@@ -0,0 +1,43 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": true,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "",
+ "legacy": false,
+ "model_max_length": 2048,
+ "pad_token": "",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "",
+ "use_default_system_prompt": false
+}
diff --git a/trainer_state.json b/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..1fe6f8c0c4d774791366cc67c585fbf2350d9ca8
--- /dev/null
+++ b/trainer_state.json
@@ -0,0 +1,2142 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 4.0,
+ "eval_steps": 500,
+ "global_step": 300,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.013333333333333334,
+ "grad_norm": 1.0028682947158813,
+ "learning_rate": 2.2222222222222223e-05,
+ "loss": 1.43,
+ "step": 1
+ },
+ {
+ "epoch": 0.02666666666666667,
+ "grad_norm": 1.6397583484649658,
+ "learning_rate": 4.4444444444444447e-05,
+ "loss": 1.5374,
+ "step": 2
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 1.2198785543441772,
+ "learning_rate": 6.666666666666667e-05,
+ "loss": 1.5106,
+ "step": 3
+ },
+ {
+ "epoch": 0.05333333333333334,
+ "grad_norm": 0.8468486070632935,
+ "learning_rate": 8.888888888888889e-05,
+ "loss": 1.447,
+ "step": 4
+ },
+ {
+ "epoch": 0.06666666666666667,
+ "grad_norm": 0.9268608689308167,
+ "learning_rate": 0.00011111111111111112,
+ "loss": 1.4743,
+ "step": 5
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 1.0168085098266602,
+ "learning_rate": 0.00013333333333333334,
+ "loss": 1.3245,
+ "step": 6
+ },
+ {
+ "epoch": 0.09333333333333334,
+ "grad_norm": 0.6773934960365295,
+ "learning_rate": 0.00015555555555555556,
+ "loss": 1.3738,
+ "step": 7
+ },
+ {
+ "epoch": 0.10666666666666667,
+ "grad_norm": 0.6985631585121155,
+ "learning_rate": 0.00017777777777777779,
+ "loss": 1.3951,
+ "step": 8
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 1.0221399068832397,
+ "learning_rate": 0.0002,
+ "loss": 1.3246,
+ "step": 9
+ },
+ {
+ "epoch": 0.13333333333333333,
+ "grad_norm": 0.6119747161865234,
+ "learning_rate": 0.00019999417253661235,
+ "loss": 1.2951,
+ "step": 10
+ },
+ {
+ "epoch": 0.14666666666666667,
+ "grad_norm": 0.6660990118980408,
+ "learning_rate": 0.00019997669082563597,
+ "loss": 1.2529,
+ "step": 11
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.5874819755554199,
+ "learning_rate": 0.00019994755690455152,
+ "loss": 1.2252,
+ "step": 12
+ },
+ {
+ "epoch": 0.17333333333333334,
+ "grad_norm": 0.4818006157875061,
+ "learning_rate": 0.00019990677416889608,
+ "loss": 1.1708,
+ "step": 13
+ },
+ {
+ "epoch": 0.18666666666666668,
+ "grad_norm": 0.652045488357544,
+ "learning_rate": 0.0001998543473718677,
+ "loss": 0.9865,
+ "step": 14
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.5517733693122864,
+ "learning_rate": 0.00019979028262377118,
+ "loss": 1.181,
+ "step": 15
+ },
+ {
+ "epoch": 0.21333333333333335,
+ "grad_norm": 0.47720542550086975,
+ "learning_rate": 0.00019971458739130598,
+ "loss": 1.0633,
+ "step": 16
+ },
+ {
+ "epoch": 0.22666666666666666,
+ "grad_norm": 0.7947096228599548,
+ "learning_rate": 0.000199627270496696,
+ "loss": 1.1458,
+ "step": 17
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.5301257371902466,
+ "learning_rate": 0.0001995283421166614,
+ "loss": 1.2245,
+ "step": 18
+ },
+ {
+ "epoch": 0.25333333333333335,
+ "grad_norm": 0.9473271369934082,
+ "learning_rate": 0.00019941781378123244,
+ "loss": 1.0859,
+ "step": 19
+ },
+ {
+ "epoch": 0.26666666666666666,
+ "grad_norm": 1.1834161281585693,
+ "learning_rate": 0.00019929569837240564,
+ "loss": 1.1808,
+ "step": 20
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.6784033179283142,
+ "learning_rate": 0.00019916201012264254,
+ "loss": 1.202,
+ "step": 21
+ },
+ {
+ "epoch": 0.29333333333333333,
+ "grad_norm": 0.7274785041809082,
+ "learning_rate": 0.00019901676461321068,
+ "loss": 1.2242,
+ "step": 22
+ },
+ {
+ "epoch": 0.30666666666666664,
+ "grad_norm": 0.7520783543586731,
+ "learning_rate": 0.00019885997877236788,
+ "loss": 1.173,
+ "step": 23
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.8218541145324707,
+ "learning_rate": 0.00019869167087338907,
+ "loss": 1.0723,
+ "step": 24
+ },
+ {
+ "epoch": 0.3333333333333333,
+ "grad_norm": 0.6361420154571533,
+ "learning_rate": 0.00019851186053243666,
+ "loss": 1.1607,
+ "step": 25
+ },
+ {
+ "epoch": 0.3466666666666667,
+ "grad_norm": 0.6401374936103821,
+ "learning_rate": 0.00019832056870627417,
+ "loss": 1.1398,
+ "step": 26
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 1.0995603799819946,
+ "learning_rate": 0.0001981178176898239,
+ "loss": 0.9475,
+ "step": 27
+ },
+ {
+ "epoch": 0.37333333333333335,
+ "grad_norm": 0.5122275948524475,
+ "learning_rate": 0.00019790363111356837,
+ "loss": 1.0125,
+ "step": 28
+ },
+ {
+ "epoch": 0.38666666666666666,
+ "grad_norm": 0.9316695332527161,
+ "learning_rate": 0.00019767803394079615,
+ "loss": 1.0554,
+ "step": 29
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.6831843256950378,
+ "learning_rate": 0.00019744105246469263,
+ "loss": 1.027,
+ "step": 30
+ },
+ {
+ "epoch": 0.41333333333333333,
+ "grad_norm": 0.5529218912124634,
+ "learning_rate": 0.0001971927143052752,
+ "loss": 1.0218,
+ "step": 31
+ },
+ {
+ "epoch": 0.4266666666666667,
+ "grad_norm": 0.7164443135261536,
+ "learning_rate": 0.00019693304840617457,
+ "loss": 1.0901,
+ "step": 32
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.7430665493011475,
+ "learning_rate": 0.00019666208503126112,
+ "loss": 1.125,
+ "step": 33
+ },
+ {
+ "epoch": 0.4533333333333333,
+ "grad_norm": 0.7198122143745422,
+ "learning_rate": 0.00019637985576111778,
+ "loss": 0.9926,
+ "step": 34
+ },
+ {
+ "epoch": 0.4666666666666667,
+ "grad_norm": 0.6613873839378357,
+ "learning_rate": 0.0001960863934893594,
+ "loss": 1.1925,
+ "step": 35
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 1.2440484762191772,
+ "learning_rate": 0.00019578173241879872,
+ "loss": 0.8948,
+ "step": 36
+ },
+ {
+ "epoch": 0.49333333333333335,
+ "grad_norm": 0.6995570659637451,
+ "learning_rate": 0.00019546590805746052,
+ "loss": 0.9525,
+ "step": 37
+ },
+ {
+ "epoch": 0.5066666666666667,
+ "grad_norm": 0.6873968243598938,
+ "learning_rate": 0.00019513895721444286,
+ "loss": 0.9966,
+ "step": 38
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.5394595861434937,
+ "learning_rate": 0.00019480091799562704,
+ "loss": 1.0787,
+ "step": 39
+ },
+ {
+ "epoch": 0.5333333333333333,
+ "grad_norm": 0.9196312427520752,
+ "learning_rate": 0.00019445182979923654,
+ "loss": 1.0559,
+ "step": 40
+ },
+ {
+ "epoch": 0.5466666666666666,
+ "grad_norm": 2.5954103469848633,
+ "learning_rate": 0.000194091733311245,
+ "loss": 0.9638,
+ "step": 41
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 1.244681715965271,
+ "learning_rate": 0.00019372067050063438,
+ "loss": 0.9325,
+ "step": 42
+ },
+ {
+ "epoch": 0.5733333333333334,
+ "grad_norm": 0.613715410232544,
+ "learning_rate": 0.0001933386846145036,
+ "loss": 0.9428,
+ "step": 43
+ },
+ {
+ "epoch": 0.5866666666666667,
+ "grad_norm": 0.9604235291481018,
+ "learning_rate": 0.00019294582017302797,
+ "loss": 0.9486,
+ "step": 44
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.7591239809989929,
+ "learning_rate": 0.00019254212296427044,
+ "loss": 1.0955,
+ "step": 45
+ },
+ {
+ "epoch": 0.6133333333333333,
+ "grad_norm": 0.5218423008918762,
+ "learning_rate": 0.0001921276400388451,
+ "loss": 1.0368,
+ "step": 46
+ },
+ {
+ "epoch": 0.6266666666666667,
+ "grad_norm": 0.5670755505561829,
+ "learning_rate": 0.00019170241970443343,
+ "loss": 0.9854,
+ "step": 47
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.8089922070503235,
+ "learning_rate": 0.00019126651152015403,
+ "loss": 1.0396,
+ "step": 48
+ },
+ {
+ "epoch": 0.6533333333333333,
+ "grad_norm": 0.6459051966667175,
+ "learning_rate": 0.00019081996629078657,
+ "loss": 1.0156,
+ "step": 49
+ },
+ {
+ "epoch": 0.6666666666666666,
+ "grad_norm": 1.1918997764587402,
+ "learning_rate": 0.00019036283606085053,
+ "loss": 1.1542,
+ "step": 50
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.6263722777366638,
+ "learning_rate": 0.00018989517410853955,
+ "loss": 1.1471,
+ "step": 51
+ },
+ {
+ "epoch": 0.6933333333333334,
+ "grad_norm": 0.534618616104126,
+ "learning_rate": 0.00018941703493951164,
+ "loss": 0.9055,
+ "step": 52
+ },
+ {
+ "epoch": 0.7066666666666667,
+ "grad_norm": 0.529036819934845,
+ "learning_rate": 0.00018892847428053693,
+ "loss": 0.9896,
+ "step": 53
+ },
+ {
+ "epoch": 0.72,
+ "grad_norm": 0.716572105884552,
+ "learning_rate": 0.00018842954907300236,
+ "loss": 1.0204,
+ "step": 54
+ },
+ {
+ "epoch": 0.7333333333333333,
+ "grad_norm": 0.832662045955658,
+ "learning_rate": 0.00018792031746627563,
+ "loss": 1.0263,
+ "step": 55
+ },
+ {
+ "epoch": 0.7466666666666667,
+ "grad_norm": 0.5659884810447693,
+ "learning_rate": 0.0001874008388109276,
+ "loss": 1.0529,
+ "step": 56
+ },
+ {
+ "epoch": 0.76,
+ "grad_norm": 0.4971260726451874,
+ "learning_rate": 0.00018687117365181512,
+ "loss": 1.0109,
+ "step": 57
+ },
+ {
+ "epoch": 0.7733333333333333,
+ "grad_norm": 0.5997689962387085,
+ "learning_rate": 0.00018633138372102468,
+ "loss": 1.0363,
+ "step": 58
+ },
+ {
+ "epoch": 0.7866666666666666,
+ "grad_norm": 0.42450904846191406,
+ "learning_rate": 0.00018578153193067745,
+ "loss": 1.0156,
+ "step": 59
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 1.0025880336761475,
+ "learning_rate": 0.00018522168236559695,
+ "loss": 0.96,
+ "step": 60
+ },
+ {
+ "epoch": 0.8133333333333334,
+ "grad_norm": 0.47225672006607056,
+ "learning_rate": 0.00018465190027584005,
+ "loss": 1.0402,
+ "step": 61
+ },
+ {
+ "epoch": 0.8266666666666667,
+ "grad_norm": 0.6419042348861694,
+ "learning_rate": 0.00018407225206909208,
+ "loss": 0.9727,
+ "step": 62
+ },
+ {
+ "epoch": 0.84,
+ "grad_norm": 0.9594618082046509,
+ "learning_rate": 0.00018348280530292713,
+ "loss": 0.986,
+ "step": 63
+ },
+ {
+ "epoch": 0.8533333333333334,
+ "grad_norm": 0.5415605902671814,
+ "learning_rate": 0.00018288362867693414,
+ "loss": 1.019,
+ "step": 64
+ },
+ {
+ "epoch": 0.8666666666666667,
+ "grad_norm": 0.9662086367607117,
+ "learning_rate": 0.00018227479202471015,
+ "loss": 0.9531,
+ "step": 65
+ },
+ {
+ "epoch": 0.88,
+ "grad_norm": 0.7523136734962463,
+ "learning_rate": 0.0001816563663057211,
+ "loss": 1.0383,
+ "step": 66
+ },
+ {
+ "epoch": 0.8933333333333333,
+ "grad_norm": 0.7249945998191833,
+ "learning_rate": 0.00018102842359703176,
+ "loss": 1.1131,
+ "step": 67
+ },
+ {
+ "epoch": 0.9066666666666666,
+ "grad_norm": 0.4781404435634613,
+ "learning_rate": 0.000180391037084905,
+ "loss": 0.9701,
+ "step": 68
+ },
+ {
+ "epoch": 0.92,
+ "grad_norm": 0.5435504913330078,
+ "learning_rate": 0.00017974428105627208,
+ "loss": 1.2701,
+ "step": 69
+ },
+ {
+ "epoch": 0.9333333333333333,
+ "grad_norm": 0.48021838068962097,
+ "learning_rate": 0.00017908823089007457,
+ "loss": 0.9212,
+ "step": 70
+ },
+ {
+ "epoch": 0.9466666666666667,
+ "grad_norm": 0.7063950300216675,
+ "learning_rate": 0.00017842296304847893,
+ "loss": 1.0076,
+ "step": 71
+ },
+ {
+ "epoch": 0.96,
+ "grad_norm": 0.5694530606269836,
+ "learning_rate": 0.00017774855506796496,
+ "loss": 1.0417,
+ "step": 72
+ },
+ {
+ "epoch": 0.9733333333333334,
+ "grad_norm": 0.6120775938034058,
+ "learning_rate": 0.00017706508555028893,
+ "loss": 1.0501,
+ "step": 73
+ },
+ {
+ "epoch": 0.9866666666666667,
+ "grad_norm": 0.5728889107704163,
+ "learning_rate": 0.0001763726341533227,
+ "loss": 1.0024,
+ "step": 74
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.8452621102333069,
+ "learning_rate": 0.00017567128158176953,
+ "loss": 1.0966,
+ "step": 75
+ },
+ {
+ "epoch": 1.0133333333333334,
+ "grad_norm": 0.5769606828689575,
+ "learning_rate": 0.0001749611095777581,
+ "loss": 0.8877,
+ "step": 76
+ },
+ {
+ "epoch": 1.0266666666666666,
+ "grad_norm": 0.9046480059623718,
+ "learning_rate": 0.00017424220091131535,
+ "loss": 0.752,
+ "step": 77
+ },
+ {
+ "epoch": 1.04,
+ "grad_norm": 0.4488053023815155,
+ "learning_rate": 0.00017351463937072004,
+ "loss": 0.7588,
+ "step": 78
+ },
+ {
+ "epoch": 1.0533333333333332,
+ "grad_norm": 0.41479629278182983,
+ "learning_rate": 0.00017277850975273696,
+ "loss": 0.779,
+ "step": 79
+ },
+ {
+ "epoch": 1.0666666666666667,
+ "grad_norm": 0.7192550301551819,
+ "learning_rate": 0.000172033897852734,
+ "loss": 0.7809,
+ "step": 80
+ },
+ {
+ "epoch": 1.08,
+ "grad_norm": 0.7553783655166626,
+ "learning_rate": 0.00017128089045468294,
+ "loss": 0.7421,
+ "step": 81
+ },
+ {
+ "epoch": 1.0933333333333333,
+ "grad_norm": 0.5650737881660461,
+ "learning_rate": 0.0001705195753210446,
+ "loss": 0.7044,
+ "step": 82
+ },
+ {
+ "epoch": 1.1066666666666667,
+ "grad_norm": 0.6692880988121033,
+ "learning_rate": 0.0001697500411825403,
+ "loss": 0.8415,
+ "step": 83
+ },
+ {
+ "epoch": 1.12,
+ "grad_norm": 0.6710836291313171,
+ "learning_rate": 0.00016897237772781044,
+ "loss": 0.7247,
+ "step": 84
+ },
+ {
+ "epoch": 1.1333333333333333,
+ "grad_norm": 0.5887194275856018,
+ "learning_rate": 0.0001681866755929612,
+ "loss": 0.7947,
+ "step": 85
+ },
+ {
+ "epoch": 1.1466666666666667,
+ "grad_norm": 0.5538906455039978,
+ "learning_rate": 0.00016739302635100108,
+ "loss": 0.7337,
+ "step": 86
+ },
+ {
+ "epoch": 1.16,
+ "grad_norm": 0.6018480062484741,
+ "learning_rate": 0.00016659152250116812,
+ "loss": 0.6801,
+ "step": 87
+ },
+ {
+ "epoch": 1.1733333333333333,
+ "grad_norm": 0.548251748085022,
+ "learning_rate": 0.00016578225745814907,
+ "loss": 0.6898,
+ "step": 88
+ },
+ {
+ "epoch": 1.1866666666666668,
+ "grad_norm": 0.49416568875312805,
+ "learning_rate": 0.00016496532554119214,
+ "loss": 0.719,
+ "step": 89
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.6101306676864624,
+ "learning_rate": 0.000164140821963114,
+ "loss": 0.7917,
+ "step": 90
+ },
+ {
+ "epoch": 1.2133333333333334,
+ "grad_norm": 0.588020920753479,
+ "learning_rate": 0.000163308842819203,
+ "loss": 0.8035,
+ "step": 91
+ },
+ {
+ "epoch": 1.2266666666666666,
+ "grad_norm": 0.5376534461975098,
+ "learning_rate": 0.00016246948507601914,
+ "loss": 0.7816,
+ "step": 92
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 0.549625813961029,
+ "learning_rate": 0.00016162284656009274,
+ "loss": 0.7245,
+ "step": 93
+ },
+ {
+ "epoch": 1.2533333333333334,
+ "grad_norm": 0.6054933667182922,
+ "learning_rate": 0.0001607690259465229,
+ "loss": 0.7484,
+ "step": 94
+ },
+ {
+ "epoch": 1.2666666666666666,
+ "grad_norm": 0.5613316297531128,
+ "learning_rate": 0.00015990812274747692,
+ "loss": 0.7369,
+ "step": 95
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 0.5984250903129578,
+ "learning_rate": 0.00015904023730059228,
+ "loss": 0.891,
+ "step": 96
+ },
+ {
+ "epoch": 1.2933333333333334,
+ "grad_norm": 0.6490495204925537,
+ "learning_rate": 0.00015816547075728226,
+ "loss": 0.775,
+ "step": 97
+ },
+ {
+ "epoch": 1.3066666666666666,
+ "grad_norm": 0.6675053834915161,
+ "learning_rate": 0.000157283925070947,
+ "loss": 0.7565,
+ "step": 98
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 0.5397291779518127,
+ "learning_rate": 0.00015639570298509064,
+ "loss": 0.6685,
+ "step": 99
+ },
+ {
+ "epoch": 1.3333333333333333,
+ "grad_norm": 0.5830179452896118,
+ "learning_rate": 0.000155500908021347,
+ "loss": 0.8132,
+ "step": 100
+ },
+ {
+ "epoch": 1.3466666666666667,
+ "grad_norm": 0.511802077293396,
+ "learning_rate": 0.00015459964446741382,
+ "loss": 0.7664,
+ "step": 101
+ },
+ {
+ "epoch": 1.3599999999999999,
+ "grad_norm": 1.350032091140747,
+ "learning_rate": 0.0001536920173648984,
+ "loss": 0.8179,
+ "step": 102
+ },
+ {
+ "epoch": 1.3733333333333333,
+ "grad_norm": 0.7308780550956726,
+ "learning_rate": 0.00015277813249707487,
+ "loss": 0.8401,
+ "step": 103
+ },
+ {
+ "epoch": 1.3866666666666667,
+ "grad_norm": 0.5292226076126099,
+ "learning_rate": 0.0001518580963765555,
+ "loss": 0.6367,
+ "step": 104
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.6958481073379517,
+ "learning_rate": 0.00015093201623287631,
+ "loss": 0.7173,
+ "step": 105
+ },
+ {
+ "epoch": 1.4133333333333333,
+ "grad_norm": 0.7024071216583252,
+ "learning_rate": 0.00015000000000000001,
+ "loss": 0.5604,
+ "step": 106
+ },
+ {
+ "epoch": 1.4266666666666667,
+ "grad_norm": 0.5597444772720337,
+ "learning_rate": 0.00014906215630373606,
+ "loss": 0.6767,
+ "step": 107
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 0.6003674864768982,
+ "learning_rate": 0.00014811859444908052,
+ "loss": 0.8149,
+ "step": 108
+ },
+ {
+ "epoch": 1.4533333333333334,
+ "grad_norm": 0.5815126895904541,
+ "learning_rate": 0.00014716942440747664,
+ "loss": 0.7801,
+ "step": 109
+ },
+ {
+ "epoch": 1.4666666666666668,
+ "grad_norm": 0.7836669683456421,
+ "learning_rate": 0.0001462147568039977,
+ "loss": 0.8452,
+ "step": 110
+ },
+ {
+ "epoch": 1.48,
+ "grad_norm": 0.8783419132232666,
+ "learning_rate": 0.00014525470290445392,
+ "loss": 0.8257,
+ "step": 111
+ },
+ {
+ "epoch": 1.4933333333333334,
+ "grad_norm": 0.46948131918907166,
+ "learning_rate": 0.00014428937460242417,
+ "loss": 0.7481,
+ "step": 112
+ },
+ {
+ "epoch": 1.5066666666666668,
+ "grad_norm": 0.5725980401039124,
+ "learning_rate": 0.00014331888440621533,
+ "loss": 0.8267,
+ "step": 113
+ },
+ {
+ "epoch": 1.52,
+ "grad_norm": 0.4418632686138153,
+ "learning_rate": 0.00014234334542574906,
+ "loss": 0.7631,
+ "step": 114
+ },
+ {
+ "epoch": 1.5333333333333332,
+ "grad_norm": 0.6430942416191101,
+ "learning_rate": 0.00014136287135937915,
+ "loss": 0.8817,
+ "step": 115
+ },
+ {
+ "epoch": 1.5466666666666666,
+ "grad_norm": 0.5670009255409241,
+ "learning_rate": 0.00014037757648064018,
+ "loss": 0.5991,
+ "step": 116
+ },
+ {
+ "epoch": 1.56,
+ "grad_norm": 0.5407504439353943,
+ "learning_rate": 0.00013938757562492873,
+ "loss": 0.6898,
+ "step": 117
+ },
+ {
+ "epoch": 1.5733333333333333,
+ "grad_norm": 0.5176808834075928,
+ "learning_rate": 0.00013839298417611963,
+ "loss": 0.731,
+ "step": 118
+ },
+ {
+ "epoch": 1.5866666666666667,
+ "grad_norm": 0.9752798080444336,
+ "learning_rate": 0.00013739391805311793,
+ "loss": 0.6736,
+ "step": 119
+ },
+ {
+ "epoch": 1.6,
+ "grad_norm": 0.7100059390068054,
+ "learning_rate": 0.00013639049369634876,
+ "loss": 0.7369,
+ "step": 120
+ },
+ {
+ "epoch": 1.6133333333333333,
+ "grad_norm": 0.6285961270332336,
+ "learning_rate": 0.0001353828280541861,
+ "loss": 0.721,
+ "step": 121
+ },
+ {
+ "epoch": 1.6266666666666667,
+ "grad_norm": 0.5981026291847229,
+ "learning_rate": 0.00013437103856932264,
+ "loss": 0.8094,
+ "step": 122
+ },
+ {
+ "epoch": 1.6400000000000001,
+ "grad_norm": 0.6587502360343933,
+ "learning_rate": 0.00013335524316508208,
+ "loss": 0.7646,
+ "step": 123
+ },
+ {
+ "epoch": 1.6533333333333333,
+ "grad_norm": 0.5544253587722778,
+ "learning_rate": 0.00013233556023167485,
+ "loss": 0.7165,
+ "step": 124
+ },
+ {
+ "epoch": 1.6666666666666665,
+ "grad_norm": 0.6012857556343079,
+ "learning_rate": 0.00013131210861240026,
+ "loss": 0.8104,
+ "step": 125
+ },
+ {
+ "epoch": 1.6800000000000002,
+ "grad_norm": 0.5157524347305298,
+ "learning_rate": 0.00013028500758979506,
+ "loss": 0.8585,
+ "step": 126
+ },
+ {
+ "epoch": 1.6933333333333334,
+ "grad_norm": 0.4888676702976227,
+ "learning_rate": 0.00012925437687173142,
+ "loss": 0.6579,
+ "step": 127
+ },
+ {
+ "epoch": 1.7066666666666666,
+ "grad_norm": 0.5127140879631042,
+ "learning_rate": 0.00012822033657746478,
+ "loss": 0.7432,
+ "step": 128
+ },
+ {
+ "epoch": 1.72,
+ "grad_norm": 0.6154641509056091,
+ "learning_rate": 0.0001271830072236343,
+ "loss": 0.7322,
+ "step": 129
+ },
+ {
+ "epoch": 1.7333333333333334,
+ "grad_norm": 0.5081548690795898,
+ "learning_rate": 0.00012614250971021657,
+ "loss": 0.7547,
+ "step": 130
+ },
+ {
+ "epoch": 1.7466666666666666,
+ "grad_norm": 0.6808217763900757,
+ "learning_rate": 0.00012509896530643488,
+ "loss": 0.6855,
+ "step": 131
+ },
+ {
+ "epoch": 1.76,
+ "grad_norm": 0.8672941327095032,
+ "learning_rate": 0.00012405249563662537,
+ "loss": 0.6332,
+ "step": 132
+ },
+ {
+ "epoch": 1.7733333333333334,
+ "grad_norm": 0.6130337119102478,
+ "learning_rate": 0.00012300322266606178,
+ "loss": 0.7453,
+ "step": 133
+ },
+ {
+ "epoch": 1.7866666666666666,
+ "grad_norm": 0.739959180355072,
+ "learning_rate": 0.00012195126868674051,
+ "loss": 0.687,
+ "step": 134
+ },
+ {
+ "epoch": 1.8,
+ "grad_norm": 0.5801121592521667,
+ "learning_rate": 0.00012089675630312754,
+ "loss": 0.7059,
+ "step": 135
+ },
+ {
+ "epoch": 1.8133333333333335,
+ "grad_norm": 0.5766938328742981,
+ "learning_rate": 0.000119839808417869,
+ "loss": 0.737,
+ "step": 136
+ },
+ {
+ "epoch": 1.8266666666666667,
+ "grad_norm": 0.6705268621444702,
+ "learning_rate": 0.00011878054821746703,
+ "loss": 0.7358,
+ "step": 137
+ },
+ {
+ "epoch": 1.8399999999999999,
+ "grad_norm": 0.7814889550209045,
+ "learning_rate": 0.0001177190991579223,
+ "loss": 0.7021,
+ "step": 138
+ },
+ {
+ "epoch": 1.8533333333333335,
+ "grad_norm": 0.6991515755653381,
+ "learning_rate": 0.00011665558495034546,
+ "loss": 0.7325,
+ "step": 139
+ },
+ {
+ "epoch": 1.8666666666666667,
+ "grad_norm": 0.8299288749694824,
+ "learning_rate": 0.00011559012954653865,
+ "loss": 0.7128,
+ "step": 140
+ },
+ {
+ "epoch": 1.88,
+ "grad_norm": 0.7293754816055298,
+ "learning_rate": 0.00011452285712454904,
+ "loss": 0.5813,
+ "step": 141
+ },
+ {
+ "epoch": 1.8933333333333333,
+ "grad_norm": 0.6560428738594055,
+ "learning_rate": 0.00011345389207419588,
+ "loss": 0.6352,
+ "step": 142
+ },
+ {
+ "epoch": 1.9066666666666667,
+ "grad_norm": 0.54889976978302,
+ "learning_rate": 0.00011238335898257304,
+ "loss": 0.7372,
+ "step": 143
+ },
+ {
+ "epoch": 1.92,
+ "grad_norm": 0.5890987515449524,
+ "learning_rate": 0.00011131138261952845,
+ "loss": 0.6402,
+ "step": 144
+ },
+ {
+ "epoch": 1.9333333333333333,
+ "grad_norm": 0.8450446128845215,
+ "learning_rate": 0.00011023808792312227,
+ "loss": 0.7152,
+ "step": 145
+ },
+ {
+ "epoch": 1.9466666666666668,
+ "grad_norm": 0.7649719715118408,
+ "learning_rate": 0.0001091635999850655,
+ "loss": 0.6831,
+ "step": 146
+ },
+ {
+ "epoch": 1.96,
+ "grad_norm": 0.6236613988876343,
+ "learning_rate": 0.00010808804403614043,
+ "loss": 0.6671,
+ "step": 147
+ },
+ {
+ "epoch": 1.9733333333333334,
+ "grad_norm": 0.6295299530029297,
+ "learning_rate": 0.00010701154543160541,
+ "loss": 0.8226,
+ "step": 148
+ },
+ {
+ "epoch": 1.9866666666666668,
+ "grad_norm": 0.641965389251709,
+ "learning_rate": 0.00010593422963658452,
+ "loss": 0.6567,
+ "step": 149
+ },
+ {
+ "epoch": 2.0,
+ "grad_norm": 0.779958188533783,
+ "learning_rate": 0.00010485622221144484,
+ "loss": 0.6346,
+ "step": 150
+ },
+ {
+ "epoch": 2.013333333333333,
+ "grad_norm": 0.6322675347328186,
+ "learning_rate": 0.00010377764879716234,
+ "loss": 0.5576,
+ "step": 151
+ },
+ {
+ "epoch": 2.026666666666667,
+ "grad_norm": 0.7052869200706482,
+ "learning_rate": 0.00010269863510067872,
+ "loss": 0.4362,
+ "step": 152
+ },
+ {
+ "epoch": 2.04,
+ "grad_norm": 0.4991523027420044,
+ "learning_rate": 0.00010161930688025017,
+ "loss": 0.4478,
+ "step": 153
+ },
+ {
+ "epoch": 2.0533333333333332,
+ "grad_norm": 0.4096013903617859,
+ "learning_rate": 0.00010053978993079045,
+ "loss": 0.5258,
+ "step": 154
+ },
+ {
+ "epoch": 2.066666666666667,
+ "grad_norm": 0.4861268103122711,
+ "learning_rate": 9.946021006920959e-05,
+ "loss": 0.5085,
+ "step": 155
+ },
+ {
+ "epoch": 2.08,
+ "grad_norm": 0.5277904272079468,
+ "learning_rate": 9.838069311974986e-05,
+ "loss": 0.4549,
+ "step": 156
+ },
+ {
+ "epoch": 2.0933333333333333,
+ "grad_norm": 0.640762209892273,
+ "learning_rate": 9.730136489932133e-05,
+ "loss": 0.5168,
+ "step": 157
+ },
+ {
+ "epoch": 2.1066666666666665,
+ "grad_norm": 0.6294235587120056,
+ "learning_rate": 9.622235120283769e-05,
+ "loss": 0.3587,
+ "step": 158
+ },
+ {
+ "epoch": 2.12,
+ "grad_norm": 0.6455483436584473,
+ "learning_rate": 9.514377778855521e-05,
+ "loss": 0.5013,
+ "step": 159
+ },
+ {
+ "epoch": 2.1333333333333333,
+ "grad_norm": 0.7052115201950073,
+ "learning_rate": 9.406577036341548e-05,
+ "loss": 0.458,
+ "step": 160
+ },
+ {
+ "epoch": 2.1466666666666665,
+ "grad_norm": 0.6517965197563171,
+ "learning_rate": 9.298845456839459e-05,
+ "loss": 0.4427,
+ "step": 161
+ },
+ {
+ "epoch": 2.16,
+ "grad_norm": 1.0105723142623901,
+ "learning_rate": 9.19119559638596e-05,
+ "loss": 0.5482,
+ "step": 162
+ },
+ {
+ "epoch": 2.1733333333333333,
+ "grad_norm": 0.6122080683708191,
+ "learning_rate": 9.083640001493454e-05,
+ "loss": 0.3708,
+ "step": 163
+ },
+ {
+ "epoch": 2.1866666666666665,
+ "grad_norm": 0.7772620320320129,
+ "learning_rate": 8.976191207687775e-05,
+ "loss": 0.4423,
+ "step": 164
+ },
+ {
+ "epoch": 2.2,
+ "grad_norm": 0.5697551965713501,
+ "learning_rate": 8.868861738047158e-05,
+ "loss": 0.4544,
+ "step": 165
+ },
+ {
+ "epoch": 2.2133333333333334,
+ "grad_norm": 0.6369841694831848,
+ "learning_rate": 8.7616641017427e-05,
+ "loss": 0.4458,
+ "step": 166
+ },
+ {
+ "epoch": 2.2266666666666666,
+ "grad_norm": 0.6867621541023254,
+ "learning_rate": 8.654610792580415e-05,
+ "loss": 0.3971,
+ "step": 167
+ },
+ {
+ "epoch": 2.24,
+ "grad_norm": 0.6379404067993164,
+ "learning_rate": 8.5477142875451e-05,
+ "loss": 0.409,
+ "step": 168
+ },
+ {
+ "epoch": 2.2533333333333334,
+ "grad_norm": 0.5854802131652832,
+ "learning_rate": 8.440987045346134e-05,
+ "loss": 0.4489,
+ "step": 169
+ },
+ {
+ "epoch": 2.2666666666666666,
+ "grad_norm": 0.6577348113059998,
+ "learning_rate": 8.334441504965455e-05,
+ "loss": 0.5224,
+ "step": 170
+ },
+ {
+ "epoch": 2.2800000000000002,
+ "grad_norm": 0.6020484566688538,
+ "learning_rate": 8.228090084207774e-05,
+ "loss": 0.3876,
+ "step": 171
+ },
+ {
+ "epoch": 2.2933333333333334,
+ "grad_norm": 0.5832955241203308,
+ "learning_rate": 8.1219451782533e-05,
+ "loss": 0.4096,
+ "step": 172
+ },
+ {
+ "epoch": 2.3066666666666666,
+ "grad_norm": 0.6917557716369629,
+ "learning_rate": 8.016019158213101e-05,
+ "loss": 0.3827,
+ "step": 173
+ },
+ {
+ "epoch": 2.32,
+ "grad_norm": 0.6793459057807922,
+ "learning_rate": 7.91032436968725e-05,
+ "loss": 0.3801,
+ "step": 174
+ },
+ {
+ "epoch": 2.3333333333333335,
+ "grad_norm": 0.5776801705360413,
+ "learning_rate": 7.804873131325954e-05,
+ "loss": 0.4567,
+ "step": 175
+ },
+ {
+ "epoch": 2.3466666666666667,
+ "grad_norm": 0.7591734528541565,
+ "learning_rate": 7.699677733393826e-05,
+ "loss": 0.4957,
+ "step": 176
+ },
+ {
+ "epoch": 2.36,
+ "grad_norm": 0.8160498142242432,
+ "learning_rate": 7.594750436337467e-05,
+ "loss": 0.4485,
+ "step": 177
+ },
+ {
+ "epoch": 2.3733333333333335,
+ "grad_norm": 0.5776082873344421,
+ "learning_rate": 7.490103469356513e-05,
+ "loss": 0.5212,
+ "step": 178
+ },
+ {
+ "epoch": 2.3866666666666667,
+ "grad_norm": 0.6858205795288086,
+ "learning_rate": 7.385749028978346e-05,
+ "loss": 0.3985,
+ "step": 179
+ },
+ {
+ "epoch": 2.4,
+ "grad_norm": 0.7106760740280151,
+ "learning_rate": 7.281699277636572e-05,
+ "loss": 0.447,
+ "step": 180
+ },
+ {
+ "epoch": 2.413333333333333,
+ "grad_norm": 0.5468612313270569,
+ "learning_rate": 7.177966342253524e-05,
+ "loss": 0.3429,
+ "step": 181
+ },
+ {
+ "epoch": 2.4266666666666667,
+ "grad_norm": 0.6179500222206116,
+ "learning_rate": 7.07456231282686e-05,
+ "loss": 0.4873,
+ "step": 182
+ },
+ {
+ "epoch": 2.44,
+ "grad_norm": 0.677168607711792,
+ "learning_rate": 6.971499241020495e-05,
+ "loss": 0.5149,
+ "step": 183
+ },
+ {
+ "epoch": 2.453333333333333,
+ "grad_norm": 0.7949701547622681,
+ "learning_rate": 6.868789138759976e-05,
+ "loss": 0.5183,
+ "step": 184
+ },
+ {
+ "epoch": 2.466666666666667,
+ "grad_norm": 0.631366491317749,
+ "learning_rate": 6.766443976832517e-05,
+ "loss": 0.3915,
+ "step": 185
+ },
+ {
+ "epoch": 2.48,
+ "grad_norm": 0.6317359805107117,
+ "learning_rate": 6.664475683491796e-05,
+ "loss": 0.4246,
+ "step": 186
+ },
+ {
+ "epoch": 2.493333333333333,
+ "grad_norm": 0.7377456426620483,
+ "learning_rate": 6.562896143067734e-05,
+ "loss": 0.3572,
+ "step": 187
+ },
+ {
+ "epoch": 2.506666666666667,
+ "grad_norm": 0.7473776340484619,
+ "learning_rate": 6.461717194581393e-05,
+ "loss": 0.4051,
+ "step": 188
+ },
+ {
+ "epoch": 2.52,
+ "grad_norm": 0.6155073046684265,
+ "learning_rate": 6.360950630365126e-05,
+ "loss": 0.4465,
+ "step": 189
+ },
+ {
+ "epoch": 2.533333333333333,
+ "grad_norm": 0.599367082118988,
+ "learning_rate": 6.260608194688206e-05,
+ "loss": 0.4863,
+ "step": 190
+ },
+ {
+ "epoch": 2.546666666666667,
+ "grad_norm": 0.6318126320838928,
+ "learning_rate": 6.160701582388038e-05,
+ "loss": 0.5023,
+ "step": 191
+ },
+ {
+ "epoch": 2.56,
+ "grad_norm": 0.6112634539604187,
+ "learning_rate": 6.061242437507131e-05,
+ "loss": 0.5643,
+ "step": 192
+ },
+ {
+ "epoch": 2.5733333333333333,
+ "grad_norm": 0.9118645787239075,
+ "learning_rate": 5.962242351935985e-05,
+ "loss": 0.4692,
+ "step": 193
+ },
+ {
+ "epoch": 2.586666666666667,
+ "grad_norm": 0.7344533801078796,
+ "learning_rate": 5.863712864062089e-05,
+ "loss": 0.4355,
+ "step": 194
+ },
+ {
+ "epoch": 2.6,
+ "grad_norm": 0.6159957051277161,
+ "learning_rate": 5.765665457425102e-05,
+ "loss": 0.5576,
+ "step": 195
+ },
+ {
+ "epoch": 2.6133333333333333,
+ "grad_norm": 0.632001519203186,
+ "learning_rate": 5.668111559378471e-05,
+ "loss": 0.5418,
+ "step": 196
+ },
+ {
+ "epoch": 2.626666666666667,
+ "grad_norm": 0.7217976450920105,
+ "learning_rate": 5.571062539757581e-05,
+ "loss": 0.5315,
+ "step": 197
+ },
+ {
+ "epoch": 2.64,
+ "grad_norm": 0.5802445411682129,
+ "learning_rate": 5.474529709554612e-05,
+ "loss": 0.5183,
+ "step": 198
+ },
+ {
+ "epoch": 2.6533333333333333,
+ "grad_norm": 0.8810819983482361,
+ "learning_rate": 5.378524319600231e-05,
+ "loss": 0.5234,
+ "step": 199
+ },
+ {
+ "epoch": 2.6666666666666665,
+ "grad_norm": 0.8704924583435059,
+ "learning_rate": 5.283057559252341e-05,
+ "loss": 0.4652,
+ "step": 200
+ },
+ {
+ "epoch": 2.68,
+ "grad_norm": 0.6765443682670593,
+ "learning_rate": 5.1881405550919493e-05,
+ "loss": 0.4305,
+ "step": 201
+ },
+ {
+ "epoch": 2.6933333333333334,
+ "grad_norm": 0.7338111400604248,
+ "learning_rate": 5.0937843696263966e-05,
+ "loss": 0.5201,
+ "step": 202
+ },
+ {
+ "epoch": 2.7066666666666666,
+ "grad_norm": 0.8355885744094849,
+ "learning_rate": 5.000000000000002e-05,
+ "loss": 0.3281,
+ "step": 203
+ },
+ {
+ "epoch": 2.7199999999999998,
+ "grad_norm": 0.6787794828414917,
+ "learning_rate": 4.9067983767123736e-05,
+ "loss": 0.5057,
+ "step": 204
+ },
+ {
+ "epoch": 2.7333333333333334,
+ "grad_norm": 0.6520538330078125,
+ "learning_rate": 4.814190362344454e-05,
+ "loss": 0.3988,
+ "step": 205
+ },
+ {
+ "epoch": 2.7466666666666666,
+ "grad_norm": 0.9957300424575806,
+ "learning_rate": 4.722186750292511e-05,
+ "loss": 0.4347,
+ "step": 206
+ },
+ {
+ "epoch": 2.76,
+ "grad_norm": 0.6702403426170349,
+ "learning_rate": 4.630798263510162e-05,
+ "loss": 0.4431,
+ "step": 207
+ },
+ {
+ "epoch": 2.7733333333333334,
+ "grad_norm": 0.6874341368675232,
+ "learning_rate": 4.540035553258619e-05,
+ "loss": 0.5117,
+ "step": 208
+ },
+ {
+ "epoch": 2.7866666666666666,
+ "grad_norm": 0.5381680727005005,
+ "learning_rate": 4.449909197865303e-05,
+ "loss": 0.4461,
+ "step": 209
+ },
+ {
+ "epoch": 2.8,
+ "grad_norm": 0.5413621068000793,
+ "learning_rate": 4.360429701490934e-05,
+ "loss": 0.5286,
+ "step": 210
+ },
+ {
+ "epoch": 2.8133333333333335,
+ "grad_norm": 0.5521364808082581,
+ "learning_rate": 4.271607492905303e-05,
+ "loss": 0.4846,
+ "step": 211
+ },
+ {
+ "epoch": 2.8266666666666667,
+ "grad_norm": 0.5857036709785461,
+ "learning_rate": 4.183452924271776e-05,
+ "loss": 0.4799,
+ "step": 212
+ },
+ {
+ "epoch": 2.84,
+ "grad_norm": 0.7210860848426819,
+ "learning_rate": 4.0959762699407766e-05,
+ "loss": 0.3214,
+ "step": 213
+ },
+ {
+ "epoch": 2.8533333333333335,
+ "grad_norm": 0.6596788763999939,
+ "learning_rate": 4.009187725252309e-05,
+ "loss": 0.4818,
+ "step": 214
+ },
+ {
+ "epoch": 2.8666666666666667,
+ "grad_norm": 0.6683171987533569,
+ "learning_rate": 3.9230974053477086e-05,
+ "loss": 0.4854,
+ "step": 215
+ },
+ {
+ "epoch": 2.88,
+ "grad_norm": 0.6668866872787476,
+ "learning_rate": 3.8377153439907266e-05,
+ "loss": 0.3712,
+ "step": 216
+ },
+ {
+ "epoch": 2.8933333333333335,
+ "grad_norm": 0.8019754886627197,
+ "learning_rate": 3.7530514923980884e-05,
+ "loss": 0.3826,
+ "step": 217
+ },
+ {
+ "epoch": 2.9066666666666667,
+ "grad_norm": 0.7723852396011353,
+ "learning_rate": 3.669115718079702e-05,
+ "loss": 0.3923,
+ "step": 218
+ },
+ {
+ "epoch": 2.92,
+ "grad_norm": 0.8932898044586182,
+ "learning_rate": 3.585917803688603e-05,
+ "loss": 0.4372,
+ "step": 219
+ },
+ {
+ "epoch": 2.9333333333333336,
+ "grad_norm": 0.8380217552185059,
+ "learning_rate": 3.503467445880789e-05,
+ "loss": 0.6057,
+ "step": 220
+ },
+ {
+ "epoch": 2.9466666666666668,
+ "grad_norm": 0.7885947227478027,
+ "learning_rate": 3.421774254185096e-05,
+ "loss": 0.4252,
+ "step": 221
+ },
+ {
+ "epoch": 2.96,
+ "grad_norm": 0.6929410099983215,
+ "learning_rate": 3.340847749883191e-05,
+ "loss": 0.4339,
+ "step": 222
+ },
+ {
+ "epoch": 2.9733333333333336,
+ "grad_norm": 0.5754182934761047,
+ "learning_rate": 3.2606973648998915e-05,
+ "loss": 0.5381,
+ "step": 223
+ },
+ {
+ "epoch": 2.986666666666667,
+ "grad_norm": 0.5466946363449097,
+ "learning_rate": 3.1813324407038825e-05,
+ "loss": 0.4699,
+ "step": 224
+ },
+ {
+ "epoch": 3.0,
+ "grad_norm": 0.6496762037277222,
+ "learning_rate": 3.102762227218957e-05,
+ "loss": 0.4392,
+ "step": 225
+ },
+ {
+ "epoch": 3.013333333333333,
+ "grad_norm": 0.5232837200164795,
+ "learning_rate": 3.0249958817459722e-05,
+ "loss": 0.3204,
+ "step": 226
+ },
+ {
+ "epoch": 3.026666666666667,
+ "grad_norm": 0.6648370623588562,
+ "learning_rate": 2.9480424678955443e-05,
+ "loss": 0.3054,
+ "step": 227
+ },
+ {
+ "epoch": 3.04,
+ "grad_norm": 0.5386692881584167,
+ "learning_rate": 2.8719109545317103e-05,
+ "loss": 0.3565,
+ "step": 228
+ },
+ {
+ "epoch": 3.0533333333333332,
+ "grad_norm": 0.5926947593688965,
+ "learning_rate": 2.7966102147265994e-05,
+ "loss": 0.298,
+ "step": 229
+ },
+ {
+ "epoch": 3.066666666666667,
+ "grad_norm": 0.5218151807785034,
+ "learning_rate": 2.722149024726307e-05,
+ "loss": 0.242,
+ "step": 230
+ },
+ {
+ "epoch": 3.08,
+ "grad_norm": 0.5256300568580627,
+ "learning_rate": 2.6485360629279987e-05,
+ "loss": 0.3497,
+ "step": 231
+ },
+ {
+ "epoch": 3.0933333333333333,
+ "grad_norm": 0.5321183204650879,
+ "learning_rate": 2.5757799088684654e-05,
+ "loss": 0.2814,
+ "step": 232
+ },
+ {
+ "epoch": 3.1066666666666665,
+ "grad_norm": 0.4942474663257599,
+ "learning_rate": 2.5038890422241958e-05,
+ "loss": 0.3361,
+ "step": 233
+ },
+ {
+ "epoch": 3.12,
+ "grad_norm": 0.6570059657096863,
+ "learning_rate": 2.432871841823047e-05,
+ "loss": 0.2742,
+ "step": 234
+ },
+ {
+ "epoch": 3.1333333333333333,
+ "grad_norm": 0.6714842319488525,
+ "learning_rate": 2.3627365846677306e-05,
+ "loss": 0.3261,
+ "step": 235
+ },
+ {
+ "epoch": 3.1466666666666665,
+ "grad_norm": 0.68682861328125,
+ "learning_rate": 2.2934914449711087e-05,
+ "loss": 0.2931,
+ "step": 236
+ },
+ {
+ "epoch": 3.16,
+ "grad_norm": 0.519067645072937,
+ "learning_rate": 2.2251444932035094e-05,
+ "loss": 0.323,
+ "step": 237
+ },
+ {
+ "epoch": 3.1733333333333333,
+ "grad_norm": 0.5991199612617493,
+ "learning_rate": 2.157703695152109e-05,
+ "loss": 0.2603,
+ "step": 238
+ },
+ {
+ "epoch": 3.1866666666666665,
+ "grad_norm": 0.7177437543869019,
+ "learning_rate": 2.091176910992545e-05,
+ "loss": 0.3499,
+ "step": 239
+ },
+ {
+ "epoch": 3.2,
+ "grad_norm": 0.6380268335342407,
+ "learning_rate": 2.025571894372794e-05,
+ "loss": 0.3064,
+ "step": 240
+ },
+ {
+ "epoch": 3.2133333333333334,
+ "grad_norm": 0.5707582235336304,
+ "learning_rate": 1.9608962915094996e-05,
+ "loss": 0.2747,
+ "step": 241
+ },
+ {
+ "epoch": 3.2266666666666666,
+ "grad_norm": 0.6281158328056335,
+ "learning_rate": 1.897157640296825e-05,
+ "loss": 0.2461,
+ "step": 242
+ },
+ {
+ "epoch": 3.24,
+ "grad_norm": 0.6357036232948303,
+ "learning_rate": 1.8343633694278895e-05,
+ "loss": 0.2846,
+ "step": 243
+ },
+ {
+ "epoch": 3.2533333333333334,
+ "grad_norm": 0.7706279754638672,
+ "learning_rate": 1.772520797528988e-05,
+ "loss": 0.2931,
+ "step": 244
+ },
+ {
+ "epoch": 3.2666666666666666,
+ "grad_norm": 0.5687737464904785,
+ "learning_rate": 1.7116371323065883e-05,
+ "loss": 0.2162,
+ "step": 245
+ },
+ {
+ "epoch": 3.2800000000000002,
+ "grad_norm": 0.5638925433158875,
+ "learning_rate": 1.65171946970729e-05,
+ "loss": 0.2219,
+ "step": 246
+ },
+ {
+ "epoch": 3.2933333333333334,
+ "grad_norm": 0.6213463544845581,
+ "learning_rate": 1.592774793090792e-05,
+ "loss": 0.3329,
+ "step": 247
+ },
+ {
+ "epoch": 3.3066666666666666,
+ "grad_norm": 0.6423382759094238,
+ "learning_rate": 1.534809972415998e-05,
+ "loss": 0.225,
+ "step": 248
+ },
+ {
+ "epoch": 3.32,
+ "grad_norm": 0.818946361541748,
+ "learning_rate": 1.4778317634403083e-05,
+ "loss": 0.2252,
+ "step": 249
+ },
+ {
+ "epoch": 3.3333333333333335,
+ "grad_norm": 0.5952211022377014,
+ "learning_rate": 1.4218468069322578e-05,
+ "loss": 0.2295,
+ "step": 250
+ },
+ {
+ "epoch": 3.3466666666666667,
+ "grad_norm": 0.5699911117553711,
+ "learning_rate": 1.3668616278975343e-05,
+ "loss": 0.3132,
+ "step": 251
+ },
+ {
+ "epoch": 3.36,
+ "grad_norm": 0.7674257755279541,
+ "learning_rate": 1.3128826348184887e-05,
+ "loss": 0.3487,
+ "step": 252
+ },
+ {
+ "epoch": 3.3733333333333335,
+ "grad_norm": 0.6047098636627197,
+ "learning_rate": 1.2599161189072427e-05,
+ "loss": 0.1991,
+ "step": 253
+ },
+ {
+ "epoch": 3.3866666666666667,
+ "grad_norm": 0.6673244833946228,
+ "learning_rate": 1.2079682533724379e-05,
+ "loss": 0.2233,
+ "step": 254
+ },
+ {
+ "epoch": 3.4,
+ "grad_norm": 0.7468060851097107,
+ "learning_rate": 1.1570450926997655e-05,
+ "loss": 0.2155,
+ "step": 255
+ },
+ {
+ "epoch": 3.413333333333333,
+ "grad_norm": 0.6119690537452698,
+ "learning_rate": 1.1071525719463095e-05,
+ "loss": 0.1624,
+ "step": 256
+ },
+ {
+ "epoch": 3.4266666666666667,
+ "grad_norm": 0.5702852606773376,
+ "learning_rate": 1.0582965060488359e-05,
+ "loss": 0.3134,
+ "step": 257
+ },
+ {
+ "epoch": 3.44,
+ "grad_norm": 0.9024640321731567,
+ "learning_rate": 1.010482589146048e-05,
+ "loss": 0.3316,
+ "step": 258
+ },
+ {
+ "epoch": 3.453333333333333,
+ "grad_norm": 0.6158623695373535,
+ "learning_rate": 9.637163939149485e-06,
+ "loss": 0.2637,
+ "step": 259
+ },
+ {
+ "epoch": 3.466666666666667,
+ "grad_norm": 0.5979247689247131,
+ "learning_rate": 9.180033709213454e-06,
+ "loss": 0.3148,
+ "step": 260
+ },
+ {
+ "epoch": 3.48,
+ "grad_norm": 0.5976862907409668,
+ "learning_rate": 8.733488479845997e-06,
+ "loss": 0.293,
+ "step": 261
+ },
+ {
+ "epoch": 3.493333333333333,
+ "grad_norm": 0.6761628985404968,
+ "learning_rate": 8.297580295566575e-06,
+ "loss": 0.2675,
+ "step": 262
+ },
+ {
+ "epoch": 3.506666666666667,
+ "grad_norm": 0.7228958606719971,
+ "learning_rate": 7.872359961154906e-06,
+ "loss": 0.3601,
+ "step": 263
+ },
+ {
+ "epoch": 3.52,
+ "grad_norm": 0.6675010919570923,
+ "learning_rate": 7.457877035729588e-06,
+ "loss": 0.2551,
+ "step": 264
+ },
+ {
+ "epoch": 3.533333333333333,
+ "grad_norm": 0.6560245752334595,
+ "learning_rate": 7.054179826972074e-06,
+ "loss": 0.1799,
+ "step": 265
+ },
+ {
+ "epoch": 3.546666666666667,
+ "grad_norm": 0.6431569457054138,
+ "learning_rate": 6.661315385496425e-06,
+ "loss": 0.2183,
+ "step": 266
+ },
+ {
+ "epoch": 3.56,
+ "grad_norm": 0.7804158329963684,
+ "learning_rate": 6.2793294993656494e-06,
+ "loss": 0.2438,
+ "step": 267
+ },
+ {
+ "epoch": 3.5733333333333333,
+ "grad_norm": 0.8433220982551575,
+ "learning_rate": 5.908266688755049e-06,
+ "loss": 0.2806,
+ "step": 268
+ },
+ {
+ "epoch": 3.586666666666667,
+ "grad_norm": 0.7814007997512817,
+ "learning_rate": 5.54817020076347e-06,
+ "loss": 0.2841,
+ "step": 269
+ },
+ {
+ "epoch": 3.6,
+ "grad_norm": 0.5926189422607422,
+ "learning_rate": 5.199082004372957e-06,
+ "loss": 0.2349,
+ "step": 270
+ },
+ {
+ "epoch": 3.6133333333333333,
+ "grad_norm": 0.7108579277992249,
+ "learning_rate": 4.861042785557146e-06,
+ "loss": 0.2771,
+ "step": 271
+ },
+ {
+ "epoch": 3.626666666666667,
+ "grad_norm": 0.8635604381561279,
+ "learning_rate": 4.534091942539475e-06,
+ "loss": 0.3572,
+ "step": 272
+ },
+ {
+ "epoch": 3.64,
+ "grad_norm": 0.7461998462677002,
+ "learning_rate": 4.2182675812012965e-06,
+ "loss": 0.3266,
+ "step": 273
+ },
+ {
+ "epoch": 3.6533333333333333,
+ "grad_norm": 0.6848060488700867,
+ "learning_rate": 3.913606510640644e-06,
+ "loss": 0.2782,
+ "step": 274
+ },
+ {
+ "epoch": 3.6666666666666665,
+ "grad_norm": 0.5404589176177979,
+ "learning_rate": 3.620144238882206e-06,
+ "loss": 0.2365,
+ "step": 275
+ },
+ {
+ "epoch": 3.68,
+ "grad_norm": 0.8409443497657776,
+ "learning_rate": 3.3379149687388867e-06,
+ "loss": 0.243,
+ "step": 276
+ },
+ {
+ "epoch": 3.6933333333333334,
+ "grad_norm": 0.6383155584335327,
+ "learning_rate": 3.06695159382544e-06,
+ "loss": 0.3358,
+ "step": 277
+ },
+ {
+ "epoch": 3.7066666666666666,
+ "grad_norm": 0.790045976638794,
+ "learning_rate": 2.8072856947248037e-06,
+ "loss": 0.2835,
+ "step": 278
+ },
+ {
+ "epoch": 3.7199999999999998,
+ "grad_norm": 0.6109788417816162,
+ "learning_rate": 2.5589475353073988e-06,
+ "loss": 0.3113,
+ "step": 279
+ },
+ {
+ "epoch": 3.7333333333333334,
+ "grad_norm": 0.5038023591041565,
+ "learning_rate": 2.3219660592038285e-06,
+ "loss": 0.1902,
+ "step": 280
+ },
+ {
+ "epoch": 3.7466666666666666,
+ "grad_norm": 0.5871134996414185,
+ "learning_rate": 2.0963688864316323e-06,
+ "loss": 0.3928,
+ "step": 281
+ },
+ {
+ "epoch": 3.76,
+ "grad_norm": 0.5895046591758728,
+ "learning_rate": 1.882182310176095e-06,
+ "loss": 0.2864,
+ "step": 282
+ },
+ {
+ "epoch": 3.7733333333333334,
+ "grad_norm": 1.456066608428955,
+ "learning_rate": 1.6794312937258417e-06,
+ "loss": 0.2669,
+ "step": 283
+ },
+ {
+ "epoch": 3.7866666666666666,
+ "grad_norm": 0.6652315855026245,
+ "learning_rate": 1.488139467563354e-06,
+ "loss": 0.2859,
+ "step": 284
+ },
+ {
+ "epoch": 3.8,
+ "grad_norm": 0.7242224812507629,
+ "learning_rate": 1.30832912661093e-06,
+ "loss": 0.1955,
+ "step": 285
+ },
+ {
+ "epoch": 3.8133333333333335,
+ "grad_norm": 0.5488805174827576,
+ "learning_rate": 1.1400212276321376e-06,
+ "loss": 0.2772,
+ "step": 286
+ },
+ {
+ "epoch": 3.8266666666666667,
+ "grad_norm": 0.9780620336532593,
+ "learning_rate": 9.832353867893386e-07,
+ "loss": 0.2286,
+ "step": 287
+ },
+ {
+ "epoch": 3.84,
+ "grad_norm": 0.6662778258323669,
+ "learning_rate": 8.379898773574924e-07,
+ "loss": 0.2704,
+ "step": 288
+ },
+ {
+ "epoch": 3.8533333333333335,
+ "grad_norm": 0.6265354156494141,
+ "learning_rate": 7.043016275943615e-07,
+ "loss": 0.2296,
+ "step": 289
+ },
+ {
+ "epoch": 3.8666666666666667,
+ "grad_norm": 0.6008942127227783,
+ "learning_rate": 5.821862187675775e-07,
+ "loss": 0.2374,
+ "step": 290
+ },
+ {
+ "epoch": 3.88,
+ "grad_norm": 0.7946692109107971,
+ "learning_rate": 4.7165788333860536e-07,
+ "loss": 0.1566,
+ "step": 291
+ },
+ {
+ "epoch": 3.8933333333333335,
+ "grad_norm": 0.7700130939483643,
+ "learning_rate": 3.727295033040035e-07,
+ "loss": 0.2748,
+ "step": 292
+ },
+ {
+ "epoch": 3.9066666666666667,
+ "grad_norm": 0.671159029006958,
+ "learning_rate": 2.854126086940356e-07,
+ "loss": 0.2345,
+ "step": 293
+ },
+ {
+ "epoch": 3.92,
+ "grad_norm": 0.591444194316864,
+ "learning_rate": 2.0971737622883515e-07,
+ "loss": 0.2364,
+ "step": 294
+ },
+ {
+ "epoch": 3.9333333333333336,
+ "grad_norm": 0.6508089900016785,
+ "learning_rate": 1.4565262813230894e-07,
+ "loss": 0.2455,
+ "step": 295
+ },
+ {
+ "epoch": 3.9466666666666668,
+ "grad_norm": 0.8111832737922668,
+ "learning_rate": 9.32258311039269e-08,
+ "loss": 0.2412,
+ "step": 296
+ },
+ {
+ "epoch": 3.96,
+ "grad_norm": 0.6243981122970581,
+ "learning_rate": 5.2443095448506674e-08,
+ "loss": 0.2991,
+ "step": 297
+ },
+ {
+ "epoch": 3.9733333333333336,
+ "grad_norm": 0.8096401691436768,
+ "learning_rate": 2.3309174364027907e-08,
+ "loss": 0.2848,
+ "step": 298
+ },
+ {
+ "epoch": 3.986666666666667,
+ "grad_norm": 0.6315991878509521,
+ "learning_rate": 5.827463387653165e-09,
+ "loss": 0.3048,
+ "step": 299
+ },
+ {
+ "epoch": 4.0,
+ "grad_norm": 0.6686562895774841,
+ "learning_rate": 0.0,
+ "loss": 0.266,
+ "step": 300
+ },
+ {
+ "epoch": 4.0,
+ "step": 300,
+ "total_flos": 8.62109265035264e+16,
+ "train_loss": 0.6437234182655811,
+ "train_runtime": 652.1258,
+ "train_samples_per_second": 7.361,
+ "train_steps_per_second": 0.46
+ }
+ ],
+ "logging_steps": 1.0,
+ "max_steps": 300,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 4,
+ "save_steps": 50,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": true
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 8.62109265035264e+16,
+ "train_batch_size": 2,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/training_args.bin b/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..26de66c830692f3d22f375473f4f43447eefb78a
--- /dev/null
+++ b/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:54335a849ae8d377cae5839a736f4196087c7894666ca9b9f10dd899e0ada95c
+size 6904