diff --git a/.gitattributes b/.gitattributes index a6344aac8c09253b3b630fb776ae94478aa0275b..b3a9bc0734800b501c8e7e77c01ebc055cda308f 100644 --- a/.gitattributes +++ b/.gitattributes @@ -33,3 +33,9 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text *.zip filter=lfs diff=lfs merge=lfs -text *.zst filter=lfs diff=lfs merge=lfs -text *tfevents* filter=lfs diff=lfs merge=lfs -text +B_3/checkpoint-124/tokenizer.json filter=lfs diff=lfs merge=lfs -text +B_3/checkpoint-155/tokenizer.json filter=lfs diff=lfs merge=lfs -text +B_3/checkpoint-31/tokenizer.json filter=lfs diff=lfs merge=lfs -text +B_3/checkpoint-62/tokenizer.json filter=lfs diff=lfs merge=lfs -text +B_3/checkpoint-93/tokenizer.json filter=lfs diff=lfs merge=lfs -text +B_3/tokenizer.json filter=lfs diff=lfs merge=lfs -text diff --git a/B_3/README.md b/B_3/README.md new file mode 100644 index 0000000000000000000000000000000000000000..5a92fd66a04fe1823fd13f08ffd5bddd1aa128d4 --- /dev/null +++ b/B_3/README.md @@ -0,0 +1,61 @@ +--- +library_name: peft +license: other +base_model: /workspace/meta-llama/Llama-3.1-70B +tags: +- llama-factory +- lora +- generated_from_trainer +model-index: +- name: B_3 + results: [] +--- + + + +# B_3 + +This model is a fine-tuned version of [/workspace/meta-llama/Llama-3.1-70B](https://huggingface.co//workspace/meta-llama/Llama-3.1-70B) on the Millfield1 and the Millfield3 datasets. + +## Model description + +More information needed + +## Intended uses & limitations + +More information needed + +## Training and evaluation data + +More information needed + +## Training procedure + +### Training hyperparameters + +The following hyperparameters were used during training: +- learning_rate: 0.0001 +- train_batch_size: 4 +- eval_batch_size: 8 +- seed: 42 +- distributed_type: multi-GPU +- num_devices: 4 +- gradient_accumulation_steps: 8 +- total_train_batch_size: 128 +- total_eval_batch_size: 32 +- optimizer: Use OptimizerNames.ADAMW_TORCH with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments +- lr_scheduler_type: cosine +- num_epochs: 10.0 + +### Training results + + + +### Framework versions + +- PEFT 0.15.2 +- Transformers 4.55.0 +- Pytorch 2.8.0.dev20250319+cu128 +- Datasets 3.6.0 +- Tokenizers 0.21.1 \ No newline at end of file diff --git a/B_3/adapter_config.json b/B_3/adapter_config.json new file mode 100644 index 0000000000000000000000000000000000000000..fdf1a09789f67a17c6c0acf8b1f378b341fb25b0 --- /dev/null +++ b/B_3/adapter_config.json @@ -0,0 +1,39 @@ +{ + "alpha_pattern": {}, + "auto_mapping": null, + "base_model_name_or_path": "/workspace/meta-llama/Llama-3.1-70B", + "bias": "none", + "corda_config": null, + "eva_config": null, + "exclude_modules": null, + "fan_in_fan_out": false, + "inference_mode": true, + "init_lora_weights": true, + "layer_replication": null, + "layers_pattern": null, + "layers_to_transform": null, + "loftq_config": {}, + "lora_alpha": 1024, + "lora_bias": false, + "lora_dropout": 0, + "megatron_config": null, + "megatron_core": "megatron.core", + "modules_to_save": null, + "peft_type": "LORA", + "r": 256, + "rank_pattern": {}, + "revision": null, + "target_modules": [ + "o_proj", + "gate_proj", + "up_proj", + "k_proj", + "q_proj", + "down_proj", + "v_proj" + ], + "task_type": "CAUSAL_LM", + "trainable_token_indices": null, + "use_dora": false, + "use_rslora": false +} \ No newline at end of file diff --git a/B_3/adapter_model.safetensors b/B_3/adapter_model.safetensors new file mode 100644 index 
0000000000000000000000000000000000000000..bc168897c3f44ed65ad5115ca0af3348c506aa99 --- /dev/null +++ b/B_3/adapter_model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:764a72f8894757b5352c6f1186fa857fdc6509833aa5c710c7cd46ca51d0e9ec +size 6627156248 diff --git a/B_3/all_results.json b/B_3/all_results.json new file mode 100644 index 0000000000000000000000000000000000000000..155f7b8126e20313309f8fd37711b433fc1b8644 --- /dev/null +++ b/B_3/all_results.json @@ -0,0 +1,9 @@ +{ + "epoch": 10.0, + "num_input_tokens_seen": 40612480, + "total_flos": 320867359260672.0, + "train_loss": 0.048082037963582284, + "train_runtime": 10016.9898, + "train_samples_per_second": 1.974, + "train_steps_per_second": 0.016 +} \ No newline at end of file diff --git a/B_3/chat_template.jinja b/B_3/chat_template.jinja new file mode 100644 index 0000000000000000000000000000000000000000..c3af804a3bddb95bf03b7d349234a039a27de382 --- /dev/null +++ b/B_3/chat_template.jinja @@ -0,0 +1,7 @@ +{{ '<|begin_of_text|>' }}{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% endif %}{% if system_message is defined %}{{ '<|start_header_id|>system<|end_header_id|> + +' + system_message + '<|eot_id|>' }}{% endif %}{% for message in loop_messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '<|start_header_id|>user<|end_header_id|> + +' + content + '<|eot_id|><|start_header_id|>assistant<|end_header_id|> + +' }}{% elif message['role'] == 'assistant' %}{{ content + '<|eot_id|>' }}{% endif %}{% endfor %} \ No newline at end of file diff --git a/B_3/checkpoint-124/README.md b/B_3/checkpoint-124/README.md new file mode 100644 index 0000000000000000000000000000000000000000..0673d0313e0251ef70dd85d3ff379913ab5506d9 --- /dev/null +++ b/B_3/checkpoint-124/README.md @@ -0,0 +1,202 @@ +--- +base_model: /workspace/meta-llama/Llama-3.1-70B +library_name: peft +--- + +# Model Card for Model ID + + + + + +## Model Details + +### Model Description + + + + + +- **Developed by:** [More Information Needed] +- **Funded by [optional]:** [More Information Needed] +- **Shared by [optional]:** [More Information Needed] +- **Model type:** [More Information Needed] +- **Language(s) (NLP):** [More Information Needed] +- **License:** [More Information Needed] +- **Finetuned from model [optional]:** [More Information Needed] + +### Model Sources [optional] + + + +- **Repository:** [More Information Needed] +- **Paper [optional]:** [More Information Needed] +- **Demo [optional]:** [More Information Needed] + +## Uses + + + +### Direct Use + + + +[More Information Needed] + +### Downstream Use [optional] + + + +[More Information Needed] + +### Out-of-Scope Use + + + +[More Information Needed] + +## Bias, Risks, and Limitations + + + +[More Information Needed] + +### Recommendations + + + +Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. + +## How to Get Started with the Model + +Use the code below to get started with the model. 
+ +[More Information Needed] + +## Training Details + +### Training Data + + + +[More Information Needed] + +### Training Procedure + + + +#### Preprocessing [optional] + +[More Information Needed] + + +#### Training Hyperparameters + +- **Training regime:** [More Information Needed] + +#### Speeds, Sizes, Times [optional] + + + +[More Information Needed] + +## Evaluation + + + +### Testing Data, Factors & Metrics + +#### Testing Data + + + +[More Information Needed] + +#### Factors + + + +[More Information Needed] + +#### Metrics + + + +[More Information Needed] + +### Results + +[More Information Needed] + +#### Summary + + + +## Model Examination [optional] + + + +[More Information Needed] + +## Environmental Impact + + + +Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). + +- **Hardware Type:** [More Information Needed] +- **Hours used:** [More Information Needed] +- **Cloud Provider:** [More Information Needed] +- **Compute Region:** [More Information Needed] +- **Carbon Emitted:** [More Information Needed] + +## Technical Specifications [optional] + +### Model Architecture and Objective + +[More Information Needed] + +### Compute Infrastructure + +[More Information Needed] + +#### Hardware + +[More Information Needed] + +#### Software + +[More Information Needed] + +## Citation [optional] + + + +**BibTeX:** + +[More Information Needed] + +**APA:** + +[More Information Needed] + +## Glossary [optional] + + + +[More Information Needed] + +## More Information [optional] + +[More Information Needed] + +## Model Card Authors [optional] + +[More Information Needed] + +## Model Card Contact + +[More Information Needed] +### Framework versions + +- PEFT 0.15.2 \ No newline at end of file diff --git a/B_3/checkpoint-124/adapter_config.json b/B_3/checkpoint-124/adapter_config.json new file mode 100644 index 0000000000000000000000000000000000000000..fdf1a09789f67a17c6c0acf8b1f378b341fb25b0 --- /dev/null +++ b/B_3/checkpoint-124/adapter_config.json @@ -0,0 +1,39 @@ +{ + "alpha_pattern": {}, + "auto_mapping": null, + "base_model_name_or_path": "/workspace/meta-llama/Llama-3.1-70B", + "bias": "none", + "corda_config": null, + "eva_config": null, + "exclude_modules": null, + "fan_in_fan_out": false, + "inference_mode": true, + "init_lora_weights": true, + "layer_replication": null, + "layers_pattern": null, + "layers_to_transform": null, + "loftq_config": {}, + "lora_alpha": 1024, + "lora_bias": false, + "lora_dropout": 0, + "megatron_config": null, + "megatron_core": "megatron.core", + "modules_to_save": null, + "peft_type": "LORA", + "r": 256, + "rank_pattern": {}, + "revision": null, + "target_modules": [ + "o_proj", + "gate_proj", + "up_proj", + "k_proj", + "q_proj", + "down_proj", + "v_proj" + ], + "task_type": "CAUSAL_LM", + "trainable_token_indices": null, + "use_dora": false, + "use_rslora": false +} \ No newline at end of file diff --git a/B_3/checkpoint-124/adapter_model.safetensors b/B_3/checkpoint-124/adapter_model.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..cda25625619ec517caa8c90154c549aa3f7dcc42 --- /dev/null +++ b/B_3/checkpoint-124/adapter_model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5610f59871893f5b224911c048501ce12fc228208c13796274055a1739014fb3 +size 6627156248 diff --git a/B_3/checkpoint-124/chat_template.jinja b/B_3/checkpoint-124/chat_template.jinja new 
file mode 100644 index 0000000000000000000000000000000000000000..c3af804a3bddb95bf03b7d349234a039a27de382 --- /dev/null +++ b/B_3/checkpoint-124/chat_template.jinja @@ -0,0 +1,7 @@ +{{ '<|begin_of_text|>' }}{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% endif %}{% if system_message is defined %}{{ '<|start_header_id|>system<|end_header_id|> + +' + system_message + '<|eot_id|>' }}{% endif %}{% for message in loop_messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '<|start_header_id|>user<|end_header_id|> + +' + content + '<|eot_id|><|start_header_id|>assistant<|end_header_id|> + +' }}{% elif message['role'] == 'assistant' %}{{ content + '<|eot_id|>' }}{% endif %}{% endfor %} \ No newline at end of file diff --git a/B_3/checkpoint-124/global_step121/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt b/B_3/checkpoint-124/global_step121/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..d513fab66219b610d1bd7bc8858499526f35a092 --- /dev/null +++ b/B_3/checkpoint-124/global_step121/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0ed45c4ecf520420e05125f0ab23fe3c02f94176b3f11b50d314c450bddbcb36 +size 9940504945 diff --git a/B_3/checkpoint-124/global_step121/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt b/B_3/checkpoint-124/global_step121/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..884eb8bc1355af4e8d603abcedc6fb5e86d5f7f3 --- /dev/null +++ b/B_3/checkpoint-124/global_step121/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7be213f2038942bef54e52753dc0a427aedbd157d7ba2d08b4620087e383e851 +size 9940504945 diff --git a/B_3/checkpoint-124/global_step121/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt b/B_3/checkpoint-124/global_step121/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..ba7c6f9ca1157df09525a535923196e36ac7960c --- /dev/null +++ b/B_3/checkpoint-124/global_step121/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3d51258d69d7e56769c717f57da844e2e70d60337535603bc0f28688c9f454dc +size 9940504945 diff --git a/B_3/checkpoint-124/global_step121/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt b/B_3/checkpoint-124/global_step121/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..7e7017bf1132077e5c8f005cb50cc4016aa59811 --- /dev/null +++ b/B_3/checkpoint-124/global_step121/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f051d5daa893950e7e531f5818b8190a6a628d9c490474ddedc30be785aa0498 +size 9940504945 diff --git a/B_3/checkpoint-124/global_step121/zero_pp_rank_0_mp_rank_00_model_states.pt b/B_3/checkpoint-124/global_step121/zero_pp_rank_0_mp_rank_00_model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..c683617c134722a578ad387e0c51c35f3a5117e1 --- /dev/null +++ b/B_3/checkpoint-124/global_step121/zero_pp_rank_0_mp_rank_00_model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2d43dfe895e4b6e22503713c0b7abfefa3453109432f006c84d3e432a889f8bf 
+size 1109201 diff --git a/B_3/checkpoint-124/global_step121/zero_pp_rank_1_mp_rank_00_model_states.pt b/B_3/checkpoint-124/global_step121/zero_pp_rank_1_mp_rank_00_model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..9f15734cf938063ef0960bd22b8b3e295a719fbe --- /dev/null +++ b/B_3/checkpoint-124/global_step121/zero_pp_rank_1_mp_rank_00_model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:55b947ea452db0056ea23e3a0212ddb1baedca0f18ed1a3b9ac37a05cf619e59 +size 1109201 diff --git a/B_3/checkpoint-124/global_step121/zero_pp_rank_2_mp_rank_00_model_states.pt b/B_3/checkpoint-124/global_step121/zero_pp_rank_2_mp_rank_00_model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..b29a109a16e723dc11d1c6b841284156b8055b0a --- /dev/null +++ b/B_3/checkpoint-124/global_step121/zero_pp_rank_2_mp_rank_00_model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:57a5ada542522de60ddb875312b0fb04e3e018bb3e15624a38c640624dff5e52 +size 1109201 diff --git a/B_3/checkpoint-124/global_step121/zero_pp_rank_3_mp_rank_00_model_states.pt b/B_3/checkpoint-124/global_step121/zero_pp_rank_3_mp_rank_00_model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..918d37e449dc966153a347231840fb6052edf0f3 --- /dev/null +++ b/B_3/checkpoint-124/global_step121/zero_pp_rank_3_mp_rank_00_model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b75e13352055c7d6cbf8e28de6e47ebb2ba5c9f04b1e08cb2b0cc6411cc7aea4 +size 1109201 diff --git a/B_3/checkpoint-124/latest b/B_3/checkpoint-124/latest new file mode 100644 index 0000000000000000000000000000000000000000..9514df933ccf9579207bb754da90ca456691308e --- /dev/null +++ b/B_3/checkpoint-124/latest @@ -0,0 +1 @@ +global_step121 \ No newline at end of file diff --git a/B_3/checkpoint-124/rng_state_0.pth b/B_3/checkpoint-124/rng_state_0.pth new file mode 100644 index 0000000000000000000000000000000000000000..c2a2270750ad34278e35aa053aac5182f2a93e99 --- /dev/null +++ b/B_3/checkpoint-124/rng_state_0.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ccdd04b664fbfa5d05e90c688a91f18c4d07f58893b9b3a4cfad78d78efbc6ac +size 15429 diff --git a/B_3/checkpoint-124/rng_state_1.pth b/B_3/checkpoint-124/rng_state_1.pth new file mode 100644 index 0000000000000000000000000000000000000000..560af6f9618784d275690dd7c0725439475c3d26 --- /dev/null +++ b/B_3/checkpoint-124/rng_state_1.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5a41e144bee8116d5a47773b5f326ca7bb65df8c457f0c58a2ed8003d7ffcc73 +size 15429 diff --git a/B_3/checkpoint-124/rng_state_2.pth b/B_3/checkpoint-124/rng_state_2.pth new file mode 100644 index 0000000000000000000000000000000000000000..2537b73c8fbe0077e6a758fa28986d271da796bb --- /dev/null +++ b/B_3/checkpoint-124/rng_state_2.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fa38d5d663317603edb36bb18e908202a95a6e6250c06e2bea719fdba77f1277 +size 15429 diff --git a/B_3/checkpoint-124/rng_state_3.pth b/B_3/checkpoint-124/rng_state_3.pth new file mode 100644 index 0000000000000000000000000000000000000000..61c811fac31215597828fee76a70aef9491d6bcf --- /dev/null +++ b/B_3/checkpoint-124/rng_state_3.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dbe1f72976acfa5e0e28d92c61725f779f476ad95d06ce9b63a6d9978dd7b9ba +size 15429 diff --git a/B_3/checkpoint-124/scheduler.pt b/B_3/checkpoint-124/scheduler.pt new file 
mode 100644 index 0000000000000000000000000000000000000000..08fbddee94ffc7d8738fd217efe5d000b8bbfed4 --- /dev/null +++ b/B_3/checkpoint-124/scheduler.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8f0ea27b5cf831c136ddea76444da04f4cc330271f8316c190646a4d63171f36 +size 1401 diff --git a/B_3/checkpoint-124/special_tokens_map.json b/B_3/checkpoint-124/special_tokens_map.json new file mode 100644 index 0000000000000000000000000000000000000000..14daf4588e61b4e4983af0fccaba4d5500c0977c --- /dev/null +++ b/B_3/checkpoint-124/special_tokens_map.json @@ -0,0 +1,26 @@ +{ + "additional_special_tokens": [ + { + "content": "<|eom_id|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + } + ], + "bos_token": { + "content": "<|begin_of_text|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + }, + "eos_token": { + "content": "<|eot_id|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + }, + "pad_token": "<|eot_id|>" +} diff --git a/B_3/checkpoint-124/tokenizer.json b/B_3/checkpoint-124/tokenizer.json new file mode 100644 index 0000000000000000000000000000000000000000..1c1d8d5c9024994f1d3b00f9662b8dd89ca13cf2 --- /dev/null +++ b/B_3/checkpoint-124/tokenizer.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6b9e4e7fb171f92fd137b777cc2714bf87d11576700a1dcd7a399e7bbe39537b +size 17209920 diff --git a/B_3/checkpoint-124/tokenizer_config.json b/B_3/checkpoint-124/tokenizer_config.json new file mode 100644 index 0000000000000000000000000000000000000000..d1e1ea9bc94ff1132f136710751e37fb23a64347 --- /dev/null +++ b/B_3/checkpoint-124/tokenizer_config.json @@ -0,0 +1,2068 @@ +{ + "added_tokens_decoder": { + "128000": { + "content": "<|begin_of_text|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128001": { + "content": "<|end_of_text|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128002": { + "content": "<|reserved_special_token_0|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128003": { + "content": "<|reserved_special_token_1|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128004": { + "content": "<|finetune_right_pad_id|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128005": { + "content": "<|reserved_special_token_2|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128006": { + "content": "<|start_header_id|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128007": { + "content": "<|end_header_id|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128008": { + "content": "<|eom_id|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128009": { + "content": "<|eot_id|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128010": { + "content": "<|python_tag|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128011": { + "content": 
"<|reserved_special_token_3|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128012": { + "content": "<|reserved_special_token_4|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128013": { + "content": "<|reserved_special_token_5|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128014": { + "content": "<|reserved_special_token_6|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128015": { + "content": "<|reserved_special_token_7|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128016": { + "content": "<|reserved_special_token_8|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128017": { + "content": "<|reserved_special_token_9|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128018": { + "content": "<|reserved_special_token_10|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128019": { + "content": "<|reserved_special_token_11|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128020": { + "content": "<|reserved_special_token_12|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128021": { + "content": "<|reserved_special_token_13|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128022": { + "content": "<|reserved_special_token_14|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128023": { + "content": "<|reserved_special_token_15|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128024": { + "content": "<|reserved_special_token_16|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128025": { + "content": "<|reserved_special_token_17|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128026": { + "content": "<|reserved_special_token_18|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128027": { + "content": "<|reserved_special_token_19|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128028": { + "content": "<|reserved_special_token_20|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128029": { + "content": "<|reserved_special_token_21|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128030": { + "content": "<|reserved_special_token_22|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128031": { + "content": "<|reserved_special_token_23|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128032": { + "content": "<|reserved_special_token_24|>", 
+ "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128033": { + "content": "<|reserved_special_token_25|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128034": { + "content": "<|reserved_special_token_26|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128035": { + "content": "<|reserved_special_token_27|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128036": { + "content": "<|reserved_special_token_28|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128037": { + "content": "<|reserved_special_token_29|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128038": { + "content": "<|reserved_special_token_30|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128039": { + "content": "<|reserved_special_token_31|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128040": { + "content": "<|reserved_special_token_32|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128041": { + "content": "<|reserved_special_token_33|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128042": { + "content": "<|reserved_special_token_34|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128043": { + "content": "<|reserved_special_token_35|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128044": { + "content": "<|reserved_special_token_36|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128045": { + "content": "<|reserved_special_token_37|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128046": { + "content": "<|reserved_special_token_38|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128047": { + "content": "<|reserved_special_token_39|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128048": { + "content": "<|reserved_special_token_40|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128049": { + "content": "<|reserved_special_token_41|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128050": { + "content": "<|reserved_special_token_42|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128051": { + "content": "<|reserved_special_token_43|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128052": { + "content": "<|reserved_special_token_44|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128053": { + "content": "<|reserved_special_token_45|>", + "lstrip": false, + 
"normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128054": { + "content": "<|reserved_special_token_46|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128055": { + "content": "<|reserved_special_token_47|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128056": { + "content": "<|reserved_special_token_48|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128057": { + "content": "<|reserved_special_token_49|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128058": { + "content": "<|reserved_special_token_50|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128059": { + "content": "<|reserved_special_token_51|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128060": { + "content": "<|reserved_special_token_52|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128061": { + "content": "<|reserved_special_token_53|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128062": { + "content": "<|reserved_special_token_54|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128063": { + "content": "<|reserved_special_token_55|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128064": { + "content": "<|reserved_special_token_56|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128065": { + "content": "<|reserved_special_token_57|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128066": { + "content": "<|reserved_special_token_58|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128067": { + "content": "<|reserved_special_token_59|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128068": { + "content": "<|reserved_special_token_60|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128069": { + "content": "<|reserved_special_token_61|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128070": { + "content": "<|reserved_special_token_62|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128071": { + "content": "<|reserved_special_token_63|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128072": { + "content": "<|reserved_special_token_64|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128073": { + "content": "<|reserved_special_token_65|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128074": { + "content": "<|reserved_special_token_66|>", + "lstrip": false, + "normalized": false, + 
"rstrip": false, + "single_word": false, + "special": true + }, + "128075": { + "content": "<|reserved_special_token_67|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128076": { + "content": "<|reserved_special_token_68|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128077": { + "content": "<|reserved_special_token_69|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128078": { + "content": "<|reserved_special_token_70|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128079": { + "content": "<|reserved_special_token_71|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128080": { + "content": "<|reserved_special_token_72|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128081": { + "content": "<|reserved_special_token_73|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128082": { + "content": "<|reserved_special_token_74|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128083": { + "content": "<|reserved_special_token_75|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128084": { + "content": "<|reserved_special_token_76|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128085": { + "content": "<|reserved_special_token_77|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128086": { + "content": "<|reserved_special_token_78|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128087": { + "content": "<|reserved_special_token_79|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128088": { + "content": "<|reserved_special_token_80|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128089": { + "content": "<|reserved_special_token_81|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128090": { + "content": "<|reserved_special_token_82|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128091": { + "content": "<|reserved_special_token_83|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128092": { + "content": "<|reserved_special_token_84|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128093": { + "content": "<|reserved_special_token_85|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128094": { + "content": "<|reserved_special_token_86|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128095": { + "content": "<|reserved_special_token_87|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + 
"single_word": false, + "special": true + }, + "128096": { + "content": "<|reserved_special_token_88|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128097": { + "content": "<|reserved_special_token_89|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128098": { + "content": "<|reserved_special_token_90|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128099": { + "content": "<|reserved_special_token_91|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128100": { + "content": "<|reserved_special_token_92|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128101": { + "content": "<|reserved_special_token_93|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128102": { + "content": "<|reserved_special_token_94|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128103": { + "content": "<|reserved_special_token_95|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128104": { + "content": "<|reserved_special_token_96|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128105": { + "content": "<|reserved_special_token_97|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128106": { + "content": "<|reserved_special_token_98|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128107": { + "content": "<|reserved_special_token_99|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128108": { + "content": "<|reserved_special_token_100|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128109": { + "content": "<|reserved_special_token_101|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128110": { + "content": "<|reserved_special_token_102|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128111": { + "content": "<|reserved_special_token_103|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128112": { + "content": "<|reserved_special_token_104|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128113": { + "content": "<|reserved_special_token_105|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128114": { + "content": "<|reserved_special_token_106|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128115": { + "content": "<|reserved_special_token_107|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128116": { + "content": "<|reserved_special_token_108|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": 
false, + "special": true + }, + "128117": { + "content": "<|reserved_special_token_109|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128118": { + "content": "<|reserved_special_token_110|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128119": { + "content": "<|reserved_special_token_111|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128120": { + "content": "<|reserved_special_token_112|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128121": { + "content": "<|reserved_special_token_113|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128122": { + "content": "<|reserved_special_token_114|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128123": { + "content": "<|reserved_special_token_115|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128124": { + "content": "<|reserved_special_token_116|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128125": { + "content": "<|reserved_special_token_117|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128126": { + "content": "<|reserved_special_token_118|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128127": { + "content": "<|reserved_special_token_119|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128128": { + "content": "<|reserved_special_token_120|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128129": { + "content": "<|reserved_special_token_121|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128130": { + "content": "<|reserved_special_token_122|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128131": { + "content": "<|reserved_special_token_123|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128132": { + "content": "<|reserved_special_token_124|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128133": { + "content": "<|reserved_special_token_125|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128134": { + "content": "<|reserved_special_token_126|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128135": { + "content": "<|reserved_special_token_127|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128136": { + "content": "<|reserved_special_token_128|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128137": { + "content": "<|reserved_special_token_129|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": 
false, + "special": true + }, + "128138": { + "content": "<|reserved_special_token_130|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128139": { + "content": "<|reserved_special_token_131|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128140": { + "content": "<|reserved_special_token_132|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128141": { + "content": "<|reserved_special_token_133|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128142": { + "content": "<|reserved_special_token_134|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128143": { + "content": "<|reserved_special_token_135|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128144": { + "content": "<|reserved_special_token_136|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128145": { + "content": "<|reserved_special_token_137|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128146": { + "content": "<|reserved_special_token_138|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128147": { + "content": "<|reserved_special_token_139|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128148": { + "content": "<|reserved_special_token_140|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128149": { + "content": "<|reserved_special_token_141|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128150": { + "content": "<|reserved_special_token_142|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128151": { + "content": "<|reserved_special_token_143|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128152": { + "content": "<|reserved_special_token_144|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128153": { + "content": "<|reserved_special_token_145|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128154": { + "content": "<|reserved_special_token_146|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128155": { + "content": "<|reserved_special_token_147|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128156": { + "content": "<|reserved_special_token_148|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128157": { + "content": "<|reserved_special_token_149|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128158": { + "content": "<|reserved_special_token_150|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": 
false, + "special": true + }, + "128159": { + "content": "<|reserved_special_token_151|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128160": { + "content": "<|reserved_special_token_152|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128161": { + "content": "<|reserved_special_token_153|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128162": { + "content": "<|reserved_special_token_154|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128163": { + "content": "<|reserved_special_token_155|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128164": { + "content": "<|reserved_special_token_156|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128165": { + "content": "<|reserved_special_token_157|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128166": { + "content": "<|reserved_special_token_158|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128167": { + "content": "<|reserved_special_token_159|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128168": { + "content": "<|reserved_special_token_160|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128169": { + "content": "<|reserved_special_token_161|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128170": { + "content": "<|reserved_special_token_162|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128171": { + "content": "<|reserved_special_token_163|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128172": { + "content": "<|reserved_special_token_164|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128173": { + "content": "<|reserved_special_token_165|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128174": { + "content": "<|reserved_special_token_166|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128175": { + "content": "<|reserved_special_token_167|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128176": { + "content": "<|reserved_special_token_168|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128177": { + "content": "<|reserved_special_token_169|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128178": { + "content": "<|reserved_special_token_170|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128179": { + "content": "<|reserved_special_token_171|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": 
false, + "special": true + }, + "128180": { + "content": "<|reserved_special_token_172|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128181": { + "content": "<|reserved_special_token_173|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128182": { + "content": "<|reserved_special_token_174|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128183": { + "content": "<|reserved_special_token_175|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128184": { + "content": "<|reserved_special_token_176|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128185": { + "content": "<|reserved_special_token_177|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128186": { + "content": "<|reserved_special_token_178|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128187": { + "content": "<|reserved_special_token_179|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128188": { + "content": "<|reserved_special_token_180|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128189": { + "content": "<|reserved_special_token_181|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128190": { + "content": "<|reserved_special_token_182|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128191": { + "content": "<|reserved_special_token_183|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128192": { + "content": "<|reserved_special_token_184|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128193": { + "content": "<|reserved_special_token_185|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128194": { + "content": "<|reserved_special_token_186|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128195": { + "content": "<|reserved_special_token_187|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128196": { + "content": "<|reserved_special_token_188|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128197": { + "content": "<|reserved_special_token_189|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128198": { + "content": "<|reserved_special_token_190|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128199": { + "content": "<|reserved_special_token_191|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128200": { + "content": "<|reserved_special_token_192|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": 
false, + "special": true + }, + "128201": { + "content": "<|reserved_special_token_193|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128202": { + "content": "<|reserved_special_token_194|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128203": { + "content": "<|reserved_special_token_195|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128204": { + "content": "<|reserved_special_token_196|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128205": { + "content": "<|reserved_special_token_197|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128206": { + "content": "<|reserved_special_token_198|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128207": { + "content": "<|reserved_special_token_199|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128208": { + "content": "<|reserved_special_token_200|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128209": { + "content": "<|reserved_special_token_201|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128210": { + "content": "<|reserved_special_token_202|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128211": { + "content": "<|reserved_special_token_203|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128212": { + "content": "<|reserved_special_token_204|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128213": { + "content": "<|reserved_special_token_205|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128214": { + "content": "<|reserved_special_token_206|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128215": { + "content": "<|reserved_special_token_207|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128216": { + "content": "<|reserved_special_token_208|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128217": { + "content": "<|reserved_special_token_209|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128218": { + "content": "<|reserved_special_token_210|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128219": { + "content": "<|reserved_special_token_211|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128220": { + "content": "<|reserved_special_token_212|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128221": { + "content": "<|reserved_special_token_213|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": 
false, + "special": true + }, + "128222": { + "content": "<|reserved_special_token_214|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128223": { + "content": "<|reserved_special_token_215|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128224": { + "content": "<|reserved_special_token_216|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128225": { + "content": "<|reserved_special_token_217|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128226": { + "content": "<|reserved_special_token_218|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128227": { + "content": "<|reserved_special_token_219|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128228": { + "content": "<|reserved_special_token_220|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128229": { + "content": "<|reserved_special_token_221|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128230": { + "content": "<|reserved_special_token_222|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128231": { + "content": "<|reserved_special_token_223|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128232": { + "content": "<|reserved_special_token_224|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128233": { + "content": "<|reserved_special_token_225|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128234": { + "content": "<|reserved_special_token_226|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128235": { + "content": "<|reserved_special_token_227|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128236": { + "content": "<|reserved_special_token_228|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128237": { + "content": "<|reserved_special_token_229|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128238": { + "content": "<|reserved_special_token_230|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128239": { + "content": "<|reserved_special_token_231|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128240": { + "content": "<|reserved_special_token_232|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128241": { + "content": "<|reserved_special_token_233|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128242": { + "content": "<|reserved_special_token_234|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": 
false, + "special": true + }, + "128243": { + "content": "<|reserved_special_token_235|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128244": { + "content": "<|reserved_special_token_236|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128245": { + "content": "<|reserved_special_token_237|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128246": { + "content": "<|reserved_special_token_238|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128247": { + "content": "<|reserved_special_token_239|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128248": { + "content": "<|reserved_special_token_240|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128249": { + "content": "<|reserved_special_token_241|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128250": { + "content": "<|reserved_special_token_242|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128251": { + "content": "<|reserved_special_token_243|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128252": { + "content": "<|reserved_special_token_244|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128253": { + "content": "<|reserved_special_token_245|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128254": { + "content": "<|reserved_special_token_246|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128255": { + "content": "<|reserved_special_token_247|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + } + }, + "additional_special_tokens": [ + "<|eom_id|>" + ], + "bos_token": "<|begin_of_text|>", + "clean_up_tokenization_spaces": true, + "eos_token": "<|eot_id|>", + "extra_special_tokens": {}, + "model_input_names": [ + "input_ids", + "attention_mask" + ], + "model_max_length": 131072, + "pad_token": "<|eot_id|>", + "padding_side": "right", + "split_special_tokens": false, + "tokenizer_class": "PreTrainedTokenizerFast" +} diff --git a/B_3/checkpoint-124/trainer_state.json b/B_3/checkpoint-124/trainer_state.json new file mode 100644 index 0000000000000000000000000000000000000000..a0c86763be8113e344964393372f79aa812046e9 --- /dev/null +++ b/B_3/checkpoint-124/trainer_state.json @@ -0,0 +1,1274 @@ +{ + "best_global_step": null, + "best_metric": null, + "best_model_checkpoint": null, + "epoch": 7.838709677419355, + "eval_steps": 500, + "global_step": 124, + "is_hyper_param_search": false, + "is_local_process_zero": true, + "is_world_process_zero": true, + "log_history": [ + { + "epoch": 0.06451612903225806, + "grad_norm": 2.073743358513411, + "learning_rate": 0.0001, + "loss": 1.3591, + "num_input_tokens_seen": 262016, + "step": 1, + "train_runtime": 148.9807, + "train_tokens_per_second": 1758.725 + }, + { + "epoch": 0.12903225806451613, + "grad_norm": 1.7083966194301452, + "learning_rate": 
9.999036202410325e-05, + "loss": 1.4321, + "num_input_tokens_seen": 524032, + "step": 2, + "train_runtime": 298.073, + "train_tokens_per_second": 1758.066 + }, + { + "epoch": 0.1935483870967742, + "grad_norm": 0.8154519457271471, + "learning_rate": 9.996145181203615e-05, + "loss": 1.3036, + "num_input_tokens_seen": 786048, + "step": 3, + "train_runtime": 449.085, + "train_tokens_per_second": 1750.332 + }, + { + "epoch": 0.25806451612903225, + "grad_norm": 1.6435854418760425, + "learning_rate": 9.991328050923581e-05, + "loss": 1.3119, + "num_input_tokens_seen": 1048064, + "step": 4, + "train_runtime": 600.5729, + "train_tokens_per_second": 1745.107 + }, + { + "epoch": 0.3225806451612903, + "grad_norm": 1.2034889741595727, + "learning_rate": 9.98458666866564e-05, + "loss": 1.21, + "num_input_tokens_seen": 1310080, + "step": 5, + "train_runtime": 752.2986, + "train_tokens_per_second": 1741.436 + }, + { + "epoch": 0.3870967741935484, + "grad_norm": 0.6081797430818349, + "learning_rate": 9.975923633360985e-05, + "loss": 1.1713, + "num_input_tokens_seen": 1572096, + "step": 6, + "train_runtime": 903.8941, + "train_tokens_per_second": 1739.248 + }, + { + "epoch": 0.45161290322580644, + "grad_norm": 0.43860201273645816, + "learning_rate": 9.965342284774632e-05, + "loss": 1.1687, + "num_input_tokens_seen": 1834112, + "step": 7, + "train_runtime": 1055.454, + "train_tokens_per_second": 1737.747 + }, + { + "epoch": 0.5161290322580645, + "grad_norm": 0.3489614049191675, + "learning_rate": 9.952846702217886e-05, + "loss": 1.1151, + "num_input_tokens_seen": 2096128, + "step": 8, + "train_runtime": 1207.0142, + "train_tokens_per_second": 1736.623 + }, + { + "epoch": 0.5806451612903226, + "grad_norm": 3.182590493238281, + "learning_rate": 9.938441702975689e-05, + "loss": 1.0694, + "num_input_tokens_seen": 2358144, + "step": 9, + "train_runtime": 1358.12, + "train_tokens_per_second": 1736.33 + }, + { + "epoch": 0.6451612903225806, + "grad_norm": 0.40091824929141967, + "learning_rate": 9.922132840449459e-05, + "loss": 1.0431, + "num_input_tokens_seen": 2620160, + "step": 10, + "train_runtime": 1509.1349, + "train_tokens_per_second": 1736.2 + }, + { + "epoch": 0.7096774193548387, + "grad_norm": 0.4778713425435316, + "learning_rate": 9.903926402016153e-05, + "loss": 1.0812, + "num_input_tokens_seen": 2882176, + "step": 11, + "train_runtime": 1661.0314, + "train_tokens_per_second": 1735.172 + }, + { + "epoch": 0.7741935483870968, + "grad_norm": 0.3361596465836105, + "learning_rate": 9.883829406604363e-05, + "loss": 1.0296, + "num_input_tokens_seen": 3144192, + "step": 12, + "train_runtime": 1812.2372, + "train_tokens_per_second": 1734.978 + }, + { + "epoch": 0.8387096774193549, + "grad_norm": 0.30484937194478195, + "learning_rate": 9.861849601988383e-05, + "loss": 0.9806, + "num_input_tokens_seen": 3406208, + "step": 13, + "train_runtime": 1963.4393, + "train_tokens_per_second": 1734.817 + }, + { + "epoch": 0.9032258064516129, + "grad_norm": 0.31491494208058013, + "learning_rate": 9.837995461801299e-05, + "loss": 1.0244, + "num_input_tokens_seen": 3668224, + "step": 14, + "train_runtime": 2115.2404, + "train_tokens_per_second": 1734.188 + }, + { + "epoch": 0.967741935483871, + "grad_norm": 0.2555325387827011, + "learning_rate": 9.812276182268236e-05, + "loss": 0.9701, + "num_input_tokens_seen": 3930240, + "step": 15, + "train_runtime": 2266.9917, + "train_tokens_per_second": 1733.681 + }, + { + "epoch": 1.0, + "grad_norm": 0.2555325387827011, + "learning_rate": 9.784701678661045e-05, + "loss": 0.9885, + 
"num_input_tokens_seen": 4061248, + "step": 16, + "train_runtime": 2343.254, + "train_tokens_per_second": 1733.166 + }, + { + "epoch": 1.064516129032258, + "grad_norm": 0.3803435873199766, + "learning_rate": 9.755282581475769e-05, + "loss": 0.9221, + "num_input_tokens_seen": 4323264, + "step": 17, + "train_runtime": 2495.3616, + "train_tokens_per_second": 1732.52 + }, + { + "epoch": 1.129032258064516, + "grad_norm": 0.25742598219685603, + "learning_rate": 9.724030232334391e-05, + "loss": 0.9187, + "num_input_tokens_seen": 4585280, + "step": 18, + "train_runtime": 2646.9782, + "train_tokens_per_second": 1732.27 + }, + { + "epoch": 1.1935483870967742, + "grad_norm": 0.22468849320033518, + "learning_rate": 9.690956679612421e-05, + "loss": 0.8943, + "num_input_tokens_seen": 4847296, + "step": 19, + "train_runtime": 2798.7721, + "train_tokens_per_second": 1731.937 + }, + { + "epoch": 1.2580645161290323, + "grad_norm": 0.28670637895213946, + "learning_rate": 9.656074673794018e-05, + "loss": 0.8567, + "num_input_tokens_seen": 5109312, + "step": 20, + "train_runtime": 2950.5573, + "train_tokens_per_second": 1731.643 + }, + { + "epoch": 1.3225806451612903, + "grad_norm": 0.2043611694383423, + "learning_rate": 9.619397662556435e-05, + "loss": 0.8557, + "num_input_tokens_seen": 5371328, + "step": 21, + "train_runtime": 3103.0692, + "train_tokens_per_second": 1730.973 + }, + { + "epoch": 1.3870967741935485, + "grad_norm": 0.24667572059564058, + "learning_rate": 9.580939785585681e-05, + "loss": 0.8546, + "num_input_tokens_seen": 5633344, + "step": 22, + "train_runtime": 3254.6109, + "train_tokens_per_second": 1730.881 + }, + { + "epoch": 1.4516129032258065, + "grad_norm": 0.24067782128131054, + "learning_rate": 9.540715869125407e-05, + "loss": 0.851, + "num_input_tokens_seen": 5895360, + "step": 23, + "train_runtime": 3406.7776, + "train_tokens_per_second": 1730.48 + }, + { + "epoch": 1.5161290322580645, + "grad_norm": 0.2000848793492341, + "learning_rate": 9.498741420261108e-05, + "loss": 0.8483, + "num_input_tokens_seen": 6157376, + "step": 24, + "train_runtime": 3558.7104, + "train_tokens_per_second": 1730.227 + }, + { + "epoch": 1.5806451612903225, + "grad_norm": 0.22359283400518964, + "learning_rate": 9.45503262094184e-05, + "loss": 0.8226, + "num_input_tokens_seen": 6419392, + "step": 25, + "train_runtime": 3711.0551, + "train_tokens_per_second": 1729.802 + }, + { + "epoch": 1.6451612903225805, + "grad_norm": 1.0648985294170912, + "learning_rate": 9.409606321741775e-05, + "loss": 0.8259, + "num_input_tokens_seen": 6681408, + "step": 26, + "train_runtime": 3863.7709, + "train_tokens_per_second": 1729.245 + }, + { + "epoch": 1.7096774193548387, + "grad_norm": 0.18133437899141433, + "learning_rate": 9.362480035363986e-05, + "loss": 0.8524, + "num_input_tokens_seen": 6943424, + "step": 27, + "train_runtime": 4016.4383, + "train_tokens_per_second": 1728.752 + }, + { + "epoch": 1.7741935483870968, + "grad_norm": 0.456273625303925, + "learning_rate": 9.31367192988896e-05, + "loss": 0.8209, + "num_input_tokens_seen": 7205440, + "step": 28, + "train_runtime": 4168.9223, + "train_tokens_per_second": 1728.37 + }, + { + "epoch": 1.838709677419355, + "grad_norm": 0.4673225345753548, + "learning_rate": 9.263200821770461e-05, + "loss": 0.7853, + "num_input_tokens_seen": 7467456, + "step": 29, + "train_runtime": 4321.7056, + "train_tokens_per_second": 1727.896 + }, + { + "epoch": 1.903225806451613, + "grad_norm": 0.1996315966078707, + "learning_rate": 9.211086168581433e-05, + "loss": 0.7779, + 
"num_input_tokens_seen": 7729472, + "step": 30, + "train_runtime": 4474.7713, + "train_tokens_per_second": 1727.345 + }, + { + "epoch": 1.967741935483871, + "grad_norm": 0.3752545960960864, + "learning_rate": 9.157348061512727e-05, + "loss": 0.8146, + "num_input_tokens_seen": 7991488, + "step": 31, + "train_runtime": 4627.5272, + "train_tokens_per_second": 1726.946 + }, + { + "epoch": 2.0, + "grad_norm": 0.484664872247163, + "learning_rate": 9.102007217627568e-05, + "loss": 0.7787, + "num_input_tokens_seen": 8122496, + "step": 32, + "train_runtime": 4809.6299, + "train_tokens_per_second": 1688.799 + }, + { + "epoch": 2.064516129032258, + "grad_norm": 0.299023275639115, + "learning_rate": 9.045084971874738e-05, + "loss": 0.7361, + "num_input_tokens_seen": 8384512, + "step": 33, + "train_runtime": 4960.4611, + "train_tokens_per_second": 1690.269 + }, + { + "epoch": 2.129032258064516, + "grad_norm": 0.20981231086811225, + "learning_rate": 8.986603268863536e-05, + "loss": 0.6956, + "num_input_tokens_seen": 8646528, + "step": 34, + "train_runtime": 5112.1398, + "train_tokens_per_second": 1691.372 + }, + { + "epoch": 2.193548387096774, + "grad_norm": 0.3648857123126151, + "learning_rate": 8.926584654403724e-05, + "loss": 0.6819, + "num_input_tokens_seen": 8908544, + "step": 35, + "train_runtime": 5264.3603, + "train_tokens_per_second": 1692.237 + }, + { + "epoch": 2.258064516129032, + "grad_norm": 0.31269893807625165, + "learning_rate": 8.865052266813685e-05, + "loss": 0.6958, + "num_input_tokens_seen": 9170560, + "step": 36, + "train_runtime": 5416.2601, + "train_tokens_per_second": 1693.154 + }, + { + "epoch": 2.3225806451612905, + "grad_norm": 0.2727897516670068, + "learning_rate": 8.802029828000156e-05, + "loss": 0.6756, + "num_input_tokens_seen": 9432576, + "step": 37, + "train_runtime": 5567.9089, + "train_tokens_per_second": 1694.097 + }, + { + "epoch": 2.3870967741935485, + "grad_norm": 0.25721446603694037, + "learning_rate": 8.737541634312985e-05, + "loss": 0.648, + "num_input_tokens_seen": 9694592, + "step": 38, + "train_runtime": 5719.9113, + "train_tokens_per_second": 1694.885 + }, + { + "epoch": 2.4516129032258065, + "grad_norm": 20.44651031163169, + "learning_rate": 8.671612547178428e-05, + "loss": 0.669, + "num_input_tokens_seen": 9956608, + "step": 39, + "train_runtime": 5871.7821, + "train_tokens_per_second": 1695.671 + }, + { + "epoch": 2.5161290322580645, + "grad_norm": 3.0150996948618145, + "learning_rate": 8.604267983514594e-05, + "loss": 0.6856, + "num_input_tokens_seen": 10218624, + "step": 40, + "train_runtime": 6024.0794, + "train_tokens_per_second": 1696.296 + }, + { + "epoch": 2.5806451612903225, + "grad_norm": 19.306885630930985, + "learning_rate": 8.535533905932738e-05, + "loss": 0.6949, + "num_input_tokens_seen": 10480640, + "step": 41, + "train_runtime": 6176.6303, + "train_tokens_per_second": 1696.822 + }, + { + "epoch": 2.6451612903225805, + "grad_norm": 46.29810754342157, + "learning_rate": 8.46543681272818e-05, + "loss": 0.7467, + "num_input_tokens_seen": 10742656, + "step": 42, + "train_runtime": 6329.1576, + "train_tokens_per_second": 1697.328 + }, + { + "epoch": 2.709677419354839, + "grad_norm": 9.77264228502113, + "learning_rate": 8.39400372766471e-05, + "loss": 0.6638, + "num_input_tokens_seen": 11004672, + "step": 43, + "train_runtime": 6481.6817, + "train_tokens_per_second": 1697.811 + }, + { + "epoch": 2.774193548387097, + "grad_norm": 16.058872629216598, + "learning_rate": 8.321262189556409e-05, + "loss": 0.7096, + "num_input_tokens_seen": 11266688, + 
"step": 44, + "train_runtime": 6633.7733, + "train_tokens_per_second": 1698.383 + }, + { + "epoch": 2.838709677419355, + "grad_norm": 1.0261214777781495, + "learning_rate": 8.247240241650918e-05, + "loss": 0.6737, + "num_input_tokens_seen": 11528704, + "step": 45, + "train_runtime": 6785.7147, + "train_tokens_per_second": 1698.967 + }, + { + "epoch": 2.903225806451613, + "grad_norm": 1.6378077961358868, + "learning_rate": 8.171966420818228e-05, + "loss": 0.6638, + "num_input_tokens_seen": 11790720, + "step": 46, + "train_runtime": 6937.3194, + "train_tokens_per_second": 1699.607 + }, + { + "epoch": 2.967741935483871, + "grad_norm": 2.4944381489903455, + "learning_rate": 8.095469746549172e-05, + "loss": 0.6753, + "num_input_tokens_seen": 12052736, + "step": 47, + "train_runtime": 7089.3141, + "train_tokens_per_second": 1700.127 + }, + { + "epoch": 3.0, + "grad_norm": 2.4944381489903455, + "learning_rate": 8.017779709767858e-05, + "loss": 0.6673, + "num_input_tokens_seen": 12183744, + "step": 48, + "train_runtime": 7165.3736, + "train_tokens_per_second": 1700.364 + }, + { + "epoch": 3.064516129032258, + "grad_norm": 26.97115523850904, + "learning_rate": 7.938926261462366e-05, + "loss": 0.6049, + "num_input_tokens_seen": 12445760, + "step": 49, + "train_runtime": 7317.3555, + "train_tokens_per_second": 1700.855 + }, + { + "epoch": 3.129032258064516, + "grad_norm": 5.4430349870657375, + "learning_rate": 7.858939801138061e-05, + "loss": 1.0716, + "num_input_tokens_seen": 12707776, + "step": 50, + "train_runtime": 7468.9695, + "train_tokens_per_second": 1701.41 + }, + { + "epoch": 3.193548387096774, + "grad_norm": 12.78395158722848, + "learning_rate": 7.777851165098012e-05, + "loss": 1.0428, + "num_input_tokens_seen": 12969792, + "step": 51, + "train_runtime": 7620.5451, + "train_tokens_per_second": 1701.951 + }, + { + "epoch": 3.258064516129032, + "grad_norm": 326.4832878895978, + "learning_rate": 7.695691614555003e-05, + "loss": 0.9172, + "num_input_tokens_seen": 13231808, + "step": 52, + "train_runtime": 7772.2756, + "train_tokens_per_second": 1702.437 + }, + { + "epoch": 3.3225806451612905, + "grad_norm": 69.03004430934298, + "learning_rate": 7.612492823579745e-05, + "loss": 0.7593, + "num_input_tokens_seen": 13493824, + "step": 53, + "train_runtime": 7924.7666, + "train_tokens_per_second": 1702.741 + }, + { + "epoch": 3.3870967741935485, + "grad_norm": 1.1495044460164596, + "learning_rate": 7.528286866889924e-05, + "loss": 0.6151, + "num_input_tokens_seen": 13755840, + "step": 54, + "train_runtime": 8077.0191, + "train_tokens_per_second": 1703.084 + }, + { + "epoch": 3.4516129032258065, + "grad_norm": 0.252822200967686, + "learning_rate": 7.443106207484776e-05, + "loss": 0.5964, + "num_input_tokens_seen": 14017856, + "step": 55, + "train_runtime": 8229.2406, + "train_tokens_per_second": 1703.42 + }, + { + "epoch": 3.5161290322580645, + "grad_norm": 0.36169399074673414, + "learning_rate": 7.35698368412999e-05, + "loss": 0.5809, + "num_input_tokens_seen": 14279872, + "step": 56, + "train_runtime": 8380.9399, + "train_tokens_per_second": 1703.851 + }, + { + "epoch": 3.5806451612903225, + "grad_norm": 0.3219026144290714, + "learning_rate": 7.269952498697734e-05, + "loss": 0.5638, + "num_input_tokens_seen": 14541888, + "step": 57, + "train_runtime": 8533.1482, + "train_tokens_per_second": 1704.164 + }, + { + "epoch": 3.6451612903225805, + "grad_norm": 1.0720094012961243, + "learning_rate": 7.18204620336671e-05, + "loss": 0.5553, + "num_input_tokens_seen": 14803904, + "step": 58, + 
"train_runtime": 8685.2309, + "train_tokens_per_second": 1704.492 + }, + { + "epoch": 3.709677419354839, + "grad_norm": 0.3151374305079844, + "learning_rate": 7.09329868768714e-05, + "loss": 0.5555, + "num_input_tokens_seen": 15065920, + "step": 59, + "train_runtime": 8837.7033, + "train_tokens_per_second": 1704.733 + }, + { + "epoch": 3.774193548387097, + "grad_norm": 0.2917481639941811, + "learning_rate": 7.003744165515705e-05, + "loss": 0.5635, + "num_input_tokens_seen": 15327936, + "step": 60, + "train_runtime": 8990.7028, + "train_tokens_per_second": 1704.865 + }, + { + "epoch": 3.838709677419355, + "grad_norm": 0.39703897734831073, + "learning_rate": 6.91341716182545e-05, + "loss": 0.5775, + "num_input_tokens_seen": 15589952, + "step": 61, + "train_runtime": 9142.8639, + "train_tokens_per_second": 1705.15 + }, + { + "epoch": 3.903225806451613, + "grad_norm": 0.330407023300617, + "learning_rate": 6.82235249939575e-05, + "loss": 0.5305, + "num_input_tokens_seen": 15851968, + "step": 62, + "train_runtime": 9294.6576, + "train_tokens_per_second": 1705.492 + }, + { + "epoch": 3.967741935483871, + "grad_norm": 0.2594779667409557, + "learning_rate": 6.730585285387465e-05, + "loss": 0.5428, + "num_input_tokens_seen": 16113984, + "step": 63, + "train_runtime": 9538.1388, + "train_tokens_per_second": 1689.426 + }, + { + "epoch": 4.0, + "grad_norm": 0.5188739404593391, + "learning_rate": 6.638150897808468e-05, + "loss": 0.5671, + "num_input_tokens_seen": 16244992, + "step": 64, + "train_runtime": 9613.4951, + "train_tokens_per_second": 1689.811 + }, + { + "epoch": 4.064516129032258, + "grad_norm": 0.2817891367025121, + "learning_rate": 6.545084971874738e-05, + "loss": 0.4715, + "num_input_tokens_seen": 16507008, + "step": 65, + "train_runtime": 9764.9453, + "train_tokens_per_second": 1690.435 + }, + { + "epoch": 4.129032258064516, + "grad_norm": 0.4313237711714659, + "learning_rate": 6.451423386272312e-05, + "loss": 0.4488, + "num_input_tokens_seen": 16769024, + "step": 66, + "train_runtime": 9916.329, + "train_tokens_per_second": 1691.052 + }, + { + "epoch": 4.193548387096774, + "grad_norm": 9.407910846391866, + "learning_rate": 6.357202249325371e-05, + "loss": 0.4582, + "num_input_tokens_seen": 17031040, + "step": 67, + "train_runtime": 10068.4234, + "train_tokens_per_second": 1691.53 + }, + { + "epoch": 4.258064516129032, + "grad_norm": 0.4318030663645452, + "learning_rate": 6.26245788507579e-05, + "loss": 0.4588, + "num_input_tokens_seen": 17293056, + "step": 68, + "train_runtime": 10219.9518, + "train_tokens_per_second": 1692.088 + }, + { + "epoch": 4.32258064516129, + "grad_norm": 0.2711661394321373, + "learning_rate": 6.167226819279528e-05, + "loss": 0.4447, + "num_input_tokens_seen": 17555072, + "step": 69, + "train_runtime": 10371.656, + "train_tokens_per_second": 1692.601 + }, + { + "epoch": 4.387096774193548, + "grad_norm": 0.2785432314686511, + "learning_rate": 6.071545765325254e-05, + "loss": 0.4565, + "num_input_tokens_seen": 17817088, + "step": 70, + "train_runtime": 10524.0113, + "train_tokens_per_second": 1692.994 + }, + { + "epoch": 4.451612903225806, + "grad_norm": 0.24786649766049834, + "learning_rate": 5.9754516100806423e-05, + "loss": 0.4665, + "num_input_tokens_seen": 18079104, + "step": 71, + "train_runtime": 10676.331, + "train_tokens_per_second": 1693.382 + }, + { + "epoch": 4.516129032258064, + "grad_norm": 0.2454529793840644, + "learning_rate": 5.8789813996717736e-05, + "loss": 0.446, + "num_input_tokens_seen": 18341120, + "step": 72, + "train_runtime": 10828.6719, + 
"train_tokens_per_second": 1693.755 + }, + { + "epoch": 4.580645161290323, + "grad_norm": 0.25559855808246584, + "learning_rate": 5.782172325201155e-05, + "loss": 0.4384, + "num_input_tokens_seen": 18603136, + "step": 73, + "train_runtime": 10981.143, + "train_tokens_per_second": 1694.098 + }, + { + "epoch": 4.645161290322581, + "grad_norm": 0.24822833593804589, + "learning_rate": 5.685061708409841e-05, + "loss": 0.4339, + "num_input_tokens_seen": 18865152, + "step": 74, + "train_runtime": 11133.4844, + "train_tokens_per_second": 1694.452 + }, + { + "epoch": 4.709677419354839, + "grad_norm": 0.24264729806408034, + "learning_rate": 5.587686987289189e-05, + "loss": 0.444, + "num_input_tokens_seen": 19127168, + "step": 75, + "train_runtime": 11285.3939, + "train_tokens_per_second": 1694.86 + }, + { + "epoch": 4.774193548387097, + "grad_norm": 0.22384324563071528, + "learning_rate": 5.490085701647805e-05, + "loss": 0.4353, + "num_input_tokens_seen": 19389184, + "step": 76, + "train_runtime": 11437.3833, + "train_tokens_per_second": 1695.246 + }, + { + "epoch": 4.838709677419355, + "grad_norm": 0.24764076326899273, + "learning_rate": 5.392295478639225e-05, + "loss": 0.4257, + "num_input_tokens_seen": 19651200, + "step": 77, + "train_runtime": 11589.4114, + "train_tokens_per_second": 1695.617 + }, + { + "epoch": 4.903225806451613, + "grad_norm": 0.2228535077281988, + "learning_rate": 5.294354018255945e-05, + "loss": 0.4402, + "num_input_tokens_seen": 19913216, + "step": 78, + "train_runtime": 11741.453, + "train_tokens_per_second": 1695.975 + }, + { + "epoch": 4.967741935483871, + "grad_norm": 0.21632763381463402, + "learning_rate": 5.196299078795344e-05, + "loss": 0.4326, + "num_input_tokens_seen": 20175232, + "step": 79, + "train_runtime": 11893.3046, + "train_tokens_per_second": 1696.352 + }, + { + "epoch": 5.0, + "grad_norm": 0.21632763381463402, + "learning_rate": 5.0981684623031415e-05, + "loss": 0.4125, + "num_input_tokens_seen": 20306240, + "step": 80, + "train_runtime": 11969.094, + "train_tokens_per_second": 1696.556 + }, + { + "epoch": 5.064516129032258, + "grad_norm": 0.3571828769341814, + "learning_rate": 5e-05, + "loss": 0.3198, + "num_input_tokens_seen": 20568256, + "step": 81, + "train_runtime": 12121.0302, + "train_tokens_per_second": 1696.907 + }, + { + "epoch": 5.129032258064516, + "grad_norm": 0.21648090918089968, + "learning_rate": 4.901831537696859e-05, + "loss": 0.2964, + "num_input_tokens_seen": 20830272, + "step": 82, + "train_runtime": 12272.7158, + "train_tokens_per_second": 1697.283 + }, + { + "epoch": 5.193548387096774, + "grad_norm": 0.31458036325066546, + "learning_rate": 4.8037009212046586e-05, + "loss": 0.3057, + "num_input_tokens_seen": 21092288, + "step": 83, + "train_runtime": 12424.73, + "train_tokens_per_second": 1697.605 + }, + { + "epoch": 5.258064516129032, + "grad_norm": 0.2497078272482313, + "learning_rate": 4.7056459817440544e-05, + "loss": 0.3129, + "num_input_tokens_seen": 21354304, + "step": 84, + "train_runtime": 12576.3946, + "train_tokens_per_second": 1697.967 + }, + { + "epoch": 5.32258064516129, + "grad_norm": 0.26688301370237244, + "learning_rate": 4.607704521360776e-05, + "loss": 0.2966, + "num_input_tokens_seen": 21616320, + "step": 85, + "train_runtime": 12727.8322, + "train_tokens_per_second": 1698.35 + }, + { + "epoch": 5.387096774193548, + "grad_norm": 0.23431543616740275, + "learning_rate": 4.509914298352197e-05, + "loss": 0.2895, + "num_input_tokens_seen": 21878336, + "step": 86, + "train_runtime": 12879.5644, + 
"train_tokens_per_second": 1698.686 + }, + { + "epoch": 5.451612903225806, + "grad_norm": 0.24692139115327455, + "learning_rate": 4.412313012710813e-05, + "loss": 0.2926, + "num_input_tokens_seen": 22140352, + "step": 87, + "train_runtime": 13031.2099, + "train_tokens_per_second": 1699.025 + }, + { + "epoch": 5.516129032258064, + "grad_norm": 0.25361311803158476, + "learning_rate": 4.3149382915901606e-05, + "loss": 0.3056, + "num_input_tokens_seen": 22402368, + "step": 88, + "train_runtime": 13182.6668, + "train_tokens_per_second": 1699.381 + }, + { + "epoch": 5.580645161290323, + "grad_norm": 0.24929333634502235, + "learning_rate": 4.2178276747988446e-05, + "loss": 0.2922, + "num_input_tokens_seen": 22664384, + "step": 89, + "train_runtime": 13333.9686, + "train_tokens_per_second": 1699.748 + }, + { + "epoch": 5.645161290322581, + "grad_norm": 0.23498457386000374, + "learning_rate": 4.1210186003282275e-05, + "loss": 0.2947, + "num_input_tokens_seen": 22926400, + "step": 90, + "train_runtime": 13485.686, + "train_tokens_per_second": 1700.054 + }, + { + "epoch": 5.709677419354839, + "grad_norm": 0.2260009367281118, + "learning_rate": 4.0245483899193595e-05, + "loss": 0.2809, + "num_input_tokens_seen": 23188416, + "step": 91, + "train_runtime": 13637.0722, + "train_tokens_per_second": 1700.395 + }, + { + "epoch": 5.774193548387097, + "grad_norm": 0.23177978895395898, + "learning_rate": 3.928454234674747e-05, + "loss": 0.2835, + "num_input_tokens_seen": 23450432, + "step": 92, + "train_runtime": 13788.6307, + "train_tokens_per_second": 1700.708 + }, + { + "epoch": 5.838709677419355, + "grad_norm": 0.23286376279827942, + "learning_rate": 3.832773180720475e-05, + "loss": 0.2899, + "num_input_tokens_seen": 23712448, + "step": 93, + "train_runtime": 13940.8531, + "train_tokens_per_second": 1700.932 + }, + { + "epoch": 5.903225806451613, + "grad_norm": 0.25123564466559106, + "learning_rate": 3.73754211492421e-05, + "loss": 0.2187, + "num_input_tokens_seen": 23974464, + "step": 94, + "train_runtime": 152.8243, + "train_tokens_per_second": 156875.973 + }, + { + "epoch": 5.967741935483871, + "grad_norm": 0.21842207206834566, + "learning_rate": 3.642797750674629e-05, + "loss": 0.2062, + "num_input_tokens_seen": 24236480, + "step": 95, + "train_runtime": 304.4082, + "train_tokens_per_second": 79618.347 + }, + { + "epoch": 6.064516129032258, + "grad_norm": 0.4137660833142641, + "learning_rate": 3.5485766137276894e-05, + "loss": 0.3804, + "num_input_tokens_seen": 24629504, + "step": 96, + "train_runtime": 531.3585, + "train_tokens_per_second": 46351.953 + }, + { + "epoch": 6.129032258064516, + "grad_norm": 0.24930112620091768, + "learning_rate": 3.4549150281252636e-05, + "loss": 0.1957, + "num_input_tokens_seen": 24891520, + "step": 97, + "train_runtime": 683.4085, + "train_tokens_per_second": 36422.61 + }, + { + "epoch": 6.193548387096774, + "grad_norm": 0.25330156602007586, + "learning_rate": 3.361849102191533e-05, + "loss": 0.205, + "num_input_tokens_seen": 25153536, + "step": 98, + "train_runtime": 835.5065, + "train_tokens_per_second": 30105.735 + }, + { + "epoch": 6.258064516129032, + "grad_norm": 0.29921778638612334, + "learning_rate": 3.2694147146125345e-05, + "loss": 0.1908, + "num_input_tokens_seen": 25415552, + "step": 99, + "train_runtime": 987.5232, + "train_tokens_per_second": 25736.663 + }, + { + "epoch": 6.32258064516129, + "grad_norm": 0.2469233673084958, + "learning_rate": 3.177647500604252e-05, + "loss": 0.1847, + "num_input_tokens_seen": 25677568, + "step": 100, + "train_runtime": 
1139.8183, + "train_tokens_per_second": 22527.774 + }, + { + "epoch": 6.387096774193548, + "grad_norm": 0.7725747563793509, + "learning_rate": 3.086582838174551e-05, + "loss": 0.1817, + "num_input_tokens_seen": 25939584, + "step": 101, + "train_runtime": 1291.4222, + "train_tokens_per_second": 20086.06 + }, + { + "epoch": 6.451612903225806, + "grad_norm": 0.25680554550185525, + "learning_rate": 2.996255834484296e-05, + "loss": 0.1734, + "num_input_tokens_seen": 26201600, + "step": 102, + "train_runtime": 1443.928, + "train_tokens_per_second": 18146.057 + }, + { + "epoch": 6.516129032258064, + "grad_norm": 0.27042296025640733, + "learning_rate": 2.9067013123128613e-05, + "loss": 0.1869, + "num_input_tokens_seen": 26463616, + "step": 103, + "train_runtime": 1596.4915, + "train_tokens_per_second": 16576.108 + }, + { + "epoch": 6.580645161290323, + "grad_norm": 0.2882085640613909, + "learning_rate": 2.8179537966332887e-05, + "loss": 0.1892, + "num_input_tokens_seen": 26725632, + "step": 104, + "train_runtime": 1748.874, + "train_tokens_per_second": 15281.622 + }, + { + "epoch": 6.645161290322581, + "grad_norm": 0.23789859154459053, + "learning_rate": 2.7300475013022663e-05, + "loss": 0.2003, + "num_input_tokens_seen": 26987648, + "step": 105, + "train_runtime": 1900.7787, + "train_tokens_per_second": 14198.206 + }, + { + "epoch": 6.709677419354839, + "grad_norm": 0.24333597114107516, + "learning_rate": 2.6430163158700115e-05, + "loss": 0.1869, + "num_input_tokens_seen": 27249664, + "step": 106, + "train_runtime": 2053.1166, + "train_tokens_per_second": 13272.341 + }, + { + "epoch": 6.774193548387097, + "grad_norm": 0.22162314084558382, + "learning_rate": 2.556893792515227e-05, + "loss": 0.1652, + "num_input_tokens_seen": 27511680, + "step": 107, + "train_runtime": 2205.8735, + "train_tokens_per_second": 12472.012 + }, + { + "epoch": 6.838709677419355, + "grad_norm": 0.22353605121241715, + "learning_rate": 2.471713133110078e-05, + "loss": 0.1931, + "num_input_tokens_seen": 27773696, + "step": 108, + "train_runtime": 2358.313, + "train_tokens_per_second": 11776.934 + }, + { + "epoch": 6.903225806451613, + "grad_norm": 0.22455516265290895, + "learning_rate": 2.3875071764202563e-05, + "loss": 0.1841, + "num_input_tokens_seen": 28035712, + "step": 109, + "train_runtime": 2510.9803, + "train_tokens_per_second": 11165.246 + }, + { + "epoch": 6.967741935483871, + "grad_norm": 0.38402001653538137, + "learning_rate": 2.3043083854449988e-05, + "loss": 0.1843, + "num_input_tokens_seen": 28297728, + "step": 110, + "train_runtime": 2663.6487, + "train_tokens_per_second": 10623.671 + }, + { + "epoch": 7.0, + "grad_norm": 0.3742502503244311, + "learning_rate": 2.2221488349019903e-05, + "loss": 0.1603, + "num_input_tokens_seen": 28428736, + "step": 111, + "train_runtime": 2739.8421, + "train_tokens_per_second": 10376.049 + }, + { + "epoch": 7.064516129032258, + "grad_norm": 0.2248185819076052, + "learning_rate": 2.1410601988619394e-05, + "loss": 0.116, + "num_input_tokens_seen": 28690752, + "step": 112, + "train_runtime": 2892.3751, + "train_tokens_per_second": 9919.444 + }, + { + "epoch": 7.129032258064516, + "grad_norm": 0.2125797568607351, + "learning_rate": 2.061073738537635e-05, + "loss": 0.1325, + "num_input_tokens_seen": 28952768, + "step": 113, + "train_runtime": 3044.8954, + "train_tokens_per_second": 9508.625 + }, + { + "epoch": 7.193548387096774, + "grad_norm": 0.193911925191499, + "learning_rate": 1.982220290232143e-05, + "loss": 0.1212, + "num_input_tokens_seen": 29214784, + "step": 114, + 
"train_runtime": 3197.3523, + "train_tokens_per_second": 9137.18 + }, + { + "epoch": 7.258064516129032, + "grad_norm": 0.2538299645355182, + "learning_rate": 1.9045302534508297e-05, + "loss": 0.121, + "num_input_tokens_seen": 29476800, + "step": 115, + "train_runtime": 3350.2031, + "train_tokens_per_second": 8798.511 + }, + { + "epoch": 7.32258064516129, + "grad_norm": 0.23812141734052034, + "learning_rate": 1.8280335791817733e-05, + "loss": 0.1172, + "num_input_tokens_seen": 29738816, + "step": 116, + "train_runtime": 3502.6801, + "train_tokens_per_second": 8490.303 + }, + { + "epoch": 7.387096774193548, + "grad_norm": 0.2583083911424977, + "learning_rate": 1.7527597583490822e-05, + "loss": 0.1267, + "num_input_tokens_seen": 30000832, + "step": 117, + "train_runtime": 3655.3399, + "train_tokens_per_second": 8207.399 + }, + { + "epoch": 7.451612903225806, + "grad_norm": 0.35547790737132534, + "learning_rate": 1.678737810443593e-05, + "loss": 0.1132, + "num_input_tokens_seen": 30262848, + "step": 118, + "train_runtime": 3807.6391, + "train_tokens_per_second": 7947.93 + }, + { + "epoch": 7.516129032258064, + "grad_norm": 0.20767168289368249, + "learning_rate": 1.605996272335291e-05, + "loss": 0.1036, + "num_input_tokens_seen": 30524864, + "step": 119, + "train_runtime": 3960.35, + "train_tokens_per_second": 7707.618 + }, + { + "epoch": 7.580645161290323, + "grad_norm": 0.20569986142986987, + "learning_rate": 1.5345631872718214e-05, + "loss": 0.1085, + "num_input_tokens_seen": 30786880, + "step": 120, + "train_runtime": 4112.7549, + "train_tokens_per_second": 7485.708 + }, + { + "epoch": 7.645161290322581, + "grad_norm": 0.2080233876424552, + "learning_rate": 1.4644660940672627e-05, + "loss": 0.1075, + "num_input_tokens_seen": 31048896, + "step": 121, + "train_runtime": 4265.0677, + "train_tokens_per_second": 7279.813 + }, + { + "epoch": 7.709677419354839, + "grad_norm": 0.22886347662826706, + "learning_rate": 1.3957320164854059e-05, + "loss": 0.1091, + "num_input_tokens_seen": 31310912, + "step": 122, + "train_runtime": 4417.2414, + "train_tokens_per_second": 7088.341 + }, + { + "epoch": 7.774193548387097, + "grad_norm": 0.21253506561933785, + "learning_rate": 1.3283874528215733e-05, + "loss": 0.1003, + "num_input_tokens_seen": 31572928, + "step": 123, + "train_runtime": 4569.705, + "train_tokens_per_second": 6909.183 + }, + { + "epoch": 7.838709677419355, + "grad_norm": 0.211985762025977, + "learning_rate": 1.2624583656870154e-05, + "loss": 0.1123, + "num_input_tokens_seen": 31834944, + "step": 124, + "train_runtime": 4721.8673, + "train_tokens_per_second": 6742.024 + } + ], + "logging_steps": 1, + "max_steps": 160, + "num_input_tokens_seen": 31834944, + "num_train_epochs": 10, + "save_steps": 31, + "stateful_callbacks": { + "TrainerControl": { + "args": { + "should_epoch_stop": false, + "should_evaluate": false, + "should_log": false, + "should_save": true, + "should_training_stop": false + }, + "attributes": {} + } + }, + "total_flos": 251406573895680.0, + "train_batch_size": 4, + "trial_name": null, + "trial_params": null +} diff --git a/B_3/checkpoint-124/training_args.bin b/B_3/checkpoint-124/training_args.bin new file mode 100644 index 0000000000000000000000000000000000000000..dae6cb82f77aa91f4f2aac3f1f2dd06336bf044e --- /dev/null +++ b/B_3/checkpoint-124/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8cc4c22dcd8008af055092cdfdd6cef9bf6aaaae83714bdb9dad705f555cb73e +size 8081 diff --git a/B_3/checkpoint-124/zero_to_fp32.py 
b/B_3/checkpoint-124/zero_to_fp32.py new file mode 100644 index 0000000000000000000000000000000000000000..0e759146cadd92ddfefab3680146c2bd6a2b5c04 --- /dev/null +++ b/B_3/checkpoint-124/zero_to_fp32.py @@ -0,0 +1,760 @@ +#!/usr/bin/env python + +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +# This script extracts fp32 consolidated weights from a zero 1, 2 and 3 DeepSpeed checkpoints. It gets +# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in +# the future. Once extracted, the weights don't require DeepSpeed and can be used in any +# application. +# +# example: +# python zero_to_fp32.py . output_dir/ +# or +# python zero_to_fp32.py . output_dir/ --safe_serialization + +import argparse +import torch +import glob +import math +import os +import re +import gc +import json +import numpy as np +from tqdm import tqdm +from collections import OrderedDict +from dataclasses import dataclass + +# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with +# DeepSpeed data structures it has to be available in the current python environment. +from deepspeed.utils import logger +from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS, + FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES, + FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS) + + +@dataclass +class zero_model_state: + buffers: dict() + param_shapes: dict() + shared_params: list + ds_version: int + frozen_param_shapes: dict() + frozen_param_fragments: dict() + + +debug = 0 + +# load to cpu +device = torch.device('cpu') + + +def atoi(text): + return int(text) if text.isdigit() else text + + +def natural_keys(text): + ''' + alist.sort(key=natural_keys) sorts in human order + http://nedbatchelder.com/blog/200712/human_sorting.html + (See Toothy's implementation in the comments) + ''' + return [atoi(c) for c in re.split(r'(\d+)', text)] + + +def get_model_state_file(checkpoint_dir, zero_stage): + if not os.path.isdir(checkpoint_dir): + raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist") + + # there should be only one file + if zero_stage <= 2: + file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt") + elif zero_stage == 3: + file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt") + + if not os.path.exists(file): + raise FileNotFoundError(f"can't find model states file at '{file}'") + + return file + + +def get_checkpoint_files(checkpoint_dir, glob_pattern): + # XXX: need to test that this simple glob rule works for multi-node setup too + ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys) + + if len(ckpt_files) == 0: + raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'") + + return ckpt_files + + +def get_optim_files(checkpoint_dir): + return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt") + + +def get_model_state_files(checkpoint_dir): + return get_checkpoint_files(checkpoint_dir, "*_model_states.pt") + + +def parse_model_states(files): + zero_model_states = [] + for file in files: + state_dict = torch.load(file, map_location=device, weights_only=False) + + if BUFFER_NAMES not in state_dict: + raise ValueError(f"{file} is not a model state checkpoint") + buffer_names = state_dict[BUFFER_NAMES] + if debug: + print("Found buffers:", buffer_names) + + # recover just the buffers while restoring 
them to fp32 if they were saved in fp16 + buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names} + param_shapes = state_dict[PARAM_SHAPES] + + # collect parameters that are included in param_shapes + param_names = [] + for s in param_shapes: + for name in s.keys(): + param_names.append(name) + + # update with frozen parameters + frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None) + if frozen_param_shapes is not None: + if debug: + print(f"Found frozen_param_shapes: {frozen_param_shapes}") + param_names += list(frozen_param_shapes.keys()) + + # handle shared params + shared_params = [[k, v] for k, v in state_dict["shared_params"].items()] + + ds_version = state_dict.get(DS_VERSION, None) + + frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None) + + z_model_state = zero_model_state(buffers=buffers, + param_shapes=param_shapes, + shared_params=shared_params, + ds_version=ds_version, + frozen_param_shapes=frozen_param_shapes, + frozen_param_fragments=frozen_param_fragments) + zero_model_states.append(z_model_state) + + return zero_model_states + + +def parse_optim_states(files, ds_checkpoint_dir): + total_files = len(files) + state_dicts = [] + for f in tqdm(files, desc='Loading checkpoint shards'): + state_dict = torch.load(f, map_location=device, mmap=True, weights_only=False) + # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights + # and also handle the case where it was already removed by another helper script + state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None) + state_dicts.append(state_dict) + + if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]: + raise ValueError(f"{files[0]} is not a zero checkpoint") + zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE] + world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT] + + # For ZeRO-2 each param group can have different partition_count as data parallelism for expert + # parameters can be different from data parallelism for non-expert parameters. So we can just + # use the max of the partition_count to get the dp world_size. + + if type(world_size) is list: + world_size = max(world_size) + + if world_size != total_files: + raise ValueError( + f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. " + "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes." 
+ ) + + # the groups are named differently in each stage + if zero_stage <= 2: + fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS + elif zero_stage == 3: + fp32_groups_key = FP32_FLAT_GROUPS + else: + raise ValueError(f"unknown zero stage {zero_stage}") + + fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))] + return zero_stage, world_size, fp32_flat_groups + + +def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters): + """ + Returns fp32 state_dict reconstructed from ds checkpoint + + Args: + - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are) + + """ + print(f"Processing zero checkpoint '{ds_checkpoint_dir}'") + + optim_files = get_optim_files(ds_checkpoint_dir) + zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir) + print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}") + + model_files = get_model_state_files(ds_checkpoint_dir) + + zero_model_states = parse_model_states(model_files) + print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}') + + if zero_stage <= 2: + return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states, + exclude_frozen_parameters) + elif zero_stage == 3: + return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states, + exclude_frozen_parameters) + + +def _zero2_merge_frozen_params(state_dict, zero_model_states): + if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0: + return + + frozen_param_shapes = zero_model_states[0].frozen_param_shapes + frozen_param_fragments = zero_model_states[0].frozen_param_fragments + + if debug: + num_elem = sum(s.numel() for s in frozen_param_shapes.values()) + print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}') + + wanted_params = len(frozen_param_shapes) + wanted_numel = sum(s.numel() for s in frozen_param_shapes.values()) + avail_numel = sum([p.numel() for p in frozen_param_fragments.values()]) + print(f'Frozen params: Have {avail_numel} numels to process.') + print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params') + + total_params = 0 + total_numel = 0 + for name, shape in frozen_param_shapes.items(): + total_params += 1 + unpartitioned_numel = shape.numel() + total_numel += unpartitioned_numel + + state_dict[name] = frozen_param_fragments[name] + + if debug: + print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ") + + print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements") + + +def _has_callable(obj, fn): + attr = getattr(obj, fn, None) + return callable(attr) + + +def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states): + param_shapes = zero_model_states[0].param_shapes + + # Reconstruction protocol: + # + # XXX: document this + + if debug: + for i in range(world_size): + for j in range(len(fp32_flat_groups[0])): + print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}") + + # XXX: memory usage doubles here (zero2) + num_param_groups = len(fp32_flat_groups[0]) + merged_single_partition_of_fp32_groups = [] + for i in range(num_param_groups): + merged_partitions = [sd[i] for sd in fp32_flat_groups] + full_single_fp32_vector = torch.cat(merged_partitions, 0) + 
merged_single_partition_of_fp32_groups.append(full_single_fp32_vector) + avail_numel = sum( + [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups]) + + if debug: + wanted_params = sum([len(shapes) for shapes in param_shapes]) + wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes]) + # not asserting if there is a mismatch due to possible padding + print(f"Have {avail_numel} numels to process.") + print(f"Need {wanted_numel} numels in {wanted_params} params.") + + # params + # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support + # out-of-core computing solution + total_numel = 0 + total_params = 0 + for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups): + offset = 0 + avail_numel = full_single_fp32_vector.numel() + for name, shape in shapes.items(): + + unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape) + total_numel += unpartitioned_numel + total_params += 1 + + if debug: + print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ") + state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape) + offset += unpartitioned_numel + + # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and + # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex + # paddings performed in the code it's almost impossible to predict the exact numbers w/o the + # live optimizer object, so we are checking that the numbers are within the right range + align_to = 2 * world_size + + def zero2_align(x): + return align_to * math.ceil(x / align_to) + + if debug: + print(f"original offset={offset}, avail_numel={avail_numel}") + + offset = zero2_align(offset) + avail_numel = zero2_align(avail_numel) + + if debug: + print(f"aligned offset={offset}, avail_numel={avail_numel}") + + # Sanity check + if offset != avail_numel: + raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong") + + print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements") + + +def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states, + exclude_frozen_parameters): + state_dict = OrderedDict() + + # buffers + buffers = zero_model_states[0].buffers + state_dict.update(buffers) + if debug: + print(f"added {len(buffers)} buffers") + + if not exclude_frozen_parameters: + _zero2_merge_frozen_params(state_dict, zero_model_states) + + _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states) + + # recover shared parameters + for pair in zero_model_states[0].shared_params: + if pair[1] in state_dict: + state_dict[pair[0]] = state_dict[pair[1]] + + return state_dict + + +def zero3_partitioned_param_info(unpartitioned_numel, world_size): + remainder = unpartitioned_numel % world_size + padding_numel = (world_size - remainder) if remainder else 0 + partitioned_numel = math.ceil(unpartitioned_numel / world_size) + return partitioned_numel, padding_numel + + +def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states): + if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0: + return + + if debug: + for i in range(world_size): + num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values()) + print(f'rank {i}: 
{FROZEN_PARAM_SHAPES}.numel = {num_elem}') + + frozen_param_shapes = zero_model_states[0].frozen_param_shapes + wanted_params = len(frozen_param_shapes) + wanted_numel = sum(s.numel() for s in frozen_param_shapes.values()) + avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size + print(f'Frozen params: Have {avail_numel} numels to process.') + print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params') + + total_params = 0 + total_numel = 0 + for name, shape in zero_model_states[0].frozen_param_shapes.items(): + total_params += 1 + unpartitioned_numel = shape.numel() + total_numel += unpartitioned_numel + + param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states) + state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape) + + partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size) + + if debug: + print( + f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}" + ) + + print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements") + + +class GatheredTensor: + """ + A pseudo tensor that collects partitioned weights. + It is more memory efficient when there are multiple groups. + """ + + def __init__(self, flat_groups, flat_groups_offset, offset, partitioned_numel, shape): + self.flat_groups = flat_groups + self.flat_groups_offset = flat_groups_offset + self.offset = offset + self.partitioned_numel = partitioned_numel + self.shape = shape + self.dtype = self.flat_groups[0][0].dtype + + def contiguous(self): + """ + Merge partitioned weights from flat_groups into a single tensor. 
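+ The slice owned by each rank is located via ``flat_groups_offset``, the per-rank slices are concatenated, and the result is trimmed of padding and reshaped to the original parameter shape.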
+ """ + end_idx = self.offset + self.partitioned_numel + world_size = len(self.flat_groups) + pad_flat_param_chunks = [] + + for rank_i in range(world_size): + # for each rank, we need to collect weights from related group/groups + flat_groups_at_rank_i = self.flat_groups[rank_i] + start_group_id = None + end_group_id = None + for group_id in range(len(self.flat_groups_offset)): + if self.flat_groups_offset[group_id] <= self.offset < self.flat_groups_offset[group_id + 1]: + start_group_id = group_id + if self.flat_groups_offset[group_id] < end_idx <= self.flat_groups_offset[group_id + 1]: + end_group_id = group_id + break + # collect weights from related group/groups + for group_id in range(start_group_id, end_group_id + 1): + flat_tensor = flat_groups_at_rank_i[group_id] + start_offset = self.offset - self.flat_groups_offset[group_id] + end_offset = min(end_idx, self.flat_groups_offset[group_id + 1]) - self.flat_groups_offset[group_id] + pad_flat_param_chunks.append(flat_tensor[start_offset:end_offset]) + + # collect weights from all ranks + pad_flat_param = torch.cat(pad_flat_param_chunks, dim=0) + param = pad_flat_param[:self.shape.numel()].view(self.shape).contiguous() + return param + + +def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states): + param_shapes = zero_model_states[0].param_shapes + avail_numel = sum([flat_group.numel() for flat_group in fp32_flat_groups[0]]) * world_size + + # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each + # param, re-consolidating each param, while dealing with padding if any + + # merge list of dicts, preserving order + param_shapes = {k: v for d in param_shapes for k, v in d.items()} + + if debug: + for i in range(world_size): + print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}") + + wanted_params = len(param_shapes) + wanted_numel = sum(shape.numel() for shape in param_shapes.values()) + # not asserting if there is a mismatch due to possible padding + avail_numel = fp32_flat_groups[0].numel() * world_size + print(f"Trainable params: Have {avail_numel} numels to process.") + print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.") + + # params + # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support + # out-of-core computing solution + offset = 0 + total_numel = 0 + total_params = 0 + flat_groups_offset = [0] + list(np.cumsum([flat_tensor.numel() for flat_tensor in fp32_flat_groups[0]])) + for name, shape in tqdm(param_shapes.items(), desc='Gathering sharded weights'): + unpartitioned_numel = shape.numel() + total_numel += unpartitioned_numel + total_params += 1 + partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size) + + if debug: + print( + f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}" + ) + + # memory efficient tensor + tensor = GatheredTensor(fp32_flat_groups, flat_groups_offset, offset, partitioned_numel, shape) + state_dict[name] = tensor + offset += partitioned_numel + + offset *= world_size + + # Sanity check + if offset != avail_numel: + raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong") + + print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements") + + +def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, 
zero_model_states, + exclude_frozen_parameters): + state_dict = OrderedDict() + + # buffers + buffers = zero_model_states[0].buffers + state_dict.update(buffers) + if debug: + print(f"added {len(buffers)} buffers") + + if not exclude_frozen_parameters: + _zero3_merge_frozen_params(state_dict, world_size, zero_model_states) + + _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states) + + # recover shared parameters + for pair in zero_model_states[0].shared_params: + if pair[1] in state_dict: + state_dict[pair[0]] = state_dict[pair[1]] + + return state_dict + + +def to_torch_tensor(state_dict, return_empty_tensor=False): + """ + Convert state_dict of GatheredTensor to torch tensor + """ + torch_state_dict = {} + converted_tensors = {} + for name, tensor in state_dict.items(): + tensor_id = id(tensor) + if tensor_id in converted_tensors: # shared tensors + shared_tensor = torch_state_dict[converted_tensors[tensor_id]] + torch_state_dict[name] = shared_tensor + else: + converted_tensors[tensor_id] = name + if return_empty_tensor: + torch_state_dict[name] = torch.empty(tensor.shape, dtype=tensor.dtype) + else: + torch_state_dict[name] = tensor.contiguous() + return torch_state_dict + + +def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, + tag=None, + exclude_frozen_parameters=False, + lazy_mode=False): + """ + Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with + ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example + via a model hub. + + Args: + - ``checkpoint_dir``: path to the desired checkpoint folder + - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14`` + - ``exclude_frozen_parameters``: exclude frozen parameters + - ``lazy_mode``: get state_dict in lazy mode. It returns a dict of pseudo tensors instead of torch tensors, which is more memory efficient. + Convert the pseudo tensors to torch tensors by ``.contiguous()`` + + Returns: + - pytorch ``state_dict`` + + A typical usage might be :: + + from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint + # do the training and checkpoint saving + state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu + model = model.cpu() # move to cpu + model.load_state_dict(state_dict) + # submit to model hub or save the model to share with others + + In this example the ``model`` will no longer be usable in the deepspeed context of the same + application. i.e. you will need to re-initialize the deepspeed engine, since + ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it. + + If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead. + + Note: the above usage may not work if your application doesn't have sufficient free CPU memory. + You may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with + the checkpoint. 
Or you can load state_dict in lazy mode :: + + from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint + state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, lazy_mode=True) # not on cpu + for name, lazy_tensor in state_dict.items(): + tensor = lazy_tensor.contiguous() # to cpu + print(name, tensor) + # del tensor to release memory if it is no longer in use + """ + if tag is None: + latest_path = os.path.join(checkpoint_dir, 'latest') + if os.path.isfile(latest_path): + with open(latest_path, 'r') as fd: + tag = fd.read().strip() + else: + raise ValueError(f"Unable to find 'latest' file at {latest_path}") + + ds_checkpoint_dir = os.path.join(checkpoint_dir, tag) + + if not os.path.isdir(ds_checkpoint_dir): + raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist") + + state_dict = _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters) + if lazy_mode: + return state_dict + else: + return to_torch_tensor(state_dict) + + +def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, + output_dir, + max_shard_size="5GB", + safe_serialization=False, + tag=None, + exclude_frozen_parameters=False): + """ + Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be + loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed. + + Args: + - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``) + - ``output_dir``: directory to the pytorch fp32 state_dict output files + - ``max_shard_size``: the maximum size for a checkpoint before being sharded, default value is 5GB + - ``safe_serialization``: whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`). + - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14`` + - ``exclude_frozen_parameters``: exclude frozen parameters + """ + + # Dependency pre-check + if safe_serialization: + try: + from safetensors.torch import save_file + except ImportError: + print('If you want to use `safe_serialization`, please `pip install safetensors`') + raise + if max_shard_size is not None: + try: + from huggingface_hub import split_torch_state_dict_into_shards + except ImportError: + print('If you want to use `max_shard_size`, please `pip install huggingface_hub`') + raise + + # Convert zero checkpoint to state_dict + state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, + tag, + exclude_frozen_parameters, + lazy_mode=True) + + # Shard the model if it is too big.
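+    # `state_dict` still holds lazy GatheredTensor objects here. When `max_shard_size` is set,
+    # the shard plan only needs shapes and dtypes, so an empty-tensor copy is handed to
+    # `split_torch_state_dict_into_shards`; the real tensors are materialized one shard at a
+    # time in the save loop below, keeping peak CPU memory near the size of a single shard.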
+ weights_name = "model.safetensors" if safe_serialization else "pytorch_model.bin" + if max_shard_size is not None: + filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors") + # an memory-efficient approach for sharding + empty_state_dict = to_torch_tensor(state_dict, return_empty_tensor=True) + state_dict_split = split_torch_state_dict_into_shards(empty_state_dict, + filename_pattern=filename_pattern, + max_shard_size=max_shard_size) + else: + from collections import namedtuple + StateDictSplit = namedtuple("StateDictSplit", ["is_sharded", "filename_to_tensors"]) + state_dict_split = StateDictSplit(is_sharded=False, + filename_to_tensors={weights_name: list(state_dict.keys())}) + + # Save the model by shard + os.makedirs(output_dir, exist_ok=True) + filename_to_tensors = state_dict_split.filename_to_tensors.items() + for shard_file, tensors in tqdm(filename_to_tensors, desc="Saving checkpoint shards"): + shard_state_dict = {tensor_name: state_dict[tensor_name] for tensor_name in tensors} + shard_state_dict = to_torch_tensor(shard_state_dict) + output_path = os.path.join(output_dir, shard_file) + if safe_serialization: + save_file(shard_state_dict, output_path, metadata={"format": "pt"}) + else: + torch.save(shard_state_dict, output_path) + # release the memory of current shard + for tensor_name in list(shard_state_dict.keys()): + del state_dict[tensor_name] + del shard_state_dict[tensor_name] + del shard_state_dict + gc.collect() + + # Save index if sharded + if state_dict_split.is_sharded: + index = { + "metadata": state_dict_split.metadata, + "weight_map": state_dict_split.tensor_to_filename, + } + save_index_file = "model.safetensors.index.json" if safe_serialization else "pytorch_model.bin.index.json" + save_index_file = os.path.join(output_dir, save_index_file) + with open(save_index_file, "w", encoding="utf-8") as f: + content = json.dumps(index, indent=2, sort_keys=True) + "\n" + f.write(content) + + +def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None): + """ + 1. Put the provided model to cpu + 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` + 3. Load it into the provided model + + Args: + - ``model``: the model object to update + - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``) + - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14`` + + Returns: + - ``model`: modified model + + Make sure you have plenty of CPU memory available before you call this function. If you don't + have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it + conveniently placed for you in the checkpoint folder. + + A typical usage might be :: + + from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint + model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir) + # submit to model hub or save the model to share with others + + Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context + of the same application. i.e. you will need to re-initialize the deepspeed engine, since + ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it. 
+ + """ + logger.info(f"Extracting fp32 weights") + state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag) + + logger.info(f"Overwriting model with fp32 weights") + model = model.cpu() + model.load_state_dict(state_dict, strict=False) + + return model + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("checkpoint_dir", + type=str, + help="path to the desired checkpoint folder, e.g., path/checkpoint-12") + parser.add_argument("output_dir", + type=str, + help="directory to the pytorch fp32 state_dict output files" + "(e.g. path/checkpoint-12-output/)") + parser.add_argument( + "--max_shard_size", + type=str, + default="5GB", + help="The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size" + "lower than this size. If expressed as a string, needs to be digits followed by a unit (like `5MB`" + "We default it to 5GB in order for models to be able to run easily on free-tier google colab instances" + "without CPU OOM issues.") + parser.add_argument( + "--safe_serialization", + default=False, + action='store_true', + help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).") + parser.add_argument("-t", + "--tag", + type=str, + default=None, + help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1") + parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters") + parser.add_argument("-d", "--debug", action='store_true', help="enable debug") + args = parser.parse_args() + + debug = args.debug + + convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, + args.output_dir, + max_shard_size=args.max_shard_size, + safe_serialization=args.safe_serialization, + tag=args.tag, + exclude_frozen_parameters=args.exclude_frozen_parameters) diff --git a/B_3/checkpoint-155/README.md b/B_3/checkpoint-155/README.md new file mode 100644 index 0000000000000000000000000000000000000000..0673d0313e0251ef70dd85d3ff379913ab5506d9 --- /dev/null +++ b/B_3/checkpoint-155/README.md @@ -0,0 +1,202 @@ +--- +base_model: /workspace/meta-llama/Llama-3.1-70B +library_name: peft +--- + +# Model Card for Model ID + + + + + +## Model Details + +### Model Description + + + + + +- **Developed by:** [More Information Needed] +- **Funded by [optional]:** [More Information Needed] +- **Shared by [optional]:** [More Information Needed] +- **Model type:** [More Information Needed] +- **Language(s) (NLP):** [More Information Needed] +- **License:** [More Information Needed] +- **Finetuned from model [optional]:** [More Information Needed] + +### Model Sources [optional] + + + +- **Repository:** [More Information Needed] +- **Paper [optional]:** [More Information Needed] +- **Demo [optional]:** [More Information Needed] + +## Uses + + + +### Direct Use + + + +[More Information Needed] + +### Downstream Use [optional] + + + +[More Information Needed] + +### Out-of-Scope Use + + + +[More Information Needed] + +## Bias, Risks, and Limitations + + + +[More Information Needed] + +### Recommendations + + + +Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. + +## How to Get Started with the Model + +Use the code below to get started with the model. 
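+A minimal sketch of loading this LoRA adapter with PEFT; the paths below are assumptions and should point at your local copy of the Llama-3.1-70B base model and this adapter/checkpoint directory (`device_map="auto"` additionally requires `accelerate`):
+
+```python
+from transformers import AutoModelForCausalLM, AutoTokenizer
+from peft import PeftModel
+
+# Assumed local paths; adjust for your setup.
+base_model_path = "/workspace/meta-llama/Llama-3.1-70B"
+adapter_path = "B_3/checkpoint-155"
+
+tokenizer = AutoTokenizer.from_pretrained(adapter_path)
+base_model = AutoModelForCausalLM.from_pretrained(base_model_path, torch_dtype="auto", device_map="auto")
+model = PeftModel.from_pretrained(base_model, adapter_path)
+model.eval()
+```
+
+`PeftModel.from_pretrained` reads `adapter_config.json` from the given folder, so the same call should also work for the final `B_3` directory or any other intermediate checkpoint.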
+ +[More Information Needed] + +## Training Details + +### Training Data + + + +[More Information Needed] + +### Training Procedure + + + +#### Preprocessing [optional] + +[More Information Needed] + + +#### Training Hyperparameters + +- **Training regime:** [More Information Needed] + +#### Speeds, Sizes, Times [optional] + + + +[More Information Needed] + +## Evaluation + + + +### Testing Data, Factors & Metrics + +#### Testing Data + + + +[More Information Needed] + +#### Factors + + + +[More Information Needed] + +#### Metrics + + + +[More Information Needed] + +### Results + +[More Information Needed] + +#### Summary + + + +## Model Examination [optional] + + + +[More Information Needed] + +## Environmental Impact + + + +Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). + +- **Hardware Type:** [More Information Needed] +- **Hours used:** [More Information Needed] +- **Cloud Provider:** [More Information Needed] +- **Compute Region:** [More Information Needed] +- **Carbon Emitted:** [More Information Needed] + +## Technical Specifications [optional] + +### Model Architecture and Objective + +[More Information Needed] + +### Compute Infrastructure + +[More Information Needed] + +#### Hardware + +[More Information Needed] + +#### Software + +[More Information Needed] + +## Citation [optional] + + + +**BibTeX:** + +[More Information Needed] + +**APA:** + +[More Information Needed] + +## Glossary [optional] + + + +[More Information Needed] + +## More Information [optional] + +[More Information Needed] + +## Model Card Authors [optional] + +[More Information Needed] + +## Model Card Contact + +[More Information Needed] +### Framework versions + +- PEFT 0.15.2 \ No newline at end of file diff --git a/B_3/checkpoint-155/adapter_config.json b/B_3/checkpoint-155/adapter_config.json new file mode 100644 index 0000000000000000000000000000000000000000..fdf1a09789f67a17c6c0acf8b1f378b341fb25b0 --- /dev/null +++ b/B_3/checkpoint-155/adapter_config.json @@ -0,0 +1,39 @@ +{ + "alpha_pattern": {}, + "auto_mapping": null, + "base_model_name_or_path": "/workspace/meta-llama/Llama-3.1-70B", + "bias": "none", + "corda_config": null, + "eva_config": null, + "exclude_modules": null, + "fan_in_fan_out": false, + "inference_mode": true, + "init_lora_weights": true, + "layer_replication": null, + "layers_pattern": null, + "layers_to_transform": null, + "loftq_config": {}, + "lora_alpha": 1024, + "lora_bias": false, + "lora_dropout": 0, + "megatron_config": null, + "megatron_core": "megatron.core", + "modules_to_save": null, + "peft_type": "LORA", + "r": 256, + "rank_pattern": {}, + "revision": null, + "target_modules": [ + "o_proj", + "gate_proj", + "up_proj", + "k_proj", + "q_proj", + "down_proj", + "v_proj" + ], + "task_type": "CAUSAL_LM", + "trainable_token_indices": null, + "use_dora": false, + "use_rslora": false +} \ No newline at end of file diff --git a/B_3/checkpoint-155/adapter_model.safetensors b/B_3/checkpoint-155/adapter_model.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..db37f3744e95b40d438931a61fc59abba5b6128d --- /dev/null +++ b/B_3/checkpoint-155/adapter_model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b4af1d99e2089547ff748dbd411ec353a8cd495223e6176e604ef5c692291010 +size 6627156248 diff --git a/B_3/checkpoint-155/chat_template.jinja b/B_3/checkpoint-155/chat_template.jinja new 
file mode 100644 index 0000000000000000000000000000000000000000..c3af804a3bddb95bf03b7d349234a039a27de382 --- /dev/null +++ b/B_3/checkpoint-155/chat_template.jinja @@ -0,0 +1,7 @@ +{{ '<|begin_of_text|>' }}{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% endif %}{% if system_message is defined %}{{ '<|start_header_id|>system<|end_header_id|> + +' + system_message + '<|eot_id|>' }}{% endif %}{% for message in loop_messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '<|start_header_id|>user<|end_header_id|> + +' + content + '<|eot_id|><|start_header_id|>assistant<|end_header_id|> + +' }}{% elif message['role'] == 'assistant' %}{{ content + '<|eot_id|>' }}{% endif %}{% endfor %} \ No newline at end of file diff --git a/B_3/checkpoint-155/global_step151/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt b/B_3/checkpoint-155/global_step151/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..cf78fd068475e23d523de62d78eb5ca7041fa86f --- /dev/null +++ b/B_3/checkpoint-155/global_step151/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e859aa9c02a56111b212a4c1fbfe27da82f5a149517a33940c6005ad9928743e +size 9940504945 diff --git a/B_3/checkpoint-155/global_step151/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt b/B_3/checkpoint-155/global_step151/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..3e4bf31916612624f889c5648001a28f06933226 --- /dev/null +++ b/B_3/checkpoint-155/global_step151/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b3a243a7b4bea5e5cec2c00cbba76bab6807d40cd2cfd880a414c8b82f34032f +size 9940504945 diff --git a/B_3/checkpoint-155/global_step151/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt b/B_3/checkpoint-155/global_step151/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..32eecb2c752b2053b7f52a35b511c75a1a8d968b --- /dev/null +++ b/B_3/checkpoint-155/global_step151/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9ebbbbd656edc9d5702e89ce7b3c376106b1cd9b93f48cc3da5922026dc0fae4 +size 9940504945 diff --git a/B_3/checkpoint-155/global_step151/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt b/B_3/checkpoint-155/global_step151/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..25bfee0748ee85e7592823aea11b38b872cea517 --- /dev/null +++ b/B_3/checkpoint-155/global_step151/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b695d129495aa334b9f7afd0f7bd241ca48aee6c464ae6a225ce894b18fccd3b +size 9940504945 diff --git a/B_3/checkpoint-155/global_step151/zero_pp_rank_0_mp_rank_00_model_states.pt b/B_3/checkpoint-155/global_step151/zero_pp_rank_0_mp_rank_00_model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..3eb51792a9dca623b446c979f0c3a91f85cd0e09 --- /dev/null +++ b/B_3/checkpoint-155/global_step151/zero_pp_rank_0_mp_rank_00_model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:52cc1db236eb01a15d9ce5fe82932bc960f31c8ae2e40e5e679ffc69ff5f6d14 
+size 1109201 diff --git a/B_3/checkpoint-155/global_step151/zero_pp_rank_1_mp_rank_00_model_states.pt b/B_3/checkpoint-155/global_step151/zero_pp_rank_1_mp_rank_00_model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..1129d41d5a888a452c3c3761edb7af2af5112f23 --- /dev/null +++ b/B_3/checkpoint-155/global_step151/zero_pp_rank_1_mp_rank_00_model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b98366385610eb0c2da3ba54f3af3e7cf3cc1119a95a7cb5903ee3bc3f0a499a +size 1109201 diff --git a/B_3/checkpoint-155/global_step151/zero_pp_rank_2_mp_rank_00_model_states.pt b/B_3/checkpoint-155/global_step151/zero_pp_rank_2_mp_rank_00_model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..935be0f0fbd4448c797f5dd0a321aa12f4df77c7 --- /dev/null +++ b/B_3/checkpoint-155/global_step151/zero_pp_rank_2_mp_rank_00_model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4a2b70418b2857ee98aa941319bb5380a3286010013ebbc584886ade8c76ccd5 +size 1109201 diff --git a/B_3/checkpoint-155/global_step151/zero_pp_rank_3_mp_rank_00_model_states.pt b/B_3/checkpoint-155/global_step151/zero_pp_rank_3_mp_rank_00_model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..b3cebfbabdeb89cd8fe891f25412e6a4618afdaa --- /dev/null +++ b/B_3/checkpoint-155/global_step151/zero_pp_rank_3_mp_rank_00_model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2cb6a47b7ee0488551488672615b65e95ace7321860cc6c3378d4a179c7e1f16 +size 1109201 diff --git a/B_3/checkpoint-155/latest b/B_3/checkpoint-155/latest new file mode 100644 index 0000000000000000000000000000000000000000..7b7fb121aa418c3a0810fb614050d48f10c0791d --- /dev/null +++ b/B_3/checkpoint-155/latest @@ -0,0 +1 @@ +global_step151 \ No newline at end of file diff --git a/B_3/checkpoint-155/rng_state_0.pth b/B_3/checkpoint-155/rng_state_0.pth new file mode 100644 index 0000000000000000000000000000000000000000..3892ced04a314ac7c5749797be63c6f777549bc2 --- /dev/null +++ b/B_3/checkpoint-155/rng_state_0.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:68aff53abf54556534374818e977997b8a37c6e60af2c6f5ff5db0dc5f64413b +size 15429 diff --git a/B_3/checkpoint-155/rng_state_1.pth b/B_3/checkpoint-155/rng_state_1.pth new file mode 100644 index 0000000000000000000000000000000000000000..7eb71cdb30c04e63235eefff4104835a74ed59b6 --- /dev/null +++ b/B_3/checkpoint-155/rng_state_1.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3f17c1244ae8c28d751b811466a73fdcffc3debd687d05ab876d2d7b206f505f +size 15429 diff --git a/B_3/checkpoint-155/rng_state_2.pth b/B_3/checkpoint-155/rng_state_2.pth new file mode 100644 index 0000000000000000000000000000000000000000..a37d78a23eedc970031ad10da11caa3b2df7e26d --- /dev/null +++ b/B_3/checkpoint-155/rng_state_2.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:463ef398a9e9c9a73c7cd35268b11bcd9cae6069a9c14ebb21e396600eaae4ff +size 15429 diff --git a/B_3/checkpoint-155/rng_state_3.pth b/B_3/checkpoint-155/rng_state_3.pth new file mode 100644 index 0000000000000000000000000000000000000000..601244d64ac2a451a02bb2fc8c17ab77089ed593 --- /dev/null +++ b/B_3/checkpoint-155/rng_state_3.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7d69faf291bc0df091284d690ac4d423585b7909dfea277c2e2205c80ebd7f7c +size 15429 diff --git a/B_3/checkpoint-155/scheduler.pt b/B_3/checkpoint-155/scheduler.pt new file 
mode 100644 index 0000000000000000000000000000000000000000..bded5e6908b9ea0319e226abc6bd0923facb9203 --- /dev/null +++ b/B_3/checkpoint-155/scheduler.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2ba5f00cff22d60fd67599d2b924f9770f76e6148c15e5efcc7730c78da70bdc +size 1401 diff --git a/B_3/checkpoint-155/special_tokens_map.json b/B_3/checkpoint-155/special_tokens_map.json new file mode 100644 index 0000000000000000000000000000000000000000..14daf4588e61b4e4983af0fccaba4d5500c0977c --- /dev/null +++ b/B_3/checkpoint-155/special_tokens_map.json @@ -0,0 +1,26 @@ +{ + "additional_special_tokens": [ + { + "content": "<|eom_id|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + } + ], + "bos_token": { + "content": "<|begin_of_text|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + }, + "eos_token": { + "content": "<|eot_id|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + }, + "pad_token": "<|eot_id|>" +} diff --git a/B_3/checkpoint-155/tokenizer.json b/B_3/checkpoint-155/tokenizer.json new file mode 100644 index 0000000000000000000000000000000000000000..1c1d8d5c9024994f1d3b00f9662b8dd89ca13cf2 --- /dev/null +++ b/B_3/checkpoint-155/tokenizer.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6b9e4e7fb171f92fd137b777cc2714bf87d11576700a1dcd7a399e7bbe39537b +size 17209920 diff --git a/B_3/checkpoint-155/tokenizer_config.json b/B_3/checkpoint-155/tokenizer_config.json new file mode 100644 index 0000000000000000000000000000000000000000..d1e1ea9bc94ff1132f136710751e37fb23a64347 --- /dev/null +++ b/B_3/checkpoint-155/tokenizer_config.json @@ -0,0 +1,2068 @@ +{ + "added_tokens_decoder": { + "128000": { + "content": "<|begin_of_text|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128001": { + "content": "<|end_of_text|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128002": { + "content": "<|reserved_special_token_0|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128003": { + "content": "<|reserved_special_token_1|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128004": { + "content": "<|finetune_right_pad_id|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128005": { + "content": "<|reserved_special_token_2|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128006": { + "content": "<|start_header_id|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128007": { + "content": "<|end_header_id|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128008": { + "content": "<|eom_id|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128009": { + "content": "<|eot_id|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128010": { + "content": "<|python_tag|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128011": { + "content": 
"<|reserved_special_token_3|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128012": { + "content": "<|reserved_special_token_4|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128013": { + "content": "<|reserved_special_token_5|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128014": { + "content": "<|reserved_special_token_6|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128015": { + "content": "<|reserved_special_token_7|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128016": { + "content": "<|reserved_special_token_8|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128017": { + "content": "<|reserved_special_token_9|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128018": { + "content": "<|reserved_special_token_10|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128019": { + "content": "<|reserved_special_token_11|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128020": { + "content": "<|reserved_special_token_12|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128021": { + "content": "<|reserved_special_token_13|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128022": { + "content": "<|reserved_special_token_14|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128023": { + "content": "<|reserved_special_token_15|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128024": { + "content": "<|reserved_special_token_16|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128025": { + "content": "<|reserved_special_token_17|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128026": { + "content": "<|reserved_special_token_18|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128027": { + "content": "<|reserved_special_token_19|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128028": { + "content": "<|reserved_special_token_20|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128029": { + "content": "<|reserved_special_token_21|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128030": { + "content": "<|reserved_special_token_22|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128031": { + "content": "<|reserved_special_token_23|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128032": { + "content": "<|reserved_special_token_24|>", 
+ "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128033": { + "content": "<|reserved_special_token_25|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128034": { + "content": "<|reserved_special_token_26|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128035": { + "content": "<|reserved_special_token_27|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128036": { + "content": "<|reserved_special_token_28|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128037": { + "content": "<|reserved_special_token_29|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128038": { + "content": "<|reserved_special_token_30|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128039": { + "content": "<|reserved_special_token_31|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128040": { + "content": "<|reserved_special_token_32|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128041": { + "content": "<|reserved_special_token_33|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128042": { + "content": "<|reserved_special_token_34|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128043": { + "content": "<|reserved_special_token_35|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128044": { + "content": "<|reserved_special_token_36|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128045": { + "content": "<|reserved_special_token_37|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128046": { + "content": "<|reserved_special_token_38|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128047": { + "content": "<|reserved_special_token_39|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128048": { + "content": "<|reserved_special_token_40|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128049": { + "content": "<|reserved_special_token_41|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128050": { + "content": "<|reserved_special_token_42|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128051": { + "content": "<|reserved_special_token_43|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128052": { + "content": "<|reserved_special_token_44|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128053": { + "content": "<|reserved_special_token_45|>", + "lstrip": false, + 
"normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128054": { + "content": "<|reserved_special_token_46|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128055": { + "content": "<|reserved_special_token_47|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128056": { + "content": "<|reserved_special_token_48|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128057": { + "content": "<|reserved_special_token_49|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128058": { + "content": "<|reserved_special_token_50|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128059": { + "content": "<|reserved_special_token_51|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128060": { + "content": "<|reserved_special_token_52|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128061": { + "content": "<|reserved_special_token_53|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128062": { + "content": "<|reserved_special_token_54|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128063": { + "content": "<|reserved_special_token_55|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128064": { + "content": "<|reserved_special_token_56|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128065": { + "content": "<|reserved_special_token_57|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128066": { + "content": "<|reserved_special_token_58|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128067": { + "content": "<|reserved_special_token_59|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128068": { + "content": "<|reserved_special_token_60|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128069": { + "content": "<|reserved_special_token_61|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128070": { + "content": "<|reserved_special_token_62|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128071": { + "content": "<|reserved_special_token_63|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128072": { + "content": "<|reserved_special_token_64|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128073": { + "content": "<|reserved_special_token_65|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128074": { + "content": "<|reserved_special_token_66|>", + "lstrip": false, + "normalized": false, + 
"rstrip": false, + "single_word": false, + "special": true + }, + "128075": { + "content": "<|reserved_special_token_67|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128076": { + "content": "<|reserved_special_token_68|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128077": { + "content": "<|reserved_special_token_69|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128078": { + "content": "<|reserved_special_token_70|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128079": { + "content": "<|reserved_special_token_71|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128080": { + "content": "<|reserved_special_token_72|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128081": { + "content": "<|reserved_special_token_73|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128082": { + "content": "<|reserved_special_token_74|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128083": { + "content": "<|reserved_special_token_75|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128084": { + "content": "<|reserved_special_token_76|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128085": { + "content": "<|reserved_special_token_77|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128086": { + "content": "<|reserved_special_token_78|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128087": { + "content": "<|reserved_special_token_79|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128088": { + "content": "<|reserved_special_token_80|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128089": { + "content": "<|reserved_special_token_81|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128090": { + "content": "<|reserved_special_token_82|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128091": { + "content": "<|reserved_special_token_83|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128092": { + "content": "<|reserved_special_token_84|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128093": { + "content": "<|reserved_special_token_85|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128094": { + "content": "<|reserved_special_token_86|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128095": { + "content": "<|reserved_special_token_87|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + 
"single_word": false, + "special": true + }, + "128096": { + "content": "<|reserved_special_token_88|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128097": { + "content": "<|reserved_special_token_89|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128098": { + "content": "<|reserved_special_token_90|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128099": { + "content": "<|reserved_special_token_91|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128100": { + "content": "<|reserved_special_token_92|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128101": { + "content": "<|reserved_special_token_93|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128102": { + "content": "<|reserved_special_token_94|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128103": { + "content": "<|reserved_special_token_95|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128104": { + "content": "<|reserved_special_token_96|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128105": { + "content": "<|reserved_special_token_97|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128106": { + "content": "<|reserved_special_token_98|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128107": { + "content": "<|reserved_special_token_99|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128108": { + "content": "<|reserved_special_token_100|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128109": { + "content": "<|reserved_special_token_101|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128110": { + "content": "<|reserved_special_token_102|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128111": { + "content": "<|reserved_special_token_103|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128112": { + "content": "<|reserved_special_token_104|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128113": { + "content": "<|reserved_special_token_105|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128114": { + "content": "<|reserved_special_token_106|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128115": { + "content": "<|reserved_special_token_107|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128116": { + "content": "<|reserved_special_token_108|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": 
false, + "special": true + }, + "128117": { + "content": "<|reserved_special_token_109|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128118": { + "content": "<|reserved_special_token_110|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128119": { + "content": "<|reserved_special_token_111|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128120": { + "content": "<|reserved_special_token_112|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128121": { + "content": "<|reserved_special_token_113|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128122": { + "content": "<|reserved_special_token_114|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128123": { + "content": "<|reserved_special_token_115|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128124": { + "content": "<|reserved_special_token_116|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128125": { + "content": "<|reserved_special_token_117|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128126": { + "content": "<|reserved_special_token_118|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128127": { + "content": "<|reserved_special_token_119|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128128": { + "content": "<|reserved_special_token_120|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128129": { + "content": "<|reserved_special_token_121|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128130": { + "content": "<|reserved_special_token_122|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128131": { + "content": "<|reserved_special_token_123|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128132": { + "content": "<|reserved_special_token_124|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128133": { + "content": "<|reserved_special_token_125|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128134": { + "content": "<|reserved_special_token_126|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128135": { + "content": "<|reserved_special_token_127|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128136": { + "content": "<|reserved_special_token_128|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128137": { + "content": "<|reserved_special_token_129|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": 
false, + "special": true + }, + "128138": { + "content": "<|reserved_special_token_130|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128139": { + "content": "<|reserved_special_token_131|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128140": { + "content": "<|reserved_special_token_132|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128141": { + "content": "<|reserved_special_token_133|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128142": { + "content": "<|reserved_special_token_134|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128143": { + "content": "<|reserved_special_token_135|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128144": { + "content": "<|reserved_special_token_136|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128145": { + "content": "<|reserved_special_token_137|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128146": { + "content": "<|reserved_special_token_138|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128147": { + "content": "<|reserved_special_token_139|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128148": { + "content": "<|reserved_special_token_140|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128149": { + "content": "<|reserved_special_token_141|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128150": { + "content": "<|reserved_special_token_142|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128151": { + "content": "<|reserved_special_token_143|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128152": { + "content": "<|reserved_special_token_144|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128153": { + "content": "<|reserved_special_token_145|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128154": { + "content": "<|reserved_special_token_146|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128155": { + "content": "<|reserved_special_token_147|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128156": { + "content": "<|reserved_special_token_148|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128157": { + "content": "<|reserved_special_token_149|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128158": { + "content": "<|reserved_special_token_150|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": 
false, + "special": true + }, + "128159": { + "content": "<|reserved_special_token_151|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128160": { + "content": "<|reserved_special_token_152|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128161": { + "content": "<|reserved_special_token_153|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128162": { + "content": "<|reserved_special_token_154|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128163": { + "content": "<|reserved_special_token_155|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128164": { + "content": "<|reserved_special_token_156|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128165": { + "content": "<|reserved_special_token_157|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128166": { + "content": "<|reserved_special_token_158|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128167": { + "content": "<|reserved_special_token_159|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128168": { + "content": "<|reserved_special_token_160|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128169": { + "content": "<|reserved_special_token_161|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128170": { + "content": "<|reserved_special_token_162|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128171": { + "content": "<|reserved_special_token_163|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128172": { + "content": "<|reserved_special_token_164|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128173": { + "content": "<|reserved_special_token_165|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128174": { + "content": "<|reserved_special_token_166|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128175": { + "content": "<|reserved_special_token_167|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128176": { + "content": "<|reserved_special_token_168|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128177": { + "content": "<|reserved_special_token_169|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128178": { + "content": "<|reserved_special_token_170|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128179": { + "content": "<|reserved_special_token_171|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": 
false, + "special": true + }, + "128180": { + "content": "<|reserved_special_token_172|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128181": { + "content": "<|reserved_special_token_173|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128182": { + "content": "<|reserved_special_token_174|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128183": { + "content": "<|reserved_special_token_175|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128184": { + "content": "<|reserved_special_token_176|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128185": { + "content": "<|reserved_special_token_177|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128186": { + "content": "<|reserved_special_token_178|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128187": { + "content": "<|reserved_special_token_179|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128188": { + "content": "<|reserved_special_token_180|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128189": { + "content": "<|reserved_special_token_181|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128190": { + "content": "<|reserved_special_token_182|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128191": { + "content": "<|reserved_special_token_183|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128192": { + "content": "<|reserved_special_token_184|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128193": { + "content": "<|reserved_special_token_185|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128194": { + "content": "<|reserved_special_token_186|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128195": { + "content": "<|reserved_special_token_187|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128196": { + "content": "<|reserved_special_token_188|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128197": { + "content": "<|reserved_special_token_189|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128198": { + "content": "<|reserved_special_token_190|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128199": { + "content": "<|reserved_special_token_191|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128200": { + "content": "<|reserved_special_token_192|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": 
false, + "special": true + }, + "128201": { + "content": "<|reserved_special_token_193|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128202": { + "content": "<|reserved_special_token_194|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128203": { + "content": "<|reserved_special_token_195|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128204": { + "content": "<|reserved_special_token_196|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128205": { + "content": "<|reserved_special_token_197|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128206": { + "content": "<|reserved_special_token_198|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128207": { + "content": "<|reserved_special_token_199|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128208": { + "content": "<|reserved_special_token_200|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128209": { + "content": "<|reserved_special_token_201|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128210": { + "content": "<|reserved_special_token_202|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128211": { + "content": "<|reserved_special_token_203|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128212": { + "content": "<|reserved_special_token_204|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128213": { + "content": "<|reserved_special_token_205|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128214": { + "content": "<|reserved_special_token_206|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128215": { + "content": "<|reserved_special_token_207|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128216": { + "content": "<|reserved_special_token_208|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128217": { + "content": "<|reserved_special_token_209|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128218": { + "content": "<|reserved_special_token_210|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128219": { + "content": "<|reserved_special_token_211|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128220": { + "content": "<|reserved_special_token_212|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128221": { + "content": "<|reserved_special_token_213|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": 
false, + "special": true + }, + "128222": { + "content": "<|reserved_special_token_214|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128223": { + "content": "<|reserved_special_token_215|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128224": { + "content": "<|reserved_special_token_216|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128225": { + "content": "<|reserved_special_token_217|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128226": { + "content": "<|reserved_special_token_218|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128227": { + "content": "<|reserved_special_token_219|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128228": { + "content": "<|reserved_special_token_220|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128229": { + "content": "<|reserved_special_token_221|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128230": { + "content": "<|reserved_special_token_222|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128231": { + "content": "<|reserved_special_token_223|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128232": { + "content": "<|reserved_special_token_224|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128233": { + "content": "<|reserved_special_token_225|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128234": { + "content": "<|reserved_special_token_226|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128235": { + "content": "<|reserved_special_token_227|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128236": { + "content": "<|reserved_special_token_228|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128237": { + "content": "<|reserved_special_token_229|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128238": { + "content": "<|reserved_special_token_230|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128239": { + "content": "<|reserved_special_token_231|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128240": { + "content": "<|reserved_special_token_232|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128241": { + "content": "<|reserved_special_token_233|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128242": { + "content": "<|reserved_special_token_234|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": 
false, + "special": true + }, + "128243": { + "content": "<|reserved_special_token_235|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128244": { + "content": "<|reserved_special_token_236|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128245": { + "content": "<|reserved_special_token_237|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128246": { + "content": "<|reserved_special_token_238|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128247": { + "content": "<|reserved_special_token_239|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128248": { + "content": "<|reserved_special_token_240|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128249": { + "content": "<|reserved_special_token_241|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128250": { + "content": "<|reserved_special_token_242|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128251": { + "content": "<|reserved_special_token_243|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128252": { + "content": "<|reserved_special_token_244|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128253": { + "content": "<|reserved_special_token_245|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128254": { + "content": "<|reserved_special_token_246|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128255": { + "content": "<|reserved_special_token_247|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + } + }, + "additional_special_tokens": [ + "<|eom_id|>" + ], + "bos_token": "<|begin_of_text|>", + "clean_up_tokenization_spaces": true, + "eos_token": "<|eot_id|>", + "extra_special_tokens": {}, + "model_input_names": [ + "input_ids", + "attention_mask" + ], + "model_max_length": 131072, + "pad_token": "<|eot_id|>", + "padding_side": "right", + "split_special_tokens": false, + "tokenizer_class": "PreTrainedTokenizerFast" +} diff --git a/B_3/checkpoint-155/trainer_state.json b/B_3/checkpoint-155/trainer_state.json new file mode 100644 index 0000000000000000000000000000000000000000..2d0c01ae4901b8f3944f324a6b1fdd6fbbcac339 --- /dev/null +++ b/B_3/checkpoint-155/trainer_state.json @@ -0,0 +1,1584 @@ +{ + "best_global_step": null, + "best_metric": null, + "best_model_checkpoint": null, + "epoch": 9.774193548387096, + "eval_steps": 500, + "global_step": 155, + "is_hyper_param_search": false, + "is_local_process_zero": true, + "is_world_process_zero": true, + "log_history": [ + { + "epoch": 0.06451612903225806, + "grad_norm": 2.073743358513411, + "learning_rate": 0.0001, + "loss": 1.3591, + "num_input_tokens_seen": 262016, + "step": 1, + "train_runtime": 148.9807, + "train_tokens_per_second": 1758.725 + }, + { + "epoch": 0.12903225806451613, + "grad_norm": 1.7083966194301452, + "learning_rate": 
9.999036202410325e-05, + "loss": 1.4321, + "num_input_tokens_seen": 524032, + "step": 2, + "train_runtime": 298.073, + "train_tokens_per_second": 1758.066 + }, + { + "epoch": 0.1935483870967742, + "grad_norm": 0.8154519457271471, + "learning_rate": 9.996145181203615e-05, + "loss": 1.3036, + "num_input_tokens_seen": 786048, + "step": 3, + "train_runtime": 449.085, + "train_tokens_per_second": 1750.332 + }, + { + "epoch": 0.25806451612903225, + "grad_norm": 1.6435854418760425, + "learning_rate": 9.991328050923581e-05, + "loss": 1.3119, + "num_input_tokens_seen": 1048064, + "step": 4, + "train_runtime": 600.5729, + "train_tokens_per_second": 1745.107 + }, + { + "epoch": 0.3225806451612903, + "grad_norm": 1.2034889741595727, + "learning_rate": 9.98458666866564e-05, + "loss": 1.21, + "num_input_tokens_seen": 1310080, + "step": 5, + "train_runtime": 752.2986, + "train_tokens_per_second": 1741.436 + }, + { + "epoch": 0.3870967741935484, + "grad_norm": 0.6081797430818349, + "learning_rate": 9.975923633360985e-05, + "loss": 1.1713, + "num_input_tokens_seen": 1572096, + "step": 6, + "train_runtime": 903.8941, + "train_tokens_per_second": 1739.248 + }, + { + "epoch": 0.45161290322580644, + "grad_norm": 0.43860201273645816, + "learning_rate": 9.965342284774632e-05, + "loss": 1.1687, + "num_input_tokens_seen": 1834112, + "step": 7, + "train_runtime": 1055.454, + "train_tokens_per_second": 1737.747 + }, + { + "epoch": 0.5161290322580645, + "grad_norm": 0.3489614049191675, + "learning_rate": 9.952846702217886e-05, + "loss": 1.1151, + "num_input_tokens_seen": 2096128, + "step": 8, + "train_runtime": 1207.0142, + "train_tokens_per_second": 1736.623 + }, + { + "epoch": 0.5806451612903226, + "grad_norm": 3.182590493238281, + "learning_rate": 9.938441702975689e-05, + "loss": 1.0694, + "num_input_tokens_seen": 2358144, + "step": 9, + "train_runtime": 1358.12, + "train_tokens_per_second": 1736.33 + }, + { + "epoch": 0.6451612903225806, + "grad_norm": 0.40091824929141967, + "learning_rate": 9.922132840449459e-05, + "loss": 1.0431, + "num_input_tokens_seen": 2620160, + "step": 10, + "train_runtime": 1509.1349, + "train_tokens_per_second": 1736.2 + }, + { + "epoch": 0.7096774193548387, + "grad_norm": 0.4778713425435316, + "learning_rate": 9.903926402016153e-05, + "loss": 1.0812, + "num_input_tokens_seen": 2882176, + "step": 11, + "train_runtime": 1661.0314, + "train_tokens_per_second": 1735.172 + }, + { + "epoch": 0.7741935483870968, + "grad_norm": 0.3361596465836105, + "learning_rate": 9.883829406604363e-05, + "loss": 1.0296, + "num_input_tokens_seen": 3144192, + "step": 12, + "train_runtime": 1812.2372, + "train_tokens_per_second": 1734.978 + }, + { + "epoch": 0.8387096774193549, + "grad_norm": 0.30484937194478195, + "learning_rate": 9.861849601988383e-05, + "loss": 0.9806, + "num_input_tokens_seen": 3406208, + "step": 13, + "train_runtime": 1963.4393, + "train_tokens_per_second": 1734.817 + }, + { + "epoch": 0.9032258064516129, + "grad_norm": 0.31491494208058013, + "learning_rate": 9.837995461801299e-05, + "loss": 1.0244, + "num_input_tokens_seen": 3668224, + "step": 14, + "train_runtime": 2115.2404, + "train_tokens_per_second": 1734.188 + }, + { + "epoch": 0.967741935483871, + "grad_norm": 0.2555325387827011, + "learning_rate": 9.812276182268236e-05, + "loss": 0.9701, + "num_input_tokens_seen": 3930240, + "step": 15, + "train_runtime": 2266.9917, + "train_tokens_per_second": 1733.681 + }, + { + "epoch": 1.0, + "grad_norm": 0.2555325387827011, + "learning_rate": 9.784701678661045e-05, + "loss": 0.9885, + 
"num_input_tokens_seen": 4061248, + "step": 16, + "train_runtime": 2343.254, + "train_tokens_per_second": 1733.166 + }, + { + "epoch": 1.064516129032258, + "grad_norm": 0.3803435873199766, + "learning_rate": 9.755282581475769e-05, + "loss": 0.9221, + "num_input_tokens_seen": 4323264, + "step": 17, + "train_runtime": 2495.3616, + "train_tokens_per_second": 1732.52 + }, + { + "epoch": 1.129032258064516, + "grad_norm": 0.25742598219685603, + "learning_rate": 9.724030232334391e-05, + "loss": 0.9187, + "num_input_tokens_seen": 4585280, + "step": 18, + "train_runtime": 2646.9782, + "train_tokens_per_second": 1732.27 + }, + { + "epoch": 1.1935483870967742, + "grad_norm": 0.22468849320033518, + "learning_rate": 9.690956679612421e-05, + "loss": 0.8943, + "num_input_tokens_seen": 4847296, + "step": 19, + "train_runtime": 2798.7721, + "train_tokens_per_second": 1731.937 + }, + { + "epoch": 1.2580645161290323, + "grad_norm": 0.28670637895213946, + "learning_rate": 9.656074673794018e-05, + "loss": 0.8567, + "num_input_tokens_seen": 5109312, + "step": 20, + "train_runtime": 2950.5573, + "train_tokens_per_second": 1731.643 + }, + { + "epoch": 1.3225806451612903, + "grad_norm": 0.2043611694383423, + "learning_rate": 9.619397662556435e-05, + "loss": 0.8557, + "num_input_tokens_seen": 5371328, + "step": 21, + "train_runtime": 3103.0692, + "train_tokens_per_second": 1730.973 + }, + { + "epoch": 1.3870967741935485, + "grad_norm": 0.24667572059564058, + "learning_rate": 9.580939785585681e-05, + "loss": 0.8546, + "num_input_tokens_seen": 5633344, + "step": 22, + "train_runtime": 3254.6109, + "train_tokens_per_second": 1730.881 + }, + { + "epoch": 1.4516129032258065, + "grad_norm": 0.24067782128131054, + "learning_rate": 9.540715869125407e-05, + "loss": 0.851, + "num_input_tokens_seen": 5895360, + "step": 23, + "train_runtime": 3406.7776, + "train_tokens_per_second": 1730.48 + }, + { + "epoch": 1.5161290322580645, + "grad_norm": 0.2000848793492341, + "learning_rate": 9.498741420261108e-05, + "loss": 0.8483, + "num_input_tokens_seen": 6157376, + "step": 24, + "train_runtime": 3558.7104, + "train_tokens_per_second": 1730.227 + }, + { + "epoch": 1.5806451612903225, + "grad_norm": 0.22359283400518964, + "learning_rate": 9.45503262094184e-05, + "loss": 0.8226, + "num_input_tokens_seen": 6419392, + "step": 25, + "train_runtime": 3711.0551, + "train_tokens_per_second": 1729.802 + }, + { + "epoch": 1.6451612903225805, + "grad_norm": 1.0648985294170912, + "learning_rate": 9.409606321741775e-05, + "loss": 0.8259, + "num_input_tokens_seen": 6681408, + "step": 26, + "train_runtime": 3863.7709, + "train_tokens_per_second": 1729.245 + }, + { + "epoch": 1.7096774193548387, + "grad_norm": 0.18133437899141433, + "learning_rate": 9.362480035363986e-05, + "loss": 0.8524, + "num_input_tokens_seen": 6943424, + "step": 27, + "train_runtime": 4016.4383, + "train_tokens_per_second": 1728.752 + }, + { + "epoch": 1.7741935483870968, + "grad_norm": 0.456273625303925, + "learning_rate": 9.31367192988896e-05, + "loss": 0.8209, + "num_input_tokens_seen": 7205440, + "step": 28, + "train_runtime": 4168.9223, + "train_tokens_per_second": 1728.37 + }, + { + "epoch": 1.838709677419355, + "grad_norm": 0.4673225345753548, + "learning_rate": 9.263200821770461e-05, + "loss": 0.7853, + "num_input_tokens_seen": 7467456, + "step": 29, + "train_runtime": 4321.7056, + "train_tokens_per_second": 1727.896 + }, + { + "epoch": 1.903225806451613, + "grad_norm": 0.1996315966078707, + "learning_rate": 9.211086168581433e-05, + "loss": 0.7779, + 
"num_input_tokens_seen": 7729472, + "step": 30, + "train_runtime": 4474.7713, + "train_tokens_per_second": 1727.345 + }, + { + "epoch": 1.967741935483871, + "grad_norm": 0.3752545960960864, + "learning_rate": 9.157348061512727e-05, + "loss": 0.8146, + "num_input_tokens_seen": 7991488, + "step": 31, + "train_runtime": 4627.5272, + "train_tokens_per_second": 1726.946 + }, + { + "epoch": 2.0, + "grad_norm": 0.484664872247163, + "learning_rate": 9.102007217627568e-05, + "loss": 0.7787, + "num_input_tokens_seen": 8122496, + "step": 32, + "train_runtime": 4809.6299, + "train_tokens_per_second": 1688.799 + }, + { + "epoch": 2.064516129032258, + "grad_norm": 0.299023275639115, + "learning_rate": 9.045084971874738e-05, + "loss": 0.7361, + "num_input_tokens_seen": 8384512, + "step": 33, + "train_runtime": 4960.4611, + "train_tokens_per_second": 1690.269 + }, + { + "epoch": 2.129032258064516, + "grad_norm": 0.20981231086811225, + "learning_rate": 8.986603268863536e-05, + "loss": 0.6956, + "num_input_tokens_seen": 8646528, + "step": 34, + "train_runtime": 5112.1398, + "train_tokens_per_second": 1691.372 + }, + { + "epoch": 2.193548387096774, + "grad_norm": 0.3648857123126151, + "learning_rate": 8.926584654403724e-05, + "loss": 0.6819, + "num_input_tokens_seen": 8908544, + "step": 35, + "train_runtime": 5264.3603, + "train_tokens_per_second": 1692.237 + }, + { + "epoch": 2.258064516129032, + "grad_norm": 0.31269893807625165, + "learning_rate": 8.865052266813685e-05, + "loss": 0.6958, + "num_input_tokens_seen": 9170560, + "step": 36, + "train_runtime": 5416.2601, + "train_tokens_per_second": 1693.154 + }, + { + "epoch": 2.3225806451612905, + "grad_norm": 0.2727897516670068, + "learning_rate": 8.802029828000156e-05, + "loss": 0.6756, + "num_input_tokens_seen": 9432576, + "step": 37, + "train_runtime": 5567.9089, + "train_tokens_per_second": 1694.097 + }, + { + "epoch": 2.3870967741935485, + "grad_norm": 0.25721446603694037, + "learning_rate": 8.737541634312985e-05, + "loss": 0.648, + "num_input_tokens_seen": 9694592, + "step": 38, + "train_runtime": 5719.9113, + "train_tokens_per_second": 1694.885 + }, + { + "epoch": 2.4516129032258065, + "grad_norm": 20.44651031163169, + "learning_rate": 8.671612547178428e-05, + "loss": 0.669, + "num_input_tokens_seen": 9956608, + "step": 39, + "train_runtime": 5871.7821, + "train_tokens_per_second": 1695.671 + }, + { + "epoch": 2.5161290322580645, + "grad_norm": 3.0150996948618145, + "learning_rate": 8.604267983514594e-05, + "loss": 0.6856, + "num_input_tokens_seen": 10218624, + "step": 40, + "train_runtime": 6024.0794, + "train_tokens_per_second": 1696.296 + }, + { + "epoch": 2.5806451612903225, + "grad_norm": 19.306885630930985, + "learning_rate": 8.535533905932738e-05, + "loss": 0.6949, + "num_input_tokens_seen": 10480640, + "step": 41, + "train_runtime": 6176.6303, + "train_tokens_per_second": 1696.822 + }, + { + "epoch": 2.6451612903225805, + "grad_norm": 46.29810754342157, + "learning_rate": 8.46543681272818e-05, + "loss": 0.7467, + "num_input_tokens_seen": 10742656, + "step": 42, + "train_runtime": 6329.1576, + "train_tokens_per_second": 1697.328 + }, + { + "epoch": 2.709677419354839, + "grad_norm": 9.77264228502113, + "learning_rate": 8.39400372766471e-05, + "loss": 0.6638, + "num_input_tokens_seen": 11004672, + "step": 43, + "train_runtime": 6481.6817, + "train_tokens_per_second": 1697.811 + }, + { + "epoch": 2.774193548387097, + "grad_norm": 16.058872629216598, + "learning_rate": 8.321262189556409e-05, + "loss": 0.7096, + "num_input_tokens_seen": 11266688, + 
"step": 44, + "train_runtime": 6633.7733, + "train_tokens_per_second": 1698.383 + }, + { + "epoch": 2.838709677419355, + "grad_norm": 1.0261214777781495, + "learning_rate": 8.247240241650918e-05, + "loss": 0.6737, + "num_input_tokens_seen": 11528704, + "step": 45, + "train_runtime": 6785.7147, + "train_tokens_per_second": 1698.967 + }, + { + "epoch": 2.903225806451613, + "grad_norm": 1.6378077961358868, + "learning_rate": 8.171966420818228e-05, + "loss": 0.6638, + "num_input_tokens_seen": 11790720, + "step": 46, + "train_runtime": 6937.3194, + "train_tokens_per_second": 1699.607 + }, + { + "epoch": 2.967741935483871, + "grad_norm": 2.4944381489903455, + "learning_rate": 8.095469746549172e-05, + "loss": 0.6753, + "num_input_tokens_seen": 12052736, + "step": 47, + "train_runtime": 7089.3141, + "train_tokens_per_second": 1700.127 + }, + { + "epoch": 3.0, + "grad_norm": 2.4944381489903455, + "learning_rate": 8.017779709767858e-05, + "loss": 0.6673, + "num_input_tokens_seen": 12183744, + "step": 48, + "train_runtime": 7165.3736, + "train_tokens_per_second": 1700.364 + }, + { + "epoch": 3.064516129032258, + "grad_norm": 26.97115523850904, + "learning_rate": 7.938926261462366e-05, + "loss": 0.6049, + "num_input_tokens_seen": 12445760, + "step": 49, + "train_runtime": 7317.3555, + "train_tokens_per_second": 1700.855 + }, + { + "epoch": 3.129032258064516, + "grad_norm": 5.4430349870657375, + "learning_rate": 7.858939801138061e-05, + "loss": 1.0716, + "num_input_tokens_seen": 12707776, + "step": 50, + "train_runtime": 7468.9695, + "train_tokens_per_second": 1701.41 + }, + { + "epoch": 3.193548387096774, + "grad_norm": 12.78395158722848, + "learning_rate": 7.777851165098012e-05, + "loss": 1.0428, + "num_input_tokens_seen": 12969792, + "step": 51, + "train_runtime": 7620.5451, + "train_tokens_per_second": 1701.951 + }, + { + "epoch": 3.258064516129032, + "grad_norm": 326.4832878895978, + "learning_rate": 7.695691614555003e-05, + "loss": 0.9172, + "num_input_tokens_seen": 13231808, + "step": 52, + "train_runtime": 7772.2756, + "train_tokens_per_second": 1702.437 + }, + { + "epoch": 3.3225806451612905, + "grad_norm": 69.03004430934298, + "learning_rate": 7.612492823579745e-05, + "loss": 0.7593, + "num_input_tokens_seen": 13493824, + "step": 53, + "train_runtime": 7924.7666, + "train_tokens_per_second": 1702.741 + }, + { + "epoch": 3.3870967741935485, + "grad_norm": 1.1495044460164596, + "learning_rate": 7.528286866889924e-05, + "loss": 0.6151, + "num_input_tokens_seen": 13755840, + "step": 54, + "train_runtime": 8077.0191, + "train_tokens_per_second": 1703.084 + }, + { + "epoch": 3.4516129032258065, + "grad_norm": 0.252822200967686, + "learning_rate": 7.443106207484776e-05, + "loss": 0.5964, + "num_input_tokens_seen": 14017856, + "step": 55, + "train_runtime": 8229.2406, + "train_tokens_per_second": 1703.42 + }, + { + "epoch": 3.5161290322580645, + "grad_norm": 0.36169399074673414, + "learning_rate": 7.35698368412999e-05, + "loss": 0.5809, + "num_input_tokens_seen": 14279872, + "step": 56, + "train_runtime": 8380.9399, + "train_tokens_per_second": 1703.851 + }, + { + "epoch": 3.5806451612903225, + "grad_norm": 0.3219026144290714, + "learning_rate": 7.269952498697734e-05, + "loss": 0.5638, + "num_input_tokens_seen": 14541888, + "step": 57, + "train_runtime": 8533.1482, + "train_tokens_per_second": 1704.164 + }, + { + "epoch": 3.6451612903225805, + "grad_norm": 1.0720094012961243, + "learning_rate": 7.18204620336671e-05, + "loss": 0.5553, + "num_input_tokens_seen": 14803904, + "step": 58, + 
"train_runtime": 8685.2309, + "train_tokens_per_second": 1704.492 + }, + { + "epoch": 3.709677419354839, + "grad_norm": 0.3151374305079844, + "learning_rate": 7.09329868768714e-05, + "loss": 0.5555, + "num_input_tokens_seen": 15065920, + "step": 59, + "train_runtime": 8837.7033, + "train_tokens_per_second": 1704.733 + }, + { + "epoch": 3.774193548387097, + "grad_norm": 0.2917481639941811, + "learning_rate": 7.003744165515705e-05, + "loss": 0.5635, + "num_input_tokens_seen": 15327936, + "step": 60, + "train_runtime": 8990.7028, + "train_tokens_per_second": 1704.865 + }, + { + "epoch": 3.838709677419355, + "grad_norm": 0.39703897734831073, + "learning_rate": 6.91341716182545e-05, + "loss": 0.5775, + "num_input_tokens_seen": 15589952, + "step": 61, + "train_runtime": 9142.8639, + "train_tokens_per_second": 1705.15 + }, + { + "epoch": 3.903225806451613, + "grad_norm": 0.330407023300617, + "learning_rate": 6.82235249939575e-05, + "loss": 0.5305, + "num_input_tokens_seen": 15851968, + "step": 62, + "train_runtime": 9294.6576, + "train_tokens_per_second": 1705.492 + }, + { + "epoch": 3.967741935483871, + "grad_norm": 0.2594779667409557, + "learning_rate": 6.730585285387465e-05, + "loss": 0.5428, + "num_input_tokens_seen": 16113984, + "step": 63, + "train_runtime": 9538.1388, + "train_tokens_per_second": 1689.426 + }, + { + "epoch": 4.0, + "grad_norm": 0.5188739404593391, + "learning_rate": 6.638150897808468e-05, + "loss": 0.5671, + "num_input_tokens_seen": 16244992, + "step": 64, + "train_runtime": 9613.4951, + "train_tokens_per_second": 1689.811 + }, + { + "epoch": 4.064516129032258, + "grad_norm": 0.2817891367025121, + "learning_rate": 6.545084971874738e-05, + "loss": 0.4715, + "num_input_tokens_seen": 16507008, + "step": 65, + "train_runtime": 9764.9453, + "train_tokens_per_second": 1690.435 + }, + { + "epoch": 4.129032258064516, + "grad_norm": 0.4313237711714659, + "learning_rate": 6.451423386272312e-05, + "loss": 0.4488, + "num_input_tokens_seen": 16769024, + "step": 66, + "train_runtime": 9916.329, + "train_tokens_per_second": 1691.052 + }, + { + "epoch": 4.193548387096774, + "grad_norm": 9.407910846391866, + "learning_rate": 6.357202249325371e-05, + "loss": 0.4582, + "num_input_tokens_seen": 17031040, + "step": 67, + "train_runtime": 10068.4234, + "train_tokens_per_second": 1691.53 + }, + { + "epoch": 4.258064516129032, + "grad_norm": 0.4318030663645452, + "learning_rate": 6.26245788507579e-05, + "loss": 0.4588, + "num_input_tokens_seen": 17293056, + "step": 68, + "train_runtime": 10219.9518, + "train_tokens_per_second": 1692.088 + }, + { + "epoch": 4.32258064516129, + "grad_norm": 0.2711661394321373, + "learning_rate": 6.167226819279528e-05, + "loss": 0.4447, + "num_input_tokens_seen": 17555072, + "step": 69, + "train_runtime": 10371.656, + "train_tokens_per_second": 1692.601 + }, + { + "epoch": 4.387096774193548, + "grad_norm": 0.2785432314686511, + "learning_rate": 6.071545765325254e-05, + "loss": 0.4565, + "num_input_tokens_seen": 17817088, + "step": 70, + "train_runtime": 10524.0113, + "train_tokens_per_second": 1692.994 + }, + { + "epoch": 4.451612903225806, + "grad_norm": 0.24786649766049834, + "learning_rate": 5.9754516100806423e-05, + "loss": 0.4665, + "num_input_tokens_seen": 18079104, + "step": 71, + "train_runtime": 10676.331, + "train_tokens_per_second": 1693.382 + }, + { + "epoch": 4.516129032258064, + "grad_norm": 0.2454529793840644, + "learning_rate": 5.8789813996717736e-05, + "loss": 0.446, + "num_input_tokens_seen": 18341120, + "step": 72, + "train_runtime": 10828.6719, + 
"train_tokens_per_second": 1693.755 + }, + { + "epoch": 4.580645161290323, + "grad_norm": 0.25559855808246584, + "learning_rate": 5.782172325201155e-05, + "loss": 0.4384, + "num_input_tokens_seen": 18603136, + "step": 73, + "train_runtime": 10981.143, + "train_tokens_per_second": 1694.098 + }, + { + "epoch": 4.645161290322581, + "grad_norm": 0.24822833593804589, + "learning_rate": 5.685061708409841e-05, + "loss": 0.4339, + "num_input_tokens_seen": 18865152, + "step": 74, + "train_runtime": 11133.4844, + "train_tokens_per_second": 1694.452 + }, + { + "epoch": 4.709677419354839, + "grad_norm": 0.24264729806408034, + "learning_rate": 5.587686987289189e-05, + "loss": 0.444, + "num_input_tokens_seen": 19127168, + "step": 75, + "train_runtime": 11285.3939, + "train_tokens_per_second": 1694.86 + }, + { + "epoch": 4.774193548387097, + "grad_norm": 0.22384324563071528, + "learning_rate": 5.490085701647805e-05, + "loss": 0.4353, + "num_input_tokens_seen": 19389184, + "step": 76, + "train_runtime": 11437.3833, + "train_tokens_per_second": 1695.246 + }, + { + "epoch": 4.838709677419355, + "grad_norm": 0.24764076326899273, + "learning_rate": 5.392295478639225e-05, + "loss": 0.4257, + "num_input_tokens_seen": 19651200, + "step": 77, + "train_runtime": 11589.4114, + "train_tokens_per_second": 1695.617 + }, + { + "epoch": 4.903225806451613, + "grad_norm": 0.2228535077281988, + "learning_rate": 5.294354018255945e-05, + "loss": 0.4402, + "num_input_tokens_seen": 19913216, + "step": 78, + "train_runtime": 11741.453, + "train_tokens_per_second": 1695.975 + }, + { + "epoch": 4.967741935483871, + "grad_norm": 0.21632763381463402, + "learning_rate": 5.196299078795344e-05, + "loss": 0.4326, + "num_input_tokens_seen": 20175232, + "step": 79, + "train_runtime": 11893.3046, + "train_tokens_per_second": 1696.352 + }, + { + "epoch": 5.0, + "grad_norm": 0.21632763381463402, + "learning_rate": 5.0981684623031415e-05, + "loss": 0.4125, + "num_input_tokens_seen": 20306240, + "step": 80, + "train_runtime": 11969.094, + "train_tokens_per_second": 1696.556 + }, + { + "epoch": 5.064516129032258, + "grad_norm": 0.3571828769341814, + "learning_rate": 5e-05, + "loss": 0.3198, + "num_input_tokens_seen": 20568256, + "step": 81, + "train_runtime": 12121.0302, + "train_tokens_per_second": 1696.907 + }, + { + "epoch": 5.129032258064516, + "grad_norm": 0.21648090918089968, + "learning_rate": 4.901831537696859e-05, + "loss": 0.2964, + "num_input_tokens_seen": 20830272, + "step": 82, + "train_runtime": 12272.7158, + "train_tokens_per_second": 1697.283 + }, + { + "epoch": 5.193548387096774, + "grad_norm": 0.31458036325066546, + "learning_rate": 4.8037009212046586e-05, + "loss": 0.3057, + "num_input_tokens_seen": 21092288, + "step": 83, + "train_runtime": 12424.73, + "train_tokens_per_second": 1697.605 + }, + { + "epoch": 5.258064516129032, + "grad_norm": 0.2497078272482313, + "learning_rate": 4.7056459817440544e-05, + "loss": 0.3129, + "num_input_tokens_seen": 21354304, + "step": 84, + "train_runtime": 12576.3946, + "train_tokens_per_second": 1697.967 + }, + { + "epoch": 5.32258064516129, + "grad_norm": 0.26688301370237244, + "learning_rate": 4.607704521360776e-05, + "loss": 0.2966, + "num_input_tokens_seen": 21616320, + "step": 85, + "train_runtime": 12727.8322, + "train_tokens_per_second": 1698.35 + }, + { + "epoch": 5.387096774193548, + "grad_norm": 0.23431543616740275, + "learning_rate": 4.509914298352197e-05, + "loss": 0.2895, + "num_input_tokens_seen": 21878336, + "step": 86, + "train_runtime": 12879.5644, + 
"train_tokens_per_second": 1698.686 + }, + { + "epoch": 5.451612903225806, + "grad_norm": 0.24692139115327455, + "learning_rate": 4.412313012710813e-05, + "loss": 0.2926, + "num_input_tokens_seen": 22140352, + "step": 87, + "train_runtime": 13031.2099, + "train_tokens_per_second": 1699.025 + }, + { + "epoch": 5.516129032258064, + "grad_norm": 0.25361311803158476, + "learning_rate": 4.3149382915901606e-05, + "loss": 0.3056, + "num_input_tokens_seen": 22402368, + "step": 88, + "train_runtime": 13182.6668, + "train_tokens_per_second": 1699.381 + }, + { + "epoch": 5.580645161290323, + "grad_norm": 0.24929333634502235, + "learning_rate": 4.2178276747988446e-05, + "loss": 0.2922, + "num_input_tokens_seen": 22664384, + "step": 89, + "train_runtime": 13333.9686, + "train_tokens_per_second": 1699.748 + }, + { + "epoch": 5.645161290322581, + "grad_norm": 0.23498457386000374, + "learning_rate": 4.1210186003282275e-05, + "loss": 0.2947, + "num_input_tokens_seen": 22926400, + "step": 90, + "train_runtime": 13485.686, + "train_tokens_per_second": 1700.054 + }, + { + "epoch": 5.709677419354839, + "grad_norm": 0.2260009367281118, + "learning_rate": 4.0245483899193595e-05, + "loss": 0.2809, + "num_input_tokens_seen": 23188416, + "step": 91, + "train_runtime": 13637.0722, + "train_tokens_per_second": 1700.395 + }, + { + "epoch": 5.774193548387097, + "grad_norm": 0.23177978895395898, + "learning_rate": 3.928454234674747e-05, + "loss": 0.2835, + "num_input_tokens_seen": 23450432, + "step": 92, + "train_runtime": 13788.6307, + "train_tokens_per_second": 1700.708 + }, + { + "epoch": 5.838709677419355, + "grad_norm": 0.23286376279827942, + "learning_rate": 3.832773180720475e-05, + "loss": 0.2899, + "num_input_tokens_seen": 23712448, + "step": 93, + "train_runtime": 13940.8531, + "train_tokens_per_second": 1700.932 + }, + { + "epoch": 5.903225806451613, + "grad_norm": 0.25123564466559106, + "learning_rate": 3.73754211492421e-05, + "loss": 0.2187, + "num_input_tokens_seen": 23974464, + "step": 94, + "train_runtime": 152.8243, + "train_tokens_per_second": 156875.973 + }, + { + "epoch": 5.967741935483871, + "grad_norm": 0.21842207206834566, + "learning_rate": 3.642797750674629e-05, + "loss": 0.2062, + "num_input_tokens_seen": 24236480, + "step": 95, + "train_runtime": 304.4082, + "train_tokens_per_second": 79618.347 + }, + { + "epoch": 6.064516129032258, + "grad_norm": 0.4137660833142641, + "learning_rate": 3.5485766137276894e-05, + "loss": 0.3804, + "num_input_tokens_seen": 24629504, + "step": 96, + "train_runtime": 531.3585, + "train_tokens_per_second": 46351.953 + }, + { + "epoch": 6.129032258064516, + "grad_norm": 0.24930112620091768, + "learning_rate": 3.4549150281252636e-05, + "loss": 0.1957, + "num_input_tokens_seen": 24891520, + "step": 97, + "train_runtime": 683.4085, + "train_tokens_per_second": 36422.61 + }, + { + "epoch": 6.193548387096774, + "grad_norm": 0.25330156602007586, + "learning_rate": 3.361849102191533e-05, + "loss": 0.205, + "num_input_tokens_seen": 25153536, + "step": 98, + "train_runtime": 835.5065, + "train_tokens_per_second": 30105.735 + }, + { + "epoch": 6.258064516129032, + "grad_norm": 0.29921778638612334, + "learning_rate": 3.2694147146125345e-05, + "loss": 0.1908, + "num_input_tokens_seen": 25415552, + "step": 99, + "train_runtime": 987.5232, + "train_tokens_per_second": 25736.663 + }, + { + "epoch": 6.32258064516129, + "grad_norm": 0.2469233673084958, + "learning_rate": 3.177647500604252e-05, + "loss": 0.1847, + "num_input_tokens_seen": 25677568, + "step": 100, + "train_runtime": 
1139.8183, + "train_tokens_per_second": 22527.774 + }, + { + "epoch": 6.387096774193548, + "grad_norm": 0.7725747563793509, + "learning_rate": 3.086582838174551e-05, + "loss": 0.1817, + "num_input_tokens_seen": 25939584, + "step": 101, + "train_runtime": 1291.4222, + "train_tokens_per_second": 20086.06 + }, + { + "epoch": 6.451612903225806, + "grad_norm": 0.25680554550185525, + "learning_rate": 2.996255834484296e-05, + "loss": 0.1734, + "num_input_tokens_seen": 26201600, + "step": 102, + "train_runtime": 1443.928, + "train_tokens_per_second": 18146.057 + }, + { + "epoch": 6.516129032258064, + "grad_norm": 0.27042296025640733, + "learning_rate": 2.9067013123128613e-05, + "loss": 0.1869, + "num_input_tokens_seen": 26463616, + "step": 103, + "train_runtime": 1596.4915, + "train_tokens_per_second": 16576.108 + }, + { + "epoch": 6.580645161290323, + "grad_norm": 0.2882085640613909, + "learning_rate": 2.8179537966332887e-05, + "loss": 0.1892, + "num_input_tokens_seen": 26725632, + "step": 104, + "train_runtime": 1748.874, + "train_tokens_per_second": 15281.622 + }, + { + "epoch": 6.645161290322581, + "grad_norm": 0.23789859154459053, + "learning_rate": 2.7300475013022663e-05, + "loss": 0.2003, + "num_input_tokens_seen": 26987648, + "step": 105, + "train_runtime": 1900.7787, + "train_tokens_per_second": 14198.206 + }, + { + "epoch": 6.709677419354839, + "grad_norm": 0.24333597114107516, + "learning_rate": 2.6430163158700115e-05, + "loss": 0.1869, + "num_input_tokens_seen": 27249664, + "step": 106, + "train_runtime": 2053.1166, + "train_tokens_per_second": 13272.341 + }, + { + "epoch": 6.774193548387097, + "grad_norm": 0.22162314084558382, + "learning_rate": 2.556893792515227e-05, + "loss": 0.1652, + "num_input_tokens_seen": 27511680, + "step": 107, + "train_runtime": 2205.8735, + "train_tokens_per_second": 12472.012 + }, + { + "epoch": 6.838709677419355, + "grad_norm": 0.22353605121241715, + "learning_rate": 2.471713133110078e-05, + "loss": 0.1931, + "num_input_tokens_seen": 27773696, + "step": 108, + "train_runtime": 2358.313, + "train_tokens_per_second": 11776.934 + }, + { + "epoch": 6.903225806451613, + "grad_norm": 0.22455516265290895, + "learning_rate": 2.3875071764202563e-05, + "loss": 0.1841, + "num_input_tokens_seen": 28035712, + "step": 109, + "train_runtime": 2510.9803, + "train_tokens_per_second": 11165.246 + }, + { + "epoch": 6.967741935483871, + "grad_norm": 0.38402001653538137, + "learning_rate": 2.3043083854449988e-05, + "loss": 0.1843, + "num_input_tokens_seen": 28297728, + "step": 110, + "train_runtime": 2663.6487, + "train_tokens_per_second": 10623.671 + }, + { + "epoch": 7.0, + "grad_norm": 0.3742502503244311, + "learning_rate": 2.2221488349019903e-05, + "loss": 0.1603, + "num_input_tokens_seen": 28428736, + "step": 111, + "train_runtime": 2739.8421, + "train_tokens_per_second": 10376.049 + }, + { + "epoch": 7.064516129032258, + "grad_norm": 0.2248185819076052, + "learning_rate": 2.1410601988619394e-05, + "loss": 0.116, + "num_input_tokens_seen": 28690752, + "step": 112, + "train_runtime": 2892.3751, + "train_tokens_per_second": 9919.444 + }, + { + "epoch": 7.129032258064516, + "grad_norm": 0.2125797568607351, + "learning_rate": 2.061073738537635e-05, + "loss": 0.1325, + "num_input_tokens_seen": 28952768, + "step": 113, + "train_runtime": 3044.8954, + "train_tokens_per_second": 9508.625 + }, + { + "epoch": 7.193548387096774, + "grad_norm": 0.193911925191499, + "learning_rate": 1.982220290232143e-05, + "loss": 0.1212, + "num_input_tokens_seen": 29214784, + "step": 114, + 
"train_runtime": 3197.3523, + "train_tokens_per_second": 9137.18 + }, + { + "epoch": 7.258064516129032, + "grad_norm": 0.2538299645355182, + "learning_rate": 1.9045302534508297e-05, + "loss": 0.121, + "num_input_tokens_seen": 29476800, + "step": 115, + "train_runtime": 3350.2031, + "train_tokens_per_second": 8798.511 + }, + { + "epoch": 7.32258064516129, + "grad_norm": 0.23812141734052034, + "learning_rate": 1.8280335791817733e-05, + "loss": 0.1172, + "num_input_tokens_seen": 29738816, + "step": 116, + "train_runtime": 3502.6801, + "train_tokens_per_second": 8490.303 + }, + { + "epoch": 7.387096774193548, + "grad_norm": 0.2583083911424977, + "learning_rate": 1.7527597583490822e-05, + "loss": 0.1267, + "num_input_tokens_seen": 30000832, + "step": 117, + "train_runtime": 3655.3399, + "train_tokens_per_second": 8207.399 + }, + { + "epoch": 7.451612903225806, + "grad_norm": 0.35547790737132534, + "learning_rate": 1.678737810443593e-05, + "loss": 0.1132, + "num_input_tokens_seen": 30262848, + "step": 118, + "train_runtime": 3807.6391, + "train_tokens_per_second": 7947.93 + }, + { + "epoch": 7.516129032258064, + "grad_norm": 0.20767168289368249, + "learning_rate": 1.605996272335291e-05, + "loss": 0.1036, + "num_input_tokens_seen": 30524864, + "step": 119, + "train_runtime": 3960.35, + "train_tokens_per_second": 7707.618 + }, + { + "epoch": 7.580645161290323, + "grad_norm": 0.20569986142986987, + "learning_rate": 1.5345631872718214e-05, + "loss": 0.1085, + "num_input_tokens_seen": 30786880, + "step": 120, + "train_runtime": 4112.7549, + "train_tokens_per_second": 7485.708 + }, + { + "epoch": 7.645161290322581, + "grad_norm": 0.2080233876424552, + "learning_rate": 1.4644660940672627e-05, + "loss": 0.1075, + "num_input_tokens_seen": 31048896, + "step": 121, + "train_runtime": 4265.0677, + "train_tokens_per_second": 7279.813 + }, + { + "epoch": 7.709677419354839, + "grad_norm": 0.22886347662826706, + "learning_rate": 1.3957320164854059e-05, + "loss": 0.1091, + "num_input_tokens_seen": 31310912, + "step": 122, + "train_runtime": 4417.2414, + "train_tokens_per_second": 7088.341 + }, + { + "epoch": 7.774193548387097, + "grad_norm": 0.21253506561933785, + "learning_rate": 1.3283874528215733e-05, + "loss": 0.1003, + "num_input_tokens_seen": 31572928, + "step": 123, + "train_runtime": 4569.705, + "train_tokens_per_second": 6909.183 + }, + { + "epoch": 7.838709677419355, + "grad_norm": 0.211985762025977, + "learning_rate": 1.2624583656870154e-05, + "loss": 0.1123, + "num_input_tokens_seen": 31834944, + "step": 124, + "train_runtime": 4721.8673, + "train_tokens_per_second": 6742.024 + }, + { + "epoch": 7.903225806451613, + "grad_norm": 0.21676935398531322, + "learning_rate": 1.1979701719998453e-05, + "loss": 0.1183, + "num_input_tokens_seen": 32096960, + "step": 125, + "train_runtime": 4974.2634, + "train_tokens_per_second": 6452.606 + }, + { + "epoch": 7.967741935483871, + "grad_norm": 0.21396793459722308, + "learning_rate": 1.134947733186315e-05, + "loss": 0.1099, + "num_input_tokens_seen": 32358976, + "step": 126, + "train_runtime": 5125.1703, + "train_tokens_per_second": 6313.737 + }, + { + "epoch": 8.0, + "grad_norm": 0.21396793459722308, + "learning_rate": 1.0734153455962765e-05, + "loss": 0.1076, + "num_input_tokens_seen": 32489984, + "step": 127, + "train_runtime": 5201.0438, + "train_tokens_per_second": 6246.82 + }, + { + "epoch": 8.064516129032258, + "grad_norm": 0.33028107335658585, + "learning_rate": 1.013396731136465e-05, + "loss": 0.0796, + "num_input_tokens_seen": 32752000, + "step": 128, + 
"train_runtime": 5353.4043, + "train_tokens_per_second": 6117.976 + }, + { + "epoch": 8.129032258064516, + "grad_norm": 0.1828204126753726, + "learning_rate": 9.549150281252633e-06, + "loss": 0.0836, + "num_input_tokens_seen": 33014016, + "step": 129, + "train_runtime": 5505.4754, + "train_tokens_per_second": 5996.579 + }, + { + "epoch": 8.193548387096774, + "grad_norm": 0.1736663582033454, + "learning_rate": 8.97992782372432e-06, + "loss": 0.0874, + "num_input_tokens_seen": 33276032, + "step": 130, + "train_runtime": 5657.5383, + "train_tokens_per_second": 5881.716 + }, + { + "epoch": 8.258064516129032, + "grad_norm": 0.1732376583485956, + "learning_rate": 8.426519384872733e-06, + "loss": 0.0755, + "num_input_tokens_seen": 33538048, + "step": 131, + "train_runtime": 5809.2201, + "train_tokens_per_second": 5773.244 + }, + { + "epoch": 8.32258064516129, + "grad_norm": 0.2273082510299754, + "learning_rate": 7.889138314185678e-06, + "loss": 0.071, + "num_input_tokens_seen": 33800064, + "step": 132, + "train_runtime": 5961.4033, + "train_tokens_per_second": 5669.817 + }, + { + "epoch": 8.387096774193548, + "grad_norm": 0.1857251298054902, + "learning_rate": 7.367991782295391e-06, + "loss": 0.0742, + "num_input_tokens_seen": 34062080, + "step": 133, + "train_runtime": 6112.7404, + "train_tokens_per_second": 5572.309 + }, + { + "epoch": 8.451612903225806, + "grad_norm": 0.19048334214878657, + "learning_rate": 6.863280701110408e-06, + "loss": 0.0784, + "num_input_tokens_seen": 34324096, + "step": 134, + "train_runtime": 6264.502, + "train_tokens_per_second": 5479.142 + }, + { + "epoch": 8.516129032258064, + "grad_norm": 0.21366469439070385, + "learning_rate": 6.375199646360142e-06, + "loss": 0.0706, + "num_input_tokens_seen": 34586112, + "step": 135, + "train_runtime": 6416.173, + "train_tokens_per_second": 5390.458 + }, + { + "epoch": 8.580645161290322, + "grad_norm": 0.1718209125679383, + "learning_rate": 5.903936782582253e-06, + "loss": 0.0678, + "num_input_tokens_seen": 34848128, + "step": 136, + "train_runtime": 6567.9915, + "train_tokens_per_second": 5305.751 + }, + { + "epoch": 8.64516129032258, + "grad_norm": 0.2788885426429946, + "learning_rate": 5.449673790581611e-06, + "loss": 0.0778, + "num_input_tokens_seen": 35110144, + "step": 137, + "train_runtime": 6719.8609, + "train_tokens_per_second": 5224.832 + }, + { + "epoch": 8.709677419354838, + "grad_norm": 0.18095259642151965, + "learning_rate": 5.012585797388936e-06, + "loss": 0.0719, + "num_input_tokens_seen": 35372160, + "step": 138, + "train_runtime": 6871.6076, + "train_tokens_per_second": 5147.582 + }, + { + "epoch": 8.774193548387096, + "grad_norm": 0.17022106370632772, + "learning_rate": 4.592841308745932e-06, + "loss": 0.0748, + "num_input_tokens_seen": 35634176, + "step": 139, + "train_runtime": 7023.699, + "train_tokens_per_second": 5073.42 + }, + { + "epoch": 8.838709677419354, + "grad_norm": 0.16715517913143546, + "learning_rate": 4.190602144143207e-06, + "loss": 0.0828, + "num_input_tokens_seen": 35896192, + "step": 140, + "train_runtime": 7175.6961, + "train_tokens_per_second": 5002.468 + }, + { + "epoch": 8.903225806451612, + "grad_norm": 0.16949575977901518, + "learning_rate": 3.8060233744356633e-06, + "loss": 0.0761, + "num_input_tokens_seen": 36158208, + "step": 141, + "train_runtime": 7328.1762, + "train_tokens_per_second": 4934.135 + }, + { + "epoch": 8.967741935483872, + "grad_norm": 0.17039551803545613, + "learning_rate": 3.4392532620598216e-06, + "loss": 0.0771, + "num_input_tokens_seen": 36420224, + "step": 142, 
+ "train_runtime": 7480.3902, + "train_tokens_per_second": 4868.76 + }, + { + "epoch": 9.0, + "grad_norm": 0.30950909838198004, + "learning_rate": 3.0904332038757977e-06, + "loss": 0.0753, + "num_input_tokens_seen": 36551232, + "step": 143, + "train_runtime": 7556.6237, + "train_tokens_per_second": 4836.979 + }, + { + "epoch": 9.064516129032258, + "grad_norm": 0.147866843609634, + "learning_rate": 2.759697676656098e-06, + "loss": 0.0681, + "num_input_tokens_seen": 36813248, + "step": 144, + "train_runtime": 7708.2614, + "train_tokens_per_second": 4775.817 + }, + { + "epoch": 9.129032258064516, + "grad_norm": 0.14249192821986728, + "learning_rate": 2.4471741852423237e-06, + "loss": 0.0643, + "num_input_tokens_seen": 37075264, + "step": 145, + "train_runtime": 7860.6392, + "train_tokens_per_second": 4716.571 + }, + { + "epoch": 9.193548387096774, + "grad_norm": 0.13759617454869796, + "learning_rate": 2.152983213389559e-06, + "loss": 0.0589, + "num_input_tokens_seen": 37337280, + "step": 146, + "train_runtime": 8013.0274, + "train_tokens_per_second": 4659.572 + }, + { + "epoch": 9.258064516129032, + "grad_norm": 0.1411589262898803, + "learning_rate": 1.8772381773176417e-06, + "loss": 0.0628, + "num_input_tokens_seen": 37599296, + "step": 147, + "train_runtime": 8165.0817, + "train_tokens_per_second": 4604.889 + }, + { + "epoch": 9.32258064516129, + "grad_norm": 0.1448799275709374, + "learning_rate": 1.620045381987012e-06, + "loss": 0.0663, + "num_input_tokens_seen": 37861312, + "step": 148, + "train_runtime": 8317.3617, + "train_tokens_per_second": 4552.082 + }, + { + "epoch": 9.387096774193548, + "grad_norm": 0.12990304958177346, + "learning_rate": 1.3815039801161721e-06, + "loss": 0.0506, + "num_input_tokens_seen": 38123328, + "step": 149, + "train_runtime": 8469.3923, + "train_tokens_per_second": 4501.306 + }, + { + "epoch": 9.451612903225806, + "grad_norm": 0.147433482907245, + "learning_rate": 1.1617059339563807e-06, + "loss": 0.0627, + "num_input_tokens_seen": 38385344, + "step": 150, + "train_runtime": 8621.4093, + "train_tokens_per_second": 4452.328 + }, + { + "epoch": 9.516129032258064, + "grad_norm": 0.1700792731577258, + "learning_rate": 9.607359798384785e-07, + "loss": 0.061, + "num_input_tokens_seen": 38647360, + "step": 151, + "train_runtime": 8773.7579, + "train_tokens_per_second": 4404.881 + }, + { + "epoch": 9.580645161290322, + "grad_norm": 0.14427724556347693, + "learning_rate": 7.786715955054203e-07, + "loss": 0.0652, + "num_input_tokens_seen": 38909376, + "step": 152, + "train_runtime": 8927.0159, + "train_tokens_per_second": 4358.609 + }, + { + "epoch": 9.64516129032258, + "grad_norm": 0.13912027574637723, + "learning_rate": 6.15582970243117e-07, + "loss": 0.056, + "num_input_tokens_seen": 39171392, + "step": 153, + "train_runtime": 9079.5151, + "train_tokens_per_second": 4314.26 + }, + { + "epoch": 9.709677419354838, + "grad_norm": 0.14425913554967099, + "learning_rate": 4.715329778211375e-07, + "loss": 0.0658, + "num_input_tokens_seen": 39433408, + "step": 154, + "train_runtime": 9231.7766, + "train_tokens_per_second": 4271.486 + }, + { + "epoch": 9.774193548387096, + "grad_norm": 0.1468697481065812, + "learning_rate": 3.465771522536854e-07, + "loss": 0.0673, + "num_input_tokens_seen": 39695424, + "step": 155, + "train_runtime": 9383.9718, + "train_tokens_per_second": 4230.13 + } + ], + "logging_steps": 1, + "max_steps": 160, + "num_input_tokens_seen": 39695424, + "num_train_epochs": 10, + "save_steps": 31, + "stateful_callbacks": { + "TrainerControl": { + "args": { + 
"should_epoch_stop": false, + "should_evaluate": false, + "should_log": false, + "should_save": true, + "should_training_stop": false + }, + "attributes": {} + } + }, + "total_flos": 313610262282240.0, + "train_batch_size": 4, + "trial_name": null, + "trial_params": null +} diff --git a/B_3/checkpoint-155/training_args.bin b/B_3/checkpoint-155/training_args.bin new file mode 100644 index 0000000000000000000000000000000000000000..dae6cb82f77aa91f4f2aac3f1f2dd06336bf044e --- /dev/null +++ b/B_3/checkpoint-155/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8cc4c22dcd8008af055092cdfdd6cef9bf6aaaae83714bdb9dad705f555cb73e +size 8081 diff --git a/B_3/checkpoint-155/zero_to_fp32.py b/B_3/checkpoint-155/zero_to_fp32.py new file mode 100644 index 0000000000000000000000000000000000000000..0e759146cadd92ddfefab3680146c2bd6a2b5c04 --- /dev/null +++ b/B_3/checkpoint-155/zero_to_fp32.py @@ -0,0 +1,760 @@ +#!/usr/bin/env python + +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +# This script extracts fp32 consolidated weights from a zero 1, 2 and 3 DeepSpeed checkpoints. It gets +# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in +# the future. Once extracted, the weights don't require DeepSpeed and can be used in any +# application. +# +# example: +# python zero_to_fp32.py . output_dir/ +# or +# python zero_to_fp32.py . output_dir/ --safe_serialization + +import argparse +import torch +import glob +import math +import os +import re +import gc +import json +import numpy as np +from tqdm import tqdm +from collections import OrderedDict +from dataclasses import dataclass + +# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with +# DeepSpeed data structures it has to be available in the current python environment. 
+from deepspeed.utils import logger +from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS, + FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES, + FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS) + + +@dataclass +class zero_model_state: + buffers: dict() + param_shapes: dict() + shared_params: list + ds_version: int + frozen_param_shapes: dict() + frozen_param_fragments: dict() + + +debug = 0 + +# load to cpu +device = torch.device('cpu') + + +def atoi(text): + return int(text) if text.isdigit() else text + + +def natural_keys(text): + ''' + alist.sort(key=natural_keys) sorts in human order + http://nedbatchelder.com/blog/200712/human_sorting.html + (See Toothy's implementation in the comments) + ''' + return [atoi(c) for c in re.split(r'(\d+)', text)] + + +def get_model_state_file(checkpoint_dir, zero_stage): + if not os.path.isdir(checkpoint_dir): + raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist") + + # there should be only one file + if zero_stage <= 2: + file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt") + elif zero_stage == 3: + file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt") + + if not os.path.exists(file): + raise FileNotFoundError(f"can't find model states file at '{file}'") + + return file + + +def get_checkpoint_files(checkpoint_dir, glob_pattern): + # XXX: need to test that this simple glob rule works for multi-node setup too + ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys) + + if len(ckpt_files) == 0: + raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'") + + return ckpt_files + + +def get_optim_files(checkpoint_dir): + return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt") + + +def get_model_state_files(checkpoint_dir): + return get_checkpoint_files(checkpoint_dir, "*_model_states.pt") + + +def parse_model_states(files): + zero_model_states = [] + for file in files: + state_dict = torch.load(file, map_location=device, weights_only=False) + + if BUFFER_NAMES not in state_dict: + raise ValueError(f"{file} is not a model state checkpoint") + buffer_names = state_dict[BUFFER_NAMES] + if debug: + print("Found buffers:", buffer_names) + + # recover just the buffers while restoring them to fp32 if they were saved in fp16 + buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names} + param_shapes = state_dict[PARAM_SHAPES] + + # collect parameters that are included in param_shapes + param_names = [] + for s in param_shapes: + for name in s.keys(): + param_names.append(name) + + # update with frozen parameters + frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None) + if frozen_param_shapes is not None: + if debug: + print(f"Found frozen_param_shapes: {frozen_param_shapes}") + param_names += list(frozen_param_shapes.keys()) + + # handle shared params + shared_params = [[k, v] for k, v in state_dict["shared_params"].items()] + + ds_version = state_dict.get(DS_VERSION, None) + + frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None) + + z_model_state = zero_model_state(buffers=buffers, + param_shapes=param_shapes, + shared_params=shared_params, + ds_version=ds_version, + frozen_param_shapes=frozen_param_shapes, + frozen_param_fragments=frozen_param_fragments) + zero_model_states.append(z_model_state) + + return zero_model_states + + +def parse_optim_states(files, ds_checkpoint_dir): + total_files = 
len(files) + state_dicts = [] + for f in tqdm(files, desc='Loading checkpoint shards'): + state_dict = torch.load(f, map_location=device, mmap=True, weights_only=False) + # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights + # and also handle the case where it was already removed by another helper script + state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None) + state_dicts.append(state_dict) + + if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]: + raise ValueError(f"{files[0]} is not a zero checkpoint") + zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE] + world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT] + + # For ZeRO-2 each param group can have different partition_count as data parallelism for expert + # parameters can be different from data parallelism for non-expert parameters. So we can just + # use the max of the partition_count to get the dp world_size. + + if type(world_size) is list: + world_size = max(world_size) + + if world_size != total_files: + raise ValueError( + f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. " + "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes." + ) + + # the groups are named differently in each stage + if zero_stage <= 2: + fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS + elif zero_stage == 3: + fp32_groups_key = FP32_FLAT_GROUPS + else: + raise ValueError(f"unknown zero stage {zero_stage}") + + fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))] + return zero_stage, world_size, fp32_flat_groups + + +def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters): + """ + Returns fp32 state_dict reconstructed from ds checkpoint + + Args: + - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are) + + """ + print(f"Processing zero checkpoint '{ds_checkpoint_dir}'") + + optim_files = get_optim_files(ds_checkpoint_dir) + zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir) + print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}") + + model_files = get_model_state_files(ds_checkpoint_dir) + + zero_model_states = parse_model_states(model_files) + print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}') + + if zero_stage <= 2: + return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states, + exclude_frozen_parameters) + elif zero_stage == 3: + return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states, + exclude_frozen_parameters) + + +def _zero2_merge_frozen_params(state_dict, zero_model_states): + if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0: + return + + frozen_param_shapes = zero_model_states[0].frozen_param_shapes + frozen_param_fragments = zero_model_states[0].frozen_param_fragments + + if debug: + num_elem = sum(s.numel() for s in frozen_param_shapes.values()) + print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}') + + wanted_params = len(frozen_param_shapes) + wanted_numel = sum(s.numel() for s in frozen_param_shapes.values()) + avail_numel = sum([p.numel() for p in frozen_param_fragments.values()]) + print(f'Frozen params: Have {avail_numel} numels to process.') + 
print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params') + + total_params = 0 + total_numel = 0 + for name, shape in frozen_param_shapes.items(): + total_params += 1 + unpartitioned_numel = shape.numel() + total_numel += unpartitioned_numel + + state_dict[name] = frozen_param_fragments[name] + + if debug: + print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ") + + print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements") + + +def _has_callable(obj, fn): + attr = getattr(obj, fn, None) + return callable(attr) + + +def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states): + param_shapes = zero_model_states[0].param_shapes + + # Reconstruction protocol: + # + # XXX: document this + + if debug: + for i in range(world_size): + for j in range(len(fp32_flat_groups[0])): + print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}") + + # XXX: memory usage doubles here (zero2) + num_param_groups = len(fp32_flat_groups[0]) + merged_single_partition_of_fp32_groups = [] + for i in range(num_param_groups): + merged_partitions = [sd[i] for sd in fp32_flat_groups] + full_single_fp32_vector = torch.cat(merged_partitions, 0) + merged_single_partition_of_fp32_groups.append(full_single_fp32_vector) + avail_numel = sum( + [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups]) + + if debug: + wanted_params = sum([len(shapes) for shapes in param_shapes]) + wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes]) + # not asserting if there is a mismatch due to possible padding + print(f"Have {avail_numel} numels to process.") + print(f"Need {wanted_numel} numels in {wanted_params} params.") + + # params + # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support + # out-of-core computing solution + total_numel = 0 + total_params = 0 + for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups): + offset = 0 + avail_numel = full_single_fp32_vector.numel() + for name, shape in shapes.items(): + + unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape) + total_numel += unpartitioned_numel + total_params += 1 + + if debug: + print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ") + state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape) + offset += unpartitioned_numel + + # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and + # avail_numel can differ by anywhere between 0..2*world_size. 
Due to two unrelated complex + # paddings performed in the code it's almost impossible to predict the exact numbers w/o the + # live optimizer object, so we are checking that the numbers are within the right range + align_to = 2 * world_size + + def zero2_align(x): + return align_to * math.ceil(x / align_to) + + if debug: + print(f"original offset={offset}, avail_numel={avail_numel}") + + offset = zero2_align(offset) + avail_numel = zero2_align(avail_numel) + + if debug: + print(f"aligned offset={offset}, avail_numel={avail_numel}") + + # Sanity check + if offset != avail_numel: + raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong") + + print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements") + + +def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states, + exclude_frozen_parameters): + state_dict = OrderedDict() + + # buffers + buffers = zero_model_states[0].buffers + state_dict.update(buffers) + if debug: + print(f"added {len(buffers)} buffers") + + if not exclude_frozen_parameters: + _zero2_merge_frozen_params(state_dict, zero_model_states) + + _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states) + + # recover shared parameters + for pair in zero_model_states[0].shared_params: + if pair[1] in state_dict: + state_dict[pair[0]] = state_dict[pair[1]] + + return state_dict + + +def zero3_partitioned_param_info(unpartitioned_numel, world_size): + remainder = unpartitioned_numel % world_size + padding_numel = (world_size - remainder) if remainder else 0 + partitioned_numel = math.ceil(unpartitioned_numel / world_size) + return partitioned_numel, padding_numel + + +def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states): + if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0: + return + + if debug: + for i in range(world_size): + num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values()) + print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}') + + frozen_param_shapes = zero_model_states[0].frozen_param_shapes + wanted_params = len(frozen_param_shapes) + wanted_numel = sum(s.numel() for s in frozen_param_shapes.values()) + avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size + print(f'Frozen params: Have {avail_numel} numels to process.') + print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params') + + total_params = 0 + total_numel = 0 + for name, shape in zero_model_states[0].frozen_param_shapes.items(): + total_params += 1 + unpartitioned_numel = shape.numel() + total_numel += unpartitioned_numel + + param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states) + state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape) + + partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size) + + if debug: + print( + f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}" + ) + + print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements") + + +class GatheredTensor: + """ + A pseudo tensor that collects partitioned weights. + It is more memory efficient when there are multiple groups. 
+ """ + + def __init__(self, flat_groups, flat_groups_offset, offset, partitioned_numel, shape): + self.flat_groups = flat_groups + self.flat_groups_offset = flat_groups_offset + self.offset = offset + self.partitioned_numel = partitioned_numel + self.shape = shape + self.dtype = self.flat_groups[0][0].dtype + + def contiguous(self): + """ + Merge partitioned weights from flat_groups into a single tensor. + """ + end_idx = self.offset + self.partitioned_numel + world_size = len(self.flat_groups) + pad_flat_param_chunks = [] + + for rank_i in range(world_size): + # for each rank, we need to collect weights from related group/groups + flat_groups_at_rank_i = self.flat_groups[rank_i] + start_group_id = None + end_group_id = None + for group_id in range(len(self.flat_groups_offset)): + if self.flat_groups_offset[group_id] <= self.offset < self.flat_groups_offset[group_id + 1]: + start_group_id = group_id + if self.flat_groups_offset[group_id] < end_idx <= self.flat_groups_offset[group_id + 1]: + end_group_id = group_id + break + # collect weights from related group/groups + for group_id in range(start_group_id, end_group_id + 1): + flat_tensor = flat_groups_at_rank_i[group_id] + start_offset = self.offset - self.flat_groups_offset[group_id] + end_offset = min(end_idx, self.flat_groups_offset[group_id + 1]) - self.flat_groups_offset[group_id] + pad_flat_param_chunks.append(flat_tensor[start_offset:end_offset]) + + # collect weights from all ranks + pad_flat_param = torch.cat(pad_flat_param_chunks, dim=0) + param = pad_flat_param[:self.shape.numel()].view(self.shape).contiguous() + return param + + +def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states): + param_shapes = zero_model_states[0].param_shapes + avail_numel = sum([flat_group.numel() for flat_group in fp32_flat_groups[0]]) * world_size + + # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each + # param, re-consolidating each param, while dealing with padding if any + + # merge list of dicts, preserving order + param_shapes = {k: v for d in param_shapes for k, v in d.items()} + + if debug: + for i in range(world_size): + print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}") + + wanted_params = len(param_shapes) + wanted_numel = sum(shape.numel() for shape in param_shapes.values()) + # not asserting if there is a mismatch due to possible padding + avail_numel = fp32_flat_groups[0].numel() * world_size + print(f"Trainable params: Have {avail_numel} numels to process.") + print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.") + + # params + # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support + # out-of-core computing solution + offset = 0 + total_numel = 0 + total_params = 0 + flat_groups_offset = [0] + list(np.cumsum([flat_tensor.numel() for flat_tensor in fp32_flat_groups[0]])) + for name, shape in tqdm(param_shapes.items(), desc='Gathering sharded weights'): + unpartitioned_numel = shape.numel() + total_numel += unpartitioned_numel + total_params += 1 + partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size) + + if debug: + print( + f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}" + ) + + # memory efficient tensor + tensor = GatheredTensor(fp32_flat_groups, flat_groups_offset, offset, partitioned_numel, shape) + 
state_dict[name] = tensor + offset += partitioned_numel + + offset *= world_size + + # Sanity check + if offset != avail_numel: + raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong") + + print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements") + + +def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states, + exclude_frozen_parameters): + state_dict = OrderedDict() + + # buffers + buffers = zero_model_states[0].buffers + state_dict.update(buffers) + if debug: + print(f"added {len(buffers)} buffers") + + if not exclude_frozen_parameters: + _zero3_merge_frozen_params(state_dict, world_size, zero_model_states) + + _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states) + + # recover shared parameters + for pair in zero_model_states[0].shared_params: + if pair[1] in state_dict: + state_dict[pair[0]] = state_dict[pair[1]] + + return state_dict + + +def to_torch_tensor(state_dict, return_empty_tensor=False): + """ + Convert state_dict of GatheredTensor to torch tensor + """ + torch_state_dict = {} + converted_tensors = {} + for name, tensor in state_dict.items(): + tensor_id = id(tensor) + if tensor_id in converted_tensors: # shared tensors + shared_tensor = torch_state_dict[converted_tensors[tensor_id]] + torch_state_dict[name] = shared_tensor + else: + converted_tensors[tensor_id] = name + if return_empty_tensor: + torch_state_dict[name] = torch.empty(tensor.shape, dtype=tensor.dtype) + else: + torch_state_dict[name] = tensor.contiguous() + return torch_state_dict + + +def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, + tag=None, + exclude_frozen_parameters=False, + lazy_mode=False): + """ + Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with + ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example + via a model hub. + + Args: + - ``checkpoint_dir``: path to the desired checkpoint folder + - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14`` + - ``exclude_frozen_parameters``: exclude frozen parameters + - ``lazy_mode``: get state_dict in lazy mode. It returns a dict of pesduo tensor instead of torch tensor, which is more memory efficient. + Convert the pesduo tensor to torch tensor by ``.contiguous()`` + + Returns: + - pytorch ``state_dict`` + + A typical usage might be :: + + from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint + # do the training and checkpoint saving + state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu + model = model.cpu() # move to cpu + model.load_state_dict(state_dict) + # submit to model hub or save the model to share with others + + In this example the ``model`` will no longer be usable in the deepspeed context of the same + application. i.e. you will need to re-initialize the deepspeed engine, since + ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it. + + If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead. + + Note: the above usage may not work if your application doesn't have sufficient free CPU memory. + You may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with + the checkpoint. 
Or you can load state_dict in lazy mode :: + + from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint + state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, lazy_mode=True) # not on cpu + for name, lazy_tensor in state_dict.items(): + tensor = lazy_tensor.contiguous() # to cpu + print(name, tensor) + # del tensor to release memory if it is no longer in use + """ + if tag is None: + latest_path = os.path.join(checkpoint_dir, 'latest') + if os.path.isfile(latest_path): + with open(latest_path, 'r') as fd: + tag = fd.read().strip() + else: + raise ValueError(f"Unable to find 'latest' file at {latest_path}") + + ds_checkpoint_dir = os.path.join(checkpoint_dir, tag) + + if not os.path.isdir(ds_checkpoint_dir): + raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist") + + state_dict = _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters) + if lazy_mode: + return state_dict + else: + return to_torch_tensor(state_dict) + + +def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, + output_dir, + max_shard_size="5GB", + safe_serialization=False, + tag=None, + exclude_frozen_parameters=False): + """ + Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be + loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed. + + Args: + - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``) + - ``output_dir``: directory for the pytorch fp32 state_dict output files + - ``max_shard_size``: the maximum size for a checkpoint before being sharded, default value is 5GB + - ``safe_serialization``: whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`). + - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided, will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14`` + - ``exclude_frozen_parameters``: exclude frozen parameters + """ + + # Dependency pre-check + if safe_serialization: + try: + from safetensors.torch import save_file + except ImportError: + print('If you want to use `safe_serialization`, please `pip install safetensors`') + raise + if max_shard_size is not None: + try: + from huggingface_hub import split_torch_state_dict_into_shards + except ImportError: + print('If you want to use `max_shard_size`, please `pip install huggingface_hub`') + raise + + # Convert zero checkpoint to state_dict + state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, + tag, + exclude_frozen_parameters, + lazy_mode=True) + + # Shard the model if it is too big.
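+ # The sharding path below first builds a dict of empty tensors with the real shapes/dtypes so that
+ # split_torch_state_dict_into_shards can plan shard boundaries without gathering the fp32 weights;
+ # each shard is then materialized with to_torch_tensor(), saved, and freed before the next one.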
+ weights_name = "model.safetensors" if safe_serialization else "pytorch_model.bin" + if max_shard_size is not None: + filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors") + # a memory-efficient approach for sharding + empty_state_dict = to_torch_tensor(state_dict, return_empty_tensor=True) + state_dict_split = split_torch_state_dict_into_shards(empty_state_dict, + filename_pattern=filename_pattern, + max_shard_size=max_shard_size) + else: + from collections import namedtuple + StateDictSplit = namedtuple("StateDictSplit", ["is_sharded", "filename_to_tensors"]) + state_dict_split = StateDictSplit(is_sharded=False, + filename_to_tensors={weights_name: list(state_dict.keys())}) + + # Save the model by shard + os.makedirs(output_dir, exist_ok=True) + filename_to_tensors = state_dict_split.filename_to_tensors.items() + for shard_file, tensors in tqdm(filename_to_tensors, desc="Saving checkpoint shards"): + shard_state_dict = {tensor_name: state_dict[tensor_name] for tensor_name in tensors} + shard_state_dict = to_torch_tensor(shard_state_dict) + output_path = os.path.join(output_dir, shard_file) + if safe_serialization: + save_file(shard_state_dict, output_path, metadata={"format": "pt"}) + else: + torch.save(shard_state_dict, output_path) + # release the memory of current shard + for tensor_name in list(shard_state_dict.keys()): + del state_dict[tensor_name] + del shard_state_dict[tensor_name] + del shard_state_dict + gc.collect() + + # Save index if sharded + if state_dict_split.is_sharded: + index = { + "metadata": state_dict_split.metadata, + "weight_map": state_dict_split.tensor_to_filename, + } + save_index_file = "model.safetensors.index.json" if safe_serialization else "pytorch_model.bin.index.json" + save_index_file = os.path.join(output_dir, save_index_file) + with open(save_index_file, "w", encoding="utf-8") as f: + content = json.dumps(index, indent=2, sort_keys=True) + "\n" + f.write(content) + + +def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None): + """ + 1. Move the provided model to cpu + 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` + 3. Load it into the provided model + + Args: + - ``model``: the model object to update + - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``) + - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided, will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14`` + + Returns: + - ``model``: modified model + + Make sure you have plenty of CPU memory available before you call this function. If you don't + have enough, use the ``zero_to_fp32.py`` utility to do the conversion. You will find it + conveniently placed for you in the checkpoint folder. + + A typical usage might be :: + + from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint + model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir) + # submit to model hub or save the model to share with others + + Note that once this has run, the ``model`` will no longer be usable in the deepspeed context + of the same application. i.e. you will need to re-initialize the deepspeed engine, since + ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+ + """ + logger.info(f"Extracting fp32 weights") + state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag) + + logger.info(f"Overwriting model with fp32 weights") + model = model.cpu() + model.load_state_dict(state_dict, strict=False) + + return model + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("checkpoint_dir", + type=str, + help="path to the desired checkpoint folder, e.g., path/checkpoint-12") + parser.add_argument("output_dir", + type=str, + help="directory to the pytorch fp32 state_dict output files" + "(e.g. path/checkpoint-12-output/)") + parser.add_argument( + "--max_shard_size", + type=str, + default="5GB", + help="The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size" + "lower than this size. If expressed as a string, needs to be digits followed by a unit (like `5MB`" + "We default it to 5GB in order for models to be able to run easily on free-tier google colab instances" + "without CPU OOM issues.") + parser.add_argument( + "--safe_serialization", + default=False, + action='store_true', + help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).") + parser.add_argument("-t", + "--tag", + type=str, + default=None, + help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1") + parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters") + parser.add_argument("-d", "--debug", action='store_true', help="enable debug") + args = parser.parse_args() + + debug = args.debug + + convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, + args.output_dir, + max_shard_size=args.max_shard_size, + safe_serialization=args.safe_serialization, + tag=args.tag, + exclude_frozen_parameters=args.exclude_frozen_parameters) diff --git a/B_3/checkpoint-31/README.md b/B_3/checkpoint-31/README.md new file mode 100644 index 0000000000000000000000000000000000000000..0673d0313e0251ef70dd85d3ff379913ab5506d9 --- /dev/null +++ b/B_3/checkpoint-31/README.md @@ -0,0 +1,202 @@ +--- +base_model: /workspace/meta-llama/Llama-3.1-70B +library_name: peft +--- + +# Model Card for Model ID + + + + + +## Model Details + +### Model Description + + + + + +- **Developed by:** [More Information Needed] +- **Funded by [optional]:** [More Information Needed] +- **Shared by [optional]:** [More Information Needed] +- **Model type:** [More Information Needed] +- **Language(s) (NLP):** [More Information Needed] +- **License:** [More Information Needed] +- **Finetuned from model [optional]:** [More Information Needed] + +### Model Sources [optional] + + + +- **Repository:** [More Information Needed] +- **Paper [optional]:** [More Information Needed] +- **Demo [optional]:** [More Information Needed] + +## Uses + + + +### Direct Use + + + +[More Information Needed] + +### Downstream Use [optional] + + + +[More Information Needed] + +### Out-of-Scope Use + + + +[More Information Needed] + +## Bias, Risks, and Limitations + + + +[More Information Needed] + +### Recommendations + + + +Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. + +## How to Get Started with the Model + +Use the code below to get started with the model. 
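A minimal sketch of one way to load this LoRA adapter with `transformers` and `peft` is shown below. The base-model path is the one recorded in `adapter_config.json` and the adapter path assumes this checkpoint folder; both are assumptions to adjust for your environment, and a 70B base model typically needs multiple GPUs or quantization to load.

```python
# Sketch: attach the B_3/checkpoint-31 LoRA adapter to the Llama-3.1-70B base model.
# Both paths below are assumptions; point them at your local copies.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_model_path = "/workspace/meta-llama/Llama-3.1-70B"  # base_model_name_or_path in adapter_config.json
adapter_path = "B_3/checkpoint-31"                       # this checkpoint folder

tokenizer = AutoTokenizer.from_pretrained(adapter_path)
base_model = AutoModelForCausalLM.from_pretrained(
    base_model_path,
    torch_dtype=torch.bfloat16,
    device_map="auto",  # requires `accelerate`; spreads the 70B weights across available GPUs
)
model = PeftModel.from_pretrained(base_model, adapter_path)
model.eval()

# The checkpoint ships a chat template, so the prompt can be built with apply_chat_template.
messages = [{"role": "user", "content": "Hello, who are you?"}]
input_ids = tokenizer.apply_chat_template(messages, return_tensors="pt").to(model.device)
with torch.no_grad():
    output_ids = model.generate(input_ids, max_new_tokens=64)
print(tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True))
```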
+ +[More Information Needed] + +## Training Details + +### Training Data + + + +[More Information Needed] + +### Training Procedure + + + +#### Preprocessing [optional] + +[More Information Needed] + + +#### Training Hyperparameters + +- **Training regime:** [More Information Needed] + +#### Speeds, Sizes, Times [optional] + + + +[More Information Needed] + +## Evaluation + + + +### Testing Data, Factors & Metrics + +#### Testing Data + + + +[More Information Needed] + +#### Factors + + + +[More Information Needed] + +#### Metrics + + + +[More Information Needed] + +### Results + +[More Information Needed] + +#### Summary + + + +## Model Examination [optional] + + + +[More Information Needed] + +## Environmental Impact + + + +Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). + +- **Hardware Type:** [More Information Needed] +- **Hours used:** [More Information Needed] +- **Cloud Provider:** [More Information Needed] +- **Compute Region:** [More Information Needed] +- **Carbon Emitted:** [More Information Needed] + +## Technical Specifications [optional] + +### Model Architecture and Objective + +[More Information Needed] + +### Compute Infrastructure + +[More Information Needed] + +#### Hardware + +[More Information Needed] + +#### Software + +[More Information Needed] + +## Citation [optional] + + + +**BibTeX:** + +[More Information Needed] + +**APA:** + +[More Information Needed] + +## Glossary [optional] + + + +[More Information Needed] + +## More Information [optional] + +[More Information Needed] + +## Model Card Authors [optional] + +[More Information Needed] + +## Model Card Contact + +[More Information Needed] +### Framework versions + +- PEFT 0.15.2 \ No newline at end of file diff --git a/B_3/checkpoint-31/adapter_config.json b/B_3/checkpoint-31/adapter_config.json new file mode 100644 index 0000000000000000000000000000000000000000..9f0d6a37659f3015d3fdd427235805c5bbb829e9 --- /dev/null +++ b/B_3/checkpoint-31/adapter_config.json @@ -0,0 +1,39 @@ +{ + "alpha_pattern": {}, + "auto_mapping": null, + "base_model_name_or_path": "/workspace/meta-llama/Llama-3.1-70B", + "bias": "none", + "corda_config": null, + "eva_config": null, + "exclude_modules": null, + "fan_in_fan_out": false, + "inference_mode": true, + "init_lora_weights": true, + "layer_replication": null, + "layers_pattern": null, + "layers_to_transform": null, + "loftq_config": {}, + "lora_alpha": 1024, + "lora_bias": false, + "lora_dropout": 0, + "megatron_config": null, + "megatron_core": "megatron.core", + "modules_to_save": null, + "peft_type": "LORA", + "r": 256, + "rank_pattern": {}, + "revision": null, + "target_modules": [ + "down_proj", + "q_proj", + "k_proj", + "o_proj", + "v_proj", + "gate_proj", + "up_proj" + ], + "task_type": "CAUSAL_LM", + "trainable_token_indices": null, + "use_dora": false, + "use_rslora": false +} \ No newline at end of file diff --git a/B_3/checkpoint-31/adapter_model.safetensors b/B_3/checkpoint-31/adapter_model.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..02d5d8d21e2e932f18d985233fb329329b8010a8 --- /dev/null +++ b/B_3/checkpoint-31/adapter_model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:962d751d5dac9e5bf8a395b0c13c83f59092dacdfecc4d6b444a95c37bcd9a87 +size 6627156248 diff --git a/B_3/checkpoint-31/chat_template.jinja b/B_3/checkpoint-31/chat_template.jinja new file mode 
100644 index 0000000000000000000000000000000000000000..c3af804a3bddb95bf03b7d349234a039a27de382 --- /dev/null +++ b/B_3/checkpoint-31/chat_template.jinja @@ -0,0 +1,7 @@ +{{ '<|begin_of_text|>' }}{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% endif %}{% if system_message is defined %}{{ '<|start_header_id|>system<|end_header_id|> + +' + system_message + '<|eot_id|>' }}{% endif %}{% for message in loop_messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '<|start_header_id|>user<|end_header_id|> + +' + content + '<|eot_id|><|start_header_id|>assistant<|end_header_id|> + +' }}{% elif message['role'] == 'assistant' %}{{ content + '<|eot_id|>' }}{% endif %}{% endfor %} \ No newline at end of file diff --git a/B_3/checkpoint-31/global_step30/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt b/B_3/checkpoint-31/global_step30/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..6278a03114cbf1b09748f7a2eafa5837be33257f --- /dev/null +++ b/B_3/checkpoint-31/global_step30/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d9effbeadc54047a0d6764a7aeeb97a365c6db3bf7dc881ae7556629b769cd70 +size 9940504945 diff --git a/B_3/checkpoint-31/global_step30/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt b/B_3/checkpoint-31/global_step30/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..301334665622156e94298ce6eda573e678303ade --- /dev/null +++ b/B_3/checkpoint-31/global_step30/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6654b90292cc54865c75f4d38f5a8784cd5ef5ad7b6453942343b038a3a7bd38 +size 9940504945 diff --git a/B_3/checkpoint-31/global_step30/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt b/B_3/checkpoint-31/global_step30/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..6078deef3df6f0669b6d7ba1f48a8a70a76e393b --- /dev/null +++ b/B_3/checkpoint-31/global_step30/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4e66f8b0e3777d43e50043e7f691925357e95285963289f3cfcdddfabf3652c3 +size 9940504945 diff --git a/B_3/checkpoint-31/global_step30/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt b/B_3/checkpoint-31/global_step30/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..f657bd17ae163d924601841232dc60860068f3a6 --- /dev/null +++ b/B_3/checkpoint-31/global_step30/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b390af1e2641842bfbaccad6301de54b21f6f63fa713cda5d2dcc296d9f58a41 +size 9940504945 diff --git a/B_3/checkpoint-31/global_step30/zero_pp_rank_0_mp_rank_00_model_states.pt b/B_3/checkpoint-31/global_step30/zero_pp_rank_0_mp_rank_00_model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..86dae7fe3987ec520e93fab7fa122edd5618f5f2 --- /dev/null +++ b/B_3/checkpoint-31/global_step30/zero_pp_rank_0_mp_rank_00_model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c85c79a6a694bba5308bf819e5ea73f69e2739d22b9e5ab3ad55c9d9e2bcb57a +size 1109201 diff --git 
a/B_3/checkpoint-31/global_step30/zero_pp_rank_1_mp_rank_00_model_states.pt b/B_3/checkpoint-31/global_step30/zero_pp_rank_1_mp_rank_00_model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..5ae9aa1181941a80847080011df3a030ed0b0399 --- /dev/null +++ b/B_3/checkpoint-31/global_step30/zero_pp_rank_1_mp_rank_00_model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b9b2e73576c43147300bb3decf127dd713e7116f98647263dc5fdea0bf4ae81a +size 1109201 diff --git a/B_3/checkpoint-31/global_step30/zero_pp_rank_2_mp_rank_00_model_states.pt b/B_3/checkpoint-31/global_step30/zero_pp_rank_2_mp_rank_00_model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..3e5d028cd4288291bb958793bb69e12a41e2f959 --- /dev/null +++ b/B_3/checkpoint-31/global_step30/zero_pp_rank_2_mp_rank_00_model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f8c3dee054aa8fde33f420913e7d3b8370b73a871f27a46255c35aac2bfd7153 +size 1109201 diff --git a/B_3/checkpoint-31/global_step30/zero_pp_rank_3_mp_rank_00_model_states.pt b/B_3/checkpoint-31/global_step30/zero_pp_rank_3_mp_rank_00_model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..192b9be1e897b104103c55bad3c40a1a0f1e03fa --- /dev/null +++ b/B_3/checkpoint-31/global_step30/zero_pp_rank_3_mp_rank_00_model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a5d6b840cac88c3e8338329c59ac7ec4732453b32c8de6fb4a0d45d5886cf629 +size 1109201 diff --git a/B_3/checkpoint-31/latest b/B_3/checkpoint-31/latest new file mode 100644 index 0000000000000000000000000000000000000000..f11ba0855f2215bf9c6cbacc2e9fd7b7c5f5a480 --- /dev/null +++ b/B_3/checkpoint-31/latest @@ -0,0 +1 @@ +global_step30 \ No newline at end of file diff --git a/B_3/checkpoint-31/rng_state_0.pth b/B_3/checkpoint-31/rng_state_0.pth new file mode 100644 index 0000000000000000000000000000000000000000..08fe74f5a6509346bc7bac0a5ffae3f0c1235010 --- /dev/null +++ b/B_3/checkpoint-31/rng_state_0.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4188048c882e5b595f5e2cce07e8c73ea45afa11bc95f5627f2d6191ccaa5e40 +size 15429 diff --git a/B_3/checkpoint-31/rng_state_1.pth b/B_3/checkpoint-31/rng_state_1.pth new file mode 100644 index 0000000000000000000000000000000000000000..c34a6346a54e818fbc0db7ed1066952b078027e4 --- /dev/null +++ b/B_3/checkpoint-31/rng_state_1.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0eb1f7b1acfd920c8cdef076eee48ccfec7d3ed4d8c5d83c2592fbbb4a4f9b38 +size 15429 diff --git a/B_3/checkpoint-31/rng_state_2.pth b/B_3/checkpoint-31/rng_state_2.pth new file mode 100644 index 0000000000000000000000000000000000000000..98679a008686d92dc4d6a79a00e839fa51627c00 --- /dev/null +++ b/B_3/checkpoint-31/rng_state_2.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5664fd5079c69dc8edcc429ded884f07d45c771f27a5a65fe1ad751348f9e1de +size 15429 diff --git a/B_3/checkpoint-31/rng_state_3.pth b/B_3/checkpoint-31/rng_state_3.pth new file mode 100644 index 0000000000000000000000000000000000000000..d0852d3d83273be11573602862d25a8ec574a5ae --- /dev/null +++ b/B_3/checkpoint-31/rng_state_3.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0a3f17c4e252e5b7e99f3573c3bca992070f9ef436dd578ee9e88c333a94cef0 +size 15429 diff --git a/B_3/checkpoint-31/scheduler.pt b/B_3/checkpoint-31/scheduler.pt new file mode 100644 index 
0000000000000000000000000000000000000000..427b818d2dfc769a20ba8f2eb2586e743dfb0bf5 --- /dev/null +++ b/B_3/checkpoint-31/scheduler.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:755a0b9a77983cc4d980e62691612f9c0a93ff338c95886ef98077aedda4ce2a +size 1401 diff --git a/B_3/checkpoint-31/special_tokens_map.json b/B_3/checkpoint-31/special_tokens_map.json new file mode 100644 index 0000000000000000000000000000000000000000..14daf4588e61b4e4983af0fccaba4d5500c0977c --- /dev/null +++ b/B_3/checkpoint-31/special_tokens_map.json @@ -0,0 +1,26 @@ +{ + "additional_special_tokens": [ + { + "content": "<|eom_id|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + } + ], + "bos_token": { + "content": "<|begin_of_text|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + }, + "eos_token": { + "content": "<|eot_id|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + }, + "pad_token": "<|eot_id|>" +} diff --git a/B_3/checkpoint-31/tokenizer.json b/B_3/checkpoint-31/tokenizer.json new file mode 100644 index 0000000000000000000000000000000000000000..1c1d8d5c9024994f1d3b00f9662b8dd89ca13cf2 --- /dev/null +++ b/B_3/checkpoint-31/tokenizer.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6b9e4e7fb171f92fd137b777cc2714bf87d11576700a1dcd7a399e7bbe39537b +size 17209920 diff --git a/B_3/checkpoint-31/tokenizer_config.json b/B_3/checkpoint-31/tokenizer_config.json new file mode 100644 index 0000000000000000000000000000000000000000..d1e1ea9bc94ff1132f136710751e37fb23a64347 --- /dev/null +++ b/B_3/checkpoint-31/tokenizer_config.json @@ -0,0 +1,2068 @@ +{ + "added_tokens_decoder": { + "128000": { + "content": "<|begin_of_text|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128001": { + "content": "<|end_of_text|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128002": { + "content": "<|reserved_special_token_0|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128003": { + "content": "<|reserved_special_token_1|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128004": { + "content": "<|finetune_right_pad_id|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128005": { + "content": "<|reserved_special_token_2|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128006": { + "content": "<|start_header_id|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128007": { + "content": "<|end_header_id|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128008": { + "content": "<|eom_id|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128009": { + "content": "<|eot_id|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128010": { + "content": "<|python_tag|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128011": { + "content": "<|reserved_special_token_3|>", + 
"lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128012": { + "content": "<|reserved_special_token_4|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128013": { + "content": "<|reserved_special_token_5|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128014": { + "content": "<|reserved_special_token_6|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128015": { + "content": "<|reserved_special_token_7|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128016": { + "content": "<|reserved_special_token_8|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128017": { + "content": "<|reserved_special_token_9|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128018": { + "content": "<|reserved_special_token_10|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128019": { + "content": "<|reserved_special_token_11|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128020": { + "content": "<|reserved_special_token_12|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128021": { + "content": "<|reserved_special_token_13|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128022": { + "content": "<|reserved_special_token_14|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128023": { + "content": "<|reserved_special_token_15|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128024": { + "content": "<|reserved_special_token_16|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128025": { + "content": "<|reserved_special_token_17|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128026": { + "content": "<|reserved_special_token_18|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128027": { + "content": "<|reserved_special_token_19|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128028": { + "content": "<|reserved_special_token_20|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128029": { + "content": "<|reserved_special_token_21|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128030": { + "content": "<|reserved_special_token_22|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128031": { + "content": "<|reserved_special_token_23|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128032": { + "content": "<|reserved_special_token_24|>", + "lstrip": false, + "normalized": 
false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128033": { + "content": "<|reserved_special_token_25|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128034": { + "content": "<|reserved_special_token_26|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128035": { + "content": "<|reserved_special_token_27|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128036": { + "content": "<|reserved_special_token_28|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128037": { + "content": "<|reserved_special_token_29|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128038": { + "content": "<|reserved_special_token_30|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128039": { + "content": "<|reserved_special_token_31|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128040": { + "content": "<|reserved_special_token_32|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128041": { + "content": "<|reserved_special_token_33|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128042": { + "content": "<|reserved_special_token_34|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128043": { + "content": "<|reserved_special_token_35|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128044": { + "content": "<|reserved_special_token_36|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128045": { + "content": "<|reserved_special_token_37|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128046": { + "content": "<|reserved_special_token_38|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128047": { + "content": "<|reserved_special_token_39|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128048": { + "content": "<|reserved_special_token_40|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128049": { + "content": "<|reserved_special_token_41|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128050": { + "content": "<|reserved_special_token_42|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128051": { + "content": "<|reserved_special_token_43|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128052": { + "content": "<|reserved_special_token_44|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128053": { + "content": "<|reserved_special_token_45|>", + "lstrip": false, + "normalized": false, + "rstrip": false, 
+ "single_word": false, + "special": true + }, + "128054": { + "content": "<|reserved_special_token_46|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128055": { + "content": "<|reserved_special_token_47|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128056": { + "content": "<|reserved_special_token_48|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128057": { + "content": "<|reserved_special_token_49|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128058": { + "content": "<|reserved_special_token_50|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128059": { + "content": "<|reserved_special_token_51|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128060": { + "content": "<|reserved_special_token_52|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128061": { + "content": "<|reserved_special_token_53|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128062": { + "content": "<|reserved_special_token_54|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128063": { + "content": "<|reserved_special_token_55|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128064": { + "content": "<|reserved_special_token_56|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128065": { + "content": "<|reserved_special_token_57|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128066": { + "content": "<|reserved_special_token_58|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128067": { + "content": "<|reserved_special_token_59|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128068": { + "content": "<|reserved_special_token_60|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128069": { + "content": "<|reserved_special_token_61|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128070": { + "content": "<|reserved_special_token_62|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128071": { + "content": "<|reserved_special_token_63|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128072": { + "content": "<|reserved_special_token_64|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128073": { + "content": "<|reserved_special_token_65|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128074": { + "content": "<|reserved_special_token_66|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + 
"special": true + }, + "128075": { + "content": "<|reserved_special_token_67|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128076": { + "content": "<|reserved_special_token_68|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128077": { + "content": "<|reserved_special_token_69|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128078": { + "content": "<|reserved_special_token_70|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128079": { + "content": "<|reserved_special_token_71|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128080": { + "content": "<|reserved_special_token_72|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128081": { + "content": "<|reserved_special_token_73|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128082": { + "content": "<|reserved_special_token_74|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128083": { + "content": "<|reserved_special_token_75|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128084": { + "content": "<|reserved_special_token_76|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128085": { + "content": "<|reserved_special_token_77|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128086": { + "content": "<|reserved_special_token_78|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128087": { + "content": "<|reserved_special_token_79|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128088": { + "content": "<|reserved_special_token_80|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128089": { + "content": "<|reserved_special_token_81|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128090": { + "content": "<|reserved_special_token_82|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128091": { + "content": "<|reserved_special_token_83|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128092": { + "content": "<|reserved_special_token_84|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128093": { + "content": "<|reserved_special_token_85|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128094": { + "content": "<|reserved_special_token_86|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128095": { + "content": "<|reserved_special_token_87|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + 
"128096": { + "content": "<|reserved_special_token_88|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128097": { + "content": "<|reserved_special_token_89|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128098": { + "content": "<|reserved_special_token_90|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128099": { + "content": "<|reserved_special_token_91|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128100": { + "content": "<|reserved_special_token_92|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128101": { + "content": "<|reserved_special_token_93|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128102": { + "content": "<|reserved_special_token_94|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128103": { + "content": "<|reserved_special_token_95|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128104": { + "content": "<|reserved_special_token_96|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128105": { + "content": "<|reserved_special_token_97|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128106": { + "content": "<|reserved_special_token_98|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128107": { + "content": "<|reserved_special_token_99|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128108": { + "content": "<|reserved_special_token_100|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128109": { + "content": "<|reserved_special_token_101|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128110": { + "content": "<|reserved_special_token_102|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128111": { + "content": "<|reserved_special_token_103|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128112": { + "content": "<|reserved_special_token_104|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128113": { + "content": "<|reserved_special_token_105|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128114": { + "content": "<|reserved_special_token_106|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128115": { + "content": "<|reserved_special_token_107|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128116": { + "content": "<|reserved_special_token_108|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128117": { + 
"content": "<|reserved_special_token_109|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128118": { + "content": "<|reserved_special_token_110|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128119": { + "content": "<|reserved_special_token_111|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128120": { + "content": "<|reserved_special_token_112|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128121": { + "content": "<|reserved_special_token_113|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128122": { + "content": "<|reserved_special_token_114|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128123": { + "content": "<|reserved_special_token_115|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128124": { + "content": "<|reserved_special_token_116|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128125": { + "content": "<|reserved_special_token_117|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128126": { + "content": "<|reserved_special_token_118|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128127": { + "content": "<|reserved_special_token_119|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128128": { + "content": "<|reserved_special_token_120|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128129": { + "content": "<|reserved_special_token_121|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128130": { + "content": "<|reserved_special_token_122|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128131": { + "content": "<|reserved_special_token_123|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128132": { + "content": "<|reserved_special_token_124|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128133": { + "content": "<|reserved_special_token_125|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128134": { + "content": "<|reserved_special_token_126|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128135": { + "content": "<|reserved_special_token_127|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128136": { + "content": "<|reserved_special_token_128|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128137": { + "content": "<|reserved_special_token_129|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128138": { + 
"content": "<|reserved_special_token_130|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128139": { + "content": "<|reserved_special_token_131|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128140": { + "content": "<|reserved_special_token_132|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128141": { + "content": "<|reserved_special_token_133|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128142": { + "content": "<|reserved_special_token_134|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128143": { + "content": "<|reserved_special_token_135|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128144": { + "content": "<|reserved_special_token_136|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128145": { + "content": "<|reserved_special_token_137|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128146": { + "content": "<|reserved_special_token_138|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128147": { + "content": "<|reserved_special_token_139|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128148": { + "content": "<|reserved_special_token_140|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128149": { + "content": "<|reserved_special_token_141|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128150": { + "content": "<|reserved_special_token_142|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128151": { + "content": "<|reserved_special_token_143|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128152": { + "content": "<|reserved_special_token_144|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128153": { + "content": "<|reserved_special_token_145|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128154": { + "content": "<|reserved_special_token_146|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128155": { + "content": "<|reserved_special_token_147|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128156": { + "content": "<|reserved_special_token_148|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128157": { + "content": "<|reserved_special_token_149|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128158": { + "content": "<|reserved_special_token_150|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128159": { + 
"content": "<|reserved_special_token_151|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128160": { + "content": "<|reserved_special_token_152|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128161": { + "content": "<|reserved_special_token_153|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128162": { + "content": "<|reserved_special_token_154|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128163": { + "content": "<|reserved_special_token_155|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128164": { + "content": "<|reserved_special_token_156|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128165": { + "content": "<|reserved_special_token_157|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128166": { + "content": "<|reserved_special_token_158|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128167": { + "content": "<|reserved_special_token_159|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128168": { + "content": "<|reserved_special_token_160|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128169": { + "content": "<|reserved_special_token_161|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128170": { + "content": "<|reserved_special_token_162|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128171": { + "content": "<|reserved_special_token_163|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128172": { + "content": "<|reserved_special_token_164|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128173": { + "content": "<|reserved_special_token_165|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128174": { + "content": "<|reserved_special_token_166|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128175": { + "content": "<|reserved_special_token_167|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128176": { + "content": "<|reserved_special_token_168|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128177": { + "content": "<|reserved_special_token_169|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128178": { + "content": "<|reserved_special_token_170|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128179": { + "content": "<|reserved_special_token_171|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128180": { + 
"content": "<|reserved_special_token_172|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128181": { + "content": "<|reserved_special_token_173|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128182": { + "content": "<|reserved_special_token_174|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128183": { + "content": "<|reserved_special_token_175|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128184": { + "content": "<|reserved_special_token_176|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128185": { + "content": "<|reserved_special_token_177|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128186": { + "content": "<|reserved_special_token_178|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128187": { + "content": "<|reserved_special_token_179|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128188": { + "content": "<|reserved_special_token_180|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128189": { + "content": "<|reserved_special_token_181|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128190": { + "content": "<|reserved_special_token_182|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128191": { + "content": "<|reserved_special_token_183|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128192": { + "content": "<|reserved_special_token_184|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128193": { + "content": "<|reserved_special_token_185|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128194": { + "content": "<|reserved_special_token_186|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128195": { + "content": "<|reserved_special_token_187|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128196": { + "content": "<|reserved_special_token_188|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128197": { + "content": "<|reserved_special_token_189|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128198": { + "content": "<|reserved_special_token_190|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128199": { + "content": "<|reserved_special_token_191|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128200": { + "content": "<|reserved_special_token_192|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128201": { + 
"content": "<|reserved_special_token_193|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128202": { + "content": "<|reserved_special_token_194|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128203": { + "content": "<|reserved_special_token_195|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128204": { + "content": "<|reserved_special_token_196|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128205": { + "content": "<|reserved_special_token_197|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128206": { + "content": "<|reserved_special_token_198|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128207": { + "content": "<|reserved_special_token_199|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128208": { + "content": "<|reserved_special_token_200|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128209": { + "content": "<|reserved_special_token_201|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128210": { + "content": "<|reserved_special_token_202|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128211": { + "content": "<|reserved_special_token_203|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128212": { + "content": "<|reserved_special_token_204|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128213": { + "content": "<|reserved_special_token_205|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128214": { + "content": "<|reserved_special_token_206|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128215": { + "content": "<|reserved_special_token_207|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128216": { + "content": "<|reserved_special_token_208|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128217": { + "content": "<|reserved_special_token_209|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128218": { + "content": "<|reserved_special_token_210|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128219": { + "content": "<|reserved_special_token_211|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128220": { + "content": "<|reserved_special_token_212|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128221": { + "content": "<|reserved_special_token_213|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128222": { + 
"content": "<|reserved_special_token_214|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128223": { + "content": "<|reserved_special_token_215|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128224": { + "content": "<|reserved_special_token_216|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128225": { + "content": "<|reserved_special_token_217|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128226": { + "content": "<|reserved_special_token_218|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128227": { + "content": "<|reserved_special_token_219|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128228": { + "content": "<|reserved_special_token_220|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128229": { + "content": "<|reserved_special_token_221|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128230": { + "content": "<|reserved_special_token_222|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128231": { + "content": "<|reserved_special_token_223|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128232": { + "content": "<|reserved_special_token_224|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128233": { + "content": "<|reserved_special_token_225|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128234": { + "content": "<|reserved_special_token_226|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128235": { + "content": "<|reserved_special_token_227|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128236": { + "content": "<|reserved_special_token_228|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128237": { + "content": "<|reserved_special_token_229|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128238": { + "content": "<|reserved_special_token_230|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128239": { + "content": "<|reserved_special_token_231|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128240": { + "content": "<|reserved_special_token_232|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128241": { + "content": "<|reserved_special_token_233|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128242": { + "content": "<|reserved_special_token_234|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128243": { + 
"content": "<|reserved_special_token_235|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128244": { + "content": "<|reserved_special_token_236|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128245": { + "content": "<|reserved_special_token_237|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128246": { + "content": "<|reserved_special_token_238|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128247": { + "content": "<|reserved_special_token_239|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128248": { + "content": "<|reserved_special_token_240|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128249": { + "content": "<|reserved_special_token_241|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128250": { + "content": "<|reserved_special_token_242|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128251": { + "content": "<|reserved_special_token_243|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128252": { + "content": "<|reserved_special_token_244|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128253": { + "content": "<|reserved_special_token_245|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128254": { + "content": "<|reserved_special_token_246|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128255": { + "content": "<|reserved_special_token_247|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + } + }, + "additional_special_tokens": [ + "<|eom_id|>" + ], + "bos_token": "<|begin_of_text|>", + "clean_up_tokenization_spaces": true, + "eos_token": "<|eot_id|>", + "extra_special_tokens": {}, + "model_input_names": [ + "input_ids", + "attention_mask" + ], + "model_max_length": 131072, + "pad_token": "<|eot_id|>", + "padding_side": "right", + "split_special_tokens": false, + "tokenizer_class": "PreTrainedTokenizerFast" +} diff --git a/B_3/checkpoint-31/trainer_state.json b/B_3/checkpoint-31/trainer_state.json new file mode 100644 index 0000000000000000000000000000000000000000..47274dca5ea7d09796a8bf3c70f1dc75855992a1 --- /dev/null +++ b/B_3/checkpoint-31/trainer_state.json @@ -0,0 +1,344 @@ +{ + "best_global_step": null, + "best_metric": null, + "best_model_checkpoint": null, + "epoch": 1.967741935483871, + "eval_steps": 500, + "global_step": 31, + "is_hyper_param_search": false, + "is_local_process_zero": true, + "is_world_process_zero": true, + "log_history": [ + { + "epoch": 0.06451612903225806, + "grad_norm": 2.073743358513411, + "learning_rate": 0.0001, + "loss": 1.3591, + "num_input_tokens_seen": 262016, + "step": 1, + "train_runtime": 148.9807, + "train_tokens_per_second": 1758.725 + }, + { + "epoch": 0.12903225806451613, + "grad_norm": 1.7083966194301452, + "learning_rate": 9.999036202410325e-05, + "loss": 1.4321, + "num_input_tokens_seen": 
524032, + "step": 2, + "train_runtime": 298.073, + "train_tokens_per_second": 1758.066 + }, + { + "epoch": 0.1935483870967742, + "grad_norm": 0.8154519457271471, + "learning_rate": 9.996145181203615e-05, + "loss": 1.3036, + "num_input_tokens_seen": 786048, + "step": 3, + "train_runtime": 449.085, + "train_tokens_per_second": 1750.332 + }, + { + "epoch": 0.25806451612903225, + "grad_norm": 1.6435854418760425, + "learning_rate": 9.991328050923581e-05, + "loss": 1.3119, + "num_input_tokens_seen": 1048064, + "step": 4, + "train_runtime": 600.5729, + "train_tokens_per_second": 1745.107 + }, + { + "epoch": 0.3225806451612903, + "grad_norm": 1.2034889741595727, + "learning_rate": 9.98458666866564e-05, + "loss": 1.21, + "num_input_tokens_seen": 1310080, + "step": 5, + "train_runtime": 752.2986, + "train_tokens_per_second": 1741.436 + }, + { + "epoch": 0.3870967741935484, + "grad_norm": 0.6081797430818349, + "learning_rate": 9.975923633360985e-05, + "loss": 1.1713, + "num_input_tokens_seen": 1572096, + "step": 6, + "train_runtime": 903.8941, + "train_tokens_per_second": 1739.248 + }, + { + "epoch": 0.45161290322580644, + "grad_norm": 0.43860201273645816, + "learning_rate": 9.965342284774632e-05, + "loss": 1.1687, + "num_input_tokens_seen": 1834112, + "step": 7, + "train_runtime": 1055.454, + "train_tokens_per_second": 1737.747 + }, + { + "epoch": 0.5161290322580645, + "grad_norm": 0.3489614049191675, + "learning_rate": 9.952846702217886e-05, + "loss": 1.1151, + "num_input_tokens_seen": 2096128, + "step": 8, + "train_runtime": 1207.0142, + "train_tokens_per_second": 1736.623 + }, + { + "epoch": 0.5806451612903226, + "grad_norm": 3.182590493238281, + "learning_rate": 9.938441702975689e-05, + "loss": 1.0694, + "num_input_tokens_seen": 2358144, + "step": 9, + "train_runtime": 1358.12, + "train_tokens_per_second": 1736.33 + }, + { + "epoch": 0.6451612903225806, + "grad_norm": 0.40091824929141967, + "learning_rate": 9.922132840449459e-05, + "loss": 1.0431, + "num_input_tokens_seen": 2620160, + "step": 10, + "train_runtime": 1509.1349, + "train_tokens_per_second": 1736.2 + }, + { + "epoch": 0.7096774193548387, + "grad_norm": 0.4778713425435316, + "learning_rate": 9.903926402016153e-05, + "loss": 1.0812, + "num_input_tokens_seen": 2882176, + "step": 11, + "train_runtime": 1661.0314, + "train_tokens_per_second": 1735.172 + }, + { + "epoch": 0.7741935483870968, + "grad_norm": 0.3361596465836105, + "learning_rate": 9.883829406604363e-05, + "loss": 1.0296, + "num_input_tokens_seen": 3144192, + "step": 12, + "train_runtime": 1812.2372, + "train_tokens_per_second": 1734.978 + }, + { + "epoch": 0.8387096774193549, + "grad_norm": 0.30484937194478195, + "learning_rate": 9.861849601988383e-05, + "loss": 0.9806, + "num_input_tokens_seen": 3406208, + "step": 13, + "train_runtime": 1963.4393, + "train_tokens_per_second": 1734.817 + }, + { + "epoch": 0.9032258064516129, + "grad_norm": 0.31491494208058013, + "learning_rate": 9.837995461801299e-05, + "loss": 1.0244, + "num_input_tokens_seen": 3668224, + "step": 14, + "train_runtime": 2115.2404, + "train_tokens_per_second": 1734.188 + }, + { + "epoch": 0.967741935483871, + "grad_norm": 0.2555325387827011, + "learning_rate": 9.812276182268236e-05, + "loss": 0.9701, + "num_input_tokens_seen": 3930240, + "step": 15, + "train_runtime": 2266.9917, + "train_tokens_per_second": 1733.681 + }, + { + "epoch": 1.0, + "grad_norm": 0.2555325387827011, + "learning_rate": 9.784701678661045e-05, + "loss": 0.9885, + "num_input_tokens_seen": 4061248, + "step": 16, + "train_runtime": 
2343.254, + "train_tokens_per_second": 1733.166 + }, + { + "epoch": 1.064516129032258, + "grad_norm": 0.3803435873199766, + "learning_rate": 9.755282581475769e-05, + "loss": 0.9221, + "num_input_tokens_seen": 4323264, + "step": 17, + "train_runtime": 2495.3616, + "train_tokens_per_second": 1732.52 + }, + { + "epoch": 1.129032258064516, + "grad_norm": 0.25742598219685603, + "learning_rate": 9.724030232334391e-05, + "loss": 0.9187, + "num_input_tokens_seen": 4585280, + "step": 18, + "train_runtime": 2646.9782, + "train_tokens_per_second": 1732.27 + }, + { + "epoch": 1.1935483870967742, + "grad_norm": 0.22468849320033518, + "learning_rate": 9.690956679612421e-05, + "loss": 0.8943, + "num_input_tokens_seen": 4847296, + "step": 19, + "train_runtime": 2798.7721, + "train_tokens_per_second": 1731.937 + }, + { + "epoch": 1.2580645161290323, + "grad_norm": 0.28670637895213946, + "learning_rate": 9.656074673794018e-05, + "loss": 0.8567, + "num_input_tokens_seen": 5109312, + "step": 20, + "train_runtime": 2950.5573, + "train_tokens_per_second": 1731.643 + }, + { + "epoch": 1.3225806451612903, + "grad_norm": 0.2043611694383423, + "learning_rate": 9.619397662556435e-05, + "loss": 0.8557, + "num_input_tokens_seen": 5371328, + "step": 21, + "train_runtime": 3103.0692, + "train_tokens_per_second": 1730.973 + }, + { + "epoch": 1.3870967741935485, + "grad_norm": 0.24667572059564058, + "learning_rate": 9.580939785585681e-05, + "loss": 0.8546, + "num_input_tokens_seen": 5633344, + "step": 22, + "train_runtime": 3254.6109, + "train_tokens_per_second": 1730.881 + }, + { + "epoch": 1.4516129032258065, + "grad_norm": 0.24067782128131054, + "learning_rate": 9.540715869125407e-05, + "loss": 0.851, + "num_input_tokens_seen": 5895360, + "step": 23, + "train_runtime": 3406.7776, + "train_tokens_per_second": 1730.48 + }, + { + "epoch": 1.5161290322580645, + "grad_norm": 0.2000848793492341, + "learning_rate": 9.498741420261108e-05, + "loss": 0.8483, + "num_input_tokens_seen": 6157376, + "step": 24, + "train_runtime": 3558.7104, + "train_tokens_per_second": 1730.227 + }, + { + "epoch": 1.5806451612903225, + "grad_norm": 0.22359283400518964, + "learning_rate": 9.45503262094184e-05, + "loss": 0.8226, + "num_input_tokens_seen": 6419392, + "step": 25, + "train_runtime": 3711.0551, + "train_tokens_per_second": 1729.802 + }, + { + "epoch": 1.6451612903225805, + "grad_norm": 1.0648985294170912, + "learning_rate": 9.409606321741775e-05, + "loss": 0.8259, + "num_input_tokens_seen": 6681408, + "step": 26, + "train_runtime": 3863.7709, + "train_tokens_per_second": 1729.245 + }, + { + "epoch": 1.7096774193548387, + "grad_norm": 0.18133437899141433, + "learning_rate": 9.362480035363986e-05, + "loss": 0.8524, + "num_input_tokens_seen": 6943424, + "step": 27, + "train_runtime": 4016.4383, + "train_tokens_per_second": 1728.752 + }, + { + "epoch": 1.7741935483870968, + "grad_norm": 0.456273625303925, + "learning_rate": 9.31367192988896e-05, + "loss": 0.8209, + "num_input_tokens_seen": 7205440, + "step": 28, + "train_runtime": 4168.9223, + "train_tokens_per_second": 1728.37 + }, + { + "epoch": 1.838709677419355, + "grad_norm": 0.4673225345753548, + "learning_rate": 9.263200821770461e-05, + "loss": 0.7853, + "num_input_tokens_seen": 7467456, + "step": 29, + "train_runtime": 4321.7056, + "train_tokens_per_second": 1727.896 + }, + { + "epoch": 1.903225806451613, + "grad_norm": 0.1996315966078707, + "learning_rate": 9.211086168581433e-05, + "loss": 0.7779, + "num_input_tokens_seen": 7729472, + "step": 30, + "train_runtime": 4474.7713, + 
"train_tokens_per_second": 1727.345 + }, + { + "epoch": 1.967741935483871, + "grad_norm": 0.3752545960960864, + "learning_rate": 9.157348061512727e-05, + "loss": 0.8146, + "num_input_tokens_seen": 7991488, + "step": 31, + "train_runtime": 4627.5272, + "train_tokens_per_second": 1726.946 + } + ], + "logging_steps": 1, + "max_steps": 160, + "num_input_tokens_seen": 7991488, + "num_train_epochs": 10, + "save_steps": 31, + "stateful_callbacks": { + "TrainerControl": { + "args": { + "should_epoch_stop": false, + "should_evaluate": false, + "should_log": false, + "should_save": true, + "should_training_stop": false + }, + "attributes": {} + } + }, + "total_flos": 62981234491392.0, + "train_batch_size": 4, + "trial_name": null, + "trial_params": null +} diff --git a/B_3/checkpoint-31/training_args.bin b/B_3/checkpoint-31/training_args.bin new file mode 100644 index 0000000000000000000000000000000000000000..11cf327ddd83fc52012ab679d2c8f1eea3993a5d --- /dev/null +++ b/B_3/checkpoint-31/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:538340330f0a2e51763a2666184aff893a2f4b0e6e52d3a71923bb9fc8b781c0 +size 8017 diff --git a/B_3/checkpoint-31/zero_to_fp32.py b/B_3/checkpoint-31/zero_to_fp32.py new file mode 100644 index 0000000000000000000000000000000000000000..0e759146cadd92ddfefab3680146c2bd6a2b5c04 --- /dev/null +++ b/B_3/checkpoint-31/zero_to_fp32.py @@ -0,0 +1,760 @@ +#!/usr/bin/env python + +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +# This script extracts fp32 consolidated weights from a zero 1, 2 and 3 DeepSpeed checkpoints. It gets +# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in +# the future. Once extracted, the weights don't require DeepSpeed and can be used in any +# application. +# +# example: +# python zero_to_fp32.py . output_dir/ +# or +# python zero_to_fp32.py . output_dir/ --safe_serialization + +import argparse +import torch +import glob +import math +import os +import re +import gc +import json +import numpy as np +from tqdm import tqdm +from collections import OrderedDict +from dataclasses import dataclass + +# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with +# DeepSpeed data structures it has to be available in the current python environment. 
+from deepspeed.utils import logger +from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS, + FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES, + FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS) + + +@dataclass +class zero_model_state: + buffers: dict() + param_shapes: dict() + shared_params: list + ds_version: int + frozen_param_shapes: dict() + frozen_param_fragments: dict() + + +debug = 0 + +# load to cpu +device = torch.device('cpu') + + +def atoi(text): + return int(text) if text.isdigit() else text + + +def natural_keys(text): + ''' + alist.sort(key=natural_keys) sorts in human order + http://nedbatchelder.com/blog/200712/human_sorting.html + (See Toothy's implementation in the comments) + ''' + return [atoi(c) for c in re.split(r'(\d+)', text)] + + +def get_model_state_file(checkpoint_dir, zero_stage): + if not os.path.isdir(checkpoint_dir): + raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist") + + # there should be only one file + if zero_stage <= 2: + file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt") + elif zero_stage == 3: + file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt") + + if not os.path.exists(file): + raise FileNotFoundError(f"can't find model states file at '{file}'") + + return file + + +def get_checkpoint_files(checkpoint_dir, glob_pattern): + # XXX: need to test that this simple glob rule works for multi-node setup too + ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys) + + if len(ckpt_files) == 0: + raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'") + + return ckpt_files + + +def get_optim_files(checkpoint_dir): + return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt") + + +def get_model_state_files(checkpoint_dir): + return get_checkpoint_files(checkpoint_dir, "*_model_states.pt") + + +def parse_model_states(files): + zero_model_states = [] + for file in files: + state_dict = torch.load(file, map_location=device, weights_only=False) + + if BUFFER_NAMES not in state_dict: + raise ValueError(f"{file} is not a model state checkpoint") + buffer_names = state_dict[BUFFER_NAMES] + if debug: + print("Found buffers:", buffer_names) + + # recover just the buffers while restoring them to fp32 if they were saved in fp16 + buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names} + param_shapes = state_dict[PARAM_SHAPES] + + # collect parameters that are included in param_shapes + param_names = [] + for s in param_shapes: + for name in s.keys(): + param_names.append(name) + + # update with frozen parameters + frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None) + if frozen_param_shapes is not None: + if debug: + print(f"Found frozen_param_shapes: {frozen_param_shapes}") + param_names += list(frozen_param_shapes.keys()) + + # handle shared params + shared_params = [[k, v] for k, v in state_dict["shared_params"].items()] + + ds_version = state_dict.get(DS_VERSION, None) + + frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None) + + z_model_state = zero_model_state(buffers=buffers, + param_shapes=param_shapes, + shared_params=shared_params, + ds_version=ds_version, + frozen_param_shapes=frozen_param_shapes, + frozen_param_fragments=frozen_param_fragments) + zero_model_states.append(z_model_state) + + return zero_model_states + + +def parse_optim_states(files, ds_checkpoint_dir): + total_files = 
len(files) + state_dicts = [] + for f in tqdm(files, desc='Loading checkpoint shards'): + state_dict = torch.load(f, map_location=device, mmap=True, weights_only=False) + # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights + # and also handle the case where it was already removed by another helper script + state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None) + state_dicts.append(state_dict) + + if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]: + raise ValueError(f"{files[0]} is not a zero checkpoint") + zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE] + world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT] + + # For ZeRO-2 each param group can have different partition_count as data parallelism for expert + # parameters can be different from data parallelism for non-expert parameters. So we can just + # use the max of the partition_count to get the dp world_size. + + if type(world_size) is list: + world_size = max(world_size) + + if world_size != total_files: + raise ValueError( + f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. " + "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes." + ) + + # the groups are named differently in each stage + if zero_stage <= 2: + fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS + elif zero_stage == 3: + fp32_groups_key = FP32_FLAT_GROUPS + else: + raise ValueError(f"unknown zero stage {zero_stage}") + + fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))] + return zero_stage, world_size, fp32_flat_groups + + +def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters): + """ + Returns fp32 state_dict reconstructed from ds checkpoint + + Args: + - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are) + + """ + print(f"Processing zero checkpoint '{ds_checkpoint_dir}'") + + optim_files = get_optim_files(ds_checkpoint_dir) + zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir) + print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}") + + model_files = get_model_state_files(ds_checkpoint_dir) + + zero_model_states = parse_model_states(model_files) + print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}') + + if zero_stage <= 2: + return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states, + exclude_frozen_parameters) + elif zero_stage == 3: + return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states, + exclude_frozen_parameters) + + +def _zero2_merge_frozen_params(state_dict, zero_model_states): + if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0: + return + + frozen_param_shapes = zero_model_states[0].frozen_param_shapes + frozen_param_fragments = zero_model_states[0].frozen_param_fragments + + if debug: + num_elem = sum(s.numel() for s in frozen_param_shapes.values()) + print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}') + + wanted_params = len(frozen_param_shapes) + wanted_numel = sum(s.numel() for s in frozen_param_shapes.values()) + avail_numel = sum([p.numel() for p in frozen_param_fragments.values()]) + print(f'Frozen params: Have {avail_numel} numels to process.') + 
print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params') + + total_params = 0 + total_numel = 0 + for name, shape in frozen_param_shapes.items(): + total_params += 1 + unpartitioned_numel = shape.numel() + total_numel += unpartitioned_numel + + state_dict[name] = frozen_param_fragments[name] + + if debug: + print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ") + + print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements") + + +def _has_callable(obj, fn): + attr = getattr(obj, fn, None) + return callable(attr) + + +def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states): + param_shapes = zero_model_states[0].param_shapes + + # Reconstruction protocol: + # + # XXX: document this + + if debug: + for i in range(world_size): + for j in range(len(fp32_flat_groups[0])): + print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}") + + # XXX: memory usage doubles here (zero2) + num_param_groups = len(fp32_flat_groups[0]) + merged_single_partition_of_fp32_groups = [] + for i in range(num_param_groups): + merged_partitions = [sd[i] for sd in fp32_flat_groups] + full_single_fp32_vector = torch.cat(merged_partitions, 0) + merged_single_partition_of_fp32_groups.append(full_single_fp32_vector) + avail_numel = sum( + [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups]) + + if debug: + wanted_params = sum([len(shapes) for shapes in param_shapes]) + wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes]) + # not asserting if there is a mismatch due to possible padding + print(f"Have {avail_numel} numels to process.") + print(f"Need {wanted_numel} numels in {wanted_params} params.") + + # params + # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support + # out-of-core computing solution + total_numel = 0 + total_params = 0 + for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups): + offset = 0 + avail_numel = full_single_fp32_vector.numel() + for name, shape in shapes.items(): + + unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape) + total_numel += unpartitioned_numel + total_params += 1 + + if debug: + print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ") + state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape) + offset += unpartitioned_numel + + # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and + # avail_numel can differ by anywhere between 0..2*world_size. 
Due to two unrelated complex + # paddings performed in the code it's almost impossible to predict the exact numbers w/o the + # live optimizer object, so we are checking that the numbers are within the right range + align_to = 2 * world_size + + def zero2_align(x): + return align_to * math.ceil(x / align_to) + + if debug: + print(f"original offset={offset}, avail_numel={avail_numel}") + + offset = zero2_align(offset) + avail_numel = zero2_align(avail_numel) + + if debug: + print(f"aligned offset={offset}, avail_numel={avail_numel}") + + # Sanity check + if offset != avail_numel: + raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong") + + print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements") + + +def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states, + exclude_frozen_parameters): + state_dict = OrderedDict() + + # buffers + buffers = zero_model_states[0].buffers + state_dict.update(buffers) + if debug: + print(f"added {len(buffers)} buffers") + + if not exclude_frozen_parameters: + _zero2_merge_frozen_params(state_dict, zero_model_states) + + _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states) + + # recover shared parameters + for pair in zero_model_states[0].shared_params: + if pair[1] in state_dict: + state_dict[pair[0]] = state_dict[pair[1]] + + return state_dict + + +def zero3_partitioned_param_info(unpartitioned_numel, world_size): + remainder = unpartitioned_numel % world_size + padding_numel = (world_size - remainder) if remainder else 0 + partitioned_numel = math.ceil(unpartitioned_numel / world_size) + return partitioned_numel, padding_numel + + +def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states): + if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0: + return + + if debug: + for i in range(world_size): + num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values()) + print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}') + + frozen_param_shapes = zero_model_states[0].frozen_param_shapes + wanted_params = len(frozen_param_shapes) + wanted_numel = sum(s.numel() for s in frozen_param_shapes.values()) + avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size + print(f'Frozen params: Have {avail_numel} numels to process.') + print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params') + + total_params = 0 + total_numel = 0 + for name, shape in zero_model_states[0].frozen_param_shapes.items(): + total_params += 1 + unpartitioned_numel = shape.numel() + total_numel += unpartitioned_numel + + param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states) + state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape) + + partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size) + + if debug: + print( + f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}" + ) + + print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements") + + +class GatheredTensor: + """ + A pseudo tensor that collects partitioned weights. + It is more memory efficient when there are multiple groups. 
+ """ + + def __init__(self, flat_groups, flat_groups_offset, offset, partitioned_numel, shape): + self.flat_groups = flat_groups + self.flat_groups_offset = flat_groups_offset + self.offset = offset + self.partitioned_numel = partitioned_numel + self.shape = shape + self.dtype = self.flat_groups[0][0].dtype + + def contiguous(self): + """ + Merge partitioned weights from flat_groups into a single tensor. + """ + end_idx = self.offset + self.partitioned_numel + world_size = len(self.flat_groups) + pad_flat_param_chunks = [] + + for rank_i in range(world_size): + # for each rank, we need to collect weights from related group/groups + flat_groups_at_rank_i = self.flat_groups[rank_i] + start_group_id = None + end_group_id = None + for group_id in range(len(self.flat_groups_offset)): + if self.flat_groups_offset[group_id] <= self.offset < self.flat_groups_offset[group_id + 1]: + start_group_id = group_id + if self.flat_groups_offset[group_id] < end_idx <= self.flat_groups_offset[group_id + 1]: + end_group_id = group_id + break + # collect weights from related group/groups + for group_id in range(start_group_id, end_group_id + 1): + flat_tensor = flat_groups_at_rank_i[group_id] + start_offset = self.offset - self.flat_groups_offset[group_id] + end_offset = min(end_idx, self.flat_groups_offset[group_id + 1]) - self.flat_groups_offset[group_id] + pad_flat_param_chunks.append(flat_tensor[start_offset:end_offset]) + + # collect weights from all ranks + pad_flat_param = torch.cat(pad_flat_param_chunks, dim=0) + param = pad_flat_param[:self.shape.numel()].view(self.shape).contiguous() + return param + + +def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states): + param_shapes = zero_model_states[0].param_shapes + avail_numel = sum([flat_group.numel() for flat_group in fp32_flat_groups[0]]) * world_size + + # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each + # param, re-consolidating each param, while dealing with padding if any + + # merge list of dicts, preserving order + param_shapes = {k: v for d in param_shapes for k, v in d.items()} + + if debug: + for i in range(world_size): + print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}") + + wanted_params = len(param_shapes) + wanted_numel = sum(shape.numel() for shape in param_shapes.values()) + # not asserting if there is a mismatch due to possible padding + avail_numel = fp32_flat_groups[0].numel() * world_size + print(f"Trainable params: Have {avail_numel} numels to process.") + print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.") + + # params + # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support + # out-of-core computing solution + offset = 0 + total_numel = 0 + total_params = 0 + flat_groups_offset = [0] + list(np.cumsum([flat_tensor.numel() for flat_tensor in fp32_flat_groups[0]])) + for name, shape in tqdm(param_shapes.items(), desc='Gathering sharded weights'): + unpartitioned_numel = shape.numel() + total_numel += unpartitioned_numel + total_params += 1 + partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size) + + if debug: + print( + f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}" + ) + + # memory efficient tensor + tensor = GatheredTensor(fp32_flat_groups, flat_groups_offset, offset, partitioned_numel, shape) + 
state_dict[name] = tensor + offset += partitioned_numel + + offset *= world_size + + # Sanity check + if offset != avail_numel: + raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong") + + print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements") + + +def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states, + exclude_frozen_parameters): + state_dict = OrderedDict() + + # buffers + buffers = zero_model_states[0].buffers + state_dict.update(buffers) + if debug: + print(f"added {len(buffers)} buffers") + + if not exclude_frozen_parameters: + _zero3_merge_frozen_params(state_dict, world_size, zero_model_states) + + _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states) + + # recover shared parameters + for pair in zero_model_states[0].shared_params: + if pair[1] in state_dict: + state_dict[pair[0]] = state_dict[pair[1]] + + return state_dict + + +def to_torch_tensor(state_dict, return_empty_tensor=False): + """ + Convert state_dict of GatheredTensor to torch tensor + """ + torch_state_dict = {} + converted_tensors = {} + for name, tensor in state_dict.items(): + tensor_id = id(tensor) + if tensor_id in converted_tensors: # shared tensors + shared_tensor = torch_state_dict[converted_tensors[tensor_id]] + torch_state_dict[name] = shared_tensor + else: + converted_tensors[tensor_id] = name + if return_empty_tensor: + torch_state_dict[name] = torch.empty(tensor.shape, dtype=tensor.dtype) + else: + torch_state_dict[name] = tensor.contiguous() + return torch_state_dict + + +def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, + tag=None, + exclude_frozen_parameters=False, + lazy_mode=False): + """ + Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with + ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example + via a model hub. + + Args: + - ``checkpoint_dir``: path to the desired checkpoint folder + - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14`` + - ``exclude_frozen_parameters``: exclude frozen parameters + - ``lazy_mode``: get state_dict in lazy mode. It returns a dict of pesduo tensor instead of torch tensor, which is more memory efficient. + Convert the pesduo tensor to torch tensor by ``.contiguous()`` + + Returns: + - pytorch ``state_dict`` + + A typical usage might be :: + + from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint + # do the training and checkpoint saving + state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu + model = model.cpu() # move to cpu + model.load_state_dict(state_dict) + # submit to model hub or save the model to share with others + + In this example the ``model`` will no longer be usable in the deepspeed context of the same + application. i.e. you will need to re-initialize the deepspeed engine, since + ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it. + + If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead. + + Note: the above usage may not work if your application doesn't have sufficient free CPU memory. + You may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with + the checkpoint. 
Or you can load state_dict in lazy mode :: + + from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint + state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, lazy_mode=True) # not on cpu + for name, lazy_tensor in state_dict.item(): + tensor = lazy_tensor.contiguous() # to cpu + print(name, tensor) + # del tensor to release memory if it no longer in use + """ + if tag is None: + latest_path = os.path.join(checkpoint_dir, 'latest') + if os.path.isfile(latest_path): + with open(latest_path, 'r') as fd: + tag = fd.read().strip() + else: + raise ValueError(f"Unable to find 'latest' file at {latest_path}") + + ds_checkpoint_dir = os.path.join(checkpoint_dir, tag) + + if not os.path.isdir(ds_checkpoint_dir): + raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist") + + state_dict = _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters) + if lazy_mode: + return state_dict + else: + return to_torch_tensor(state_dict) + + +def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, + output_dir, + max_shard_size="5GB", + safe_serialization=False, + tag=None, + exclude_frozen_parameters=False): + """ + Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be + loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed. + + Args: + - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``) + - ``output_dir``: directory to the pytorch fp32 state_dict output files + - ``max_shard_size``: the maximum size for a checkpoint before being sharded, default value is 5GB + - ``safe_serialization``: whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`). + - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14`` + - ``exclude_frozen_parameters``: exclude frozen parameters + """ + + # Dependency pre-check + if safe_serialization: + try: + from safetensors.torch import save_file + except ImportError: + print('If you want to use `safe_serialization`, please `pip install safetensors`') + raise + if max_shard_size is not None: + try: + from huggingface_hub import split_torch_state_dict_into_shards + except ImportError: + print('If you want to use `max_shard_size`, please `pip install huggingface_hub`') + raise + + # Convert zero checkpoint to state_dict + state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, + tag, + exclude_frozen_parameters, + lazy_mode=True) + + # Shard the model if it is too big. 
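+    # With the default max_shard_size="5GB" the consolidated weights are split into shard files
+    # via huggingface_hub.split_torch_state_dict_into_shards (an index JSON is written only when
+    # more than one shard results); passing max_shard_size=None forces a single weights file.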
+ weights_name = "model.safetensors" if safe_serialization else "pytorch_model.bin" + if max_shard_size is not None: + filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors") + # an memory-efficient approach for sharding + empty_state_dict = to_torch_tensor(state_dict, return_empty_tensor=True) + state_dict_split = split_torch_state_dict_into_shards(empty_state_dict, + filename_pattern=filename_pattern, + max_shard_size=max_shard_size) + else: + from collections import namedtuple + StateDictSplit = namedtuple("StateDictSplit", ["is_sharded", "filename_to_tensors"]) + state_dict_split = StateDictSplit(is_sharded=False, + filename_to_tensors={weights_name: list(state_dict.keys())}) + + # Save the model by shard + os.makedirs(output_dir, exist_ok=True) + filename_to_tensors = state_dict_split.filename_to_tensors.items() + for shard_file, tensors in tqdm(filename_to_tensors, desc="Saving checkpoint shards"): + shard_state_dict = {tensor_name: state_dict[tensor_name] for tensor_name in tensors} + shard_state_dict = to_torch_tensor(shard_state_dict) + output_path = os.path.join(output_dir, shard_file) + if safe_serialization: + save_file(shard_state_dict, output_path, metadata={"format": "pt"}) + else: + torch.save(shard_state_dict, output_path) + # release the memory of current shard + for tensor_name in list(shard_state_dict.keys()): + del state_dict[tensor_name] + del shard_state_dict[tensor_name] + del shard_state_dict + gc.collect() + + # Save index if sharded + if state_dict_split.is_sharded: + index = { + "metadata": state_dict_split.metadata, + "weight_map": state_dict_split.tensor_to_filename, + } + save_index_file = "model.safetensors.index.json" if safe_serialization else "pytorch_model.bin.index.json" + save_index_file = os.path.join(output_dir, save_index_file) + with open(save_index_file, "w", encoding="utf-8") as f: + content = json.dumps(index, indent=2, sort_keys=True) + "\n" + f.write(content) + + +def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None): + """ + 1. Put the provided model to cpu + 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` + 3. Load it into the provided model + + Args: + - ``model``: the model object to update + - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``) + - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14`` + + Returns: + - ``model`: modified model + + Make sure you have plenty of CPU memory available before you call this function. If you don't + have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it + conveniently placed for you in the checkpoint folder. + + A typical usage might be :: + + from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint + model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir) + # submit to model hub or save the model to share with others + + Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context + of the same application. i.e. you will need to re-initialize the deepspeed engine, since + ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it. 
+ + """ + logger.info(f"Extracting fp32 weights") + state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag) + + logger.info(f"Overwriting model with fp32 weights") + model = model.cpu() + model.load_state_dict(state_dict, strict=False) + + return model + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("checkpoint_dir", + type=str, + help="path to the desired checkpoint folder, e.g., path/checkpoint-12") + parser.add_argument("output_dir", + type=str, + help="directory to the pytorch fp32 state_dict output files" + "(e.g. path/checkpoint-12-output/)") + parser.add_argument( + "--max_shard_size", + type=str, + default="5GB", + help="The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size" + "lower than this size. If expressed as a string, needs to be digits followed by a unit (like `5MB`" + "We default it to 5GB in order for models to be able to run easily on free-tier google colab instances" + "without CPU OOM issues.") + parser.add_argument( + "--safe_serialization", + default=False, + action='store_true', + help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).") + parser.add_argument("-t", + "--tag", + type=str, + default=None, + help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1") + parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters") + parser.add_argument("-d", "--debug", action='store_true', help="enable debug") + args = parser.parse_args() + + debug = args.debug + + convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, + args.output_dir, + max_shard_size=args.max_shard_size, + safe_serialization=args.safe_serialization, + tag=args.tag, + exclude_frozen_parameters=args.exclude_frozen_parameters) diff --git a/B_3/checkpoint-62/README.md b/B_3/checkpoint-62/README.md new file mode 100644 index 0000000000000000000000000000000000000000..0673d0313e0251ef70dd85d3ff379913ab5506d9 --- /dev/null +++ b/B_3/checkpoint-62/README.md @@ -0,0 +1,202 @@ +--- +base_model: /workspace/meta-llama/Llama-3.1-70B +library_name: peft +--- + +# Model Card for Model ID + + + + + +## Model Details + +### Model Description + + + + + +- **Developed by:** [More Information Needed] +- **Funded by [optional]:** [More Information Needed] +- **Shared by [optional]:** [More Information Needed] +- **Model type:** [More Information Needed] +- **Language(s) (NLP):** [More Information Needed] +- **License:** [More Information Needed] +- **Finetuned from model [optional]:** [More Information Needed] + +### Model Sources [optional] + + + +- **Repository:** [More Information Needed] +- **Paper [optional]:** [More Information Needed] +- **Demo [optional]:** [More Information Needed] + +## Uses + + + +### Direct Use + + + +[More Information Needed] + +### Downstream Use [optional] + + + +[More Information Needed] + +### Out-of-Scope Use + + + +[More Information Needed] + +## Bias, Risks, and Limitations + + + +[More Information Needed] + +### Recommendations + + + +Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. + +## How to Get Started with the Model + +Use the code below to get started with the model. 
+ +[More Information Needed] + +## Training Details + +### Training Data + + + +[More Information Needed] + +### Training Procedure + + + +#### Preprocessing [optional] + +[More Information Needed] + + +#### Training Hyperparameters + +- **Training regime:** [More Information Needed] + +#### Speeds, Sizes, Times [optional] + + + +[More Information Needed] + +## Evaluation + + + +### Testing Data, Factors & Metrics + +#### Testing Data + + + +[More Information Needed] + +#### Factors + + + +[More Information Needed] + +#### Metrics + + + +[More Information Needed] + +### Results + +[More Information Needed] + +#### Summary + + + +## Model Examination [optional] + + + +[More Information Needed] + +## Environmental Impact + + + +Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). + +- **Hardware Type:** [More Information Needed] +- **Hours used:** [More Information Needed] +- **Cloud Provider:** [More Information Needed] +- **Compute Region:** [More Information Needed] +- **Carbon Emitted:** [More Information Needed] + +## Technical Specifications [optional] + +### Model Architecture and Objective + +[More Information Needed] + +### Compute Infrastructure + +[More Information Needed] + +#### Hardware + +[More Information Needed] + +#### Software + +[More Information Needed] + +## Citation [optional] + + + +**BibTeX:** + +[More Information Needed] + +**APA:** + +[More Information Needed] + +## Glossary [optional] + + + +[More Information Needed] + +## More Information [optional] + +[More Information Needed] + +## Model Card Authors [optional] + +[More Information Needed] + +## Model Card Contact + +[More Information Needed] +### Framework versions + +- PEFT 0.15.2 \ No newline at end of file diff --git a/B_3/checkpoint-62/adapter_config.json b/B_3/checkpoint-62/adapter_config.json new file mode 100644 index 0000000000000000000000000000000000000000..9f0d6a37659f3015d3fdd427235805c5bbb829e9 --- /dev/null +++ b/B_3/checkpoint-62/adapter_config.json @@ -0,0 +1,39 @@ +{ + "alpha_pattern": {}, + "auto_mapping": null, + "base_model_name_or_path": "/workspace/meta-llama/Llama-3.1-70B", + "bias": "none", + "corda_config": null, + "eva_config": null, + "exclude_modules": null, + "fan_in_fan_out": false, + "inference_mode": true, + "init_lora_weights": true, + "layer_replication": null, + "layers_pattern": null, + "layers_to_transform": null, + "loftq_config": {}, + "lora_alpha": 1024, + "lora_bias": false, + "lora_dropout": 0, + "megatron_config": null, + "megatron_core": "megatron.core", + "modules_to_save": null, + "peft_type": "LORA", + "r": 256, + "rank_pattern": {}, + "revision": null, + "target_modules": [ + "down_proj", + "q_proj", + "k_proj", + "o_proj", + "v_proj", + "gate_proj", + "up_proj" + ], + "task_type": "CAUSAL_LM", + "trainable_token_indices": null, + "use_dora": false, + "use_rslora": false +} \ No newline at end of file diff --git a/B_3/checkpoint-62/adapter_model.safetensors b/B_3/checkpoint-62/adapter_model.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..1f9573427fab1cb99b94cb35ce0fa5d61469ca69 --- /dev/null +++ b/B_3/checkpoint-62/adapter_model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:19649261ef18be5c73d28d6995a5d287aff40b9cb796a6957a32a9c8c47e4d5a +size 6627156248 diff --git a/B_3/checkpoint-62/chat_template.jinja b/B_3/checkpoint-62/chat_template.jinja new file mode 
100644 index 0000000000000000000000000000000000000000..c3af804a3bddb95bf03b7d349234a039a27de382 --- /dev/null +++ b/B_3/checkpoint-62/chat_template.jinja @@ -0,0 +1,7 @@ +{{ '<|begin_of_text|>' }}{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% endif %}{% if system_message is defined %}{{ '<|start_header_id|>system<|end_header_id|> + +' + system_message + '<|eot_id|>' }}{% endif %}{% for message in loop_messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '<|start_header_id|>user<|end_header_id|> + +' + content + '<|eot_id|><|start_header_id|>assistant<|end_header_id|> + +' }}{% elif message['role'] == 'assistant' %}{{ content + '<|eot_id|>' }}{% endif %}{% endfor %} \ No newline at end of file diff --git a/B_3/checkpoint-62/global_step60/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt b/B_3/checkpoint-62/global_step60/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..08f188d6210527bdf1d3e41f60aee78066f53670 --- /dev/null +++ b/B_3/checkpoint-62/global_step60/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:45f4f51d828c4afec29b73b06d278f3d68a9423c5a179f2994c03a143e7e27ff +size 9940504945 diff --git a/B_3/checkpoint-62/global_step60/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt b/B_3/checkpoint-62/global_step60/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..2efb757e30dbcd650e9bbacb9df1c715e99ecb10 --- /dev/null +++ b/B_3/checkpoint-62/global_step60/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5196cad3c28f48f3d42c0df7b59c6a07cbce04b8afe011d34d9d4d4bdb1da46c +size 9940504945 diff --git a/B_3/checkpoint-62/global_step60/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt b/B_3/checkpoint-62/global_step60/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..4b1708ccf4b398116fa6b4e1d3323cb05508457a --- /dev/null +++ b/B_3/checkpoint-62/global_step60/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fff7662ea0d8054205acf412b7ef1ec361ee2e38891e42a6082ad7df6b0eaeab +size 9940504945 diff --git a/B_3/checkpoint-62/global_step60/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt b/B_3/checkpoint-62/global_step60/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..26489b3dff7dd3e0b2fa9e0523abc1999bd5da67 --- /dev/null +++ b/B_3/checkpoint-62/global_step60/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c238efc23c019beceb6bec2330a23b5a6ef439f1fe0527bc40d5cc0dc1ebb2cf +size 9940504945 diff --git a/B_3/checkpoint-62/global_step60/zero_pp_rank_0_mp_rank_00_model_states.pt b/B_3/checkpoint-62/global_step60/zero_pp_rank_0_mp_rank_00_model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..58dc940421e1ced7615b13a6d6545f8945030c57 --- /dev/null +++ b/B_3/checkpoint-62/global_step60/zero_pp_rank_0_mp_rank_00_model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ea9eef7692acb25fb47fd8b63fb5356e1d97a79da279f46beb00553034933531 +size 1109201 diff --git 
a/B_3/checkpoint-62/global_step60/zero_pp_rank_1_mp_rank_00_model_states.pt b/B_3/checkpoint-62/global_step60/zero_pp_rank_1_mp_rank_00_model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..cbd54fc10a5362d8efadf2dfdffcf134489a6997 --- /dev/null +++ b/B_3/checkpoint-62/global_step60/zero_pp_rank_1_mp_rank_00_model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ed1713be6db438367cdda5077de03c933116d408bd9b08af3096ab228de8bb7d +size 1109201 diff --git a/B_3/checkpoint-62/global_step60/zero_pp_rank_2_mp_rank_00_model_states.pt b/B_3/checkpoint-62/global_step60/zero_pp_rank_2_mp_rank_00_model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..6cce53c0e644dd078b598bf96782f326aa54ebf6 --- /dev/null +++ b/B_3/checkpoint-62/global_step60/zero_pp_rank_2_mp_rank_00_model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7d6cb0f952eb0820135b7ab9a68e9f827cd4e8c2ff00185c30b20c79e7b73c85 +size 1109201 diff --git a/B_3/checkpoint-62/global_step60/zero_pp_rank_3_mp_rank_00_model_states.pt b/B_3/checkpoint-62/global_step60/zero_pp_rank_3_mp_rank_00_model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..9b81a6d088aaa6ce49402df31e390ce495e84a97 --- /dev/null +++ b/B_3/checkpoint-62/global_step60/zero_pp_rank_3_mp_rank_00_model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f1ac43574a56ed745017c48ba2185d2b6630c3bbb444deffea202cb68b837e9f +size 1109201 diff --git a/B_3/checkpoint-62/latest b/B_3/checkpoint-62/latest new file mode 100644 index 0000000000000000000000000000000000000000..6dac34b840ecfb636ba8ab1e4da79fa1bdc8c3d4 --- /dev/null +++ b/B_3/checkpoint-62/latest @@ -0,0 +1 @@ +global_step60 \ No newline at end of file diff --git a/B_3/checkpoint-62/rng_state_0.pth b/B_3/checkpoint-62/rng_state_0.pth new file mode 100644 index 0000000000000000000000000000000000000000..3cd72c974e82825e0615a259e4bcca95a3a3273c --- /dev/null +++ b/B_3/checkpoint-62/rng_state_0.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b80829a78a755ba0cf5e985f9212f164229ce25f7c02fa877b4cdebf230922b5 +size 15429 diff --git a/B_3/checkpoint-62/rng_state_1.pth b/B_3/checkpoint-62/rng_state_1.pth new file mode 100644 index 0000000000000000000000000000000000000000..2749e3e0a290757087922def08bd7cfa7e218b5e --- /dev/null +++ b/B_3/checkpoint-62/rng_state_1.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0c6f4074cb5241eff8959c1f45b18c23c893119f6c03fa2da27359dbe7dba66c +size 15429 diff --git a/B_3/checkpoint-62/rng_state_2.pth b/B_3/checkpoint-62/rng_state_2.pth new file mode 100644 index 0000000000000000000000000000000000000000..c4dcd559098a83c8196eebb4aa54e5ee6c3f2a8f --- /dev/null +++ b/B_3/checkpoint-62/rng_state_2.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f1e84026d823a1c04897f710cd119a65b654c08ad443e97c4486fc7a26f6e64b +size 15429 diff --git a/B_3/checkpoint-62/rng_state_3.pth b/B_3/checkpoint-62/rng_state_3.pth new file mode 100644 index 0000000000000000000000000000000000000000..57161b2dc15e410494ead6a7f9ba4524f334abae --- /dev/null +++ b/B_3/checkpoint-62/rng_state_3.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d9b60a41c989678f542cc75416e61e7a43f81d6221d259490134253d1f38a920 +size 15429 diff --git a/B_3/checkpoint-62/scheduler.pt b/B_3/checkpoint-62/scheduler.pt new file mode 100644 index 
0000000000000000000000000000000000000000..fca0f09cce4a966743aed6919169ade9fc304df6 --- /dev/null +++ b/B_3/checkpoint-62/scheduler.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2d868ef4540be0e9b70e99c2e067f4f9e5566f45d69e1e34396881ab19ddef33 +size 1401 diff --git a/B_3/checkpoint-62/special_tokens_map.json b/B_3/checkpoint-62/special_tokens_map.json new file mode 100644 index 0000000000000000000000000000000000000000..14daf4588e61b4e4983af0fccaba4d5500c0977c --- /dev/null +++ b/B_3/checkpoint-62/special_tokens_map.json @@ -0,0 +1,26 @@ +{ + "additional_special_tokens": [ + { + "content": "<|eom_id|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + } + ], + "bos_token": { + "content": "<|begin_of_text|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + }, + "eos_token": { + "content": "<|eot_id|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + }, + "pad_token": "<|eot_id|>" +} diff --git a/B_3/checkpoint-62/tokenizer.json b/B_3/checkpoint-62/tokenizer.json new file mode 100644 index 0000000000000000000000000000000000000000..1c1d8d5c9024994f1d3b00f9662b8dd89ca13cf2 --- /dev/null +++ b/B_3/checkpoint-62/tokenizer.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6b9e4e7fb171f92fd137b777cc2714bf87d11576700a1dcd7a399e7bbe39537b +size 17209920 diff --git a/B_3/checkpoint-62/tokenizer_config.json b/B_3/checkpoint-62/tokenizer_config.json new file mode 100644 index 0000000000000000000000000000000000000000..d1e1ea9bc94ff1132f136710751e37fb23a64347 --- /dev/null +++ b/B_3/checkpoint-62/tokenizer_config.json @@ -0,0 +1,2068 @@ +{ + "added_tokens_decoder": { + "128000": { + "content": "<|begin_of_text|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128001": { + "content": "<|end_of_text|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128002": { + "content": "<|reserved_special_token_0|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128003": { + "content": "<|reserved_special_token_1|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128004": { + "content": "<|finetune_right_pad_id|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128005": { + "content": "<|reserved_special_token_2|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128006": { + "content": "<|start_header_id|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128007": { + "content": "<|end_header_id|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128008": { + "content": "<|eom_id|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128009": { + "content": "<|eot_id|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128010": { + "content": "<|python_tag|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128011": { + "content": "<|reserved_special_token_3|>", + 
"lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128012": { + "content": "<|reserved_special_token_4|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128013": { + "content": "<|reserved_special_token_5|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128014": { + "content": "<|reserved_special_token_6|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128015": { + "content": "<|reserved_special_token_7|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128016": { + "content": "<|reserved_special_token_8|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128017": { + "content": "<|reserved_special_token_9|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128018": { + "content": "<|reserved_special_token_10|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128019": { + "content": "<|reserved_special_token_11|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128020": { + "content": "<|reserved_special_token_12|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128021": { + "content": "<|reserved_special_token_13|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128022": { + "content": "<|reserved_special_token_14|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128023": { + "content": "<|reserved_special_token_15|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128024": { + "content": "<|reserved_special_token_16|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128025": { + "content": "<|reserved_special_token_17|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128026": { + "content": "<|reserved_special_token_18|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128027": { + "content": "<|reserved_special_token_19|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128028": { + "content": "<|reserved_special_token_20|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128029": { + "content": "<|reserved_special_token_21|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128030": { + "content": "<|reserved_special_token_22|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128031": { + "content": "<|reserved_special_token_23|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128032": { + "content": "<|reserved_special_token_24|>", + "lstrip": false, + "normalized": 
false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128033": { + "content": "<|reserved_special_token_25|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128034": { + "content": "<|reserved_special_token_26|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128035": { + "content": "<|reserved_special_token_27|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128036": { + "content": "<|reserved_special_token_28|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128037": { + "content": "<|reserved_special_token_29|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128038": { + "content": "<|reserved_special_token_30|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128039": { + "content": "<|reserved_special_token_31|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128040": { + "content": "<|reserved_special_token_32|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128041": { + "content": "<|reserved_special_token_33|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128042": { + "content": "<|reserved_special_token_34|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128043": { + "content": "<|reserved_special_token_35|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128044": { + "content": "<|reserved_special_token_36|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128045": { + "content": "<|reserved_special_token_37|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128046": { + "content": "<|reserved_special_token_38|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128047": { + "content": "<|reserved_special_token_39|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128048": { + "content": "<|reserved_special_token_40|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128049": { + "content": "<|reserved_special_token_41|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128050": { + "content": "<|reserved_special_token_42|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128051": { + "content": "<|reserved_special_token_43|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128052": { + "content": "<|reserved_special_token_44|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128053": { + "content": "<|reserved_special_token_45|>", + "lstrip": false, + "normalized": false, + "rstrip": false, 
+ "single_word": false, + "special": true + }, + "128054": { + "content": "<|reserved_special_token_46|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128055": { + "content": "<|reserved_special_token_47|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128056": { + "content": "<|reserved_special_token_48|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128057": { + "content": "<|reserved_special_token_49|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128058": { + "content": "<|reserved_special_token_50|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128059": { + "content": "<|reserved_special_token_51|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128060": { + "content": "<|reserved_special_token_52|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128061": { + "content": "<|reserved_special_token_53|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128062": { + "content": "<|reserved_special_token_54|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128063": { + "content": "<|reserved_special_token_55|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128064": { + "content": "<|reserved_special_token_56|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128065": { + "content": "<|reserved_special_token_57|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128066": { + "content": "<|reserved_special_token_58|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128067": { + "content": "<|reserved_special_token_59|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128068": { + "content": "<|reserved_special_token_60|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128069": { + "content": "<|reserved_special_token_61|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128070": { + "content": "<|reserved_special_token_62|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128071": { + "content": "<|reserved_special_token_63|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128072": { + "content": "<|reserved_special_token_64|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128073": { + "content": "<|reserved_special_token_65|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128074": { + "content": "<|reserved_special_token_66|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + 
"special": true + }, + "128075": { + "content": "<|reserved_special_token_67|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128076": { + "content": "<|reserved_special_token_68|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128077": { + "content": "<|reserved_special_token_69|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128078": { + "content": "<|reserved_special_token_70|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128079": { + "content": "<|reserved_special_token_71|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128080": { + "content": "<|reserved_special_token_72|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128081": { + "content": "<|reserved_special_token_73|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128082": { + "content": "<|reserved_special_token_74|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128083": { + "content": "<|reserved_special_token_75|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128084": { + "content": "<|reserved_special_token_76|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128085": { + "content": "<|reserved_special_token_77|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128086": { + "content": "<|reserved_special_token_78|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128087": { + "content": "<|reserved_special_token_79|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128088": { + "content": "<|reserved_special_token_80|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128089": { + "content": "<|reserved_special_token_81|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128090": { + "content": "<|reserved_special_token_82|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128091": { + "content": "<|reserved_special_token_83|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128092": { + "content": "<|reserved_special_token_84|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128093": { + "content": "<|reserved_special_token_85|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128094": { + "content": "<|reserved_special_token_86|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128095": { + "content": "<|reserved_special_token_87|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + 
"128096": { + "content": "<|reserved_special_token_88|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128097": { + "content": "<|reserved_special_token_89|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128098": { + "content": "<|reserved_special_token_90|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128099": { + "content": "<|reserved_special_token_91|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128100": { + "content": "<|reserved_special_token_92|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128101": { + "content": "<|reserved_special_token_93|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128102": { + "content": "<|reserved_special_token_94|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128103": { + "content": "<|reserved_special_token_95|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128104": { + "content": "<|reserved_special_token_96|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128105": { + "content": "<|reserved_special_token_97|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128106": { + "content": "<|reserved_special_token_98|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128107": { + "content": "<|reserved_special_token_99|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128108": { + "content": "<|reserved_special_token_100|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128109": { + "content": "<|reserved_special_token_101|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128110": { + "content": "<|reserved_special_token_102|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128111": { + "content": "<|reserved_special_token_103|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128112": { + "content": "<|reserved_special_token_104|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128113": { + "content": "<|reserved_special_token_105|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128114": { + "content": "<|reserved_special_token_106|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128115": { + "content": "<|reserved_special_token_107|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128116": { + "content": "<|reserved_special_token_108|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128117": { + 
"content": "<|reserved_special_token_109|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128118": { + "content": "<|reserved_special_token_110|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128119": { + "content": "<|reserved_special_token_111|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128120": { + "content": "<|reserved_special_token_112|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128121": { + "content": "<|reserved_special_token_113|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128122": { + "content": "<|reserved_special_token_114|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128123": { + "content": "<|reserved_special_token_115|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128124": { + "content": "<|reserved_special_token_116|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128125": { + "content": "<|reserved_special_token_117|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128126": { + "content": "<|reserved_special_token_118|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128127": { + "content": "<|reserved_special_token_119|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128128": { + "content": "<|reserved_special_token_120|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128129": { + "content": "<|reserved_special_token_121|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128130": { + "content": "<|reserved_special_token_122|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128131": { + "content": "<|reserved_special_token_123|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128132": { + "content": "<|reserved_special_token_124|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128133": { + "content": "<|reserved_special_token_125|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128134": { + "content": "<|reserved_special_token_126|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128135": { + "content": "<|reserved_special_token_127|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128136": { + "content": "<|reserved_special_token_128|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128137": { + "content": "<|reserved_special_token_129|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128138": { + 
"content": "<|reserved_special_token_130|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128139": { + "content": "<|reserved_special_token_131|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128140": { + "content": "<|reserved_special_token_132|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128141": { + "content": "<|reserved_special_token_133|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128142": { + "content": "<|reserved_special_token_134|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128143": { + "content": "<|reserved_special_token_135|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128144": { + "content": "<|reserved_special_token_136|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128145": { + "content": "<|reserved_special_token_137|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128146": { + "content": "<|reserved_special_token_138|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128147": { + "content": "<|reserved_special_token_139|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128148": { + "content": "<|reserved_special_token_140|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128149": { + "content": "<|reserved_special_token_141|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128150": { + "content": "<|reserved_special_token_142|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128151": { + "content": "<|reserved_special_token_143|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128152": { + "content": "<|reserved_special_token_144|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128153": { + "content": "<|reserved_special_token_145|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128154": { + "content": "<|reserved_special_token_146|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128155": { + "content": "<|reserved_special_token_147|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128156": { + "content": "<|reserved_special_token_148|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128157": { + "content": "<|reserved_special_token_149|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128158": { + "content": "<|reserved_special_token_150|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128159": { + 
"content": "<|reserved_special_token_151|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128160": { + "content": "<|reserved_special_token_152|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128161": { + "content": "<|reserved_special_token_153|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128162": { + "content": "<|reserved_special_token_154|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128163": { + "content": "<|reserved_special_token_155|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128164": { + "content": "<|reserved_special_token_156|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128165": { + "content": "<|reserved_special_token_157|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128166": { + "content": "<|reserved_special_token_158|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128167": { + "content": "<|reserved_special_token_159|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128168": { + "content": "<|reserved_special_token_160|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128169": { + "content": "<|reserved_special_token_161|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128170": { + "content": "<|reserved_special_token_162|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128171": { + "content": "<|reserved_special_token_163|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128172": { + "content": "<|reserved_special_token_164|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128173": { + "content": "<|reserved_special_token_165|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128174": { + "content": "<|reserved_special_token_166|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128175": { + "content": "<|reserved_special_token_167|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128176": { + "content": "<|reserved_special_token_168|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128177": { + "content": "<|reserved_special_token_169|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128178": { + "content": "<|reserved_special_token_170|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128179": { + "content": "<|reserved_special_token_171|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128180": { + 
"content": "<|reserved_special_token_172|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128181": { + "content": "<|reserved_special_token_173|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128182": { + "content": "<|reserved_special_token_174|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128183": { + "content": "<|reserved_special_token_175|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128184": { + "content": "<|reserved_special_token_176|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128185": { + "content": "<|reserved_special_token_177|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128186": { + "content": "<|reserved_special_token_178|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128187": { + "content": "<|reserved_special_token_179|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128188": { + "content": "<|reserved_special_token_180|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128189": { + "content": "<|reserved_special_token_181|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128190": { + "content": "<|reserved_special_token_182|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128191": { + "content": "<|reserved_special_token_183|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128192": { + "content": "<|reserved_special_token_184|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128193": { + "content": "<|reserved_special_token_185|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128194": { + "content": "<|reserved_special_token_186|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128195": { + "content": "<|reserved_special_token_187|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128196": { + "content": "<|reserved_special_token_188|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128197": { + "content": "<|reserved_special_token_189|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128198": { + "content": "<|reserved_special_token_190|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128199": { + "content": "<|reserved_special_token_191|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128200": { + "content": "<|reserved_special_token_192|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128201": { + 
"content": "<|reserved_special_token_193|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128202": { + "content": "<|reserved_special_token_194|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128203": { + "content": "<|reserved_special_token_195|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128204": { + "content": "<|reserved_special_token_196|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128205": { + "content": "<|reserved_special_token_197|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128206": { + "content": "<|reserved_special_token_198|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128207": { + "content": "<|reserved_special_token_199|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128208": { + "content": "<|reserved_special_token_200|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128209": { + "content": "<|reserved_special_token_201|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128210": { + "content": "<|reserved_special_token_202|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128211": { + "content": "<|reserved_special_token_203|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128212": { + "content": "<|reserved_special_token_204|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128213": { + "content": "<|reserved_special_token_205|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128214": { + "content": "<|reserved_special_token_206|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128215": { + "content": "<|reserved_special_token_207|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128216": { + "content": "<|reserved_special_token_208|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128217": { + "content": "<|reserved_special_token_209|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128218": { + "content": "<|reserved_special_token_210|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128219": { + "content": "<|reserved_special_token_211|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128220": { + "content": "<|reserved_special_token_212|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128221": { + "content": "<|reserved_special_token_213|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128222": { + 
"content": "<|reserved_special_token_214|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128223": { + "content": "<|reserved_special_token_215|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128224": { + "content": "<|reserved_special_token_216|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128225": { + "content": "<|reserved_special_token_217|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128226": { + "content": "<|reserved_special_token_218|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128227": { + "content": "<|reserved_special_token_219|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128228": { + "content": "<|reserved_special_token_220|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128229": { + "content": "<|reserved_special_token_221|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128230": { + "content": "<|reserved_special_token_222|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128231": { + "content": "<|reserved_special_token_223|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128232": { + "content": "<|reserved_special_token_224|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128233": { + "content": "<|reserved_special_token_225|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128234": { + "content": "<|reserved_special_token_226|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128235": { + "content": "<|reserved_special_token_227|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128236": { + "content": "<|reserved_special_token_228|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128237": { + "content": "<|reserved_special_token_229|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128238": { + "content": "<|reserved_special_token_230|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128239": { + "content": "<|reserved_special_token_231|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128240": { + "content": "<|reserved_special_token_232|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128241": { + "content": "<|reserved_special_token_233|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128242": { + "content": "<|reserved_special_token_234|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128243": { + 
"content": "<|reserved_special_token_235|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128244": { + "content": "<|reserved_special_token_236|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128245": { + "content": "<|reserved_special_token_237|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128246": { + "content": "<|reserved_special_token_238|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128247": { + "content": "<|reserved_special_token_239|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128248": { + "content": "<|reserved_special_token_240|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128249": { + "content": "<|reserved_special_token_241|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128250": { + "content": "<|reserved_special_token_242|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128251": { + "content": "<|reserved_special_token_243|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128252": { + "content": "<|reserved_special_token_244|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128253": { + "content": "<|reserved_special_token_245|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128254": { + "content": "<|reserved_special_token_246|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128255": { + "content": "<|reserved_special_token_247|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + } + }, + "additional_special_tokens": [ + "<|eom_id|>" + ], + "bos_token": "<|begin_of_text|>", + "clean_up_tokenization_spaces": true, + "eos_token": "<|eot_id|>", + "extra_special_tokens": {}, + "model_input_names": [ + "input_ids", + "attention_mask" + ], + "model_max_length": 131072, + "pad_token": "<|eot_id|>", + "padding_side": "right", + "split_special_tokens": false, + "tokenizer_class": "PreTrainedTokenizerFast" +} diff --git a/B_3/checkpoint-62/trainer_state.json b/B_3/checkpoint-62/trainer_state.json new file mode 100644 index 0000000000000000000000000000000000000000..8438df65561ee4dd00c18fbb3e9feefc0b8f7d6c --- /dev/null +++ b/B_3/checkpoint-62/trainer_state.json @@ -0,0 +1,654 @@ +{ + "best_global_step": null, + "best_metric": null, + "best_model_checkpoint": null, + "epoch": 3.903225806451613, + "eval_steps": 500, + "global_step": 62, + "is_hyper_param_search": false, + "is_local_process_zero": true, + "is_world_process_zero": true, + "log_history": [ + { + "epoch": 0.06451612903225806, + "grad_norm": 2.073743358513411, + "learning_rate": 0.0001, + "loss": 1.3591, + "num_input_tokens_seen": 262016, + "step": 1, + "train_runtime": 148.9807, + "train_tokens_per_second": 1758.725 + }, + { + "epoch": 0.12903225806451613, + "grad_norm": 1.7083966194301452, + "learning_rate": 9.999036202410325e-05, + "loss": 1.4321, + "num_input_tokens_seen": 
524032, + "step": 2, + "train_runtime": 298.073, + "train_tokens_per_second": 1758.066 + }, + { + "epoch": 0.1935483870967742, + "grad_norm": 0.8154519457271471, + "learning_rate": 9.996145181203615e-05, + "loss": 1.3036, + "num_input_tokens_seen": 786048, + "step": 3, + "train_runtime": 449.085, + "train_tokens_per_second": 1750.332 + }, + { + "epoch": 0.25806451612903225, + "grad_norm": 1.6435854418760425, + "learning_rate": 9.991328050923581e-05, + "loss": 1.3119, + "num_input_tokens_seen": 1048064, + "step": 4, + "train_runtime": 600.5729, + "train_tokens_per_second": 1745.107 + }, + { + "epoch": 0.3225806451612903, + "grad_norm": 1.2034889741595727, + "learning_rate": 9.98458666866564e-05, + "loss": 1.21, + "num_input_tokens_seen": 1310080, + "step": 5, + "train_runtime": 752.2986, + "train_tokens_per_second": 1741.436 + }, + { + "epoch": 0.3870967741935484, + "grad_norm": 0.6081797430818349, + "learning_rate": 9.975923633360985e-05, + "loss": 1.1713, + "num_input_tokens_seen": 1572096, + "step": 6, + "train_runtime": 903.8941, + "train_tokens_per_second": 1739.248 + }, + { + "epoch": 0.45161290322580644, + "grad_norm": 0.43860201273645816, + "learning_rate": 9.965342284774632e-05, + "loss": 1.1687, + "num_input_tokens_seen": 1834112, + "step": 7, + "train_runtime": 1055.454, + "train_tokens_per_second": 1737.747 + }, + { + "epoch": 0.5161290322580645, + "grad_norm": 0.3489614049191675, + "learning_rate": 9.952846702217886e-05, + "loss": 1.1151, + "num_input_tokens_seen": 2096128, + "step": 8, + "train_runtime": 1207.0142, + "train_tokens_per_second": 1736.623 + }, + { + "epoch": 0.5806451612903226, + "grad_norm": 3.182590493238281, + "learning_rate": 9.938441702975689e-05, + "loss": 1.0694, + "num_input_tokens_seen": 2358144, + "step": 9, + "train_runtime": 1358.12, + "train_tokens_per_second": 1736.33 + }, + { + "epoch": 0.6451612903225806, + "grad_norm": 0.40091824929141967, + "learning_rate": 9.922132840449459e-05, + "loss": 1.0431, + "num_input_tokens_seen": 2620160, + "step": 10, + "train_runtime": 1509.1349, + "train_tokens_per_second": 1736.2 + }, + { + "epoch": 0.7096774193548387, + "grad_norm": 0.4778713425435316, + "learning_rate": 9.903926402016153e-05, + "loss": 1.0812, + "num_input_tokens_seen": 2882176, + "step": 11, + "train_runtime": 1661.0314, + "train_tokens_per_second": 1735.172 + }, + { + "epoch": 0.7741935483870968, + "grad_norm": 0.3361596465836105, + "learning_rate": 9.883829406604363e-05, + "loss": 1.0296, + "num_input_tokens_seen": 3144192, + "step": 12, + "train_runtime": 1812.2372, + "train_tokens_per_second": 1734.978 + }, + { + "epoch": 0.8387096774193549, + "grad_norm": 0.30484937194478195, + "learning_rate": 9.861849601988383e-05, + "loss": 0.9806, + "num_input_tokens_seen": 3406208, + "step": 13, + "train_runtime": 1963.4393, + "train_tokens_per_second": 1734.817 + }, + { + "epoch": 0.9032258064516129, + "grad_norm": 0.31491494208058013, + "learning_rate": 9.837995461801299e-05, + "loss": 1.0244, + "num_input_tokens_seen": 3668224, + "step": 14, + "train_runtime": 2115.2404, + "train_tokens_per_second": 1734.188 + }, + { + "epoch": 0.967741935483871, + "grad_norm": 0.2555325387827011, + "learning_rate": 9.812276182268236e-05, + "loss": 0.9701, + "num_input_tokens_seen": 3930240, + "step": 15, + "train_runtime": 2266.9917, + "train_tokens_per_second": 1733.681 + }, + { + "epoch": 1.0, + "grad_norm": 0.2555325387827011, + "learning_rate": 9.784701678661045e-05, + "loss": 0.9885, + "num_input_tokens_seen": 4061248, + "step": 16, + "train_runtime": 
2343.254, + "train_tokens_per_second": 1733.166 + }, + { + "epoch": 1.064516129032258, + "grad_norm": 0.3803435873199766, + "learning_rate": 9.755282581475769e-05, + "loss": 0.9221, + "num_input_tokens_seen": 4323264, + "step": 17, + "train_runtime": 2495.3616, + "train_tokens_per_second": 1732.52 + }, + { + "epoch": 1.129032258064516, + "grad_norm": 0.25742598219685603, + "learning_rate": 9.724030232334391e-05, + "loss": 0.9187, + "num_input_tokens_seen": 4585280, + "step": 18, + "train_runtime": 2646.9782, + "train_tokens_per_second": 1732.27 + }, + { + "epoch": 1.1935483870967742, + "grad_norm": 0.22468849320033518, + "learning_rate": 9.690956679612421e-05, + "loss": 0.8943, + "num_input_tokens_seen": 4847296, + "step": 19, + "train_runtime": 2798.7721, + "train_tokens_per_second": 1731.937 + }, + { + "epoch": 1.2580645161290323, + "grad_norm": 0.28670637895213946, + "learning_rate": 9.656074673794018e-05, + "loss": 0.8567, + "num_input_tokens_seen": 5109312, + "step": 20, + "train_runtime": 2950.5573, + "train_tokens_per_second": 1731.643 + }, + { + "epoch": 1.3225806451612903, + "grad_norm": 0.2043611694383423, + "learning_rate": 9.619397662556435e-05, + "loss": 0.8557, + "num_input_tokens_seen": 5371328, + "step": 21, + "train_runtime": 3103.0692, + "train_tokens_per_second": 1730.973 + }, + { + "epoch": 1.3870967741935485, + "grad_norm": 0.24667572059564058, + "learning_rate": 9.580939785585681e-05, + "loss": 0.8546, + "num_input_tokens_seen": 5633344, + "step": 22, + "train_runtime": 3254.6109, + "train_tokens_per_second": 1730.881 + }, + { + "epoch": 1.4516129032258065, + "grad_norm": 0.24067782128131054, + "learning_rate": 9.540715869125407e-05, + "loss": 0.851, + "num_input_tokens_seen": 5895360, + "step": 23, + "train_runtime": 3406.7776, + "train_tokens_per_second": 1730.48 + }, + { + "epoch": 1.5161290322580645, + "grad_norm": 0.2000848793492341, + "learning_rate": 9.498741420261108e-05, + "loss": 0.8483, + "num_input_tokens_seen": 6157376, + "step": 24, + "train_runtime": 3558.7104, + "train_tokens_per_second": 1730.227 + }, + { + "epoch": 1.5806451612903225, + "grad_norm": 0.22359283400518964, + "learning_rate": 9.45503262094184e-05, + "loss": 0.8226, + "num_input_tokens_seen": 6419392, + "step": 25, + "train_runtime": 3711.0551, + "train_tokens_per_second": 1729.802 + }, + { + "epoch": 1.6451612903225805, + "grad_norm": 1.0648985294170912, + "learning_rate": 9.409606321741775e-05, + "loss": 0.8259, + "num_input_tokens_seen": 6681408, + "step": 26, + "train_runtime": 3863.7709, + "train_tokens_per_second": 1729.245 + }, + { + "epoch": 1.7096774193548387, + "grad_norm": 0.18133437899141433, + "learning_rate": 9.362480035363986e-05, + "loss": 0.8524, + "num_input_tokens_seen": 6943424, + "step": 27, + "train_runtime": 4016.4383, + "train_tokens_per_second": 1728.752 + }, + { + "epoch": 1.7741935483870968, + "grad_norm": 0.456273625303925, + "learning_rate": 9.31367192988896e-05, + "loss": 0.8209, + "num_input_tokens_seen": 7205440, + "step": 28, + "train_runtime": 4168.9223, + "train_tokens_per_second": 1728.37 + }, + { + "epoch": 1.838709677419355, + "grad_norm": 0.4673225345753548, + "learning_rate": 9.263200821770461e-05, + "loss": 0.7853, + "num_input_tokens_seen": 7467456, + "step": 29, + "train_runtime": 4321.7056, + "train_tokens_per_second": 1727.896 + }, + { + "epoch": 1.903225806451613, + "grad_norm": 0.1996315966078707, + "learning_rate": 9.211086168581433e-05, + "loss": 0.7779, + "num_input_tokens_seen": 7729472, + "step": 30, + "train_runtime": 4474.7713, + 
"train_tokens_per_second": 1727.345 + }, + { + "epoch": 1.967741935483871, + "grad_norm": 0.3752545960960864, + "learning_rate": 9.157348061512727e-05, + "loss": 0.8146, + "num_input_tokens_seen": 7991488, + "step": 31, + "train_runtime": 4627.5272, + "train_tokens_per_second": 1726.946 + }, + { + "epoch": 2.0, + "grad_norm": 0.484664872247163, + "learning_rate": 9.102007217627568e-05, + "loss": 0.7787, + "num_input_tokens_seen": 8122496, + "step": 32, + "train_runtime": 4809.6299, + "train_tokens_per_second": 1688.799 + }, + { + "epoch": 2.064516129032258, + "grad_norm": 0.299023275639115, + "learning_rate": 9.045084971874738e-05, + "loss": 0.7361, + "num_input_tokens_seen": 8384512, + "step": 33, + "train_runtime": 4960.4611, + "train_tokens_per_second": 1690.269 + }, + { + "epoch": 2.129032258064516, + "grad_norm": 0.20981231086811225, + "learning_rate": 8.986603268863536e-05, + "loss": 0.6956, + "num_input_tokens_seen": 8646528, + "step": 34, + "train_runtime": 5112.1398, + "train_tokens_per_second": 1691.372 + }, + { + "epoch": 2.193548387096774, + "grad_norm": 0.3648857123126151, + "learning_rate": 8.926584654403724e-05, + "loss": 0.6819, + "num_input_tokens_seen": 8908544, + "step": 35, + "train_runtime": 5264.3603, + "train_tokens_per_second": 1692.237 + }, + { + "epoch": 2.258064516129032, + "grad_norm": 0.31269893807625165, + "learning_rate": 8.865052266813685e-05, + "loss": 0.6958, + "num_input_tokens_seen": 9170560, + "step": 36, + "train_runtime": 5416.2601, + "train_tokens_per_second": 1693.154 + }, + { + "epoch": 2.3225806451612905, + "grad_norm": 0.2727897516670068, + "learning_rate": 8.802029828000156e-05, + "loss": 0.6756, + "num_input_tokens_seen": 9432576, + "step": 37, + "train_runtime": 5567.9089, + "train_tokens_per_second": 1694.097 + }, + { + "epoch": 2.3870967741935485, + "grad_norm": 0.25721446603694037, + "learning_rate": 8.737541634312985e-05, + "loss": 0.648, + "num_input_tokens_seen": 9694592, + "step": 38, + "train_runtime": 5719.9113, + "train_tokens_per_second": 1694.885 + }, + { + "epoch": 2.4516129032258065, + "grad_norm": 20.44651031163169, + "learning_rate": 8.671612547178428e-05, + "loss": 0.669, + "num_input_tokens_seen": 9956608, + "step": 39, + "train_runtime": 5871.7821, + "train_tokens_per_second": 1695.671 + }, + { + "epoch": 2.5161290322580645, + "grad_norm": 3.0150996948618145, + "learning_rate": 8.604267983514594e-05, + "loss": 0.6856, + "num_input_tokens_seen": 10218624, + "step": 40, + "train_runtime": 6024.0794, + "train_tokens_per_second": 1696.296 + }, + { + "epoch": 2.5806451612903225, + "grad_norm": 19.306885630930985, + "learning_rate": 8.535533905932738e-05, + "loss": 0.6949, + "num_input_tokens_seen": 10480640, + "step": 41, + "train_runtime": 6176.6303, + "train_tokens_per_second": 1696.822 + }, + { + "epoch": 2.6451612903225805, + "grad_norm": 46.29810754342157, + "learning_rate": 8.46543681272818e-05, + "loss": 0.7467, + "num_input_tokens_seen": 10742656, + "step": 42, + "train_runtime": 6329.1576, + "train_tokens_per_second": 1697.328 + }, + { + "epoch": 2.709677419354839, + "grad_norm": 9.77264228502113, + "learning_rate": 8.39400372766471e-05, + "loss": 0.6638, + "num_input_tokens_seen": 11004672, + "step": 43, + "train_runtime": 6481.6817, + "train_tokens_per_second": 1697.811 + }, + { + "epoch": 2.774193548387097, + "grad_norm": 16.058872629216598, + "learning_rate": 8.321262189556409e-05, + "loss": 0.7096, + "num_input_tokens_seen": 11266688, + "step": 44, + "train_runtime": 6633.7733, + "train_tokens_per_second": 1698.383 
+ }, + { + "epoch": 2.838709677419355, + "grad_norm": 1.0261214777781495, + "learning_rate": 8.247240241650918e-05, + "loss": 0.6737, + "num_input_tokens_seen": 11528704, + "step": 45, + "train_runtime": 6785.7147, + "train_tokens_per_second": 1698.967 + }, + { + "epoch": 2.903225806451613, + "grad_norm": 1.6378077961358868, + "learning_rate": 8.171966420818228e-05, + "loss": 0.6638, + "num_input_tokens_seen": 11790720, + "step": 46, + "train_runtime": 6937.3194, + "train_tokens_per_second": 1699.607 + }, + { + "epoch": 2.967741935483871, + "grad_norm": 2.4944381489903455, + "learning_rate": 8.095469746549172e-05, + "loss": 0.6753, + "num_input_tokens_seen": 12052736, + "step": 47, + "train_runtime": 7089.3141, + "train_tokens_per_second": 1700.127 + }, + { + "epoch": 3.0, + "grad_norm": 2.4944381489903455, + "learning_rate": 8.017779709767858e-05, + "loss": 0.6673, + "num_input_tokens_seen": 12183744, + "step": 48, + "train_runtime": 7165.3736, + "train_tokens_per_second": 1700.364 + }, + { + "epoch": 3.064516129032258, + "grad_norm": 26.97115523850904, + "learning_rate": 7.938926261462366e-05, + "loss": 0.6049, + "num_input_tokens_seen": 12445760, + "step": 49, + "train_runtime": 7317.3555, + "train_tokens_per_second": 1700.855 + }, + { + "epoch": 3.129032258064516, + "grad_norm": 5.4430349870657375, + "learning_rate": 7.858939801138061e-05, + "loss": 1.0716, + "num_input_tokens_seen": 12707776, + "step": 50, + "train_runtime": 7468.9695, + "train_tokens_per_second": 1701.41 + }, + { + "epoch": 3.193548387096774, + "grad_norm": 12.78395158722848, + "learning_rate": 7.777851165098012e-05, + "loss": 1.0428, + "num_input_tokens_seen": 12969792, + "step": 51, + "train_runtime": 7620.5451, + "train_tokens_per_second": 1701.951 + }, + { + "epoch": 3.258064516129032, + "grad_norm": 326.4832878895978, + "learning_rate": 7.695691614555003e-05, + "loss": 0.9172, + "num_input_tokens_seen": 13231808, + "step": 52, + "train_runtime": 7772.2756, + "train_tokens_per_second": 1702.437 + }, + { + "epoch": 3.3225806451612905, + "grad_norm": 69.03004430934298, + "learning_rate": 7.612492823579745e-05, + "loss": 0.7593, + "num_input_tokens_seen": 13493824, + "step": 53, + "train_runtime": 7924.7666, + "train_tokens_per_second": 1702.741 + }, + { + "epoch": 3.3870967741935485, + "grad_norm": 1.1495044460164596, + "learning_rate": 7.528286866889924e-05, + "loss": 0.6151, + "num_input_tokens_seen": 13755840, + "step": 54, + "train_runtime": 8077.0191, + "train_tokens_per_second": 1703.084 + }, + { + "epoch": 3.4516129032258065, + "grad_norm": 0.252822200967686, + "learning_rate": 7.443106207484776e-05, + "loss": 0.5964, + "num_input_tokens_seen": 14017856, + "step": 55, + "train_runtime": 8229.2406, + "train_tokens_per_second": 1703.42 + }, + { + "epoch": 3.5161290322580645, + "grad_norm": 0.36169399074673414, + "learning_rate": 7.35698368412999e-05, + "loss": 0.5809, + "num_input_tokens_seen": 14279872, + "step": 56, + "train_runtime": 8380.9399, + "train_tokens_per_second": 1703.851 + }, + { + "epoch": 3.5806451612903225, + "grad_norm": 0.3219026144290714, + "learning_rate": 7.269952498697734e-05, + "loss": 0.5638, + "num_input_tokens_seen": 14541888, + "step": 57, + "train_runtime": 8533.1482, + "train_tokens_per_second": 1704.164 + }, + { + "epoch": 3.6451612903225805, + "grad_norm": 1.0720094012961243, + "learning_rate": 7.18204620336671e-05, + "loss": 0.5553, + "num_input_tokens_seen": 14803904, + "step": 58, + "train_runtime": 8685.2309, + "train_tokens_per_second": 1704.492 + }, + { + "epoch": 
3.709677419354839, + "grad_norm": 0.3151374305079844, + "learning_rate": 7.09329868768714e-05, + "loss": 0.5555, + "num_input_tokens_seen": 15065920, + "step": 59, + "train_runtime": 8837.7033, + "train_tokens_per_second": 1704.733 + }, + { + "epoch": 3.774193548387097, + "grad_norm": 0.2917481639941811, + "learning_rate": 7.003744165515705e-05, + "loss": 0.5635, + "num_input_tokens_seen": 15327936, + "step": 60, + "train_runtime": 8990.7028, + "train_tokens_per_second": 1704.865 + }, + { + "epoch": 3.838709677419355, + "grad_norm": 0.39703897734831073, + "learning_rate": 6.91341716182545e-05, + "loss": 0.5775, + "num_input_tokens_seen": 15589952, + "step": 61, + "train_runtime": 9142.8639, + "train_tokens_per_second": 1705.15 + }, + { + "epoch": 3.903225806451613, + "grad_norm": 0.330407023300617, + "learning_rate": 6.82235249939575e-05, + "loss": 0.5305, + "num_input_tokens_seen": 15851968, + "step": 62, + "train_runtime": 9294.6576, + "train_tokens_per_second": 1705.492 + } + ], + "logging_steps": 1, + "max_steps": 160, + "num_input_tokens_seen": 15851968, + "num_train_epochs": 10, + "save_steps": 31, + "stateful_callbacks": { + "TrainerControl": { + "args": { + "should_epoch_stop": false, + "should_evaluate": false, + "should_log": false, + "should_save": true, + "should_training_stop": false + }, + "attributes": {} + } + }, + "total_flos": 125184922877952.0, + "train_batch_size": 4, + "trial_name": null, + "trial_params": null +} diff --git a/B_3/checkpoint-62/training_args.bin b/B_3/checkpoint-62/training_args.bin new file mode 100644 index 0000000000000000000000000000000000000000..11cf327ddd83fc52012ab679d2c8f1eea3993a5d --- /dev/null +++ b/B_3/checkpoint-62/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:538340330f0a2e51763a2666184aff893a2f4b0e6e52d3a71923bb9fc8b781c0 +size 8017 diff --git a/B_3/checkpoint-62/zero_to_fp32.py b/B_3/checkpoint-62/zero_to_fp32.py new file mode 100644 index 0000000000000000000000000000000000000000..0e759146cadd92ddfefab3680146c2bd6a2b5c04 --- /dev/null +++ b/B_3/checkpoint-62/zero_to_fp32.py @@ -0,0 +1,760 @@ +#!/usr/bin/env python + +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +# This script extracts fp32 consolidated weights from a zero 1, 2 and 3 DeepSpeed checkpoints. It gets +# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in +# the future. Once extracted, the weights don't require DeepSpeed and can be used in any +# application. +# +# example: +# python zero_to_fp32.py . output_dir/ +# or +# python zero_to_fp32.py . output_dir/ --safe_serialization + +import argparse +import torch +import glob +import math +import os +import re +import gc +import json +import numpy as np +from tqdm import tqdm +from collections import OrderedDict +from dataclasses import dataclass + +# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with +# DeepSpeed data structures it has to be available in the current python environment. 
+from deepspeed.utils import logger +from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS, + FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES, + FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS) + + +@dataclass +class zero_model_state: + buffers: dict() + param_shapes: dict() + shared_params: list + ds_version: int + frozen_param_shapes: dict() + frozen_param_fragments: dict() + + +debug = 0 + +# load to cpu +device = torch.device('cpu') + + +def atoi(text): + return int(text) if text.isdigit() else text + + +def natural_keys(text): + ''' + alist.sort(key=natural_keys) sorts in human order + http://nedbatchelder.com/blog/200712/human_sorting.html + (See Toothy's implementation in the comments) + ''' + return [atoi(c) for c in re.split(r'(\d+)', text)] + + +def get_model_state_file(checkpoint_dir, zero_stage): + if not os.path.isdir(checkpoint_dir): + raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist") + + # there should be only one file + if zero_stage <= 2: + file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt") + elif zero_stage == 3: + file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt") + + if not os.path.exists(file): + raise FileNotFoundError(f"can't find model states file at '{file}'") + + return file + + +def get_checkpoint_files(checkpoint_dir, glob_pattern): + # XXX: need to test that this simple glob rule works for multi-node setup too + ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys) + + if len(ckpt_files) == 0: + raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'") + + return ckpt_files + + +def get_optim_files(checkpoint_dir): + return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt") + + +def get_model_state_files(checkpoint_dir): + return get_checkpoint_files(checkpoint_dir, "*_model_states.pt") + + +def parse_model_states(files): + zero_model_states = [] + for file in files: + state_dict = torch.load(file, map_location=device, weights_only=False) + + if BUFFER_NAMES not in state_dict: + raise ValueError(f"{file} is not a model state checkpoint") + buffer_names = state_dict[BUFFER_NAMES] + if debug: + print("Found buffers:", buffer_names) + + # recover just the buffers while restoring them to fp32 if they were saved in fp16 + buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names} + param_shapes = state_dict[PARAM_SHAPES] + + # collect parameters that are included in param_shapes + param_names = [] + for s in param_shapes: + for name in s.keys(): + param_names.append(name) + + # update with frozen parameters + frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None) + if frozen_param_shapes is not None: + if debug: + print(f"Found frozen_param_shapes: {frozen_param_shapes}") + param_names += list(frozen_param_shapes.keys()) + + # handle shared params + shared_params = [[k, v] for k, v in state_dict["shared_params"].items()] + + ds_version = state_dict.get(DS_VERSION, None) + + frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None) + + z_model_state = zero_model_state(buffers=buffers, + param_shapes=param_shapes, + shared_params=shared_params, + ds_version=ds_version, + frozen_param_shapes=frozen_param_shapes, + frozen_param_fragments=frozen_param_fragments) + zero_model_states.append(z_model_state) + + return zero_model_states + + +def parse_optim_states(files, ds_checkpoint_dir): + total_files = 
len(files) + state_dicts = [] + for f in tqdm(files, desc='Loading checkpoint shards'): + state_dict = torch.load(f, map_location=device, mmap=True, weights_only=False) + # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights + # and also handle the case where it was already removed by another helper script + state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None) + state_dicts.append(state_dict) + + if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]: + raise ValueError(f"{files[0]} is not a zero checkpoint") + zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE] + world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT] + + # For ZeRO-2 each param group can have different partition_count as data parallelism for expert + # parameters can be different from data parallelism for non-expert parameters. So we can just + # use the max of the partition_count to get the dp world_size. + + if type(world_size) is list: + world_size = max(world_size) + + if world_size != total_files: + raise ValueError( + f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. " + "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes." + ) + + # the groups are named differently in each stage + if zero_stage <= 2: + fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS + elif zero_stage == 3: + fp32_groups_key = FP32_FLAT_GROUPS + else: + raise ValueError(f"unknown zero stage {zero_stage}") + + fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))] + return zero_stage, world_size, fp32_flat_groups + + +def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters): + """ + Returns fp32 state_dict reconstructed from ds checkpoint + + Args: + - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are) + + """ + print(f"Processing zero checkpoint '{ds_checkpoint_dir}'") + + optim_files = get_optim_files(ds_checkpoint_dir) + zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir) + print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}") + + model_files = get_model_state_files(ds_checkpoint_dir) + + zero_model_states = parse_model_states(model_files) + print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}') + + if zero_stage <= 2: + return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states, + exclude_frozen_parameters) + elif zero_stage == 3: + return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states, + exclude_frozen_parameters) + + +def _zero2_merge_frozen_params(state_dict, zero_model_states): + if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0: + return + + frozen_param_shapes = zero_model_states[0].frozen_param_shapes + frozen_param_fragments = zero_model_states[0].frozen_param_fragments + + if debug: + num_elem = sum(s.numel() for s in frozen_param_shapes.values()) + print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}') + + wanted_params = len(frozen_param_shapes) + wanted_numel = sum(s.numel() for s in frozen_param_shapes.values()) + avail_numel = sum([p.numel() for p in frozen_param_fragments.values()]) + print(f'Frozen params: Have {avail_numel} numels to process.') + 
print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params') + + total_params = 0 + total_numel = 0 + for name, shape in frozen_param_shapes.items(): + total_params += 1 + unpartitioned_numel = shape.numel() + total_numel += unpartitioned_numel + + state_dict[name] = frozen_param_fragments[name] + + if debug: + print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ") + + print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements") + + +def _has_callable(obj, fn): + attr = getattr(obj, fn, None) + return callable(attr) + + +def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states): + param_shapes = zero_model_states[0].param_shapes + + # Reconstruction protocol: + # + # XXX: document this + + if debug: + for i in range(world_size): + for j in range(len(fp32_flat_groups[0])): + print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}") + + # XXX: memory usage doubles here (zero2) + num_param_groups = len(fp32_flat_groups[0]) + merged_single_partition_of_fp32_groups = [] + for i in range(num_param_groups): + merged_partitions = [sd[i] for sd in fp32_flat_groups] + full_single_fp32_vector = torch.cat(merged_partitions, 0) + merged_single_partition_of_fp32_groups.append(full_single_fp32_vector) + avail_numel = sum( + [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups]) + + if debug: + wanted_params = sum([len(shapes) for shapes in param_shapes]) + wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes]) + # not asserting if there is a mismatch due to possible padding + print(f"Have {avail_numel} numels to process.") + print(f"Need {wanted_numel} numels in {wanted_params} params.") + + # params + # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support + # out-of-core computing solution + total_numel = 0 + total_params = 0 + for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups): + offset = 0 + avail_numel = full_single_fp32_vector.numel() + for name, shape in shapes.items(): + + unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape) + total_numel += unpartitioned_numel + total_params += 1 + + if debug: + print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ") + state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape) + offset += unpartitioned_numel + + # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and + # avail_numel can differ by anywhere between 0..2*world_size. 
Due to two unrelated complex + # paddings performed in the code it's almost impossible to predict the exact numbers w/o the + # live optimizer object, so we are checking that the numbers are within the right range + align_to = 2 * world_size + + def zero2_align(x): + return align_to * math.ceil(x / align_to) + + if debug: + print(f"original offset={offset}, avail_numel={avail_numel}") + + offset = zero2_align(offset) + avail_numel = zero2_align(avail_numel) + + if debug: + print(f"aligned offset={offset}, avail_numel={avail_numel}") + + # Sanity check + if offset != avail_numel: + raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong") + + print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements") + + +def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states, + exclude_frozen_parameters): + state_dict = OrderedDict() + + # buffers + buffers = zero_model_states[0].buffers + state_dict.update(buffers) + if debug: + print(f"added {len(buffers)} buffers") + + if not exclude_frozen_parameters: + _zero2_merge_frozen_params(state_dict, zero_model_states) + + _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states) + + # recover shared parameters + for pair in zero_model_states[0].shared_params: + if pair[1] in state_dict: + state_dict[pair[0]] = state_dict[pair[1]] + + return state_dict + + +def zero3_partitioned_param_info(unpartitioned_numel, world_size): + remainder = unpartitioned_numel % world_size + padding_numel = (world_size - remainder) if remainder else 0 + partitioned_numel = math.ceil(unpartitioned_numel / world_size) + return partitioned_numel, padding_numel + + +def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states): + if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0: + return + + if debug: + for i in range(world_size): + num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values()) + print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}') + + frozen_param_shapes = zero_model_states[0].frozen_param_shapes + wanted_params = len(frozen_param_shapes) + wanted_numel = sum(s.numel() for s in frozen_param_shapes.values()) + avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size + print(f'Frozen params: Have {avail_numel} numels to process.') + print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params') + + total_params = 0 + total_numel = 0 + for name, shape in zero_model_states[0].frozen_param_shapes.items(): + total_params += 1 + unpartitioned_numel = shape.numel() + total_numel += unpartitioned_numel + + param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states) + state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape) + + partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size) + + if debug: + print( + f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}" + ) + + print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements") + + +class GatheredTensor: + """ + A pseudo tensor that collects partitioned weights. + It is more memory efficient when there are multiple groups. 
+ """ + + def __init__(self, flat_groups, flat_groups_offset, offset, partitioned_numel, shape): + self.flat_groups = flat_groups + self.flat_groups_offset = flat_groups_offset + self.offset = offset + self.partitioned_numel = partitioned_numel + self.shape = shape + self.dtype = self.flat_groups[0][0].dtype + + def contiguous(self): + """ + Merge partitioned weights from flat_groups into a single tensor. + """ + end_idx = self.offset + self.partitioned_numel + world_size = len(self.flat_groups) + pad_flat_param_chunks = [] + + for rank_i in range(world_size): + # for each rank, we need to collect weights from related group/groups + flat_groups_at_rank_i = self.flat_groups[rank_i] + start_group_id = None + end_group_id = None + for group_id in range(len(self.flat_groups_offset)): + if self.flat_groups_offset[group_id] <= self.offset < self.flat_groups_offset[group_id + 1]: + start_group_id = group_id + if self.flat_groups_offset[group_id] < end_idx <= self.flat_groups_offset[group_id + 1]: + end_group_id = group_id + break + # collect weights from related group/groups + for group_id in range(start_group_id, end_group_id + 1): + flat_tensor = flat_groups_at_rank_i[group_id] + start_offset = self.offset - self.flat_groups_offset[group_id] + end_offset = min(end_idx, self.flat_groups_offset[group_id + 1]) - self.flat_groups_offset[group_id] + pad_flat_param_chunks.append(flat_tensor[start_offset:end_offset]) + + # collect weights from all ranks + pad_flat_param = torch.cat(pad_flat_param_chunks, dim=0) + param = pad_flat_param[:self.shape.numel()].view(self.shape).contiguous() + return param + + +def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states): + param_shapes = zero_model_states[0].param_shapes + avail_numel = sum([flat_group.numel() for flat_group in fp32_flat_groups[0]]) * world_size + + # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each + # param, re-consolidating each param, while dealing with padding if any + + # merge list of dicts, preserving order + param_shapes = {k: v for d in param_shapes for k, v in d.items()} + + if debug: + for i in range(world_size): + print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}") + + wanted_params = len(param_shapes) + wanted_numel = sum(shape.numel() for shape in param_shapes.values()) + # not asserting if there is a mismatch due to possible padding + avail_numel = fp32_flat_groups[0].numel() * world_size + print(f"Trainable params: Have {avail_numel} numels to process.") + print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.") + + # params + # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support + # out-of-core computing solution + offset = 0 + total_numel = 0 + total_params = 0 + flat_groups_offset = [0] + list(np.cumsum([flat_tensor.numel() for flat_tensor in fp32_flat_groups[0]])) + for name, shape in tqdm(param_shapes.items(), desc='Gathering sharded weights'): + unpartitioned_numel = shape.numel() + total_numel += unpartitioned_numel + total_params += 1 + partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size) + + if debug: + print( + f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}" + ) + + # memory efficient tensor + tensor = GatheredTensor(fp32_flat_groups, flat_groups_offset, offset, partitioned_numel, shape) + 
state_dict[name] = tensor + offset += partitioned_numel + + offset *= world_size + + # Sanity check + if offset != avail_numel: + raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong") + + print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements") + + +def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states, + exclude_frozen_parameters): + state_dict = OrderedDict() + + # buffers + buffers = zero_model_states[0].buffers + state_dict.update(buffers) + if debug: + print(f"added {len(buffers)} buffers") + + if not exclude_frozen_parameters: + _zero3_merge_frozen_params(state_dict, world_size, zero_model_states) + + _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states) + + # recover shared parameters + for pair in zero_model_states[0].shared_params: + if pair[1] in state_dict: + state_dict[pair[0]] = state_dict[pair[1]] + + return state_dict + + +def to_torch_tensor(state_dict, return_empty_tensor=False): + """ + Convert state_dict of GatheredTensor to torch tensor + """ + torch_state_dict = {} + converted_tensors = {} + for name, tensor in state_dict.items(): + tensor_id = id(tensor) + if tensor_id in converted_tensors: # shared tensors + shared_tensor = torch_state_dict[converted_tensors[tensor_id]] + torch_state_dict[name] = shared_tensor + else: + converted_tensors[tensor_id] = name + if return_empty_tensor: + torch_state_dict[name] = torch.empty(tensor.shape, dtype=tensor.dtype) + else: + torch_state_dict[name] = tensor.contiguous() + return torch_state_dict + + +def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, + tag=None, + exclude_frozen_parameters=False, + lazy_mode=False): + """ + Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with + ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example + via a model hub. + + Args: + - ``checkpoint_dir``: path to the desired checkpoint folder + - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14`` + - ``exclude_frozen_parameters``: exclude frozen parameters + - ``lazy_mode``: get state_dict in lazy mode. It returns a dict of pesduo tensor instead of torch tensor, which is more memory efficient. + Convert the pesduo tensor to torch tensor by ``.contiguous()`` + + Returns: + - pytorch ``state_dict`` + + A typical usage might be :: + + from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint + # do the training and checkpoint saving + state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu + model = model.cpu() # move to cpu + model.load_state_dict(state_dict) + # submit to model hub or save the model to share with others + + In this example the ``model`` will no longer be usable in the deepspeed context of the same + application. i.e. you will need to re-initialize the deepspeed engine, since + ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it. + + If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead. + + Note: the above usage may not work if your application doesn't have sufficient free CPU memory. + You may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with + the checkpoint. 
Or you can load state_dict in lazy mode :: + + from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint + state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, lazy_mode=True) # not on cpu + for name, lazy_tensor in state_dict.item(): + tensor = lazy_tensor.contiguous() # to cpu + print(name, tensor) + # del tensor to release memory if it no longer in use + """ + if tag is None: + latest_path = os.path.join(checkpoint_dir, 'latest') + if os.path.isfile(latest_path): + with open(latest_path, 'r') as fd: + tag = fd.read().strip() + else: + raise ValueError(f"Unable to find 'latest' file at {latest_path}") + + ds_checkpoint_dir = os.path.join(checkpoint_dir, tag) + + if not os.path.isdir(ds_checkpoint_dir): + raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist") + + state_dict = _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters) + if lazy_mode: + return state_dict + else: + return to_torch_tensor(state_dict) + + +def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, + output_dir, + max_shard_size="5GB", + safe_serialization=False, + tag=None, + exclude_frozen_parameters=False): + """ + Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be + loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed. + + Args: + - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``) + - ``output_dir``: directory to the pytorch fp32 state_dict output files + - ``max_shard_size``: the maximum size for a checkpoint before being sharded, default value is 5GB + - ``safe_serialization``: whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`). + - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14`` + - ``exclude_frozen_parameters``: exclude frozen parameters + """ + + # Dependency pre-check + if safe_serialization: + try: + from safetensors.torch import save_file + except ImportError: + print('If you want to use `safe_serialization`, please `pip install safetensors`') + raise + if max_shard_size is not None: + try: + from huggingface_hub import split_torch_state_dict_into_shards + except ImportError: + print('If you want to use `max_shard_size`, please `pip install huggingface_hub`') + raise + + # Convert zero checkpoint to state_dict + state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, + tag, + exclude_frozen_parameters, + lazy_mode=True) + + # Shard the model if it is too big. 
+ weights_name = "model.safetensors" if safe_serialization else "pytorch_model.bin" + if max_shard_size is not None: + filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors") + # an memory-efficient approach for sharding + empty_state_dict = to_torch_tensor(state_dict, return_empty_tensor=True) + state_dict_split = split_torch_state_dict_into_shards(empty_state_dict, + filename_pattern=filename_pattern, + max_shard_size=max_shard_size) + else: + from collections import namedtuple + StateDictSplit = namedtuple("StateDictSplit", ["is_sharded", "filename_to_tensors"]) + state_dict_split = StateDictSplit(is_sharded=False, + filename_to_tensors={weights_name: list(state_dict.keys())}) + + # Save the model by shard + os.makedirs(output_dir, exist_ok=True) + filename_to_tensors = state_dict_split.filename_to_tensors.items() + for shard_file, tensors in tqdm(filename_to_tensors, desc="Saving checkpoint shards"): + shard_state_dict = {tensor_name: state_dict[tensor_name] for tensor_name in tensors} + shard_state_dict = to_torch_tensor(shard_state_dict) + output_path = os.path.join(output_dir, shard_file) + if safe_serialization: + save_file(shard_state_dict, output_path, metadata={"format": "pt"}) + else: + torch.save(shard_state_dict, output_path) + # release the memory of current shard + for tensor_name in list(shard_state_dict.keys()): + del state_dict[tensor_name] + del shard_state_dict[tensor_name] + del shard_state_dict + gc.collect() + + # Save index if sharded + if state_dict_split.is_sharded: + index = { + "metadata": state_dict_split.metadata, + "weight_map": state_dict_split.tensor_to_filename, + } + save_index_file = "model.safetensors.index.json" if safe_serialization else "pytorch_model.bin.index.json" + save_index_file = os.path.join(output_dir, save_index_file) + with open(save_index_file, "w", encoding="utf-8") as f: + content = json.dumps(index, indent=2, sort_keys=True) + "\n" + f.write(content) + + +def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None): + """ + 1. Put the provided model to cpu + 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` + 3. Load it into the provided model + + Args: + - ``model``: the model object to update + - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``) + - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14`` + + Returns: + - ``model`: modified model + + Make sure you have plenty of CPU memory available before you call this function. If you don't + have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it + conveniently placed for you in the checkpoint folder. + + A typical usage might be :: + + from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint + model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir) + # submit to model hub or save the model to share with others + + Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context + of the same application. i.e. you will need to re-initialize the deepspeed engine, since + ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it. 
+ + """ + logger.info(f"Extracting fp32 weights") + state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag) + + logger.info(f"Overwriting model with fp32 weights") + model = model.cpu() + model.load_state_dict(state_dict, strict=False) + + return model + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("checkpoint_dir", + type=str, + help="path to the desired checkpoint folder, e.g., path/checkpoint-12") + parser.add_argument("output_dir", + type=str, + help="directory to the pytorch fp32 state_dict output files" + "(e.g. path/checkpoint-12-output/)") + parser.add_argument( + "--max_shard_size", + type=str, + default="5GB", + help="The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size" + "lower than this size. If expressed as a string, needs to be digits followed by a unit (like `5MB`" + "We default it to 5GB in order for models to be able to run easily on free-tier google colab instances" + "without CPU OOM issues.") + parser.add_argument( + "--safe_serialization", + default=False, + action='store_true', + help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).") + parser.add_argument("-t", + "--tag", + type=str, + default=None, + help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1") + parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters") + parser.add_argument("-d", "--debug", action='store_true', help="enable debug") + args = parser.parse_args() + + debug = args.debug + + convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, + args.output_dir, + max_shard_size=args.max_shard_size, + safe_serialization=args.safe_serialization, + tag=args.tag, + exclude_frozen_parameters=args.exclude_frozen_parameters) diff --git a/B_3/checkpoint-93/README.md b/B_3/checkpoint-93/README.md new file mode 100644 index 0000000000000000000000000000000000000000..0673d0313e0251ef70dd85d3ff379913ab5506d9 --- /dev/null +++ b/B_3/checkpoint-93/README.md @@ -0,0 +1,202 @@ +--- +base_model: /workspace/meta-llama/Llama-3.1-70B +library_name: peft +--- + +# Model Card for Model ID + + + + + +## Model Details + +### Model Description + + + + + +- **Developed by:** [More Information Needed] +- **Funded by [optional]:** [More Information Needed] +- **Shared by [optional]:** [More Information Needed] +- **Model type:** [More Information Needed] +- **Language(s) (NLP):** [More Information Needed] +- **License:** [More Information Needed] +- **Finetuned from model [optional]:** [More Information Needed] + +### Model Sources [optional] + + + +- **Repository:** [More Information Needed] +- **Paper [optional]:** [More Information Needed] +- **Demo [optional]:** [More Information Needed] + +## Uses + + + +### Direct Use + + + +[More Information Needed] + +### Downstream Use [optional] + + + +[More Information Needed] + +### Out-of-Scope Use + + + +[More Information Needed] + +## Bias, Risks, and Limitations + + + +[More Information Needed] + +### Recommendations + + + +Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. + +## How to Get Started with the Model + +Use the code below to get started with the model. 
+ +[More Information Needed] + +## Training Details + +### Training Data + + + +[More Information Needed] + +### Training Procedure + + + +#### Preprocessing [optional] + +[More Information Needed] + + +#### Training Hyperparameters + +- **Training regime:** [More Information Needed] + +#### Speeds, Sizes, Times [optional] + + + +[More Information Needed] + +## Evaluation + + + +### Testing Data, Factors & Metrics + +#### Testing Data + + + +[More Information Needed] + +#### Factors + + + +[More Information Needed] + +#### Metrics + + + +[More Information Needed] + +### Results + +[More Information Needed] + +#### Summary + + + +## Model Examination [optional] + + + +[More Information Needed] + +## Environmental Impact + + + +Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). + +- **Hardware Type:** [More Information Needed] +- **Hours used:** [More Information Needed] +- **Cloud Provider:** [More Information Needed] +- **Compute Region:** [More Information Needed] +- **Carbon Emitted:** [More Information Needed] + +## Technical Specifications [optional] + +### Model Architecture and Objective + +[More Information Needed] + +### Compute Infrastructure + +[More Information Needed] + +#### Hardware + +[More Information Needed] + +#### Software + +[More Information Needed] + +## Citation [optional] + + + +**BibTeX:** + +[More Information Needed] + +**APA:** + +[More Information Needed] + +## Glossary [optional] + + + +[More Information Needed] + +## More Information [optional] + +[More Information Needed] + +## Model Card Authors [optional] + +[More Information Needed] + +## Model Card Contact + +[More Information Needed] +### Framework versions + +- PEFT 0.15.2 \ No newline at end of file diff --git a/B_3/checkpoint-93/adapter_config.json b/B_3/checkpoint-93/adapter_config.json new file mode 100644 index 0000000000000000000000000000000000000000..9f0d6a37659f3015d3fdd427235805c5bbb829e9 --- /dev/null +++ b/B_3/checkpoint-93/adapter_config.json @@ -0,0 +1,39 @@ +{ + "alpha_pattern": {}, + "auto_mapping": null, + "base_model_name_or_path": "/workspace/meta-llama/Llama-3.1-70B", + "bias": "none", + "corda_config": null, + "eva_config": null, + "exclude_modules": null, + "fan_in_fan_out": false, + "inference_mode": true, + "init_lora_weights": true, + "layer_replication": null, + "layers_pattern": null, + "layers_to_transform": null, + "loftq_config": {}, + "lora_alpha": 1024, + "lora_bias": false, + "lora_dropout": 0, + "megatron_config": null, + "megatron_core": "megatron.core", + "modules_to_save": null, + "peft_type": "LORA", + "r": 256, + "rank_pattern": {}, + "revision": null, + "target_modules": [ + "down_proj", + "q_proj", + "k_proj", + "o_proj", + "v_proj", + "gate_proj", + "up_proj" + ], + "task_type": "CAUSAL_LM", + "trainable_token_indices": null, + "use_dora": false, + "use_rslora": false +} \ No newline at end of file diff --git a/B_3/checkpoint-93/adapter_model.safetensors b/B_3/checkpoint-93/adapter_model.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..f6818a0d0475fca600b2a62442a4e87bcbfb9c5c --- /dev/null +++ b/B_3/checkpoint-93/adapter_model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3e6fd6d329f1f8f04367bffc4d168e20232c43840479062c0bc5240e89ea81f5 +size 6627156248 diff --git a/B_3/checkpoint-93/chat_template.jinja b/B_3/checkpoint-93/chat_template.jinja new file mode 
100644 index 0000000000000000000000000000000000000000..c3af804a3bddb95bf03b7d349234a039a27de382 --- /dev/null +++ b/B_3/checkpoint-93/chat_template.jinja @@ -0,0 +1,7 @@ +{{ '<|begin_of_text|>' }}{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% endif %}{% if system_message is defined %}{{ '<|start_header_id|>system<|end_header_id|> + +' + system_message + '<|eot_id|>' }}{% endif %}{% for message in loop_messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '<|start_header_id|>user<|end_header_id|> + +' + content + '<|eot_id|><|start_header_id|>assistant<|end_header_id|> + +' }}{% elif message['role'] == 'assistant' %}{{ content + '<|eot_id|>' }}{% endif %}{% endfor %} \ No newline at end of file diff --git a/B_3/checkpoint-93/global_step90/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt b/B_3/checkpoint-93/global_step90/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..971d2c7074bf23f0acd75a832d0da51605261415 --- /dev/null +++ b/B_3/checkpoint-93/global_step90/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9be844e5ea14c8380a2e3d502e1520e851a25d384303461298b2adf49dc5a9aa +size 9940504945 diff --git a/B_3/checkpoint-93/global_step90/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt b/B_3/checkpoint-93/global_step90/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..6069751e77a0d18680581148f6fee056b264308d --- /dev/null +++ b/B_3/checkpoint-93/global_step90/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ef2f1ae0de56ee1a039476752fdf3b27cff49bd41a37466e0f865965ead3e318 +size 9940504945 diff --git a/B_3/checkpoint-93/global_step90/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt b/B_3/checkpoint-93/global_step90/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..e1b7f52ec368ac91d5d82f9a0c677ea77d6f6b3e --- /dev/null +++ b/B_3/checkpoint-93/global_step90/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:56df7d9208af1cc7d77d6c10d6ec3977e832c24720b82212f1fb52081d83ebf2 +size 9940504945 diff --git a/B_3/checkpoint-93/global_step90/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt b/B_3/checkpoint-93/global_step90/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..868de31da9b5a03a92c6e9d3c81075e4da23ab2e --- /dev/null +++ b/B_3/checkpoint-93/global_step90/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:087a00cea550a88c11a7b7429a278958c5e894b0fa8b5cd0ced2b5f77115cab1 +size 9940504945 diff --git a/B_3/checkpoint-93/global_step90/zero_pp_rank_0_mp_rank_00_model_states.pt b/B_3/checkpoint-93/global_step90/zero_pp_rank_0_mp_rank_00_model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..13c9eb928e42fd5054a2ed8c34f7f8970d302be9 --- /dev/null +++ b/B_3/checkpoint-93/global_step90/zero_pp_rank_0_mp_rank_00_model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dedc42677cfee9490aa4bf4302dd4dbd4a1be2d32b3e19db16600e92daae4461 +size 1109201 diff --git 
a/B_3/checkpoint-93/global_step90/zero_pp_rank_1_mp_rank_00_model_states.pt b/B_3/checkpoint-93/global_step90/zero_pp_rank_1_mp_rank_00_model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..8542ebc66d1c3f7e9b72e9b65370c634a647ac7f --- /dev/null +++ b/B_3/checkpoint-93/global_step90/zero_pp_rank_1_mp_rank_00_model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f14208b0e24c126047c44272f6e366804380226f1ba0161a845bc327334a99b6 +size 1109201 diff --git a/B_3/checkpoint-93/global_step90/zero_pp_rank_2_mp_rank_00_model_states.pt b/B_3/checkpoint-93/global_step90/zero_pp_rank_2_mp_rank_00_model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..d948f272d4b2251608123831e892bc4bbea2c34f --- /dev/null +++ b/B_3/checkpoint-93/global_step90/zero_pp_rank_2_mp_rank_00_model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aaab5f8afb3a8658230904cfc88eb713e7b03509c0dd5e7a848221a4b469e768 +size 1109201 diff --git a/B_3/checkpoint-93/global_step90/zero_pp_rank_3_mp_rank_00_model_states.pt b/B_3/checkpoint-93/global_step90/zero_pp_rank_3_mp_rank_00_model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..70cf3e986347a473d06d4f217da1142ada3b1d6f --- /dev/null +++ b/B_3/checkpoint-93/global_step90/zero_pp_rank_3_mp_rank_00_model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:af4bee03e582dcbaf682f37f5a2a621d5280d60526ed38a75dd2619cd11c14e5 +size 1109201 diff --git a/B_3/checkpoint-93/latest b/B_3/checkpoint-93/latest new file mode 100644 index 0000000000000000000000000000000000000000..8e7a337e2cb23bf07023d223dd647df2d25f0fc1 --- /dev/null +++ b/B_3/checkpoint-93/latest @@ -0,0 +1 @@ +global_step90 \ No newline at end of file diff --git a/B_3/checkpoint-93/rng_state_0.pth b/B_3/checkpoint-93/rng_state_0.pth new file mode 100644 index 0000000000000000000000000000000000000000..afcc75952a72312775719a5b7436d424ac06a7be --- /dev/null +++ b/B_3/checkpoint-93/rng_state_0.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:485c3eb7b8ebe39984117ce19c256db4c11dba6dcd8d32908be6d94ca7a4d529 +size 15429 diff --git a/B_3/checkpoint-93/rng_state_1.pth b/B_3/checkpoint-93/rng_state_1.pth new file mode 100644 index 0000000000000000000000000000000000000000..187216abeb18820101c39ef4d7489769e9c8b018 --- /dev/null +++ b/B_3/checkpoint-93/rng_state_1.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f7cd0007903b1a0344b4ba578f518e1366c3607febc6b70080704119da49e8fc +size 15429 diff --git a/B_3/checkpoint-93/rng_state_2.pth b/B_3/checkpoint-93/rng_state_2.pth new file mode 100644 index 0000000000000000000000000000000000000000..cfc3b0628b54f6466425b592bb5d9cd311937f5a --- /dev/null +++ b/B_3/checkpoint-93/rng_state_2.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a453120913ecd5657ebaf5fb69504cf1a158e4ff4015f66acf4be6e06bc270e0 +size 15429 diff --git a/B_3/checkpoint-93/rng_state_3.pth b/B_3/checkpoint-93/rng_state_3.pth new file mode 100644 index 0000000000000000000000000000000000000000..b116710f2349218d380226e98e466d0cb6e39731 --- /dev/null +++ b/B_3/checkpoint-93/rng_state_3.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8c18edf48cc3adf4eb70b31f8e96972c6e25718fd8fcd0866b9ace124a9269d2 +size 15429 diff --git a/B_3/checkpoint-93/scheduler.pt b/B_3/checkpoint-93/scheduler.pt new file mode 100644 index 
0000000000000000000000000000000000000000..230895657560816e491073f69868114c63f1b4e2 --- /dev/null +++ b/B_3/checkpoint-93/scheduler.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4c929e38cfa2ce4dfc5e6186aa5d113d3bf383809f7126d6137845506822676c +size 1401 diff --git a/B_3/checkpoint-93/special_tokens_map.json b/B_3/checkpoint-93/special_tokens_map.json new file mode 100644 index 0000000000000000000000000000000000000000..14daf4588e61b4e4983af0fccaba4d5500c0977c --- /dev/null +++ b/B_3/checkpoint-93/special_tokens_map.json @@ -0,0 +1,26 @@ +{ + "additional_special_tokens": [ + { + "content": "<|eom_id|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + } + ], + "bos_token": { + "content": "<|begin_of_text|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + }, + "eos_token": { + "content": "<|eot_id|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + }, + "pad_token": "<|eot_id|>" +} diff --git a/B_3/checkpoint-93/tokenizer.json b/B_3/checkpoint-93/tokenizer.json new file mode 100644 index 0000000000000000000000000000000000000000..1c1d8d5c9024994f1d3b00f9662b8dd89ca13cf2 --- /dev/null +++ b/B_3/checkpoint-93/tokenizer.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6b9e4e7fb171f92fd137b777cc2714bf87d11576700a1dcd7a399e7bbe39537b +size 17209920 diff --git a/B_3/checkpoint-93/tokenizer_config.json b/B_3/checkpoint-93/tokenizer_config.json new file mode 100644 index 0000000000000000000000000000000000000000..d1e1ea9bc94ff1132f136710751e37fb23a64347 --- /dev/null +++ b/B_3/checkpoint-93/tokenizer_config.json @@ -0,0 +1,2068 @@ +{ + "added_tokens_decoder": { + "128000": { + "content": "<|begin_of_text|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128001": { + "content": "<|end_of_text|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128002": { + "content": "<|reserved_special_token_0|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128003": { + "content": "<|reserved_special_token_1|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128004": { + "content": "<|finetune_right_pad_id|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128005": { + "content": "<|reserved_special_token_2|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128006": { + "content": "<|start_header_id|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128007": { + "content": "<|end_header_id|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128008": { + "content": "<|eom_id|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128009": { + "content": "<|eot_id|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128010": { + "content": "<|python_tag|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128011": { + "content": "<|reserved_special_token_3|>", + 
"lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128012": { + "content": "<|reserved_special_token_4|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128013": { + "content": "<|reserved_special_token_5|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128014": { + "content": "<|reserved_special_token_6|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128015": { + "content": "<|reserved_special_token_7|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128016": { + "content": "<|reserved_special_token_8|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128017": { + "content": "<|reserved_special_token_9|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128018": { + "content": "<|reserved_special_token_10|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128019": { + "content": "<|reserved_special_token_11|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128020": { + "content": "<|reserved_special_token_12|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128021": { + "content": "<|reserved_special_token_13|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128022": { + "content": "<|reserved_special_token_14|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128023": { + "content": "<|reserved_special_token_15|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128024": { + "content": "<|reserved_special_token_16|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128025": { + "content": "<|reserved_special_token_17|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128026": { + "content": "<|reserved_special_token_18|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128027": { + "content": "<|reserved_special_token_19|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128028": { + "content": "<|reserved_special_token_20|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128029": { + "content": "<|reserved_special_token_21|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128030": { + "content": "<|reserved_special_token_22|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128031": { + "content": "<|reserved_special_token_23|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128032": { + "content": "<|reserved_special_token_24|>", + "lstrip": false, + "normalized": 
false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128033": { + "content": "<|reserved_special_token_25|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128034": { + "content": "<|reserved_special_token_26|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128035": { + "content": "<|reserved_special_token_27|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128036": { + "content": "<|reserved_special_token_28|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128037": { + "content": "<|reserved_special_token_29|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128038": { + "content": "<|reserved_special_token_30|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128039": { + "content": "<|reserved_special_token_31|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128040": { + "content": "<|reserved_special_token_32|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128041": { + "content": "<|reserved_special_token_33|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128042": { + "content": "<|reserved_special_token_34|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128043": { + "content": "<|reserved_special_token_35|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128044": { + "content": "<|reserved_special_token_36|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128045": { + "content": "<|reserved_special_token_37|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128046": { + "content": "<|reserved_special_token_38|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128047": { + "content": "<|reserved_special_token_39|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128048": { + "content": "<|reserved_special_token_40|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128049": { + "content": "<|reserved_special_token_41|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128050": { + "content": "<|reserved_special_token_42|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128051": { + "content": "<|reserved_special_token_43|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128052": { + "content": "<|reserved_special_token_44|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128053": { + "content": "<|reserved_special_token_45|>", + "lstrip": false, + "normalized": false, + "rstrip": false, 
+ "single_word": false, + "special": true + }, + "128054": { + "content": "<|reserved_special_token_46|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128055": { + "content": "<|reserved_special_token_47|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128056": { + "content": "<|reserved_special_token_48|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128057": { + "content": "<|reserved_special_token_49|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128058": { + "content": "<|reserved_special_token_50|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128059": { + "content": "<|reserved_special_token_51|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128060": { + "content": "<|reserved_special_token_52|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128061": { + "content": "<|reserved_special_token_53|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128062": { + "content": "<|reserved_special_token_54|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128063": { + "content": "<|reserved_special_token_55|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128064": { + "content": "<|reserved_special_token_56|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128065": { + "content": "<|reserved_special_token_57|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128066": { + "content": "<|reserved_special_token_58|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128067": { + "content": "<|reserved_special_token_59|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128068": { + "content": "<|reserved_special_token_60|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128069": { + "content": "<|reserved_special_token_61|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128070": { + "content": "<|reserved_special_token_62|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128071": { + "content": "<|reserved_special_token_63|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128072": { + "content": "<|reserved_special_token_64|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128073": { + "content": "<|reserved_special_token_65|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128074": { + "content": "<|reserved_special_token_66|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + 
"special": true + }, + "128075": { + "content": "<|reserved_special_token_67|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128076": { + "content": "<|reserved_special_token_68|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128077": { + "content": "<|reserved_special_token_69|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128078": { + "content": "<|reserved_special_token_70|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128079": { + "content": "<|reserved_special_token_71|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128080": { + "content": "<|reserved_special_token_72|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128081": { + "content": "<|reserved_special_token_73|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128082": { + "content": "<|reserved_special_token_74|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128083": { + "content": "<|reserved_special_token_75|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128084": { + "content": "<|reserved_special_token_76|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128085": { + "content": "<|reserved_special_token_77|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128086": { + "content": "<|reserved_special_token_78|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128087": { + "content": "<|reserved_special_token_79|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128088": { + "content": "<|reserved_special_token_80|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128089": { + "content": "<|reserved_special_token_81|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128090": { + "content": "<|reserved_special_token_82|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128091": { + "content": "<|reserved_special_token_83|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128092": { + "content": "<|reserved_special_token_84|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128093": { + "content": "<|reserved_special_token_85|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128094": { + "content": "<|reserved_special_token_86|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128095": { + "content": "<|reserved_special_token_87|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + 
"128096": { + "content": "<|reserved_special_token_88|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128097": { + "content": "<|reserved_special_token_89|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128098": { + "content": "<|reserved_special_token_90|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128099": { + "content": "<|reserved_special_token_91|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128100": { + "content": "<|reserved_special_token_92|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128101": { + "content": "<|reserved_special_token_93|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128102": { + "content": "<|reserved_special_token_94|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128103": { + "content": "<|reserved_special_token_95|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128104": { + "content": "<|reserved_special_token_96|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128105": { + "content": "<|reserved_special_token_97|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128106": { + "content": "<|reserved_special_token_98|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128107": { + "content": "<|reserved_special_token_99|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128108": { + "content": "<|reserved_special_token_100|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128109": { + "content": "<|reserved_special_token_101|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128110": { + "content": "<|reserved_special_token_102|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128111": { + "content": "<|reserved_special_token_103|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128112": { + "content": "<|reserved_special_token_104|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128113": { + "content": "<|reserved_special_token_105|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128114": { + "content": "<|reserved_special_token_106|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128115": { + "content": "<|reserved_special_token_107|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128116": { + "content": "<|reserved_special_token_108|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128117": { + 
"content": "<|reserved_special_token_109|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128118": { + "content": "<|reserved_special_token_110|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128119": { + "content": "<|reserved_special_token_111|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128120": { + "content": "<|reserved_special_token_112|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128121": { + "content": "<|reserved_special_token_113|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128122": { + "content": "<|reserved_special_token_114|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128123": { + "content": "<|reserved_special_token_115|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128124": { + "content": "<|reserved_special_token_116|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128125": { + "content": "<|reserved_special_token_117|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128126": { + "content": "<|reserved_special_token_118|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128127": { + "content": "<|reserved_special_token_119|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128128": { + "content": "<|reserved_special_token_120|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128129": { + "content": "<|reserved_special_token_121|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128130": { + "content": "<|reserved_special_token_122|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128131": { + "content": "<|reserved_special_token_123|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128132": { + "content": "<|reserved_special_token_124|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128133": { + "content": "<|reserved_special_token_125|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128134": { + "content": "<|reserved_special_token_126|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128135": { + "content": "<|reserved_special_token_127|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128136": { + "content": "<|reserved_special_token_128|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128137": { + "content": "<|reserved_special_token_129|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128138": { + 
"content": "<|reserved_special_token_130|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128139": { + "content": "<|reserved_special_token_131|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128140": { + "content": "<|reserved_special_token_132|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128141": { + "content": "<|reserved_special_token_133|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128142": { + "content": "<|reserved_special_token_134|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128143": { + "content": "<|reserved_special_token_135|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128144": { + "content": "<|reserved_special_token_136|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128145": { + "content": "<|reserved_special_token_137|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128146": { + "content": "<|reserved_special_token_138|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128147": { + "content": "<|reserved_special_token_139|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128148": { + "content": "<|reserved_special_token_140|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128149": { + "content": "<|reserved_special_token_141|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128150": { + "content": "<|reserved_special_token_142|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128151": { + "content": "<|reserved_special_token_143|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128152": { + "content": "<|reserved_special_token_144|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128153": { + "content": "<|reserved_special_token_145|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128154": { + "content": "<|reserved_special_token_146|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128155": { + "content": "<|reserved_special_token_147|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128156": { + "content": "<|reserved_special_token_148|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128157": { + "content": "<|reserved_special_token_149|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128158": { + "content": "<|reserved_special_token_150|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128159": { + 
"content": "<|reserved_special_token_151|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128160": { + "content": "<|reserved_special_token_152|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128161": { + "content": "<|reserved_special_token_153|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128162": { + "content": "<|reserved_special_token_154|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128163": { + "content": "<|reserved_special_token_155|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128164": { + "content": "<|reserved_special_token_156|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128165": { + "content": "<|reserved_special_token_157|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128166": { + "content": "<|reserved_special_token_158|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128167": { + "content": "<|reserved_special_token_159|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128168": { + "content": "<|reserved_special_token_160|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128169": { + "content": "<|reserved_special_token_161|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128170": { + "content": "<|reserved_special_token_162|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128171": { + "content": "<|reserved_special_token_163|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128172": { + "content": "<|reserved_special_token_164|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128173": { + "content": "<|reserved_special_token_165|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128174": { + "content": "<|reserved_special_token_166|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128175": { + "content": "<|reserved_special_token_167|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128176": { + "content": "<|reserved_special_token_168|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128177": { + "content": "<|reserved_special_token_169|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128178": { + "content": "<|reserved_special_token_170|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128179": { + "content": "<|reserved_special_token_171|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128180": { + 
"content": "<|reserved_special_token_172|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128181": { + "content": "<|reserved_special_token_173|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128182": { + "content": "<|reserved_special_token_174|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128183": { + "content": "<|reserved_special_token_175|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128184": { + "content": "<|reserved_special_token_176|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128185": { + "content": "<|reserved_special_token_177|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128186": { + "content": "<|reserved_special_token_178|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128187": { + "content": "<|reserved_special_token_179|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128188": { + "content": "<|reserved_special_token_180|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128189": { + "content": "<|reserved_special_token_181|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128190": { + "content": "<|reserved_special_token_182|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128191": { + "content": "<|reserved_special_token_183|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128192": { + "content": "<|reserved_special_token_184|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128193": { + "content": "<|reserved_special_token_185|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128194": { + "content": "<|reserved_special_token_186|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128195": { + "content": "<|reserved_special_token_187|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128196": { + "content": "<|reserved_special_token_188|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128197": { + "content": "<|reserved_special_token_189|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128198": { + "content": "<|reserved_special_token_190|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128199": { + "content": "<|reserved_special_token_191|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128200": { + "content": "<|reserved_special_token_192|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128201": { + 
"content": "<|reserved_special_token_193|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128202": { + "content": "<|reserved_special_token_194|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128203": { + "content": "<|reserved_special_token_195|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128204": { + "content": "<|reserved_special_token_196|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128205": { + "content": "<|reserved_special_token_197|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128206": { + "content": "<|reserved_special_token_198|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128207": { + "content": "<|reserved_special_token_199|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128208": { + "content": "<|reserved_special_token_200|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128209": { + "content": "<|reserved_special_token_201|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128210": { + "content": "<|reserved_special_token_202|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128211": { + "content": "<|reserved_special_token_203|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128212": { + "content": "<|reserved_special_token_204|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128213": { + "content": "<|reserved_special_token_205|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128214": { + "content": "<|reserved_special_token_206|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128215": { + "content": "<|reserved_special_token_207|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128216": { + "content": "<|reserved_special_token_208|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128217": { + "content": "<|reserved_special_token_209|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128218": { + "content": "<|reserved_special_token_210|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128219": { + "content": "<|reserved_special_token_211|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128220": { + "content": "<|reserved_special_token_212|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128221": { + "content": "<|reserved_special_token_213|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128222": { + 
"content": "<|reserved_special_token_214|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128223": { + "content": "<|reserved_special_token_215|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128224": { + "content": "<|reserved_special_token_216|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128225": { + "content": "<|reserved_special_token_217|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128226": { + "content": "<|reserved_special_token_218|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128227": { + "content": "<|reserved_special_token_219|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128228": { + "content": "<|reserved_special_token_220|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128229": { + "content": "<|reserved_special_token_221|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128230": { + "content": "<|reserved_special_token_222|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128231": { + "content": "<|reserved_special_token_223|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128232": { + "content": "<|reserved_special_token_224|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128233": { + "content": "<|reserved_special_token_225|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128234": { + "content": "<|reserved_special_token_226|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128235": { + "content": "<|reserved_special_token_227|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128236": { + "content": "<|reserved_special_token_228|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128237": { + "content": "<|reserved_special_token_229|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128238": { + "content": "<|reserved_special_token_230|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128239": { + "content": "<|reserved_special_token_231|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128240": { + "content": "<|reserved_special_token_232|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128241": { + "content": "<|reserved_special_token_233|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128242": { + "content": "<|reserved_special_token_234|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128243": { + 
"content": "<|reserved_special_token_235|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128244": { + "content": "<|reserved_special_token_236|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128245": { + "content": "<|reserved_special_token_237|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128246": { + "content": "<|reserved_special_token_238|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128247": { + "content": "<|reserved_special_token_239|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128248": { + "content": "<|reserved_special_token_240|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128249": { + "content": "<|reserved_special_token_241|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128250": { + "content": "<|reserved_special_token_242|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128251": { + "content": "<|reserved_special_token_243|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128252": { + "content": "<|reserved_special_token_244|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128253": { + "content": "<|reserved_special_token_245|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128254": { + "content": "<|reserved_special_token_246|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128255": { + "content": "<|reserved_special_token_247|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + } + }, + "additional_special_tokens": [ + "<|eom_id|>" + ], + "bos_token": "<|begin_of_text|>", + "clean_up_tokenization_spaces": true, + "eos_token": "<|eot_id|>", + "extra_special_tokens": {}, + "model_input_names": [ + "input_ids", + "attention_mask" + ], + "model_max_length": 131072, + "pad_token": "<|eot_id|>", + "padding_side": "right", + "split_special_tokens": false, + "tokenizer_class": "PreTrainedTokenizerFast" +} diff --git a/B_3/checkpoint-93/trainer_state.json b/B_3/checkpoint-93/trainer_state.json new file mode 100644 index 0000000000000000000000000000000000000000..3cef866ea33e5b40d6acdca1d9c65f855626df15 --- /dev/null +++ b/B_3/checkpoint-93/trainer_state.json @@ -0,0 +1,964 @@ +{ + "best_global_step": null, + "best_metric": null, + "best_model_checkpoint": null, + "epoch": 5.838709677419355, + "eval_steps": 500, + "global_step": 93, + "is_hyper_param_search": false, + "is_local_process_zero": true, + "is_world_process_zero": true, + "log_history": [ + { + "epoch": 0.06451612903225806, + "grad_norm": 2.073743358513411, + "learning_rate": 0.0001, + "loss": 1.3591, + "num_input_tokens_seen": 262016, + "step": 1, + "train_runtime": 148.9807, + "train_tokens_per_second": 1758.725 + }, + { + "epoch": 0.12903225806451613, + "grad_norm": 1.7083966194301452, + "learning_rate": 9.999036202410325e-05, + "loss": 1.4321, + "num_input_tokens_seen": 
524032, + "step": 2, + "train_runtime": 298.073, + "train_tokens_per_second": 1758.066 + }, + { + "epoch": 0.1935483870967742, + "grad_norm": 0.8154519457271471, + "learning_rate": 9.996145181203615e-05, + "loss": 1.3036, + "num_input_tokens_seen": 786048, + "step": 3, + "train_runtime": 449.085, + "train_tokens_per_second": 1750.332 + }, + { + "epoch": 0.25806451612903225, + "grad_norm": 1.6435854418760425, + "learning_rate": 9.991328050923581e-05, + "loss": 1.3119, + "num_input_tokens_seen": 1048064, + "step": 4, + "train_runtime": 600.5729, + "train_tokens_per_second": 1745.107 + }, + { + "epoch": 0.3225806451612903, + "grad_norm": 1.2034889741595727, + "learning_rate": 9.98458666866564e-05, + "loss": 1.21, + "num_input_tokens_seen": 1310080, + "step": 5, + "train_runtime": 752.2986, + "train_tokens_per_second": 1741.436 + }, + { + "epoch": 0.3870967741935484, + "grad_norm": 0.6081797430818349, + "learning_rate": 9.975923633360985e-05, + "loss": 1.1713, + "num_input_tokens_seen": 1572096, + "step": 6, + "train_runtime": 903.8941, + "train_tokens_per_second": 1739.248 + }, + { + "epoch": 0.45161290322580644, + "grad_norm": 0.43860201273645816, + "learning_rate": 9.965342284774632e-05, + "loss": 1.1687, + "num_input_tokens_seen": 1834112, + "step": 7, + "train_runtime": 1055.454, + "train_tokens_per_second": 1737.747 + }, + { + "epoch": 0.5161290322580645, + "grad_norm": 0.3489614049191675, + "learning_rate": 9.952846702217886e-05, + "loss": 1.1151, + "num_input_tokens_seen": 2096128, + "step": 8, + "train_runtime": 1207.0142, + "train_tokens_per_second": 1736.623 + }, + { + "epoch": 0.5806451612903226, + "grad_norm": 3.182590493238281, + "learning_rate": 9.938441702975689e-05, + "loss": 1.0694, + "num_input_tokens_seen": 2358144, + "step": 9, + "train_runtime": 1358.12, + "train_tokens_per_second": 1736.33 + }, + { + "epoch": 0.6451612903225806, + "grad_norm": 0.40091824929141967, + "learning_rate": 9.922132840449459e-05, + "loss": 1.0431, + "num_input_tokens_seen": 2620160, + "step": 10, + "train_runtime": 1509.1349, + "train_tokens_per_second": 1736.2 + }, + { + "epoch": 0.7096774193548387, + "grad_norm": 0.4778713425435316, + "learning_rate": 9.903926402016153e-05, + "loss": 1.0812, + "num_input_tokens_seen": 2882176, + "step": 11, + "train_runtime": 1661.0314, + "train_tokens_per_second": 1735.172 + }, + { + "epoch": 0.7741935483870968, + "grad_norm": 0.3361596465836105, + "learning_rate": 9.883829406604363e-05, + "loss": 1.0296, + "num_input_tokens_seen": 3144192, + "step": 12, + "train_runtime": 1812.2372, + "train_tokens_per_second": 1734.978 + }, + { + "epoch": 0.8387096774193549, + "grad_norm": 0.30484937194478195, + "learning_rate": 9.861849601988383e-05, + "loss": 0.9806, + "num_input_tokens_seen": 3406208, + "step": 13, + "train_runtime": 1963.4393, + "train_tokens_per_second": 1734.817 + }, + { + "epoch": 0.9032258064516129, + "grad_norm": 0.31491494208058013, + "learning_rate": 9.837995461801299e-05, + "loss": 1.0244, + "num_input_tokens_seen": 3668224, + "step": 14, + "train_runtime": 2115.2404, + "train_tokens_per_second": 1734.188 + }, + { + "epoch": 0.967741935483871, + "grad_norm": 0.2555325387827011, + "learning_rate": 9.812276182268236e-05, + "loss": 0.9701, + "num_input_tokens_seen": 3930240, + "step": 15, + "train_runtime": 2266.9917, + "train_tokens_per_second": 1733.681 + }, + { + "epoch": 1.0, + "grad_norm": 0.2555325387827011, + "learning_rate": 9.784701678661045e-05, + "loss": 0.9885, + "num_input_tokens_seen": 4061248, + "step": 16, + "train_runtime": 
2343.254, + "train_tokens_per_second": 1733.166 + }, + { + "epoch": 1.064516129032258, + "grad_norm": 0.3803435873199766, + "learning_rate": 9.755282581475769e-05, + "loss": 0.9221, + "num_input_tokens_seen": 4323264, + "step": 17, + "train_runtime": 2495.3616, + "train_tokens_per_second": 1732.52 + }, + { + "epoch": 1.129032258064516, + "grad_norm": 0.25742598219685603, + "learning_rate": 9.724030232334391e-05, + "loss": 0.9187, + "num_input_tokens_seen": 4585280, + "step": 18, + "train_runtime": 2646.9782, + "train_tokens_per_second": 1732.27 + }, + { + "epoch": 1.1935483870967742, + "grad_norm": 0.22468849320033518, + "learning_rate": 9.690956679612421e-05, + "loss": 0.8943, + "num_input_tokens_seen": 4847296, + "step": 19, + "train_runtime": 2798.7721, + "train_tokens_per_second": 1731.937 + }, + { + "epoch": 1.2580645161290323, + "grad_norm": 0.28670637895213946, + "learning_rate": 9.656074673794018e-05, + "loss": 0.8567, + "num_input_tokens_seen": 5109312, + "step": 20, + "train_runtime": 2950.5573, + "train_tokens_per_second": 1731.643 + }, + { + "epoch": 1.3225806451612903, + "grad_norm": 0.2043611694383423, + "learning_rate": 9.619397662556435e-05, + "loss": 0.8557, + "num_input_tokens_seen": 5371328, + "step": 21, + "train_runtime": 3103.0692, + "train_tokens_per_second": 1730.973 + }, + { + "epoch": 1.3870967741935485, + "grad_norm": 0.24667572059564058, + "learning_rate": 9.580939785585681e-05, + "loss": 0.8546, + "num_input_tokens_seen": 5633344, + "step": 22, + "train_runtime": 3254.6109, + "train_tokens_per_second": 1730.881 + }, + { + "epoch": 1.4516129032258065, + "grad_norm": 0.24067782128131054, + "learning_rate": 9.540715869125407e-05, + "loss": 0.851, + "num_input_tokens_seen": 5895360, + "step": 23, + "train_runtime": 3406.7776, + "train_tokens_per_second": 1730.48 + }, + { + "epoch": 1.5161290322580645, + "grad_norm": 0.2000848793492341, + "learning_rate": 9.498741420261108e-05, + "loss": 0.8483, + "num_input_tokens_seen": 6157376, + "step": 24, + "train_runtime": 3558.7104, + "train_tokens_per_second": 1730.227 + }, + { + "epoch": 1.5806451612903225, + "grad_norm": 0.22359283400518964, + "learning_rate": 9.45503262094184e-05, + "loss": 0.8226, + "num_input_tokens_seen": 6419392, + "step": 25, + "train_runtime": 3711.0551, + "train_tokens_per_second": 1729.802 + }, + { + "epoch": 1.6451612903225805, + "grad_norm": 1.0648985294170912, + "learning_rate": 9.409606321741775e-05, + "loss": 0.8259, + "num_input_tokens_seen": 6681408, + "step": 26, + "train_runtime": 3863.7709, + "train_tokens_per_second": 1729.245 + }, + { + "epoch": 1.7096774193548387, + "grad_norm": 0.18133437899141433, + "learning_rate": 9.362480035363986e-05, + "loss": 0.8524, + "num_input_tokens_seen": 6943424, + "step": 27, + "train_runtime": 4016.4383, + "train_tokens_per_second": 1728.752 + }, + { + "epoch": 1.7741935483870968, + "grad_norm": 0.456273625303925, + "learning_rate": 9.31367192988896e-05, + "loss": 0.8209, + "num_input_tokens_seen": 7205440, + "step": 28, + "train_runtime": 4168.9223, + "train_tokens_per_second": 1728.37 + }, + { + "epoch": 1.838709677419355, + "grad_norm": 0.4673225345753548, + "learning_rate": 9.263200821770461e-05, + "loss": 0.7853, + "num_input_tokens_seen": 7467456, + "step": 29, + "train_runtime": 4321.7056, + "train_tokens_per_second": 1727.896 + }, + { + "epoch": 1.903225806451613, + "grad_norm": 0.1996315966078707, + "learning_rate": 9.211086168581433e-05, + "loss": 0.7779, + "num_input_tokens_seen": 7729472, + "step": 30, + "train_runtime": 4474.7713, + 
"train_tokens_per_second": 1727.345 + }, + { + "epoch": 1.967741935483871, + "grad_norm": 0.3752545960960864, + "learning_rate": 9.157348061512727e-05, + "loss": 0.8146, + "num_input_tokens_seen": 7991488, + "step": 31, + "train_runtime": 4627.5272, + "train_tokens_per_second": 1726.946 + }, + { + "epoch": 2.0, + "grad_norm": 0.484664872247163, + "learning_rate": 9.102007217627568e-05, + "loss": 0.7787, + "num_input_tokens_seen": 8122496, + "step": 32, + "train_runtime": 4809.6299, + "train_tokens_per_second": 1688.799 + }, + { + "epoch": 2.064516129032258, + "grad_norm": 0.299023275639115, + "learning_rate": 9.045084971874738e-05, + "loss": 0.7361, + "num_input_tokens_seen": 8384512, + "step": 33, + "train_runtime": 4960.4611, + "train_tokens_per_second": 1690.269 + }, + { + "epoch": 2.129032258064516, + "grad_norm": 0.20981231086811225, + "learning_rate": 8.986603268863536e-05, + "loss": 0.6956, + "num_input_tokens_seen": 8646528, + "step": 34, + "train_runtime": 5112.1398, + "train_tokens_per_second": 1691.372 + }, + { + "epoch": 2.193548387096774, + "grad_norm": 0.3648857123126151, + "learning_rate": 8.926584654403724e-05, + "loss": 0.6819, + "num_input_tokens_seen": 8908544, + "step": 35, + "train_runtime": 5264.3603, + "train_tokens_per_second": 1692.237 + }, + { + "epoch": 2.258064516129032, + "grad_norm": 0.31269893807625165, + "learning_rate": 8.865052266813685e-05, + "loss": 0.6958, + "num_input_tokens_seen": 9170560, + "step": 36, + "train_runtime": 5416.2601, + "train_tokens_per_second": 1693.154 + }, + { + "epoch": 2.3225806451612905, + "grad_norm": 0.2727897516670068, + "learning_rate": 8.802029828000156e-05, + "loss": 0.6756, + "num_input_tokens_seen": 9432576, + "step": 37, + "train_runtime": 5567.9089, + "train_tokens_per_second": 1694.097 + }, + { + "epoch": 2.3870967741935485, + "grad_norm": 0.25721446603694037, + "learning_rate": 8.737541634312985e-05, + "loss": 0.648, + "num_input_tokens_seen": 9694592, + "step": 38, + "train_runtime": 5719.9113, + "train_tokens_per_second": 1694.885 + }, + { + "epoch": 2.4516129032258065, + "grad_norm": 20.44651031163169, + "learning_rate": 8.671612547178428e-05, + "loss": 0.669, + "num_input_tokens_seen": 9956608, + "step": 39, + "train_runtime": 5871.7821, + "train_tokens_per_second": 1695.671 + }, + { + "epoch": 2.5161290322580645, + "grad_norm": 3.0150996948618145, + "learning_rate": 8.604267983514594e-05, + "loss": 0.6856, + "num_input_tokens_seen": 10218624, + "step": 40, + "train_runtime": 6024.0794, + "train_tokens_per_second": 1696.296 + }, + { + "epoch": 2.5806451612903225, + "grad_norm": 19.306885630930985, + "learning_rate": 8.535533905932738e-05, + "loss": 0.6949, + "num_input_tokens_seen": 10480640, + "step": 41, + "train_runtime": 6176.6303, + "train_tokens_per_second": 1696.822 + }, + { + "epoch": 2.6451612903225805, + "grad_norm": 46.29810754342157, + "learning_rate": 8.46543681272818e-05, + "loss": 0.7467, + "num_input_tokens_seen": 10742656, + "step": 42, + "train_runtime": 6329.1576, + "train_tokens_per_second": 1697.328 + }, + { + "epoch": 2.709677419354839, + "grad_norm": 9.77264228502113, + "learning_rate": 8.39400372766471e-05, + "loss": 0.6638, + "num_input_tokens_seen": 11004672, + "step": 43, + "train_runtime": 6481.6817, + "train_tokens_per_second": 1697.811 + }, + { + "epoch": 2.774193548387097, + "grad_norm": 16.058872629216598, + "learning_rate": 8.321262189556409e-05, + "loss": 0.7096, + "num_input_tokens_seen": 11266688, + "step": 44, + "train_runtime": 6633.7733, + "train_tokens_per_second": 1698.383 
+ }, + { + "epoch": 2.838709677419355, + "grad_norm": 1.0261214777781495, + "learning_rate": 8.247240241650918e-05, + "loss": 0.6737, + "num_input_tokens_seen": 11528704, + "step": 45, + "train_runtime": 6785.7147, + "train_tokens_per_second": 1698.967 + }, + { + "epoch": 2.903225806451613, + "grad_norm": 1.6378077961358868, + "learning_rate": 8.171966420818228e-05, + "loss": 0.6638, + "num_input_tokens_seen": 11790720, + "step": 46, + "train_runtime": 6937.3194, + "train_tokens_per_second": 1699.607 + }, + { + "epoch": 2.967741935483871, + "grad_norm": 2.4944381489903455, + "learning_rate": 8.095469746549172e-05, + "loss": 0.6753, + "num_input_tokens_seen": 12052736, + "step": 47, + "train_runtime": 7089.3141, + "train_tokens_per_second": 1700.127 + }, + { + "epoch": 3.0, + "grad_norm": 2.4944381489903455, + "learning_rate": 8.017779709767858e-05, + "loss": 0.6673, + "num_input_tokens_seen": 12183744, + "step": 48, + "train_runtime": 7165.3736, + "train_tokens_per_second": 1700.364 + }, + { + "epoch": 3.064516129032258, + "grad_norm": 26.97115523850904, + "learning_rate": 7.938926261462366e-05, + "loss": 0.6049, + "num_input_tokens_seen": 12445760, + "step": 49, + "train_runtime": 7317.3555, + "train_tokens_per_second": 1700.855 + }, + { + "epoch": 3.129032258064516, + "grad_norm": 5.4430349870657375, + "learning_rate": 7.858939801138061e-05, + "loss": 1.0716, + "num_input_tokens_seen": 12707776, + "step": 50, + "train_runtime": 7468.9695, + "train_tokens_per_second": 1701.41 + }, + { + "epoch": 3.193548387096774, + "grad_norm": 12.78395158722848, + "learning_rate": 7.777851165098012e-05, + "loss": 1.0428, + "num_input_tokens_seen": 12969792, + "step": 51, + "train_runtime": 7620.5451, + "train_tokens_per_second": 1701.951 + }, + { + "epoch": 3.258064516129032, + "grad_norm": 326.4832878895978, + "learning_rate": 7.695691614555003e-05, + "loss": 0.9172, + "num_input_tokens_seen": 13231808, + "step": 52, + "train_runtime": 7772.2756, + "train_tokens_per_second": 1702.437 + }, + { + "epoch": 3.3225806451612905, + "grad_norm": 69.03004430934298, + "learning_rate": 7.612492823579745e-05, + "loss": 0.7593, + "num_input_tokens_seen": 13493824, + "step": 53, + "train_runtime": 7924.7666, + "train_tokens_per_second": 1702.741 + }, + { + "epoch": 3.3870967741935485, + "grad_norm": 1.1495044460164596, + "learning_rate": 7.528286866889924e-05, + "loss": 0.6151, + "num_input_tokens_seen": 13755840, + "step": 54, + "train_runtime": 8077.0191, + "train_tokens_per_second": 1703.084 + }, + { + "epoch": 3.4516129032258065, + "grad_norm": 0.252822200967686, + "learning_rate": 7.443106207484776e-05, + "loss": 0.5964, + "num_input_tokens_seen": 14017856, + "step": 55, + "train_runtime": 8229.2406, + "train_tokens_per_second": 1703.42 + }, + { + "epoch": 3.5161290322580645, + "grad_norm": 0.36169399074673414, + "learning_rate": 7.35698368412999e-05, + "loss": 0.5809, + "num_input_tokens_seen": 14279872, + "step": 56, + "train_runtime": 8380.9399, + "train_tokens_per_second": 1703.851 + }, + { + "epoch": 3.5806451612903225, + "grad_norm": 0.3219026144290714, + "learning_rate": 7.269952498697734e-05, + "loss": 0.5638, + "num_input_tokens_seen": 14541888, + "step": 57, + "train_runtime": 8533.1482, + "train_tokens_per_second": 1704.164 + }, + { + "epoch": 3.6451612903225805, + "grad_norm": 1.0720094012961243, + "learning_rate": 7.18204620336671e-05, + "loss": 0.5553, + "num_input_tokens_seen": 14803904, + "step": 58, + "train_runtime": 8685.2309, + "train_tokens_per_second": 1704.492 + }, + { + "epoch": 
3.709677419354839, + "grad_norm": 0.3151374305079844, + "learning_rate": 7.09329868768714e-05, + "loss": 0.5555, + "num_input_tokens_seen": 15065920, + "step": 59, + "train_runtime": 8837.7033, + "train_tokens_per_second": 1704.733 + }, + { + "epoch": 3.774193548387097, + "grad_norm": 0.2917481639941811, + "learning_rate": 7.003744165515705e-05, + "loss": 0.5635, + "num_input_tokens_seen": 15327936, + "step": 60, + "train_runtime": 8990.7028, + "train_tokens_per_second": 1704.865 + }, + { + "epoch": 3.838709677419355, + "grad_norm": 0.39703897734831073, + "learning_rate": 6.91341716182545e-05, + "loss": 0.5775, + "num_input_tokens_seen": 15589952, + "step": 61, + "train_runtime": 9142.8639, + "train_tokens_per_second": 1705.15 + }, + { + "epoch": 3.903225806451613, + "grad_norm": 0.330407023300617, + "learning_rate": 6.82235249939575e-05, + "loss": 0.5305, + "num_input_tokens_seen": 15851968, + "step": 62, + "train_runtime": 9294.6576, + "train_tokens_per_second": 1705.492 + }, + { + "epoch": 3.967741935483871, + "grad_norm": 0.2594779667409557, + "learning_rate": 6.730585285387465e-05, + "loss": 0.5428, + "num_input_tokens_seen": 16113984, + "step": 63, + "train_runtime": 9538.1388, + "train_tokens_per_second": 1689.426 + }, + { + "epoch": 4.0, + "grad_norm": 0.5188739404593391, + "learning_rate": 6.638150897808468e-05, + "loss": 0.5671, + "num_input_tokens_seen": 16244992, + "step": 64, + "train_runtime": 9613.4951, + "train_tokens_per_second": 1689.811 + }, + { + "epoch": 4.064516129032258, + "grad_norm": 0.2817891367025121, + "learning_rate": 6.545084971874738e-05, + "loss": 0.4715, + "num_input_tokens_seen": 16507008, + "step": 65, + "train_runtime": 9764.9453, + "train_tokens_per_second": 1690.435 + }, + { + "epoch": 4.129032258064516, + "grad_norm": 0.4313237711714659, + "learning_rate": 6.451423386272312e-05, + "loss": 0.4488, + "num_input_tokens_seen": 16769024, + "step": 66, + "train_runtime": 9916.329, + "train_tokens_per_second": 1691.052 + }, + { + "epoch": 4.193548387096774, + "grad_norm": 9.407910846391866, + "learning_rate": 6.357202249325371e-05, + "loss": 0.4582, + "num_input_tokens_seen": 17031040, + "step": 67, + "train_runtime": 10068.4234, + "train_tokens_per_second": 1691.53 + }, + { + "epoch": 4.258064516129032, + "grad_norm": 0.4318030663645452, + "learning_rate": 6.26245788507579e-05, + "loss": 0.4588, + "num_input_tokens_seen": 17293056, + "step": 68, + "train_runtime": 10219.9518, + "train_tokens_per_second": 1692.088 + }, + { + "epoch": 4.32258064516129, + "grad_norm": 0.2711661394321373, + "learning_rate": 6.167226819279528e-05, + "loss": 0.4447, + "num_input_tokens_seen": 17555072, + "step": 69, + "train_runtime": 10371.656, + "train_tokens_per_second": 1692.601 + }, + { + "epoch": 4.387096774193548, + "grad_norm": 0.2785432314686511, + "learning_rate": 6.071545765325254e-05, + "loss": 0.4565, + "num_input_tokens_seen": 17817088, + "step": 70, + "train_runtime": 10524.0113, + "train_tokens_per_second": 1692.994 + }, + { + "epoch": 4.451612903225806, + "grad_norm": 0.24786649766049834, + "learning_rate": 5.9754516100806423e-05, + "loss": 0.4665, + "num_input_tokens_seen": 18079104, + "step": 71, + "train_runtime": 10676.331, + "train_tokens_per_second": 1693.382 + }, + { + "epoch": 4.516129032258064, + "grad_norm": 0.2454529793840644, + "learning_rate": 5.8789813996717736e-05, + "loss": 0.446, + "num_input_tokens_seen": 18341120, + "step": 72, + "train_runtime": 10828.6719, + "train_tokens_per_second": 1693.755 + }, + { + "epoch": 4.580645161290323, + 
"grad_norm": 0.25559855808246584, + "learning_rate": 5.782172325201155e-05, + "loss": 0.4384, + "num_input_tokens_seen": 18603136, + "step": 73, + "train_runtime": 10981.143, + "train_tokens_per_second": 1694.098 + }, + { + "epoch": 4.645161290322581, + "grad_norm": 0.24822833593804589, + "learning_rate": 5.685061708409841e-05, + "loss": 0.4339, + "num_input_tokens_seen": 18865152, + "step": 74, + "train_runtime": 11133.4844, + "train_tokens_per_second": 1694.452 + }, + { + "epoch": 4.709677419354839, + "grad_norm": 0.24264729806408034, + "learning_rate": 5.587686987289189e-05, + "loss": 0.444, + "num_input_tokens_seen": 19127168, + "step": 75, + "train_runtime": 11285.3939, + "train_tokens_per_second": 1694.86 + }, + { + "epoch": 4.774193548387097, + "grad_norm": 0.22384324563071528, + "learning_rate": 5.490085701647805e-05, + "loss": 0.4353, + "num_input_tokens_seen": 19389184, + "step": 76, + "train_runtime": 11437.3833, + "train_tokens_per_second": 1695.246 + }, + { + "epoch": 4.838709677419355, + "grad_norm": 0.24764076326899273, + "learning_rate": 5.392295478639225e-05, + "loss": 0.4257, + "num_input_tokens_seen": 19651200, + "step": 77, + "train_runtime": 11589.4114, + "train_tokens_per_second": 1695.617 + }, + { + "epoch": 4.903225806451613, + "grad_norm": 0.2228535077281988, + "learning_rate": 5.294354018255945e-05, + "loss": 0.4402, + "num_input_tokens_seen": 19913216, + "step": 78, + "train_runtime": 11741.453, + "train_tokens_per_second": 1695.975 + }, + { + "epoch": 4.967741935483871, + "grad_norm": 0.21632763381463402, + "learning_rate": 5.196299078795344e-05, + "loss": 0.4326, + "num_input_tokens_seen": 20175232, + "step": 79, + "train_runtime": 11893.3046, + "train_tokens_per_second": 1696.352 + }, + { + "epoch": 5.0, + "grad_norm": 0.21632763381463402, + "learning_rate": 5.0981684623031415e-05, + "loss": 0.4125, + "num_input_tokens_seen": 20306240, + "step": 80, + "train_runtime": 11969.094, + "train_tokens_per_second": 1696.556 + }, + { + "epoch": 5.064516129032258, + "grad_norm": 0.3571828769341814, + "learning_rate": 5e-05, + "loss": 0.3198, + "num_input_tokens_seen": 20568256, + "step": 81, + "train_runtime": 12121.0302, + "train_tokens_per_second": 1696.907 + }, + { + "epoch": 5.129032258064516, + "grad_norm": 0.21648090918089968, + "learning_rate": 4.901831537696859e-05, + "loss": 0.2964, + "num_input_tokens_seen": 20830272, + "step": 82, + "train_runtime": 12272.7158, + "train_tokens_per_second": 1697.283 + }, + { + "epoch": 5.193548387096774, + "grad_norm": 0.31458036325066546, + "learning_rate": 4.8037009212046586e-05, + "loss": 0.3057, + "num_input_tokens_seen": 21092288, + "step": 83, + "train_runtime": 12424.73, + "train_tokens_per_second": 1697.605 + }, + { + "epoch": 5.258064516129032, + "grad_norm": 0.2497078272482313, + "learning_rate": 4.7056459817440544e-05, + "loss": 0.3129, + "num_input_tokens_seen": 21354304, + "step": 84, + "train_runtime": 12576.3946, + "train_tokens_per_second": 1697.967 + }, + { + "epoch": 5.32258064516129, + "grad_norm": 0.26688301370237244, + "learning_rate": 4.607704521360776e-05, + "loss": 0.2966, + "num_input_tokens_seen": 21616320, + "step": 85, + "train_runtime": 12727.8322, + "train_tokens_per_second": 1698.35 + }, + { + "epoch": 5.387096774193548, + "grad_norm": 0.23431543616740275, + "learning_rate": 4.509914298352197e-05, + "loss": 0.2895, + "num_input_tokens_seen": 21878336, + "step": 86, + "train_runtime": 12879.5644, + "train_tokens_per_second": 1698.686 + }, + { + "epoch": 5.451612903225806, + "grad_norm": 
0.24692139115327455, + "learning_rate": 4.412313012710813e-05, + "loss": 0.2926, + "num_input_tokens_seen": 22140352, + "step": 87, + "train_runtime": 13031.2099, + "train_tokens_per_second": 1699.025 + }, + { + "epoch": 5.516129032258064, + "grad_norm": 0.25361311803158476, + "learning_rate": 4.3149382915901606e-05, + "loss": 0.3056, + "num_input_tokens_seen": 22402368, + "step": 88, + "train_runtime": 13182.6668, + "train_tokens_per_second": 1699.381 + }, + { + "epoch": 5.580645161290323, + "grad_norm": 0.24929333634502235, + "learning_rate": 4.2178276747988446e-05, + "loss": 0.2922, + "num_input_tokens_seen": 22664384, + "step": 89, + "train_runtime": 13333.9686, + "train_tokens_per_second": 1699.748 + }, + { + "epoch": 5.645161290322581, + "grad_norm": 0.23498457386000374, + "learning_rate": 4.1210186003282275e-05, + "loss": 0.2947, + "num_input_tokens_seen": 22926400, + "step": 90, + "train_runtime": 13485.686, + "train_tokens_per_second": 1700.054 + }, + { + "epoch": 5.709677419354839, + "grad_norm": 0.2260009367281118, + "learning_rate": 4.0245483899193595e-05, + "loss": 0.2809, + "num_input_tokens_seen": 23188416, + "step": 91, + "train_runtime": 13637.0722, + "train_tokens_per_second": 1700.395 + }, + { + "epoch": 5.774193548387097, + "grad_norm": 0.23177978895395898, + "learning_rate": 3.928454234674747e-05, + "loss": 0.2835, + "num_input_tokens_seen": 23450432, + "step": 92, + "train_runtime": 13788.6307, + "train_tokens_per_second": 1700.708 + }, + { + "epoch": 5.838709677419355, + "grad_norm": 0.23286376279827942, + "learning_rate": 3.832773180720475e-05, + "loss": 0.2899, + "num_input_tokens_seen": 23712448, + "step": 93, + "train_runtime": 13940.8531, + "train_tokens_per_second": 1700.932 + } + ], + "logging_steps": 1, + "max_steps": 160, + "num_input_tokens_seen": 23712448, + "num_train_epochs": 10, + "save_steps": 31, + "stateful_callbacks": { + "TrainerControl": { + "args": { + "should_epoch_stop": false, + "should_evaluate": false, + "should_log": false, + "should_save": true, + "should_training_stop": false + }, + "attributes": {} + } + }, + "total_flos": 187388611264512.0, + "train_batch_size": 4, + "trial_name": null, + "trial_params": null +} diff --git a/B_3/checkpoint-93/training_args.bin b/B_3/checkpoint-93/training_args.bin new file mode 100644 index 0000000000000000000000000000000000000000..11cf327ddd83fc52012ab679d2c8f1eea3993a5d --- /dev/null +++ b/B_3/checkpoint-93/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:538340330f0a2e51763a2666184aff893a2f4b0e6e52d3a71923bb9fc8b781c0 +size 8017 diff --git a/B_3/checkpoint-93/zero_to_fp32.py b/B_3/checkpoint-93/zero_to_fp32.py new file mode 100644 index 0000000000000000000000000000000000000000..0e759146cadd92ddfefab3680146c2bd6a2b5c04 --- /dev/null +++ b/B_3/checkpoint-93/zero_to_fp32.py @@ -0,0 +1,760 @@ +#!/usr/bin/env python + +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +# This script extracts fp32 consolidated weights from a zero 1, 2 and 3 DeepSpeed checkpoints. It gets +# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in +# the future. Once extracted, the weights don't require DeepSpeed and can be used in any +# application. +# +# example: +# python zero_to_fp32.py . output_dir/ +# or +# python zero_to_fp32.py . 
output_dir/ --safe_serialization + +import argparse +import torch +import glob +import math +import os +import re +import gc +import json +import numpy as np +from tqdm import tqdm +from collections import OrderedDict +from dataclasses import dataclass + +# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with +# DeepSpeed data structures it has to be available in the current python environment. +from deepspeed.utils import logger +from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS, + FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES, + FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS) + + +@dataclass +class zero_model_state: + buffers: dict() + param_shapes: dict() + shared_params: list + ds_version: int + frozen_param_shapes: dict() + frozen_param_fragments: dict() + + +debug = 0 + +# load to cpu +device = torch.device('cpu') + + +def atoi(text): + return int(text) if text.isdigit() else text + + +def natural_keys(text): + ''' + alist.sort(key=natural_keys) sorts in human order + http://nedbatchelder.com/blog/200712/human_sorting.html + (See Toothy's implementation in the comments) + ''' + return [atoi(c) for c in re.split(r'(\d+)', text)] + + +def get_model_state_file(checkpoint_dir, zero_stage): + if not os.path.isdir(checkpoint_dir): + raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist") + + # there should be only one file + if zero_stage <= 2: + file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt") + elif zero_stage == 3: + file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt") + + if not os.path.exists(file): + raise FileNotFoundError(f"can't find model states file at '{file}'") + + return file + + +def get_checkpoint_files(checkpoint_dir, glob_pattern): + # XXX: need to test that this simple glob rule works for multi-node setup too + ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys) + + if len(ckpt_files) == 0: + raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'") + + return ckpt_files + + +def get_optim_files(checkpoint_dir): + return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt") + + +def get_model_state_files(checkpoint_dir): + return get_checkpoint_files(checkpoint_dir, "*_model_states.pt") + + +def parse_model_states(files): + zero_model_states = [] + for file in files: + state_dict = torch.load(file, map_location=device, weights_only=False) + + if BUFFER_NAMES not in state_dict: + raise ValueError(f"{file} is not a model state checkpoint") + buffer_names = state_dict[BUFFER_NAMES] + if debug: + print("Found buffers:", buffer_names) + + # recover just the buffers while restoring them to fp32 if they were saved in fp16 + buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names} + param_shapes = state_dict[PARAM_SHAPES] + + # collect parameters that are included in param_shapes + param_names = [] + for s in param_shapes: + for name in s.keys(): + param_names.append(name) + + # update with frozen parameters + frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None) + if frozen_param_shapes is not None: + if debug: + print(f"Found frozen_param_shapes: {frozen_param_shapes}") + param_names += list(frozen_param_shapes.keys()) + + # handle shared params + shared_params = [[k, v] for k, v in state_dict["shared_params"].items()] + + ds_version = state_dict.get(DS_VERSION, None) + + 
frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None) + + z_model_state = zero_model_state(buffers=buffers, + param_shapes=param_shapes, + shared_params=shared_params, + ds_version=ds_version, + frozen_param_shapes=frozen_param_shapes, + frozen_param_fragments=frozen_param_fragments) + zero_model_states.append(z_model_state) + + return zero_model_states + + +def parse_optim_states(files, ds_checkpoint_dir): + total_files = len(files) + state_dicts = [] + for f in tqdm(files, desc='Loading checkpoint shards'): + state_dict = torch.load(f, map_location=device, mmap=True, weights_only=False) + # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights + # and also handle the case where it was already removed by another helper script + state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None) + state_dicts.append(state_dict) + + if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]: + raise ValueError(f"{files[0]} is not a zero checkpoint") + zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE] + world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT] + + # For ZeRO-2 each param group can have different partition_count as data parallelism for expert + # parameters can be different from data parallelism for non-expert parameters. So we can just + # use the max of the partition_count to get the dp world_size. + + if type(world_size) is list: + world_size = max(world_size) + + if world_size != total_files: + raise ValueError( + f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. " + "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes." + ) + + # the groups are named differently in each stage + if zero_stage <= 2: + fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS + elif zero_stage == 3: + fp32_groups_key = FP32_FLAT_GROUPS + else: + raise ValueError(f"unknown zero stage {zero_stage}") + + fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))] + return zero_stage, world_size, fp32_flat_groups + + +def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters): + """ + Returns fp32 state_dict reconstructed from ds checkpoint + + Args: + - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are) + + """ + print(f"Processing zero checkpoint '{ds_checkpoint_dir}'") + + optim_files = get_optim_files(ds_checkpoint_dir) + zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir) + print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}") + + model_files = get_model_state_files(ds_checkpoint_dir) + + zero_model_states = parse_model_states(model_files) + print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}') + + if zero_stage <= 2: + return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states, + exclude_frozen_parameters) + elif zero_stage == 3: + return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states, + exclude_frozen_parameters) + + +def _zero2_merge_frozen_params(state_dict, zero_model_states): + if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0: + return + + frozen_param_shapes = zero_model_states[0].frozen_param_shapes + frozen_param_fragments = 
zero_model_states[0].frozen_param_fragments + + if debug: + num_elem = sum(s.numel() for s in frozen_param_shapes.values()) + print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}') + + wanted_params = len(frozen_param_shapes) + wanted_numel = sum(s.numel() for s in frozen_param_shapes.values()) + avail_numel = sum([p.numel() for p in frozen_param_fragments.values()]) + print(f'Frozen params: Have {avail_numel} numels to process.') + print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params') + + total_params = 0 + total_numel = 0 + for name, shape in frozen_param_shapes.items(): + total_params += 1 + unpartitioned_numel = shape.numel() + total_numel += unpartitioned_numel + + state_dict[name] = frozen_param_fragments[name] + + if debug: + print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ") + + print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements") + + +def _has_callable(obj, fn): + attr = getattr(obj, fn, None) + return callable(attr) + + +def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states): + param_shapes = zero_model_states[0].param_shapes + + # Reconstruction protocol: + # + # XXX: document this + + if debug: + for i in range(world_size): + for j in range(len(fp32_flat_groups[0])): + print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}") + + # XXX: memory usage doubles here (zero2) + num_param_groups = len(fp32_flat_groups[0]) + merged_single_partition_of_fp32_groups = [] + for i in range(num_param_groups): + merged_partitions = [sd[i] for sd in fp32_flat_groups] + full_single_fp32_vector = torch.cat(merged_partitions, 0) + merged_single_partition_of_fp32_groups.append(full_single_fp32_vector) + avail_numel = sum( + [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups]) + + if debug: + wanted_params = sum([len(shapes) for shapes in param_shapes]) + wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes]) + # not asserting if there is a mismatch due to possible padding + print(f"Have {avail_numel} numels to process.") + print(f"Need {wanted_numel} numels in {wanted_params} params.") + + # params + # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support + # out-of-core computing solution + total_numel = 0 + total_params = 0 + for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups): + offset = 0 + avail_numel = full_single_fp32_vector.numel() + for name, shape in shapes.items(): + + unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape) + total_numel += unpartitioned_numel + total_params += 1 + + if debug: + print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ") + state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape) + offset += unpartitioned_numel + + # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and + # avail_numel can differ by anywhere between 0..2*world_size. 
Due to two unrelated complex + # paddings performed in the code it's almost impossible to predict the exact numbers w/o the + # live optimizer object, so we are checking that the numbers are within the right range + align_to = 2 * world_size + + def zero2_align(x): + return align_to * math.ceil(x / align_to) + + if debug: + print(f"original offset={offset}, avail_numel={avail_numel}") + + offset = zero2_align(offset) + avail_numel = zero2_align(avail_numel) + + if debug: + print(f"aligned offset={offset}, avail_numel={avail_numel}") + + # Sanity check + if offset != avail_numel: + raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong") + + print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements") + + +def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states, + exclude_frozen_parameters): + state_dict = OrderedDict() + + # buffers + buffers = zero_model_states[0].buffers + state_dict.update(buffers) + if debug: + print(f"added {len(buffers)} buffers") + + if not exclude_frozen_parameters: + _zero2_merge_frozen_params(state_dict, zero_model_states) + + _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states) + + # recover shared parameters + for pair in zero_model_states[0].shared_params: + if pair[1] in state_dict: + state_dict[pair[0]] = state_dict[pair[1]] + + return state_dict + + +def zero3_partitioned_param_info(unpartitioned_numel, world_size): + remainder = unpartitioned_numel % world_size + padding_numel = (world_size - remainder) if remainder else 0 + partitioned_numel = math.ceil(unpartitioned_numel / world_size) + return partitioned_numel, padding_numel + + +def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states): + if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0: + return + + if debug: + for i in range(world_size): + num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values()) + print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}') + + frozen_param_shapes = zero_model_states[0].frozen_param_shapes + wanted_params = len(frozen_param_shapes) + wanted_numel = sum(s.numel() for s in frozen_param_shapes.values()) + avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size + print(f'Frozen params: Have {avail_numel} numels to process.') + print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params') + + total_params = 0 + total_numel = 0 + for name, shape in zero_model_states[0].frozen_param_shapes.items(): + total_params += 1 + unpartitioned_numel = shape.numel() + total_numel += unpartitioned_numel + + param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states) + state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape) + + partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size) + + if debug: + print( + f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}" + ) + + print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements") + + +class GatheredTensor: + """ + A pseudo tensor that collects partitioned weights. + It is more memory efficient when there are multiple groups. 
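+ Call ``.contiguous()`` to assemble the partitioned fragments into a regular torch tensor.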
+ """ + + def __init__(self, flat_groups, flat_groups_offset, offset, partitioned_numel, shape): + self.flat_groups = flat_groups + self.flat_groups_offset = flat_groups_offset + self.offset = offset + self.partitioned_numel = partitioned_numel + self.shape = shape + self.dtype = self.flat_groups[0][0].dtype + + def contiguous(self): + """ + Merge partitioned weights from flat_groups into a single tensor. + """ + end_idx = self.offset + self.partitioned_numel + world_size = len(self.flat_groups) + pad_flat_param_chunks = [] + + for rank_i in range(world_size): + # for each rank, we need to collect weights from related group/groups + flat_groups_at_rank_i = self.flat_groups[rank_i] + start_group_id = None + end_group_id = None + for group_id in range(len(self.flat_groups_offset)): + if self.flat_groups_offset[group_id] <= self.offset < self.flat_groups_offset[group_id + 1]: + start_group_id = group_id + if self.flat_groups_offset[group_id] < end_idx <= self.flat_groups_offset[group_id + 1]: + end_group_id = group_id + break + # collect weights from related group/groups + for group_id in range(start_group_id, end_group_id + 1): + flat_tensor = flat_groups_at_rank_i[group_id] + start_offset = self.offset - self.flat_groups_offset[group_id] + end_offset = min(end_idx, self.flat_groups_offset[group_id + 1]) - self.flat_groups_offset[group_id] + pad_flat_param_chunks.append(flat_tensor[start_offset:end_offset]) + + # collect weights from all ranks + pad_flat_param = torch.cat(pad_flat_param_chunks, dim=0) + param = pad_flat_param[:self.shape.numel()].view(self.shape).contiguous() + return param + + +def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states): + param_shapes = zero_model_states[0].param_shapes + avail_numel = sum([flat_group.numel() for flat_group in fp32_flat_groups[0]]) * world_size + + # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each + # param, re-consolidating each param, while dealing with padding if any + + # merge list of dicts, preserving order + param_shapes = {k: v for d in param_shapes for k, v in d.items()} + + if debug: + for i in range(world_size): + print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}") + + wanted_params = len(param_shapes) + wanted_numel = sum(shape.numel() for shape in param_shapes.values()) + # not asserting if there is a mismatch due to possible padding + avail_numel = fp32_flat_groups[0].numel() * world_size + print(f"Trainable params: Have {avail_numel} numels to process.") + print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.") + + # params + # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support + # out-of-core computing solution + offset = 0 + total_numel = 0 + total_params = 0 + flat_groups_offset = [0] + list(np.cumsum([flat_tensor.numel() for flat_tensor in fp32_flat_groups[0]])) + for name, shape in tqdm(param_shapes.items(), desc='Gathering sharded weights'): + unpartitioned_numel = shape.numel() + total_numel += unpartitioned_numel + total_params += 1 + partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size) + + if debug: + print( + f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}" + ) + + # memory efficient tensor + tensor = GatheredTensor(fp32_flat_groups, flat_groups_offset, offset, partitioned_numel, shape) + 
state_dict[name] = tensor + offset += partitioned_numel + + offset *= world_size + + # Sanity check + if offset != avail_numel: + raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong") + + print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements") + + +def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states, + exclude_frozen_parameters): + state_dict = OrderedDict() + + # buffers + buffers = zero_model_states[0].buffers + state_dict.update(buffers) + if debug: + print(f"added {len(buffers)} buffers") + + if not exclude_frozen_parameters: + _zero3_merge_frozen_params(state_dict, world_size, zero_model_states) + + _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states) + + # recover shared parameters + for pair in zero_model_states[0].shared_params: + if pair[1] in state_dict: + state_dict[pair[0]] = state_dict[pair[1]] + + return state_dict + + +def to_torch_tensor(state_dict, return_empty_tensor=False): + """ + Convert state_dict of GatheredTensor to torch tensor + """ + torch_state_dict = {} + converted_tensors = {} + for name, tensor in state_dict.items(): + tensor_id = id(tensor) + if tensor_id in converted_tensors: # shared tensors + shared_tensor = torch_state_dict[converted_tensors[tensor_id]] + torch_state_dict[name] = shared_tensor + else: + converted_tensors[tensor_id] = name + if return_empty_tensor: + torch_state_dict[name] = torch.empty(tensor.shape, dtype=tensor.dtype) + else: + torch_state_dict[name] = tensor.contiguous() + return torch_state_dict + + +def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, + tag=None, + exclude_frozen_parameters=False, + lazy_mode=False): + """ + Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with + ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example + via a model hub. + + Args: + - ``checkpoint_dir``: path to the desired checkpoint folder + - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14`` + - ``exclude_frozen_parameters``: exclude frozen parameters + - ``lazy_mode``: get state_dict in lazy mode. It returns a dict of pesduo tensor instead of torch tensor, which is more memory efficient. + Convert the pesduo tensor to torch tensor by ``.contiguous()`` + + Returns: + - pytorch ``state_dict`` + + A typical usage might be :: + + from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint + # do the training and checkpoint saving + state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu + model = model.cpu() # move to cpu + model.load_state_dict(state_dict) + # submit to model hub or save the model to share with others + + In this example the ``model`` will no longer be usable in the deepspeed context of the same + application. i.e. you will need to re-initialize the deepspeed engine, since + ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it. + + If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead. + + Note: the above usage may not work if your application doesn't have sufficient free CPU memory. + You may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with + the checkpoint. 
Or you can load state_dict in lazy mode :: + + from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint + state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, lazy_mode=True) # not on cpu + for name, lazy_tensor in state_dict.item(): + tensor = lazy_tensor.contiguous() # to cpu + print(name, tensor) + # del tensor to release memory if it no longer in use + """ + if tag is None: + latest_path = os.path.join(checkpoint_dir, 'latest') + if os.path.isfile(latest_path): + with open(latest_path, 'r') as fd: + tag = fd.read().strip() + else: + raise ValueError(f"Unable to find 'latest' file at {latest_path}") + + ds_checkpoint_dir = os.path.join(checkpoint_dir, tag) + + if not os.path.isdir(ds_checkpoint_dir): + raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist") + + state_dict = _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters) + if lazy_mode: + return state_dict + else: + return to_torch_tensor(state_dict) + + +def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, + output_dir, + max_shard_size="5GB", + safe_serialization=False, + tag=None, + exclude_frozen_parameters=False): + """ + Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be + loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed. + + Args: + - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``) + - ``output_dir``: directory to the pytorch fp32 state_dict output files + - ``max_shard_size``: the maximum size for a checkpoint before being sharded, default value is 5GB + - ``safe_serialization``: whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`). + - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14`` + - ``exclude_frozen_parameters``: exclude frozen parameters + """ + + # Dependency pre-check + if safe_serialization: + try: + from safetensors.torch import save_file + except ImportError: + print('If you want to use `safe_serialization`, please `pip install safetensors`') + raise + if max_shard_size is not None: + try: + from huggingface_hub import split_torch_state_dict_into_shards + except ImportError: + print('If you want to use `max_shard_size`, please `pip install huggingface_hub`') + raise + + # Convert zero checkpoint to state_dict + state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, + tag, + exclude_frozen_parameters, + lazy_mode=True) + + # Shard the model if it is too big. 
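+ # When sharding, the layout is computed from empty (shape-only) tensors so the full weights are only materialized one shard at a time during saving.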
+ weights_name = "model.safetensors" if safe_serialization else "pytorch_model.bin" + if max_shard_size is not None: + filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors") + # an memory-efficient approach for sharding + empty_state_dict = to_torch_tensor(state_dict, return_empty_tensor=True) + state_dict_split = split_torch_state_dict_into_shards(empty_state_dict, + filename_pattern=filename_pattern, + max_shard_size=max_shard_size) + else: + from collections import namedtuple + StateDictSplit = namedtuple("StateDictSplit", ["is_sharded", "filename_to_tensors"]) + state_dict_split = StateDictSplit(is_sharded=False, + filename_to_tensors={weights_name: list(state_dict.keys())}) + + # Save the model by shard + os.makedirs(output_dir, exist_ok=True) + filename_to_tensors = state_dict_split.filename_to_tensors.items() + for shard_file, tensors in tqdm(filename_to_tensors, desc="Saving checkpoint shards"): + shard_state_dict = {tensor_name: state_dict[tensor_name] for tensor_name in tensors} + shard_state_dict = to_torch_tensor(shard_state_dict) + output_path = os.path.join(output_dir, shard_file) + if safe_serialization: + save_file(shard_state_dict, output_path, metadata={"format": "pt"}) + else: + torch.save(shard_state_dict, output_path) + # release the memory of current shard + for tensor_name in list(shard_state_dict.keys()): + del state_dict[tensor_name] + del shard_state_dict[tensor_name] + del shard_state_dict + gc.collect() + + # Save index if sharded + if state_dict_split.is_sharded: + index = { + "metadata": state_dict_split.metadata, + "weight_map": state_dict_split.tensor_to_filename, + } + save_index_file = "model.safetensors.index.json" if safe_serialization else "pytorch_model.bin.index.json" + save_index_file = os.path.join(output_dir, save_index_file) + with open(save_index_file, "w", encoding="utf-8") as f: + content = json.dumps(index, indent=2, sort_keys=True) + "\n" + f.write(content) + + +def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None): + """ + 1. Put the provided model to cpu + 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` + 3. Load it into the provided model + + Args: + - ``model``: the model object to update + - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``) + - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14`` + + Returns: + - ``model`: modified model + + Make sure you have plenty of CPU memory available before you call this function. If you don't + have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it + conveniently placed for you in the checkpoint folder. + + A typical usage might be :: + + from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint + model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir) + # submit to model hub or save the model to share with others + + Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context + of the same application. i.e. you will need to re-initialize the deepspeed engine, since + ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it. 
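+ The state_dict is applied with ``strict=False``, so parameters absent from the checkpoint are left unchanged.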
+ + """ + logger.info(f"Extracting fp32 weights") + state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag) + + logger.info(f"Overwriting model with fp32 weights") + model = model.cpu() + model.load_state_dict(state_dict, strict=False) + + return model + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("checkpoint_dir", + type=str, + help="path to the desired checkpoint folder, e.g., path/checkpoint-12") + parser.add_argument("output_dir", + type=str, + help="directory to the pytorch fp32 state_dict output files" + "(e.g. path/checkpoint-12-output/)") + parser.add_argument( + "--max_shard_size", + type=str, + default="5GB", + help="The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size" + "lower than this size. If expressed as a string, needs to be digits followed by a unit (like `5MB`" + "We default it to 5GB in order for models to be able to run easily on free-tier google colab instances" + "without CPU OOM issues.") + parser.add_argument( + "--safe_serialization", + default=False, + action='store_true', + help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).") + parser.add_argument("-t", + "--tag", + type=str, + default=None, + help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1") + parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters") + parser.add_argument("-d", "--debug", action='store_true', help="enable debug") + args = parser.parse_args() + + debug = args.debug + + convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, + args.output_dir, + max_shard_size=args.max_shard_size, + safe_serialization=args.safe_serialization, + tag=args.tag, + exclude_frozen_parameters=args.exclude_frozen_parameters) diff --git a/B_3/llamaboard_config.yaml b/B_3/llamaboard_config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2ef343021cfc0a73e22101daff9789f914432456 --- /dev/null +++ b/B_3/llamaboard_config.yaml @@ -0,0 +1,87 @@ +top.booster: auto +top.checkpoint_path: +- /workspace/LLaMA-Factory/saves/Llama-3.1-70B/lora/B_3/checkpoint-93 +top.finetuning_type: lora +top.model_name: Llama-3.1-70B +top.quantization_bit: none +top.quantization_method: bnb +top.rope_scaling: llama3 +top.template: llama3 +train.additional_target: '' +train.apollo_rank: 16 +train.apollo_scale: 32 +train.apollo_target: all +train.apollo_update_interval: 200 +train.badam_mode: layer +train.badam_switch_interval: 50 +train.badam_switch_mode: ascending +train.badam_update_ratio: 0.05 +train.batch_size: 4 +train.compute_type: bf16 +train.create_new_adapter: false +train.cutoff_len: 2048 +train.dataset: +- Millfield1 +- Millfield3 +train.dataset_dir: data +train.ds_offload: false +train.ds_stage: '3' +train.enable_thinking: true +train.extra_args: '{"optim": "adamw_torch"}' +train.freeze_extra_modules: '' +train.freeze_language_model: false +train.freeze_multi_modal_projector: true +train.freeze_trainable_layers: 2 +train.freeze_trainable_modules: all +train.freeze_vision_tower: true +train.galore_rank: 16 +train.galore_scale: 2 +train.galore_target: all +train.galore_update_interval: 200 +train.gradient_accumulation_steps: 8 +train.image_max_pixels: 768*768 +train.image_min_pixels: 32*32 +train.learning_rate: 1e-4 +train.logging_steps: 1 +train.lora_alpha: 1024 +train.lora_dropout: 0 +train.lora_rank: 256 +train.lora_target: '' +train.loraplus_lr_ratio: 0 
+train.lr_scheduler_type: cosine +train.mask_history: false +train.max_grad_norm: '1.0' +train.max_samples: '100000' +train.neat_packing: false +train.neftune_alpha: 0 +train.num_train_epochs: '10.0' +train.packing: true +train.ppo_score_norm: false +train.ppo_whiten_rewards: false +train.pref_beta: 0.1 +train.pref_ftx: 0 +train.pref_loss: sigmoid +train.report_to: wandb +train.resize_vocab: false +train.reward_model: [] +train.save_steps: 31 +train.swanlab_api_key: '' +train.swanlab_link: null +train.swanlab_mode: cloud +train.swanlab_project: llamafactory +train.swanlab_run_name: '' +train.swanlab_workspace: '' +train.train_on_prompt: false +train.training_stage: Pre-Training +train.use_apollo: false +train.use_badam: false +train.use_dora: false +train.use_galore: false +train.use_llama_pro: false +train.use_pissa: false +train.use_rslora: false +train.use_swanlab: false +train.val_size: 0 +train.video_max_pixels: 256*256 +train.video_min_pixels: 16*16 +train.warmup_steps: 0 diff --git a/B_3/running_log.txt b/B_3/running_log.txt new file mode 100644 index 0000000000000000000000000000000000000000..b89d5b442953458dc8dd08baf17697d4768a4927 --- /dev/null +++ b/B_3/running_log.txt @@ -0,0 +1,278 @@ +[INFO|2025-08-26 07:43:00] configuration_utils.py:750 >> loading configuration file /workspace/meta-llama/Llama-3.1-70B/config.json +[INFO|2025-08-26 07:43:00] configuration_utils.py:817 >> Model config LlamaConfig { + "architectures": [ + "LlamaForCausalLM" + ], + "attention_bias": false, + "attention_dropout": 0.0, + "bos_token_id": 128000, + "eos_token_id": 128001, + "head_dim": 128, + "hidden_act": "silu", + "hidden_size": 8192, + "initializer_range": 0.02, + "intermediate_size": 28672, + "max_position_embeddings": 131072, + "mlp_bias": false, + "model_type": "llama", + "num_attention_heads": 64, + "num_hidden_layers": 80, + "num_key_value_heads": 8, + "pretraining_tp": 1, + "rms_norm_eps": 1e-05, + "rope_scaling": { + "factor": 8.0, + "high_freq_factor": 4.0, + "low_freq_factor": 1.0, + "original_max_position_embeddings": 8192, + "rope_type": "llama3" + }, + "rope_theta": 500000.0, + "tie_word_embeddings": false, + "torch_dtype": "bfloat16", + "transformers_version": "4.55.0", + "use_cache": true, + "vocab_size": 128256 +} + +[WARNING|2025-08-26 07:43:00] logging.py:148 >> Input length is smaller than max length. Disabling rope scaling. +[INFO|2025-08-26 07:43:00] logging.py:143 >> KV cache is disabled during training. +[INFO|2025-08-26 07:43:00] modeling_utils.py:1305 >> loading weights file /workspace/meta-llama/Llama-3.1-70B/model.safetensors.index.json +[INFO|2025-08-26 07:43:00] modeling_utils.py:4363 >> Detected DeepSpeed ZeRO-3: activating zero.init() for this model +[INFO|2025-08-26 07:43:00] configuration_utils.py:1098 >> Generate config GenerationConfig { + "bos_token_id": 128000, + "eos_token_id": 128001, + "use_cache": false +} + +[INFO|2025-08-26 07:43:31] modeling_utils.py:5606 >> All model checkpoint weights were used when initializing LlamaForCausalLM. + +[INFO|2025-08-26 07:43:31] modeling_utils.py:5614 >> All the weights of LlamaForCausalLM were initialized from the model checkpoint at /workspace/meta-llama/Llama-3.1-70B. +If your task is similar to the task the model of the checkpoint was trained on, you can already use LlamaForCausalLM for predictions without further training. 
+[INFO|2025-08-26 07:43:31] configuration_utils.py:1051 >> loading configuration file /workspace/meta-llama/Llama-3.1-70B/generation_config.json +[INFO|2025-08-26 07:43:31] configuration_utils.py:1098 >> Generate config GenerationConfig { + "bos_token_id": 128000, + "do_sample": true, + "eos_token_id": 128001, + "temperature": 0.6, + "top_p": 0.9 +} + +[INFO|2025-08-26 07:43:31] logging.py:143 >> Gradient checkpointing enabled. +[INFO|2025-08-26 07:43:31] logging.py:143 >> Using torch SDPA for faster training and inference. +[INFO|2025-08-26 07:43:31] logging.py:143 >> DeepSpeed ZeRO3 detected, remaining trainable params in float32. +[INFO|2025-08-26 07:43:31] logging.py:143 >> Fine-tuning method: LoRA +[INFO|2025-08-26 07:43:53] logging.py:143 >> Loaded adapter(s): /workspace/LLaMA-Factory/saves/Llama-3.1-70B/lora/B_3/checkpoint-93 +[INFO|2025-08-26 07:43:53] logging.py:143 >> trainable params: 3,313,500,160 || all params: 73,867,206,656 || trainable%: 4.4858 +[INFO|2025-08-26 07:43:53] trainer.py:757 >> Using auto half precision backend +[INFO|2025-08-26 07:44:05] deepspeed.py:492 >> Attempting to resume from saves/Llama-3.1-70B/lora/B_3/checkpoint-93 +[INFO|2025-08-26 07:44:11] trainer.py:2433 >> ***** Running training ***** +[INFO|2025-08-26 07:44:11] trainer.py:2434 >> Num examples = 1,977 +[INFO|2025-08-26 07:44:11] trainer.py:2435 >> Num Epochs = 10 +[INFO|2025-08-26 07:44:11] trainer.py:2436 >> Instantaneous batch size per device = 4 +[INFO|2025-08-26 07:44:11] trainer.py:2439 >> Total train batch size (w. parallel, distributed & accumulation) = 128 +[INFO|2025-08-26 07:44:11] trainer.py:2440 >> Gradient Accumulation steps = 8 +[INFO|2025-08-26 07:44:11] trainer.py:2441 >> Total optimization steps = 160 +[INFO|2025-08-26 07:44:11] trainer.py:2442 >> Number of trainable parameters = 3,313,500,160 +[INFO|2025-08-26 07:44:11] trainer.py:2464 >> Continuing training from checkpoint, will skip to saved global_step +[INFO|2025-08-26 07:44:11] trainer.py:2465 >> Continuing training from epoch 5 +[INFO|2025-08-26 07:44:11] trainer.py:2466 >> Continuing training from global step 93 +[INFO|2025-08-26 07:44:11] trainer.py:2468 >> Will skip the first 5 epochs then the first 104 batches in the first epoch. 
+[INFO|2025-08-26 07:44:11] integration_utils.py:866 >> Automatic Weights & Biases logging enabled, to disable set os.environ["WANDB_DISABLED"] = "true" +[INFO|2025-08-26 07:46:44] logging.py:143 >> {'loss': 0.2187, 'learning_rate': 3.7375e-05, 'epoch': 5.90, 'throughput': 158574.48} +[INFO|2025-08-26 07:49:16] logging.py:143 >> {'loss': 0.2062, 'learning_rate': 3.6428e-05, 'epoch': 5.97, 'throughput': 80048.80} +[INFO|2025-08-26 07:53:03] logging.py:143 >> {'loss': 0.3804, 'learning_rate': 3.5486e-05, 'epoch': 6.06, 'throughput': 46495.19} +[INFO|2025-08-26 07:55:35] logging.py:143 >> {'loss': 0.1957, 'learning_rate': 3.4549e-05, 'epoch': 6.13, 'throughput': 36510.06} +[INFO|2025-08-26 07:58:07] logging.py:143 >> {'loss': 0.2050, 'learning_rate': 3.3618e-05, 'epoch': 6.19, 'throughput': 30164.83} +[INFO|2025-08-26 08:00:39] logging.py:143 >> {'loss': 0.1908, 'learning_rate': 3.2694e-05, 'epoch': 6.26, 'throughput': 25779.39} +[INFO|2025-08-26 08:03:11] logging.py:143 >> {'loss': 0.1847, 'learning_rate': 3.1776e-05, 'epoch': 6.32, 'throughput': 22560.17} +[INFO|2025-08-26 08:05:43] logging.py:143 >> {'loss': 0.1817, 'learning_rate': 3.0866e-05, 'epoch': 6.39, 'throughput': 20111.55} +[INFO|2025-08-26 08:08:15] logging.py:143 >> {'loss': 0.1734, 'learning_rate': 2.9963e-05, 'epoch': 6.45, 'throughput': 18166.65} +[INFO|2025-08-26 08:10:48] logging.py:143 >> {'loss': 0.1869, 'learning_rate': 2.9067e-05, 'epoch': 6.52, 'throughput': 16593.12} +[INFO|2025-08-26 08:13:20] logging.py:143 >> {'loss': 0.1892, 'learning_rate': 2.8180e-05, 'epoch': 6.58, 'throughput': 15295.94} +[INFO|2025-08-26 08:15:52] logging.py:143 >> {'loss': 0.2003, 'learning_rate': 2.7300e-05, 'epoch': 6.65, 'throughput': 14210.44} +[INFO|2025-08-26 08:18:24] logging.py:143 >> {'loss': 0.1869, 'learning_rate': 2.6430e-05, 'epoch': 6.71, 'throughput': 13282.93} +[INFO|2025-08-26 08:20:57] logging.py:143 >> {'loss': 0.1652, 'learning_rate': 2.5569e-05, 'epoch': 6.77, 'throughput': 12481.27} +[INFO|2025-08-26 08:23:30] logging.py:143 >> {'loss': 0.1931, 'learning_rate': 2.4717e-05, 'epoch': 6.84, 'throughput': 11785.11} +[INFO|2025-08-26 08:26:02] logging.py:143 >> {'loss': 0.1841, 'learning_rate': 2.3875e-05, 'epoch': 6.90, 'throughput': 11172.53} +[INFO|2025-08-26 08:28:35] logging.py:143 >> {'loss': 0.1843, 'learning_rate': 2.3043e-05, 'epoch': 6.97, 'throughput': 10630.20} +[INFO|2025-08-26 08:29:51] logging.py:143 >> {'loss': 0.1603, 'learning_rate': 2.2221e-05, 'epoch': 7.00, 'throughput': 10382.25} +[INFO|2025-08-26 08:32:24] logging.py:143 >> {'loss': 0.1160, 'learning_rate': 2.1411e-05, 'epoch': 7.06, 'throughput': 9925.06} +[INFO|2025-08-26 08:34:56] logging.py:143 >> {'loss': 0.1325, 'learning_rate': 2.0611e-05, 'epoch': 7.13, 'throughput': 9513.74} +[INFO|2025-08-26 08:37:29] logging.py:143 >> {'loss': 0.1212, 'learning_rate': 1.9822e-05, 'epoch': 7.19, 'throughput': 9141.86} +[INFO|2025-08-26 08:40:02] logging.py:143 >> {'loss': 0.1210, 'learning_rate': 1.9045e-05, 'epoch': 7.26, 'throughput': 8802.81} +[INFO|2025-08-26 08:42:34] logging.py:143 >> {'loss': 0.1172, 'learning_rate': 1.8280e-05, 'epoch': 7.32, 'throughput': 8494.27} +[INFO|2025-08-26 08:45:07] logging.py:143 >> {'loss': 0.1267, 'learning_rate': 1.7528e-05, 'epoch': 7.39, 'throughput': 8211.08} +[INFO|2025-08-26 08:47:39] logging.py:143 >> {'loss': 0.1132, 'learning_rate': 1.6787e-05, 'epoch': 7.45, 'throughput': 7951.35} +[INFO|2025-08-26 08:50:12] logging.py:143 >> {'loss': 0.1036, 'learning_rate': 1.6060e-05, 'epoch': 7.52, 'throughput': 7710.80} 
+[INFO|2025-08-26 08:52:44] logging.py:143 >> {'loss': 0.1085, 'learning_rate': 1.5346e-05, 'epoch': 7.58, 'throughput': 7488.69} +[INFO|2025-08-26 08:55:16] logging.py:143 >> {'loss': 0.1075, 'learning_rate': 1.4645e-05, 'epoch': 7.65, 'throughput': 7282.61} +[INFO|2025-08-26 08:57:49] logging.py:143 >> {'loss': 0.1091, 'learning_rate': 1.3957e-05, 'epoch': 7.71, 'throughput': 7090.97} +[INFO|2025-08-26 09:00:21] logging.py:143 >> {'loss': 0.1003, 'learning_rate': 1.3284e-05, 'epoch': 7.77, 'throughput': 6911.66} +[INFO|2025-08-26 09:02:53] logging.py:143 >> {'loss': 0.1123, 'learning_rate': 1.2625e-05, 'epoch': 7.84, 'throughput': 6744.36} +[INFO|2025-08-26 09:04:10] trainer.py:4074 >> Saving model checkpoint to saves/Llama-3.1-70B/lora/B_3/checkpoint-124 +[INFO|2025-08-26 09:04:10] configuration_utils.py:750 >> loading configuration file /workspace/meta-llama/Llama-3.1-70B/config.json +[INFO|2025-08-26 09:04:10] configuration_utils.py:817 >> Model config LlamaConfig { + "architectures": [ + "LlamaForCausalLM" + ], + "attention_bias": false, + "attention_dropout": 0.0, + "bos_token_id": 128000, + "eos_token_id": 128001, + "head_dim": 128, + "hidden_act": "silu", + "hidden_size": 8192, + "initializer_range": 0.02, + "intermediate_size": 28672, + "max_position_embeddings": 131072, + "mlp_bias": false, + "model_type": "llama", + "num_attention_heads": 64, + "num_hidden_layers": 80, + "num_key_value_heads": 8, + "pretraining_tp": 1, + "rms_norm_eps": 1e-05, + "rope_scaling": { + "factor": 8.0, + "high_freq_factor": 4.0, + "low_freq_factor": 1.0, + "original_max_position_embeddings": 8192, + "rope_type": "llama3" + }, + "rope_theta": 500000.0, + "tie_word_embeddings": false, + "torch_dtype": "bfloat16", + "transformers_version": "4.55.0", + "use_cache": true, + "vocab_size": 128256 +} + +[INFO|2025-08-26 09:04:18] tokenization_utils_base.py:2393 >> chat template saved in saves/Llama-3.1-70B/lora/B_3/checkpoint-124/chat_template.jinja +[INFO|2025-08-26 09:04:18] tokenization_utils_base.py:2562 >> tokenizer config file saved in saves/Llama-3.1-70B/lora/B_3/checkpoint-124/tokenizer_config.json +[INFO|2025-08-26 09:04:18] tokenization_utils_base.py:2571 >> Special tokens file saved in saves/Llama-3.1-70B/lora/B_3/checkpoint-124/special_tokens_map.json +[INFO|2025-08-26 09:07:06] logging.py:143 >> {'loss': 0.1183, 'learning_rate': 1.1980e-05, 'epoch': 7.90, 'throughput': 6454.73} +[INFO|2025-08-26 09:09:37] logging.py:143 >> {'loss': 0.1099, 'learning_rate': 1.1349e-05, 'epoch': 7.97, 'throughput': 6315.75} +[INFO|2025-08-26 09:10:52] logging.py:143 >> {'loss': 0.1076, 'learning_rate': 1.0734e-05, 'epoch': 8.00, 'throughput': 6248.79} +[INFO|2025-08-26 09:13:25] logging.py:143 >> {'loss': 0.0796, 'learning_rate': 1.0134e-05, 'epoch': 8.06, 'throughput': 6119.85} +[INFO|2025-08-26 09:15:57] logging.py:143 >> {'loss': 0.0836, 'learning_rate': 9.5492e-06, 'epoch': 8.13, 'throughput': 5998.36} +[INFO|2025-08-26 09:18:29] logging.py:143 >> {'loss': 0.0874, 'learning_rate': 8.9799e-06, 'epoch': 8.19, 'throughput': 5883.42} +[INFO|2025-08-26 09:21:01] logging.py:143 >> {'loss': 0.0755, 'learning_rate': 8.4265e-06, 'epoch': 8.26, 'throughput': 5774.87} +[INFO|2025-08-26 09:23:33] logging.py:143 >> {'loss': 0.0710, 'learning_rate': 7.8891e-06, 'epoch': 8.32, 'throughput': 5671.37} +[INFO|2025-08-26 09:26:04] logging.py:143 >> {'loss': 0.0742, 'learning_rate': 7.3680e-06, 'epoch': 8.39, 'throughput': 5573.80} +[INFO|2025-08-26 09:28:36] logging.py:143 >> {'loss': 0.0784, 'learning_rate': 6.8633e-06, 
'epoch': 8.45, 'throughput': 5480.57} +[INFO|2025-08-26 09:31:08] logging.py:143 >> {'loss': 0.0706, 'learning_rate': 6.3752e-06, 'epoch': 8.52, 'throughput': 5391.83} +[INFO|2025-08-26 09:33:39] logging.py:143 >> {'loss': 0.0678, 'learning_rate': 5.9039e-06, 'epoch': 8.58, 'throughput': 5307.07} +[INFO|2025-08-26 09:36:11] logging.py:143 >> {'loss': 0.0778, 'learning_rate': 5.4497e-06, 'epoch': 8.65, 'throughput': 5226.11} +[INFO|2025-08-26 09:38:43] logging.py:143 >> {'loss': 0.0719, 'learning_rate': 5.0126e-06, 'epoch': 8.71, 'throughput': 5148.81} +[INFO|2025-08-26 09:41:15] logging.py:143 >> {'loss': 0.0748, 'learning_rate': 4.5928e-06, 'epoch': 8.77, 'throughput': 5074.60} +[INFO|2025-08-26 09:43:47] logging.py:143 >> {'loss': 0.0828, 'learning_rate': 4.1906e-06, 'epoch': 8.84, 'throughput': 5003.61} +[INFO|2025-08-26 09:46:20] logging.py:143 >> {'loss': 0.0761, 'learning_rate': 3.8060e-06, 'epoch': 8.90, 'throughput': 4935.24} +[INFO|2025-08-26 09:48:52] logging.py:143 >> {'loss': 0.0771, 'learning_rate': 3.4393e-06, 'epoch': 8.97, 'throughput': 4869.83} +[INFO|2025-08-26 09:50:08] logging.py:143 >> {'loss': 0.0753, 'learning_rate': 3.0904e-06, 'epoch': 9.00, 'throughput': 4838.03} +[INFO|2025-08-26 09:52:40] logging.py:143 >> {'loss': 0.0681, 'learning_rate': 2.7597e-06, 'epoch': 9.06, 'throughput': 4776.83} +[INFO|2025-08-26 09:55:12] logging.py:143 >> {'loss': 0.0643, 'learning_rate': 2.4472e-06, 'epoch': 9.13, 'throughput': 4717.55} +[INFO|2025-08-26 09:57:44] logging.py:143 >> {'loss': 0.0589, 'learning_rate': 2.1530e-06, 'epoch': 9.19, 'throughput': 4660.52} +[INFO|2025-08-26 10:00:16] logging.py:143 >> {'loss': 0.0628, 'learning_rate': 1.8772e-06, 'epoch': 9.26, 'throughput': 4605.81} +[INFO|2025-08-26 10:02:49] logging.py:143 >> {'loss': 0.0663, 'learning_rate': 1.6200e-06, 'epoch': 9.32, 'throughput': 4552.98} +[INFO|2025-08-26 10:05:21] logging.py:143 >> {'loss': 0.0506, 'learning_rate': 1.3815e-06, 'epoch': 9.39, 'throughput': 4502.18} +[INFO|2025-08-26 10:07:53] logging.py:143 >> {'loss': 0.0627, 'learning_rate': 1.1617e-06, 'epoch': 9.45, 'throughput': 4453.17} +[INFO|2025-08-26 10:10:25] logging.py:143 >> {'loss': 0.0610, 'learning_rate': 9.6074e-07, 'epoch': 9.52, 'throughput': 4405.70} +[INFO|2025-08-26 10:12:58] logging.py:143 >> {'loss': 0.0652, 'learning_rate': 7.7867e-07, 'epoch': 9.58, 'throughput': 4359.41} +[INFO|2025-08-26 10:15:31] logging.py:143 >> {'loss': 0.0560, 'learning_rate': 6.1558e-07, 'epoch': 9.65, 'throughput': 4315.04} +[INFO|2025-08-26 10:18:03] logging.py:143 >> {'loss': 0.0658, 'learning_rate': 4.7153e-07, 'epoch': 9.71, 'throughput': 4272.24} +[INFO|2025-08-26 10:20:35] logging.py:143 >> {'loss': 0.0673, 'learning_rate': 3.4658e-07, 'epoch': 9.77, 'throughput': 4230.87} +[INFO|2025-08-26 10:22:00] trainer.py:4074 >> Saving model checkpoint to saves/Llama-3.1-70B/lora/B_3/checkpoint-155 +[INFO|2025-08-26 10:22:00] configuration_utils.py:750 >> loading configuration file /workspace/meta-llama/Llama-3.1-70B/config.json +[INFO|2025-08-26 10:22:00] configuration_utils.py:817 >> Model config LlamaConfig { + "architectures": [ + "LlamaForCausalLM" + ], + "attention_bias": false, + "attention_dropout": 0.0, + "bos_token_id": 128000, + "eos_token_id": 128001, + "head_dim": 128, + "hidden_act": "silu", + "hidden_size": 8192, + "initializer_range": 0.02, + "intermediate_size": 28672, + "max_position_embeddings": 131072, + "mlp_bias": false, + "model_type": "llama", + "num_attention_heads": 64, + "num_hidden_layers": 80, + "num_key_value_heads": 8, + 
"pretraining_tp": 1, + "rms_norm_eps": 1e-05, + "rope_scaling": { + "factor": 8.0, + "high_freq_factor": 4.0, + "low_freq_factor": 1.0, + "original_max_position_embeddings": 8192, + "rope_type": "llama3" + }, + "rope_theta": 500000.0, + "tie_word_embeddings": false, + "torch_dtype": "bfloat16", + "transformers_version": "4.55.0", + "use_cache": true, + "vocab_size": 128256 +} + +[INFO|2025-08-26 10:22:06] tokenization_utils_base.py:2393 >> chat template saved in saves/Llama-3.1-70B/lora/B_3/checkpoint-155/chat_template.jinja +[INFO|2025-08-26 10:22:06] tokenization_utils_base.py:2562 >> tokenizer config file saved in saves/Llama-3.1-70B/lora/B_3/checkpoint-155/tokenizer_config.json +[INFO|2025-08-26 10:22:06] tokenization_utils_base.py:2571 >> Special tokens file saved in saves/Llama-3.1-70B/lora/B_3/checkpoint-155/special_tokens_map.json +[INFO|2025-08-26 10:24:49] logging.py:143 >> {'loss': 0.0727, 'learning_rate': 2.4076e-07, 'epoch': 9.84, 'throughput': 4146.56} +[INFO|2025-08-26 10:27:21] logging.py:143 >> {'loss': 0.0597, 'learning_rate': 1.5413e-07, 'epoch': 9.90, 'throughput': 4109.07} +[INFO|2025-08-26 10:29:53] logging.py:143 >> {'loss': 0.0631, 'learning_rate': 8.6719e-08, 'epoch': 9.97, 'throughput': 4072.61} +[INFO|2025-08-26 10:31:08] logging.py:143 >> {'loss': 0.0649, 'learning_rate': 3.8548e-08, 'epoch': 10.00, 'throughput': 4055.02} +[INFO|2025-08-26 10:31:08] trainer.py:2718 >> + +Training completed. Do not forget to share your model on huggingface.co/models =) + + +[INFO|2025-08-26 10:32:36] trainer.py:4074 >> Saving model checkpoint to saves/Llama-3.1-70B/lora/B_3 +[INFO|2025-08-26 10:32:36] configuration_utils.py:750 >> loading configuration file /workspace/meta-llama/Llama-3.1-70B/config.json +[INFO|2025-08-26 10:32:36] configuration_utils.py:817 >> Model config LlamaConfig { + "architectures": [ + "LlamaForCausalLM" + ], + "attention_bias": false, + "attention_dropout": 0.0, + "bos_token_id": 128000, + "eos_token_id": 128001, + "head_dim": 128, + "hidden_act": "silu", + "hidden_size": 8192, + "initializer_range": 0.02, + "intermediate_size": 28672, + "max_position_embeddings": 131072, + "mlp_bias": false, + "model_type": "llama", + "num_attention_heads": 64, + "num_hidden_layers": 80, + "num_key_value_heads": 8, + "pretraining_tp": 1, + "rms_norm_eps": 1e-05, + "rope_scaling": { + "factor": 8.0, + "high_freq_factor": 4.0, + "low_freq_factor": 1.0, + "original_max_position_embeddings": 8192, + "rope_type": "llama3" + }, + "rope_theta": 500000.0, + "tie_word_embeddings": false, + "torch_dtype": "bfloat16", + "transformers_version": "4.55.0", + "use_cache": true, + "vocab_size": 128256 +} + +[INFO|2025-08-26 10:32:44] tokenization_utils_base.py:2393 >> chat template saved in saves/Llama-3.1-70B/lora/B_3/chat_template.jinja +[INFO|2025-08-26 10:32:44] tokenization_utils_base.py:2562 >> tokenizer config file saved in saves/Llama-3.1-70B/lora/B_3/tokenizer_config.json +[INFO|2025-08-26 10:32:44] tokenization_utils_base.py:2571 >> Special tokens file saved in saves/Llama-3.1-70B/lora/B_3/special_tokens_map.json +[WARNING|2025-08-26 10:32:52] logging.py:148 >> No metric eval_loss to plot. 
+[INFO|2025-08-26 10:32:52] modelcard.py:456 >> Dropping the following result as it does not have all the necessary fields: +{'task': {'name': 'Causal Language Modeling', 'type': 'text-generation'}} diff --git a/B_3/special_tokens_map.json b/B_3/special_tokens_map.json new file mode 100644 index 0000000000000000000000000000000000000000..14daf4588e61b4e4983af0fccaba4d5500c0977c --- /dev/null +++ b/B_3/special_tokens_map.json @@ -0,0 +1,26 @@ +{ + "additional_special_tokens": [ + { + "content": "<|eom_id|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + } + ], + "bos_token": { + "content": "<|begin_of_text|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + }, + "eos_token": { + "content": "<|eot_id|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + }, + "pad_token": "<|eot_id|>" +} diff --git a/B_3/tokenizer.json b/B_3/tokenizer.json new file mode 100644 index 0000000000000000000000000000000000000000..1c1d8d5c9024994f1d3b00f9662b8dd89ca13cf2 --- /dev/null +++ b/B_3/tokenizer.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6b9e4e7fb171f92fd137b777cc2714bf87d11576700a1dcd7a399e7bbe39537b +size 17209920 diff --git a/B_3/tokenizer_config.json b/B_3/tokenizer_config.json new file mode 100644 index 0000000000000000000000000000000000000000..d1e1ea9bc94ff1132f136710751e37fb23a64347 --- /dev/null +++ b/B_3/tokenizer_config.json @@ -0,0 +1,2068 @@ +{ + "added_tokens_decoder": { + "128000": { + "content": "<|begin_of_text|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128001": { + "content": "<|end_of_text|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128002": { + "content": "<|reserved_special_token_0|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128003": { + "content": "<|reserved_special_token_1|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128004": { + "content": "<|finetune_right_pad_id|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128005": { + "content": "<|reserved_special_token_2|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128006": { + "content": "<|start_header_id|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128007": { + "content": "<|end_header_id|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128008": { + "content": "<|eom_id|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128009": { + "content": "<|eot_id|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128010": { + "content": "<|python_tag|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128011": { + "content": "<|reserved_special_token_3|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128012": { + "content": "<|reserved_special_token_4|>", + "lstrip": false, + "normalized": false, + 
"rstrip": false, + "single_word": false, + "special": true + }, + "128013": { + "content": "<|reserved_special_token_5|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128014": { + "content": "<|reserved_special_token_6|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128015": { + "content": "<|reserved_special_token_7|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128016": { + "content": "<|reserved_special_token_8|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128017": { + "content": "<|reserved_special_token_9|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128018": { + "content": "<|reserved_special_token_10|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128019": { + "content": "<|reserved_special_token_11|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128020": { + "content": "<|reserved_special_token_12|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128021": { + "content": "<|reserved_special_token_13|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128022": { + "content": "<|reserved_special_token_14|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128023": { + "content": "<|reserved_special_token_15|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128024": { + "content": "<|reserved_special_token_16|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128025": { + "content": "<|reserved_special_token_17|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128026": { + "content": "<|reserved_special_token_18|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128027": { + "content": "<|reserved_special_token_19|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128028": { + "content": "<|reserved_special_token_20|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128029": { + "content": "<|reserved_special_token_21|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128030": { + "content": "<|reserved_special_token_22|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128031": { + "content": "<|reserved_special_token_23|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128032": { + "content": "<|reserved_special_token_24|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128033": { + "content": "<|reserved_special_token_25|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + 
"single_word": false, + "special": true + }, + "128034": { + "content": "<|reserved_special_token_26|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128035": { + "content": "<|reserved_special_token_27|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128036": { + "content": "<|reserved_special_token_28|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128037": { + "content": "<|reserved_special_token_29|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128038": { + "content": "<|reserved_special_token_30|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128039": { + "content": "<|reserved_special_token_31|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128040": { + "content": "<|reserved_special_token_32|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128041": { + "content": "<|reserved_special_token_33|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128042": { + "content": "<|reserved_special_token_34|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128043": { + "content": "<|reserved_special_token_35|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128044": { + "content": "<|reserved_special_token_36|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128045": { + "content": "<|reserved_special_token_37|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128046": { + "content": "<|reserved_special_token_38|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128047": { + "content": "<|reserved_special_token_39|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128048": { + "content": "<|reserved_special_token_40|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128049": { + "content": "<|reserved_special_token_41|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128050": { + "content": "<|reserved_special_token_42|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128051": { + "content": "<|reserved_special_token_43|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128052": { + "content": "<|reserved_special_token_44|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128053": { + "content": "<|reserved_special_token_45|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128054": { + "content": "<|reserved_special_token_46|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + 
"special": true + }, + "128055": { + "content": "<|reserved_special_token_47|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128056": { + "content": "<|reserved_special_token_48|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128057": { + "content": "<|reserved_special_token_49|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128058": { + "content": "<|reserved_special_token_50|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128059": { + "content": "<|reserved_special_token_51|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128060": { + "content": "<|reserved_special_token_52|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128061": { + "content": "<|reserved_special_token_53|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128062": { + "content": "<|reserved_special_token_54|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128063": { + "content": "<|reserved_special_token_55|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128064": { + "content": "<|reserved_special_token_56|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128065": { + "content": "<|reserved_special_token_57|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128066": { + "content": "<|reserved_special_token_58|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128067": { + "content": "<|reserved_special_token_59|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128068": { + "content": "<|reserved_special_token_60|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128069": { + "content": "<|reserved_special_token_61|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128070": { + "content": "<|reserved_special_token_62|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128071": { + "content": "<|reserved_special_token_63|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128072": { + "content": "<|reserved_special_token_64|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128073": { + "content": "<|reserved_special_token_65|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128074": { + "content": "<|reserved_special_token_66|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128075": { + "content": "<|reserved_special_token_67|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + 
"128076": { + "content": "<|reserved_special_token_68|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128077": { + "content": "<|reserved_special_token_69|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128078": { + "content": "<|reserved_special_token_70|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128079": { + "content": "<|reserved_special_token_71|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128080": { + "content": "<|reserved_special_token_72|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128081": { + "content": "<|reserved_special_token_73|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128082": { + "content": "<|reserved_special_token_74|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128083": { + "content": "<|reserved_special_token_75|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128084": { + "content": "<|reserved_special_token_76|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128085": { + "content": "<|reserved_special_token_77|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128086": { + "content": "<|reserved_special_token_78|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128087": { + "content": "<|reserved_special_token_79|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128088": { + "content": "<|reserved_special_token_80|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128089": { + "content": "<|reserved_special_token_81|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128090": { + "content": "<|reserved_special_token_82|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128091": { + "content": "<|reserved_special_token_83|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128092": { + "content": "<|reserved_special_token_84|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128093": { + "content": "<|reserved_special_token_85|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128094": { + "content": "<|reserved_special_token_86|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128095": { + "content": "<|reserved_special_token_87|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128096": { + "content": "<|reserved_special_token_88|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128097": { + "content": 
"<|reserved_special_token_89|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128098": { + "content": "<|reserved_special_token_90|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128099": { + "content": "<|reserved_special_token_91|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128100": { + "content": "<|reserved_special_token_92|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128101": { + "content": "<|reserved_special_token_93|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128102": { + "content": "<|reserved_special_token_94|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128103": { + "content": "<|reserved_special_token_95|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128104": { + "content": "<|reserved_special_token_96|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128105": { + "content": "<|reserved_special_token_97|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128106": { + "content": "<|reserved_special_token_98|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128107": { + "content": "<|reserved_special_token_99|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128108": { + "content": "<|reserved_special_token_100|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128109": { + "content": "<|reserved_special_token_101|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128110": { + "content": "<|reserved_special_token_102|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128111": { + "content": "<|reserved_special_token_103|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128112": { + "content": "<|reserved_special_token_104|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128113": { + "content": "<|reserved_special_token_105|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128114": { + "content": "<|reserved_special_token_106|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128115": { + "content": "<|reserved_special_token_107|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128116": { + "content": "<|reserved_special_token_108|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128117": { + "content": "<|reserved_special_token_109|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128118": { + "content": 
"<|reserved_special_token_110|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128119": { + "content": "<|reserved_special_token_111|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128120": { + "content": "<|reserved_special_token_112|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128121": { + "content": "<|reserved_special_token_113|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128122": { + "content": "<|reserved_special_token_114|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128123": { + "content": "<|reserved_special_token_115|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128124": { + "content": "<|reserved_special_token_116|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128125": { + "content": "<|reserved_special_token_117|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128126": { + "content": "<|reserved_special_token_118|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128127": { + "content": "<|reserved_special_token_119|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128128": { + "content": "<|reserved_special_token_120|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128129": { + "content": "<|reserved_special_token_121|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128130": { + "content": "<|reserved_special_token_122|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128131": { + "content": "<|reserved_special_token_123|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128132": { + "content": "<|reserved_special_token_124|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128133": { + "content": "<|reserved_special_token_125|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128134": { + "content": "<|reserved_special_token_126|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128135": { + "content": "<|reserved_special_token_127|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128136": { + "content": "<|reserved_special_token_128|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128137": { + "content": "<|reserved_special_token_129|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128138": { + "content": "<|reserved_special_token_130|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128139": { + "content": 
"<|reserved_special_token_131|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128140": { + "content": "<|reserved_special_token_132|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128141": { + "content": "<|reserved_special_token_133|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128142": { + "content": "<|reserved_special_token_134|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128143": { + "content": "<|reserved_special_token_135|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128144": { + "content": "<|reserved_special_token_136|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128145": { + "content": "<|reserved_special_token_137|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128146": { + "content": "<|reserved_special_token_138|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128147": { + "content": "<|reserved_special_token_139|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128148": { + "content": "<|reserved_special_token_140|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128149": { + "content": "<|reserved_special_token_141|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128150": { + "content": "<|reserved_special_token_142|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128151": { + "content": "<|reserved_special_token_143|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128152": { + "content": "<|reserved_special_token_144|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128153": { + "content": "<|reserved_special_token_145|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128154": { + "content": "<|reserved_special_token_146|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128155": { + "content": "<|reserved_special_token_147|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128156": { + "content": "<|reserved_special_token_148|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128157": { + "content": "<|reserved_special_token_149|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128158": { + "content": "<|reserved_special_token_150|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128159": { + "content": "<|reserved_special_token_151|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128160": { + "content": 
"<|reserved_special_token_152|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128161": { + "content": "<|reserved_special_token_153|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128162": { + "content": "<|reserved_special_token_154|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128163": { + "content": "<|reserved_special_token_155|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128164": { + "content": "<|reserved_special_token_156|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128165": { + "content": "<|reserved_special_token_157|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128166": { + "content": "<|reserved_special_token_158|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128167": { + "content": "<|reserved_special_token_159|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128168": { + "content": "<|reserved_special_token_160|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128169": { + "content": "<|reserved_special_token_161|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128170": { + "content": "<|reserved_special_token_162|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128171": { + "content": "<|reserved_special_token_163|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128172": { + "content": "<|reserved_special_token_164|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128173": { + "content": "<|reserved_special_token_165|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128174": { + "content": "<|reserved_special_token_166|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128175": { + "content": "<|reserved_special_token_167|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128176": { + "content": "<|reserved_special_token_168|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128177": { + "content": "<|reserved_special_token_169|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128178": { + "content": "<|reserved_special_token_170|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128179": { + "content": "<|reserved_special_token_171|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128180": { + "content": "<|reserved_special_token_172|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128181": { + "content": 
"<|reserved_special_token_173|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128182": { + "content": "<|reserved_special_token_174|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128183": { + "content": "<|reserved_special_token_175|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128184": { + "content": "<|reserved_special_token_176|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128185": { + "content": "<|reserved_special_token_177|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128186": { + "content": "<|reserved_special_token_178|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128187": { + "content": "<|reserved_special_token_179|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128188": { + "content": "<|reserved_special_token_180|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128189": { + "content": "<|reserved_special_token_181|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128190": { + "content": "<|reserved_special_token_182|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128191": { + "content": "<|reserved_special_token_183|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128192": { + "content": "<|reserved_special_token_184|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128193": { + "content": "<|reserved_special_token_185|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128194": { + "content": "<|reserved_special_token_186|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128195": { + "content": "<|reserved_special_token_187|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128196": { + "content": "<|reserved_special_token_188|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128197": { + "content": "<|reserved_special_token_189|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128198": { + "content": "<|reserved_special_token_190|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128199": { + "content": "<|reserved_special_token_191|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128200": { + "content": "<|reserved_special_token_192|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128201": { + "content": "<|reserved_special_token_193|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128202": { + "content": 
"<|reserved_special_token_194|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128203": { + "content": "<|reserved_special_token_195|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128204": { + "content": "<|reserved_special_token_196|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128205": { + "content": "<|reserved_special_token_197|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128206": { + "content": "<|reserved_special_token_198|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128207": { + "content": "<|reserved_special_token_199|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128208": { + "content": "<|reserved_special_token_200|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128209": { + "content": "<|reserved_special_token_201|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128210": { + "content": "<|reserved_special_token_202|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128211": { + "content": "<|reserved_special_token_203|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128212": { + "content": "<|reserved_special_token_204|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128213": { + "content": "<|reserved_special_token_205|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128214": { + "content": "<|reserved_special_token_206|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128215": { + "content": "<|reserved_special_token_207|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128216": { + "content": "<|reserved_special_token_208|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128217": { + "content": "<|reserved_special_token_209|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128218": { + "content": "<|reserved_special_token_210|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128219": { + "content": "<|reserved_special_token_211|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128220": { + "content": "<|reserved_special_token_212|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128221": { + "content": "<|reserved_special_token_213|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128222": { + "content": "<|reserved_special_token_214|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128223": { + "content": 
"<|reserved_special_token_215|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128224": { + "content": "<|reserved_special_token_216|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128225": { + "content": "<|reserved_special_token_217|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128226": { + "content": "<|reserved_special_token_218|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128227": { + "content": "<|reserved_special_token_219|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128228": { + "content": "<|reserved_special_token_220|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128229": { + "content": "<|reserved_special_token_221|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128230": { + "content": "<|reserved_special_token_222|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128231": { + "content": "<|reserved_special_token_223|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128232": { + "content": "<|reserved_special_token_224|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128233": { + "content": "<|reserved_special_token_225|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128234": { + "content": "<|reserved_special_token_226|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128235": { + "content": "<|reserved_special_token_227|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128236": { + "content": "<|reserved_special_token_228|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128237": { + "content": "<|reserved_special_token_229|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128238": { + "content": "<|reserved_special_token_230|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128239": { + "content": "<|reserved_special_token_231|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128240": { + "content": "<|reserved_special_token_232|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128241": { + "content": "<|reserved_special_token_233|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128242": { + "content": "<|reserved_special_token_234|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128243": { + "content": "<|reserved_special_token_235|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128244": { + "content": 
"<|reserved_special_token_236|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128245": { + "content": "<|reserved_special_token_237|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128246": { + "content": "<|reserved_special_token_238|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128247": { + "content": "<|reserved_special_token_239|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128248": { + "content": "<|reserved_special_token_240|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128249": { + "content": "<|reserved_special_token_241|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128250": { + "content": "<|reserved_special_token_242|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128251": { + "content": "<|reserved_special_token_243|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128252": { + "content": "<|reserved_special_token_244|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128253": { + "content": "<|reserved_special_token_245|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128254": { + "content": "<|reserved_special_token_246|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "128255": { + "content": "<|reserved_special_token_247|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + } + }, + "additional_special_tokens": [ + "<|eom_id|>" + ], + "bos_token": "<|begin_of_text|>", + "clean_up_tokenization_spaces": true, + "eos_token": "<|eot_id|>", + "extra_special_tokens": {}, + "model_input_names": [ + "input_ids", + "attention_mask" + ], + "model_max_length": 131072, + "pad_token": "<|eot_id|>", + "padding_side": "right", + "split_special_tokens": false, + "tokenizer_class": "PreTrainedTokenizerFast" +} diff --git a/B_3/train_results.json b/B_3/train_results.json new file mode 100644 index 0000000000000000000000000000000000000000..155f7b8126e20313309f8fd37711b433fc1b8644 --- /dev/null +++ b/B_3/train_results.json @@ -0,0 +1,9 @@ +{ + "epoch": 10.0, + "num_input_tokens_seen": 40612480, + "total_flos": 320867359260672.0, + "train_loss": 0.048082037963582284, + "train_runtime": 10016.9898, + "train_samples_per_second": 1.974, + "train_steps_per_second": 0.016 +} \ No newline at end of file diff --git a/B_3/trainer_log.jsonl b/B_3/trainer_log.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a7c64366646825224cd16e82069ffce0845fd91d --- /dev/null +++ b/B_3/trainer_log.jsonl @@ -0,0 +1,177 @@ +{"current_steps": 1, "total_steps": 160, "loss": 1.3591, "lr": 0.0001, "epoch": 0.06451612903225806, "percentage": 0.62, "elapsed_time": "0:02:27", "remaining_time": "6:30:41", "throughput": 1777.19, "total_tokens": 262016} +{"current_steps": 2, "total_steps": 160, "loss": 1.4321, "lr": 9.999036202410325e-05, "epoch": 0.12903225806451613, "percentage": 1.25, "elapsed_time": "0:04:56", "remaining_time": 
"6:30:25", "throughput": 1767.24, "total_tokens": 524032} +{"current_steps": 3, "total_steps": 160, "loss": 1.3036, "lr": 9.996145181203615e-05, "epoch": 0.1935483870967742, "percentage": 1.88, "elapsed_time": "0:07:27", "remaining_time": "6:30:21", "throughput": 1756.39, "total_tokens": 786048} +{"current_steps": 4, "total_steps": 160, "loss": 1.3119, "lr": 9.991328050923581e-05, "epoch": 0.25806451612903225, "percentage": 2.5, "elapsed_time": "0:09:59", "remaining_time": "6:29:21", "throughput": 1749.62, "total_tokens": 1048064} +{"current_steps": 5, "total_steps": 160, "loss": 1.21, "lr": 9.98458666866564e-05, "epoch": 0.3225806451612903, "percentage": 3.12, "elapsed_time": "0:12:30", "remaining_time": "6:27:53", "throughput": 1745.03, "total_tokens": 1310080} +{"current_steps": 6, "total_steps": 160, "loss": 1.1713, "lr": 9.975923633360985e-05, "epoch": 0.3870967741935484, "percentage": 3.75, "elapsed_time": "0:15:02", "remaining_time": "6:26:00", "throughput": 1742.23, "total_tokens": 1572096} +{"current_steps": 7, "total_steps": 160, "loss": 1.1687, "lr": 9.965342284774632e-05, "epoch": 0.45161290322580644, "percentage": 4.38, "elapsed_time": "0:17:33", "remaining_time": "6:23:55", "throughput": 1740.3, "total_tokens": 1834112} +{"current_steps": 8, "total_steps": 160, "loss": 1.1151, "lr": 9.952846702217886e-05, "epoch": 0.5161290322580645, "percentage": 5.0, "elapsed_time": "0:20:05", "remaining_time": "6:21:43", "throughput": 1738.85, "total_tokens": 2096128} +{"current_steps": 9, "total_steps": 160, "loss": 1.0694, "lr": 9.938441702975689e-05, "epoch": 0.5806451612903226, "percentage": 5.62, "elapsed_time": "0:22:36", "remaining_time": "6:19:20", "throughput": 1738.31, "total_tokens": 2358144} +{"current_steps": 10, "total_steps": 160, "loss": 1.0431, "lr": 9.922132840449459e-05, "epoch": 0.6451612903225806, "percentage": 6.25, "elapsed_time": "0:25:07", "remaining_time": "6:16:53", "throughput": 1737.98, "total_tokens": 2620160} +{"current_steps": 11, "total_steps": 160, "loss": 1.0812, "lr": 9.903926402016153e-05, "epoch": 0.7096774193548387, "percentage": 6.88, "elapsed_time": "0:27:39", "remaining_time": "6:14:38", "throughput": 1736.79, "total_tokens": 2882176} +{"current_steps": 12, "total_steps": 160, "loss": 1.0296, "lr": 9.883829406604363e-05, "epoch": 0.7741935483870968, "percentage": 7.5, "elapsed_time": "0:30:10", "remaining_time": "6:12:11", "throughput": 1736.46, "total_tokens": 3144192} +{"current_steps": 13, "total_steps": 160, "loss": 0.9806, "lr": 9.861849601988383e-05, "epoch": 0.8387096774193549, "percentage": 8.12, "elapsed_time": "0:32:41", "remaining_time": "6:09:44", "throughput": 1736.19, "total_tokens": 3406208} +{"current_steps": 14, "total_steps": 160, "loss": 1.0244, "lr": 9.837995461801299e-05, "epoch": 0.9032258064516129, "percentage": 8.75, "elapsed_time": "0:35:13", "remaining_time": "6:07:22", "throughput": 1735.46, "total_tokens": 3668224} +{"current_steps": 15, "total_steps": 160, "loss": 0.9701, "lr": 9.812276182268236e-05, "epoch": 0.967741935483871, "percentage": 9.38, "elapsed_time": "0:37:45", "remaining_time": "6:04:59", "throughput": 1734.87, "total_tokens": 3930240} +{"current_steps": 16, "total_steps": 160, "loss": 0.9885, "lr": 9.784701678661045e-05, "epoch": 1.0, "percentage": 10.0, "elapsed_time": "0:39:01", "remaining_time": "5:51:15", "throughput": 1734.31, "total_tokens": 4061248} +{"current_steps": 17, "total_steps": 160, "loss": 0.9221, "lr": 9.755282581475769e-05, "epoch": 1.064516129032258, "percentage": 10.62, 
"elapsed_time": "0:41:33", "remaining_time": "5:49:37", "throughput": 1733.6, "total_tokens": 4323264} +{"current_steps": 18, "total_steps": 160, "loss": 0.9187, "lr": 9.724030232334391e-05, "epoch": 1.129032258064516, "percentage": 11.25, "elapsed_time": "0:44:05", "remaining_time": "5:47:49", "throughput": 1733.28, "total_tokens": 4585280} +{"current_steps": 19, "total_steps": 160, "loss": 0.8943, "lr": 9.690956679612421e-05, "epoch": 1.1935483870967742, "percentage": 11.88, "elapsed_time": "0:46:37", "remaining_time": "5:45:58", "throughput": 1732.9, "total_tokens": 4847296} +{"current_steps": 20, "total_steps": 160, "loss": 0.8567, "lr": 9.656074673794018e-05, "epoch": 1.2580645161290323, "percentage": 12.5, "elapsed_time": "0:49:09", "remaining_time": "5:44:03", "throughput": 1732.55, "total_tokens": 5109312} +{"current_steps": 21, "total_steps": 160, "loss": 0.8557, "lr": 9.619397662556435e-05, "epoch": 1.3225806451612903, "percentage": 13.12, "elapsed_time": "0:51:41", "remaining_time": "5:42:09", "throughput": 1731.84, "total_tokens": 5371328} +{"current_steps": 22, "total_steps": 160, "loss": 0.8546, "lr": 9.580939785585681e-05, "epoch": 1.3870967741935485, "percentage": 13.75, "elapsed_time": "0:54:13", "remaining_time": "5:40:05", "throughput": 1731.7, "total_tokens": 5633344} +{"current_steps": 23, "total_steps": 160, "loss": 0.851, "lr": 9.540715869125407e-05, "epoch": 1.4516129032258065, "percentage": 14.37, "elapsed_time": "0:56:45", "remaining_time": "5:38:03", "throughput": 1731.27, "total_tokens": 5895360} +{"current_steps": 24, "total_steps": 160, "loss": 0.8483, "lr": 9.498741420261108e-05, "epoch": 1.5161290322580645, "percentage": 15.0, "elapsed_time": "0:59:17", "remaining_time": "5:35:57", "throughput": 1730.98, "total_tokens": 6157376} +{"current_steps": 25, "total_steps": 160, "loss": 0.8226, "lr": 9.45503262094184e-05, "epoch": 1.5806451612903225, "percentage": 15.62, "elapsed_time": "1:01:49", "remaining_time": "5:33:51", "throughput": 1730.52, "total_tokens": 6419392} +{"current_steps": 26, "total_steps": 160, "loss": 0.8259, "lr": 9.409606321741775e-05, "epoch": 1.6451612903225805, "percentage": 16.25, "elapsed_time": "1:04:22", "remaining_time": "5:31:45", "throughput": 1729.94, "total_tokens": 6681408} +{"current_steps": 27, "total_steps": 160, "loss": 0.8524, "lr": 9.362480035363986e-05, "epoch": 1.7096774193548387, "percentage": 16.88, "elapsed_time": "1:06:54", "remaining_time": "5:29:37", "throughput": 1729.42, "total_tokens": 6943424} +{"current_steps": 28, "total_steps": 160, "loss": 0.8209, "lr": 9.31367192988896e-05, "epoch": 1.7741935483870968, "percentage": 17.5, "elapsed_time": "1:09:27", "remaining_time": "5:27:26", "throughput": 1729.01, "total_tokens": 7205440} +{"current_steps": 29, "total_steps": 160, "loss": 0.7853, "lr": 9.263200821770461e-05, "epoch": 1.838709677419355, "percentage": 18.12, "elapsed_time": "1:12:00", "remaining_time": "5:25:15", "throughput": 1728.51, "total_tokens": 7467456} +{"current_steps": 30, "total_steps": 160, "loss": 0.7779, "lr": 9.211086168581433e-05, "epoch": 1.903225806451613, "percentage": 18.75, "elapsed_time": "1:14:33", "remaining_time": "5:23:03", "throughput": 1727.94, "total_tokens": 7729472} +{"current_steps": 31, "total_steps": 160, "loss": 0.8146, "lr": 9.157348061512727e-05, "epoch": 1.967741935483871, "percentage": 19.38, "elapsed_time": "1:17:05", "remaining_time": "5:20:50", "throughput": 1727.52, "total_tokens": 7991488} +{"current_steps": 32, "total_steps": 160, "loss": 0.7787, "lr": 
9.102007217627568e-05, "epoch": 2.0, "percentage": 20.0, "elapsed_time": "1:20:08", "remaining_time": "5:20:32", "throughput": 1689.34, "total_tokens": 8122496} +{"current_steps": 33, "total_steps": 160, "loss": 0.7361, "lr": 9.045084971874738e-05, "epoch": 2.064516129032258, "percentage": 20.62, "elapsed_time": "1:22:38", "remaining_time": "5:18:04", "throughput": 1690.8, "total_tokens": 8384512} +{"current_steps": 34, "total_steps": 160, "loss": 0.6956, "lr": 8.986603268863536e-05, "epoch": 2.129032258064516, "percentage": 21.25, "elapsed_time": "1:25:10", "remaining_time": "5:15:39", "throughput": 1691.88, "total_tokens": 8646528} +{"current_steps": 35, "total_steps": 160, "loss": 0.6819, "lr": 8.926584654403724e-05, "epoch": 2.193548387096774, "percentage": 21.88, "elapsed_time": "1:27:42", "remaining_time": "5:13:15", "throughput": 1692.73, "total_tokens": 8908544} +{"current_steps": 36, "total_steps": 160, "loss": 0.6958, "lr": 8.865052266813685e-05, "epoch": 2.258064516129032, "percentage": 22.5, "elapsed_time": "1:30:14", "remaining_time": "5:10:50", "throughput": 1693.64, "total_tokens": 9170560} +{"current_steps": 37, "total_steps": 160, "loss": 0.6756, "lr": 8.802029828000156e-05, "epoch": 2.3225806451612905, "percentage": 23.12, "elapsed_time": "1:32:46", "remaining_time": "5:08:24", "throughput": 1694.57, "total_tokens": 9432576} +{"current_steps": 38, "total_steps": 160, "loss": 0.648, "lr": 8.737541634312985e-05, "epoch": 2.3870967741935485, "percentage": 23.75, "elapsed_time": "1:35:18", "remaining_time": "5:05:58", "throughput": 1695.34, "total_tokens": 9694592} +{"current_steps": 39, "total_steps": 160, "loss": 0.669, "lr": 8.671612547178428e-05, "epoch": 2.4516129032258065, "percentage": 24.38, "elapsed_time": "1:37:50", "remaining_time": "5:03:32", "throughput": 1696.12, "total_tokens": 9956608} +{"current_steps": 40, "total_steps": 160, "loss": 0.6856, "lr": 8.604267983514594e-05, "epoch": 2.5161290322580645, "percentage": 25.0, "elapsed_time": "1:40:22", "remaining_time": "5:01:07", "throughput": 1696.73, "total_tokens": 10218624} +{"current_steps": 41, "total_steps": 160, "loss": 0.6949, "lr": 8.535533905932738e-05, "epoch": 2.5806451612903225, "percentage": 25.62, "elapsed_time": "1:42:55", "remaining_time": "4:58:42", "throughput": 1697.25, "total_tokens": 10480640} +{"current_steps": 42, "total_steps": 160, "loss": 0.7467, "lr": 8.46543681272818e-05, "epoch": 2.6451612903225805, "percentage": 26.25, "elapsed_time": "1:45:27", "remaining_time": "4:56:17", "throughput": 1697.74, "total_tokens": 10742656} +{"current_steps": 43, "total_steps": 160, "loss": 0.6638, "lr": 8.39400372766471e-05, "epoch": 2.709677419354839, "percentage": 26.88, "elapsed_time": "1:48:00", "remaining_time": "4:53:51", "throughput": 1698.22, "total_tokens": 11004672} +{"current_steps": 44, "total_steps": 160, "loss": 0.7096, "lr": 8.321262189556409e-05, "epoch": 2.774193548387097, "percentage": 27.5, "elapsed_time": "1:50:32", "remaining_time": "4:51:24", "throughput": 1698.78, "total_tokens": 11266688} +{"current_steps": 45, "total_steps": 160, "loss": 0.6737, "lr": 8.247240241650918e-05, "epoch": 2.838709677419355, "percentage": 28.12, "elapsed_time": "1:53:04", "remaining_time": "4:48:57", "throughput": 1699.35, "total_tokens": 11528704} +{"current_steps": 46, "total_steps": 160, "loss": 0.6638, "lr": 8.171966420818228e-05, "epoch": 2.903225806451613, "percentage": 28.75, "elapsed_time": "1:55:35", "remaining_time": "4:46:28", "throughput": 1699.99, "total_tokens": 11790720} 
+{"current_steps": 47, "total_steps": 160, "loss": 0.6753, "lr": 8.095469746549172e-05, "epoch": 2.967741935483871, "percentage": 29.38, "elapsed_time": "1:58:07", "remaining_time": "4:44:00", "throughput": 1700.5, "total_tokens": 12052736} +{"current_steps": 48, "total_steps": 160, "loss": 0.6673, "lr": 8.017779709767858e-05, "epoch": 3.0, "percentage": 30.0, "elapsed_time": "1:59:23", "remaining_time": "4:38:35", "throughput": 1700.73, "total_tokens": 12183744} +{"current_steps": 49, "total_steps": 160, "loss": 0.6049, "lr": 7.938926261462366e-05, "epoch": 3.064516129032258, "percentage": 30.63, "elapsed_time": "2:01:55", "remaining_time": "4:36:12", "throughput": 1701.21, "total_tokens": 12445760} +{"current_steps": 50, "total_steps": 160, "loss": 1.0716, "lr": 7.858939801138061e-05, "epoch": 3.129032258064516, "percentage": 31.25, "elapsed_time": "2:04:27", "remaining_time": "4:33:48", "throughput": 1701.76, "total_tokens": 12707776} +{"current_steps": 51, "total_steps": 160, "loss": 1.0428, "lr": 7.777851165098012e-05, "epoch": 3.193548387096774, "percentage": 31.87, "elapsed_time": "2:06:58", "remaining_time": "4:31:23", "throughput": 1702.3, "total_tokens": 12969792} +{"current_steps": 52, "total_steps": 160, "loss": 0.9172, "lr": 7.695691614555003e-05, "epoch": 3.258064516129032, "percentage": 32.5, "elapsed_time": "2:09:30", "remaining_time": "4:28:59", "throughput": 1702.78, "total_tokens": 13231808} +{"current_steps": 53, "total_steps": 160, "loss": 0.7593, "lr": 7.612492823579745e-05, "epoch": 3.3225806451612905, "percentage": 33.12, "elapsed_time": "2:12:03", "remaining_time": "4:26:35", "throughput": 1703.07, "total_tokens": 13493824} +{"current_steps": 54, "total_steps": 160, "loss": 0.6151, "lr": 7.528286866889924e-05, "epoch": 3.3870967741935485, "percentage": 33.75, "elapsed_time": "2:14:35", "remaining_time": "4:24:11", "throughput": 1703.41, "total_tokens": 13755840} +{"current_steps": 55, "total_steps": 160, "loss": 0.5964, "lr": 7.443106207484776e-05, "epoch": 3.4516129032258065, "percentage": 34.38, "elapsed_time": "2:17:07", "remaining_time": "4:21:47", "throughput": 1703.74, "total_tokens": 14017856} +{"current_steps": 56, "total_steps": 160, "loss": 0.5809, "lr": 7.35698368412999e-05, "epoch": 3.5161290322580645, "percentage": 35.0, "elapsed_time": "2:19:39", "remaining_time": "4:19:21", "throughput": 1704.17, "total_tokens": 14279872} +{"current_steps": 57, "total_steps": 160, "loss": 0.5638, "lr": 7.269952498697734e-05, "epoch": 3.5806451612903225, "percentage": 35.62, "elapsed_time": "2:22:11", "remaining_time": "4:16:56", "throughput": 1704.47, "total_tokens": 14541888} +{"current_steps": 58, "total_steps": 160, "loss": 0.5553, "lr": 7.18204620336671e-05, "epoch": 3.6451612903225805, "percentage": 36.25, "elapsed_time": "2:24:43", "remaining_time": "4:14:31", "throughput": 1704.8, "total_tokens": 14803904} +{"current_steps": 59, "total_steps": 160, "loss": 0.5555, "lr": 7.09329868768714e-05, "epoch": 3.709677419354839, "percentage": 36.88, "elapsed_time": "2:27:16", "remaining_time": "4:12:06", "throughput": 1705.03, "total_tokens": 15065920} +{"current_steps": 60, "total_steps": 160, "loss": 0.5635, "lr": 7.003744165515705e-05, "epoch": 3.774193548387097, "percentage": 37.5, "elapsed_time": "2:29:49", "remaining_time": "4:09:41", "throughput": 1705.16, "total_tokens": 15327936} +{"current_steps": 61, "total_steps": 160, "loss": 0.5775, "lr": 6.91341716182545e-05, "epoch": 3.838709677419355, "percentage": 38.12, "elapsed_time": "2:32:21", "remaining_time": 
"4:07:15", "throughput": 1705.44, "total_tokens": 15589952} +{"current_steps": 62, "total_steps": 160, "loss": 0.5305, "lr": 6.82235249939575e-05, "epoch": 3.903225806451613, "percentage": 38.75, "elapsed_time": "2:34:53", "remaining_time": "4:04:49", "throughput": 1705.78, "total_tokens": 15851968} +{"current_steps": 63, "total_steps": 160, "loss": 0.5428, "lr": 6.730585285387465e-05, "epoch": 3.967741935483871, "percentage": 39.38, "elapsed_time": "2:38:56", "remaining_time": "4:04:43", "throughput": 1689.7, "total_tokens": 16113984} +{"current_steps": 64, "total_steps": 160, "loss": 0.5671, "lr": 6.638150897808468e-05, "epoch": 4.0, "percentage": 40.0, "elapsed_time": "2:40:11", "remaining_time": "4:00:17", "throughput": 1690.08, "total_tokens": 16244992} +{"current_steps": 65, "total_steps": 160, "loss": 0.4715, "lr": 6.545084971874738e-05, "epoch": 4.064516129032258, "percentage": 40.62, "elapsed_time": "2:42:43", "remaining_time": "3:57:49", "throughput": 1690.7, "total_tokens": 16507008} +{"current_steps": 66, "total_steps": 160, "loss": 0.4488, "lr": 6.451423386272312e-05, "epoch": 4.129032258064516, "percentage": 41.25, "elapsed_time": "2:45:14", "remaining_time": "3:55:21", "throughput": 1691.32, "total_tokens": 16769024} +{"current_steps": 67, "total_steps": 160, "loss": 0.4582, "lr": 6.357202249325371e-05, "epoch": 4.193548387096774, "percentage": 41.88, "elapsed_time": "2:47:46", "remaining_time": "3:52:53", "throughput": 1691.79, "total_tokens": 17031040} +{"current_steps": 68, "total_steps": 160, "loss": 0.4588, "lr": 6.26245788507579e-05, "epoch": 4.258064516129032, "percentage": 42.5, "elapsed_time": "2:50:18", "remaining_time": "3:50:24", "throughput": 1692.34, "total_tokens": 17293056} +{"current_steps": 69, "total_steps": 160, "loss": 0.4447, "lr": 6.167226819279528e-05, "epoch": 4.32258064516129, "percentage": 43.12, "elapsed_time": "2:52:50", "remaining_time": "3:47:56", "throughput": 1692.85, "total_tokens": 17555072} +{"current_steps": 70, "total_steps": 160, "loss": 0.4565, "lr": 6.071545765325254e-05, "epoch": 4.387096774193548, "percentage": 43.75, "elapsed_time": "2:55:22", "remaining_time": "3:45:28", "throughput": 1693.24, "total_tokens": 17817088} +{"current_steps": 71, "total_steps": 160, "loss": 0.4665, "lr": 5.9754516100806423e-05, "epoch": 4.451612903225806, "percentage": 44.38, "elapsed_time": "2:57:54", "remaining_time": "3:43:01", "throughput": 1693.63, "total_tokens": 18079104} +{"current_steps": 72, "total_steps": 160, "loss": 0.446, "lr": 5.8789813996717736e-05, "epoch": 4.516129032258064, "percentage": 45.0, "elapsed_time": "3:00:27", "remaining_time": "3:40:33", "throughput": 1694.0, "total_tokens": 18341120} +{"current_steps": 73, "total_steps": 160, "loss": 0.4384, "lr": 5.782172325201155e-05, "epoch": 4.580645161290323, "percentage": 45.62, "elapsed_time": "3:02:59", "remaining_time": "3:38:05", "throughput": 1694.34, "total_tokens": 18603136} +{"current_steps": 74, "total_steps": 160, "loss": 0.4339, "lr": 5.685061708409841e-05, "epoch": 4.645161290322581, "percentage": 46.25, "elapsed_time": "3:05:31", "remaining_time": "3:35:37", "throughput": 1694.69, "total_tokens": 18865152} +{"current_steps": 75, "total_steps": 160, "loss": 0.444, "lr": 5.587686987289189e-05, "epoch": 4.709677419354839, "percentage": 46.88, "elapsed_time": "3:08:03", "remaining_time": "3:33:08", "throughput": 1695.09, "total_tokens": 19127168} +{"current_steps": 76, "total_steps": 160, "loss": 0.4353, "lr": 5.490085701647805e-05, "epoch": 4.774193548387097, "percentage": 
47.5, "elapsed_time": "3:10:35", "remaining_time": "3:30:39", "throughput": 1695.48, "total_tokens": 19389184} +{"current_steps": 77, "total_steps": 160, "loss": 0.4257, "lr": 5.392295478639225e-05, "epoch": 4.838709677419355, "percentage": 48.12, "elapsed_time": "3:13:07", "remaining_time": "3:28:10", "throughput": 1695.84, "total_tokens": 19651200} +{"current_steps": 78, "total_steps": 160, "loss": 0.4402, "lr": 5.294354018255945e-05, "epoch": 4.903225806451613, "percentage": 48.75, "elapsed_time": "3:15:39", "remaining_time": "3:25:41", "throughput": 1696.2, "total_tokens": 19913216} +{"current_steps": 79, "total_steps": 160, "loss": 0.4326, "lr": 5.196299078795344e-05, "epoch": 4.967741935483871, "percentage": 49.38, "elapsed_time": "3:18:11", "remaining_time": "3:23:12", "throughput": 1696.57, "total_tokens": 20175232} +{"current_steps": 80, "total_steps": 160, "loss": 0.4125, "lr": 5.0981684623031415e-05, "epoch": 5.0, "percentage": 50.0, "elapsed_time": "3:19:27", "remaining_time": "3:19:27", "throughput": 1696.78, "total_tokens": 20306240} +{"current_steps": 81, "total_steps": 160, "loss": 0.3198, "lr": 5e-05, "epoch": 5.064516129032258, "percentage": 50.62, "elapsed_time": "3:21:59", "remaining_time": "3:17:00", "throughput": 1697.12, "total_tokens": 20568256} +{"current_steps": 82, "total_steps": 160, "loss": 0.2964, "lr": 4.901831537696859e-05, "epoch": 5.129032258064516, "percentage": 51.25, "elapsed_time": "3:24:31", "remaining_time": "3:14:32", "throughput": 1697.5, "total_tokens": 20830272} +{"current_steps": 83, "total_steps": 160, "loss": 0.3057, "lr": 4.8037009212046586e-05, "epoch": 5.193548387096774, "percentage": 51.88, "elapsed_time": "3:27:03", "remaining_time": "3:12:05", "throughput": 1697.82, "total_tokens": 21092288} +{"current_steps": 84, "total_steps": 160, "loss": 0.3129, "lr": 4.7056459817440544e-05, "epoch": 5.258064516129032, "percentage": 52.5, "elapsed_time": "3:29:34", "remaining_time": "3:09:37", "throughput": 1698.18, "total_tokens": 21354304} +{"current_steps": 85, "total_steps": 160, "loss": 0.2966, "lr": 4.607704521360776e-05, "epoch": 5.32258064516129, "percentage": 53.12, "elapsed_time": "3:32:06", "remaining_time": "3:07:09", "throughput": 1698.56, "total_tokens": 21616320} +{"current_steps": 86, "total_steps": 160, "loss": 0.2895, "lr": 4.509914298352197e-05, "epoch": 5.387096774193548, "percentage": 53.75, "elapsed_time": "3:34:38", "remaining_time": "3:04:41", "throughput": 1698.89, "total_tokens": 21878336} +{"current_steps": 87, "total_steps": 160, "loss": 0.2926, "lr": 4.412313012710813e-05, "epoch": 5.451612903225806, "percentage": 54.37, "elapsed_time": "3:37:09", "remaining_time": "3:02:12", "throughput": 1699.23, "total_tokens": 22140352} +{"current_steps": 88, "total_steps": 160, "loss": 0.3056, "lr": 4.3149382915901606e-05, "epoch": 5.516129032258064, "percentage": 55.0, "elapsed_time": "3:39:41", "remaining_time": "2:59:44", "throughput": 1699.58, "total_tokens": 22402368} +{"current_steps": 89, "total_steps": 160, "loss": 0.2922, "lr": 4.2178276747988446e-05, "epoch": 5.580645161290323, "percentage": 55.62, "elapsed_time": "3:42:12", "remaining_time": "2:57:15", "throughput": 1699.95, "total_tokens": 22664384} +{"current_steps": 90, "total_steps": 160, "loss": 0.2947, "lr": 4.1210186003282275e-05, "epoch": 5.645161290322581, "percentage": 56.25, "elapsed_time": "3:44:44", "remaining_time": "2:54:47", "throughput": 1700.25, "total_tokens": 22926400} +{"current_steps": 91, "total_steps": 160, "loss": 0.2809, "lr": 
4.0245483899193595e-05, "epoch": 5.709677419354839, "percentage": 56.88, "elapsed_time": "3:47:15", "remaining_time": "2:52:19", "throughput": 1700.59, "total_tokens": 23188416} +{"current_steps": 92, "total_steps": 160, "loss": 0.2835, "lr": 3.928454234674747e-05, "epoch": 5.774193548387097, "percentage": 57.5, "elapsed_time": "3:49:47", "remaining_time": "2:49:50", "throughput": 1700.9, "total_tokens": 23450432} +{"current_steps": 93, "total_steps": 160, "loss": 0.2899, "lr": 3.832773180720475e-05, "epoch": 5.838709677419355, "percentage": 58.13, "elapsed_time": "3:52:19", "remaining_time": "2:47:22", "throughput": 1701.12, "total_tokens": 23712448} +{"current_steps": 94, "total_steps": 160, "loss": 0.272, "lr": 3.73754211492421e-05, "epoch": 5.903225806451613, "percentage": 58.75, "elapsed_time": "3:56:44", "remaining_time": "2:46:13", "throughput": 1687.76, "total_tokens": 23974464} +{"current_steps": 95, "total_steps": 160, "loss": 0.2951, "lr": 3.642797750674629e-05, "epoch": 5.967741935483871, "percentage": 59.38, "elapsed_time": "3:59:16", "remaining_time": "2:43:42", "throughput": 1688.23, "total_tokens": 24236480} +{"current_steps": 96, "total_steps": 160, "loss": 0.2745, "lr": 3.5485766137276894e-05, "epoch": 6.0, "percentage": 60.0, "elapsed_time": "4:00:31", "remaining_time": "2:40:21", "throughput": 1688.44, "total_tokens": 24367488} +{"current_steps": 94, "total_steps": 160, "loss": 0.2187, "lr": 3.73754211492421e-05, "epoch": 5.903225806451613, "percentage": 58.75, "elapsed_time": "0:02:29", "remaining_time": "0:01:45", "throughput": 160013.44, "total_tokens": 23974464} +{"current_steps": 95, "total_steps": 160, "loss": 0.2062, "lr": 3.642797750674629e-05, "epoch": 5.967741935483871, "percentage": 59.38, "elapsed_time": "0:05:00", "remaining_time": "0:03:25", "throughput": 80666.59, "total_tokens": 24236480} +{"current_steps": 96, "total_steps": 160, "loss": 0.3805, "lr": 3.5485766137276894e-05, "epoch": 6.064516129032258, "percentage": 60.0, "elapsed_time": "0:08:47", "remaining_time": "0:05:51", "throughput": 46698.08, "total_tokens": 24629504} +{"current_steps": 97, "total_steps": 160, "loss": 0.1957, "lr": 3.4549150281252636e-05, "epoch": 6.129032258064516, "percentage": 60.62, "elapsed_time": "0:11:18", "remaining_time": "0:07:20", "throughput": 36680.02, "total_tokens": 24891520} +{"current_steps": 98, "total_steps": 160, "loss": 0.2051, "lr": 3.361849102191533e-05, "epoch": 6.193548387096774, "percentage": 61.25, "elapsed_time": "0:13:49", "remaining_time": "0:08:45", "throughput": 30306.85, "total_tokens": 25153536} +{"current_steps": 99, "total_steps": 160, "loss": 0.191, "lr": 3.2694147146125345e-05, "epoch": 6.258064516129032, "percentage": 61.88, "elapsed_time": "0:16:21", "remaining_time": "0:10:04", "throughput": 25899.76, "total_tokens": 25415552} +{"current_steps": 100, "total_steps": 160, "loss": 0.1848, "lr": 3.177647500604252e-05, "epoch": 6.32258064516129, "percentage": 62.5, "elapsed_time": "0:18:52", "remaining_time": "0:11:19", "throughput": 22665.24, "total_tokens": 25677568} +{"current_steps": 101, "total_steps": 160, "loss": 0.1821, "lr": 3.086582838174551e-05, "epoch": 6.387096774193548, "percentage": 63.12, "elapsed_time": "0:21:24", "remaining_time": "0:12:30", "throughput": 20192.5, "total_tokens": 25939584} +{"current_steps": 102, "total_steps": 160, "loss": 0.1733, "lr": 2.996255834484296e-05, "epoch": 6.451612903225806, "percentage": 63.75, "elapsed_time": "0:23:55", "remaining_time": "0:13:36", "throughput": 18248.71, "total_tokens": 
26201600} +{"current_steps": 103, "total_steps": 160, "loss": 0.1867, "lr": 2.9067013123128613e-05, "epoch": 6.516129032258064, "percentage": 64.38, "elapsed_time": "0:26:27", "remaining_time": "0:14:38", "throughput": 16673.22, "total_tokens": 26463616} +{"current_steps": 104, "total_steps": 160, "loss": 0.1886, "lr": 2.8179537966332887e-05, "epoch": 6.580645161290323, "percentage": 65.0, "elapsed_time": "0:28:58", "remaining_time": "0:15:36", "throughput": 15369.89, "total_tokens": 26725632} +{"current_steps": 105, "total_steps": 160, "loss": 0.1997, "lr": 2.7300475013022663e-05, "epoch": 6.645161290322581, "percentage": 65.62, "elapsed_time": "0:31:30", "remaining_time": "0:16:30", "throughput": 14278.44, "total_tokens": 26987648} +{"current_steps": 106, "total_steps": 160, "loss": 0.1862, "lr": 2.6430163158700115e-05, "epoch": 6.709677419354839, "percentage": 66.25, "elapsed_time": "0:34:01", "remaining_time": "0:17:19", "throughput": 13350.87, "total_tokens": 27249664} +{"current_steps": 107, "total_steps": 160, "loss": 0.1645, "lr": 2.556893792515227e-05, "epoch": 6.774193548387097, "percentage": 66.88, "elapsed_time": "0:36:32", "remaining_time": "0:18:05", "throughput": 12550.64, "total_tokens": 27511680} +{"current_steps": 94, "total_steps": 160, "loss": 0.2187, "lr": 3.73754211492421e-05, "epoch": 5.903225806451613, "percentage": 58.75, "elapsed_time": "0:02:31", "remaining_time": "0:01:46", "throughput": 158574.48, "total_tokens": 23974464} +{"current_steps": 95, "total_steps": 160, "loss": 0.2062, "lr": 3.642797750674629e-05, "epoch": 5.967741935483871, "percentage": 59.38, "elapsed_time": "0:05:02", "remaining_time": "0:03:27", "throughput": 80048.8, "total_tokens": 24236480} +{"current_steps": 96, "total_steps": 160, "loss": 0.3804, "lr": 3.5485766137276894e-05, "epoch": 6.064516129032258, "percentage": 60.0, "elapsed_time": "0:08:49", "remaining_time": "0:05:53", "throughput": 46495.19, "total_tokens": 24629504} +{"current_steps": 97, "total_steps": 160, "loss": 0.1957, "lr": 3.4549150281252636e-05, "epoch": 6.129032258064516, "percentage": 60.62, "elapsed_time": "0:11:21", "remaining_time": "0:07:22", "throughput": 36510.06, "total_tokens": 24891520} +{"current_steps": 98, "total_steps": 160, "loss": 0.205, "lr": 3.361849102191533e-05, "epoch": 6.193548387096774, "percentage": 61.25, "elapsed_time": "0:13:53", "remaining_time": "0:08:47", "throughput": 30164.83, "total_tokens": 25153536} +{"current_steps": 99, "total_steps": 160, "loss": 0.1908, "lr": 3.2694147146125345e-05, "epoch": 6.258064516129032, "percentage": 61.88, "elapsed_time": "0:16:25", "remaining_time": "0:10:07", "throughput": 25779.39, "total_tokens": 25415552} +{"current_steps": 100, "total_steps": 160, "loss": 0.1847, "lr": 3.177647500604252e-05, "epoch": 6.32258064516129, "percentage": 62.5, "elapsed_time": "0:18:58", "remaining_time": "0:11:22", "throughput": 22560.17, "total_tokens": 25677568} +{"current_steps": 101, "total_steps": 160, "loss": 0.1817, "lr": 3.086582838174551e-05, "epoch": 6.387096774193548, "percentage": 63.12, "elapsed_time": "0:21:29", "remaining_time": "0:12:33", "throughput": 20111.55, "total_tokens": 25939584} +{"current_steps": 102, "total_steps": 160, "loss": 0.1734, "lr": 2.996255834484296e-05, "epoch": 6.451612903225806, "percentage": 63.75, "elapsed_time": "0:24:02", "remaining_time": "0:13:40", "throughput": 18166.65, "total_tokens": 26201600} +{"current_steps": 103, "total_steps": 160, "loss": 0.1869, "lr": 2.9067013123128613e-05, "epoch": 6.516129032258064, "percentage": 
64.38, "elapsed_time": "0:26:34", "remaining_time": "0:14:42", "throughput": 16593.12, "total_tokens": 26463616} +{"current_steps": 104, "total_steps": 160, "loss": 0.1892, "lr": 2.8179537966332887e-05, "epoch": 6.580645161290323, "percentage": 65.0, "elapsed_time": "0:29:07", "remaining_time": "0:15:40", "throughput": 15295.94, "total_tokens": 26725632} +{"current_steps": 105, "total_steps": 160, "loss": 0.2003, "lr": 2.7300475013022663e-05, "epoch": 6.645161290322581, "percentage": 65.62, "elapsed_time": "0:31:39", "remaining_time": "0:16:34", "throughput": 14210.44, "total_tokens": 26987648} +{"current_steps": 106, "total_steps": 160, "loss": 0.1869, "lr": 2.6430163158700115e-05, "epoch": 6.709677419354839, "percentage": 66.25, "elapsed_time": "0:34:11", "remaining_time": "0:17:25", "throughput": 13282.93, "total_tokens": 27249664} +{"current_steps": 107, "total_steps": 160, "loss": 0.1652, "lr": 2.556893792515227e-05, "epoch": 6.774193548387097, "percentage": 66.88, "elapsed_time": "0:36:44", "remaining_time": "0:18:11", "throughput": 12481.27, "total_tokens": 27511680} +{"current_steps": 108, "total_steps": 160, "loss": 0.1931, "lr": 2.471713133110078e-05, "epoch": 6.838709677419355, "percentage": 67.5, "elapsed_time": "0:39:16", "remaining_time": "0:18:54", "throughput": 11785.11, "total_tokens": 27773696} +{"current_steps": 109, "total_steps": 160, "loss": 0.1841, "lr": 2.3875071764202563e-05, "epoch": 6.903225806451613, "percentage": 68.12, "elapsed_time": "0:41:49", "remaining_time": "0:19:34", "throughput": 11172.53, "total_tokens": 28035712} +{"current_steps": 110, "total_steps": 160, "loss": 0.1843, "lr": 2.3043083854449988e-05, "epoch": 6.967741935483871, "percentage": 68.75, "elapsed_time": "0:44:22", "remaining_time": "0:20:10", "throughput": 10630.2, "total_tokens": 28297728} +{"current_steps": 111, "total_steps": 160, "loss": 0.1603, "lr": 2.2221488349019903e-05, "epoch": 7.0, "percentage": 69.38, "elapsed_time": "0:45:38", "remaining_time": "0:20:08", "throughput": 10382.25, "total_tokens": 28428736} +{"current_steps": 112, "total_steps": 160, "loss": 0.116, "lr": 2.1410601988619394e-05, "epoch": 7.064516129032258, "percentage": 70.0, "elapsed_time": "0:48:10", "remaining_time": "0:20:38", "throughput": 9925.06, "total_tokens": 28690752} +{"current_steps": 113, "total_steps": 160, "loss": 0.1325, "lr": 2.061073738537635e-05, "epoch": 7.129032258064516, "percentage": 70.62, "elapsed_time": "0:50:43", "remaining_time": "0:21:05", "throughput": 9513.74, "total_tokens": 28952768} +{"current_steps": 114, "total_steps": 160, "loss": 0.1212, "lr": 1.982220290232143e-05, "epoch": 7.193548387096774, "percentage": 71.25, "elapsed_time": "0:53:15", "remaining_time": "0:21:29", "throughput": 9141.86, "total_tokens": 29214784} +{"current_steps": 115, "total_steps": 160, "loss": 0.121, "lr": 1.9045302534508297e-05, "epoch": 7.258064516129032, "percentage": 71.88, "elapsed_time": "0:55:48", "remaining_time": "0:21:50", "throughput": 8802.81, "total_tokens": 29476800} +{"current_steps": 116, "total_steps": 160, "loss": 0.1172, "lr": 1.8280335791817733e-05, "epoch": 7.32258064516129, "percentage": 72.5, "elapsed_time": "0:58:21", "remaining_time": "0:22:07", "throughput": 8494.27, "total_tokens": 29738816} +{"current_steps": 117, "total_steps": 160, "loss": 0.1267, "lr": 1.7527597583490822e-05, "epoch": 7.387096774193548, "percentage": 73.12, "elapsed_time": "1:00:53", "remaining_time": "0:22:22", "throughput": 8211.08, "total_tokens": 30000832} +{"current_steps": 118, "total_steps": 160, 
"loss": 0.1132, "lr": 1.678737810443593e-05, "epoch": 7.451612903225806, "percentage": 73.75, "elapsed_time": "1:03:26", "remaining_time": "0:22:34", "throughput": 7951.35, "total_tokens": 30262848} +{"current_steps": 119, "total_steps": 160, "loss": 0.1036, "lr": 1.605996272335291e-05, "epoch": 7.516129032258064, "percentage": 74.38, "elapsed_time": "1:05:58", "remaining_time": "0:22:43", "throughput": 7710.8, "total_tokens": 30524864} +{"current_steps": 120, "total_steps": 160, "loss": 0.1085, "lr": 1.5345631872718214e-05, "epoch": 7.580645161290323, "percentage": 75.0, "elapsed_time": "1:08:31", "remaining_time": "0:22:50", "throughput": 7488.69, "total_tokens": 30786880} +{"current_steps": 121, "total_steps": 160, "loss": 0.1075, "lr": 1.4644660940672627e-05, "epoch": 7.645161290322581, "percentage": 75.62, "elapsed_time": "1:11:03", "remaining_time": "0:22:54", "throughput": 7282.61, "total_tokens": 31048896} +{"current_steps": 122, "total_steps": 160, "loss": 0.1091, "lr": 1.3957320164854059e-05, "epoch": 7.709677419354839, "percentage": 76.25, "elapsed_time": "1:13:35", "remaining_time": "0:22:55", "throughput": 7090.97, "total_tokens": 31310912} +{"current_steps": 123, "total_steps": 160, "loss": 0.1003, "lr": 1.3283874528215733e-05, "epoch": 7.774193548387097, "percentage": 76.88, "elapsed_time": "1:16:08", "remaining_time": "0:22:54", "throughput": 6911.66, "total_tokens": 31572928} +{"current_steps": 124, "total_steps": 160, "loss": 0.1123, "lr": 1.2624583656870154e-05, "epoch": 7.838709677419355, "percentage": 77.5, "elapsed_time": "1:18:40", "remaining_time": "0:22:50", "throughput": 6744.36, "total_tokens": 31834944} +{"current_steps": 125, "total_steps": 160, "loss": 0.1183, "lr": 1.1979701719998453e-05, "epoch": 7.903225806451613, "percentage": 78.12, "elapsed_time": "1:22:52", "remaining_time": "0:23:12", "throughput": 6454.73, "total_tokens": 32096960} +{"current_steps": 126, "total_steps": 160, "loss": 0.1099, "lr": 1.134947733186315e-05, "epoch": 7.967741935483871, "percentage": 78.75, "elapsed_time": "1:25:23", "remaining_time": "0:23:02", "throughput": 6315.75, "total_tokens": 32358976} +{"current_steps": 127, "total_steps": 160, "loss": 0.1076, "lr": 1.0734153455962765e-05, "epoch": 8.0, "percentage": 79.38, "elapsed_time": "1:26:39", "remaining_time": "0:22:31", "throughput": 6248.79, "total_tokens": 32489984} +{"current_steps": 128, "total_steps": 160, "loss": 0.0796, "lr": 1.013396731136465e-05, "epoch": 8.064516129032258, "percentage": 80.0, "elapsed_time": "1:29:11", "remaining_time": "0:22:17", "throughput": 6119.85, "total_tokens": 32752000} +{"current_steps": 129, "total_steps": 160, "loss": 0.0836, "lr": 9.549150281252633e-06, "epoch": 8.129032258064516, "percentage": 80.62, "elapsed_time": "1:31:43", "remaining_time": "0:22:02", "throughput": 5998.36, "total_tokens": 33014016} +{"current_steps": 130, "total_steps": 160, "loss": 0.0874, "lr": 8.97992782372432e-06, "epoch": 8.193548387096774, "percentage": 81.25, "elapsed_time": "1:34:15", "remaining_time": "0:21:45", "throughput": 5883.42, "total_tokens": 33276032} +{"current_steps": 131, "total_steps": 160, "loss": 0.0755, "lr": 8.426519384872733e-06, "epoch": 8.258064516129032, "percentage": 81.88, "elapsed_time": "1:36:47", "remaining_time": "0:21:25", "throughput": 5774.87, "total_tokens": 33538048} +{"current_steps": 132, "total_steps": 160, "loss": 0.071, "lr": 7.889138314185678e-06, "epoch": 8.32258064516129, "percentage": 82.5, "elapsed_time": "1:39:19", "remaining_time": "0:21:04", "throughput": 
5671.37, "total_tokens": 33800064} +{"current_steps": 133, "total_steps": 160, "loss": 0.0742, "lr": 7.367991782295391e-06, "epoch": 8.387096774193548, "percentage": 83.12, "elapsed_time": "1:41:51", "remaining_time": "0:20:40", "throughput": 5573.8, "total_tokens": 34062080} +{"current_steps": 134, "total_steps": 160, "loss": 0.0784, "lr": 6.863280701110408e-06, "epoch": 8.451612903225806, "percentage": 83.75, "elapsed_time": "1:44:22", "remaining_time": "0:20:15", "throughput": 5480.57, "total_tokens": 34324096} +{"current_steps": 135, "total_steps": 160, "loss": 0.0706, "lr": 6.375199646360142e-06, "epoch": 8.516129032258064, "percentage": 84.38, "elapsed_time": "1:46:54", "remaining_time": "0:19:47", "throughput": 5391.83, "total_tokens": 34586112} +{"current_steps": 136, "total_steps": 160, "loss": 0.0678, "lr": 5.903936782582253e-06, "epoch": 8.580645161290322, "percentage": 85.0, "elapsed_time": "1:49:26", "remaining_time": "0:19:18", "throughput": 5307.07, "total_tokens": 34848128} +{"current_steps": 137, "total_steps": 160, "loss": 0.0778, "lr": 5.449673790581611e-06, "epoch": 8.64516129032258, "percentage": 85.62, "elapsed_time": "1:51:58", "remaining_time": "0:18:47", "throughput": 5226.11, "total_tokens": 35110144} +{"current_steps": 138, "total_steps": 160, "loss": 0.0719, "lr": 5.012585797388936e-06, "epoch": 8.709677419354838, "percentage": 86.25, "elapsed_time": "1:54:29", "remaining_time": "0:18:15", "throughput": 5148.81, "total_tokens": 35372160} +{"current_steps": 139, "total_steps": 160, "loss": 0.0748, "lr": 4.592841308745932e-06, "epoch": 8.774193548387096, "percentage": 86.88, "elapsed_time": "1:57:02", "remaining_time": "0:17:40", "throughput": 5074.6, "total_tokens": 35634176} +{"current_steps": 140, "total_steps": 160, "loss": 0.0828, "lr": 4.190602144143207e-06, "epoch": 8.838709677419354, "percentage": 87.5, "elapsed_time": "1:59:34", "remaining_time": "0:17:04", "throughput": 5003.61, "total_tokens": 35896192} +{"current_steps": 141, "total_steps": 160, "loss": 0.0761, "lr": 3.8060233744356633e-06, "epoch": 8.903225806451612, "percentage": 88.12, "elapsed_time": "2:02:06", "remaining_time": "0:16:27", "throughput": 4935.24, "total_tokens": 36158208} +{"current_steps": 142, "total_steps": 160, "loss": 0.0771, "lr": 3.4392532620598216e-06, "epoch": 8.967741935483872, "percentage": 88.75, "elapsed_time": "2:04:38", "remaining_time": "0:15:48", "throughput": 4869.83, "total_tokens": 36420224} +{"current_steps": 143, "total_steps": 160, "loss": 0.0753, "lr": 3.0904332038757977e-06, "epoch": 9.0, "percentage": 89.38, "elapsed_time": "2:05:54", "remaining_time": "0:14:58", "throughput": 4838.03, "total_tokens": 36551232} +{"current_steps": 144, "total_steps": 160, "loss": 0.0681, "lr": 2.759697676656098e-06, "epoch": 9.064516129032258, "percentage": 90.0, "elapsed_time": "2:08:26", "remaining_time": "0:14:16", "throughput": 4776.83, "total_tokens": 36813248} +{"current_steps": 145, "total_steps": 160, "loss": 0.0643, "lr": 2.4471741852423237e-06, "epoch": 9.129032258064516, "percentage": 90.62, "elapsed_time": "2:10:59", "remaining_time": "0:13:33", "throughput": 4717.55, "total_tokens": 37075264} +{"current_steps": 146, "total_steps": 160, "loss": 0.0589, "lr": 2.152983213389559e-06, "epoch": 9.193548387096774, "percentage": 91.25, "elapsed_time": "2:13:31", "remaining_time": "0:12:48", "throughput": 4660.52, "total_tokens": 37337280} +{"current_steps": 147, "total_steps": 160, "loss": 0.0628, "lr": 1.8772381773176417e-06, "epoch": 9.258064516129032, "percentage": 
91.88, "elapsed_time": "2:16:03", "remaining_time": "0:12:01", "throughput": 4605.81, "total_tokens": 37599296} +{"current_steps": 148, "total_steps": 160, "loss": 0.0663, "lr": 1.620045381987012e-06, "epoch": 9.32258064516129, "percentage": 92.5, "elapsed_time": "2:18:35", "remaining_time": "0:11:14", "throughput": 4552.98, "total_tokens": 37861312} +{"current_steps": 149, "total_steps": 160, "loss": 0.0506, "lr": 1.3815039801161721e-06, "epoch": 9.387096774193548, "percentage": 93.12, "elapsed_time": "2:21:07", "remaining_time": "0:10:25", "throughput": 4502.18, "total_tokens": 38123328} +{"current_steps": 150, "total_steps": 160, "loss": 0.0627, "lr": 1.1617059339563807e-06, "epoch": 9.451612903225806, "percentage": 93.75, "elapsed_time": "2:23:39", "remaining_time": "0:09:34", "throughput": 4453.17, "total_tokens": 38385344} +{"current_steps": 151, "total_steps": 160, "loss": 0.061, "lr": 9.607359798384785e-07, "epoch": 9.516129032258064, "percentage": 94.38, "elapsed_time": "2:26:12", "remaining_time": "0:08:42", "throughput": 4405.7, "total_tokens": 38647360} +{"current_steps": 152, "total_steps": 160, "loss": 0.0652, "lr": 7.786715955054203e-07, "epoch": 9.580645161290322, "percentage": 95.0, "elapsed_time": "2:28:45", "remaining_time": "0:07:49", "throughput": 4359.41, "total_tokens": 38909376} +{"current_steps": 153, "total_steps": 160, "loss": 0.056, "lr": 6.15582970243117e-07, "epoch": 9.64516129032258, "percentage": 95.62, "elapsed_time": "2:31:17", "remaining_time": "0:06:55", "throughput": 4315.04, "total_tokens": 39171392} +{"current_steps": 154, "total_steps": 160, "loss": 0.0658, "lr": 4.715329778211375e-07, "epoch": 9.709677419354838, "percentage": 96.25, "elapsed_time": "2:33:50", "remaining_time": "0:05:59", "throughput": 4272.24, "total_tokens": 39433408} +{"current_steps": 155, "total_steps": 160, "loss": 0.0673, "lr": 3.465771522536854e-07, "epoch": 9.774193548387096, "percentage": 96.88, "elapsed_time": "2:36:22", "remaining_time": "0:05:02", "throughput": 4230.87, "total_tokens": 39695424} +{"current_steps": 156, "total_steps": 160, "loss": 0.0727, "lr": 2.407636663901591e-07, "epoch": 9.838709677419354, "percentage": 97.5, "elapsed_time": "2:40:36", "remaining_time": "0:04:07", "throughput": 4146.56, "total_tokens": 39957440} +{"current_steps": 157, "total_steps": 160, "loss": 0.0597, "lr": 1.5413331334360182e-07, "epoch": 9.903225806451612, "percentage": 98.12, "elapsed_time": "2:43:07", "remaining_time": "0:03:07", "throughput": 4109.07, "total_tokens": 40219456} +{"current_steps": 158, "total_steps": 160, "loss": 0.0631, "lr": 8.671949076420882e-08, "epoch": 9.967741935483872, "percentage": 98.75, "elapsed_time": "2:45:39", "remaining_time": "0:02:05", "throughput": 4072.61, "total_tokens": 40481472} +{"current_steps": 159, "total_steps": 160, "loss": 0.0649, "lr": 3.8548187963854956e-08, "epoch": 10.0, "percentage": 99.38, "elapsed_time": "2:46:55", "remaining_time": "0:01:02", "throughput": 4055.02, "total_tokens": 40612480} +{"current_steps": 159, "total_steps": 160, "epoch": 10.0, "percentage": 99.38, "elapsed_time": "2:46:55", "remaining_time": "0:01:02", "throughput": 4055.02, "total_tokens": 40612480} diff --git a/B_3/trainer_state.json b/B_3/trainer_state.json new file mode 100644 index 0000000000000000000000000000000000000000..291c727f792f3e1e304b00d993c14bab31111609 --- /dev/null +++ b/B_3/trainer_state.json @@ -0,0 +1,1634 @@ +{ + "best_global_step": null, + "best_metric": null, + "best_model_checkpoint": null, + "epoch": 10.0, + "eval_steps": 500, + 
"global_step": 159, + "is_hyper_param_search": false, + "is_local_process_zero": true, + "is_world_process_zero": true, + "log_history": [ + { + "epoch": 0.06451612903225806, + "grad_norm": 2.073743358513411, + "learning_rate": 0.0001, + "loss": 1.3591, + "num_input_tokens_seen": 262016, + "step": 1, + "train_runtime": 148.9807, + "train_tokens_per_second": 1758.725 + }, + { + "epoch": 0.12903225806451613, + "grad_norm": 1.7083966194301452, + "learning_rate": 9.999036202410325e-05, + "loss": 1.4321, + "num_input_tokens_seen": 524032, + "step": 2, + "train_runtime": 298.073, + "train_tokens_per_second": 1758.066 + }, + { + "epoch": 0.1935483870967742, + "grad_norm": 0.8154519457271471, + "learning_rate": 9.996145181203615e-05, + "loss": 1.3036, + "num_input_tokens_seen": 786048, + "step": 3, + "train_runtime": 449.085, + "train_tokens_per_second": 1750.332 + }, + { + "epoch": 0.25806451612903225, + "grad_norm": 1.6435854418760425, + "learning_rate": 9.991328050923581e-05, + "loss": 1.3119, + "num_input_tokens_seen": 1048064, + "step": 4, + "train_runtime": 600.5729, + "train_tokens_per_second": 1745.107 + }, + { + "epoch": 0.3225806451612903, + "grad_norm": 1.2034889741595727, + "learning_rate": 9.98458666866564e-05, + "loss": 1.21, + "num_input_tokens_seen": 1310080, + "step": 5, + "train_runtime": 752.2986, + "train_tokens_per_second": 1741.436 + }, + { + "epoch": 0.3870967741935484, + "grad_norm": 0.6081797430818349, + "learning_rate": 9.975923633360985e-05, + "loss": 1.1713, + "num_input_tokens_seen": 1572096, + "step": 6, + "train_runtime": 903.8941, + "train_tokens_per_second": 1739.248 + }, + { + "epoch": 0.45161290322580644, + "grad_norm": 0.43860201273645816, + "learning_rate": 9.965342284774632e-05, + "loss": 1.1687, + "num_input_tokens_seen": 1834112, + "step": 7, + "train_runtime": 1055.454, + "train_tokens_per_second": 1737.747 + }, + { + "epoch": 0.5161290322580645, + "grad_norm": 0.3489614049191675, + "learning_rate": 9.952846702217886e-05, + "loss": 1.1151, + "num_input_tokens_seen": 2096128, + "step": 8, + "train_runtime": 1207.0142, + "train_tokens_per_second": 1736.623 + }, + { + "epoch": 0.5806451612903226, + "grad_norm": 3.182590493238281, + "learning_rate": 9.938441702975689e-05, + "loss": 1.0694, + "num_input_tokens_seen": 2358144, + "step": 9, + "train_runtime": 1358.12, + "train_tokens_per_second": 1736.33 + }, + { + "epoch": 0.6451612903225806, + "grad_norm": 0.40091824929141967, + "learning_rate": 9.922132840449459e-05, + "loss": 1.0431, + "num_input_tokens_seen": 2620160, + "step": 10, + "train_runtime": 1509.1349, + "train_tokens_per_second": 1736.2 + }, + { + "epoch": 0.7096774193548387, + "grad_norm": 0.4778713425435316, + "learning_rate": 9.903926402016153e-05, + "loss": 1.0812, + "num_input_tokens_seen": 2882176, + "step": 11, + "train_runtime": 1661.0314, + "train_tokens_per_second": 1735.172 + }, + { + "epoch": 0.7741935483870968, + "grad_norm": 0.3361596465836105, + "learning_rate": 9.883829406604363e-05, + "loss": 1.0296, + "num_input_tokens_seen": 3144192, + "step": 12, + "train_runtime": 1812.2372, + "train_tokens_per_second": 1734.978 + }, + { + "epoch": 0.8387096774193549, + "grad_norm": 0.30484937194478195, + "learning_rate": 9.861849601988383e-05, + "loss": 0.9806, + "num_input_tokens_seen": 3406208, + "step": 13, + "train_runtime": 1963.4393, + "train_tokens_per_second": 1734.817 + }, + { + "epoch": 0.9032258064516129, + "grad_norm": 0.31491494208058013, + "learning_rate": 9.837995461801299e-05, + "loss": 1.0244, + "num_input_tokens_seen": 
3668224, + "step": 14, + "train_runtime": 2115.2404, + "train_tokens_per_second": 1734.188 + }, + { + "epoch": 0.967741935483871, + "grad_norm": 0.2555325387827011, + "learning_rate": 9.812276182268236e-05, + "loss": 0.9701, + "num_input_tokens_seen": 3930240, + "step": 15, + "train_runtime": 2266.9917, + "train_tokens_per_second": 1733.681 + }, + { + "epoch": 1.0, + "grad_norm": 0.2555325387827011, + "learning_rate": 9.784701678661045e-05, + "loss": 0.9885, + "num_input_tokens_seen": 4061248, + "step": 16, + "train_runtime": 2343.254, + "train_tokens_per_second": 1733.166 + }, + { + "epoch": 1.064516129032258, + "grad_norm": 0.3803435873199766, + "learning_rate": 9.755282581475769e-05, + "loss": 0.9221, + "num_input_tokens_seen": 4323264, + "step": 17, + "train_runtime": 2495.3616, + "train_tokens_per_second": 1732.52 + }, + { + "epoch": 1.129032258064516, + "grad_norm": 0.25742598219685603, + "learning_rate": 9.724030232334391e-05, + "loss": 0.9187, + "num_input_tokens_seen": 4585280, + "step": 18, + "train_runtime": 2646.9782, + "train_tokens_per_second": 1732.27 + }, + { + "epoch": 1.1935483870967742, + "grad_norm": 0.22468849320033518, + "learning_rate": 9.690956679612421e-05, + "loss": 0.8943, + "num_input_tokens_seen": 4847296, + "step": 19, + "train_runtime": 2798.7721, + "train_tokens_per_second": 1731.937 + }, + { + "epoch": 1.2580645161290323, + "grad_norm": 0.28670637895213946, + "learning_rate": 9.656074673794018e-05, + "loss": 0.8567, + "num_input_tokens_seen": 5109312, + "step": 20, + "train_runtime": 2950.5573, + "train_tokens_per_second": 1731.643 + }, + { + "epoch": 1.3225806451612903, + "grad_norm": 0.2043611694383423, + "learning_rate": 9.619397662556435e-05, + "loss": 0.8557, + "num_input_tokens_seen": 5371328, + "step": 21, + "train_runtime": 3103.0692, + "train_tokens_per_second": 1730.973 + }, + { + "epoch": 1.3870967741935485, + "grad_norm": 0.24667572059564058, + "learning_rate": 9.580939785585681e-05, + "loss": 0.8546, + "num_input_tokens_seen": 5633344, + "step": 22, + "train_runtime": 3254.6109, + "train_tokens_per_second": 1730.881 + }, + { + "epoch": 1.4516129032258065, + "grad_norm": 0.24067782128131054, + "learning_rate": 9.540715869125407e-05, + "loss": 0.851, + "num_input_tokens_seen": 5895360, + "step": 23, + "train_runtime": 3406.7776, + "train_tokens_per_second": 1730.48 + }, + { + "epoch": 1.5161290322580645, + "grad_norm": 0.2000848793492341, + "learning_rate": 9.498741420261108e-05, + "loss": 0.8483, + "num_input_tokens_seen": 6157376, + "step": 24, + "train_runtime": 3558.7104, + "train_tokens_per_second": 1730.227 + }, + { + "epoch": 1.5806451612903225, + "grad_norm": 0.22359283400518964, + "learning_rate": 9.45503262094184e-05, + "loss": 0.8226, + "num_input_tokens_seen": 6419392, + "step": 25, + "train_runtime": 3711.0551, + "train_tokens_per_second": 1729.802 + }, + { + "epoch": 1.6451612903225805, + "grad_norm": 1.0648985294170912, + "learning_rate": 9.409606321741775e-05, + "loss": 0.8259, + "num_input_tokens_seen": 6681408, + "step": 26, + "train_runtime": 3863.7709, + "train_tokens_per_second": 1729.245 + }, + { + "epoch": 1.7096774193548387, + "grad_norm": 0.18133437899141433, + "learning_rate": 9.362480035363986e-05, + "loss": 0.8524, + "num_input_tokens_seen": 6943424, + "step": 27, + "train_runtime": 4016.4383, + "train_tokens_per_second": 1728.752 + }, + { + "epoch": 1.7741935483870968, + "grad_norm": 0.456273625303925, + "learning_rate": 9.31367192988896e-05, + "loss": 0.8209, + "num_input_tokens_seen": 7205440, + "step": 28, + 
"train_runtime": 4168.9223, + "train_tokens_per_second": 1728.37 + }, + { + "epoch": 1.838709677419355, + "grad_norm": 0.4673225345753548, + "learning_rate": 9.263200821770461e-05, + "loss": 0.7853, + "num_input_tokens_seen": 7467456, + "step": 29, + "train_runtime": 4321.7056, + "train_tokens_per_second": 1727.896 + }, + { + "epoch": 1.903225806451613, + "grad_norm": 0.1996315966078707, + "learning_rate": 9.211086168581433e-05, + "loss": 0.7779, + "num_input_tokens_seen": 7729472, + "step": 30, + "train_runtime": 4474.7713, + "train_tokens_per_second": 1727.345 + }, + { + "epoch": 1.967741935483871, + "grad_norm": 0.3752545960960864, + "learning_rate": 9.157348061512727e-05, + "loss": 0.8146, + "num_input_tokens_seen": 7991488, + "step": 31, + "train_runtime": 4627.5272, + "train_tokens_per_second": 1726.946 + }, + { + "epoch": 2.0, + "grad_norm": 0.484664872247163, + "learning_rate": 9.102007217627568e-05, + "loss": 0.7787, + "num_input_tokens_seen": 8122496, + "step": 32, + "train_runtime": 4809.6299, + "train_tokens_per_second": 1688.799 + }, + { + "epoch": 2.064516129032258, + "grad_norm": 0.299023275639115, + "learning_rate": 9.045084971874738e-05, + "loss": 0.7361, + "num_input_tokens_seen": 8384512, + "step": 33, + "train_runtime": 4960.4611, + "train_tokens_per_second": 1690.269 + }, + { + "epoch": 2.129032258064516, + "grad_norm": 0.20981231086811225, + "learning_rate": 8.986603268863536e-05, + "loss": 0.6956, + "num_input_tokens_seen": 8646528, + "step": 34, + "train_runtime": 5112.1398, + "train_tokens_per_second": 1691.372 + }, + { + "epoch": 2.193548387096774, + "grad_norm": 0.3648857123126151, + "learning_rate": 8.926584654403724e-05, + "loss": 0.6819, + "num_input_tokens_seen": 8908544, + "step": 35, + "train_runtime": 5264.3603, + "train_tokens_per_second": 1692.237 + }, + { + "epoch": 2.258064516129032, + "grad_norm": 0.31269893807625165, + "learning_rate": 8.865052266813685e-05, + "loss": 0.6958, + "num_input_tokens_seen": 9170560, + "step": 36, + "train_runtime": 5416.2601, + "train_tokens_per_second": 1693.154 + }, + { + "epoch": 2.3225806451612905, + "grad_norm": 0.2727897516670068, + "learning_rate": 8.802029828000156e-05, + "loss": 0.6756, + "num_input_tokens_seen": 9432576, + "step": 37, + "train_runtime": 5567.9089, + "train_tokens_per_second": 1694.097 + }, + { + "epoch": 2.3870967741935485, + "grad_norm": 0.25721446603694037, + "learning_rate": 8.737541634312985e-05, + "loss": 0.648, + "num_input_tokens_seen": 9694592, + "step": 38, + "train_runtime": 5719.9113, + "train_tokens_per_second": 1694.885 + }, + { + "epoch": 2.4516129032258065, + "grad_norm": 20.44651031163169, + "learning_rate": 8.671612547178428e-05, + "loss": 0.669, + "num_input_tokens_seen": 9956608, + "step": 39, + "train_runtime": 5871.7821, + "train_tokens_per_second": 1695.671 + }, + { + "epoch": 2.5161290322580645, + "grad_norm": 3.0150996948618145, + "learning_rate": 8.604267983514594e-05, + "loss": 0.6856, + "num_input_tokens_seen": 10218624, + "step": 40, + "train_runtime": 6024.0794, + "train_tokens_per_second": 1696.296 + }, + { + "epoch": 2.5806451612903225, + "grad_norm": 19.306885630930985, + "learning_rate": 8.535533905932738e-05, + "loss": 0.6949, + "num_input_tokens_seen": 10480640, + "step": 41, + "train_runtime": 6176.6303, + "train_tokens_per_second": 1696.822 + }, + { + "epoch": 2.6451612903225805, + "grad_norm": 46.29810754342157, + "learning_rate": 8.46543681272818e-05, + "loss": 0.7467, + "num_input_tokens_seen": 10742656, + "step": 42, + "train_runtime": 6329.1576, + 
"train_tokens_per_second": 1697.328 + }, + { + "epoch": 2.709677419354839, + "grad_norm": 9.77264228502113, + "learning_rate": 8.39400372766471e-05, + "loss": 0.6638, + "num_input_tokens_seen": 11004672, + "step": 43, + "train_runtime": 6481.6817, + "train_tokens_per_second": 1697.811 + }, + { + "epoch": 2.774193548387097, + "grad_norm": 16.058872629216598, + "learning_rate": 8.321262189556409e-05, + "loss": 0.7096, + "num_input_tokens_seen": 11266688, + "step": 44, + "train_runtime": 6633.7733, + "train_tokens_per_second": 1698.383 + }, + { + "epoch": 2.838709677419355, + "grad_norm": 1.0261214777781495, + "learning_rate": 8.247240241650918e-05, + "loss": 0.6737, + "num_input_tokens_seen": 11528704, + "step": 45, + "train_runtime": 6785.7147, + "train_tokens_per_second": 1698.967 + }, + { + "epoch": 2.903225806451613, + "grad_norm": 1.6378077961358868, + "learning_rate": 8.171966420818228e-05, + "loss": 0.6638, + "num_input_tokens_seen": 11790720, + "step": 46, + "train_runtime": 6937.3194, + "train_tokens_per_second": 1699.607 + }, + { + "epoch": 2.967741935483871, + "grad_norm": 2.4944381489903455, + "learning_rate": 8.095469746549172e-05, + "loss": 0.6753, + "num_input_tokens_seen": 12052736, + "step": 47, + "train_runtime": 7089.3141, + "train_tokens_per_second": 1700.127 + }, + { + "epoch": 3.0, + "grad_norm": 2.4944381489903455, + "learning_rate": 8.017779709767858e-05, + "loss": 0.6673, + "num_input_tokens_seen": 12183744, + "step": 48, + "train_runtime": 7165.3736, + "train_tokens_per_second": 1700.364 + }, + { + "epoch": 3.064516129032258, + "grad_norm": 26.97115523850904, + "learning_rate": 7.938926261462366e-05, + "loss": 0.6049, + "num_input_tokens_seen": 12445760, + "step": 49, + "train_runtime": 7317.3555, + "train_tokens_per_second": 1700.855 + }, + { + "epoch": 3.129032258064516, + "grad_norm": 5.4430349870657375, + "learning_rate": 7.858939801138061e-05, + "loss": 1.0716, + "num_input_tokens_seen": 12707776, + "step": 50, + "train_runtime": 7468.9695, + "train_tokens_per_second": 1701.41 + }, + { + "epoch": 3.193548387096774, + "grad_norm": 12.78395158722848, + "learning_rate": 7.777851165098012e-05, + "loss": 1.0428, + "num_input_tokens_seen": 12969792, + "step": 51, + "train_runtime": 7620.5451, + "train_tokens_per_second": 1701.951 + }, + { + "epoch": 3.258064516129032, + "grad_norm": 326.4832878895978, + "learning_rate": 7.695691614555003e-05, + "loss": 0.9172, + "num_input_tokens_seen": 13231808, + "step": 52, + "train_runtime": 7772.2756, + "train_tokens_per_second": 1702.437 + }, + { + "epoch": 3.3225806451612905, + "grad_norm": 69.03004430934298, + "learning_rate": 7.612492823579745e-05, + "loss": 0.7593, + "num_input_tokens_seen": 13493824, + "step": 53, + "train_runtime": 7924.7666, + "train_tokens_per_second": 1702.741 + }, + { + "epoch": 3.3870967741935485, + "grad_norm": 1.1495044460164596, + "learning_rate": 7.528286866889924e-05, + "loss": 0.6151, + "num_input_tokens_seen": 13755840, + "step": 54, + "train_runtime": 8077.0191, + "train_tokens_per_second": 1703.084 + }, + { + "epoch": 3.4516129032258065, + "grad_norm": 0.252822200967686, + "learning_rate": 7.443106207484776e-05, + "loss": 0.5964, + "num_input_tokens_seen": 14017856, + "step": 55, + "train_runtime": 8229.2406, + "train_tokens_per_second": 1703.42 + }, + { + "epoch": 3.5161290322580645, + "grad_norm": 0.36169399074673414, + "learning_rate": 7.35698368412999e-05, + "loss": 0.5809, + "num_input_tokens_seen": 14279872, + "step": 56, + "train_runtime": 8380.9399, + "train_tokens_per_second": 
1703.851 + }, + { + "epoch": 3.5806451612903225, + "grad_norm": 0.3219026144290714, + "learning_rate": 7.269952498697734e-05, + "loss": 0.5638, + "num_input_tokens_seen": 14541888, + "step": 57, + "train_runtime": 8533.1482, + "train_tokens_per_second": 1704.164 + }, + { + "epoch": 3.6451612903225805, + "grad_norm": 1.0720094012961243, + "learning_rate": 7.18204620336671e-05, + "loss": 0.5553, + "num_input_tokens_seen": 14803904, + "step": 58, + "train_runtime": 8685.2309, + "train_tokens_per_second": 1704.492 + }, + { + "epoch": 3.709677419354839, + "grad_norm": 0.3151374305079844, + "learning_rate": 7.09329868768714e-05, + "loss": 0.5555, + "num_input_tokens_seen": 15065920, + "step": 59, + "train_runtime": 8837.7033, + "train_tokens_per_second": 1704.733 + }, + { + "epoch": 3.774193548387097, + "grad_norm": 0.2917481639941811, + "learning_rate": 7.003744165515705e-05, + "loss": 0.5635, + "num_input_tokens_seen": 15327936, + "step": 60, + "train_runtime": 8990.7028, + "train_tokens_per_second": 1704.865 + }, + { + "epoch": 3.838709677419355, + "grad_norm": 0.39703897734831073, + "learning_rate": 6.91341716182545e-05, + "loss": 0.5775, + "num_input_tokens_seen": 15589952, + "step": 61, + "train_runtime": 9142.8639, + "train_tokens_per_second": 1705.15 + }, + { + "epoch": 3.903225806451613, + "grad_norm": 0.330407023300617, + "learning_rate": 6.82235249939575e-05, + "loss": 0.5305, + "num_input_tokens_seen": 15851968, + "step": 62, + "train_runtime": 9294.6576, + "train_tokens_per_second": 1705.492 + }, + { + "epoch": 3.967741935483871, + "grad_norm": 0.2594779667409557, + "learning_rate": 6.730585285387465e-05, + "loss": 0.5428, + "num_input_tokens_seen": 16113984, + "step": 63, + "train_runtime": 9538.1388, + "train_tokens_per_second": 1689.426 + }, + { + "epoch": 4.0, + "grad_norm": 0.5188739404593391, + "learning_rate": 6.638150897808468e-05, + "loss": 0.5671, + "num_input_tokens_seen": 16244992, + "step": 64, + "train_runtime": 9613.4951, + "train_tokens_per_second": 1689.811 + }, + { + "epoch": 4.064516129032258, + "grad_norm": 0.2817891367025121, + "learning_rate": 6.545084971874738e-05, + "loss": 0.4715, + "num_input_tokens_seen": 16507008, + "step": 65, + "train_runtime": 9764.9453, + "train_tokens_per_second": 1690.435 + }, + { + "epoch": 4.129032258064516, + "grad_norm": 0.4313237711714659, + "learning_rate": 6.451423386272312e-05, + "loss": 0.4488, + "num_input_tokens_seen": 16769024, + "step": 66, + "train_runtime": 9916.329, + "train_tokens_per_second": 1691.052 + }, + { + "epoch": 4.193548387096774, + "grad_norm": 9.407910846391866, + "learning_rate": 6.357202249325371e-05, + "loss": 0.4582, + "num_input_tokens_seen": 17031040, + "step": 67, + "train_runtime": 10068.4234, + "train_tokens_per_second": 1691.53 + }, + { + "epoch": 4.258064516129032, + "grad_norm": 0.4318030663645452, + "learning_rate": 6.26245788507579e-05, + "loss": 0.4588, + "num_input_tokens_seen": 17293056, + "step": 68, + "train_runtime": 10219.9518, + "train_tokens_per_second": 1692.088 + }, + { + "epoch": 4.32258064516129, + "grad_norm": 0.2711661394321373, + "learning_rate": 6.167226819279528e-05, + "loss": 0.4447, + "num_input_tokens_seen": 17555072, + "step": 69, + "train_runtime": 10371.656, + "train_tokens_per_second": 1692.601 + }, + { + "epoch": 4.387096774193548, + "grad_norm": 0.2785432314686511, + "learning_rate": 6.071545765325254e-05, + "loss": 0.4565, + "num_input_tokens_seen": 17817088, + "step": 70, + "train_runtime": 10524.0113, + "train_tokens_per_second": 1692.994 + }, + { + "epoch": 
4.451612903225806, + "grad_norm": 0.24786649766049834, + "learning_rate": 5.9754516100806423e-05, + "loss": 0.4665, + "num_input_tokens_seen": 18079104, + "step": 71, + "train_runtime": 10676.331, + "train_tokens_per_second": 1693.382 + }, + { + "epoch": 4.516129032258064, + "grad_norm": 0.2454529793840644, + "learning_rate": 5.8789813996717736e-05, + "loss": 0.446, + "num_input_tokens_seen": 18341120, + "step": 72, + "train_runtime": 10828.6719, + "train_tokens_per_second": 1693.755 + }, + { + "epoch": 4.580645161290323, + "grad_norm": 0.25559855808246584, + "learning_rate": 5.782172325201155e-05, + "loss": 0.4384, + "num_input_tokens_seen": 18603136, + "step": 73, + "train_runtime": 10981.143, + "train_tokens_per_second": 1694.098 + }, + { + "epoch": 4.645161290322581, + "grad_norm": 0.24822833593804589, + "learning_rate": 5.685061708409841e-05, + "loss": 0.4339, + "num_input_tokens_seen": 18865152, + "step": 74, + "train_runtime": 11133.4844, + "train_tokens_per_second": 1694.452 + }, + { + "epoch": 4.709677419354839, + "grad_norm": 0.24264729806408034, + "learning_rate": 5.587686987289189e-05, + "loss": 0.444, + "num_input_tokens_seen": 19127168, + "step": 75, + "train_runtime": 11285.3939, + "train_tokens_per_second": 1694.86 + }, + { + "epoch": 4.774193548387097, + "grad_norm": 0.22384324563071528, + "learning_rate": 5.490085701647805e-05, + "loss": 0.4353, + "num_input_tokens_seen": 19389184, + "step": 76, + "train_runtime": 11437.3833, + "train_tokens_per_second": 1695.246 + }, + { + "epoch": 4.838709677419355, + "grad_norm": 0.24764076326899273, + "learning_rate": 5.392295478639225e-05, + "loss": 0.4257, + "num_input_tokens_seen": 19651200, + "step": 77, + "train_runtime": 11589.4114, + "train_tokens_per_second": 1695.617 + }, + { + "epoch": 4.903225806451613, + "grad_norm": 0.2228535077281988, + "learning_rate": 5.294354018255945e-05, + "loss": 0.4402, + "num_input_tokens_seen": 19913216, + "step": 78, + "train_runtime": 11741.453, + "train_tokens_per_second": 1695.975 + }, + { + "epoch": 4.967741935483871, + "grad_norm": 0.21632763381463402, + "learning_rate": 5.196299078795344e-05, + "loss": 0.4326, + "num_input_tokens_seen": 20175232, + "step": 79, + "train_runtime": 11893.3046, + "train_tokens_per_second": 1696.352 + }, + { + "epoch": 5.0, + "grad_norm": 0.21632763381463402, + "learning_rate": 5.0981684623031415e-05, + "loss": 0.4125, + "num_input_tokens_seen": 20306240, + "step": 80, + "train_runtime": 11969.094, + "train_tokens_per_second": 1696.556 + }, + { + "epoch": 5.064516129032258, + "grad_norm": 0.3571828769341814, + "learning_rate": 5e-05, + "loss": 0.3198, + "num_input_tokens_seen": 20568256, + "step": 81, + "train_runtime": 12121.0302, + "train_tokens_per_second": 1696.907 + }, + { + "epoch": 5.129032258064516, + "grad_norm": 0.21648090918089968, + "learning_rate": 4.901831537696859e-05, + "loss": 0.2964, + "num_input_tokens_seen": 20830272, + "step": 82, + "train_runtime": 12272.7158, + "train_tokens_per_second": 1697.283 + }, + { + "epoch": 5.193548387096774, + "grad_norm": 0.31458036325066546, + "learning_rate": 4.8037009212046586e-05, + "loss": 0.3057, + "num_input_tokens_seen": 21092288, + "step": 83, + "train_runtime": 12424.73, + "train_tokens_per_second": 1697.605 + }, + { + "epoch": 5.258064516129032, + "grad_norm": 0.2497078272482313, + "learning_rate": 4.7056459817440544e-05, + "loss": 0.3129, + "num_input_tokens_seen": 21354304, + "step": 84, + "train_runtime": 12576.3946, + "train_tokens_per_second": 1697.967 + }, + { + "epoch": 5.32258064516129, + 
"grad_norm": 0.26688301370237244, + "learning_rate": 4.607704521360776e-05, + "loss": 0.2966, + "num_input_tokens_seen": 21616320, + "step": 85, + "train_runtime": 12727.8322, + "train_tokens_per_second": 1698.35 + }, + { + "epoch": 5.387096774193548, + "grad_norm": 0.23431543616740275, + "learning_rate": 4.509914298352197e-05, + "loss": 0.2895, + "num_input_tokens_seen": 21878336, + "step": 86, + "train_runtime": 12879.5644, + "train_tokens_per_second": 1698.686 + }, + { + "epoch": 5.451612903225806, + "grad_norm": 0.24692139115327455, + "learning_rate": 4.412313012710813e-05, + "loss": 0.2926, + "num_input_tokens_seen": 22140352, + "step": 87, + "train_runtime": 13031.2099, + "train_tokens_per_second": 1699.025 + }, + { + "epoch": 5.516129032258064, + "grad_norm": 0.25361311803158476, + "learning_rate": 4.3149382915901606e-05, + "loss": 0.3056, + "num_input_tokens_seen": 22402368, + "step": 88, + "train_runtime": 13182.6668, + "train_tokens_per_second": 1699.381 + }, + { + "epoch": 5.580645161290323, + "grad_norm": 0.24929333634502235, + "learning_rate": 4.2178276747988446e-05, + "loss": 0.2922, + "num_input_tokens_seen": 22664384, + "step": 89, + "train_runtime": 13333.9686, + "train_tokens_per_second": 1699.748 + }, + { + "epoch": 5.645161290322581, + "grad_norm": 0.23498457386000374, + "learning_rate": 4.1210186003282275e-05, + "loss": 0.2947, + "num_input_tokens_seen": 22926400, + "step": 90, + "train_runtime": 13485.686, + "train_tokens_per_second": 1700.054 + }, + { + "epoch": 5.709677419354839, + "grad_norm": 0.2260009367281118, + "learning_rate": 4.0245483899193595e-05, + "loss": 0.2809, + "num_input_tokens_seen": 23188416, + "step": 91, + "train_runtime": 13637.0722, + "train_tokens_per_second": 1700.395 + }, + { + "epoch": 5.774193548387097, + "grad_norm": 0.23177978895395898, + "learning_rate": 3.928454234674747e-05, + "loss": 0.2835, + "num_input_tokens_seen": 23450432, + "step": 92, + "train_runtime": 13788.6307, + "train_tokens_per_second": 1700.708 + }, + { + "epoch": 5.838709677419355, + "grad_norm": 0.23286376279827942, + "learning_rate": 3.832773180720475e-05, + "loss": 0.2899, + "num_input_tokens_seen": 23712448, + "step": 93, + "train_runtime": 13940.8531, + "train_tokens_per_second": 1700.932 + }, + { + "epoch": 5.903225806451613, + "grad_norm": 0.25123564466559106, + "learning_rate": 3.73754211492421e-05, + "loss": 0.2187, + "num_input_tokens_seen": 23974464, + "step": 94, + "train_runtime": 152.8243, + "train_tokens_per_second": 156875.973 + }, + { + "epoch": 5.967741935483871, + "grad_norm": 0.21842207206834566, + "learning_rate": 3.642797750674629e-05, + "loss": 0.2062, + "num_input_tokens_seen": 24236480, + "step": 95, + "train_runtime": 304.4082, + "train_tokens_per_second": 79618.347 + }, + { + "epoch": 6.064516129032258, + "grad_norm": 0.4137660833142641, + "learning_rate": 3.5485766137276894e-05, + "loss": 0.3804, + "num_input_tokens_seen": 24629504, + "step": 96, + "train_runtime": 531.3585, + "train_tokens_per_second": 46351.953 + }, + { + "epoch": 6.129032258064516, + "grad_norm": 0.24930112620091768, + "learning_rate": 3.4549150281252636e-05, + "loss": 0.1957, + "num_input_tokens_seen": 24891520, + "step": 97, + "train_runtime": 683.4085, + "train_tokens_per_second": 36422.61 + }, + { + "epoch": 6.193548387096774, + "grad_norm": 0.25330156602007586, + "learning_rate": 3.361849102191533e-05, + "loss": 0.205, + "num_input_tokens_seen": 25153536, + "step": 98, + "train_runtime": 835.5065, + "train_tokens_per_second": 30105.735 + }, + { + "epoch": 
6.258064516129032, + "grad_norm": 0.29921778638612334, + "learning_rate": 3.2694147146125345e-05, + "loss": 0.1908, + "num_input_tokens_seen": 25415552, + "step": 99, + "train_runtime": 987.5232, + "train_tokens_per_second": 25736.663 + }, + { + "epoch": 6.32258064516129, + "grad_norm": 0.2469233673084958, + "learning_rate": 3.177647500604252e-05, + "loss": 0.1847, + "num_input_tokens_seen": 25677568, + "step": 100, + "train_runtime": 1139.8183, + "train_tokens_per_second": 22527.774 + }, + { + "epoch": 6.387096774193548, + "grad_norm": 0.7725747563793509, + "learning_rate": 3.086582838174551e-05, + "loss": 0.1817, + "num_input_tokens_seen": 25939584, + "step": 101, + "train_runtime": 1291.4222, + "train_tokens_per_second": 20086.06 + }, + { + "epoch": 6.451612903225806, + "grad_norm": 0.25680554550185525, + "learning_rate": 2.996255834484296e-05, + "loss": 0.1734, + "num_input_tokens_seen": 26201600, + "step": 102, + "train_runtime": 1443.928, + "train_tokens_per_second": 18146.057 + }, + { + "epoch": 6.516129032258064, + "grad_norm": 0.27042296025640733, + "learning_rate": 2.9067013123128613e-05, + "loss": 0.1869, + "num_input_tokens_seen": 26463616, + "step": 103, + "train_runtime": 1596.4915, + "train_tokens_per_second": 16576.108 + }, + { + "epoch": 6.580645161290323, + "grad_norm": 0.2882085640613909, + "learning_rate": 2.8179537966332887e-05, + "loss": 0.1892, + "num_input_tokens_seen": 26725632, + "step": 104, + "train_runtime": 1748.874, + "train_tokens_per_second": 15281.622 + }, + { + "epoch": 6.645161290322581, + "grad_norm": 0.23789859154459053, + "learning_rate": 2.7300475013022663e-05, + "loss": 0.2003, + "num_input_tokens_seen": 26987648, + "step": 105, + "train_runtime": 1900.7787, + "train_tokens_per_second": 14198.206 + }, + { + "epoch": 6.709677419354839, + "grad_norm": 0.24333597114107516, + "learning_rate": 2.6430163158700115e-05, + "loss": 0.1869, + "num_input_tokens_seen": 27249664, + "step": 106, + "train_runtime": 2053.1166, + "train_tokens_per_second": 13272.341 + }, + { + "epoch": 6.774193548387097, + "grad_norm": 0.22162314084558382, + "learning_rate": 2.556893792515227e-05, + "loss": 0.1652, + "num_input_tokens_seen": 27511680, + "step": 107, + "train_runtime": 2205.8735, + "train_tokens_per_second": 12472.012 + }, + { + "epoch": 6.838709677419355, + "grad_norm": 0.22353605121241715, + "learning_rate": 2.471713133110078e-05, + "loss": 0.1931, + "num_input_tokens_seen": 27773696, + "step": 108, + "train_runtime": 2358.313, + "train_tokens_per_second": 11776.934 + }, + { + "epoch": 6.903225806451613, + "grad_norm": 0.22455516265290895, + "learning_rate": 2.3875071764202563e-05, + "loss": 0.1841, + "num_input_tokens_seen": 28035712, + "step": 109, + "train_runtime": 2510.9803, + "train_tokens_per_second": 11165.246 + }, + { + "epoch": 6.967741935483871, + "grad_norm": 0.38402001653538137, + "learning_rate": 2.3043083854449988e-05, + "loss": 0.1843, + "num_input_tokens_seen": 28297728, + "step": 110, + "train_runtime": 2663.6487, + "train_tokens_per_second": 10623.671 + }, + { + "epoch": 7.0, + "grad_norm": 0.3742502503244311, + "learning_rate": 2.2221488349019903e-05, + "loss": 0.1603, + "num_input_tokens_seen": 28428736, + "step": 111, + "train_runtime": 2739.8421, + "train_tokens_per_second": 10376.049 + }, + { + "epoch": 7.064516129032258, + "grad_norm": 0.2248185819076052, + "learning_rate": 2.1410601988619394e-05, + "loss": 0.116, + "num_input_tokens_seen": 28690752, + "step": 112, + "train_runtime": 2892.3751, + "train_tokens_per_second": 9919.444 + }, + { 
+ "epoch": 7.129032258064516, + "grad_norm": 0.2125797568607351, + "learning_rate": 2.061073738537635e-05, + "loss": 0.1325, + "num_input_tokens_seen": 28952768, + "step": 113, + "train_runtime": 3044.8954, + "train_tokens_per_second": 9508.625 + }, + { + "epoch": 7.193548387096774, + "grad_norm": 0.193911925191499, + "learning_rate": 1.982220290232143e-05, + "loss": 0.1212, + "num_input_tokens_seen": 29214784, + "step": 114, + "train_runtime": 3197.3523, + "train_tokens_per_second": 9137.18 + }, + { + "epoch": 7.258064516129032, + "grad_norm": 0.2538299645355182, + "learning_rate": 1.9045302534508297e-05, + "loss": 0.121, + "num_input_tokens_seen": 29476800, + "step": 115, + "train_runtime": 3350.2031, + "train_tokens_per_second": 8798.511 + }, + { + "epoch": 7.32258064516129, + "grad_norm": 0.23812141734052034, + "learning_rate": 1.8280335791817733e-05, + "loss": 0.1172, + "num_input_tokens_seen": 29738816, + "step": 116, + "train_runtime": 3502.6801, + "train_tokens_per_second": 8490.303 + }, + { + "epoch": 7.387096774193548, + "grad_norm": 0.2583083911424977, + "learning_rate": 1.7527597583490822e-05, + "loss": 0.1267, + "num_input_tokens_seen": 30000832, + "step": 117, + "train_runtime": 3655.3399, + "train_tokens_per_second": 8207.399 + }, + { + "epoch": 7.451612903225806, + "grad_norm": 0.35547790737132534, + "learning_rate": 1.678737810443593e-05, + "loss": 0.1132, + "num_input_tokens_seen": 30262848, + "step": 118, + "train_runtime": 3807.6391, + "train_tokens_per_second": 7947.93 + }, + { + "epoch": 7.516129032258064, + "grad_norm": 0.20767168289368249, + "learning_rate": 1.605996272335291e-05, + "loss": 0.1036, + "num_input_tokens_seen": 30524864, + "step": 119, + "train_runtime": 3960.35, + "train_tokens_per_second": 7707.618 + }, + { + "epoch": 7.580645161290323, + "grad_norm": 0.20569986142986987, + "learning_rate": 1.5345631872718214e-05, + "loss": 0.1085, + "num_input_tokens_seen": 30786880, + "step": 120, + "train_runtime": 4112.7549, + "train_tokens_per_second": 7485.708 + }, + { + "epoch": 7.645161290322581, + "grad_norm": 0.2080233876424552, + "learning_rate": 1.4644660940672627e-05, + "loss": 0.1075, + "num_input_tokens_seen": 31048896, + "step": 121, + "train_runtime": 4265.0677, + "train_tokens_per_second": 7279.813 + }, + { + "epoch": 7.709677419354839, + "grad_norm": 0.22886347662826706, + "learning_rate": 1.3957320164854059e-05, + "loss": 0.1091, + "num_input_tokens_seen": 31310912, + "step": 122, + "train_runtime": 4417.2414, + "train_tokens_per_second": 7088.341 + }, + { + "epoch": 7.774193548387097, + "grad_norm": 0.21253506561933785, + "learning_rate": 1.3283874528215733e-05, + "loss": 0.1003, + "num_input_tokens_seen": 31572928, + "step": 123, + "train_runtime": 4569.705, + "train_tokens_per_second": 6909.183 + }, + { + "epoch": 7.838709677419355, + "grad_norm": 0.211985762025977, + "learning_rate": 1.2624583656870154e-05, + "loss": 0.1123, + "num_input_tokens_seen": 31834944, + "step": 124, + "train_runtime": 4721.8673, + "train_tokens_per_second": 6742.024 + }, + { + "epoch": 7.903225806451613, + "grad_norm": 0.21676935398531322, + "learning_rate": 1.1979701719998453e-05, + "loss": 0.1183, + "num_input_tokens_seen": 32096960, + "step": 125, + "train_runtime": 4974.2634, + "train_tokens_per_second": 6452.606 + }, + { + "epoch": 7.967741935483871, + "grad_norm": 0.21396793459722308, + "learning_rate": 1.134947733186315e-05, + "loss": 0.1099, + "num_input_tokens_seen": 32358976, + "step": 126, + "train_runtime": 5125.1703, + "train_tokens_per_second": 6313.737 
+ }, + { + "epoch": 8.0, + "grad_norm": 0.21396793459722308, + "learning_rate": 1.0734153455962765e-05, + "loss": 0.1076, + "num_input_tokens_seen": 32489984, + "step": 127, + "train_runtime": 5201.0438, + "train_tokens_per_second": 6246.82 + }, + { + "epoch": 8.064516129032258, + "grad_norm": 0.33028107335658585, + "learning_rate": 1.013396731136465e-05, + "loss": 0.0796, + "num_input_tokens_seen": 32752000, + "step": 128, + "train_runtime": 5353.4043, + "train_tokens_per_second": 6117.976 + }, + { + "epoch": 8.129032258064516, + "grad_norm": 0.1828204126753726, + "learning_rate": 9.549150281252633e-06, + "loss": 0.0836, + "num_input_tokens_seen": 33014016, + "step": 129, + "train_runtime": 5505.4754, + "train_tokens_per_second": 5996.579 + }, + { + "epoch": 8.193548387096774, + "grad_norm": 0.1736663582033454, + "learning_rate": 8.97992782372432e-06, + "loss": 0.0874, + "num_input_tokens_seen": 33276032, + "step": 130, + "train_runtime": 5657.5383, + "train_tokens_per_second": 5881.716 + }, + { + "epoch": 8.258064516129032, + "grad_norm": 0.1732376583485956, + "learning_rate": 8.426519384872733e-06, + "loss": 0.0755, + "num_input_tokens_seen": 33538048, + "step": 131, + "train_runtime": 5809.2201, + "train_tokens_per_second": 5773.244 + }, + { + "epoch": 8.32258064516129, + "grad_norm": 0.2273082510299754, + "learning_rate": 7.889138314185678e-06, + "loss": 0.071, + "num_input_tokens_seen": 33800064, + "step": 132, + "train_runtime": 5961.4033, + "train_tokens_per_second": 5669.817 + }, + { + "epoch": 8.387096774193548, + "grad_norm": 0.1857251298054902, + "learning_rate": 7.367991782295391e-06, + "loss": 0.0742, + "num_input_tokens_seen": 34062080, + "step": 133, + "train_runtime": 6112.7404, + "train_tokens_per_second": 5572.309 + }, + { + "epoch": 8.451612903225806, + "grad_norm": 0.19048334214878657, + "learning_rate": 6.863280701110408e-06, + "loss": 0.0784, + "num_input_tokens_seen": 34324096, + "step": 134, + "train_runtime": 6264.502, + "train_tokens_per_second": 5479.142 + }, + { + "epoch": 8.516129032258064, + "grad_norm": 0.21366469439070385, + "learning_rate": 6.375199646360142e-06, + "loss": 0.0706, + "num_input_tokens_seen": 34586112, + "step": 135, + "train_runtime": 6416.173, + "train_tokens_per_second": 5390.458 + }, + { + "epoch": 8.580645161290322, + "grad_norm": 0.1718209125679383, + "learning_rate": 5.903936782582253e-06, + "loss": 0.0678, + "num_input_tokens_seen": 34848128, + "step": 136, + "train_runtime": 6567.9915, + "train_tokens_per_second": 5305.751 + }, + { + "epoch": 8.64516129032258, + "grad_norm": 0.2788885426429946, + "learning_rate": 5.449673790581611e-06, + "loss": 0.0778, + "num_input_tokens_seen": 35110144, + "step": 137, + "train_runtime": 6719.8609, + "train_tokens_per_second": 5224.832 + }, + { + "epoch": 8.709677419354838, + "grad_norm": 0.18095259642151965, + "learning_rate": 5.012585797388936e-06, + "loss": 0.0719, + "num_input_tokens_seen": 35372160, + "step": 138, + "train_runtime": 6871.6076, + "train_tokens_per_second": 5147.582 + }, + { + "epoch": 8.774193548387096, + "grad_norm": 0.17022106370632772, + "learning_rate": 4.592841308745932e-06, + "loss": 0.0748, + "num_input_tokens_seen": 35634176, + "step": 139, + "train_runtime": 7023.699, + "train_tokens_per_second": 5073.42 + }, + { + "epoch": 8.838709677419354, + "grad_norm": 0.16715517913143546, + "learning_rate": 4.190602144143207e-06, + "loss": 0.0828, + "num_input_tokens_seen": 35896192, + "step": 140, + "train_runtime": 7175.6961, + "train_tokens_per_second": 5002.468 + }, + { + 
"epoch": 8.903225806451612, + "grad_norm": 0.16949575977901518, + "learning_rate": 3.8060233744356633e-06, + "loss": 0.0761, + "num_input_tokens_seen": 36158208, + "step": 141, + "train_runtime": 7328.1762, + "train_tokens_per_second": 4934.135 + }, + { + "epoch": 8.967741935483872, + "grad_norm": 0.17039551803545613, + "learning_rate": 3.4392532620598216e-06, + "loss": 0.0771, + "num_input_tokens_seen": 36420224, + "step": 142, + "train_runtime": 7480.3902, + "train_tokens_per_second": 4868.76 + }, + { + "epoch": 9.0, + "grad_norm": 0.30950909838198004, + "learning_rate": 3.0904332038757977e-06, + "loss": 0.0753, + "num_input_tokens_seen": 36551232, + "step": 143, + "train_runtime": 7556.6237, + "train_tokens_per_second": 4836.979 + }, + { + "epoch": 9.064516129032258, + "grad_norm": 0.147866843609634, + "learning_rate": 2.759697676656098e-06, + "loss": 0.0681, + "num_input_tokens_seen": 36813248, + "step": 144, + "train_runtime": 7708.2614, + "train_tokens_per_second": 4775.817 + }, + { + "epoch": 9.129032258064516, + "grad_norm": 0.14249192821986728, + "learning_rate": 2.4471741852423237e-06, + "loss": 0.0643, + "num_input_tokens_seen": 37075264, + "step": 145, + "train_runtime": 7860.6392, + "train_tokens_per_second": 4716.571 + }, + { + "epoch": 9.193548387096774, + "grad_norm": 0.13759617454869796, + "learning_rate": 2.152983213389559e-06, + "loss": 0.0589, + "num_input_tokens_seen": 37337280, + "step": 146, + "train_runtime": 8013.0274, + "train_tokens_per_second": 4659.572 + }, + { + "epoch": 9.258064516129032, + "grad_norm": 0.1411589262898803, + "learning_rate": 1.8772381773176417e-06, + "loss": 0.0628, + "num_input_tokens_seen": 37599296, + "step": 147, + "train_runtime": 8165.0817, + "train_tokens_per_second": 4604.889 + }, + { + "epoch": 9.32258064516129, + "grad_norm": 0.1448799275709374, + "learning_rate": 1.620045381987012e-06, + "loss": 0.0663, + "num_input_tokens_seen": 37861312, + "step": 148, + "train_runtime": 8317.3617, + "train_tokens_per_second": 4552.082 + }, + { + "epoch": 9.387096774193548, + "grad_norm": 0.12990304958177346, + "learning_rate": 1.3815039801161721e-06, + "loss": 0.0506, + "num_input_tokens_seen": 38123328, + "step": 149, + "train_runtime": 8469.3923, + "train_tokens_per_second": 4501.306 + }, + { + "epoch": 9.451612903225806, + "grad_norm": 0.147433482907245, + "learning_rate": 1.1617059339563807e-06, + "loss": 0.0627, + "num_input_tokens_seen": 38385344, + "step": 150, + "train_runtime": 8621.4093, + "train_tokens_per_second": 4452.328 + }, + { + "epoch": 9.516129032258064, + "grad_norm": 0.1700792731577258, + "learning_rate": 9.607359798384785e-07, + "loss": 0.061, + "num_input_tokens_seen": 38647360, + "step": 151, + "train_runtime": 8773.7579, + "train_tokens_per_second": 4404.881 + }, + { + "epoch": 9.580645161290322, + "grad_norm": 0.14427724556347693, + "learning_rate": 7.786715955054203e-07, + "loss": 0.0652, + "num_input_tokens_seen": 38909376, + "step": 152, + "train_runtime": 8927.0159, + "train_tokens_per_second": 4358.609 + }, + { + "epoch": 9.64516129032258, + "grad_norm": 0.13912027574637723, + "learning_rate": 6.15582970243117e-07, + "loss": 0.056, + "num_input_tokens_seen": 39171392, + "step": 153, + "train_runtime": 9079.5151, + "train_tokens_per_second": 4314.26 + }, + { + "epoch": 9.709677419354838, + "grad_norm": 0.14425913554967099, + "learning_rate": 4.715329778211375e-07, + "loss": 0.0658, + "num_input_tokens_seen": 39433408, + "step": 154, + "train_runtime": 9231.7766, + "train_tokens_per_second": 4271.486 + }, + { + 
"epoch": 9.774193548387096, + "grad_norm": 0.1468697481065812, + "learning_rate": 3.465771522536854e-07, + "loss": 0.0673, + "num_input_tokens_seen": 39695424, + "step": 155, + "train_runtime": 9383.9718, + "train_tokens_per_second": 4230.13 + }, + { + "epoch": 9.838709677419354, + "grad_norm": 0.1521336799355655, + "learning_rate": 2.407636663901591e-07, + "loss": 0.0727, + "num_input_tokens_seen": 39957440, + "step": 156, + "train_runtime": 9637.9221, + "train_tokens_per_second": 4145.856 + }, + { + "epoch": 9.903225806451612, + "grad_norm": 0.14789255243541283, + "learning_rate": 1.5413331334360182e-07, + "loss": 0.0597, + "num_input_tokens_seen": 40219456, + "step": 157, + "train_runtime": 9789.5998, + "train_tokens_per_second": 4108.386 + }, + { + "epoch": 9.967741935483872, + "grad_norm": 0.15942288018506584, + "learning_rate": 8.671949076420882e-08, + "loss": 0.0631, + "num_input_tokens_seen": 40481472, + "step": 158, + "train_runtime": 9941.5749, + "train_tokens_per_second": 4071.938 + }, + { + "epoch": 10.0, + "grad_norm": 0.15942288018506584, + "learning_rate": 3.8548187963854956e-08, + "loss": 0.0649, + "num_input_tokens_seen": 40612480, + "step": 159, + "train_runtime": 10016.9881, + "train_tokens_per_second": 4054.36 + }, + { + "epoch": 10.0, + "num_input_tokens_seen": 40612480, + "step": 159, + "total_flos": 320867359260672.0, + "train_loss": 0.048082037963582284, + "train_runtime": 10016.9898, + "train_samples_per_second": 1.974, + "train_steps_per_second": 0.016 + } + ], + "logging_steps": 1, + "max_steps": 160, + "num_input_tokens_seen": 40612480, + "num_train_epochs": 10, + "save_steps": 31, + "stateful_callbacks": { + "TrainerControl": { + "args": { + "should_epoch_stop": false, + "should_evaluate": false, + "should_log": false, + "should_save": true, + "should_training_stop": false + }, + "attributes": {} + } + }, + "total_flos": 320867359260672.0, + "train_batch_size": 4, + "trial_name": null, + "trial_params": null +} diff --git a/B_3/training_args.bin b/B_3/training_args.bin new file mode 100644 index 0000000000000000000000000000000000000000..dae6cb82f77aa91f4f2aac3f1f2dd06336bf044e --- /dev/null +++ b/B_3/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8cc4c22dcd8008af055092cdfdd6cef9bf6aaaae83714bdb9dad705f555cb73e +size 8081 diff --git a/B_3/training_args.yaml b/B_3/training_args.yaml new file mode 100644 index 0000000000000000000000000000000000000000..394b518e7e12d4a483587fb2123c9494118396a4 --- /dev/null +++ b/B_3/training_args.yaml @@ -0,0 +1,37 @@ +adapter_name_or_path: /workspace/LLaMA-Factory/saves/Llama-3.1-70B/lora/B_3/checkpoint-93 +bf16: true +cutoff_len: 2048 +dataset: Millfield1,Millfield3 +dataset_dir: data +ddp_timeout: 180000000 +deepspeed: cache/ds_z3_config.json +do_train: true +enable_thinking: true +finetuning_type: lora +flash_attn: auto +gradient_accumulation_steps: 8 +include_num_input_tokens_seen: true +learning_rate: 0.0001 +logging_steps: 1 +lora_alpha: 1024 +lora_dropout: 0 +lora_rank: 256 +lora_target: all +lr_scheduler_type: cosine +max_grad_norm: 1.0 +max_samples: 100000 +model_name_or_path: /workspace/meta-llama/Llama-3.1-70B +num_train_epochs: 10.0 +optim: adamw_torch +output_dir: saves/Llama-3.1-70B/lora/B_3 +packing: true +per_device_train_batch_size: 4 +plot_loss: true +preprocessing_num_workers: 16 +report_to: wandb +rope_scaling: llama3 +save_steps: 31 +stage: pt +template: llama3 +trust_remote_code: true +warmup_steps: 0 diff --git a/B_3/training_loss.png b/B_3/training_loss.png new 
file mode 100644 index 0000000000000000000000000000000000000000..8c1f17aabbf06510c879d9621dd18fb071edc79e Binary files /dev/null and b/B_3/training_loss.png differ
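
The long JSON block above (the per-step log ending at step 159, followed by `logging_steps`, `max_steps`, `save_steps`, and `stateful_callbacks`) is the standard Hugging Face `trainer_state.json`; each entry in its `log_history` array carries `step`, `loss`, `learning_rate`, and token counts, and `training_loss.png` is presumably the curve rendered from it (`plot_loss: true` in `training_args.yaml`). A minimal sketch of re-deriving that curve, assuming the file sits at `B_3/trainer_state.json` and skipping the final summary entry that has no `loss` key:

```python
# Minimal sketch: rebuild a loss curve like training_loss.png from trainer_state.json.
# Assumes the top-level B_3/trainer_state.json path; entries without a "loss" key
# (the final summary record) are skipped.
import json

import matplotlib.pyplot as plt

with open("B_3/trainer_state.json") as f:
    state = json.load(f)

logged = [e for e in state["log_history"] if "loss" in e]
steps = [e["step"] for e in logged]
losses = [e["loss"] for e in logged]

plt.plot(steps, losses)
plt.xlabel("step")
plt.ylabel("training loss")
plt.title("B_3 LoRA training loss")
plt.savefig("training_loss_replot.png")
```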
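
Since `training_args.yaml` records a LoRA run (`finetuning_type: lora`, `stage: pt`) against `/workspace/meta-llama/Llama-3.1-70B`, the adapter saved in `B_3/` should load on top of the base model with PEFT. A minimal sketch, assuming the public `meta-llama/Llama-3.1-70B` checkpoint stands in for the local base-model path and that enough GPU memory (or offloading) is available for a 70B model in bf16:

```python
# Minimal sketch: load the B_3 LoRA adapter on top of the base model for inference.
# "meta-llama/Llama-3.1-70B" is assumed to stand in for the local path used in
# training_args.yaml; "B_3" is the adapter directory from this repo.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_model_id = "meta-llama/Llama-3.1-70B"  # assumed Hub ID for the local base model
adapter_dir = "B_3"                          # adapter_config.json + adapter_model.safetensors

tokenizer = AutoTokenizer.from_pretrained(adapter_dir)
model = AutoModelForCausalLM.from_pretrained(
    base_model_id,
    torch_dtype=torch.bfloat16,  # the run used bf16: true
    device_map="auto",
)
model = PeftModel.from_pretrained(model, adapter_dir)
model.eval()

# stage: pt means this is a continued-pretraining adapter, so plain completion
# prompting is the natural way to exercise it.
inputs = tokenizer("Once upon a time", return_tensors="pt").to(model.device)
with torch.no_grad():
    out = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```

For continued training rather than inference, the `adapter_name_or_path` entry in the YAML shows the same adapter being resumed from `checkpoint-93`, so the directory can equally be passed back to LLaMA-Factory as a starting adapter.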