jbenbudd committed
Commit ed92ab1 · 1 Parent(s): f169902

Initial commit of the LoRA/adapter model

.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+training_eval_loss.png filter=lfs diff=lfs merge=lfs -text
+training_loss.png filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,3 +1,78 @@
----
-license: llama2
----
+---
+library_name: peft
+license: other
+base_model: GreatCaptainNemo/ProLLaMA_Stage_1
+tags:
+- llama-factory
+- lora
+- generated_from_trainer
+model-index:
+- name: train_2025-03-11-22-40-04
+  results: []
+---
+
+<!-- This model card has been generated automatically according to the information the Trainer had access to. You
+should probably proofread and complete it, then remove this comment. -->
+
+# train_2025-03-11-22-40-04
+
+This model is a fine-tuned version of [GreatCaptainNemo/ProLLaMA_Stage_1](https://huggingface.co/GreatCaptainNemo/ProLLaMA_Stage_1) on the adpr_train dataset.
+It achieves the following results on the evaluation set:
+- Loss: 0.0947
+- Num Input Tokens Seen: 8867536
+
+## Model description
+
+More information needed
+
+## Intended uses & limitations
+
+More information needed
+
+## Training and evaluation data
+
+More information needed
+
+## Training procedure
+
+### Training hyperparameters
+
+The following hyperparameters were used during training:
+- learning_rate: 5e-05
+- train_batch_size: 8
+- eval_batch_size: 8
+- seed: 42
+- gradient_accumulation_steps: 8
+- total_train_batch_size: 64
+- optimizer: Use OptimizerNames.ADAMW_TORCH with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
+- lr_scheduler_type: cosine
+- lr_scheduler_warmup_steps: 20
+- num_epochs: 3.0
+
+### Training results
+
+| Training Loss | Epoch | Step | Validation Loss | Input Tokens Seen |
+|:-------------:|:------:|:----:|:---------------:|:-----------------:|
+| 0.1778 | 0.2114 | 100 | 0.1754 | 624768 |
+| 0.1668 | 0.4228 | 200 | 0.1641 | 1249984 |
+| 0.1569 | 0.6342 | 300 | 0.1600 | 1875648 |
+| 0.1313 | 0.8457 | 400 | 0.1339 | 2500800 |
+| 0.1134 | 1.0571 | 500 | 0.1193 | 3124224 |
+| 0.1059 | 1.2685 | 600 | 0.1088 | 3750336 |
+| 0.096 | 1.4799 | 700 | 0.1083 | 4375808 |
+| 0.0998 | 1.6913 | 800 | 0.1001 | 5000128 |
+| 0.1083 | 1.9027 | 900 | 0.0991 | 5624576 |
+| 0.0953 | 2.1142 | 1000 | 0.0972 | 6248320 |
+| 0.0887 | 2.3256 | 1100 | 0.0964 | 6873152 |
+| 0.0889 | 2.5370 | 1200 | 0.0954 | 7498688 |
+| 0.0859 | 2.7484 | 1300 | 0.0950 | 8124864 |
+| 0.0883 | 2.9598 | 1400 | 0.0947 | 8749760 |
+
+
+### Framework versions
+
+- PEFT 0.12.0
+- Transformers 4.48.3
+- Pytorch 2.3.1+cu121
+- Datasets 3.3.2
+- Tokenizers 0.21.0
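
Since the card's usage sections are still "More information needed", here is a minimal loading sketch based only on the metadata above (a PEFT LoRA adapter on top of GreatCaptainNemo/ProLLaMA_Stage_1); the adapter repo id below is a placeholder for wherever this commit lives, not a name given in the commit:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

BASE = "GreatCaptainNemo/ProLLaMA_Stage_1"   # from base_model in the card
ADAPTER = "your-username/your-adapter-repo"  # placeholder: this commit's repo id

# Load the frozen base model, then attach the LoRA adapter weights on top.
tokenizer = AutoTokenizer.from_pretrained(BASE)
model = AutoModelForCausalLM.from_pretrained(BASE, torch_dtype="auto", device_map="auto")
model = PeftModel.from_pretrained(model, ADAPTER)
model.eval()
```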
adapter_config.json ADDED
@@ -0,0 +1,34 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": "GreatCaptainNemo/ProLLaMA_Stage_1",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 128,
+  "lora_dropout": 0.01,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 64,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "q_proj",
+    "up_proj",
+    "gate_proj",
+    "v_proj",
+    "o_proj",
+    "down_proj",
+    "k_proj"
+  ],
+  "task_type": "CAUSAL_LM",
+  "use_dora": false,
+  "use_rslora": false
+}
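
For reference, the same adapter setup can be written as a PEFT `LoraConfig`; this is a sketch of the equivalent object, not code taken from the training run:

```python
from peft import LoraConfig

# Mirrors the committed adapter_config.json: rank-64 LoRA with alpha 128
# applied to every attention and MLP projection of the LLaMA blocks.
lora_config = LoraConfig(
    r=64,
    lora_alpha=128,
    lora_dropout=0.01,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj",
                    "gate_proj", "up_proj", "down_proj"],
)
```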
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8ec8217f8814ec8fec1a7afe03a712ccc65a6bc6150d75f40338d02609d6edcd
+size 639691872
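
The three lines above are a Git LFS pointer, not the weights themselves: the roughly 640 MB safetensors blob is stored out of band and addressed by its SHA-256. A sketch of fetching the resolved file with `huggingface_hub` (repo id again a placeholder):

```python
from huggingface_hub import hf_hub_download

# Downloads the LFS-resolved adapter weights (~640 MB), not the pointer file.
path = hf_hub_download(repo_id="your-username/your-adapter-repo",
                       filename="adapter_model.safetensors")
print(path)
```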
all_results.json ADDED
@@ -0,0 +1,13 @@
+{
+  "epoch": 3.0,
+  "eval_loss": 0.09474755078554153,
+  "eval_runtime": 40.4682,
+  "eval_samples_per_second": 83.102,
+  "eval_steps_per_second": 10.403,
+  "num_input_tokens_seen": 8867536,
+  "total_flos": 3.600530754427945e+17,
+  "train_loss": 0.2131162985812786,
+  "train_runtime": 4701.6415,
+  "train_samples_per_second": 19.312,
+  "train_steps_per_second": 0.302
+}
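
These aggregates are internally consistent; a quick check converting the eval loss to perplexity and recovering the sample counts from the reported rates:

```python
import math

eval_loss = 0.09474755078554153
print(math.exp(eval_loss))   # ~1.099 token-level perplexity on the eval set

# 83.102 samples/s * 40.4682 s ~= 3363 eval samples (matches the running log)
print(83.102 * 40.4682)
# 19.312 samples/s * 4701.6415 s ~= 90,798 ~= 30,266 train samples * 3 epochs
print(19.312 * 4701.6415)
```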
eval_results.json ADDED
@@ -0,0 +1,8 @@
+{
+  "epoch": 3.0,
+  "eval_loss": 0.09474755078554153,
+  "eval_runtime": 40.4682,
+  "eval_samples_per_second": 83.102,
+  "eval_steps_per_second": 10.403,
+  "num_input_tokens_seen": 8867536
+}
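
This file repeats the eval aggregates above; one extra consistency check ties the step rate to the eval batch count in the running log:

```python
import math

# 10.403 steps/s * 40.4682 s ~= 421 eval batches = ceil(3363 samples / batch size 8)
print(10.403 * 40.4682)      # ~420.99
print(math.ceil(3363 / 8))   # 421
```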
llamaboard_config.yaml ADDED
@@ -0,0 +1,78 @@
+top.booster: auto
+top.checkpoint_path: []
+top.finetuning_type: lora
+top.model_name: Custom
+top.quantization_bit: none
+top.quantization_method: bitsandbytes
+top.rope_scaling: none
+top.template: alpaca
+train.additional_target: ''
+train.apollo_rank: 16
+train.apollo_scale: 32
+train.apollo_target: all
+train.apollo_update_interval: 200
+train.badam_mode: layer
+train.badam_switch_interval: 50
+train.badam_switch_mode: ascending
+train.badam_update_ratio: 0.05
+train.batch_size: 8
+train.compute_type: bf16
+train.create_new_adapter: false
+train.cutoff_len: 2048
+train.dataset:
+- adpr_train
+train.dataset_dir: data
+train.ds_offload: false
+train.ds_stage: none
+train.extra_args: '{"optim": "adamw_torch"}'
+train.freeze_extra_modules: ''
+train.freeze_trainable_layers: 2
+train.freeze_trainable_modules: all
+train.galore_rank: 16
+train.galore_scale: 2
+train.galore_target: all
+train.galore_update_interval: 200
+train.gradient_accumulation_steps: 8
+train.learning_rate: 5e-5
+train.logging_steps: 5
+train.lora_alpha: 128
+train.lora_dropout: 0.01
+train.lora_rank: 64
+train.lora_target: q_proj,v_proj,k_proj,o_proj,gate_proj,down_proj,up_proj
+train.loraplus_lr_ratio: 0
+train.lr_scheduler_type: cosine
+train.mask_history: false
+train.max_grad_norm: '1.0'
+train.max_samples: '100000'
+train.neat_packing: false
+train.neftune_alpha: 0
+train.num_train_epochs: '3.0'
+train.packing: false
+train.ppo_score_norm: false
+train.ppo_whiten_rewards: false
+train.pref_beta: 0.1
+train.pref_ftx: 0
+train.pref_loss: sigmoid
+train.report_to:
+- none
+train.resize_vocab: false
+train.reward_model: []
+train.save_steps: 100
+train.swanlab_api_key: ''
+train.swanlab_link: ''
+train.swanlab_mode: cloud
+train.swanlab_project: llamafactory
+train.swanlab_run_name: ''
+train.swanlab_workspace: ''
+train.train_on_prompt: false
+train.training_stage: Supervised Fine-Tuning
+train.use_apollo: false
+train.use_badam: false
+train.use_dora: false
+train.use_galore: false
+train.use_llama_pro: false
+train.use_pissa: false
+train.use_rslora: false
+train.use_swanlab: false
+train.val_size: 0.1
+train.warmup_steps: 20
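
The file above is the LLaMA-Factory WebUI state rather than a runnable config, but its key numbers can be cross-checked against the training log; a small sketch (train example count taken from the log, where val_size 0.1 left 30,266 train and 3,363 eval examples):

```python
import math

train_examples = 30266   # from running_log.txt, after the 0.1 validation split
total_batch = 8 * 8      # train.batch_size * train.gradient_accumulation_steps

steps_per_epoch = math.ceil(train_examples / total_batch)
print(steps_per_epoch)       # 473
print(steps_per_epoch * 3)   # 1419, the log's "Total optimization steps"
```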
model_eval_results.csv ADDED
The diff for this file is too large to render. See raw diff
 
running_log.txt ADDED
@@ -0,0 +1,1528 @@
+[INFO|2025-03-11 23:07:55] configuration_utils.py:696 >> loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--GreatCaptainNemo--ProLLaMA_Stage_1/snapshots/599e4e22ff2cab772732bc2ec4e1872ce20590a9/config.json
+[INFO|2025-03-11 23:07:55] configuration_utils.py:768 >> Model config LlamaConfig {
+  "_name_or_path": "GreatCaptainNemo/ProLLaMA_Stage_1",
+  "architectures": [
+    "LlamaForCausalLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "head_dim": 128,
+  "hidden_act": "silu",
+  "hidden_size": 4096,
+  "initializer_range": 0.02,
+  "intermediate_size": 11008,
+  "max_position_embeddings": 4096,
+  "mlp_bias": false,
+  "model_type": "llama",
+  "num_attention_heads": 32,
+  "num_hidden_layers": 32,
+  "num_key_value_heads": 32,
+  "outputs_attentions": true,
+  "pad_token_id": 0,
+  "pretraining_tp": 1,
+  "rms_norm_eps": 1e-05,
+  "rope_scaling": null,
+  "rope_theta": 10000.0,
+  "tie_word_embeddings": false,
+  "torch_dtype": "float16",
+  "transformers_version": "4.48.3",
+  "use_cache": true,
+  "vocab_size": 32000
+}
+[INFO|2025-03-11 23:07:58] tokenization_utils_base.py:2034 >> loading file tokenizer.model from cache at /root/.cache/huggingface/hub/models--GreatCaptainNemo--ProLLaMA_Stage_1/snapshots/599e4e22ff2cab772732bc2ec4e1872ce20590a9/tokenizer.model
+[INFO|2025-03-11 23:07:58] tokenization_utils_base.py:2034 >> loading file tokenizer.json from cache at None
+[INFO|2025-03-11 23:07:58] tokenization_utils_base.py:2034 >> loading file added_tokens.json from cache at None
+[INFO|2025-03-11 23:07:58] tokenization_utils_base.py:2034 >> loading file special_tokens_map.json from cache at /root/.cache/huggingface/hub/models--GreatCaptainNemo--ProLLaMA_Stage_1/snapshots/599e4e22ff2cab772732bc2ec4e1872ce20590a9/special_tokens_map.json
+[INFO|2025-03-11 23:07:58] tokenization_utils_base.py:2034 >> loading file tokenizer_config.json from cache at /root/.cache/huggingface/hub/models--GreatCaptainNemo--ProLLaMA_Stage_1/snapshots/599e4e22ff2cab772732bc2ec4e1872ce20590a9/tokenizer_config.json
+[INFO|2025-03-11 23:07:58] tokenization_utils_base.py:2034 >> loading file chat_template.jinja from cache at None
+[INFO|2025-03-11 23:07:59] configuration_utils.py:696 >> loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--GreatCaptainNemo--ProLLaMA_Stage_1/snapshots/599e4e22ff2cab772732bc2ec4e1872ce20590a9/config.json
+[INFO|2025-03-11 23:07:59] configuration_utils.py:768 >> Model config LlamaConfig {
+  "_name_or_path": "GreatCaptainNemo/ProLLaMA_Stage_1",
+  "architectures": [
+    "LlamaForCausalLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "head_dim": 128,
+  "hidden_act": "silu",
+  "hidden_size": 4096,
+  "initializer_range": 0.02,
+  "intermediate_size": 11008,
+  "max_position_embeddings": 4096,
+  "mlp_bias": false,
+  "model_type": "llama",
+  "num_attention_heads": 32,
+  "num_hidden_layers": 32,
+  "num_key_value_heads": 32,
+  "outputs_attentions": true,
+  "pad_token_id": 0,
+  "pretraining_tp": 1,
+  "rms_norm_eps": 1e-05,
+  "rope_scaling": null,
+  "rope_theta": 10000.0,
+  "tie_word_embeddings": false,
+  "torch_dtype": "float16",
+  "transformers_version": "4.48.3",
+  "use_cache": true,
+  "vocab_size": 32000
+}
+[INFO|2025-03-11 23:08:00] tokenization_utils_base.py:2034 >> loading file tokenizer.model from cache at /root/.cache/huggingface/hub/models--GreatCaptainNemo--ProLLaMA_Stage_1/snapshots/599e4e22ff2cab772732bc2ec4e1872ce20590a9/tokenizer.model
+[INFO|2025-03-11 23:08:00] tokenization_utils_base.py:2034 >> loading file tokenizer.json from cache at None
+[INFO|2025-03-11 23:08:00] tokenization_utils_base.py:2034 >> loading file added_tokens.json from cache at None
+[INFO|2025-03-11 23:08:00] tokenization_utils_base.py:2034 >> loading file special_tokens_map.json from cache at /root/.cache/huggingface/hub/models--GreatCaptainNemo--ProLLaMA_Stage_1/snapshots/599e4e22ff2cab772732bc2ec4e1872ce20590a9/special_tokens_map.json
+[INFO|2025-03-11 23:08:00] tokenization_utils_base.py:2034 >> loading file tokenizer_config.json from cache at /root/.cache/huggingface/hub/models--GreatCaptainNemo--ProLLaMA_Stage_1/snapshots/599e4e22ff2cab772732bc2ec4e1872ce20590a9/tokenizer_config.json
+[INFO|2025-03-11 23:08:00] tokenization_utils_base.py:2034 >> loading file chat_template.jinja from cache at None
+[INFO|2025-03-11 23:08:00] logging.py:143 >> Add pad token: </s>
+[INFO|2025-03-11 23:08:00] logging.py:143 >> Loading dataset ADPr/train.json...
+[INFO|2025-03-11 23:08:05] configuration_utils.py:696 >> loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--GreatCaptainNemo--ProLLaMA_Stage_1/snapshots/599e4e22ff2cab772732bc2ec4e1872ce20590a9/config.json
+[INFO|2025-03-11 23:08:05] configuration_utils.py:768 >> Model config LlamaConfig {
+  "_name_or_path": "GreatCaptainNemo/ProLLaMA_Stage_1",
+  "architectures": [
+    "LlamaForCausalLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "head_dim": 128,
+  "hidden_act": "silu",
+  "hidden_size": 4096,
+  "initializer_range": 0.02,
+  "intermediate_size": 11008,
+  "max_position_embeddings": 4096,
+  "mlp_bias": false,
+  "model_type": "llama",
+  "num_attention_heads": 32,
+  "num_hidden_layers": 32,
+  "num_key_value_heads": 32,
+  "outputs_attentions": true,
+  "pad_token_id": 0,
+  "pretraining_tp": 1,
+  "rms_norm_eps": 1e-05,
+  "rope_scaling": null,
+  "rope_theta": 10000.0,
+  "tie_word_embeddings": false,
+  "torch_dtype": "float16",
+  "transformers_version": "4.48.3",
+  "use_cache": true,
+  "vocab_size": 32000
+}
+[INFO|2025-03-11 23:08:06] modeling_utils.py:3904 >> loading weights file model.safetensors from cache at /root/.cache/huggingface/hub/models--GreatCaptainNemo--ProLLaMA_Stage_1/snapshots/599e4e22ff2cab772732bc2ec4e1872ce20590a9/model.safetensors.index.json
+[INFO|2025-03-11 23:17:35] modeling_utils.py:1582 >> Instantiating LlamaForCausalLM model under default dtype torch.bfloat16.
+[INFO|2025-03-11 23:17:35] configuration_utils.py:1140 >> Generate config GenerationConfig {
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "pad_token_id": 0
+}
+[INFO|2025-03-11 23:17:44] modeling_utils.py:4888 >> All model checkpoint weights were used when initializing LlamaForCausalLM.
+[INFO|2025-03-11 23:17:44] modeling_utils.py:4896 >> All the weights of LlamaForCausalLM were initialized from the model checkpoint at GreatCaptainNemo/ProLLaMA_Stage_1.
+If your task is similar to the task the model of the checkpoint was trained on, you can already use LlamaForCausalLM for predictions without further training.
+[INFO|2025-03-11 23:17:44] configuration_utils.py:1095 >> loading configuration file generation_config.json from cache at /root/.cache/huggingface/hub/models--GreatCaptainNemo--ProLLaMA_Stage_1/snapshots/599e4e22ff2cab772732bc2ec4e1872ce20590a9/generation_config.json
+[INFO|2025-03-11 23:17:44] configuration_utils.py:1140 >> Generate config GenerationConfig {
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "pad_token_id": 0
+}
+[INFO|2025-03-11 23:17:44] logging.py:143 >> Gradient checkpointing enabled.
+[INFO|2025-03-11 23:17:44] logging.py:143 >> Using torch SDPA for faster training and inference.
+[INFO|2025-03-11 23:17:44] logging.py:143 >> Upcasting trainable params to float32.
+[INFO|2025-03-11 23:17:44] logging.py:143 >> Fine-tuning method: LoRA
+[INFO|2025-03-11 23:17:46] logging.py:143 >> trainable params: 159,907,840 || all params: 6,898,323,456 || trainable%: 2.3181
+[INFO|2025-03-11 23:17:47] trainer.py:741 >> Using auto half precision backend
+[INFO|2025-03-11 23:17:47] trainer.py:2369 >> ***** Running training *****
+[INFO|2025-03-11 23:17:47] trainer.py:2370 >> Num examples = 30,266
+[INFO|2025-03-11 23:17:47] trainer.py:2371 >> Num Epochs = 3
+[INFO|2025-03-11 23:17:47] trainer.py:2372 >> Instantaneous batch size per device = 8
+[INFO|2025-03-11 23:17:47] trainer.py:2375 >> Total train batch size (w. parallel, distributed & accumulation) = 64
+[INFO|2025-03-11 23:17:47] trainer.py:2376 >> Gradient Accumulation steps = 8
+[INFO|2025-03-11 23:17:47] trainer.py:2377 >> Total optimization steps = 1,419
+[INFO|2025-03-11 23:17:47] trainer.py:2378 >> Number of trainable parameters = 159,907,840
+[INFO|2025-03-11 23:18:01] logging.py:143 >> {'loss': 14.2333, 'learning_rate': 1.2500e-05, 'epoch': 0.01, 'throughput': 2136.23}
+[INFO|2025-03-11 23:18:16] logging.py:143 >> {'loss': 9.2972, 'learning_rate': 2.5000e-05, 'epoch': 0.02, 'throughput': 2172.88}
+[INFO|2025-03-11 23:18:30] logging.py:143 >> {'loss': 2.4110, 'learning_rate': 3.7500e-05, 'epoch': 0.03, 'throughput': 2186.91}
+[INFO|2025-03-11 23:18:44] logging.py:143 >> {'loss': 0.9413, 'learning_rate': 5.0000e-05, 'epoch': 0.04, 'throughput': 2188.14}
+[INFO|2025-03-11 23:18:58] logging.py:143 >> {'loss': 0.4389, 'learning_rate': 4.9998e-05, 'epoch': 0.05, 'throughput': 2192.25}
+[INFO|2025-03-11 23:19:12] logging.py:143 >> {'loss': 0.4112, 'learning_rate': 4.9994e-05, 'epoch': 0.06, 'throughput': 2195.86}
+[INFO|2025-03-11 23:19:26] logging.py:143 >> {'loss': 0.3011, 'learning_rate': 4.9986e-05, 'epoch': 0.07, 'throughput': 2197.31}
+[INFO|2025-03-11 23:19:41] logging.py:143 >> {'loss': 0.2631, 'learning_rate': 4.9975e-05, 'epoch': 0.08, 'throughput': 2198.95}
+[INFO|2025-03-11 23:19:55] logging.py:143 >> {'loss': 0.2223, 'learning_rate': 4.9961e-05, 'epoch': 0.10, 'throughput': 2200.83}
+[INFO|2025-03-11 23:20:09] logging.py:143 >> {'loss': 0.2446, 'learning_rate': 4.9943e-05, 'epoch': 0.11, 'throughput': 2201.29}
+[INFO|2025-03-11 23:20:23] logging.py:143 >> {'loss': 0.2994, 'learning_rate': 4.9923e-05, 'epoch': 0.12, 'throughput': 2202.29}
+[INFO|2025-03-11 23:20:37] logging.py:143 >> {'loss': 0.2916, 'learning_rate': 4.9899e-05, 'epoch': 0.13, 'throughput': 2204.00}
+[INFO|2025-03-11 23:20:51] logging.py:143 >> {'loss': 0.2317, 'learning_rate': 4.9872e-05, 'epoch': 0.14, 'throughput': 2204.62}
+[INFO|2025-03-11 23:21:05] logging.py:143 >> {'loss': 0.2216, 'learning_rate': 4.9843e-05, 'epoch': 0.15, 'throughput': 2204.91}
+[INFO|2025-03-11 23:21:20] logging.py:143 >> {'loss': 0.2311, 'learning_rate': 4.9810e-05, 'epoch': 0.16, 'throughput': 2205.10}
+[INFO|2025-03-11 23:21:34] logging.py:143 >> {'loss': 0.2051, 'learning_rate': 4.9773e-05, 'epoch': 0.17, 'throughput': 2205.46}
+[INFO|2025-03-11 23:21:48] logging.py:143 >> {'loss': 0.1928, 'learning_rate': 4.9734e-05, 'epoch': 0.18, 'throughput': 2205.61}
+[INFO|2025-03-11 23:22:02] logging.py:143 >> {'loss': 0.1849, 'learning_rate': 4.9692e-05, 'epoch': 0.19, 'throughput': 2205.03}
+[INFO|2025-03-11 23:22:16] logging.py:143 >> {'loss': 0.1720, 'learning_rate': 4.9646e-05, 'epoch': 0.20, 'throughput': 2205.00}
+[INFO|2025-03-11 23:22:30] logging.py:143 >> {'loss': 0.1778, 'learning_rate': 4.9598e-05, 'epoch': 0.21, 'throughput': 2205.18}
+[INFO|2025-03-11 23:22:30] trainer.py:4226 >>
+***** Running Evaluation *****
+[INFO|2025-03-11 23:22:30] trainer.py:4228 >> Num examples = 3363
+[INFO|2025-03-11 23:22:30] trainer.py:4231 >> Batch size = 8
+[INFO|2025-03-11 23:23:11] trainer.py:3910 >> Saving model checkpoint to saves/Custom/lora/train_2025-03-11-22-40-04/checkpoint-100
+[INFO|2025-03-11 23:23:11] configuration_utils.py:696 >> loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--GreatCaptainNemo--ProLLaMA_Stage_1/snapshots/599e4e22ff2cab772732bc2ec4e1872ce20590a9/config.json
+[INFO|2025-03-11 23:23:11] configuration_utils.py:768 >> Model config LlamaConfig {
+  "architectures": [
+    "LlamaForCausalLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "head_dim": 128,
+  "hidden_act": "silu",
+  "hidden_size": 4096,
+  "initializer_range": 0.02,
+  "intermediate_size": 11008,
+  "max_position_embeddings": 4096,
+  "mlp_bias": false,
+  "model_type": "llama",
+  "num_attention_heads": 32,
+  "num_hidden_layers": 32,
+  "num_key_value_heads": 32,
+  "outputs_attentions": true,
+  "pad_token_id": 0,
+  "pretraining_tp": 1,
+  "rms_norm_eps": 1e-05,
+  "rope_scaling": null,
+  "rope_theta": 10000.0,
+  "tie_word_embeddings": false,
+  "torch_dtype": "float16",
+  "transformers_version": "4.48.3",
+  "use_cache": true,
+  "vocab_size": 32000
+}
+[INFO|2025-03-11 23:23:12] tokenization_utils_base.py:2491 >> tokenizer config file saved in saves/Custom/lora/train_2025-03-11-22-40-04/checkpoint-100/tokenizer_config.json
+[INFO|2025-03-11 23:23:12] tokenization_utils_base.py:2500 >> Special tokens file saved in saves/Custom/lora/train_2025-03-11-22-40-04/checkpoint-100/special_tokens_map.json
+[INFO|2025-03-11 23:23:32] logging.py:143 >> {'loss': 0.1754, 'learning_rate': 4.9546e-05, 'epoch': 0.22, 'throughput': 1902.03}
+[INFO|2025-03-11 23:23:46] logging.py:143 >> {'loss': 0.1727, 'learning_rate': 4.9491e-05, 'epoch': 0.23, 'throughput': 1913.33}
+[INFO|2025-03-11 23:24:00] logging.py:143 >> {'loss': 0.1728, 'learning_rate': 4.9433e-05, 'epoch': 0.24, 'throughput': 1924.44}
+[INFO|2025-03-11 23:24:15] logging.py:143 >> {'loss': 0.1649, 'learning_rate': 4.9372e-05, 'epoch': 0.25, 'throughput': 1934.37}
+[INFO|2025-03-11 23:24:29] logging.py:143 >> {'loss': 0.1647, 'learning_rate': 4.9308e-05, 'epoch': 0.26, 'throughput': 1943.78}
+[INFO|2025-03-11 23:24:43] logging.py:143 >> {'loss': 0.1683, 'learning_rate': 4.9241e-05, 'epoch': 0.27, 'throughput': 1952.73}
+[INFO|2025-03-11 23:24:57] logging.py:143 >> {'loss': 0.1597, 'learning_rate': 4.9171e-05, 'epoch': 0.29, 'throughput': 1961.08}
+[INFO|2025-03-11 23:25:11] logging.py:143 >> {'loss': 0.1669, 'learning_rate': 4.9098e-05, 'epoch': 0.30, 'throughput': 1969.16}
+[INFO|2025-03-11 23:25:25] logging.py:143 >> {'loss': 0.1568, 'learning_rate': 4.9022e-05, 'epoch': 0.31, 'throughput': 1976.56}
+[INFO|2025-03-11 23:25:40] logging.py:143 >> {'loss': 0.1608, 'learning_rate': 4.8942e-05, 'epoch': 0.32, 'throughput': 1983.65}
+[INFO|2025-03-11 23:25:54] logging.py:143 >> {'loss': 0.1654, 'learning_rate': 4.8860e-05, 'epoch': 0.33, 'throughput': 1990.02}
+[INFO|2025-03-11 23:26:08] logging.py:143 >> {'loss': 0.1639, 'learning_rate': 4.8775e-05, 'epoch': 0.34, 'throughput': 1996.50}
+[INFO|2025-03-11 23:26:22] logging.py:143 >> {'loss': 0.1600, 'learning_rate': 4.8686e-05, 'epoch': 0.35, 'throughput': 2002.14}
+[INFO|2025-03-11 23:26:36] logging.py:143 >> {'loss': 0.1645, 'learning_rate': 4.8595e-05, 'epoch': 0.36, 'throughput': 2007.70}
+[INFO|2025-03-11 23:26:50] logging.py:143 >> {'loss': 0.1604, 'learning_rate': 4.8501e-05, 'epoch': 0.37, 'throughput': 2012.35}
+[INFO|2025-03-11 23:27:05] logging.py:143 >> {'loss': 0.1707, 'learning_rate': 4.8404e-05, 'epoch': 0.38, 'throughput': 2016.61}
+[INFO|2025-03-11 23:27:19] logging.py:143 >> {'loss': 0.1544, 'learning_rate': 4.8303e-05, 'epoch': 0.39, 'throughput': 2021.50}
+[INFO|2025-03-11 23:27:33] logging.py:143 >> {'loss': 0.1615, 'learning_rate': 4.8200e-05, 'epoch': 0.40, 'throughput': 2026.14}
+[INFO|2025-03-11 23:27:47] logging.py:143 >> {'loss': 0.1590, 'learning_rate': 4.8094e-05, 'epoch': 0.41, 'throughput': 2030.77}
+[INFO|2025-03-11 23:28:01] logging.py:143 >> {'loss': 0.1668, 'learning_rate': 4.7985e-05, 'epoch': 0.42, 'throughput': 2034.94}
+[INFO|2025-03-11 23:28:01] trainer.py:4226 >>
+***** Running Evaluation *****
+[INFO|2025-03-11 23:28:01] trainer.py:4228 >> Num examples = 3363
+[INFO|2025-03-11 23:28:01] trainer.py:4231 >> Batch size = 8
+[INFO|2025-03-11 23:28:41] trainer.py:3910 >> Saving model checkpoint to saves/Custom/lora/train_2025-03-11-22-40-04/checkpoint-200
+[INFO|2025-03-11 23:28:42] configuration_utils.py:696 >> loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--GreatCaptainNemo--ProLLaMA_Stage_1/snapshots/599e4e22ff2cab772732bc2ec4e1872ce20590a9/config.json
+[INFO|2025-03-11 23:28:42] configuration_utils.py:768 >> Model config LlamaConfig {
+  "architectures": [
+    "LlamaForCausalLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "head_dim": 128,
+  "hidden_act": "silu",
+  "hidden_size": 4096,
+  "initializer_range": 0.02,
+  "intermediate_size": 11008,
+  "max_position_embeddings": 4096,
+  "mlp_bias": false,
+  "model_type": "llama",
+  "num_attention_heads": 32,
+  "num_hidden_layers": 32,
+  "num_key_value_heads": 32,
+  "outputs_attentions": true,
+  "pad_token_id": 0,
+  "pretraining_tp": 1,
+  "rms_norm_eps": 1e-05,
+  "rope_scaling": null,
+  "rope_theta": 10000.0,
+  "tie_word_embeddings": false,
+  "torch_dtype": "float16",
+  "transformers_version": "4.48.3",
+  "use_cache": true,
+  "vocab_size": 32000
+}
+[INFO|2025-03-11 23:28:43] tokenization_utils_base.py:2491 >> tokenizer config file saved in saves/Custom/lora/train_2025-03-11-22-40-04/checkpoint-200/tokenizer_config.json
+[INFO|2025-03-11 23:28:43] tokenization_utils_base.py:2500 >> Special tokens file saved in saves/Custom/lora/train_2025-03-11-22-40-04/checkpoint-200/special_tokens_map.json
+[INFO|2025-03-11 23:29:02] logging.py:143 >> {'loss': 0.1606, 'learning_rate': 4.7874e-05, 'epoch': 0.43, 'throughput': 1896.44}
+[INFO|2025-03-11 23:29:17] logging.py:143 >> {'loss': 0.1628, 'learning_rate': 4.7759e-05, 'epoch': 0.44, 'throughput': 1902.91}
+[INFO|2025-03-11 23:29:31] logging.py:143 >> {'loss': 0.1617, 'learning_rate': 4.7641e-05, 'epoch': 0.45, 'throughput': 1909.23}
+[INFO|2025-03-11 23:29:45] logging.py:143 >> {'loss': 0.1620, 'learning_rate': 4.7521e-05, 'epoch': 0.47, 'throughput': 1915.09}
+[INFO|2025-03-11 23:29:59] logging.py:143 >> {'loss': 0.1683, 'learning_rate': 4.7397e-05, 'epoch': 0.48, 'throughput': 1920.91}
+[INFO|2025-03-11 23:30:13] logging.py:143 >> {'loss': 0.1550, 'learning_rate': 4.7271e-05, 'epoch': 0.49, 'throughput': 1926.53}
+[INFO|2025-03-11 23:30:27] logging.py:143 >> {'loss': 0.1585, 'learning_rate': 4.7142e-05, 'epoch': 0.50, 'throughput': 1931.94}
+[INFO|2025-03-11 23:30:41] logging.py:143 >> {'loss': 0.1882, 'learning_rate': 4.7011e-05, 'epoch': 0.51, 'throughput': 1936.99}
+[INFO|2025-03-11 23:30:56] logging.py:143 >> {'loss': 0.1742, 'learning_rate': 4.6876e-05, 'epoch': 0.52, 'throughput': 1941.84}
+[INFO|2025-03-11 23:31:10] logging.py:143 >> {'loss': 0.1607, 'learning_rate': 4.6739e-05, 'epoch': 0.53, 'throughput': 1946.45}
+[INFO|2025-03-11 23:31:24] logging.py:143 >> {'loss': 0.1560, 'learning_rate': 4.6599e-05, 'epoch': 0.54, 'throughput': 1951.13}
+[INFO|2025-03-11 23:31:38] logging.py:143 >> {'loss': 0.1584, 'learning_rate': 4.6456e-05, 'epoch': 0.55, 'throughput': 1955.67}
+[INFO|2025-03-11 23:31:52] logging.py:143 >> {'loss': 0.1514, 'learning_rate': 4.6311e-05, 'epoch': 0.56, 'throughput': 1960.08}
+[INFO|2025-03-11 23:32:06] logging.py:143 >> {'loss': 0.1649, 'learning_rate': 4.6163e-05, 'epoch': 0.57, 'throughput': 1964.13}
+[INFO|2025-03-11 23:32:20] logging.py:143 >> {'loss': 0.1577, 'learning_rate': 4.6012e-05, 'epoch': 0.58, 'throughput': 1968.22}
+[INFO|2025-03-11 23:32:34] logging.py:143 >> {'loss': 0.1562, 'learning_rate': 4.5859e-05, 'epoch': 0.59, 'throughput': 1972.26}
+[INFO|2025-03-11 23:32:49] logging.py:143 >> {'loss': 0.1438, 'learning_rate': 4.5703e-05, 'epoch': 0.60, 'throughput': 1976.08}
+[INFO|2025-03-11 23:33:03] logging.py:143 >> {'loss': 0.1650, 'learning_rate': 4.5544e-05, 'epoch': 0.61, 'throughput': 1979.80}
+[INFO|2025-03-11 23:33:17] logging.py:143 >> {'loss': 0.1618, 'learning_rate': 4.5383e-05, 'epoch': 0.62, 'throughput': 1983.17}
+[INFO|2025-03-11 23:33:31] logging.py:143 >> {'loss': 0.1569, 'learning_rate': 4.5219e-05, 'epoch': 0.63, 'throughput': 1986.63}
+[INFO|2025-03-11 23:33:31] trainer.py:4226 >>
+***** Running Evaluation *****
+[INFO|2025-03-11 23:33:31] trainer.py:4228 >> Num examples = 3363
+[INFO|2025-03-11 23:33:31] trainer.py:4231 >> Batch size = 8
+[INFO|2025-03-11 23:34:11] trainer.py:3910 >> Saving model checkpoint to saves/Custom/lora/train_2025-03-11-22-40-04/checkpoint-300
+[INFO|2025-03-11 23:34:12] configuration_utils.py:696 >> loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--GreatCaptainNemo--ProLLaMA_Stage_1/snapshots/599e4e22ff2cab772732bc2ec4e1872ce20590a9/config.json
+[INFO|2025-03-11 23:34:12] configuration_utils.py:768 >> Model config LlamaConfig {
+  "architectures": [
+    "LlamaForCausalLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "head_dim": 128,
+  "hidden_act": "silu",
+  "hidden_size": 4096,
+  "initializer_range": 0.02,
+  "intermediate_size": 11008,
+  "max_position_embeddings": 4096,
+  "mlp_bias": false,
+  "model_type": "llama",
+  "num_attention_heads": 32,
+  "num_hidden_layers": 32,
+  "num_key_value_heads": 32,
+  "outputs_attentions": true,
+  "pad_token_id": 0,
+  "pretraining_tp": 1,
+  "rms_norm_eps": 1e-05,
+  "rope_scaling": null,
+  "rope_theta": 10000.0,
+  "tie_word_embeddings": false,
+  "torch_dtype": "float16",
+  "transformers_version": "4.48.3",
+  "use_cache": true,
+  "vocab_size": 32000
+}
+[INFO|2025-03-11 23:34:13] tokenization_utils_base.py:2491 >> tokenizer config file saved in saves/Custom/lora/train_2025-03-11-22-40-04/checkpoint-300/tokenizer_config.json
+[INFO|2025-03-11 23:34:13] tokenization_utils_base.py:2500 >> Special tokens file saved in saves/Custom/lora/train_2025-03-11-22-40-04/checkpoint-300/special_tokens_map.json
+[INFO|2025-03-11 23:34:32] logging.py:143 >> {'loss': 0.1598, 'learning_rate': 4.5052e-05, 'epoch': 0.64, 'throughput': 1897.51}
+[INFO|2025-03-11 23:34:46] logging.py:143 >> {'loss': 0.1501, 'learning_rate': 4.4884e-05, 'epoch': 0.66, 'throughput': 1901.82}
+[INFO|2025-03-11 23:35:00] logging.py:143 >> {'loss': 0.1456, 'learning_rate': 4.4712e-05, 'epoch': 0.67, 'throughput': 1906.16}
+[INFO|2025-03-11 23:35:14] logging.py:143 >> {'loss': 0.1502, 'learning_rate': 4.4538e-05, 'epoch': 0.68, 'throughput': 1910.28}
+[INFO|2025-03-11 23:35:29] logging.py:143 >> {'loss': 0.1446, 'learning_rate': 4.4362e-05, 'epoch': 0.69, 'throughput': 1914.24}
+[INFO|2025-03-11 23:35:43] logging.py:143 >> {'loss': 0.1478, 'learning_rate': 4.4183e-05, 'epoch': 0.70, 'throughput': 1918.15}
+[INFO|2025-03-11 23:35:57] logging.py:143 >> {'loss': 0.1470, 'learning_rate': 4.4002e-05, 'epoch': 0.71, 'throughput': 1921.84}
+[INFO|2025-03-11 23:36:11] logging.py:143 >> {'loss': 0.1479, 'learning_rate': 4.3818e-05, 'epoch': 0.72, 'throughput': 1925.58}
+[INFO|2025-03-11 23:36:25] logging.py:143 >> {'loss': 0.1448, 'learning_rate': 4.3632e-05, 'epoch': 0.73, 'throughput': 1929.26}
+[INFO|2025-03-11 23:36:39] logging.py:143 >> {'loss': 0.1443, 'learning_rate': 4.3444e-05, 'epoch': 0.74, 'throughput': 1932.65}
+[INFO|2025-03-11 23:36:54] logging.py:143 >> {'loss': 0.1464, 'learning_rate': 4.3253e-05, 'epoch': 0.75, 'throughput': 1936.00}
+[INFO|2025-03-11 23:37:08] logging.py:143 >> {'loss': 0.1345, 'learning_rate': 4.3060e-05, 'epoch': 0.76, 'throughput': 1939.19}
+[INFO|2025-03-11 23:37:22] logging.py:143 >> {'loss': 0.1311, 'learning_rate': 4.2865e-05, 'epoch': 0.77, 'throughput': 1942.52}
+[INFO|2025-03-11 23:37:36] logging.py:143 >> {'loss': 0.1428, 'learning_rate': 4.2668e-05, 'epoch': 0.78, 'throughput': 1945.66}
+[INFO|2025-03-11 23:37:50] logging.py:143 >> {'loss': 0.1361, 'learning_rate': 4.2468e-05, 'epoch': 0.79, 'throughput': 1948.75}
+[INFO|2025-03-11 23:38:04] logging.py:143 >> {'loss': 0.1436, 'learning_rate': 4.2266e-05, 'epoch': 0.80, 'throughput': 1951.85}
+[INFO|2025-03-11 23:38:18] logging.py:143 >> {'loss': 0.1380, 'learning_rate': 4.2062e-05, 'epoch': 0.81, 'throughput': 1954.87}
+[INFO|2025-03-11 23:38:32] logging.py:143 >> {'loss': 0.1290, 'learning_rate': 4.1856e-05, 'epoch': 0.82, 'throughput': 1957.72}
+[INFO|2025-03-11 23:38:46] logging.py:143 >> {'loss': 0.1278, 'learning_rate': 4.1647e-05, 'epoch': 0.84, 'throughput': 1960.66}
+[INFO|2025-03-11 23:39:01] logging.py:143 >> {'loss': 0.1313, 'learning_rate': 4.1437e-05, 'epoch': 0.85, 'throughput': 1963.41}
+[INFO|2025-03-11 23:39:01] trainer.py:4226 >>
+***** Running Evaluation *****
+[INFO|2025-03-11 23:39:01] trainer.py:4228 >> Num examples = 3363
+[INFO|2025-03-11 23:39:01] trainer.py:4231 >> Batch size = 8
+[INFO|2025-03-11 23:39:41] trainer.py:3910 >> Saving model checkpoint to saves/Custom/lora/train_2025-03-11-22-40-04/checkpoint-400
+[INFO|2025-03-11 23:39:41] configuration_utils.py:696 >> loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--GreatCaptainNemo--ProLLaMA_Stage_1/snapshots/599e4e22ff2cab772732bc2ec4e1872ce20590a9/config.json
+[INFO|2025-03-11 23:39:41] configuration_utils.py:768 >> Model config LlamaConfig {
+  "architectures": [
+    "LlamaForCausalLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "head_dim": 128,
+  "hidden_act": "silu",
+  "hidden_size": 4096,
+  "initializer_range": 0.02,
+  "intermediate_size": 11008,
+  "max_position_embeddings": 4096,
+  "mlp_bias": false,
+  "model_type": "llama",
+  "num_attention_heads": 32,
+  "num_hidden_layers": 32,
+  "num_key_value_heads": 32,
+  "outputs_attentions": true,
+  "pad_token_id": 0,
+  "pretraining_tp": 1,
+  "rms_norm_eps": 1e-05,
+  "rope_scaling": null,
+  "rope_theta": 10000.0,
+  "tie_word_embeddings": false,
+  "torch_dtype": "float16",
+  "transformers_version": "4.48.3",
+  "use_cache": true,
+  "vocab_size": 32000
+}
+[INFO|2025-03-11 23:39:43] tokenization_utils_base.py:2491 >> tokenizer config file saved in saves/Custom/lora/train_2025-03-11-22-40-04/checkpoint-400/tokenizer_config.json
+[INFO|2025-03-11 23:39:43] tokenization_utils_base.py:2500 >> Special tokens file saved in saves/Custom/lora/train_2025-03-11-22-40-04/checkpoint-400/special_tokens_map.json
+[INFO|2025-03-11 23:40:02] logging.py:143 >> {'loss': 0.1308, 'learning_rate': 4.1224e-05, 'epoch': 0.86, 'throughput': 1896.86}
+[INFO|2025-03-11 23:40:16] logging.py:143 >> {'loss': 0.1213, 'learning_rate': 4.1010e-05, 'epoch': 0.87, 'throughput': 1900.13}
+[INFO|2025-03-11 23:40:30] logging.py:143 >> {'loss': 0.1232, 'learning_rate': 4.0793e-05, 'epoch': 0.88, 'throughput': 1903.28}
+[INFO|2025-03-11 23:40:44] logging.py:143 >> {'loss': 0.1328, 'learning_rate': 4.0575e-05, 'epoch': 0.89, 'throughput': 1906.43}
+[INFO|2025-03-11 23:40:58] logging.py:143 >> {'loss': 0.1237, 'learning_rate': 4.0354e-05, 'epoch': 0.90, 'throughput': 1909.54}
+[INFO|2025-03-11 23:41:12] logging.py:143 >> {'loss': 0.1310, 'learning_rate': 4.0132e-05, 'epoch': 0.91, 'throughput': 1912.66}
+[INFO|2025-03-11 23:41:26] logging.py:143 >> {'loss': 0.1301, 'learning_rate': 3.9907e-05, 'epoch': 0.92, 'throughput': 1915.64}
+[INFO|2025-03-11 23:41:41] logging.py:143 >> {'loss': 0.1249, 'learning_rate': 3.9681e-05, 'epoch': 0.93, 'throughput': 1918.46}
+[INFO|2025-03-11 23:41:54] logging.py:143 >> {'loss': 0.1176, 'learning_rate': 3.9453e-05, 'epoch': 0.94, 'throughput': 1921.44}
+[INFO|2025-03-11 23:42:09] logging.py:143 >> {'loss': 0.1190, 'learning_rate': 3.9223e-05, 'epoch': 0.95, 'throughput': 1924.19}
+[INFO|2025-03-11 23:42:23] logging.py:143 >> {'loss': 0.1166, 'learning_rate': 3.8991e-05, 'epoch': 0.96, 'throughput': 1926.97}
+[INFO|2025-03-11 23:42:37] logging.py:143 >> {'loss': 0.1207, 'learning_rate': 3.8758e-05, 'epoch': 0.97, 'throughput': 1929.70}
+[INFO|2025-03-11 23:42:51] logging.py:143 >> {'loss': 0.1250, 'learning_rate': 3.8522e-05, 'epoch': 0.98, 'throughput': 1932.40}
+[INFO|2025-03-11 23:43:05] logging.py:143 >> {'loss': 0.1278, 'learning_rate': 3.8286e-05, 'epoch': 0.99, 'throughput': 1934.95}
+[INFO|2025-03-11 23:43:19] logging.py:143 >> {'loss': 0.1226, 'learning_rate': 3.8047e-05, 'epoch': 1.00, 'throughput': 1937.15}
+[INFO|2025-03-11 23:43:33] logging.py:143 >> {'loss': 0.1169, 'learning_rate': 3.7807e-05, 'epoch': 1.01, 'throughput': 1939.64}
+[INFO|2025-03-11 23:43:47] logging.py:143 >> {'loss': 0.1160, 'learning_rate': 3.7565e-05, 'epoch': 1.03, 'throughput': 1942.08}
+[INFO|2025-03-11 23:44:02] logging.py:143 >> {'loss': 0.1197, 'learning_rate': 3.7321e-05, 'epoch': 1.04, 'throughput': 1944.49}
+[INFO|2025-03-11 23:44:16] logging.py:143 >> {'loss': 0.1184, 'learning_rate': 3.7076e-05, 'epoch': 1.05, 'throughput': 1946.75}
+[INFO|2025-03-11 23:44:30] logging.py:143 >> {'loss': 0.1134, 'learning_rate': 3.6830e-05, 'epoch': 1.06, 'throughput': 1948.86}
+[INFO|2025-03-11 23:44:30] trainer.py:4226 >>
+***** Running Evaluation *****
+[INFO|2025-03-11 23:44:30] trainer.py:4228 >> Num examples = 3363
+[INFO|2025-03-11 23:44:30] trainer.py:4231 >> Batch size = 8
+[INFO|2025-03-11 23:45:10] trainer.py:3910 >> Saving model checkpoint to saves/Custom/lora/train_2025-03-11-22-40-04/checkpoint-500
+[INFO|2025-03-11 23:45:11] configuration_utils.py:696 >> loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--GreatCaptainNemo--ProLLaMA_Stage_1/snapshots/599e4e22ff2cab772732bc2ec4e1872ce20590a9/config.json
+[INFO|2025-03-11 23:45:11] configuration_utils.py:768 >> Model config LlamaConfig {
+  "architectures": [
+    "LlamaForCausalLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "head_dim": 128,
+  "hidden_act": "silu",
+  "hidden_size": 4096,
+  "initializer_range": 0.02,
+  "intermediate_size": 11008,
+  "max_position_embeddings": 4096,
+  "mlp_bias": false,
+  "model_type": "llama",
+  "num_attention_heads": 32,
+  "num_hidden_layers": 32,
+  "num_key_value_heads": 32,
+  "outputs_attentions": true,
+  "pad_token_id": 0,
+  "pretraining_tp": 1,
+  "rms_norm_eps": 1e-05,
+  "rope_scaling": null,
+  "rope_theta": 10000.0,
+  "tie_word_embeddings": false,
+  "torch_dtype": "float16",
+  "transformers_version": "4.48.3",
+  "use_cache": true,
+  "vocab_size": 32000
+}
+[INFO|2025-03-11 23:45:12] tokenization_utils_base.py:2491 >> tokenizer config file saved in saves/Custom/lora/train_2025-03-11-22-40-04/checkpoint-500/tokenizer_config.json
+[INFO|2025-03-11 23:45:12] tokenization_utils_base.py:2500 >> Special tokens file saved in saves/Custom/lora/train_2025-03-11-22-40-04/checkpoint-500/special_tokens_map.json
+[INFO|2025-03-11 23:45:35] logging.py:143 >> {'loss': 0.1193, 'learning_rate': 3.6582e-05, 'epoch': 1.07, 'throughput': 1891.77}
+[INFO|2025-03-11 23:45:49] logging.py:143 >> {'loss': 0.1150, 'learning_rate': 3.6332e-05, 'epoch': 1.08, 'throughput': 1894.29}
+[INFO|2025-03-11 23:46:04] logging.py:143 >> {'loss': 0.1146, 'learning_rate': 3.6081e-05, 'epoch': 1.09, 'throughput': 1896.78}
+[INFO|2025-03-11 23:46:18] logging.py:143 >> {'loss': 0.1109, 'learning_rate': 3.5829e-05, 'epoch': 1.10, 'throughput': 1899.24}
+[INFO|2025-03-11 23:46:32] logging.py:143 >> {'loss': 0.1143, 'learning_rate': 3.5575e-05, 'epoch': 1.11, 'throughput': 1901.62}
+[INFO|2025-03-11 23:46:46] logging.py:143 >> {'loss': 0.1129, 'learning_rate': 3.5320e-05, 'epoch': 1.12, 'throughput': 1904.07}
+[INFO|2025-03-11 23:47:01] logging.py:143 >> {'loss': 0.1139, 'learning_rate': 3.5064e-05, 'epoch': 1.13, 'throughput': 1906.47}
+[INFO|2025-03-11 23:47:15] logging.py:143 >> {'loss': 0.1122, 'learning_rate': 3.4806e-05, 'epoch': 1.14, 'throughput': 1908.86}
+[INFO|2025-03-11 23:47:29] logging.py:143 >> {'loss': 0.1110, 'learning_rate': 3.4548e-05, 'epoch': 1.15, 'throughput': 1911.20}
+[INFO|2025-03-11 23:47:43] logging.py:143 >> {'loss': 0.1105, 'learning_rate': 3.4288e-05, 'epoch': 1.16, 'throughput': 1913.58}
+[INFO|2025-03-11 23:47:57] logging.py:143 >> {'loss': 0.1128, 'learning_rate': 3.4026e-05, 'epoch': 1.17, 'throughput': 1915.77}
+[INFO|2025-03-11 23:48:11] logging.py:143 >> {'loss': 0.1066, 'learning_rate': 3.3764e-05, 'epoch': 1.18, 'throughput': 1918.02}
+[INFO|2025-03-11 23:48:26] logging.py:143 >> {'loss': 0.1126, 'learning_rate': 3.3501e-05, 'epoch': 1.19, 'throughput': 1920.23}
+[INFO|2025-03-11 23:48:40] logging.py:143 >> {'loss': 0.1107, 'learning_rate': 3.3236e-05, 'epoch': 1.21, 'throughput': 1922.35}
+[INFO|2025-03-11 23:48:54] logging.py:143 >> {'loss': 0.1146, 'learning_rate': 3.2971e-05, 'epoch': 1.22, 'throughput': 1924.42}
+[INFO|2025-03-11 23:49:09] logging.py:143 >> {'loss': 0.1063, 'learning_rate': 3.2704e-05, 'epoch': 1.23, 'throughput': 1926.46}
+[INFO|2025-03-11 23:49:23] logging.py:143 >> {'loss': 0.1057, 'learning_rate': 3.2437e-05, 'epoch': 1.24, 'throughput': 1928.57}
+[INFO|2025-03-11 23:49:37] logging.py:143 >> {'loss': 0.1024, 'learning_rate': 3.2168e-05, 'epoch': 1.25, 'throughput': 1930.63}
+[INFO|2025-03-11 23:49:51] logging.py:143 >> {'loss': 0.1006, 'learning_rate': 3.1899e-05, 'epoch': 1.26, 'throughput': 1932.65}
+[INFO|2025-03-11 23:50:05] logging.py:143 >> {'loss': 0.1059, 'learning_rate': 3.1628e-05, 'epoch': 1.27, 'throughput': 1934.62}
+[INFO|2025-03-11 23:50:05] trainer.py:4226 >>
+***** Running Evaluation *****
+[INFO|2025-03-11 23:50:05] trainer.py:4228 >> Num examples = 3363
+[INFO|2025-03-11 23:50:05] trainer.py:4231 >> Batch size = 8
+[INFO|2025-03-11 23:50:46] trainer.py:3910 >> Saving model checkpoint to saves/Custom/lora/train_2025-03-11-22-40-04/checkpoint-600
+[INFO|2025-03-11 23:50:46] configuration_utils.py:696 >> loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--GreatCaptainNemo--ProLLaMA_Stage_1/snapshots/599e4e22ff2cab772732bc2ec4e1872ce20590a9/config.json
+[INFO|2025-03-11 23:50:46] configuration_utils.py:768 >> Model config LlamaConfig {
+  "architectures": [
+    "LlamaForCausalLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "head_dim": 128,
+  "hidden_act": "silu",
+  "hidden_size": 4096,
+  "initializer_range": 0.02,
+  "intermediate_size": 11008,
+  "max_position_embeddings": 4096,
+  "mlp_bias": false,
+  "model_type": "llama",
+  "num_attention_heads": 32,
+  "num_hidden_layers": 32,
+  "num_key_value_heads": 32,
+  "outputs_attentions": true,
+  "pad_token_id": 0,
+  "pretraining_tp": 1,
+  "rms_norm_eps": 1e-05,
+  "rope_scaling": null,
+  "rope_theta": 10000.0,
+  "tie_word_embeddings": false,
+  "torch_dtype": "float16",
+  "transformers_version": "4.48.3",
+  "use_cache": true,
+  "vocab_size": 32000
+}
+[INFO|2025-03-11 23:50:48] tokenization_utils_base.py:2491 >> tokenizer config file saved in saves/Custom/lora/train_2025-03-11-22-40-04/checkpoint-600/tokenizer_config.json
+[INFO|2025-03-11 23:50:48] tokenization_utils_base.py:2500 >> Special tokens file saved in saves/Custom/lora/train_2025-03-11-22-40-04/checkpoint-600/special_tokens_map.json
719
+ [INFO|2025-03-11 23:51:08] logging.py:143 >> {'loss': 0.1079, 'learning_rate': 3.1357e-05, 'epoch': 1.28, 'throughput': 1889.78}
720
+
721
+ [INFO|2025-03-11 23:51:22] logging.py:143 >> {'loss': 0.1106, 'learning_rate': 3.1086e-05, 'epoch': 1.29, 'throughput': 1891.96}
722
+
723
+ [INFO|2025-03-11 23:51:36] logging.py:143 >> {'loss': 0.1027, 'learning_rate': 3.0813e-05, 'epoch': 1.30, 'throughput': 1894.15}
724
+
725
+ [INFO|2025-03-11 23:51:50] logging.py:143 >> {'loss': 0.1015, 'learning_rate': 3.0540e-05, 'epoch': 1.31, 'throughput': 1896.21}
726
+
727
+ [INFO|2025-03-11 23:52:05] logging.py:143 >> {'loss': 0.0965, 'learning_rate': 3.0265e-05, 'epoch': 1.32, 'throughput': 1898.26}
728
+
729
+ [INFO|2025-03-11 23:52:19] logging.py:143 >> {'loss': 0.1106, 'learning_rate': 2.9991e-05, 'epoch': 1.33, 'throughput': 1900.22}
730
+
731
+ [INFO|2025-03-11 23:52:33] logging.py:143 >> {'loss': 0.0996, 'learning_rate': 2.9715e-05, 'epoch': 1.34, 'throughput': 1902.27}
732
+
733
+ [INFO|2025-03-11 23:52:48] logging.py:143 >> {'loss': 0.1049, 'learning_rate': 2.9439e-05, 'epoch': 1.35, 'throughput': 1904.16}
734
+
735
+ [INFO|2025-03-11 23:53:02] logging.py:143 >> {'loss': 0.1118, 'learning_rate': 2.9163e-05, 'epoch': 1.36, 'throughput': 1906.07}
736
+
737
+ [INFO|2025-03-11 23:53:16] logging.py:143 >> {'loss': 0.0967, 'learning_rate': 2.8886e-05, 'epoch': 1.37, 'throughput': 1907.98}
738
+
739
+ [INFO|2025-03-11 23:53:30] logging.py:143 >> {'loss': 0.1035, 'learning_rate': 2.8608e-05, 'epoch': 1.38, 'throughput': 1909.88}
740
+
741
+ [INFO|2025-03-11 23:53:45] logging.py:143 >> {'loss': 0.1043, 'learning_rate': 2.8330e-05, 'epoch': 1.40, 'throughput': 1911.61}
742
+
743
+ [INFO|2025-03-11 23:53:59] logging.py:143 >> {'loss': 0.1045, 'learning_rate': 2.8052e-05, 'epoch': 1.41, 'throughput': 1913.40}
744
+
745
+ [INFO|2025-03-11 23:54:13] logging.py:143 >> {'loss': 0.1015, 'learning_rate': 2.7773e-05, 'epoch': 1.42, 'throughput': 1915.26}
746
+
747
+ [INFO|2025-03-11 23:54:28] logging.py:143 >> {'loss': 0.1055, 'learning_rate': 2.7494e-05, 'epoch': 1.43, 'throughput': 1917.02}
748
+
749
+ [INFO|2025-03-11 23:54:42] logging.py:143 >> {'loss': 0.1045, 'learning_rate': 2.7215e-05, 'epoch': 1.44, 'throughput': 1918.79}
750
+
751
+ [INFO|2025-03-11 23:54:56] logging.py:143 >> {'loss': 0.1035, 'learning_rate': 2.6935e-05, 'epoch': 1.45, 'throughput': 1920.56}
752
+
753
+ [INFO|2025-03-11 23:55:11] logging.py:143 >> {'loss': 0.1035, 'learning_rate': 2.6655e-05, 'epoch': 1.46, 'throughput': 1922.31}
754
+
755
+ [INFO|2025-03-11 23:55:25] logging.py:143 >> {'loss': 0.0989, 'learning_rate': 2.6375e-05, 'epoch': 1.47, 'throughput': 1923.99}
756
+
757
+ [INFO|2025-03-11 23:55:39] logging.py:143 >> {'loss': 0.0960, 'learning_rate': 2.6094e-05, 'epoch': 1.48, 'throughput': 1925.61}
758
+
759
+ [INFO|2025-03-11 23:55:39] trainer.py:4226 >>
760
+ ***** Running Evaluation *****
761
+
762
+ [INFO|2025-03-11 23:55:39] trainer.py:4228 >> Num examples = 3363
763
+
764
+ [INFO|2025-03-11 23:55:39] trainer.py:4231 >> Batch size = 8
765
+
766
+ [INFO|2025-03-11 23:56:20] trainer.py:3910 >> Saving model checkpoint to saves/Custom/lora/train_2025-03-11-22-40-04/checkpoint-700
767
+
768
+ [INFO|2025-03-11 23:56:20] configuration_utils.py:696 >> loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--GreatCaptainNemo--ProLLaMA_Stage_1/snapshots/599e4e22ff2cab772732bc2ec4e1872ce20590a9/config.json
769
+
770
+ [INFO|2025-03-11 23:56:20] configuration_utils.py:768 >> Model config LlamaConfig {
771
+ "architectures": [
772
+ "LlamaForCausalLM"
773
+ ],
774
+ "attention_bias": false,
775
+ "attention_dropout": 0.0,
776
+ "bos_token_id": 1,
777
+ "eos_token_id": 2,
778
+ "head_dim": 128,
779
+ "hidden_act": "silu",
780
+ "hidden_size": 4096,
781
+ "initializer_range": 0.02,
782
+ "intermediate_size": 11008,
783
+ "max_position_embeddings": 4096,
784
+ "mlp_bias": false,
785
+ "model_type": "llama",
786
+ "num_attention_heads": 32,
787
+ "num_hidden_layers": 32,
788
+ "num_key_value_heads": 32,
789
+ "outputs_attentions": true,
790
+ "pad_token_id": 0,
791
+ "pretraining_tp": 1,
792
+ "rms_norm_eps": 1e-05,
793
+ "rope_scaling": null,
794
+ "rope_theta": 10000.0,
795
+ "tie_word_embeddings": false,
796
+ "torch_dtype": "float16",
797
+ "transformers_version": "4.48.3",
798
+ "use_cache": true,
799
+ "vocab_size": 32000
800
+ }
801
+
802
+
803
+ [INFO|2025-03-11 23:56:22] tokenization_utils_base.py:2491 >> tokenizer config file saved in saves/Custom/lora/train_2025-03-11-22-40-04/checkpoint-700/tokenizer_config.json
+ [INFO|2025-03-11 23:56:22] tokenization_utils_base.py:2500 >> Special tokens file saved in saves/Custom/lora/train_2025-03-11-22-40-04/checkpoint-700/special_tokens_map.json
+ [INFO|2025-03-11 23:56:42] logging.py:143 >> {'loss': 0.1052, 'learning_rate': 2.5814e-05, 'epoch': 1.49, 'throughput': 1887.47}
+ [INFO|2025-03-11 23:56:56] logging.py:143 >> {'loss': 0.1082, 'learning_rate': 2.5533e-05, 'epoch': 1.50, 'throughput': 1889.29}
+ [INFO|2025-03-11 23:57:10] logging.py:143 >> {'loss': 0.1053, 'learning_rate': 2.5253e-05, 'epoch': 1.51, 'throughput': 1891.07}
+ [INFO|2025-03-11 23:57:24] logging.py:143 >> {'loss': 0.1003, 'learning_rate': 2.4972e-05, 'epoch': 1.52, 'throughput': 1892.89}
+ [INFO|2025-03-11 23:57:39] logging.py:143 >> {'loss': 0.1027, 'learning_rate': 2.4691e-05, 'epoch': 1.53, 'throughput': 1894.67}
+ [INFO|2025-03-11 23:57:53] logging.py:143 >> {'loss': 0.1019, 'learning_rate': 2.4411e-05, 'epoch': 1.54, 'throughput': 1896.42}
+ [INFO|2025-03-11 23:58:07] logging.py:143 >> {'loss': 0.1094, 'learning_rate': 2.4130e-05, 'epoch': 1.55, 'throughput': 1898.19}
+ [INFO|2025-03-11 23:58:21] logging.py:143 >> {'loss': 0.0987, 'learning_rate': 2.3850e-05, 'epoch': 1.56, 'throughput': 1899.99}
+ [INFO|2025-03-11 23:58:35] logging.py:143 >> {'loss': 0.0973, 'learning_rate': 2.3569e-05, 'epoch': 1.58, 'throughput': 1901.77}
+ [INFO|2025-03-11 23:58:49] logging.py:143 >> {'loss': 0.1052, 'learning_rate': 2.3289e-05, 'epoch': 1.59, 'throughput': 1903.53}
+ [INFO|2025-03-11 23:59:03] logging.py:143 >> {'loss': 0.1069, 'learning_rate': 2.3009e-05, 'epoch': 1.60, 'throughput': 1905.25}
+ [INFO|2025-03-11 23:59:17] logging.py:143 >> {'loss': 0.0994, 'learning_rate': 2.2729e-05, 'epoch': 1.61, 'throughput': 1906.93}
+ [INFO|2025-03-11 23:59:32] logging.py:143 >> {'loss': 0.1027, 'learning_rate': 2.2450e-05, 'epoch': 1.62, 'throughput': 1908.55}
+ [INFO|2025-03-11 23:59:46] logging.py:143 >> {'loss': 0.1026, 'learning_rate': 2.2171e-05, 'epoch': 1.63, 'throughput': 1910.14}
+ [INFO|2025-03-12 00:00:00] logging.py:143 >> {'loss': 0.0974, 'learning_rate': 2.1892e-05, 'epoch': 1.64, 'throughput': 1911.77}
+ [INFO|2025-03-12 00:00:15] logging.py:143 >> {'loss': 0.1131, 'learning_rate': 2.1614e-05, 'epoch': 1.65, 'throughput': 1913.37}
+ [INFO|2025-03-12 00:00:29] logging.py:143 >> {'loss': 0.0919, 'learning_rate': 2.1336e-05, 'epoch': 1.66, 'throughput': 1914.94}
+ [INFO|2025-03-12 00:00:43] logging.py:143 >> {'loss': 0.1036, 'learning_rate': 2.1059e-05, 'epoch': 1.67, 'throughput': 1916.49}
+ [INFO|2025-03-12 00:00:58] logging.py:143 >> {'loss': 0.0956, 'learning_rate': 2.0782e-05, 'epoch': 1.68, 'throughput': 1918.00}
+ [INFO|2025-03-12 00:01:12] logging.py:143 >> {'loss': 0.0998, 'learning_rate': 2.0505e-05, 'epoch': 1.69, 'throughput': 1919.52}
+ [INFO|2025-03-12 00:01:12] trainer.py:4226 >> ***** Running Evaluation *****
+ [INFO|2025-03-12 00:01:12] trainer.py:4228 >> Num examples = 3363
+ [INFO|2025-03-12 00:01:12] trainer.py:4231 >> Batch size = 8
+ [INFO|2025-03-12 00:01:52] trainer.py:3910 >> Saving model checkpoint to saves/Custom/lora/train_2025-03-11-22-40-04/checkpoint-800
+ [INFO|2025-03-12 00:01:53] configuration_utils.py:696 >> loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--GreatCaptainNemo--ProLLaMA_Stage_1/snapshots/599e4e22ff2cab772732bc2ec4e1872ce20590a9/config.json
+ [INFO|2025-03-12 00:01:53] configuration_utils.py:768 >> Model config LlamaConfig {
+   "architectures": [
+     "LlamaForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "head_dim": 128,
+   "hidden_act": "silu",
+   "hidden_size": 4096,
+   "initializer_range": 0.02,
+   "intermediate_size": 11008,
+   "max_position_embeddings": 4096,
+   "mlp_bias": false,
+   "model_type": "llama",
+   "num_attention_heads": 32,
+   "num_hidden_layers": 32,
+   "num_key_value_heads": 32,
+   "outputs_attentions": true,
+   "pad_token_id": 0,
+   "pretraining_tp": 1,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": null,
+   "rope_theta": 10000.0,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float16",
+   "transformers_version": "4.48.3",
+   "use_cache": true,
+   "vocab_size": 32000
+ }
+ [INFO|2025-03-12 00:01:54] tokenization_utils_base.py:2491 >> tokenizer config file saved in saves/Custom/lora/train_2025-03-11-22-40-04/checkpoint-800/tokenizer_config.json
+ [INFO|2025-03-12 00:01:54] tokenization_utils_base.py:2500 >> Special tokens file saved in saves/Custom/lora/train_2025-03-11-22-40-04/checkpoint-800/special_tokens_map.json
+ [INFO|2025-03-12 00:02:16] logging.py:143 >> {'loss': 0.0956, 'learning_rate': 2.0229e-05, 'epoch': 1.70, 'throughput': 1884.96}
+ [INFO|2025-03-12 00:02:30] logging.py:143 >> {'loss': 0.1008, 'learning_rate': 1.9954e-05, 'epoch': 1.71, 'throughput': 1886.61}
+ [INFO|2025-03-12 00:02:44] logging.py:143 >> {'loss': 0.0955, 'learning_rate': 1.9680e-05, 'epoch': 1.72, 'throughput': 1888.29}
+ [INFO|2025-03-12 00:02:59] logging.py:143 >> {'loss': 0.1039, 'learning_rate': 1.9406e-05, 'epoch': 1.73, 'throughput': 1889.94}
+ [INFO|2025-03-12 00:03:13] logging.py:143 >> {'loss': 0.1005, 'learning_rate': 1.9132e-05, 'epoch': 1.74, 'throughput': 1891.53}
+ [INFO|2025-03-12 00:03:27] logging.py:143 >> {'loss': 0.1069, 'learning_rate': 1.8860e-05, 'epoch': 1.75, 'throughput': 1893.17}
+ [INFO|2025-03-12 00:03:41] logging.py:143 >> {'loss': 0.1004, 'learning_rate': 1.8588e-05, 'epoch': 1.77, 'throughput': 1894.74}
+ [INFO|2025-03-12 00:03:56] logging.py:143 >> {'loss': 0.1002, 'learning_rate': 1.8317e-05, 'epoch': 1.78, 'throughput': 1896.28}
+ [INFO|2025-03-12 00:04:10] logging.py:143 >> {'loss': 0.0927, 'learning_rate': 1.8047e-05, 'epoch': 1.79, 'throughput': 1897.83}
+ [INFO|2025-03-12 00:04:24] logging.py:143 >> {'loss': 0.1013, 'learning_rate': 1.7778e-05, 'epoch': 1.80, 'throughput': 1899.37}
+ [INFO|2025-03-12 00:04:38] logging.py:143 >> {'loss': 0.0945, 'learning_rate': 1.7510e-05, 'epoch': 1.81, 'throughput': 1900.81}
+ [INFO|2025-03-12 00:04:52] logging.py:143 >> {'loss': 0.0949, 'learning_rate': 1.7243e-05, 'epoch': 1.82, 'throughput': 1902.31}
+ [INFO|2025-03-12 00:05:07] logging.py:143 >> {'loss': 0.0923, 'learning_rate': 1.6976e-05, 'epoch': 1.83, 'throughput': 1903.78}
+ [INFO|2025-03-12 00:05:21] logging.py:143 >> {'loss': 0.1049, 'learning_rate': 1.6711e-05, 'epoch': 1.84, 'throughput': 1905.25}
+ [INFO|2025-03-12 00:05:35] logging.py:143 >> {'loss': 0.0906, 'learning_rate': 1.6447e-05, 'epoch': 1.85, 'throughput': 1906.67}
+ [INFO|2025-03-12 00:05:49] logging.py:143 >> {'loss': 0.0942, 'learning_rate': 1.6183e-05, 'epoch': 1.86, 'throughput': 1908.13}
+ [INFO|2025-03-12 00:06:04] logging.py:143 >> {'loss': 0.1000, 'learning_rate': 1.5921e-05, 'epoch': 1.87, 'throughput': 1909.57}
+ [INFO|2025-03-12 00:06:18] logging.py:143 >> {'loss': 0.0985, 'learning_rate': 1.5660e-05, 'epoch': 1.88, 'throughput': 1910.96}
+ [INFO|2025-03-12 00:06:32] logging.py:143 >> {'loss': 0.0959, 'learning_rate': 1.5401e-05, 'epoch': 1.89, 'throughput': 1912.35}
+ [INFO|2025-03-12 00:06:46] logging.py:143 >> {'loss': 0.1083, 'learning_rate': 1.5142e-05, 'epoch': 1.90, 'throughput': 1913.73}
+ [INFO|2025-03-12 00:06:46] trainer.py:4226 >> ***** Running Evaluation *****
+ [INFO|2025-03-12 00:06:46] trainer.py:4228 >> Num examples = 3363
+ [INFO|2025-03-12 00:06:46] trainer.py:4231 >> Batch size = 8
+ [INFO|2025-03-12 00:07:26] trainer.py:3910 >> Saving model checkpoint to saves/Custom/lora/train_2025-03-11-22-40-04/checkpoint-900
+ [INFO|2025-03-12 00:07:27] configuration_utils.py:696 >> loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--GreatCaptainNemo--ProLLaMA_Stage_1/snapshots/599e4e22ff2cab772732bc2ec4e1872ce20590a9/config.json
+ [INFO|2025-03-12 00:07:28] tokenization_utils_base.py:2491 >> tokenizer config file saved in saves/Custom/lora/train_2025-03-11-22-40-04/checkpoint-900/tokenizer_config.json
+ [INFO|2025-03-12 00:07:28] tokenization_utils_base.py:2500 >> Special tokens file saved in saves/Custom/lora/train_2025-03-11-22-40-04/checkpoint-900/special_tokens_map.json
+ [INFO|2025-03-12 00:07:45] logging.py:143 >> {'loss': 0.0942, 'learning_rate': 1.4885e-05, 'epoch': 1.91, 'throughput': 1886.77}
+ [INFO|2025-03-12 00:07:59] logging.py:143 >> {'loss': 0.1026, 'learning_rate': 1.4629e-05, 'epoch': 1.92, 'throughput': 1888.26}
+ [INFO|2025-03-12 00:08:13] logging.py:143 >> {'loss': 0.0933, 'learning_rate': 1.4374e-05, 'epoch': 1.93, 'throughput': 1889.72}
+ [INFO|2025-03-12 00:08:27] logging.py:143 >> {'loss': 0.0955, 'learning_rate': 1.4120e-05, 'epoch': 1.95, 'throughput': 1891.20}
+ [INFO|2025-03-12 00:08:41] logging.py:143 >> {'loss': 0.0933, 'learning_rate': 1.3868e-05, 'epoch': 1.96, 'throughput': 1892.70}
+ [INFO|2025-03-12 00:08:55] logging.py:143 >> {'loss': 0.0905, 'learning_rate': 1.3618e-05, 'epoch': 1.97, 'throughput': 1894.17}
+ [INFO|2025-03-12 00:09:09] logging.py:143 >> {'loss': 0.0959, 'learning_rate': 1.3369e-05, 'epoch': 1.98, 'throughput': 1895.53}
+ [INFO|2025-03-12 00:09:24] logging.py:143 >> {'loss': 0.1031, 'learning_rate': 1.3121e-05, 'epoch': 1.99, 'throughput': 1896.91}
+ [INFO|2025-03-12 00:09:38] logging.py:143 >> {'loss': 0.0896, 'learning_rate': 1.2875e-05, 'epoch': 2.00, 'throughput': 1898.29}
+ [INFO|2025-03-12 00:09:52] logging.py:143 >> {'loss': 0.0931, 'learning_rate': 1.2630e-05, 'epoch': 2.01, 'throughput': 1899.47}
+ [INFO|2025-03-12 00:10:06] logging.py:143 >> {'loss': 0.0904, 'learning_rate': 1.2387e-05, 'epoch': 2.02, 'throughput': 1900.83}
+ [INFO|2025-03-12 00:10:20] logging.py:143 >> {'loss': 0.0973, 'learning_rate': 1.2145e-05, 'epoch': 2.03, 'throughput': 1902.21}
+ [INFO|2025-03-12 00:10:35] logging.py:143 >> {'loss': 0.0937, 'learning_rate': 1.1905e-05, 'epoch': 2.04, 'throughput': 1903.51}
+ [INFO|2025-03-12 00:10:49] logging.py:143 >> {'loss': 0.0898, 'learning_rate': 1.1667e-05, 'epoch': 2.05, 'throughput': 1904.75}
+ [INFO|2025-03-12 00:11:03] logging.py:143 >> {'loss': 0.1006, 'learning_rate': 1.1430e-05, 'epoch': 2.06, 'throughput': 1906.03}
+ [INFO|2025-03-12 00:11:18] logging.py:143 >> {'loss': 0.0981, 'learning_rate': 1.1195e-05, 'epoch': 2.07, 'throughput': 1907.28}
+ [INFO|2025-03-12 00:11:32] logging.py:143 >> {'loss': 0.0943, 'learning_rate': 1.0962e-05, 'epoch': 2.08, 'throughput': 1908.57}
+ [INFO|2025-03-12 00:11:46] logging.py:143 >> {'loss': 0.0943, 'learning_rate': 1.0731e-05, 'epoch': 2.09, 'throughput': 1909.82}
+ [INFO|2025-03-12 00:12:00] logging.py:143 >> {'loss': 0.0843, 'learning_rate': 1.0501e-05, 'epoch': 2.10, 'throughput': 1911.11}
+ [INFO|2025-03-12 00:12:14] logging.py:143 >> {'loss': 0.0953, 'learning_rate': 1.0274e-05, 'epoch': 2.11, 'throughput': 1912.36}
+ [INFO|2025-03-12 00:12:14] trainer.py:4226 >> ***** Running Evaluation *****
+ [INFO|2025-03-12 00:12:14] trainer.py:4228 >> Num examples = 3363
+ [INFO|2025-03-12 00:12:14] trainer.py:4231 >> Batch size = 8
+ [INFO|2025-03-12 00:12:55] trainer.py:3910 >> Saving model checkpoint to saves/Custom/lora/train_2025-03-11-22-40-04/checkpoint-1000
+ [INFO|2025-03-12 00:12:55] configuration_utils.py:696 >> loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--GreatCaptainNemo--ProLLaMA_Stage_1/snapshots/599e4e22ff2cab772732bc2ec4e1872ce20590a9/config.json
+ [INFO|2025-03-12 00:12:56] tokenization_utils_base.py:2491 >> tokenizer config file saved in saves/Custom/lora/train_2025-03-11-22-40-04/checkpoint-1000/tokenizer_config.json
+ [INFO|2025-03-12 00:12:56] tokenization_utils_base.py:2500 >> Special tokens file saved in saves/Custom/lora/train_2025-03-11-22-40-04/checkpoint-1000/special_tokens_map.json
+ [INFO|2025-03-12 00:13:16] logging.py:143 >> {'loss': 0.0923, 'learning_rate': 1.0048e-05, 'epoch': 2.12, 'throughput': 1886.30}
+ [INFO|2025-03-12 00:13:30] logging.py:143 >> {'loss': 0.0847, 'learning_rate': 9.8237e-06, 'epoch': 2.14, 'throughput': 1887.60}
+ [INFO|2025-03-12 00:13:45] logging.py:143 >> {'loss': 0.0921, 'learning_rate': 9.6016e-06, 'epoch': 2.15, 'throughput': 1888.89}
+ [INFO|2025-03-12 00:13:59] logging.py:143 >> {'loss': 0.0887, 'learning_rate': 9.3814e-06, 'epoch': 2.16, 'throughput': 1890.15}
+ [INFO|2025-03-12 00:14:13] logging.py:143 >> {'loss': 0.0924, 'learning_rate': 9.1632e-06, 'epoch': 2.17, 'throughput': 1891.43}
+ [INFO|2025-03-12 00:14:28] logging.py:143 >> {'loss': 0.0884, 'learning_rate': 8.9470e-06, 'epoch': 2.18, 'throughput': 1892.70}
+ [INFO|2025-03-12 00:14:42] logging.py:143 >> {'loss': 0.0937, 'learning_rate': 8.7329e-06, 'epoch': 2.19, 'throughput': 1893.97}
+ [INFO|2025-03-12 00:14:56] logging.py:143 >> {'loss': 0.0856, 'learning_rate': 8.5208e-06, 'epoch': 2.20, 'throughput': 1895.24}
+ [INFO|2025-03-12 00:15:11] logging.py:143 >> {'loss': 0.0864, 'learning_rate': 8.3107e-06, 'epoch': 2.21, 'throughput': 1896.47}
+ [INFO|2025-03-12 00:15:25] logging.py:143 >> {'loss': 0.0974, 'learning_rate': 8.1028e-06, 'epoch': 2.22, 'throughput': 1897.71}
+ [INFO|2025-03-12 00:15:39] logging.py:143 >> {'loss': 0.0962, 'learning_rate': 7.8970e-06, 'epoch': 2.23, 'throughput': 1898.91}
+ [INFO|2025-03-12 00:15:53] logging.py:143 >> {'loss': 0.0993, 'learning_rate': 7.6933e-06, 'epoch': 2.24, 'throughput': 1900.13}
+ [INFO|2025-03-12 00:16:07] logging.py:143 >> {'loss': 0.0915, 'learning_rate': 7.4918e-06, 'epoch': 2.25, 'throughput': 1901.37}
+ [INFO|2025-03-12 00:16:21] logging.py:143 >> {'loss': 0.0944, 'learning_rate': 7.2926e-06, 'epoch': 2.26, 'throughput': 1902.61}
+ [INFO|2025-03-12 00:16:35] logging.py:143 >> {'loss': 0.0945, 'learning_rate': 7.0956e-06, 'epoch': 2.27, 'throughput': 1903.77}
+ [INFO|2025-03-12 00:16:50] logging.py:143 >> {'loss': 0.0896, 'learning_rate': 6.9008e-06, 'epoch': 2.28, 'throughput': 1904.97}
+ [INFO|2025-03-12 00:17:04] logging.py:143 >> {'loss': 0.0958, 'learning_rate': 6.7083e-06, 'epoch': 2.29, 'throughput': 1906.16}
+ [INFO|2025-03-12 00:17:18] logging.py:143 >> {'loss': 0.0938, 'learning_rate': 6.5181e-06, 'epoch': 2.30, 'throughput': 1907.34}
+ [INFO|2025-03-12 00:17:32] logging.py:143 >> {'loss': 0.0923, 'learning_rate': 6.3302e-06, 'epoch': 2.32, 'throughput': 1908.50}
+ [INFO|2025-03-12 00:17:46] logging.py:143 >> {'loss': 0.0887, 'learning_rate': 6.1447e-06, 'epoch': 2.33, 'throughput': 1909.64}
+ [INFO|2025-03-12 00:17:46] trainer.py:4226 >> ***** Running Evaluation *****
+ [INFO|2025-03-12 00:17:46] trainer.py:4228 >> Num examples = 3363
+ [INFO|2025-03-12 00:17:46] trainer.py:4231 >> Batch size = 8
+ [INFO|2025-03-12 00:18:27] trainer.py:3910 >> Saving model checkpoint to saves/Custom/lora/train_2025-03-11-22-40-04/checkpoint-1100
+ [INFO|2025-03-12 00:18:27] configuration_utils.py:696 >> loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--GreatCaptainNemo--ProLLaMA_Stage_1/snapshots/599e4e22ff2cab772732bc2ec4e1872ce20590a9/config.json
+ [INFO|2025-03-12 00:18:28] tokenization_utils_base.py:2491 >> tokenizer config file saved in saves/Custom/lora/train_2025-03-11-22-40-04/checkpoint-1100/tokenizer_config.json
+ [INFO|2025-03-12 00:18:28] tokenization_utils_base.py:2500 >> Special tokens file saved in saves/Custom/lora/train_2025-03-11-22-40-04/checkpoint-1100/special_tokens_map.json
+ [INFO|2025-03-12 00:18:50] logging.py:143 >> {'loss': 0.0826, 'learning_rate': 5.9616e-06, 'epoch': 2.34, 'throughput': 1884.99}
+ [INFO|2025-03-12 00:19:04] logging.py:143 >> {'loss': 0.0826, 'learning_rate': 5.7809e-06, 'epoch': 2.35, 'throughput': 1886.19}
+ [INFO|2025-03-12 00:19:18] logging.py:143 >> {'loss': 0.0929, 'learning_rate': 5.6026e-06, 'epoch': 2.36, 'throughput': 1887.37}
+ [INFO|2025-03-12 00:19:33] logging.py:143 >> {'loss': 0.0936, 'learning_rate': 5.4267e-06, 'epoch': 2.37, 'throughput': 1888.56}
+ [INFO|2025-03-12 00:19:47] logging.py:143 >> {'loss': 0.0856, 'learning_rate': 5.2534e-06, 'epoch': 2.38, 'throughput': 1889.74}
+ [INFO|2025-03-12 00:20:01] logging.py:143 >> {'loss': 0.0923, 'learning_rate': 5.0825e-06, 'epoch': 2.39, 'throughput': 1890.91}
+ [INFO|2025-03-12 00:20:15] logging.py:143 >> {'loss': 0.0911, 'learning_rate': 4.9141e-06, 'epoch': 2.40, 'throughput': 1892.04}
+ [INFO|2025-03-12 00:20:30] logging.py:143 >> {'loss': 0.0851, 'learning_rate': 4.7482e-06, 'epoch': 2.41, 'throughput': 1893.14}
+ [INFO|2025-03-12 00:20:44] logging.py:143 >> {'loss': 0.0956, 'learning_rate': 4.5849e-06, 'epoch': 2.42, 'throughput': 1894.28}
+ [INFO|2025-03-12 00:20:58] logging.py:143 >> {'loss': 0.0916, 'learning_rate': 4.4242e-06, 'epoch': 2.43, 'throughput': 1895.41}
+ [INFO|2025-03-12 00:21:13] logging.py:143 >> {'loss': 0.0886, 'learning_rate': 4.2660e-06, 'epoch': 2.44, 'throughput': 1896.52}
+ [INFO|2025-03-12 00:21:27] logging.py:143 >> {'loss': 0.0852, 'learning_rate': 4.1105e-06, 'epoch': 2.45, 'throughput': 1897.55}
+ [INFO|2025-03-12 00:21:41] logging.py:143 >> {'loss': 0.0942, 'learning_rate': 3.9576e-06, 'epoch': 2.46, 'throughput': 1898.62}
+ [INFO|2025-03-12 00:21:56] logging.py:143 >> {'loss': 0.0889, 'learning_rate': 3.8074e-06, 'epoch': 2.47, 'throughput': 1899.67}
+ [INFO|2025-03-12 00:22:10] logging.py:143 >> {'loss': 0.0907, 'learning_rate': 3.6598e-06, 'epoch': 2.48, 'throughput': 1900.73}
+ [INFO|2025-03-12 00:22:24] logging.py:143 >> {'loss': 0.0910, 'learning_rate': 3.5150e-06, 'epoch': 2.49, 'throughput': 1901.79}
+ [INFO|2025-03-12 00:22:38] logging.py:143 >> {'loss': 0.0888, 'learning_rate': 3.3728e-06, 'epoch': 2.51, 'throughput': 1902.87}
+ [INFO|2025-03-12 00:22:53] logging.py:143 >> {'loss': 0.0965, 'learning_rate': 3.2334e-06, 'epoch': 2.52, 'throughput': 1903.94}
+ [INFO|2025-03-12 00:23:07] logging.py:143 >> {'loss': 0.0879, 'learning_rate': 3.0967e-06, 'epoch': 2.53, 'throughput': 1905.01}
+ [INFO|2025-03-12 00:23:21] logging.py:143 >> {'loss': 0.0889, 'learning_rate': 2.9627e-06, 'epoch': 2.54, 'throughput': 1906.08}
+ [INFO|2025-03-12 00:23:21] trainer.py:4226 >> ***** Running Evaluation *****
+ [INFO|2025-03-12 00:23:21] trainer.py:4228 >> Num examples = 3363
+ [INFO|2025-03-12 00:23:21] trainer.py:4231 >> Batch size = 8
+ [INFO|2025-03-12 00:24:01] trainer.py:3910 >> Saving model checkpoint to saves/Custom/lora/train_2025-03-11-22-40-04/checkpoint-1200
+ [INFO|2025-03-12 00:24:02] configuration_utils.py:696 >> loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--GreatCaptainNemo--ProLLaMA_Stage_1/snapshots/599e4e22ff2cab772732bc2ec4e1872ce20590a9/config.json
+ [INFO|2025-03-12 00:24:03] tokenization_utils_base.py:2491 >> tokenizer config file saved in saves/Custom/lora/train_2025-03-11-22-40-04/checkpoint-1200/tokenizer_config.json
+ [INFO|2025-03-12 00:24:03] tokenization_utils_base.py:2500 >> Special tokens file saved in saves/Custom/lora/train_2025-03-11-22-40-04/checkpoint-1200/special_tokens_map.json
+ [INFO|2025-03-12 00:24:22] logging.py:143 >> {'loss': 0.0890, 'learning_rate': 2.8316e-06, 'epoch': 2.55, 'throughput': 1884.54}
+ [INFO|2025-03-12 00:24:37] logging.py:143 >> {'loss': 0.0965, 'learning_rate': 2.7032e-06, 'epoch': 2.56, 'throughput': 1885.70}
+ [INFO|2025-03-12 00:24:51] logging.py:143 >> {'loss': 0.0921, 'learning_rate': 2.5777e-06, 'epoch': 2.57, 'throughput': 1886.82}
+ [INFO|2025-03-12 00:25:05] logging.py:143 >> {'loss': 0.0831, 'learning_rate': 2.4549e-06, 'epoch': 2.58, 'throughput': 1887.91}
+ [INFO|2025-03-12 00:25:19] logging.py:143 >> {'loss': 0.0915, 'learning_rate': 2.3351e-06, 'epoch': 2.59, 'throughput': 1888.99}
+ [INFO|2025-03-12 00:25:34] logging.py:143 >> {'loss': 0.0893, 'learning_rate': 2.2180e-06, 'epoch': 2.60, 'throughput': 1890.12}
+ [INFO|2025-03-12 00:25:48] logging.py:143 >> {'loss': 0.0944, 'learning_rate': 2.1039e-06, 'epoch': 2.61, 'throughput': 1891.22}
+ [INFO|2025-03-12 00:26:02] logging.py:143 >> {'loss': 0.0854, 'learning_rate': 1.9926e-06, 'epoch': 2.62, 'throughput': 1892.29}
+ [INFO|2025-03-12 00:26:16] logging.py:143 >> {'loss': 0.0895, 'learning_rate': 1.8843e-06, 'epoch': 2.63, 'throughput': 1893.35}
+ [INFO|2025-03-12 00:26:31] logging.py:143 >> {'loss': 0.0807, 'learning_rate': 1.7788e-06, 'epoch': 2.64, 'throughput': 1894.39}
+ [INFO|2025-03-12 00:26:45] logging.py:143 >> {'loss': 0.0820, 'learning_rate': 1.6763e-06, 'epoch': 2.65, 'throughput': 1895.46}
+ [INFO|2025-03-12 00:26:59] logging.py:143 >> {'loss': 0.0897, 'learning_rate': 1.5767e-06, 'epoch': 2.66, 'throughput': 1896.52}
+ [INFO|2025-03-12 00:27:13] logging.py:143 >> {'loss': 0.0909, 'learning_rate': 1.4801e-06, 'epoch': 2.67, 'throughput': 1897.54}
+ [INFO|2025-03-12 00:27:27] logging.py:143 >> {'loss': 0.0984, 'learning_rate': 1.3864e-06, 'epoch': 2.68, 'throughput': 1898.57}
+ [INFO|2025-03-12 00:27:42] logging.py:143 >> {'loss': 0.0893, 'learning_rate': 1.2957e-06, 'epoch': 2.70, 'throughput': 1899.61}
+ [INFO|2025-03-12 00:27:56] logging.py:143 >> {'loss': 0.0843, 'learning_rate': 1.2080e-06, 'epoch': 2.71, 'throughput': 1900.62}
+ [INFO|2025-03-12 00:28:10] logging.py:143 >> {'loss': 0.0892, 'learning_rate': 1.1233e-06, 'epoch': 2.72, 'throughput': 1901.65}
+ [INFO|2025-03-12 00:28:24] logging.py:143 >> {'loss': 0.0883, 'learning_rate': 1.0416e-06, 'epoch': 2.73, 'throughput': 1902.67}
+ [INFO|2025-03-12 00:28:38] logging.py:143 >> {'loss': 0.0890, 'learning_rate': 9.6296e-07, 'epoch': 2.74, 'throughput': 1903.66}
+ [INFO|2025-03-12 00:28:53] logging.py:143 >> {'loss': 0.0859, 'learning_rate': 8.8732e-07, 'epoch': 2.75, 'throughput': 1904.62}
+ [INFO|2025-03-12 00:28:53] trainer.py:4226 >> ***** Running Evaluation *****
+ [INFO|2025-03-12 00:28:53] trainer.py:4228 >> Num examples = 3363
+ [INFO|2025-03-12 00:28:53] trainer.py:4231 >> Batch size = 8
+ [INFO|2025-03-12 00:29:33] trainer.py:3910 >> Saving model checkpoint to saves/Custom/lora/train_2025-03-11-22-40-04/checkpoint-1300
+ [INFO|2025-03-12 00:29:34] configuration_utils.py:696 >> loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--GreatCaptainNemo--ProLLaMA_Stage_1/snapshots/599e4e22ff2cab772732bc2ec4e1872ce20590a9/config.json
+ [INFO|2025-03-12 00:29:35] tokenization_utils_base.py:2491 >> tokenizer config file saved in saves/Custom/lora/train_2025-03-11-22-40-04/checkpoint-1300/tokenizer_config.json
+ [INFO|2025-03-12 00:29:35] tokenization_utils_base.py:2500 >> Special tokens file saved in saves/Custom/lora/train_2025-03-11-22-40-04/checkpoint-1300/special_tokens_map.json
+ [INFO|2025-03-12 00:29:51] logging.py:143 >> {'loss': 0.0937, 'learning_rate': 8.1472e-07, 'epoch': 2.76, 'throughput': 1886.07}
+ [INFO|2025-03-12 00:30:06] logging.py:143 >> {'loss': 0.0927, 'learning_rate': 7.4517e-07, 'epoch': 2.77, 'throughput': 1887.10}
+ [INFO|2025-03-12 00:30:20] logging.py:143 >> {'loss': 0.0840, 'learning_rate': 6.7868e-07, 'epoch': 2.78, 'throughput': 1888.10}
+ [INFO|2025-03-12 00:30:34] logging.py:143 >> {'loss': 0.0862, 'learning_rate': 6.1526e-07, 'epoch': 2.79, 'throughput': 1889.14}
+ [INFO|2025-03-12 00:30:48] logging.py:143 >> {'loss': 0.0967, 'learning_rate': 5.5490e-07, 'epoch': 2.80, 'throughput': 1890.18}
+ [INFO|2025-03-12 00:31:02] logging.py:143 >> {'loss': 0.0878, 'learning_rate': 4.9763e-07, 'epoch': 2.81, 'throughput': 1891.24}
+ [INFO|2025-03-12 00:31:16] logging.py:143 >> {'loss': 0.0923, 'learning_rate': 4.4345e-07, 'epoch': 2.82, 'throughput': 1892.27}
+ [INFO|2025-03-12 00:31:30] logging.py:143 >> {'loss': 0.0887, 'learning_rate': 3.9236e-07, 'epoch': 2.83, 'throughput': 1893.28}
+ [INFO|2025-03-12 00:31:45] logging.py:143 >> {'loss': 0.0862, 'learning_rate': 3.4438e-07, 'epoch': 2.84, 'throughput': 1894.25}
+ [INFO|2025-03-12 00:31:59] logging.py:143 >> {'loss': 0.0862, 'learning_rate': 2.9950e-07, 'epoch': 2.85, 'throughput': 1895.22}
+ [INFO|2025-03-12 00:32:13] logging.py:143 >> {'loss': 0.0878, 'learning_rate': 2.5774e-07, 'epoch': 2.86, 'throughput': 1896.20}
+ [INFO|2025-03-12 00:32:27] logging.py:143 >> {'loss': 0.0881, 'learning_rate': 2.1910e-07, 'epoch': 2.88, 'throughput': 1897.17}
+ [INFO|2025-03-12 00:32:41] logging.py:143 >> {'loss': 0.0913, 'learning_rate': 1.8358e-07, 'epoch': 2.89, 'throughput': 1898.12}
+ [INFO|2025-03-12 00:32:56] logging.py:143 >> {'loss': 0.0899, 'learning_rate': 1.5119e-07, 'epoch': 2.90, 'throughput': 1899.07}
+ [INFO|2025-03-12 00:33:10] logging.py:143 >> {'loss': 0.0813, 'learning_rate': 1.2193e-07, 'epoch': 2.91, 'throughput': 1900.01}
+ [INFO|2025-03-12 00:33:24] logging.py:143 >> {'loss': 0.0874, 'learning_rate': 9.5813e-08, 'epoch': 2.92, 'throughput': 1900.94}
+ [INFO|2025-03-12 00:33:38] logging.py:143 >> {'loss': 0.0915, 'learning_rate': 7.2832e-08, 'epoch': 2.93, 'throughput': 1901.81}
+ [INFO|2025-03-12 00:33:52] logging.py:143 >> {'loss': 0.0835, 'learning_rate': 5.2993e-08, 'epoch': 2.94, 'throughput': 1902.75}
+ [INFO|2025-03-12 00:34:07] logging.py:143 >> {'loss': 0.0920, 'learning_rate': 3.6299e-08, 'epoch': 2.95, 'throughput': 1903.68}
+ [INFO|2025-03-12 00:34:21] logging.py:143 >> {'loss': 0.0883, 'learning_rate': 2.2752e-08, 'epoch': 2.96, 'throughput': 1904.60}
+ [INFO|2025-03-12 00:34:21] trainer.py:4226 >> ***** Running Evaluation *****
+ [INFO|2025-03-12 00:34:21] trainer.py:4228 >> Num examples = 3363
+ [INFO|2025-03-12 00:34:21] trainer.py:4231 >> Batch size = 8
+ [INFO|2025-03-12 00:35:01] trainer.py:3910 >> Saving model checkpoint to saves/Custom/lora/train_2025-03-11-22-40-04/checkpoint-1400
+ [INFO|2025-03-12 00:35:08] configuration_utils.py:696 >> loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--GreatCaptainNemo--ProLLaMA_Stage_1/snapshots/599e4e22ff2cab772732bc2ec4e1872ce20590a9/config.json
+ [INFO|2025-03-12 00:35:09] tokenization_utils_base.py:2491 >> tokenizer config file saved in saves/Custom/lora/train_2025-03-11-22-40-04/checkpoint-1400/tokenizer_config.json
+ [INFO|2025-03-12 00:35:09] tokenization_utils_base.py:2500 >> Special tokens file saved in saves/Custom/lora/train_2025-03-11-22-40-04/checkpoint-1400/special_tokens_map.json
+ [INFO|2025-03-12 00:35:25] logging.py:143 >> {'loss': 0.0872, 'learning_rate': 1.2354e-08, 'epoch': 2.97, 'throughput': 1885.05}
+ [INFO|2025-03-12 00:35:39] logging.py:143 >> {'loss': 0.0822, 'learning_rate': 5.1056e-09, 'epoch': 2.98, 'throughput': 1886.04}
+ [INFO|2025-03-12 00:35:53] logging.py:143 >> {'loss': 0.0901, 'learning_rate': 1.0085e-09, 'epoch': 2.99, 'throughput': 1887.02}
+ [INFO|2025-03-12 00:36:04] trainer.py:3910 >> Saving model checkpoint to saves/Custom/lora/train_2025-03-11-22-40-04/checkpoint-1419
+ [INFO|2025-03-12 00:36:05] configuration_utils.py:696 >> loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--GreatCaptainNemo--ProLLaMA_Stage_1/snapshots/599e4e22ff2cab772732bc2ec4e1872ce20590a9/config.json
+ [INFO|2025-03-12 00:36:06] tokenization_utils_base.py:2491 >> tokenizer config file saved in saves/Custom/lora/train_2025-03-11-22-40-04/checkpoint-1419/tokenizer_config.json
+ [INFO|2025-03-12 00:36:06] tokenization_utils_base.py:2500 >> Special tokens file saved in saves/Custom/lora/train_2025-03-11-22-40-04/checkpoint-1419/special_tokens_map.json
+ [INFO|2025-03-12 00:36:09] trainer.py:2643 >> Training completed. Do not forget to share your model on huggingface.co/models =)
+ [INFO|2025-03-12 00:36:09] trainer.py:3910 >> Saving model checkpoint to saves/Custom/lora/train_2025-03-11-22-40-04
+ [INFO|2025-03-12 00:36:09] configuration_utils.py:696 >> loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--GreatCaptainNemo--ProLLaMA_Stage_1/snapshots/599e4e22ff2cab772732bc2ec4e1872ce20590a9/config.json
+ [INFO|2025-03-12 00:36:11] tokenization_utils_base.py:2491 >> tokenizer config file saved in saves/Custom/lora/train_2025-03-11-22-40-04/tokenizer_config.json
+ [INFO|2025-03-12 00:36:11] tokenization_utils_base.py:2500 >> Special tokens file saved in saves/Custom/lora/train_2025-03-11-22-40-04/special_tokens_map.json
+ [WARNING|2025-03-12 00:36:12] logging.py:148 >> No metric eval_accuracy to plot.
+ [INFO|2025-03-12 00:36:12] trainer.py:4226 >> ***** Running Evaluation *****
+ [INFO|2025-03-12 00:36:12] trainer.py:4228 >> Num examples = 3363
+ [INFO|2025-03-12 00:36:12] trainer.py:4231 >> Batch size = 8
+ [INFO|2025-03-12 00:36:52] modelcard.py:449 >> Dropping the following result as it does not have all the necessary fields:
+ {'task': {'name': 'Causal Language Modeling', 'type': 'text-generation'}}
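The log ends with the final adapter save. As orientation (not part of the log), a minimal sketch of how a LoRA checkpoint produced by this run would typically be attached to the base model with PEFT; the base model id and the adapter path are taken from the log, and loading from a published Hub repo id would work the same way:

```python
# Hedged sketch: attach the saved LoRA adapter to the base model for inference.
# Assumes `transformers` and `peft` are installed; the adapter path below is
# the output directory seen in the log above.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "GreatCaptainNemo/ProLLaMA_Stage_1"                  # base model from the log
adapter_path = "saves/Custom/lora/train_2025-03-11-22-40-04"   # final save directory

tokenizer = AutoTokenizer.from_pretrained(adapter_path)
base = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype="auto")
model = PeftModel.from_pretrained(base, adapter_path)          # wraps base with the LoRA weights
model.eval()
```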
special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "</s>",
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
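Note that `pad_token` is mapped to the same string as EOS (`</s>`). A hedged sketch of how this map surfaces once the tokenizer is loaded; the local path is illustrative, assuming this commit's files are on disk:

```python
# Hedged sketch: inspect the special tokens defined by special_tokens_map.json.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(".")            # directory holding this commit's files
print(tok.bos_token, tok.eos_token, tok.unk_token)  # <s> </s> <unk>
assert tok.pad_token == tok.eos_token == "</s>"     # padding reuses the EOS token
```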
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+ size 499723
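tokenizer.model is tracked with Git LFS, so the three lines above are the pointer file, not the SentencePiece model itself. A small sketch (the on-disk filename is an assumption) that parses the pointer fields:

```python
# Hedged sketch: parse a Git LFS pointer file into its key/value fields.
def read_lfs_pointer(path: str) -> dict:
    fields = {}
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            key, _, value = line.strip().partition(" ")  # "oid sha256:..." -> ("oid", "sha256:...")
            fields[key] = value
    return fields

ptr = read_lfs_pointer("tokenizer.model")  # assumed local filename
print(ptr["oid"], ptr["size"])             # sha256:9e55... 499723
```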
tokenizer_config.json ADDED
@@ -0,0 +1,47 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "add_prefix_space": null,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<s>",
+   "chat_template": "{% set system_message = 'Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n' %}{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% endif %}{% if system_message is defined %}{{ system_message }}{% endif %}{% for message in loop_messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '### Instruction:\n' + content + '\n\n### Response:\n' }}{% elif message['role'] == 'assistant' %}{{ content + '</s>' + '\n\n' }}{% endif %}{% endfor %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "extra_special_tokens": {},
+   "legacy": true,
+   "model_max_length": 2048,
+   "pad_token": "</s>",
+   "padding_side": "right",
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "split_special_tokens": false,
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": false,
+   "use_fast": true
+ }
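The `chat_template` above is an Alpaca-style instruction template: each user turn becomes an `### Instruction:` / `### Response:` pair, preceded by a default system preamble. A hedged sketch of how it renders through `apply_chat_template`; the load path and prompt text are illustrative:

```python
# Hedged sketch: render one user message through the Alpaca-style chat template.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(".")  # directory holding this commit's files
messages = [{"role": "user", "content": "Generate a protein sequence."}]
prompt = tok.apply_chat_template(messages, tokenize=False)
print(prompt)
# Expected shape of the output, per the template above:
#   Below is an instruction that describes a task. Write a response that
#   appropriately completes the request.
#
#   ### Instruction:
#   Generate a protein sequence.
#
#   ### Response:
```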
trainer_log.jsonl ADDED
@@ -0,0 +1,298 @@
+ {"current_steps": 5, "total_steps": 1419, "loss": 14.2333, "lr": 1.25e-05, "epoch": 0.010570824524312896, "percentage": 0.35, "elapsed_time": "0:00:14", "remaining_time": "1:08:37", "throughput": 2136.23, "total_tokens": 31104}
2
+ {"current_steps": 10, "total_steps": 1419, "loss": 9.2972, "lr": 2.5e-05, "epoch": 0.021141649048625793, "percentage": 0.7, "elapsed_time": "0:00:28", "remaining_time": "1:07:13", "throughput": 2172.88, "total_tokens": 62208}
3
+ {"current_steps": 15, "total_steps": 1419, "loss": 2.411, "lr": 3.7500000000000003e-05, "epoch": 0.03171247357293869, "percentage": 1.06, "elapsed_time": "0:00:42", "remaining_time": "1:06:41", "throughput": 2186.91, "total_tokens": 93504}
4
+ {"current_steps": 20, "total_steps": 1419, "loss": 0.9413, "lr": 5e-05, "epoch": 0.042283298097251586, "percentage": 1.41, "elapsed_time": "0:00:57", "remaining_time": "1:06:29", "throughput": 2188.14, "total_tokens": 124800}
5
+ {"current_steps": 25, "total_steps": 1419, "loss": 0.4389, "lr": 4.9998424168507275e-05, "epoch": 0.052854122621564484, "percentage": 1.76, "elapsed_time": "0:01:11", "remaining_time": "1:06:10", "throughput": 2192.25, "total_tokens": 156096}
6
+ {"current_steps": 30, "total_steps": 1419, "loss": 0.4112, "lr": 4.999369687268868e-05, "epoch": 0.06342494714587738, "percentage": 2.11, "elapsed_time": "0:01:25", "remaining_time": "1:05:47", "throughput": 2195.86, "total_tokens": 187200}
7
+ {"current_steps": 35, "total_steps": 1419, "loss": 0.3011, "lr": 4.998581870849795e-05, "epoch": 0.07399577167019028, "percentage": 2.47, "elapsed_time": "0:01:39", "remaining_time": "1:05:32", "throughput": 2197.31, "total_tokens": 218496}
8
+ {"current_steps": 40, "total_steps": 1419, "loss": 0.2631, "lr": 4.997479066910782e-05, "epoch": 0.08456659619450317, "percentage": 2.82, "elapsed_time": "0:01:53", "remaining_time": "1:05:18", "throughput": 2198.95, "total_tokens": 249920}
9
+ {"current_steps": 45, "total_steps": 1419, "loss": 0.2223, "lr": 4.996061414478485e-05, "epoch": 0.09513742071881606, "percentage": 3.17, "elapsed_time": "0:02:07", "remaining_time": "1:05:01", "throughput": 2200.83, "total_tokens": 281216}
10
+ {"current_steps": 50, "total_steps": 1419, "loss": 0.2446, "lr": 4.994329092271408e-05, "epoch": 0.10570824524312897, "percentage": 3.52, "elapsed_time": "0:02:21", "remaining_time": "1:04:47", "throughput": 2201.29, "total_tokens": 312512}
11
+ {"current_steps": 55, "total_steps": 1419, "loss": 0.2994, "lr": 4.992282318677387e-05, "epoch": 0.11627906976744186, "percentage": 3.88, "elapsed_time": "0:02:36", "remaining_time": "1:04:30", "throughput": 2202.29, "total_tokens": 343680}
12
+ {"current_steps": 60, "total_steps": 1419, "loss": 0.2916, "lr": 4.9899213517260416e-05, "epoch": 0.12684989429175475, "percentage": 4.23, "elapsed_time": "0:02:50", "remaining_time": "1:04:12", "throughput": 2204.0, "total_tokens": 374848}
13
+ {"current_steps": 65, "total_steps": 1419, "loss": 0.2317, "lr": 4.9872464890562576e-05, "epoch": 0.13742071881606766, "percentage": 4.58, "elapsed_time": "0:03:04", "remaining_time": "1:03:59", "throughput": 2204.62, "total_tokens": 406400}
14
+ {"current_steps": 70, "total_steps": 1419, "loss": 0.2216, "lr": 4.9842580678786645e-05, "epoch": 0.14799154334038056, "percentage": 4.93, "elapsed_time": "0:03:18", "remaining_time": "1:03:45", "throughput": 2204.91, "total_tokens": 437696}
15
+ {"current_steps": 75, "total_steps": 1419, "loss": 0.2311, "lr": 4.980956464933116e-05, "epoch": 0.15856236786469344, "percentage": 5.29, "elapsed_time": "0:03:32", "remaining_time": "1:03:30", "throughput": 2205.1, "total_tokens": 468864}
16
+ {"current_steps": 80, "total_steps": 1419, "loss": 0.2051, "lr": 4.9773420964412064e-05, "epoch": 0.16913319238900634, "percentage": 5.64, "elapsed_time": "0:03:46", "remaining_time": "1:03:14", "throughput": 2205.46, "total_tokens": 499968}
17
+ {"current_steps": 85, "total_steps": 1419, "loss": 0.1928, "lr": 4.973415418053789e-05, "epoch": 0.17970401691331925, "percentage": 5.99, "elapsed_time": "0:04:00", "remaining_time": "1:02:58", "throughput": 2205.61, "total_tokens": 531072}
18
+ {"current_steps": 90, "total_steps": 1419, "loss": 0.1849, "lr": 4.969176924793543e-05, "epoch": 0.19027484143763213, "percentage": 6.34, "elapsed_time": "0:04:14", "remaining_time": "1:02:45", "throughput": 2205.03, "total_tokens": 562240}
19
+ {"current_steps": 95, "total_steps": 1419, "loss": 0.172, "lr": 4.96462715099256e-05, "epoch": 0.20084566596194503, "percentage": 6.69, "elapsed_time": "0:04:29", "remaining_time": "1:02:31", "throughput": 2205.0, "total_tokens": 593536}
20
+ {"current_steps": 100, "total_steps": 1419, "loss": 0.1778, "lr": 4.9597666702249865e-05, "epoch": 0.21141649048625794, "percentage": 7.05, "elapsed_time": "0:04:43", "remaining_time": "1:02:16", "throughput": 2205.18, "total_tokens": 624768}
21
+ {"current_steps": 100, "total_steps": 1419, "eval_loss": 0.17541779577732086, "epoch": 0.21141649048625794, "percentage": 7.05, "elapsed_time": "0:05:23", "remaining_time": "1:11:09", "throughput": 1930.26, "total_tokens": 624768}
22
+ {"current_steps": 105, "total_steps": 1419, "loss": 0.1754, "lr": 4.954596095234718e-05, "epoch": 0.2219873150105708, "percentage": 7.4, "elapsed_time": "0:05:45", "remaining_time": "1:11:57", "throughput": 1902.03, "total_tokens": 656256}
23
+ {"current_steps": 110, "total_steps": 1419, "loss": 0.1727, "lr": 4.9491160778581445e-05, "epoch": 0.23255813953488372, "percentage": 7.75, "elapsed_time": "0:05:59", "remaining_time": "1:11:17", "throughput": 1913.33, "total_tokens": 687808}
24
+ {"current_steps": 115, "total_steps": 1419, "loss": 0.1728, "lr": 4.943327308941985e-05, "epoch": 0.24312896405919662, "percentage": 8.1, "elapsed_time": "0:06:13", "remaining_time": "1:10:35", "throughput": 1924.44, "total_tokens": 718848}
25
+ {"current_steps": 120, "total_steps": 1419, "loss": 0.1649, "lr": 4.9372305182561874e-05, "epoch": 0.2536997885835095, "percentage": 8.46, "elapsed_time": "0:06:27", "remaining_time": "1:09:57", "throughput": 1934.37, "total_tokens": 750080}
26
+ {"current_steps": 125, "total_steps": 1419, "loss": 0.1647, "lr": 4.9308264744019326e-05, "epoch": 0.2642706131078224, "percentage": 8.81, "elapsed_time": "0:06:41", "remaining_time": "1:09:20", "throughput": 1943.78, "total_tokens": 781184}
27
+ {"current_steps": 130, "total_steps": 1419, "loss": 0.1683, "lr": 4.9241159847147405e-05, "epoch": 0.2748414376321353, "percentage": 9.16, "elapsed_time": "0:06:55", "remaining_time": "1:08:43", "throughput": 1952.73, "total_tokens": 812160}
28
+ {"current_steps": 135, "total_steps": 1419, "loss": 0.1597, "lr": 4.917099895162689e-05, "epoch": 0.2854122621564482, "percentage": 9.51, "elapsed_time": "0:07:10", "remaining_time": "1:08:11", "throughput": 1961.08, "total_tokens": 843584}
29
+ {"current_steps": 140, "total_steps": 1419, "loss": 0.1669, "lr": 4.9097790902397686e-05, "epoch": 0.2959830866807611, "percentage": 9.87, "elapsed_time": "0:07:24", "remaining_time": "1:07:40", "throughput": 1969.16, "total_tokens": 875200}
30
+ {"current_steps": 145, "total_steps": 1419, "loss": 0.1568, "lr": 4.902154492854374e-05, "epoch": 0.30655391120507397, "percentage": 10.22, "elapsed_time": "0:07:38", "remaining_time": "1:07:09", "throughput": 1976.56, "total_tokens": 906432}
31
+ {"current_steps": 150, "total_steps": 1419, "loss": 0.1608, "lr": 4.8942270642129604e-05, "epoch": 0.3171247357293869, "percentage": 10.57, "elapsed_time": "0:07:52", "remaining_time": "1:06:39", "throughput": 1983.65, "total_tokens": 937664}
32
+ {"current_steps": 155, "total_steps": 1419, "loss": 0.1654, "lr": 4.8859978036988644e-05, "epoch": 0.3276955602536998, "percentage": 10.92, "elapsed_time": "0:08:06", "remaining_time": "1:06:10", "throughput": 1990.02, "total_tokens": 968960}
33
+ {"current_steps": 160, "total_steps": 1419, "loss": 0.1639, "lr": 4.8774677487463175e-05, "epoch": 0.3382663847780127, "percentage": 11.28, "elapsed_time": "0:08:20", "remaining_time": "1:05:42", "throughput": 1996.5, "total_tokens": 1000192}
34
+ {"current_steps": 165, "total_steps": 1419, "loss": 0.16, "lr": 4.8686379747096556e-05, "epoch": 0.3488372093023256, "percentage": 11.63, "elapsed_time": "0:08:35", "remaining_time": "1:05:15", "throughput": 2002.14, "total_tokens": 1031616}
35
+ {"current_steps": 170, "total_steps": 1419, "loss": 0.1645, "lr": 4.85950959472776e-05, "epoch": 0.3594080338266385, "percentage": 11.98, "elapsed_time": "0:08:49", "remaining_time": "1:04:48", "throughput": 2007.7, "total_tokens": 1062656}
36
+ {"current_steps": 175, "total_steps": 1419, "loss": 0.1604, "lr": 4.850083759583723e-05, "epoch": 0.3699788583509514, "percentage": 12.33, "elapsed_time": "0:09:03", "remaining_time": "1:04:24", "throughput": 2012.35, "total_tokens": 1093888}
37
+ {"current_steps": 180, "total_steps": 1419, "loss": 0.1707, "lr": 4.840361657559775e-05, "epoch": 0.38054968287526425, "percentage": 12.68, "elapsed_time": "0:09:17", "remaining_time": "1:04:00", "throughput": 2016.61, "total_tokens": 1125184}
38
+ {"current_steps": 185, "total_steps": 1419, "loss": 0.1544, "lr": 4.830344514287478e-05, "epoch": 0.39112050739957716, "percentage": 13.04, "elapsed_time": "0:09:31", "remaining_time": "1:03:35", "throughput": 2021.5, "total_tokens": 1156224}
39
+ {"current_steps": 190, "total_steps": 1419, "loss": 0.1615, "lr": 4.8200335925932185e-05, "epoch": 0.40169133192389006, "percentage": 13.39, "elapsed_time": "0:09:46", "remaining_time": "1:03:10", "throughput": 2026.14, "total_tokens": 1187392}
40
+ {"current_steps": 195, "total_steps": 1419, "loss": 0.159, "lr": 4.809430192339008e-05, "epoch": 0.41226215644820297, "percentage": 13.74, "elapsed_time": "0:10:00", "remaining_time": "1:02:46", "throughput": 2030.77, "total_tokens": 1218624}
41
+ {"current_steps": 200, "total_steps": 1419, "loss": 0.1668, "lr": 4.79853565025861e-05, "epoch": 0.42283298097251587, "percentage": 14.09, "elapsed_time": "0:10:14", "remaining_time": "1:02:23", "throughput": 2034.94, "total_tokens": 1249984}
42
+ {"current_steps": 200, "total_steps": 1419, "eval_loss": 0.16408737003803253, "epoch": 0.42283298097251587, "percentage": 14.09, "elapsed_time": "0:10:54", "remaining_time": "1:06:29", "throughput": 1909.64, "total_tokens": 1249984}
43
+ {"current_steps": 205, "total_steps": 1419, "loss": 0.1606, "lr": 4.787351339789025e-05, "epoch": 0.4334038054968288, "percentage": 14.45, "elapsed_time": "0:11:15", "remaining_time": "1:06:40", "throughput": 1896.44, "total_tokens": 1281216}
44
+ {"current_steps": 210, "total_steps": 1419, "loss": 0.1628, "lr": 4.7758786708973444e-05, "epoch": 0.4439746300211416, "percentage": 14.8, "elapsed_time": "0:11:29", "remaining_time": "1:06:11", "throughput": 1902.91, "total_tokens": 1312768}
45
+ {"current_steps": 215, "total_steps": 1419, "loss": 0.1617, "lr": 4.764119089903008e-05, "epoch": 0.45454545454545453, "percentage": 15.15, "elapsed_time": "0:11:44", "remaining_time": "1:05:42", "throughput": 1909.23, "total_tokens": 1344192}
46
+ {"current_steps": 220, "total_steps": 1419, "loss": 0.162, "lr": 4.752074079295457e-05, "epoch": 0.46511627906976744, "percentage": 15.5, "elapsed_time": "0:11:58", "remaining_time": "1:05:14", "throughput": 1915.09, "total_tokens": 1375424}
47
+ {"current_steps": 225, "total_steps": 1419, "loss": 0.1683, "lr": 4.739745157547258e-05, "epoch": 0.47568710359408034, "percentage": 15.86, "elapsed_time": "0:12:12", "remaining_time": "1:04:45", "throughput": 1920.91, "total_tokens": 1406656}
48
+ {"current_steps": 230, "total_steps": 1419, "loss": 0.155, "lr": 4.727133878922663e-05, "epoch": 0.48625792811839325, "percentage": 16.21, "elapsed_time": "0:12:26", "remaining_time": "1:04:18", "throughput": 1926.53, "total_tokens": 1437824}
49
+ {"current_steps": 235, "total_steps": 1419, "loss": 0.1585, "lr": 4.7142418332816735e-05, "epoch": 0.49682875264270615, "percentage": 16.56, "elapsed_time": "0:12:40", "remaining_time": "1:03:50", "throughput": 1931.94, "total_tokens": 1468992}
50
+ {"current_steps": 240, "total_steps": 1419, "loss": 0.1882, "lr": 4.701070645879612e-05, "epoch": 0.507399577167019, "percentage": 16.91, "elapsed_time": "0:12:54", "remaining_time": "1:03:24", "throughput": 1936.99, "total_tokens": 1500224}
51
+ {"current_steps": 245, "total_steps": 1419, "loss": 0.1742, "lr": 4.687621977162231e-05, "epoch": 0.5179704016913319, "percentage": 17.27, "elapsed_time": "0:13:08", "remaining_time": "1:02:59", "throughput": 1941.84, "total_tokens": 1531584}
52
+ {"current_steps": 250, "total_steps": 1419, "loss": 0.1607, "lr": 4.673897522556385e-05, "epoch": 0.5285412262156448, "percentage": 17.62, "elapsed_time": "0:13:22", "remaining_time": "1:02:34", "throughput": 1946.45, "total_tokens": 1562880}
53
+ {"current_steps": 255, "total_steps": 1419, "loss": 0.156, "lr": 4.6598990122562996e-05, "epoch": 0.5391120507399577, "percentage": 17.97, "elapsed_time": "0:13:37", "remaining_time": "1:02:09", "throughput": 1951.13, "total_tokens": 1594176}
54
+ {"current_steps": 260, "total_steps": 1419, "loss": 0.1584, "lr": 4.645628211005443e-05, "epoch": 0.5496828752642706, "percentage": 18.32, "elapsed_time": "0:13:51", "remaining_time": "1:01:44", "throughput": 1955.67, "total_tokens": 1625344}
55
+ {"current_steps": 265, "total_steps": 1419, "loss": 0.1514, "lr": 4.63108691787406e-05, "epoch": 0.5602536997885835, "percentage": 18.68, "elapsed_time": "0:14:05", "remaining_time": "1:01:20", "throughput": 1960.08, "total_tokens": 1656448}
56
+ {"current_steps": 270, "total_steps": 1419, "loss": 0.1649, "lr": 4.616276966032363e-05, "epoch": 0.5708245243128964, "percentage": 19.03, "elapsed_time": "0:14:19", "remaining_time": "1:00:56", "throughput": 1964.13, "total_tokens": 1687744}
57
+ {"current_steps": 275, "total_steps": 1419, "loss": 0.1577, "lr": 4.6012002225194325e-05, "epoch": 0.5813953488372093, "percentage": 19.38, "elapsed_time": "0:14:33", "remaining_time": "1:00:33", "throughput": 1968.22, "total_tokens": 1719040}
58
+ {"current_steps": 280, "total_steps": 1419, "loss": 0.1562, "lr": 4.585858588007849e-05, "epoch": 0.5919661733615222, "percentage": 19.73, "elapsed_time": "0:14:47", "remaining_time": "1:00:09", "throughput": 1972.26, "total_tokens": 1750208}
59
+ {"current_steps": 285, "total_steps": 1419, "loss": 0.1438, "lr": 4.570253996564075e-05, "epoch": 0.6025369978858351, "percentage": 20.08, "elapsed_time": "0:15:01", "remaining_time": "0:59:47", "throughput": 1976.08, "total_tokens": 1781824}
60
+ {"current_steps": 290, "total_steps": 1419, "loss": 0.165, "lr": 4.554388415404644e-05, "epoch": 0.6131078224101479, "percentage": 20.44, "elapsed_time": "0:15:15", "remaining_time": "0:59:25", "throughput": 1979.8, "total_tokens": 1813248}
61
+ {"current_steps": 295, "total_steps": 1419, "loss": 0.1618, "lr": 4.538263844648149e-05, "epoch": 0.6236786469344608, "percentage": 20.79, "elapsed_time": "0:15:30", "remaining_time": "0:59:04", "throughput": 1983.17, "total_tokens": 1844736}
62
+ {"current_steps": 300, "total_steps": 1419, "loss": 0.1569, "lr": 4.521882317063103e-05, "epoch": 0.6342494714587738, "percentage": 21.14, "elapsed_time": "0:15:44", "remaining_time": "0:58:41", "throughput": 1986.63, "total_tokens": 1875648}
63
+ {"current_steps": 300, "total_steps": 1419, "eval_loss": 0.16001471877098083, "epoch": 0.6342494714587738, "percentage": 21.14, "elapsed_time": "0:16:24", "remaining_time": "1:01:12", "throughput": 1905.24, "total_tokens": 1875648}
64
+ {"current_steps": 305, "total_steps": 1419, "loss": 0.1598, "lr": 4.505245897811672e-05, "epoch": 0.6448202959830867, "percentage": 21.49, "elapsed_time": "0:16:45", "remaining_time": "1:01:10", "throughput": 1897.51, "total_tokens": 1907008}
65
+ {"current_steps": 310, "total_steps": 1419, "loss": 0.1501, "lr": 4.488356684189325e-05, "epoch": 0.6553911205073996, "percentage": 21.85, "elapsed_time": "0:16:59", "remaining_time": "1:00:46", "throughput": 1901.82, "total_tokens": 1938496}
66
+ {"current_steps": 315, "total_steps": 1419, "loss": 0.1456, "lr": 4.4712168053604407e-05, "epoch": 0.6659619450317125, "percentage": 22.2, "elapsed_time": "0:17:13", "remaining_time": "1:00:21", "throughput": 1906.16, "total_tokens": 1969664}
67
+ {"current_steps": 320, "total_steps": 1419, "loss": 0.1502, "lr": 4.4538284220898864e-05, "epoch": 0.6765327695560254, "percentage": 22.55, "elapsed_time": "0:17:27", "remaining_time": "0:59:57", "throughput": 1910.28, "total_tokens": 2001024}
68
+ {"current_steps": 325, "total_steps": 1419, "loss": 0.1446, "lr": 4.4361937264706186e-05, "epoch": 0.6871035940803383, "percentage": 22.9, "elapsed_time": "0:17:41", "remaining_time": "0:59:34", "throughput": 1914.24, "total_tokens": 2032448}
69
+ {"current_steps": 330, "total_steps": 1419, "loss": 0.1478, "lr": 4.418314941647335e-05, "epoch": 0.6976744186046512, "percentage": 23.26, "elapsed_time": "0:17:55", "remaining_time": "0:59:10", "throughput": 1918.15, "total_tokens": 2063872}
70
+ {"current_steps": 335, "total_steps": 1419, "loss": 0.147, "lr": 4.400194321536209e-05, "epoch": 0.7082452431289641, "percentage": 23.61, "elapsed_time": "0:18:10", "remaining_time": "0:58:47", "throughput": 1921.84, "total_tokens": 2095104}
71
+ {"current_steps": 340, "total_steps": 1419, "loss": 0.1479, "lr": 4.381834150540749e-05, "epoch": 0.718816067653277, "percentage": 23.96, "elapsed_time": "0:18:24", "remaining_time": "0:58:24", "throughput": 1925.58, "total_tokens": 2126336}
72
+ {"current_steps": 345, "total_steps": 1419, "loss": 0.1448, "lr": 4.363236743263808e-05, "epoch": 0.7293868921775899, "percentage": 24.31, "elapsed_time": "0:18:38", "remaining_time": "0:58:01", "throughput": 1929.26, "total_tokens": 2157376}
73
+ {"current_steps": 350, "total_steps": 1419, "loss": 0.1443, "lr": 4.3444044442157914e-05, "epoch": 0.7399577167019028, "percentage": 24.67, "elapsed_time": "0:18:52", "remaining_time": "0:57:39", "throughput": 1932.65, "total_tokens": 2188864}
74
+ {"current_steps": 355, "total_steps": 1419, "loss": 0.1464, "lr": 4.3253396275190926e-05, "epoch": 0.7505285412262156, "percentage": 25.02, "elapsed_time": "0:19:06", "remaining_time": "0:57:17", "throughput": 1936.0, "total_tokens": 2220288}
+ {"current_steps": 360, "total_steps": 1419, "loss": 0.1345, "lr": 4.306044696608797e-05, "epoch": 0.7610993657505285, "percentage": 25.37, "elapsed_time": "0:19:21", "remaining_time": "0:56:55", "throughput": 1939.19, "total_tokens": 2251520}
+ {"current_steps": 365, "total_steps": 1419, "loss": 0.1311, "lr": 4.286522083929686e-05, "epoch": 0.7716701902748414, "percentage": 25.72, "elapsed_time": "0:19:35", "remaining_time": "0:56:33", "throughput": 1942.52, "total_tokens": 2282624}
+ {"current_steps": 370, "total_steps": 1419, "loss": 0.1428, "lr": 4.266774250629589e-05, "epoch": 0.7822410147991543, "percentage": 26.07, "elapsed_time": "0:19:49", "remaining_time": "0:56:11", "throughput": 1945.66, "total_tokens": 2313792}
+ {"current_steps": 375, "total_steps": 1419, "loss": 0.1361, "lr": 4.2468036862491176e-05, "epoch": 0.7928118393234672, "percentage": 26.43, "elapsed_time": "0:20:03", "remaining_time": "0:55:49", "throughput": 1948.75, "total_tokens": 2344896}
+ {"current_steps": 380, "total_steps": 1419, "loss": 0.1436, "lr": 4.226612908407814e-05, "epoch": 0.8033826638477801, "percentage": 26.78, "elapsed_time": "0:20:17", "remaining_time": "0:55:28", "throughput": 1951.85, "total_tokens": 2376192}
+ {"current_steps": 385, "total_steps": 1419, "loss": 0.138, "lr": 4.2062044624867656e-05, "epoch": 0.813953488372093, "percentage": 27.13, "elapsed_time": "0:20:31", "remaining_time": "0:55:07", "throughput": 1954.87, "total_tokens": 2407232}
+ {"current_steps": 390, "total_steps": 1419, "loss": 0.129, "lr": 4.1855809213077146e-05, "epoch": 0.8245243128964059, "percentage": 27.48, "elapsed_time": "0:20:45", "remaining_time": "0:54:46", "throughput": 1957.72, "total_tokens": 2438528}
+ {"current_steps": 395, "total_steps": 1419, "loss": 0.1278, "lr": 4.1647448848087166e-05, "epoch": 0.8350951374207188, "percentage": 27.84, "elapsed_time": "0:20:59", "remaining_time": "0:54:25", "throughput": 1960.66, "total_tokens": 2469504}
+ {"current_steps": 400, "total_steps": 1419, "loss": 0.1313, "lr": 4.143698979716372e-05, "epoch": 0.8456659619450317, "percentage": 28.19, "elapsed_time": "0:21:13", "remaining_time": "0:54:04", "throughput": 1963.41, "total_tokens": 2500800}
+ {"current_steps": 400, "total_steps": 1419, "eval_loss": 0.1339479386806488, "epoch": 0.8456659619450317, "percentage": 28.19, "elapsed_time": "0:21:54", "remaining_time": "0:55:47", "throughput": 1903.11, "total_tokens": 2500800}
+ {"current_steps": 405, "total_steps": 1419, "loss": 0.1308, "lr": 4.122445859214682e-05, "epoch": 0.8562367864693446, "percentage": 28.54, "elapsed_time": "0:22:14", "remaining_time": "0:55:41", "throughput": 1896.86, "total_tokens": 2531904}
+ {"current_steps": 410, "total_steps": 1419, "loss": 0.1213, "lr": 4.100988202610577e-05, "epoch": 0.8668076109936576, "percentage": 28.89, "elapsed_time": "0:22:29", "remaining_time": "0:55:20", "throughput": 1900.13, "total_tokens": 2563392}
+ {"current_steps": 415, "total_steps": 1419, "loss": 0.1232, "lr": 4.079328714996139e-05, "epoch": 0.8773784355179705, "percentage": 29.25, "elapsed_time": "0:22:43", "remaining_time": "0:54:58", "throughput": 1903.28, "total_tokens": 2594688}
+ {"current_steps": 420, "total_steps": 1419, "loss": 0.1328, "lr": 4.0574701269075844e-05, "epoch": 0.8879492600422833, "percentage": 29.6, "elapsed_time": "0:22:57", "remaining_time": "0:54:36", "throughput": 1906.43, "total_tokens": 2626112}
+ {"current_steps": 425, "total_steps": 1419, "loss": 0.1237, "lr": 4.035415193981032e-05, "epoch": 0.8985200845665962, "percentage": 29.95, "elapsed_time": "0:23:11", "remaining_time": "0:54:14", "throughput": 1909.54, "total_tokens": 2657344}
+ {"current_steps": 430, "total_steps": 1419, "loss": 0.131, "lr": 4.0131666966051127e-05, "epoch": 0.9090909090909091, "percentage": 30.3, "elapsed_time": "0:23:25", "remaining_time": "0:53:52", "throughput": 1912.66, "total_tokens": 2688256}
+ {"current_steps": 435, "total_steps": 1419, "loss": 0.1301, "lr": 3.990727439570453e-05, "epoch": 0.919661733615222, "percentage": 30.66, "elapsed_time": "0:23:39", "remaining_time": "0:53:30", "throughput": 1915.64, "total_tokens": 2719232}
+ {"current_steps": 440, "total_steps": 1419, "loss": 0.1249, "lr": 3.9681002517160845e-05, "epoch": 0.9302325581395349, "percentage": 31.01, "elapsed_time": "0:23:53", "remaining_time": "0:53:09", "throughput": 1918.46, "total_tokens": 2750464}
+ {"current_steps": 445, "total_steps": 1419, "loss": 0.1176, "lr": 3.945287985572826e-05, "epoch": 0.9408033826638478, "percentage": 31.36, "elapsed_time": "0:24:07", "remaining_time": "0:52:48", "throughput": 1921.44, "total_tokens": 2781440}
+ {"current_steps": 450, "total_steps": 1419, "loss": 0.119, "lr": 3.922293517003668e-05, "epoch": 0.9513742071881607, "percentage": 31.71, "elapsed_time": "0:24:21", "remaining_time": "0:52:27", "throughput": 1924.19, "total_tokens": 2812864}
+ {"current_steps": 455, "total_steps": 1419, "loss": 0.1166, "lr": 3.899119744841232e-05, "epoch": 0.9619450317124736, "percentage": 32.06, "elapsed_time": "0:24:35", "remaining_time": "0:52:07", "throughput": 1926.97, "total_tokens": 2844096}
+ {"current_steps": 460, "total_steps": 1419, "loss": 0.1207, "lr": 3.875769590522314e-05, "epoch": 0.9725158562367865, "percentage": 32.42, "elapsed_time": "0:24:50", "remaining_time": "0:51:46", "throughput": 1929.7, "total_tokens": 2875392}
+ {"current_steps": 465, "total_steps": 1419, "loss": 0.125, "lr": 3.8522459977195955e-05, "epoch": 0.9830866807610994, "percentage": 32.77, "elapsed_time": "0:25:04", "remaining_time": "0:51:25", "throughput": 1932.4, "total_tokens": 2906432}
+ {"current_steps": 470, "total_steps": 1419, "loss": 0.1278, "lr": 3.828551931970549e-05, "epoch": 0.9936575052854123, "percentage": 33.12, "elapsed_time": "0:25:18", "remaining_time": "0:51:05", "throughput": 1934.95, "total_tokens": 2937728}
+ {"current_steps": 475, "total_steps": 1419, "loss": 0.1226, "lr": 3.8046903803035716e-05, "epoch": 1.0042283298097252, "percentage": 33.47, "elapsed_time": "0:25:32", "remaining_time": "0:50:45", "throughput": 1937.15, "total_tokens": 2968192}
+ {"current_steps": 480, "total_steps": 1419, "loss": 0.1169, "lr": 3.780664350861431e-05, "epoch": 1.014799154334038, "percentage": 33.83, "elapsed_time": "0:25:46", "remaining_time": "0:50:25", "throughput": 1939.64, "total_tokens": 2999488}
+ {"current_steps": 485, "total_steps": 1419, "loss": 0.116, "lr": 3.756476872522035e-05, "epoch": 1.025369978858351, "percentage": 34.18, "elapsed_time": "0:26:00", "remaining_time": "0:50:05", "throughput": 1942.08, "total_tokens": 3030720}
+ {"current_steps": 490, "total_steps": 1419, "loss": 0.1197, "lr": 3.7321309945165905e-05, "epoch": 1.0359408033826638, "percentage": 34.53, "elapsed_time": "0:26:14", "remaining_time": "0:49:45", "throughput": 1944.49, "total_tokens": 3062016}
+ {"current_steps": 495, "total_steps": 1419, "loss": 0.1184, "lr": 3.707629786045198e-05, "epoch": 1.0465116279069768, "percentage": 34.88, "elapsed_time": "0:26:28", "remaining_time": "0:49:25", "throughput": 1946.75, "total_tokens": 3093184}
+ {"current_steps": 500, "total_steps": 1419, "loss": 0.1134, "lr": 3.682976335889935e-05, "epoch": 1.0570824524312896, "percentage": 35.24, "elapsed_time": "0:26:43", "remaining_time": "0:49:06", "throughput": 1948.86, "total_tokens": 3124224}
+ {"current_steps": 500, "total_steps": 1419, "eval_loss": 0.1192605197429657, "epoch": 1.0570824524312896, "percentage": 35.24, "elapsed_time": "0:27:23", "remaining_time": "0:50:20", "throughput": 1900.84, "total_tokens": 3124224}
+ {"current_steps": 505, "total_steps": 1419, "loss": 0.1193, "lr": 3.658173752025452e-05, "epoch": 1.0676532769556026, "percentage": 35.59, "elapsed_time": "0:27:48", "remaining_time": "0:50:19", "throughput": 1891.77, "total_tokens": 3155584}
+ {"current_steps": 510, "total_steps": 1419, "loss": 0.115, "lr": 3.633225161227169e-05, "epoch": 1.0782241014799154, "percentage": 35.94, "elapsed_time": "0:28:02", "remaining_time": "0:49:58", "throughput": 1894.29, "total_tokens": 3186944}
+ {"current_steps": 515, "total_steps": 1419, "loss": 0.1146, "lr": 3.608133708677093e-05, "epoch": 1.0887949260042284, "percentage": 36.29, "elapsed_time": "0:28:16", "remaining_time": "0:49:38", "throughput": 1896.78, "total_tokens": 3218304}
+ {"current_steps": 520, "total_steps": 1419, "loss": 0.1109, "lr": 3.5829025575673136e-05, "epoch": 1.0993657505285412, "percentage": 36.65, "elapsed_time": "0:28:31", "remaining_time": "0:49:18", "throughput": 1899.24, "total_tokens": 3249664}
+ {"current_steps": 525, "total_steps": 1419, "loss": 0.1143, "lr": 3.5575348887012336e-05, "epoch": 1.109936575052854, "percentage": 37.0, "elapsed_time": "0:28:45", "remaining_time": "0:48:58", "throughput": 1901.62, "total_tokens": 3280960}
+ {"current_steps": 530, "total_steps": 1419, "loss": 0.1129, "lr": 3.532033900092571e-05, "epoch": 1.120507399577167, "percentage": 37.35, "elapsed_time": "0:28:59", "remaining_time": "0:48:37", "throughput": 1904.07, "total_tokens": 3312320}
+ {"current_steps": 535, "total_steps": 1419, "loss": 0.1139, "lr": 3.506402806562202e-05, "epoch": 1.1310782241014798, "percentage": 37.7, "elapsed_time": "0:29:13", "remaining_time": "0:48:17", "throughput": 1906.47, "total_tokens": 3343424}
+ {"current_steps": 540, "total_steps": 1419, "loss": 0.1122, "lr": 3.480644839332876e-05, "epoch": 1.1416490486257929, "percentage": 38.05, "elapsed_time": "0:29:27", "remaining_time": "0:47:57", "throughput": 1908.86, "total_tokens": 3374720}
+ {"current_steps": 545, "total_steps": 1419, "loss": 0.111, "lr": 3.454763245621871e-05, "epoch": 1.1522198731501057, "percentage": 38.41, "elapsed_time": "0:29:42", "remaining_time": "0:47:37", "throughput": 1911.2, "total_tokens": 3406016}
+ {"current_steps": 550, "total_steps": 1419, "loss": 0.1105, "lr": 3.428761288231621e-05, "epoch": 1.1627906976744187, "percentage": 38.76, "elapsed_time": "0:29:56", "remaining_time": "0:47:17", "throughput": 1913.58, "total_tokens": 3437184}
+ {"current_steps": 555, "total_steps": 1419, "loss": 0.1128, "lr": 3.402642245138394e-05, "epoch": 1.1733615221987315, "percentage": 39.11, "elapsed_time": "0:30:10", "remaining_time": "0:46:58", "throughput": 1915.77, "total_tokens": 3468416}
+ {"current_steps": 560, "total_steps": 1419, "loss": 0.1066, "lr": 3.376409409079043e-05, "epoch": 1.1839323467230445, "percentage": 39.46, "elapsed_time": "0:30:24", "remaining_time": "0:46:38", "throughput": 1918.02, "total_tokens": 3499456}
+ {"current_steps": 565, "total_steps": 1419, "loss": 0.1126, "lr": 3.350066087135903e-05, "epoch": 1.1945031712473573, "percentage": 39.82, "elapsed_time": "0:30:38", "remaining_time": "0:46:19", "throughput": 1920.23, "total_tokens": 3530944}
+ {"current_steps": 570, "total_steps": 1419, "loss": 0.1107, "lr": 3.323615600319883e-05, "epoch": 1.20507399577167, "percentage": 40.17, "elapsed_time": "0:30:53", "remaining_time": "0:46:00", "throughput": 1922.35, "total_tokens": 3562368}
+ {"current_steps": 575, "total_steps": 1419, "loss": 0.1146, "lr": 3.297061283151791e-05, "epoch": 1.215644820295983, "percentage": 40.52, "elapsed_time": "0:31:07", "remaining_time": "0:45:40", "throughput": 1924.42, "total_tokens": 3593600}
+ {"current_steps": 580, "total_steps": 1419, "loss": 0.1063, "lr": 3.27040648324197e-05, "epoch": 1.226215644820296, "percentage": 40.87, "elapsed_time": "0:31:21", "remaining_time": "0:45:22", "throughput": 1926.46, "total_tokens": 3625152}
+ {"current_steps": 585, "total_steps": 1419, "loss": 0.1057, "lr": 3.243654560868268e-05, "epoch": 1.236786469344609, "percentage": 41.23, "elapsed_time": "0:31:35", "remaining_time": "0:45:02", "throughput": 1928.57, "total_tokens": 3656192}
+ {"current_steps": 590, "total_steps": 1419, "loss": 0.1024, "lr": 3.216808888552429e-05, "epoch": 1.2473572938689217, "percentage": 41.58, "elapsed_time": "0:31:49", "remaining_time": "0:44:43", "throughput": 1930.63, "total_tokens": 3687232}
+ {"current_steps": 595, "total_steps": 1419, "loss": 0.1006, "lr": 3.189872850634922e-05, "epoch": 1.2579281183932347, "percentage": 41.93, "elapsed_time": "0:32:04", "remaining_time": "0:44:24", "throughput": 1932.65, "total_tokens": 3718592}
+ {"current_steps": 600, "total_steps": 1419, "loss": 0.1059, "lr": 3.162849842848294e-05, "epoch": 1.2684989429175475, "percentage": 42.28, "elapsed_time": "0:32:18", "remaining_time": "0:44:06", "throughput": 1934.62, "total_tokens": 3750336}
+ {"current_steps": 600, "total_steps": 1419, "eval_loss": 0.10877919942140579, "epoch": 1.2684989429175475, "percentage": 42.28, "elapsed_time": "0:32:59", "remaining_time": "0:45:01", "throughput": 1895.06, "total_tokens": 3750336}
+ {"current_steps": 605, "total_steps": 1419, "loss": 0.1079, "lr": 3.1357432718890815e-05, "epoch": 1.2790697674418605, "percentage": 42.64, "elapsed_time": "0:33:21", "remaining_time": "0:44:52", "throughput": 1889.78, "total_tokens": 3781632}
+ {"current_steps": 610, "total_steps": 1419, "loss": 0.1106, "lr": 3.108556554988338e-05, "epoch": 1.2896405919661733, "percentage": 42.99, "elapsed_time": "0:33:35", "remaining_time": "0:44:32", "throughput": 1891.96, "total_tokens": 3812928}
+ {"current_steps": 615, "total_steps": 1419, "loss": 0.1027, "lr": 3.081293119480838e-05, "epoch": 1.3002114164904863, "percentage": 43.34, "elapsed_time": "0:33:49", "remaining_time": "0:44:13", "throughput": 1894.15, "total_tokens": 3843904}
+ {"current_steps": 620, "total_steps": 1419, "loss": 0.1015, "lr": 3.053956402373004e-05, "epoch": 1.3107822410147991, "percentage": 43.69, "elapsed_time": "0:34:03", "remaining_time": "0:43:53", "throughput": 1896.21, "total_tokens": 3875008}
+ {"current_steps": 625, "total_steps": 1419, "loss": 0.0965, "lr": 3.0265498499096127e-05, "epoch": 1.3213530655391121, "percentage": 44.05, "elapsed_time": "0:34:17", "remaining_time": "0:43:34", "throughput": 1898.26, "total_tokens": 3906560}
+ {"current_steps": 630, "total_steps": 1419, "loss": 0.1106, "lr": 2.9990769171393423e-05, "epoch": 1.331923890063425, "percentage": 44.4, "elapsed_time": "0:34:32", "remaining_time": "0:43:15", "throughput": 1900.22, "total_tokens": 3937856}
+ {"current_steps": 635, "total_steps": 1419, "loss": 0.0996, "lr": 2.971541067479207e-05, "epoch": 1.3424947145877377, "percentage": 44.75, "elapsed_time": "0:34:46", "remaining_time": "0:42:55", "throughput": 1902.27, "total_tokens": 3968832}
+ {"current_steps": 640, "total_steps": 1419, "loss": 0.1049, "lr": 2.9439457722779317e-05, "epoch": 1.3530655391120507, "percentage": 45.1, "elapsed_time": "0:35:00", "remaining_time": "0:42:36", "throughput": 1904.16, "total_tokens": 4000000}
+ {"current_steps": 645, "total_steps": 1419, "loss": 0.1118, "lr": 2.916294510378335e-05, "epoch": 1.3636363636363638, "percentage": 45.45, "elapsed_time": "0:35:15", "remaining_time": "0:42:18", "throughput": 1906.07, "total_tokens": 4031424}
+ {"current_steps": 650, "total_steps": 1419, "loss": 0.0967, "lr": 2.8885907676787622e-05, "epoch": 1.3742071881606766, "percentage": 45.81, "elapsed_time": "0:35:29", "remaining_time": "0:41:59", "throughput": 1907.98, "total_tokens": 4062720}
+ {"current_steps": 655, "total_steps": 1419, "loss": 0.1035, "lr": 2.8608380366936293e-05, "epoch": 1.3847780126849893, "percentage": 46.16, "elapsed_time": "0:35:43", "remaining_time": "0:41:40", "throughput": 1909.88, "total_tokens": 4093824}
+ {"current_steps": 660, "total_steps": 1419, "loss": 0.1043, "lr": 2.8330398161131376e-05, "epoch": 1.3953488372093024, "percentage": 46.51, "elapsed_time": "0:35:57", "remaining_time": "0:41:21", "throughput": 1911.61, "total_tokens": 4125120}
+ {"current_steps": 665, "total_steps": 1419, "loss": 0.1045, "lr": 2.8051996103622003e-05, "epoch": 1.4059196617336152, "percentage": 46.86, "elapsed_time": "0:36:12", "remaining_time": "0:41:03", "throughput": 1913.4, "total_tokens": 4156544}
+ {"current_steps": 670, "total_steps": 1419, "loss": 0.1015, "lr": 2.7773209291586567e-05, "epoch": 1.4164904862579282, "percentage": 47.22, "elapsed_time": "0:36:26", "remaining_time": "0:40:44", "throughput": 1915.26, "total_tokens": 4187904}
+ {"current_steps": 675, "total_steps": 1419, "loss": 0.1055, "lr": 2.749407287070812e-05, "epoch": 1.427061310782241, "percentage": 47.57, "elapsed_time": "0:36:40", "remaining_time": "0:40:25", "throughput": 1917.02, "total_tokens": 4219072}
+ {"current_steps": 680, "total_steps": 1419, "loss": 0.1045, "lr": 2.7214622030743693e-05, "epoch": 1.437632135306554, "percentage": 47.92, "elapsed_time": "0:36:55", "remaining_time": "0:40:07", "throughput": 1918.79, "total_tokens": 4250624}
+ {"current_steps": 685, "total_steps": 1419, "loss": 0.1035, "lr": 2.693489200108802e-05, "epoch": 1.4482029598308668, "percentage": 48.27, "elapsed_time": "0:37:09", "remaining_time": "0:39:48", "throughput": 1920.56, "total_tokens": 4281920}
+ {"current_steps": 690, "total_steps": 1419, "loss": 0.1035, "lr": 2.6654918046332323e-05, "epoch": 1.4587737843551798, "percentage": 48.63, "elapsed_time": "0:37:23", "remaining_time": "0:39:30", "throughput": 1922.31, "total_tokens": 4313024}
+ {"current_steps": 695, "total_steps": 1419, "loss": 0.0989, "lr": 2.63747354618186e-05, "epoch": 1.4693446088794926, "percentage": 48.98, "elapsed_time": "0:37:38", "remaining_time": "0:39:12", "throughput": 1923.99, "total_tokens": 4344384}
+ {"current_steps": 700, "total_steps": 1419, "loss": 0.096, "lr": 2.6094379569190082e-05, "epoch": 1.4799154334038054, "percentage": 49.33, "elapsed_time": "0:37:52", "remaining_time": "0:38:54", "throughput": 1925.61, "total_tokens": 4375808}
+ {"current_steps": 700, "total_steps": 1419, "eval_loss": 0.10834133625030518, "epoch": 1.4799154334038054, "percentage": 49.33, "elapsed_time": "0:38:32", "remaining_time": "0:39:35", "throughput": 1891.89, "total_tokens": 4375808}
+ {"current_steps": 705, "total_steps": 1419, "loss": 0.1052, "lr": 2.5813885711938357e-05, "epoch": 1.4904862579281184, "percentage": 49.68, "elapsed_time": "0:38:54", "remaining_time": "0:39:24", "throughput": 1887.47, "total_tokens": 4406912}
+ {"current_steps": 710, "total_steps": 1419, "loss": 0.1082, "lr": 2.553328925094773e-05, "epoch": 1.5010570824524314, "percentage": 50.04, "elapsed_time": "0:39:09", "remaining_time": "0:39:05", "throughput": 1889.29, "total_tokens": 4437952}
+ {"current_steps": 715, "total_steps": 1419, "loss": 0.1053, "lr": 2.5252625560037386e-05, "epoch": 1.5116279069767442, "percentage": 50.39, "elapsed_time": "0:39:23", "remaining_time": "0:38:47", "throughput": 1891.07, "total_tokens": 4469312}
+ {"current_steps": 720, "total_steps": 1419, "loss": 0.1003, "lr": 2.4971930021501965e-05, "epoch": 1.522198731501057, "percentage": 50.74, "elapsed_time": "0:39:37", "remaining_time": "0:38:28", "throughput": 1892.89, "total_tokens": 4500352}
+ {"current_steps": 725, "total_steps": 1419, "loss": 0.1027, "lr": 2.4691238021651042e-05, "epoch": 1.53276955602537, "percentage": 51.09, "elapsed_time": "0:39:51", "remaining_time": "0:38:09", "throughput": 1894.67, "total_tokens": 4531584}
+ {"current_steps": 730, "total_steps": 1419, "loss": 0.1019, "lr": 2.4410584946348054e-05, "epoch": 1.543340380549683, "percentage": 51.44, "elapsed_time": "0:40:05", "remaining_time": "0:37:50", "throughput": 1896.42, "total_tokens": 4562752}
+ {"current_steps": 735, "total_steps": 1419, "loss": 0.1094, "lr": 2.413000617654938e-05, "epoch": 1.5539112050739958, "percentage": 51.8, "elapsed_time": "0:40:20", "remaining_time": "0:37:32", "throughput": 1898.19, "total_tokens": 4593792}
+ {"current_steps": 740, "total_steps": 1419, "loss": 0.0987, "lr": 2.3849537083843936e-05, "epoch": 1.5644820295983086, "percentage": 52.15, "elapsed_time": "0:40:34", "remaining_time": "0:37:13", "throughput": 1899.99, "total_tokens": 4624832}
+ {"current_steps": 745, "total_steps": 1419, "loss": 0.0973, "lr": 2.3569213025994056e-05, "epoch": 1.5750528541226214, "percentage": 52.5, "elapsed_time": "0:40:48", "remaining_time": "0:36:54", "throughput": 1901.77, "total_tokens": 4655872}
+ {"current_steps": 750, "total_steps": 1419, "loss": 0.1052, "lr": 2.3289069342478018e-05, "epoch": 1.5856236786469344, "percentage": 52.85, "elapsed_time": "0:41:02", "remaining_time": "0:36:36", "throughput": 1903.53, "total_tokens": 4686912}
+ {"current_steps": 755, "total_steps": 1419, "loss": 0.1069, "lr": 2.3009141350034937e-05, "epoch": 1.5961945031712474, "percentage": 53.21, "elapsed_time": "0:41:16", "remaining_time": "0:36:17", "throughput": 1905.25, "total_tokens": 4718208}
+ {"current_steps": 760, "total_steps": 1419, "loss": 0.0994, "lr": 2.2729464338212515e-05, "epoch": 1.6067653276955602, "percentage": 53.56, "elapsed_time": "0:41:30", "remaining_time": "0:35:59", "throughput": 1906.93, "total_tokens": 4749376}
+ {"current_steps": 765, "total_steps": 1419, "loss": 0.1027, "lr": 2.2450073564918185e-05, "epoch": 1.617336152219873, "percentage": 53.91, "elapsed_time": "0:41:45", "remaining_time": "0:35:41", "throughput": 1908.55, "total_tokens": 4781120}
+ {"current_steps": 770, "total_steps": 1419, "loss": 0.1026, "lr": 2.21710042519743e-05, "epoch": 1.627906976744186, "percentage": 54.26, "elapsed_time": "0:41:59", "remaining_time": "0:35:23", "throughput": 1910.14, "total_tokens": 4812480}
+ {"current_steps": 775, "total_steps": 1419, "loss": 0.0974, "lr": 2.1892291580677822e-05, "epoch": 1.638477801268499, "percentage": 54.62, "elapsed_time": "0:42:13", "remaining_time": "0:35:05", "throughput": 1911.77, "total_tokens": 4843712}
+ {"current_steps": 780, "total_steps": 1419, "loss": 0.1131, "lr": 2.1613970687365127e-05, "epoch": 1.6490486257928119, "percentage": 54.97, "elapsed_time": "0:42:27", "remaining_time": "0:34:47", "throughput": 1913.37, "total_tokens": 4874944}
+ {"current_steps": 785, "total_steps": 1419, "loss": 0.0919, "lr": 2.1336076658982524e-05, "epoch": 1.6596194503171247, "percentage": 55.32, "elapsed_time": "0:42:42", "remaining_time": "0:34:29", "throughput": 1914.94, "total_tokens": 4906368}
+ {"current_steps": 790, "total_steps": 1419, "loss": 0.1036, "lr": 2.1058644528662945e-05, "epoch": 1.6701902748414377, "percentage": 55.67, "elapsed_time": "0:42:56", "remaining_time": "0:34:11", "throughput": 1916.49, "total_tokens": 4937536}
+ {"current_steps": 795, "total_steps": 1419, "loss": 0.0956, "lr": 2.0781709271309423e-05, "epoch": 1.6807610993657507, "percentage": 56.03, "elapsed_time": "0:43:10", "remaining_time": "0:33:53", "throughput": 1918.0, "total_tokens": 4968832}
+ {"current_steps": 800, "total_steps": 1419, "loss": 0.0998, "lr": 2.0505305799185966e-05, "epoch": 1.6913319238900635, "percentage": 56.38, "elapsed_time": "0:43:24", "remaining_time": "0:33:35", "throughput": 1919.52, "total_tokens": 5000128}
+ {"current_steps": 800, "total_steps": 1419, "eval_loss": 0.10008509457111359, "epoch": 1.6913319238900635, "percentage": 56.38, "elapsed_time": "0:44:05", "remaining_time": "0:34:06", "throughput": 1890.15, "total_tokens": 5000128}
+ {"current_steps": 805, "total_steps": 1419, "loss": 0.0956, "lr": 2.022946895751625e-05, "epoch": 1.7019027484143763, "percentage": 56.73, "elapsed_time": "0:44:29", "remaining_time": "0:33:55", "throughput": 1884.96, "total_tokens": 5031360}
+ {"current_steps": 810, "total_steps": 1419, "loss": 0.1008, "lr": 1.9954233520090843e-05, "epoch": 1.712473572938689, "percentage": 57.08, "elapsed_time": "0:44:43", "remaining_time": "0:33:37", "throughput": 1886.61, "total_tokens": 5062720}
+ {"current_steps": 815, "total_steps": 1419, "loss": 0.0955, "lr": 1.967963418488335e-05, "epoch": 1.723044397463002, "percentage": 57.43, "elapsed_time": "0:44:57", "remaining_time": "0:33:19", "throughput": 1888.29, "total_tokens": 5093888}
+ {"current_steps": 820, "total_steps": 1419, "loss": 0.1039, "lr": 1.9405705569676206e-05, "epoch": 1.733615221987315, "percentage": 57.79, "elapsed_time": "0:45:11", "remaining_time": "0:33:00", "throughput": 1889.94, "total_tokens": 5125120}
+ {"current_steps": 825, "total_steps": 1419, "loss": 0.1005, "lr": 1.9132482207696488e-05, "epoch": 1.744186046511628, "percentage": 58.14, "elapsed_time": "0:45:26", "remaining_time": "0:32:42", "throughput": 1891.53, "total_tokens": 5156544}
+ {"current_steps": 830, "total_steps": 1419, "loss": 0.1069, "lr": 1.8859998543262474e-05, "epoch": 1.7547568710359407, "percentage": 58.49, "elapsed_time": "0:45:40", "remaining_time": "0:32:24", "throughput": 1893.17, "total_tokens": 5187776}
+ {"current_steps": 835, "total_steps": 1419, "loss": 0.1004, "lr": 1.8588288927441334e-05, "epoch": 1.7653276955602537, "percentage": 58.84, "elapsed_time": "0:45:54", "remaining_time": "0:32:06", "throughput": 1894.74, "total_tokens": 5218944}
+ {"current_steps": 840, "total_steps": 1419, "loss": 0.1002, "lr": 1.831738761371863e-05, "epoch": 1.7758985200845667, "percentage": 59.2, "elapsed_time": "0:46:08", "remaining_time": "0:31:48", "throughput": 1896.28, "total_tokens": 5250112}
+ {"current_steps": 845, "total_steps": 1419, "loss": 0.0927, "lr": 1.8047328753680083e-05, "epoch": 1.7864693446088795, "percentage": 59.55, "elapsed_time": "0:46:22", "remaining_time": "0:31:30", "throughput": 1897.83, "total_tokens": 5281088}
+ {"current_steps": 850, "total_steps": 1419, "loss": 0.1013, "lr": 1.777814639270622e-05, "epoch": 1.7970401691331923, "percentage": 59.9, "elapsed_time": "0:46:36", "remaining_time": "0:31:12", "throughput": 1899.37, "total_tokens": 5312256}
+ {"current_steps": 855, "total_steps": 1419, "loss": 0.0945, "lr": 1.7509874465680377e-05, "epoch": 1.8076109936575053, "percentage": 60.25, "elapsed_time": "0:46:51", "remaining_time": "0:30:54", "throughput": 1900.81, "total_tokens": 5343744}
+ {"current_steps": 860, "total_steps": 1419, "loss": 0.0949, "lr": 1.724254679271065e-05, "epoch": 1.8181818181818183, "percentage": 60.61, "elapsed_time": "0:47:05", "remaining_time": "0:30:36", "throughput": 1902.31, "total_tokens": 5374976}
+ {"current_steps": 865, "total_steps": 1419, "loss": 0.0923, "lr": 1.6976197074866315e-05, "epoch": 1.8287526427061311, "percentage": 60.96, "elapsed_time": "0:47:19", "remaining_time": "0:30:18", "throughput": 1903.78, "total_tokens": 5406144}
+ {"current_steps": 870, "total_steps": 1419, "loss": 0.1049, "lr": 1.6710858889929255e-05, "epoch": 1.839323467230444, "percentage": 61.31, "elapsed_time": "0:47:34", "remaining_time": "0:30:01", "throughput": 1905.25, "total_tokens": 5437760}
+ {"current_steps": 875, "total_steps": 1419, "loss": 0.0906, "lr": 1.6446565688160897e-05, "epoch": 1.8498942917547567, "percentage": 61.66, "elapsed_time": "0:47:48", "remaining_time": "0:29:43", "throughput": 1906.67, "total_tokens": 5468992}
+ {"current_steps": 880, "total_steps": 1419, "loss": 0.0942, "lr": 1.6183350788085317e-05, "epoch": 1.8604651162790697, "percentage": 62.02, "elapsed_time": "0:48:02", "remaining_time": "0:29:25", "throughput": 1908.13, "total_tokens": 5500288}
+ {"current_steps": 885, "total_steps": 1419, "loss": 0.1, "lr": 1.592124737228881e-05, "epoch": 1.8710359408033828, "percentage": 62.37, "elapsed_time": "0:48:16", "remaining_time": "0:29:07", "throughput": 1909.57, "total_tokens": 5531456}
+ {"current_steps": 890, "total_steps": 1419, "loss": 0.0985, "lr": 1.566028848323674e-05, "epoch": 1.8816067653276956, "percentage": 62.72, "elapsed_time": "0:48:30", "remaining_time": "0:28:50", "throughput": 1910.96, "total_tokens": 5562624}
+ {"current_steps": 895, "total_steps": 1419, "loss": 0.0959, "lr": 1.540050701910796e-05, "epoch": 1.8921775898520083, "percentage": 63.07, "elapsed_time": "0:48:44", "remaining_time": "0:28:32", "throughput": 1912.35, "total_tokens": 5593536}
+ {"current_steps": 900, "total_steps": 1419, "loss": 0.1083, "lr": 1.5141935729647461e-05, "epoch": 1.9027484143763214, "percentage": 63.42, "elapsed_time": "0:48:59", "remaining_time": "0:28:14", "throughput": 1913.73, "total_tokens": 5624576}
+ {"current_steps": 900, "total_steps": 1419, "eval_loss": 0.09912961721420288, "epoch": 1.9027484143763214, "percentage": 63.42, "elapsed_time": "0:49:39", "remaining_time": "0:28:38", "throughput": 1887.72, "total_tokens": 5624576}
+ {"current_steps": 905, "total_steps": 1419, "loss": 0.0942, "lr": 1.4884607212037726e-05, "epoch": 1.9133192389006344, "percentage": 63.78, "elapsed_time": "0:49:57", "remaining_time": "0:28:22", "throughput": 1886.77, "total_tokens": 5655936}
+ {"current_steps": 910, "total_steps": 1419, "loss": 0.1026, "lr": 1.4628553906789322e-05, "epoch": 1.9238900634249472, "percentage": 64.13, "elapsed_time": "0:50:11", "remaining_time": "0:28:04", "throughput": 1888.26, "total_tokens": 5686976}
+ {"current_steps": 915, "total_steps": 1419, "loss": 0.0933, "lr": 1.4373808093651215e-05, "epoch": 1.93446088794926, "percentage": 64.48, "elapsed_time": "0:50:26", "remaining_time": "0:27:46", "throughput": 1889.72, "total_tokens": 5718592}
+ {"current_steps": 920, "total_steps": 1419, "loss": 0.0955, "lr": 1.4120401887541423e-05, "epoch": 1.945031712473573, "percentage": 64.83, "elapsed_time": "0:50:40", "remaining_time": "0:27:29", "throughput": 1891.2, "total_tokens": 5749952}
+ {"current_steps": 925, "total_steps": 1419, "loss": 0.0933, "lr": 1.3868367234498328e-05, "epoch": 1.955602536997886, "percentage": 65.19, "elapsed_time": "0:50:54", "remaining_time": "0:27:11", "throughput": 1892.7, "total_tokens": 5780928}
+ {"current_steps": 930, "total_steps": 1419, "loss": 0.0905, "lr": 1.3617735907653434e-05, "epoch": 1.9661733615221988, "percentage": 65.54, "elapsed_time": "0:51:08", "remaining_time": "0:26:53", "throughput": 1894.17, "total_tokens": 5812032}
+ {"current_steps": 935, "total_steps": 1419, "loss": 0.0959, "lr": 1.3368539503225746e-05, "epoch": 1.9767441860465116, "percentage": 65.89, "elapsed_time": "0:51:22", "remaining_time": "0:26:35", "throughput": 1895.53, "total_tokens": 5843136}
+ {"current_steps": 940, "total_steps": 1419, "loss": 0.1031, "lr": 1.3120809436538656e-05, "epoch": 1.9873150105708244, "percentage": 66.24, "elapsed_time": "0:51:37", "remaining_time": "0:26:18", "throughput": 1896.91, "total_tokens": 5874752}
+ {"current_steps": 945, "total_steps": 1419, "loss": 0.0896, "lr": 1.2874576938059402e-05, "epoch": 1.9978858350951374, "percentage": 66.6, "elapsed_time": "0:51:51", "remaining_time": "0:26:00", "throughput": 1898.29, "total_tokens": 5905728}
+ {"current_steps": 950, "total_steps": 1419, "loss": 0.0931, "lr": 1.2629873049462032e-05, "epoch": 2.0084566596194504, "percentage": 66.95, "elapsed_time": "0:52:05", "remaining_time": "0:25:42", "throughput": 1899.47, "total_tokens": 5936448}
+ {"current_steps": 955, "total_steps": 1419, "loss": 0.0904, "lr": 1.2386728619714091e-05, "epoch": 2.019027484143763, "percentage": 67.3, "elapsed_time": "0:52:19", "remaining_time": "0:25:25", "throughput": 1900.83, "total_tokens": 5967808}
+ {"current_steps": 960, "total_steps": 1419, "loss": 0.0973, "lr": 1.214517430118753e-05, "epoch": 2.029598308668076, "percentage": 67.65, "elapsed_time": "0:52:33", "remaining_time": "0:25:07", "throughput": 1902.21, "total_tokens": 5998720}
+ {"current_steps": 965, "total_steps": 1419, "loss": 0.0937, "lr": 1.190524054579455e-05, "epoch": 2.040169133192389, "percentage": 68.01, "elapsed_time": "0:52:47", "remaining_time": "0:24:50", "throughput": 1903.51, "total_tokens": 6030016}
+ {"current_steps": 970, "total_steps": 1419, "loss": 0.0898, "lr": 1.1666957601148576e-05, "epoch": 2.050739957716702, "percentage": 68.36, "elapsed_time": "0:53:02", "remaining_time": "0:24:32", "throughput": 1904.75, "total_tokens": 6061184}
+ {"current_steps": 975, "total_steps": 1419, "loss": 0.1006, "lr": 1.1430355506751095e-05, "epoch": 2.061310782241015, "percentage": 68.71, "elapsed_time": "0:53:16", "remaining_time": "0:24:15", "throughput": 1906.03, "total_tokens": 6092672}
+ {"current_steps": 980, "total_steps": 1419, "loss": 0.0981, "lr": 1.119546409020461e-05, "epoch": 2.0718816067653276, "percentage": 69.06, "elapsed_time": "0:53:30", "remaining_time": "0:23:58", "throughput": 1907.28, "total_tokens": 6123712}
+ {"current_steps": 985, "total_steps": 1419, "loss": 0.0943, "lr": 1.0962312963452467e-05, "epoch": 2.0824524312896404, "percentage": 69.42, "elapsed_time": "0:53:44", "remaining_time": "0:23:40", "throughput": 1908.57, "total_tokens": 6154816}
+ {"current_steps": 990, "total_steps": 1419, "loss": 0.0943, "lr": 1.0730931519045697e-05, "epoch": 2.0930232558139537, "percentage": 69.77, "elapsed_time": "0:53:59", "remaining_time": "0:23:23", "throughput": 1909.82, "total_tokens": 6186176}
+ {"current_steps": 995, "total_steps": 1419, "loss": 0.0843, "lr": 1.050134892643767e-05, "epoch": 2.1035940803382664, "percentage": 70.12, "elapsed_time": "0:54:13", "remaining_time": "0:23:06", "throughput": 1911.11, "total_tokens": 6217216}
+ {"current_steps": 1000, "total_steps": 1419, "loss": 0.0953, "lr": 1.0273594128306738e-05, "epoch": 2.1141649048625792, "percentage": 70.47, "elapsed_time": "0:54:27", "remaining_time": "0:22:49", "throughput": 1912.36, "total_tokens": 6248320}
+ {"current_steps": 1000, "total_steps": 1419, "eval_loss": 0.09716298431158066, "epoch": 2.1141649048625792, "percentage": 70.47, "elapsed_time": "0:55:07", "remaining_time": "0:23:05", "throughput": 1888.97, "total_tokens": 6248320}
+ {"current_steps": 1005, "total_steps": 1419, "loss": 0.0923, "lr": 1.00476958369076e-05, "epoch": 2.124735729386892, "percentage": 70.82, "elapsed_time": "0:55:29", "remaining_time": "0:22:51", "throughput": 1886.3, "total_tokens": 6279552}
+ {"current_steps": 1010, "total_steps": 1419, "loss": 0.0847, "lr": 9.82368253045158e-06, "epoch": 2.1353065539112053, "percentage": 71.18, "elapsed_time": "0:55:43", "remaining_time": "0:22:33", "throughput": 1887.6, "total_tokens": 6311296}
+ {"current_steps": 1015, "total_steps": 1419, "loss": 0.0921, "lr": 9.601582449516538e-06, "epoch": 2.145877378435518, "percentage": 71.53, "elapsed_time": "0:55:57", "remaining_time": "0:22:16", "throughput": 1888.89, "total_tokens": 6342656}
+ {"current_steps": 1020, "total_steps": 1419, "loss": 0.0887, "lr": 9.381423593486629e-06, "epoch": 2.156448202959831, "percentage": 71.88, "elapsed_time": "0:56:12", "remaining_time": "0:21:59", "throughput": 1890.15, "total_tokens": 6374208}
+ {"current_steps": 1025, "total_steps": 1419, "loss": 0.0924, "lr": 9.163233717022568e-06, "epoch": 2.1670190274841437, "percentage": 72.23, "elapsed_time": "0:56:26", "remaining_time": "0:21:41", "throughput": 1891.43, "total_tokens": 6405440}
+ {"current_steps": 1030, "total_steps": 1419, "loss": 0.0884, "lr": 8.947040326562638e-06, "epoch": 2.177589852008457, "percentage": 72.59, "elapsed_time": "0:56:40", "remaining_time": "0:21:24", "throughput": 1892.7, "total_tokens": 6436928}
+ {"current_steps": 1035, "total_steps": 1419, "loss": 0.0937, "lr": 8.732870676855096e-06, "epoch": 2.1881606765327697, "percentage": 72.94, "elapsed_time": "0:56:55", "remaining_time": "0:21:07", "throughput": 1893.97, "total_tokens": 6468288}
+ {"current_steps": 1040, "total_steps": 1419, "loss": 0.0856, "lr": 8.520751767522257e-06, "epoch": 2.1987315010570825, "percentage": 73.29, "elapsed_time": "0:57:09", "remaining_time": "0:20:49", "throughput": 1895.24, "total_tokens": 6499584}
+ {"current_steps": 1045, "total_steps": 1419, "loss": 0.0864, "lr": 8.310710339656707e-06, "epoch": 2.2093023255813953, "percentage": 73.64, "elapsed_time": "0:57:23", "remaining_time": "0:20:32", "throughput": 1896.47, "total_tokens": 6530752}
+ {"current_steps": 1050, "total_steps": 1419, "loss": 0.0974, "lr": 8.102772872450209e-06, "epoch": 2.219873150105708, "percentage": 74.0, "elapsed_time": "0:57:37", "remaining_time": "0:20:15", "throughput": 1897.71, "total_tokens": 6561856}
+ {"current_steps": 1055, "total_steps": 1419, "loss": 0.0962, "lr": 7.896965579855486e-06, "epoch": 2.2304439746300213, "percentage": 74.35, "elapsed_time": "0:57:51", "remaining_time": "0:19:57", "throughput": 1898.91, "total_tokens": 6592960}
+ {"current_steps": 1060, "total_steps": 1419, "loss": 0.0993, "lr": 7.693314407281615e-06, "epoch": 2.241014799154334, "percentage": 74.7, "elapsed_time": "0:58:06", "remaining_time": "0:19:40", "throughput": 1900.13, "total_tokens": 6624256}
+ {"current_steps": 1065, "total_steps": 1419, "loss": 0.0915, "lr": 7.49184502832308e-06, "epoch": 2.251585623678647, "percentage": 75.05, "elapsed_time": "0:58:20", "remaining_time": "0:19:23", "throughput": 1901.37, "total_tokens": 6655424}
+ {"current_steps": 1070, "total_steps": 1419, "loss": 0.0944, "lr": 7.292582841523268e-06, "epoch": 2.2621564482029597, "percentage": 75.41, "elapsed_time": "0:58:34", "remaining_time": "0:19:06", "throughput": 1902.61, "total_tokens": 6686400}
+ {"current_steps": 1075, "total_steps": 1419, "loss": 0.0945, "lr": 7.095552967172503e-06, "epoch": 2.2727272727272725, "percentage": 75.76, "elapsed_time": "0:58:48", "remaining_time": "0:18:49", "throughput": 1903.77, "total_tokens": 6717376}
+ {"current_steps": 1080, "total_steps": 1419, "loss": 0.0896, "lr": 6.900780244141286e-06, "epoch": 2.2832980972515857, "percentage": 76.11, "elapsed_time": "0:59:02", "remaining_time": "0:18:31", "throughput": 1904.97, "total_tokens": 6748608}
+ {"current_steps": 1085, "total_steps": 1419, "loss": 0.0958, "lr": 6.708289226748868e-06, "epoch": 2.2938689217758985, "percentage": 76.46, "elapsed_time": "0:59:16", "remaining_time": "0:18:14", "throughput": 1906.16, "total_tokens": 6779776}
+ {"current_steps": 1090, "total_steps": 1419, "loss": 0.0938, "lr": 6.518104181667844e-06, "epoch": 2.3044397463002113, "percentage": 76.81, "elapsed_time": "0:59:30", "remaining_time": "0:17:57", "throughput": 1907.34, "total_tokens": 6810880}
+ {"current_steps": 1095, "total_steps": 1419, "loss": 0.0923, "lr": 6.3302490848648864e-06, "epoch": 2.3150105708245245, "percentage": 77.17, "elapsed_time": "0:59:45", "remaining_time": "0:17:40", "throughput": 1908.5, "total_tokens": 6842112}
+ {"current_steps": 1100, "total_steps": 1419, "loss": 0.0887, "lr": 6.144747618578209e-06, "epoch": 2.3255813953488373, "percentage": 77.52, "elapsed_time": "0:59:59", "remaining_time": "0:17:23", "throughput": 1909.64, "total_tokens": 6873152}
+ {"current_steps": 1100, "total_steps": 1419, "eval_loss": 0.09644165635108948, "epoch": 2.3255813953488373, "percentage": 77.52, "elapsed_time": "1:00:39", "remaining_time": "0:17:35", "throughput": 1888.37, "total_tokens": 6873152}
+ {"current_steps": 1105, "total_steps": 1419, "loss": 0.0826, "lr": 5.961623168332006e-06, "epoch": 2.33615221987315, "percentage": 77.87, "elapsed_time": "1:01:02", "remaining_time": "0:17:20", "throughput": 1884.99, "total_tokens": 6904512}
+ {"current_steps": 1110, "total_steps": 1419, "loss": 0.0826, "lr": 5.780898819988354e-06, "epoch": 2.346723044397463, "percentage": 78.22, "elapsed_time": "1:01:17", "remaining_time": "0:17:03", "throughput": 1886.19, "total_tokens": 6936064}
+ {"current_steps": 1115, "total_steps": 1419, "loss": 0.0929, "lr": 5.602597356836803e-06, "epoch": 2.3572938689217757, "percentage": 78.58, "elapsed_time": "1:01:31", "remaining_time": "0:16:46", "throughput": 1887.37, "total_tokens": 6967424}
+ {"current_steps": 1120, "total_steps": 1419, "loss": 0.0936, "lr": 5.426741256722239e-06, "epoch": 2.367864693446089, "percentage": 78.93, "elapsed_time": "1:01:45", "remaining_time": "0:16:29", "throughput": 1888.56, "total_tokens": 6998592}
+ {"current_steps": 1125, "total_steps": 1419, "loss": 0.0856, "lr": 5.253352689211114e-06, "epoch": 2.3784355179704018, "percentage": 79.28, "elapsed_time": "1:02:00", "remaining_time": "0:16:12", "throughput": 1889.74, "total_tokens": 7029952}
+ {"current_steps": 1130, "total_steps": 1419, "loss": 0.0923, "lr": 5.082453512796634e-06, "epoch": 2.3890063424947146, "percentage": 79.63, "elapsed_time": "1:02:14", "remaining_time": "0:15:55", "throughput": 1890.91, "total_tokens": 7060992}
+ {"current_steps": 1135, "total_steps": 1419, "loss": 0.0911, "lr": 4.914065272143153e-06, "epoch": 2.3995771670190273, "percentage": 79.99, "elapsed_time": "1:02:28", "remaining_time": "0:15:37", "throughput": 1892.04, "total_tokens": 7092224}
+ {"current_steps": 1140, "total_steps": 1419, "loss": 0.0851, "lr": 4.7482091953700705e-06, "epoch": 2.41014799154334, "percentage": 80.34, "elapsed_time": "1:02:42", "remaining_time": "0:15:20", "throughput": 1893.14, "total_tokens": 7123776}
+ {"current_steps": 1145, "total_steps": 1419, "loss": 0.0956, "lr": 4.584906191375715e-06, "epoch": 2.4207188160676534, "percentage": 80.69, "elapsed_time": "1:02:57", "remaining_time": "0:15:03", "throughput": 1894.28, "total_tokens": 7155072}
+ {"current_steps": 1150, "total_steps": 1419, "loss": 0.0916, "lr": 4.424176847201411e-06, "epoch": 2.431289640591966, "percentage": 81.04, "elapsed_time": "1:03:11", "remaining_time": "0:14:46", "throughput": 1895.41, "total_tokens": 7186240}
+ {"current_steps": 1155, "total_steps": 1419, "loss": 0.0886, "lr": 4.266041425436151e-06, "epoch": 2.441860465116279, "percentage": 81.4, "elapsed_time": "1:03:25", "remaining_time": "0:14:29", "throughput": 1896.52, "total_tokens": 7217536}
+ {"current_steps": 1160, "total_steps": 1419, "loss": 0.0852, "lr": 4.110519861662143e-06, "epoch": 2.452431289640592, "percentage": 81.75, "elapsed_time": "1:03:39", "remaining_time": "0:14:12", "throughput": 1897.55, "total_tokens": 7248576}
+ {"current_steps": 1165, "total_steps": 1419, "loss": 0.0942, "lr": 3.957631761941641e-06, "epoch": 2.463002114164905, "percentage": 82.1, "elapsed_time": "1:03:54", "remaining_time": "0:13:55", "throughput": 1898.62, "total_tokens": 7279808}
+ {"current_steps": 1170, "total_steps": 1419, "loss": 0.0889, "lr": 3.807396400345223e-06, "epoch": 2.473572938689218, "percentage": 82.45, "elapsed_time": "1:04:08", "remaining_time": "0:13:39", "throughput": 1899.67, "total_tokens": 7311168}
+ {"current_steps": 1175, "total_steps": 1419, "loss": 0.0907, "lr": 3.6598327165220296e-06, "epoch": 2.4841437632135306, "percentage": 82.8, "elapsed_time": "1:04:22", "remaining_time": "0:13:22", "throughput": 1900.73, "total_tokens": 7342528}
+ {"current_steps": 1180, "total_steps": 1419, "loss": 0.091, "lr": 3.514959313312061e-06, "epoch": 2.4947145877378434, "percentage": 83.16, "elapsed_time": "1:04:37", "remaining_time": "0:13:05", "throughput": 1901.79, "total_tokens": 7373696}
+ {"current_steps": 1185, "total_steps": 1419, "loss": 0.0888, "lr": 3.372794454401032e-06, "epoch": 2.5052854122621566, "percentage": 83.51, "elapsed_time": "1:04:51", "remaining_time": "0:12:48", "throughput": 1902.87, "total_tokens": 7404928}
+ {"current_steps": 1190, "total_steps": 1419, "loss": 0.0965, "lr": 3.2333560620178727e-06, "epoch": 2.5158562367864694, "percentage": 83.86, "elapsed_time": "1:05:05", "remaining_time": "0:12:31", "throughput": 1903.94, "total_tokens": 7436096}
+ {"current_steps": 1195, "total_steps": 1419, "loss": 0.0879, "lr": 3.096661714675397e-06, "epoch": 2.526427061310782, "percentage": 84.21, "elapsed_time": "1:05:19", "remaining_time": "0:12:14", "throughput": 1905.01, "total_tokens": 7467328}
+ {"current_steps": 1200, "total_steps": 1419, "loss": 0.0889, "lr": 2.962728644954191e-06, "epoch": 2.536997885835095, "percentage": 84.57, "elapsed_time": "1:05:34", "remaining_time": "0:11:57", "throughput": 1906.08, "total_tokens": 7498688}
+ {"current_steps": 1200, "total_steps": 1419, "eval_loss": 0.09538523107767105, "epoch": 2.536997885835095, "percentage": 84.57, "elapsed_time": "1:06:14", "remaining_time": "0:12:05", "throughput": 1886.69, "total_tokens": 7498688}
+ {"current_steps": 1205, "total_steps": 1419, "loss": 0.089, "lr": 2.8315737373301955e-06, "epoch": 2.547568710359408, "percentage": 84.92, "elapsed_time": "1:06:35", "remaining_time": "0:11:49", "throughput": 1884.54, "total_tokens": 7529792}
+ {"current_steps": 1210, "total_steps": 1419, "loss": 0.0965, "lr": 2.703213526046108e-06, "epoch": 2.558139534883721, "percentage": 85.27, "elapsed_time": "1:06:49", "remaining_time": "0:11:32", "throughput": 1885.7, "total_tokens": 7561088}
+ {"current_steps": 1215, "total_steps": 1419, "loss": 0.0921, "lr": 2.577664193027013e-06, "epoch": 2.568710359408034, "percentage": 85.62, "elapsed_time": "1:07:03", "remaining_time": "0:11:15", "throughput": 1886.82, "total_tokens": 7592448}
+ {"current_steps": 1220, "total_steps": 1419, "loss": 0.0831, "lr": 2.45494156584033e-06, "epoch": 2.5792811839323466, "percentage": 85.98, "elapsed_time": "1:07:18", "remaining_time": "0:10:58", "throughput": 1887.91, "total_tokens": 7624000}
+ {"current_steps": 1225, "total_steps": 1419, "loss": 0.0915, "lr": 2.3350611157005182e-06, "epoch": 2.58985200845666, "percentage": 86.33, "elapsed_time": "1:07:32", "remaining_time": "0:10:41", "throughput": 1888.99, "total_tokens": 7655232}
+ {"current_steps": 1230, "total_steps": 1419, "loss": 0.0893, "lr": 2.2180379555186844e-06, "epoch": 2.6004228329809727, "percentage": 86.68, "elapsed_time": "1:07:46", "remaining_time": "0:10:24", "throughput": 1890.12, "total_tokens": 7686464}
+ {"current_steps": 1235, "total_steps": 1419, "loss": 0.0944, "lr": 2.103886837997307e-06, "epoch": 2.6109936575052854, "percentage": 87.03, "elapsed_time": "1:08:00", "remaining_time": "0:10:07", "throughput": 1891.22, "total_tokens": 7717824}
+ {"current_steps": 1240, "total_steps": 1419, "loss": 0.0854, "lr": 1.9926221537704794e-06, "epoch": 2.6215644820295982, "percentage": 87.39, "elapsed_time": "1:08:15", "remaining_time": "0:09:51", "throughput": 1892.29, "total_tokens": 7749120}
+ {"current_steps": 1245, "total_steps": 1419, "loss": 0.0895, "lr": 1.884257929589664e-06, "epoch": 2.632135306553911, "percentage": 87.74, "elapsed_time": "1:08:29", "remaining_time": "0:09:34", "throughput": 1893.35, "total_tokens": 7780736}
+ {"current_steps": 1250, "total_steps": 1419, "loss": 0.0807, "lr": 1.7788078265554398e-06, "epoch": 2.6427061310782243, "percentage": 88.09, "elapsed_time": "1:08:43", "remaining_time": "0:09:17", "throughput": 1894.39, "total_tokens": 7812288}
+ {"current_steps": 1255, "total_steps": 1419, "loss": 0.082, "lr": 1.6762851383952616e-06, "epoch": 2.653276955602537, "percentage": 88.44, "elapsed_time": "1:08:57", "remaining_time": "0:09:00", "throughput": 1895.46, "total_tokens": 7843392}
+ {"current_steps": 1260, "total_steps": 1419, "loss": 0.0897, "lr": 1.5767027897875957e-06, "epoch": 2.66384778012685, "percentage": 88.79, "elapsed_time": "1:09:12", "remaining_time": "0:08:43", "throughput": 1896.52, "total_tokens": 7874560}
+ {"current_steps": 1265, "total_steps": 1419, "loss": 0.0909, "lr": 1.4800733347325152e-06, "epoch": 2.6744186046511627, "percentage": 89.15, "elapsed_time": "1:09:26", "remaining_time": "0:08:27", "throughput": 1897.54, "total_tokens": 7905728}
+ {"current_steps": 1270, "total_steps": 1419, "loss": 0.0984, "lr": 1.3864089549691012e-06, "epoch": 2.6849894291754755, "percentage": 89.5, "elapsed_time": "1:09:40", "remaining_time": "0:08:10", "throughput": 1898.57, "total_tokens": 7937088}
+ {"current_steps": 1275, "total_steps": 1419, "loss": 0.0893, "lr": 1.2957214584396997e-06, "epoch": 2.6955602536997887, "percentage": 89.85, "elapsed_time": "1:09:54", "remaining_time": "0:07:53", "throughput": 1899.61, "total_tokens": 7968704}
+ {"current_steps": 1280, "total_steps": 1419, "loss": 0.0843, "lr": 1.2080222778013573e-06, "epoch": 2.7061310782241015, "percentage": 90.2, "elapsed_time": "1:10:09", "remaining_time": "0:07:37", "throughput": 1900.62, "total_tokens": 8000064}
+ {"current_steps": 1285, "total_steps": 1419, "loss": 0.0892, "lr": 1.1233224689845251e-06, "epoch": 2.7167019027484143, "percentage": 90.56, "elapsed_time": "1:10:23", "remaining_time": "0:07:20", "throughput": 1901.65, "total_tokens": 8031296}
+ {"current_steps": 1290, "total_steps": 1419, "loss": 0.0883, "lr": 1.041632709799306e-06, "epoch": 2.7272727272727275, "percentage": 90.91, "elapsed_time": "1:10:37", "remaining_time": "0:07:03", "throughput": 1902.67, "total_tokens": 8062208}
+ {"current_steps": 1295, "total_steps": 1419, "loss": 0.089, "lr": 9.629632985893033e-07, "epoch": 2.7378435517970403, "percentage": 91.26, "elapsed_time": "1:10:51", "remaining_time": "0:06:47", "throughput": 1903.66, "total_tokens": 8093440}
+ {"current_steps": 1300, "total_steps": 1419, "loss": 0.0859, "lr": 8.873241529333776e-07, "epoch": 2.748414376321353, "percentage": 91.61, "elapsed_time": "1:11:05", "remaining_time": "0:06:30", "throughput": 1904.62, "total_tokens": 8124864}
+ {"current_steps": 1300, "total_steps": 1419, "eval_loss": 0.09499379247426987, "epoch": 2.748414376321353, "percentage": 91.61, "elapsed_time": "1:11:46", "remaining_time": "0:06:34", "throughput": 1886.7, "total_tokens": 8124864}
+ {"current_steps": 1305, "total_steps": 1419, "loss": 0.0937, "lr": 8.147248083953562e-07, "epoch": 2.758985200845666, "percentage": 91.97, "elapsed_time": "1:12:04", "remaining_time": "0:06:17", "throughput": 1886.07, "total_tokens": 8156032}
+ {"current_steps": 1310, "total_steps": 1419, "loss": 0.0927, "lr": 7.451744173219116e-07, "epoch": 2.7695560253699787, "percentage": 92.32, "elapsed_time": "1:12:18", "remaining_time": "0:06:01", "throughput": 1887.1, "total_tokens": 8187456}
+ {"current_steps": 1315, "total_steps": 1419, "loss": 0.084, "lr": 6.786817476887725e-07, "epoch": 2.780126849894292, "percentage": 92.67, "elapsed_time": "1:12:32", "remaining_time": "0:05:44", "throughput": 1888.1, "total_tokens": 8218880}
+ {"current_steps": 1320, "total_steps": 1419, "loss": 0.0862, "lr": 6.152551819953667e-07, "epoch": 2.7906976744186047, "percentage": 93.02, "elapsed_time": "1:12:47", "remaining_time": "0:05:27", "throughput": 1889.14, "total_tokens": 8250048}
+ {"current_steps": 1325, "total_steps": 1419, "loss": 0.0967, "lr": 5.549027162080666e-07, "epoch": 2.8012684989429175, "percentage": 93.38, "elapsed_time": "1:13:01", "remaining_time": "0:05:10", "throughput": 1890.18, "total_tokens": 8281408}
+ {"current_steps": 1330, "total_steps": 1419, "loss": 0.0878, "lr": 4.976319587521788e-07, "epoch": 2.8118393234672303, "percentage": 93.73, "elapsed_time": "1:13:15", "remaining_time": "0:04:54", "throughput": 1891.24, "total_tokens": 8312448}
+ {"current_steps": 1335, "total_steps": 1419, "loss": 0.0923, "lr": 4.434501295527582e-07, "epoch": 2.822410147991543, "percentage": 94.08, "elapsed_time": "1:13:29", "remaining_time": "0:04:37", "throughput": 1892.27, "total_tokens": 8343488}
+ {"current_steps": 1340, "total_steps": 1419, "loss": 0.0887, "lr": 3.9236405912442544e-07, "epoch": 2.8329809725158563, "percentage": 94.43, "elapsed_time": "1:13:43", "remaining_time": "0:04:20", "throughput": 1893.28, "total_tokens": 8374976}
+ {"current_steps": 1345, "total_steps": 1419, "loss": 0.0862, "lr": 3.44380187710272e-07, "epoch": 2.843551797040169, "percentage": 94.79, "elapsed_time": "1:13:57", "remaining_time": "0:04:04", "throughput": 1894.25, "total_tokens": 8406208}
+ {"current_steps": 1350, "total_steps": 1419, "loss": 0.0862, "lr": 2.995045644699518e-07, "epoch": 2.854122621564482, "percentage": 95.14, "elapsed_time": "1:14:11", "remaining_time": "0:03:47", "throughput": 1895.22, "total_tokens": 8437440}
+ {"current_steps": 1355, "total_steps": 1419, "loss": 0.0878, "lr": 2.577428467170989e-07, "epoch": 2.864693446088795, "percentage": 95.49, "elapsed_time": "1:14:26", "remaining_time": "0:03:30", "throughput": 1896.2, "total_tokens": 8468416}
+ {"current_steps": 1360, "total_steps": 1419, "loss": 0.0881, "lr": 2.1910029920610974e-07, "epoch": 2.875264270613108, "percentage": 95.84, "elapsed_time": "1:14:40", "remaining_time": "0:03:14", "throughput": 1897.17, "total_tokens": 8500032}
+ {"current_steps": 1365, "total_steps": 1419, "loss": 0.0913, "lr": 1.8358179346845694e-07, "epoch": 2.8858350951374208, "percentage": 96.19, "elapsed_time": "1:14:54", "remaining_time": "0:02:57", "throughput": 1898.12, "total_tokens": 8531200}
+ {"current_steps": 1370, "total_steps": 1419, "loss": 0.0899, "lr": 1.51191807198528e-07, "epoch": 2.8964059196617336, "percentage": 96.55, "elapsed_time": "1:15:08", "remaining_time": "0:02:41", "throughput": 1899.07, "total_tokens": 8562240}
+ {"current_steps": 1375, "total_steps": 1419, "loss": 0.0813, "lr": 1.2193442368915732e-07, "epoch": 2.9069767441860463, "percentage": 96.9, "elapsed_time": "1:15:22", "remaining_time": "0:02:24", "throughput": 1900.01, "total_tokens": 8593600}
+ {"current_steps": 1380, "total_steps": 1419, "loss": 0.0874, "lr": 9.581333131685467e-08, "epoch": 2.9175475687103596, "percentage": 97.25, "elapsed_time": "1:15:37", "remaining_time": "0:02:08", "throughput": 1900.94, "total_tokens": 8624768}
+ {"current_steps": 1385, "total_steps": 1419, "loss": 0.0915, "lr": 7.283182307681324e-08, "epoch": 2.9281183932346724, "percentage": 97.6, "elapsed_time": "1:15:51", "remaining_time": "0:01:51", "throughput": 1901.81, "total_tokens": 8655808}
+ {"current_steps": 1390, "total_steps": 1419, "loss": 0.0835, "lr": 5.299279616779174e-08, "epoch": 2.938689217758985, "percentage": 97.96, "elapsed_time": "1:16:05", "remaining_time": "0:01:35", "throughput": 1902.75, "total_tokens": 8687232}
+ {"current_steps": 1395, "total_steps": 1419, "loss": 0.092, "lr": 3.629875162686203e-08, "epoch": 2.949260042283298, "percentage": 98.31, "elapsed_time": "1:16:19", "remaining_time": "0:01:18", "throughput": 1903.68, "total_tokens": 8718592}
+ {"current_steps": 1400, "total_steps": 1419, "loss": 0.0883, "lr": 2.2751794014111428e-08, "epoch": 2.9598308668076108, "percentage": 98.66, "elapsed_time": "1:16:34", "remaining_time": "0:01:02", "throughput": 1904.6, "total_tokens": 8749760}
+ {"current_steps": 1400, "total_steps": 1419, "eval_loss": 0.09467408061027527, "epoch": 2.9598308668076108, "percentage": 98.66, "elapsed_time": "1:17:14", "remaining_time": "0:01:02", "throughput": 1887.97, "total_tokens": 8749760}
+ {"current_steps": 1405, "total_steps": 1419, "loss": 0.0872, "lr": 1.2353631147335454e-08, "epoch": 2.970401691331924, "percentage": 99.01, "elapsed_time": "1:17:38", "remaining_time": "0:00:46", "throughput": 1885.05, "total_tokens": 8780992}
+ {"current_steps": 1410, "total_steps": 1419, "loss": 0.0822, "lr": 5.105573886735049e-09, "epoch": 2.980972515856237, "percentage": 99.37, "elapsed_time": "1:17:52", "remaining_time": "0:00:29", "throughput": 1886.04, "total_tokens": 8812224}
+ {"current_steps": 1415, "total_steps": 1419, "loss": 0.0901, "lr": 1.0085359696654362e-09, "epoch": 2.9915433403805496, "percentage": 99.72, "elapsed_time": "1:18:06", "remaining_time": "0:00:13", "throughput": 1887.02, "total_tokens": 8843200}
+ {"current_steps": 1419, "total_steps": 1419, "epoch": 3.0, "percentage": 100.0, "elapsed_time": "1:18:21", "remaining_time": "0:00:00", "throughput": 1886.05, "total_tokens": 8867536}
trainer_state.json ADDED
@@ -0,0 +1,2433 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 3.0,
+ "eval_steps": 100,
+ "global_step": 1419,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.010570824524312896,
+ "grad_norm": 45.65249252319336,
+ "learning_rate": 1.25e-05,
+ "loss": 14.2333,
+ "num_input_tokens_seen": 31104,
+ "step": 5
+ },
+ {
+ "epoch": 0.021141649048625793,
+ "grad_norm": 33.49619674682617,
+ "learning_rate": 2.5e-05,
+ "loss": 9.2972,
+ "num_input_tokens_seen": 62208,
+ "step": 10
+ },
+ {
+ "epoch": 0.03171247357293869,
+ "grad_norm": 9.210739135742188,
+ "learning_rate": 3.7500000000000003e-05,
+ "loss": 2.411,
+ "num_input_tokens_seen": 93504,
+ "step": 15
+ },
+ {
+ "epoch": 0.042283298097251586,
+ "grad_norm": 7.316084384918213,
+ "learning_rate": 5e-05,
+ "loss": 0.9413,
+ "num_input_tokens_seen": 124800,
+ "step": 20
+ },
+ {
+ "epoch": 0.052854122621564484,
+ "grad_norm": 7.541203498840332,
+ "learning_rate": 4.9998424168507275e-05,
+ "loss": 0.4389,
+ "num_input_tokens_seen": 156096,
+ "step": 25
+ },
+ {
+ "epoch": 0.06342494714587738,
+ "grad_norm": 7.138961315155029,
+ "learning_rate": 4.999369687268868e-05,
+ "loss": 0.4112,
+ "num_input_tokens_seen": 187200,
+ "step": 30
+ },
+ {
+ "epoch": 0.07399577167019028,
+ "grad_norm": 0.46440309286117554,
+ "learning_rate": 4.998581870849795e-05,
+ "loss": 0.3011,
+ "num_input_tokens_seen": 218496,
+ "step": 35
+ },
+ {
+ "epoch": 0.08456659619450317,
+ "grad_norm": 1.6051653623580933,
+ "learning_rate": 4.997479066910782e-05,
+ "loss": 0.2631,
+ "num_input_tokens_seen": 249920,
+ "step": 40
+ },
+ {
+ "epoch": 0.09513742071881606,
+ "grad_norm": 1.2404223680496216,
+ "learning_rate": 4.996061414478485e-05,
+ "loss": 0.2223,
+ "num_input_tokens_seen": 281216,
+ "step": 45
+ },
+ {
+ "epoch": 0.10570824524312897,
+ "grad_norm": 0.2932145297527313,
+ "learning_rate": 4.994329092271408e-05,
+ "loss": 0.2446,
+ "num_input_tokens_seen": 312512,
+ "step": 50
+ },
+ {
+ "epoch": 0.11627906976744186,
+ "grad_norm": 7.334479331970215,
+ "learning_rate": 4.992282318677387e-05,
+ "loss": 0.2994,
+ "num_input_tokens_seen": 343680,
+ "step": 55
+ },
+ {
+ "epoch": 0.12684989429175475,
+ "grad_norm": 1.8428316116333008,
+ "learning_rate": 4.9899213517260416e-05,
+ "loss": 0.2916,
+ "num_input_tokens_seen": 374848,
+ "step": 60
+ },
+ {
+ "epoch": 0.13742071881606766,
+ "grad_norm": 0.9089680314064026,
+ "learning_rate": 4.9872464890562576e-05,
+ "loss": 0.2317,
+ "num_input_tokens_seen": 406400,
+ "step": 65
+ },
+ {
+ "epoch": 0.14799154334038056,
+ "grad_norm": 4.8257880210876465,
+ "learning_rate": 4.9842580678786645e-05,
+ "loss": 0.2216,
+ "num_input_tokens_seen": 437696,
+ "step": 70
+ },
+ {
+ "epoch": 0.15856236786469344,
+ "grad_norm": 0.614710807800293,
+ "learning_rate": 4.980956464933116e-05,
+ "loss": 0.2311,
+ "num_input_tokens_seen": 468864,
+ "step": 75
+ },
+ {
+ "epoch": 0.16913319238900634,
+ "grad_norm": 1.1520471572875977,
+ "learning_rate": 4.9773420964412064e-05,
+ "loss": 0.2051,
+ "num_input_tokens_seen": 499968,
+ "step": 80
+ },
+ {
+ "epoch": 0.17970401691331925,
+ "grad_norm": 0.8753998279571533,
+ "learning_rate": 4.973415418053789e-05,
+ "loss": 0.1928,
+ "num_input_tokens_seen": 531072,
+ "step": 85
+ },
+ {
+ "epoch": 0.19027484143763213,
+ "grad_norm": 0.2460280954837799,
+ "learning_rate": 4.969176924793543e-05,
+ "loss": 0.1849,
+ "num_input_tokens_seen": 562240,
+ "step": 90
+ },
+ {
+ "epoch": 0.20084566596194503,
+ "grad_norm": 0.22848260402679443,
+ "learning_rate": 4.96462715099256e-05,
+ "loss": 0.172,
+ "num_input_tokens_seen": 593536,
+ "step": 95
+ },
+ {
+ "epoch": 0.21141649048625794,
+ "grad_norm": 0.4746881127357483,
+ "learning_rate": 4.9597666702249865e-05,
+ "loss": 0.1778,
+ "num_input_tokens_seen": 624768,
+ "step": 100
+ },
+ {
+ "epoch": 0.21141649048625794,
+ "eval_loss": 0.17541779577732086,
+ "eval_runtime": 40.3512,
+ "eval_samples_per_second": 83.343,
+ "eval_steps_per_second": 10.433,
+ "num_input_tokens_seen": 624768,
+ "step": 100
+ },
+ {
+ "epoch": 0.2219873150105708,
+ "grad_norm": 0.2084885537624359,
+ "learning_rate": 4.954596095234718e-05,
+ "loss": 0.1754,
+ "num_input_tokens_seen": 656256,
+ "step": 105
+ },
+ {
+ "epoch": 0.23255813953488372,
+ "grad_norm": 0.10341060161590576,
+ "learning_rate": 4.9491160778581445e-05,
+ "loss": 0.1727,
+ "num_input_tokens_seen": 687808,
+ "step": 110
+ },
+ {
+ "epoch": 0.24312896405919662,
+ "grad_norm": 11.542490005493164,
+ "learning_rate": 4.943327308941985e-05,
+ "loss": 0.1728,
+ "num_input_tokens_seen": 718848,
+ "step": 115
+ },
+ {
+ "epoch": 0.2536997885835095,
+ "grad_norm": 0.07902055978775024,
+ "learning_rate": 4.9372305182561874e-05,
+ "loss": 0.1649,
+ "num_input_tokens_seen": 750080,
+ "step": 120
+ },
+ {
+ "epoch": 0.2642706131078224,
+ "grad_norm": 0.09493754059076309,
+ "learning_rate": 4.9308264744019326e-05,
+ "loss": 0.1647,
+ "num_input_tokens_seen": 781184,
+ "step": 125
+ },
+ {
+ "epoch": 0.2748414376321353,
+ "grad_norm": 1.9789398908615112,
+ "learning_rate": 4.9241159847147405e-05,
+ "loss": 0.1683,
+ "num_input_tokens_seen": 812160,
+ "step": 130
+ },
+ {
+ "epoch": 0.2854122621564482,
+ "grad_norm": 0.1611855924129486,
+ "learning_rate": 4.917099895162689e-05,
+ "loss": 0.1597,
+ "num_input_tokens_seen": 843584,
+ "step": 135
+ },
+ {
+ "epoch": 0.2959830866807611,
+ "grad_norm": 0.3848009705543518,
+ "learning_rate": 4.9097790902397686e-05,
+ "loss": 0.1669,
+ "num_input_tokens_seen": 875200,
+ "step": 140
+ },
+ {
+ "epoch": 0.30655391120507397,
+ "grad_norm": 0.13839414715766907,
+ "learning_rate": 4.902154492854374e-05,
+ "loss": 0.1568,
+ "num_input_tokens_seen": 906432,
+ "step": 145
+ },
+ {
+ "epoch": 0.3171247357293869,
+ "grad_norm": 0.12030225247144699,
+ "learning_rate": 4.8942270642129604e-05,
+ "loss": 0.1608,
+ "num_input_tokens_seen": 937664,
+ "step": 150
+ },
+ {
+ "epoch": 0.3276955602536998,
+ "grad_norm": 0.21667665243148804,
+ "learning_rate": 4.8859978036988644e-05,
+ "loss": 0.1654,
+ "num_input_tokens_seen": 968960,
+ "step": 155
+ },
+ {
+ "epoch": 0.3382663847780127,
+ "grad_norm": 0.146434485912323,
+ "learning_rate": 4.8774677487463175e-05,
+ "loss": 0.1639,
+ "num_input_tokens_seen": 1000192,
+ "step": 160
+ },
+ {
+ "epoch": 0.3488372093023256,
+ "grad_norm": 0.11135861277580261,
+ "learning_rate": 4.8686379747096556e-05,
+ "loss": 0.16,
+ "num_input_tokens_seen": 1031616,
+ "step": 165
+ },
+ {
+ "epoch": 0.3594080338266385,
+ "grad_norm": 0.07981089502573013,
+ "learning_rate": 4.85950959472776e-05,
+ "loss": 0.1645,
+ "num_input_tokens_seen": 1062656,
+ "step": 170
+ },
+ {
+ "epoch": 0.3699788583509514,
+ "grad_norm": 0.057556912302970886,
+ "learning_rate": 4.850083759583723e-05,
+ "loss": 0.1604,
+ "num_input_tokens_seen": 1093888,
+ "step": 175
+ },
+ {
+ "epoch": 0.38054968287526425,
+ "grad_norm": 0.12838751077651978,
+ "learning_rate": 4.840361657559775e-05,
+ "loss": 0.1707,
+ "num_input_tokens_seen": 1125184,
+ "step": 180
+ },
+ {
+ "epoch": 0.39112050739957716,
+ "grad_norm": 0.20540139079093933,
+ "learning_rate": 4.830344514287478e-05,
+ "loss": 0.1544,
+ "num_input_tokens_seen": 1156224,
+ "step": 185
+ },
+ {
+ "epoch": 0.40169133192389006,
+ "grad_norm": 0.11389072984457016,
+ "learning_rate": 4.8200335925932185e-05,
+ "loss": 0.1615,
+ "num_input_tokens_seen": 1187392,
+ "step": 190
+ },
+ {
+ "epoch": 0.41226215644820297,
+ "grad_norm": 0.34211698174476624,
+ "learning_rate": 4.809430192339008e-05,
+ "loss": 0.159,
+ "num_input_tokens_seen": 1218624,
+ "step": 195
+ },
+ {
+ "epoch": 0.42283298097251587,
+ "grad_norm": 0.4587748348712921,
+ "learning_rate": 4.79853565025861e-05,
+ "loss": 0.1668,
+ "num_input_tokens_seen": 1249984,
+ "step": 200
+ },
+ {
+ "epoch": 0.42283298097251587,
+ "eval_loss": 0.16408737003803253,
+ "eval_runtime": 40.303,
+ "eval_samples_per_second": 83.443,
+ "eval_steps_per_second": 10.446,
+ "num_input_tokens_seen": 1249984,
+ "step": 200
+ },
+ {
+ "epoch": 0.4334038054968288,
+ "grad_norm": 0.4510205388069153,
+ "learning_rate": 4.787351339789025e-05,
+ "loss": 0.1606,
+ "num_input_tokens_seen": 1281216,
+ "step": 205
+ },
+ {
+ "epoch": 0.4439746300211416,
+ "grad_norm": 0.07297118008136749,
+ "learning_rate": 4.7758786708973444e-05,
+ "loss": 0.1628,
+ "num_input_tokens_seen": 1312768,
+ "step": 210
+ },
+ {
+ "epoch": 0.45454545454545453,
+ "grad_norm": 0.1418861746788025,
+ "learning_rate": 4.764119089903008e-05,
+ "loss": 0.1617,
+ "num_input_tokens_seen": 1344192,
+ "step": 215
+ },
+ {
+ "epoch": 0.46511627906976744,
+ "grad_norm": 0.15124177932739258,
+ "learning_rate": 4.752074079295457e-05,
+ "loss": 0.162,
+ "num_input_tokens_seen": 1375424,
+ "step": 220
+ },
+ {
+ "epoch": 0.47568710359408034,
+ "grad_norm": 0.10217985510826111,
+ "learning_rate": 4.739745157547258e-05,
+ "loss": 0.1683,
+ "num_input_tokens_seen": 1406656,
+ "step": 225
+ },
+ {
+ "epoch": 0.48625792811839325,
+ "grad_norm": 0.24457764625549316,
+ "learning_rate": 4.727133878922663e-05,
+ "loss": 0.155,
+ "num_input_tokens_seen": 1437824,
+ "step": 230
+ },
+ {
+ "epoch": 0.49682875264270615,
+ "grad_norm": 1.5385491847991943,
+ "learning_rate": 4.7142418332816735e-05,
+ "loss": 0.1585,
+ "num_input_tokens_seen": 1468992,
+ "step": 235
+ },
+ {
+ "epoch": 0.507399577167019,
+ "grad_norm": 25.565441131591797,
+ "learning_rate": 4.701070645879612e-05,
+ "loss": 0.1882,
+ "num_input_tokens_seen": 1500224,
+ "step": 240
+ },
+ {
+ "epoch": 0.5179704016913319,
+ "grad_norm": 0.18062542378902435,
+ "learning_rate": 4.687621977162231e-05,
+ "loss": 0.1742,
+ "num_input_tokens_seen": 1531584,
+ "step": 245
+ },
+ {
+ "epoch": 0.5285412262156448,
+ "grad_norm": 0.20139415562152863,
+ "learning_rate": 4.673897522556385e-05,
+ "loss": 0.1607,
+ "num_input_tokens_seen": 1562880,
+ "step": 250
+ },
+ {
+ "epoch": 0.5391120507399577,
+ "grad_norm": 0.20215147733688354,
+ "learning_rate": 4.6598990122562996e-05,
+ "loss": 0.156,
+ "num_input_tokens_seen": 1594176,
+ "step": 255
+ },
+ {
+ "epoch": 0.5496828752642706,
+ "grad_norm": 0.19909769296646118,
+ "learning_rate": 4.645628211005443e-05,
+ "loss": 0.1584,
+ "num_input_tokens_seen": 1625344,
+ "step": 260
+ },
+ {
+ "epoch": 0.5602536997885835,
+ "grad_norm": 0.0857083797454834,
+ "learning_rate": 4.63108691787406e-05,
+ "loss": 0.1514,
+ "num_input_tokens_seen": 1656448,
+ "step": 265
+ },
+ {
+ "epoch": 0.5708245243128964,
+ "grad_norm": 0.11940807104110718,
+ "learning_rate": 4.616276966032363e-05,
+ "loss": 0.1649,
+ "num_input_tokens_seen": 1687744,
+ "step": 270
+ },
+ {
+ "epoch": 0.5813953488372093,
+ "grad_norm": 0.07466170191764832,
+ "learning_rate": 4.6012002225194325e-05,
+ "loss": 0.1577,
+ "num_input_tokens_seen": 1719040,
+ "step": 275
+ },
+ {
+ "epoch": 0.5919661733615222,
+ "grad_norm": 0.1683170348405838,
+ "learning_rate": 4.585858588007849e-05,
+ "loss": 0.1562,
+ "num_input_tokens_seen": 1750208,
+ "step": 280
+ },
+ {
+ "epoch": 0.6025369978858351,
+ "grad_norm": 0.3020932674407959,
+ "learning_rate": 4.570253996564075e-05,
+ "loss": 0.1438,
+ "num_input_tokens_seen": 1781824,
+ "step": 285
+ },
+ {
+ "epoch": 0.6131078224101479,
+ "grad_norm": 0.18477758765220642,
+ "learning_rate": 4.554388415404644e-05,
+ "loss": 0.165,
+ "num_input_tokens_seen": 1813248,
+ "step": 290
+ },
+ {
+ "epoch": 0.6236786469344608,
+ "grad_norm": 17.139799118041992,
+ "learning_rate": 4.538263844648149e-05,
+ "loss": 0.1618,
+ "num_input_tokens_seen": 1844736,
+ "step": 295
+ },
+ {
+ "epoch": 0.6342494714587738,
+ "grad_norm": 0.15323784947395325,
+ "learning_rate": 4.521882317063103e-05,
+ "loss": 0.1569,
+ "num_input_tokens_seen": 1875648,
+ "step": 300
+ },
+ {
+ "epoch": 0.6342494714587738,
+ "eval_loss": 0.16001471877098083,
+ "eval_runtime": 40.331,
+ "eval_samples_per_second": 83.385,
+ "eval_steps_per_second": 10.439,
+ "num_input_tokens_seen": 1875648,
+ "step": 300
+ },
+ {
+ "epoch": 0.6448202959830867,
+ "grad_norm": 0.16273276507854462,
+ "learning_rate": 4.505245897811672e-05,
+ "loss": 0.1598,
+ "num_input_tokens_seen": 1907008,
+ "step": 305
+ },
+ {
+ "epoch": 0.6553911205073996,
+ "grad_norm": 0.1982152760028839,
+ "learning_rate": 4.488356684189325e-05,
+ "loss": 0.1501,
+ "num_input_tokens_seen": 1938496,
+ "step": 310
+ },
+ {
+ "epoch": 0.6659619450317125,
+ "grad_norm": 0.15199612081050873,
+ "learning_rate": 4.4712168053604407e-05,
+ "loss": 0.1456,
+ "num_input_tokens_seen": 1969664,
+ "step": 315
+ },
+ {
+ "epoch": 0.6765327695560254,
+ "grad_norm": 0.21335271000862122,
+ "learning_rate": 4.4538284220898864e-05,
+ "loss": 0.1502,
+ "num_input_tokens_seen": 2001024,
+ "step": 320
+ },
+ {
+ "epoch": 0.6871035940803383,
+ "grad_norm": 0.1967424601316452,
+ "learning_rate": 4.4361937264706186e-05,
+ "loss": 0.1446,
+ "num_input_tokens_seen": 2032448,
+ "step": 325
+ },
+ {
+ "epoch": 0.6976744186046512,
+ "grad_norm": 0.13540367782115936,
+ "learning_rate": 4.418314941647335e-05,
+ "loss": 0.1478,
+ "num_input_tokens_seen": 2063872,
+ "step": 330
+ },
+ {
+ "epoch": 0.7082452431289641,
+ "grad_norm": 0.17547021806240082,
+ "learning_rate": 4.400194321536209e-05,
+ "loss": 0.147,
+ "num_input_tokens_seen": 2095104,
+ "step": 335
+ },
+ {
+ "epoch": 0.718816067653277,
+ "grad_norm": 0.29705560207366943,
+ "learning_rate": 4.381834150540749e-05,
+ "loss": 0.1479,
+ "num_input_tokens_seen": 2126336,
+ "step": 340
+ },
+ {
+ "epoch": 0.7293868921775899,
+ "grad_norm": 0.24377129971981049,
+ "learning_rate": 4.363236743263808e-05,
+ "loss": 0.1448,
+ "num_input_tokens_seen": 2157376,
+ "step": 345
+ },
+ {
+ "epoch": 0.7399577167019028,
+ "grad_norm": 0.16772465407848358,
+ "learning_rate": 4.3444044442157914e-05,
+ "loss": 0.1443,
+ "num_input_tokens_seen": 2188864,
+ "step": 350
+ },
+ {
+ "epoch": 0.7505285412262156,
+ "grad_norm": 0.18267805874347687,
+ "learning_rate": 4.3253396275190926e-05,
+ "loss": 0.1464,
+ "num_input_tokens_seen": 2220288,
+ "step": 355
+ },
+ {
+ "epoch": 0.7610993657505285,
+ "grad_norm": 0.18752624094486237,
+ "learning_rate": 4.306044696608797e-05,
+ "loss": 0.1345,
+ "num_input_tokens_seen": 2251520,
+ "step": 360
+ },
+ {
+ "epoch": 0.7716701902748414,
+ "grad_norm": 0.21755804121494293,
+ "learning_rate": 4.286522083929686e-05,
+ "loss": 0.1311,
+ "num_input_tokens_seen": 2282624,
+ "step": 365
+ },
+ {
+ "epoch": 0.7822410147991543,
+ "grad_norm": 0.2151494175195694,
+ "learning_rate": 4.266774250629589e-05,
+ "loss": 0.1428,
+ "num_input_tokens_seen": 2313792,
+ "step": 370
+ },
+ {
+ "epoch": 0.7928118393234672,
+ "grad_norm": 0.24206243455410004,
+ "learning_rate": 4.2468036862491176e-05,
+ "loss": 0.1361,
+ "num_input_tokens_seen": 2344896,
+ "step": 375
+ },
+ {
+ "epoch": 0.8033826638477801,
+ "grad_norm": 0.26434633135795593,
+ "learning_rate": 4.226612908407814e-05,
+ "loss": 0.1436,
+ "num_input_tokens_seen": 2376192,
+ "step": 380
+ },
+ {
+ "epoch": 0.813953488372093,
+ "grad_norm": 0.26230087876319885,
+ "learning_rate": 4.2062044624867656e-05,
+ "loss": 0.138,
+ "num_input_tokens_seen": 2407232,
+ "step": 385
+ },
+ {
+ "epoch": 0.8245243128964059,
+ "grad_norm": 0.27545973658561707,
+ "learning_rate": 4.1855809213077146e-05,
+ "loss": 0.129,
+ "num_input_tokens_seen": 2438528,
+ "step": 390
+ },
+ {
+ "epoch": 0.8350951374207188,
+ "grad_norm": 0.2836856245994568,
+ "learning_rate": 4.1647448848087166e-05,
+ "loss": 0.1278,
+ "num_input_tokens_seen": 2469504,
+ "step": 395
+ },
+ {
+ "epoch": 0.8456659619450317,
+ "grad_norm": 0.3141574561595917,
+ "learning_rate": 4.143698979716372e-05,
+ "loss": 0.1313,
+ "num_input_tokens_seen": 2500800,
+ "step": 400
+ },
+ {
+ "epoch": 0.8456659619450317,
+ "eval_loss": 0.1339479386806488,
+ "eval_runtime": 40.355,
+ "eval_samples_per_second": 83.335,
+ "eval_steps_per_second": 10.432,
+ "num_input_tokens_seen": 2500800,
+ "step": 400
+ },
+ {
+ "epoch": 0.8562367864693446,
+ "grad_norm": 0.21188335120677948,
+ "learning_rate": 4.122445859214682e-05,
+ "loss": 0.1308,
+ "num_input_tokens_seen": 2531904,
+ "step": 405
+ },
+ {
+ "epoch": 0.8668076109936576,
+ "grad_norm": 0.22360175848007202,
+ "learning_rate": 4.100988202610577e-05,
+ "loss": 0.1213,
+ "num_input_tokens_seen": 2563392,
+ "step": 410
+ },
+ {
+ "epoch": 0.8773784355179705,
+ "grad_norm": 0.1944059282541275,
+ "learning_rate": 4.079328714996139e-05,
+ "loss": 0.1232,
+ "num_input_tokens_seen": 2594688,
+ "step": 415
+ },
+ {
+ "epoch": 0.8879492600422833,
+ "grad_norm": 0.3056269884109497,
+ "learning_rate": 4.0574701269075844e-05,
+ "loss": 0.1328,
+ "num_input_tokens_seen": 2626112,
+ "step": 420
+ },
+ {
+ "epoch": 0.8985200845665962,
+ "grad_norm": 0.25777870416641235,
+ "learning_rate": 4.035415193981032e-05,
+ "loss": 0.1237,
+ "num_input_tokens_seen": 2657344,
+ "step": 425
+ },
+ {
+ "epoch": 0.9090909090909091,
+ "grad_norm": 0.3172893822193146,
+ "learning_rate": 4.0131666966051127e-05,
+ "loss": 0.131,
+ "num_input_tokens_seen": 2688256,
+ "step": 430
+ },
+ {
+ "epoch": 0.919661733615222,
+ "grad_norm": 0.3003503978252411,
+ "learning_rate": 3.990727439570453e-05,
+ "loss": 0.1301,
+ "num_input_tokens_seen": 2719232,
+ "step": 435
+ },
+ {
+ "epoch": 0.9302325581395349,
+ "grad_norm": 0.350626677274704,
+ "learning_rate": 3.9681002517160845e-05,
+ "loss": 0.1249,
+ "num_input_tokens_seen": 2750464,
+ "step": 440
+ },
+ {
+ "epoch": 0.9408033826638478,
+ "grad_norm": 1.0592330694198608,
+ "learning_rate": 3.945287985572826e-05,
+ "loss": 0.1176,
+ "num_input_tokens_seen": 2781440,
+ "step": 445
+ },
+ {
+ "epoch": 0.9513742071881607,
+ "grad_norm": 0.6262398362159729,
+ "learning_rate": 3.922293517003668e-05,
+ "loss": 0.119,
+ "num_input_tokens_seen": 2812864,
+ "step": 450
+ },
+ {
+ "epoch": 0.9619450317124736,
+ "grad_norm": 1.1160500049591064,
+ "learning_rate": 3.899119744841232e-05,
+ "loss": 0.1166,
+ "num_input_tokens_seen": 2844096,
+ "step": 455
+ },
+ {
+ "epoch": 0.9725158562367865,
+ "grad_norm": 0.24976776540279388,
+ "learning_rate": 3.875769590522314e-05,
+ "loss": 0.1207,
+ "num_input_tokens_seen": 2875392,
+ "step": 460
+ },
+ {
+ "epoch": 0.9830866807610994,
+ "grad_norm": 0.17139197885990143,
+ "learning_rate": 3.8522459977195955e-05,
+ "loss": 0.125,
+ "num_input_tokens_seen": 2906432,
+ "step": 465
+ },
+ {
+ "epoch": 0.9936575052854123,
+ "grad_norm": 0.22843952476978302,
+ "learning_rate": 3.828551931970549e-05,
+ "loss": 0.1278,
+ "num_input_tokens_seen": 2937728,
+ "step": 470
+ },
+ {
+ "epoch": 1.0042283298097252,
+ "grad_norm": 0.1976863592863083,
+ "learning_rate": 3.8046903803035716e-05,
+ "loss": 0.1226,
+ "num_input_tokens_seen": 2968192,
+ "step": 475
+ },
+ {
+ "epoch": 1.014799154334038,
+ "grad_norm": 0.280398428440094,
+ "learning_rate": 3.780664350861431e-05,
+ "loss": 0.1169,
+ "num_input_tokens_seen": 2999488,
+ "step": 480
+ },
+ {
+ "epoch": 1.025369978858351,
+ "grad_norm": 0.2658718526363373,
+ "learning_rate": 3.756476872522035e-05,
+ "loss": 0.116,
+ "num_input_tokens_seen": 3030720,
+ "step": 485
+ },
+ {
+ "epoch": 1.0359408033826638,
+ "grad_norm": 0.27286848425865173,
+ "learning_rate": 3.7321309945165905e-05,
+ "loss": 0.1197,
+ "num_input_tokens_seen": 3062016,
+ "step": 490
+ },
+ {
+ "epoch": 1.0465116279069768,
+ "grad_norm": 0.5994888544082642,
+ "learning_rate": 3.707629786045198e-05,
+ "loss": 0.1184,
+ "num_input_tokens_seen": 3093184,
+ "step": 495
+ },
+ {
+ "epoch": 1.0570824524312896,
+ "grad_norm": 0.21185331046581268,
+ "learning_rate": 3.682976335889935e-05,
+ "loss": 0.1134,
+ "num_input_tokens_seen": 3124224,
+ "step": 500
+ },
+ {
+ "epoch": 1.0570824524312896,
+ "eval_loss": 0.1192605197429657,
+ "eval_runtime": 40.4991,
+ "eval_samples_per_second": 83.039,
+ "eval_steps_per_second": 10.395,
+ "num_input_tokens_seen": 3124224,
+ "step": 500
+ },
+ {
+ "epoch": 1.0676532769556026,
+ "grad_norm": 0.24811674654483795,
+ "learning_rate": 3.658173752025452e-05,
+ "loss": 0.1193,
+ "num_input_tokens_seen": 3155584,
+ "step": 505
+ },
+ {
+ "epoch": 1.0782241014799154,
+ "grad_norm": 0.3816189765930176,
+ "learning_rate": 3.633225161227169e-05,
+ "loss": 0.115,
+ "num_input_tokens_seen": 3186944,
+ "step": 510
+ },
+ {
+ "epoch": 1.0887949260042284,
+ "grad_norm": 0.28296881914138794,
+ "learning_rate": 3.608133708677093e-05,
+ "loss": 0.1146,
+ "num_input_tokens_seen": 3218304,
+ "step": 515
+ },
+ {
+ "epoch": 1.0993657505285412,
+ "grad_norm": 0.23222461342811584,
+ "learning_rate": 3.5829025575673136e-05,
+ "loss": 0.1109,
+ "num_input_tokens_seen": 3249664,
+ "step": 520
+ },
+ {
+ "epoch": 1.109936575052854,
+ "grad_norm": 0.2331598997116089,
+ "learning_rate": 3.5575348887012336e-05,
+ "loss": 0.1143,
+ "num_input_tokens_seen": 3280960,
+ "step": 525
+ },
+ {
+ "epoch": 1.120507399577167,
+ "grad_norm": 0.2590779662132263,
+ "learning_rate": 3.532033900092571e-05,
+ "loss": 0.1129,
+ "num_input_tokens_seen": 3312320,
+ "step": 530
+ },
+ {
+ "epoch": 1.1310782241014798,
+ "grad_norm": 0.5093595385551453,
+ "learning_rate": 3.506402806562202e-05,
+ "loss": 0.1139,
+ "num_input_tokens_seen": 3343424,
+ "step": 535
+ },
+ {
+ "epoch": 1.1416490486257929,
+ "grad_norm": 0.41402578353881836,
+ "learning_rate": 3.480644839332876e-05,
+ "loss": 0.1122,
+ "num_input_tokens_seen": 3374720,
+ "step": 540
+ },
+ {
+ "epoch": 1.1522198731501057,
+ "grad_norm": 0.2018992006778717,
+ "learning_rate": 3.454763245621871e-05,
+ "loss": 0.111,
+ "num_input_tokens_seen": 3406016,
+ "step": 545
+ },
+ {
+ "epoch": 1.1627906976744187,
+ "grad_norm": 0.7119062542915344,
+ "learning_rate": 3.428761288231621e-05,
+ "loss": 0.1105,
+ "num_input_tokens_seen": 3437184,
+ "step": 550
+ },
+ {
+ "epoch": 1.1733615221987315,
+ "grad_norm": 0.1787111908197403,
+ "learning_rate": 3.402642245138394e-05,
+ "loss": 0.1128,
+ "num_input_tokens_seen": 3468416,
+ "step": 555
+ },
+ {
+ "epoch": 1.1839323467230445,
+ "grad_norm": 0.3644562065601349,
+ "learning_rate": 3.376409409079043e-05,
+ "loss": 0.1066,
+ "num_input_tokens_seen": 3499456,
+ "step": 560
+ },
+ {
+ "epoch": 1.1945031712473573,
+ "grad_norm": 0.18238377571105957,
+ "learning_rate": 3.350066087135903e-05,
+ "loss": 0.1126,
+ "num_input_tokens_seen": 3530944,
+ "step": 565
+ },
+ {
+ "epoch": 1.20507399577167,
+ "grad_norm": 0.4499008357524872,
+ "learning_rate": 3.323615600319883e-05,
+ "loss": 0.1107,
+ "num_input_tokens_seen": 3562368,
+ "step": 570
+ },
+ {
+ "epoch": 1.215644820295983,
+ "grad_norm": 0.21635930240154266,
+ "learning_rate": 3.297061283151791e-05,
+ "loss": 0.1146,
+ "num_input_tokens_seen": 3593600,
+ "step": 575
+ },
+ {
+ "epoch": 1.226215644820296,
+ "grad_norm": 0.2716653645038605,
+ "learning_rate": 3.27040648324197e-05,
+ "loss": 0.1063,
+ "num_input_tokens_seen": 3625152,
+ "step": 580
+ },
+ {
+ "epoch": 1.236786469344609,
+ "grad_norm": 0.48543792963027954,
+ "learning_rate": 3.243654560868268e-05,
+ "loss": 0.1057,
+ "num_input_tokens_seen": 3656192,
+ "step": 585
+ },
+ {
+ "epoch": 1.2473572938689217,
+ "grad_norm": 0.14151746034622192,
+ "learning_rate": 3.216808888552429e-05,
+ "loss": 0.1024,
+ "num_input_tokens_seen": 3687232,
+ "step": 590
+ },
+ {
+ "epoch": 1.2579281183932347,
+ "grad_norm": 0.14911863207817078,
+ "learning_rate": 3.189872850634922e-05,
+ "loss": 0.1006,
+ "num_input_tokens_seen": 3718592,
+ "step": 595
+ },
+ {
+ "epoch": 1.2684989429175475,
+ "grad_norm": 0.2520624101161957,
+ "learning_rate": 3.162849842848294e-05,
+ "loss": 0.1059,
+ "num_input_tokens_seen": 3750336,
+ "step": 600
+ },
+ {
+ "epoch": 1.2684989429175475,
+ "eval_loss": 0.10877919942140579,
+ "eval_runtime": 40.4719,
+ "eval_samples_per_second": 83.095,
+ "eval_steps_per_second": 10.402,
+ "num_input_tokens_seen": 3750336,
+ "step": 600
+ },
+ {
+ "epoch": 1.2790697674418605,
+ "grad_norm": 0.17694608867168427,
+ "learning_rate": 3.1357432718890815e-05,
+ "loss": 0.1079,
+ "num_input_tokens_seen": 3781632,
+ "step": 605
+ },
+ {
+ "epoch": 1.2896405919661733,
+ "grad_norm": 0.2516515254974365,
+ "learning_rate": 3.108556554988338e-05,
+ "loss": 0.1106,
+ "num_input_tokens_seen": 3812928,
+ "step": 610
+ },
+ {
+ "epoch": 1.3002114164904863,
+ "grad_norm": 0.19276951253414154,
+ "learning_rate": 3.081293119480838e-05,
+ "loss": 0.1027,
+ "num_input_tokens_seen": 3843904,
+ "step": 615
+ },
+ {
+ "epoch": 1.3107822410147991,
+ "grad_norm": 0.21422848105430603,
+ "learning_rate": 3.053956402373004e-05,
+ "loss": 0.1015,
+ "num_input_tokens_seen": 3875008,
+ "step": 620
+ },
+ {
+ "epoch": 1.3213530655391121,
+ "grad_norm": 0.3366522789001465,
+ "learning_rate": 3.0265498499096127e-05,
+ "loss": 0.0965,
+ "num_input_tokens_seen": 3906560,
+ "step": 625
+ },
+ {
+ "epoch": 1.331923890063425,
+ "grad_norm": 0.21864481270313263,
+ "learning_rate": 2.9990769171393423e-05,
+ "loss": 0.1106,
+ "num_input_tokens_seen": 3937856,
+ "step": 630
+ },
+ {
+ "epoch": 1.3424947145877377,
+ "grad_norm": 0.15011939406394958,
+ "learning_rate": 2.971541067479207e-05,
+ "loss": 0.0996,
+ "num_input_tokens_seen": 3968832,
+ "step": 635
+ },
+ {
+ "epoch": 1.3530655391120507,
+ "grad_norm": 0.5444221496582031,
+ "learning_rate": 2.9439457722779317e-05,
+ "loss": 0.1049,
+ "num_input_tokens_seen": 4000000,
+ "step": 640
+ },
+ {
+ "epoch": 1.3636363636363638,
+ "grad_norm": 0.2850906252861023,
+ "learning_rate": 2.916294510378335e-05,
+ "loss": 0.1118,
+ "num_input_tokens_seen": 4031424,
+ "step": 645
+ },
+ {
+ "epoch": 1.3742071881606766,
+ "grad_norm": 0.13976424932479858,
+ "learning_rate": 2.8885907676787622e-05,
+ "loss": 0.0967,
+ "num_input_tokens_seen": 4062720,
+ "step": 650
+ },
+ {
+ "epoch": 1.3847780126849893,
+ "grad_norm": 0.3354976773262024,
+ "learning_rate": 2.8608380366936293e-05,
+ "loss": 0.1035,
+ "num_input_tokens_seen": 4093824,
+ "step": 655
+ },
+ {
+ "epoch": 1.3953488372093024,
+ "grad_norm": 0.43213343620300293,
+ "learning_rate": 2.8330398161131376e-05,
+ "loss": 0.1043,
+ "num_input_tokens_seen": 4125120,
+ "step": 660
+ },
+ {
+ "epoch": 1.4059196617336152,
+ "grad_norm": 0.15570229291915894,
+ "learning_rate": 2.8051996103622003e-05,
+ "loss": 0.1045,
+ "num_input_tokens_seen": 4156544,
+ "step": 665
+ },
+ {
+ "epoch": 1.4164904862579282,
+ "grad_norm": 0.2985534965991974,
+ "learning_rate": 2.7773209291586567e-05,
+ "loss": 0.1015,
+ "num_input_tokens_seen": 4187904,
+ "step": 670
+ },
+ {
+ "epoch": 1.427061310782241,
+ "grad_norm": 1.0605559349060059,
+ "learning_rate": 2.749407287070812e-05,
+ "loss": 0.1055,
+ "num_input_tokens_seen": 4219072,
+ "step": 675
+ },
+ {
+ "epoch": 1.437632135306554,
+ "grad_norm": 0.3407301902770996,
+ "learning_rate": 2.7214622030743693e-05,
+ "loss": 0.1045,
+ "num_input_tokens_seen": 4250624,
+ "step": 680
+ },
+ {
+ "epoch": 1.4482029598308668,
+ "grad_norm": 0.4994814395904541,
+ "learning_rate": 2.693489200108802e-05,
+ "loss": 0.1035,
+ "num_input_tokens_seen": 4281920,
+ "step": 685
+ },
+ {
+ "epoch": 1.4587737843551798,
+ "grad_norm": 0.2948305606842041,
+ "learning_rate": 2.6654918046332323e-05,
+ "loss": 0.1035,
+ "num_input_tokens_seen": 4313024,
+ "step": 690
+ },
+ {
+ "epoch": 1.4693446088794926,
+ "grad_norm": 0.24761343002319336,
+ "learning_rate": 2.63747354618186e-05,
+ "loss": 0.0989,
+ "num_input_tokens_seen": 4344384,
+ "step": 695
+ },
+ {
+ "epoch": 1.4799154334038054,
+ "grad_norm": 0.1787084937095642,
+ "learning_rate": 2.6094379569190082e-05,
+ "loss": 0.096,
+ "num_input_tokens_seen": 4375808,
+ "step": 700
+ },
+ {
+ "epoch": 1.4799154334038054,
+ "eval_loss": 0.10834133625030518,
+ "eval_runtime": 40.5125,
+ "eval_samples_per_second": 83.012,
+ "eval_steps_per_second": 10.392,
+ "num_input_tokens_seen": 4375808,
+ "step": 700
+ },
+ {
+ "epoch": 1.4904862579281184,
+ "grad_norm": 0.30317065119743347,
+ "learning_rate": 2.5813885711938357e-05,
+ "loss": 0.1052,
+ "num_input_tokens_seen": 4406912,
+ "step": 705
+ },
+ {
+ "epoch": 1.5010570824524314,
+ "grad_norm": 0.4754318594932556,
+ "learning_rate": 2.553328925094773e-05,
+ "loss": 0.1082,
+ "num_input_tokens_seen": 4437952,
+ "step": 710
+ },
+ {
+ "epoch": 1.5116279069767442,
+ "grad_norm": 0.28454455733299255,
+ "learning_rate": 2.5252625560037386e-05,
+ "loss": 0.1053,
+ "num_input_tokens_seen": 4469312,
+ "step": 715
+ },
+ {
+ "epoch": 1.522198731501057,
+ "grad_norm": 0.20031358301639557,
+ "learning_rate": 2.4971930021501965e-05,
+ "loss": 0.1003,
+ "num_input_tokens_seen": 4500352,
+ "step": 720
+ },
+ {
+ "epoch": 1.53276955602537,
+ "grad_norm": 0.3033943176269531,
+ "learning_rate": 2.4691238021651042e-05,
+ "loss": 0.1027,
+ "num_input_tokens_seen": 4531584,
+ "step": 725
+ },
+ {
+ "epoch": 1.543340380549683,
+ "grad_norm": 0.21204060316085815,
+ "learning_rate": 2.4410584946348054e-05,
+ "loss": 0.1019,
+ "num_input_tokens_seen": 4562752,
+ "step": 730
+ },
+ {
+ "epoch": 1.5539112050739958,
+ "grad_norm": 0.21926385164260864,
+ "learning_rate": 2.413000617654938e-05,
+ "loss": 0.1094,
+ "num_input_tokens_seen": 4593792,
+ "step": 735
+ },
+ {
+ "epoch": 1.5644820295983086,
+ "grad_norm": 0.14374680817127228,
+ "learning_rate": 2.3849537083843936e-05,
+ "loss": 0.0987,
+ "num_input_tokens_seen": 4624832,
+ "step": 740
+ },
+ {
+ "epoch": 1.5750528541226214,
+ "grad_norm": 0.20950725674629211,
+ "learning_rate": 2.3569213025994056e-05,
+ "loss": 0.0973,
+ "num_input_tokens_seen": 4655872,
+ "step": 745
+ },
+ {
+ "epoch": 1.5856236786469344,
+ "grad_norm": 0.20852594077587128,
+ "learning_rate": 2.3289069342478018e-05,
+ "loss": 0.1052,
+ "num_input_tokens_seen": 4686912,
+ "step": 750
+ },
+ {
+ "epoch": 1.5961945031712474,
+ "grad_norm": 0.24457433819770813,
+ "learning_rate": 2.3009141350034937e-05,
+ "loss": 0.1069,
+ "num_input_tokens_seen": 4718208,
+ "step": 755
+ },
+ {
+ "epoch": 1.6067653276955602,
+ "grad_norm": 0.22334040701389313,
+ "learning_rate": 2.2729464338212515e-05,
+ "loss": 0.0994,
+ "num_input_tokens_seen": 4749376,
+ "step": 760
+ },
+ {
+ "epoch": 1.617336152219873,
+ "grad_norm": 0.298551082611084,
+ "learning_rate": 2.2450073564918185e-05,
+ "loss": 0.1027,
+ "num_input_tokens_seen": 4781120,
+ "step": 765
+ },
+ {
+ "epoch": 1.627906976744186,
+ "grad_norm": 0.17930828034877777,
+ "learning_rate": 2.21710042519743e-05,
+ "loss": 0.1026,
+ "num_input_tokens_seen": 4812480,
+ "step": 770
+ },
+ {
+ "epoch": 1.638477801268499,
+ "grad_norm": 0.21870951354503632,
+ "learning_rate": 2.1892291580677822e-05,
+ "loss": 0.0974,
+ "num_input_tokens_seen": 4843712,
+ "step": 775
+ },
+ {
+ "epoch": 1.6490486257928119,
+ "grad_norm": 0.31846246123313904,
+ "learning_rate": 2.1613970687365127e-05,
+ "loss": 0.1131,
+ "num_input_tokens_seen": 4874944,
+ "step": 780
+ },
+ {
+ "epoch": 1.6596194503171247,
+ "grad_norm": 0.16467052698135376,
+ "learning_rate": 2.1336076658982524e-05,
+ "loss": 0.0919,
+ "num_input_tokens_seen": 4906368,
+ "step": 785
+ },
+ {
+ "epoch": 1.6701902748414377,
+ "grad_norm": 0.21385768055915833,
+ "learning_rate": 2.1058644528662945e-05,
+ "loss": 0.1036,
+ "num_input_tokens_seen": 4937536,
+ "step": 790
+ },
+ {
+ "epoch": 1.6807610993657507,
+ "grad_norm": 0.23187273740768433,
+ "learning_rate": 2.0781709271309423e-05,
+ "loss": 0.0956,
+ "num_input_tokens_seen": 4968832,
+ "step": 795
+ },
+ {
+ "epoch": 1.6913319238900635,
+ "grad_norm": 0.1834268420934677,
+ "learning_rate": 2.0505305799185966e-05,
+ "loss": 0.0998,
+ "num_input_tokens_seen": 5000128,
+ "step": 800
+ },
+ {
+ "epoch": 1.6913319238900635,
+ "eval_loss": 0.10008509457111359,
+ "eval_runtime": 40.4757,
+ "eval_samples_per_second": 83.087,
+ "eval_steps_per_second": 10.401,
+ "num_input_tokens_seen": 5000128,
+ "step": 800
+ },
+ {
+ "epoch": 1.7019027484143763,
+ "grad_norm": 0.21062688529491425,
+ "learning_rate": 2.022946895751625e-05,
+ "loss": 0.0956,
+ "num_input_tokens_seen": 5031360,
+ "step": 805
+ },
+ {
+ "epoch": 1.712473572938689,
+ "grad_norm": 1.7325960397720337,
+ "learning_rate": 1.9954233520090843e-05,
+ "loss": 0.1008,
+ "num_input_tokens_seen": 5062720,
+ "step": 810
+ },
+ {
+ "epoch": 1.723044397463002,
+ "grad_norm": 0.3289014399051666,
+ "learning_rate": 1.967963418488335e-05,
+ "loss": 0.0955,
+ "num_input_tokens_seen": 5093888,
+ "step": 815
+ },
+ {
+ "epoch": 1.733615221987315,
+ "grad_norm": 0.5929372906684875,
+ "learning_rate": 1.9405705569676206e-05,
+ "loss": 0.1039,
+ "num_input_tokens_seen": 5125120,
+ "step": 820
+ },
+ {
+ "epoch": 1.744186046511628,
+ "grad_norm": 0.32440027594566345,
+ "learning_rate": 1.9132482207696488e-05,
+ "loss": 0.1005,
+ "num_input_tokens_seen": 5156544,
+ "step": 825
+ },
+ {
+ "epoch": 1.7547568710359407,
+ "grad_norm": 0.9935529828071594,
+ "learning_rate": 1.8859998543262474e-05,
+ "loss": 0.1069,
+ "num_input_tokens_seen": 5187776,
+ "step": 830
+ },
+ {
+ "epoch": 1.7653276955602537,
+ "grad_norm": 0.3179354667663574,
+ "learning_rate": 1.8588288927441334e-05,
+ "loss": 0.1004,
+ "num_input_tokens_seen": 5218944,
+ "step": 835
+ },
+ {
+ "epoch": 1.7758985200845667,
+ "grad_norm": 0.2485605925321579,
+ "learning_rate": 1.831738761371863e-05,
+ "loss": 0.1002,
+ "num_input_tokens_seen": 5250112,
+ "step": 840
+ },
+ {
+ "epoch": 1.7864693446088795,
+ "grad_norm": 0.2269657999277115,
+ "learning_rate": 1.8047328753680083e-05,
+ "loss": 0.0927,
+ "num_input_tokens_seen": 5281088,
+ "step": 845
+ },
+ {
+ "epoch": 1.7970401691331923,
+ "grad_norm": 0.2539865970611572,
+ "learning_rate": 1.777814639270622e-05,
+ "loss": 0.1013,
+ "num_input_tokens_seen": 5312256,
+ "step": 850
+ },
+ {
+ "epoch": 1.8076109936575053,
+ "grad_norm": 0.6908059120178223,
+ "learning_rate": 1.7509874465680377e-05,
+ "loss": 0.0945,
+ "num_input_tokens_seen": 5343744,
+ "step": 855
+ },
+ {
+ "epoch": 1.8181818181818183,
+ "grad_norm": 0.19062310457229614,
+ "learning_rate": 1.724254679271065e-05,
+ "loss": 0.0949,
+ "num_input_tokens_seen": 5374976,
+ "step": 860
+ },
+ {
+ "epoch": 1.8287526427061311,
+ "grad_norm": 0.2800229787826538,
+ "learning_rate": 1.6976197074866315e-05,
+ "loss": 0.0923,
+ "num_input_tokens_seen": 5406144,
+ "step": 865
+ },
+ {
+ "epoch": 1.839323467230444,
+ "grad_norm": 0.18416666984558105,
+ "learning_rate": 1.6710858889929255e-05,
+ "loss": 0.1049,
+ "num_input_tokens_seen": 5437760,
+ "step": 870
+ },
+ {
+ "epoch": 1.8498942917547567,
+ "grad_norm": 0.2170882225036621,
+ "learning_rate": 1.6446565688160897e-05,
+ "loss": 0.0906,
+ "num_input_tokens_seen": 5468992,
+ "step": 875
+ },
+ {
+ "epoch": 1.8604651162790697,
+ "grad_norm": 0.5100112557411194,
+ "learning_rate": 1.6183350788085317e-05,
+ "loss": 0.0942,
+ "num_input_tokens_seen": 5500288,
+ "step": 880
+ },
+ {
+ "epoch": 1.8710359408033828,
+ "grad_norm": 0.2084072232246399,
+ "learning_rate": 1.592124737228881e-05,
+ "loss": 0.1,
+ "num_input_tokens_seen": 5531456,
+ "step": 885
+ },
+ {
+ "epoch": 1.8816067653276956,
+ "grad_norm": 0.28143033385276794,
+ "learning_rate": 1.566028848323674e-05,
+ "loss": 0.0985,
+ "num_input_tokens_seen": 5562624,
+ "step": 890
+ },
+ {
+ "epoch": 1.8921775898520083,
+ "grad_norm": 0.5206342935562134,
+ "learning_rate": 1.540050701910796e-05,
+ "loss": 0.0959,
+ "num_input_tokens_seen": 5593536,
+ "step": 895
+ },
+ {
+ "epoch": 1.9027484143763214,
+ "grad_norm": 0.17240764200687408,
+ "learning_rate": 1.5141935729647461e-05,
+ "loss": 0.1083,
+ "num_input_tokens_seen": 5624576,
+ "step": 900
+ },
+ {
+ "epoch": 1.9027484143763214,
+ "eval_loss": 0.09912961721420288,
+ "eval_runtime": 40.4925,
+ "eval_samples_per_second": 83.052,
+ "eval_steps_per_second": 10.397,
+ "num_input_tokens_seen": 5624576,
+ "step": 900
+ },
+ {
+ "epoch": 1.9133192389006344,
+ "grad_norm": 0.2102658748626709,
+ "learning_rate": 1.4884607212037726e-05,
+ "loss": 0.0942,
+ "num_input_tokens_seen": 5655936,
+ "step": 905
+ },
+ {
+ "epoch": 1.9238900634249472,
+ "grad_norm": 0.18206021189689636,
+ "learning_rate": 1.4628553906789322e-05,
+ "loss": 0.1026,
+ "num_input_tokens_seen": 5686976,
+ "step": 910
+ },
+ {
+ "epoch": 1.93446088794926,
+ "grad_norm": 0.3003005385398865,
+ "learning_rate": 1.4373808093651215e-05,
+ "loss": 0.0933,
+ "num_input_tokens_seen": 5718592,
+ "step": 915
+ },
+ {
+ "epoch": 1.945031712473573,
+ "grad_norm": 0.25162649154663086,
+ "learning_rate": 1.4120401887541423e-05,
+ "loss": 0.0955,
+ "num_input_tokens_seen": 5749952,
+ "step": 920
+ },
+ {
+ "epoch": 1.955602536997886,
+ "grad_norm": 0.19604356586933136,
+ "learning_rate": 1.3868367234498328e-05,
+ "loss": 0.0933,
+ "num_input_tokens_seen": 5780928,
+ "step": 925
+ },
+ {
+ "epoch": 1.9661733615221988,
+ "grad_norm": 0.3053622543811798,
+ "learning_rate": 1.3617735907653434e-05,
+ "loss": 0.0905,
+ "num_input_tokens_seen": 5812032,
+ "step": 930
+ },
+ {
+ "epoch": 1.9767441860465116,
+ "grad_norm": 0.2663424015045166,
+ "learning_rate": 1.3368539503225746e-05,
+ "loss": 0.0959,
+ "num_input_tokens_seen": 5843136,
+ "step": 935
+ },
+ {
+ "epoch": 1.9873150105708244,
+ "grad_norm": 0.25155574083328247,
+ "learning_rate": 1.3120809436538656e-05,
+ "loss": 0.1031,
+ "num_input_tokens_seen": 5874752,
+ "step": 940
+ },
+ {
+ "epoch": 1.9978858350951374,
+ "grad_norm": 0.22895610332489014,
+ "learning_rate": 1.2874576938059402e-05,
+ "loss": 0.0896,
+ "num_input_tokens_seen": 5905728,
+ "step": 945
+ },
+ {
+ "epoch": 2.0084566596194504,
+ "grad_norm": 0.5792025327682495,
+ "learning_rate": 1.2629873049462032e-05,
+ "loss": 0.0931,
+ "num_input_tokens_seen": 5936448,
+ "step": 950
+ },
+ {
+ "epoch": 2.019027484143763,
+ "grad_norm": 0.21641181409358978,
+ "learning_rate": 1.2386728619714091e-05,
+ "loss": 0.0904,
+ "num_input_tokens_seen": 5967808,
+ "step": 955
+ },
+ {
+ "epoch": 2.029598308668076,
+ "grad_norm": 0.32977041602134705,
+ "learning_rate": 1.214517430118753e-05,
+ "loss": 0.0973,
+ "num_input_tokens_seen": 5998720,
+ "step": 960
+ },
+ {
+ "epoch": 2.040169133192389,
+ "grad_norm": 0.3212999105453491,
+ "learning_rate": 1.190524054579455e-05,
+ "loss": 0.0937,
+ "num_input_tokens_seen": 6030016,
+ "step": 965
+ },
+ {
+ "epoch": 2.050739957716702,
+ "grad_norm": 0.2424679398536682,
+ "learning_rate": 1.1666957601148576e-05,
+ "loss": 0.0898,
+ "num_input_tokens_seen": 6061184,
+ "step": 970
+ },
+ {
+ "epoch": 2.061310782241015,
+ "grad_norm": 0.39736026525497437,
+ "learning_rate": 1.1430355506751095e-05,
+ "loss": 0.1006,
+ "num_input_tokens_seen": 6092672,
+ "step": 975
+ },
+ {
+ "epoch": 2.0718816067653276,
+ "grad_norm": 0.2846342623233795,
+ "learning_rate": 1.119546409020461e-05,
+ "loss": 0.0981,
+ "num_input_tokens_seen": 6123712,
+ "step": 980
+ },
+ {
+ "epoch": 2.0824524312896404,
+ "grad_norm": 0.29333314299583435,
+ "learning_rate": 1.0962312963452467e-05,
+ "loss": 0.0943,
+ "num_input_tokens_seen": 6154816,
+ "step": 985
+ },
+ {
+ "epoch": 2.0930232558139537,
+ "grad_norm": 0.4092048108577728,
+ "learning_rate": 1.0730931519045697e-05,
+ "loss": 0.0943,
+ "num_input_tokens_seen": 6186176,
+ "step": 990
+ },
+ {
+ "epoch": 2.1035940803382664,
+ "grad_norm": 0.2516307532787323,
+ "learning_rate": 1.050134892643767e-05,
+ "loss": 0.0843,
+ "num_input_tokens_seen": 6217216,
+ "step": 995
+ },
+ {
+ "epoch": 2.1141649048625792,
+ "grad_norm": 0.2285660356283188,
+ "learning_rate": 1.0273594128306738e-05,
+ "loss": 0.0953,
+ "num_input_tokens_seen": 6248320,
+ "step": 1000
+ },
+ {
+ "epoch": 2.1141649048625792,
+ "eval_loss": 0.09716298431158066,
+ "eval_runtime": 40.4648,
+ "eval_samples_per_second": 83.109,
+ "eval_steps_per_second": 10.404,
+ "num_input_tokens_seen": 6248320,
+ "step": 1000
+ },
+ {
+ "epoch": 2.124735729386892,
+ "grad_norm": 0.21935948729515076,
+ "learning_rate": 1.00476958369076e-05,
+ "loss": 0.0923,
+ "num_input_tokens_seen": 6279552,
+ "step": 1005
+ },
+ {
+ "epoch": 2.1353065539112053,
+ "grad_norm": 0.3147173523902893,
+ "learning_rate": 9.82368253045158e-06,
+ "loss": 0.0847,
+ "num_input_tokens_seen": 6311296,
+ "step": 1010
+ },
+ {
+ "epoch": 2.145877378435518,
+ "grad_norm": 0.208901509642601,
+ "learning_rate": 9.601582449516538e-06,
+ "loss": 0.0921,
+ "num_input_tokens_seen": 6342656,
+ "step": 1015
+ },
+ {
+ "epoch": 2.156448202959831,
+ "grad_norm": 0.24753566086292267,
+ "learning_rate": 9.381423593486629e-06,
+ "loss": 0.0887,
+ "num_input_tokens_seen": 6374208,
+ "step": 1020
+ },
+ {
+ "epoch": 2.1670190274841437,
+ "grad_norm": 0.23306626081466675,
+ "learning_rate": 9.163233717022568e-06,
+ "loss": 0.0924,
+ "num_input_tokens_seen": 6405440,
+ "step": 1025
+ },
+ {
+ "epoch": 2.177589852008457,
+ "grad_norm": 0.22320829331874847,
+ "learning_rate": 8.947040326562638e-06,
+ "loss": 0.0884,
+ "num_input_tokens_seen": 6436928,
+ "step": 1030
+ },
+ {
+ "epoch": 2.1881606765327697,
+ "grad_norm": 0.19100725650787354,
+ "learning_rate": 8.732870676855096e-06,
+ "loss": 0.0937,
+ "num_input_tokens_seen": 6468288,
+ "step": 1035
+ },
+ {
+ "epoch": 2.1987315010570825,
+ "grad_norm": 0.17379307746887207,
+ "learning_rate": 8.520751767522257e-06,
+ "loss": 0.0856,
+ "num_input_tokens_seen": 6499584,
+ "step": 1040
+ },
+ {
+ "epoch": 2.2093023255813953,
+ "grad_norm": 0.19016264379024506,
+ "learning_rate": 8.310710339656707e-06,
+ "loss": 0.0864,
+ "num_input_tokens_seen": 6530752,
+ "step": 1045
+ },
+ {
+ "epoch": 2.219873150105708,
+ "grad_norm": 0.23884597420692444,
+ "learning_rate": 8.102772872450209e-06,
+ "loss": 0.0974,
+ "num_input_tokens_seen": 6561856,
+ "step": 1050
+ },
+ {
+ "epoch": 2.2304439746300213,
+ "grad_norm": 0.23964087665081024,
+ "learning_rate": 7.896965579855486e-06,
+ "loss": 0.0962,
+ "num_input_tokens_seen": 6592960,
+ "step": 1055
+ },
+ {
+ "epoch": 2.241014799154334,
+ "grad_norm": 0.38224127888679504,
+ "learning_rate": 7.693314407281615e-06,
+ "loss": 0.0993,
+ "num_input_tokens_seen": 6624256,
+ "step": 1060
+ },
+ {
+ "epoch": 2.251585623678647,
+ "grad_norm": 0.2022206038236618,
+ "learning_rate": 7.49184502832308e-06,
+ "loss": 0.0915,
+ "num_input_tokens_seen": 6655424,
+ "step": 1065
+ },
+ {
+ "epoch": 2.2621564482029597,
+ "grad_norm": 0.1900220513343811,
+ "learning_rate": 7.292582841523268e-06,
+ "loss": 0.0944,
+ "num_input_tokens_seen": 6686400,
+ "step": 1070
+ },
+ {
+ "epoch": 2.2727272727272725,
+ "grad_norm": 0.23861418664455414,
+ "learning_rate": 7.095552967172503e-06,
+ "loss": 0.0945,
+ "num_input_tokens_seen": 6717376,
+ "step": 1075
+ },
+ {
+ "epoch": 2.2832980972515857,
+ "grad_norm": 0.18786799907684326,
+ "learning_rate": 6.900780244141286e-06,
+ "loss": 0.0896,
+ "num_input_tokens_seen": 6748608,
+ "step": 1080
+ },
+ {
+ "epoch": 2.2938689217758985,
+ "grad_norm": 0.29745545983314514,
+ "learning_rate": 6.708289226748868e-06,
+ "loss": 0.0958,
+ "num_input_tokens_seen": 6779776,
+ "step": 1085
+ },
+ {
+ "epoch": 2.3044397463002113,
+ "grad_norm": 0.23612141609191895,
+ "learning_rate": 6.518104181667844e-06,
+ "loss": 0.0938,
+ "num_input_tokens_seen": 6810880,
+ "step": 1090
+ },
+ {
+ "epoch": 2.3150105708245245,
+ "grad_norm": 0.20987972617149353,
+ "learning_rate": 6.3302490848648864e-06,
+ "loss": 0.0923,
+ "num_input_tokens_seen": 6842112,
+ "step": 1095
+ },
+ {
+ "epoch": 2.3255813953488373,
+ "grad_norm": 0.22207896411418915,
+ "learning_rate": 6.144747618578209e-06,
+ "loss": 0.0887,
+ "num_input_tokens_seen": 6873152,
+ "step": 1100
+ },
+ {
+ "epoch": 2.3255813953488373,
+ "eval_loss": 0.09644165635108948,
+ "eval_runtime": 40.523,
+ "eval_samples_per_second": 82.99,
+ "eval_steps_per_second": 10.389,
+ "num_input_tokens_seen": 6873152,
+ "step": 1100
+ },
+ {
+ "epoch": 2.33615221987315,
+ "grad_norm": 0.37628617882728577,
+ "learning_rate": 5.961623168332006e-06,
+ "loss": 0.0826,
+ "num_input_tokens_seen": 6904512,
+ "step": 1105
+ },
+ {
+ "epoch": 2.346723044397463,
+ "grad_norm": 0.29637783765792847,
+ "learning_rate": 5.780898819988354e-06,
+ "loss": 0.0826,
+ "num_input_tokens_seen": 6936064,
+ "step": 1110
+ },
+ {
+ "epoch": 2.3572938689217757,
+ "grad_norm": 0.22360184788703918,
+ "learning_rate": 5.602597356836803e-06,
+ "loss": 0.0929,
+ "num_input_tokens_seen": 6967424,
+ "step": 1115
+ },
+ {
+ "epoch": 2.367864693446089,
+ "grad_norm": 0.20639285445213318,
+ "learning_rate": 5.426741256722239e-06,
+ "loss": 0.0936,
+ "num_input_tokens_seen": 6998592,
+ "step": 1120
+ },
+ {
+ "epoch": 2.3784355179704018,
+ "grad_norm": 0.25867342948913574,
+ "learning_rate": 5.253352689211114e-06,
+ "loss": 0.0856,
+ "num_input_tokens_seen": 7029952,
+ "step": 1125
+ },
+ {
+ "epoch": 2.3890063424947146,
+ "grad_norm": 0.2777279019355774,
+ "learning_rate": 5.082453512796634e-06,
+ "loss": 0.0923,
+ "num_input_tokens_seen": 7060992,
+ "step": 1130
+ },
+ {
+ "epoch": 2.3995771670190273,
+ "grad_norm": 0.31583741307258606,
+ "learning_rate": 4.914065272143153e-06,
+ "loss": 0.0911,
+ "num_input_tokens_seen": 7092224,
+ "step": 1135
+ },
+ {
+ "epoch": 2.41014799154334,
+ "grad_norm": 0.3207012116909027,
+ "learning_rate": 4.7482091953700705e-06,
+ "loss": 0.0851,
+ "num_input_tokens_seen": 7123776,
+ "step": 1140
+ },
+ {
+ "epoch": 2.4207188160676534,
+ "grad_norm": 0.19293835759162903,
+ "learning_rate": 4.584906191375715e-06,
+ "loss": 0.0956,
+ "num_input_tokens_seen": 7155072,
+ "step": 1145
+ },
+ {
+ "epoch": 2.431289640591966,
+ "grad_norm": 0.19416087865829468,
+ "learning_rate": 4.424176847201411e-06,
+ "loss": 0.0916,
+ "num_input_tokens_seen": 7186240,
+ "step": 1150
+ },
+ {
+ "epoch": 2.441860465116279,
+ "grad_norm": 0.2779330313205719,
+ "learning_rate": 4.266041425436151e-06,
+ "loss": 0.0886,
+ "num_input_tokens_seen": 7217536,
+ "step": 1155
+ },
+ {
+ "epoch": 2.452431289640592,
+ "grad_norm": 0.19005738198757172,
+ "learning_rate": 4.110519861662143e-06,
+ "loss": 0.0852,
+ "num_input_tokens_seen": 7248576,
+ "step": 1160
+ },
+ {
+ "epoch": 2.463002114164905,
+ "grad_norm": 0.2309303879737854,
+ "learning_rate": 3.957631761941641e-06,
+ "loss": 0.0942,
+ "num_input_tokens_seen": 7279808,
+ "step": 1165
+ },
+ {
+ "epoch": 2.473572938689218,
+ "grad_norm": 0.18085496127605438,
+ "learning_rate": 3.807396400345223e-06,
+ "loss": 0.0889,
+ "num_input_tokens_seen": 7311168,
+ "step": 1170
+ },
+ {
+ "epoch": 2.4841437632135306,
+ "grad_norm": 0.2057885229587555,
+ "learning_rate": 3.6598327165220296e-06,
+ "loss": 0.0907,
+ "num_input_tokens_seen": 7342528,
+ "step": 1175
+ },
+ {
+ "epoch": 2.4947145877378434,
+ "grad_norm": 0.18742726743221283,
+ "learning_rate": 3.514959313312061e-06,
+ "loss": 0.091,
+ "num_input_tokens_seen": 7373696,
+ "step": 1180
+ },
+ {
+ "epoch": 2.5052854122621566,
+ "grad_norm": 0.1891215294599533,
+ "learning_rate": 3.372794454401032e-06,
+ "loss": 0.0888,
+ "num_input_tokens_seen": 7404928,
+ "step": 1185
+ },
+ {
+ "epoch": 2.5158562367864694,
+ "grad_norm": 0.42460882663726807,
+ "learning_rate": 3.2333560620178727e-06,
+ "loss": 0.0965,
+ "num_input_tokens_seen": 7436096,
+ "step": 1190
+ },
+ {
+ "epoch": 2.526427061310782,
+ "grad_norm": 0.1930340677499771,
+ "learning_rate": 3.096661714675397e-06,
+ "loss": 0.0879,
+ "num_input_tokens_seen": 7467328,
+ "step": 1195
+ },
+ {
+ "epoch": 2.536997885835095,
+ "grad_norm": 0.18262043595314026,
+ "learning_rate": 2.962728644954191e-06,
+ "loss": 0.0889,
+ "num_input_tokens_seen": 7498688,
+ "step": 1200
+ },
+ {
+ "epoch": 2.536997885835095,
+ "eval_loss": 0.09538523107767105,
+ "eval_runtime": 40.4494,
+ "eval_samples_per_second": 83.141,
+ "eval_steps_per_second": 10.408,
+ "num_input_tokens_seen": 7498688,
+ "step": 1200
+ },
+ {
+ "epoch": 2.547568710359408,
+ "grad_norm": 0.18525810539722443,
+ "learning_rate": 2.8315737373301955e-06,
+ "loss": 0.089,
+ "num_input_tokens_seen": 7529792,
+ "step": 1205
+ },
+ {
+ "epoch": 2.558139534883721,
+ "grad_norm": 0.20218130946159363,
+ "learning_rate": 2.703213526046108e-06,
2051
+ "loss": 0.0965,
2052
+ "num_input_tokens_seen": 7561088,
2053
+ "step": 1210
2054
+ },
2055
+ {
2056
+ "epoch": 2.568710359408034,
2057
+ "grad_norm": 0.2872017025947571,
2058
+ "learning_rate": 2.577664193027013e-06,
2059
+ "loss": 0.0921,
2060
+ "num_input_tokens_seen": 7592448,
2061
+ "step": 1215
2062
+ },
2063
+ {
2064
+ "epoch": 2.5792811839323466,
2065
+ "grad_norm": 0.19029676914215088,
2066
+ "learning_rate": 2.45494156584033e-06,
2067
+ "loss": 0.0831,
2068
+ "num_input_tokens_seen": 7624000,
2069
+ "step": 1220
2070
+ },
2071
+ {
2072
+ "epoch": 2.58985200845666,
2073
+ "grad_norm": 0.22011052072048187,
2074
+ "learning_rate": 2.3350611157005182e-06,
2075
+ "loss": 0.0915,
2076
+ "num_input_tokens_seen": 7655232,
2077
+ "step": 1225
2078
+ },
2079
+ {
2080
+ "epoch": 2.6004228329809727,
2081
+ "grad_norm": 0.26502084732055664,
2082
+ "learning_rate": 2.2180379555186844e-06,
2083
+ "loss": 0.0893,
2084
+ "num_input_tokens_seen": 7686464,
2085
+ "step": 1230
2086
+ },
2087
+ {
2088
+ "epoch": 2.6109936575052854,
2089
+ "grad_norm": 0.21893960237503052,
2090
+ "learning_rate": 2.103886837997307e-06,
2091
+ "loss": 0.0944,
2092
+ "num_input_tokens_seen": 7717824,
2093
+ "step": 1235
2094
+ },
2095
+ {
2096
+ "epoch": 2.6215644820295982,
2097
+ "grad_norm": 0.2057981640100479,
2098
+ "learning_rate": 1.9926221537704794e-06,
2099
+ "loss": 0.0854,
2100
+ "num_input_tokens_seen": 7749120,
2101
+ "step": 1240
2102
+ },
2103
+ {
2104
+ "epoch": 2.632135306553911,
2105
+ "grad_norm": 0.17995457351207733,
2106
+ "learning_rate": 1.884257929589664e-06,
2107
+ "loss": 0.0895,
2108
+ "num_input_tokens_seen": 7780736,
2109
+ "step": 1245
2110
+ },
2111
+ {
2112
+ "epoch": 2.6427061310782243,
2113
+ "grad_norm": 0.22111766040325165,
2114
+ "learning_rate": 1.7788078265554398e-06,
2115
+ "loss": 0.0807,
2116
+ "num_input_tokens_seen": 7812288,
2117
+ "step": 1250
2118
+ },
2119
+ {
2120
+ "epoch": 2.653276955602537,
2121
+ "grad_norm": 0.1810263991355896,
2122
+ "learning_rate": 1.6762851383952616e-06,
2123
+ "loss": 0.082,
2124
+ "num_input_tokens_seen": 7843392,
2125
+ "step": 1255
2126
+ },
2127
+ {
2128
+ "epoch": 2.66384778012685,
2129
+ "grad_norm": 0.21223782002925873,
2130
+ "learning_rate": 1.5767027897875957e-06,
2131
+ "loss": 0.0897,
2132
+ "num_input_tokens_seen": 7874560,
2133
+ "step": 1260
2134
+ },
2135
+ {
2136
+ "epoch": 2.6744186046511627,
2137
+ "grad_norm": 0.20275099575519562,
2138
+ "learning_rate": 1.4800733347325152e-06,
2139
+ "loss": 0.0909,
2140
+ "num_input_tokens_seen": 7905728,
2141
+ "step": 1265
2142
+ },
2143
+ {
2144
+ "epoch": 2.6849894291754755,
2145
+ "grad_norm": 0.3024641275405884,
2146
+ "learning_rate": 1.3864089549691012e-06,
2147
+ "loss": 0.0984,
2148
+ "num_input_tokens_seen": 7937088,
2149
+ "step": 1270
2150
+ },
2151
+ {
2152
+ "epoch": 2.6955602536997887,
2153
+ "grad_norm": 0.18514348566532135,
2154
+ "learning_rate": 1.2957214584396997e-06,
2155
+ "loss": 0.0893,
2156
+ "num_input_tokens_seen": 7968704,
2157
+ "step": 1275
2158
+ },
2159
+ {
2160
+ "epoch": 2.7061310782241015,
2161
+ "grad_norm": 0.16217848658561707,
2162
+ "learning_rate": 1.2080222778013573e-06,
2163
+ "loss": 0.0843,
2164
+ "num_input_tokens_seen": 8000064,
2165
+ "step": 1280
2166
+ },
2167
+ {
2168
+ "epoch": 2.7167019027484143,
2169
+ "grad_norm": 0.19633322954177856,
2170
+ "learning_rate": 1.1233224689845251e-06,
2171
+ "loss": 0.0892,
2172
+ "num_input_tokens_seen": 8031296,
2173
+ "step": 1285
2174
+ },
2175
+ {
2176
+ "epoch": 2.7272727272727275,
2177
+ "grad_norm": 0.254277765750885,
2178
+ "learning_rate": 1.041632709799306e-06,
2179
+ "loss": 0.0883,
2180
+ "num_input_tokens_seen": 8062208,
2181
+ "step": 1290
2182
+ },
2183
+ {
2184
+ "epoch": 2.7378435517970403,
2185
+ "grad_norm": 0.23036494851112366,
2186
+ "learning_rate": 9.629632985893033e-07,
2187
+ "loss": 0.089,
2188
+ "num_input_tokens_seen": 8093440,
2189
+ "step": 1295
2190
+ },
2191
+ {
2192
+ "epoch": 2.748414376321353,
2193
+ "grad_norm": 0.23279865086078644,
2194
+ "learning_rate": 8.873241529333776e-07,
2195
+ "loss": 0.0859,
2196
+ "num_input_tokens_seen": 8124864,
2197
+ "step": 1300
2198
+ },
2199
+ {
2200
+ "epoch": 2.748414376321353,
2201
+ "eval_loss": 0.09499379247426987,
2202
+ "eval_runtime": 40.5097,
2203
+ "eval_samples_per_second": 83.017,
2204
+ "eval_steps_per_second": 10.393,
2205
+ "num_input_tokens_seen": 8124864,
2206
+ "step": 1300
2207
+ },
2208
+ {
2209
+ "epoch": 2.758985200845666,
2210
+ "grad_norm": 0.22809527814388275,
2211
+ "learning_rate": 8.147248083953562e-07,
2212
+ "loss": 0.0937,
2213
+ "num_input_tokens_seen": 8156032,
2214
+ "step": 1305
2215
+ },
2216
+ {
2217
+ "epoch": 2.7695560253699787,
2218
+ "grad_norm": 0.1820860654115677,
2219
+ "learning_rate": 7.451744173219116e-07,
2220
+ "loss": 0.0927,
2221
+ "num_input_tokens_seen": 8187456,
2222
+ "step": 1310
2223
+ },
2224
+ {
2225
+ "epoch": 2.780126849894292,
2226
+ "grad_norm": 0.2634679973125458,
2227
+ "learning_rate": 6.786817476887725e-07,
2228
+ "loss": 0.084,
2229
+ "num_input_tokens_seen": 8218880,
2230
+ "step": 1315
2231
+ },
2232
+ {
2233
+ "epoch": 2.7906976744186047,
2234
+ "grad_norm": 0.20365993678569794,
2235
+ "learning_rate": 6.152551819953667e-07,
2236
+ "loss": 0.0862,
2237
+ "num_input_tokens_seen": 8250048,
2238
+ "step": 1320
2239
+ },
2240
+ {
2241
+ "epoch": 2.8012684989429175,
2242
+ "grad_norm": 0.24735113978385925,
2243
+ "learning_rate": 5.549027162080666e-07,
2244
+ "loss": 0.0967,
2245
+ "num_input_tokens_seen": 8281408,
2246
+ "step": 1325
2247
+ },
2248
+ {
2249
+ "epoch": 2.8118393234672303,
2250
+ "grad_norm": 0.21733231842517853,
2251
+ "learning_rate": 4.976319587521788e-07,
2252
+ "loss": 0.0878,
2253
+ "num_input_tokens_seen": 8312448,
2254
+ "step": 1330
2255
+ },
2256
+ {
2257
+ "epoch": 2.822410147991543,
2258
+ "grad_norm": 0.39031949639320374,
2259
+ "learning_rate": 4.434501295527582e-07,
2260
+ "loss": 0.0923,
2261
+ "num_input_tokens_seen": 8343488,
2262
+ "step": 1335
2263
+ },
2264
+ {
2265
+ "epoch": 2.8329809725158563,
2266
+ "grad_norm": 0.1717582643032074,
2267
+ "learning_rate": 3.9236405912442544e-07,
2268
+ "loss": 0.0887,
2269
+ "num_input_tokens_seen": 8374976,
2270
+ "step": 1340
2271
+ },
2272
+ {
2273
+ "epoch": 2.843551797040169,
2274
+ "grad_norm": 0.19292984902858734,
2275
+ "learning_rate": 3.44380187710272e-07,
2276
+ "loss": 0.0862,
2277
+ "num_input_tokens_seen": 8406208,
2278
+ "step": 1345
2279
+ },
2280
+ {
2281
+ "epoch": 2.854122621564482,
2282
+ "grad_norm": 0.19864223897457123,
2283
+ "learning_rate": 2.995045644699518e-07,
2284
+ "loss": 0.0862,
2285
+ "num_input_tokens_seen": 8437440,
2286
+ "step": 1350
2287
+ },
2288
+ {
2289
+ "epoch": 2.864693446088795,
2290
+ "grad_norm": 0.17732787132263184,
2291
+ "learning_rate": 2.577428467170989e-07,
2292
+ "loss": 0.0878,
2293
+ "num_input_tokens_seen": 8468416,
2294
+ "step": 1355
2295
+ },
2296
+ {
2297
+ "epoch": 2.875264270613108,
2298
+ "grad_norm": 0.1831037551164627,
2299
+ "learning_rate": 2.1910029920610974e-07,
2300
+ "loss": 0.0881,
2301
+ "num_input_tokens_seen": 8500032,
2302
+ "step": 1360
2303
+ },
2304
+ {
2305
+ "epoch": 2.8858350951374208,
2306
+ "grad_norm": 0.16692957282066345,
2307
+ "learning_rate": 1.8358179346845694e-07,
2308
+ "loss": 0.0913,
2309
+ "num_input_tokens_seen": 8531200,
2310
+ "step": 1365
2311
+ },
2312
+ {
2313
+ "epoch": 2.8964059196617336,
2314
+ "grad_norm": 0.19147560000419617,
2315
+ "learning_rate": 1.51191807198528e-07,
2316
+ "loss": 0.0899,
2317
+ "num_input_tokens_seen": 8562240,
2318
+ "step": 1370
2319
+ },
2320
+ {
2321
+ "epoch": 2.9069767441860463,
2322
+ "grad_norm": 0.1842157244682312,
2323
+ "learning_rate": 1.2193442368915732e-07,
2324
+ "loss": 0.0813,
2325
+ "num_input_tokens_seen": 8593600,
2326
+ "step": 1375
2327
+ },
2328
+ {
2329
+ "epoch": 2.9175475687103596,
2330
+ "grad_norm": 0.18177741765975952,
2331
+ "learning_rate": 9.581333131685467e-08,
2332
+ "loss": 0.0874,
2333
+ "num_input_tokens_seen": 8624768,
2334
+ "step": 1380
2335
+ },
2336
+ {
2337
+ "epoch": 2.9281183932346724,
2338
+ "grad_norm": 0.2615036070346832,
2339
+ "learning_rate": 7.283182307681324e-08,
2340
+ "loss": 0.0915,
2341
+ "num_input_tokens_seen": 8655808,
2342
+ "step": 1385
2343
+ },
2344
+ {
2345
+ "epoch": 2.938689217758985,
2346
+ "grad_norm": 0.30790311098098755,
2347
+ "learning_rate": 5.299279616779174e-08,
2348
+ "loss": 0.0835,
2349
+ "num_input_tokens_seen": 8687232,
2350
+ "step": 1390
2351
+ },
2352
+ {
2353
+ "epoch": 2.949260042283298,
2354
+ "grad_norm": 0.24962230026721954,
2355
+ "learning_rate": 3.629875162686203e-08,
2356
+ "loss": 0.092,
2357
+ "num_input_tokens_seen": 8718592,
2358
+ "step": 1395
2359
+ },
2360
+ {
2361
+ "epoch": 2.9598308668076108,
2362
+ "grad_norm": 0.2310824692249298,
2363
+ "learning_rate": 2.2751794014111428e-08,
2364
+ "loss": 0.0883,
2365
+ "num_input_tokens_seen": 8749760,
2366
+ "step": 1400
2367
+ },
2368
+ {
2369
+ "epoch": 2.9598308668076108,
2370
+ "eval_loss": 0.09467408061027527,
2371
+ "eval_runtime": 40.4856,
2372
+ "eval_samples_per_second": 83.067,
2373
+ "eval_steps_per_second": 10.399,
2374
+ "num_input_tokens_seen": 8749760,
2375
+ "step": 1400
2376
+ },
2377
+ {
2378
+ "epoch": 2.970401691331924,
2379
+ "grad_norm": 0.21396443247795105,
2380
+ "learning_rate": 1.2353631147335454e-08,
2381
+ "loss": 0.0872,
2382
+ "num_input_tokens_seen": 8780992,
2383
+ "step": 1405
2384
+ },
2385
+ {
2386
+ "epoch": 2.980972515856237,
2387
+ "grad_norm": 0.16851051151752472,
2388
+ "learning_rate": 5.105573886735049e-09,
2389
+ "loss": 0.0822,
2390
+ "num_input_tokens_seen": 8812224,
2391
+ "step": 1410
2392
+ },
2393
+ {
2394
+ "epoch": 2.9915433403805496,
2395
+ "grad_norm": 0.22018083930015564,
2396
+ "learning_rate": 1.0085359696654362e-09,
2397
+ "loss": 0.0901,
2398
+ "num_input_tokens_seen": 8843200,
2399
+ "step": 1415
2400
+ },
2401
+ {
2402
+ "epoch": 3.0,
2403
+ "num_input_tokens_seen": 8867536,
2404
+ "step": 1419,
2405
+ "total_flos": 3.600530754427945e+17,
2406
+ "train_loss": 0.2131162985812786,
2407
+ "train_runtime": 4701.6415,
2408
+ "train_samples_per_second": 19.312,
2409
+ "train_steps_per_second": 0.302
2410
+ }
2411
+ ],
2412
+ "logging_steps": 5,
2413
+ "max_steps": 1419,
2414
+ "num_input_tokens_seen": 8867536,
2415
+ "num_train_epochs": 3,
2416
+ "save_steps": 100,
2417
+ "stateful_callbacks": {
2418
+ "TrainerControl": {
2419
+ "args": {
2420
+ "should_epoch_stop": false,
2421
+ "should_evaluate": false,
2422
+ "should_log": false,
2423
+ "should_save": true,
2424
+ "should_training_stop": true
2425
+ },
2426
+ "attributes": {}
2427
+ }
2428
+ },
2429
+ "total_flos": 3.600530754427945e+17,
2430
+ "train_batch_size": 8,
2431
+ "trial_name": null,
2432
+ "trial_params": null
2433
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6fc84cc3c5835f38c5b3721c54b978793ec53284573feda6cdf598a3f1b2a496
+ size 5688
training_args.yaml ADDED
@@ -0,0 +1,37 @@
+ bf16: true
+ cutoff_len: 2048
+ dataset: adpr_train
+ dataset_dir: data
+ ddp_timeout: 180000000
+ do_train: true
+ eval_steps: 100
+ eval_strategy: steps
+ finetuning_type: lora
+ flash_attn: auto
+ gradient_accumulation_steps: 8
+ include_num_input_tokens_seen: true
+ learning_rate: 5.0e-05
+ logging_steps: 5
+ lora_alpha: 128
+ lora_dropout: 0.01
+ lora_rank: 64
+ lora_target: q_proj,v_proj,k_proj,o_proj,gate_proj,down_proj,up_proj
+ lr_scheduler_type: cosine
+ max_grad_norm: 1.0
+ max_samples: 100000
+ model_name_or_path: GreatCaptainNemo/ProLLaMA_Stage_1
+ num_train_epochs: 3.0
+ optim: adamw_torch
+ output_dir: saves/Custom/lora/train_2025-03-11-22-40-04
+ packing: false
+ per_device_eval_batch_size: 8
+ per_device_train_batch_size: 8
+ plot_loss: true
+ preprocessing_num_workers: 16
+ report_to: none
+ save_steps: 100
+ stage: sft
+ template: alpaca
+ trust_remote_code: true
+ val_size: 0.1
+ warmup_steps: 20
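Per `finetuning_type: lora`, this commit ships adapter weights only, so inference needs the base model plus the adapter. A minimal sketch with PEFT, assuming the versions from the model card; `ADAPTER_ID` and the instruction text are placeholders, and the prompt follows the alpaca template used for training (`template: alpaca`):

```python
# Sketch: apply this LoRA adapter to the base model for inference.
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

BASE_ID = "GreatCaptainNemo/ProLLaMA_Stage_1"  # model_name_or_path above
ADAPTER_ID = "path/to/this/adapter"            # hypothetical: repo id or local path

tokenizer = AutoTokenizer.from_pretrained(BASE_ID, trust_remote_code=True)
base = AutoModelForCausalLM.from_pretrained(
    BASE_ID, torch_dtype=torch.bfloat16, trust_remote_code=True  # bf16, as trained
)
model = PeftModel.from_pretrained(base, ADAPTER_ID)

prompt = (
    "Below is an instruction that describes a task. "
    "Write a response that appropriately completes the request.\n\n"
    "### Instruction:\n<instruction text>\n\n### Response:"
)
inputs = tokenizer(prompt, return_tensors="pt")
with torch.no_grad():
    out = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```

If standalone weights are preferred, `model.merge_and_unload()` folds the adapter into the base model at the cost of losing the small-adapter footprint.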
training_eval_loss.png ADDED (binary plot of evaluation loss, tracked via Git LFS)
  • SHA256: 8955642a7c3b95415874d382d673e1f9845ef93a17283e99918f63fe5d73e502
  • Pointer size: 130 Bytes
  • Size of remote file: 38.8 kB
training_loss.png ADDED (binary plot of training loss, tracked via Git LFS)
  • SHA256: b3a1b5607093702b9189544da88a588cb34770a4ad7c58235e64b78073a682c1
  • Pointer size: 130 Bytes
  • Size of remote file: 29.4 kB