{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 48,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.06,
      "learning_rate": 5e-06,
      "loss": 1.4899,
      "step": 1
    },
    {
      "epoch": 0.12,
      "learning_rate": 1e-05,
      "loss": 1.4678,
      "step": 2
    },
    {
      "epoch": 0.19,
      "learning_rate": 9.988343845952697e-06,
      "loss": 1.4545,
      "step": 3
    },
    {
      "epoch": 0.25,
      "learning_rate": 9.953429730181653e-06,
      "loss": 1.3429,
      "step": 4
    },
    {
      "epoch": 0.31,
      "learning_rate": 9.895420438411616e-06,
      "loss": 1.212,
      "step": 5
    },
    {
      "epoch": 0.38,
      "learning_rate": 9.814586436738998e-06,
      "loss": 1.1148,
      "step": 6
    },
    {
      "epoch": 0.44,
      "learning_rate": 9.711304610594104e-06,
      "loss": 1.1304,
      "step": 7
    },
    {
      "epoch": 0.5,
      "learning_rate": 9.586056507527266e-06,
      "loss": 1.0577,
      "step": 8
    },
    {
      "epoch": 0.56,
      "learning_rate": 9.439426092011877e-06,
      "loss": 1.0367,
      "step": 9
    },
    {
      "epoch": 0.62,
      "learning_rate": 9.272097022732444e-06,
      "loss": 1.0212,
      "step": 10
    },
    {
      "epoch": 0.69,
      "learning_rate": 9.08484946505221e-06,
      "loss": 0.9874,
      "step": 11
    },
    {
      "epoch": 0.75,
      "learning_rate": 8.8785564535221e-06,
      "loss": 0.9931,
      "step": 12
    },
    {
      "epoch": 0.81,
      "learning_rate": 8.65417982139062e-06,
      "loss": 0.958,
      "step": 13
    },
    {
      "epoch": 0.88,
      "learning_rate": 8.412765716093273e-06,
      "loss": 0.9636,
      "step": 14
    },
    {
      "epoch": 0.94,
      "learning_rate": 8.155439721630265e-06,
      "loss": 0.9475,
      "step": 15
    },
    {
      "epoch": 1.0,
      "learning_rate": 7.883401610574338e-06,
      "loss": 0.9285,
      "step": 16
    },
    {
      "epoch": 1.06,
      "learning_rate": 7.597919750177168e-06,
      "loss": 0.8847,
      "step": 17
    },
    {
      "epoch": 1.12,
      "learning_rate": 7.300325188655762e-06,
      "loss": 0.8571,
      "step": 18
    },
    {
      "epoch": 1.19,
      "learning_rate": 6.9920054492312086e-06,
      "loss": 0.8835,
      "step": 19
    },
    {
      "epoch": 1.25,
      "learning_rate": 6.674398060854931e-06,
      "loss": 0.8513,
      "step": 20
    },
    {
      "epoch": 1.31,
      "learning_rate": 6.348983855785122e-06,
      "loss": 0.8182,
      "step": 21
    },
    {
      "epoch": 1.38,
      "learning_rate": 6.0172800652631706e-06,
      "loss": 0.8267,
      "step": 22
    },
    {
      "epoch": 1.44,
      "learning_rate": 5.680833245481234e-06,
      "loss": 0.8249,
      "step": 23
    },
    {
      "epoch": 1.5,
      "learning_rate": 5.341212066823356e-06,
      "loss": 0.8238,
      "step": 24
    },
    {
      "epoch": 1.56,
      "learning_rate": 5e-06,
      "loss": 0.7924,
      "step": 25
    },
    {
      "epoch": 1.62,
      "learning_rate": 4.6587879331766465e-06,
      "loss": 0.812,
      "step": 26
    },
    {
      "epoch": 1.69,
      "learning_rate": 4.319166754518768e-06,
      "loss": 0.8352,
      "step": 27
    },
    {
      "epoch": 1.75,
      "learning_rate": 3.982719934736832e-06,
      "loss": 0.8035,
      "step": 28
    },
    {
      "epoch": 1.81,
      "learning_rate": 3.6510161442148783e-06,
      "loss": 0.7945,
      "step": 29
    },
    {
      "epoch": 1.88,
      "learning_rate": 3.3256019391450696e-06,
      "loss": 0.812,
      "step": 30
    },
    {
      "epoch": 1.94,
      "learning_rate": 3.007994550768793e-06,
      "loss": 0.7887,
      "step": 31
    },
    {
      "epoch": 2.0,
      "learning_rate": 2.6996748113442397e-06,
      "loss": 0.763,
      "step": 32
    },
    {
      "epoch": 2.06,
      "learning_rate": 2.4020802498228333e-06,
      "loss": 0.7638,
      "step": 33
    },
    {
      "epoch": 2.12,
      "learning_rate": 2.1165983894256647e-06,
      "loss": 0.7548,
      "step": 34
    },
    {
      "epoch": 2.19,
      "learning_rate": 1.8445602783697375e-06,
      "loss": 0.7549,
      "step": 35
    },
    {
      "epoch": 2.25,
      "learning_rate": 1.5872342839067305e-06,
      "loss": 0.7466,
      "step": 36
    },
    {
      "epoch": 2.31,
      "learning_rate": 1.3458201786093795e-06,
      "loss": 0.7525,
      "step": 37
    },
    {
      "epoch": 2.38,
      "learning_rate": 1.1214435464779006e-06,
      "loss": 0.7407,
      "step": 38
    },
    {
      "epoch": 2.44,
      "learning_rate": 9.151505349477901e-07,
      "loss": 0.727,
      "step": 39
    },
    {
      "epoch": 2.5,
      "learning_rate": 7.279029772675572e-07,
      "loss": 0.7484,
      "step": 40
    },
    {
      "epoch": 2.56,
      "learning_rate": 5.60573907988124e-07,
      "loss": 0.7489,
      "step": 41
    },
    {
      "epoch": 2.62,
      "learning_rate": 4.139434924727359e-07,
      "loss": 0.7311,
      "step": 42
    },
    {
      "epoch": 2.69,
      "learning_rate": 2.88695389405898e-07,
      "loss": 0.7141,
      "step": 43
    },
    {
      "epoch": 2.75,
      "learning_rate": 1.8541356326100436e-07,
      "loss": 0.7456,
      "step": 44
    },
    {
      "epoch": 2.81,
      "learning_rate": 1.0457956158838545e-07,
      "loss": 0.741,
      "step": 45
    },
    {
      "epoch": 2.88,
      "learning_rate": 4.657026981834623e-08,
      "loss": 0.744,
      "step": 46
    },
    {
      "epoch": 2.94,
      "learning_rate": 1.1656154047303691e-08,
      "loss": 0.7353,
      "step": 47
    },
    {
      "epoch": 3.0,
      "learning_rate": 0.0,
      "loss": 0.7103,
      "step": 48
    },
    {
      "epoch": 3.0,
      "step": 48,
      "total_flos": 16927777751040.0,
      "train_loss": 0.8986819609999657,
      "train_runtime": 178.1736,
      "train_samples_per_second": 33.675,
      "train_steps_per_second": 0.269
    }
  ],
  "logging_steps": 1.0,
  "max_steps": 48,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 50000,
  "total_flos": 16927777751040.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}