{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 615,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04884004884004884,
      "grad_norm": 0.6565904021263123,
      "learning_rate": 0.00018,
      "loss": 2.306,
      "step": 10
    },
    {
      "epoch": 0.09768009768009768,
      "grad_norm": 0.4782625734806061,
      "learning_rate": 0.00019702479338842976,
      "loss": 1.8776,
      "step": 20
    },
    {
      "epoch": 0.14652014652014653,
      "grad_norm": 0.34842449426651,
      "learning_rate": 0.0001937190082644628,
      "loss": 1.986,
      "step": 30
    },
    {
      "epoch": 0.19536019536019536,
      "grad_norm": 0.5142027735710144,
      "learning_rate": 0.0001904132231404959,
      "loss": 1.953,
      "step": 40
    },
    {
      "epoch": 0.2442002442002442,
      "grad_norm": 0.4530898332595825,
      "learning_rate": 0.00018710743801652891,
      "loss": 1.8595,
      "step": 50
    },
    {
      "epoch": 0.29304029304029305,
      "grad_norm": 0.4601179361343384,
      "learning_rate": 0.000183801652892562,
      "loss": 1.9006,
      "step": 60
    },
    {
      "epoch": 0.3418803418803419,
      "grad_norm": 0.49045854806900024,
      "learning_rate": 0.00018049586776859504,
      "loss": 1.6583,
      "step": 70
    },
    {
      "epoch": 0.3907203907203907,
      "grad_norm": 0.4943140745162964,
      "learning_rate": 0.0001771900826446281,
      "loss": 1.841,
      "step": 80
    },
    {
      "epoch": 0.43956043956043955,
      "grad_norm": 0.43958303332328796,
      "learning_rate": 0.00017388429752066115,
      "loss": 1.7377,
      "step": 90
    },
    {
      "epoch": 0.4884004884004884,
      "grad_norm": 0.4475325047969818,
      "learning_rate": 0.00017057851239669423,
      "loss": 1.9054,
      "step": 100
    },
    {
      "epoch": 0.5372405372405372,
      "grad_norm": 0.4703655242919922,
      "learning_rate": 0.00016727272727272728,
      "loss": 1.7999,
      "step": 110
    },
    {
      "epoch": 0.5860805860805861,
      "grad_norm": 0.47283482551574707,
      "learning_rate": 0.00016396694214876033,
      "loss": 1.8296,
      "step": 120
    },
    {
      "epoch": 0.6349206349206349,
      "grad_norm": 0.45280855894088745,
      "learning_rate": 0.00016066115702479338,
      "loss": 1.6442,
      "step": 130
    },
    {
      "epoch": 0.6837606837606838,
      "grad_norm": 0.4415304362773895,
      "learning_rate": 0.00015735537190082646,
      "loss": 1.8178,
      "step": 140
    },
    {
      "epoch": 0.7326007326007326,
      "grad_norm": 0.6615126729011536,
      "learning_rate": 0.0001540495867768595,
      "loss": 1.9372,
      "step": 150
    },
    {
      "epoch": 0.7814407814407814,
      "grad_norm": 0.5677676200866699,
      "learning_rate": 0.00015074380165289256,
      "loss": 1.6327,
      "step": 160
    },
    {
      "epoch": 0.8302808302808303,
      "grad_norm": 0.528029203414917,
      "learning_rate": 0.00014743801652892564,
      "loss": 1.7825,
      "step": 170
    },
    {
      "epoch": 0.8791208791208791,
      "grad_norm": 0.6582658290863037,
      "learning_rate": 0.0001441322314049587,
      "loss": 1.9638,
      "step": 180
    },
    {
      "epoch": 0.927960927960928,
      "grad_norm": 0.6919686198234558,
      "learning_rate": 0.00014082644628099175,
      "loss": 1.8914,
      "step": 190
    },
    {
      "epoch": 0.9768009768009768,
      "grad_norm": 0.3584222197532654,
      "learning_rate": 0.0001375206611570248,
      "loss": 1.7909,
      "step": 200
    },
    {
      "epoch": 1.0,
      "eval_loss": 1.735278844833374,
      "eval_runtime": 19.9432,
      "eval_samples_per_second": 9.126,
      "eval_steps_per_second": 1.153,
      "step": 205
    },
    {
      "epoch": 1.0244200244200243,
      "grad_norm": 0.6349706649780273,
      "learning_rate": 0.00013421487603305788,
      "loss": 1.8237,
      "step": 210
    },
    {
      "epoch": 1.0732600732600732,
      "grad_norm": 0.338216632604599,
      "learning_rate": 0.00013090909090909093,
      "loss": 1.7347,
      "step": 220
    },
    {
      "epoch": 1.122100122100122,
      "grad_norm": 0.43673205375671387,
      "learning_rate": 0.00012760330578512398,
      "loss": 1.6828,
      "step": 230
    },
    {
      "epoch": 1.170940170940171,
      "grad_norm": 0.3703135550022125,
      "learning_rate": 0.00012429752066115703,
      "loss": 1.6821,
      "step": 240
    },
    {
      "epoch": 1.2197802197802199,
      "grad_norm": 0.4435603618621826,
      "learning_rate": 0.0001209917355371901,
      "loss": 1.7656,
      "step": 250
    },
    {
      "epoch": 1.2686202686202686,
      "grad_norm": 0.5340843200683594,
      "learning_rate": 0.00011768595041322315,
      "loss": 1.792,
      "step": 260
    },
    {
      "epoch": 1.3174603174603174,
      "grad_norm": 0.37987396121025085,
      "learning_rate": 0.00011438016528925621,
      "loss": 1.7761,
      "step": 270
    },
    {
      "epoch": 1.3663003663003663,
      "grad_norm": 0.33273303508758545,
      "learning_rate": 0.00011107438016528926,
      "loss": 1.6398,
      "step": 280
    },
    {
      "epoch": 1.4151404151404152,
      "grad_norm": 0.4231277406215668,
      "learning_rate": 0.00010776859504132233,
      "loss": 1.8901,
      "step": 290
    },
    {
      "epoch": 1.463980463980464,
      "grad_norm": 0.2134978324174881,
      "learning_rate": 0.00010446280991735538,
      "loss": 1.8604,
      "step": 300
    },
    {
      "epoch": 1.5128205128205128,
      "grad_norm": 0.43554314970970154,
      "learning_rate": 0.00010115702479338845,
      "loss": 1.7911,
      "step": 310
    },
    {
      "epoch": 1.5616605616605617,
      "grad_norm": 0.20964309573173523,
      "learning_rate": 9.785123966942148e-05,
      "loss": 1.6337,
      "step": 320
    },
    {
      "epoch": 1.6105006105006106,
      "grad_norm": 0.5670686364173889,
      "learning_rate": 9.454545454545455e-05,
      "loss": 1.7667,
      "step": 330
    },
    {
      "epoch": 1.6593406593406592,
      "grad_norm": 0.28007999062538147,
      "learning_rate": 9.12396694214876e-05,
      "loss": 1.7941,
      "step": 340
    },
    {
      "epoch": 1.7081807081807083,
      "grad_norm": 0.244796484708786,
      "learning_rate": 8.793388429752067e-05,
      "loss": 1.6848,
      "step": 350
    },
    {
      "epoch": 1.757020757020757,
      "grad_norm": 0.2684899866580963,
      "learning_rate": 8.462809917355372e-05,
      "loss": 1.8238,
      "step": 360
    },
    {
      "epoch": 1.8058608058608059,
      "grad_norm": 0.2789863049983978,
      "learning_rate": 8.132231404958678e-05,
      "loss": 1.7551,
      "step": 370
    },
    {
      "epoch": 1.8547008547008548,
      "grad_norm": 0.2315087765455246,
      "learning_rate": 7.801652892561983e-05,
      "loss": 1.6725,
      "step": 380
    },
    {
      "epoch": 1.9035409035409034,
      "grad_norm": 0.2351648360490799,
      "learning_rate": 7.47107438016529e-05,
      "loss": 1.7861,
      "step": 390
    },
    {
      "epoch": 1.9523809523809523,
      "grad_norm": 0.15907102823257446,
      "learning_rate": 7.140495867768595e-05,
      "loss": 1.6753,
      "step": 400
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.15325959026813507,
      "learning_rate": 6.8099173553719e-05,
      "loss": 1.8352,
      "step": 410
    },
    {
      "epoch": 2.0,
      "eval_loss": 1.7097043991088867,
      "eval_runtime": 19.9498,
      "eval_samples_per_second": 9.123,
      "eval_steps_per_second": 1.153,
      "step": 410
    },
    {
      "epoch": 2.0488400488400487,
      "grad_norm": 0.10827562212944031,
      "learning_rate": 6.479338842975207e-05,
      "loss": 1.7363,
      "step": 420
    },
    {
      "epoch": 2.0976800976800978,
      "grad_norm": 0.1293765902519226,
      "learning_rate": 6.148760330578512e-05,
      "loss": 1.726,
      "step": 430
    },
    {
      "epoch": 2.1465201465201464,
      "grad_norm": 0.1775052547454834,
      "learning_rate": 5.818181818181818e-05,
      "loss": 1.6641,
      "step": 440
    },
    {
      "epoch": 2.1953601953601956,
      "grad_norm": 0.10628961026668549,
      "learning_rate": 5.487603305785124e-05,
      "loss": 1.8193,
      "step": 450
    },
    {
      "epoch": 2.244200244200244,
      "grad_norm": 0.11912493407726288,
      "learning_rate": 5.1570247933884295e-05,
      "loss": 1.6497,
      "step": 460
    },
    {
      "epoch": 2.293040293040293,
      "grad_norm": 0.17353476583957672,
      "learning_rate": 4.826446280991736e-05,
      "loss": 1.7043,
      "step": 470
    },
    {
      "epoch": 2.341880341880342,
      "grad_norm": 0.11081728339195251,
      "learning_rate": 4.495867768595042e-05,
      "loss": 1.7964,
      "step": 480
    },
    {
      "epoch": 2.3907203907203907,
      "grad_norm": 0.09711920469999313,
      "learning_rate": 4.165289256198348e-05,
      "loss": 1.8315,
      "step": 490
    },
    {
      "epoch": 2.4395604395604398,
      "grad_norm": 0.1099553108215332,
      "learning_rate": 3.8347107438016536e-05,
      "loss": 1.7696,
      "step": 500
    },
    {
      "epoch": 2.4884004884004884,
      "grad_norm": 0.12278404831886292,
      "learning_rate": 3.504132231404959e-05,
      "loss": 1.7851,
      "step": 510
    },
    {
      "epoch": 2.537240537240537,
      "grad_norm": 0.08975676447153091,
      "learning_rate": 3.1735537190082646e-05,
      "loss": 1.9197,
      "step": 520
    },
    {
      "epoch": 2.586080586080586,
      "grad_norm": 0.08411797136068344,
      "learning_rate": 2.8429752066115704e-05,
      "loss": 1.6497,
      "step": 530
    },
    {
      "epoch": 2.634920634920635,
      "grad_norm": 0.10348548740148544,
      "learning_rate": 2.5123966942148763e-05,
      "loss": 1.5763,
      "step": 540
    },
    {
      "epoch": 2.683760683760684,
      "grad_norm": 0.10242890566587448,
      "learning_rate": 2.1818181818181818e-05,
      "loss": 1.5551,
      "step": 550
    },
    {
      "epoch": 2.7326007326007327,
      "grad_norm": 0.09305644780397415,
      "learning_rate": 1.8512396694214876e-05,
      "loss": 1.8751,
      "step": 560
    },
    {
      "epoch": 2.7814407814407813,
      "grad_norm": 0.10115351527929306,
      "learning_rate": 1.5206611570247933e-05,
      "loss": 1.6887,
      "step": 570
    },
    {
      "epoch": 2.8302808302808304,
      "grad_norm": 0.0991702601313591,
      "learning_rate": 1.1900826446280993e-05,
      "loss": 1.6846,
      "step": 580
    },
    {
      "epoch": 2.879120879120879,
      "grad_norm": 0.11022075265645981,
      "learning_rate": 8.59504132231405e-06,
      "loss": 1.88,
      "step": 590
    },
    {
      "epoch": 2.927960927960928,
      "grad_norm": 0.12525387108325958,
      "learning_rate": 5.289256198347107e-06,
      "loss": 1.7041,
      "step": 600
    },
    {
      "epoch": 2.976800976800977,
      "grad_norm": 0.1043957769870758,
      "learning_rate": 1.9834710743801654e-06,
      "loss": 1.8158,
      "step": 610
    },
    {
      "epoch": 3.0,
      "eval_loss": 1.7082070112228394,
      "eval_runtime": 19.9486,
      "eval_samples_per_second": 9.123,
      "eval_steps_per_second": 1.153,
      "step": 615
    }
  ],
  "logging_steps": 10,
  "max_steps": 615,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.149830720623739e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}