{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 808,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.024752475247524754,
      "grad_norm": 1.4873312819674722,
      "learning_rate": 1.111111111111111e-06,
      "loss": 0.4428,
      "step": 10
    },
    {
      "epoch": 0.04950495049504951,
      "grad_norm": 0.860149696440848,
      "learning_rate": 2.345679012345679e-06,
      "loss": 0.4068,
      "step": 20
    },
    {
      "epoch": 0.07425742574257425,
      "grad_norm": 0.7730974534228278,
      "learning_rate": 3.580246913580247e-06,
      "loss": 0.3273,
      "step": 30
    },
    {
      "epoch": 0.09900990099009901,
      "grad_norm": 0.5689200686287729,
      "learning_rate": 4.814814814814815e-06,
      "loss": 0.2622,
      "step": 40
    },
    {
      "epoch": 0.12376237623762376,
      "grad_norm": 0.717240303466821,
      "learning_rate": 6.049382716049383e-06,
      "loss": 0.2315,
      "step": 50
    },
    {
      "epoch": 0.1485148514851485,
      "grad_norm": 0.5479796314240546,
      "learning_rate": 7.283950617283952e-06,
      "loss": 0.225,
      "step": 60
    },
    {
      "epoch": 0.17326732673267325,
      "grad_norm": 0.5367981858324066,
      "learning_rate": 8.518518518518519e-06,
      "loss": 0.2078,
      "step": 70
    },
    {
      "epoch": 0.19801980198019803,
      "grad_norm": 0.5128703711055814,
      "learning_rate": 9.753086419753087e-06,
      "loss": 0.2007,
      "step": 80
    },
    {
      "epoch": 0.22277227722772278,
      "grad_norm": 0.5500134916740962,
      "learning_rate": 9.997012501794273e-06,
      "loss": 0.2115,
      "step": 90
    },
    {
      "epoch": 0.24752475247524752,
      "grad_norm": 0.5442543564775922,
      "learning_rate": 9.984881908680157e-06,
      "loss": 0.1977,
      "step": 100
    },
    {
      "epoch": 0.2722772277227723,
      "grad_norm": 0.4987905277999744,
      "learning_rate": 9.963444133394478e-06,
      "loss": 0.2044,
      "step": 110
    },
    {
      "epoch": 0.297029702970297,
      "grad_norm": 0.5792621848377878,
      "learning_rate": 9.93273920201681e-06,
      "loss": 0.1859,
      "step": 120
    },
    {
      "epoch": 0.3217821782178218,
      "grad_norm": 0.4991425649649758,
      "learning_rate": 9.892824443164987e-06,
      "loss": 0.1986,
      "step": 130
    },
    {
      "epoch": 0.3465346534653465,
      "grad_norm": 0.5558327647976072,
      "learning_rate": 9.84377438095789e-06,
      "loss": 0.1972,
      "step": 140
    },
    {
      "epoch": 0.3712871287128713,
      "grad_norm": 0.5522405055451383,
      "learning_rate": 9.785680595872824e-06,
      "loss": 0.1973,
      "step": 150
    },
    {
      "epoch": 0.39603960396039606,
      "grad_norm": 0.4641525226246223,
      "learning_rate": 9.718651553757266e-06,
      "loss": 0.1848,
      "step": 160
    },
    {
      "epoch": 0.4207920792079208,
      "grad_norm": 0.4712224353128171,
      "learning_rate": 9.642812403314272e-06,
      "loss": 0.1911,
      "step": 170
    },
    {
      "epoch": 0.44554455445544555,
      "grad_norm": 0.5142622413114706,
      "learning_rate": 9.55830474243961e-06,
      "loss": 0.1925,
      "step": 180
    },
    {
      "epoch": 0.47029702970297027,
      "grad_norm": 0.4006105519636306,
      "learning_rate": 9.465286353846905e-06,
      "loss": 0.1866,
      "step": 190
    },
    {
      "epoch": 0.49504950495049505,
      "grad_norm": 0.5073171400534645,
      "learning_rate": 9.36393091047441e-06,
      "loss": 0.185,
      "step": 200
    },
    {
      "epoch": 0.5198019801980198,
      "grad_norm": 0.5880177221650992,
      "learning_rate": 9.254427651223434e-06,
      "loss": 0.1848,
      "step": 210
    },
    {
      "epoch": 0.5445544554455446,
      "grad_norm": 0.4554484357702942,
      "learning_rate": 9.136981027633834e-06,
      "loss": 0.1851,
      "step": 220
    },
    {
      "epoch": 0.5693069306930693,
      "grad_norm": 0.5031293487926348,
      "learning_rate": 9.011810322156269e-06,
      "loss": 0.1948,
      "step": 230
    },
    {
      "epoch": 0.594059405940594,
      "grad_norm": 0.4577032226856764,
      "learning_rate": 8.879149238733932e-06,
      "loss": 0.1861,
      "step": 240
    },
    {
      "epoch": 0.6188118811881188,
      "grad_norm": 0.44733995052099396,
      "learning_rate": 8.739245466458187e-06,
      "loss": 0.1757,
      "step": 250
    },
    {
      "epoch": 0.6435643564356436,
      "grad_norm": 0.5406277161031414,
      "learning_rate": 8.592360217112759e-06,
      "loss": 0.1794,
      "step": 260
    },
    {
      "epoch": 0.6683168316831684,
      "grad_norm": 0.4515622758216762,
      "learning_rate": 8.438767737469995e-06,
      "loss": 0.1735,
      "step": 270
    },
    {
      "epoch": 0.693069306930693,
      "grad_norm": 0.4635294881039678,
      "learning_rate": 8.278754797249702e-06,
      "loss": 0.1713,
      "step": 280
    },
    {
      "epoch": 0.7178217821782178,
      "grad_norm": 0.5013665282058885,
      "learning_rate": 8.11262015369663e-06,
      "loss": 0.1761,
      "step": 290
    },
    {
      "epoch": 0.7425742574257426,
      "grad_norm": 0.4829501917570715,
      "learning_rate": 7.940673993776258e-06,
      "loss": 0.1874,
      "step": 300
    },
    {
      "epoch": 0.7673267326732673,
      "grad_norm": 0.443719131169877,
      "learning_rate": 7.763237355030384e-06,
      "loss": 0.1683,
      "step": 310
    },
    {
      "epoch": 0.7920792079207921,
      "grad_norm": 0.4280811144981197,
      "learning_rate": 7.580641526173758e-06,
      "loss": 0.1776,
      "step": 320
    },
    {
      "epoch": 0.8168316831683168,
      "grad_norm": 0.6434688708393701,
      "learning_rate": 7.39322742855097e-06,
      "loss": 0.1861,
      "step": 330
    },
    {
      "epoch": 0.8415841584158416,
      "grad_norm": 0.4538744032232575,
      "learning_rate": 7.201344979608423e-06,
      "loss": 0.1832,
      "step": 340
    },
    {
      "epoch": 0.8663366336633663,
      "grad_norm": 0.4590505681626231,
      "learning_rate": 7.0053524395698345e-06,
      "loss": 0.1778,
      "step": 350
    },
    {
      "epoch": 0.8910891089108911,
      "grad_norm": 0.39529209206101645,
      "learning_rate": 6.805615742535117e-06,
      "loss": 0.1712,
      "step": 360
    },
    {
      "epoch": 0.9158415841584159,
      "grad_norm": 0.5257661202592374,
      "learning_rate": 6.602507813251478e-06,
      "loss": 0.1752,
      "step": 370
    },
    {
      "epoch": 0.9405940594059405,
      "grad_norm": 0.49075057672303185,
      "learning_rate": 6.396407870832419e-06,
      "loss": 0.1691,
      "step": 380
    },
    {
      "epoch": 0.9653465346534653,
      "grad_norm": 0.4063721529471352,
      "learning_rate": 6.187700720724648e-06,
      "loss": 0.1638,
      "step": 390
    },
    {
      "epoch": 0.9900990099009901,
      "grad_norm": 0.49409556281854755,
      "learning_rate": 5.976776036244833e-06,
      "loss": 0.1715,
      "step": 400
    },
    {
      "epoch": 1.0148514851485149,
      "grad_norm": 0.4704764465424178,
      "learning_rate": 5.764027631027659e-06,
      "loss": 0.1338,
      "step": 410
    },
    {
      "epoch": 1.0396039603960396,
      "grad_norm": 0.514030762089054,
      "learning_rate": 5.549852723743564e-06,
      "loss": 0.1068,
      "step": 420
    },
    {
      "epoch": 1.0643564356435644,
      "grad_norm": 0.46024391907618734,
      "learning_rate": 5.334651196459003e-06,
      "loss": 0.1083,
      "step": 430
    },
    {
      "epoch": 1.0891089108910892,
      "grad_norm": 0.5084928690964229,
      "learning_rate": 5.118824848023926e-06,
      "loss": 0.0991,
      "step": 440
    },
    {
      "epoch": 1.113861386138614,
      "grad_norm": 0.5721555377829844,
      "learning_rate": 4.902776643880461e-06,
      "loss": 0.1082,
      "step": 450
    },
    {
      "epoch": 1.1386138613861387,
      "grad_norm": 0.44271050446405025,
      "learning_rate": 4.686909963693498e-06,
      "loss": 0.0998,
      "step": 460
    },
    {
      "epoch": 1.1633663366336633,
      "grad_norm": 0.4765511931794564,
      "learning_rate": 4.47162784820784e-06,
      "loss": 0.1,
      "step": 470
    },
    {
      "epoch": 1.188118811881188,
      "grad_norm": 0.4946911218893508,
      "learning_rate": 4.257332246738201e-06,
      "loss": 0.1019,
      "step": 480
    },
    {
      "epoch": 1.2128712871287128,
      "grad_norm": 0.4965201004334917,
      "learning_rate": 4.04442326669695e-06,
      "loss": 0.1051,
      "step": 490
    },
    {
      "epoch": 1.2376237623762376,
      "grad_norm": 0.5408097804856393,
      "learning_rate": 3.833298426560851e-06,
      "loss": 0.1004,
      "step": 500
    },
    {
      "epoch": 1.2623762376237624,
      "grad_norm": 0.5252320189983105,
      "learning_rate": 3.624351913671571e-06,
      "loss": 0.1014,
      "step": 510
    },
    {
      "epoch": 1.2871287128712872,
      "grad_norm": 0.556206677324514,
      "learning_rate": 3.4179738482556648e-06,
      "loss": 0.1049,
      "step": 520
    },
    {
      "epoch": 1.311881188118812,
      "grad_norm": 0.5360529256375285,
      "learning_rate": 3.214549555038218e-06,
      "loss": 0.1009,
      "step": 530
    },
    {
      "epoch": 1.3366336633663367,
      "grad_norm": 0.48742930116454386,
      "learning_rate": 3.0144588438100693e-06,
      "loss": 0.0999,
      "step": 540
    },
    {
      "epoch": 1.3613861386138613,
      "grad_norm": 0.7874631044020063,
      "learning_rate": 2.8180753002918735e-06,
      "loss": 0.0991,
      "step": 550
    },
    {
      "epoch": 1.386138613861386,
      "grad_norm": 0.4606133875814067,
      "learning_rate": 2.6257655886190147e-06,
      "loss": 0.0933,
      "step": 560
    },
    {
      "epoch": 1.4108910891089108,
      "grad_norm": 0.47731270631998807,
      "learning_rate": 2.4378887667496696e-06,
      "loss": 0.0989,
      "step": 570
    },
    {
      "epoch": 1.4356435643564356,
      "grad_norm": 0.5003301127513975,
      "learning_rate": 2.2547956160742473e-06,
      "loss": 0.0994,
      "step": 580
    },
    {
      "epoch": 1.4603960396039604,
      "grad_norm": 0.5212714732655578,
      "learning_rate": 2.0768279864778475e-06,
      "loss": 0.1083,
      "step": 590
    },
    {
      "epoch": 1.4851485148514851,
      "grad_norm": 0.4573167459369404,
      "learning_rate": 1.9043181580785597e-06,
      "loss": 0.0955,
      "step": 600
    },
    {
      "epoch": 1.50990099009901,
      "grad_norm": 0.5828901902132052,
      "learning_rate": 1.73758822083332e-06,
      "loss": 0.1021,
      "step": 610
    },
    {
      "epoch": 1.5346534653465347,
      "grad_norm": 0.44258879326834666,
      "learning_rate": 1.5769494731696206e-06,
      "loss": 0.0944,
      "step": 620
    },
    {
      "epoch": 1.5594059405940595,
      "grad_norm": 0.5612729118082875,
      "learning_rate": 1.4227018407658822e-06,
      "loss": 0.103,
      "step": 630
    },
    {
      "epoch": 1.5841584158415842,
      "grad_norm": 0.5111939858784064,
      "learning_rate": 1.275133316565691e-06,
      "loss": 0.1015,
      "step": 640
    },
    {
      "epoch": 1.608910891089109,
      "grad_norm": 0.43301100280236354,
      "learning_rate": 1.1345194230714235e-06,
      "loss": 0.0916,
      "step": 650
    },
    {
      "epoch": 1.6336633663366338,
      "grad_norm": 0.5685808742921838,
      "learning_rate": 1.001122697921197e-06,
      "loss": 0.1006,
      "step": 660
    },
    {
      "epoch": 1.6584158415841586,
      "grad_norm": 0.4732021093665504,
      "learning_rate": 8.751922037096328e-07,
      "loss": 0.0973,
      "step": 670
    },
    {
      "epoch": 1.6831683168316833,
      "grad_norm": 0.5002688727866472,
      "learning_rate": 7.569630629676294e-07,
      "loss": 0.0988,
      "step": 680
    },
    {
      "epoch": 1.7079207920792079,
      "grad_norm": 0.6448602699924979,
      "learning_rate": 6.466560191693566e-07,
      "loss": 0.0948,
      "step": 690
    },
    {
      "epoch": 1.7326732673267327,
      "grad_norm": 0.49223407652600815,
      "learning_rate": 5.444770245861553e-07,
      "loss": 0.0913,
      "step": 700
    },
    {
      "epoch": 1.7574257425742574,
      "grad_norm": 0.4356390296013288,
      "learning_rate": 4.506168557567886e-07,
      "loss": 0.0944,
      "step": 710
    },
    {
      "epoch": 1.7821782178217822,
      "grad_norm": 0.4524339230442861,
      "learning_rate": 3.6525075729205274e-07,
      "loss": 0.0922,
      "step": 720
    },
    {
      "epoch": 1.806930693069307,
      "grad_norm": 0.6615290546421568,
      "learning_rate": 2.8853811467875413e-07,
      "loss": 0.098,
      "step": 730
    },
    {
      "epoch": 1.8316831683168315,
      "grad_norm": 0.49847536579909224,
      "learning_rate": 2.2062215669397201e-07,
      "loss": 0.1025,
      "step": 740
    },
    {
      "epoch": 1.8564356435643563,
      "grad_norm": 0.4027083627268508,
      "learning_rate": 1.616296879852175e-07,
      "loss": 0.0906,
      "step": 750
    },
    {
      "epoch": 1.881188118811881,
      "grad_norm": 0.5545161003969088,
      "learning_rate": 1.1167085231579111e-07,
      "loss": 0.1015,
      "step": 760
    },
    {
      "epoch": 1.9059405940594059,
      "grad_norm": 0.6170175679054791,
      "learning_rate": 7.083892691736428e-08,
      "loss": 0.1024,
      "step": 770
    },
    {
      "epoch": 1.9306930693069306,
      "grad_norm": 0.643468057667551,
      "learning_rate": 3.9210148333763135e-08,
      "loss": 0.0925,
      "step": 780
    },
    {
      "epoch": 1.9554455445544554,
      "grad_norm": 0.4524005775955007,
      "learning_rate": 1.684357008110593e-08,
      "loss": 0.0973,
      "step": 790
    },
    {
      "epoch": 1.9801980198019802,
      "grad_norm": 0.4828849570873087,
      "learning_rate": 3.780952390058379e-09,
      "loss": 0.0901,
      "step": 800
    },
    {
      "epoch": 2.0,
      "step": 808,
      "total_flos": 1.995296975397847e+17,
      "train_loss": 0.15181401895709556,
      "train_runtime": 29257.7471,
      "train_samples_per_second": 0.221,
      "train_steps_per_second": 0.028
    }
  ],
  "logging_steps": 10,
  "max_steps": 808,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 200,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.995296975397847e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}