{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.9801980198019802,
  "eval_steps": 500,
  "global_step": 800,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.024752475247524754,
      "grad_norm": 2.377986153981849,
      "learning_rate": 1.111111111111111e-06,
      "loss": 0.5134,
      "step": 10
    },
    {
      "epoch": 0.04950495049504951,
      "grad_norm": 1.1527847734451684,
      "learning_rate": 2.345679012345679e-06,
      "loss": 0.4478,
      "step": 20
    },
    {
      "epoch": 0.07425742574257425,
      "grad_norm": 0.9214168013745976,
      "learning_rate": 3.580246913580247e-06,
      "loss": 0.3606,
      "step": 30
    },
    {
      "epoch": 0.09900990099009901,
      "grad_norm": 0.6169142314211664,
      "learning_rate": 4.814814814814815e-06,
      "loss": 0.2914,
      "step": 40
    },
    {
      "epoch": 0.12376237623762376,
      "grad_norm": 0.9045543763732307,
      "learning_rate": 6.049382716049383e-06,
      "loss": 0.258,
      "step": 50
    },
    {
      "epoch": 0.1485148514851485,
      "grad_norm": 0.6916239240771108,
      "learning_rate": 7.283950617283952e-06,
      "loss": 0.2541,
      "step": 60
    },
    {
      "epoch": 0.17326732673267325,
      "grad_norm": 0.6563868068913417,
      "learning_rate": 8.518518518518519e-06,
      "loss": 0.2375,
      "step": 70
    },
    {
      "epoch": 0.19801980198019803,
      "grad_norm": 0.6955688922095051,
      "learning_rate": 9.753086419753087e-06,
      "loss": 0.2281,
      "step": 80
    },
    {
      "epoch": 0.22277227722772278,
      "grad_norm": 0.7774430403471241,
      "learning_rate": 9.997012501794273e-06,
      "loss": 0.2409,
      "step": 90
    },
    {
      "epoch": 0.24752475247524752,
      "grad_norm": 0.7176934124930091,
      "learning_rate": 9.984881908680157e-06,
      "loss": 0.2235,
      "step": 100
    },
    {
      "epoch": 0.2722772277227723,
      "grad_norm": 0.6385467956457365,
      "learning_rate": 9.963444133394478e-06,
      "loss": 0.2309,
      "step": 110
    },
    {
      "epoch": 0.297029702970297,
      "grad_norm": 0.7973847481250326,
      "learning_rate": 9.93273920201681e-06,
      "loss": 0.2123,
      "step": 120
    },
    {
      "epoch": 0.3217821782178218,
      "grad_norm": 0.7212534759324712,
      "learning_rate": 9.892824443164987e-06,
      "loss": 0.2261,
      "step": 130
    },
    {
      "epoch": 0.3465346534653465,
      "grad_norm": 0.7370462388577308,
      "learning_rate": 9.84377438095789e-06,
      "loss": 0.2256,
      "step": 140
    },
    {
      "epoch": 0.3712871287128713,
      "grad_norm": 0.7271303244318778,
      "learning_rate": 9.785680595872824e-06,
      "loss": 0.2241,
      "step": 150
    },
    {
      "epoch": 0.39603960396039606,
      "grad_norm": 0.5777488399063054,
      "learning_rate": 9.718651553757266e-06,
      "loss": 0.2093,
      "step": 160
    },
    {
      "epoch": 0.4207920792079208,
      "grad_norm": 0.6578719601127735,
      "learning_rate": 9.642812403314272e-06,
      "loss": 0.2168,
      "step": 170
    },
    {
      "epoch": 0.44554455445544555,
      "grad_norm": 0.6851661976650275,
      "learning_rate": 9.55830474243961e-06,
      "loss": 0.2181,
      "step": 180
    },
    {
      "epoch": 0.47029702970297027,
      "grad_norm": 1.0239830343468235,
      "learning_rate": 9.465286353846905e-06,
      "loss": 0.2108,
      "step": 190
    },
    {
      "epoch": 0.49504950495049505,
      "grad_norm": 0.6639864769738921,
      "learning_rate": 9.36393091047441e-06,
      "loss": 0.2096,
      "step": 200
    },
    {
      "epoch": 0.5198019801980198,
      "grad_norm": 0.7442256820961616,
      "learning_rate": 9.254427651223434e-06,
      "loss": 0.2096,
      "step": 210
    },
    {
      "epoch": 0.5445544554455446,
      "grad_norm": 0.6200758715629883,
      "learning_rate": 9.136981027633834e-06,
      "loss": 0.2088,
      "step": 220
    },
    {
      "epoch": 0.5693069306930693,
      "grad_norm": 0.6806953465136265,
      "learning_rate": 9.011810322156269e-06,
      "loss": 0.2188,
      "step": 230
    },
    {
      "epoch": 0.594059405940594,
      "grad_norm": 0.5965222339817741,
      "learning_rate": 8.879149238733932e-06,
      "loss": 0.2122,
      "step": 240
    },
    {
      "epoch": 0.6188118811881188,
      "grad_norm": 0.5937778347833192,
      "learning_rate": 8.739245466458187e-06,
      "loss": 0.198,
      "step": 250
    },
    {
      "epoch": 0.6435643564356436,
      "grad_norm": 0.6330113114538947,
      "learning_rate": 8.592360217112759e-06,
      "loss": 0.2014,
      "step": 260
    },
    {
      "epoch": 0.6683168316831684,
      "grad_norm": 0.6647979286799263,
      "learning_rate": 8.438767737469995e-06,
      "loss": 0.1965,
      "step": 270
    },
    {
      "epoch": 0.693069306930693,
      "grad_norm": 0.5972574353654555,
      "learning_rate": 8.278754797249702e-06,
      "loss": 0.1936,
      "step": 280
    },
    {
      "epoch": 0.7178217821782178,
      "grad_norm": 0.6961534332272147,
      "learning_rate": 8.11262015369663e-06,
      "loss": 0.1983,
      "step": 290
    },
    {
      "epoch": 0.7425742574257426,
      "grad_norm": 0.5890977053667193,
      "learning_rate": 7.940673993776258e-06,
      "loss": 0.2124,
      "step": 300
    },
    {
      "epoch": 0.7673267326732673,
      "grad_norm": 0.5782056754231403,
      "learning_rate": 7.763237355030384e-06,
      "loss": 0.1912,
      "step": 310
    },
    {
      "epoch": 0.7920792079207921,
      "grad_norm": 0.47426904980129864,
      "learning_rate": 7.580641526173758e-06,
      "loss": 0.1998,
      "step": 320
    },
    {
      "epoch": 0.8168316831683168,
      "grad_norm": 0.6336424200096511,
      "learning_rate": 7.39322742855097e-06,
      "loss": 0.2092,
      "step": 330
    },
    {
      "epoch": 0.8415841584158416,
      "grad_norm": 0.5851430863896525,
      "learning_rate": 7.201344979608423e-06,
      "loss": 0.2048,
      "step": 340
    },
    {
      "epoch": 0.8663366336633663,
      "grad_norm": 0.5985972703165557,
      "learning_rate": 7.0053524395698345e-06,
      "loss": 0.1997,
      "step": 350
    },
    {
      "epoch": 0.8910891089108911,
      "grad_norm": 0.5359885389422082,
      "learning_rate": 6.805615742535117e-06,
      "loss": 0.1924,
      "step": 360
    },
    {
      "epoch": 0.9158415841584159,
      "grad_norm": 0.6513367769509769,
      "learning_rate": 6.602507813251478e-06,
      "loss": 0.1962,
      "step": 370
    },
    {
      "epoch": 0.9405940594059405,
      "grad_norm": 2.5495071867624897,
      "learning_rate": 6.396407870832419e-06,
      "loss": 0.1916,
      "step": 380
    },
    {
      "epoch": 0.9653465346534653,
      "grad_norm": 0.533464536541958,
      "learning_rate": 6.187700720724648e-06,
      "loss": 0.1867,
      "step": 390
    },
    {
      "epoch": 0.9900990099009901,
      "grad_norm": 0.6152123883544872,
      "learning_rate": 5.976776036244833e-06,
      "loss": 0.1932,
      "step": 400
    },
    {
      "epoch": 1.0148514851485149,
      "grad_norm": 0.5754905609165137,
      "learning_rate": 5.764027631027659e-06,
      "loss": 0.1528,
      "step": 410
    },
    {
      "epoch": 1.0396039603960396,
      "grad_norm": 0.6609360008628948,
      "learning_rate": 5.549852723743564e-06,
      "loss": 0.1244,
      "step": 420
    },
    {
      "epoch": 1.0643564356435644,
      "grad_norm": 0.6141937179311839,
      "learning_rate": 5.334651196459003e-06,
      "loss": 0.1261,
      "step": 430
    },
    {
      "epoch": 1.0891089108910892,
      "grad_norm": 0.6889260069385835,
      "learning_rate": 5.118824848023926e-06,
      "loss": 0.1173,
      "step": 440
    },
    {
      "epoch": 1.113861386138614,
      "grad_norm": 0.8267827558845927,
      "learning_rate": 4.902776643880461e-06,
      "loss": 0.1248,
      "step": 450
    },
    {
      "epoch": 1.1386138613861387,
      "grad_norm": 0.5753472949830176,
      "learning_rate": 4.686909963693498e-06,
      "loss": 0.1171,
      "step": 460
    },
    {
      "epoch": 1.1633663366336633,
      "grad_norm": 0.6505293177551238,
      "learning_rate": 4.47162784820784e-06,
      "loss": 0.1187,
      "step": 470
    },
    {
      "epoch": 1.188118811881188,
      "grad_norm": 0.6728733056608579,
      "learning_rate": 4.257332246738201e-06,
      "loss": 0.1188,
      "step": 480
    },
    {
      "epoch": 1.2128712871287128,
      "grad_norm": 0.6334415322292648,
      "learning_rate": 4.04442326669695e-06,
      "loss": 0.1211,
      "step": 490
    },
    {
      "epoch": 1.2376237623762376,
      "grad_norm": 0.6692215992603302,
      "learning_rate": 3.833298426560851e-06,
      "loss": 0.118,
      "step": 500
    },
    {
      "epoch": 1.2623762376237624,
      "grad_norm": 0.6937673438761942,
      "learning_rate": 3.624351913671571e-06,
      "loss": 0.1181,
      "step": 510
    },
    {
      "epoch": 1.2871287128712872,
      "grad_norm": 0.7260772952384915,
      "learning_rate": 3.4179738482556648e-06,
      "loss": 0.1222,
      "step": 520
    },
    {
      "epoch": 1.311881188118812,
      "grad_norm": 0.7018430210814361,
      "learning_rate": 3.214549555038218e-06,
      "loss": 0.119,
      "step": 530
    },
    {
      "epoch": 1.3366336633663367,
      "grad_norm": 0.6398785057605294,
      "learning_rate": 3.0144588438100693e-06,
      "loss": 0.1169,
      "step": 540
    },
    {
      "epoch": 1.3613861386138613,
      "grad_norm": 0.6102680360344789,
      "learning_rate": 2.8180753002918735e-06,
      "loss": 0.1172,
      "step": 550
    },
    {
      "epoch": 1.386138613861386,
      "grad_norm": 0.6214065811958993,
      "learning_rate": 2.6257655886190147e-06,
      "loss": 0.1097,
      "step": 560
    },
    {
      "epoch": 1.4108910891089108,
      "grad_norm": 0.6069249122730125,
      "learning_rate": 2.4378887667496696e-06,
      "loss": 0.1167,
      "step": 570
    },
    {
      "epoch": 1.4356435643564356,
      "grad_norm": 0.6906365846153183,
      "learning_rate": 2.2547956160742473e-06,
      "loss": 0.1168,
      "step": 580
    },
    {
      "epoch": 1.4603960396039604,
      "grad_norm": 0.6780444546407847,
      "learning_rate": 2.0768279864778475e-06,
      "loss": 0.1263,
      "step": 590
    },
    {
      "epoch": 1.4851485148514851,
      "grad_norm": 0.6276144596350219,
      "learning_rate": 1.9043181580785597e-06,
      "loss": 0.1125,
      "step": 600
    },
    {
      "epoch": 1.50990099009901,
      "grad_norm": 0.8105107613665639,
      "learning_rate": 1.73758822083332e-06,
      "loss": 0.1183,
      "step": 610
    },
    {
      "epoch": 1.5346534653465347,
      "grad_norm": 0.593668139214893,
      "learning_rate": 1.5769494731696206e-06,
      "loss": 0.1108,
      "step": 620
    },
    {
      "epoch": 1.5594059405940595,
      "grad_norm": 0.7019089646578581,
      "learning_rate": 1.4227018407658822e-06,
      "loss": 0.1212,
      "step": 630
    },
    {
      "epoch": 1.5841584158415842,
      "grad_norm": 0.6425882205315401,
      "learning_rate": 1.275133316565691e-06,
      "loss": 0.1184,
      "step": 640
    },
    {
      "epoch": 1.608910891089109,
      "grad_norm": 0.5670206419588937,
      "learning_rate": 1.1345194230714235e-06,
      "loss": 0.1069,
      "step": 650
    },
    {
      "epoch": 1.6336633663366338,
      "grad_norm": 0.761904942958332,
      "learning_rate": 1.001122697921197e-06,
      "loss": 0.1175,
      "step": 660
    },
    {
      "epoch": 1.6584158415841586,
      "grad_norm": 0.6045327206848765,
      "learning_rate": 8.751922037096328e-07,
      "loss": 0.1129,
      "step": 670
    },
    {
      "epoch": 1.6831683168316833,
      "grad_norm": 0.6977601855593518,
      "learning_rate": 7.569630629676294e-07,
      "loss": 0.1151,
      "step": 680
    },
    {
      "epoch": 1.7079207920792079,
      "grad_norm": 0.5892491224169939,
      "learning_rate": 6.466560191693566e-07,
      "loss": 0.1114,
      "step": 690
    },
    {
      "epoch": 1.7326732673267327,
      "grad_norm": 0.6349779012742225,
      "learning_rate": 5.444770245861553e-07,
      "loss": 0.1062,
      "step": 700
    },
    {
      "epoch": 1.7574257425742574,
      "grad_norm": 0.5280424120170226,
      "learning_rate": 4.506168557567886e-07,
      "loss": 0.1101,
      "step": 710
    },
    {
      "epoch": 1.7821782178217822,
      "grad_norm": 0.6082009840024847,
      "learning_rate": 3.6525075729205274e-07,
      "loss": 0.1076,
      "step": 720
    },
    {
      "epoch": 1.806930693069307,
      "grad_norm": 0.7416157250316486,
      "learning_rate": 2.8853811467875413e-07,
      "loss": 0.1156,
      "step": 730
    },
    {
      "epoch": 1.8316831683168315,
      "grad_norm": 0.6796922655628035,
      "learning_rate": 2.2062215669397201e-07,
      "loss": 0.1199,
      "step": 740
    },
    {
      "epoch": 1.8564356435643563,
      "grad_norm": 0.519247955113028,
      "learning_rate": 1.616296879852175e-07,
      "loss": 0.1063,
      "step": 750
    },
    {
      "epoch": 1.881188118811881,
      "grad_norm": 0.6685776260821267,
      "learning_rate": 1.1167085231579111e-07,
      "loss": 0.1175,
      "step": 760
    },
    {
      "epoch": 1.9059405940594059,
      "grad_norm": 0.8282713027884988,
      "learning_rate": 7.083892691736428e-08,
      "loss": 0.1196,
      "step": 770
    },
    {
      "epoch": 1.9306930693069306,
      "grad_norm": 0.8408881587671665,
      "learning_rate": 3.9210148333763135e-08,
      "loss": 0.1081,
      "step": 780
    },
    {
      "epoch": 1.9554455445544554,
      "grad_norm": 0.6283575387075083,
      "learning_rate": 1.684357008110593e-08,
      "loss": 0.1135,
      "step": 790
    },
    {
      "epoch": 1.9801980198019802,
      "grad_norm": 0.6309254787259336,
      "learning_rate": 3.780952390058379e-09,
      "loss": 0.105,
      "step": 800
    }
  ],
  "logging_steps": 10,
  "max_steps": 808,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 200,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.38221627375616e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}