{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 406,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0024630541871921183,
      "grad_norm": 0.16922634840011597,
      "learning_rate": 4.8780487804878055e-06,
      "loss": 0.8252,
      "step": 1
    },
    {
      "epoch": 0.012315270935960592,
      "grad_norm": 0.1554887592792511,
      "learning_rate": 2.4390243902439026e-05,
      "loss": 0.79,
      "step": 5
    },
    {
      "epoch": 0.024630541871921183,
      "grad_norm": 0.16765658557415009,
      "learning_rate": 4.878048780487805e-05,
      "loss": 0.8156,
      "step": 10
    },
    {
      "epoch": 0.03694581280788178,
      "grad_norm": 0.18861277401447296,
      "learning_rate": 7.317073170731707e-05,
      "loss": 0.7756,
      "step": 15
    },
    {
      "epoch": 0.04926108374384237,
      "grad_norm": 0.15377004444599152,
      "learning_rate": 9.75609756097561e-05,
      "loss": 0.7755,
      "step": 20
    },
    {
      "epoch": 0.06157635467980296,
      "grad_norm": 0.1191137507557869,
      "learning_rate": 0.00012195121951219512,
      "loss": 0.7548,
      "step": 25
    },
    {
      "epoch": 0.07389162561576355,
      "grad_norm": 0.10861038416624069,
      "learning_rate": 0.00014634146341463414,
      "loss": 0.7464,
      "step": 30
    },
    {
      "epoch": 0.08620689655172414,
      "grad_norm": 0.1100272685289383,
      "learning_rate": 0.0001707317073170732,
      "loss": 0.7447,
      "step": 35
    },
    {
      "epoch": 0.09852216748768473,
      "grad_norm": 0.11167717725038528,
      "learning_rate": 0.0001951219512195122,
      "loss": 0.7476,
      "step": 40
    },
    {
      "epoch": 0.11083743842364532,
      "grad_norm": 0.12214276939630508,
      "learning_rate": 0.0001999407400739705,
      "loss": 0.7548,
      "step": 45
    },
    {
      "epoch": 0.12315270935960591,
      "grad_norm": 0.1148504987359047,
      "learning_rate": 0.00019970011699250152,
      "loss": 0.7347,
      "step": 50
    },
    {
      "epoch": 0.1354679802955665,
      "grad_norm": 0.12183093279600143,
      "learning_rate": 0.00019927487224577402,
      "loss": 0.7486,
      "step": 55
    },
    {
      "epoch": 0.1477832512315271,
      "grad_norm": 0.1088758111000061,
      "learning_rate": 0.0001986657932891657,
      "loss": 0.7309,
      "step": 60
    },
    {
      "epoch": 0.16009852216748768,
      "grad_norm": 0.10927726328372955,
      "learning_rate": 0.00019787400799669154,
      "loss": 0.7515,
      "step": 65
    },
    {
      "epoch": 0.1724137931034483,
      "grad_norm": 0.10450418293476105,
      "learning_rate": 0.00019690098257244064,
      "loss": 0.734,
      "step": 70
    },
    {
      "epoch": 0.18472906403940886,
      "grad_norm": 0.10391739755868912,
      "learning_rate": 0.00019574851883550395,
      "loss": 0.7481,
      "step": 75
    },
    {
      "epoch": 0.19704433497536947,
      "grad_norm": 0.10028768330812454,
      "learning_rate": 0.00019441875088341997,
      "loss": 0.7402,
      "step": 80
    },
    {
      "epoch": 0.20935960591133004,
      "grad_norm": 0.0981810986995697,
      "learning_rate": 0.00019291414114031743,
      "loss": 0.7455,
      "step": 85
    },
    {
      "epoch": 0.22167487684729065,
      "grad_norm": 0.10314278304576874,
      "learning_rate": 0.00019123747579707275,
      "loss": 0.7397,
      "step": 90
    },
    {
      "epoch": 0.23399014778325122,
      "grad_norm": 0.1047026589512825,
      "learning_rate": 0.0001893918596519257,
      "loss": 0.7284,
      "step": 95
    },
    {
      "epoch": 0.24630541871921183,
      "grad_norm": 0.10668618232011795,
      "learning_rate": 0.00018738071036110808,
      "loss": 0.7164,
      "step": 100
    },
    {
      "epoch": 0.25862068965517243,
      "grad_norm": 0.09526954591274261,
      "learning_rate": 0.00018520775211013093,
      "loss": 0.7195,
      "step": 105
    },
    {
      "epoch": 0.270935960591133,
      "grad_norm": 0.10316859930753708,
      "learning_rate": 0.00018287700871745036,
      "loss": 0.7341,
      "step": 110
    },
    {
      "epoch": 0.2832512315270936,
      "grad_norm": 0.10697784274816513,
      "learning_rate": 0.00018039279618328212,
      "loss": 0.7541,
      "step": 115
    },
    {
      "epoch": 0.2955665024630542,
      "grad_norm": 0.12420064210891724,
      "learning_rate": 0.0001777597146973627,
      "loss": 0.7592,
      "step": 120
    },
    {
      "epoch": 0.3078817733990148,
      "grad_norm": 0.10185825079679489,
      "learning_rate": 0.00017498264012045687,
      "loss": 0.7414,
      "step": 125
    },
    {
      "epoch": 0.32019704433497537,
      "grad_norm": 0.1062440425157547,
      "learning_rate": 0.00017206671495538612,
      "loss": 0.7415,
      "step": 130
    },
    {
      "epoch": 0.33251231527093594,
      "grad_norm": 0.09676851332187653,
      "learning_rate": 0.0001690173388242972,
      "loss": 0.7354,
      "step": 135
    },
    {
      "epoch": 0.3448275862068966,
      "grad_norm": 0.10254685580730438,
      "learning_rate": 0.0001658401584698049,
      "loss": 0.7308,
      "step": 140
    },
    {
      "epoch": 0.35714285714285715,
      "grad_norm": 0.10101324319839478,
      "learning_rate": 0.00016254105729852464,
      "loss": 0.7561,
      "step": 145
    },
    {
      "epoch": 0.3694581280788177,
      "grad_norm": 0.0975121557712555,
      "learning_rate": 0.00015912614448635782,
      "loss": 0.7328,
      "step": 150
    },
    {
      "epoch": 0.3817733990147783,
      "grad_norm": 0.10298648476600647,
      "learning_rate": 0.00015560174366570446,
      "loss": 0.7233,
      "step": 155
    },
    {
      "epoch": 0.39408866995073893,
      "grad_norm": 0.09906378388404846,
      "learning_rate": 0.0001519743812155516,
      "loss": 0.7478,
      "step": 160
    },
    {
      "epoch": 0.4064039408866995,
      "grad_norm": 0.09897776693105698,
      "learning_rate": 0.00014825077417612186,
      "loss": 0.7428,
      "step": 165
    },
    {
      "epoch": 0.4187192118226601,
      "grad_norm": 0.09831254184246063,
      "learning_rate": 0.00014443781781046136,
      "loss": 0.7341,
      "step": 170
    },
    {
      "epoch": 0.43103448275862066,
      "grad_norm": 0.10286163538694382,
      "learning_rate": 0.00014054257283599973,
      "loss": 0.7409,
      "step": 175
    },
    {
      "epoch": 0.4433497536945813,
      "grad_norm": 0.0995405986905098,
      "learning_rate": 0.00013657225234972695,
      "loss": 0.7316,
      "step": 180
    },
    {
      "epoch": 0.45566502463054187,
      "grad_norm": 0.09380760788917542,
      "learning_rate": 0.00013253420847119803,
      "loss": 0.7503,
      "step": 185
    },
    {
      "epoch": 0.46798029556650245,
      "grad_norm": 0.10748773068189621,
      "learning_rate": 0.0001284359187281004,
      "loss": 0.7325,
      "step": 190
    },
    {
      "epoch": 0.4802955665024631,
      "grad_norm": 0.11373591423034668,
      "learning_rate": 0.0001242849722095936,
      "loss": 0.7307,
      "step": 195
    },
    {
      "epoch": 0.49261083743842365,
      "grad_norm": 0.11263196170330048,
      "learning_rate": 0.00012008905551306356,
      "loss": 0.7083,
      "step": 200
    },
    {
      "epoch": 0.5049261083743842,
      "grad_norm": 0.10356634855270386,
      "learning_rate": 0.00011585593851031347,
      "loss": 0.7293,
      "step": 205
    },
    {
      "epoch": 0.5172413793103449,
      "grad_norm": 0.1134498193860054,
      "learning_rate": 0.00011159345995955006,
      "loss": 0.7378,
      "step": 210
    },
    {
      "epoch": 0.5295566502463054,
      "grad_norm": 0.09741369634866714,
      "learning_rate": 0.00010730951298980776,
      "loss": 0.7416,
      "step": 215
    },
    {
      "epoch": 0.541871921182266,
      "grad_norm": 0.1027570441365242,
      "learning_rate": 0.00010301203048469083,
      "loss": 0.7401,
      "step": 220
    },
    {
      "epoch": 0.5541871921182266,
      "grad_norm": 0.10384564101696014,
      "learning_rate": 9.870897039249911e-05,
      "loss": 0.7267,
      "step": 225
    },
    {
      "epoch": 0.5665024630541872,
      "grad_norm": 0.10548094660043716,
      "learning_rate": 9.440830098993969e-05,
      "loss": 0.7341,
      "step": 230
    },
    {
      "epoch": 0.5788177339901478,
      "grad_norm": 0.1037910059094429,
      "learning_rate": 9.011798612671286e-05,
      "loss": 0.7362,
      "step": 235
    },
    {
      "epoch": 0.5911330049261084,
      "grad_norm": 0.09916955232620239,
      "learning_rate": 8.58459704782957e-05,
      "loss": 0.739,
      "step": 240
    },
    {
      "epoch": 0.603448275862069,
      "grad_norm": 0.1018625795841217,
      "learning_rate": 8.160016483423199e-05,
      "loss": 0.7435,
      "step": 245
    },
    {
      "epoch": 0.6157635467980296,
      "grad_norm": 0.10083276778459549,
      "learning_rate": 7.738843144917119e-05,
      "loss": 0.7299,
      "step": 250
    },
    {
      "epoch": 0.6280788177339901,
      "grad_norm": 0.10589368641376495,
      "learning_rate": 7.321856948378259e-05,
      "loss": 0.7246,
      "step": 255
    },
    {
      "epoch": 0.6403940886699507,
      "grad_norm": 0.09519989788532257,
      "learning_rate": 6.909830056250527e-05,
      "loss": 0.7363,
      "step": 260
    },
    {
      "epoch": 0.6527093596059114,
      "grad_norm": 0.09882629662752151,
      "learning_rate": 6.503525447487715e-05,
      "loss": 0.713,
      "step": 265
    },
    {
      "epoch": 0.6650246305418719,
      "grad_norm": 0.10993042588233948,
      "learning_rate": 6.103695504692122e-05,
      "loss": 0.7131,
      "step": 270
    },
    {
      "epoch": 0.6773399014778325,
      "grad_norm": 0.09953361004590988,
      "learning_rate": 5.7110806208751655e-05,
      "loss": 0.737,
      "step": 275
    },
    {
      "epoch": 0.6896551724137931,
      "grad_norm": 0.1014907956123352,
      "learning_rate": 5.326407828419979e-05,
      "loss": 0.7264,
      "step": 280
    },
    {
      "epoch": 0.7019704433497537,
      "grad_norm": 0.09946195781230927,
      "learning_rate": 4.9503894527847964e-05,
      "loss": 0.7244,
      "step": 285
    },
    {
      "epoch": 0.7142857142857143,
      "grad_norm": 0.10249713063240051,
      "learning_rate": 4.583721793440188e-05,
      "loss": 0.7618,
      "step": 290
    },
    {
      "epoch": 0.7266009852216748,
      "grad_norm": 0.09448078274726868,
      "learning_rate": 4.227083834482728e-05,
      "loss": 0.7279,
      "step": 295
    },
    {
      "epoch": 0.7389162561576355,
      "grad_norm": 0.1023305207490921,
      "learning_rate": 3.881135987312757e-05,
      "loss": 0.7314,
      "step": 300
    },
    {
      "epoch": 0.7512315270935961,
      "grad_norm": 0.10410130769014359,
      "learning_rate": 3.546518867704499e-05,
      "loss": 0.7415,
      "step": 305
    },
    {
      "epoch": 0.7635467980295566,
      "grad_norm": 0.10321201384067535,
      "learning_rate": 3.223852109533112e-05,
      "loss": 0.7328,
      "step": 310
    },
    {
      "epoch": 0.7758620689655172,
      "grad_norm": 0.1026611328125,
      "learning_rate": 2.9137332173554043e-05,
      "loss": 0.7186,
      "step": 315
    },
    {
      "epoch": 0.7881773399014779,
      "grad_norm": 0.10746520757675171,
      "learning_rate": 2.616736459968936e-05,
      "loss": 0.7209,
      "step": 320
    },
    {
      "epoch": 0.8004926108374384,
      "grad_norm": 0.09868486225605011,
      "learning_rate": 2.33341180699841e-05,
      "loss": 0.7249,
      "step": 325
    },
    {
      "epoch": 0.812807881773399,
      "grad_norm": 0.0974905863404274,
      "learning_rate": 2.0642839104785272e-05,
      "loss": 0.7288,
      "step": 330
    },
    {
      "epoch": 0.8251231527093597,
      "grad_norm": 0.1025204062461853,
      "learning_rate": 1.8098511333192024e-05,
      "loss": 0.7465,
      "step": 335
    },
    {
      "epoch": 0.8374384236453202,
      "grad_norm": 0.10062424093484879,
      "learning_rate": 1.570584626452173e-05,
      "loss": 0.7323,
      "step": 340
    },
    {
      "epoch": 0.8497536945812808,
      "grad_norm": 0.09559416770935059,
      "learning_rate": 1.3469274563679402e-05,
      "loss": 0.716,
      "step": 345
    },
    {
      "epoch": 0.8620689655172413,
      "grad_norm": 0.10290331393480301,
      "learning_rate": 1.1392937846586215e-05,
      "loss": 0.7307,
      "step": 350
    },
    {
      "epoch": 0.874384236453202,
      "grad_norm": 0.1096779927611351,
      "learning_rate": 9.48068101086026e-06,
      "loss": 0.7285,
      "step": 355
    },
    {
      "epoch": 0.8866995073891626,
      "grad_norm": 0.09808935970067978,
      "learning_rate": 7.736045115951251e-06,
      "loss": 0.7392,
      "step": 360
    },
    {
      "epoch": 0.8990147783251231,
      "grad_norm": 0.10223131626844406,
      "learning_rate": 6.16226082591359e-06,
      "loss": 0.7202,
      "step": 365
    },
    {
      "epoch": 0.9113300492610837,
      "grad_norm": 0.10011482238769531,
      "learning_rate": 4.762242426960262e-06,
      "loss": 0.7332,
      "step": 370
    },
    {
      "epoch": 0.9236453201970444,
      "grad_norm": 0.10329649597406387,
      "learning_rate": 3.5385824308756587e-06,
      "loss": 0.7103,
      "step": 375
    },
    {
      "epoch": 0.9359605911330049,
      "grad_norm": 0.10814712941646576,
      "learning_rate": 2.493546774280531e-06,
      "loss": 0.7397,
      "step": 380
    },
    {
      "epoch": 0.9482758620689655,
      "grad_norm": 0.10298412293195724,
      "learning_rate": 1.6290706226390285e-06,
      "loss": 0.7262,
      "step": 385
    },
    {
      "epoch": 0.9605911330049262,
      "grad_norm": 0.1037633940577507,
      "learning_rate": 9.46754786777726e-07,
      "loss": 0.7133,
      "step": 390
    },
    {
      "epoch": 0.9729064039408867,
      "grad_norm": 0.09317852556705475,
      "learning_rate": 4.4786275855247527e-07,
      "loss": 0.7231,
      "step": 395
    },
    {
      "epoch": 0.9852216748768473,
      "grad_norm": 0.09987390041351318,
      "learning_rate": 1.333183711524133e-07,
      "loss": 0.7209,
      "step": 400
    },
    {
      "epoch": 0.9975369458128078,
      "grad_norm": 0.0951569527387619,
      "learning_rate": 3.7040883734462683e-09,
      "loss": 0.7019,
      "step": 405
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.9071829915046692,
      "eval_runtime": 618.8722,
      "eval_samples_per_second": 1.866,
      "eval_steps_per_second": 0.118,
      "step": 406
    },
    {
      "epoch": 1.0,
      "step": 406,
      "total_flos": 2.1031592360023163e+18,
      "train_loss": 0.7368072108388535,
      "train_runtime": 156724.9428,
      "train_samples_per_second": 0.663,
      "train_steps_per_second": 0.003
    }
  ],
  "logging_steps": 5,
  "max_steps": 406,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2.1031592360023163e+18,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}