{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 406,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0024630541871921183,
      "grad_norm": 0.13177792727947235,
      "learning_rate": 4.8780487804878055e-06,
      "loss": 0.7592,
      "step": 1
    },
    {
      "epoch": 0.012315270935960592,
      "grad_norm": 0.1387966126203537,
      "learning_rate": 2.4390243902439026e-05,
      "loss": 0.7841,
      "step": 5
    },
    {
      "epoch": 0.024630541871921183,
      "grad_norm": 0.13816028833389282,
      "learning_rate": 4.878048780487805e-05,
      "loss": 0.7896,
      "step": 10
    },
    {
      "epoch": 0.03694581280788178,
      "grad_norm": 0.12815995514392853,
      "learning_rate": 7.317073170731707e-05,
      "loss": 0.7837,
      "step": 15
    },
    {
      "epoch": 0.04926108374384237,
      "grad_norm": 0.11007481813430786,
      "learning_rate": 9.75609756097561e-05,
      "loss": 0.7807,
      "step": 20
    },
    {
      "epoch": 0.06157635467980296,
      "grad_norm": 0.10207259654998779,
      "learning_rate": 0.00012195121951219512,
      "loss": 0.7567,
      "step": 25
    },
    {
      "epoch": 0.07389162561576355,
      "grad_norm": 0.12121258676052094,
      "learning_rate": 0.00014634146341463414,
      "loss": 0.7527,
      "step": 30
    },
    {
      "epoch": 0.08620689655172414,
      "grad_norm": 0.11584752053022385,
      "learning_rate": 0.0001707317073170732,
      "loss": 0.7514,
      "step": 35
    },
    {
      "epoch": 0.09852216748768473,
      "grad_norm": 0.10999231040477753,
      "learning_rate": 0.0001951219512195122,
      "loss": 0.7499,
      "step": 40
    },
    {
      "epoch": 0.11083743842364532,
      "grad_norm": 0.10557198524475098,
      "learning_rate": 0.0001999407400739705,
      "loss": 0.7379,
      "step": 45
    },
    {
      "epoch": 0.12315270935960591,
      "grad_norm": 0.10717561095952988,
      "learning_rate": 0.00019970011699250152,
      "loss": 0.7747,
      "step": 50
    },
    {
      "epoch": 0.1354679802955665,
      "grad_norm": 0.09578366577625275,
      "learning_rate": 0.00019927487224577402,
      "loss": 0.747,
      "step": 55
    },
    {
      "epoch": 0.1477832512315271,
      "grad_norm": 0.09655096381902695,
      "learning_rate": 0.0001986657932891657,
      "loss": 0.7435,
      "step": 60
    },
    {
      "epoch": 0.16009852216748768,
      "grad_norm": 0.09785150736570358,
      "learning_rate": 0.00019787400799669154,
      "loss": 0.7481,
      "step": 65
    },
    {
      "epoch": 0.1724137931034483,
      "grad_norm": 0.10019391030073166,
      "learning_rate": 0.00019690098257244064,
      "loss": 0.7549,
      "step": 70
    },
    {
      "epoch": 0.18472906403940886,
      "grad_norm": 0.10015880316495895,
      "learning_rate": 0.00019574851883550395,
      "loss": 0.7501,
      "step": 75
    },
    {
      "epoch": 0.19704433497536947,
      "grad_norm": 0.10224644094705582,
      "learning_rate": 0.00019441875088341997,
      "loss": 0.7438,
      "step": 80
    },
    {
      "epoch": 0.20935960591133004,
      "grad_norm": 0.09352747350931168,
      "learning_rate": 0.00019291414114031743,
      "loss": 0.7374,
      "step": 85
    },
    {
      "epoch": 0.22167487684729065,
      "grad_norm": 0.09862257540225983,
      "learning_rate": 0.00019123747579707275,
      "loss": 0.7588,
      "step": 90
    },
    {
      "epoch": 0.23399014778325122,
      "grad_norm": 0.09364981204271317,
      "learning_rate": 0.0001893918596519257,
      "loss": 0.7631,
      "step": 95
    },
    {
      "epoch": 0.24630541871921183,
      "grad_norm": 0.10517935454845428,
      "learning_rate": 0.00018738071036110808,
      "loss": 0.7507,
      "step": 100
    },
    {
      "epoch": 0.25862068965517243,
      "grad_norm": 0.10279802232980728,
      "learning_rate": 0.00018520775211013093,
      "loss": 0.7503,
      "step": 105
    },
    {
      "epoch": 0.270935960591133,
      "grad_norm": 0.0953611433506012,
      "learning_rate": 0.00018287700871745036,
      "loss": 0.7644,
      "step": 110
    },
    {
      "epoch": 0.2832512315270936,
      "grad_norm": 0.10334543138742447,
      "learning_rate": 0.00018039279618328212,
      "loss": 0.7601,
      "step": 115
    },
    {
      "epoch": 0.2955665024630542,
      "grad_norm": 0.10075850784778595,
      "learning_rate": 0.0001777597146973627,
      "loss": 0.7391,
      "step": 120
    },
    {
      "epoch": 0.3078817733990148,
      "grad_norm": 0.09204421937465668,
      "learning_rate": 0.00017498264012045687,
      "loss": 0.7442,
      "step": 125
    },
    {
      "epoch": 0.32019704433497537,
      "grad_norm": 0.09441586583852768,
      "learning_rate": 0.00017206671495538612,
      "loss": 0.7407,
      "step": 130
    },
    {
      "epoch": 0.33251231527093594,
      "grad_norm": 0.10386330634355545,
      "learning_rate": 0.0001690173388242972,
      "loss": 0.7371,
      "step": 135
    },
    {
      "epoch": 0.3448275862068966,
      "grad_norm": 0.09605494141578674,
      "learning_rate": 0.0001658401584698049,
      "loss": 0.7467,
      "step": 140
    },
    {
      "epoch": 0.35714285714285715,
      "grad_norm": 0.10094594210386276,
      "learning_rate": 0.00016254105729852464,
      "loss": 0.7414,
      "step": 145
    },
    {
      "epoch": 0.3694581280788177,
      "grad_norm": 0.09067249298095703,
      "learning_rate": 0.00015912614448635782,
      "loss": 0.753,
      "step": 150
    },
    {
      "epoch": 0.3817733990147783,
      "grad_norm": 0.10036266595125198,
      "learning_rate": 0.00015560174366570446,
      "loss": 0.7397,
      "step": 155
    },
    {
      "epoch": 0.39408866995073893,
      "grad_norm": 0.10051223635673523,
      "learning_rate": 0.0001519743812155516,
      "loss": 0.7535,
      "step": 160
    },
    {
      "epoch": 0.4064039408866995,
      "grad_norm": 0.09287708252668381,
      "learning_rate": 0.00014825077417612186,
      "loss": 0.754,
      "step": 165
    },
    {
      "epoch": 0.4187192118226601,
      "grad_norm": 0.09688938409090042,
      "learning_rate": 0.00014443781781046136,
      "loss": 0.7584,
      "step": 170
    },
    {
      "epoch": 0.43103448275862066,
      "grad_norm": 0.09463707357645035,
      "learning_rate": 0.00014054257283599973,
      "loss": 0.7192,
      "step": 175
    },
    {
      "epoch": 0.4433497536945813,
      "grad_norm": 0.0989103689789772,
      "learning_rate": 0.00013657225234972695,
      "loss": 0.7346,
      "step": 180
    },
    {
      "epoch": 0.45566502463054187,
      "grad_norm": 0.09697972238063812,
      "learning_rate": 0.00013253420847119803,
      "loss": 0.7356,
      "step": 185
    },
    {
      "epoch": 0.46798029556650245,
      "grad_norm": 0.09980908036231995,
      "learning_rate": 0.0001284359187281004,
      "loss": 0.7403,
      "step": 190
    },
    {
      "epoch": 0.4802955665024631,
      "grad_norm": 0.09531185775995255,
      "learning_rate": 0.0001242849722095936,
      "loss": 0.7283,
      "step": 195
    },
    {
      "epoch": 0.49261083743842365,
      "grad_norm": 0.0942201018333435,
      "learning_rate": 0.00012008905551306356,
      "loss": 0.7513,
      "step": 200
    },
    {
      "epoch": 0.5049261083743842,
      "grad_norm": 0.09356020390987396,
      "learning_rate": 0.00011585593851031347,
      "loss": 0.7375,
      "step": 205
    },
    {
      "epoch": 0.5172413793103449,
      "grad_norm": 0.09078332781791687,
      "learning_rate": 0.00011159345995955006,
      "loss": 0.7341,
      "step": 210
    },
    {
      "epoch": 0.5295566502463054,
      "grad_norm": 0.09174405783414841,
      "learning_rate": 0.00010730951298980776,
      "loss": 0.7338,
      "step": 215
    },
    {
      "epoch": 0.541871921182266,
      "grad_norm": 0.0960531234741211,
      "learning_rate": 0.00010301203048469083,
      "loss": 0.7583,
      "step": 220
    },
    {
      "epoch": 0.5541871921182266,
      "grad_norm": 0.10194765776395798,
      "learning_rate": 9.870897039249911e-05,
      "loss": 0.7339,
      "step": 225
    },
    {
      "epoch": 0.5665024630541872,
      "grad_norm": 0.09580555558204651,
      "learning_rate": 9.440830098993969e-05,
      "loss": 0.7369,
      "step": 230
    },
    {
      "epoch": 0.5788177339901478,
      "grad_norm": 0.09715011715888977,
      "learning_rate": 9.011798612671286e-05,
      "loss": 0.7232,
      "step": 235
    },
    {
      "epoch": 0.5911330049261084,
      "grad_norm": 0.09561636298894882,
      "learning_rate": 8.58459704782957e-05,
      "loss": 0.7325,
      "step": 240
    },
    {
      "epoch": 0.603448275862069,
      "grad_norm": 0.09226809442043304,
      "learning_rate": 8.160016483423199e-05,
      "loss": 0.7411,
      "step": 245
    },
    {
      "epoch": 0.6157635467980296,
      "grad_norm": 0.09844076633453369,
      "learning_rate": 7.738843144917119e-05,
      "loss": 0.7347,
      "step": 250
    },
    {
      "epoch": 0.6280788177339901,
      "grad_norm": 0.09305092692375183,
      "learning_rate": 7.321856948378259e-05,
      "loss": 0.7461,
      "step": 255
    },
    {
      "epoch": 0.6403940886699507,
      "grad_norm": 0.09724168479442596,
      "learning_rate": 6.909830056250527e-05,
      "loss": 0.749,
      "step": 260
    },
    {
      "epoch": 0.6527093596059114,
      "grad_norm": 0.09665533900260925,
      "learning_rate": 6.503525447487715e-05,
      "loss": 0.7365,
      "step": 265
    },
    {
      "epoch": 0.6650246305418719,
      "grad_norm": 0.09020380675792694,
      "learning_rate": 6.103695504692122e-05,
      "loss": 0.7403,
      "step": 270
    },
    {
      "epoch": 0.6773399014778325,
      "grad_norm": 0.09968876093626022,
      "learning_rate": 5.7110806208751655e-05,
      "loss": 0.7391,
      "step": 275
    },
    {
      "epoch": 0.6896551724137931,
      "grad_norm": 0.09027230739593506,
      "learning_rate": 5.326407828419979e-05,
      "loss": 0.7473,
      "step": 280
    },
    {
      "epoch": 0.7019704433497537,
      "grad_norm": 0.10408838093280792,
      "learning_rate": 4.9503894527847964e-05,
      "loss": 0.7405,
      "step": 285
    },
    {
      "epoch": 0.7142857142857143,
      "grad_norm": 0.09234675765037537,
      "learning_rate": 4.583721793440188e-05,
      "loss": 0.7352,
      "step": 290
    },
    {
      "epoch": 0.7266009852216748,
      "grad_norm": 0.09968586266040802,
      "learning_rate": 4.227083834482728e-05,
      "loss": 0.7563,
      "step": 295
    },
    {
      "epoch": 0.7389162561576355,
      "grad_norm": 0.09535784274339676,
      "learning_rate": 3.881135987312757e-05,
      "loss": 0.7303,
      "step": 300
    },
    {
      "epoch": 0.7512315270935961,
      "grad_norm": 0.09004243463277817,
      "learning_rate": 3.546518867704499e-05,
      "loss": 0.7462,
      "step": 305
    },
    {
      "epoch": 0.7635467980295566,
      "grad_norm": 0.096128910779953,
      "learning_rate": 3.223852109533112e-05,
      "loss": 0.7392,
      "step": 310
    },
    {
      "epoch": 0.7758620689655172,
      "grad_norm": 0.09637811779975891,
      "learning_rate": 2.9137332173554043e-05,
      "loss": 0.7318,
      "step": 315
    },
    {
      "epoch": 0.7881773399014779,
      "grad_norm": 0.09532997012138367,
      "learning_rate": 2.616736459968936e-05,
      "loss": 0.7404,
      "step": 320
    },
    {
      "epoch": 0.8004926108374384,
      "grad_norm": 0.0936516597867012,
      "learning_rate": 2.33341180699841e-05,
      "loss": 0.7556,
      "step": 325
    },
    {
      "epoch": 0.812807881773399,
      "grad_norm": 0.0916033461689949,
      "learning_rate": 2.0642839104785272e-05,
      "loss": 0.7443,
      "step": 330
    },
    {
      "epoch": 0.8251231527093597,
      "grad_norm": 0.09518487006425858,
      "learning_rate": 1.8098511333192024e-05,
      "loss": 0.7335,
      "step": 335
    },
    {
      "epoch": 0.8374384236453202,
      "grad_norm": 0.09961594641208649,
      "learning_rate": 1.570584626452173e-05,
      "loss": 0.7602,
      "step": 340
    },
    {
      "epoch": 0.8497536945812808,
      "grad_norm": 0.09134498238563538,
      "learning_rate": 1.3469274563679402e-05,
      "loss": 0.7456,
      "step": 345
    },
    {
      "epoch": 0.8620689655172413,
      "grad_norm": 0.09200974553823471,
      "learning_rate": 1.1392937846586215e-05,
      "loss": 0.7409,
      "step": 350
    },
    {
      "epoch": 0.874384236453202,
      "grad_norm": 0.09712038189172745,
      "learning_rate": 9.48068101086026e-06,
      "loss": 0.7192,
      "step": 355
    },
    {
      "epoch": 0.8866995073891626,
      "grad_norm": 0.09649989008903503,
      "learning_rate": 7.736045115951251e-06,
      "loss": 0.7327,
      "step": 360
    },
    {
      "epoch": 0.8990147783251231,
      "grad_norm": 0.09973736852407455,
      "learning_rate": 6.16226082591359e-06,
      "loss": 0.7295,
      "step": 365
    },
    {
      "epoch": 0.9113300492610837,
      "grad_norm": 0.09414437413215637,
      "learning_rate": 4.762242426960262e-06,
      "loss": 0.7286,
      "step": 370
    },
    {
      "epoch": 0.9236453201970444,
      "grad_norm": 0.08962981402873993,
      "learning_rate": 3.5385824308756587e-06,
      "loss": 0.7407,
      "step": 375
    },
    {
      "epoch": 0.9359605911330049,
      "grad_norm": 0.09547747671604156,
      "learning_rate": 2.493546774280531e-06,
      "loss": 0.7322,
      "step": 380
    },
    {
      "epoch": 0.9482758620689655,
      "grad_norm": 0.09376917779445648,
      "learning_rate": 1.6290706226390285e-06,
      "loss": 0.7347,
      "step": 385
    },
    {
      "epoch": 0.9605911330049262,
      "grad_norm": 0.09975626319646835,
      "learning_rate": 9.46754786777726e-07,
      "loss": 0.7333,
      "step": 390
    },
    {
      "epoch": 0.9729064039408867,
      "grad_norm": 0.09062483161687851,
      "learning_rate": 4.4786275855247527e-07,
      "loss": 0.7479,
      "step": 395
    },
    {
      "epoch": 0.9852216748768473,
      "grad_norm": 0.09053165465593338,
      "learning_rate": 1.333183711524133e-07,
      "loss": 0.7375,
      "step": 400
    },
    {
      "epoch": 0.9975369458128078,
      "grad_norm": 0.093317411839962,
      "learning_rate": 3.7040883734462683e-09,
      "loss": 0.7398,
      "step": 405
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.9064456224441528,
      "eval_runtime": 996.7111,
      "eval_samples_per_second": 1.159,
      "eval_steps_per_second": 0.073,
      "step": 406
    },
    {
      "epoch": 1.0,
      "step": 406,
      "total_flos": 2.1410486778152878e+18,
      "train_loss": 0.7449803688549643,
      "train_runtime": 152626.4688,
      "train_samples_per_second": 0.681,
      "train_steps_per_second": 0.003
    }
  ],
| "logging_steps": 5, | |
| "max_steps": 406, | |
| "num_input_tokens_seen": 0, | |
| "num_train_epochs": 1, | |
| "save_steps": 25, | |
| "stateful_callbacks": { | |
| "TrainerControl": { | |
| "args": { | |
| "should_epoch_stop": false, | |
| "should_evaluate": false, | |
| "should_log": false, | |
| "should_save": true, | |
| "should_training_stop": false | |
| }, | |
| "attributes": {} | |
| } | |
| }, | |
| "total_flos": 2.1410486778152878e+18, | |
| "train_batch_size": 8, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |