{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9997090485888857,
  "eval_steps": 100,
  "global_step": 859,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.005819028222286878,
      "grad_norm": 4.729677200317383,
      "learning_rate": 1.1627906976744188e-06,
      "loss": 0.8687,
      "mean_token_accuracy": 0.8001353883684503,
      "step": 5
    },
    {
      "epoch": 0.011638056444573755,
      "grad_norm": 3.4830121994018555,
      "learning_rate": 2.3255813953488376e-06,
      "loss": 0.8575,
      "mean_token_accuracy": 0.7992099680535122,
      "step": 10
    },
    {
      "epoch": 0.017457084666860634,
      "grad_norm": 1.5148690938949585,
      "learning_rate": 3.4883720930232564e-06,
      "loss": 0.7863,
      "mean_token_accuracy": 0.8034960847656469,
      "step": 15
    },
    {
      "epoch": 0.02327611288914751,
      "grad_norm": 1.5541874170303345,
      "learning_rate": 4.651162790697675e-06,
      "loss": 0.7052,
      "mean_token_accuracy": 0.813427774762903,
      "step": 20
    },
    {
      "epoch": 0.02909514111143439,
      "grad_norm": 1.0066415071487427,
      "learning_rate": 5.8139534883720935e-06,
      "loss": 0.6611,
      "mean_token_accuracy": 0.8186242834655136,
      "step": 25
    },
    {
      "epoch": 0.03491416933372127,
      "grad_norm": 0.7388224601745605,
      "learning_rate": 6.976744186046513e-06,
      "loss": 0.6075,
      "mean_token_accuracy": 0.8315073685463057,
      "step": 30
    },
    {
      "epoch": 0.04073319755600815,
      "grad_norm": 0.5410726070404053,
      "learning_rate": 8.139534883720931e-06,
      "loss": 0.5962,
      "mean_token_accuracy": 0.832317557757456,
      "step": 35
    },
    {
      "epoch": 0.04655222577829502,
      "grad_norm": 0.53746497631073,
      "learning_rate": 9.30232558139535e-06,
      "loss": 0.5733,
      "mean_token_accuracy": 0.8373400698056097,
      "step": 40
    },
    {
      "epoch": 0.0523712540005819,
      "grad_norm": 0.512347400188446,
      "learning_rate": 1.046511627906977e-05,
      "loss": 0.5627,
      "mean_token_accuracy": 0.8384384618000557,
      "step": 45
    },
    {
      "epoch": 0.05819028222286878,
      "grad_norm": 0.5112928748130798,
      "learning_rate": 1.1627906976744187e-05,
      "loss": 0.5752,
      "mean_token_accuracy": 0.836557222748716,
      "step": 50
    },
    {
      "epoch": 0.06400931044515566,
      "grad_norm": 0.4823078513145447,
      "learning_rate": 1.2790697674418606e-05,
      "loss": 0.5546,
      "mean_token_accuracy": 0.8406492042617113,
      "step": 55
    },
    {
      "epoch": 0.06982833866744254,
      "grad_norm": 0.48803916573524475,
      "learning_rate": 1.3953488372093025e-05,
      "loss": 0.5552,
      "mean_token_accuracy": 0.8404829421801073,
      "step": 60
    },
    {
      "epoch": 0.07564736688972941,
      "grad_norm": 0.5077307224273682,
      "learning_rate": 1.5116279069767443e-05,
      "loss": 0.5537,
      "mean_token_accuracy": 0.8408575111390093,
      "step": 65
    },
    {
      "epoch": 0.0814663951120163,
      "grad_norm": 0.5025640726089478,
      "learning_rate": 1.6279069767441862e-05,
      "loss": 0.5324,
      "mean_token_accuracy": 0.8454815162857129,
      "step": 70
    },
    {
      "epoch": 0.08728542333430317,
      "grad_norm": 0.4802047610282898,
      "learning_rate": 1.744186046511628e-05,
      "loss": 0.5298,
      "mean_token_accuracy": 0.8467988429180437,
      "step": 75
    },
    {
      "epoch": 0.09310445155659004,
      "grad_norm": 0.495412141084671,
      "learning_rate": 1.86046511627907e-05,
      "loss": 0.5332,
      "mean_token_accuracy": 0.847131608146347,
      "step": 80
    },
    {
      "epoch": 0.09892347977887693,
      "grad_norm": 0.4867161512374878,
      "learning_rate": 1.9767441860465116e-05,
      "loss": 0.5315,
      "mean_token_accuracy": 0.8456074647350855,
      "step": 85
    },
    {
      "epoch": 0.1047425080011638,
      "grad_norm": 0.5120564699172974,
      "learning_rate": 1.999867863992634e-05,
      "loss": 0.5419,
      "mean_token_accuracy": 0.8431668032243257,
      "step": 90
    },
    {
      "epoch": 0.11056153622345069,
      "grad_norm": 0.5223878026008606,
      "learning_rate": 1.9993311213101313e-05,
      "loss": 0.5379,
      "mean_token_accuracy": 0.8446533035042771,
      "step": 95
    },
    {
      "epoch": 0.11638056444573756,
      "grad_norm": 0.4827958643436432,
      "learning_rate": 1.998381734913258e-05,
      "loss": 0.528,
      "mean_token_accuracy": 0.8463738754778392,
      "step": 100
    },
    {
      "epoch": 0.12219959266802444,
      "grad_norm": 0.49347934126853943,
      "learning_rate": 1.997020096822557e-05,
      "loss": 0.524,
      "mean_token_accuracy": 0.8469525801009745,
      "step": 105
    },
    {
      "epoch": 0.12801862089031132,
      "grad_norm": 0.4837639033794403,
      "learning_rate": 1.9952467692855043e-05,
      "loss": 0.5411,
      "mean_token_accuracy": 0.8443662513099179,
      "step": 110
    },
    {
      "epoch": 0.13383764911259818,
      "grad_norm": 0.44964534044265747,
      "learning_rate": 1.993062484544341e-05,
      "loss": 0.5122,
      "mean_token_accuracy": 0.8500057242518922,
      "step": 115
    },
    {
      "epoch": 0.13965667733488507,
      "grad_norm": 0.4763283133506775,
      "learning_rate": 1.990468144533722e-05,
      "loss": 0.5467,
      "mean_token_accuracy": 0.842521201317496,
      "step": 120
    },
    {
      "epoch": 0.14547570555717196,
      "grad_norm": 0.4805982708930969,
      "learning_rate": 1.9874648205082847e-05,
      "loss": 0.5267,
      "mean_token_accuracy": 0.8467973214319761,
      "step": 125
    },
    {
      "epoch": 0.15129473377945882,
      "grad_norm": 0.47071903944015503,
      "learning_rate": 1.9840537526003085e-05,
      "loss": 0.534,
      "mean_token_accuracy": 0.8436821750241809,
      "step": 130
    },
    {
      "epoch": 0.1571137620017457,
      "grad_norm": 0.49743327498435974,
      "learning_rate": 1.9802363493076392e-05,
      "loss": 0.5175,
      "mean_token_accuracy": 0.8488135271716413,
      "step": 135
    },
    {
      "epoch": 0.1629327902240326,
      "grad_norm": 0.46099644899368286,
      "learning_rate": 1.9760141869120917e-05,
      "loss": 0.5188,
      "mean_token_accuracy": 0.8494274636540204,
      "step": 140
    },
    {
      "epoch": 0.16875181844631945,
      "grad_norm": 0.43989109992980957,
      "learning_rate": 1.971389008828573e-05,
      "loss": 0.51,
      "mean_token_accuracy": 0.8514118628602676,
      "step": 145
    },
    {
      "epoch": 0.17457084666860634,
      "grad_norm": 0.44857239723205566,
      "learning_rate": 1.9663627248851903e-05,
      "loss": 0.505,
      "mean_token_accuracy": 0.8521678886513273,
      "step": 150
    },
    {
      "epoch": 0.18038987489089323,
      "grad_norm": 0.4434855580329895,
      "learning_rate": 1.9609374105346458e-05,
      "loss": 0.5348,
      "mean_token_accuracy": 0.8445110785073409,
      "step": 155
    },
    {
      "epoch": 0.18620890311318009,
      "grad_norm": 0.45866096019744873,
      "learning_rate": 1.9551153059972397e-05,
      "loss": 0.5387,
      "mean_token_accuracy": 0.8436002910675257,
      "step": 160
    },
    {
      "epoch": 0.19202793133546697,
      "grad_norm": 0.4627978801727295,
      "learning_rate": 1.948898815335841e-05,
      "loss": 0.5164,
      "mean_token_accuracy": 0.848416589531312,
      "step": 165
    },
    {
      "epoch": 0.19784695955775386,
      "grad_norm": 0.4659474790096283,
      "learning_rate": 1.9422905054631996e-05,
      "loss": 0.5129,
      "mean_token_accuracy": 0.8507672546269303,
      "step": 170
    },
    {
      "epoch": 0.20366598778004075,
      "grad_norm": 0.4798222482204437,
      "learning_rate": 1.9352931050820175e-05,
      "loss": 0.5173,
      "mean_token_accuracy": 0.8474816378041764,
      "step": 175
    },
    {
      "epoch": 0.2094850160023276,
      "grad_norm": 0.5056359171867371,
      "learning_rate": 1.9279095035582153e-05,
      "loss": 0.5128,
      "mean_token_accuracy": 0.8497545293493058,
      "step": 180
    },
    {
      "epoch": 0.2153040442246145,
      "grad_norm": 0.41810187697410583,
      "learning_rate": 1.9201427497278518e-05,
      "loss": 0.5105,
      "mean_token_accuracy": 0.8495568220795893,
      "step": 185
    },
    {
      "epoch": 0.22112307244690138,
      "grad_norm": 0.45786747336387634,
      "learning_rate": 1.911996050638202e-05,
      "loss": 0.508,
      "mean_token_accuracy": 0.8502620839044074,
      "step": 190
    },
    {
      "epoch": 0.22694210066918824,
      "grad_norm": 0.4433153569698334,
      "learning_rate": 1.9034727702235023e-05,
      "loss": 0.5251,
      "mean_token_accuracy": 0.8470169206232894,
      "step": 195
    },
    {
      "epoch": 0.23276112889147513,
      "grad_norm": 0.43002447485923767,
      "learning_rate": 1.8945764279159144e-05,
      "loss": 0.51,
      "mean_token_accuracy": 0.8515899255383441,
      "step": 200
    },
    {
      "epoch": 0.23858015711376201,
      "grad_norm": 0.43477922677993774,
      "learning_rate": 1.8853106971922833e-05,
      "loss": 0.5162,
      "mean_token_accuracy": 0.8489151326449708,
      "step": 205
    },
    {
      "epoch": 0.24439918533604887,
      "grad_norm": 0.4386807978153229,
      "learning_rate": 1.8756794040572834e-05,
      "loss": 0.528,
      "mean_token_accuracy": 0.8479445785479058,
      "step": 210
    },
    {
      "epoch": 0.25021821355833573,
      "grad_norm": 0.43842801451683044,
      "learning_rate": 1.8656865254635877e-05,
      "loss": 0.5007,
      "mean_token_accuracy": 0.852635679878258,
      "step": 215
    },
    {
      "epoch": 0.25603724178062265,
      "grad_norm": 0.46626922488212585,
      "learning_rate": 1.8553361876697025e-05,
      "loss": 0.5274,
      "mean_token_accuracy": 0.8479028685155747,
      "step": 220
    },
    {
      "epoch": 0.2618562700029095,
      "grad_norm": 0.4394637644290924,
      "learning_rate": 1.8446326645361542e-05,
      "loss": 0.5113,
      "mean_token_accuracy": 0.8496161991247855,
      "step": 225
    },
    {
      "epoch": 0.26767529822519637,
      "grad_norm": 0.46903783082962036,
      "learning_rate": 1.8335803757607274e-05,
      "loss": 0.5042,
      "mean_token_accuracy": 0.851652269825092,
      "step": 230
    },
    {
      "epoch": 0.2734943264474833,
      "grad_norm": 0.4322178363800049,
      "learning_rate": 1.822183885053483e-05,
      "loss": 0.5254,
      "mean_token_accuracy": 0.8445282817492037,
      "step": 235
    },
    {
      "epoch": 0.27931335466977014,
      "grad_norm": 0.4331161677837372,
      "learning_rate": 1.8104478982523117e-05,
      "loss": 0.5043,
      "mean_token_accuracy": 0.8515775238870337,
      "step": 240
    },
    {
      "epoch": 0.285132382892057,
      "grad_norm": 0.4396701753139496,
      "learning_rate": 1.7983772613798006e-05,
      "loss": 0.5003,
      "mean_token_accuracy": 0.8519097787097343,
      "step": 245
    },
    {
      "epoch": 0.2909514111143439,
      "grad_norm": 0.4399448335170746,
      "learning_rate": 1.7859769586422122e-05,
      "loss": 0.495,
      "mean_token_accuracy": 0.8534961898688037,
      "step": 250
    },
    {
      "epoch": 0.2967704393366308,
      "grad_norm": 0.4380975067615509,
      "learning_rate": 1.773252110371409e-05,
      "loss": 0.5117,
      "mean_token_accuracy": 0.849924917367521,
      "step": 255
    },
    {
      "epoch": 0.30258946755891764,
      "grad_norm": 0.4397619962692261,
      "learning_rate": 1.7602079709105644e-05,
      "loss": 0.4925,
      "mean_token_accuracy": 0.8539092350587474,
      "step": 260
    },
    {
      "epoch": 0.30840849578120455,
      "grad_norm": 0.4179295003414154,
      "learning_rate": 1.7468499264445405e-05,
      "loss": 0.4958,
      "mean_token_accuracy": 0.8536148051263105,
      "step": 265
    },
    {
      "epoch": 0.3142275240034914,
      "grad_norm": 0.4244683086872101,
      "learning_rate": 1.733183492775825e-05,
      "loss": 0.5052,
      "mean_token_accuracy": 0.8512700493372496,
      "step": 270
    },
    {
      "epoch": 0.32004655222577827,
      "grad_norm": 0.44008669257164,
      "learning_rate": 1.7192143130469466e-05,
      "loss": 0.5043,
      "mean_token_accuracy": 0.8509524474133208,
      "step": 275
    },
    {
      "epoch": 0.3258655804480652,
      "grad_norm": 0.42162829637527466,
      "learning_rate": 1.7049481554103107e-05,
      "loss": 0.5036,
      "mean_token_accuracy": 0.8519439016703739,
      "step": 280
    },
    {
      "epoch": 0.33168460867035204,
      "grad_norm": 0.44394752383232117,
      "learning_rate": 1.690390910646411e-05,
      "loss": 0.4962,
      "mean_token_accuracy": 0.8529925945929515,
      "step": 285
    },
    {
      "epoch": 0.3375036368926389,
      "grad_norm": 0.43867236375808716,
      "learning_rate": 1.6755485897314122e-05,
      "loss": 0.4986,
      "mean_token_accuracy": 0.8537370793513223,
      "step": 290
    },
    {
      "epoch": 0.3433226651149258,
      "grad_norm": 0.4371063709259033,
      "learning_rate": 1.6604273213550957e-05,
      "loss": 0.4994,
      "mean_token_accuracy": 0.853144893346005,
      "step": 295
    },
    {
      "epoch": 0.3491416933372127,
      "grad_norm": 0.4125002920627594,
      "learning_rate": 1.6450333493902046e-05,
      "loss": 0.5145,
      "mean_token_accuracy": 0.8495829518753062,
      "step": 300
    },
    {
      "epoch": 0.35496072155949954,
      "grad_norm": 0.422319233417511,
      "learning_rate": 1.6293730303142218e-05,
      "loss": 0.5098,
      "mean_token_accuracy": 0.8512014355945091,
      "step": 305
    },
    {
      "epoch": 0.36077974978178645,
      "grad_norm": 0.4452224671840668,
      "learning_rate": 1.6134528305846537e-05,
      "loss": 0.4868,
      "mean_token_accuracy": 0.856624218400263,
      "step": 310
    },
    {
      "epoch": 0.3665987780040733,
      "grad_norm": 0.4240838885307312,
      "learning_rate": 1.5972793239689038e-05,
      "loss": 0.4885,
      "mean_token_accuracy": 0.8560715988512444,
      "step": 315
    },
    {
      "epoch": 0.37241780622636017,
      "grad_norm": 0.4351670742034912,
      "learning_rate": 1.5808591888298314e-05,
      "loss": 0.4949,
      "mean_token_accuracy": 0.8541411151521825,
      "step": 320
    },
    {
      "epoch": 0.3782368344486471,
      "grad_norm": 0.41906318068504333,
      "learning_rate": 1.5641992053681213e-05,
      "loss": 0.5087,
      "mean_token_accuracy": 0.850579562045576,
      "step": 325
    },
    {
      "epoch": 0.38405586267093395,
      "grad_norm": 0.42045196890830994,
      "learning_rate": 1.5473062528226082e-05,
      "loss": 0.4853,
      "mean_token_accuracy": 0.8547947922524161,
      "step": 330
    },
    {
      "epoch": 0.3898748908932208,
      "grad_norm": 0.43161919713020325,
      "learning_rate": 1.530187306629697e-05,
      "loss": 0.5078,
      "mean_token_accuracy": 0.851508238713485,
      "step": 335
    },
    {
      "epoch": 0.3956939191155077,
      "grad_norm": 0.4057161509990692,
      "learning_rate": 1.5128494355430698e-05,
      "loss": 0.4848,
      "mean_token_accuracy": 0.8560923491214802,
      "step": 340
    },
    {
      "epoch": 0.4015129473377946,
      "grad_norm": 0.43893197178840637,
      "learning_rate": 1.4952997987148554e-05,
      "loss": 0.5114,
      "mean_token_accuracy": 0.849725545842043,
      "step": 345
    },
    {
      "epoch": 0.4073319755600815,
      "grad_norm": 0.432678759098053,
      "learning_rate": 1.4775456427394732e-05,
      "loss": 0.5008,
      "mean_token_accuracy": 0.8532302583194186,
      "step": 350
    },
    {
      "epoch": 0.41315100378236835,
      "grad_norm": 0.40871691703796387,
      "learning_rate": 1.4595942986613696e-05,
      "loss": 0.4984,
      "mean_token_accuracy": 0.8532044769095325,
      "step": 355
    },
    {
      "epoch": 0.4189700320046552,
      "grad_norm": 0.43299028277397156,
      "learning_rate": 1.4414531789478841e-05,
      "loss": 0.5092,
      "mean_token_accuracy": 0.8504414872190281,
      "step": 360
    },
    {
      "epoch": 0.42478906022694213,
      "grad_norm": 0.41469907760620117,
      "learning_rate": 1.423129774428495e-05,
      "loss": 0.496,
      "mean_token_accuracy": 0.8539978662641772,
      "step": 365
    },
    {
      "epoch": 0.430608088449229,
      "grad_norm": 0.44283366203308105,
      "learning_rate": 1.4046316512017044e-05,
      "loss": 0.4887,
      "mean_token_accuracy": 0.8548189689797085,
      "step": 370
    },
    {
      "epoch": 0.43642711667151585,
      "grad_norm": 0.43634092807769775,
      "learning_rate": 1.3859664475108483e-05,
      "loss": 0.4949,
      "mean_token_accuracy": 0.8529859121625627,
      "step": 375
    },
    {
      "epoch": 0.44224614489380276,
      "grad_norm": 0.41822701692581177,
      "learning_rate": 1.3671418705901116e-05,
      "loss": 0.5025,
      "mean_token_accuracy": 0.8515156968159479,
      "step": 380
    },
    {
      "epoch": 0.4480651731160896,
      "grad_norm": 0.42008936405181885,
      "learning_rate": 1.3481656934820588e-05,
      "loss": 0.4957,
      "mean_token_accuracy": 0.8534913304740183,
      "step": 385
    },
    {
      "epoch": 0.4538842013383765,
      "grad_norm": 0.4045666754245758,
      "learning_rate": 1.32904575182799e-05,
      "loss": 0.4837,
      "mean_token_accuracy": 0.8559156045618721,
      "step": 390
    },
    {
      "epoch": 0.4597032295606634,
      "grad_norm": 0.4344455301761627,
      "learning_rate": 1.309789940632448e-05,
      "loss": 0.4863,
      "mean_token_accuracy": 0.8565342245889443,
      "step": 395
    },
    {
      "epoch": 0.46552225778295026,
      "grad_norm": 0.4040561616420746,
      "learning_rate": 1.2904062110032145e-05,
      "loss": 0.4888,
      "mean_token_accuracy": 0.8559227694031748,
      "step": 400
    },
    {
      "epoch": 0.4713412860052371,
      "grad_norm": 0.41261550784111023,
      "learning_rate": 1.270902566868139e-05,
      "loss": 0.4999,
      "mean_token_accuracy": 0.8528450147039587,
      "step": 405
    },
    {
      "epoch": 0.47716031422752403,
      "grad_norm": 0.41591835021972656,
      "learning_rate": 1.2512870616701571e-05,
      "loss": 0.4858,
      "mean_token_accuracy": 0.8563491876605844,
      "step": 410
    },
    {
      "epoch": 0.4829793424498109,
      "grad_norm": 0.4135805368423462,
      "learning_rate": 1.2315677950418645e-05,
      "loss": 0.491,
      "mean_token_accuracy": 0.8547786818046786,
      "step": 415
    },
    {
      "epoch": 0.48879837067209775,
      "grad_norm": 0.42353108525276184,
      "learning_rate": 1.2117529094610177e-05,
      "loss": 0.4889,
      "mean_token_accuracy": 0.8551664301669952,
      "step": 420
    },
    {
      "epoch": 0.49461739889438466,
      "grad_norm": 0.4162733554840088,
      "learning_rate": 1.1918505868883414e-05,
      "loss": 0.4838,
      "mean_token_accuracy": 0.856896010365834,
      "step": 425
    },
    {
      "epoch": 0.5004364271166715,
      "grad_norm": 0.41785794496536255,
      "learning_rate": 1.1718690453890356e-05,
      "loss": 0.4917,
      "mean_token_accuracy": 0.8560505977625705,
      "step": 430
    },
    {
      "epoch": 0.5062554553389584,
      "grad_norm": 0.44993114471435547,
      "learning_rate": 1.1518165357393716e-05,
      "loss": 0.5001,
      "mean_token_accuracy": 0.852639218024868,
      "step": 435
    },
    {
      "epoch": 0.5120744835612453,
      "grad_norm": 0.41796672344207764,
      "learning_rate": 1.1317013380197832e-05,
      "loss": 0.4823,
      "mean_token_accuracy": 0.8560310313992504,
      "step": 440
    },
    {
      "epoch": 0.5178935117835322,
      "grad_norm": 0.44025880098342896,
      "learning_rate": 1.111531758195856e-05,
      "loss": 0.48,
      "mean_token_accuracy": 0.8575471706652287,
      "step": 445
    },
    {
      "epoch": 0.523712540005819,
      "grad_norm": 0.4278118312358856,
      "learning_rate": 1.091316124688629e-05,
      "loss": 0.4971,
      "mean_token_accuracy": 0.852513281695311,
      "step": 450
    },
    {
      "epoch": 0.5295315682281059,
      "grad_norm": 0.42229601740837097,
      "learning_rate": 1.0710627849356246e-05,
      "loss": 0.4773,
      "mean_token_accuracy": 0.8587941342459156,
      "step": 455
    },
    {
      "epoch": 0.5353505964503927,
      "grad_norm": 0.4171890318393707,
      "learning_rate": 1.0507801019440235e-05,
      "loss": 0.4776,
      "mean_token_accuracy": 0.8578564343102109,
      "step": 460
    },
    {
      "epoch": 0.5411696246726797,
      "grad_norm": 0.41802269220352173,
      "learning_rate": 1.0304764508374152e-05,
      "loss": 0.4837,
      "mean_token_accuracy": 0.8558379914367457,
      "step": 465
    },
    {
      "epoch": 0.5469886528949666,
      "grad_norm": 0.43975040316581726,
      "learning_rate": 1.0101602153975398e-05,
      "loss": 0.4925,
      "mean_token_accuracy": 0.8547882250775738,
      "step": 470
    },
    {
      "epoch": 0.5528076811172534,
      "grad_norm": 0.3969772160053253,
      "learning_rate": 9.898397846024604e-06,
      "loss": 0.4725,
      "mean_token_accuracy": 0.8595373324490415,
      "step": 475
    },
    {
      "epoch": 0.5586267093395403,
      "grad_norm": 0.41007471084594727,
      "learning_rate": 9.69523549162585e-06,
      "loss": 0.4851,
      "mean_token_accuracy": 0.855284327692498,
      "step": 480
    },
    {
      "epoch": 0.5644457375618271,
      "grad_norm": 0.4255843758583069,
      "learning_rate": 9.492198980559766e-06,
      "loss": 0.4905,
      "mean_token_accuracy": 0.8558610098037495,
      "step": 485
    },
    {
      "epoch": 0.570264765784114,
      "grad_norm": 0.41996172070503235,
      "learning_rate": 9.289372150643759e-06,
      "loss": 0.4882,
      "mean_token_accuracy": 0.8572012931662899,
      "step": 490
    },
    {
      "epoch": 0.576083794006401,
      "grad_norm": 0.4162617623806,
      "learning_rate": 9.08683875311371e-06,
      "loss": 0.4932,
      "mean_token_accuracy": 0.8533011128757575,
      "step": 495
    },
    {
      "epoch": 0.5819028222286878,
      "grad_norm": 0.4218461811542511,
      "learning_rate": 8.884682418041443e-06,
      "loss": 0.4795,
      "mean_token_accuracy": 0.8570308869685629,
      "step": 500
    },
    {
      "epoch": 0.5877218504509747,
      "grad_norm": 0.41105833649635315,
      "learning_rate": 8.682986619802171e-06,
      "loss": 0.489,
      "mean_token_accuracy": 0.8549116723561353,
      "step": 505
    },
    {
      "epoch": 0.5935408786732616,
      "grad_norm": 0.3960190713405609,
      "learning_rate": 8.481834642606287e-06,
      "loss": 0.4804,
      "mean_token_accuracy": 0.8588179330924269,
      "step": 510
    },
    {
      "epoch": 0.5993599068955484,
      "grad_norm": 0.4174754321575165,
      "learning_rate": 8.281309546109649e-06,
      "loss": 0.4904,
      "mean_token_accuracy": 0.8553032555444215,
      "step": 515
    },
    {
      "epoch": 0.6051789351178353,
      "grad_norm": 0.41682904958724976,
      "learning_rate": 8.081494131116588e-06,
      "loss": 0.4797,
      "mean_token_accuracy": 0.85778211568845,
      "step": 520
    },
    {
      "epoch": 0.6109979633401222,
      "grad_norm": 0.39849600195884705,
      "learning_rate": 7.882470905389827e-06,
      "loss": 0.4983,
      "mean_token_accuracy": 0.8528337732738089,
      "step": 525
    },
    {
      "epoch": 0.6168169915624091,
      "grad_norm": 0.4062061309814453,
      "learning_rate": 7.684322049581359e-06,
      "loss": 0.4918,
      "mean_token_accuracy": 0.8539959811143045,
      "step": 530
    },
    {
      "epoch": 0.622636019784696,
      "grad_norm": 0.38275521993637085,
      "learning_rate": 7.487129383298433e-06,
      "loss": 0.4741,
      "mean_token_accuracy": 0.8592997100935363,
      "step": 535
    },
    {
      "epoch": 0.6284550480069828,
      "grad_norm": 0.3841171860694885,
      "learning_rate": 7.29097433131861e-06,
      "loss": 0.4721,
      "mean_token_accuracy": 0.859289038573644,
      "step": 540
    },
    {
      "epoch": 0.6342740762292697,
      "grad_norm": 0.41029033064842224,
      "learning_rate": 7.095937889967854e-06,
      "loss": 0.4688,
      "mean_token_accuracy": 0.8600678009977978,
      "step": 545
    },
    {
      "epoch": 0.6400931044515565,
      "grad_norm": 0.4171801209449768,
      "learning_rate": 6.90210059367552e-06,
      "loss": 0.4811,
      "mean_token_accuracy": 0.8564380862600244,
      "step": 550
    },
    {
      "epoch": 0.6459121326738435,
      "grad_norm": 0.41676321625709534,
      "learning_rate": 6.7095424817201035e-06,
      "loss": 0.4775,
      "mean_token_accuracy": 0.8579857639838829,
      "step": 555
    },
    {
      "epoch": 0.6517311608961304,
      "grad_norm": 0.40989112854003906,
      "learning_rate": 6.518343065179414e-06,
      "loss": 0.4916,
      "mean_token_accuracy": 0.8554774335695117,
      "step": 560
    },
    {
      "epoch": 0.6575501891184172,
      "grad_norm": 0.41312697529792786,
      "learning_rate": 6.328581294098887e-06,
      "loss": 0.4841,
      "mean_token_accuracy": 0.8565219937605406,
      "step": 565
    },
    {
      "epoch": 0.6633692173407041,
      "grad_norm": 0.40967249870300293,
      "learning_rate": 6.140335524891518e-06,
      "loss": 0.4806,
      "mean_token_accuracy": 0.8576536743376204,
      "step": 570
    },
    {
      "epoch": 0.669188245562991,
      "grad_norm": 0.39644986391067505,
      "learning_rate": 5.953683487982958e-06,
      "loss": 0.4846,
      "mean_token_accuracy": 0.8560946241771303,
      "step": 575
    },
    {
      "epoch": 0.6750072737852778,
      "grad_norm": 0.40402865409851074,
      "learning_rate": 5.768702255715053e-06,
      "loss": 0.4871,
      "mean_token_accuracy": 0.8555429979464371,
      "step": 580
    },
    {
      "epoch": 0.6808263020075648,
      "grad_norm": 0.4249478578567505,
      "learning_rate": 5.58546821052116e-06,
      "loss": 0.4776,
      "mean_token_accuracy": 0.8574913247699447,
      "step": 585
    },
    {
      "epoch": 0.6866453302298516,
      "grad_norm": 0.41155800223350525,
      "learning_rate": 5.404057013386306e-06,
      "loss": 0.4686,
      "mean_token_accuracy": 0.8604994771907524,
      "step": 590
    },
    {
      "epoch": 0.6924643584521385,
      "grad_norm": 0.4111210107803345,
      "learning_rate": 5.224543572605272e-06,
      "loss": 0.4888,
      "mean_token_accuracy": 0.8553543295438057,
      "step": 595
    },
    {
      "epoch": 0.6982833866744254,
      "grad_norm": 0.40666860342025757,
      "learning_rate": 5.047002012851447e-06,
      "loss": 0.4915,
      "mean_token_accuracy": 0.8555927078584643,
      "step": 600
    },
    {
      "epoch": 0.7041024148967122,
      "grad_norm": 0.41017675399780273,
      "learning_rate": 4.871505644569303e-06,
      "loss": 0.4837,
      "mean_token_accuracy": 0.8566939587596106,
      "step": 605
    },
    {
      "epoch": 0.7099214431189991,
      "grad_norm": 0.4147651195526123,
      "learning_rate": 4.698126933703031e-06,
      "loss": 0.483,
      "mean_token_accuracy": 0.8570666700504006,
      "step": 610
    },
    {
      "epoch": 0.715740471341286,
      "grad_norm": 0.40449991822242737,
      "learning_rate": 4.526937471773919e-06,
      "loss": 0.4689,
      "mean_token_accuracy": 0.860008777707927,
      "step": 615
    },
    {
      "epoch": 0.7215594995635729,
      "grad_norm": 0.40807604789733887,
      "learning_rate": 4.35800794631879e-06,
      "loss": 0.4826,
      "mean_token_accuracy": 0.8565822278909835,
      "step": 620
    },
    {
      "epoch": 0.7273785277858598,
      "grad_norm": 0.39830535650253296,
      "learning_rate": 4.191408111701693e-06,
      "loss": 0.4731,
      "mean_token_accuracy": 0.8593161088649728,
      "step": 625
    },
    {
      "epoch": 0.7331975560081466,
      "grad_norm": 0.3948498070240021,
      "learning_rate": 4.0272067603109646e-06,
      "loss": 0.4676,
      "mean_token_accuracy": 0.8615339302083884,
      "step": 630
    },
    {
      "epoch": 0.7390165842304335,
      "grad_norm": 0.3837417960166931,
      "learning_rate": 3.865471694153465e-06,
      "loss": 0.4638,
      "mean_token_accuracy": 0.8620474819313568,
      "step": 635
    },
    {
      "epoch": 0.7448356124527203,
      "grad_norm": 0.4020540118217468,
      "learning_rate": 3.706269696857785e-06,
      "loss": 0.4794,
      "mean_token_accuracy": 0.8580428959872017,
      "step": 640
    },
    {
      "epoch": 0.7506546406750073,
      "grad_norm": 0.3988768756389618,
      "learning_rate": 3.5496665060979563e-06,
      "loss": 0.4791,
      "mean_token_accuracy": 0.857951603270609,
      "step": 645
    },
    {
      "epoch": 0.7564736688972942,
      "grad_norm": 0.38841554522514343,
      "learning_rate": 3.395726786449044e-06,
      "loss": 0.4795,
      "mean_token_accuracy": 0.8580783028985468,
      "step": 650
    },
    {
      "epoch": 0.762292697119581,
      "grad_norm": 0.3969273865222931,
      "learning_rate": 3.244514102685881e-06,
      "loss": 0.4752,
      "mean_token_accuracy": 0.8586160745112773,
      "step": 655
    },
    {
      "epoch": 0.7681117253418679,
      "grad_norm": 0.40074124932289124,
      "learning_rate": 3.0960908935358904e-06,
      "loss": 0.4881,
      "mean_token_accuracy": 0.855172939595195,
      "step": 660
    },
    {
      "epoch": 0.7739307535641547,
      "grad_norm": 0.4185120463371277,
      "learning_rate": 2.9505184458968925e-06,
      "loss": 0.4689,
      "mean_token_accuracy": 0.8608836993671453,
      "step": 665
    },
    {
      "epoch": 0.7797497817864416,
      "grad_norm": 0.42662736773490906,
      "learning_rate": 2.807856869530534e-06,
      "loss": 0.4906,
      "mean_token_accuracy": 0.854303324792995,
      "step": 670
    },
    {
      "epoch": 0.7855688100087286,
      "grad_norm": 0.3838253915309906,
      "learning_rate": 2.6681650722417517e-06,
      "loss": 0.4739,
      "mean_token_accuracy": 0.859679923783397,
      "step": 675
    },
    {
      "epoch": 0.7913878382310154,
      "grad_norm": 0.4014156460762024,
      "learning_rate": 2.5315007355545983e-06,
      "loss": 0.4647,
      "mean_token_accuracy": 0.8613118683252459,
      "step": 680
    },
    {
      "epoch": 0.7972068664533023,
      "grad_norm": 0.41356369853019714,
      "learning_rate": 2.3979202908943576e-06,
      "loss": 0.4647,
      "mean_token_accuracy": 0.8605610072930571,
      "step": 685
    },
    {
      "epoch": 0.8030258946755892,
      "grad_norm": 0.4056306481361389,
      "learning_rate": 2.267478896285913e-06,
      "loss": 0.48,
      "mean_token_accuracy": 0.8584646023840529,
      "step": 690
    },
    {
      "epoch": 0.808844922897876,
      "grad_norm": 0.3952696919441223,
      "learning_rate": 2.140230413577882e-06,
      "loss": 0.4773,
      "mean_token_accuracy": 0.8575413077069948,
      "step": 695
    },
    {
      "epoch": 0.814663951120163,
      "grad_norm": 0.40555790066719055,
      "learning_rate": 2.0162273862019965e-06,
      "loss": 0.4822,
      "mean_token_accuracy": 0.8564873526871629,
      "step": 700
    },
    {
      "epoch": 0.8204829793424498,
      "grad_norm": 0.39178261160850525,
      "learning_rate": 1.8955210174768857e-06,
      "loss": 0.4665,
      "mean_token_accuracy": 0.8613343347514766,
      "step": 705
    },
    {
      "epoch": 0.8263020075647367,
      "grad_norm": 0.39173874258995056,
      "learning_rate": 1.7781611494651729e-06,
      "loss": 0.4736,
      "mean_token_accuracy": 0.8586594796262844,
      "step": 710
    },
    {
      "epoch": 0.8321210357870236,
      "grad_norm": 0.3980768620967865,
      "learning_rate": 1.6641962423927294e-06,
      "loss": 0.4742,
      "mean_token_accuracy": 0.8592263804363405,
      "step": 715
    },
    {
      "epoch": 0.8379400640093104,
      "grad_norm": 0.3935108780860901,
      "learning_rate": 1.5536733546384574e-06,
      "loss": 0.4861,
      "mean_token_accuracy": 0.8557533487541985,
      "step": 720
    },
    {
      "epoch": 0.8437590922315973,
      "grad_norm": 0.41904276609420776,
      "learning_rate": 1.4466381233029781e-06,
      "loss": 0.4806,
      "mean_token_accuracy": 0.8569492588154313,
      "step": 725
    },
    {
      "epoch": 0.8495781204538843,
      "grad_norm": 0.3956993818283081,
      "learning_rate": 1.3431347453641253e-06,
      "loss": 0.4772,
      "mean_token_accuracy": 0.8591885796899528,
      "step": 730
    },
    {
      "epoch": 0.8553971486761711,
      "grad_norm": 0.413522869348526,
      "learning_rate": 1.2432059594271684e-06,
      "loss": 0.4737,
      "mean_token_accuracy": 0.8588345902893689,
      "step": 735
    },
    {
      "epoch": 0.861216176898458,
      "grad_norm": 0.40007099509239197,
      "learning_rate": 1.1468930280771728e-06,
      "loss": 0.4713,
      "mean_token_accuracy": 0.8596527738293955,
      "step": 740
    },
    {
      "epoch": 0.8670352051207448,
      "grad_norm": 0.4135393798351288,
      "learning_rate": 1.0542357208408572e-06,
      "loss": 0.5017,
      "mean_token_accuracy": 0.8527636202781883,
      "step": 745
    },
    {
      "epoch": 0.8728542333430317,
      "grad_norm": 0.404310941696167,
      "learning_rate": 9.652722977649797e-07,
      "loss": 0.453,
      "mean_token_accuracy": 0.8641921613151137,
      "step": 750
    },
    {
      "epoch": 0.8786732615653186,
      "grad_norm": 0.3922938406467438,
      "learning_rate": 8.800394936179812e-07,
      "loss": 0.4654,
      "mean_token_accuracy": 0.8607802659264431,
      "step": 755
    },
    {
      "epoch": 0.8844922897876055,
      "grad_norm": 0.39852988719940186,
      "learning_rate": 7.985725027214841e-07,
      "loss": 0.4657,
      "mean_token_accuracy": 0.8610265562422169,
      "step": 760
    },
    {
      "epoch": 0.8903113180098924,
      "grad_norm": 0.4099702835083008,
      "learning_rate": 7.209049644178489e-07,
      "loss": 0.4645,
      "mean_token_accuracy": 0.8611578725473782,
      "step": 765
    },
    {
      "epoch": 0.8961303462321792,
      "grad_norm": 0.41344019770622253,
      "learning_rate": 6.470689491798232e-07,
      "loss": 0.4709,
      "mean_token_accuracy": 0.8593041269818004,
      "step": 770
    },
    {
      "epoch": 0.9019493744544661,
      "grad_norm": 0.38776156306266785,
      "learning_rate": 5.770949453680064e-07,
      "loss": 0.4719,
      "mean_token_accuracy": 0.8599895481770243,
      "step": 775
    },
    {
      "epoch": 0.907768402676753,
      "grad_norm": 0.39070042967796326,
      "learning_rate": 5.110118466415903e-07,
      "loss": 0.485,
      "mean_token_accuracy": 0.8541616461336453,
      "step": 780
    },
    {
      "epoch": 0.9135874308990398,
      "grad_norm": 0.39178237318992615,
      "learning_rate": 4.4884694002760297e-07,
      "loss": 0.4733,
      "mean_token_accuracy": 0.8594136677203281,
      "step": 785
    },
    {
      "epoch": 0.9194064591213268,
      "grad_norm": 0.3782612383365631,
      "learning_rate": 3.906258946535446e-07,
      "loss": 0.4762,
      "mean_token_accuracy": 0.8573084054617641,
      "step": 790
    },
    {
      "epoch": 0.9252254873436137,
      "grad_norm": 0.39697375893592834,
      "learning_rate": 3.363727511480974e-07,
      "loss": 0.4765,
      "mean_token_accuracy": 0.8577650392622143,
      "step": 795
    },
    {
      "epoch": 0.9310445155659005,
      "grad_norm": 0.3849934935569763,
      "learning_rate": 2.861099117142718e-07,
      "loss": 0.461,
      "mean_token_accuracy": 0.8616937448944618,
      "step": 800
    },
    {
      "epoch": 0.9368635437881874,
      "grad_norm": 0.4121275842189789,
      "learning_rate": 2.398581308790848e-07,
      "loss": 0.4827,
      "mean_token_accuracy": 0.857346642587542,
      "step": 805
    },
    {
      "epoch": 0.9426825720104742,
      "grad_norm": 0.39553239941596985,
      "learning_rate": 1.976365069236108e-07,
      "loss": 0.475,
      "mean_token_accuracy": 0.8578932364447682,
      "step": 810
    },
    {
      "epoch": 0.9485016002327611,
      "grad_norm": 0.3881802558898926,
      "learning_rate": 1.5946247399691638e-07,
      "loss": 0.4706,
      "mean_token_accuracy": 0.8601075021722997,
      "step": 815
    },
    {
      "epoch": 0.9543206284550481,
      "grad_norm": 0.3950815498828888,
      "learning_rate": 1.2535179491715453e-07,
      "loss": 0.4666,
      "mean_token_accuracy": 0.8601555329046816,
      "step": 820
    },
    {
      "epoch": 0.9601396566773349,
      "grad_norm": 0.4037560522556305,
      "learning_rate": 9.531855466278218e-08,
      "loss": 0.4636,
      "mean_token_accuracy": 0.8610238882101969,
      "step": 825
    },
    {
      "epoch": 0.9659586848996218,
      "grad_norm": 0.3907164931297302,
      "learning_rate": 6.937515455659128e-08,
      "loss": 0.4686,
      "mean_token_accuracy": 0.8608224325086367,
      "step": 830
    },
    {
      "epoch": 0.9717777131219086,
      "grad_norm": 0.41244828701019287,
      "learning_rate": 4.753230714496071e-08,
      "loss": 0.4832,
      "mean_token_accuracy": 0.8551789295502588,
      "step": 835
    },
    {
      "epoch": 0.9775967413441955,
      "grad_norm": 0.39467689394950867,
      "learning_rate": 2.9799031774427888e-08,
      "loss": 0.4877,
      "mean_token_accuracy": 0.8558349555197562,
      "step": 840
    },
    {
      "epoch": 0.9834157695664824,
      "grad_norm": 0.39321061968803406,
      "learning_rate": 1.6182650867421213e-08,
      "loss": 0.4634,
      "mean_token_accuracy": 0.8616077323763098,
      "step": 845
    },
    {
      "epoch": 0.9892347977887693,
      "grad_norm": 0.4110186696052551,
      "learning_rate": 6.688786898688682e-09,
      "loss": 0.4885,
      "mean_token_accuracy": 0.8555144290494805,
      "step": 850
    },
    {
      "epoch": 0.9950538260110562,
      "grad_norm": 0.388217955827713,
      "learning_rate": 1.321360073662792e-09,
      "loss": 0.4607,
      "mean_token_accuracy": 0.8616522037739971,
      "step": 855
    },
    {
      "epoch": 0.9997090485888857,
      "mean_token_accuracy": 0.8550544311901966,
      "step": 859,
      "total_flos": 42703635087360.0,
      "train_loss": 0.504338482067388,
      "train_runtime": 61692.734,
      "train_samples_per_second": 0.446,
      "train_steps_per_second": 0.014
    }
  ],
  "logging_steps": 5,
  "max_steps": 859,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 42703635087360.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}