{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 49419,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.010117566118294584,
      "grad_norm": 1.3671875,
      "learning_rate": 6.058276001618777e-05,
      "loss": 11.5303,
      "mean_token_accuracy": 0.095332110256124,
      "num_tokens": 12016855.0,
      "step": 500
    },
    {
      "epoch": 0.020235132236589168,
      "grad_norm": 1.1953125,
      "learning_rate": 0.00012128692836908133,
      "loss": 6.812,
      "mean_token_accuracy": 0.21804794558882715,
      "num_tokens": 24034987.0,
      "step": 1000
    },
    {
      "epoch": 0.030352698354883748,
      "grad_norm": 0.80859375,
      "learning_rate": 0.00018199109672197488,
      "loss": 5.7925,
      "mean_token_accuracy": 0.27103758984804155,
      "num_tokens": 36048578.0,
      "step": 1500
    },
    {
      "epoch": 0.040470264473178336,
      "grad_norm": 0.703125,
      "learning_rate": 0.00024269526507486846,
      "loss": 5.4354,
      "mean_token_accuracy": 0.2889174829721451,
      "num_tokens": 48062842.0,
      "step": 2000
    },
    {
      "epoch": 0.05058783059147291,
      "grad_norm": 0.5546875,
      "learning_rate": 0.0002999997367049147,
      "loss": 5.2677,
      "mean_token_accuracy": 0.29825611528754237,
      "num_tokens": 60066067.0,
      "step": 2500
    },
    {
      "epoch": 0.060705396709767497,
      "grad_norm": 0.421875,
      "learning_rate": 0.000299906384128946,
      "loss": 5.029,
      "mean_token_accuracy": 0.307963959723711,
      "num_tokens": 72080788.0,
      "step": 3000
    },
    {
      "epoch": 0.07082296282806208,
      "grad_norm": 0.404296875,
      "learning_rate": 0.00029964523417265877,
      "loss": 4.8824,
      "mean_token_accuracy": 0.3160543051958084,
      "num_tokens": 84106466.0,
      "step": 3500
    },
    {
      "epoch": 0.08094052894635667,
      "grad_norm": 0.4453125,
      "learning_rate": 0.00029921657915368024,
      "loss": 4.7702,
      "mean_token_accuracy": 0.3228713305592537,
      "num_tokens": 96125107.0,
      "step": 4000
    },
    {
      "epoch": 0.09105809506465125,
      "grad_norm": 0.369140625,
      "learning_rate": 0.0002986208988860602,
      "loss": 4.6848,
      "mean_token_accuracy": 0.32875489246845246,
      "num_tokens": 108141921.0,
      "step": 4500
    },
    {
      "epoch": 0.10117566118294583,
      "grad_norm": 0.396484375,
      "learning_rate": 0.0002978588601431918,
      "loss": 4.6171,
      "mean_token_accuracy": 0.33351320880651475,
      "num_tokens": 120150578.0,
      "step": 5000
    },
    {
      "epoch": 0.11129322730124042,
      "grad_norm": 0.458984375,
      "learning_rate": 0.0002969313159114605,
      "loss": 4.5504,
      "mean_token_accuracy": 0.3377925636172295,
      "num_tokens": 132163036.0,
      "step": 5500
    },
    {
      "epoch": 0.12141079341953499,
      "grad_norm": 0.345703125,
      "learning_rate": 0.00029583930443545563,
      "loss": 4.517,
      "mean_token_accuracy": 0.34025603461265563,
      "num_tokens": 144176271.0,
      "step": 6000
    },
    {
      "epoch": 0.13152835953782957,
      "grad_norm": 0.3984375,
      "learning_rate": 0.00029458404805581337,
      "loss": 4.4534,
      "mean_token_accuracy": 0.34516079604625705,
      "num_tokens": 156188843.0,
      "step": 6500
    },
    {
      "epoch": 0.14164592565612416,
      "grad_norm": 0.369140625,
      "learning_rate": 0.00029316695184099267,
      "loss": 4.4374,
      "mean_token_accuracy": 0.34581841200590135,
      "num_tokens": 168208403.0,
      "step": 7000
    },
    {
      "epoch": 0.15176349177441875,
      "grad_norm": 0.392578125,
      "learning_rate": 0.0002915896020145148,
      "loss": 4.3998,
      "mean_token_accuracy": 0.3480748535394669,
      "num_tokens": 180235618.0,
      "step": 7500
    },
    {
      "epoch": 0.16188105789271334,
      "grad_norm": 0.41796875,
      "learning_rate": 0.00028985376417942787,
      "loss": 4.3825,
      "mean_token_accuracy": 0.34968917137384414,
      "num_tokens": 192246640.0,
      "step": 8000
    },
    {
      "epoch": 0.1719986240110079,
      "grad_norm": 0.345703125,
      "learning_rate": 0.00028796138134198245,
      "loss": 4.3497,
      "mean_token_accuracy": 0.3521047782897949,
      "num_tokens": 204257025.0,
      "step": 8500
    },
    {
      "epoch": 0.1821161901293025,
      "grad_norm": 0.357421875,
      "learning_rate": 0.00028591457173673235,
      "loss": 4.3222,
      "mean_token_accuracy": 0.3550558543205261,
      "num_tokens": 216269157.0,
      "step": 9000
    },
    {
      "epoch": 0.1922337562475971,
      "grad_norm": 0.408203125,
      "learning_rate": 0.00028371562645549314,
      "loss": 4.2894,
      "mean_token_accuracy": 0.3578448301553726,
      "num_tokens": 228272544.0,
      "step": 9500
    },
    {
      "epoch": 0.20235132236589165,
      "grad_norm": 0.376953125,
      "learning_rate": 0.0002813670068828134,
      "loss": 4.2983,
      "mean_token_accuracy": 0.355904599070549,
      "num_tokens": 240295030.0,
      "step": 10000
    },
    {
      "epoch": 0.21246888848418624,
      "grad_norm": 0.359375,
      "learning_rate": 0.00027887134194082996,
      "loss": 4.2844,
      "mean_token_accuracy": 0.35704867881536484,
      "num_tokens": 252314856.0,
      "step": 10500
    },
    {
      "epoch": 0.22258645460248083,
      "grad_norm": 0.365234375,
      "learning_rate": 0.0002762314251465891,
      "loss": 4.2747,
      "mean_token_accuracy": 0.35737198293209077,
      "num_tokens": 264343569.0,
      "step": 11000
    },
    {
      "epoch": 0.23270402072077542,
      "grad_norm": 0.390625,
      "learning_rate": 0.0002734502114851296,
      "loss": 4.2424,
      "mean_token_accuracy": 0.36065280497074126,
      "num_tokens": 276359037.0,
      "step": 11500
    },
    {
      "epoch": 0.24282158683906999,
      "grad_norm": 0.396484375,
      "learning_rate": 0.00027053081410182697,
      "loss": 4.234,
      "mean_token_accuracy": 0.36129101461172103,
      "num_tokens": 288373491.0,
      "step": 12000
    },
    {
      "epoch": 0.2529391529573646,
      "grad_norm": 0.365234375,
      "learning_rate": 0.0002674765008177008,
      "loss": 4.2163,
      "mean_token_accuracy": 0.36271012741327285,
      "num_tokens": 300383941.0,
      "step": 12500
    },
    {
      "epoch": 0.26305671907565914,
      "grad_norm": 0.40234375,
      "learning_rate": 0.00026429069047158657,
      "loss": 4.2082,
      "mean_token_accuracy": 0.36296020871400836,
      "num_tokens": 312416834.0,
      "step": 13000
    },
    {
      "epoch": 0.27317428519395376,
      "grad_norm": 0.333984375,
      "learning_rate": 0.000260976949093266,
      "loss": 4.1802,
      "mean_token_accuracy": 0.36644324856996535,
      "num_tokens": 324430305.0,
      "step": 13500
    },
    {
      "epoch": 0.2832918513122483,
      "grad_norm": 0.345703125,
      "learning_rate": 0.0002575389859118394,
      "loss": 4.1717,
      "mean_token_accuracy": 0.36681148010492326,
      "num_tokens": 336432753.0,
      "step": 14000
    },
    {
      "epoch": 0.2934094174305429,
      "grad_norm": 0.361328125,
      "learning_rate": 0.00025398064920380834,
      "loss": 4.1852,
      "mean_token_accuracy": 0.36508618742227555,
      "num_tokens": 348448508.0,
      "step": 14500
    },
    {
      "epoch": 0.3035269835488375,
      "grad_norm": 0.37890625,
      "learning_rate": 0.00025030592198551575,
      "loss": 4.1387,
      "mean_token_accuracy": 0.37016365003585816,
      "num_tokens": 360455888.0,
      "step": 15000
    },
    {
      "epoch": 0.31364454966713207,
      "grad_norm": 0.314453125,
      "learning_rate": 0.0002465189175547654,
      "loss": 4.147,
      "mean_token_accuracy": 0.36943590980768204,
      "num_tokens": 372469047.0,
      "step": 15500
    },
    {
      "epoch": 0.3237621157854267,
      "grad_norm": 0.4453125,
      "learning_rate": 0.00024262387488661118,
      "loss": 4.2161,
      "mean_token_accuracy": 0.3663038157224655,
      "num_tokens": 384482227.0,
      "step": 16000
    },
    {
      "epoch": 0.33387968190372125,
      "grad_norm": 0.431640625,
      "learning_rate": 0.0002386251538884696,
      "loss": 4.2708,
      "mean_token_accuracy": 0.35985021191835403,
      "num_tokens": 396507994.0,
      "step": 16500
    },
    {
      "epoch": 0.3439972480220158,
      "grad_norm": 0.3984375,
      "learning_rate": 0.0002345272305198671,
      "loss": 4.2626,
      "mean_token_accuracy": 0.35938486641645434,
      "num_tokens": 408527074.0,
      "step": 17000
    },
    {
      "epoch": 0.35411481414031043,
      "grad_norm": 0.4375,
      "learning_rate": 0.00023033469178228457,
      "loss": 4.2604,
      "mean_token_accuracy": 0.35944406408071516,
      "num_tokens": 420551611.0,
      "step": 17500
    },
    {
      "epoch": 0.364232380258605,
      "grad_norm": 0.4296875,
      "learning_rate": 0.0002260522305847074,
      "loss": 4.259,
      "mean_token_accuracy": 0.3585598506331444,
      "num_tokens": 432578604.0,
      "step": 18000
    },
    {
      "epoch": 0.37434994637689956,
      "grad_norm": 0.458984375,
      "learning_rate": 0.00022168464049062824,
      "loss": 4.2539,
      "mean_token_accuracy": 0.359400195479393,
      "num_tokens": 444606807.0,
      "step": 18500
    },
    {
      "epoch": 0.3844675124951942,
      "grad_norm": 0.46484375,
      "learning_rate": 0.0002172368103523822,
      "loss": 4.2396,
      "mean_token_accuracy": 0.3610253592133522,
      "num_tokens": 456622578.0,
      "step": 19000
    },
    {
      "epoch": 0.39458507861348874,
      "grad_norm": 0.4921875,
      "learning_rate": 0.0002127137188388207,
      "loss": 4.2317,
      "mean_token_accuracy": 0.3614521362781525,
      "num_tokens": 468626335.0,
      "step": 19500
    },
    {
      "epoch": 0.4047026447317833,
      "grad_norm": 0.431640625,
      "learning_rate": 0.0002081204288624496,
      "loss": 4.1995,
      "mean_token_accuracy": 0.3652240701317787,
      "num_tokens": 480636609.0,
      "step": 20000
    },
    {
      "epoch": 0.4148202108500779,
      "grad_norm": 0.4375,
      "learning_rate": 0.00020346208191226927,
      "loss": 4.1966,
      "mean_token_accuracy": 0.36518134355545045,
      "num_tokens": 492648597.0,
      "step": 20500
    },
    {
      "epoch": 0.4249377769683725,
      "grad_norm": 0.48046875,
      "learning_rate": 0.0001987438922986602,
      "loss": 4.1926,
      "mean_token_accuracy": 0.365411285161972,
      "num_tokens": 504658765.0,
      "step": 21000
    },
    {
      "epoch": 0.43505534308666705,
      "grad_norm": 0.5625,
      "learning_rate": 0.0001939711413167559,
      "loss": 4.1985,
      "mean_token_accuracy": 0.36408871501684187,
      "num_tokens": 516665859.0,
      "step": 21500
    },
    {
      "epoch": 0.44517290920496166,
      "grad_norm": 0.404296875,
      "learning_rate": 0.0001891491713348375,
      "loss": 4.2043,
      "mean_token_accuracy": 0.3634589037895203,
      "num_tokens": 528675596.0,
      "step": 22000
    },
    {
      "epoch": 0.4552904753232562,
      "grad_norm": 0.423828125,
      "learning_rate": 0.0001842833798143648,
      "loss": 4.1872,
      "mean_token_accuracy": 0.36537654942274095,
      "num_tokens": 540694481.0,
      "step": 22500
    },
    {
      "epoch": 0.46540804144155085,
      "grad_norm": 0.439453125,
      "learning_rate": 0.00017937921326834042,
      "loss": 4.1642,
      "mean_token_accuracy": 0.36855128794908526,
      "num_tokens": 552703293.0,
      "step": 23000
    },
    {
      "epoch": 0.4755256075598454,
      "grad_norm": 0.455078125,
      "learning_rate": 0.0001744421611647669,
      "loss": 4.1161,
      "mean_token_accuracy": 0.37240500724315645,
      "num_tokens": 564716126.0,
      "step": 23500
    },
    {
      "epoch": 0.48564317367813997,
      "grad_norm": 0.451171875,
      "learning_rate": 0.00016947774978202324,
      "loss": 4.114,
      "mean_token_accuracy": 0.3716895794272423,
      "num_tokens": 576739422.0,
      "step": 24000
    },
    {
      "epoch": 0.4957607397964346,
      "grad_norm": 0.46875,
      "learning_rate": 0.00016449153602303716,
      "loss": 4.127,
      "mean_token_accuracy": 0.37084434121847154,
      "num_tokens": 588758561.0,
      "step": 24500
    },
    {
      "epoch": 0.5058783059147292,
      "grad_norm": 0.455078125,
      "learning_rate": 0.00015948910119517801,
      "loss": 4.1131,
      "mean_token_accuracy": 0.3723976674079895,
      "num_tokens": 600780001.0,
      "step": 25000
    },
    {
      "epoch": 0.5159958720330238,
      "grad_norm": 0.44140625,
      "learning_rate": 0.00015447604476283297,
      "loss": 4.1103,
      "mean_token_accuracy": 0.37234303742647173,
      "num_tokens": 612790687.0,
      "step": 25500
    },
    {
      "epoch": 0.5261134381513183,
      "grad_norm": 0.439453125,
      "learning_rate": 0.00014945797807965866,
      "loss": 4.0859,
      "mean_token_accuracy": 0.3740212817192078,
      "num_tokens": 624818330.0,
      "step": 26000
    },
    {
      "epoch": 0.5362310042696129,
      "grad_norm": 0.447265625,
      "learning_rate": 0.00014444051810752503,
      "loss": 4.0808,
      "mean_token_accuracy": 0.3752990872859955,
      "num_tokens": 636836789.0,
      "step": 26500
    },
    {
      "epoch": 0.5463485703879075,
      "grad_norm": 0.455078125,
      "learning_rate": 0.00013942928112918113,
      "loss": 4.0793,
      "mean_token_accuracy": 0.37516462814807894,
      "num_tokens": 648846886.0,
      "step": 27000
    },
    {
      "epoch": 0.556466136506202,
      "grad_norm": 0.44140625,
      "learning_rate": 0.0001344298764616819,
      "loss": 4.0674,
      "mean_token_accuracy": 0.37648630076646805,
      "num_tokens": 660859222.0,
      "step": 27500
    },
    {
      "epoch": 0.5665837026244966,
      "grad_norm": 0.455078125,
      "learning_rate": 0.00012944790017761106,
      "loss": 4.0677,
      "mean_token_accuracy": 0.37691014271974566,
      "num_tokens": 672873174.0,
      "step": 28000
    },
    {
      "epoch": 0.5767012687427913,
      "grad_norm": 0.46484375,
      "learning_rate": 0.00012448892884112992,
      "loss": 4.041,
      "mean_token_accuracy": 0.3792866112589836,
      "num_tokens": 684869774.0,
      "step": 28500
    },
    {
      "epoch": 0.5868188348610858,
      "grad_norm": 2.34375,
      "learning_rate": 0.00011955851326586234,
      "loss": 4.0607,
      "mean_token_accuracy": 0.3807999837398529,
      "num_tokens": 696881301.0,
      "step": 29000
    },
    {
      "epoch": 0.5969364009793804,
      "grad_norm": 0.55078125,
      "learning_rate": 0.00011466217230160404,
      "loss": 4.0344,
      "mean_token_accuracy": 0.3827848986387253,
      "num_tokens": 708895273.0,
      "step": 29500
    },
    {
      "epoch": 0.607053967097675,
      "grad_norm": 0.53125,
      "learning_rate": 0.00010980538665680985,
      "loss": 4.0099,
      "mean_token_accuracy": 0.38436073118448255,
      "num_tokens": 720912243.0,
      "step": 30000
    },
    {
      "epoch": 0.6171715332159695,
      "grad_norm": 0.54296875,
      "learning_rate": 0.00010499359276377534,
      "loss": 3.9881,
      "mean_token_accuracy": 0.386309523165226,
      "num_tokens": 732929445.0,
      "step": 30500
    },
    {
      "epoch": 0.6272890993342641,
      "grad_norm": 0.5390625,
      "learning_rate": 0.00010023217669337769,
      "loss": 3.9851,
      "mean_token_accuracy": 0.386250978410244,
      "num_tokens": 744956030.0,
      "step": 31000
    },
    {
      "epoch": 0.6374066654525588,
      "grad_norm": 0.55859375,
      "learning_rate": 9.552646812618973e-05,
      "loss": 3.9912,
      "mean_token_accuracy": 0.38541185545921325,
      "num_tokens": 756973559.0,
      "step": 31500
    },
    {
      "epoch": 0.6475242315708534,
      "grad_norm": 0.5703125,
      "learning_rate": 9.088173438671332e-05,
      "loss": 3.9618,
      "mean_token_accuracy": 0.3886643843054771,
      "num_tokens": 768993412.0,
      "step": 32000
    },
    {
      "epoch": 0.6576417976891479,
      "grad_norm": 0.5625,
      "learning_rate": 8.630317454741159e-05,
      "loss": 3.9566,
      "mean_token_accuracy": 0.38894479793310166,
      "num_tokens": 781002910.0,
      "step": 32500
    },
    {
      "epoch": 0.6677593638074425,
      "grad_norm": 0.5390625,
      "learning_rate": 8.17959136091384e-05,
      "loss": 3.9384,
      "mean_token_accuracy": 0.3909342797398567,
      "num_tokens": 793011499.0,
      "step": 33000
    },
    {
      "epoch": 0.6778769299257371,
      "grad_norm": 0.5625,
      "learning_rate": 7.736499676448012e-05,
      "loss": 3.9304,
      "mean_token_accuracy": 0.3920640903711319,
      "num_tokens": 805027429.0,
      "step": 33500
    },
    {
      "epoch": 0.6879944960440316,
      "grad_norm": 0.58203125,
      "learning_rate": 7.301538375043028e-05,
      "loss": 3.927,
      "mean_token_accuracy": 0.39186790603399274,
      "num_tokens": 817043838.0,
      "step": 34000
    },
    {
      "epoch": 0.6981120621623262,
      "grad_norm": 0.52734375,
      "learning_rate": 6.875194329671924e-05,
      "loss": 3.9135,
      "mean_token_accuracy": 0.39424380254745484,
      "num_tokens": 829051457.0,
      "step": 34500
    },
    {
      "epoch": 0.7082296282806209,
      "grad_norm": 0.59375,
      "learning_rate": 6.457944767601184e-05,
      "loss": 3.9194,
      "mean_token_accuracy": 0.3936538654565811,
      "num_tokens": 841063558.0,
      "step": 35000
    },
    {
      "epoch": 0.7183471943989154,
      "grad_norm": 0.478515625,
      "learning_rate": 6.0502567362074895e-05,
      "loss": 3.9167,
      "mean_token_accuracy": 0.39285322940349576,
      "num_tokens": 853081842.0,
      "step": 35500
    },
    {
      "epoch": 0.72846476051721,
      "grad_norm": 0.59765625,
      "learning_rate": 5.6525865801892425e-05,
      "loss": 3.8827,
      "mean_token_accuracy": 0.3979086767435074,
      "num_tokens": 865088195.0,
      "step": 36000
    },
    {
      "epoch": 0.7385823266355046,
      "grad_norm": 0.55078125,
      "learning_rate": 5.265379430758167e-05,
      "loss": 3.8683,
      "mean_token_accuracy": 0.3997677757143974,
      "num_tokens": 877099653.0,
      "step": 36500
    },
    {
      "epoch": 0.7486998927537991,
      "grad_norm": 0.51953125,
      "learning_rate": 4.889068707382631e-05,
      "loss": 3.8626,
      "mean_token_accuracy": 0.4014857310652733,
      "num_tokens": 889098774.0,
      "step": 37000
    },
    {
      "epoch": 0.7588174588720937,
      "grad_norm": 0.5703125,
      "learning_rate": 4.524075632640548e-05,
      "loss": 3.8701,
      "mean_token_accuracy": 0.3988613831400871,
      "num_tokens": 901122305.0,
      "step": 37500
    },
    {
      "epoch": 0.7689350249903883,
      "grad_norm": 0.58984375,
      "learning_rate": 4.1708087607247995e-05,
      "loss": 3.869,
      "mean_token_accuracy": 0.3993934275507927,
      "num_tokens": 913141836.0,
      "step": 38000
    },
    {
      "epoch": 0.7790525911086829,
      "grad_norm": 0.5703125,
      "learning_rate": 3.829663520129017e-05,
      "loss": 3.8759,
      "mean_token_accuracy": 0.3983539624810219,
      "num_tokens": 925159236.0,
      "step": 38500
    },
    {
      "epoch": 0.7891701572269775,
      "grad_norm": 0.54296875,
      "learning_rate": 3.5010217710255475e-05,
      "loss": 3.8641,
      "mean_token_accuracy": 0.3992738093137741,
      "num_tokens": 937190539.0,
      "step": 39000
    },
    {
      "epoch": 0.7992877233452721,
      "grad_norm": 0.5703125,
      "learning_rate": 3.185251377831133e-05,
      "loss": 3.8512,
      "mean_token_accuracy": 0.4009958416223526,
      "num_tokens": 949212097.0,
      "step": 39500
    },
    {
      "epoch": 0.8094052894635666,
      "grad_norm": 0.55078125,
      "learning_rate": 2.8827057974386658e-05,
      "loss": 3.8534,
      "mean_token_accuracy": 0.40131270414590836,
      "num_tokens": 961210545.0,
      "step": 40000
    },
    {
      "epoch": 0.8195228555818612,
      "grad_norm": 0.53515625,
      "learning_rate": 2.5937236835760272e-05,
      "loss": 3.8699,
      "mean_token_accuracy": 0.3994129166603088,
      "num_tokens": 973234104.0,
      "step": 40500
    },
    {
      "epoch": 0.8296404217001558,
      "grad_norm": 0.55859375,
      "learning_rate": 2.318628507734748e-05,
      "loss": 3.8392,
      "mean_token_accuracy": 0.40251973223686216,
      "num_tokens": 985244716.0,
      "step": 41000
    },
    {
      "epoch": 0.8397579878184503,
      "grad_norm": 0.54296875,
      "learning_rate": 2.0577281970929332e-05,
      "loss": 3.8332,
      "mean_token_accuracy": 0.40371298098564146,
      "num_tokens": 997252495.0,
      "step": 41500
    },
    {
      "epoch": 0.849875553936745,
      "grad_norm": 0.53125,
      "learning_rate": 1.8113147898376435e-05,
      "loss": 3.8686,
      "mean_token_accuracy": 0.39953106695413587,
      "num_tokens": 1009277702.0,
      "step": 42000
    },
    {
      "epoch": 0.8599931200550396,
      "grad_norm": 0.53125,
      "learning_rate": 1.5796641082725953e-05,
      "loss": 3.846,
      "mean_token_accuracy": 0.4027084149122238,
      "num_tokens": 1021271039.0,
      "step": 42500
    },
    {
      "epoch": 0.8701106861733341,
      "grad_norm": 0.53125,
      "learning_rate": 1.3630354500770884e-05,
      "loss": 3.8402,
      "mean_token_accuracy": 0.4029158518910408,
      "num_tokens": 1033279547.0,
      "step": 43000
    },
    {
      "epoch": 0.8802282522916287,
      "grad_norm": 0.5546875,
      "learning_rate": 1.1616712980616954e-05,
      "loss": 3.8334,
      "mean_token_accuracy": 0.4026625081896782,
      "num_tokens": 1045302954.0,
      "step": 43500
    },
    {
      "epoch": 0.8903458184099233,
      "grad_norm": 0.56640625,
      "learning_rate": 9.757970487456834e-06,
      "loss": 3.846,
      "mean_token_accuracy": 0.4019183788895607,
      "num_tokens": 1057312737.0,
      "step": 44000
    },
    {
      "epoch": 0.900463384528218,
      "grad_norm": 0.53125,
      "learning_rate": 8.056207600599047e-06,
      "loss": 3.8371,
      "mean_token_accuracy": 0.40209768468141555,
      "num_tokens": 1069335133.0,
      "step": 44500
    },
    {
      "epoch": 0.9105809506465125,
      "grad_norm": 0.5546875,
      "learning_rate": 6.513329184576232e-06,
      "loss": 3.8209,
      "mean_token_accuracy": 0.40574184638261795,
      "num_tokens": 1081330922.0,
      "step": 45000
    },
    {
      "epoch": 0.9206985167648071,
      "grad_norm": 0.55859375,
      "learning_rate": 5.131062256939128e-06,
      "loss": 3.847,
      "mean_token_accuracy": 0.40258895909786224,
      "num_tokens": 1093337188.0,
      "step": 45500
    },
    {
      "epoch": 0.9308160828831017,
      "grad_norm": 0.5234375,
      "learning_rate": 3.910954055123322e-06,
      "loss": 3.856,
      "mean_token_accuracy": 0.4003161287903786,
      "num_tokens": 1105361310.0,
      "step": 46000
    },
    {
      "epoch": 0.9409336490013962,
      "grad_norm": 0.58984375,
      "learning_rate": 2.8543703045524167e-06,
      "loss": 3.8275,
      "mean_token_accuracy": 0.40424364000558854,
      "num_tokens": 1117367893.0,
      "step": 46500
    },
    {
      "epoch": 0.9510512151196908,
      "grad_norm": 0.59375,
      "learning_rate": 1.9624936899163945e-06,
      "loss": 3.8295,
      "mean_token_accuracy": 0.4040605016946793,
      "num_tokens": 1129375583.0,
      "step": 47000
    },
    {
      "epoch": 0.9611687812379854,
      "grad_norm": 0.54296875,
      "learning_rate": 1.2363225313359758e-06,
      "loss": 3.8356,
      "mean_token_accuracy": 0.40258765465021135,
      "num_tokens": 1141387823.0,
      "step": 47500
    },
    {
      "epoch": 0.9712863473562799,
      "grad_norm": 0.56640625,
      "learning_rate": 6.766696668952853e-07,
      "loss": 3.855,
      "mean_token_accuracy": 0.4004232720732689,
      "num_tokens": 1153399040.0,
      "step": 48000
    },
    {
      "epoch": 0.9814039134745746,
      "grad_norm": 0.55078125,
      "learning_rate": 2.8416154279330417e-07,
      "loss": 3.8378,
      "mean_token_accuracy": 0.40229264545440674,
      "num_tokens": 1165414940.0,
      "step": 48500
    },
    {
      "epoch": 0.9915214795928692,
      "grad_norm": 0.57421875,
      "learning_rate": 5.923751213269823e-08,
      "loss": 3.8603,
      "mean_token_accuracy": 0.4007369539737701,
      "num_tokens": 1177449058.0,
      "step": 49000
    }
  ],
  "logging_steps": 500,
  "max_steps": 49419,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 5000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.4876879400717517e+18,
  "train_batch_size": 48,
  "trial_name": null,
  "trial_params": null
}