{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0858324715615306,
  "eval_steps": 500,
  "global_step": 2100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "entropy": 2.0375,
      "epoch": 0.005170630816959669,
      "grad_norm": 0.27744260430336,
      "learning_rate": 1.9968976215098243e-05,
      "loss": 1.3067,
      "mean_token_accuracy": 0.6855354130268096,
      "num_tokens": 3537600.0,
      "step": 10
    },
    {
      "entropy": 1.88203125,
      "epoch": 0.010341261633919338,
      "grad_norm": 0.17058567702770233,
      "learning_rate": 1.993450534298518e-05,
      "loss": 1.1895,
      "mean_token_accuracy": 0.708834320306778,
      "num_tokens": 7032962.0,
      "step": 20
    },
    {
      "entropy": 1.7921875,
      "epoch": 0.015511892450879007,
      "grad_norm": 0.13976408541202545,
      "learning_rate": 1.9900034470872113e-05,
      "loss": 1.1912,
      "mean_token_accuracy": 0.7101442337036132,
      "num_tokens": 10614996.0,
      "step": 30
    },
    {
      "entropy": 1.7640625,
      "epoch": 0.020682523267838676,
      "grad_norm": 0.13212522864341736,
      "learning_rate": 1.986556359875905e-05,
      "loss": 1.1405,
      "mean_token_accuracy": 0.7191447138786315,
      "num_tokens": 14141355.0,
      "step": 40
    },
    {
      "entropy": 1.70859375,
      "epoch": 0.025853154084798345,
      "grad_norm": 0.1327238529920578,
      "learning_rate": 1.9831092726645986e-05,
      "loss": 1.1139,
      "mean_token_accuracy": 0.7228168249130249,
      "num_tokens": 17721204.0,
      "step": 50
    },
    {
      "entropy": 1.68359375,
      "epoch": 0.031023784901758014,
      "grad_norm": 0.13344530761241913,
      "learning_rate": 1.979662185453292e-05,
      "loss": 1.0762,
      "mean_token_accuracy": 0.729270726442337,
      "num_tokens": 21211732.0,
      "step": 60
    },
    {
      "entropy": 1.6890625,
      "epoch": 0.03619441571871768,
      "grad_norm": 0.13124032318592072,
      "learning_rate": 1.9762150982419856e-05,
      "loss": 1.0805,
      "mean_token_accuracy": 0.7282510459423065,
      "num_tokens": 24751832.0,
      "step": 70
    },
    {
      "entropy": 1.678125,
      "epoch": 0.04136504653567735,
      "grad_norm": 0.1330331563949585,
      "learning_rate": 1.9727680110306793e-05,
      "loss": 1.0846,
      "mean_token_accuracy": 0.727464348077774,
      "num_tokens": 28301590.0,
      "step": 80
    },
    {
      "entropy": 1.66640625,
      "epoch": 0.04653567735263702,
      "grad_norm": 0.1394193321466446,
      "learning_rate": 1.969320923819373e-05,
      "loss": 1.0566,
      "mean_token_accuracy": 0.7313333511352539,
      "num_tokens": 31802125.0,
      "step": 90
    },
    {
      "entropy": 1.65,
      "epoch": 0.05170630816959669,
      "grad_norm": 0.14272728562355042,
      "learning_rate": 1.9658738366080663e-05,
      "loss": 1.0603,
      "mean_token_accuracy": 0.7316244184970856,
      "num_tokens": 35299495.0,
      "step": 100
    },
    {
      "entropy": 1.640625,
      "epoch": 0.05687693898655636,
      "grad_norm": 0.13103780150413513,
      "learning_rate": 1.96242674939676e-05,
      "loss": 1.0617,
      "mean_token_accuracy": 0.7324262440204621,
      "num_tokens": 38770579.0,
      "step": 110
    },
    {
      "entropy": 1.6359375,
      "epoch": 0.06204756980351603,
      "grad_norm": 0.131998211145401,
      "learning_rate": 1.9589796621854536e-05,
      "loss": 1.0699,
      "mean_token_accuracy": 0.7302633821964264,
      "num_tokens": 42276781.0,
      "step": 120
    },
    {
      "entropy": 1.6296875,
      "epoch": 0.0672182006204757,
      "grad_norm": 0.13271059095859528,
      "learning_rate": 1.955532574974147e-05,
      "loss": 1.0701,
      "mean_token_accuracy": 0.7319574892520905,
      "num_tokens": 45791721.0,
      "step": 130
    },
    {
      "entropy": 1.6125,
      "epoch": 0.07238883143743537,
      "grad_norm": 0.1325104981660843,
      "learning_rate": 1.9520854877628406e-05,
      "loss": 1.06,
      "mean_token_accuracy": 0.7329302072525025,
      "num_tokens": 49358721.0,
      "step": 140
    },
    {
      "entropy": 1.615625,
      "epoch": 0.07755946225439504,
      "grad_norm": 0.12658704817295074,
      "learning_rate": 1.9486384005515343e-05,
      "loss": 1.0477,
      "mean_token_accuracy": 0.7350383698940277,
      "num_tokens": 52890301.0,
      "step": 150
    },
    {
      "entropy": 1.63515625,
      "epoch": 0.0827300930713547,
      "grad_norm": 0.13451404869556427,
      "learning_rate": 1.9451913133402276e-05,
      "loss": 1.0558,
      "mean_token_accuracy": 0.733129221200943,
      "num_tokens": 56419066.0,
      "step": 160
    },
    {
      "entropy": 1.62421875,
      "epoch": 0.08790072388831438,
      "grad_norm": 0.1334684193134308,
      "learning_rate": 1.9417442261289213e-05,
      "loss": 1.028,
      "mean_token_accuracy": 0.737556916475296,
      "num_tokens": 59961473.0,
      "step": 170
    },
    {
      "entropy": 1.6125,
      "epoch": 0.09307135470527404,
      "grad_norm": 0.12568821012973785,
      "learning_rate": 1.938297138917615e-05,
      "loss": 1.0458,
      "mean_token_accuracy": 0.7361509501934052,
      "num_tokens": 63434899.0,
      "step": 180
    },
    {
      "entropy": 1.58125,
      "epoch": 0.09824198552223372,
      "grad_norm": 0.12893521785736084,
      "learning_rate": 1.9348500517063083e-05,
      "loss": 1.0201,
      "mean_token_accuracy": 0.7413658320903778,
      "num_tokens": 66996997.0,
      "step": 190
    },
    {
      "entropy": 1.56484375,
      "epoch": 0.10341261633919338,
      "grad_norm": 0.12755005061626434,
      "learning_rate": 1.931402964495002e-05,
      "loss": 1.0229,
      "mean_token_accuracy": 0.7409334897994995,
      "num_tokens": 70549858.0,
      "step": 200
    },
    {
      "entropy": 1.5453125,
      "epoch": 0.10858324715615306,
      "grad_norm": 0.12291249632835388,
      "learning_rate": 1.9279558772836953e-05,
      "loss": 1.0298,
      "mean_token_accuracy": 0.738553661108017,
      "num_tokens": 74086821.0,
      "step": 210
    },
    {
      "entropy": 1.57890625,
      "epoch": 0.11375387797311272,
      "grad_norm": 0.13515888154506683,
      "learning_rate": 1.924508790072389e-05,
      "loss": 1.0365,
      "mean_token_accuracy": 0.7381217360496521,
      "num_tokens": 77581264.0,
      "step": 220
    },
    {
      "entropy": 1.5890625,
      "epoch": 0.1189245087900724,
      "grad_norm": 0.13524512946605682,
      "learning_rate": 1.9210617028610827e-05,
      "loss": 1.0402,
      "mean_token_accuracy": 0.7369829535484314,
      "num_tokens": 81045144.0,
      "step": 230
    },
    {
      "entropy": 1.52734375,
      "epoch": 0.12409513960703206,
      "grad_norm": 0.1315074861049652,
      "learning_rate": 1.917614615649776e-05,
      "loss": 1.0071,
      "mean_token_accuracy": 0.7420039892196655,
      "num_tokens": 84564212.0,
      "step": 240
    },
    {
      "entropy": 1.55703125,
      "epoch": 0.12926577042399173,
      "grad_norm": 0.12327884882688522,
      "learning_rate": 1.9141675284384697e-05,
      "loss": 1.014,
      "mean_token_accuracy": 0.7410085022449493,
      "num_tokens": 88081496.0,
      "step": 250
    },
    {
      "entropy": 1.57890625,
      "epoch": 0.1344364012409514,
      "grad_norm": 0.12867313623428345,
      "learning_rate": 1.910720441227163e-05,
      "loss": 1.0069,
      "mean_token_accuracy": 0.7418998777866364,
      "num_tokens": 91615725.0,
      "step": 260
    },
    {
      "entropy": 1.5484375,
      "epoch": 0.13960703205791106,
      "grad_norm": 0.12194376438856125,
      "learning_rate": 1.9072733540158567e-05,
      "loss": 1.0288,
      "mean_token_accuracy": 0.7407114923000335,
      "num_tokens": 95146899.0,
      "step": 270
    },
    {
      "entropy": 1.57421875,
      "epoch": 0.14477766287487073,
      "grad_norm": 0.1243792474269867,
      "learning_rate": 1.9038262668045503e-05,
      "loss": 1.0273,
      "mean_token_accuracy": 0.7392907023429871,
      "num_tokens": 98742455.0,
      "step": 280
    },
    {
      "entropy": 1.56015625,
      "epoch": 0.1499482936918304,
      "grad_norm": 0.1307244598865509,
      "learning_rate": 1.9003791795932437e-05,
      "loss": 0.9956,
      "mean_token_accuracy": 0.7460803270339966,
      "num_tokens": 102251107.0,
      "step": 290
    },
    {
      "entropy": 1.5421875,
      "epoch": 0.15511892450879008,
      "grad_norm": 0.12532740831375122,
      "learning_rate": 1.8969320923819374e-05,
      "loss": 0.9876,
      "mean_token_accuracy": 0.7475418984889984,
      "num_tokens": 105784628.0,
      "step": 300
    },
    {
      "entropy": 1.5484375,
      "epoch": 0.16028955532574973,
      "grad_norm": 0.13335883617401123,
      "learning_rate": 1.893485005170631e-05,
      "loss": 1.0098,
      "mean_token_accuracy": 0.7438597559928894,
      "num_tokens": 109309371.0,
      "step": 310
    },
    {
      "entropy": 1.54140625,
      "epoch": 0.1654601861427094,
      "grad_norm": 0.12339036166667938,
      "learning_rate": 1.8900379179593244e-05,
      "loss": 1.0017,
      "mean_token_accuracy": 0.7453927099704742,
      "num_tokens": 112813280.0,
      "step": 320
    },
    {
      "entropy": 1.5625,
      "epoch": 0.17063081695966908,
      "grad_norm": 0.1306329220533371,
      "learning_rate": 1.886590830748018e-05,
      "loss": 1.0239,
      "mean_token_accuracy": 0.7398741483688355,
      "num_tokens": 116277173.0,
      "step": 330
    },
    {
      "entropy": 1.55234375,
      "epoch": 0.17580144777662876,
      "grad_norm": 0.1199406087398529,
      "learning_rate": 1.8831437435367117e-05,
      "loss": 1.0089,
      "mean_token_accuracy": 0.7426419973373413,
      "num_tokens": 119775750.0,
      "step": 340
    },
    {
      "entropy": 1.5203125,
      "epoch": 0.1809720785935884,
      "grad_norm": 0.1323672980070114,
      "learning_rate": 1.879696656325405e-05,
      "loss": 0.9968,
      "mean_token_accuracy": 0.7448601007461548,
      "num_tokens": 123248571.0,
      "step": 350
    },
    {
      "entropy": 1.53671875,
      "epoch": 0.18614270941054809,
      "grad_norm": 0.12378353625535965,
      "learning_rate": 1.8762495691140987e-05,
      "loss": 1.0024,
      "mean_token_accuracy": 0.7444860756397247,
      "num_tokens": 126768515.0,
      "step": 360
    },
    {
      "entropy": 1.5515625,
      "epoch": 0.19131334022750776,
      "grad_norm": 0.12648890912532806,
      "learning_rate": 1.8728024819027924e-05,
      "loss": 0.9973,
      "mean_token_accuracy": 0.7450312077999115,
      "num_tokens": 130266395.0,
      "step": 370
    },
    {
      "entropy": 1.5265625,
      "epoch": 0.19648397104446744,
      "grad_norm": 0.12423662841320038,
      "learning_rate": 1.8693553946914857e-05,
      "loss": 0.9861,
      "mean_token_accuracy": 0.7472135782241821,
      "num_tokens": 133821405.0,
      "step": 380
    },
    {
      "entropy": 1.5265625,
      "epoch": 0.20165460186142709,
      "grad_norm": 0.12470203638076782,
      "learning_rate": 1.8659083074801794e-05,
      "loss": 1.0048,
      "mean_token_accuracy": 0.7438580453395843,
      "num_tokens": 137356477.0,
      "step": 390
    },
    {
      "entropy": 1.52734375,
      "epoch": 0.20682523267838676,
      "grad_norm": 0.11875167489051819,
      "learning_rate": 1.862461220268873e-05,
      "loss": 0.9833,
      "mean_token_accuracy": 0.7478334903717041,
      "num_tokens": 140888954.0,
      "step": 400
    },
    {
      "entropy": 1.54609375,
      "epoch": 0.21199586349534644,
      "grad_norm": 0.11470374464988708,
      "learning_rate": 1.8590141330575667e-05,
      "loss": 0.985,
      "mean_token_accuracy": 0.7477921664714813,
      "num_tokens": 144421867.0,
      "step": 410
    },
    {
      "entropy": 1.53515625,
      "epoch": 0.2171664943123061,
      "grad_norm": 0.1266632378101349,
      "learning_rate": 1.85556704584626e-05,
      "loss": 0.9762,
      "mean_token_accuracy": 0.7494876086711884,
      "num_tokens": 147936416.0,
      "step": 420
    },
    {
      "entropy": 1.5125,
      "epoch": 0.22233712512926576,
      "grad_norm": 0.13191717863082886,
      "learning_rate": 1.8521199586349537e-05,
      "loss": 0.9747,
      "mean_token_accuracy": 0.750033575296402,
      "num_tokens": 151400555.0,
      "step": 430
    },
    {
      "entropy": 1.51640625,
      "epoch": 0.22750775594622544,
      "grad_norm": 0.12589062750339508,
      "learning_rate": 1.8486728714236474e-05,
      "loss": 0.9784,
      "mean_token_accuracy": 0.7486616790294647,
      "num_tokens": 154906166.0,
      "step": 440
    },
    {
      "entropy": 1.5203125,
      "epoch": 0.2326783867631851,
      "grad_norm": 0.13004685938358307,
      "learning_rate": 1.8452257842123407e-05,
      "loss": 0.9776,
      "mean_token_accuracy": 0.7488125145435334,
      "num_tokens": 158369458.0,
      "step": 450
    },
    {
      "entropy": 1.503125,
      "epoch": 0.2378490175801448,
      "grad_norm": 0.12742707133293152,
      "learning_rate": 1.8417786970010344e-05,
      "loss": 0.9906,
      "mean_token_accuracy": 0.746845805644989,
      "num_tokens": 161916438.0,
      "step": 460
    },
    {
      "entropy": 1.51640625,
      "epoch": 0.24301964839710444,
      "grad_norm": 0.13093426823616028,
      "learning_rate": 1.838331609789728e-05,
      "loss": 0.995,
      "mean_token_accuracy": 0.7453965723514557,
      "num_tokens": 165415312.0,
      "step": 470
    },
    {
      "entropy": 1.56015625,
      "epoch": 0.2481902792140641,
      "grad_norm": 0.13128837943077087,
      "learning_rate": 1.8348845225784214e-05,
      "loss": 1.0114,
      "mean_token_accuracy": 0.7426946878433227,
      "num_tokens": 168898815.0,
      "step": 480
    },
    {
      "entropy": 1.55703125,
      "epoch": 0.2533609100310238,
      "grad_norm": 0.13076794147491455,
      "learning_rate": 1.831437435367115e-05,
      "loss": 0.9759,
      "mean_token_accuracy": 0.7508574306964875,
      "num_tokens": 172415633.0,
      "step": 490
    },
    {
      "entropy": 1.521875,
      "epoch": 0.25853154084798347,
      "grad_norm": 0.12472254037857056,
      "learning_rate": 1.8279903481558084e-05,
      "loss": 0.9674,
      "mean_token_accuracy": 0.7516848564147949,
      "num_tokens": 175959198.0,
      "step": 500
    },
    {
      "entropy": 1.53515625,
      "epoch": 0.26370217166494314,
      "grad_norm": 0.1270027905702591,
      "learning_rate": 1.824543260944502e-05,
      "loss": 0.9842,
      "mean_token_accuracy": 0.7485461413860321,
      "num_tokens": 179473499.0,
      "step": 510
    },
    {
      "entropy": 1.540625,
      "epoch": 0.2688728024819028,
      "grad_norm": 0.1286272406578064,
      "learning_rate": 1.8210961737331958e-05,
      "loss": 0.9872,
      "mean_token_accuracy": 0.7479561626911163,
      "num_tokens": 182948456.0,
      "step": 520
    },
    {
      "entropy": 1.51015625,
      "epoch": 0.27404343329886244,
      "grad_norm": 0.12400833517313004,
      "learning_rate": 1.817649086521889e-05,
      "loss": 0.9621,
      "mean_token_accuracy": 0.7519203543663024,
      "num_tokens": 186385965.0,
      "step": 530
    },
    {
      "entropy": 1.51328125,
      "epoch": 0.2792140641158221,
      "grad_norm": 0.1264849752187729,
      "learning_rate": 1.8142019993105828e-05,
      "loss": 0.9668,
      "mean_token_accuracy": 0.7518666923046112,
      "num_tokens": 189926499.0,
      "step": 540
    },
    {
      "entropy": 1.50859375,
      "epoch": 0.2843846949327818,
      "grad_norm": 0.12239896506071091,
      "learning_rate": 1.810754912099276e-05,
      "loss": 0.9713,
      "mean_token_accuracy": 0.7509606957435608,
      "num_tokens": 193432394.0,
      "step": 550
    },
    {
      "entropy": 1.5328125,
      "epoch": 0.28955532574974147,
      "grad_norm": 0.12010076642036438,
      "learning_rate": 1.8073078248879698e-05,
      "loss": 0.9739,
      "mean_token_accuracy": 0.7502519965171814,
      "num_tokens": 196907834.0,
      "step": 560
    },
    {
      "entropy": 1.52265625,
      "epoch": 0.29472595656670114,
      "grad_norm": 0.12827058136463165,
      "learning_rate": 1.8038607376766634e-05,
      "loss": 0.9887,
      "mean_token_accuracy": 0.7459660708904267,
      "num_tokens": 200401413.0,
      "step": 570
    },
    {
      "entropy": 1.5109375,
      "epoch": 0.2998965873836608,
      "grad_norm": 0.1177738755941391,
      "learning_rate": 1.8004136504653568e-05,
      "loss": 0.9623,
      "mean_token_accuracy": 0.7526758849620819,
      "num_tokens": 203921751.0,
      "step": 580
    },
    {
      "entropy": 1.5265625,
      "epoch": 0.3050672182006205,
      "grad_norm": 0.1272975504398346,
      "learning_rate": 1.7969665632540504e-05,
      "loss": 0.98,
      "mean_token_accuracy": 0.749792069196701,
      "num_tokens": 207392847.0,
      "step": 590
    },
    {
      "entropy": 1.4953125,
      "epoch": 0.31023784901758017,
      "grad_norm": 0.12402703613042831,
      "learning_rate": 1.793519476042744e-05,
      "loss": 0.9855,
      "mean_token_accuracy": 0.7477928400039673,
      "num_tokens": 210946738.0,
      "step": 600
    },
    {
      "entropy": 1.50078125,
      "epoch": 0.3154084798345398,
      "grad_norm": 0.1203068271279335,
      "learning_rate": 1.7900723888314374e-05,
      "loss": 0.9592,
      "mean_token_accuracy": 0.7534137964248657,
      "num_tokens": 214442947.0,
      "step": 610
    },
    {
      "entropy": 1.54296875,
      "epoch": 0.32057911065149947,
      "grad_norm": 0.12435421347618103,
      "learning_rate": 1.786625301620131e-05,
      "loss": 0.969,
      "mean_token_accuracy": 0.7514902472496032,
      "num_tokens": 217974343.0,
      "step": 620
    },
    {
      "entropy": 1.5265625,
      "epoch": 0.32574974146845914,
      "grad_norm": 0.12747792899608612,
      "learning_rate": 1.7831782144088248e-05,
      "loss": 0.975,
      "mean_token_accuracy": 0.7483082413673401,
      "num_tokens": 221488604.0,
      "step": 630
    },
    {
      "entropy": 1.52734375,
      "epoch": 0.3309203722854188,
      "grad_norm": 0.12488026171922684,
      "learning_rate": 1.779731127197518e-05,
      "loss": 0.9989,
      "mean_token_accuracy": 0.745906400680542,
      "num_tokens": 225024161.0,
      "step": 640
    },
    {
      "entropy": 1.484375,
      "epoch": 0.3360910031023785,
      "grad_norm": 0.11778416484594345,
      "learning_rate": 1.7762840399862118e-05,
      "loss": 0.9765,
      "mean_token_accuracy": 0.7498968541622162,
      "num_tokens": 228517539.0,
      "step": 650
    },
    {
      "entropy": 1.4875,
      "epoch": 0.34126163391933817,
      "grad_norm": 0.11822066456079483,
      "learning_rate": 1.7728369527749055e-05,
      "loss": 0.9769,
      "mean_token_accuracy": 0.7499968707561493,
      "num_tokens": 232050772.0,
      "step": 660
    },
    {
      "entropy": 1.48046875,
      "epoch": 0.34643226473629785,
      "grad_norm": 0.11642944067716599,
      "learning_rate": 1.7693898655635988e-05,
      "loss": 0.9656,
      "mean_token_accuracy": 0.7531766474246979,
      "num_tokens": 235589506.0,
      "step": 670
    },
    {
      "entropy": 1.4984375,
      "epoch": 0.3516028955532575,
      "grad_norm": 0.11931689083576202,
      "learning_rate": 1.7659427783522925e-05,
      "loss": 0.9602,
      "mean_token_accuracy": 0.753033047914505,
      "num_tokens": 239103526.0,
      "step": 680
    },
    {
      "entropy": 1.484375,
      "epoch": 0.35677352637021714,
      "grad_norm": 0.10961424559354782,
      "learning_rate": 1.762495691140986e-05,
      "loss": 0.9283,
      "mean_token_accuracy": 0.7590166687965393,
      "num_tokens": 242628802.0,
      "step": 690
    },
    {
      "entropy": 1.48359375,
      "epoch": 0.3619441571871768,
      "grad_norm": 0.11637769639492035,
      "learning_rate": 1.7590486039296795e-05,
      "loss": 0.9534,
      "mean_token_accuracy": 0.7538342356681824,
      "num_tokens": 246124006.0,
      "step": 700
    },
    {
      "entropy": 1.49921875,
      "epoch": 0.3671147880041365,
      "grad_norm": 0.13185293972492218,
      "learning_rate": 1.755601516718373e-05,
      "loss": 0.9766,
      "mean_token_accuracy": 0.7504629611968994,
      "num_tokens": 249627086.0,
      "step": 710
    },
    {
      "entropy": 1.5203125,
      "epoch": 0.37228541882109617,
      "grad_norm": 0.11748187988996506,
      "learning_rate": 1.7521544295070668e-05,
      "loss": 0.9696,
      "mean_token_accuracy": 0.7497382581233978,
      "num_tokens": 253142510.0,
      "step": 720
    },
    {
      "entropy": 1.50703125,
      "epoch": 0.37745604963805585,
      "grad_norm": 0.12026237696409225,
      "learning_rate": 1.7487073422957605e-05,
      "loss": 0.9348,
      "mean_token_accuracy": 0.7580827891826629,
      "num_tokens": 256734152.0,
      "step": 730
    },
    {
      "entropy": 1.53671875,
      "epoch": 0.3826266804550155,
      "grad_norm": 0.1143966093659401,
      "learning_rate": 1.7452602550844538e-05,
      "loss": 0.9895,
      "mean_token_accuracy": 0.746571522951126,
      "num_tokens": 260325040.0,
      "step": 740
    },
    {
      "entropy": 1.52421875,
      "epoch": 0.3877973112719752,
      "grad_norm": 0.12038221210241318,
      "learning_rate": 1.7418131678731475e-05,
      "loss": 0.9492,
      "mean_token_accuracy": 0.7550131738185882,
      "num_tokens": 263838715.0,
      "step": 750
    },
    {
      "entropy": 1.5015625,
      "epoch": 0.3929679420889349,
      "grad_norm": 0.12316713482141495,
      "learning_rate": 1.7383660806618408e-05,
      "loss": 0.9403,
      "mean_token_accuracy": 0.7563300907611847,
      "num_tokens": 267341909.0,
      "step": 760
    },
    {
      "entropy": 1.50703125,
      "epoch": 0.3981385729058945,
      "grad_norm": 0.11857284605503082,
      "learning_rate": 1.7349189934505345e-05,
      "loss": 0.9357,
      "mean_token_accuracy": 0.7585722267627716,
      "num_tokens": 270844304.0,
      "step": 770
    },
    {
      "entropy": 1.50390625,
      "epoch": 0.40330920372285417,
      "grad_norm": 0.12760131061077118,
      "learning_rate": 1.731471906239228e-05,
      "loss": 0.958,
      "mean_token_accuracy": 0.7544421136379242,
      "num_tokens": 274347980.0,
      "step": 780
    },
    {
      "entropy": 1.4859375,
      "epoch": 0.40847983453981385,
      "grad_norm": 0.11467541754245758,
      "learning_rate": 1.7280248190279215e-05,
      "loss": 0.9432,
      "mean_token_accuracy": 0.756697702407837,
      "num_tokens": 277878589.0,
      "step": 790
    },
    {
      "entropy": 1.453125,
      "epoch": 0.4136504653567735,
      "grad_norm": 0.1142238974571228,
      "learning_rate": 1.724577731816615e-05,
      "loss": 0.9264,
      "mean_token_accuracy": 0.7594365060329438,
      "num_tokens": 281393449.0,
      "step": 800
    },
    {
      "entropy": 1.465625,
      "epoch": 0.4188210961737332,
      "grad_norm": 0.1210210919380188,
      "learning_rate": 1.7211306446053085e-05,
      "loss": 0.9461,
      "mean_token_accuracy": 0.7550443470478058,
      "num_tokens": 284863610.0,
      "step": 810
    },
    {
      "entropy": 1.4859375,
      "epoch": 0.4239917269906929,
      "grad_norm": 0.1252208799123764,
      "learning_rate": 1.7176835573940022e-05,
      "loss": 0.9506,
      "mean_token_accuracy": 0.7554301738739013,
      "num_tokens": 288435640.0,
      "step": 820
    },
    {
      "entropy": 1.46640625,
      "epoch": 0.42916235780765255,
      "grad_norm": 0.12282290309667587,
      "learning_rate": 1.714236470182696e-05,
      "loss": 0.9561,
      "mean_token_accuracy": 0.7522307515144349,
      "num_tokens": 291972853.0,
      "step": 830
    },
    {
      "entropy": 1.48359375,
      "epoch": 0.4343329886246122,
      "grad_norm": 0.13035908341407776,
      "learning_rate": 1.7107893829713892e-05,
      "loss": 0.9588,
      "mean_token_accuracy": 0.7543351411819458,
      "num_tokens": 295448635.0,
      "step": 840
    },
    {
      "entropy": 1.4921875,
      "epoch": 0.43950361944157185,
      "grad_norm": 0.11919616162776947,
      "learning_rate": 1.707342295760083e-05,
      "loss": 0.9802,
      "mean_token_accuracy": 0.748005723953247,
      "num_tokens": 299005991.0,
      "step": 850
    },
    {
      "entropy": 1.4859375,
      "epoch": 0.4446742502585315,
      "grad_norm": 0.12567895650863647,
      "learning_rate": 1.7038952085487762e-05,
      "loss": 0.9692,
      "mean_token_accuracy": 0.7503853142261505,
      "num_tokens": 302531494.0,
      "step": 860
    },
    {
      "entropy": 1.4609375,
      "epoch": 0.4498448810754912,
      "grad_norm": 0.11992225050926208,
      "learning_rate": 1.70044812133747e-05,
      "loss": 0.9247,
      "mean_token_accuracy": 0.760009765625,
      "num_tokens": 306035085.0,
      "step": 870
    },
    {
      "entropy": 1.49609375,
      "epoch": 0.4550155118924509,
      "grad_norm": 0.12116365134716034,
      "learning_rate": 1.6970010341261635e-05,
      "loss": 0.975,
      "mean_token_accuracy": 0.7497194230556488,
      "num_tokens": 309470942.0,
      "step": 880
    },
    {
      "entropy": 1.475,
      "epoch": 0.46018614270941055,
      "grad_norm": 0.11720309406518936,
      "learning_rate": 1.6935539469148572e-05,
      "loss": 0.952,
      "mean_token_accuracy": 0.7535983860492707,
      "num_tokens": 312985893.0,
      "step": 890
    },
    {
      "entropy": 1.49609375,
      "epoch": 0.4653567735263702,
      "grad_norm": 0.11972068250179291,
      "learning_rate": 1.6901068597035505e-05,
      "loss": 0.9631,
      "mean_token_accuracy": 0.7522001743316651,
      "num_tokens": 316493658.0,
      "step": 900
    },
    {
      "entropy": 1.48515625,
      "epoch": 0.4705274043433299,
      "grad_norm": 0.11726847290992737,
      "learning_rate": 1.6866597724922442e-05,
      "loss": 0.9487,
      "mean_token_accuracy": 0.7549257040023803,
      "num_tokens": 319971268.0,
      "step": 910
    },
    {
      "entropy": 1.50390625,
      "epoch": 0.4756980351602896,
      "grad_norm": 0.11658336967229843,
      "learning_rate": 1.683212685280938e-05,
      "loss": 0.9779,
      "mean_token_accuracy": 0.7492849051952362,
      "num_tokens": 323543858.0,
      "step": 920
    },
    {
      "entropy": 1.471875,
      "epoch": 0.4808686659772492,
      "grad_norm": 0.11780999600887299,
      "learning_rate": 1.6797655980696312e-05,
      "loss": 0.9249,
      "mean_token_accuracy": 0.759690934419632,
      "num_tokens": 327128684.0,
      "step": 930
    },
    {
      "entropy": 1.5015625,
      "epoch": 0.4860392967942089,
      "grad_norm": 0.11748922616243362,
      "learning_rate": 1.676318510858325e-05,
      "loss": 0.9644,
      "mean_token_accuracy": 0.752453887462616,
      "num_tokens": 330698096.0,
      "step": 940
    },
    {
      "entropy": 1.5,
      "epoch": 0.49120992761116855,
      "grad_norm": 0.12470361590385437,
      "learning_rate": 1.6728714236470185e-05,
      "loss": 0.9606,
      "mean_token_accuracy": 0.7517075955867767,
      "num_tokens": 334239261.0,
      "step": 950
    },
    {
      "entropy": 1.50078125,
      "epoch": 0.4963805584281282,
      "grad_norm": 0.12562286853790283,
      "learning_rate": 1.669424336435712e-05,
      "loss": 0.9476,
      "mean_token_accuracy": 0.75567626953125,
      "num_tokens": 337713689.0,
      "step": 960
    },
    {
      "entropy": 1.49921875,
      "epoch": 0.5015511892450879,
      "grad_norm": 0.1192832961678505,
      "learning_rate": 1.6659772492244055e-05,
      "loss": 0.9485,
      "mean_token_accuracy": 0.7538661956787109,
      "num_tokens": 341230527.0,
      "step": 970
    },
    {
      "entropy": 1.50234375,
      "epoch": 0.5067218200620476,
      "grad_norm": 0.11119144409894943,
      "learning_rate": 1.6625301620130992e-05,
      "loss": 0.9516,
      "mean_token_accuracy": 0.7550728797912598,
      "num_tokens": 344782636.0,
      "step": 980
    },
    {
      "entropy": 1.5046875,
      "epoch": 0.5118924508790073,
      "grad_norm": 0.12529641389846802,
      "learning_rate": 1.6590830748017926e-05,
      "loss": 0.9704,
      "mean_token_accuracy": 0.750161212682724,
      "num_tokens": 348288352.0,
      "step": 990
    },
    {
      "entropy": 1.54609375,
      "epoch": 0.5170630816959669,
      "grad_norm": 0.12611797451972961,
      "learning_rate": 1.6556359875904862e-05,
      "loss": 0.9823,
      "mean_token_accuracy": 0.7478528261184693,
      "num_tokens": 351754878.0,
      "step": 1000
    },
    {
      "entropy": 1.50390625,
      "epoch": 0.5222337125129266,
      "grad_norm": 0.12376543134450912,
      "learning_rate": 1.65218890037918e-05,
      "loss": 0.9457,
      "mean_token_accuracy": 0.7560109198093414,
      "num_tokens": 355224219.0,
      "step": 1010
    },
    {
      "entropy": 1.503125,
      "epoch": 0.5274043433298863,
      "grad_norm": 0.12620951235294342,
      "learning_rate": 1.6487418131678732e-05,
      "loss": 0.9276,
      "mean_token_accuracy": 0.7603226721286773,
      "num_tokens": 358763362.0,
      "step": 1020
    },
    {
      "entropy": 1.521875,
      "epoch": 0.532574974146846,
      "grad_norm": 0.11898606270551682,
      "learning_rate": 1.645294725956567e-05,
      "loss": 0.9356,
      "mean_token_accuracy": 0.7571122646331787,
      "num_tokens": 362325652.0,
      "step": 1030
    },
    {
      "entropy": 1.5140625,
      "epoch": 0.5377456049638056,
      "grad_norm": 0.12898164987564087,
      "learning_rate": 1.6418476387452606e-05,
      "loss": 0.926,
      "mean_token_accuracy": 0.7587803602218628,
      "num_tokens": 365822057.0,
      "step": 1040
    },
    {
      "entropy": 1.5,
      "epoch": 0.5429162357807652,
      "grad_norm": 0.11916136741638184,
      "learning_rate": 1.638400551533954e-05,
      "loss": 0.9359,
      "mean_token_accuracy": 0.7570879578590393,
      "num_tokens": 369329930.0,
      "step": 1050
    },
    {
      "entropy": 1.48203125,
      "epoch": 0.5480868665977249,
      "grad_norm": 0.12124485522508621,
      "learning_rate": 1.6349534643226476e-05,
      "loss": 0.9515,
      "mean_token_accuracy": 0.7533541798591614,
      "num_tokens": 372877333.0,
      "step": 1060
    },
    {
      "entropy": 1.4828125,
      "epoch": 0.5532574974146846,
      "grad_norm": 0.11895754188299179,
      "learning_rate": 1.631506377111341e-05,
      "loss": 0.9389,
      "mean_token_accuracy": 0.7570245862007141,
      "num_tokens": 376405650.0,
      "step": 1070
    },
    {
      "entropy": 1.47109375,
      "epoch": 0.5584281282316442,
      "grad_norm": 0.12033624947071075,
      "learning_rate": 1.6280592899000346e-05,
      "loss": 0.9536,
      "mean_token_accuracy": 0.7542732715606689,
      "num_tokens": 379955803.0,
      "step": 1080
    },
    {
      "entropy": 1.50859375,
      "epoch": 0.5635987590486039,
      "grad_norm": 0.1252926141023636,
      "learning_rate": 1.6246122026887283e-05,
      "loss": 0.9807,
      "mean_token_accuracy": 0.7493639528751374,
      "num_tokens": 383508886.0,
      "step": 1090
    },
    {
      "entropy": 1.45859375,
      "epoch": 0.5687693898655636,
      "grad_norm": 0.11495286226272583,
      "learning_rate": 1.6211651154774216e-05,
      "loss": 0.9302,
      "mean_token_accuracy": 0.758233267068863,
      "num_tokens": 387045540.0,
      "step": 1100
    },
    {
      "entropy": 1.47890625,
      "epoch": 0.5739400206825233,
      "grad_norm": 0.1155555322766304,
      "learning_rate": 1.6177180282661153e-05,
      "loss": 0.9276,
      "mean_token_accuracy": 0.759257459640503,
      "num_tokens": 390553827.0,
      "step": 1110
    },
    {
      "entropy": 1.48125,
      "epoch": 0.5791106514994829,
      "grad_norm": 0.12705214321613312,
      "learning_rate": 1.6142709410548086e-05,
      "loss": 0.9367,
      "mean_token_accuracy": 0.7574102461338044,
      "num_tokens": 394086014.0,
      "step": 1120
    },
    {
      "entropy": 1.475,
      "epoch": 0.5842812823164426,
      "grad_norm": 0.11858130246400833,
      "learning_rate": 1.6108238538435023e-05,
      "loss": 0.9397,
      "mean_token_accuracy": 0.7564952373504639,
      "num_tokens": 397627950.0,
      "step": 1130
    },
    {
      "entropy": 1.46796875,
      "epoch": 0.5894519131334023,
      "grad_norm": 0.12174364179372787,
      "learning_rate": 1.607376766632196e-05,
      "loss": 0.9317,
      "mean_token_accuracy": 0.758020156621933,
      "num_tokens": 401141051.0,
      "step": 1140
    },
    {
      "entropy": 1.48671875,
      "epoch": 0.594622543950362,
      "grad_norm": 0.12224062532186508,
      "learning_rate": 1.6039296794208893e-05,
      "loss": 0.9298,
      "mean_token_accuracy": 0.7598855435848236,
      "num_tokens": 404694343.0,
      "step": 1150
    },
    {
      "entropy": 1.48671875,
      "epoch": 0.5997931747673216,
      "grad_norm": 0.12275724858045578,
      "learning_rate": 1.600482592209583e-05,
      "loss": 0.9564,
      "mean_token_accuracy": 0.7541142761707306,
      "num_tokens": 408162048.0,
      "step": 1160
    },
    {
      "entropy": 1.48671875,
      "epoch": 0.6049638055842813,
      "grad_norm": 0.12119049578905106,
      "learning_rate": 1.5970355049982766e-05,
      "loss": 0.9241,
      "mean_token_accuracy": 0.7597784161567688,
      "num_tokens": 411698032.0,
      "step": 1170
    },
    {
      "entropy": 1.46875,
      "epoch": 0.610134436401241,
      "grad_norm": 0.11573296785354614,
      "learning_rate": 1.59358841778697e-05,
      "loss": 0.9028,
      "mean_token_accuracy": 0.7644691288471221,
      "num_tokens": 415198276.0,
      "step": 1180
    },
    {
      "entropy": 1.48984375,
      "epoch": 0.6153050672182007,
      "grad_norm": 0.12336082011461258,
      "learning_rate": 1.5901413305756636e-05,
      "loss": 0.9059,
      "mean_token_accuracy": 0.763912558555603,
      "num_tokens": 418689534.0,
      "step": 1190
    },
    {
      "entropy": 1.46953125,
      "epoch": 0.6204756980351603,
      "grad_norm": 0.1194983720779419,
      "learning_rate": 1.5866942433643573e-05,
      "loss": 0.9387,
      "mean_token_accuracy": 0.7568131923675537,
      "num_tokens": 422206745.0,
      "step": 1200
    },
    {
      "entropy": 1.47109375,
      "epoch": 0.6256463288521199,
      "grad_norm": 0.12008671462535858,
      "learning_rate": 1.583247156153051e-05,
      "loss": 0.9287,
      "mean_token_accuracy": 0.759381753206253,
      "num_tokens": 425735490.0,
      "step": 1210
    },
    {
      "entropy": 1.46484375,
      "epoch": 0.6308169596690796,
      "grad_norm": 0.12218579649925232,
      "learning_rate": 1.5798000689417443e-05,
      "loss": 0.9326,
      "mean_token_accuracy": 0.7594467222690582,
      "num_tokens": 429247925.0,
      "step": 1220
    },
    {
      "entropy": 1.4625,
      "epoch": 0.6359875904860393,
      "grad_norm": 0.12128184735774994,
      "learning_rate": 1.576352981730438e-05,
      "loss": 0.9143,
      "mean_token_accuracy": 0.7615148365497589,
      "num_tokens": 432773801.0,
      "step": 1230
    },
    {
      "entropy": 1.4796875,
      "epoch": 0.6411582213029989,
      "grad_norm": 0.12187935411930084,
      "learning_rate": 1.5729058945191316e-05,
      "loss": 0.92,
      "mean_token_accuracy": 0.7607288718223572,
      "num_tokens": 436248156.0,
      "step": 1240
    },
    {
      "entropy": 1.4765625,
      "epoch": 0.6463288521199586,
      "grad_norm": 0.12327364832162857,
      "learning_rate": 1.569458807307825e-05,
      "loss": 0.9249,
      "mean_token_accuracy": 0.7601008296012879,
      "num_tokens": 439808916.0,
      "step": 1250
    },
    {
      "entropy": 1.47734375,
      "epoch": 0.6514994829369183,
      "grad_norm": 0.12044602632522583,
      "learning_rate": 1.5660117200965186e-05,
      "loss": 0.9456,
      "mean_token_accuracy": 0.7569905340671539,
      "num_tokens": 443343747.0,
      "step": 1260
    },
    {
      "entropy": 1.47734375,
      "epoch": 0.656670113753878,
      "grad_norm": 0.12415958940982819,
      "learning_rate": 1.5625646328852123e-05,
      "loss": 0.935,
      "mean_token_accuracy": 0.7574470579624176,
      "num_tokens": 446845796.0,
      "step": 1270
    },
    {
      "entropy": 1.46796875,
      "epoch": 0.6618407445708376,
      "grad_norm": 0.12678299844264984,
      "learning_rate": 1.5591175456739056e-05,
      "loss": 0.9206,
      "mean_token_accuracy": 0.7590867578983307,
      "num_tokens": 450337419.0,
      "step": 1280
    },
    {
      "entropy": 1.48984375,
      "epoch": 0.6670113753877973,
      "grad_norm": 0.12798433005809784,
      "learning_rate": 1.5556704584625993e-05,
      "loss": 0.9467,
      "mean_token_accuracy": 0.7556301057338715,
      "num_tokens": 453821582.0,
      "step": 1290
    },
    {
      "entropy": 1.4796875,
      "epoch": 0.672182006204757,
      "grad_norm": 0.12155309319496155,
      "learning_rate": 1.552223371251293e-05,
      "loss": 0.9339,
      "mean_token_accuracy": 0.7585957705974579,
      "num_tokens": 457299519.0,
      "step": 1300
    },
    {
      "entropy": 1.47265625,
      "epoch": 0.6773526370217167,
      "grad_norm": 0.12246780097484589,
      "learning_rate": 1.5487762840399863e-05,
      "loss": 0.9403,
      "mean_token_accuracy": 0.7562773644924163,
      "num_tokens": 460871201.0,
      "step": 1310
    },
    {
      "entropy": 1.4984375,
      "epoch": 0.6825232678386763,
      "grad_norm": 0.11269519478082657,
      "learning_rate": 1.54532919682868e-05,
      "loss": 0.9263,
      "mean_token_accuracy": 0.7602653086185456,
      "num_tokens": 464382014.0,
      "step": 1320
    },
    {
      "entropy": 1.47890625,
      "epoch": 0.687693898655636,
      "grad_norm": 0.1227000281214714,
      "learning_rate": 1.5418821096173737e-05,
      "loss": 0.9458,
      "mean_token_accuracy": 0.7562500655651092,
      "num_tokens": 467894476.0,
      "step": 1330
    },
    {
      "entropy": 1.48046875,
      "epoch": 0.6928645294725957,
      "grad_norm": 0.12165822833776474,
      "learning_rate": 1.538435022406067e-05,
      "loss": 0.9325,
      "mean_token_accuracy": 0.756926417350769,
      "num_tokens": 471392902.0,
      "step": 1340
    },
    {
      "entropy": 1.4703125,
      "epoch": 0.6980351602895554,
      "grad_norm": 0.12303000688552856,
      "learning_rate": 1.5349879351947607e-05,
      "loss": 0.9064,
      "mean_token_accuracy": 0.7626752257347107,
      "num_tokens": 474902078.0,
      "step": 1350
    },
    {
      "entropy": 1.46328125,
      "epoch": 0.703205791106515,
      "grad_norm": 0.11540233343839645,
      "learning_rate": 1.531540847983454e-05,
      "loss": 0.9407,
      "mean_token_accuracy": 0.7561467230319977,
      "num_tokens": 478433388.0,
      "step": 1360
    },
    {
      "entropy": 1.49140625,
      "epoch": 0.7083764219234746,
      "grad_norm": 0.12478071451187134,
      "learning_rate": 1.5280937607721477e-05,
      "loss": 0.9359,
      "mean_token_accuracy": 0.7584864318370819,
      "num_tokens": 481921966.0,
      "step": 1370
    },
    {
      "entropy": 1.49375,
      "epoch": 0.7135470527404343,
      "grad_norm": 0.1309724599123001,
      "learning_rate": 1.5246466735608412e-05,
      "loss": 0.9271,
      "mean_token_accuracy": 0.7596810460090637,
      "num_tokens": 485379998.0,
      "step": 1380
    },
    {
      "entropy": 1.46015625,
      "epoch": 0.718717683557394,
      "grad_norm": 0.12762272357940674,
      "learning_rate": 1.5211995863495348e-05,
      "loss": 0.9154,
      "mean_token_accuracy": 0.7618087232112885,
      "num_tokens": 488917377.0,
      "step": 1390
    },
    {
      "entropy": 1.45703125,
      "epoch": 0.7238883143743536,
      "grad_norm": 0.12085416913032532,
      "learning_rate": 1.5177524991382283e-05,
      "loss": 0.9189,
      "mean_token_accuracy": 0.7611732959747315,
      "num_tokens": 492457961.0,
      "step": 1400
    },
    {
      "entropy": 1.4421875,
      "epoch": 0.7290589451913133,
      "grad_norm": 0.12487323582172394,
      "learning_rate": 1.5143054119269218e-05,
      "loss": 0.9286,
      "mean_token_accuracy": 0.7590953767299652,
      "num_tokens": 495919216.0,
      "step": 1410
    },
    {
      "entropy": 1.4453125,
      "epoch": 0.734229576008273,
      "grad_norm": 0.1264040321111679,
      "learning_rate": 1.5108583247156155e-05,
      "loss": 0.9369,
      "mean_token_accuracy": 0.7580833673477173,
      "num_tokens": 499447846.0,
      "step": 1420
    },
    {
      "entropy": 1.45859375,
      "epoch": 0.7394002068252327,
      "grad_norm": 0.11700266599655151,
      "learning_rate": 1.507411237504309e-05,
      "loss": 0.9444,
      "mean_token_accuracy": 0.7564094007015228,
      "num_tokens": 502931170.0,
      "step": 1430
    },
    {
      "entropy": 1.4625,
      "epoch": 0.7445708376421923,
      "grad_norm": 0.12091559171676636,
      "learning_rate": 1.5039641502930025e-05,
      "loss": 0.9403,
      "mean_token_accuracy": 0.756866055727005,
      "num_tokens": 506455188.0,
      "step": 1440
    },
    {
      "entropy": 1.43515625,
      "epoch": 0.749741468459152,
      "grad_norm": 0.11989731341600418,
      "learning_rate": 1.500517063081696e-05,
      "loss": 0.9139,
      "mean_token_accuracy": 0.7620751082897186,
      "num_tokens": 509960186.0,
      "step": 1450
    },
    {
      "entropy": 1.475,
      "epoch": 0.7549120992761117,
      "grad_norm": 0.12092899531126022,
      "learning_rate": 1.4970699758703897e-05,
      "loss": 0.9271,
      "mean_token_accuracy": 0.7599273502826691,
      "num_tokens": 513463939.0,
      "step": 1460
    },
    {
      "entropy": 1.478125,
      "epoch": 0.7600827300930714,
      "grad_norm": 0.11744925379753113,
      "learning_rate": 1.4936228886590832e-05,
      "loss": 0.9298,
      "mean_token_accuracy": 0.7581395506858826,
      "num_tokens": 516984058.0,
      "step": 1470
    },
    {
      "entropy": 1.465625,
      "epoch": 0.765253360910031,
      "grad_norm": 0.11444874107837677,
      "learning_rate": 1.4901758014477767e-05,
      "loss": 0.9228,
      "mean_token_accuracy": 0.760771507024765,
      "num_tokens": 520529841.0,
      "step": 1480
    },
    {
      "entropy": 1.4828125,
      "epoch": 0.7704239917269907,
      "grad_norm": 0.12582576274871826,
      "learning_rate": 1.4867287142364704e-05,
      "loss": 0.9261,
      "mean_token_accuracy": 0.7593208611011505,
      "num_tokens": 524080208.0,
      "step": 1490
    },
    {
      "entropy": 1.4671875,
      "epoch": 0.7755946225439504,
      "grad_norm": 0.11210944503545761,
      "learning_rate": 1.4832816270251637e-05,
      "loss": 0.9086,
      "mean_token_accuracy": 0.7635318100452423,
      "num_tokens": 527634182.0,
      "step": 1500
    },
    {
      "entropy": 1.44375,
      "epoch": 0.7807652533609101,
      "grad_norm": 0.12150448560714722,
      "learning_rate": 1.4798345398138574e-05,
      "loss": 0.9147,
      "mean_token_accuracy": 0.7632191896438598,
      "num_tokens": 531156991.0,
      "step": 1510
    },
    {
      "entropy": 1.453125,
      "epoch": 0.7859358841778697,
      "grad_norm": 0.11527998745441437,
      "learning_rate": 1.476387452602551e-05,
      "loss": 0.9302,
      "mean_token_accuracy": 0.7586703896522522,
      "num_tokens": 534685727.0,
      "step": 1520
    },
    {
      "entropy": 1.45859375,
      "epoch": 0.7911065149948294,
      "grad_norm": 0.11719609051942825,
      "learning_rate": 1.4729403653912444e-05,
      "loss": 0.9273,
      "mean_token_accuracy": 0.7598319470882415,
      "num_tokens": 538205698.0,
      "step": 1530
    },
    {
      "entropy": 1.44765625,
      "epoch": 0.796277145811789,
      "grad_norm": 0.11949972808361053,
      "learning_rate": 1.469493278179938e-05,
      "loss": 0.892,
      "mean_token_accuracy": 0.7663702487945556,
      "num_tokens": 541713751.0,
      "step": 1540
    },
    {
      "entropy": 1.47578125,
      "epoch": 0.8014477766287487,
      "grad_norm": 0.1147402822971344,
      "learning_rate": 1.4660461909686317e-05,
      "loss": 0.9391,
      "mean_token_accuracy": 0.75692800283432,
      "num_tokens": 545255223.0,
      "step": 1550
    },
    {
      "entropy": 1.484375,
      "epoch": 0.8066184074457083,
      "grad_norm": 0.11995094269514084,
      "learning_rate": 1.4625991037573252e-05,
      "loss": 0.9238,
      "mean_token_accuracy": 0.7611361384391785,
      "num_tokens": 548770048.0,
      "step": 1560
    },
    {
      "entropy": 1.4578125,
      "epoch": 0.811789038262668,
      "grad_norm": 0.1158953309059143,
      "learning_rate": 1.4591520165460187e-05,
      "loss": 0.9055,
      "mean_token_accuracy": 0.7651141226291657,
      "num_tokens": 552280466.0,
      "step": 1570
    },
    {
      "entropy": 1.49375,
      "epoch": 0.8169596690796277,
      "grad_norm": 0.12239167839288712,
      "learning_rate": 1.4557049293347122e-05,
      "loss": 0.9674,
      "mean_token_accuracy": 0.7507982015609741,
      "num_tokens": 555747931.0,
      "step": 1580
    },
    {
      "entropy": 1.484375,
      "epoch": 0.8221302998965874,
      "grad_norm": 0.11154595762491226,
      "learning_rate": 1.4522578421234059e-05,
      "loss": 0.9261,
      "mean_token_accuracy": 0.7608684718608856,
      "num_tokens": 559290660.0,
      "step": 1590
    },
    {
      "entropy": 1.484375,
      "epoch": 0.827300930713547,
      "grad_norm": 0.1206263080239296,
      "learning_rate": 1.4488107549120994e-05,
      "loss": 0.9171,
      "mean_token_accuracy": 0.7622957825660706,
      "num_tokens": 562788799.0,
      "step": 1600
    },
    {
      "entropy": 1.4640625,
      "epoch": 0.8324715615305067,
      "grad_norm": 0.11691080778837204,
      "learning_rate": 1.4453636677007929e-05,
      "loss": 0.9425,
      "mean_token_accuracy": 0.7569314420223237,
      "num_tokens": 566371887.0,
      "step": 1610
    },
    {
      "entropy": 1.44921875,
      "epoch": 0.8376421923474664,
      "grad_norm": 0.11534351855516434,
      "learning_rate": 1.4419165804894866e-05,
      "loss": 0.9099,
      "mean_token_accuracy": 0.7634938955307007,
      "num_tokens": 569917270.0,
      "step": 1620
    },
    {
      "entropy": 1.46328125,
      "epoch": 0.8428128231644261,
      "grad_norm": 0.11727341264486313,
      "learning_rate": 1.4384694932781799e-05,
      "loss": 0.9037,
      "mean_token_accuracy": 0.7641095459461212,
      "num_tokens": 573393579.0,
      "step": 1630
    },
    {
      "entropy": 1.49375,
      "epoch": 0.8479834539813857,
      "grad_norm": 0.12199830263853073,
      "learning_rate": 1.4350224060668736e-05,
      "loss": 0.9202,
      "mean_token_accuracy": 0.7616257786750793,
      "num_tokens": 576885405.0,
      "step": 1640
    },
    {
      "entropy": 1.49296875,
      "epoch": 0.8531540847983454,
      "grad_norm": 0.11643374711275101,
      "learning_rate": 1.4315753188555672e-05,
      "loss": 0.8988,
      "mean_token_accuracy": 0.7642439842224121,
      "num_tokens": 580339669.0,
      "step": 1650
    },
    {
      "entropy": 1.475,
      "epoch": 0.8583247156153051,
      "grad_norm": 0.1187024861574173,
      "learning_rate": 1.4281282316442606e-05,
      "loss": 0.9011,
      "mean_token_accuracy": 0.764795571565628,
      "num_tokens": 583862692.0,
      "step": 1660
    },
    {
      "entropy": 1.4828125,
      "epoch": 0.8634953464322648,
      "grad_norm": 0.11858861893415451,
      "learning_rate": 1.4246811444329543e-05,
      "loss": 0.9113,
      "mean_token_accuracy": 0.7625495612621307,
      "num_tokens": 587415782.0,
      "step": 1670
    },
    {
      "entropy": 1.4921875,
      "epoch": 0.8686659772492245,
      "grad_norm": 0.12404865026473999,
      "learning_rate": 1.421234057221648e-05,
      "loss": 0.9305,
      "mean_token_accuracy": 0.7590306222438812,
      "num_tokens": 590914299.0,
      "step": 1680
    },
    {
      "entropy": 1.46328125,
      "epoch": 0.8738366080661841,
      "grad_norm": 0.11309592425823212,
      "learning_rate": 1.4177869700103413e-05,
      "loss": 0.9215,
      "mean_token_accuracy": 0.7607256054878235,
      "num_tokens": 594392721.0,
      "step": 1690
    },
    {
      "entropy": 1.47734375,
      "epoch": 0.8790072388831437,
      "grad_norm": 0.11948959529399872,
      "learning_rate": 1.414339882799035e-05,
      "loss": 0.9219,
      "mean_token_accuracy": 0.7603579103946686,
      "num_tokens": 597925926.0,
      "step": 1700
    },
    {
      "entropy": 1.45078125,
      "epoch": 0.8841778697001034,
      "grad_norm": 0.12206869572401047,
      "learning_rate": 1.4108927955877284e-05,
      "loss": 0.9058,
      "mean_token_accuracy": 0.7641239047050477,
      "num_tokens": 601418970.0,
      "step": 1710
    },
    {
      "entropy": 1.4953125,
      "epoch": 0.889348500517063,
      "grad_norm": 0.11994688957929611,
      "learning_rate": 1.4074457083764221e-05,
      "loss": 0.9439,
      "mean_token_accuracy": 0.7552627563476563,
      "num_tokens": 604907270.0,
      "step": 1720
    },
    {
      "entropy": 1.45078125,
      "epoch": 0.8945191313340227,
      "grad_norm": 0.1137346401810646,
      "learning_rate": 1.4039986211651156e-05,
      "loss": 0.9044,
      "mean_token_accuracy": 0.7650101542472839,
      "num_tokens": 608417023.0,
      "step": 1730
    },
    {
      "entropy": 1.47734375,
      "epoch": 0.8996897621509824,
      "grad_norm": 0.1210690587759018,
      "learning_rate": 1.4005515339538091e-05,
      "loss": 0.9318,
      "mean_token_accuracy": 0.7580539882183075,
      "num_tokens": 611892606.0,
      "step": 1740
    },
    {
      "entropy": 1.4609375,
      "epoch": 0.9048603929679421,
      "grad_norm": 0.1245742216706276,
      "learning_rate": 1.3971044467425028e-05,
      "loss": 0.9071,
      "mean_token_accuracy": 0.7634753048419952,
      "num_tokens": 615479462.0,
      "step": 1750
    },
    {
      "entropy": 1.46953125,
      "epoch": 0.9100310237849017,
      "grad_norm": 0.1237822026014328,
      "learning_rate": 1.3936573595311961e-05,
      "loss": 0.9,
      "mean_token_accuracy": 0.7645568609237671,
      "num_tokens": 618981303.0,
      "step": 1760
    },
    {
      "entropy": 1.478125,
      "epoch": 0.9152016546018614,
      "grad_norm": 0.11876232922077179,
      "learning_rate": 1.3902102723198898e-05,
      "loss": 0.9173,
      "mean_token_accuracy": 0.762673944234848,
      "num_tokens": 622483548.0,
      "step": 1770
    },
    {
      "entropy": 1.4609375,
      "epoch": 0.9203722854188211,
      "grad_norm": 0.11670742928981781,
      "learning_rate": 1.3867631851085835e-05,
      "loss": 0.9214,
      "mean_token_accuracy": 0.7605323374271393,
      "num_tokens": 626030553.0,
      "step": 1780
    },
    {
      "entropy": 1.4578125,
      "epoch": 0.9255429162357808,
      "grad_norm": 0.11695141345262527,
      "learning_rate": 1.3833160978972768e-05,
      "loss": 0.9207,
      "mean_token_accuracy": 0.7607087075710297,
      "num_tokens": 629557901.0,
      "step": 1790
    },
    {
      "entropy": 1.45234375,
      "epoch": 0.9307135470527405,
      "grad_norm": 0.1188148483633995,
      "learning_rate": 1.3798690106859705e-05,
      "loss": 0.9227,
      "mean_token_accuracy": 0.7603236854076385,
      "num_tokens": 633058595.0,
      "step": 1800
    },
    {
      "entropy": 1.43671875,
      "epoch": 0.9358841778697001,
      "grad_norm": 0.11865752935409546,
      "learning_rate": 1.3764219234746641e-05,
      "loss": 0.8993,
      "mean_token_accuracy": 0.7662648856639862,
      "num_tokens": 636564235.0,
      "step": 1810
    },
    {
      "entropy": 1.4421875,
      "epoch": 0.9410548086866598,
      "grad_norm": 0.1295095682144165,
      "learning_rate": 1.3729748362633575e-05,
      "loss": 0.9281,
      "mean_token_accuracy": 0.7599412083625794,
      "num_tokens": 640078587.0,
      "step": 1820
    },
    {
      "entropy": 1.42265625,
      "epoch": 0.9462254395036195,
      "grad_norm": 0.11797858774662018,
      "learning_rate": 1.3695277490520511e-05,
      "loss": 0.9165,
      "mean_token_accuracy": 0.761432534456253,
      "num_tokens": 643624605.0,
      "step": 1830
    },
    {
      "entropy": 1.42578125,
      "epoch": 0.9513960703205792,
      "grad_norm": 0.12107110023498535,
      "learning_rate": 1.3660806618407448e-05,
      "loss": 0.9199,
      "mean_token_accuracy": 0.7606053829193116,
      "num_tokens": 647142310.0,
      "step": 1840
    },
    {
      "entropy": 1.42890625,
      "epoch": 0.9565667011375388,
      "grad_norm": 0.10939899832010269,
      "learning_rate": 1.3626335746294381e-05,
      "loss": 0.9083,
      "mean_token_accuracy": 0.7634673655033112,
      "num_tokens": 650597087.0,
      "step": 1850
    },
    {
      "entropy": 1.4453125,
      "epoch": 0.9617373319544984,
      "grad_norm": 0.12392441928386688,
      "learning_rate": 1.3591864874181318e-05,
      "loss": 0.9025,
      "mean_token_accuracy": 0.7637257993221283,
      "num_tokens": 654112458.0,
      "step": 1860
    },
    {
      "entropy": 1.47109375,
      "epoch": 0.9669079627714581,
      "grad_norm": 0.11592892557382584,
      "learning_rate": 1.3557394002068253e-05,
      "loss": 0.9189,
      "mean_token_accuracy": 0.7612792730331421,
      "num_tokens": 657622848.0,
      "step": 1870
    },
    {
      "entropy": 1.45,
      "epoch": 0.9720785935884177,
      "grad_norm": 0.11586037278175354,
      "learning_rate": 1.352292312995519e-05,
      "loss": 0.9115,
      "mean_token_accuracy": 0.7627564072608948,
      "num_tokens": 661133982.0,
      "step": 1880
    },
    {
      "entropy": 1.4546875,
      "epoch": 0.9772492244053774,
      "grad_norm": 0.12101200968027115,
      "learning_rate": 1.3488452257842123e-05,
      "loss": 0.9049,
      "mean_token_accuracy": 0.7641494333744049,
      "num_tokens": 664651190.0,
      "step": 1890
    },
    {
      "entropy": 1.4359375,
      "epoch": 0.9824198552223371,
      "grad_norm": 0.1195937767624855,
      "learning_rate": 1.345398138572906e-05,
      "loss": 0.9232,
      "mean_token_accuracy": 0.7607501983642578,
      "num_tokens": 668185502.0,
      "step": 1900
    },
    {
      "entropy": 1.43515625,
      "epoch": 0.9875904860392968,
      "grad_norm": 0.13262051343917847,
      "learning_rate": 1.3419510513615997e-05,
      "loss": 0.911,
      "mean_token_accuracy": 0.7632193565368652,
      "num_tokens": 671716553.0,
      "step": 1910
    },
    {
      "entropy": 1.43203125,
      "epoch": 0.9927611168562565,
      "grad_norm": 0.13163335621356964,
      "learning_rate": 1.338503964150293e-05,
      "loss": 0.9053,
      "mean_token_accuracy": 0.7645232558250428,
      "num_tokens": 675258147.0,
      "step": 1920
    },
    {
      "entropy": 1.45625,
      "epoch": 0.9979317476732161,
      "grad_norm": 0.12155997008085251,
      "learning_rate": 1.3350568769389867e-05,
      "loss": 0.9054,
      "mean_token_accuracy": 0.7639196395874024,
      "num_tokens": 678787268.0,
      "step": 1930
    },
    {
      "entropy": 1.41796875,
      "epoch": 1.0031023784901758,
      "grad_norm": 0.12953729927539825,
      "learning_rate": 1.3316097897276803e-05,
      "loss": 0.9077,
      "mean_token_accuracy": 0.7628555178642273,
      "num_tokens": 682352660.0,
      "step": 1940
    },
    {
      "entropy": 1.35234375,
      "epoch": 1.0082730093071355,
      "grad_norm": 0.13617122173309326,
      "learning_rate": 1.3281627025163737e-05,
      "loss": 0.8488,
      "mean_token_accuracy": 0.7747729301452637,
      "num_tokens": 685885551.0,
      "step": 1950
    },
    {
      "entropy": 1.38359375,
      "epoch": 1.0134436401240952,
      "grad_norm": 0.12055247277021408,
      "learning_rate": 1.3247156153050673e-05,
      "loss": 0.8776,
      "mean_token_accuracy": 0.770104318857193,
      "num_tokens": 689438133.0,
      "step": 1960
    },
    {
      "entropy": 1.371875,
      "epoch": 1.0186142709410548,
      "grad_norm": 0.12040404230356216,
      "learning_rate": 1.321268528093761e-05,
      "loss": 0.8446,
      "mean_token_accuracy": 0.7767514526844025,
      "num_tokens": 692951376.0,
      "step": 1970
    },
    {
      "entropy": 1.37578125,
      "epoch": 1.0237849017580145,
      "grad_norm": 0.11971385031938553,
      "learning_rate": 1.3178214408824543e-05,
      "loss": 0.8555,
      "mean_token_accuracy": 0.7742941915988922,
      "num_tokens": 696477318.0,
      "step": 1980
    },
    {
      "entropy": 1.38828125,
      "epoch": 1.0289555325749742,
      "grad_norm": 0.11191146075725555,
      "learning_rate": 1.314374353671148e-05,
      "loss": 0.8586,
      "mean_token_accuracy": 0.7740735948085785,
      "num_tokens": 699984789.0,
      "step": 1990
    },
    {
      "entropy": 1.37265625,
      "epoch": 1.0341261633919339,
      "grad_norm": 0.12285640090703964,
      "learning_rate": 1.3109272664598415e-05,
      "loss": 0.8513,
      "mean_token_accuracy": 0.7760735332965851,
      "num_tokens": 703494729.0,
      "step": 2000
    },
    {
      "entropy": 1.3921875,
      "epoch": 1.0392967942088935,
      "grad_norm": 0.1279694139957428,
      "learning_rate": 1.307480179248535e-05,
      "loss": 0.857,
      "mean_token_accuracy": 0.7742870032787323,
      "num_tokens": 707006914.0,
      "step": 2010
    },
    {
      "entropy": 1.38828125,
      "epoch": 1.0444674250258532,
      "grad_norm": 0.1300376057624817,
      "learning_rate": 1.3040330920372287e-05,
      "loss": 0.8541,
      "mean_token_accuracy": 0.7744471371173859,
      "num_tokens": 710573396.0,
      "step": 2020
    },
    {
      "entropy": 1.375,
      "epoch": 1.049638055842813,
      "grad_norm": 0.12188553810119629,
      "learning_rate": 1.3005860048259222e-05,
      "loss": 0.8461,
      "mean_token_accuracy": 0.7757511854171752,
      "num_tokens": 714162377.0,
      "step": 2030
    },
    {
      "entropy": 1.36875,
      "epoch": 1.0548086866597726,
      "grad_norm": 0.12129724025726318,
      "learning_rate": 1.2971389176146159e-05,
      "loss": 0.8318,
      "mean_token_accuracy": 0.7799286842346191,
      "num_tokens": 717683573.0,
      "step": 2040
    },
    {
      "entropy": 1.37265625,
      "epoch": 1.0599793174767322,
      "grad_norm": 0.11378098279237747,
      "learning_rate": 1.2936918304033092e-05,
      "loss": 0.8475,
      "mean_token_accuracy": 0.7767247796058655,
      "num_tokens": 721227222.0,
      "step": 2050
    },
    {
      "entropy": 1.38359375,
      "epoch": 1.065149948293692,
      "grad_norm": 0.1249590516090393,
      "learning_rate": 1.2902447431920029e-05,
      "loss": 0.8305,
      "mean_token_accuracy": 0.7789794862270355,
      "num_tokens": 724706080.0,
      "step": 2060
    },
    {
      "entropy": 1.3953125,
      "epoch": 1.0703205791106516,
      "grad_norm": 0.12428700923919678,
      "learning_rate": 1.2867976559806965e-05,
      "loss": 0.8666,
      "mean_token_accuracy": 0.7721951365470886,
      "num_tokens": 728199061.0,
      "step": 2070
    },
    {
      "entropy": 1.37265625,
      "epoch": 1.0754912099276113,
      "grad_norm": 0.12617260217666626,
      "learning_rate": 1.2833505687693899e-05,
      "loss": 0.8381,
      "mean_token_accuracy": 0.7785492360591888,
      "num_tokens": 731689061.0,
      "step": 2080
    },
    {
      "entropy": 1.384375,
      "epoch": 1.080661840744571,
      "grad_norm": 0.11602634191513062,
      "learning_rate": 1.2799034815580835e-05,
      "loss": 0.8618,
      "mean_token_accuracy": 0.7727059543132782,
      "num_tokens": 735212770.0,
      "step": 2090
    },
    {
      "entropy": 1.36796875,
      "epoch": 1.0858324715615306,
      "grad_norm": 0.11466547101736069,
      "learning_rate": 1.2764563943467772e-05,
      "loss": 0.8158,
      "mean_token_accuracy": 0.7820233702659607,
      "num_tokens": 738666342.0,
      "step": 2100
    }
  ],
  "logging_steps": 10,
  "max_steps": 5802,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 2100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2.8591552888100094e+19,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}