{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 4.0,
  "eval_steps": 500,
  "global_step": 916,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "entropy": 0.39035615921020506,
      "epoch": 0.04381161007667032,
      "grad_norm": 19.073434829711914,
      "learning_rate": 2e-05,
      "loss": 4.3018,
      "mean_token_accuracy": 0.5682853847742081,
      "num_tokens": 29485.0,
      "step": 10
    },
    {
      "entropy": 0.7484827220439911,
      "epoch": 0.08762322015334063,
      "grad_norm": 3.970487356185913,
      "learning_rate": 2e-05,
      "loss": 1.9588,
      "mean_token_accuracy": 0.6895880490541458,
      "num_tokens": 58977.0,
      "step": 20
    },
    {
      "entropy": 1.1801635682582856,
      "epoch": 0.13143483023001096,
      "grad_norm": 2.532576322555542,
      "learning_rate": 2e-05,
      "loss": 1.431,
      "mean_token_accuracy": 0.7343599200248718,
      "num_tokens": 88402.0,
      "step": 30
    },
    {
      "entropy": 1.1498825669288635,
      "epoch": 0.17524644030668127,
      "grad_norm": 1.982933521270752,
      "learning_rate": 2e-05,
      "loss": 1.1355,
      "mean_token_accuracy": 0.7867384225130081,
      "num_tokens": 117810.0,
      "step": 40
    },
    {
      "entropy": 0.8834485694766044,
      "epoch": 0.21905805038335158,
      "grad_norm": 1.8390016555786133,
      "learning_rate": 2e-05,
      "loss": 0.8543,
      "mean_token_accuracy": 0.8341205582022667,
      "num_tokens": 147327.0,
      "step": 50
    },
    {
      "entropy": 0.5984191231429576,
      "epoch": 0.2628696604600219,
      "grad_norm": 1.747591257095337,
      "learning_rate": 2e-05,
      "loss": 0.5852,
      "mean_token_accuracy": 0.8857637628912925,
      "num_tokens": 176791.0,
      "step": 60
    },
    {
      "entropy": 0.3483880817890167,
      "epoch": 0.3066812705366922,
      "grad_norm": 1.5721803903579712,
      "learning_rate": 2e-05,
      "loss": 0.3545,
      "mean_token_accuracy": 0.9292098119854927,
      "num_tokens": 206271.0,
      "step": 70
    },
    {
      "entropy": 0.20057316161692143,
      "epoch": 0.35049288061336253,
      "grad_norm": 1.2334290742874146,
      "learning_rate": 2e-05,
      "loss": 0.1877,
      "mean_token_accuracy": 0.9710112065076828,
      "num_tokens": 235692.0,
      "step": 80
    },
    {
      "entropy": 0.12095212489366532,
      "epoch": 0.39430449069003287,
      "grad_norm": 0.8091076016426086,
      "learning_rate": 2e-05,
      "loss": 0.1073,
      "mean_token_accuracy": 0.9890251606702805,
      "num_tokens": 265133.0,
      "step": 90
    },
    {
      "entropy": 0.10107735879719257,
      "epoch": 0.43811610076670315,
      "grad_norm": 0.6297779679298401,
      "learning_rate": 2e-05,
      "loss": 0.085,
      "mean_token_accuracy": 0.9887343898415566,
      "num_tokens": 294684.0,
      "step": 100
    },
    {
      "entropy": 0.08415136393159628,
      "epoch": 0.4819277108433735,
      "grad_norm": 0.47038641571998596,
      "learning_rate": 2e-05,
      "loss": 0.0702,
      "mean_token_accuracy": 0.9906241714954376,
      "num_tokens": 324140.0,
      "step": 110
    },
    {
      "entropy": 0.07522545410320162,
      "epoch": 0.5257393209200438,
      "grad_norm": 0.44743451476097107,
      "learning_rate": 2e-05,
      "loss": 0.0682,
      "mean_token_accuracy": 0.9909884691238403,
      "num_tokens": 353646.0,
      "step": 120
    },
    {
      "entropy": 0.07190036196261644,
      "epoch": 0.5695509309967142,
      "grad_norm": 0.4326375722885132,
      "learning_rate": 2e-05,
      "loss": 0.0679,
      "mean_token_accuracy": 0.990379473567009,
      "num_tokens": 383164.0,
      "step": 130
    },
    {
      "entropy": 0.06936099929735065,
      "epoch": 0.6133625410733844,
      "grad_norm": 0.3178843557834625,
      "learning_rate": 2e-05,
      "loss": 0.0638,
      "mean_token_accuracy": 0.9907173082232476,
      "num_tokens": 412692.0,
      "step": 140
    },
    {
      "entropy": 0.06100328806787729,
      "epoch": 0.6571741511500547,
      "grad_norm": 0.3546409606933594,
      "learning_rate": 2e-05,
      "loss": 0.056,
      "mean_token_accuracy": 0.9925005912780762,
      "num_tokens": 442159.0,
      "step": 150
    },
    {
      "entropy": 0.060952140018343925,
      "epoch": 0.7009857612267251,
      "grad_norm": 0.34292343258857727,
      "learning_rate": 2e-05,
      "loss": 0.0551,
      "mean_token_accuracy": 0.991845327615738,
      "num_tokens": 471592.0,
      "step": 160
    },
    {
      "entropy": 0.06124872919172049,
      "epoch": 0.7447973713033954,
      "grad_norm": 0.3005734384059906,
      "learning_rate": 2e-05,
      "loss": 0.0552,
      "mean_token_accuracy": 0.9918943449854851,
      "num_tokens": 501090.0,
      "step": 170
    },
    {
      "entropy": 0.05876323413103819,
      "epoch": 0.7886089813800657,
      "grad_norm": 0.24807888269424438,
      "learning_rate": 2e-05,
      "loss": 0.0517,
      "mean_token_accuracy": 0.9916606426239014,
      "num_tokens": 530595.0,
      "step": 180
    },
    {
      "entropy": 0.058617806807160375,
      "epoch": 0.8324205914567361,
      "grad_norm": 0.4288617968559265,
      "learning_rate": 2e-05,
      "loss": 0.0572,
      "mean_token_accuracy": 0.9911757484078407,
      "num_tokens": 560059.0,
      "step": 190
    },
    {
      "entropy": 0.05736046200618148,
      "epoch": 0.8762322015334063,
      "grad_norm": 0.3320342004299164,
      "learning_rate": 2e-05,
      "loss": 0.05,
      "mean_token_accuracy": 0.9926259219646454,
      "num_tokens": 589375.0,
      "step": 200
    },
    {
      "entropy": 0.05618324866518378,
      "epoch": 0.9200438116100766,
      "grad_norm": 0.33638137578964233,
      "learning_rate": 2e-05,
      "loss": 0.0525,
      "mean_token_accuracy": 0.9919258087873459,
      "num_tokens": 618850.0,
      "step": 210
    },
    {
      "entropy": 0.05968485539779067,
      "epoch": 0.963855421686747,
      "grad_norm": 0.5651094317436218,
      "learning_rate": 2e-05,
      "loss": 0.052,
      "mean_token_accuracy": 0.9916668817400932,
      "num_tokens": 648391.0,
      "step": 220
    },
    {
      "entropy": 0.060051426794883365,
      "epoch": 1.004381161007667,
      "grad_norm": 0.31502196192741394,
      "learning_rate": 2e-05,
      "loss": 0.0486,
      "mean_token_accuracy": 0.9919279153282577,
      "num_tokens": 675667.0,
      "step": 230
    },
    {
      "entropy": 0.05292966021224856,
      "epoch": 1.0481927710843373,
      "grad_norm": 0.40586429834365845,
      "learning_rate": 2e-05,
      "loss": 0.0475,
      "mean_token_accuracy": 0.9920881032943726,
      "num_tokens": 705116.0,
      "step": 240
    },
    {
      "entropy": 0.052853992022573945,
      "epoch": 1.0920043811610076,
      "grad_norm": 0.2613174021244049,
      "learning_rate": 2e-05,
      "loss": 0.045,
      "mean_token_accuracy": 0.9926601052284241,
      "num_tokens": 734545.0,
      "step": 250
    },
    {
      "entropy": 0.052618366107344626,
      "epoch": 1.135815991237678,
      "grad_norm": 0.3835960030555725,
      "learning_rate": 2e-05,
      "loss": 0.044,
      "mean_token_accuracy": 0.9924288704991341,
      "num_tokens": 764013.0,
      "step": 260
    },
    {
      "entropy": 0.052085249312222005,
      "epoch": 1.1796276013143483,
      "grad_norm": 0.35782596468925476,
      "learning_rate": 2e-05,
      "loss": 0.046,
      "mean_token_accuracy": 0.9919404909014702,
      "num_tokens": 793540.0,
      "step": 270
    },
    {
      "entropy": 0.05476817348971963,
      "epoch": 1.2234392113910186,
      "grad_norm": 0.30708274245262146,
      "learning_rate": 2e-05,
      "loss": 0.0474,
      "mean_token_accuracy": 0.9918309196829795,
      "num_tokens": 823042.0,
      "step": 280
    },
    {
      "entropy": 0.05322778979316354,
      "epoch": 1.267250821467689,
      "grad_norm": 0.2604309022426605,
      "learning_rate": 2e-05,
      "loss": 0.0424,
      "mean_token_accuracy": 0.9924165293574333,
      "num_tokens": 852457.0,
      "step": 290
    },
    {
      "entropy": 0.04979227380827069,
      "epoch": 1.3110624315443593,
      "grad_norm": 0.334300696849823,
      "learning_rate": 2e-05,
      "loss": 0.0404,
      "mean_token_accuracy": 0.9923597663640976,
      "num_tokens": 881910.0,
      "step": 300
    },
    {
      "entropy": 0.05080299507826567,
      "epoch": 1.3548740416210296,
      "grad_norm": 0.3369467854499817,
      "learning_rate": 2e-05,
      "loss": 0.0429,
      "mean_token_accuracy": 0.9918962031602859,
      "num_tokens": 911418.0,
      "step": 310
    },
    {
      "entropy": 0.052211628574877975,
      "epoch": 1.3986856516976998,
      "grad_norm": 0.49774906039237976,
      "learning_rate": 2e-05,
      "loss": 0.0442,
      "mean_token_accuracy": 0.9922201976180076,
      "num_tokens": 940867.0,
      "step": 320
    },
    {
      "entropy": 0.05023647788912058,
      "epoch": 1.44249726177437,
      "grad_norm": 0.3457210659980774,
      "learning_rate": 2e-05,
      "loss": 0.0406,
      "mean_token_accuracy": 0.9923922121524811,
      "num_tokens": 970331.0,
      "step": 330
    },
    {
      "entropy": 0.047794731613248584,
      "epoch": 1.4863088718510404,
      "grad_norm": 0.35947972536087036,
      "learning_rate": 2e-05,
      "loss": 0.0417,
      "mean_token_accuracy": 0.992406377196312,
      "num_tokens": 999841.0,
      "step": 340
    },
    {
      "entropy": 0.047655423637479544,
      "epoch": 1.5301204819277108,
      "grad_norm": 0.37163057923316956,
      "learning_rate": 2e-05,
      "loss": 0.0405,
      "mean_token_accuracy": 0.9928415760397911,
      "num_tokens": 1029329.0,
      "step": 350
    },
    {
      "entropy": 0.04932913850061595,
      "epoch": 1.5739320920043811,
      "grad_norm": 0.34155628085136414,
      "learning_rate": 2e-05,
      "loss": 0.0409,
      "mean_token_accuracy": 0.992286778986454,
      "num_tokens": 1058772.0,
      "step": 360
    },
    {
      "entropy": 0.05013784933835268,
      "epoch": 1.6177437020810514,
      "grad_norm": 0.3228084444999695,
      "learning_rate": 2e-05,
      "loss": 0.0385,
      "mean_token_accuracy": 0.9925702095031739,
      "num_tokens": 1088250.0,
      "step": 370
    },
    {
      "entropy": 0.04742059959098697,
      "epoch": 1.6615553121577218,
      "grad_norm": 0.2427096664905548,
      "learning_rate": 2e-05,
      "loss": 0.0348,
      "mean_token_accuracy": 0.9927995279431343,
      "num_tokens": 1117693.0,
      "step": 380
    },
    {
      "entropy": 0.04431099114008248,
      "epoch": 1.7053669222343921,
      "grad_norm": 0.2118023931980133,
      "learning_rate": 2e-05,
      "loss": 0.0349,
      "mean_token_accuracy": 0.9929380178451538,
      "num_tokens": 1147140.0,
      "step": 390
    },
    {
      "entropy": 0.04604467884637416,
      "epoch": 1.7491785323110625,
      "grad_norm": 0.25765758752822876,
      "learning_rate": 2e-05,
      "loss": 0.037,
      "mean_token_accuracy": 0.9926380544900895,
      "num_tokens": 1176644.0,
      "step": 400
    },
    {
      "entropy": 0.04647672027349472,
      "epoch": 1.7929901423877328,
      "grad_norm": 0.2914351522922516,
      "learning_rate": 2e-05,
      "loss": 0.0362,
      "mean_token_accuracy": 0.99278933852911,
      "num_tokens": 1206205.0,
      "step": 410
    },
    {
      "entropy": 0.04514645580202341,
      "epoch": 1.8368017524644031,
      "grad_norm": 0.23769572377204895,
      "learning_rate": 2e-05,
      "loss": 0.0375,
      "mean_token_accuracy": 0.992934164404869,
      "num_tokens": 1235651.0,
      "step": 420
    },
    {
      "entropy": 0.044267228711396454,
      "epoch": 1.8806133625410735,
      "grad_norm": 0.22526511549949646,
      "learning_rate": 2e-05,
      "loss": 0.0331,
      "mean_token_accuracy": 0.9943611547350883,
      "num_tokens": 1265105.0,
      "step": 430
    },
    {
      "entropy": 0.04463120717555284,
      "epoch": 1.9244249726177438,
      "grad_norm": 0.27114230394363403,
      "learning_rate": 2e-05,
      "loss": 0.0348,
      "mean_token_accuracy": 0.99424988925457,
      "num_tokens": 1294507.0,
      "step": 440
    },
    {
      "entropy": 0.045189128536731,
      "epoch": 1.9682365826944141,
      "grad_norm": 0.3721632957458496,
      "learning_rate": 2e-05,
      "loss": 0.0348,
      "mean_token_accuracy": 0.9939030453562736,
      "num_tokens": 1324016.0,
      "step": 450
    },
    {
      "entropy": 0.04366534243564348,
      "epoch": 2.008762322015334,
      "grad_norm": 0.2200087010860443,
      "learning_rate": 2e-05,
      "loss": 0.0344,
      "mean_token_accuracy": 0.9943878634555919,
      "num_tokens": 1351286.0,
      "step": 460
    },
    {
      "entropy": 0.04503620527684689,
      "epoch": 2.0525739320920042,
      "grad_norm": 0.2603790760040283,
      "learning_rate": 2e-05,
      "loss": 0.0321,
      "mean_token_accuracy": 0.9944434046745301,
      "num_tokens": 1380791.0,
      "step": 470
    },
    {
      "entropy": 0.04118769532069564,
      "epoch": 2.0963855421686746,
      "grad_norm": 0.29507240653038025,
      "learning_rate": 2e-05,
      "loss": 0.0314,
      "mean_token_accuracy": 0.994631552696228,
      "num_tokens": 1410230.0,
      "step": 480
    },
    {
      "entropy": 0.04305135570466519,
      "epoch": 2.140197152245345,
      "grad_norm": 0.2874581515789032,
      "learning_rate": 2e-05,
      "loss": 0.0325,
      "mean_token_accuracy": 0.9944758623838424,
      "num_tokens": 1439719.0,
      "step": 490
    },
    {
      "entropy": 0.0408366883173585,
      "epoch": 2.1840087623220152,
      "grad_norm": 0.3915460407733917,
      "learning_rate": 2e-05,
      "loss": 0.0304,
      "mean_token_accuracy": 0.9943425431847572,
      "num_tokens": 1469243.0,
      "step": 500
    },
    {
      "entropy": 0.041507516894489525,
      "epoch": 2.2278203723986856,
      "grad_norm": 0.2981090545654297,
      "learning_rate": 2e-05,
      "loss": 0.0299,
      "mean_token_accuracy": 0.9945579648017884,
      "num_tokens": 1498662.0,
      "step": 510
    },
    {
      "entropy": 0.03967566112987697,
      "epoch": 2.271631982475356,
      "grad_norm": 0.38977038860321045,
      "learning_rate": 2e-05,
      "loss": 0.0322,
      "mean_token_accuracy": 0.9944061517715455,
      "num_tokens": 1528141.0,
      "step": 520
    },
    {
      "entropy": 0.039826968545094134,
      "epoch": 2.3154435925520263,
      "grad_norm": 0.3456617295742035,
      "learning_rate": 2e-05,
      "loss": 0.0285,
      "mean_token_accuracy": 0.9949691370129585,
      "num_tokens": 1557578.0,
      "step": 530
    },
    {
      "entropy": 0.0421893454156816,
      "epoch": 2.3592552026286966,
      "grad_norm": 0.3418237566947937,
      "learning_rate": 2e-05,
      "loss": 0.0313,
      "mean_token_accuracy": 0.9939802244305611,
      "num_tokens": 1587144.0,
      "step": 540
    },
    {
      "entropy": 0.0395780008751899,
      "epoch": 2.403066812705367,
      "grad_norm": 0.30387938022613525,
      "learning_rate": 2e-05,
      "loss": 0.0292,
      "mean_token_accuracy": 0.9946425825357437,
      "num_tokens": 1616658.0,
      "step": 550
    },
    {
      "entropy": 0.03764454615302384,
      "epoch": 2.4468784227820373,
      "grad_norm": 0.33084434270858765,
      "learning_rate": 2e-05,
      "loss": 0.0296,
      "mean_token_accuracy": 0.9947469428181648,
      "num_tokens": 1646176.0,
      "step": 560
    },
    {
      "entropy": 0.037794701755046844,
      "epoch": 2.4906900328587076,
      "grad_norm": 0.2900339365005493,
      "learning_rate": 2e-05,
      "loss": 0.0294,
      "mean_token_accuracy": 0.9943759799003601,
      "num_tokens": 1675707.0,
      "step": 570
    },
    {
      "entropy": 0.03779870173893869,
      "epoch": 2.534501642935378,
      "grad_norm": 0.2534691095352173,
      "learning_rate": 2e-05,
      "loss": 0.0296,
      "mean_token_accuracy": 0.9940598547458649,
      "num_tokens": 1705162.0,
      "step": 580
    },
    {
      "entropy": 0.036255474342033266,
      "epoch": 2.5783132530120483,
      "grad_norm": 0.35891082882881165,
      "learning_rate": 2e-05,
      "loss": 0.0276,
      "mean_token_accuracy": 0.9944613263010978,
      "num_tokens": 1734615.0,
      "step": 590
    },
    {
      "entropy": 0.03256695491727442,
      "epoch": 2.6221248630887186,
      "grad_norm": 0.2845175266265869,
      "learning_rate": 2e-05,
      "loss": 0.0278,
      "mean_token_accuracy": 0.994592797756195,
      "num_tokens": 1764014.0,
      "step": 600
    },
    {
      "entropy": 0.03345188507810235,
      "epoch": 2.665936473165389,
      "grad_norm": 0.3167726397514343,
      "learning_rate": 2e-05,
      "loss": 0.0266,
      "mean_token_accuracy": 0.9946425467729568,
      "num_tokens": 1793503.0,
      "step": 610
    },
    {
      "entropy": 0.03313743188045919,
      "epoch": 2.7097480832420593,
      "grad_norm": 0.21710677444934845,
      "learning_rate": 2e-05,
      "loss": 0.0272,
      "mean_token_accuracy": 0.9945120304822922,
      "num_tokens": 1823006.0,
      "step": 620
    },
    {
      "entropy": 0.030652716779150067,
      "epoch": 2.7535596933187296,
      "grad_norm": 0.3033951222896576,
      "learning_rate": 2e-05,
      "loss": 0.0253,
      "mean_token_accuracy": 0.9948923841118813,
      "num_tokens": 1852394.0,
      "step": 630
    },
    {
      "entropy": 0.03487149984575808,
      "epoch": 2.7973713033953995,
      "grad_norm": 0.2704299986362457,
      "learning_rate": 2e-05,
      "loss": 0.029,
      "mean_token_accuracy": 0.9941735804080963,
      "num_tokens": 1881893.0,
      "step": 640
    },
    {
      "entropy": 0.031937569426372645,
      "epoch": 2.8411829134720703,
      "grad_norm": 0.3431037962436676,
      "learning_rate": 2e-05,
      "loss": 0.028,
      "mean_token_accuracy": 0.9944406807422638,
      "num_tokens": 1911381.0,
      "step": 650
    },
    {
      "entropy": 0.031133335269987582,
      "epoch": 2.88499452354874,
      "grad_norm": 0.35892975330352783,
      "learning_rate": 2e-05,
      "loss": 0.0268,
      "mean_token_accuracy": 0.9947311311960221,
      "num_tokens": 1940830.0,
      "step": 660
    },
    {
      "entropy": 0.030122329480946064,
      "epoch": 2.928806133625411,
      "grad_norm": 0.25242748856544495,
      "learning_rate": 2e-05,
      "loss": 0.0276,
      "mean_token_accuracy": 0.9946604892611504,
      "num_tokens": 1970238.0,
      "step": 670
    },
    {
      "entropy": 0.03375864648260176,
      "epoch": 2.972617743702081,
      "grad_norm": 0.22651821374893188,
      "learning_rate": 2e-05,
      "loss": 0.0284,
      "mean_token_accuracy": 0.9947338193655014,
      "num_tokens": 1999673.0,
      "step": 680
    },
    {
      "entropy": 0.02985721411233818,
      "epoch": 3.013143483023001,
      "grad_norm": 0.2214677631855011,
      "learning_rate": 2e-05,
      "loss": 0.0253,
      "mean_token_accuracy": 0.9945424746822666,
      "num_tokens": 2026976.0,
      "step": 690
    },
    {
      "entropy": 0.02816909532994032,
      "epoch": 3.0569550930996714,
      "grad_norm": 0.19869959354400635,
      "learning_rate": 2e-05,
      "loss": 0.0234,
      "mean_token_accuracy": 0.9953982174396515,
      "num_tokens": 2056329.0,
      "step": 700
    },
    {
      "entropy": 0.027280686935409904,
      "epoch": 3.1007667031763417,
      "grad_norm": 0.22260268032550812,
      "learning_rate": 2e-05,
      "loss": 0.0232,
      "mean_token_accuracy": 0.9950461372733116,
      "num_tokens": 2085798.0,
      "step": 710
    },
    {
      "entropy": 0.028381537599489092,
      "epoch": 3.144578313253012,
      "grad_norm": 0.3607195019721985,
      "learning_rate": 2e-05,
      "loss": 0.0267,
      "mean_token_accuracy": 0.9944693371653557,
      "num_tokens": 2115279.0,
      "step": 720
    },
    {
      "entropy": 0.029735187301412225,
      "epoch": 3.1883899233296824,
      "grad_norm": 0.25269201397895813,
      "learning_rate": 2e-05,
      "loss": 0.0273,
      "mean_token_accuracy": 0.9947093442082405,
      "num_tokens": 2144765.0,
      "step": 730
    },
    {
      "entropy": 0.027832537749782206,
      "epoch": 3.2322015334063527,
      "grad_norm": 0.25261572003364563,
      "learning_rate": 2e-05,
      "loss": 0.0226,
      "mean_token_accuracy": 0.9951098829507827,
      "num_tokens": 2174228.0,
      "step": 740
    },
    {
      "entropy": 0.02715804339386523,
      "epoch": 3.276013143483023,
      "grad_norm": 0.27859893441200256,
      "learning_rate": 2e-05,
      "loss": 0.0266,
      "mean_token_accuracy": 0.9944431126117707,
      "num_tokens": 2203757.0,
      "step": 750
    },
    {
      "entropy": 0.028143454669043423,
      "epoch": 3.3198247535596934,
      "grad_norm": 0.2973248064517975,
      "learning_rate": 2e-05,
      "loss": 0.0262,
      "mean_token_accuracy": 0.9948585823178291,
      "num_tokens": 2233150.0,
      "step": 760
    },
    {
      "entropy": 0.030359381577000022,
      "epoch": 3.3636363636363638,
      "grad_norm": 0.2367042601108551,
      "learning_rate": 2e-05,
      "loss": 0.0268,
      "mean_token_accuracy": 0.9942718967795372,
      "num_tokens": 2262643.0,
      "step": 770
    },
    {
      "entropy": 0.026462095649912955,
      "epoch": 3.407447973713034,
      "grad_norm": 0.34042081236839294,
      "learning_rate": 2e-05,
      "loss": 0.0243,
      "mean_token_accuracy": 0.9947688922286033,
      "num_tokens": 2292090.0,
      "step": 780
    },
    {
      "entropy": 0.026516364049166442,
      "epoch": 3.4512595837897044,
      "grad_norm": 0.29167208075523376,
      "learning_rate": 2e-05,
      "loss": 0.0228,
      "mean_token_accuracy": 0.995012117922306,
      "num_tokens": 2321564.0,
      "step": 790
    },
    {
      "entropy": 0.026681744982488452,
      "epoch": 3.4950711938663748,
      "grad_norm": 0.2620503604412079,
      "learning_rate": 2e-05,
      "loss": 0.0265,
      "mean_token_accuracy": 0.9948214083909989,
      "num_tokens": 2351115.0,
      "step": 800
    },
    {
      "entropy": 0.027950569428503512,
      "epoch": 3.5388828039430447,
      "grad_norm": 0.3914905786514282,
      "learning_rate": 2e-05,
      "loss": 0.0264,
      "mean_token_accuracy": 0.994230942428112,
      "num_tokens": 2380589.0,
      "step": 810
    },
    {
      "entropy": 0.029146080673672258,
      "epoch": 3.5826944140197154,
      "grad_norm": 0.2935093343257904,
      "learning_rate": 2e-05,
      "loss": 0.0263,
      "mean_token_accuracy": 0.9945803046226501,
      "num_tokens": 2410081.0,
      "step": 820
    },
    {
      "entropy": 0.029389529721811414,
      "epoch": 3.6265060240963853,
      "grad_norm": 0.20426620543003082,
      "learning_rate": 2e-05,
      "loss": 0.0251,
      "mean_token_accuracy": 0.9949777990579605,
      "num_tokens": 2439552.0,
      "step": 830
    },
    {
      "entropy": 0.02622994459234178,
      "epoch": 3.670317634173056,
      "grad_norm": 0.37980544567108154,
      "learning_rate": 2e-05,
      "loss": 0.0254,
      "mean_token_accuracy": 0.9946694761514664,
      "num_tokens": 2469015.0,
      "step": 840
    },
    {
      "entropy": 0.027297466271556915,
      "epoch": 3.714129244249726,
      "grad_norm": 0.2875664234161377,
      "learning_rate": 2e-05,
      "loss": 0.0245,
      "mean_token_accuracy": 0.9948430895805359,
      "num_tokens": 2498494.0,
      "step": 850
    },
    {
      "entropy": 0.026779479440301658,
      "epoch": 3.7579408543263964,
      "grad_norm": 0.24819296598434448,
      "learning_rate": 2e-05,
      "loss": 0.0269,
      "mean_token_accuracy": 0.994810126721859,
      "num_tokens": 2527986.0,
      "step": 860
    },
    {
      "entropy": 0.02729630549438298,
      "epoch": 3.8017524644030667,
      "grad_norm": 0.24704840779304504,
      "learning_rate": 2e-05,
      "loss": 0.0228,
      "mean_token_accuracy": 0.9947375372052193,
      "num_tokens": 2557430.0,
      "step": 870
    },
    {
      "entropy": 0.02764119957573712,
      "epoch": 3.845564074479737,
      "grad_norm": 0.3414749503135681,
      "learning_rate": 2e-05,
      "loss": 0.027,
      "mean_token_accuracy": 0.9947045534849167,
      "num_tokens": 2586894.0,
      "step": 880
    },
    {
      "entropy": 0.02578033790923655,
      "epoch": 3.8893756845564074,
      "grad_norm": 0.37503501772880554,
      "learning_rate": 2e-05,
      "loss": 0.0248,
      "mean_token_accuracy": 0.9950520157814026,
      "num_tokens": 2616380.0,
      "step": 890
    },
    {
      "entropy": 0.027033341675996782,
      "epoch": 3.9331872946330777,
      "grad_norm": 0.2305474430322647,
      "learning_rate": 2e-05,
      "loss": 0.0256,
      "mean_token_accuracy": 0.9948741808533669,
      "num_tokens": 2645835.0,
      "step": 900
    },
    {
      "entropy": 0.027415974205359815,
      "epoch": 3.976998904709748,
      "grad_norm": 0.2407739907503128,
      "learning_rate": 2e-05,
      "loss": 0.0246,
      "mean_token_accuracy": 0.9946468979120254,
      "num_tokens": 2675356.0,
      "step": 910
    }
  ],
  "logging_steps": 10,
  "max_steps": 1832,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 8,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1681042640606208.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}