{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.4104372355430184,
  "eval_steps": 500,
  "global_step": 7000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0020149103364900263,
      "grad_norm": 18.375,
      "learning_rate": 1.998791053798106e-05,
      "loss": 1.9277,
      "mean_token_accuracy": 0.679860633611679,
      "num_tokens": 9373.0,
      "step": 10
    },
    {
      "epoch": 0.004029820672980053,
      "grad_norm": 13.5625,
      "learning_rate": 1.9974477802404462e-05,
      "loss": 1.2796,
      "mean_token_accuracy": 0.7233692526817321,
      "num_tokens": 20789.0,
      "step": 20
    },
    {
      "epoch": 0.006044731009470079,
      "grad_norm": 13.0,
      "learning_rate": 1.996104506682786e-05,
      "loss": 1.2607,
      "mean_token_accuracy": 0.7299719333648682,
      "num_tokens": 32661.0,
      "step": 30
    },
    {
      "epoch": 0.008059641345960105,
      "grad_norm": 12.8125,
      "learning_rate": 1.994761233125126e-05,
      "loss": 1.2356,
      "mean_token_accuracy": 0.7324558198451996,
      "num_tokens": 43049.0,
      "step": 40
    },
    {
      "epoch": 0.01007455168245013,
      "grad_norm": 12.125,
      "learning_rate": 1.9934179595674662e-05,
      "loss": 1.1324,
      "mean_token_accuracy": 0.7531639993190765,
      "num_tokens": 52956.0,
      "step": 50
    },
    {
      "epoch": 0.012089462018940157,
      "grad_norm": 16.125,
      "learning_rate": 1.992074686009806e-05,
      "loss": 1.1775,
      "mean_token_accuracy": 0.7408373892307282,
      "num_tokens": 63513.0,
      "step": 60
    },
    {
      "epoch": 0.014104372355430184,
      "grad_norm": 14.0625,
      "learning_rate": 1.990731412452146e-05,
      "loss": 1.2446,
      "mean_token_accuracy": 0.7307547807693482,
      "num_tokens": 74794.0,
      "step": 70
    },
    {
      "epoch": 0.01611928269192021,
      "grad_norm": 11.875,
      "learning_rate": 1.989388138894486e-05,
      "loss": 1.2428,
      "mean_token_accuracy": 0.7255984365940094,
      "num_tokens": 86903.0,
      "step": 80
    },
    {
      "epoch": 0.018134193028410236,
      "grad_norm": 14.4375,
      "learning_rate": 1.988044865336826e-05,
      "loss": 1.2766,
      "mean_token_accuracy": 0.7225647568702698,
      "num_tokens": 97159.0,
      "step": 90
    },
    {
      "epoch": 0.02014910336490026,
      "grad_norm": 12.5625,
      "learning_rate": 1.986701591779166e-05,
      "loss": 1.1458,
      "mean_token_accuracy": 0.7415299773216247,
      "num_tokens": 107437.0,
      "step": 100
    },
    {
      "epoch": 0.02216401370139029,
      "grad_norm": 16.75,
      "learning_rate": 1.985358318221506e-05,
      "loss": 1.2748,
      "mean_token_accuracy": 0.7202155470848084,
      "num_tokens": 117867.0,
      "step": 110
    },
    {
      "epoch": 0.024178924037880314,
      "grad_norm": 18.125,
      "learning_rate": 1.984015044663846e-05,
      "loss": 1.1689,
      "mean_token_accuracy": 0.7355200052261353,
      "num_tokens": 128288.0,
      "step": 120
    },
    {
      "epoch": 0.02619383437437034,
      "grad_norm": 12.6875,
      "learning_rate": 1.982671771106186e-05,
      "loss": 1.2324,
      "mean_token_accuracy": 0.7224856972694397,
      "num_tokens": 139627.0,
      "step": 130
    },
    {
      "epoch": 0.028208744710860368,
      "grad_norm": 11.5625,
      "learning_rate": 1.981328497548526e-05,
      "loss": 1.1365,
      "mean_token_accuracy": 0.7402825653553009,
      "num_tokens": 150498.0,
      "step": 140
    },
    {
      "epoch": 0.030223655047350393,
      "grad_norm": 14.75,
      "learning_rate": 1.979985223990866e-05,
      "loss": 1.1178,
      "mean_token_accuracy": 0.7426175236701965,
      "num_tokens": 161754.0,
      "step": 150
    },
    {
      "epoch": 0.03223856538384042,
      "grad_norm": 11.4375,
      "learning_rate": 1.978641950433206e-05,
      "loss": 1.2596,
      "mean_token_accuracy": 0.7134447395801544,
      "num_tokens": 173087.0,
      "step": 160
    },
    {
      "epoch": 0.03425347572033045,
      "grad_norm": 12.75,
      "learning_rate": 1.9772986768755458e-05,
      "loss": 1.0652,
      "mean_token_accuracy": 0.7474986433982849,
      "num_tokens": 184747.0,
      "step": 170
    },
    {
      "epoch": 0.03626838605682047,
      "grad_norm": 11.8125,
      "learning_rate": 1.9759554033178857e-05,
      "loss": 1.1436,
      "mean_token_accuracy": 0.7323237180709838,
      "num_tokens": 195331.0,
      "step": 180
    },
    {
      "epoch": 0.0382832963933105,
      "grad_norm": 9.875,
      "learning_rate": 1.974612129760226e-05,
      "loss": 1.0312,
      "mean_token_accuracy": 0.7625056743621826,
      "num_tokens": 208260.0,
      "step": 190
    },
    {
      "epoch": 0.04029820672980052,
      "grad_norm": 14.9375,
      "learning_rate": 1.9732688562025658e-05,
      "loss": 1.0084,
      "mean_token_accuracy": 0.7631498157978058,
      "num_tokens": 218822.0,
      "step": 200
    },
    {
      "epoch": 0.04231311706629055,
      "grad_norm": 11.625,
      "learning_rate": 1.9719255826449057e-05,
      "loss": 0.9813,
      "mean_token_accuracy": 0.7651655077934265,
      "num_tokens": 228580.0,
      "step": 210
    },
    {
      "epoch": 0.04432802740278058,
      "grad_norm": 17.875,
      "learning_rate": 1.970582309087246e-05,
      "loss": 1.07,
      "mean_token_accuracy": 0.7532146275043488,
      "num_tokens": 239159.0,
      "step": 220
    },
    {
      "epoch": 0.046342937739270604,
      "grad_norm": 11.25,
      "learning_rate": 1.9692390355295858e-05,
      "loss": 1.113,
      "mean_token_accuracy": 0.7436384916305542,
      "num_tokens": 251695.0,
      "step": 230
    },
    {
      "epoch": 0.04835784807576063,
      "grad_norm": 13.375,
      "learning_rate": 1.9678957619719257e-05,
      "loss": 0.929,
      "mean_token_accuracy": 0.7755303025245667,
      "num_tokens": 261128.0,
      "step": 240
    },
    {
      "epoch": 0.050372758412250654,
      "grad_norm": 12.8125,
      "learning_rate": 1.9665524884142656e-05,
      "loss": 1.0999,
      "mean_token_accuracy": 0.7514171898365021,
      "num_tokens": 271560.0,
      "step": 250
    },
    {
      "epoch": 0.05238766874874068,
      "grad_norm": 13.0,
      "learning_rate": 1.9652092148566058e-05,
      "loss": 1.0339,
      "mean_token_accuracy": 0.7604846298694611,
      "num_tokens": 282223.0,
      "step": 260
    },
    {
      "epoch": 0.054402579085230704,
      "grad_norm": 12.9375,
      "learning_rate": 1.9638659412989457e-05,
      "loss": 1.0473,
      "mean_token_accuracy": 0.7622893512248993,
      "num_tokens": 292726.0,
      "step": 270
    },
    {
      "epoch": 0.056417489421720736,
      "grad_norm": 15.0,
      "learning_rate": 1.9625226677412856e-05,
      "loss": 0.9894,
      "mean_token_accuracy": 0.764206200838089,
      "num_tokens": 303785.0,
      "step": 280
    },
    {
      "epoch": 0.05843239975821076,
      "grad_norm": 10.3125,
      "learning_rate": 1.9611793941836258e-05,
      "loss": 1.109,
      "mean_token_accuracy": 0.7469749927520752,
      "num_tokens": 314725.0,
      "step": 290
    },
    {
      "epoch": 0.060447310094700786,
      "grad_norm": 12.625,
      "learning_rate": 1.9598361206259657e-05,
      "loss": 1.2098,
      "mean_token_accuracy": 0.718773603439331,
      "num_tokens": 326635.0,
      "step": 300
    },
    {
      "epoch": 0.06246222043119081,
      "grad_norm": 11.125,
      "learning_rate": 1.9584928470683055e-05,
      "loss": 1.1025,
      "mean_token_accuracy": 0.7460452795028687,
      "num_tokens": 337866.0,
      "step": 310
    },
    {
      "epoch": 0.06447713076768084,
      "grad_norm": 10.6875,
      "learning_rate": 1.9571495735106458e-05,
      "loss": 1.0772,
      "mean_token_accuracy": 0.7526730418205261,
      "num_tokens": 348512.0,
      "step": 320
    },
    {
      "epoch": 0.06649204110417087,
      "grad_norm": 13.375,
      "learning_rate": 1.9558062999529857e-05,
      "loss": 1.157,
      "mean_token_accuracy": 0.7320702195167541,
      "num_tokens": 360281.0,
      "step": 330
    },
    {
      "epoch": 0.0685069514406609,
      "grad_norm": 12.0,
      "learning_rate": 1.9544630263953255e-05,
      "loss": 1.0157,
      "mean_token_accuracy": 0.760700649023056,
      "num_tokens": 371068.0,
      "step": 340
    },
    {
      "epoch": 0.07052186177715092,
      "grad_norm": 17.375,
      "learning_rate": 1.9531197528376654e-05,
      "loss": 0.8851,
      "mean_token_accuracy": 0.7925353944301605,
      "num_tokens": 380947.0,
      "step": 350
    },
    {
      "epoch": 0.07253677211364094,
      "grad_norm": 11.3125,
      "learning_rate": 1.9517764792800056e-05,
      "loss": 1.0325,
      "mean_token_accuracy": 0.7617525398731232,
      "num_tokens": 391552.0,
      "step": 360
    },
    {
      "epoch": 0.07455168245013097,
      "grad_norm": 10.9375,
      "learning_rate": 1.9504332057223455e-05,
      "loss": 0.9852,
      "mean_token_accuracy": 0.7655075788497925,
      "num_tokens": 403321.0,
      "step": 370
    },
    {
      "epoch": 0.076566592786621,
      "grad_norm": 11.1875,
      "learning_rate": 1.9490899321646854e-05,
      "loss": 1.0527,
      "mean_token_accuracy": 0.7569321393966675,
      "num_tokens": 414435.0,
      "step": 380
    },
    {
      "epoch": 0.07858150312311102,
      "grad_norm": 14.6875,
      "learning_rate": 1.9477466586070256e-05,
      "loss": 0.9602,
      "mean_token_accuracy": 0.7720924854278565,
      "num_tokens": 423506.0,
      "step": 390
    },
    {
      "epoch": 0.08059641345960104,
      "grad_norm": 11.0,
      "learning_rate": 1.9464033850493655e-05,
      "loss": 1.0475,
      "mean_token_accuracy": 0.7505548059940338,
      "num_tokens": 436556.0,
      "step": 400
    },
    {
      "epoch": 0.08261132379609107,
      "grad_norm": 11.9375,
      "learning_rate": 1.9450601114917054e-05,
      "loss": 1.0775,
      "mean_token_accuracy": 0.7474610984325409,
      "num_tokens": 448248.0,
      "step": 410
    },
    {
      "epoch": 0.0846262341325811,
      "grad_norm": 10.25,
      "learning_rate": 1.9437168379340453e-05,
      "loss": 1.0487,
      "mean_token_accuracy": 0.7566307663917542,
      "num_tokens": 460102.0,
      "step": 420
    },
    {
      "epoch": 0.08664114446907113,
      "grad_norm": 12.0625,
      "learning_rate": 1.9423735643763855e-05,
      "loss": 0.9919,
      "mean_token_accuracy": 0.7676237523555756,
      "num_tokens": 471590.0,
      "step": 430
    },
    {
      "epoch": 0.08865605480556116,
      "grad_norm": 13.125,
      "learning_rate": 1.9410302908187254e-05,
      "loss": 1.0473,
      "mean_token_accuracy": 0.7525161623954773,
      "num_tokens": 482096.0,
      "step": 440
    },
    {
      "epoch": 0.09067096514205118,
      "grad_norm": 13.4375,
      "learning_rate": 1.9396870172610653e-05,
      "loss": 1.0347,
      "mean_token_accuracy": 0.7515169024467468,
      "num_tokens": 493585.0,
      "step": 450
    },
    {
      "epoch": 0.09268587547854121,
      "grad_norm": 10.9375,
      "learning_rate": 1.9383437437034055e-05,
      "loss": 1.0487,
      "mean_token_accuracy": 0.7547510921955108,
      "num_tokens": 505989.0,
      "step": 460
    },
    {
      "epoch": 0.09470078581503123,
      "grad_norm": 12.25,
      "learning_rate": 1.9370004701457454e-05,
      "loss": 1.018,
      "mean_token_accuracy": 0.7596003413200378,
      "num_tokens": 516900.0,
      "step": 470
    },
    {
      "epoch": 0.09671569615152126,
      "grad_norm": 11.1875,
      "learning_rate": 1.9356571965880853e-05,
      "loss": 0.9797,
      "mean_token_accuracy": 0.7699940800666809,
      "num_tokens": 526427.0,
      "step": 480
    },
    {
      "epoch": 0.09873060648801128,
      "grad_norm": 10.3125,
      "learning_rate": 1.9343139230304255e-05,
      "loss": 1.0817,
      "mean_token_accuracy": 0.7470319092273712,
      "num_tokens": 537981.0,
      "step": 490
    },
    {
      "epoch": 0.10074551682450131,
      "grad_norm": 13.25,
      "learning_rate": 1.9329706494727654e-05,
      "loss": 1.0089,
      "mean_token_accuracy": 0.7595715343952179,
      "num_tokens": 549174.0,
      "step": 500
    },
    {
      "epoch": 0.10276042716099133,
      "grad_norm": 12.625,
      "learning_rate": 1.9316273759151052e-05,
      "loss": 1.0164,
      "mean_token_accuracy": 0.7571583390235901,
      "num_tokens": 559988.0,
      "step": 510
    },
    {
      "epoch": 0.10477533749748136,
      "grad_norm": 14.3125,
      "learning_rate": 1.930284102357445e-05,
      "loss": 1.1148,
      "mean_token_accuracy": 0.7423564851284027,
      "num_tokens": 571510.0,
      "step": 520
    },
    {
      "epoch": 0.10679024783397138,
      "grad_norm": 14.6875,
      "learning_rate": 1.9289408287997854e-05,
      "loss": 1.053,
      "mean_token_accuracy": 0.7485374748706818,
      "num_tokens": 583020.0,
      "step": 530
    },
    {
      "epoch": 0.10880515817046141,
      "grad_norm": 11.4375,
      "learning_rate": 1.9275975552421252e-05,
      "loss": 0.9756,
      "mean_token_accuracy": 0.7606720209121705,
      "num_tokens": 594042.0,
      "step": 540
    },
    {
      "epoch": 0.11082006850695145,
      "grad_norm": 13.3125,
      "learning_rate": 1.926254281684465e-05,
      "loss": 0.9514,
      "mean_token_accuracy": 0.7702824532985687,
      "num_tokens": 605932.0,
      "step": 550
    },
    {
      "epoch": 0.11283497884344147,
      "grad_norm": 10.625,
      "learning_rate": 1.9249110081268053e-05,
      "loss": 1.0008,
      "mean_token_accuracy": 0.7583375632762909,
      "num_tokens": 617431.0,
      "step": 560
    },
    {
      "epoch": 0.1148498891799315,
      "grad_norm": 10.875,
      "learning_rate": 1.9235677345691452e-05,
      "loss": 0.998,
      "mean_token_accuracy": 0.7597042858600617,
      "num_tokens": 629827.0,
      "step": 570
    },
    {
      "epoch": 0.11686479951642152,
      "grad_norm": 12.5625,
      "learning_rate": 1.922244610114851e-05,
      "loss": 0.9512,
      "mean_token_accuracy": 0.7806954503059387,
      "num_tokens": 640144.0,
      "step": 580
    },
    {
      "epoch": 0.11887970985291155,
      "grad_norm": 10.5625,
      "learning_rate": 1.920881187453825e-05,
      "loss": 0.9292,
      "mean_token_accuracy": 0.7761410176753998,
      "num_tokens": 652386.0,
      "step": 590
    },
    {
      "epoch": 0.12089462018940157,
      "grad_norm": 11.0,
      "learning_rate": 1.9195379138961652e-05,
      "loss": 1.0768,
      "mean_token_accuracy": 0.7544383645057678,
      "num_tokens": 663460.0,
      "step": 600
    },
    {
      "epoch": 0.1229095305258916,
      "grad_norm": 14.3125,
      "learning_rate": 1.918194640338505e-05,
      "loss": 0.8975,
      "mean_token_accuracy": 0.7799494147300721,
      "num_tokens": 673425.0,
      "step": 610
    },
    {
      "epoch": 0.12492444086238162,
      "grad_norm": 10.375,
      "learning_rate": 1.916851366780845e-05,
      "loss": 0.899,
      "mean_token_accuracy": 0.7885317802429199,
      "num_tokens": 683817.0,
      "step": 620
    },
    {
      "epoch": 0.12693935119887165,
      "grad_norm": 13.375,
      "learning_rate": 1.9155080932231852e-05,
      "loss": 0.998,
      "mean_token_accuracy": 0.7671383440494537,
      "num_tokens": 694196.0,
      "step": 630
    },
    {
      "epoch": 0.12895426153536169,
      "grad_norm": 11.625,
      "learning_rate": 1.914164819665525e-05,
      "loss": 0.9808,
      "mean_token_accuracy": 0.7700311303138733,
      "num_tokens": 704564.0,
      "step": 640
    },
    {
      "epoch": 0.1309691718718517,
      "grad_norm": 13.25,
      "learning_rate": 1.912821546107865e-05,
      "loss": 1.0077,
      "mean_token_accuracy": 0.7643253684043885,
      "num_tokens": 715775.0,
      "step": 650
    },
    {
      "epoch": 0.13298408220834174,
      "grad_norm": 13.625,
      "learning_rate": 1.911478272550205e-05,
      "loss": 0.9457,
      "mean_token_accuracy": 0.7678769171237946,
      "num_tokens": 726005.0,
      "step": 660
    },
    {
      "epoch": 0.13499899254483175,
      "grad_norm": 13.5,
      "learning_rate": 1.910134998992545e-05,
      "loss": 1.0155,
      "mean_token_accuracy": 0.7607427120208741,
      "num_tokens": 738053.0,
      "step": 670
    },
    {
      "epoch": 0.1370139028813218,
      "grad_norm": 11.6875,
      "learning_rate": 1.908791725434885e-05,
      "loss": 0.9395,
      "mean_token_accuracy": 0.7723658442497253,
      "num_tokens": 748480.0,
      "step": 680
    },
    {
      "epoch": 0.1390288132178118,
      "grad_norm": 15.6875,
      "learning_rate": 1.907448451877225e-05,
      "loss": 0.9639,
      "mean_token_accuracy": 0.7676171123981476,
      "num_tokens": 759972.0,
      "step": 690
    },
    {
      "epoch": 0.14104372355430184,
      "grad_norm": 12.875,
      "learning_rate": 1.906105178319565e-05,
      "loss": 0.9557,
      "mean_token_accuracy": 0.7719902992248535,
      "num_tokens": 771123.0,
      "step": 700
    },
    {
      "epoch": 0.14305863389079185,
      "grad_norm": 12.8125,
      "learning_rate": 1.904761904761905e-05,
      "loss": 1.0022,
      "mean_token_accuracy": 0.7667870819568634,
      "num_tokens": 782532.0,
      "step": 710
    },
    {
      "epoch": 0.1450735442272819,
      "grad_norm": 11.0625,
      "learning_rate": 1.903418631204245e-05,
      "loss": 0.9519,
      "mean_token_accuracy": 0.7708106875419617,
      "num_tokens": 794067.0,
      "step": 720
    },
    {
      "epoch": 0.14708845456377193,
      "grad_norm": 14.125,
      "learning_rate": 1.902075357646585e-05,
      "loss": 0.9718,
      "mean_token_accuracy": 0.766555666923523,
      "num_tokens": 804871.0,
      "step": 730
    },
    {
      "epoch": 0.14910336490026194,
      "grad_norm": 12.8125,
      "learning_rate": 1.900732084088925e-05,
      "loss": 0.9852,
      "mean_token_accuracy": 0.7678309619426728,
      "num_tokens": 815050.0,
      "step": 740
    },
    {
      "epoch": 0.15111827523675198,
      "grad_norm": 11.3125,
      "learning_rate": 1.8993888105312648e-05,
      "loss": 0.9951,
      "mean_token_accuracy": 0.7627758264541626,
      "num_tokens": 826248.0,
      "step": 750
    },
    {
      "epoch": 0.153133185573242,
      "grad_norm": 17.25,
      "learning_rate": 1.8980455369736047e-05,
      "loss": 1.0433,
      "mean_token_accuracy": 0.7571396887302398,
      "num_tokens": 835706.0,
      "step": 760
    },
    {
      "epoch": 0.15514809590973203,
      "grad_norm": 10.9375,
      "learning_rate": 1.896702263415945e-05,
      "loss": 1.0518,
      "mean_token_accuracy": 0.7517435431480408,
      "num_tokens": 847261.0,
      "step": 770
    },
    {
      "epoch": 0.15716300624622204,
      "grad_norm": 10.4375,
      "learning_rate": 1.8953589898582848e-05,
      "loss": 0.9629,
      "mean_token_accuracy": 0.7732720315456391,
      "num_tokens": 858655.0,
      "step": 780
    },
    {
      "epoch": 0.15917791658271208,
      "grad_norm": 12.5,
      "learning_rate": 1.8940157163006247e-05,
      "loss": 1.0231,
      "mean_token_accuracy": 0.7555422127246857,
      "num_tokens": 870002.0,
      "step": 790
    },
    {
      "epoch": 0.1611928269192021,
      "grad_norm": 11.0,
      "learning_rate": 1.892672442742965e-05,
      "loss": 1.1283,
      "mean_token_accuracy": 0.7441882312297821,
      "num_tokens": 881131.0,
      "step": 800
    },
    {
      "epoch": 0.16320773725569213,
      "grad_norm": 12.4375,
      "learning_rate": 1.8913291691853048e-05,
      "loss": 1.0252,
      "mean_token_accuracy": 0.7630669414997101,
      "num_tokens": 893437.0,
      "step": 810
    },
    {
      "epoch": 0.16522264759218214,
      "grad_norm": 11.0,
      "learning_rate": 1.8899858956276447e-05,
      "loss": 1.0528,
      "mean_token_accuracy": 0.7483877301216125,
      "num_tokens": 904976.0,
      "step": 820
    },
    {
      "epoch": 0.16723755792867218,
      "grad_norm": 12.375,
      "learning_rate": 1.8886426220699846e-05,
      "loss": 0.8715,
      "mean_token_accuracy": 0.7899761021137237,
      "num_tokens": 915631.0,
      "step": 830
    },
    {
      "epoch": 0.1692524682651622,
      "grad_norm": 13.375,
      "learning_rate": 1.8872993485123248e-05,
      "loss": 1.0548,
      "mean_token_accuracy": 0.7494987368583679,
      "num_tokens": 927141.0,
      "step": 840
    },
    {
      "epoch": 0.17126737860165223,
      "grad_norm": 11.0,
      "learning_rate": 1.8859560749546647e-05,
      "loss": 0.9579,
      "mean_token_accuracy": 0.7668360054492951,
      "num_tokens": 938792.0,
      "step": 850
    },
    {
      "epoch": 0.17328228893814226,
      "grad_norm": 13.125,
      "learning_rate": 1.8846128013970046e-05,
      "loss": 0.8595,
      "mean_token_accuracy": 0.7870603501796722,
      "num_tokens": 949894.0,
      "step": 860
    },
    {
      "epoch": 0.17529719927463228,
      "grad_norm": 12.625,
      "learning_rate": 1.8832695278393448e-05,
      "loss": 0.9216,
      "mean_token_accuracy": 0.7846542239189148,
      "num_tokens": 961003.0,
      "step": 870
    },
    {
      "epoch": 0.17731210961112231,
      "grad_norm": 12.125,
      "learning_rate": 1.8819262542816847e-05,
      "loss": 1.0052,
      "mean_token_accuracy": 0.7603223979473114,
      "num_tokens": 971577.0,
      "step": 880
    },
    {
      "epoch": 0.17932701994761233,
      "grad_norm": 12.4375,
      "learning_rate": 1.8805829807240245e-05,
      "loss": 0.9299,
      "mean_token_accuracy": 0.7757908642292023,
      "num_tokens": 982234.0,
      "step": 890
    },
    {
      "epoch": 0.18134193028410237,
      "grad_norm": 11.0,
      "learning_rate": 1.8792397071663648e-05,
      "loss": 1.0312,
      "mean_token_accuracy": 0.7591780245304107,
      "num_tokens": 992997.0,
      "step": 900
    },
    {
      "epoch": 0.18335684062059238,
      "grad_norm": 10.5625,
      "learning_rate": 1.8778964336087047e-05,
      "loss": 0.8999,
      "mean_token_accuracy": 0.779550439119339,
      "num_tokens": 1004102.0,
      "step": 910
    },
    {
      "epoch": 0.18537175095708242,
      "grad_norm": 12.625,
      "learning_rate": 1.8765531600510445e-05,
      "loss": 0.8892,
      "mean_token_accuracy": 0.7890210688114166,
      "num_tokens": 1015447.0,
      "step": 920
    },
    {
      "epoch": 0.18738666129357243,
      "grad_norm": 12.125,
      "learning_rate": 1.8752098864933844e-05,
      "loss": 1.0344,
      "mean_token_accuracy": 0.7584980130195618,
      "num_tokens": 1026939.0,
      "step": 930
    },
    {
      "epoch": 0.18940157163006247,
      "grad_norm": 10.9375,
      "learning_rate": 1.8738666129357246e-05,
      "loss": 0.9686,
      "mean_token_accuracy": 0.7649740993976593,
      "num_tokens": 1037937.0,
      "step": 940
    },
    {
      "epoch": 0.19141648196655248,
      "grad_norm": 8.75,
      "learning_rate": 1.8725233393780645e-05,
      "loss": 1.0364,
      "mean_token_accuracy": 0.7554452955722809,
      "num_tokens": 1049173.0,
      "step": 950
    },
    {
      "epoch": 0.19343139230304252,
      "grad_norm": 13.625,
      "learning_rate": 1.8711800658204044e-05,
      "loss": 1.0173,
      "mean_token_accuracy": 0.7559767007827759,
      "num_tokens": 1060166.0,
      "step": 960
    },
    {
      "epoch": 0.19544630263953255,
      "grad_norm": 11.1875,
      "learning_rate": 1.8698367922627446e-05,
      "loss": 0.9464,
      "mean_token_accuracy": 0.7735530078411103,
      "num_tokens": 1070458.0,
      "step": 970
    },
    {
      "epoch": 0.19746121297602257,
      "grad_norm": 11.1875,
      "learning_rate": 1.8684935187050845e-05,
      "loss": 0.9397,
      "mean_token_accuracy": 0.7724673867225647,
      "num_tokens": 1081477.0,
      "step": 980
    },
    {
      "epoch": 0.1994761233125126,
      "grad_norm": 15.1875,
      "learning_rate": 1.8671502451474244e-05,
      "loss": 1.0769,
      "mean_token_accuracy": 0.7459556341171265,
      "num_tokens": 1094205.0,
      "step": 990
    },
    {
      "epoch": 0.20149103364900262,
      "grad_norm": 16.5,
      "learning_rate": 1.8658069715897643e-05,
      "loss": 0.9763,
      "mean_token_accuracy": 0.7707934081554413,
      "num_tokens": 1104929.0,
      "step": 1000
    },
    {
      "epoch": 0.20350594398549265,
      "grad_norm": 9.9375,
      "learning_rate": 1.8644636980321045e-05,
      "loss": 0.9065,
      "mean_token_accuracy": 0.7750193297863006,
      "num_tokens": 1115780.0,
      "step": 1010
    },
    {
      "epoch": 0.20552085432198267,
      "grad_norm": 15.5,
      "learning_rate": 1.8631204244744444e-05,
      "loss": 0.9421,
      "mean_token_accuracy": 0.7709006071090698,
      "num_tokens": 1127078.0,
      "step": 1020
    },
    {
      "epoch": 0.2075357646584727,
      "grad_norm": 11.5625,
      "learning_rate": 1.8617771509167843e-05,
      "loss": 1.0089,
      "mean_token_accuracy": 0.7673897624015809,
      "num_tokens": 1138685.0,
      "step": 1030
    },
    {
      "epoch": 0.20955067499496272,
      "grad_norm": 11.875,
      "learning_rate": 1.8604338773591245e-05,
      "loss": 0.9082,
      "mean_token_accuracy": 0.7804294168949127,
      "num_tokens": 1149508.0,
      "step": 1040
    },
    {
      "epoch": 0.21156558533145275,
      "grad_norm": 13.1875,
      "learning_rate": 1.8590906038014644e-05,
      "loss": 0.9128,
      "mean_token_accuracy": 0.7730132281780243,
      "num_tokens": 1159971.0,
      "step": 1050
    },
    {
      "epoch": 0.21358049566794277,
      "grad_norm": 15.5625,
      "learning_rate": 1.8577473302438043e-05,
      "loss": 0.8863,
      "mean_token_accuracy": 0.7842482626438141,
      "num_tokens": 1170506.0,
      "step": 1060
    },
    {
      "epoch": 0.2155954060044328,
      "grad_norm": 10.1875,
      "learning_rate": 1.8564040566861445e-05,
      "loss": 1.0306,
      "mean_token_accuracy": 0.7470630705356598,
      "num_tokens": 1183402.0,
      "step": 1070
    },
    {
      "epoch": 0.21761031634092282,
      "grad_norm": 13.4375,
      "learning_rate": 1.8550607831284844e-05,
      "loss": 0.9829,
      "mean_token_accuracy": 0.7678338825702667,
      "num_tokens": 1193700.0,
      "step": 1080
    },
    {
      "epoch": 0.21962522667741285,
      "grad_norm": 10.875,
      "learning_rate": 1.8537175095708242e-05,
      "loss": 1.0178,
      "mean_token_accuracy": 0.7664987504482269,
      "num_tokens": 1204501.0,
      "step": 1090
    },
    {
      "epoch": 0.2216401370139029,
      "grad_norm": 10.9375,
      "learning_rate": 1.852374236013164e-05,
      "loss": 0.9276,
      "mean_token_accuracy": 0.7776144444942474,
      "num_tokens": 1214622.0,
      "step": 1100
    },
    {
      "epoch": 0.2236550473503929,
      "grad_norm": 12.125,
      "learning_rate": 1.8510309624555044e-05,
      "loss": 0.9235,
      "mean_token_accuracy": 0.7812209010124207,
      "num_tokens": 1225266.0,
      "step": 1110
    },
    {
      "epoch": 0.22566995768688294,
      "grad_norm": 11.4375,
      "learning_rate": 1.8496876888978442e-05,
      "loss": 0.8635,
      "mean_token_accuracy": 0.7839280545711518,
      "num_tokens": 1236214.0,
      "step": 1120
    },
    {
      "epoch": 0.22768486802337295,
      "grad_norm": 13.5625,
      "learning_rate": 1.848344415340184e-05,
      "loss": 0.9995,
      "mean_token_accuracy": 0.7634225428104401,
      "num_tokens": 1248434.0,
      "step": 1130
    },
    {
      "epoch": 0.229699778359863,
      "grad_norm": 14.4375,
      "learning_rate": 1.8470011417825243e-05,
      "loss": 0.8734,
      "mean_token_accuracy": 0.7929128646850586,
      "num_tokens": 1258925.0,
      "step": 1140
    },
    {
      "epoch": 0.231714688696353,
      "grad_norm": 11.375,
      "learning_rate": 1.845657868224864e-05,
      "loss": 0.8612,
      "mean_token_accuracy": 0.7883239209651947,
      "num_tokens": 1268877.0,
      "step": 1150
    },
    {
      "epoch": 0.23372959903284304,
      "grad_norm": 9.375,
      "learning_rate": 1.844314594667204e-05,
      "loss": 0.8697,
      "mean_token_accuracy": 0.782884806394577,
      "num_tokens": 1280712.0,
      "step": 1160
    },
    {
      "epoch": 0.23574450936933306,
      "grad_norm": 12.4375,
      "learning_rate": 1.842971321109544e-05,
      "loss": 0.9373,
      "mean_token_accuracy": 0.7709940969944,
      "num_tokens": 1291740.0,
      "step": 1170
    },
    {
      "epoch": 0.2377594197058231,
      "grad_norm": 12.4375,
      "learning_rate": 1.8416280475518842e-05,
      "loss": 1.0077,
      "mean_token_accuracy": 0.7596822798252105,
      "num_tokens": 1303009.0,
      "step": 1180
    },
    {
      "epoch": 0.2397743300423131,
      "grad_norm": 9.5625,
      "learning_rate": 1.840284773994224e-05,
      "loss": 0.9671,
      "mean_token_accuracy": 0.7675224483013153,
      "num_tokens": 1314524.0,
      "step": 1190
    },
    {
      "epoch": 0.24178924037880314,
      "grad_norm": 14.6875,
      "learning_rate": 1.838941500436564e-05,
      "loss": 0.8832,
      "mean_token_accuracy": 0.7861056625843048,
      "num_tokens": 1327497.0,
      "step": 1200
    },
    {
      "epoch": 0.24380415071529318,
      "grad_norm": 10.75,
      "learning_rate": 1.8375982268789042e-05,
      "loss": 0.8841,
      "mean_token_accuracy": 0.785036051273346,
      "num_tokens": 1338614.0,
      "step": 1210
    },
    {
      "epoch": 0.2458190610517832,
      "grad_norm": 13.9375,
      "learning_rate": 1.836254953321244e-05,
      "loss": 0.9576,
      "mean_token_accuracy": 0.77821044921875,
      "num_tokens": 1348997.0,
      "step": 1220
    },
    {
      "epoch": 0.24783397138827323,
      "grad_norm": 11.3125,
      "learning_rate": 1.834911679763584e-05,
      "loss": 0.9204,
      "mean_token_accuracy": 0.7739447593688965,
      "num_tokens": 1360384.0,
      "step": 1230
    },
    {
      "epoch": 0.24984888172476324,
      "grad_norm": 11.8125,
      "learning_rate": 1.833568406205924e-05,
      "loss": 0.9523,
      "mean_token_accuracy": 0.7746530413627625,
      "num_tokens": 1371506.0,
      "step": 1240
    },
    {
      "epoch": 0.25186379206125326,
      "grad_norm": 11.6875,
      "learning_rate": 1.832225132648264e-05,
      "loss": 1.0415,
      "mean_token_accuracy": 0.7526679396629333,
      "num_tokens": 1383841.0,
      "step": 1250
    },
    {
      "epoch": 0.2538787023977433,
      "grad_norm": 11.0,
      "learning_rate": 1.830881859090604e-05,
      "loss": 1.0038,
      "mean_token_accuracy": 0.7654858827590942,
      "num_tokens": 1395211.0,
      "step": 1260
    },
    {
      "epoch": 0.25589361273423333,
      "grad_norm": 13.5625,
      "learning_rate": 1.829538585532944e-05,
      "loss": 0.9847,
      "mean_token_accuracy": 0.769145131111145,
      "num_tokens": 1405181.0,
      "step": 1270
    },
    {
      "epoch": 0.25790852307072337,
      "grad_norm": 10.8125,
      "learning_rate": 1.828195311975284e-05,
      "loss": 1.0403,
      "mean_token_accuracy": 0.7538439452648162,
      "num_tokens": 1415965.0,
      "step": 1280
    },
    {
      "epoch": 0.25992343340721336,
      "grad_norm": 10.25,
      "learning_rate": 1.826852038417624e-05,
      "loss": 0.8642,
      "mean_token_accuracy": 0.7828892707824707,
      "num_tokens": 1427838.0,
      "step": 1290
    },
    {
      "epoch": 0.2619383437437034,
      "grad_norm": 16.125,
      "learning_rate": 1.825508764859964e-05,
      "loss": 1.0695,
      "mean_token_accuracy": 0.7503586292266846,
      "num_tokens": 1438672.0,
      "step": 1300
    },
    {
      "epoch": 0.26395325408019343,
      "grad_norm": 9.375,
      "learning_rate": 1.824165491302304e-05,
      "loss": 0.9433,
      "mean_token_accuracy": 0.7743871629238128,
      "num_tokens": 1450338.0,
      "step": 1310
    },
    {
      "epoch": 0.2659681644166835,
      "grad_norm": 12.5,
      "learning_rate": 1.8228222177446436e-05,
      "loss": 1.0234,
      "mean_token_accuracy": 0.7584192335605622,
      "num_tokens": 1462159.0,
      "step": 1320
    },
    {
      "epoch": 0.2679830747531735,
      "grad_norm": 12.3125,
      "learning_rate": 1.8214789441869838e-05,
      "loss": 0.9743,
      "mean_token_accuracy": 0.765831732749939,
      "num_tokens": 1475528.0,
      "step": 1330
    },
    {
      "epoch": 0.2699979850896635,
      "grad_norm": 12.125,
      "learning_rate": 1.8201356706293237e-05,
      "loss": 0.9147,
      "mean_token_accuracy": 0.7787733376026154,
      "num_tokens": 1484980.0,
      "step": 1340
    },
    {
      "epoch": 0.27201289542615353,
      "grad_norm": 12.5625,
      "learning_rate": 1.818792397071664e-05,
      "loss": 0.9997,
      "mean_token_accuracy": 0.7686746776103973,
      "num_tokens": 1496744.0,
      "step": 1350
    },
    {
      "epoch": 0.2740278057626436,
      "grad_norm": 11.1875,
      "learning_rate": 1.8174491235140038e-05,
      "loss": 0.8834,
      "mean_token_accuracy": 0.791484820842743,
      "num_tokens": 1507317.0,
      "step": 1360
    },
    {
      "epoch": 0.2760427160991336,
      "grad_norm": 12.0,
      "learning_rate": 1.8161058499563437e-05,
      "loss": 0.9816,
      "mean_token_accuracy": 0.7709372580051422,
      "num_tokens": 1519459.0,
      "step": 1370
    },
    {
| "epoch": 0.2780576264356236, | |
| "grad_norm": 12.0, | |
| "learning_rate": 1.814762576398684e-05, | |
| "loss": 0.9477, | |
| "mean_token_accuracy": 0.7731155812740326, | |
| "num_tokens": 1530464.0, | |
| "step": 1380 | |
| }, | |
| { | |
| "epoch": 0.28007253677211363, | |
| "grad_norm": 14.625, | |
| "learning_rate": 1.8134193028410235e-05, | |
| "loss": 0.9117, | |
| "mean_token_accuracy": 0.780947208404541, | |
| "num_tokens": 1541480.0, | |
| "step": 1390 | |
| }, | |
| { | |
| "epoch": 0.2820874471086037, | |
| "grad_norm": 12.125, | |
| "learning_rate": 1.8120760292833637e-05, | |
| "loss": 0.8446, | |
| "mean_token_accuracy": 0.7891036987304687, | |
| "num_tokens": 1552611.0, | |
| "step": 1400 | |
| }, | |
| { | |
| "epoch": 0.2841023574450937, | |
| "grad_norm": 15.5625, | |
| "learning_rate": 1.8107327557257036e-05, | |
| "loss": 0.8572, | |
| "mean_token_accuracy": 0.7868121325969696, | |
| "num_tokens": 1563258.0, | |
| "step": 1410 | |
| }, | |
| { | |
| "epoch": 0.2861172677815837, | |
| "grad_norm": 13.5625, | |
| "learning_rate": 1.8093894821680438e-05, | |
| "loss": 0.8396, | |
| "mean_token_accuracy": 0.7922836720943451, | |
| "num_tokens": 1575060.0, | |
| "step": 1420 | |
| }, | |
| { | |
| "epoch": 0.28813217811807373, | |
| "grad_norm": 16.25, | |
| "learning_rate": 1.8080462086103837e-05, | |
| "loss": 0.9779, | |
| "mean_token_accuracy": 0.7661596953868866, | |
| "num_tokens": 1586846.0, | |
| "step": 1430 | |
| }, | |
| { | |
| "epoch": 0.2901470884545638, | |
| "grad_norm": 11.9375, | |
| "learning_rate": 1.8067029350527236e-05, | |
| "loss": 0.9174, | |
| "mean_token_accuracy": 0.7865382909774781, | |
| "num_tokens": 1597526.0, | |
| "step": 1440 | |
| }, | |
| { | |
| "epoch": 0.2921619987910538, | |
| "grad_norm": 12.125, | |
| "learning_rate": 1.8053596614950638e-05, | |
| "loss": 1.1157, | |
| "mean_token_accuracy": 0.733438128232956, | |
| "num_tokens": 1608463.0, | |
| "step": 1450 | |
| }, | |
| { | |
| "epoch": 0.29417690912754385, | |
| "grad_norm": 9.4375, | |
| "learning_rate": 1.8040163879374037e-05, | |
| "loss": 0.9306, | |
| "mean_token_accuracy": 0.7765897631645202, | |
| "num_tokens": 1619939.0, | |
| "step": 1460 | |
| }, | |
| { | |
| "epoch": 0.29619181946403383, | |
| "grad_norm": 11.125, | |
| "learning_rate": 1.8026731143797435e-05, | |
| "loss": 0.9663, | |
| "mean_token_accuracy": 0.773787796497345, | |
| "num_tokens": 1630503.0, | |
| "step": 1470 | |
| }, | |
| { | |
| "epoch": 0.2982067298005239, | |
| "grad_norm": 10.3125, | |
| "learning_rate": 1.8013298408220838e-05, | |
| "loss": 0.8462, | |
| "mean_token_accuracy": 0.793005895614624, | |
| "num_tokens": 1641658.0, | |
| "step": 1480 | |
| }, | |
| { | |
| "epoch": 0.3002216401370139, | |
| "grad_norm": 15.875, | |
| "learning_rate": 1.7999865672644233e-05, | |
| "loss": 0.8524, | |
| "mean_token_accuracy": 0.7874381899833679, | |
| "num_tokens": 1652188.0, | |
| "step": 1490 | |
| }, | |
| { | |
| "epoch": 0.30223655047350395, | |
| "grad_norm": 10.9375, | |
| "learning_rate": 1.7986432937067635e-05, | |
| "loss": 1.0263, | |
| "mean_token_accuracy": 0.7567296206951142, | |
| "num_tokens": 1663193.0, | |
| "step": 1500 | |
| }, | |
| { | |
| "epoch": 0.30425146080999393, | |
| "grad_norm": 11.75, | |
| "learning_rate": 1.7973000201491034e-05, | |
| "loss": 0.9706, | |
| "mean_token_accuracy": 0.7680864214897156, | |
| "num_tokens": 1674150.0, | |
| "step": 1510 | |
| }, | |
| { | |
| "epoch": 0.306266371146484, | |
| "grad_norm": 11.75, | |
| "learning_rate": 1.7959567465914436e-05, | |
| "loss": 0.8763, | |
| "mean_token_accuracy": 0.7913140594959259, | |
| "num_tokens": 1683177.0, | |
| "step": 1520 | |
| }, | |
| { | |
| "epoch": 0.308281281482974, | |
| "grad_norm": 13.25, | |
| "learning_rate": 1.7946134730337835e-05, | |
| "loss": 1.0059, | |
| "mean_token_accuracy": 0.7636462986469269, | |
| "num_tokens": 1694685.0, | |
| "step": 1530 | |
| }, | |
| { | |
| "epoch": 0.31029619181946405, | |
| "grad_norm": 10.375, | |
| "learning_rate": 1.7932701994761234e-05, | |
| "loss": 0.9025, | |
| "mean_token_accuracy": 0.7839828193187713, | |
| "num_tokens": 1706205.0, | |
| "step": 1540 | |
| }, | |
| { | |
| "epoch": 0.31231110215595403, | |
| "grad_norm": 11.6875, | |
| "learning_rate": 1.7919269259184636e-05, | |
| "loss": 0.8492, | |
| "mean_token_accuracy": 0.7961820960044861, | |
| "num_tokens": 1717490.0, | |
| "step": 1550 | |
| }, | |
| { | |
| "epoch": 0.3143260124924441, | |
| "grad_norm": 12.625, | |
| "learning_rate": 1.7905836523608032e-05, | |
| "loss": 0.9433, | |
| "mean_token_accuracy": 0.7731126248836517, | |
| "num_tokens": 1728246.0, | |
| "step": 1560 | |
| }, | |
| { | |
| "epoch": 0.3163409228289341, | |
| "grad_norm": 11.4375, | |
| "learning_rate": 1.7892403788031434e-05, | |
| "loss": 0.9186, | |
| "mean_token_accuracy": 0.7792913258075714, | |
| "num_tokens": 1739564.0, | |
| "step": 1570 | |
| }, | |
| { | |
| "epoch": 0.31835583316542415, | |
| "grad_norm": 10.5625, | |
| "learning_rate": 1.7878971052454833e-05, | |
| "loss": 1.03, | |
| "mean_token_accuracy": 0.7519110560417175, | |
| "num_tokens": 1750523.0, | |
| "step": 1580 | |
| }, | |
| { | |
| "epoch": 0.3203707435019142, | |
| "grad_norm": 14.125, | |
| "learning_rate": 1.7865538316878235e-05, | |
| "loss": 0.8591, | |
| "mean_token_accuracy": 0.7900331735610961, | |
| "num_tokens": 1761179.0, | |
| "step": 1590 | |
| }, | |
| { | |
| "epoch": 0.3223856538384042, | |
| "grad_norm": 11.875, | |
| "learning_rate": 1.7852105581301634e-05, | |
| "loss": 0.9531, | |
| "mean_token_accuracy": 0.7734063506126404, | |
| "num_tokens": 1771529.0, | |
| "step": 1600 | |
| }, | |
| { | |
| "epoch": 0.3244005641748942, | |
| "grad_norm": 12.125, | |
| "learning_rate": 1.7838672845725033e-05, | |
| "loss": 0.9979, | |
| "mean_token_accuracy": 0.7627780497074127, | |
| "num_tokens": 1783491.0, | |
| "step": 1610 | |
| }, | |
| { | |
| "epoch": 0.32641547451138425, | |
| "grad_norm": 13.125, | |
| "learning_rate": 1.7825240110148435e-05, | |
| "loss": 0.9718, | |
| "mean_token_accuracy": 0.7701032817363739, | |
| "num_tokens": 1793982.0, | |
| "step": 1620 | |
| }, | |
| { | |
| "epoch": 0.3284303848478743, | |
| "grad_norm": 10.875, | |
| "learning_rate": 1.7811807374571834e-05, | |
| "loss": 0.8964, | |
| "mean_token_accuracy": 0.778260862827301, | |
| "num_tokens": 1804365.0, | |
| "step": 1630 | |
| }, | |
| { | |
| "epoch": 0.3304452951843643, | |
| "grad_norm": 12.25, | |
| "learning_rate": 1.7798374638995233e-05, | |
| "loss": 0.8359, | |
| "mean_token_accuracy": 0.7983898043632507, | |
| "num_tokens": 1815367.0, | |
| "step": 1640 | |
| }, | |
| { | |
| "epoch": 0.3324602055208543, | |
| "grad_norm": 10.375, | |
| "learning_rate": 1.7784941903418635e-05, | |
| "loss": 0.9085, | |
| "mean_token_accuracy": 0.7792557597160339, | |
| "num_tokens": 1827485.0, | |
| "step": 1650 | |
| }, | |
| { | |
| "epoch": 0.33447511585734435, | |
| "grad_norm": 11.0, | |
| "learning_rate": 1.777150916784203e-05, | |
| "loss": 0.9841, | |
| "mean_token_accuracy": 0.7674819171428681, | |
| "num_tokens": 1839427.0, | |
| "step": 1660 | |
| }, | |
| { | |
| "epoch": 0.3364900261938344, | |
| "grad_norm": 11.9375, | |
| "learning_rate": 1.7758076432265433e-05, | |
| "loss": 0.9128, | |
| "mean_token_accuracy": 0.7775863528251648, | |
| "num_tokens": 1850272.0, | |
| "step": 1670 | |
| }, | |
| { | |
| "epoch": 0.3385049365303244, | |
| "grad_norm": 12.1875, | |
| "learning_rate": 1.774464369668883e-05, | |
| "loss": 0.8176, | |
| "mean_token_accuracy": 0.7950083971023559, | |
| "num_tokens": 1861066.0, | |
| "step": 1680 | |
| }, | |
| { | |
| "epoch": 0.3405198468668144, | |
| "grad_norm": 11.8125, | |
| "learning_rate": 1.7731210961112234e-05, | |
| "loss": 0.8512, | |
| "mean_token_accuracy": 0.7908262252807617, | |
| "num_tokens": 1873013.0, | |
| "step": 1690 | |
| }, | |
| { | |
| "epoch": 0.34253475720330445, | |
| "grad_norm": 12.25, | |
| "learning_rate": 1.7717778225535632e-05, | |
| "loss": 0.9421, | |
| "mean_token_accuracy": 0.7760106027126312, | |
| "num_tokens": 1884754.0, | |
| "step": 1700 | |
| }, | |
| { | |
| "epoch": 0.3445496675397945, | |
| "grad_norm": 10.3125, | |
| "learning_rate": 1.770434548995903e-05, | |
| "loss": 0.9274, | |
| "mean_token_accuracy": 0.7754071950912476, | |
| "num_tokens": 1894900.0, | |
| "step": 1710 | |
| }, | |
| { | |
| "epoch": 0.34656457787628453, | |
| "grad_norm": 12.3125, | |
| "learning_rate": 1.7690912754382433e-05, | |
| "loss": 0.8674, | |
| "mean_token_accuracy": 0.7867493867874146, | |
| "num_tokens": 1904852.0, | |
| "step": 1720 | |
| }, | |
| { | |
| "epoch": 0.3485794882127745, | |
| "grad_norm": 13.875, | |
| "learning_rate": 1.767748001880583e-05, | |
| "loss": 0.89, | |
| "mean_token_accuracy": 0.7835441708564759, | |
| "num_tokens": 1914856.0, | |
| "step": 1730 | |
| }, | |
| { | |
| "epoch": 0.35059439854926455, | |
| "grad_norm": 17.375, | |
| "learning_rate": 1.766404728322923e-05, | |
| "loss": 0.895, | |
| "mean_token_accuracy": 0.777972149848938, | |
| "num_tokens": 1926675.0, | |
| "step": 1740 | |
| }, | |
| { | |
| "epoch": 0.3526093088857546, | |
| "grad_norm": 13.375, | |
| "learning_rate": 1.765061454765263e-05, | |
| "loss": 0.9383, | |
| "mean_token_accuracy": 0.7738365948200225, | |
| "num_tokens": 1938008.0, | |
| "step": 1750 | |
| }, | |
| { | |
| "epoch": 0.35462421922224463, | |
| "grad_norm": 12.4375, | |
| "learning_rate": 1.763718181207603e-05, | |
| "loss": 0.9798, | |
| "mean_token_accuracy": 0.7683565199375153, | |
| "num_tokens": 1947741.0, | |
| "step": 1760 | |
| }, | |
| { | |
| "epoch": 0.3566391295587346, | |
| "grad_norm": 11.1875, | |
| "learning_rate": 1.762374907649943e-05, | |
| "loss": 0.9872, | |
| "mean_token_accuracy": 0.7626317620277405, | |
| "num_tokens": 1958393.0, | |
| "step": 1770 | |
| }, | |
| { | |
| "epoch": 0.35865403989522465, | |
| "grad_norm": 9.625, | |
| "learning_rate": 1.761031634092283e-05, | |
| "loss": 0.8591, | |
| "mean_token_accuracy": 0.7906990587711334, | |
| "num_tokens": 1970077.0, | |
| "step": 1780 | |
| }, | |
| { | |
| "epoch": 0.3606689502317147, | |
| "grad_norm": 10.5625, | |
| "learning_rate": 1.7596883605346232e-05, | |
| "loss": 0.8462, | |
| "mean_token_accuracy": 0.7935640037059783, | |
| "num_tokens": 1980357.0, | |
| "step": 1790 | |
| }, | |
| { | |
| "epoch": 0.36268386056820473, | |
| "grad_norm": 11.6875, | |
| "learning_rate": 1.758345086976963e-05, | |
| "loss": 1.0495, | |
| "mean_token_accuracy": 0.7528632760047913, | |
| "num_tokens": 1991123.0, | |
| "step": 1800 | |
| }, | |
| { | |
| "epoch": 0.3646987709046947, | |
| "grad_norm": 10.625, | |
| "learning_rate": 1.757001813419303e-05, | |
| "loss": 0.9717, | |
| "mean_token_accuracy": 0.7630142509937287, | |
| "num_tokens": 2001627.0, | |
| "step": 1810 | |
| }, | |
| { | |
| "epoch": 0.36671368124118475, | |
| "grad_norm": 11.3125, | |
| "learning_rate": 1.7556585398616432e-05, | |
| "loss": 0.8413, | |
| "mean_token_accuracy": 0.792235940694809, | |
| "num_tokens": 2013933.0, | |
| "step": 1820 | |
| }, | |
| { | |
| "epoch": 0.3687285915776748, | |
| "grad_norm": 12.9375, | |
| "learning_rate": 1.7543152663039827e-05, | |
| "loss": 0.9425, | |
| "mean_token_accuracy": 0.7698959350585938, | |
| "num_tokens": 2024301.0, | |
| "step": 1830 | |
| }, | |
| { | |
| "epoch": 0.37074350191416483, | |
| "grad_norm": 11.4375, | |
| "learning_rate": 1.752971992746323e-05, | |
| "loss": 0.8502, | |
| "mean_token_accuracy": 0.781799453496933, | |
| "num_tokens": 2034610.0, | |
| "step": 1840 | |
| }, | |
| { | |
| "epoch": 0.37275841225065487, | |
| "grad_norm": 10.3125, | |
| "learning_rate": 1.751628719188663e-05, | |
| "loss": 1.0261, | |
| "mean_token_accuracy": 0.7564146995544434, | |
| "num_tokens": 2046053.0, | |
| "step": 1850 | |
| }, | |
| { | |
| "epoch": 0.37477332258714485, | |
| "grad_norm": 15.625, | |
| "learning_rate": 1.750285445631003e-05, | |
| "loss": 1.0081, | |
| "mean_token_accuracy": 0.760506784915924, | |
| "num_tokens": 2056818.0, | |
| "step": 1860 | |
| }, | |
| { | |
| "epoch": 0.3767882329236349, | |
| "grad_norm": 13.25, | |
| "learning_rate": 1.748942172073343e-05, | |
| "loss": 1.0131, | |
| "mean_token_accuracy": 0.7612803816795349, | |
| "num_tokens": 2067615.0, | |
| "step": 1870 | |
| }, | |
| { | |
| "epoch": 0.37880314326012493, | |
| "grad_norm": 9.8125, | |
| "learning_rate": 1.747598898515683e-05, | |
| "loss": 0.8831, | |
| "mean_token_accuracy": 0.7784870386123657, | |
| "num_tokens": 2079047.0, | |
| "step": 1880 | |
| }, | |
| { | |
| "epoch": 0.38081805359661497, | |
| "grad_norm": 12.3125, | |
| "learning_rate": 1.746255624958023e-05, | |
| "loss": 1.0057, | |
| "mean_token_accuracy": 0.7611044764518737, | |
| "num_tokens": 2090465.0, | |
| "step": 1890 | |
| }, | |
| { | |
| "epoch": 0.38283296393310495, | |
| "grad_norm": 12.25, | |
| "learning_rate": 1.7449123514003626e-05, | |
| "loss": 0.9935, | |
| "mean_token_accuracy": 0.7548797488212585, | |
| "num_tokens": 2102421.0, | |
| "step": 1900 | |
| }, | |
| { | |
| "epoch": 0.384847874269595, | |
| "grad_norm": 12.0625, | |
| "learning_rate": 1.7435690778427028e-05, | |
| "loss": 0.8325, | |
| "mean_token_accuracy": 0.7887258946895599, | |
| "num_tokens": 2112546.0, | |
| "step": 1910 | |
| }, | |
| { | |
| "epoch": 0.38686278460608503, | |
| "grad_norm": 20.25, | |
| "learning_rate": 1.7422258042850427e-05, | |
| "loss": 0.886, | |
| "mean_token_accuracy": 0.7784853160381318, | |
| "num_tokens": 2123698.0, | |
| "step": 1920 | |
| }, | |
| { | |
| "epoch": 0.38887769494257507, | |
| "grad_norm": 14.25, | |
| "learning_rate": 1.7408825307273826e-05, | |
| "loss": 0.9794, | |
| "mean_token_accuracy": 0.7646930754184723, | |
| "num_tokens": 2134235.0, | |
| "step": 1930 | |
| }, | |
| { | |
| "epoch": 0.3908926052790651, | |
| "grad_norm": 13.125, | |
| "learning_rate": 1.7395392571697228e-05, | |
| "loss": 0.8836, | |
| "mean_token_accuracy": 0.7887324035167694, | |
| "num_tokens": 2144224.0, | |
| "step": 1940 | |
| }, | |
| { | |
| "epoch": 0.3929075156155551, | |
| "grad_norm": 13.125, | |
| "learning_rate": 1.7381959836120627e-05, | |
| "loss": 0.8652, | |
| "mean_token_accuracy": 0.7922823071479798, | |
| "num_tokens": 2154554.0, | |
| "step": 1950 | |
| }, | |
| { | |
| "epoch": 0.39492242595204513, | |
| "grad_norm": 12.375, | |
| "learning_rate": 1.736852710054403e-05, | |
| "loss": 0.9197, | |
| "mean_token_accuracy": 0.7768184185028076, | |
| "num_tokens": 2165026.0, | |
| "step": 1960 | |
| }, | |
| { | |
| "epoch": 0.39693733628853517, | |
| "grad_norm": 12.125, | |
| "learning_rate": 1.7355094364967428e-05, | |
| "loss": 0.9668, | |
| "mean_token_accuracy": 0.7621938049793243, | |
| "num_tokens": 2176032.0, | |
| "step": 1970 | |
| }, | |
| { | |
| "epoch": 0.3989522466250252, | |
| "grad_norm": 13.0, | |
| "learning_rate": 1.7341661629390827e-05, | |
| "loss": 0.8779, | |
| "mean_token_accuracy": 0.7878279089927673, | |
| "num_tokens": 2186771.0, | |
| "step": 1980 | |
| }, | |
| { | |
| "epoch": 0.4009671569615152, | |
| "grad_norm": 11.8125, | |
| "learning_rate": 1.7328228893814226e-05, | |
| "loss": 0.8448, | |
| "mean_token_accuracy": 0.7939679741859436, | |
| "num_tokens": 2197173.0, | |
| "step": 1990 | |
| }, | |
| { | |
| "epoch": 0.40298206729800523, | |
| "grad_norm": 10.6875, | |
| "learning_rate": 1.7314796158237625e-05, | |
| "loss": 0.821, | |
| "mean_token_accuracy": 0.7935750424861908, | |
| "num_tokens": 2208982.0, | |
| "step": 2000 | |
| }, | |
| { | |
| "epoch": 0.40499697763449527, | |
| "grad_norm": 17.875, | |
| "learning_rate": 1.7301363422661027e-05, | |
| "loss": 0.9803, | |
| "mean_token_accuracy": 0.7665176451206207, | |
| "num_tokens": 2219728.0, | |
| "step": 2010 | |
| }, | |
| { | |
| "epoch": 0.4070118879709853, | |
| "grad_norm": 12.0, | |
| "learning_rate": 1.7287930687084426e-05, | |
| "loss": 0.9941, | |
| "mean_token_accuracy": 0.7633516311645507, | |
| "num_tokens": 2230087.0, | |
| "step": 2020 | |
| }, | |
| { | |
| "epoch": 0.4090267983074753, | |
| "grad_norm": 11.0625, | |
| "learning_rate": 1.7274497951507828e-05, | |
| "loss": 0.8946, | |
| "mean_token_accuracy": 0.786914736032486, | |
| "num_tokens": 2240723.0, | |
| "step": 2030 | |
| }, | |
| { | |
| "epoch": 0.41104170864396533, | |
| "grad_norm": 11.0, | |
| "learning_rate": 1.7261065215931227e-05, | |
| "loss": 0.8631, | |
| "mean_token_accuracy": 0.7922929883003235, | |
| "num_tokens": 2250999.0, | |
| "step": 2040 | |
| }, | |
| { | |
| "epoch": 0.41305661898045537, | |
| "grad_norm": 12.0625, | |
| "learning_rate": 1.7247632480354626e-05, | |
| "loss": 0.8731, | |
| "mean_token_accuracy": 0.7846612274646759, | |
| "num_tokens": 2261138.0, | |
| "step": 2050 | |
| }, | |
| { | |
| "epoch": 0.4150715293169454, | |
| "grad_norm": 10.125, | |
| "learning_rate": 1.7234199744778028e-05, | |
| "loss": 0.885, | |
| "mean_token_accuracy": 0.7879790186882019, | |
| "num_tokens": 2271083.0, | |
| "step": 2060 | |
| }, | |
| { | |
| "epoch": 0.41708643965343545, | |
| "grad_norm": 11.375, | |
| "learning_rate": 1.7220767009201423e-05, | |
| "loss": 0.9247, | |
| "mean_token_accuracy": 0.7802027463912964, | |
| "num_tokens": 2282490.0, | |
| "step": 2070 | |
| }, | |
| { | |
| "epoch": 0.41910134998992543, | |
| "grad_norm": 10.625, | |
| "learning_rate": 1.7207334273624825e-05, | |
| "loss": 0.953, | |
| "mean_token_accuracy": 0.7730051100254058, | |
| "num_tokens": 2293586.0, | |
| "step": 2080 | |
| }, | |
| { | |
| "epoch": 0.42111626032641547, | |
| "grad_norm": 11.625, | |
| "learning_rate": 1.7193901538048224e-05, | |
| "loss": 1.0067, | |
| "mean_token_accuracy": 0.7572938621044158, | |
| "num_tokens": 2304802.0, | |
| "step": 2090 | |
| }, | |
| { | |
| "epoch": 0.4231311706629055, | |
| "grad_norm": 12.3125, | |
| "learning_rate": 1.7180468802471623e-05, | |
| "loss": 0.9289, | |
| "mean_token_accuracy": 0.7806627154350281, | |
| "num_tokens": 2315105.0, | |
| "step": 2100 | |
| }, | |
| { | |
| "epoch": 0.42514608099939555, | |
| "grad_norm": 12.9375, | |
| "learning_rate": 1.7167036066895025e-05, | |
| "loss": 0.8419, | |
| "mean_token_accuracy": 0.7955414175987243, | |
| "num_tokens": 2326101.0, | |
| "step": 2110 | |
| }, | |
| { | |
| "epoch": 0.42716099133588553, | |
| "grad_norm": 11.1875, | |
| "learning_rate": 1.7153603331318424e-05, | |
| "loss": 0.9516, | |
| "mean_token_accuracy": 0.7715274512767791, | |
| "num_tokens": 2337205.0, | |
| "step": 2120 | |
| }, | |
| { | |
| "epoch": 0.42917590167237557, | |
| "grad_norm": 11.8125, | |
| "learning_rate": 1.7140170595741826e-05, | |
| "loss": 0.8855, | |
| "mean_token_accuracy": 0.7864199817180634, | |
| "num_tokens": 2349394.0, | |
| "step": 2130 | |
| }, | |
| { | |
| "epoch": 0.4311908120088656, | |
| "grad_norm": 11.1875, | |
| "learning_rate": 1.7126737860165222e-05, | |
| "loss": 0.9328, | |
| "mean_token_accuracy": 0.7836083233356476, | |
| "num_tokens": 2360556.0, | |
| "step": 2140 | |
| }, | |
| { | |
| "epoch": 0.43320572234535565, | |
| "grad_norm": 11.4375, | |
| "learning_rate": 1.7113305124588624e-05, | |
| "loss": 0.9885, | |
| "mean_token_accuracy": 0.7649979829788208, | |
| "num_tokens": 2371734.0, | |
| "step": 2150 | |
| }, | |
| { | |
| "epoch": 0.43522063268184563, | |
| "grad_norm": 12.8125, | |
| "learning_rate": 1.7099872389012023e-05, | |
| "loss": 0.9407, | |
| "mean_token_accuracy": 0.7679962277412414, | |
| "num_tokens": 2382510.0, | |
| "step": 2160 | |
| }, | |
| { | |
| "epoch": 0.43723554301833567, | |
| "grad_norm": 12.625, | |
| "learning_rate": 1.708643965343542e-05, | |
| "loss": 0.9365, | |
| "mean_token_accuracy": 0.7730210840702056, | |
| "num_tokens": 2395814.0, | |
| "step": 2170 | |
| }, | |
| { | |
| "epoch": 0.4392504533548257, | |
| "grad_norm": 14.4375, | |
| "learning_rate": 1.7073006917858824e-05, | |
| "loss": 0.9407, | |
| "mean_token_accuracy": 0.773440134525299, | |
| "num_tokens": 2407124.0, | |
| "step": 2180 | |
| }, | |
| { | |
| "epoch": 0.44126536369131575, | |
| "grad_norm": 11.375, | |
| "learning_rate": 1.7059574182282223e-05, | |
| "loss": 0.9601, | |
| "mean_token_accuracy": 0.7624564170837402, | |
| "num_tokens": 2418793.0, | |
| "step": 2190 | |
| }, | |
| { | |
| "epoch": 0.4432802740278058, | |
| "grad_norm": 10.4375, | |
| "learning_rate": 1.7046141446705625e-05, | |
| "loss": 0.8615, | |
| "mean_token_accuracy": 0.7828535497188568, | |
| "num_tokens": 2430231.0, | |
| "step": 2200 | |
| }, | |
| { | |
| "epoch": 0.44529518436429577, | |
| "grad_norm": 13.0625, | |
| "learning_rate": 1.7032708711129024e-05, | |
| "loss": 0.994, | |
| "mean_token_accuracy": 0.7661273539066314, | |
| "num_tokens": 2441407.0, | |
| "step": 2210 | |
| }, | |
| { | |
| "epoch": 0.4473100947007858, | |
| "grad_norm": 14.4375, | |
| "learning_rate": 1.7019275975552423e-05, | |
| "loss": 0.975, | |
| "mean_token_accuracy": 0.7622927308082581, | |
| "num_tokens": 2453109.0, | |
| "step": 2220 | |
| }, | |
| { | |
| "epoch": 0.44932500503727585, | |
| "grad_norm": 8.875, | |
| "learning_rate": 1.7005843239975825e-05, | |
| "loss": 0.9016, | |
| "mean_token_accuracy": 0.7804525554180145, | |
| "num_tokens": 2463823.0, | |
| "step": 2230 | |
| }, | |
| { | |
| "epoch": 0.4513399153737659, | |
| "grad_norm": 13.8125, | |
| "learning_rate": 1.699241050439922e-05, | |
| "loss": 0.8886, | |
| "mean_token_accuracy": 0.7791055798530578, | |
| "num_tokens": 2474062.0, | |
| "step": 2240 | |
| }, | |
| { | |
| "epoch": 0.45335482571025587, | |
| "grad_norm": 12.5625, | |
| "learning_rate": 1.6978977768822623e-05, | |
| "loss": 0.8376, | |
| "mean_token_accuracy": 0.791073453426361, | |
| "num_tokens": 2485012.0, | |
| "step": 2250 | |
| }, | |
| { | |
| "epoch": 0.4553697360467459, | |
| "grad_norm": 12.4375, | |
| "learning_rate": 1.696554503324602e-05, | |
| "loss": 0.9526, | |
| "mean_token_accuracy": 0.7676692366600036, | |
| "num_tokens": 2497094.0, | |
| "step": 2260 | |
| }, | |
| { | |
| "epoch": 0.45738464638323595, | |
| "grad_norm": 10.75, | |
| "learning_rate": 1.695211229766942e-05, | |
| "loss": 1.0348, | |
| "mean_token_accuracy": 0.7595704078674317, | |
| "num_tokens": 2509251.0, | |
| "step": 2270 | |
| }, | |
| { | |
| "epoch": 0.459399556719726, | |
| "grad_norm": 11.375, | |
| "learning_rate": 1.6938679562092822e-05, | |
| "loss": 0.8975, | |
| "mean_token_accuracy": 0.7786314010620117, | |
| "num_tokens": 2519167.0, | |
| "step": 2280 | |
| }, | |
| { | |
| "epoch": 0.46141446705621597, | |
| "grad_norm": 10.5, | |
| "learning_rate": 1.692524682651622e-05, | |
| "loss": 0.931, | |
| "mean_token_accuracy": 0.780303293466568, | |
| "num_tokens": 2530095.0, | |
| "step": 2290 | |
| }, | |
| { | |
| "epoch": 0.463429377392706, | |
| "grad_norm": 10.0, | |
| "learning_rate": 1.6911814090939623e-05, | |
| "loss": 0.9055, | |
| "mean_token_accuracy": 0.7792095363140106, | |
| "num_tokens": 2542753.0, | |
| "step": 2300 | |
| }, | |
| { | |
| "epoch": 0.46544428772919605, | |
| "grad_norm": 12.625, | |
| "learning_rate": 1.689838135536302e-05, | |
| "loss": 0.8634, | |
| "mean_token_accuracy": 0.7921158850193024, | |
| "num_tokens": 2553761.0, | |
| "step": 2310 | |
| }, | |
| { | |
| "epoch": 0.4674591980656861, | |
| "grad_norm": 10.75, | |
| "learning_rate": 1.688494861978642e-05, | |
| "loss": 0.8504, | |
| "mean_token_accuracy": 0.7890695691108703, | |
| "num_tokens": 2564639.0, | |
| "step": 2320 | |
| }, | |
| { | |
| "epoch": 0.4694741084021761, | |
| "grad_norm": 13.4375, | |
| "learning_rate": 1.687151588420982e-05, | |
| "loss": 0.9293, | |
| "mean_token_accuracy": 0.7696837067604065, | |
| "num_tokens": 2576449.0, | |
| "step": 2330 | |
| }, | |
| { | |
| "epoch": 0.4714890187386661, | |
| "grad_norm": 12.375, | |
| "learning_rate": 1.685808314863322e-05, | |
| "loss": 0.867, | |
| "mean_token_accuracy": 0.7936202645301819, | |
| "num_tokens": 2588236.0, | |
| "step": 2340 | |
| }, | |
| { | |
| "epoch": 0.47350392907515615, | |
| "grad_norm": 12.875, | |
| "learning_rate": 1.684465041305662e-05, | |
| "loss": 0.8259, | |
| "mean_token_accuracy": 0.7961400330066681, | |
| "num_tokens": 2599720.0, | |
| "step": 2350 | |
| }, | |
| { | |
| "epoch": 0.4755188394116462, | |
| "grad_norm": 12.875, | |
| "learning_rate": 1.683121767748002e-05, | |
| "loss": 0.9056, | |
| "mean_token_accuracy": 0.780539608001709, | |
| "num_tokens": 2609396.0, | |
| "step": 2360 | |
| }, | |
| { | |
| "epoch": 0.4775337497481362, | |
| "grad_norm": 11.625, | |
| "learning_rate": 1.681778494190342e-05, | |
| "loss": 0.9019, | |
| "mean_token_accuracy": 0.7818064391613007, | |
| "num_tokens": 2621392.0, | |
| "step": 2370 | |
| }, | |
| { | |
| "epoch": 0.4795486600846262, | |
| "grad_norm": 10.75, | |
| "learning_rate": 1.680435220632682e-05, | |
| "loss": 0.7993, | |
| "mean_token_accuracy": 0.8025827884674073, | |
| "num_tokens": 2633038.0, | |
| "step": 2380 | |
| }, | |
| { | |
| "epoch": 0.48156357042111625, | |
| "grad_norm": 11.4375, | |
| "learning_rate": 1.679091947075022e-05, | |
| "loss": 0.9316, | |
| "mean_token_accuracy": 0.7733452200889588, | |
| "num_tokens": 2644078.0, | |
| "step": 2390 | |
| }, | |
| { | |
| "epoch": 0.4835784807576063, | |
| "grad_norm": 9.5625, | |
| "learning_rate": 1.6777486735173622e-05, | |
| "loss": 0.8044, | |
| "mean_token_accuracy": 0.8011213660240173, | |
| "num_tokens": 2655374.0, | |
| "step": 2400 | |
| }, | |
| { | |
| "epoch": 0.4855933910940963, | |
| "grad_norm": 11.8125, | |
| "learning_rate": 1.6764053999597017e-05, | |
| "loss": 0.8751, | |
| "mean_token_accuracy": 0.7866755127906799, | |
| "num_tokens": 2665838.0, | |
| "step": 2410 | |
| }, | |
| { | |
| "epoch": 0.48760830143058637, | |
| "grad_norm": 10.125, | |
| "learning_rate": 1.675062126402042e-05, | |
| "loss": 0.827, | |
| "mean_token_accuracy": 0.7927514970302582, | |
| "num_tokens": 2675934.0, | |
| "step": 2420 | |
| }, | |
| { | |
| "epoch": 0.48962321176707635, | |
| "grad_norm": 12.8125, | |
| "learning_rate": 1.673718852844382e-05, | |
| "loss": 0.9346, | |
| "mean_token_accuracy": 0.7792349219322204, | |
| "num_tokens": 2687584.0, | |
| "step": 2430 | |
| }, | |
| { | |
| "epoch": 0.4916381221035664, | |
| "grad_norm": 11.875, | |
| "learning_rate": 1.6723755792867217e-05, | |
| "loss": 0.8867, | |
| "mean_token_accuracy": 0.7851879954338074, | |
| "num_tokens": 2697927.0, | |
| "step": 2440 | |
| }, | |
| { | |
| "epoch": 0.4936530324400564, | |
| "grad_norm": 11.75, | |
| "learning_rate": 1.671032305729062e-05, | |
| "loss": 0.8585, | |
| "mean_token_accuracy": 0.7973346531391143, | |
| "num_tokens": 2708092.0, | |
| "step": 2450 | |
| }, | |
| { | |
| "epoch": 0.49566794277654647, | |
| "grad_norm": 14.25, | |
| "learning_rate": 1.669689032171402e-05, | |
| "loss": 0.9075, | |
| "mean_token_accuracy": 0.7788807570934295, | |
| "num_tokens": 2719293.0, | |
| "step": 2460 | |
| }, | |
| { | |
| "epoch": 0.49768285311303645, | |
| "grad_norm": 9.875, | |
| "learning_rate": 1.668345758613742e-05, | |
| "loss": 0.8931, | |
| "mean_token_accuracy": 0.7861545145511627, | |
| "num_tokens": 2730531.0, | |
| "step": 2470 | |
| }, | |
| { | |
| "epoch": 0.4996977634495265, | |
| "grad_norm": 10.4375, | |
| "learning_rate": 1.6670024850560816e-05, | |
| "loss": 0.9438, | |
| "mean_token_accuracy": 0.7664293229579926, | |
| "num_tokens": 2741732.0, | |
| "step": 2480 | |
| }, | |
| { | |
| "epoch": 0.5017126737860165, | |
| "grad_norm": 11.125, | |
| "learning_rate": 1.6656592114984218e-05, | |
| "loss": 0.8719, | |
| "mean_token_accuracy": 0.7909434497356415, | |
| "num_tokens": 2753005.0, | |
| "step": 2490 | |
| }, | |
| { | |
| "epoch": 0.5037275841225065, | |
| "grad_norm": 11.25, | |
| "learning_rate": 1.6643159379407617e-05, | |
| "loss": 0.8739, | |
| "mean_token_accuracy": 0.7839685261249543, | |
| "num_tokens": 2765568.0, | |
| "step": 2500 | |
| }, | |
| { | |
| "epoch": 0.5057424944589965, | |
| "grad_norm": 10.5, | |
| "learning_rate": 1.6629726643831016e-05, | |
| "loss": 0.9455, | |
| "mean_token_accuracy": 0.7747004866600037, | |
| "num_tokens": 2777172.0, | |
| "step": 2510 | |
| }, | |
| { | |
| "epoch": 0.5077574047954866, | |
| "grad_norm": 10.6875, | |
| "learning_rate": 1.6616293908254418e-05, | |
| "loss": 0.9188, | |
| "mean_token_accuracy": 0.7760649502277375, | |
| "num_tokens": 2789318.0, | |
| "step": 2520 | |
| }, | |
| { | |
| "epoch": 0.5097723151319766, | |
| "grad_norm": 12.625, | |
| "learning_rate": 1.6602861172677817e-05, | |
| "loss": 0.9055, | |
| "mean_token_accuracy": 0.7822152853012085, | |
| "num_tokens": 2800307.0, | |
| "step": 2530 | |
| }, | |
| { | |
| "epoch": 0.5117872254684667, | |
| "grad_norm": 12.9375, | |
| "learning_rate": 1.6589428437101216e-05, | |
| "loss": 0.9045, | |
| "mean_token_accuracy": 0.7808859765529632, | |
| "num_tokens": 2811228.0, | |
| "step": 2540 | |
| }, | |
| { | |
| "epoch": 0.5138021358049567, | |
| "grad_norm": 11.25, | |
| "learning_rate": 1.6575995701524618e-05, | |
| "loss": 0.8518, | |
| "mean_token_accuracy": 0.7890688478946686, | |
| "num_tokens": 2821512.0, | |
| "step": 2550 | |
| }, | |
| { | |
| "epoch": 0.5158170461414467, | |
| "grad_norm": 13.5, | |
| "learning_rate": 1.6562562965948017e-05, | |
| "loss": 0.9224, | |
| "mean_token_accuracy": 0.7703654527664184, | |
| "num_tokens": 2832524.0, | |
| "step": 2560 | |
| }, | |
| { | |
| "epoch": 0.5178319564779368, | |
| "grad_norm": 11.1875, | |
| "learning_rate": 1.6549130230371416e-05, | |
| "loss": 0.9554, | |
| "mean_token_accuracy": 0.7688623070716858, | |
| "num_tokens": 2844366.0, | |
| "step": 2570 | |
| }, | |
| { | |
| "epoch": 0.5198468668144267, | |
| "grad_norm": 16.25, | |
| "learning_rate": 1.6535697494794815e-05, | |
| "loss": 0.8999, | |
| "mean_token_accuracy": 0.7806227684020997, | |
| "num_tokens": 2855210.0, | |
| "step": 2580 | |
| }, | |
| { | |
| "epoch": 0.5218617771509168, | |
| "grad_norm": 12.25, | |
| "learning_rate": 1.6522264759218217e-05, | |
| "loss": 0.951, | |
| "mean_token_accuracy": 0.7751711547374726, | |
| "num_tokens": 2865914.0, | |
| "step": 2590 | |
| }, | |
| { | |
| "epoch": 0.5238766874874068, | |
| "grad_norm": 13.375, | |
| "learning_rate": 1.6508832023641616e-05, | |
| "loss": 0.951, | |
| "mean_token_accuracy": 0.7700467705726624, | |
| "num_tokens": 2875344.0, | |
| "step": 2600 | |
| }, | |
| { | |
| "epoch": 0.5258915978238968, | |
| "grad_norm": 11.25, | |
| "learning_rate": 1.6495399288065014e-05, | |
| "loss": 0.9551, | |
| "mean_token_accuracy": 0.7730932533740997, | |
| "num_tokens": 2886246.0, | |
| "step": 2610 | |
| }, | |
| { | |
| "epoch": 0.5279065081603869, | |
| "grad_norm": 11.25, | |
| "learning_rate": 1.6481966552488417e-05, | |
| "loss": 0.8656, | |
| "mean_token_accuracy": 0.7870718777179718, | |
| "num_tokens": 2896640.0, | |
| "step": 2620 | |
| }, | |
| { | |
| "epoch": 0.5299214184968769, | |
| "grad_norm": 13.5, | |
| "learning_rate": 1.6468533816911816e-05, | |
| "loss": 0.8316, | |
| "mean_token_accuracy": 0.790769350528717, | |
| "num_tokens": 2907645.0, | |
| "step": 2630 | |
| }, | |
| { | |
| "epoch": 0.531936328833367, | |
| "grad_norm": 10.5, | |
| "learning_rate": 1.6455101081335218e-05, | |
| "loss": 0.8114, | |
| "mean_token_accuracy": 0.7977836310863495, | |
| "num_tokens": 2918502.0, | |
| "step": 2640 | |
| }, | |
| { | |
| "epoch": 0.533951239169857, | |
| "grad_norm": 13.0, | |
| "learning_rate": 1.6441668345758613e-05, | |
| "loss": 0.7938, | |
| "mean_token_accuracy": 0.8016635835170746, | |
| "num_tokens": 2929723.0, | |
| "step": 2650 | |
| }, | |
| { | |
| "epoch": 0.535966149506347, | |
| "grad_norm": 8.8125, | |
| "learning_rate": 1.6428235610182015e-05, | |
| "loss": 0.8881, | |
| "mean_token_accuracy": 0.7800805151462555, | |
| "num_tokens": 2941026.0, | |
| "step": 2660 | |
| }, | |
| { | |
| "epoch": 0.537981059842837, | |
| "grad_norm": 10.75, | |
| "learning_rate": 1.6414802874605414e-05, | |
| "loss": 0.98, | |
| "mean_token_accuracy": 0.7662722408771515, | |
| "num_tokens": 2952300.0, | |
| "step": 2670 | |
| }, | |
| { | |
| "epoch": 0.539995970179327, | |
| "grad_norm": 11.1875, | |
| "learning_rate": 1.6401370139028813e-05, | |
| "loss": 0.9393, | |
| "mean_token_accuracy": 0.7773295342922211, | |
| "num_tokens": 2963688.0, | |
| "step": 2680 | |
| }, | |
| { | |
| "epoch": 0.542010880515817, | |
| "grad_norm": 10.5, | |
| "learning_rate": 1.6387937403452215e-05, | |
| "loss": 0.8579, | |
| "mean_token_accuracy": 0.7857769846916198, | |
| "num_tokens": 2975798.0, | |
| "step": 2690 | |
| }, | |
| { | |
| "epoch": 0.5440257908523071, | |
| "grad_norm": 11.5625, | |
| "learning_rate": 1.6374504667875614e-05, | |
| "loss": 0.8915, | |
| "mean_token_accuracy": 0.7862484276294708, | |
| "num_tokens": 2986958.0, | |
| "step": 2700 | |
| }, | |
| { | |
| "epoch": 0.5460407011887971, | |
| "grad_norm": 14.0625, | |
| "learning_rate": 1.6361071932299013e-05, | |
| "loss": 0.944, | |
| "mean_token_accuracy": 0.774361002445221, | |
| "num_tokens": 2998027.0, | |
| "step": 2710 | |
| }, | |
| { | |
| "epoch": 0.5480556115252871, | |
| "grad_norm": 9.8125, | |
| "learning_rate": 1.6347639196722412e-05, | |
| "loss": 0.8616, | |
| "mean_token_accuracy": 0.7841140806674958, | |
| "num_tokens": 3008990.0, | |
| "step": 2720 | |
| }, | |
| { | |
| "epoch": 0.5500705218617772, | |
| "grad_norm": 11.625, | |
| "learning_rate": 1.6334206461145814e-05, | |
| "loss": 0.995, | |
| "mean_token_accuracy": 0.764478224515915, | |
| "num_tokens": 3020180.0, | |
| "step": 2730 | |
| }, | |
| { | |
| "epoch": 0.5520854321982672, | |
| "grad_norm": 16.375, | |
| "learning_rate": 1.6320773725569213e-05, | |
| "loss": 0.9769, | |
| "mean_token_accuracy": 0.762674230337143, | |
| "num_tokens": 3031468.0, | |
| "step": 2740 | |
| }, | |
| { | |
| "epoch": 0.5541003425347572, | |
| "grad_norm": 11.0625, | |
| "learning_rate": 1.6307340989992612e-05, | |
| "loss": 0.9019, | |
| "mean_token_accuracy": 0.7809956490993499, | |
| "num_tokens": 3043604.0, | |
| "step": 2750 | |
| }, | |
| { | |
| "epoch": 0.5561152528712472, | |
| "grad_norm": 10.3125, | |
| "learning_rate": 1.6293908254416014e-05, | |
| "loss": 0.8849, | |
| "mean_token_accuracy": 0.7833445549011231, | |
| "num_tokens": 3054038.0, | |
| "step": 2760 | |
| }, | |
| { | |
| "epoch": 0.5581301632077372, | |
| "grad_norm": 13.3125, | |
| "learning_rate": 1.6280475518839413e-05, | |
| "loss": 0.9858, | |
| "mean_token_accuracy": 0.7633112788200378, | |
| "num_tokens": 3064515.0, | |
| "step": 2770 | |
| }, | |
| { | |
| "epoch": 0.5601450735442273, | |
| "grad_norm": 12.3125, | |
| "learning_rate": 1.626704278326281e-05, | |
| "loss": 0.8388, | |
| "mean_token_accuracy": 0.7929341971874238, | |
| "num_tokens": 3075661.0, | |
| "step": 2780 | |
| }, | |
| { | |
| "epoch": 0.5621599838807173, | |
| "grad_norm": 11.6875, | |
| "learning_rate": 1.6253610047686214e-05, | |
| "loss": 0.9444, | |
| "mean_token_accuracy": 0.7728216648101807, | |
| "num_tokens": 3087402.0, | |
| "step": 2790 | |
| }, | |
| { | |
| "epoch": 0.5641748942172073, | |
| "grad_norm": 15.3125, | |
| "learning_rate": 1.6240177312109613e-05, | |
| "loss": 0.9326, | |
| "mean_token_accuracy": 0.7675645828247071, | |
| "num_tokens": 3097807.0, | |
| "step": 2800 | |
| }, | |
| { | |
| "epoch": 0.5661898045536974, | |
| "grad_norm": 11.6875, | |
| "learning_rate": 1.6226744576533015e-05, | |
| "loss": 0.8888, | |
| "mean_token_accuracy": 0.7739431619644165, | |
| "num_tokens": 3109256.0, | |
| "step": 2810 | |
| }, | |
| { | |
| "epoch": 0.5682047148901874, | |
| "grad_norm": 9.9375, | |
| "learning_rate": 1.621331184095641e-05, | |
| "loss": 0.8275, | |
| "mean_token_accuracy": 0.796435010433197, | |
| "num_tokens": 3119274.0, | |
| "step": 2820 | |
| }, | |
| { | |
| "epoch": 0.5702196252266775, | |
| "grad_norm": 11.8125, | |
| "learning_rate": 1.6199879105379813e-05, | |
| "loss": 0.9389, | |
| "mean_token_accuracy": 0.7698701798915863, | |
| "num_tokens": 3129740.0, | |
| "step": 2830 | |
| }, | |
| { | |
| "epoch": 0.5722345355631674, | |
| "grad_norm": 10.5625, | |
| "learning_rate": 1.618644636980321e-05, | |
| "loss": 0.9461, | |
| "mean_token_accuracy": 0.7643969297409058, | |
| "num_tokens": 3141041.0, | |
| "step": 2840 | |
| }, | |
| { | |
| "epoch": 0.5742494458996574, | |
| "grad_norm": 13.8125, | |
| "learning_rate": 1.617301363422661e-05, | |
| "loss": 1.0038, | |
| "mean_token_accuracy": 0.7637837052345275, | |
| "num_tokens": 3151060.0, | |
| "step": 2850 | |
| }, | |
| { | |
| "epoch": 0.5762643562361475, | |
| "grad_norm": 10.75, | |
| "learning_rate": 1.6159580898650012e-05, | |
| "loss": 0.862, | |
| "mean_token_accuracy": 0.785337769985199, | |
| "num_tokens": 3162459.0, | |
| "step": 2860 | |
| }, | |
| { | |
| "epoch": 0.5782792665726375, | |
| "grad_norm": 10.5625, | |
| "learning_rate": 1.614614816307341e-05, | |
| "loss": 0.9322, | |
| "mean_token_accuracy": 0.7696834802627563, | |
| "num_tokens": 3172476.0, | |
| "step": 2870 | |
| }, | |
| { | |
| "epoch": 0.5802941769091275, | |
| "grad_norm": 13.3125, | |
| "learning_rate": 1.613271542749681e-05, | |
| "loss": 0.9818, | |
| "mean_token_accuracy": 0.7669282913208008, | |
| "num_tokens": 3182571.0, | |
| "step": 2880 | |
| }, | |
| { | |
| "epoch": 0.5823090872456176, | |
| "grad_norm": 10.0, | |
| "learning_rate": 1.611928269192021e-05, | |
| "loss": 0.8449, | |
| "mean_token_accuracy": 0.7930525064468383, | |
| "num_tokens": 3194456.0, | |
| "step": 2890 | |
| }, | |
| { | |
| "epoch": 0.5843239975821076, | |
| "grad_norm": 10.4375, | |
| "learning_rate": 1.610584995634361e-05, | |
| "loss": 0.862, | |
| "mean_token_accuracy": 0.7849324703216553, | |
| "num_tokens": 3204377.0, | |
| "step": 2900 | |
| }, | |
| { | |
| "epoch": 0.5863389079185977, | |
| "grad_norm": 11.875, | |
| "learning_rate": 1.609241722076701e-05, | |
| "loss": 0.8853, | |
| "mean_token_accuracy": 0.7843615412712097, | |
| "num_tokens": 3216082.0, | |
| "step": 2910 | |
| }, | |
| { | |
| "epoch": 0.5883538182550877, | |
| "grad_norm": 9.375, | |
| "learning_rate": 1.607898448519041e-05, | |
| "loss": 0.7821, | |
| "mean_token_accuracy": 0.7997645199298858, | |
| "num_tokens": 3226582.0, | |
| "step": 2920 | |
| }, | |
| { | |
| "epoch": 0.5903687285915776, | |
| "grad_norm": 9.1875, | |
| "learning_rate": 1.606555174961381e-05, | |
| "loss": 0.897, | |
| "mean_token_accuracy": 0.7760909557342529, | |
| "num_tokens": 3238387.0, | |
| "step": 2930 | |
| }, | |
| { | |
| "epoch": 0.5923836389280677, | |
| "grad_norm": 12.0, | |
| "learning_rate": 1.605211901403721e-05, | |
| "loss": 0.9027, | |
| "mean_token_accuracy": 0.78245330452919, | |
| "num_tokens": 3248670.0, | |
| "step": 2940 | |
| }, | |
| { | |
| "epoch": 0.5943985492645577, | |
| "grad_norm": 10.0625, | |
| "learning_rate": 1.603868627846061e-05, | |
| "loss": 0.7964, | |
| "mean_token_accuracy": 0.8025750041007995, | |
| "num_tokens": 3259282.0, | |
| "step": 2950 | |
| }, | |
| { | |
| "epoch": 0.5964134596010477, | |
| "grad_norm": 10.6875, | |
| "learning_rate": 1.602525354288401e-05, | |
| "loss": 0.8159, | |
| "mean_token_accuracy": 0.7951205134391784, | |
| "num_tokens": 3270771.0, | |
| "step": 2960 | |
| }, | |
| { | |
| "epoch": 0.5984283699375378, | |
| "grad_norm": 12.75, | |
| "learning_rate": 1.601182080730741e-05, | |
| "loss": 1.0354, | |
| "mean_token_accuracy": 0.7645578503608703, | |
| "num_tokens": 3282321.0, | |
| "step": 2970 | |
| }, | |
| { | |
| "epoch": 0.6004432802740278, | |
| "grad_norm": 14.6875, | |
| "learning_rate": 1.599838807173081e-05, | |
| "loss": 0.9655, | |
| "mean_token_accuracy": 0.7646917760372162, | |
| "num_tokens": 3294284.0, | |
| "step": 2980 | |
| }, | |
| { | |
| "epoch": 0.6024581906105179, | |
| "grad_norm": 11.375, | |
| "learning_rate": 1.5984955336154207e-05, | |
| "loss": 0.9532, | |
| "mean_token_accuracy": 0.7740876019001007, | |
| "num_tokens": 3305259.0, | |
| "step": 2990 | |
| }, | |
| { | |
| "epoch": 0.6044731009470079, | |
| "grad_norm": 11.5625, | |
| "learning_rate": 1.597152260057761e-05, | |
| "loss": 0.8269, | |
| "mean_token_accuracy": 0.7911386549472809, | |
| "num_tokens": 3316348.0, | |
| "step": 3000 | |
| }, | |
| { | |
| "epoch": 0.6064880112834978, | |
| "grad_norm": 11.25, | |
| "learning_rate": 1.595808986500101e-05, | |
| "loss": 0.9079, | |
| "mean_token_accuracy": 0.7789463937282562, | |
| "num_tokens": 3326996.0, | |
| "step": 3010 | |
| }, | |
| { | |
| "epoch": 0.6085029216199879, | |
| "grad_norm": 11.75, | |
| "learning_rate": 1.5944657129424407e-05, | |
| "loss": 0.8986, | |
| "mean_token_accuracy": 0.7775606334209442, | |
| "num_tokens": 3339244.0, | |
| "step": 3020 | |
| }, | |
| { | |
| "epoch": 0.6105178319564779, | |
| "grad_norm": 11.1875, | |
| "learning_rate": 1.593122439384781e-05, | |
| "loss": 0.8756, | |
| "mean_token_accuracy": 0.7890569746494294, | |
| "num_tokens": 3349636.0, | |
| "step": 3030 | |
| }, | |
| { | |
| "epoch": 0.612532742292968, | |
| "grad_norm": 11.9375, | |
| "learning_rate": 1.591779165827121e-05, | |
| "loss": 0.8937, | |
| "mean_token_accuracy": 0.7829440474510193, | |
| "num_tokens": 3360711.0, | |
| "step": 3040 | |
| }, | |
| { | |
| "epoch": 0.614547652629458, | |
| "grad_norm": 10.25, | |
| "learning_rate": 1.5904358922694607e-05, | |
| "loss": 0.9303, | |
| "mean_token_accuracy": 0.7729782402515412, | |
| "num_tokens": 3372520.0, | |
| "step": 3050 | |
| }, | |
| { | |
| "epoch": 0.616562562965948, | |
| "grad_norm": 10.625, | |
| "learning_rate": 1.5890926187118006e-05, | |
| "loss": 0.9247, | |
| "mean_token_accuracy": 0.7741464018821717, | |
| "num_tokens": 3384007.0, | |
| "step": 3060 | |
| }, | |
| { | |
| "epoch": 0.6185774733024381, | |
| "grad_norm": 9.75, | |
| "learning_rate": 1.5877493451541408e-05, | |
| "loss": 0.7869, | |
| "mean_token_accuracy": 0.8034534513950348, | |
| "num_tokens": 3395421.0, | |
| "step": 3070 | |
| }, | |
| { | |
| "epoch": 0.6205923836389281, | |
| "grad_norm": 12.0, | |
| "learning_rate": 1.5864060715964807e-05, | |
| "loss": 0.8645, | |
| "mean_token_accuracy": 0.7838839650154114, | |
| "num_tokens": 3405277.0, | |
| "step": 3080 | |
| }, | |
| { | |
| "epoch": 0.6226072939754181, | |
| "grad_norm": 11.375, | |
| "learning_rate": 1.5850627980388206e-05, | |
| "loss": 0.8441, | |
| "mean_token_accuracy": 0.7875894546508789, | |
| "num_tokens": 3417672.0, | |
| "step": 3090 | |
| }, | |
| { | |
| "epoch": 0.6246222043119081, | |
| "grad_norm": 11.5625, | |
| "learning_rate": 1.5837195244811608e-05, | |
| "loss": 0.876, | |
| "mean_token_accuracy": 0.7891711950302124, | |
| "num_tokens": 3428808.0, | |
| "step": 3100 | |
| }, | |
| { | |
| "epoch": 0.6266371146483981, | |
| "grad_norm": 10.6875, | |
| "learning_rate": 1.5823762509235007e-05, | |
| "loss": 0.8585, | |
| "mean_token_accuracy": 0.790976220369339, | |
| "num_tokens": 3440047.0, | |
| "step": 3110 | |
| }, | |
| { | |
| "epoch": 0.6286520249848881, | |
| "grad_norm": 11.8125, | |
| "learning_rate": 1.5810329773658406e-05, | |
| "loss": 0.8671, | |
| "mean_token_accuracy": 0.7851876437664032, | |
| "num_tokens": 3450270.0, | |
| "step": 3120 | |
| }, | |
| { | |
| "epoch": 0.6306669353213782, | |
| "grad_norm": 13.125, | |
| "learning_rate": 1.5796897038081808e-05, | |
| "loss": 0.9273, | |
| "mean_token_accuracy": 0.7657091677188873, | |
| "num_tokens": 3462054.0, | |
| "step": 3130 | |
| }, | |
| { | |
| "epoch": 0.6326818456578682, | |
| "grad_norm": 10.9375, | |
| "learning_rate": 1.5783464302505207e-05, | |
| "loss": 0.8185, | |
| "mean_token_accuracy": 0.7917792797088623, | |
| "num_tokens": 3472333.0, | |
| "step": 3140 | |
| }, | |
| { | |
| "epoch": 0.6346967559943583, | |
| "grad_norm": 12.0625, | |
| "learning_rate": 1.5770031566928606e-05, | |
| "loss": 0.9939, | |
| "mean_token_accuracy": 0.7620590627193451, | |
| "num_tokens": 3484753.0, | |
| "step": 3150 | |
| }, | |
| { | |
| "epoch": 0.6367116663308483, | |
| "grad_norm": 14.1875, | |
| "learning_rate": 1.5756598831352005e-05, | |
| "loss": 0.8723, | |
| "mean_token_accuracy": 0.7846651554107666, | |
| "num_tokens": 3496220.0, | |
| "step": 3160 | |
| }, | |
| { | |
| "epoch": 0.6387265766673383, | |
| "grad_norm": 11.6875, | |
| "learning_rate": 1.5743166095775407e-05, | |
| "loss": 0.9375, | |
| "mean_token_accuracy": 0.7770143210887909, | |
| "num_tokens": 3508224.0, | |
| "step": 3170 | |
| }, | |
| { | |
| "epoch": 0.6407414870038284, | |
| "grad_norm": 11.0625, | |
| "learning_rate": 1.5729733360198806e-05, | |
| "loss": 0.8789, | |
| "mean_token_accuracy": 0.7903493702411651, | |
| "num_tokens": 3519271.0, | |
| "step": 3180 | |
| }, | |
| { | |
| "epoch": 0.6427563973403183, | |
| "grad_norm": 16.25, | |
| "learning_rate": 1.5716300624622204e-05, | |
| "loss": 0.9003, | |
| "mean_token_accuracy": 0.7797963619232178, | |
| "num_tokens": 3530537.0, | |
| "step": 3190 | |
| }, | |
| { | |
| "epoch": 0.6447713076768083, | |
| "grad_norm": 10.75, | |
| "learning_rate": 1.5702867889045607e-05, | |
| "loss": 0.9229, | |
| "mean_token_accuracy": 0.7731367945671082, | |
| "num_tokens": 3540961.0, | |
| "step": 3200 | |
| }, | |
| { | |
| "epoch": 0.6467862180132984, | |
| "grad_norm": 11.75, | |
| "learning_rate": 1.5689435153469006e-05, | |
| "loss": 0.9519, | |
| "mean_token_accuracy": 0.766649729013443, | |
| "num_tokens": 3552392.0, | |
| "step": 3210 | |
| }, | |
| { | |
| "epoch": 0.6488011283497884, | |
| "grad_norm": 11.375, | |
| "learning_rate": 1.5676002417892404e-05, | |
| "loss": 0.8958, | |
| "mean_token_accuracy": 0.7798868775367737, | |
| "num_tokens": 3563665.0, | |
| "step": 3220 | |
| }, | |
| { | |
| "epoch": 0.6508160386862785, | |
| "grad_norm": 10.875, | |
| "learning_rate": 1.5662569682315803e-05, | |
| "loss": 0.9158, | |
| "mean_token_accuracy": 0.7784943222999573, | |
| "num_tokens": 3575115.0, | |
| "step": 3230 | |
| }, | |
| { | |
| "epoch": 0.6528309490227685, | |
| "grad_norm": 10.1875, | |
| "learning_rate": 1.5649136946739205e-05, | |
| "loss": 0.8092, | |
| "mean_token_accuracy": 0.7988557398319245, | |
| "num_tokens": 3585453.0, | |
| "step": 3240 | |
| }, | |
| { | |
| "epoch": 0.6548458593592585, | |
| "grad_norm": 12.8125, | |
| "learning_rate": 1.5635704211162604e-05, | |
| "loss": 0.8562, | |
| "mean_token_accuracy": 0.7906098127365112, | |
| "num_tokens": 3595472.0, | |
| "step": 3250 | |
| }, | |
| { | |
| "epoch": 0.6568607696957486, | |
| "grad_norm": 10.9375, | |
| "learning_rate": 1.5622271475586003e-05, | |
| "loss": 0.9317, | |
| "mean_token_accuracy": 0.776879757642746, | |
| "num_tokens": 3607704.0, | |
| "step": 3260 | |
| }, | |
| { | |
| "epoch": 0.6588756800322386, | |
| "grad_norm": 9.6875, | |
| "learning_rate": 1.5608838740009405e-05, | |
| "loss": 0.8642, | |
| "mean_token_accuracy": 0.7901065409183502, | |
| "num_tokens": 3618233.0, | |
| "step": 3270 | |
| }, | |
| { | |
| "epoch": 0.6608905903687285, | |
| "grad_norm": 13.8125, | |
| "learning_rate": 1.5595406004432804e-05, | |
| "loss": 0.9939, | |
| "mean_token_accuracy": 0.7686895251274108, | |
| "num_tokens": 3628902.0, | |
| "step": 3280 | |
| }, | |
| { | |
| "epoch": 0.6629055007052186, | |
| "grad_norm": 12.25, | |
| "learning_rate": 1.5581973268856203e-05, | |
| "loss": 0.8935, | |
| "mean_token_accuracy": 0.7827515482902527, | |
| "num_tokens": 3640225.0, | |
| "step": 3290 | |
| }, | |
| { | |
| "epoch": 0.6649204110417086, | |
| "grad_norm": 13.8125, | |
| "learning_rate": 1.5568540533279605e-05, | |
| "loss": 0.8856, | |
| "mean_token_accuracy": 0.7820924818515778, | |
| "num_tokens": 3651992.0, | |
| "step": 3300 | |
| }, | |
| { | |
| "epoch": 0.6669353213781987, | |
| "grad_norm": 11.0, | |
| "learning_rate": 1.5555107797703004e-05, | |
| "loss": 0.9789, | |
| "mean_token_accuracy": 0.7679969072341919, | |
| "num_tokens": 3663369.0, | |
| "step": 3310 | |
| }, | |
| { | |
| "epoch": 0.6689502317146887, | |
| "grad_norm": 10.625, | |
| "learning_rate": 1.5541675062126403e-05, | |
| "loss": 0.9536, | |
| "mean_token_accuracy": 0.7675111889839172, | |
| "num_tokens": 3674969.0, | |
| "step": 3320 | |
| }, | |
| { | |
| "epoch": 0.6709651420511787, | |
| "grad_norm": 10.375, | |
| "learning_rate": 1.5528242326549802e-05, | |
| "loss": 0.917, | |
| "mean_token_accuracy": 0.7766897320747376, | |
| "num_tokens": 3685794.0, | |
| "step": 3330 | |
| }, | |
| { | |
| "epoch": 0.6729800523876688, | |
| "grad_norm": 12.6875, | |
| "learning_rate": 1.5514809590973204e-05, | |
| "loss": 0.8213, | |
| "mean_token_accuracy": 0.798279982805252, | |
| "num_tokens": 3698384.0, | |
| "step": 3340 | |
| }, | |
| { | |
| "epoch": 0.6749949627241588, | |
| "grad_norm": 14.125, | |
| "learning_rate": 1.5501376855396603e-05, | |
| "loss": 0.9941, | |
| "mean_token_accuracy": 0.7723333060741424, | |
| "num_tokens": 3709110.0, | |
| "step": 3350 | |
| }, | |
| { | |
| "epoch": 0.6770098730606487, | |
| "grad_norm": 10.875, | |
| "learning_rate": 1.548794411982e-05, | |
| "loss": 0.9428, | |
| "mean_token_accuracy": 0.7793790519237518, | |
| "num_tokens": 3720500.0, | |
| "step": 3360 | |
| }, | |
| { | |
| "epoch": 0.6790247833971388, | |
| "grad_norm": 10.5625, | |
| "learning_rate": 1.5474511384243404e-05, | |
| "loss": 0.9055, | |
| "mean_token_accuracy": 0.7757111012935638, | |
| "num_tokens": 3733649.0, | |
| "step": 3370 | |
| }, | |
| { | |
| "epoch": 0.6810396937336288, | |
| "grad_norm": 11.625, | |
| "learning_rate": 1.5461078648666803e-05, | |
| "loss": 1.0494, | |
| "mean_token_accuracy": 0.748576694726944, | |
| "num_tokens": 3744082.0, | |
| "step": 3380 | |
| }, | |
| { | |
| "epoch": 0.6830546040701189, | |
| "grad_norm": 11.0, | |
| "learning_rate": 1.54476459130902e-05, | |
| "loss": 0.9308, | |
| "mean_token_accuracy": 0.7800273001194, | |
| "num_tokens": 3755970.0, | |
| "step": 3390 | |
| }, | |
| { | |
| "epoch": 0.6850695144066089, | |
| "grad_norm": 10.4375, | |
| "learning_rate": 1.54342131775136e-05, | |
| "loss": 0.8138, | |
| "mean_token_accuracy": 0.7963871121406555, | |
| "num_tokens": 3766720.0, | |
| "step": 3400 | |
| }, | |
| { | |
| "epoch": 0.6870844247430989, | |
| "grad_norm": 11.5625, | |
| "learning_rate": 1.5420780441937003e-05, | |
| "loss": 0.8504, | |
| "mean_token_accuracy": 0.7921045780181885, | |
| "num_tokens": 3777338.0, | |
| "step": 3410 | |
| }, | |
| { | |
| "epoch": 0.689099335079589, | |
| "grad_norm": 11.625, | |
| "learning_rate": 1.54073477063604e-05, | |
| "loss": 0.9142, | |
| "mean_token_accuracy": 0.7711953699588776, | |
| "num_tokens": 3788475.0, | |
| "step": 3420 | |
| }, | |
| { | |
| "epoch": 0.691114245416079, | |
| "grad_norm": 11.1875, | |
| "learning_rate": 1.53939149707838e-05, | |
| "loss": 0.9957, | |
| "mean_token_accuracy": 0.7668613314628601, | |
| "num_tokens": 3800222.0, | |
| "step": 3430 | |
| }, | |
| { | |
| "epoch": 0.6931291557525691, | |
| "grad_norm": 13.125, | |
| "learning_rate": 1.5380482235207202e-05, | |
| "loss": 0.8789, | |
| "mean_token_accuracy": 0.7842870116233825, | |
| "num_tokens": 3811363.0, | |
| "step": 3440 | |
| }, | |
| { | |
| "epoch": 0.695144066089059, | |
| "grad_norm": 14.4375, | |
| "learning_rate": 1.53670494996306e-05, | |
| "loss": 0.7952, | |
| "mean_token_accuracy": 0.8054643094539642, | |
| "num_tokens": 3821569.0, | |
| "step": 3450 | |
| }, | |
| { | |
| "epoch": 0.697158976425549, | |
| "grad_norm": 9.6875, | |
| "learning_rate": 1.5353616764054e-05, | |
| "loss": 0.8705, | |
| "mean_token_accuracy": 0.7873030543327332, | |
| "num_tokens": 3833270.0, | |
| "step": 3460 | |
| }, | |
| { | |
| "epoch": 0.6991738867620391, | |
| "grad_norm": 10.6875, | |
| "learning_rate": 1.53401840284774e-05, | |
| "loss": 0.9286, | |
| "mean_token_accuracy": 0.7691307544708252, | |
| "num_tokens": 3844114.0, | |
| "step": 3470 | |
| }, | |
| { | |
| "epoch": 0.7011887970985291, | |
| "grad_norm": 11.4375, | |
| "learning_rate": 1.53267512929008e-05, | |
| "loss": 0.993, | |
| "mean_token_accuracy": 0.7620323598384857, | |
| "num_tokens": 3856040.0, | |
| "step": 3480 | |
| }, | |
| { | |
| "epoch": 0.7032037074350191, | |
| "grad_norm": 11.5, | |
| "learning_rate": 1.53133185573242e-05, | |
| "loss": 0.8668, | |
| "mean_token_accuracy": 0.7913073658943176, | |
| "num_tokens": 3867207.0, | |
| "step": 3490 | |
| }, | |
| { | |
| "epoch": 0.7052186177715092, | |
| "grad_norm": 9.25, | |
| "learning_rate": 1.52998858217476e-05, | |
| "loss": 0.8715, | |
| "mean_token_accuracy": 0.7891253709793091, | |
| "num_tokens": 3879065.0, | |
| "step": 3500 | |
| }, | |
| { | |
| "epoch": 0.7072335281079992, | |
| "grad_norm": 12.25, | |
| "learning_rate": 1.5286453086171e-05, | |
| "loss": 1.021, | |
| "mean_token_accuracy": 0.7534075140953064, | |
| "num_tokens": 3890409.0, | |
| "step": 3510 | |
| }, | |
| { | |
| "epoch": 0.7092484384444893, | |
| "grad_norm": 11.0625, | |
| "learning_rate": 1.52730203505944e-05, | |
| "loss": 0.9314, | |
| "mean_token_accuracy": 0.7765843331813812, | |
| "num_tokens": 3902208.0, | |
| "step": 3520 | |
| }, | |
| { | |
| "epoch": 0.7112633487809793, | |
| "grad_norm": 9.875, | |
| "learning_rate": 1.52595876150178e-05, | |
| "loss": 0.7786, | |
| "mean_token_accuracy": 0.8059293925762177, | |
| "num_tokens": 3913221.0, | |
| "step": 3530 | |
| }, | |
| { | |
| "epoch": 0.7132782591174692, | |
| "grad_norm": 13.1875, | |
| "learning_rate": 1.52461548794412e-05, | |
| "loss": 0.8675, | |
| "mean_token_accuracy": 0.7885148406028748, | |
| "num_tokens": 3923202.0, | |
| "step": 3540 | |
| }, | |
| { | |
| "epoch": 0.7152931694539593, | |
| "grad_norm": 11.625, | |
| "learning_rate": 1.52327221438646e-05, | |
| "loss": 0.945, | |
| "mean_token_accuracy": 0.7707227051258088, | |
| "num_tokens": 3933469.0, | |
| "step": 3550 | |
| }, | |
| { | |
| "epoch": 0.7173080797904493, | |
| "grad_norm": 11.75, | |
| "learning_rate": 1.5219289408287999e-05, | |
| "loss": 0.8829, | |
| "mean_token_accuracy": 0.7817340910434722, | |
| "num_tokens": 3944523.0, | |
| "step": 3560 | |
| }, | |
| { | |
| "epoch": 0.7193229901269393, | |
| "grad_norm": 13.5625, | |
| "learning_rate": 1.52058566727114e-05, | |
| "loss": 0.9075, | |
| "mean_token_accuracy": 0.7843019485473632, | |
| "num_tokens": 3955650.0, | |
| "step": 3570 | |
| }, | |
| { | |
| "epoch": 0.7213379004634294, | |
| "grad_norm": 11.25, | |
| "learning_rate": 1.51924239371348e-05, | |
| "loss": 0.9087, | |
| "mean_token_accuracy": 0.7732051312923431, | |
| "num_tokens": 3967014.0, | |
| "step": 3580 | |
| }, | |
| { | |
| "epoch": 0.7233528107999194, | |
| "grad_norm": 11.0625, | |
| "learning_rate": 1.5178991201558197e-05, | |
| "loss": 0.8595, | |
| "mean_token_accuracy": 0.7891602039337158, | |
| "num_tokens": 3977669.0, | |
| "step": 3590 | |
| }, | |
| { | |
| "epoch": 0.7253677211364095, | |
| "grad_norm": 9.4375, | |
| "learning_rate": 1.5165558465981597e-05, | |
| "loss": 0.8617, | |
| "mean_token_accuracy": 0.7840434491634369, | |
| "num_tokens": 3989279.0, | |
| "step": 3600 | |
| }, | |
| { | |
| "epoch": 0.7273826314728995, | |
| "grad_norm": 11.3125, | |
| "learning_rate": 1.5152125730404998e-05, | |
| "loss": 0.8872, | |
| "mean_token_accuracy": 0.7797149956226349, | |
| "num_tokens": 4000448.0, | |
| "step": 3610 | |
| }, | |
| { | |
| "epoch": 0.7293975418093894, | |
| "grad_norm": 9.375, | |
| "learning_rate": 1.5138692994828398e-05, | |
| "loss": 0.8267, | |
| "mean_token_accuracy": 0.7977706253528595, | |
| "num_tokens": 4010787.0, | |
| "step": 3620 | |
| }, | |
| { | |
| "epoch": 0.7314124521458795, | |
| "grad_norm": 11.5625, | |
| "learning_rate": 1.5125260259251797e-05, | |
| "loss": 0.9227, | |
| "mean_token_accuracy": 0.7788041710853577, | |
| "num_tokens": 4021823.0, | |
| "step": 3630 | |
| }, | |
| { | |
| "epoch": 0.7334273624823695, | |
| "grad_norm": 9.1875, | |
| "learning_rate": 1.5111827523675198e-05, | |
| "loss": 0.988, | |
| "mean_token_accuracy": 0.7647354364395141, | |
| "num_tokens": 4034278.0, | |
| "step": 3640 | |
| }, | |
| { | |
| "epoch": 0.7354422728188595, | |
| "grad_norm": 10.8125, | |
| "learning_rate": 1.5098394788098598e-05, | |
| "loss": 1.0355, | |
| "mean_token_accuracy": 0.7544133722782135, | |
| "num_tokens": 4045371.0, | |
| "step": 3650 | |
| }, | |
| { | |
| "epoch": 0.7374571831553496, | |
| "grad_norm": 11.875, | |
| "learning_rate": 1.5084962052521997e-05, | |
| "loss": 0.8856, | |
| "mean_token_accuracy": 0.7889864265918731, | |
| "num_tokens": 4056216.0, | |
| "step": 3660 | |
| }, | |
| { | |
| "epoch": 0.7394720934918396, | |
| "grad_norm": 11.0625, | |
| "learning_rate": 1.5071529316945398e-05, | |
| "loss": 0.942, | |
| "mean_token_accuracy": 0.7709968864917756, | |
| "num_tokens": 4066100.0, | |
| "step": 3670 | |
| }, | |
| { | |
| "epoch": 0.7414870038283297, | |
| "grad_norm": 10.5625, | |
| "learning_rate": 1.5058096581368798e-05, | |
| "loss": 0.8474, | |
| "mean_token_accuracy": 0.7917793452739715, | |
| "num_tokens": 4076638.0, | |
| "step": 3680 | |
| }, | |
| { | |
| "epoch": 0.7435019141648197, | |
| "grad_norm": 12.625, | |
| "learning_rate": 1.5044663845792197e-05, | |
| "loss": 0.8937, | |
| "mean_token_accuracy": 0.779036569595337, | |
| "num_tokens": 4088398.0, | |
| "step": 3690 | |
| }, | |
| { | |
| "epoch": 0.7455168245013097, | |
| "grad_norm": 13.0625, | |
| "learning_rate": 1.5031231110215596e-05, | |
| "loss": 0.9305, | |
| "mean_token_accuracy": 0.7710303366184235, | |
| "num_tokens": 4100854.0, | |
| "step": 3700 | |
| }, | |
| { | |
| "epoch": 0.7475317348377997, | |
| "grad_norm": 10.375, | |
| "learning_rate": 1.5017798374638996e-05, | |
| "loss": 0.9195, | |
| "mean_token_accuracy": 0.7792443215847016, | |
| "num_tokens": 4113144.0, | |
| "step": 3710 | |
| }, | |
| { | |
| "epoch": 0.7495466451742897, | |
| "grad_norm": 10.25, | |
| "learning_rate": 1.5004365639062397e-05, | |
| "loss": 0.8205, | |
| "mean_token_accuracy": 0.8025223255157471, | |
| "num_tokens": 4124241.0, | |
| "step": 3720 | |
| }, | |
| { | |
| "epoch": 0.7515615555107797, | |
| "grad_norm": 13.375, | |
| "learning_rate": 1.4990932903485796e-05, | |
| "loss": 0.7566, | |
| "mean_token_accuracy": 0.8099412024021149, | |
| "num_tokens": 4134131.0, | |
| "step": 3730 | |
| }, | |
| { | |
| "epoch": 0.7535764658472698, | |
| "grad_norm": 9.3125, | |
| "learning_rate": 1.4977500167909196e-05, | |
| "loss": 0.8882, | |
| "mean_token_accuracy": 0.7791573405265808, | |
| "num_tokens": 4144499.0, | |
| "step": 3740 | |
| }, | |
| { | |
| "epoch": 0.7555913761837598, | |
| "grad_norm": 13.4375, | |
| "learning_rate": 1.4964067432332597e-05, | |
| "loss": 0.8661, | |
| "mean_token_accuracy": 0.7914558589458466, | |
| "num_tokens": 4155442.0, | |
| "step": 3750 | |
| }, | |
| { | |
| "epoch": 0.7576062865202499, | |
| "grad_norm": 13.5625, | |
| "learning_rate": 1.4950634696755994e-05, | |
| "loss": 0.8986, | |
| "mean_token_accuracy": 0.7791661143302917, | |
| "num_tokens": 4165905.0, | |
| "step": 3760 | |
| }, | |
| { | |
| "epoch": 0.7596211968567399, | |
| "grad_norm": 10.875, | |
| "learning_rate": 1.4937201961179395e-05, | |
| "loss": 0.9857, | |
| "mean_token_accuracy": 0.7646209299564362, | |
| "num_tokens": 4177252.0, | |
| "step": 3770 | |
| }, | |
| { | |
| "epoch": 0.7616361071932299, | |
| "grad_norm": 13.1875, | |
| "learning_rate": 1.4923769225602795e-05, | |
| "loss": 0.8163, | |
| "mean_token_accuracy": 0.8001804709434509, | |
| "num_tokens": 4187603.0, | |
| "step": 3780 | |
| }, | |
| { | |
| "epoch": 0.76365101752972, | |
| "grad_norm": 12.5625, | |
| "learning_rate": 1.4910336490026196e-05, | |
| "loss": 0.8719, | |
| "mean_token_accuracy": 0.793983542919159, | |
| "num_tokens": 4198158.0, | |
| "step": 3790 | |
| }, | |
| { | |
| "epoch": 0.7656659278662099, | |
| "grad_norm": 11.625, | |
| "learning_rate": 1.4896903754449594e-05, | |
| "loss": 0.8003, | |
| "mean_token_accuracy": 0.8059770345687867, | |
| "num_tokens": 4209371.0, | |
| "step": 3800 | |
| }, | |
| { | |
| "epoch": 0.7676808382026999, | |
| "grad_norm": 11.375, | |
| "learning_rate": 1.4883471018872995e-05, | |
| "loss": 0.8484, | |
| "mean_token_accuracy": 0.791538542509079, | |
| "num_tokens": 4220051.0, | |
| "step": 3810 | |
| }, | |
| { | |
| "epoch": 0.76969574853919, | |
| "grad_norm": 11.4375, | |
| "learning_rate": 1.4870038283296395e-05, | |
| "loss": 0.8216, | |
| "mean_token_accuracy": 0.7945187032222748, | |
| "num_tokens": 4230922.0, | |
| "step": 3820 | |
| }, | |
| { | |
| "epoch": 0.77171065887568, | |
| "grad_norm": 10.1875, | |
| "learning_rate": 1.4856605547719794e-05, | |
| "loss": 0.8319, | |
| "mean_token_accuracy": 0.7939063310623169, | |
| "num_tokens": 4242793.0, | |
| "step": 3830 | |
| }, | |
| { | |
| "epoch": 0.7737255692121701, | |
| "grad_norm": 14.125, | |
| "learning_rate": 1.4843172812143193e-05, | |
| "loss": 0.8577, | |
| "mean_token_accuracy": 0.7900285601615906, | |
| "num_tokens": 4253881.0, | |
| "step": 3840 | |
| }, | |
| { | |
| "epoch": 0.7757404795486601, | |
| "grad_norm": 10.875, | |
| "learning_rate": 1.4829740076566594e-05, | |
| "loss": 0.836, | |
| "mean_token_accuracy": 0.7931070744991302, | |
| "num_tokens": 4266304.0, | |
| "step": 3850 | |
| }, | |
| { | |
| "epoch": 0.7777553898851501, | |
| "grad_norm": 11.125, | |
| "learning_rate": 1.4816307340989994e-05, | |
| "loss": 1.0042, | |
| "mean_token_accuracy": 0.7616709470748901, | |
| "num_tokens": 4276817.0, | |
| "step": 3860 | |
| }, | |
| { | |
| "epoch": 0.7797703002216402, | |
| "grad_norm": 12.0625, | |
| "learning_rate": 1.4802874605413393e-05, | |
| "loss": 0.7827, | |
| "mean_token_accuracy": 0.8023504674434662, | |
| "num_tokens": 4286833.0, | |
| "step": 3870 | |
| }, | |
| { | |
| "epoch": 0.7817852105581302, | |
| "grad_norm": 12.125, | |
| "learning_rate": 1.4789441869836794e-05, | |
| "loss": 0.8489, | |
| "mean_token_accuracy": 0.7849018990993499, | |
| "num_tokens": 4297516.0, | |
| "step": 3880 | |
| }, | |
| { | |
| "epoch": 0.7838001208946201, | |
| "grad_norm": 11.625, | |
| "learning_rate": 1.4776009134260194e-05, | |
| "loss": 0.8809, | |
| "mean_token_accuracy": 0.7819288611412049, | |
| "num_tokens": 4309049.0, | |
| "step": 3890 | |
| }, | |
| { | |
| "epoch": 0.7858150312311102, | |
| "grad_norm": 10.625, | |
| "learning_rate": 1.4762576398683593e-05, | |
| "loss": 0.9198, | |
| "mean_token_accuracy": 0.7767218172550201, | |
| "num_tokens": 4320154.0, | |
| "step": 3900 | |
| }, | |
| { | |
| "epoch": 0.7878299415676002, | |
| "grad_norm": 12.0625, | |
| "learning_rate": 1.4749143663106993e-05, | |
| "loss": 0.9142, | |
| "mean_token_accuracy": 0.7742327690124512, | |
| "num_tokens": 4334166.0, | |
| "step": 3910 | |
| }, | |
| { | |
| "epoch": 0.7898448519040903, | |
| "grad_norm": 16.0, | |
| "learning_rate": 1.4735710927530394e-05, | |
| "loss": 0.8259, | |
| "mean_token_accuracy": 0.798123425245285, | |
| "num_tokens": 4344500.0, | |
| "step": 3920 | |
| }, | |
| { | |
| "epoch": 0.7918597622405803, | |
| "grad_norm": 12.1875, | |
| "learning_rate": 1.4722278191953791e-05, | |
| "loss": 0.8897, | |
| "mean_token_accuracy": 0.7863348364830017, | |
| "num_tokens": 4355554.0, | |
| "step": 3930 | |
| }, | |
| { | |
| "epoch": 0.7938746725770703, | |
| "grad_norm": 10.3125, | |
| "learning_rate": 1.4708845456377192e-05, | |
| "loss": 0.8904, | |
| "mean_token_accuracy": 0.7880643427371978, | |
| "num_tokens": 4365823.0, | |
| "step": 3940 | |
| }, | |
| { | |
| "epoch": 0.7958895829135604, | |
| "grad_norm": 15.375, | |
| "learning_rate": 1.4695412720800592e-05, | |
| "loss": 0.8622, | |
| "mean_token_accuracy": 0.7930482983589172, | |
| "num_tokens": 4377033.0, | |
| "step": 3950 | |
| }, | |
| { | |
| "epoch": 0.7979044932500504, | |
| "grad_norm": 11.75, | |
| "learning_rate": 1.4681979985223993e-05, | |
| "loss": 0.9426, | |
| "mean_token_accuracy": 0.7710152387619018, | |
| "num_tokens": 4387397.0, | |
| "step": 3960 | |
| }, | |
| { | |
| "epoch": 0.7999194035865403, | |
| "grad_norm": 10.5625, | |
| "learning_rate": 1.4668547249647392e-05, | |
| "loss": 0.8042, | |
| "mean_token_accuracy": 0.8007622241973877, | |
| "num_tokens": 4397683.0, | |
| "step": 3970 | |
| }, | |
| { | |
| "epoch": 0.8019343139230304, | |
| "grad_norm": 9.75, | |
| "learning_rate": 1.4655114514070792e-05, | |
| "loss": 0.8862, | |
| "mean_token_accuracy": 0.7830459952354432, | |
| "num_tokens": 4408500.0, | |
| "step": 3980 | |
| }, | |
| { | |
| "epoch": 0.8039492242595204, | |
| "grad_norm": 13.375, | |
| "learning_rate": 1.4641681778494193e-05, | |
| "loss": 0.9356, | |
| "mean_token_accuracy": 0.7747329294681549, | |
| "num_tokens": 4419148.0, | |
| "step": 3990 | |
| }, | |
| { | |
| "epoch": 0.8059641345960105, | |
| "grad_norm": 13.0625, | |
| "learning_rate": 1.462824904291759e-05, | |
| "loss": 0.9006, | |
| "mean_token_accuracy": 0.7772108554840088, | |
| "num_tokens": 4430041.0, | |
| "step": 4000 | |
| }, | |
| { | |
| "epoch": 0.8079790449325005, | |
| "grad_norm": 11.8125, | |
| "learning_rate": 1.461481630734099e-05, | |
| "loss": 0.803, | |
| "mean_token_accuracy": 0.7974342584609986, | |
| "num_tokens": 4440795.0, | |
| "step": 4010 | |
| }, | |
| { | |
| "epoch": 0.8099939552689905, | |
| "grad_norm": 9.625, | |
| "learning_rate": 1.460138357176439e-05, | |
| "loss": 0.885, | |
| "mean_token_accuracy": 0.7806336939334869, | |
| "num_tokens": 4452744.0, | |
| "step": 4020 | |
| }, | |
| { | |
| "epoch": 0.8120088656054806, | |
| "grad_norm": 10.5625, | |
| "learning_rate": 1.458795083618779e-05, | |
| "loss": 0.9102, | |
| "mean_token_accuracy": 0.7730875134468078, | |
| "num_tokens": 4463539.0, | |
| "step": 4030 | |
| }, | |
| { | |
| "epoch": 0.8140237759419706, | |
| "grad_norm": 11.0, | |
| "learning_rate": 1.457451810061119e-05, | |
| "loss": 0.8561, | |
| "mean_token_accuracy": 0.7935479283332825, | |
| "num_tokens": 4474515.0, | |
| "step": 4040 | |
| }, | |
| { | |
| "epoch": 0.8160386862784607, | |
| "grad_norm": 12.8125, | |
| "learning_rate": 1.456108536503459e-05, | |
| "loss": 0.9461, | |
| "mean_token_accuracy": 0.7769980370998383, | |
| "num_tokens": 4485302.0, | |
| "step": 4050 | |
| }, | |
| { | |
| "epoch": 0.8180535966149506, | |
| "grad_norm": 11.125, | |
| "learning_rate": 1.4547652629457991e-05, | |
| "loss": 0.9002, | |
| "mean_token_accuracy": 0.7809113264083862, | |
| "num_tokens": 4497764.0, | |
| "step": 4060 | |
| }, | |
| { | |
| "epoch": 0.8200685069514406, | |
| "grad_norm": 12.4375, | |
| "learning_rate": 1.453421989388139e-05, | |
| "loss": 0.8366, | |
| "mean_token_accuracy": 0.798139876127243, | |
| "num_tokens": 4508154.0, | |
| "step": 4070 | |
| }, | |
| { | |
| "epoch": 0.8220834172879307, | |
| "grad_norm": 13.4375, | |
| "learning_rate": 1.452078715830479e-05, | |
| "loss": 0.8682, | |
| "mean_token_accuracy": 0.790400379896164, | |
| "num_tokens": 4518543.0, | |
| "step": 4080 | |
| }, | |
| { | |
| "epoch": 0.8240983276244207, | |
| "grad_norm": 10.5625, | |
| "learning_rate": 1.4507354422728191e-05, | |
| "loss": 0.9201, | |
| "mean_token_accuracy": 0.7755493521690369, | |
| "num_tokens": 4529945.0, | |
| "step": 4090 | |
| }, | |
| { | |
| "epoch": 0.8261132379609107, | |
| "grad_norm": 12.625, | |
| "learning_rate": 1.4493921687151588e-05, | |
| "loss": 0.891, | |
| "mean_token_accuracy": 0.7825681924819946, | |
| "num_tokens": 4542358.0, | |
| "step": 4100 | |
| }, | |
| { | |
| "epoch": 0.8281281482974008, | |
| "grad_norm": 11.25, | |
| "learning_rate": 1.4480488951574989e-05, | |
| "loss": 0.9354, | |
| "mean_token_accuracy": 0.7727735102176666, | |
| "num_tokens": 4553817.0, | |
| "step": 4110 | |
| }, | |
| { | |
| "epoch": 0.8301430586338908, | |
| "grad_norm": 10.75, | |
| "learning_rate": 1.446705621599839e-05, | |
| "loss": 0.7511, | |
| "mean_token_accuracy": 0.8066883027553559, | |
| "num_tokens": 4563944.0, | |
| "step": 4120 | |
| }, | |
| { | |
| "epoch": 0.8321579689703809, | |
| "grad_norm": 12.75, | |
| "learning_rate": 1.445362348042179e-05, | |
| "loss": 0.8766, | |
| "mean_token_accuracy": 0.7842302858829499, | |
| "num_tokens": 4574025.0, | |
| "step": 4130 | |
| }, | |
| { | |
| "epoch": 0.8341728793068709, | |
| "grad_norm": 11.1875, | |
| "learning_rate": 1.4440190744845189e-05, | |
| "loss": 0.8223, | |
| "mean_token_accuracy": 0.8018840789794922, | |
| "num_tokens": 4584261.0, | |
| "step": 4140 | |
| }, | |
| { | |
| "epoch": 0.8361877896433608, | |
| "grad_norm": 13.375, | |
| "learning_rate": 1.442675800926859e-05, | |
| "loss": 0.7998, | |
| "mean_token_accuracy": 0.7976557493209839, | |
| "num_tokens": 4594866.0, | |
| "step": 4150 | |
| }, | |
| { | |
| "epoch": 0.8382026999798509, | |
| "grad_norm": 12.0625, | |
| "learning_rate": 1.441332527369199e-05, | |
| "loss": 0.9108, | |
| "mean_token_accuracy": 0.7819365322589874, | |
| "num_tokens": 4605638.0, | |
| "step": 4160 | |
| }, | |
| { | |
| "epoch": 0.8402176103163409, | |
| "grad_norm": 11.125, | |
| "learning_rate": 1.4399892538115387e-05, | |
| "loss": 0.8475, | |
| "mean_token_accuracy": 0.7921322703361511, | |
| "num_tokens": 4617406.0, | |
| "step": 4170 | |
| }, | |
| { | |
| "epoch": 0.8422325206528309, | |
| "grad_norm": 12.875, | |
| "learning_rate": 1.4386459802538787e-05, | |
| "loss": 0.9156, | |
| "mean_token_accuracy": 0.7791644930839539, | |
| "num_tokens": 4628983.0, | |
| "step": 4180 | |
| }, | |
| { | |
| "epoch": 0.844247430989321, | |
| "grad_norm": 12.75, | |
| "learning_rate": 1.4373027066962188e-05, | |
| "loss": 0.8978, | |
| "mean_token_accuracy": 0.7813169062137604, | |
| "num_tokens": 4640524.0, | |
| "step": 4190 | |
| }, | |
| { | |
| "epoch": 0.846262341325811, | |
| "grad_norm": 12.25, | |
| "learning_rate": 1.4359594331385587e-05, | |
| "loss": 0.8205, | |
| "mean_token_accuracy": 0.7985609114170075, | |
| "num_tokens": 4650666.0, | |
| "step": 4200 | |
| }, | |
| { | |
| "epoch": 0.8482772516623011, | |
| "grad_norm": 10.625, | |
| "learning_rate": 1.4346161595808987e-05, | |
| "loss": 0.9088, | |
| "mean_token_accuracy": 0.7806391000747681, | |
| "num_tokens": 4662823.0, | |
| "step": 4210 | |
| }, | |
| { | |
| "epoch": 0.8502921619987911, | |
| "grad_norm": 10.8125, | |
| "learning_rate": 1.4332728860232388e-05, | |
| "loss": 0.7677, | |
| "mean_token_accuracy": 0.8117966473102569, | |
| "num_tokens": 4672839.0, | |
| "step": 4220 | |
| }, | |
| { | |
| "epoch": 0.8523070723352811, | |
| "grad_norm": 12.1875, | |
| "learning_rate": 1.4319296124655788e-05, | |
| "loss": 0.8908, | |
| "mean_token_accuracy": 0.785522049665451, | |
| "num_tokens": 4684351.0, | |
| "step": 4230 | |
| }, | |
| { | |
| "epoch": 0.8543219826717711, | |
| "grad_norm": 11.6875, | |
| "learning_rate": 1.4305863389079187e-05, | |
| "loss": 0.8259, | |
| "mean_token_accuracy": 0.7945611894130706, | |
| "num_tokens": 4695517.0, | |
| "step": 4240 | |
| }, | |
| { | |
| "epoch": 0.8563368930082611, | |
| "grad_norm": 11.75, | |
| "learning_rate": 1.4292430653502588e-05, | |
| "loss": 0.8182, | |
| "mean_token_accuracy": 0.7957002699375153, | |
| "num_tokens": 4706066.0, | |
| "step": 4250 | |
| }, | |
| { | |
| "epoch": 0.8583518033447511, | |
| "grad_norm": 11.3125, | |
| "learning_rate": 1.4278997917925988e-05, | |
| "loss": 0.8748, | |
| "mean_token_accuracy": 0.7866592228412628, | |
| "num_tokens": 4717128.0, | |
| "step": 4260 | |
| }, | |
| { | |
| "epoch": 0.8603667136812412, | |
| "grad_norm": 12.125, | |
| "learning_rate": 1.4265565182349385e-05, | |
| "loss": 0.847, | |
| "mean_token_accuracy": 0.7929592907428742, | |
| "num_tokens": 4727372.0, | |
| "step": 4270 | |
| }, | |
| { | |
| "epoch": 0.8623816240177312, | |
| "grad_norm": 13.5, | |
| "learning_rate": 1.4252132446772786e-05, | |
| "loss": 0.8527, | |
| "mean_token_accuracy": 0.7962908685207367, | |
| "num_tokens": 4739316.0, | |
| "step": 4280 | |
| }, | |
| { | |
| "epoch": 0.8643965343542213, | |
| "grad_norm": 13.5, | |
| "learning_rate": 1.4238699711196186e-05, | |
| "loss": 0.9276, | |
| "mean_token_accuracy": 0.7727067172527313, | |
| "num_tokens": 4750907.0, | |
| "step": 4290 | |
| }, | |
| { | |
| "epoch": 0.8664114446907113, | |
| "grad_norm": 11.0625, | |
| "learning_rate": 1.4225266975619587e-05, | |
| "loss": 0.8325, | |
| "mean_token_accuracy": 0.7947500467300415, | |
| "num_tokens": 4761642.0, | |
| "step": 4300 | |
| }, | |
| { | |
| "epoch": 0.8684263550272013, | |
| "grad_norm": 12.6875, | |
| "learning_rate": 1.4211834240042986e-05, | |
| "loss": 0.9264, | |
| "mean_token_accuracy": 0.7780522584915162, | |
| "num_tokens": 4772339.0, | |
| "step": 4310 | |
| }, | |
| { | |
| "epoch": 0.8704412653636913, | |
| "grad_norm": 10.9375, | |
| "learning_rate": 1.4198401504466386e-05, | |
| "loss": 0.8859, | |
| "mean_token_accuracy": 0.7833628177642822, | |
| "num_tokens": 4782957.0, | |
| "step": 4320 | |
| }, | |
| { | |
| "epoch": 0.8724561757001813, | |
| "grad_norm": 10.375, | |
| "learning_rate": 1.4184968768889787e-05, | |
| "loss": 0.9069, | |
| "mean_token_accuracy": 0.776193904876709, | |
| "num_tokens": 4794205.0, | |
| "step": 4330 | |
| }, | |
| { | |
| "epoch": 0.8744710860366713, | |
| "grad_norm": 10.4375, | |
| "learning_rate": 1.4171536033313184e-05, | |
| "loss": 0.8802, | |
| "mean_token_accuracy": 0.7828757107257843, | |
| "num_tokens": 4805198.0, | |
| "step": 4340 | |
| }, | |
| { | |
| "epoch": 0.8764859963731614, | |
| "grad_norm": 10.8125, | |
| "learning_rate": 1.4158103297736585e-05, | |
| "loss": 0.8658, | |
| "mean_token_accuracy": 0.7872898876667023, | |
| "num_tokens": 4815383.0, | |
| "step": 4350 | |
| }, | |
| { | |
| "epoch": 0.8785009067096514, | |
| "grad_norm": 11.3125, | |
| "learning_rate": 1.4144670562159985e-05, | |
| "loss": 0.8317, | |
| "mean_token_accuracy": 0.7920398592948914, | |
| "num_tokens": 4825473.0, | |
| "step": 4360 | |
| }, | |
| { | |
| "epoch": 0.8805158170461415, | |
| "grad_norm": 12.625, | |
| "learning_rate": 1.4131237826583384e-05, | |
| "loss": 0.8622, | |
| "mean_token_accuracy": 0.7888808488845825, | |
| "num_tokens": 4836736.0, | |
| "step": 4370 | |
| }, | |
| { | |
| "epoch": 0.8825307273826315, | |
| "grad_norm": 10.625, | |
| "learning_rate": 1.4117805091006784e-05, | |
| "loss": 0.9218, | |
| "mean_token_accuracy": 0.7805340230464936, | |
| "num_tokens": 4847997.0, | |
| "step": 4380 | |
| }, | |
| { | |
| "epoch": 0.8845456377191215, | |
| "grad_norm": 10.8125, | |
| "learning_rate": 1.4104372355430185e-05, | |
| "loss": 0.8248, | |
| "mean_token_accuracy": 0.7985675752162933, | |
| "num_tokens": 4858799.0, | |
| "step": 4390 | |
| }, | |
| { | |
| "epoch": 0.8865605480556116, | |
| "grad_norm": 12.4375, | |
| "learning_rate": 1.4090939619853585e-05, | |
| "loss": 0.8753, | |
| "mean_token_accuracy": 0.7870100736618042, | |
| "num_tokens": 4870150.0, | |
| "step": 4400 | |
| }, | |
| { | |
| "epoch": 0.8885754583921015, | |
| "grad_norm": 11.0, | |
| "learning_rate": 1.4077506884276984e-05, | |
| "loss": 0.858, | |
| "mean_token_accuracy": 0.7853298187255859, | |
| "num_tokens": 4881976.0, | |
| "step": 4410 | |
| }, | |
| { | |
| "epoch": 0.8905903687285915, | |
| "grad_norm": 10.625, | |
| "learning_rate": 1.4064074148700385e-05, | |
| "loss": 0.827, | |
| "mean_token_accuracy": 0.7916330635547638, | |
| "num_tokens": 4893088.0, | |
| "step": 4420 | |
| }, | |
| { | |
| "epoch": 0.8926052790650816, | |
| "grad_norm": 11.4375, | |
| "learning_rate": 1.4050641413123784e-05, | |
| "loss": 0.918, | |
| "mean_token_accuracy": 0.7799353480339051, | |
| "num_tokens": 4904993.0, | |
| "step": 4430 | |
| }, | |
| { | |
| "epoch": 0.8946201894015716, | |
| "grad_norm": 12.0625, | |
| "learning_rate": 1.4037208677547183e-05, | |
| "loss": 0.8315, | |
| "mean_token_accuracy": 0.7943594753742218, | |
| "num_tokens": 4915216.0, | |
| "step": 4440 | |
| }, | |
| { | |
| "epoch": 0.8966350997380617, | |
| "grad_norm": 12.0625, | |
| "learning_rate": 1.4023775941970583e-05, | |
| "loss": 0.9096, | |
| "mean_token_accuracy": 0.7769247710704803, | |
| "num_tokens": 4925551.0, | |
| "step": 4450 | |
| }, | |
| { | |
| "epoch": 0.8986500100745517, | |
| "grad_norm": 11.6875, | |
| "learning_rate": 1.4010343206393984e-05, | |
| "loss": 0.8555, | |
| "mean_token_accuracy": 0.7878253519535064, | |
| "num_tokens": 4936738.0, | |
| "step": 4460 | |
| }, | |
| { | |
| "epoch": 0.9006649204110417, | |
| "grad_norm": 11.8125, | |
| "learning_rate": 1.3996910470817384e-05, | |
| "loss": 0.8728, | |
| "mean_token_accuracy": 0.7859396934509277, | |
| "num_tokens": 4947725.0, | |
| "step": 4470 | |
| }, | |
| { | |
| "epoch": 0.9026798307475318, | |
| "grad_norm": 13.8125, | |
| "learning_rate": 1.3983477735240783e-05, | |
| "loss": 0.7828, | |
| "mean_token_accuracy": 0.7994441330432892, | |
| "num_tokens": 4958978.0, | |
| "step": 4480 | |
| }, | |
| { | |
| "epoch": 0.9046947410840218, | |
| "grad_norm": 10.0625, | |
| "learning_rate": 1.3970044999664183e-05, | |
| "loss": 0.856, | |
| "mean_token_accuracy": 0.7900491297245026, | |
| "num_tokens": 4970121.0, | |
| "step": 4490 | |
| }, | |
| { | |
| "epoch": 0.9067096514205117, | |
| "grad_norm": 8.0625, | |
| "learning_rate": 1.3956612264087584e-05, | |
| "loss": 0.7457, | |
| "mean_token_accuracy": 0.8125901579856872, | |
| "num_tokens": 4982012.0, | |
| "step": 4500 | |
| }, | |
| { | |
| "epoch": 0.9087245617570018, | |
| "grad_norm": 10.875, | |
| "learning_rate": 1.3943179528510981e-05, | |
| "loss": 0.9166, | |
| "mean_token_accuracy": 0.7789366781711579, | |
| "num_tokens": 4992691.0, | |
| "step": 4510 | |
| }, | |
| { | |
| "epoch": 0.9107394720934918, | |
| "grad_norm": 10.6875, | |
| "learning_rate": 1.3929746792934382e-05, | |
| "loss": 0.8536, | |
| "mean_token_accuracy": 0.7905638337135314, | |
| "num_tokens": 5004578.0, | |
| "step": 4520 | |
| }, | |
| { | |
| "epoch": 0.9127543824299819, | |
| "grad_norm": 14.25, | |
| "learning_rate": 1.3916314057357782e-05, | |
| "loss": 0.976, | |
| "mean_token_accuracy": 0.7722863137722016, | |
| "num_tokens": 5015364.0, | |
| "step": 4530 | |
| }, | |
| { | |
| "epoch": 0.9147692927664719, | |
| "grad_norm": 12.9375, | |
| "learning_rate": 1.3902881321781181e-05, | |
| "loss": 0.8254, | |
| "mean_token_accuracy": 0.7985598504543304, | |
| "num_tokens": 5026777.0, | |
| "step": 4540 | |
| }, | |
| { | |
| "epoch": 0.9167842031029619, | |
| "grad_norm": 14.3125, | |
| "learning_rate": 1.3889448586204582e-05, | |
| "loss": 0.8435, | |
| "mean_token_accuracy": 0.7964738607406616, | |
| "num_tokens": 5038156.0, | |
| "step": 4550 | |
| }, | |
| { | |
| "epoch": 0.918799113439452, | |
| "grad_norm": 10.8125, | |
| "learning_rate": 1.3876015850627982e-05, | |
| "loss": 0.9532, | |
| "mean_token_accuracy": 0.7702659487724304, | |
| "num_tokens": 5049182.0, | |
| "step": 4560 | |
| }, | |
| { | |
| "epoch": 0.920814023775942, | |
| "grad_norm": 10.375, | |
| "learning_rate": 1.3862583115051383e-05, | |
| "loss": 0.749, | |
| "mean_token_accuracy": 0.8094150185585022, | |
| "num_tokens": 5060508.0, | |
| "step": 4570 | |
| }, | |
| { | |
| "epoch": 0.9228289341124319, | |
| "grad_norm": 11.1875, | |
| "learning_rate": 1.384915037947478e-05, | |
| "loss": 0.9101, | |
| "mean_token_accuracy": 0.780214524269104, | |
| "num_tokens": 5072338.0, | |
| "step": 4580 | |
| }, | |
| { | |
| "epoch": 0.924843844448922, | |
| "grad_norm": 11.0625, | |
| "learning_rate": 1.383571764389818e-05, | |
| "loss": 0.8272, | |
| "mean_token_accuracy": 0.7955898463726043, | |
| "num_tokens": 5083561.0, | |
| "step": 4590 | |
| }, | |
| { | |
| "epoch": 0.926858754785412, | |
| "grad_norm": 11.125, | |
| "learning_rate": 1.382228490832158e-05, | |
| "loss": 0.8138, | |
| "mean_token_accuracy": 0.797929847240448, | |
| "num_tokens": 5095032.0, | |
| "step": 4600 | |
| }, | |
| { | |
| "epoch": 0.9288736651219021, | |
| "grad_norm": 13.5, | |
| "learning_rate": 1.380885217274498e-05, | |
| "loss": 0.8166, | |
| "mean_token_accuracy": 0.7943983316421509, | |
| "num_tokens": 5105707.0, | |
| "step": 4610 | |
| }, | |
| { | |
| "epoch": 0.9308885754583921, | |
| "grad_norm": 12.0625, | |
| "learning_rate": 1.379541943716838e-05, | |
| "loss": 0.8926, | |
| "mean_token_accuracy": 0.7872261703014374, | |
| "num_tokens": 5116277.0, | |
| "step": 4620 | |
| }, | |
| { | |
| "epoch": 0.9329034857948821, | |
| "grad_norm": 10.125, | |
| "learning_rate": 1.378198670159178e-05, | |
| "loss": 0.8729, | |
| "mean_token_accuracy": 0.7849249064922332, | |
| "num_tokens": 5128524.0, | |
| "step": 4630 | |
| }, | |
| { | |
| "epoch": 0.9349183961313722, | |
| "grad_norm": 10.625, | |
| "learning_rate": 1.376855396601518e-05, | |
| "loss": 0.8558, | |
| "mean_token_accuracy": 0.7900417923927308, | |
| "num_tokens": 5139328.0, | |
| "step": 4640 | |
| }, | |
| { | |
| "epoch": 0.9369333064678622, | |
| "grad_norm": 10.625, | |
| "learning_rate": 1.375512123043858e-05, | |
| "loss": 0.8806, | |
| "mean_token_accuracy": 0.7793081521987915, | |
| "num_tokens": 5152047.0, | |
| "step": 4650 | |
| }, | |
| { | |
| "epoch": 0.9389482168043523, | |
| "grad_norm": 10.6875, | |
| "learning_rate": 1.374168849486198e-05, | |
| "loss": 0.9167, | |
| "mean_token_accuracy": 0.7699385344982147, | |
| "num_tokens": 5163236.0, | |
| "step": 4660 | |
| }, | |
| { | |
| "epoch": 0.9409631271408422, | |
| "grad_norm": 12.6875, | |
| "learning_rate": 1.3728255759285381e-05, | |
| "loss": 0.778, | |
| "mean_token_accuracy": 0.8066167533397675, | |
| "num_tokens": 5173182.0, | |
| "step": 4670 | |
| }, | |
| { | |
| "epoch": 0.9429780374773322, | |
| "grad_norm": 10.8125, | |
| "learning_rate": 1.3714823023708778e-05, | |
| "loss": 0.8731, | |
| "mean_token_accuracy": 0.7907391846179962, | |
| "num_tokens": 5184212.0, | |
| "step": 4680 | |
| }, | |
| { | |
| "epoch": 0.9449929478138223, | |
| "grad_norm": 14.1875, | |
| "learning_rate": 1.3701390288132179e-05, | |
| "loss": 0.8561, | |
| "mean_token_accuracy": 0.7872300326824189, | |
| "num_tokens": 5195178.0, | |
| "step": 4690 | |
| }, | |
| { | |
| "epoch": 0.9470078581503123, | |
| "grad_norm": 10.0625, | |
| "learning_rate": 1.368795755255558e-05, | |
| "loss": 0.8959, | |
| "mean_token_accuracy": 0.785253643989563, | |
| "num_tokens": 5206174.0, | |
| "step": 4700 | |
| }, | |
| { | |
| "epoch": 0.9490227684868023, | |
| "grad_norm": 10.1875, | |
| "learning_rate": 1.3674524816978978e-05, | |
| "loss": 0.82, | |
| "mean_token_accuracy": 0.8018651187419892, | |
| "num_tokens": 5216839.0, | |
| "step": 4710 | |
| }, | |
| { | |
| "epoch": 0.9510376788232924, | |
| "grad_norm": 12.375, | |
| "learning_rate": 1.3661092081402379e-05, | |
| "loss": 1.0637, | |
| "mean_token_accuracy": 0.7501500964164733, | |
| "num_tokens": 5228271.0, | |
| "step": 4720 | |
| }, | |
| { | |
| "epoch": 0.9530525891597824, | |
| "grad_norm": 12.625, | |
| "learning_rate": 1.364765934582578e-05, | |
| "loss": 0.9295, | |
| "mean_token_accuracy": 0.7782152414321899, | |
| "num_tokens": 5238828.0, | |
| "step": 4730 | |
| }, | |
| { | |
| "epoch": 0.9550674994962725, | |
| "grad_norm": 12.0625, | |
| "learning_rate": 1.363422661024918e-05, | |
| "loss": 0.9794, | |
| "mean_token_accuracy": 0.7655089437961579, | |
| "num_tokens": 5250865.0, | |
| "step": 4740 | |
| }, | |
| { | |
| "epoch": 0.9570824098327625, | |
| "grad_norm": 12.75, | |
| "learning_rate": 1.3620793874672577e-05, | |
| "loss": 0.8559, | |
| "mean_token_accuracy": 0.783417934179306, | |
| "num_tokens": 5261161.0, | |
| "step": 4750 | |
| }, | |
| { | |
| "epoch": 0.9590973201692524, | |
| "grad_norm": 13.125, | |
| "learning_rate": 1.3607361139095977e-05, | |
| "loss": 0.8129, | |
| "mean_token_accuracy": 0.8023806989192963, | |
| "num_tokens": 5271735.0, | |
| "step": 4760 | |
| }, | |
| { | |
| "epoch": 0.9611122305057425, | |
| "grad_norm": 10.3125, | |
| "learning_rate": 1.3593928403519378e-05, | |
| "loss": 0.9024, | |
| "mean_token_accuracy": 0.7816856026649475, | |
| "num_tokens": 5282948.0, | |
| "step": 4770 | |
| }, | |
| { | |
| "epoch": 0.9631271408422325, | |
| "grad_norm": 16.375, | |
| "learning_rate": 1.3580495667942777e-05, | |
| "loss": 0.7899, | |
| "mean_token_accuracy": 0.8060350120067596, | |
| "num_tokens": 5293161.0, | |
| "step": 4780 | |
| }, | |
| { | |
| "epoch": 0.9651420511787225, | |
| "grad_norm": 12.0625, | |
| "learning_rate": 1.3567062932366177e-05, | |
| "loss": 0.9497, | |
| "mean_token_accuracy": 0.7778710544109344, | |
| "num_tokens": 5303823.0, | |
| "step": 4790 | |
| }, | |
| { | |
| "epoch": 0.9671569615152126, | |
| "grad_norm": 12.375, | |
| "learning_rate": 1.3553630196789578e-05, | |
| "loss": 0.8374, | |
| "mean_token_accuracy": 0.7918814778327942, | |
| "num_tokens": 5314457.0, | |
| "step": 4800 | |
| }, | |
| { | |
| "epoch": 0.9691718718517026, | |
| "grad_norm": 13.4375, | |
| "learning_rate": 1.3540197461212977e-05, | |
| "loss": 1.0195, | |
| "mean_token_accuracy": 0.7530766189098358, | |
| "num_tokens": 5324766.0, | |
| "step": 4810 | |
| }, | |
| { | |
| "epoch": 0.9711867821881927, | |
| "grad_norm": 12.8125, | |
| "learning_rate": 1.3526764725636377e-05, | |
| "loss": 0.8813, | |
| "mean_token_accuracy": 0.7811478495597839, | |
| "num_tokens": 5336093.0, | |
| "step": 4820 | |
| }, | |
| { | |
| "epoch": 0.9732016925246827, | |
| "grad_norm": 10.4375, | |
| "learning_rate": 1.3513331990059778e-05, | |
| "loss": 0.8947, | |
| "mean_token_accuracy": 0.7876034200191497, | |
| "num_tokens": 5346706.0, | |
| "step": 4830 | |
| }, | |
| { | |
| "epoch": 0.9752166028611727, | |
| "grad_norm": 15.25, | |
| "learning_rate": 1.3499899254483178e-05, | |
| "loss": 0.8773, | |
| "mean_token_accuracy": 0.7921059668064118, | |
| "num_tokens": 5357534.0, | |
| "step": 4840 | |
| }, | |
| { | |
| "epoch": 0.9772315131976627, | |
| "grad_norm": 10.75, | |
| "learning_rate": 1.3486466518906575e-05, | |
| "loss": 0.895, | |
| "mean_token_accuracy": 0.7818942189216613, | |
| "num_tokens": 5370003.0, | |
| "step": 4850 | |
| }, | |
| { | |
| "epoch": 0.9792464235341527, | |
| "grad_norm": 11.3125, | |
| "learning_rate": 1.3473033783329976e-05, | |
| "loss": 0.7589, | |
| "mean_token_accuracy": 0.8098963499069214, | |
| "num_tokens": 5381355.0, | |
| "step": 4860 | |
| }, | |
| { | |
| "epoch": 0.9812613338706427, | |
| "grad_norm": 12.0625, | |
| "learning_rate": 1.3459601047753376e-05, | |
| "loss": 0.793, | |
| "mean_token_accuracy": 0.8019460260868072, | |
| "num_tokens": 5392541.0, | |
| "step": 4870 | |
| }, | |
| { | |
| "epoch": 0.9832762442071328, | |
| "grad_norm": 14.5, | |
| "learning_rate": 1.3446168312176775e-05, | |
| "loss": 0.9046, | |
| "mean_token_accuracy": 0.781462025642395, | |
| "num_tokens": 5403838.0, | |
| "step": 4880 | |
| }, | |
| { | |
| "epoch": 0.9852911545436228, | |
| "grad_norm": 11.25, | |
| "learning_rate": 1.3432735576600176e-05, | |
| "loss": 0.9039, | |
| "mean_token_accuracy": 0.7840433418750763, | |
| "num_tokens": 5414995.0, | |
| "step": 4890 | |
| }, | |
| { | |
| "epoch": 0.9873060648801129, | |
| "grad_norm": 9.5, | |
| "learning_rate": 1.3419302841023576e-05, | |
| "loss": 0.8909, | |
| "mean_token_accuracy": 0.7859670460224152, | |
| "num_tokens": 5426001.0, | |
| "step": 4900 | |
| }, | |
| { | |
| "epoch": 0.9893209752166029, | |
| "grad_norm": 13.375, | |
| "learning_rate": 1.3405870105446977e-05, | |
| "loss": 0.8171, | |
| "mean_token_accuracy": 0.7865999937057495, | |
| "num_tokens": 5438081.0, | |
| "step": 4910 | |
| }, | |
| { | |
| "epoch": 0.9913358855530929, | |
| "grad_norm": 14.3125, | |
| "learning_rate": 1.3392437369870374e-05, | |
| "loss": 0.9477, | |
| "mean_token_accuracy": 0.773062938451767, | |
| "num_tokens": 5449464.0, | |
| "step": 4920 | |
| }, | |
| { | |
| "epoch": 0.9933507958895829, | |
| "grad_norm": 11.3125, | |
| "learning_rate": 1.3379004634293775e-05, | |
| "loss": 0.9291, | |
| "mean_token_accuracy": 0.7776412189006805, | |
| "num_tokens": 5461080.0, | |
| "step": 4930 | |
| }, | |
| { | |
| "epoch": 0.9953657062260729, | |
| "grad_norm": 11.625, | |
| "learning_rate": 1.3365571898717175e-05, | |
| "loss": 0.8828, | |
| "mean_token_accuracy": 0.7811066091060639, | |
| "num_tokens": 5472144.0, | |
| "step": 4940 | |
| }, | |
| { | |
| "epoch": 0.9973806165625629, | |
| "grad_norm": 9.875, | |
| "learning_rate": 1.3352139163140574e-05, | |
| "loss": 0.8439, | |
| "mean_token_accuracy": 0.7922865450382233, | |
| "num_tokens": 5484117.0, | |
| "step": 4950 | |
| }, | |
| { | |
| "epoch": 0.999395526899053, | |
| "grad_norm": 10.5, | |
| "learning_rate": 1.3338706427563974e-05, | |
| "loss": 0.8985, | |
| "mean_token_accuracy": 0.7755892872810364, | |
| "num_tokens": 5495916.0, | |
| "step": 4960 | |
| }, | |
| { | |
| "epoch": 1.001410437235543, | |
| "grad_norm": 12.875, | |
| "learning_rate": 1.3325273691987375e-05, | |
| "loss": 0.8131, | |
| "mean_token_accuracy": 0.7963548183441163, | |
| "num_tokens": 5506891.0, | |
| "step": 4970 | |
| }, | |
| { | |
| "epoch": 1.003425347572033, | |
| "grad_norm": 10.3125, | |
| "learning_rate": 1.3311840956410774e-05, | |
| "loss": 0.7879, | |
| "mean_token_accuracy": 0.7989233016967774, | |
| "num_tokens": 5519454.0, | |
| "step": 4980 | |
| }, | |
| { | |
| "epoch": 1.005440257908523, | |
| "grad_norm": 11.875, | |
| "learning_rate": 1.3298408220834174e-05, | |
| "loss": 0.7878, | |
| "mean_token_accuracy": 0.8067593216896057, | |
| "num_tokens": 5529707.0, | |
| "step": 4990 | |
| }, | |
| { | |
| "epoch": 1.007455168245013, | |
| "grad_norm": 11.1875, | |
| "learning_rate": 1.3284975485257575e-05, | |
| "loss": 0.7955, | |
| "mean_token_accuracy": 0.800259780883789, | |
| "num_tokens": 5541015.0, | |
| "step": 5000 | |
| }, | |
| { | |
| "epoch": 1.0094700785815032, | |
| "grad_norm": 9.875, | |
| "learning_rate": 1.3271542749680975e-05, | |
| "loss": 0.9383, | |
| "mean_token_accuracy": 0.7706878125667572, | |
| "num_tokens": 5553090.0, | |
| "step": 5010 | |
| }, | |
| { | |
| "epoch": 1.011484988917993, | |
| "grad_norm": 12.0625, | |
| "learning_rate": 1.3258110014104373e-05, | |
| "loss": 0.8471, | |
| "mean_token_accuracy": 0.7942995607852936, | |
| "num_tokens": 5564220.0, | |
| "step": 5020 | |
| }, | |
| { | |
| "epoch": 1.0134998992544832, | |
| "grad_norm": 10.375, | |
| "learning_rate": 1.3244677278527773e-05, | |
| "loss": 0.8018, | |
| "mean_token_accuracy": 0.7939584195613861, | |
| "num_tokens": 5574966.0, | |
| "step": 5030 | |
| }, | |
| { | |
| "epoch": 1.0155148095909732, | |
| "grad_norm": 12.625, | |
| "learning_rate": 1.3231244542951174e-05, | |
| "loss": 0.9348, | |
| "mean_token_accuracy": 0.7773958921432496, | |
| "num_tokens": 5586140.0, | |
| "step": 5040 | |
| }, | |
| { | |
| "epoch": 1.0175297199274633, | |
| "grad_norm": 9.625, | |
| "learning_rate": 1.3217811807374572e-05, | |
| "loss": 0.8882, | |
| "mean_token_accuracy": 0.7792610108852387, | |
| "num_tokens": 5597440.0, | |
| "step": 5050 | |
| }, | |
| { | |
| "epoch": 1.0195446302639533, | |
| "grad_norm": 11.75, | |
| "learning_rate": 1.3204379071797973e-05, | |
| "loss": 0.7882, | |
| "mean_token_accuracy": 0.8046412229537964, | |
| "num_tokens": 5609321.0, | |
| "step": 5060 | |
| }, | |
| { | |
| "epoch": 1.0215595406004432, | |
| "grad_norm": 9.4375, | |
| "learning_rate": 1.3190946336221373e-05, | |
| "loss": 0.8062, | |
| "mean_token_accuracy": 0.7952991247177124, | |
| "num_tokens": 5619194.0, | |
| "step": 5070 | |
| }, | |
| { | |
| "epoch": 1.0235744509369333, | |
| "grad_norm": 12.4375, | |
| "learning_rate": 1.3177513600644774e-05, | |
| "loss": 0.921, | |
| "mean_token_accuracy": 0.7800089240074157, | |
| "num_tokens": 5631065.0, | |
| "step": 5080 | |
| }, | |
| { | |
| "epoch": 1.0255893612734233, | |
| "grad_norm": 10.875, | |
| "learning_rate": 1.3164080865068171e-05, | |
| "loss": 0.799, | |
| "mean_token_accuracy": 0.8071331679821014, | |
| "num_tokens": 5642580.0, | |
| "step": 5090 | |
| }, | |
| { | |
| "epoch": 1.0276042716099134, | |
| "grad_norm": 10.1875, | |
| "learning_rate": 1.3150648129491572e-05, | |
| "loss": 0.7776, | |
| "mean_token_accuracy": 0.8046740829944611, | |
| "num_tokens": 5651910.0, | |
| "step": 5100 | |
| }, | |
| { | |
| "epoch": 1.0296191819464033, | |
| "grad_norm": 14.0, | |
| "learning_rate": 1.3137215393914972e-05, | |
| "loss": 0.8056, | |
| "mean_token_accuracy": 0.8012421131134033, | |
| "num_tokens": 5663726.0, | |
| "step": 5110 | |
| }, | |
| { | |
| "epoch": 1.0316340922828935, | |
| "grad_norm": 11.5, | |
| "learning_rate": 1.3123782658338371e-05, | |
| "loss": 0.7681, | |
| "mean_token_accuracy": 0.8097535610198975, | |
| "num_tokens": 5675558.0, | |
| "step": 5120 | |
| }, | |
| { | |
| "epoch": 1.0336490026193834, | |
| "grad_norm": 11.1875, | |
| "learning_rate": 1.3110349922761772e-05, | |
| "loss": 0.8813, | |
| "mean_token_accuracy": 0.7838487148284912, | |
| "num_tokens": 5687969.0, | |
| "step": 5130 | |
| }, | |
| { | |
| "epoch": 1.0356639129558736, | |
| "grad_norm": 9.3125, | |
| "learning_rate": 1.3096917187185172e-05, | |
| "loss": 0.9072, | |
| "mean_token_accuracy": 0.7834949135780335, | |
| "num_tokens": 5700354.0, | |
| "step": 5140 | |
| }, | |
| { | |
| "epoch": 1.0376788232923635, | |
| "grad_norm": 14.6875, | |
| "learning_rate": 1.3083484451608571e-05, | |
| "loss": 0.903, | |
| "mean_token_accuracy": 0.7816505491733551, | |
| "num_tokens": 5711090.0, | |
| "step": 5150 | |
| }, | |
| { | |
| "epoch": 1.0396937336288534, | |
| "grad_norm": 8.9375, | |
| "learning_rate": 1.3070051716031971e-05, | |
| "loss": 0.7961, | |
| "mean_token_accuracy": 0.8029458582401275, | |
| "num_tokens": 5721667.0, | |
| "step": 5160 | |
| }, | |
| { | |
| "epoch": 1.0417086439653436, | |
| "grad_norm": 10.8125, | |
| "learning_rate": 1.305661898045537e-05, | |
| "loss": 0.8394, | |
| "mean_token_accuracy": 0.7979920387268067, | |
| "num_tokens": 5733015.0, | |
| "step": 5170 | |
| }, | |
| { | |
| "epoch": 1.0437235543018335, | |
| "grad_norm": 11.1875, | |
| "learning_rate": 1.304318624487877e-05, | |
| "loss": 0.8749, | |
| "mean_token_accuracy": 0.7899072051048279, | |
| "num_tokens": 5743473.0, | |
| "step": 5180 | |
| }, | |
| { | |
| "epoch": 1.0457384646383237, | |
| "grad_norm": 11.0625, | |
| "learning_rate": 1.302975350930217e-05, | |
| "loss": 0.8553, | |
| "mean_token_accuracy": 0.7900504052639008, | |
| "num_tokens": 5754579.0, | |
| "step": 5190 | |
| }, | |
| { | |
| "epoch": 1.0477533749748136, | |
| "grad_norm": 10.9375, | |
| "learning_rate": 1.301632077372557e-05, | |
| "loss": 0.8735, | |
| "mean_token_accuracy": 0.7891764640808105, | |
| "num_tokens": 5765340.0, | |
| "step": 5200 | |
| }, | |
| { | |
| "epoch": 1.0497682853113037, | |
| "grad_norm": 9.0, | |
| "learning_rate": 1.300288803814897e-05, | |
| "loss": 0.7709, | |
| "mean_token_accuracy": 0.8050879895687103, | |
| "num_tokens": 5775710.0, | |
| "step": 5210 | |
| }, | |
| { | |
| "epoch": 1.0517831956477937, | |
| "grad_norm": 18.25, | |
| "learning_rate": 1.298945530257237e-05, | |
| "loss": 0.7335, | |
| "mean_token_accuracy": 0.8071872234344483, | |
| "num_tokens": 5785996.0, | |
| "step": 5220 | |
| }, | |
| { | |
| "epoch": 1.0537981059842838, | |
| "grad_norm": 13.375, | |
| "learning_rate": 1.297602256699577e-05, | |
| "loss": 0.877, | |
| "mean_token_accuracy": 0.7817419946193696, | |
| "num_tokens": 5796629.0, | |
| "step": 5230 | |
| }, | |
| { | |
| "epoch": 1.0558130163207737, | |
| "grad_norm": 11.1875, | |
| "learning_rate": 1.296258983141917e-05, | |
| "loss": 0.7858, | |
| "mean_token_accuracy": 0.8022194325923919, | |
| "num_tokens": 5806790.0, | |
| "step": 5240 | |
| }, | |
| { | |
| "epoch": 1.0578279266572637, | |
| "grad_norm": 11.1875, | |
| "learning_rate": 1.2949157095842568e-05, | |
| "loss": 0.8409, | |
| "mean_token_accuracy": 0.7854238629341126, | |
| "num_tokens": 5818974.0, | |
| "step": 5250 | |
| }, | |
| { | |
| "epoch": 1.0598428369937538, | |
| "grad_norm": 13.0, | |
| "learning_rate": 1.2935724360265968e-05, | |
| "loss": 0.7023, | |
| "mean_token_accuracy": 0.8206122577190399, | |
| "num_tokens": 5828962.0, | |
| "step": 5260 | |
| }, | |
| { | |
| "epoch": 1.0618577473302437, | |
| "grad_norm": 12.75, | |
| "learning_rate": 1.2922291624689369e-05, | |
| "loss": 0.8116, | |
| "mean_token_accuracy": 0.7957081377506257, | |
| "num_tokens": 5840475.0, | |
| "step": 5270 | |
| }, | |
| { | |
| "epoch": 1.063872657666734, | |
| "grad_norm": 12.375, | |
| "learning_rate": 1.290885888911277e-05, | |
| "loss": 0.876, | |
| "mean_token_accuracy": 0.7848715245723724, | |
| "num_tokens": 5851626.0, | |
| "step": 5280 | |
| }, | |
| { | |
| "epoch": 1.0658875680032238, | |
| "grad_norm": 12.1875, | |
| "learning_rate": 1.2895426153536168e-05, | |
| "loss": 0.8648, | |
| "mean_token_accuracy": 0.7879779160022735, | |
| "num_tokens": 5861745.0, | |
| "step": 5290 | |
| }, | |
| { | |
| "epoch": 1.067902478339714, | |
| "grad_norm": 11.5625, | |
| "learning_rate": 1.2881993417959569e-05, | |
| "loss": 0.7807, | |
| "mean_token_accuracy": 0.8065967261791229, | |
| "num_tokens": 5871744.0, | |
| "step": 5300 | |
| }, | |
| { | |
| "epoch": 1.069917388676204, | |
| "grad_norm": 11.875, | |
| "learning_rate": 1.286856068238297e-05, | |
| "loss": 0.8184, | |
| "mean_token_accuracy": 0.7950898349285126, | |
| "num_tokens": 5882570.0, | |
| "step": 5310 | |
| }, | |
| { | |
| "epoch": 1.071932299012694, | |
| "grad_norm": 12.125, | |
| "learning_rate": 1.2855127946806366e-05, | |
| "loss": 0.7624, | |
| "mean_token_accuracy": 0.8084113836288452, | |
| "num_tokens": 5893477.0, | |
| "step": 5320 | |
| }, | |
| { | |
| "epoch": 1.073947209349184, | |
| "grad_norm": 12.0625, | |
| "learning_rate": 1.2841695211229767e-05, | |
| "loss": 0.8525, | |
| "mean_token_accuracy": 0.8004627406597138, | |
| "num_tokens": 5906228.0, | |
| "step": 5330 | |
| }, | |
| { | |
| "epoch": 1.075962119685674, | |
| "grad_norm": 10.1875, | |
| "learning_rate": 1.2828262475653167e-05, | |
| "loss": 0.7381, | |
| "mean_token_accuracy": 0.815189528465271, | |
| "num_tokens": 5917163.0, | |
| "step": 5340 | |
| }, | |
| { | |
| "epoch": 1.077977030022164, | |
| "grad_norm": 13.4375, | |
| "learning_rate": 1.2814829740076568e-05, | |
| "loss": 0.8192, | |
| "mean_token_accuracy": 0.7983390390872955, | |
| "num_tokens": 5927959.0, | |
| "step": 5350 | |
| }, | |
| { | |
| "epoch": 1.079991940358654, | |
| "grad_norm": 11.125, | |
| "learning_rate": 1.2801397004499967e-05, | |
| "loss": 0.8847, | |
| "mean_token_accuracy": 0.7825915396213532, | |
| "num_tokens": 5938684.0, | |
| "step": 5360 | |
| }, | |
| { | |
| "epoch": 1.0820068506951441, | |
| "grad_norm": 11.625, | |
| "learning_rate": 1.2787964268923367e-05, | |
| "loss": 0.8451, | |
| "mean_token_accuracy": 0.7878111064434051, | |
| "num_tokens": 5948765.0, | |
| "step": 5370 | |
| }, | |
| { | |
| "epoch": 1.084021761031634, | |
| "grad_norm": 13.0, | |
| "learning_rate": 1.2774531533346768e-05, | |
| "loss": 0.7971, | |
| "mean_token_accuracy": 0.8030431568622589, | |
| "num_tokens": 5960108.0, | |
| "step": 5380 | |
| }, | |
| { | |
| "epoch": 1.0860366713681242, | |
| "grad_norm": 10.625, | |
| "learning_rate": 1.2761098797770167e-05, | |
| "loss": 0.8786, | |
| "mean_token_accuracy": 0.7854897439479828, | |
| "num_tokens": 5972007.0, | |
| "step": 5390 | |
| }, | |
| { | |
| "epoch": 1.0880515817046141, | |
| "grad_norm": 10.0625, | |
| "learning_rate": 1.2747666062193567e-05, | |
| "loss": 0.8395, | |
| "mean_token_accuracy": 0.7956344962120057, | |
| "num_tokens": 5983211.0, | |
| "step": 5400 | |
| }, | |
| { | |
| "epoch": 1.090066492041104, | |
| "grad_norm": 10.5625, | |
| "learning_rate": 1.2734233326616968e-05, | |
| "loss": 0.9274, | |
| "mean_token_accuracy": 0.7794575989246368, | |
| "num_tokens": 5995219.0, | |
| "step": 5410 | |
| }, | |
| { | |
| "epoch": 1.0920814023775942, | |
| "grad_norm": 13.1875, | |
| "learning_rate": 1.2720800591040365e-05, | |
| "loss": 0.8251, | |
| "mean_token_accuracy": 0.802078241109848, | |
| "num_tokens": 6006324.0, | |
| "step": 5420 | |
| }, | |
| { | |
| "epoch": 1.0940963127140841, | |
| "grad_norm": 14.0625, | |
| "learning_rate": 1.2707367855463765e-05, | |
| "loss": 0.8402, | |
| "mean_token_accuracy": 0.7896000027656556, | |
| "num_tokens": 6017542.0, | |
| "step": 5430 | |
| }, | |
| { | |
| "epoch": 1.0961112230505743, | |
| "grad_norm": 11.8125, | |
| "learning_rate": 1.2693935119887166e-05, | |
| "loss": 0.8307, | |
| "mean_token_accuracy": 0.7981148719787597, | |
| "num_tokens": 6027523.0, | |
| "step": 5440 | |
| }, | |
| { | |
| "epoch": 1.0981261333870642, | |
| "grad_norm": 9.6875, | |
| "learning_rate": 1.2680502384310566e-05, | |
| "loss": 0.866, | |
| "mean_token_accuracy": 0.7834112644195557, | |
| "num_tokens": 6038697.0, | |
| "step": 5450 | |
| }, | |
| { | |
| "epoch": 1.1001410437235544, | |
| "grad_norm": 11.0625, | |
| "learning_rate": 1.2667069648733965e-05, | |
| "loss": 0.793, | |
| "mean_token_accuracy": 0.7983521819114685, | |
| "num_tokens": 6049813.0, | |
| "step": 5460 | |
| }, | |
| { | |
| "epoch": 1.1021559540600443, | |
| "grad_norm": 12.0625, | |
| "learning_rate": 1.2653636913157366e-05, | |
| "loss": 0.7633, | |
| "mean_token_accuracy": 0.811886590719223, | |
| "num_tokens": 6060176.0, | |
| "step": 5470 | |
| }, | |
| { | |
| "epoch": 1.1041708643965344, | |
| "grad_norm": 12.875, | |
| "learning_rate": 1.2640204177580766e-05, | |
| "loss": 0.8755, | |
| "mean_token_accuracy": 0.7823013424873352, | |
| "num_tokens": 6069957.0, | |
| "step": 5480 | |
| }, | |
| { | |
| "epoch": 1.1061857747330244, | |
| "grad_norm": 12.6875, | |
| "learning_rate": 1.2626771442004164e-05, | |
| "loss": 0.8468, | |
| "mean_token_accuracy": 0.7942144453525544, | |
| "num_tokens": 6080224.0, | |
| "step": 5490 | |
| }, | |
| { | |
| "epoch": 1.1082006850695145, | |
| "grad_norm": 10.5, | |
| "learning_rate": 1.2613338706427564e-05, | |
| "loss": 0.8926, | |
| "mean_token_accuracy": 0.7852272689342499, | |
| "num_tokens": 6091516.0, | |
| "step": 5500 | |
| }, | |
| { | |
| "epoch": 1.1102155954060045, | |
| "grad_norm": 13.9375, | |
| "learning_rate": 1.2599905970850965e-05, | |
| "loss": 0.8984, | |
| "mean_token_accuracy": 0.7786856353282928, | |
| "num_tokens": 6101758.0, | |
| "step": 5510 | |
| }, | |
| { | |
| "epoch": 1.1122305057424944, | |
| "grad_norm": 10.5, | |
| "learning_rate": 1.2586473235274365e-05, | |
| "loss": 0.8218, | |
| "mean_token_accuracy": 0.8003330588340759, | |
| "num_tokens": 6112860.0, | |
| "step": 5520 | |
| }, | |
| { | |
| "epoch": 1.1142454160789845, | |
| "grad_norm": 10.25, | |
| "learning_rate": 1.2573040499697764e-05, | |
| "loss": 0.8069, | |
| "mean_token_accuracy": 0.7983652293682099, | |
| "num_tokens": 6124481.0, | |
| "step": 5530 | |
| }, | |
| { | |
| "epoch": 1.1162603264154745, | |
| "grad_norm": 10.5, | |
| "learning_rate": 1.2559607764121164e-05, | |
| "loss": 0.9377, | |
| "mean_token_accuracy": 0.7729220628738404, | |
| "num_tokens": 6137558.0, | |
| "step": 5540 | |
| }, | |
| { | |
| "epoch": 1.1182752367519646, | |
| "grad_norm": 10.0, | |
| "learning_rate": 1.2546175028544565e-05, | |
| "loss": 0.8107, | |
| "mean_token_accuracy": 0.8016826927661895, | |
| "num_tokens": 6149919.0, | |
| "step": 5550 | |
| }, | |
| { | |
| "epoch": 1.1202901470884545, | |
| "grad_norm": 11.875, | |
| "learning_rate": 1.2532742292967964e-05, | |
| "loss": 0.8375, | |
| "mean_token_accuracy": 0.7957591891288758, | |
| "num_tokens": 6160493.0, | |
| "step": 5560 | |
| }, | |
| { | |
| "epoch": 1.1223050574249447, | |
| "grad_norm": 9.25, | |
| "learning_rate": 1.2519309557391364e-05, | |
| "loss": 0.8292, | |
| "mean_token_accuracy": 0.7971819519996644, | |
| "num_tokens": 6172744.0, | |
| "step": 5570 | |
| }, | |
| { | |
| "epoch": 1.1243199677614346, | |
| "grad_norm": 11.0625, | |
| "learning_rate": 1.2505876821814765e-05, | |
| "loss": 0.8092, | |
| "mean_token_accuracy": 0.8008480191230773, | |
| "num_tokens": 6183620.0, | |
| "step": 5580 | |
| }, | |
| { | |
| "epoch": 1.1263348780979245, | |
| "grad_norm": 9.8125, | |
| "learning_rate": 1.2492444086238162e-05, | |
| "loss": 0.7622, | |
| "mean_token_accuracy": 0.8064143776893615, | |
| "num_tokens": 6194516.0, | |
| "step": 5590 | |
| }, | |
| { | |
| "epoch": 1.1283497884344147, | |
| "grad_norm": 12.875, | |
| "learning_rate": 1.2479011350661563e-05, | |
| "loss": 0.8479, | |
| "mean_token_accuracy": 0.7917460918426513, | |
| "num_tokens": 6205641.0, | |
| "step": 5600 | |
| }, | |
| { | |
| "epoch": 1.1303646987709046, | |
| "grad_norm": 13.125, | |
| "learning_rate": 1.2465578615084963e-05, | |
| "loss": 0.9111, | |
| "mean_token_accuracy": 0.77914879322052, | |
| "num_tokens": 6218283.0, | |
| "step": 5610 | |
| }, | |
| { | |
| "epoch": 1.1323796091073948, | |
| "grad_norm": 11.4375, | |
| "learning_rate": 1.2452145879508364e-05, | |
| "loss": 0.8832, | |
| "mean_token_accuracy": 0.784546959400177, | |
| "num_tokens": 6230024.0, | |
| "step": 5620 | |
| }, | |
| { | |
| "epoch": 1.1343945194438847, | |
| "grad_norm": 13.4375, | |
| "learning_rate": 1.2438713143931762e-05, | |
| "loss": 0.8859, | |
| "mean_token_accuracy": 0.7834093928337097, | |
| "num_tokens": 6242378.0, | |
| "step": 5630 | |
| }, | |
| { | |
| "epoch": 1.1364094297803748, | |
| "grad_norm": 9.875, | |
| "learning_rate": 1.2425280408355163e-05, | |
| "loss": 0.9509, | |
| "mean_token_accuracy": 0.7756809532642365, | |
| "num_tokens": 6253645.0, | |
| "step": 5640 | |
| }, | |
| { | |
| "epoch": 1.1384243401168648, | |
| "grad_norm": 10.625, | |
| "learning_rate": 1.2411847672778563e-05, | |
| "loss": 0.8785, | |
| "mean_token_accuracy": 0.7828778207302094, | |
| "num_tokens": 6266360.0, | |
| "step": 5650 | |
| }, | |
| { | |
| "epoch": 1.1404392504533547, | |
| "grad_norm": 12.1875, | |
| "learning_rate": 1.239841493720196e-05, | |
| "loss": 0.8892, | |
| "mean_token_accuracy": 0.7838792741298676, | |
| "num_tokens": 6277349.0, | |
| "step": 5660 | |
| }, | |
| { | |
| "epoch": 1.1424541607898449, | |
| "grad_norm": 12.875, | |
| "learning_rate": 1.2384982201625361e-05, | |
| "loss": 0.8632, | |
| "mean_token_accuracy": 0.790741640329361, | |
| "num_tokens": 6288664.0, | |
| "step": 5670 | |
| }, | |
| { | |
| "epoch": 1.144469071126335, | |
| "grad_norm": 9.1875, | |
| "learning_rate": 1.2371549466048762e-05, | |
| "loss": 0.8403, | |
| "mean_token_accuracy": 0.7891623616218567, | |
| "num_tokens": 6302179.0, | |
| "step": 5680 | |
| }, | |
| { | |
| "epoch": 1.146483981462825, | |
| "grad_norm": 11.75, | |
| "learning_rate": 1.2358116730472162e-05, | |
| "loss": 0.9125, | |
| "mean_token_accuracy": 0.7795878767967224, | |
| "num_tokens": 6313611.0, | |
| "step": 5690 | |
| }, | |
| { | |
| "epoch": 1.1484988917993149, | |
| "grad_norm": 11.9375, | |
| "learning_rate": 1.2344683994895561e-05, | |
| "loss": 0.8863, | |
| "mean_token_accuracy": 0.787626963853836, | |
| "num_tokens": 6324550.0, | |
| "step": 5700 | |
| }, | |
| { | |
| "epoch": 1.150513802135805, | |
| "grad_norm": 11.5625, | |
| "learning_rate": 1.2331251259318962e-05, | |
| "loss": 0.9229, | |
| "mean_token_accuracy": 0.7838542103767395, | |
| "num_tokens": 6336013.0, | |
| "step": 5710 | |
| }, | |
| { | |
| "epoch": 1.152528712472295, | |
| "grad_norm": 11.4375, | |
| "learning_rate": 1.2317818523742362e-05, | |
| "loss": 0.7724, | |
| "mean_token_accuracy": 0.8055199205875396, | |
| "num_tokens": 6346134.0, | |
| "step": 5720 | |
| }, | |
| { | |
| "epoch": 1.154543622808785, | |
| "grad_norm": 11.125, | |
| "learning_rate": 1.2304385788165761e-05, | |
| "loss": 0.7996, | |
| "mean_token_accuracy": 0.8010989010334015, | |
| "num_tokens": 6356419.0, | |
| "step": 5730 | |
| }, | |
| { | |
| "epoch": 1.156558533145275, | |
| "grad_norm": 12.6875, | |
| "learning_rate": 1.2290953052589161e-05, | |
| "loss": 0.7861, | |
| "mean_token_accuracy": 0.8053541004657745, | |
| "num_tokens": 6367086.0, | |
| "step": 5740 | |
| }, | |
| { | |
| "epoch": 1.1585734434817652, | |
| "grad_norm": 11.4375, | |
| "learning_rate": 1.2277520317012562e-05, | |
| "loss": 0.7886, | |
| "mean_token_accuracy": 0.8104895174503326, | |
| "num_tokens": 6378360.0, | |
| "step": 5750 | |
| }, | |
| { | |
| "epoch": 1.160588353818255, | |
| "grad_norm": 10.5625, | |
| "learning_rate": 1.226408758143596e-05, | |
| "loss": 0.7607, | |
| "mean_token_accuracy": 0.807652473449707, | |
| "num_tokens": 6390770.0, | |
| "step": 5760 | |
| }, | |
| { | |
| "epoch": 1.162603264154745, | |
| "grad_norm": 15.4375, | |
| "learning_rate": 1.225065484585936e-05, | |
| "loss": 0.8117, | |
| "mean_token_accuracy": 0.8016961336135864, | |
| "num_tokens": 6400647.0, | |
| "step": 5770 | |
| }, | |
| { | |
| "epoch": 1.1646181744912352, | |
| "grad_norm": 13.9375, | |
| "learning_rate": 1.223722211028276e-05, | |
| "loss": 0.8433, | |
| "mean_token_accuracy": 0.7853075683116912, | |
| "num_tokens": 6410061.0, | |
| "step": 5780 | |
| }, | |
| { | |
| "epoch": 1.166633084827725, | |
| "grad_norm": 13.6875, | |
| "learning_rate": 1.222378937470616e-05, | |
| "loss": 0.7607, | |
| "mean_token_accuracy": 0.8095987677574158, | |
| "num_tokens": 6420080.0, | |
| "step": 5790 | |
| }, | |
| { | |
| "epoch": 1.1686479951642152, | |
| "grad_norm": 12.25, | |
| "learning_rate": 1.221035663912956e-05, | |
| "loss": 0.9257, | |
| "mean_token_accuracy": 0.778600412607193, | |
| "num_tokens": 6432179.0, | |
| "step": 5800 | |
| }, | |
| { | |
| "epoch": 1.1706629055007052, | |
| "grad_norm": 11.0, | |
| "learning_rate": 1.219692390355296e-05, | |
| "loss": 0.8849, | |
| "mean_token_accuracy": 0.7801730871200562, | |
| "num_tokens": 6442848.0, | |
| "step": 5810 | |
| }, | |
| { | |
| "epoch": 1.1726778158371953, | |
| "grad_norm": 10.5625, | |
| "learning_rate": 1.218349116797636e-05, | |
| "loss": 0.765, | |
| "mean_token_accuracy": 0.8116752684116364, | |
| "num_tokens": 6453565.0, | |
| "step": 5820 | |
| }, | |
| { | |
| "epoch": 1.1746927261736853, | |
| "grad_norm": 14.0, | |
| "learning_rate": 1.2170058432399758e-05, | |
| "loss": 0.8767, | |
| "mean_token_accuracy": 0.7874524176120759, | |
| "num_tokens": 6464704.0, | |
| "step": 5830 | |
| }, | |
| { | |
| "epoch": 1.1767076365101752, | |
| "grad_norm": 14.625, | |
| "learning_rate": 1.2156625696823158e-05, | |
| "loss": 0.9735, | |
| "mean_token_accuracy": 0.7777929544448853, | |
| "num_tokens": 6476700.0, | |
| "step": 5840 | |
| }, | |
| { | |
| "epoch": 1.1787225468466653, | |
| "grad_norm": 10.9375, | |
| "learning_rate": 1.2143192961246559e-05, | |
| "loss": 0.9004, | |
| "mean_token_accuracy": 0.7828619062900544, | |
| "num_tokens": 6487976.0, | |
| "step": 5850 | |
| }, | |
| { | |
| "epoch": 1.1807374571831553, | |
| "grad_norm": 12.625, | |
| "learning_rate": 1.2129760225669958e-05, | |
| "loss": 0.8873, | |
| "mean_token_accuracy": 0.7900948286056518, | |
| "num_tokens": 6499362.0, | |
| "step": 5860 | |
| }, | |
| { | |
| "epoch": 1.1827523675196454, | |
| "grad_norm": 13.4375, | |
| "learning_rate": 1.2116327490093358e-05, | |
| "loss": 0.9967, | |
| "mean_token_accuracy": 0.7665694057941437, | |
| "num_tokens": 6510505.0, | |
| "step": 5870 | |
| }, | |
| { | |
| "epoch": 1.1847672778561353, | |
| "grad_norm": 11.625, | |
| "learning_rate": 1.2102894754516759e-05, | |
| "loss": 0.8207, | |
| "mean_token_accuracy": 0.7956820368766785, | |
| "num_tokens": 6522577.0, | |
| "step": 5880 | |
| }, | |
| { | |
| "epoch": 1.1867821881926255, | |
| "grad_norm": 13.25, | |
| "learning_rate": 1.208946201894016e-05, | |
| "loss": 0.7919, | |
| "mean_token_accuracy": 0.7999676465988159, | |
| "num_tokens": 6533179.0, | |
| "step": 5890 | |
| }, | |
| { | |
| "epoch": 1.1887970985291154, | |
| "grad_norm": 11.5, | |
| "learning_rate": 1.2076029283363558e-05, | |
| "loss": 0.8339, | |
| "mean_token_accuracy": 0.791290158033371, | |
| "num_tokens": 6544059.0, | |
| "step": 5900 | |
| }, | |
| { | |
| "epoch": 1.1908120088656056, | |
| "grad_norm": 11.1875, | |
| "learning_rate": 1.2062596547786957e-05, | |
| "loss": 0.876, | |
| "mean_token_accuracy": 0.7901681363582611, | |
| "num_tokens": 6554941.0, | |
| "step": 5910 | |
| }, | |
| { | |
| "epoch": 1.1928269192020955, | |
| "grad_norm": 9.6875, | |
| "learning_rate": 1.2049163812210357e-05, | |
| "loss": 0.969, | |
| "mean_token_accuracy": 0.7663461267948151, | |
| "num_tokens": 6567559.0, | |
| "step": 5920 | |
| }, | |
| { | |
| "epoch": 1.1948418295385856, | |
| "grad_norm": 11.25, | |
| "learning_rate": 1.2035731076633756e-05, | |
| "loss": 0.796, | |
| "mean_token_accuracy": 0.7978686451911926, | |
| "num_tokens": 6577821.0, | |
| "step": 5930 | |
| }, | |
| { | |
| "epoch": 1.1968567398750756, | |
| "grad_norm": 11.3125, | |
| "learning_rate": 1.2022298341057157e-05, | |
| "loss": 0.8198, | |
| "mean_token_accuracy": 0.7984302759170532, | |
| "num_tokens": 6590610.0, | |
| "step": 5940 | |
| }, | |
| { | |
| "epoch": 1.1988716502115655, | |
| "grad_norm": 10.25, | |
| "learning_rate": 1.2008865605480557e-05, | |
| "loss": 0.9362, | |
| "mean_token_accuracy": 0.7766359865665435, | |
| "num_tokens": 6601976.0, | |
| "step": 5950 | |
| }, | |
| { | |
| "epoch": 1.2008865605480556, | |
| "grad_norm": 10.0625, | |
| "learning_rate": 1.1995432869903958e-05, | |
| "loss": 0.7565, | |
| "mean_token_accuracy": 0.8038519501686097, | |
| "num_tokens": 6613911.0, | |
| "step": 5960 | |
| }, | |
| { | |
| "epoch": 1.2029014708845456, | |
| "grad_norm": 14.0625, | |
| "learning_rate": 1.1982000134327357e-05, | |
| "loss": 0.89, | |
| "mean_token_accuracy": 0.7806312680244446, | |
| "num_tokens": 6625392.0, | |
| "step": 5970 | |
| }, | |
| { | |
| "epoch": 1.2049163812210357, | |
| "grad_norm": 10.625, | |
| "learning_rate": 1.1968567398750757e-05, | |
| "loss": 0.7505, | |
| "mean_token_accuracy": 0.8176207900047302, | |
| "num_tokens": 6635806.0, | |
| "step": 5980 | |
| }, | |
| { | |
| "epoch": 1.2069312915575257, | |
| "grad_norm": 11.0625, | |
| "learning_rate": 1.1955134663174158e-05, | |
| "loss": 0.7663, | |
| "mean_token_accuracy": 0.8061110198497772, | |
| "num_tokens": 6646644.0, | |
| "step": 5990 | |
| }, | |
| { | |
| "epoch": 1.2089462018940158, | |
| "grad_norm": 11.0625, | |
| "learning_rate": 1.1941701927597555e-05, | |
| "loss": 0.7133, | |
| "mean_token_accuracy": 0.8181872367858887, | |
| "num_tokens": 6657605.0, | |
| "step": 6000 | |
| }, | |
| { | |
| "epoch": 1.2109611122305057, | |
| "grad_norm": 16.125, | |
| "learning_rate": 1.1928269192020955e-05, | |
| "loss": 0.8571, | |
| "mean_token_accuracy": 0.7833750724792481, | |
| "num_tokens": 6667905.0, | |
| "step": 6010 | |
| }, | |
| { | |
| "epoch": 1.2129760225669957, | |
| "grad_norm": 9.8125, | |
| "learning_rate": 1.1914836456444356e-05, | |
| "loss": 0.7898, | |
| "mean_token_accuracy": 0.7975350022315979, | |
| "num_tokens": 6678888.0, | |
| "step": 6020 | |
| }, | |
| { | |
| "epoch": 1.2149909329034858, | |
| "grad_norm": 13.5, | |
| "learning_rate": 1.1901403720867755e-05, | |
| "loss": 0.8104, | |
| "mean_token_accuracy": 0.7968179106712341, | |
| "num_tokens": 6690767.0, | |
| "step": 6030 | |
| }, | |
| { | |
| "epoch": 1.2170058432399757, | |
| "grad_norm": 12.9375, | |
| "learning_rate": 1.1887970985291155e-05, | |
| "loss": 0.8847, | |
| "mean_token_accuracy": 0.7826601445674897, | |
| "num_tokens": 6701963.0, | |
| "step": 6040 | |
| }, | |
| { | |
| "epoch": 1.2190207535764659, | |
| "grad_norm": 10.375, | |
| "learning_rate": 1.1874538249714556e-05, | |
| "loss": 0.7929, | |
| "mean_token_accuracy": 0.795721584558487, | |
| "num_tokens": 6712765.0, | |
| "step": 6050 | |
| }, | |
| { | |
| "epoch": 1.2210356639129558, | |
| "grad_norm": 9.5625, | |
| "learning_rate": 1.1861105514137956e-05, | |
| "loss": 0.8396, | |
| "mean_token_accuracy": 0.7961892068386078, | |
| "num_tokens": 6723997.0, | |
| "step": 6060 | |
| }, | |
| { | |
| "epoch": 1.223050574249446, | |
| "grad_norm": 10.4375, | |
| "learning_rate": 1.1847672778561354e-05, | |
| "loss": 0.7774, | |
| "mean_token_accuracy": 0.8090421617031097, | |
| "num_tokens": 6734959.0, | |
| "step": 6070 | |
| }, | |
| { | |
| "epoch": 1.225065484585936, | |
| "grad_norm": 13.75, | |
| "learning_rate": 1.1834240042984754e-05, | |
| "loss": 0.8442, | |
| "mean_token_accuracy": 0.7920287191867829, | |
| "num_tokens": 6745125.0, | |
| "step": 6080 | |
| }, | |
| { | |
| "epoch": 1.227080394922426, | |
| "grad_norm": 12.375, | |
| "learning_rate": 1.1820807307408155e-05, | |
| "loss": 0.7339, | |
| "mean_token_accuracy": 0.8152937352657318, | |
| "num_tokens": 6755578.0, | |
| "step": 6090 | |
| }, | |
| { | |
| "epoch": 1.229095305258916, | |
| "grad_norm": 10.5, | |
| "learning_rate": 1.1807374571831553e-05, | |
| "loss": 0.7799, | |
| "mean_token_accuracy": 0.8055865943431855, | |
| "num_tokens": 6766161.0, | |
| "step": 6100 | |
| }, | |
| { | |
| "epoch": 1.2311102155954061, | |
| "grad_norm": 12.5, | |
| "learning_rate": 1.1793941836254954e-05, | |
| "loss": 0.8072, | |
| "mean_token_accuracy": 0.7963293552398681, | |
| "num_tokens": 6776660.0, | |
| "step": 6110 | |
| }, | |
| { | |
| "epoch": 1.233125125931896, | |
| "grad_norm": 14.0, | |
| "learning_rate": 1.1780509100678354e-05, | |
| "loss": 0.882, | |
| "mean_token_accuracy": 0.7818022012710572, | |
| "num_tokens": 6787702.0, | |
| "step": 6120 | |
| }, | |
| { | |
| "epoch": 1.235140036268386, | |
| "grad_norm": 11.1875, | |
| "learning_rate": 1.1767076365101755e-05, | |
| "loss": 0.8014, | |
| "mean_token_accuracy": 0.8063171863555908, | |
| "num_tokens": 6798904.0, | |
| "step": 6130 | |
| }, | |
| { | |
| "epoch": 1.2371549466048761, | |
| "grad_norm": 13.75, | |
| "learning_rate": 1.1753643629525154e-05, | |
| "loss": 0.8453, | |
| "mean_token_accuracy": 0.7982756316661834, | |
| "num_tokens": 6808990.0, | |
| "step": 6140 | |
| }, | |
| { | |
| "epoch": 1.239169856941366, | |
| "grad_norm": 10.8125, | |
| "learning_rate": 1.1740210893948554e-05, | |
| "loss": 0.7952, | |
| "mean_token_accuracy": 0.8004013955593109, | |
| "num_tokens": 6818726.0, | |
| "step": 6150 | |
| }, | |
| { | |
| "epoch": 1.2411847672778562, | |
| "grad_norm": 9.3125, | |
| "learning_rate": 1.1726778158371955e-05, | |
| "loss": 0.8466, | |
| "mean_token_accuracy": 0.794275438785553, | |
| "num_tokens": 6830189.0, | |
| "step": 6160 | |
| }, | |
| { | |
| "epoch": 1.2431996776143461, | |
| "grad_norm": 11.8125, | |
| "learning_rate": 1.1713345422795352e-05, | |
| "loss": 0.809, | |
| "mean_token_accuracy": 0.7935117900371551, | |
| "num_tokens": 6840564.0, | |
| "step": 6170 | |
| }, | |
| { | |
| "epoch": 1.2452145879508363, | |
| "grad_norm": 10.1875, | |
| "learning_rate": 1.1699912687218753e-05, | |
| "loss": 0.8684, | |
| "mean_token_accuracy": 0.7853965878486633, | |
| "num_tokens": 6851057.0, | |
| "step": 6180 | |
| }, | |
| { | |
| "epoch": 1.2472294982873262, | |
| "grad_norm": 12.3125, | |
| "learning_rate": 1.1686479951642153e-05, | |
| "loss": 0.7914, | |
| "mean_token_accuracy": 0.7986515760421753, | |
| "num_tokens": 6863098.0, | |
| "step": 6190 | |
| }, | |
| { | |
| "epoch": 1.2492444086238161, | |
| "grad_norm": 11.1875, | |
| "learning_rate": 1.1673047216065552e-05, | |
| "loss": 0.7723, | |
| "mean_token_accuracy": 0.8050061583518981, | |
| "num_tokens": 6873361.0, | |
| "step": 6200 | |
| }, | |
| { | |
| "epoch": 1.2512593189603063, | |
| "grad_norm": 10.6875, | |
| "learning_rate": 1.1659614480488952e-05, | |
| "loss": 0.7892, | |
| "mean_token_accuracy": 0.8081447362899781, | |
| "num_tokens": 6885561.0, | |
| "step": 6210 | |
| }, | |
| { | |
| "epoch": 1.2532742292967962, | |
| "grad_norm": 11.5625, | |
| "learning_rate": 1.1646181744912353e-05, | |
| "loss": 0.8335, | |
| "mean_token_accuracy": 0.7930399179458618, | |
| "num_tokens": 6896678.0, | |
| "step": 6220 | |
| }, | |
| { | |
| "epoch": 1.2552891396332864, | |
| "grad_norm": 12.5, | |
| "learning_rate": 1.1632749009335754e-05, | |
| "loss": 0.8161, | |
| "mean_token_accuracy": 0.8012160181999206, | |
| "num_tokens": 6906436.0, | |
| "step": 6230 | |
| }, | |
| { | |
| "epoch": 1.2573040499697763, | |
| "grad_norm": 14.5625, | |
| "learning_rate": 1.161931627375915e-05, | |
| "loss": 0.8408, | |
| "mean_token_accuracy": 0.7945603370666504, | |
| "num_tokens": 6916620.0, | |
| "step": 6240 | |
| }, | |
| { | |
| "epoch": 1.2593189603062664, | |
| "grad_norm": 16.25, | |
| "learning_rate": 1.1605883538182551e-05, | |
| "loss": 0.8039, | |
| "mean_token_accuracy": 0.801008677482605, | |
| "num_tokens": 6928094.0, | |
| "step": 6250 | |
| }, | |
| { | |
| "epoch": 1.2613338706427564, | |
| "grad_norm": 11.0625, | |
| "learning_rate": 1.1592450802605952e-05, | |
| "loss": 0.7906, | |
| "mean_token_accuracy": 0.8035953044891357, | |
| "num_tokens": 6937852.0, | |
| "step": 6260 | |
| }, | |
| { | |
| "epoch": 1.2633487809792463, | |
| "grad_norm": 13.0, | |
| "learning_rate": 1.157901806702935e-05, | |
| "loss": 0.8865, | |
| "mean_token_accuracy": 0.7758103013038635, | |
| "num_tokens": 6948566.0, | |
| "step": 6270 | |
| }, | |
| { | |
| "epoch": 1.2653636913157364, | |
| "grad_norm": 14.125, | |
| "learning_rate": 1.1565585331452751e-05, | |
| "loss": 0.9311, | |
| "mean_token_accuracy": 0.7705212533473969, | |
| "num_tokens": 6959264.0, | |
| "step": 6280 | |
| }, | |
| { | |
| "epoch": 1.2673786016522266, | |
| "grad_norm": 11.3125, | |
| "learning_rate": 1.1552152595876152e-05, | |
| "loss": 0.8076, | |
| "mean_token_accuracy": 0.7959451377391815, | |
| "num_tokens": 6970706.0, | |
| "step": 6290 | |
| }, | |
| { | |
| "epoch": 1.2693935119887165, | |
| "grad_norm": 12.9375, | |
| "learning_rate": 1.1538719860299552e-05, | |
| "loss": 0.8435, | |
| "mean_token_accuracy": 0.7901066780090332, | |
| "num_tokens": 6982005.0, | |
| "step": 6300 | |
| }, | |
| { | |
| "epoch": 1.2714084223252065, | |
| "grad_norm": 15.875, | |
| "learning_rate": 1.1525287124722951e-05, | |
| "loss": 0.7781, | |
| "mean_token_accuracy": 0.8100695073604584, | |
| "num_tokens": 6993228.0, | |
| "step": 6310 | |
| }, | |
| { | |
| "epoch": 1.2734233326616966, | |
| "grad_norm": 11.8125, | |
| "learning_rate": 1.1511854389146352e-05, | |
| "loss": 0.7771, | |
| "mean_token_accuracy": 0.7994659662246704, | |
| "num_tokens": 7003855.0, | |
| "step": 6320 | |
| }, | |
| { | |
| "epoch": 1.2754382429981865, | |
| "grad_norm": 11.5625, | |
| "learning_rate": 1.1498421653569752e-05, | |
| "loss": 0.8883, | |
| "mean_token_accuracy": 0.7779460906982422, | |
| "num_tokens": 7016389.0, | |
| "step": 6330 | |
| }, | |
| { | |
| "epoch": 1.2774531533346767, | |
| "grad_norm": 10.6875, | |
| "learning_rate": 1.148498891799315e-05, | |
| "loss": 0.7674, | |
| "mean_token_accuracy": 0.8048594057559967, | |
| "num_tokens": 7027229.0, | |
| "step": 6340 | |
| }, | |
| { | |
| "epoch": 1.2794680636711666, | |
| "grad_norm": 13.0, | |
| "learning_rate": 1.147155618241655e-05, | |
| "loss": 0.8556, | |
| "mean_token_accuracy": 0.7895182788372039, | |
| "num_tokens": 7038636.0, | |
| "step": 6350 | |
| }, | |
| { | |
| "epoch": 1.2814829740076568, | |
| "grad_norm": 10.75, | |
| "learning_rate": 1.145812344683995e-05, | |
| "loss": 0.8673, | |
| "mean_token_accuracy": 0.7855879724025726, | |
| "num_tokens": 7049602.0, | |
| "step": 6360 | |
| }, | |
| { | |
| "epoch": 1.2834978843441467, | |
| "grad_norm": 11.0625, | |
| "learning_rate": 1.1444690711263349e-05, | |
| "loss": 0.8281, | |
| "mean_token_accuracy": 0.798017168045044, | |
| "num_tokens": 7060783.0, | |
| "step": 6370 | |
| }, | |
| { | |
| "epoch": 1.2855127946806366, | |
| "grad_norm": 10.5, | |
| "learning_rate": 1.143125797568675e-05, | |
| "loss": 0.781, | |
| "mean_token_accuracy": 0.8092272758483887, | |
| "num_tokens": 7072749.0, | |
| "step": 6380 | |
| }, | |
| { | |
| "epoch": 1.2875277050171268, | |
| "grad_norm": 9.75, | |
| "learning_rate": 1.141782524011015e-05, | |
| "loss": 0.8664, | |
| "mean_token_accuracy": 0.7885208010673523, | |
| "num_tokens": 7085190.0, | |
| "step": 6390 | |
| }, | |
| { | |
| "epoch": 1.2895426153536167, | |
| "grad_norm": 10.625, | |
| "learning_rate": 1.140439250453355e-05, | |
| "loss": 0.7861, | |
| "mean_token_accuracy": 0.80192711353302, | |
| "num_tokens": 7095259.0, | |
| "step": 6400 | |
| }, | |
| { | |
| "epoch": 1.2915575256901068, | |
| "grad_norm": 10.5625, | |
| "learning_rate": 1.1390959768956948e-05, | |
| "loss": 0.7456, | |
| "mean_token_accuracy": 0.8084029912948608, | |
| "num_tokens": 7104840.0, | |
| "step": 6410 | |
| }, | |
| { | |
| "epoch": 1.2935724360265968, | |
| "grad_norm": 9.25, | |
| "learning_rate": 1.1377527033380348e-05, | |
| "loss": 0.9101, | |
| "mean_token_accuracy": 0.7775183081626892, | |
| "num_tokens": 7116704.0, | |
| "step": 6420 | |
| }, | |
| { | |
| "epoch": 1.295587346363087, | |
| "grad_norm": 10.5625, | |
| "learning_rate": 1.1364094297803749e-05, | |
| "loss": 0.8154, | |
| "mean_token_accuracy": 0.7952620327472687, | |
| "num_tokens": 7128360.0, | |
| "step": 6430 | |
| }, | |
| { | |
| "epoch": 1.2976022566995769, | |
| "grad_norm": 10.0625, | |
| "learning_rate": 1.1350661562227148e-05, | |
| "loss": 0.7922, | |
| "mean_token_accuracy": 0.8016098260879516, | |
| "num_tokens": 7139177.0, | |
| "step": 6440 | |
| }, | |
| { | |
| "epoch": 1.2996171670360668, | |
| "grad_norm": 13.0625, | |
| "learning_rate": 1.1337228826650548e-05, | |
| "loss": 0.9353, | |
| "mean_token_accuracy": 0.7753040254116058, | |
| "num_tokens": 7149827.0, | |
| "step": 6450 | |
| }, | |
| { | |
| "epoch": 1.301632077372557, | |
| "grad_norm": 9.4375, | |
| "learning_rate": 1.1323796091073949e-05, | |
| "loss": 0.8437, | |
| "mean_token_accuracy": 0.7929971814155579, | |
| "num_tokens": 7162136.0, | |
| "step": 6460 | |
| }, | |
| { | |
| "epoch": 1.303646987709047, | |
| "grad_norm": 10.875, | |
| "learning_rate": 1.1310363355497348e-05, | |
| "loss": 0.9565, | |
| "mean_token_accuracy": 0.7702773094177247, | |
| "num_tokens": 7173369.0, | |
| "step": 6470 | |
| }, | |
| { | |
| "epoch": 1.305661898045537, | |
| "grad_norm": 10.75, | |
| "learning_rate": 1.1296930619920748e-05, | |
| "loss": 0.8265, | |
| "mean_token_accuracy": 0.7950271546840668, | |
| "num_tokens": 7184403.0, | |
| "step": 6480 | |
| }, | |
| { | |
| "epoch": 1.307676808382027, | |
| "grad_norm": 14.9375, | |
| "learning_rate": 1.1283497884344149e-05, | |
| "loss": 0.7715, | |
| "mean_token_accuracy": 0.8035805761814118, | |
| "num_tokens": 7195550.0, | |
| "step": 6490 | |
| }, | |
| { | |
| "epoch": 1.309691718718517, | |
| "grad_norm": 13.8125, | |
| "learning_rate": 1.1270065148767547e-05, | |
| "loss": 0.8843, | |
| "mean_token_accuracy": 0.7886571526527405, | |
| "num_tokens": 7207013.0, | |
| "step": 6500 | |
| }, | |
| { | |
| "epoch": 1.311706629055007, | |
| "grad_norm": 10.0625, | |
| "learning_rate": 1.1256632413190946e-05, | |
| "loss": 0.7398, | |
| "mean_token_accuracy": 0.8139720261096954, | |
| "num_tokens": 7217616.0, | |
| "step": 6510 | |
| }, | |
| { | |
| "epoch": 1.313721539391497, | |
| "grad_norm": 12.875, | |
| "learning_rate": 1.1243199677614347e-05, | |
| "loss": 0.8148, | |
| "mean_token_accuracy": 0.7918058097362518, | |
| "num_tokens": 7228902.0, | |
| "step": 6520 | |
| }, | |
| { | |
| "epoch": 1.315736449727987, | |
| "grad_norm": 12.5, | |
| "learning_rate": 1.1229766942037747e-05, | |
| "loss": 0.849, | |
| "mean_token_accuracy": 0.7959416568279266, | |
| "num_tokens": 7239105.0, | |
| "step": 6530 | |
| }, | |
| { | |
| "epoch": 1.3177513600644772, | |
| "grad_norm": 11.1875, | |
| "learning_rate": 1.1216334206461146e-05, | |
| "loss": 0.7653, | |
| "mean_token_accuracy": 0.8075124859809876, | |
| "num_tokens": 7248600.0, | |
| "step": 6540 | |
| }, | |
| { | |
| "epoch": 1.3197662704009672, | |
| "grad_norm": 11.4375, | |
| "learning_rate": 1.1202901470884547e-05, | |
| "loss": 0.8824, | |
| "mean_token_accuracy": 0.7894264698028565, | |
| "num_tokens": 7260572.0, | |
| "step": 6550 | |
| }, | |
| { | |
| "epoch": 1.321781180737457, | |
| "grad_norm": 12.1875, | |
| "learning_rate": 1.1189468735307947e-05, | |
| "loss": 0.8524, | |
| "mean_token_accuracy": 0.7949903309345245, | |
| "num_tokens": 7270963.0, | |
| "step": 6560 | |
| }, | |
| { | |
| "epoch": 1.3237960910739472, | |
| "grad_norm": 13.0, | |
| "learning_rate": 1.1176035999731348e-05, | |
| "loss": 0.7687, | |
| "mean_token_accuracy": 0.8052358329296112, | |
| "num_tokens": 7282254.0, | |
| "step": 6570 | |
| }, | |
| { | |
| "epoch": 1.3258110014104372, | |
| "grad_norm": 12.1875, | |
| "learning_rate": 1.1162603264154745e-05, | |
| "loss": 0.771, | |
| "mean_token_accuracy": 0.8071496605873107, | |
| "num_tokens": 7293472.0, | |
| "step": 6580 | |
| }, | |
| { | |
| "epoch": 1.3278259117469273, | |
| "grad_norm": 16.0, | |
| "learning_rate": 1.1149170528578145e-05, | |
| "loss": 0.7544, | |
| "mean_token_accuracy": 0.8064080238342285, | |
| "num_tokens": 7302549.0, | |
| "step": 6590 | |
| }, | |
| { | |
| "epoch": 1.3298408220834173, | |
| "grad_norm": 11.625, | |
| "learning_rate": 1.1135737793001546e-05, | |
| "loss": 0.7427, | |
| "mean_token_accuracy": 0.8121874392032623, | |
| "num_tokens": 7313898.0, | |
| "step": 6600 | |
| }, | |
| { | |
| "epoch": 1.3318557324199074, | |
| "grad_norm": 11.4375, | |
| "learning_rate": 1.1122305057424945e-05, | |
| "loss": 0.8996, | |
| "mean_token_accuracy": 0.7810469567775726, | |
| "num_tokens": 7324658.0, | |
| "step": 6610 | |
| }, | |
| { | |
| "epoch": 1.3338706427563973, | |
| "grad_norm": 11.625, | |
| "learning_rate": 1.1108872321848345e-05, | |
| "loss": 0.7865, | |
| "mean_token_accuracy": 0.806594967842102, | |
| "num_tokens": 7335673.0, | |
| "step": 6620 | |
| }, | |
| { | |
| "epoch": 1.3358855530928873, | |
| "grad_norm": 12.6875, | |
| "learning_rate": 1.1095439586271746e-05, | |
| "loss": 0.9176, | |
| "mean_token_accuracy": 0.7800018846988678, | |
| "num_tokens": 7347253.0, | |
| "step": 6630 | |
| }, | |
| { | |
| "epoch": 1.3379004634293774, | |
| "grad_norm": 12.6875, | |
| "learning_rate": 1.1082006850695145e-05, | |
| "loss": 0.8083, | |
| "mean_token_accuracy": 0.8040257275104523, | |
| "num_tokens": 7358476.0, | |
| "step": 6640 | |
| }, | |
| { | |
| "epoch": 1.3399153737658676, | |
| "grad_norm": 14.0625, | |
| "learning_rate": 1.1068574115118545e-05, | |
| "loss": 0.9627, | |
| "mean_token_accuracy": 0.7791651308536529, | |
| "num_tokens": 7369343.0, | |
| "step": 6650 | |
| }, | |
| { | |
| "epoch": 1.3419302841023575, | |
| "grad_norm": 14.375, | |
| "learning_rate": 1.1055141379541944e-05, | |
| "loss": 0.869, | |
| "mean_token_accuracy": 0.791654235124588, | |
| "num_tokens": 7380142.0, | |
| "step": 6660 | |
| }, | |
| { | |
| "epoch": 1.3439451944388474, | |
| "grad_norm": 16.75, | |
| "learning_rate": 1.1041708643965345e-05, | |
| "loss": 0.8959, | |
| "mean_token_accuracy": 0.7814781248569489, | |
| "num_tokens": 7391681.0, | |
| "step": 6670 | |
| }, | |
| { | |
| "epoch": 1.3459601047753376, | |
| "grad_norm": 25.75, | |
| "learning_rate": 1.1028275908388743e-05, | |
| "loss": 0.7937, | |
| "mean_token_accuracy": 0.8048185467720032, | |
| "num_tokens": 7402373.0, | |
| "step": 6680 | |
| }, | |
| { | |
| "epoch": 1.3479750151118275, | |
| "grad_norm": 11.25, | |
| "learning_rate": 1.1014843172812144e-05, | |
| "loss": 0.8797, | |
| "mean_token_accuracy": 0.7896045446395874, | |
| "num_tokens": 7412646.0, | |
| "step": 6690 | |
| }, | |
| { | |
| "epoch": 1.3499899254483174, | |
| "grad_norm": 11.5625, | |
| "learning_rate": 1.1001410437235544e-05, | |
| "loss": 0.7739, | |
| "mean_token_accuracy": 0.8085869729518891, | |
| "num_tokens": 7423087.0, | |
| "step": 6700 | |
| }, | |
| { | |
| "epoch": 1.3520048357848076, | |
| "grad_norm": 42.5, | |
| "learning_rate": 1.0987977701658943e-05, | |
| "loss": 0.8267, | |
| "mean_token_accuracy": 0.7931196630001068, | |
| "num_tokens": 7433496.0, | |
| "step": 6710 | |
| }, | |
| { | |
| "epoch": 1.3540197461212977, | |
| "grad_norm": 11.625, | |
| "learning_rate": 1.0974544966082344e-05, | |
| "loss": 0.7688, | |
| "mean_token_accuracy": 0.8101352214813232, | |
| "num_tokens": 7444890.0, | |
| "step": 6720 | |
| }, | |
| { | |
| "epoch": 1.3560346564577876, | |
| "grad_norm": 9.9375, | |
| "learning_rate": 1.0961112230505744e-05, | |
| "loss": 0.8294, | |
| "mean_token_accuracy": 0.7928038239479065, | |
| "num_tokens": 7456497.0, | |
| "step": 6730 | |
| }, | |
| { | |
| "epoch": 1.3580495667942776, | |
| "grad_norm": 13.0, | |
| "learning_rate": 1.0947679494929145e-05, | |
| "loss": 0.8978, | |
| "mean_token_accuracy": 0.7794729173183441, | |
| "num_tokens": 7466678.0, | |
| "step": 6740 | |
| }, | |
| { | |
| "epoch": 1.3600644771307677, | |
| "grad_norm": 12.0625, | |
| "learning_rate": 1.0934246759352542e-05, | |
| "loss": 0.8052, | |
| "mean_token_accuracy": 0.8050779700279236, | |
| "num_tokens": 7477058.0, | |
| "step": 6750 | |
| }, | |
| { | |
| "epoch": 1.3620793874672577, | |
| "grad_norm": 11.75, | |
| "learning_rate": 1.0920814023775943e-05, | |
| "loss": 0.8537, | |
| "mean_token_accuracy": 0.7877449512481689, | |
| "num_tokens": 7487077.0, | |
| "step": 6760 | |
| }, | |
| { | |
| "epoch": 1.3640942978037478, | |
| "grad_norm": 14.0, | |
| "learning_rate": 1.0907381288199343e-05, | |
| "loss": 0.8519, | |
| "mean_token_accuracy": 0.7857004582881928, | |
| "num_tokens": 7497355.0, | |
| "step": 6770 | |
| }, | |
| { | |
| "epoch": 1.3661092081402377, | |
| "grad_norm": 10.4375, | |
| "learning_rate": 1.0893948552622742e-05, | |
| "loss": 0.796, | |
| "mean_token_accuracy": 0.7996562838554382, | |
| "num_tokens": 7508344.0, | |
| "step": 6780 | |
| }, | |
| { | |
| "epoch": 1.3681241184767279, | |
| "grad_norm": 11.75, | |
| "learning_rate": 1.0880515817046142e-05, | |
| "loss": 0.8427, | |
| "mean_token_accuracy": 0.7912454545497895, | |
| "num_tokens": 7519520.0, | |
| "step": 6790 | |
| }, | |
| { | |
| "epoch": 1.3701390288132178, | |
| "grad_norm": 9.25, | |
| "learning_rate": 1.0867083081469543e-05, | |
| "loss": 0.8216, | |
| "mean_token_accuracy": 0.7908532798290253, | |
| "num_tokens": 7531198.0, | |
| "step": 6800 | |
| }, | |
| { | |
| "epoch": 1.3721539391497077, | |
| "grad_norm": 10.625, | |
| "learning_rate": 1.085365034589294e-05, | |
| "loss": 0.8054, | |
| "mean_token_accuracy": 0.8009598433971405, | |
| "num_tokens": 7542242.0, | |
| "step": 6810 | |
| }, | |
| { | |
| "epoch": 1.3741688494861979, | |
| "grad_norm": 11.8125, | |
| "learning_rate": 1.084021761031634e-05, | |
| "loss": 0.8353, | |
| "mean_token_accuracy": 0.7952351868152618, | |
| "num_tokens": 7553773.0, | |
| "step": 6820 | |
| }, | |
| { | |
| "epoch": 1.3761837598226878, | |
| "grad_norm": 14.4375, | |
| "learning_rate": 1.0826784874739741e-05, | |
| "loss": 0.7143, | |
| "mean_token_accuracy": 0.8181480646133423, | |
| "num_tokens": 7563382.0, | |
| "step": 6830 | |
| }, | |
| { | |
| "epoch": 1.378198670159178, | |
| "grad_norm": 10.3125, | |
| "learning_rate": 1.0813352139163142e-05, | |
| "loss": 0.8876, | |
| "mean_token_accuracy": 0.7816132783889771, | |
| "num_tokens": 7575581.0, | |
| "step": 6840 | |
| }, | |
| { | |
| "epoch": 1.380213580495668, | |
| "grad_norm": 10.4375, | |
| "learning_rate": 1.079991940358654e-05, | |
| "loss": 0.8494, | |
| "mean_token_accuracy": 0.7854238271713256, | |
| "num_tokens": 7587514.0, | |
| "step": 6850 | |
| }, | |
| { | |
| "epoch": 1.382228490832158, | |
| "grad_norm": 10.1875, | |
| "learning_rate": 1.0786486668009941e-05, | |
| "loss": 0.8978, | |
| "mean_token_accuracy": 0.7826396405696869, | |
| "num_tokens": 7599849.0, | |
| "step": 6860 | |
| }, | |
| { | |
| "epoch": 1.384243401168648, | |
| "grad_norm": 10.25, | |
| "learning_rate": 1.0773053932433342e-05, | |
| "loss": 0.7709, | |
| "mean_token_accuracy": 0.8018522441387177, | |
| "num_tokens": 7611329.0, | |
| "step": 6870 | |
| }, | |
| { | |
| "epoch": 1.386258311505138, | |
| "grad_norm": 12.625, | |
| "learning_rate": 1.075962119685674e-05, | |
| "loss": 0.8472, | |
| "mean_token_accuracy": 0.7918815612792969, | |
| "num_tokens": 7621873.0, | |
| "step": 6880 | |
| }, | |
| { | |
| "epoch": 1.388273221841628, | |
| "grad_norm": 14.75, | |
| "learning_rate": 1.0746188461280141e-05, | |
| "loss": 0.9434, | |
| "mean_token_accuracy": 0.7729089677333831, | |
| "num_tokens": 7633787.0, | |
| "step": 6890 | |
| }, | |
| { | |
| "epoch": 1.3902881321781182, | |
| "grad_norm": 11.9375, | |
| "learning_rate": 1.0732755725703542e-05, | |
| "loss": 0.8285, | |
| "mean_token_accuracy": 0.7966946125030517, | |
| "num_tokens": 7643889.0, | |
| "step": 6900 | |
| }, | |
| { | |
| "epoch": 1.3923030425146081, | |
| "grad_norm": 11.9375, | |
| "learning_rate": 1.0719322990126942e-05, | |
| "loss": 0.8414, | |
| "mean_token_accuracy": 0.7909166395664216, | |
| "num_tokens": 7655243.0, | |
| "step": 6910 | |
| }, | |
| { | |
| "epoch": 1.394317952851098, | |
| "grad_norm": 11.6875, | |
| "learning_rate": 1.070589025455034e-05, | |
| "loss": 0.7375, | |
| "mean_token_accuracy": 0.8181369364261627, | |
| "num_tokens": 7666042.0, | |
| "step": 6920 | |
| }, | |
| { | |
| "epoch": 1.3963328631875882, | |
| "grad_norm": 8.4375, | |
| "learning_rate": 1.069245751897374e-05, | |
| "loss": 0.8464, | |
| "mean_token_accuracy": 0.797934228181839, | |
| "num_tokens": 7676887.0, | |
| "step": 6930 | |
| }, | |
| { | |
| "epoch": 1.3983477735240781, | |
| "grad_norm": 12.625, | |
| "learning_rate": 1.067902478339714e-05, | |
| "loss": 0.8094, | |
| "mean_token_accuracy": 0.7948006153106689, | |
| "num_tokens": 7688051.0, | |
| "step": 6940 | |
| }, | |
| { | |
| "epoch": 1.4003626838605683, | |
| "grad_norm": 10.9375, | |
| "learning_rate": 1.0665592047820539e-05, | |
| "loss": 0.8955, | |
| "mean_token_accuracy": 0.782581114768982, | |
| "num_tokens": 7699480.0, | |
| "step": 6950 | |
| }, | |
| { | |
| "epoch": 1.4023775941970582, | |
| "grad_norm": 12.375, | |
| "learning_rate": 1.065215931224394e-05, | |
| "loss": 0.8763, | |
| "mean_token_accuracy": 0.7808327317237854, | |
| "num_tokens": 7710853.0, | |
| "step": 6960 | |
| }, | |
| { | |
| "epoch": 1.4043925045335484, | |
| "grad_norm": 12.875, | |
| "learning_rate": 1.063872657666734e-05, | |
| "loss": 0.8204, | |
| "mean_token_accuracy": 0.8006475508213043, | |
| "num_tokens": 7722290.0, | |
| "step": 6970 | |
| }, | |
| { | |
| "epoch": 1.4064074148700383, | |
| "grad_norm": 10.625, | |
| "learning_rate": 1.0625293841090737e-05, | |
| "loss": 0.839, | |
| "mean_token_accuracy": 0.7950416922569274, | |
| "num_tokens": 7733653.0, | |
| "step": 6980 | |
| }, | |
| { | |
| "epoch": 1.4084223252065282, | |
| "grad_norm": 12.75, | |
| "learning_rate": 1.0611861105514138e-05, | |
| "loss": 0.8964, | |
| "mean_token_accuracy": 0.7838905036449433, | |
| "num_tokens": 7744184.0, | |
| "step": 6990 | |
| }, | |
| { | |
| "epoch": 1.4104372355430184, | |
| "grad_norm": 12.0, | |
| "learning_rate": 1.0598428369937538e-05, | |
| "loss": 0.7874, | |
| "mean_token_accuracy": 0.7988959193229676, | |
| "num_tokens": 7754571.0, | |
| "step": 7000 | |
| } | |
| ], | |
| "logging_steps": 10, | |
| "max_steps": 14889, | |
| "num_input_tokens_seen": 0, | |
| "num_train_epochs": 3, | |
| "save_steps": 500, | |
| "stateful_callbacks": { | |
| "TrainerControl": { | |
| "args": { | |
| "should_epoch_stop": false, | |
| "should_evaluate": false, | |
| "should_log": false, | |
| "should_save": true, | |
| "should_training_stop": false | |
| }, | |
| "attributes": {} | |
| } | |
| }, | |
| "total_flos": 9382261075611648.0, | |
| "train_batch_size": 8, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |