{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.2467308166790032,
"eval_steps": 500,
"global_step": 1000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0002467308166790032,
"grad_norm": 0.3934416174888611,
"learning_rate": 0.0,
"loss": 1.6394,
"num_input_tokens_seen": 262144,
"step": 1
},
{
"epoch": 0.0004934616333580064,
"grad_norm": 0.31525278091430664,
"learning_rate": 1.0000000000000002e-06,
"loss": 1.352,
"num_input_tokens_seen": 524288,
"step": 2
},
{
"epoch": 0.0007401924500370096,
"grad_norm": 0.3826882243156433,
"learning_rate": 2.0000000000000003e-06,
"loss": 1.3624,
"num_input_tokens_seen": 786432,
"step": 3
},
{
"epoch": 0.0009869232667160128,
"grad_norm": 0.29664933681488037,
"learning_rate": 3e-06,
"loss": 1.1182,
"num_input_tokens_seen": 1048576,
"step": 4
},
{
"epoch": 0.001233654083395016,
"grad_norm": 0.45358872413635254,
"learning_rate": 4.000000000000001e-06,
"loss": 1.5811,
"num_input_tokens_seen": 1310720,
"step": 5
},
{
"epoch": 0.0014803849000740192,
"grad_norm": 0.35151898860931396,
"learning_rate": 5e-06,
"loss": 1.5204,
"num_input_tokens_seen": 1572864,
"step": 6
},
{
"epoch": 0.0017271157167530224,
"grad_norm": 0.3637697696685791,
"learning_rate": 6e-06,
"loss": 1.7379,
"num_input_tokens_seen": 1835008,
"step": 7
},
{
"epoch": 0.0019738465334320256,
"grad_norm": 0.3194856345653534,
"learning_rate": 7.000000000000001e-06,
"loss": 1.5226,
"num_input_tokens_seen": 2097152,
"step": 8
},
{
"epoch": 0.0022205773501110288,
"grad_norm": 0.34234800934791565,
"learning_rate": 8.000000000000001e-06,
"loss": 1.2595,
"num_input_tokens_seen": 2359296,
"step": 9
},
{
"epoch": 0.002467308166790032,
"grad_norm": 0.4049583077430725,
"learning_rate": 9e-06,
"loss": 1.7128,
"num_input_tokens_seen": 2621440,
"step": 10
},
{
"epoch": 0.002714038983469035,
"grad_norm": 0.47542592883110046,
"learning_rate": 1e-05,
"loss": 1.7534,
"num_input_tokens_seen": 2883584,
"step": 11
},
{
"epoch": 0.0029607698001480384,
"grad_norm": 0.4367331564426422,
"learning_rate": 1.1000000000000001e-05,
"loss": 1.437,
"num_input_tokens_seen": 3145728,
"step": 12
},
{
"epoch": 0.0032075006168270415,
"grad_norm": 0.4086206555366516,
"learning_rate": 1.2e-05,
"loss": 1.2994,
"num_input_tokens_seen": 3407872,
"step": 13
},
{
"epoch": 0.0034542314335060447,
"grad_norm": 0.38126683235168457,
"learning_rate": 1.3000000000000001e-05,
"loss": 1.5317,
"num_input_tokens_seen": 3670016,
"step": 14
},
{
"epoch": 0.003700962250185048,
"grad_norm": 0.3201878070831299,
"learning_rate": 1.4000000000000001e-05,
"loss": 1.4077,
"num_input_tokens_seen": 3932160,
"step": 15
},
{
"epoch": 0.003947693066864051,
"grad_norm": 0.23100663721561432,
"learning_rate": 1.5e-05,
"loss": 1.0506,
"num_input_tokens_seen": 4194304,
"step": 16
},
{
"epoch": 0.004194423883543055,
"grad_norm": 0.4399906098842621,
"learning_rate": 1.6000000000000003e-05,
"loss": 1.3533,
"num_input_tokens_seen": 4456448,
"step": 17
},
{
"epoch": 0.0044411547002220575,
"grad_norm": 0.5323538184165955,
"learning_rate": 1.7000000000000003e-05,
"loss": 1.2901,
"num_input_tokens_seen": 4718592,
"step": 18
},
{
"epoch": 0.004687885516901061,
"grad_norm": 0.37430882453918457,
"learning_rate": 1.8e-05,
"loss": 1.5109,
"num_input_tokens_seen": 4980736,
"step": 19
},
{
"epoch": 0.004934616333580064,
"grad_norm": 0.3446390628814697,
"learning_rate": 1.9e-05,
"loss": 1.2963,
"num_input_tokens_seen": 5242880,
"step": 20
},
{
"epoch": 0.0051813471502590676,
"grad_norm": 0.47858181595802307,
"learning_rate": 2e-05,
"loss": 1.4663,
"num_input_tokens_seen": 5505024,
"step": 21
},
{
"epoch": 0.00542807796693807,
"grad_norm": 0.35884952545166016,
"learning_rate": 2.1e-05,
"loss": 1.4294,
"num_input_tokens_seen": 5767168,
"step": 22
},
{
"epoch": 0.005674808783617074,
"grad_norm": 0.4062343239784241,
"learning_rate": 2.2000000000000003e-05,
"loss": 1.7024,
"num_input_tokens_seen": 6029312,
"step": 23
},
{
"epoch": 0.005921539600296077,
"grad_norm": 0.35999780893325806,
"learning_rate": 2.3000000000000003e-05,
"loss": 1.4329,
"num_input_tokens_seen": 6291456,
"step": 24
},
{
"epoch": 0.00616827041697508,
"grad_norm": 0.46909573674201965,
"learning_rate": 2.4e-05,
"loss": 1.7412,
"num_input_tokens_seen": 6553600,
"step": 25
},
{
"epoch": 0.006415001233654083,
"grad_norm": 0.39666008949279785,
"learning_rate": 2.5e-05,
"loss": 1.6779,
"num_input_tokens_seen": 6815744,
"step": 26
},
{
"epoch": 0.006661732050333087,
"grad_norm": 0.3486783504486084,
"learning_rate": 2.6000000000000002e-05,
"loss": 1.3641,
"num_input_tokens_seen": 7077888,
"step": 27
},
{
"epoch": 0.0069084628670120895,
"grad_norm": 0.3600463271141052,
"learning_rate": 2.7000000000000002e-05,
"loss": 1.5059,
"num_input_tokens_seen": 7340032,
"step": 28
},
{
"epoch": 0.007155193683691093,
"grad_norm": 0.386270135641098,
"learning_rate": 2.8000000000000003e-05,
"loss": 1.2714,
"num_input_tokens_seen": 7602176,
"step": 29
},
{
"epoch": 0.007401924500370096,
"grad_norm": 0.30916154384613037,
"learning_rate": 2.9e-05,
"loss": 1.4676,
"num_input_tokens_seen": 7864320,
"step": 30
},
{
"epoch": 0.0076486553170490995,
"grad_norm": 0.5932436585426331,
"learning_rate": 3e-05,
"loss": 1.2557,
"num_input_tokens_seen": 8126464,
"step": 31
},
{
"epoch": 0.007895386133728102,
"grad_norm": 0.4815100133419037,
"learning_rate": 3.1e-05,
"loss": 1.7859,
"num_input_tokens_seen": 8388608,
"step": 32
},
{
"epoch": 0.008142116950407105,
"grad_norm": 0.3603292405605316,
"learning_rate": 3.2000000000000005e-05,
"loss": 1.5668,
"num_input_tokens_seen": 8650752,
"step": 33
},
{
"epoch": 0.00838884776708611,
"grad_norm": 0.4051978886127472,
"learning_rate": 3.3e-05,
"loss": 1.7096,
"num_input_tokens_seen": 8912896,
"step": 34
},
{
"epoch": 0.008635578583765112,
"grad_norm": 0.353782057762146,
"learning_rate": 3.4000000000000007e-05,
"loss": 1.7492,
"num_input_tokens_seen": 9175040,
"step": 35
},
{
"epoch": 0.008882309400444115,
"grad_norm": 0.38991788029670715,
"learning_rate": 3.5e-05,
"loss": 1.2246,
"num_input_tokens_seen": 9437184,
"step": 36
},
{
"epoch": 0.009129040217123118,
"grad_norm": 0.44350436329841614,
"learning_rate": 3.6e-05,
"loss": 0.982,
"num_input_tokens_seen": 9699328,
"step": 37
},
{
"epoch": 0.009375771033802122,
"grad_norm": 4.577005863189697,
"learning_rate": 3.7e-05,
"loss": 1.6495,
"num_input_tokens_seen": 9961472,
"step": 38
},
{
"epoch": 0.009622501850481125,
"grad_norm": 0.4128067195415497,
"learning_rate": 3.8e-05,
"loss": 1.7109,
"num_input_tokens_seen": 10223616,
"step": 39
},
{
"epoch": 0.009869232667160128,
"grad_norm": 0.36403635144233704,
"learning_rate": 3.9000000000000006e-05,
"loss": 1.2335,
"num_input_tokens_seen": 10485760,
"step": 40
},
{
"epoch": 0.010115963483839132,
"grad_norm": 0.5799858570098877,
"learning_rate": 4e-05,
"loss": 1.2349,
"num_input_tokens_seen": 10747904,
"step": 41
},
{
"epoch": 0.010362694300518135,
"grad_norm": 0.922766923904419,
"learning_rate": 4.1e-05,
"loss": 1.4363,
"num_input_tokens_seen": 11010048,
"step": 42
},
{
"epoch": 0.010609425117197138,
"grad_norm": 0.35666200518608093,
"learning_rate": 4.2e-05,
"loss": 1.5887,
"num_input_tokens_seen": 11272192,
"step": 43
},
{
"epoch": 0.01085615593387614,
"grad_norm": 0.5212157964706421,
"learning_rate": 4.3e-05,
"loss": 1.3912,
"num_input_tokens_seen": 11534336,
"step": 44
},
{
"epoch": 0.011102886750555145,
"grad_norm": 0.3892502188682556,
"learning_rate": 4.4000000000000006e-05,
"loss": 1.3146,
"num_input_tokens_seen": 11796480,
"step": 45
},
{
"epoch": 0.011349617567234148,
"grad_norm": 0.7055127024650574,
"learning_rate": 4.5e-05,
"loss": 1.3679,
"num_input_tokens_seen": 12058624,
"step": 46
},
{
"epoch": 0.01159634838391315,
"grad_norm": 0.272801011800766,
"learning_rate": 4.600000000000001e-05,
"loss": 1.7381,
"num_input_tokens_seen": 12320768,
"step": 47
},
{
"epoch": 0.011843079200592153,
"grad_norm": 0.3756349980831146,
"learning_rate": 4.7e-05,
"loss": 1.3893,
"num_input_tokens_seen": 12582912,
"step": 48
},
{
"epoch": 0.012089810017271158,
"grad_norm": 0.5426861643791199,
"learning_rate": 4.8e-05,
"loss": 1.4895,
"num_input_tokens_seen": 12845056,
"step": 49
},
{
"epoch": 0.01233654083395016,
"grad_norm": 0.49536797404289246,
"learning_rate": 4.9e-05,
"loss": 1.4181,
"num_input_tokens_seen": 13107200,
"step": 50
},
{
"epoch": 0.012583271650629163,
"grad_norm": 0.49056124687194824,
"learning_rate": 5e-05,
"loss": 1.5316,
"num_input_tokens_seen": 13369344,
"step": 51
},
{
"epoch": 0.012830002467308166,
"grad_norm": 0.6074831485748291,
"learning_rate": 5.1000000000000006e-05,
"loss": 1.5521,
"num_input_tokens_seen": 13631488,
"step": 52
},
{
"epoch": 0.01307673328398717,
"grad_norm": 0.38954874873161316,
"learning_rate": 5.2000000000000004e-05,
"loss": 1.8396,
"num_input_tokens_seen": 13893632,
"step": 53
},
{
"epoch": 0.013323464100666173,
"grad_norm": 0.4438922107219696,
"learning_rate": 5.300000000000001e-05,
"loss": 1.4353,
"num_input_tokens_seen": 14155776,
"step": 54
},
{
"epoch": 0.013570194917345176,
"grad_norm": 1.364662766456604,
"learning_rate": 5.4000000000000005e-05,
"loss": 1.3837,
"num_input_tokens_seen": 14417920,
"step": 55
},
{
"epoch": 0.013816925734024179,
"grad_norm": 0.5157312750816345,
"learning_rate": 5.500000000000001e-05,
"loss": 1.2278,
"num_input_tokens_seen": 14680064,
"step": 56
},
{
"epoch": 0.014063656550703183,
"grad_norm": 0.35758545994758606,
"learning_rate": 5.6000000000000006e-05,
"loss": 1.092,
"num_input_tokens_seen": 14942208,
"step": 57
},
{
"epoch": 0.014310387367382186,
"grad_norm": 0.37459567189216614,
"learning_rate": 5.6999999999999996e-05,
"loss": 1.47,
"num_input_tokens_seen": 15204352,
"step": 58
},
{
"epoch": 0.014557118184061189,
"grad_norm": 2.30647349357605,
"learning_rate": 5.8e-05,
"loss": 1.2387,
"num_input_tokens_seen": 15466496,
"step": 59
},
{
"epoch": 0.014803849000740192,
"grad_norm": 0.44988682866096497,
"learning_rate": 5.9e-05,
"loss": 1.168,
"num_input_tokens_seen": 15728640,
"step": 60
},
{
"epoch": 0.015050579817419196,
"grad_norm": 0.5521331429481506,
"learning_rate": 6e-05,
"loss": 1.9515,
"num_input_tokens_seen": 15990784,
"step": 61
},
{
"epoch": 0.015297310634098199,
"grad_norm": 1.0549205541610718,
"learning_rate": 6.1e-05,
"loss": 1.5939,
"num_input_tokens_seen": 16252928,
"step": 62
},
{
"epoch": 0.015544041450777202,
"grad_norm": 0.42327436804771423,
"learning_rate": 6.2e-05,
"loss": 1.1043,
"num_input_tokens_seen": 16515072,
"step": 63
},
{
"epoch": 0.015790772267456205,
"grad_norm": 0.3283379077911377,
"learning_rate": 6.3e-05,
"loss": 1.7087,
"num_input_tokens_seen": 16777216,
"step": 64
},
{
"epoch": 0.016037503084135207,
"grad_norm": 0.4458586573600769,
"learning_rate": 6.400000000000001e-05,
"loss": 1.0612,
"num_input_tokens_seen": 17039360,
"step": 65
},
{
"epoch": 0.01628423390081421,
"grad_norm": 0.48078301548957825,
"learning_rate": 6.500000000000001e-05,
"loss": 1.7329,
"num_input_tokens_seen": 17301504,
"step": 66
},
{
"epoch": 0.016530964717493216,
"grad_norm": 0.794426441192627,
"learning_rate": 6.6e-05,
"loss": 1.2415,
"num_input_tokens_seen": 17563648,
"step": 67
},
{
"epoch": 0.01677769553417222,
"grad_norm": 0.6253416538238525,
"learning_rate": 6.7e-05,
"loss": 1.4783,
"num_input_tokens_seen": 17825792,
"step": 68
},
{
"epoch": 0.017024426350851222,
"grad_norm": 0.4074600040912628,
"learning_rate": 6.800000000000001e-05,
"loss": 1.4269,
"num_input_tokens_seen": 18087936,
"step": 69
},
{
"epoch": 0.017271157167530225,
"grad_norm": 0.46961918473243713,
"learning_rate": 6.9e-05,
"loss": 1.3363,
"num_input_tokens_seen": 18350080,
"step": 70
},
{
"epoch": 0.017517887984209227,
"grad_norm": 0.46610549092292786,
"learning_rate": 7e-05,
"loss": 1.2189,
"num_input_tokens_seen": 18612224,
"step": 71
},
{
"epoch": 0.01776461880088823,
"grad_norm": 0.267548531293869,
"learning_rate": 7.1e-05,
"loss": 0.873,
"num_input_tokens_seen": 18874368,
"step": 72
},
{
"epoch": 0.018011349617567233,
"grad_norm": 0.8327587842941284,
"learning_rate": 7.2e-05,
"loss": 1.0492,
"num_input_tokens_seen": 19136512,
"step": 73
},
{
"epoch": 0.018258080434246236,
"grad_norm": 0.5072880387306213,
"learning_rate": 7.3e-05,
"loss": 1.6973,
"num_input_tokens_seen": 19398656,
"step": 74
},
{
"epoch": 0.018504811250925242,
"grad_norm": 0.6288747787475586,
"learning_rate": 7.4e-05,
"loss": 1.5766,
"num_input_tokens_seen": 19660800,
"step": 75
},
{
"epoch": 0.018751542067604245,
"grad_norm": 0.8449070453643799,
"learning_rate": 7.500000000000001e-05,
"loss": 1.3305,
"num_input_tokens_seen": 19922944,
"step": 76
},
{
"epoch": 0.018998272884283247,
"grad_norm": 0.6885783672332764,
"learning_rate": 7.6e-05,
"loss": 1.6091,
"num_input_tokens_seen": 20185088,
"step": 77
},
{
"epoch": 0.01924500370096225,
"grad_norm": 0.39635786414146423,
"learning_rate": 7.7e-05,
"loss": 1.3095,
"num_input_tokens_seen": 20447232,
"step": 78
},
{
"epoch": 0.019491734517641253,
"grad_norm": 0.6766145825386047,
"learning_rate": 7.800000000000001e-05,
"loss": 1.8017,
"num_input_tokens_seen": 20709376,
"step": 79
},
{
"epoch": 0.019738465334320256,
"grad_norm": 0.6316500902175903,
"learning_rate": 7.900000000000001e-05,
"loss": 1.2112,
"num_input_tokens_seen": 20971520,
"step": 80
},
{
"epoch": 0.01998519615099926,
"grad_norm": 0.3548561632633209,
"learning_rate": 8e-05,
"loss": 1.6936,
"num_input_tokens_seen": 21233664,
"step": 81
},
{
"epoch": 0.020231926967678265,
"grad_norm": 0.4684405028820038,
"learning_rate": 8.1e-05,
"loss": 1.7941,
"num_input_tokens_seen": 21495808,
"step": 82
},
{
"epoch": 0.020478657784357267,
"grad_norm": 0.3727128803730011,
"learning_rate": 8.2e-05,
"loss": 1.5322,
"num_input_tokens_seen": 21757952,
"step": 83
},
{
"epoch": 0.02072538860103627,
"grad_norm": 0.4873284697532654,
"learning_rate": 8.3e-05,
"loss": 1.2567,
"num_input_tokens_seen": 22020096,
"step": 84
},
{
"epoch": 0.020972119417715273,
"grad_norm": 0.5489657521247864,
"learning_rate": 8.4e-05,
"loss": 1.7528,
"num_input_tokens_seen": 22282240,
"step": 85
},
{
"epoch": 0.021218850234394276,
"grad_norm": 0.630950391292572,
"learning_rate": 8.5e-05,
"loss": 1.7512,
"num_input_tokens_seen": 22544384,
"step": 86
},
{
"epoch": 0.02146558105107328,
"grad_norm": 0.3701987862586975,
"learning_rate": 8.6e-05,
"loss": 1.7504,
"num_input_tokens_seen": 22806528,
"step": 87
},
{
"epoch": 0.02171231186775228,
"grad_norm": 0.4489085376262665,
"learning_rate": 8.7e-05,
"loss": 1.8433,
"num_input_tokens_seen": 23068672,
"step": 88
},
{
"epoch": 0.021959042684431284,
"grad_norm": 0.5469816327095032,
"learning_rate": 8.800000000000001e-05,
"loss": 1.7353,
"num_input_tokens_seen": 23330816,
"step": 89
},
{
"epoch": 0.02220577350111029,
"grad_norm": 0.27171701192855835,
"learning_rate": 8.900000000000001e-05,
"loss": 1.3133,
"num_input_tokens_seen": 23592960,
"step": 90
},
{
"epoch": 0.022452504317789293,
"grad_norm": 0.45458948612213135,
"learning_rate": 9e-05,
"loss": 1.2749,
"num_input_tokens_seen": 23855104,
"step": 91
},
{
"epoch": 0.022699235134468296,
"grad_norm": 0.42225536704063416,
"learning_rate": 9.1e-05,
"loss": 1.763,
"num_input_tokens_seen": 24117248,
"step": 92
},
{
"epoch": 0.0229459659511473,
"grad_norm": 0.35495418310165405,
"learning_rate": 9.200000000000001e-05,
"loss": 1.1096,
"num_input_tokens_seen": 24379392,
"step": 93
},
{
"epoch": 0.0231926967678263,
"grad_norm": 0.36772969365119934,
"learning_rate": 9.300000000000001e-05,
"loss": 1.053,
"num_input_tokens_seen": 24641536,
"step": 94
},
{
"epoch": 0.023439427584505304,
"grad_norm": 0.41521337628364563,
"learning_rate": 9.4e-05,
"loss": 1.3017,
"num_input_tokens_seen": 24903680,
"step": 95
},
{
"epoch": 0.023686158401184307,
"grad_norm": 0.45779621601104736,
"learning_rate": 9.5e-05,
"loss": 1.5996,
"num_input_tokens_seen": 25165824,
"step": 96
},
{
"epoch": 0.02393288921786331,
"grad_norm": 0.7741313576698303,
"learning_rate": 9.6e-05,
"loss": 1.7635,
"num_input_tokens_seen": 25427968,
"step": 97
},
{
"epoch": 0.024179620034542316,
"grad_norm": 0.5691494345664978,
"learning_rate": 9.7e-05,
"loss": 1.4335,
"num_input_tokens_seen": 25690112,
"step": 98
},
{
"epoch": 0.02442635085122132,
"grad_norm": 0.37105467915534973,
"learning_rate": 9.8e-05,
"loss": 1.6566,
"num_input_tokens_seen": 25952256,
"step": 99
},
{
"epoch": 0.02467308166790032,
"grad_norm": 1.3863261938095093,
"learning_rate": 9.900000000000001e-05,
"loss": 1.3461,
"num_input_tokens_seen": 26214400,
"step": 100
},
{
"epoch": 0.024919812484579324,
"grad_norm": 0.7654994130134583,
"learning_rate": 0.0001,
"loss": 1.2691,
"num_input_tokens_seen": 26476544,
"step": 101
},
{
"epoch": 0.025166543301258327,
"grad_norm": 0.4445570707321167,
"learning_rate": 9.999969538288952e-05,
"loss": 1.402,
"num_input_tokens_seen": 26738688,
"step": 102
},
{
"epoch": 0.02541327411793733,
"grad_norm": 0.526924192905426,
"learning_rate": 9.999878153526974e-05,
"loss": 2.0995,
"num_input_tokens_seen": 27000832,
"step": 103
},
{
"epoch": 0.025660004934616332,
"grad_norm": 0.46278372406959534,
"learning_rate": 9.999725846827562e-05,
"loss": 1.5967,
"num_input_tokens_seen": 27262976,
"step": 104
},
{
"epoch": 0.025906735751295335,
"grad_norm": 1.914865493774414,
"learning_rate": 9.999512620046522e-05,
"loss": 1.1665,
"num_input_tokens_seen": 27525120,
"step": 105
},
{
"epoch": 0.02615346656797434,
"grad_norm": 0.5774622559547424,
"learning_rate": 9.999238475781957e-05,
"loss": 1.2963,
"num_input_tokens_seen": 27787264,
"step": 106
},
{
"epoch": 0.026400197384653344,
"grad_norm": 0.3932192623615265,
"learning_rate": 9.998903417374228e-05,
"loss": 1.8184,
"num_input_tokens_seen": 28049408,
"step": 107
},
{
"epoch": 0.026646928201332347,
"grad_norm": 0.6024709343910217,
"learning_rate": 9.998507448905917e-05,
"loss": 1.3204,
"num_input_tokens_seen": 28311552,
"step": 108
},
{
"epoch": 0.02689365901801135,
"grad_norm": 0.3759059011936188,
"learning_rate": 9.998050575201771e-05,
"loss": 1.2489,
"num_input_tokens_seen": 28573696,
"step": 109
},
{
"epoch": 0.027140389834690352,
"grad_norm": 0.5642956495285034,
"learning_rate": 9.997532801828658e-05,
"loss": 1.79,
"num_input_tokens_seen": 28835840,
"step": 110
},
{
"epoch": 0.027387120651369355,
"grad_norm": 0.38600432872772217,
"learning_rate": 9.99695413509548e-05,
"loss": 0.8276,
"num_input_tokens_seen": 29097984,
"step": 111
},
{
"epoch": 0.027633851468048358,
"grad_norm": 0.5042186379432678,
"learning_rate": 9.996314582053106e-05,
"loss": 1.1216,
"num_input_tokens_seen": 29360128,
"step": 112
},
{
"epoch": 0.02788058228472736,
"grad_norm": 1.0430231094360352,
"learning_rate": 9.995614150494293e-05,
"loss": 1.2334,
"num_input_tokens_seen": 29622272,
"step": 113
},
{
"epoch": 0.028127313101406367,
"grad_norm": 1.2170257568359375,
"learning_rate": 9.994852848953574e-05,
"loss": 1.2479,
"num_input_tokens_seen": 29884416,
"step": 114
},
{
"epoch": 0.02837404391808537,
"grad_norm": 0.47523143887519836,
"learning_rate": 9.99403068670717e-05,
"loss": 1.2303,
"num_input_tokens_seen": 30146560,
"step": 115
},
{
"epoch": 0.028620774734764372,
"grad_norm": 0.751774787902832,
"learning_rate": 9.99314767377287e-05,
"loss": 1.0186,
"num_input_tokens_seen": 30408704,
"step": 116
},
{
"epoch": 0.028867505551443375,
"grad_norm": 0.39458879828453064,
"learning_rate": 9.992203820909906e-05,
"loss": 1.3563,
"num_input_tokens_seen": 30670848,
"step": 117
},
{
"epoch": 0.029114236368122378,
"grad_norm": 0.3054855167865753,
"learning_rate": 9.991199139618827e-05,
"loss": 1.2616,
"num_input_tokens_seen": 30932992,
"step": 118
},
{
"epoch": 0.02936096718480138,
"grad_norm": 0.6859539747238159,
"learning_rate": 9.990133642141359e-05,
"loss": 1.6226,
"num_input_tokens_seen": 31195136,
"step": 119
},
{
"epoch": 0.029607698001480384,
"grad_norm": 0.6344724297523499,
"learning_rate": 9.98900734146025e-05,
"loss": 1.7549,
"num_input_tokens_seen": 31457280,
"step": 120
},
{
"epoch": 0.02985442881815939,
"grad_norm": 0.6025246381759644,
"learning_rate": 9.987820251299122e-05,
"loss": 1.4919,
"num_input_tokens_seen": 31719424,
"step": 121
},
{
"epoch": 0.030101159634838393,
"grad_norm": 0.7202728986740112,
"learning_rate": 9.986572386122291e-05,
"loss": 1.3798,
"num_input_tokens_seen": 31981568,
"step": 122
},
{
"epoch": 0.030347890451517395,
"grad_norm": 0.6441293358802795,
"learning_rate": 9.985263761134602e-05,
"loss": 1.2326,
"num_input_tokens_seen": 32243712,
"step": 123
},
{
"epoch": 0.030594621268196398,
"grad_norm": 0.3855745494365692,
"learning_rate": 9.983894392281237e-05,
"loss": 0.9208,
"num_input_tokens_seen": 32505856,
"step": 124
},
{
"epoch": 0.0308413520848754,
"grad_norm": 0.4079477787017822,
"learning_rate": 9.982464296247522e-05,
"loss": 1.553,
"num_input_tokens_seen": 32768000,
"step": 125
},
{
"epoch": 0.031088082901554404,
"grad_norm": 0.9520876407623291,
"learning_rate": 9.980973490458728e-05,
"loss": 1.5549,
"num_input_tokens_seen": 33030144,
"step": 126
},
{
"epoch": 0.031334813718233406,
"grad_norm": 0.4380514919757843,
"learning_rate": 9.979421993079852e-05,
"loss": 1.9656,
"num_input_tokens_seen": 33292288,
"step": 127
},
{
"epoch": 0.03158154453491241,
"grad_norm": 0.6032424569129944,
"learning_rate": 9.977809823015401e-05,
"loss": 1.6197,
"num_input_tokens_seen": 33554432,
"step": 128
},
{
"epoch": 0.03182827535159141,
"grad_norm": 0.3634704053401947,
"learning_rate": 9.976136999909156e-05,
"loss": 1.7347,
"num_input_tokens_seen": 33816576,
"step": 129
},
{
"epoch": 0.032075006168270415,
"grad_norm": 0.39964982867240906,
"learning_rate": 9.974403544143941e-05,
"loss": 1.1744,
"num_input_tokens_seen": 34078720,
"step": 130
},
{
"epoch": 0.03232173698494942,
"grad_norm": 0.4048938453197479,
"learning_rate": 9.972609476841367e-05,
"loss": 1.1374,
"num_input_tokens_seen": 34340864,
"step": 131
},
{
"epoch": 0.03256846780162842,
"grad_norm": 0.4770902097225189,
"learning_rate": 9.970754819861577e-05,
"loss": 1.5735,
"num_input_tokens_seen": 34603008,
"step": 132
},
{
"epoch": 0.03281519861830743,
"grad_norm": 0.3113003075122833,
"learning_rate": 9.968839595802982e-05,
"loss": 1.6585,
"num_input_tokens_seen": 34865152,
"step": 133
},
{
"epoch": 0.03306192943498643,
"grad_norm": 0.45338618755340576,
"learning_rate": 9.966863828001982e-05,
"loss": 1.2114,
"num_input_tokens_seen": 35127296,
"step": 134
},
{
"epoch": 0.033308660251665435,
"grad_norm": 0.5252267718315125,
"learning_rate": 9.964827540532685e-05,
"loss": 1.6776,
"num_input_tokens_seen": 35389440,
"step": 135
},
{
"epoch": 0.03355539106834444,
"grad_norm": 0.473983496427536,
"learning_rate": 9.962730758206611e-05,
"loss": 1.6288,
"num_input_tokens_seen": 35651584,
"step": 136
},
{
"epoch": 0.03380212188502344,
"grad_norm": 0.5303152799606323,
"learning_rate": 9.96057350657239e-05,
"loss": 1.5329,
"num_input_tokens_seen": 35913728,
"step": 137
},
{
"epoch": 0.034048852701702444,
"grad_norm": 0.392697274684906,
"learning_rate": 9.958355811915451e-05,
"loss": 1.4416,
"num_input_tokens_seen": 36175872,
"step": 138
},
{
"epoch": 0.034295583518381446,
"grad_norm": 0.5483731031417847,
"learning_rate": 9.956077701257709e-05,
"loss": 1.792,
"num_input_tokens_seen": 36438016,
"step": 139
},
{
"epoch": 0.03454231433506045,
"grad_norm": 0.356476753950119,
"learning_rate": 9.953739202357218e-05,
"loss": 0.9378,
"num_input_tokens_seen": 36700160,
"step": 140
},
{
"epoch": 0.03478904515173945,
"grad_norm": 0.8314815163612366,
"learning_rate": 9.951340343707852e-05,
"loss": 1.4669,
"num_input_tokens_seen": 36962304,
"step": 141
},
{
"epoch": 0.035035775968418455,
"grad_norm": 0.39392971992492676,
"learning_rate": 9.948881154538945e-05,
"loss": 1.2483,
"num_input_tokens_seen": 37224448,
"step": 142
},
{
"epoch": 0.03528250678509746,
"grad_norm": 0.8657582998275757,
"learning_rate": 9.946361664814943e-05,
"loss": 1.4105,
"num_input_tokens_seen": 37486592,
"step": 143
},
{
"epoch": 0.03552923760177646,
"grad_norm": 0.5054273009300232,
"learning_rate": 9.94378190523503e-05,
"loss": 1.2803,
"num_input_tokens_seen": 37748736,
"step": 144
},
{
"epoch": 0.03577596841845546,
"grad_norm": 1.148282766342163,
"learning_rate": 9.941141907232765e-05,
"loss": 1.339,
"num_input_tokens_seen": 38010880,
"step": 145
},
{
"epoch": 0.036022699235134466,
"grad_norm": 0.5148569941520691,
"learning_rate": 9.938441702975689e-05,
"loss": 2.0715,
"num_input_tokens_seen": 38273024,
"step": 146
},
{
"epoch": 0.03626943005181347,
"grad_norm": 0.3367674648761749,
"learning_rate": 9.93568132536494e-05,
"loss": 1.3216,
"num_input_tokens_seen": 38535168,
"step": 147
},
{
"epoch": 0.03651616086849247,
"grad_norm": 0.410000205039978,
"learning_rate": 9.932860808034848e-05,
"loss": 1.265,
"num_input_tokens_seen": 38797312,
"step": 148
},
{
"epoch": 0.03676289168517148,
"grad_norm": 0.5379494428634644,
"learning_rate": 9.929980185352526e-05,
"loss": 1.7243,
"num_input_tokens_seen": 39059456,
"step": 149
},
{
"epoch": 0.037009622501850484,
"grad_norm": 1.1965843439102173,
"learning_rate": 9.927039492417452e-05,
"loss": 1.561,
"num_input_tokens_seen": 39321600,
"step": 150
},
{
"epoch": 0.037256353318529487,
"grad_norm": 0.5376553535461426,
"learning_rate": 9.924038765061042e-05,
"loss": 1.5289,
"num_input_tokens_seen": 39583744,
"step": 151
},
{
"epoch": 0.03750308413520849,
"grad_norm": 0.9800808429718018,
"learning_rate": 9.92097803984621e-05,
"loss": 1.6463,
"num_input_tokens_seen": 39845888,
"step": 152
},
{
"epoch": 0.03774981495188749,
"grad_norm": 0.4321368932723999,
"learning_rate": 9.917857354066931e-05,
"loss": 1.6065,
"num_input_tokens_seen": 40108032,
"step": 153
},
{
"epoch": 0.037996545768566495,
"grad_norm": 0.49912288784980774,
"learning_rate": 9.914676745747772e-05,
"loss": 1.4269,
"num_input_tokens_seen": 40370176,
"step": 154
},
{
"epoch": 0.0382432765852455,
"grad_norm": 0.4205571115016937,
"learning_rate": 9.911436253643445e-05,
"loss": 1.7257,
"num_input_tokens_seen": 40632320,
"step": 155
},
{
"epoch": 0.0384900074019245,
"grad_norm": 1.0689679384231567,
"learning_rate": 9.908135917238321e-05,
"loss": 1.3598,
"num_input_tokens_seen": 40894464,
"step": 156
},
{
"epoch": 0.0387367382186035,
"grad_norm": 0.4593941569328308,
"learning_rate": 9.904775776745958e-05,
"loss": 1.3507,
"num_input_tokens_seen": 41156608,
"step": 157
},
{
"epoch": 0.038983469035282506,
"grad_norm": 0.7887967228889465,
"learning_rate": 9.901355873108609e-05,
"loss": 1.1453,
"num_input_tokens_seen": 41418752,
"step": 158
},
{
"epoch": 0.03923019985196151,
"grad_norm": 0.5248556137084961,
"learning_rate": 9.89787624799672e-05,
"loss": 1.5824,
"num_input_tokens_seen": 41680896,
"step": 159
},
{
"epoch": 0.03947693066864051,
"grad_norm": 0.8247896432876587,
"learning_rate": 9.894336943808426e-05,
"loss": 1.6218,
"num_input_tokens_seen": 41943040,
"step": 160
},
{
"epoch": 0.039723661485319514,
"grad_norm": 0.6275323629379272,
"learning_rate": 9.890738003669029e-05,
"loss": 1.4403,
"num_input_tokens_seen": 42205184,
"step": 161
},
{
"epoch": 0.03997039230199852,
"grad_norm": 0.43640977144241333,
"learning_rate": 9.88707947143048e-05,
"loss": 1.8345,
"num_input_tokens_seen": 42467328,
"step": 162
},
{
"epoch": 0.04021712311867752,
"grad_norm": 0.44494208693504333,
"learning_rate": 9.88336139167084e-05,
"loss": 1.644,
"num_input_tokens_seen": 42729472,
"step": 163
},
{
"epoch": 0.04046385393535653,
"grad_norm": 0.769094705581665,
"learning_rate": 9.879583809693738e-05,
"loss": 1.3223,
"num_input_tokens_seen": 42991616,
"step": 164
},
{
"epoch": 0.04071058475203553,
"grad_norm": 0.5450881123542786,
"learning_rate": 9.875746771527816e-05,
"loss": 1.2423,
"num_input_tokens_seen": 43253760,
"step": 165
},
{
"epoch": 0.040957315568714535,
"grad_norm": 0.45923635363578796,
"learning_rate": 9.871850323926177e-05,
"loss": 1.2862,
"num_input_tokens_seen": 43515904,
"step": 166
},
{
"epoch": 0.04120404638539354,
"grad_norm": 0.4331710636615753,
"learning_rate": 9.867894514365802e-05,
"loss": 1.6613,
"num_input_tokens_seen": 43778048,
"step": 167
},
{
"epoch": 0.04145077720207254,
"grad_norm": 0.41940009593963623,
"learning_rate": 9.863879391046984e-05,
"loss": 1.7933,
"num_input_tokens_seen": 44040192,
"step": 168
},
{
"epoch": 0.04169750801875154,
"grad_norm": 0.6464211344718933,
"learning_rate": 9.859805002892732e-05,
"loss": 1.3993,
"num_input_tokens_seen": 44302336,
"step": 169
},
{
"epoch": 0.041944238835430546,
"grad_norm": 0.328808456659317,
"learning_rate": 9.855671399548181e-05,
"loss": 1.3791,
"num_input_tokens_seen": 44564480,
"step": 170
},
{
"epoch": 0.04219096965210955,
"grad_norm": 0.5391842126846313,
"learning_rate": 9.851478631379982e-05,
"loss": 1.8677,
"num_input_tokens_seen": 44826624,
"step": 171
},
{
"epoch": 0.04243770046878855,
"grad_norm": 0.65064537525177,
"learning_rate": 9.847226749475695e-05,
"loss": 1.1168,
"num_input_tokens_seen": 45088768,
"step": 172
},
{
"epoch": 0.042684431285467554,
"grad_norm": 0.43219423294067383,
"learning_rate": 9.842915805643155e-05,
"loss": 1.5461,
"num_input_tokens_seen": 45350912,
"step": 173
},
{
"epoch": 0.04293116210214656,
"grad_norm": 0.8228839039802551,
"learning_rate": 9.838545852409857e-05,
"loss": 1.5798,
"num_input_tokens_seen": 45613056,
"step": 174
},
{
"epoch": 0.04317789291882556,
"grad_norm": 0.4489528238773346,
"learning_rate": 9.834116943022298e-05,
"loss": 1.5903,
"num_input_tokens_seen": 45875200,
"step": 175
},
{
"epoch": 0.04342462373550456,
"grad_norm": 0.4763755202293396,
"learning_rate": 9.829629131445342e-05,
"loss": 1.4622,
"num_input_tokens_seen": 46137344,
"step": 176
},
{
"epoch": 0.043671354552183565,
"grad_norm": 0.904607355594635,
"learning_rate": 9.825082472361557e-05,
"loss": 1.6451,
"num_input_tokens_seen": 46399488,
"step": 177
},
{
"epoch": 0.04391808536886257,
"grad_norm": 0.4937230348587036,
"learning_rate": 9.820477021170551e-05,
"loss": 1.6693,
"num_input_tokens_seen": 46661632,
"step": 178
},
{
"epoch": 0.04416481618554157,
"grad_norm": 0.6768471598625183,
"learning_rate": 9.815812833988291e-05,
"loss": 1.6162,
"num_input_tokens_seen": 46923776,
"step": 179
},
{
"epoch": 0.04441154700222058,
"grad_norm": 0.4788267910480499,
"learning_rate": 9.811089967646428e-05,
"loss": 1.6687,
"num_input_tokens_seen": 47185920,
"step": 180
},
{
"epoch": 0.04465827781889958,
"grad_norm": 0.44184860587120056,
"learning_rate": 9.806308479691595e-05,
"loss": 1.5909,
"num_input_tokens_seen": 47448064,
"step": 181
},
{
"epoch": 0.044905008635578586,
"grad_norm": 0.45382142066955566,
"learning_rate": 9.801468428384716e-05,
"loss": 1.9489,
"num_input_tokens_seen": 47710208,
"step": 182
},
{
"epoch": 0.04515173945225759,
"grad_norm": 0.4858924448490143,
"learning_rate": 9.796569872700288e-05,
"loss": 1.5498,
"num_input_tokens_seen": 47972352,
"step": 183
},
{
"epoch": 0.04539847026893659,
"grad_norm": 0.4693243205547333,
"learning_rate": 9.791612872325667e-05,
"loss": 1.6293,
"num_input_tokens_seen": 48234496,
"step": 184
},
{
"epoch": 0.045645201085615594,
"grad_norm": 1.195617914199829,
"learning_rate": 9.786597487660337e-05,
"loss": 1.5874,
"num_input_tokens_seen": 48496640,
"step": 185
},
{
"epoch": 0.0458919319022946,
"grad_norm": 0.46441569924354553,
"learning_rate": 9.781523779815179e-05,
"loss": 1.6804,
"num_input_tokens_seen": 48758784,
"step": 186
},
{
"epoch": 0.0461386627189736,
"grad_norm": 0.5432348847389221,
"learning_rate": 9.776391810611718e-05,
"loss": 1.7017,
"num_input_tokens_seen": 49020928,
"step": 187
},
{
"epoch": 0.0463853935356526,
"grad_norm": 0.39682507514953613,
"learning_rate": 9.771201642581385e-05,
"loss": 1.451,
"num_input_tokens_seen": 49283072,
"step": 188
},
{
"epoch": 0.046632124352331605,
"grad_norm": 0.6172711849212646,
"learning_rate": 9.765953338964735e-05,
"loss": 1.647,
"num_input_tokens_seen": 49545216,
"step": 189
},
{
"epoch": 0.04687885516901061,
"grad_norm": 0.4956727921962738,
"learning_rate": 9.760646963710694e-05,
"loss": 1.3432,
"num_input_tokens_seen": 49807360,
"step": 190
},
{
"epoch": 0.04712558598568961,
"grad_norm": 1.0350934267044067,
"learning_rate": 9.755282581475769e-05,
"loss": 1.1852,
"num_input_tokens_seen": 50069504,
"step": 191
},
{
"epoch": 0.047372316802368614,
"grad_norm": 0.3049958348274231,
"learning_rate": 9.749860257623263e-05,
"loss": 1.4054,
"num_input_tokens_seen": 50331648,
"step": 192
},
{
"epoch": 0.047619047619047616,
"grad_norm": 0.47937139868736267,
"learning_rate": 9.744380058222483e-05,
"loss": 1.2734,
"num_input_tokens_seen": 50593792,
"step": 193
},
{
"epoch": 0.04786577843572662,
"grad_norm": 0.50199955701828,
"learning_rate": 9.73884205004793e-05,
"loss": 1.6453,
"num_input_tokens_seen": 50855936,
"step": 194
},
{
"epoch": 0.04811250925240563,
"grad_norm": 2.6262714862823486,
"learning_rate": 9.733246300578483e-05,
"loss": 1.4149,
"num_input_tokens_seen": 51118080,
"step": 195
},
{
"epoch": 0.04835924006908463,
"grad_norm": 0.4393710792064667,
"learning_rate": 9.727592877996585e-05,
"loss": 1.5932,
"num_input_tokens_seen": 51380224,
"step": 196
},
{
"epoch": 0.048605970885763634,
"grad_norm": 0.48173320293426514,
"learning_rate": 9.721881851187406e-05,
"loss": 2.0543,
"num_input_tokens_seen": 51642368,
"step": 197
},
{
"epoch": 0.04885270170244264,
"grad_norm": 0.3667338490486145,
"learning_rate": 9.716113289738004e-05,
"loss": 1.8266,
"num_input_tokens_seen": 51904512,
"step": 198
},
{
"epoch": 0.04909943251912164,
"grad_norm": 0.7761420011520386,
"learning_rate": 9.710287263936484e-05,
"loss": 1.4863,
"num_input_tokens_seen": 52166656,
"step": 199
},
{
"epoch": 0.04934616333580064,
"grad_norm": 0.2770567834377289,
"learning_rate": 9.704403844771128e-05,
"loss": 1.4059,
"num_input_tokens_seen": 52428800,
"step": 200
},
{
"epoch": 0.049592894152479645,
"grad_norm": 0.5254491567611694,
"learning_rate": 9.698463103929542e-05,
"loss": 1.3301,
"num_input_tokens_seen": 52690944,
"step": 201
},
{
"epoch": 0.04983962496915865,
"grad_norm": 0.3954887092113495,
"learning_rate": 9.69246511379778e-05,
"loss": 1.3543,
"num_input_tokens_seen": 52953088,
"step": 202
},
{
"epoch": 0.05008635578583765,
"grad_norm": 0.5048865079879761,
"learning_rate": 9.686409947459458e-05,
"loss": 1.8387,
"num_input_tokens_seen": 53215232,
"step": 203
},
{
"epoch": 0.050333086602516654,
"grad_norm": 0.3256657123565674,
"learning_rate": 9.680297678694867e-05,
"loss": 1.7805,
"num_input_tokens_seen": 53477376,
"step": 204
},
{
"epoch": 0.050579817419195656,
"grad_norm": 0.46845078468322754,
"learning_rate": 9.674128381980072e-05,
"loss": 1.1174,
"num_input_tokens_seen": 53739520,
"step": 205
},
{
"epoch": 0.05082654823587466,
"grad_norm": 0.47223731875419617,
"learning_rate": 9.667902132486009e-05,
"loss": 1.5451,
"num_input_tokens_seen": 54001664,
"step": 206
},
{
"epoch": 0.05107327905255366,
"grad_norm": 0.431727796792984,
"learning_rate": 9.661619006077561e-05,
"loss": 1.6365,
"num_input_tokens_seen": 54263808,
"step": 207
},
{
"epoch": 0.051320009869232665,
"grad_norm": 0.43686380982398987,
"learning_rate": 9.655279079312642e-05,
"loss": 1.358,
"num_input_tokens_seen": 54525952,
"step": 208
},
{
"epoch": 0.05156674068591167,
"grad_norm": 0.44880571961402893,
"learning_rate": 9.648882429441257e-05,
"loss": 1.7113,
"num_input_tokens_seen": 54788096,
"step": 209
},
{
"epoch": 0.05181347150259067,
"grad_norm": 0.3017417788505554,
"learning_rate": 9.642429134404569e-05,
"loss": 0.9447,
"num_input_tokens_seen": 55050240,
"step": 210
},
{
"epoch": 0.05206020231926968,
"grad_norm": 0.48962676525115967,
"learning_rate": 9.635919272833938e-05,
"loss": 1.8678,
"num_input_tokens_seen": 55312384,
"step": 211
},
{
"epoch": 0.05230693313594868,
"grad_norm": 0.42365866899490356,
"learning_rate": 9.629352924049975e-05,
"loss": 1.3535,
"num_input_tokens_seen": 55574528,
"step": 212
},
{
"epoch": 0.052553663952627686,
"grad_norm": 0.7170348763465881,
"learning_rate": 9.622730168061567e-05,
"loss": 1.2568,
"num_input_tokens_seen": 55836672,
"step": 213
},
{
"epoch": 0.05280039476930669,
"grad_norm": 0.35506054759025574,
"learning_rate": 9.616051085564906e-05,
"loss": 1.0919,
"num_input_tokens_seen": 56098816,
"step": 214
},
{
"epoch": 0.05304712558598569,
"grad_norm": 0.41648605465888977,
"learning_rate": 9.609315757942503e-05,
"loss": 1.233,
"num_input_tokens_seen": 56360960,
"step": 215
},
{
"epoch": 0.053293856402664694,
"grad_norm": 0.313808798789978,
"learning_rate": 9.602524267262203e-05,
"loss": 1.2015,
"num_input_tokens_seen": 56623104,
"step": 216
},
{
"epoch": 0.0535405872193437,
"grad_norm": 0.3957485854625702,
"learning_rate": 9.595676696276172e-05,
"loss": 1.5099,
"num_input_tokens_seen": 56885248,
"step": 217
},
{
"epoch": 0.0537873180360227,
"grad_norm": 0.31148213148117065,
"learning_rate": 9.588773128419906e-05,
"loss": 1.1991,
"num_input_tokens_seen": 57147392,
"step": 218
},
{
"epoch": 0.0540340488527017,
"grad_norm": 0.3659258186817169,
"learning_rate": 9.581813647811198e-05,
"loss": 1.373,
"num_input_tokens_seen": 57409536,
"step": 219
},
{
"epoch": 0.054280779669380705,
"grad_norm": 0.38427308201789856,
"learning_rate": 9.574798339249125e-05,
"loss": 1.1492,
"num_input_tokens_seen": 57671680,
"step": 220
},
{
"epoch": 0.05452751048605971,
"grad_norm": 0.35653167963027954,
"learning_rate": 9.567727288213005e-05,
"loss": 1.5735,
"num_input_tokens_seen": 57933824,
"step": 221
},
{
"epoch": 0.05477424130273871,
"grad_norm": 0.37471625208854675,
"learning_rate": 9.560600580861365e-05,
"loss": 1.2814,
"num_input_tokens_seen": 58195968,
"step": 222
},
{
"epoch": 0.05502097211941771,
"grad_norm": 0.36910685896873474,
"learning_rate": 9.553418304030886e-05,
"loss": 1.4826,
"num_input_tokens_seen": 58458112,
"step": 223
},
{
"epoch": 0.055267702936096716,
"grad_norm": 0.6973375082015991,
"learning_rate": 9.546180545235344e-05,
"loss": 1.6205,
"num_input_tokens_seen": 58720256,
"step": 224
},
{
"epoch": 0.05551443375277572,
"grad_norm": 0.4620996415615082,
"learning_rate": 9.538887392664544e-05,
"loss": 1.3336,
"num_input_tokens_seen": 58982400,
"step": 225
},
{
"epoch": 0.05576116456945472,
"grad_norm": 0.4059785008430481,
"learning_rate": 9.53153893518325e-05,
"loss": 1.4121,
"num_input_tokens_seen": 59244544,
"step": 226
},
{
"epoch": 0.05600789538613373,
"grad_norm": 0.6425535678863525,
"learning_rate": 9.524135262330098e-05,
"loss": 1.8197,
"num_input_tokens_seen": 59506688,
"step": 227
},
{
"epoch": 0.056254626202812734,
"grad_norm": 0.41906410455703735,
"learning_rate": 9.516676464316505e-05,
"loss": 1.1346,
"num_input_tokens_seen": 59768832,
"step": 228
},
{
"epoch": 0.05650135701949174,
"grad_norm": 0.3719293773174286,
"learning_rate": 9.50916263202557e-05,
"loss": 1.6634,
"num_input_tokens_seen": 60030976,
"step": 229
},
{
"epoch": 0.05674808783617074,
"grad_norm": 0.4112604558467865,
"learning_rate": 9.501593857010969e-05,
"loss": 1.5247,
"num_input_tokens_seen": 60293120,
"step": 230
},
{
"epoch": 0.05699481865284974,
"grad_norm": 0.45164841413497925,
"learning_rate": 9.493970231495835e-05,
"loss": 1.4905,
"num_input_tokens_seen": 60555264,
"step": 231
},
{
"epoch": 0.057241549469528745,
"grad_norm": 0.46919164061546326,
"learning_rate": 9.486291848371643e-05,
"loss": 1.2503,
"num_input_tokens_seen": 60817408,
"step": 232
},
{
"epoch": 0.05748828028620775,
"grad_norm": 0.8016965985298157,
"learning_rate": 9.478558801197065e-05,
"loss": 1.1185,
"num_input_tokens_seen": 61079552,
"step": 233
},
{
"epoch": 0.05773501110288675,
"grad_norm": 0.7115291357040405,
"learning_rate": 9.47077118419684e-05,
"loss": 1.4153,
"num_input_tokens_seen": 61341696,
"step": 234
},
{
"epoch": 0.05798174191956575,
"grad_norm": 0.518478512763977,
"learning_rate": 9.462929092260628e-05,
"loss": 1.4587,
"num_input_tokens_seen": 61603840,
"step": 235
},
{
"epoch": 0.058228472736244756,
"grad_norm": 0.524122416973114,
"learning_rate": 9.45503262094184e-05,
"loss": 1.5911,
"num_input_tokens_seen": 61865984,
"step": 236
},
{
"epoch": 0.05847520355292376,
"grad_norm": 0.46665579080581665,
"learning_rate": 9.447081866456489e-05,
"loss": 1.3185,
"num_input_tokens_seen": 62128128,
"step": 237
},
{
"epoch": 0.05872193436960276,
"grad_norm": 0.4962238073348999,
"learning_rate": 9.439076925682006e-05,
"loss": 1.617,
"num_input_tokens_seen": 62390272,
"step": 238
},
{
"epoch": 0.058968665186281764,
"grad_norm": 0.47364774346351624,
"learning_rate": 9.431017896156074e-05,
"loss": 1.5324,
"num_input_tokens_seen": 62652416,
"step": 239
},
{
"epoch": 0.05921539600296077,
"grad_norm": 0.37841781973838806,
"learning_rate": 9.42290487607542e-05,
"loss": 1.6952,
"num_input_tokens_seen": 62914560,
"step": 240
},
{
"epoch": 0.05946212681963977,
"grad_norm": 0.5535987615585327,
"learning_rate": 9.414737964294636e-05,
"loss": 1.2238,
"num_input_tokens_seen": 63176704,
"step": 241
},
{
"epoch": 0.05970885763631878,
"grad_norm": 0.43056029081344604,
"learning_rate": 9.40651726032496e-05,
"loss": 1.1467,
"num_input_tokens_seen": 63438848,
"step": 242
},
{
"epoch": 0.05995558845299778,
"grad_norm": 1.7811694145202637,
"learning_rate": 9.398242864333083e-05,
"loss": 1.506,
"num_input_tokens_seen": 63700992,
"step": 243
},
{
"epoch": 0.060202319269676785,
"grad_norm": 0.8254825472831726,
"learning_rate": 9.389914877139903e-05,
"loss": 1.3196,
"num_input_tokens_seen": 63963136,
"step": 244
},
{
"epoch": 0.06044905008635579,
"grad_norm": 0.43748101592063904,
"learning_rate": 9.381533400219318e-05,
"loss": 1.4198,
"num_input_tokens_seen": 64225280,
"step": 245
},
{
"epoch": 0.06069578090303479,
"grad_norm": 0.5970239043235779,
"learning_rate": 9.373098535696979e-05,
"loss": 1.2035,
"num_input_tokens_seen": 64487424,
"step": 246
},
{
"epoch": 0.06094251171971379,
"grad_norm": 0.5925011038780212,
"learning_rate": 9.364610386349049e-05,
"loss": 1.4749,
"num_input_tokens_seen": 64749568,
"step": 247
},
{
"epoch": 0.061189242536392796,
"grad_norm": 0.3569955825805664,
"learning_rate": 9.356069055600948e-05,
"loss": 1.5233,
"num_input_tokens_seen": 65011712,
"step": 248
},
{
"epoch": 0.0614359733530718,
"grad_norm": 0.5011444091796875,
"learning_rate": 9.347474647526095e-05,
"loss": 1.374,
"num_input_tokens_seen": 65273856,
"step": 249
},
{
"epoch": 0.0616827041697508,
"grad_norm": 0.446790874004364,
"learning_rate": 9.338827266844644e-05,
"loss": 1.4554,
"num_input_tokens_seen": 65536000,
"step": 250
},
{
"epoch": 0.061929434986429804,
"grad_norm": 0.5686383843421936,
"learning_rate": 9.330127018922194e-05,
"loss": 1.0886,
"num_input_tokens_seen": 65798144,
"step": 251
},
{
"epoch": 0.06217616580310881,
"grad_norm": 0.4082619249820709,
"learning_rate": 9.321374009768525e-05,
"loss": 1.1814,
"num_input_tokens_seen": 66060288,
"step": 252
},
{
"epoch": 0.06242289661978781,
"grad_norm": 0.546432614326477,
"learning_rate": 9.312568346036288e-05,
"loss": 1.0888,
"num_input_tokens_seen": 66322432,
"step": 253
},
{
"epoch": 0.06266962743646681,
"grad_norm": 0.4293651580810547,
"learning_rate": 9.30371013501972e-05,
"loss": 1.5083,
"num_input_tokens_seen": 66584576,
"step": 254
},
{
"epoch": 0.06291635825314582,
"grad_norm": 0.4493894875049591,
"learning_rate": 9.294799484653323e-05,
"loss": 1.4914,
"num_input_tokens_seen": 66846720,
"step": 255
},
{
"epoch": 0.06316308906982482,
"grad_norm": 0.3884902894496918,
"learning_rate": 9.285836503510562e-05,
"loss": 1.4629,
"num_input_tokens_seen": 67108864,
"step": 256
},
{
"epoch": 0.06340981988650382,
"grad_norm": 0.36732032895088196,
"learning_rate": 9.276821300802534e-05,
"loss": 1.8556,
"num_input_tokens_seen": 67371008,
"step": 257
},
{
"epoch": 0.06365655070318282,
"grad_norm": 0.47064924240112305,
"learning_rate": 9.267753986376637e-05,
"loss": 1.3646,
"num_input_tokens_seen": 67633152,
"step": 258
},
{
"epoch": 0.06390328151986183,
"grad_norm": 0.3313111960887909,
"learning_rate": 9.258634670715238e-05,
"loss": 1.699,
"num_input_tokens_seen": 67895296,
"step": 259
},
{
"epoch": 0.06415001233654083,
"grad_norm": 0.46750375628471375,
"learning_rate": 9.249463464934321e-05,
"loss": 1.4547,
"num_input_tokens_seen": 68157440,
"step": 260
},
{
"epoch": 0.06439674315321983,
"grad_norm": 0.44980961084365845,
"learning_rate": 9.24024048078213e-05,
"loss": 1.946,
"num_input_tokens_seen": 68419584,
"step": 261
},
{
"epoch": 0.06464347396989883,
"grad_norm": 0.40025463700294495,
"learning_rate": 9.230965830637821e-05,
"loss": 1.64,
"num_input_tokens_seen": 68681728,
"step": 262
},
{
"epoch": 0.06489020478657784,
"grad_norm": 0.3600539267063141,
"learning_rate": 9.221639627510076e-05,
"loss": 1.7228,
"num_input_tokens_seen": 68943872,
"step": 263
},
{
"epoch": 0.06513693560325684,
"grad_norm": 0.41291505098342896,
"learning_rate": 9.212261985035739e-05,
"loss": 1.4029,
"num_input_tokens_seen": 69206016,
"step": 264
},
{
"epoch": 0.06538366641993584,
"grad_norm": 0.44612574577331543,
"learning_rate": 9.202833017478422e-05,
"loss": 1.5331,
"num_input_tokens_seen": 69468160,
"step": 265
},
{
"epoch": 0.06563039723661486,
"grad_norm": 0.6552479267120361,
"learning_rate": 9.193352839727121e-05,
"loss": 1.2348,
"num_input_tokens_seen": 69730304,
"step": 266
},
{
"epoch": 0.06587712805329386,
"grad_norm": 0.3901737928390503,
"learning_rate": 9.183821567294809e-05,
"loss": 1.582,
"num_input_tokens_seen": 69992448,
"step": 267
},
{
"epoch": 0.06612385886997287,
"grad_norm": 0.48864665627479553,
"learning_rate": 9.174239316317033e-05,
"loss": 1.2625,
"num_input_tokens_seen": 70254592,
"step": 268
},
{
"epoch": 0.06637058968665187,
"grad_norm": 0.4407101571559906,
"learning_rate": 9.164606203550497e-05,
"loss": 1.611,
"num_input_tokens_seen": 70516736,
"step": 269
},
{
"epoch": 0.06661732050333087,
"grad_norm": 1.0702364444732666,
"learning_rate": 9.154922346371642e-05,
"loss": 1.5025,
"num_input_tokens_seen": 70778880,
"step": 270
},
{
"epoch": 0.06686405132000987,
"grad_norm": 0.44479334354400635,
"learning_rate": 9.145187862775209e-05,
"loss": 1.6566,
"num_input_tokens_seen": 71041024,
"step": 271
},
{
"epoch": 0.06711078213668888,
"grad_norm": 0.4969049096107483,
"learning_rate": 9.135402871372808e-05,
"loss": 1.17,
"num_input_tokens_seen": 71303168,
"step": 272
},
{
"epoch": 0.06735751295336788,
"grad_norm": 0.5161774158477783,
"learning_rate": 9.125567491391476e-05,
"loss": 1.3541,
"num_input_tokens_seen": 71565312,
"step": 273
},
{
"epoch": 0.06760424377004688,
"grad_norm": 0.6329514384269714,
"learning_rate": 9.11568184267221e-05,
"loss": 1.3956,
"num_input_tokens_seen": 71827456,
"step": 274
},
{
"epoch": 0.06785097458672588,
"grad_norm": 0.5841862559318542,
"learning_rate": 9.105746045668521e-05,
"loss": 2.0107,
"num_input_tokens_seen": 72089600,
"step": 275
},
{
"epoch": 0.06809770540340489,
"grad_norm": 0.3485393226146698,
"learning_rate": 9.09576022144496e-05,
"loss": 1.3152,
"num_input_tokens_seen": 72351744,
"step": 276
},
{
"epoch": 0.06834443622008389,
"grad_norm": 0.33349213004112244,
"learning_rate": 9.085724491675642e-05,
"loss": 1.3513,
"num_input_tokens_seen": 72613888,
"step": 277
},
{
"epoch": 0.06859116703676289,
"grad_norm": 1.688419222831726,
"learning_rate": 9.075638978642771e-05,
"loss": 1.6373,
"num_input_tokens_seen": 72876032,
"step": 278
},
{
"epoch": 0.0688378978534419,
"grad_norm": 0.6972132325172424,
"learning_rate": 9.065503805235138e-05,
"loss": 1.2006,
"num_input_tokens_seen": 73138176,
"step": 279
},
{
"epoch": 0.0690846286701209,
"grad_norm": 0.7142652273178101,
"learning_rate": 9.055319094946633e-05,
"loss": 1.1536,
"num_input_tokens_seen": 73400320,
"step": 280
},
{
"epoch": 0.0693313594867999,
"grad_norm": 0.6890919208526611,
"learning_rate": 9.045084971874738e-05,
"loss": 1.5108,
"num_input_tokens_seen": 73662464,
"step": 281
},
{
"epoch": 0.0695780903034789,
"grad_norm": 0.38137179613113403,
"learning_rate": 9.034801560719011e-05,
"loss": 1.6791,
"num_input_tokens_seen": 73924608,
"step": 282
},
{
"epoch": 0.0698248211201579,
"grad_norm": 0.4041261672973633,
"learning_rate": 9.02446898677957e-05,
"loss": 1.5718,
"num_input_tokens_seen": 74186752,
"step": 283
},
{
"epoch": 0.07007155193683691,
"grad_norm": 0.35075971484184265,
"learning_rate": 9.014087375955573e-05,
"loss": 1.6267,
"num_input_tokens_seen": 74448896,
"step": 284
},
{
"epoch": 0.07031828275351591,
"grad_norm": 0.3557937443256378,
"learning_rate": 9.003656854743667e-05,
"loss": 1.066,
"num_input_tokens_seen": 74711040,
"step": 285
},
{
"epoch": 0.07056501357019491,
"grad_norm": 0.426527738571167,
"learning_rate": 8.993177550236464e-05,
"loss": 1.3819,
"num_input_tokens_seen": 74973184,
"step": 286
},
{
"epoch": 0.07081174438687392,
"grad_norm": 0.2790268361568451,
"learning_rate": 8.982649590120982e-05,
"loss": 1.0298,
"num_input_tokens_seen": 75235328,
"step": 287
},
{
"epoch": 0.07105847520355292,
"grad_norm": 0.36474624276161194,
"learning_rate": 8.972073102677091e-05,
"loss": 1.297,
"num_input_tokens_seen": 75497472,
"step": 288
},
{
"epoch": 0.07130520602023192,
"grad_norm": 0.5412529110908508,
"learning_rate": 8.961448216775954e-05,
"loss": 1.7678,
"num_input_tokens_seen": 75759616,
"step": 289
},
{
"epoch": 0.07155193683691093,
"grad_norm": 0.3591950535774231,
"learning_rate": 8.950775061878453e-05,
"loss": 1.8175,
"num_input_tokens_seen": 76021760,
"step": 290
},
{
"epoch": 0.07179866765358993,
"grad_norm": 0.35655391216278076,
"learning_rate": 8.940053768033609e-05,
"loss": 0.915,
"num_input_tokens_seen": 76283904,
"step": 291
},
{
"epoch": 0.07204539847026893,
"grad_norm": 0.4709143042564392,
"learning_rate": 8.92928446587701e-05,
"loss": 1.4917,
"num_input_tokens_seen": 76546048,
"step": 292
},
{
"epoch": 0.07229212928694793,
"grad_norm": 0.4007657468318939,
"learning_rate": 8.9184672866292e-05,
"loss": 1.1993,
"num_input_tokens_seen": 76808192,
"step": 293
},
{
"epoch": 0.07253886010362694,
"grad_norm": 0.4333249032497406,
"learning_rate": 8.907602362094094e-05,
"loss": 1.4542,
"num_input_tokens_seen": 77070336,
"step": 294
},
{
"epoch": 0.07278559092030594,
"grad_norm": 0.4471242427825928,
"learning_rate": 8.896689824657372e-05,
"loss": 1.5767,
"num_input_tokens_seen": 77332480,
"step": 295
},
{
"epoch": 0.07303232173698494,
"grad_norm": 0.45520561933517456,
"learning_rate": 8.885729807284856e-05,
"loss": 1.3451,
"num_input_tokens_seen": 77594624,
"step": 296
},
{
"epoch": 0.07327905255366396,
"grad_norm": 0.5006072521209717,
"learning_rate": 8.874722443520899e-05,
"loss": 1.7068,
"num_input_tokens_seen": 77856768,
"step": 297
},
{
"epoch": 0.07352578337034296,
"grad_norm": 0.5564849376678467,
"learning_rate": 8.863667867486756e-05,
"loss": 1.5161,
"num_input_tokens_seen": 78118912,
"step": 298
},
{
"epoch": 0.07377251418702196,
"grad_norm": 0.6380155086517334,
"learning_rate": 8.852566213878947e-05,
"loss": 1.4766,
"num_input_tokens_seen": 78381056,
"step": 299
},
{
"epoch": 0.07401924500370097,
"grad_norm": 0.44310492277145386,
"learning_rate": 8.841417617967618e-05,
"loss": 1.6394,
"num_input_tokens_seen": 78643200,
"step": 300
},
{
"epoch": 0.07426597582037997,
"grad_norm": 0.41495969891548157,
"learning_rate": 8.83022221559489e-05,
"loss": 1.3418,
"num_input_tokens_seen": 78905344,
"step": 301
},
{
"epoch": 0.07451270663705897,
"grad_norm": 0.3353452980518341,
"learning_rate": 8.818980143173213e-05,
"loss": 1.3674,
"num_input_tokens_seen": 79167488,
"step": 302
},
{
"epoch": 0.07475943745373798,
"grad_norm": 0.46011558175086975,
"learning_rate": 8.807691537683685e-05,
"loss": 1.4383,
"num_input_tokens_seen": 79429632,
"step": 303
},
{
"epoch": 0.07500616827041698,
"grad_norm": 0.35528600215911865,
"learning_rate": 8.796356536674403e-05,
"loss": 1.4142,
"num_input_tokens_seen": 79691776,
"step": 304
},
{
"epoch": 0.07525289908709598,
"grad_norm": 0.46472465991973877,
"learning_rate": 8.784975278258783e-05,
"loss": 1.5237,
"num_input_tokens_seen": 79953920,
"step": 305
},
{
"epoch": 0.07549962990377498,
"grad_norm": 0.4007113575935364,
"learning_rate": 8.773547901113862e-05,
"loss": 1.4556,
"num_input_tokens_seen": 80216064,
"step": 306
},
{
"epoch": 0.07574636072045399,
"grad_norm": 0.3904101848602295,
"learning_rate": 8.762074544478623e-05,
"loss": 1.8598,
"num_input_tokens_seen": 80478208,
"step": 307
},
{
"epoch": 0.07599309153713299,
"grad_norm": 0.42733052372932434,
"learning_rate": 8.750555348152298e-05,
"loss": 1.4964,
"num_input_tokens_seen": 80740352,
"step": 308
},
{
"epoch": 0.07623982235381199,
"grad_norm": 0.3596790134906769,
"learning_rate": 8.73899045249266e-05,
"loss": 1.4464,
"num_input_tokens_seen": 81002496,
"step": 309
},
{
"epoch": 0.076486553170491,
"grad_norm": 0.39557668566703796,
"learning_rate": 8.727379998414311e-05,
"loss": 1.4792,
"num_input_tokens_seen": 81264640,
"step": 310
},
{
"epoch": 0.07673328398717,
"grad_norm": 0.4642884433269501,
"learning_rate": 8.715724127386972e-05,
"loss": 1.352,
"num_input_tokens_seen": 81526784,
"step": 311
},
{
"epoch": 0.076980014803849,
"grad_norm": 0.359645277261734,
"learning_rate": 8.70402298143375e-05,
"loss": 1.5067,
"num_input_tokens_seen": 81788928,
"step": 312
},
{
"epoch": 0.077226745620528,
"grad_norm": 0.44443607330322266,
"learning_rate": 8.692276703129421e-05,
"loss": 1.4489,
"num_input_tokens_seen": 82051072,
"step": 313
},
{
"epoch": 0.077473476437207,
"grad_norm": 0.5763232111930847,
"learning_rate": 8.680485435598673e-05,
"loss": 1.3569,
"num_input_tokens_seen": 82313216,
"step": 314
},
{
"epoch": 0.07772020725388601,
"grad_norm": 0.42504552006721497,
"learning_rate": 8.668649322514382e-05,
"loss": 1.4502,
"num_input_tokens_seen": 82575360,
"step": 315
},
{
"epoch": 0.07796693807056501,
"grad_norm": 0.44531187415122986,
"learning_rate": 8.656768508095853e-05,
"loss": 1.1609,
"num_input_tokens_seen": 82837504,
"step": 316
},
{
"epoch": 0.07821366888724401,
"grad_norm": 0.4542759358882904,
"learning_rate": 8.644843137107059e-05,
"loss": 1.4919,
"num_input_tokens_seen": 83099648,
"step": 317
},
{
"epoch": 0.07846039970392302,
"grad_norm": 0.34647974371910095,
"learning_rate": 8.63287335485488e-05,
"loss": 1.4395,
"num_input_tokens_seen": 83361792,
"step": 318
},
{
"epoch": 0.07870713052060202,
"grad_norm": 0.39204534888267517,
"learning_rate": 8.620859307187339e-05,
"loss": 1.1588,
"num_input_tokens_seen": 83623936,
"step": 319
},
{
"epoch": 0.07895386133728102,
"grad_norm": 0.36839020252227783,
"learning_rate": 8.608801140491811e-05,
"loss": 1.0677,
"num_input_tokens_seen": 83886080,
"step": 320
},
{
"epoch": 0.07920059215396003,
"grad_norm": 0.3239259421825409,
"learning_rate": 8.596699001693255e-05,
"loss": 1.6877,
"num_input_tokens_seen": 84148224,
"step": 321
},
{
"epoch": 0.07944732297063903,
"grad_norm": 0.4033769965171814,
"learning_rate": 8.584553038252414e-05,
"loss": 1.4702,
"num_input_tokens_seen": 84410368,
"step": 322
},
{
"epoch": 0.07969405378731803,
"grad_norm": 0.4204678237438202,
"learning_rate": 8.572363398164017e-05,
"loss": 1.267,
"num_input_tokens_seen": 84672512,
"step": 323
},
{
"epoch": 0.07994078460399703,
"grad_norm": 0.40684056282043457,
"learning_rate": 8.560130229954984e-05,
"loss": 1.76,
"num_input_tokens_seen": 84934656,
"step": 324
},
{
"epoch": 0.08018751542067604,
"grad_norm": 0.46505317091941833,
"learning_rate": 8.547853682682604e-05,
"loss": 1.2491,
"num_input_tokens_seen": 85196800,
"step": 325
},
{
"epoch": 0.08043424623735504,
"grad_norm": 1.6766362190246582,
"learning_rate": 8.535533905932738e-05,
"loss": 1.5482,
"num_input_tokens_seen": 85458944,
"step": 326
},
{
"epoch": 0.08068097705403404,
"grad_norm": 0.47785836458206177,
"learning_rate": 8.523171049817974e-05,
"loss": 1.3179,
"num_input_tokens_seen": 85721088,
"step": 327
},
{
"epoch": 0.08092770787071306,
"grad_norm": 1.1745095252990723,
"learning_rate": 8.510765264975813e-05,
"loss": 1.325,
"num_input_tokens_seen": 85983232,
"step": 328
},
{
"epoch": 0.08117443868739206,
"grad_norm": 0.5384896397590637,
"learning_rate": 8.498316702566828e-05,
"loss": 1.6173,
"num_input_tokens_seen": 86245376,
"step": 329
},
{
"epoch": 0.08142116950407106,
"grad_norm": 0.45555058121681213,
"learning_rate": 8.485825514272824e-05,
"loss": 1.3696,
"num_input_tokens_seen": 86507520,
"step": 330
},
{
"epoch": 0.08166790032075007,
"grad_norm": 0.3669653832912445,
"learning_rate": 8.473291852294987e-05,
"loss": 1.8283,
"num_input_tokens_seen": 86769664,
"step": 331
},
{
"epoch": 0.08191463113742907,
"grad_norm": 0.5621717572212219,
"learning_rate": 8.460715869352035e-05,
"loss": 1.2308,
"num_input_tokens_seen": 87031808,
"step": 332
},
{
"epoch": 0.08216136195410807,
"grad_norm": 0.34389549493789673,
"learning_rate": 8.44809771867835e-05,
"loss": 1.4118,
"num_input_tokens_seen": 87293952,
"step": 333
},
{
"epoch": 0.08240809277078708,
"grad_norm": 0.4457602798938751,
"learning_rate": 8.435437554022115e-05,
"loss": 1.7889,
"num_input_tokens_seen": 87556096,
"step": 334
},
{
"epoch": 0.08265482358746608,
"grad_norm": 0.5850016474723816,
"learning_rate": 8.422735529643444e-05,
"loss": 1.5911,
"num_input_tokens_seen": 87818240,
"step": 335
},
{
"epoch": 0.08290155440414508,
"grad_norm": 0.34275853633880615,
"learning_rate": 8.409991800312493e-05,
"loss": 1.0467,
"num_input_tokens_seen": 88080384,
"step": 336
},
{
"epoch": 0.08314828522082408,
"grad_norm": 0.36903801560401917,
"learning_rate": 8.397206521307584e-05,
"loss": 1.8271,
"num_input_tokens_seen": 88342528,
"step": 337
},
{
"epoch": 0.08339501603750309,
"grad_norm": 0.6859552264213562,
"learning_rate": 8.384379848413304e-05,
"loss": 1.6549,
"num_input_tokens_seen": 88604672,
"step": 338
},
{
"epoch": 0.08364174685418209,
"grad_norm": 0.36567091941833496,
"learning_rate": 8.371511937918616e-05,
"loss": 1.5964,
"num_input_tokens_seen": 88866816,
"step": 339
},
{
"epoch": 0.08388847767086109,
"grad_norm": 0.4594205617904663,
"learning_rate": 8.358602946614951e-05,
"loss": 1.832,
"num_input_tokens_seen": 89128960,
"step": 340
},
{
"epoch": 0.0841352084875401,
"grad_norm": 0.436885267496109,
"learning_rate": 8.345653031794292e-05,
"loss": 1.3788,
"num_input_tokens_seen": 89391104,
"step": 341
},
{
"epoch": 0.0843819393042191,
"grad_norm": 0.42924320697784424,
"learning_rate": 8.332662351247262e-05,
"loss": 1.789,
"num_input_tokens_seen": 89653248,
"step": 342
},
{
"epoch": 0.0846286701208981,
"grad_norm": 0.7718057036399841,
"learning_rate": 8.319631063261209e-05,
"loss": 1.3742,
"num_input_tokens_seen": 89915392,
"step": 343
},
{
"epoch": 0.0848754009375771,
"grad_norm": 0.6790041327476501,
"learning_rate": 8.306559326618259e-05,
"loss": 1.5064,
"num_input_tokens_seen": 90177536,
"step": 344
},
{
"epoch": 0.0851221317542561,
"grad_norm": 0.3628585636615753,
"learning_rate": 8.293447300593402e-05,
"loss": 1.5297,
"num_input_tokens_seen": 90439680,
"step": 345
},
{
"epoch": 0.08536886257093511,
"grad_norm": 0.48777052760124207,
"learning_rate": 8.280295144952536e-05,
"loss": 1.5753,
"num_input_tokens_seen": 90701824,
"step": 346
},
{
"epoch": 0.08561559338761411,
"grad_norm": 0.5725603103637695,
"learning_rate": 8.267103019950529e-05,
"loss": 1.496,
"num_input_tokens_seen": 90963968,
"step": 347
},
{
"epoch": 0.08586232420429311,
"grad_norm": 0.29641348123550415,
"learning_rate": 8.253871086329255e-05,
"loss": 0.961,
"num_input_tokens_seen": 91226112,
"step": 348
},
{
"epoch": 0.08610905502097212,
"grad_norm": 0.4289126694202423,
"learning_rate": 8.240599505315655e-05,
"loss": 1.6334,
"num_input_tokens_seen": 91488256,
"step": 349
},
{
"epoch": 0.08635578583765112,
"grad_norm": 0.44064462184906006,
"learning_rate": 8.227288438619754e-05,
"loss": 0.747,
"num_input_tokens_seen": 91750400,
"step": 350
},
{
"epoch": 0.08660251665433012,
"grad_norm": 0.7460625767707825,
"learning_rate": 8.213938048432697e-05,
"loss": 1.8326,
"num_input_tokens_seen": 92012544,
"step": 351
},
{
"epoch": 0.08684924747100913,
"grad_norm": 0.5317106246948242,
"learning_rate": 8.200548497424778e-05,
"loss": 1.4688,
"num_input_tokens_seen": 92274688,
"step": 352
},
{
"epoch": 0.08709597828768813,
"grad_norm": 0.5864390730857849,
"learning_rate": 8.18711994874345e-05,
"loss": 1.3881,
"num_input_tokens_seen": 92536832,
"step": 353
},
{
"epoch": 0.08734270910436713,
"grad_norm": 0.5385507941246033,
"learning_rate": 8.173652566011338e-05,
"loss": 1.5504,
"num_input_tokens_seen": 92798976,
"step": 354
},
{
"epoch": 0.08758943992104613,
"grad_norm": 0.5184814929962158,
"learning_rate": 8.160146513324254e-05,
"loss": 1.5473,
"num_input_tokens_seen": 93061120,
"step": 355
},
{
"epoch": 0.08783617073772514,
"grad_norm": 0.5830583572387695,
"learning_rate": 8.146601955249188e-05,
"loss": 1.2477,
"num_input_tokens_seen": 93323264,
"step": 356
},
{
"epoch": 0.08808290155440414,
"grad_norm": 0.5308154225349426,
"learning_rate": 8.133019056822304e-05,
"loss": 1.4123,
"num_input_tokens_seen": 93585408,
"step": 357
},
{
"epoch": 0.08832963237108314,
"grad_norm": 0.5979369878768921,
"learning_rate": 8.119397983546932e-05,
"loss": 1.2428,
"num_input_tokens_seen": 93847552,
"step": 358
},
{
"epoch": 0.08857636318776216,
"grad_norm": 0.36300981044769287,
"learning_rate": 8.105738901391552e-05,
"loss": 1.3904,
"num_input_tokens_seen": 94109696,
"step": 359
},
{
"epoch": 0.08882309400444116,
"grad_norm": 0.576484739780426,
"learning_rate": 8.09204197678777e-05,
"loss": 1.6413,
"num_input_tokens_seen": 94371840,
"step": 360
},
{
"epoch": 0.08906982482112016,
"grad_norm": 0.5766769051551819,
"learning_rate": 8.07830737662829e-05,
"loss": 1.3048,
"num_input_tokens_seen": 94633984,
"step": 361
},
{
"epoch": 0.08931655563779917,
"grad_norm": 0.6954322457313538,
"learning_rate": 8.064535268264883e-05,
"loss": 1.2608,
"num_input_tokens_seen": 94896128,
"step": 362
},
{
"epoch": 0.08956328645447817,
"grad_norm": 0.3823780119419098,
"learning_rate": 8.05072581950634e-05,
"loss": 1.2457,
"num_input_tokens_seen": 95158272,
"step": 363
},
{
"epoch": 0.08981001727115717,
"grad_norm": 0.31498485803604126,
"learning_rate": 8.036879198616434e-05,
"loss": 1.2267,
"num_input_tokens_seen": 95420416,
"step": 364
},
{
"epoch": 0.09005674808783617,
"grad_norm": 0.507363498210907,
"learning_rate": 8.022995574311876e-05,
"loss": 1.3449,
"num_input_tokens_seen": 95682560,
"step": 365
},
{
"epoch": 0.09030347890451518,
"grad_norm": 0.43557116389274597,
"learning_rate": 8.009075115760243e-05,
"loss": 1.1165,
"num_input_tokens_seen": 95944704,
"step": 366
},
{
"epoch": 0.09055020972119418,
"grad_norm": 0.38875076174736023,
"learning_rate": 7.99511799257793e-05,
"loss": 0.975,
"num_input_tokens_seen": 96206848,
"step": 367
},
{
"epoch": 0.09079694053787318,
"grad_norm": 0.32490673661231995,
"learning_rate": 7.98112437482808e-05,
"loss": 1.5828,
"num_input_tokens_seen": 96468992,
"step": 368
},
{
"epoch": 0.09104367135455219,
"grad_norm": 0.4650174677371979,
"learning_rate": 7.967094433018508e-05,
"loss": 1.4551,
"num_input_tokens_seen": 96731136,
"step": 369
},
{
"epoch": 0.09129040217123119,
"grad_norm": 0.41471830010414124,
"learning_rate": 7.953028338099627e-05,
"loss": 1.312,
"num_input_tokens_seen": 96993280,
"step": 370
},
{
"epoch": 0.09153713298791019,
"grad_norm": 0.42038694024086,
"learning_rate": 7.938926261462366e-05,
"loss": 1.5741,
"num_input_tokens_seen": 97255424,
"step": 371
},
{
"epoch": 0.0917838638045892,
"grad_norm": 5.518871784210205,
"learning_rate": 7.924788374936078e-05,
"loss": 1.9387,
"num_input_tokens_seen": 97517568,
"step": 372
},
{
"epoch": 0.0920305946212682,
"grad_norm": 0.46844223141670227,
"learning_rate": 7.910614850786448e-05,
"loss": 1.7879,
"num_input_tokens_seen": 97779712,
"step": 373
},
{
"epoch": 0.0922773254379472,
"grad_norm": 0.3254941403865814,
"learning_rate": 7.896405861713394e-05,
"loss": 1.63,
"num_input_tokens_seen": 98041856,
"step": 374
},
{
"epoch": 0.0925240562546262,
"grad_norm": 0.48669666051864624,
"learning_rate": 7.882161580848967e-05,
"loss": 1.1868,
"num_input_tokens_seen": 98304000,
"step": 375
},
{
"epoch": 0.0927707870713052,
"grad_norm": 0.6237612366676331,
"learning_rate": 7.86788218175523e-05,
"loss": 1.3713,
"num_input_tokens_seen": 98566144,
"step": 376
},
{
"epoch": 0.09301751788798421,
"grad_norm": 0.3922407925128937,
"learning_rate": 7.85356783842216e-05,
"loss": 1.5787,
"num_input_tokens_seen": 98828288,
"step": 377
},
{
"epoch": 0.09326424870466321,
"grad_norm": 0.312225341796875,
"learning_rate": 7.839218725265506e-05,
"loss": 0.9997,
"num_input_tokens_seen": 99090432,
"step": 378
},
{
"epoch": 0.09351097952134221,
"grad_norm": 0.5094678997993469,
"learning_rate": 7.82483501712469e-05,
"loss": 1.4778,
"num_input_tokens_seen": 99352576,
"step": 379
},
{
"epoch": 0.09375771033802122,
"grad_norm": 0.4983142614364624,
"learning_rate": 7.810416889260653e-05,
"loss": 1.3939,
"num_input_tokens_seen": 99614720,
"step": 380
},
{
"epoch": 0.09400444115470022,
"grad_norm": 0.5200369954109192,
"learning_rate": 7.795964517353735e-05,
"loss": 1.799,
"num_input_tokens_seen": 99876864,
"step": 381
},
{
"epoch": 0.09425117197137922,
"grad_norm": 0.42416560649871826,
"learning_rate": 7.781478077501525e-05,
"loss": 1.4051,
"num_input_tokens_seen": 100139008,
"step": 382
},
{
"epoch": 0.09449790278805822,
"grad_norm": 0.5520444512367249,
"learning_rate": 7.766957746216721e-05,
"loss": 1.7456,
"num_input_tokens_seen": 100401152,
"step": 383
},
{
"epoch": 0.09474463360473723,
"grad_norm": 0.4007224440574646,
"learning_rate": 7.752403700424979e-05,
"loss": 1.8702,
"num_input_tokens_seen": 100663296,
"step": 384
},
{
"epoch": 0.09499136442141623,
"grad_norm": 0.41428691148757935,
"learning_rate": 7.737816117462752e-05,
"loss": 1.1437,
"num_input_tokens_seen": 100925440,
"step": 385
},
{
"epoch": 0.09523809523809523,
"grad_norm": 0.49010515213012695,
"learning_rate": 7.723195175075136e-05,
"loss": 1.5437,
"num_input_tokens_seen": 101187584,
"step": 386
},
{
"epoch": 0.09548482605477424,
"grad_norm": 0.6629204154014587,
"learning_rate": 7.7085410514137e-05,
"loss": 1.2254,
"num_input_tokens_seen": 101449728,
"step": 387
},
{
"epoch": 0.09573155687145324,
"grad_norm": 0.5595076084136963,
"learning_rate": 7.693853925034315e-05,
"loss": 1.126,
"num_input_tokens_seen": 101711872,
"step": 388
},
{
"epoch": 0.09597828768813224,
"grad_norm": 0.35291388630867004,
"learning_rate": 7.679133974894983e-05,
"loss": 1.2986,
"num_input_tokens_seen": 101974016,
"step": 389
},
{
"epoch": 0.09622501850481126,
"grad_norm": 0.2904811203479767,
"learning_rate": 7.66438138035365e-05,
"loss": 1.2742,
"num_input_tokens_seen": 102236160,
"step": 390
},
{
"epoch": 0.09647174932149026,
"grad_norm": 0.5374343991279602,
"learning_rate": 7.649596321166024e-05,
"loss": 1.5825,
"num_input_tokens_seen": 102498304,
"step": 391
},
{
"epoch": 0.09671848013816926,
"grad_norm": 0.6067750453948975,
"learning_rate": 7.634778977483389e-05,
"loss": 1.441,
"num_input_tokens_seen": 102760448,
"step": 392
},
{
"epoch": 0.09696521095484827,
"grad_norm": 0.3974727690219879,
"learning_rate": 7.619929529850397e-05,
"loss": 1.5915,
"num_input_tokens_seen": 103022592,
"step": 393
},
{
"epoch": 0.09721194177152727,
"grad_norm": 0.434687077999115,
"learning_rate": 7.605048159202883e-05,
"loss": 1.2823,
"num_input_tokens_seen": 103284736,
"step": 394
},
{
"epoch": 0.09745867258820627,
"grad_norm": 0.3449552357196808,
"learning_rate": 7.590135046865651e-05,
"loss": 1.526,
"num_input_tokens_seen": 103546880,
"step": 395
},
{
"epoch": 0.09770540340488527,
"grad_norm": 0.5032194256782532,
"learning_rate": 7.575190374550272e-05,
"loss": 1.4524,
"num_input_tokens_seen": 103809024,
"step": 396
},
{
"epoch": 0.09795213422156428,
"grad_norm": 0.3858168423175812,
"learning_rate": 7.560214324352858e-05,
"loss": 1.3311,
"num_input_tokens_seen": 104071168,
"step": 397
},
{
"epoch": 0.09819886503824328,
"grad_norm": 0.311840295791626,
"learning_rate": 7.545207078751857e-05,
"loss": 1.5949,
"num_input_tokens_seen": 104333312,
"step": 398
},
{
"epoch": 0.09844559585492228,
"grad_norm": 0.49627599120140076,
"learning_rate": 7.530168820605818e-05,
"loss": 1.3623,
"num_input_tokens_seen": 104595456,
"step": 399
},
{
"epoch": 0.09869232667160129,
"grad_norm": 0.4324486255645752,
"learning_rate": 7.515099733151177e-05,
"loss": 1.5535,
"num_input_tokens_seen": 104857600,
"step": 400
},
{
"epoch": 0.09893905748828029,
"grad_norm": 0.3988152742385864,
"learning_rate": 7.500000000000001e-05,
"loss": 1.563,
"num_input_tokens_seen": 105119744,
"step": 401
},
{
"epoch": 0.09918578830495929,
"grad_norm": 0.5879530906677246,
"learning_rate": 7.484869805137778e-05,
"loss": 1.4323,
"num_input_tokens_seen": 105381888,
"step": 402
},
{
"epoch": 0.0994325191216383,
"grad_norm": 0.8125831484794617,
"learning_rate": 7.469709332921155e-05,
"loss": 1.3246,
"num_input_tokens_seen": 105644032,
"step": 403
},
{
"epoch": 0.0996792499383173,
"grad_norm": 0.48790380358695984,
"learning_rate": 7.454518768075704e-05,
"loss": 1.4349,
"num_input_tokens_seen": 105906176,
"step": 404
},
{
"epoch": 0.0999259807549963,
"grad_norm": 0.28600436449050903,
"learning_rate": 7.439298295693665e-05,
"loss": 1.5826,
"num_input_tokens_seen": 106168320,
"step": 405
},
{
"epoch": 0.1001727115716753,
"grad_norm": 0.41148728132247925,
"learning_rate": 7.424048101231686e-05,
"loss": 1.1911,
"num_input_tokens_seen": 106430464,
"step": 406
},
{
"epoch": 0.1004194423883543,
"grad_norm": 0.4992868900299072,
"learning_rate": 7.408768370508576e-05,
"loss": 1.455,
"num_input_tokens_seen": 106692608,
"step": 407
},
{
"epoch": 0.10066617320503331,
"grad_norm": 0.49499616026878357,
"learning_rate": 7.393459289703035e-05,
"loss": 1.5915,
"num_input_tokens_seen": 106954752,
"step": 408
},
{
"epoch": 0.10091290402171231,
"grad_norm": 0.42855536937713623,
"learning_rate": 7.378121045351378e-05,
"loss": 1.5863,
"num_input_tokens_seen": 107216896,
"step": 409
},
{
"epoch": 0.10115963483839131,
"grad_norm": 0.6514034867286682,
"learning_rate": 7.362753824345272e-05,
"loss": 1.1089,
"num_input_tokens_seen": 107479040,
"step": 410
},
{
"epoch": 0.10140636565507032,
"grad_norm": 0.44934651255607605,
"learning_rate": 7.347357813929454e-05,
"loss": 1.2187,
"num_input_tokens_seen": 107741184,
"step": 411
},
{
"epoch": 0.10165309647174932,
"grad_norm": 0.5146008729934692,
"learning_rate": 7.331933201699457e-05,
"loss": 1.2553,
"num_input_tokens_seen": 108003328,
"step": 412
},
{
"epoch": 0.10189982728842832,
"grad_norm": 0.3021552860736847,
"learning_rate": 7.316480175599309e-05,
"loss": 1.4477,
"num_input_tokens_seen": 108265472,
"step": 413
},
{
"epoch": 0.10214655810510732,
"grad_norm": 0.4061047434806824,
"learning_rate": 7.300998923919259e-05,
"loss": 1.8129,
"num_input_tokens_seen": 108527616,
"step": 414
},
{
"epoch": 0.10239328892178633,
"grad_norm": 0.35356923937797546,
"learning_rate": 7.285489635293472e-05,
"loss": 1.1773,
"num_input_tokens_seen": 108789760,
"step": 415
},
{
"epoch": 0.10264001973846533,
"grad_norm": 0.2584899365901947,
"learning_rate": 7.269952498697734e-05,
"loss": 1.1841,
"num_input_tokens_seen": 109051904,
"step": 416
},
{
"epoch": 0.10288675055514433,
"grad_norm": 0.39329686760902405,
"learning_rate": 7.254387703447154e-05,
"loss": 1.3427,
"num_input_tokens_seen": 109314048,
"step": 417
},
{
"epoch": 0.10313348137182334,
"grad_norm": 0.47551411390304565,
"learning_rate": 7.238795439193848e-05,
"loss": 1.2287,
"num_input_tokens_seen": 109576192,
"step": 418
},
{
"epoch": 0.10338021218850234,
"grad_norm": 0.3897443413734436,
"learning_rate": 7.223175895924638e-05,
"loss": 1.1333,
"num_input_tokens_seen": 109838336,
"step": 419
},
{
"epoch": 0.10362694300518134,
"grad_norm": 0.4791469871997833,
"learning_rate": 7.207529263958726e-05,
"loss": 1.5687,
"num_input_tokens_seen": 110100480,
"step": 420
},
{
"epoch": 0.10387367382186036,
"grad_norm": 0.38370898365974426,
"learning_rate": 7.191855733945387e-05,
"loss": 1.3282,
"num_input_tokens_seen": 110362624,
"step": 421
},
{
"epoch": 0.10412040463853936,
"grad_norm": 0.3348982036113739,
"learning_rate": 7.176155496861638e-05,
"loss": 1.5486,
"num_input_tokens_seen": 110624768,
"step": 422
},
{
"epoch": 0.10436713545521836,
"grad_norm": 0.7512357831001282,
"learning_rate": 7.160428744009912e-05,
"loss": 1.2073,
"num_input_tokens_seen": 110886912,
"step": 423
},
{
"epoch": 0.10461386627189737,
"grad_norm": 0.43737906217575073,
"learning_rate": 7.14467566701573e-05,
"loss": 1.7103,
"num_input_tokens_seen": 111149056,
"step": 424
},
{
"epoch": 0.10486059708857637,
"grad_norm": 0.5568506717681885,
"learning_rate": 7.128896457825364e-05,
"loss": 1.2337,
"num_input_tokens_seen": 111411200,
"step": 425
},
{
"epoch": 0.10510732790525537,
"grad_norm": 0.28417298197746277,
"learning_rate": 7.113091308703498e-05,
"loss": 1.5231,
"num_input_tokens_seen": 111673344,
"step": 426
},
{
"epoch": 0.10535405872193437,
"grad_norm": 0.31169256567955017,
"learning_rate": 7.097260412230886e-05,
"loss": 1.5398,
"num_input_tokens_seen": 111935488,
"step": 427
},
{
"epoch": 0.10560078953861338,
"grad_norm": 0.24298615753650665,
"learning_rate": 7.081403961302006e-05,
"loss": 1.4368,
"num_input_tokens_seen": 112197632,
"step": 428
},
{
"epoch": 0.10584752035529238,
"grad_norm": 0.5086839199066162,
"learning_rate": 7.06552214912271e-05,
"loss": 1.5113,
"num_input_tokens_seen": 112459776,
"step": 429
},
{
"epoch": 0.10609425117197138,
"grad_norm": 0.45831137895584106,
"learning_rate": 7.049615169207864e-05,
"loss": 1.5455,
"num_input_tokens_seen": 112721920,
"step": 430
},
{
"epoch": 0.10634098198865038,
"grad_norm": 0.6165284514427185,
"learning_rate": 7.033683215379002e-05,
"loss": 1.535,
"num_input_tokens_seen": 112984064,
"step": 431
},
{
"epoch": 0.10658771280532939,
"grad_norm": 0.25280535221099854,
"learning_rate": 7.017726481761951e-05,
"loss": 1.0821,
"num_input_tokens_seen": 113246208,
"step": 432
},
{
"epoch": 0.10683444362200839,
"grad_norm": 0.6137649416923523,
"learning_rate": 7.001745162784477e-05,
"loss": 1.2638,
"num_input_tokens_seen": 113508352,
"step": 433
},
{
"epoch": 0.1070811744386874,
"grad_norm": 0.449706107378006,
"learning_rate": 6.985739453173903e-05,
"loss": 1.7029,
"num_input_tokens_seen": 113770496,
"step": 434
},
{
"epoch": 0.1073279052553664,
"grad_norm": 0.5144222974777222,
"learning_rate": 6.969709547954756e-05,
"loss": 1.2772,
"num_input_tokens_seen": 114032640,
"step": 435
},
{
"epoch": 0.1075746360720454,
"grad_norm": 0.5287466049194336,
"learning_rate": 6.953655642446368e-05,
"loss": 1.4101,
"num_input_tokens_seen": 114294784,
"step": 436
},
{
"epoch": 0.1078213668887244,
"grad_norm": 0.4584052264690399,
"learning_rate": 6.937577932260515e-05,
"loss": 1.3729,
"num_input_tokens_seen": 114556928,
"step": 437
},
{
"epoch": 0.1080680977054034,
"grad_norm": 0.5260595083236694,
"learning_rate": 6.921476613299018e-05,
"loss": 1.6248,
"num_input_tokens_seen": 114819072,
"step": 438
},
{
"epoch": 0.10831482852208241,
"grad_norm": 0.41803038120269775,
"learning_rate": 6.905351881751372e-05,
"loss": 1.6069,
"num_input_tokens_seen": 115081216,
"step": 439
},
{
"epoch": 0.10856155933876141,
"grad_norm": 0.509223997592926,
"learning_rate": 6.889203934092336e-05,
"loss": 1.4642,
"num_input_tokens_seen": 115343360,
"step": 440
},
{
"epoch": 0.10880829015544041,
"grad_norm": 0.49517786502838135,
"learning_rate": 6.873032967079561e-05,
"loss": 1.4863,
"num_input_tokens_seen": 115605504,
"step": 441
},
{
"epoch": 0.10905502097211942,
"grad_norm": 0.37127602100372314,
"learning_rate": 6.856839177751176e-05,
"loss": 1.5365,
"num_input_tokens_seen": 115867648,
"step": 442
},
{
"epoch": 0.10930175178879842,
"grad_norm": 0.4723079204559326,
"learning_rate": 6.840622763423391e-05,
"loss": 1.6152,
"num_input_tokens_seen": 116129792,
"step": 443
},
{
"epoch": 0.10954848260547742,
"grad_norm": 0.40566620230674744,
"learning_rate": 6.824383921688098e-05,
"loss": 0.8527,
"num_input_tokens_seen": 116391936,
"step": 444
},
{
"epoch": 0.10979521342215642,
"grad_norm": 0.559238076210022,
"learning_rate": 6.808122850410461e-05,
"loss": 1.5958,
"num_input_tokens_seen": 116654080,
"step": 445
},
{
"epoch": 0.11004194423883543,
"grad_norm": 0.5383118987083435,
"learning_rate": 6.7918397477265e-05,
"loss": 1.8148,
"num_input_tokens_seen": 116916224,
"step": 446
},
{
"epoch": 0.11028867505551443,
"grad_norm": 0.3208450973033905,
"learning_rate": 6.775534812040685e-05,
"loss": 1.3657,
"num_input_tokens_seen": 117178368,
"step": 447
},
{
"epoch": 0.11053540587219343,
"grad_norm": 0.41013282537460327,
"learning_rate": 6.759208242023509e-05,
"loss": 1.2008,
"num_input_tokens_seen": 117440512,
"step": 448
},
{
"epoch": 0.11078213668887243,
"grad_norm": 0.38757890462875366,
"learning_rate": 6.742860236609077e-05,
"loss": 1.3416,
"num_input_tokens_seen": 117702656,
"step": 449
},
{
"epoch": 0.11102886750555144,
"grad_norm": 0.39333197474479675,
"learning_rate": 6.726490994992674e-05,
"loss": 1.3309,
"num_input_tokens_seen": 117964800,
"step": 450
},
{
"epoch": 0.11127559832223044,
"grad_norm": 0.5117759108543396,
"learning_rate": 6.710100716628344e-05,
"loss": 1.3636,
"num_input_tokens_seen": 118226944,
"step": 451
},
{
"epoch": 0.11152232913890944,
"grad_norm": 0.6466837525367737,
"learning_rate": 6.693689601226458e-05,
"loss": 1.2245,
"num_input_tokens_seen": 118489088,
"step": 452
},
{
"epoch": 0.11176905995558846,
"grad_norm": 0.5484910011291504,
"learning_rate": 6.677257848751277e-05,
"loss": 1.6369,
"num_input_tokens_seen": 118751232,
"step": 453
},
{
"epoch": 0.11201579077226746,
"grad_norm": 0.3835620880126953,
"learning_rate": 6.660805659418516e-05,
"loss": 1.2575,
"num_input_tokens_seen": 119013376,
"step": 454
},
{
"epoch": 0.11226252158894647,
"grad_norm": 0.479839026927948,
"learning_rate": 6.644333233692916e-05,
"loss": 1.8851,
"num_input_tokens_seen": 119275520,
"step": 455
},
{
"epoch": 0.11250925240562547,
"grad_norm": 0.46820715069770813,
"learning_rate": 6.627840772285784e-05,
"loss": 1.4417,
"num_input_tokens_seen": 119537664,
"step": 456
},
{
"epoch": 0.11275598322230447,
"grad_norm": 0.3619309067726135,
"learning_rate": 6.611328476152557e-05,
"loss": 1.314,
"num_input_tokens_seen": 119799808,
"step": 457
},
{
"epoch": 0.11300271403898347,
"grad_norm": 0.37368395924568176,
"learning_rate": 6.594796546490351e-05,
"loss": 1.8529,
"num_input_tokens_seen": 120061952,
"step": 458
},
{
"epoch": 0.11324944485566248,
"grad_norm": 0.5047134160995483,
"learning_rate": 6.578245184735513e-05,
"loss": 1.6389,
"num_input_tokens_seen": 120324096,
"step": 459
},
{
"epoch": 0.11349617567234148,
"grad_norm": 0.47305071353912354,
"learning_rate": 6.561674592561163e-05,
"loss": 1.4733,
"num_input_tokens_seen": 120586240,
"step": 460
},
{
"epoch": 0.11374290648902048,
"grad_norm": 0.4112611413002014,
"learning_rate": 6.545084971874738e-05,
"loss": 1.6039,
"num_input_tokens_seen": 120848384,
"step": 461
},
{
"epoch": 0.11398963730569948,
"grad_norm": 0.3452404737472534,
"learning_rate": 6.528476524815528e-05,
"loss": 1.182,
"num_input_tokens_seen": 121110528,
"step": 462
},
{
"epoch": 0.11423636812237849,
"grad_norm": 0.5911282300949097,
"learning_rate": 6.511849453752223e-05,
"loss": 1.2968,
"num_input_tokens_seen": 121372672,
"step": 463
},
{
"epoch": 0.11448309893905749,
"grad_norm": 0.43673065304756165,
"learning_rate": 6.495203961280434e-05,
"loss": 1.1236,
"num_input_tokens_seen": 121634816,
"step": 464
},
{
"epoch": 0.11472982975573649,
"grad_norm": 0.4529929459095001,
"learning_rate": 6.478540250220234e-05,
"loss": 1.3622,
"num_input_tokens_seen": 121896960,
"step": 465
},
{
"epoch": 0.1149765605724155,
"grad_norm": 0.4453410506248474,
"learning_rate": 6.461858523613684e-05,
"loss": 1.2336,
"num_input_tokens_seen": 122159104,
"step": 466
},
{
"epoch": 0.1152232913890945,
"grad_norm": 0.4362781047821045,
"learning_rate": 6.445158984722358e-05,
"loss": 1.254,
"num_input_tokens_seen": 122421248,
"step": 467
},
{
"epoch": 0.1154700222057735,
"grad_norm": 0.7498301863670349,
"learning_rate": 6.428441837024868e-05,
"loss": 1.7482,
"num_input_tokens_seen": 122683392,
"step": 468
},
{
"epoch": 0.1157167530224525,
"grad_norm": 0.37122827768325806,
"learning_rate": 6.411707284214384e-05,
"loss": 1.7719,
"num_input_tokens_seen": 122945536,
"step": 469
},
{
"epoch": 0.1159634838391315,
"grad_norm": 0.32280775904655457,
"learning_rate": 6.394955530196147e-05,
"loss": 1.5948,
"num_input_tokens_seen": 123207680,
"step": 470
},
{
"epoch": 0.11621021465581051,
"grad_norm": 0.40337470173835754,
"learning_rate": 6.378186779084995e-05,
"loss": 1.2603,
"num_input_tokens_seen": 123469824,
"step": 471
},
{
"epoch": 0.11645694547248951,
"grad_norm": 0.44294825196266174,
"learning_rate": 6.361401235202872e-05,
"loss": 1.76,
"num_input_tokens_seen": 123731968,
"step": 472
},
{
"epoch": 0.11670367628916851,
"grad_norm": 0.43434253334999084,
"learning_rate": 6.344599103076329e-05,
"loss": 1.2013,
"num_input_tokens_seen": 123994112,
"step": 473
},
{
"epoch": 0.11695040710584752,
"grad_norm": 0.7428044080734253,
"learning_rate": 6.327780587434044e-05,
"loss": 1.2979,
"num_input_tokens_seen": 124256256,
"step": 474
},
{
"epoch": 0.11719713792252652,
"grad_norm": 0.48722395300865173,
"learning_rate": 6.310945893204324e-05,
"loss": 0.9954,
"num_input_tokens_seen": 124518400,
"step": 475
},
{
"epoch": 0.11744386873920552,
"grad_norm": 0.33104240894317627,
"learning_rate": 6.294095225512603e-05,
"loss": 1.3589,
"num_input_tokens_seen": 124780544,
"step": 476
},
{
"epoch": 0.11769059955588453,
"grad_norm": 0.3963764011859894,
"learning_rate": 6.277228789678953e-05,
"loss": 1.4602,
"num_input_tokens_seen": 125042688,
"step": 477
},
{
"epoch": 0.11793733037256353,
"grad_norm": 0.2808891832828522,
"learning_rate": 6.26034679121557e-05,
"loss": 1.556,
"num_input_tokens_seen": 125304832,
"step": 478
},
{
"epoch": 0.11818406118924253,
"grad_norm": 0.39862188696861267,
"learning_rate": 6.243449435824276e-05,
"loss": 1.9454,
"num_input_tokens_seen": 125566976,
"step": 479
},
{
"epoch": 0.11843079200592153,
"grad_norm": 0.3807569444179535,
"learning_rate": 6.226536929394013e-05,
"loss": 1.3788,
"num_input_tokens_seen": 125829120,
"step": 480
},
{
"epoch": 0.11867752282260054,
"grad_norm": 0.36682799458503723,
"learning_rate": 6.209609477998338e-05,
"loss": 1.2969,
"num_input_tokens_seen": 126091264,
"step": 481
},
{
"epoch": 0.11892425363927954,
"grad_norm": 0.47444048523902893,
"learning_rate": 6.192667287892905e-05,
"loss": 1.8209,
"num_input_tokens_seen": 126353408,
"step": 482
},
{
"epoch": 0.11917098445595854,
"grad_norm": 0.47788330912590027,
"learning_rate": 6.17571056551295e-05,
"loss": 1.9536,
"num_input_tokens_seen": 126615552,
"step": 483
},
{
"epoch": 0.11941771527263756,
"grad_norm": 0.4263227880001068,
"learning_rate": 6.158739517470786e-05,
"loss": 1.3451,
"num_input_tokens_seen": 126877696,
"step": 484
},
{
"epoch": 0.11966444608931656,
"grad_norm": 0.3616585433483124,
"learning_rate": 6.141754350553279e-05,
"loss": 1.2872,
"num_input_tokens_seen": 127139840,
"step": 485
},
{
"epoch": 0.11991117690599556,
"grad_norm": 0.3233768343925476,
"learning_rate": 6.124755271719325e-05,
"loss": 1.1209,
"num_input_tokens_seen": 127401984,
"step": 486
},
{
"epoch": 0.12015790772267457,
"grad_norm": 0.44189000129699707,
"learning_rate": 6.107742488097338e-05,
"loss": 1.2286,
"num_input_tokens_seen": 127664128,
"step": 487
},
{
"epoch": 0.12040463853935357,
"grad_norm": 0.544161856174469,
"learning_rate": 6.090716206982714e-05,
"loss": 1.2229,
"num_input_tokens_seen": 127926272,
"step": 488
},
{
"epoch": 0.12065136935603257,
"grad_norm": 0.391888827085495,
"learning_rate": 6.073676635835317e-05,
"loss": 1.4871,
"num_input_tokens_seen": 128188416,
"step": 489
},
{
"epoch": 0.12089810017271158,
"grad_norm": 0.3784489333629608,
"learning_rate": 6.056623982276944e-05,
"loss": 0.5786,
"num_input_tokens_seen": 128450560,
"step": 490
},
{
"epoch": 0.12114483098939058,
"grad_norm": 0.419486939907074,
"learning_rate": 6.0395584540887963e-05,
"loss": 1.3576,
"num_input_tokens_seen": 128712704,
"step": 491
},
{
"epoch": 0.12139156180606958,
"grad_norm": 0.4199804961681366,
"learning_rate": 6.0224802592089513e-05,
"loss": 1.6053,
"num_input_tokens_seen": 128974848,
"step": 492
},
{
"epoch": 0.12163829262274858,
"grad_norm": 0.4623740017414093,
"learning_rate": 6.005389605729824e-05,
"loss": 1.2944,
"num_input_tokens_seen": 129236992,
"step": 493
},
{
"epoch": 0.12188502343942759,
"grad_norm": 0.42237526178359985,
"learning_rate": 5.988286701895631e-05,
"loss": 1.6558,
"num_input_tokens_seen": 129499136,
"step": 494
},
{
"epoch": 0.12213175425610659,
"grad_norm": 0.4772239625453949,
"learning_rate": 5.97117175609986e-05,
"loss": 1.2807,
"num_input_tokens_seen": 129761280,
"step": 495
},
{
"epoch": 0.12237848507278559,
"grad_norm": 0.4234151840209961,
"learning_rate": 5.9540449768827246e-05,
"loss": 1.3557,
"num_input_tokens_seen": 130023424,
"step": 496
},
{
"epoch": 0.1226252158894646,
"grad_norm": 0.3814954161643982,
"learning_rate": 5.9369065729286245e-05,
"loss": 1.7307,
"num_input_tokens_seen": 130285568,
"step": 497
},
{
"epoch": 0.1228719467061436,
"grad_norm": 0.5017201900482178,
"learning_rate": 5.9197567530636014e-05,
"loss": 1.6599,
"num_input_tokens_seen": 130547712,
"step": 498
},
{
"epoch": 0.1231186775228226,
"grad_norm": 0.4710695147514343,
"learning_rate": 5.902595726252801e-05,
"loss": 1.7653,
"num_input_tokens_seen": 130809856,
"step": 499
},
{
"epoch": 0.1233654083395016,
"grad_norm": 0.4460209310054779,
"learning_rate": 5.885423701597917e-05,
"loss": 1.8497,
"num_input_tokens_seen": 131072000,
"step": 500
},
{
"epoch": 0.1236121391561806,
"grad_norm": 0.40563341975212097,
"learning_rate": 5.868240888334653e-05,
"loss": 1.75,
"num_input_tokens_seen": 131334144,
"step": 501
},
{
"epoch": 0.12385886997285961,
"grad_norm": 0.5194706916809082,
"learning_rate": 5.851047495830163e-05,
"loss": 1.361,
"num_input_tokens_seen": 131596288,
"step": 502
},
{
"epoch": 0.12410560078953861,
"grad_norm": 0.3415733873844147,
"learning_rate": 5.833843733580512e-05,
"loss": 1.303,
"num_input_tokens_seen": 131858432,
"step": 503
},
{
"epoch": 0.12435233160621761,
"grad_norm": 0.6099317669868469,
"learning_rate": 5.816629811208112e-05,
"loss": 1.5934,
"num_input_tokens_seen": 132120576,
"step": 504
},
{
"epoch": 0.12459906242289662,
"grad_norm": 0.3613656759262085,
"learning_rate": 5.799405938459175e-05,
"loss": 1.6132,
"num_input_tokens_seen": 132382720,
"step": 505
},
{
"epoch": 0.12484579323957562,
"grad_norm": 0.3092043697834015,
"learning_rate": 5.782172325201155e-05,
"loss": 1.6267,
"num_input_tokens_seen": 132644864,
"step": 506
},
{
"epoch": 0.12509252405625462,
"grad_norm": 0.5578877925872803,
"learning_rate": 5.764929181420191e-05,
"loss": 1.4283,
"num_input_tokens_seen": 132907008,
"step": 507
},
{
"epoch": 0.12533925487293363,
"grad_norm": 0.40944454073905945,
"learning_rate": 5.747676717218549e-05,
"loss": 1.5385,
"num_input_tokens_seen": 133169152,
"step": 508
},
{
"epoch": 0.12558598568961263,
"grad_norm": 0.5208156704902649,
"learning_rate": 5.730415142812059e-05,
"loss": 1.535,
"num_input_tokens_seen": 133431296,
"step": 509
},
{
"epoch": 0.12583271650629163,
"grad_norm": 0.4434719979763031,
"learning_rate": 5.713144668527559e-05,
"loss": 1.5663,
"num_input_tokens_seen": 133693440,
"step": 510
},
{
"epoch": 0.12607944732297063,
"grad_norm": 0.46345973014831543,
"learning_rate": 5.695865504800327e-05,
"loss": 1.6915,
"num_input_tokens_seen": 133955584,
"step": 511
},
{
"epoch": 0.12632617813964964,
"grad_norm": 0.4293109178543091,
"learning_rate": 5.6785778621715225e-05,
"loss": 1.6867,
"num_input_tokens_seen": 134217728,
"step": 512
},
{
"epoch": 0.12657290895632864,
"grad_norm": 0.5305678248405457,
"learning_rate": 5.661281951285613e-05,
"loss": 1.5729,
"num_input_tokens_seen": 134479872,
"step": 513
},
{
"epoch": 0.12681963977300764,
"grad_norm": 0.40899133682250977,
"learning_rate": 5.643977982887815e-05,
"loss": 1.2663,
"num_input_tokens_seen": 134742016,
"step": 514
},
{
"epoch": 0.12706637058968664,
"grad_norm": 0.47092074155807495,
"learning_rate": 5.6266661678215216e-05,
"loss": 1.7356,
"num_input_tokens_seen": 135004160,
"step": 515
},
{
"epoch": 0.12731310140636565,
"grad_norm": 0.3236401379108429,
"learning_rate": 5.6093467170257374e-05,
"loss": 1.4607,
"num_input_tokens_seen": 135266304,
"step": 516
},
{
"epoch": 0.12755983222304465,
"grad_norm": 0.5006436705589294,
"learning_rate": 5.5920198415325064e-05,
"loss": 1.5724,
"num_input_tokens_seen": 135528448,
"step": 517
},
{
"epoch": 0.12780656303972365,
"grad_norm": 0.3072086572647095,
"learning_rate": 5.574685752464334e-05,
"loss": 1.3547,
"num_input_tokens_seen": 135790592,
"step": 518
},
{
"epoch": 0.12805329385640266,
"grad_norm": 0.3969310224056244,
"learning_rate": 5.557344661031627e-05,
"loss": 1.6657,
"num_input_tokens_seen": 136052736,
"step": 519
},
{
"epoch": 0.12830002467308166,
"grad_norm": 0.4531516432762146,
"learning_rate": 5.539996778530115e-05,
"loss": 1.1859,
"num_input_tokens_seen": 136314880,
"step": 520
},
{
"epoch": 0.12854675548976066,
"grad_norm": 0.33052849769592285,
"learning_rate": 5.522642316338268e-05,
"loss": 1.8518,
"num_input_tokens_seen": 136577024,
"step": 521
},
{
"epoch": 0.12879348630643966,
"grad_norm": 0.44960132241249084,
"learning_rate": 5.5052814859147315e-05,
"loss": 1.6946,
"num_input_tokens_seen": 136839168,
"step": 522
},
{
"epoch": 0.12904021712311867,
"grad_norm": 0.46277108788490295,
"learning_rate": 5.487914498795747e-05,
"loss": 1.395,
"num_input_tokens_seen": 137101312,
"step": 523
},
{
"epoch": 0.12928694793979767,
"grad_norm": 0.35661888122558594,
"learning_rate": 5.470541566592573e-05,
"loss": 0.9987,
"num_input_tokens_seen": 137363456,
"step": 524
},
{
"epoch": 0.12953367875647667,
"grad_norm": 0.3381035625934601,
"learning_rate": 5.453162900988902e-05,
"loss": 1.1251,
"num_input_tokens_seen": 137625600,
"step": 525
},
{
"epoch": 0.12978040957315568,
"grad_norm": 0.5418933033943176,
"learning_rate": 5.435778713738292e-05,
"loss": 1.5873,
"num_input_tokens_seen": 137887744,
"step": 526
},
{
"epoch": 0.13002714038983468,
"grad_norm": 0.40096211433410645,
"learning_rate": 5.418389216661579e-05,
"loss": 1.0274,
"num_input_tokens_seen": 138149888,
"step": 527
},
{
"epoch": 0.13027387120651368,
"grad_norm": 0.46591490507125854,
"learning_rate": 5.4009946216442944e-05,
"loss": 1.5977,
"num_input_tokens_seen": 138412032,
"step": 528
},
{
"epoch": 0.13052060202319268,
"grad_norm": 0.5208688974380493,
"learning_rate": 5.383595140634093e-05,
"loss": 1.7274,
"num_input_tokens_seen": 138674176,
"step": 529
},
{
"epoch": 0.13076733283987169,
"grad_norm": 0.3030504584312439,
"learning_rate": 5.366190985638159e-05,
"loss": 0.9647,
"num_input_tokens_seen": 138936320,
"step": 530
},
{
"epoch": 0.13101406365655072,
"grad_norm": 0.45822811126708984,
"learning_rate": 5.348782368720626e-05,
"loss": 1.2234,
"num_input_tokens_seen": 139198464,
"step": 531
},
{
"epoch": 0.13126079447322972,
"grad_norm": 0.650368332862854,
"learning_rate": 5.3313695020000024e-05,
"loss": 1.7527,
"num_input_tokens_seen": 139460608,
"step": 532
},
{
"epoch": 0.13150752528990872,
"grad_norm": 0.4637644290924072,
"learning_rate": 5.313952597646568e-05,
"loss": 1.48,
"num_input_tokens_seen": 139722752,
"step": 533
},
{
"epoch": 0.13175425610658772,
"grad_norm": 0.5175304412841797,
"learning_rate": 5.296531867879809e-05,
"loss": 1.6836,
"num_input_tokens_seen": 139984896,
"step": 534
},
{
"epoch": 0.13200098692326673,
"grad_norm": 0.6300275921821594,
"learning_rate": 5.279107524965819e-05,
"loss": 1.4865,
"num_input_tokens_seen": 140247040,
"step": 535
},
{
"epoch": 0.13224771773994573,
"grad_norm": 0.5622620582580566,
"learning_rate": 5.26167978121472e-05,
"loss": 1.6625,
"num_input_tokens_seen": 140509184,
"step": 536
},
{
"epoch": 0.13249444855662473,
"grad_norm": 0.450957715511322,
"learning_rate": 5.244248848978067e-05,
"loss": 1.3511,
"num_input_tokens_seen": 140771328,
"step": 537
},
{
"epoch": 0.13274117937330374,
"grad_norm": 0.32074758410453796,
"learning_rate": 5.226814940646269e-05,
"loss": 1.0604,
"num_input_tokens_seen": 141033472,
"step": 538
},
{
"epoch": 0.13298791018998274,
"grad_norm": 0.4316653907299042,
"learning_rate": 5.209378268645998e-05,
"loss": 1.3447,
"num_input_tokens_seen": 141295616,
"step": 539
},
{
"epoch": 0.13323464100666174,
"grad_norm": 1.2764695882797241,
"learning_rate": 5.191939045437601e-05,
"loss": 1.708,
"num_input_tokens_seen": 141557760,
"step": 540
},
{
"epoch": 0.13348137182334074,
"grad_norm": 0.5784677863121033,
"learning_rate": 5.174497483512506e-05,
"loss": 1.5203,
"num_input_tokens_seen": 141819904,
"step": 541
},
{
"epoch": 0.13372810264001975,
"grad_norm": 0.48645123839378357,
"learning_rate": 5.157053795390642e-05,
"loss": 1.3748,
"num_input_tokens_seen": 142082048,
"step": 542
},
{
"epoch": 0.13397483345669875,
"grad_norm": 0.24234865605831146,
"learning_rate": 5.139608193617845e-05,
"loss": 0.9776,
"num_input_tokens_seen": 142344192,
"step": 543
},
{
"epoch": 0.13422156427337775,
"grad_norm": 0.37544649839401245,
"learning_rate": 5.1221608907632665e-05,
"loss": 1.6227,
"num_input_tokens_seen": 142606336,
"step": 544
},
{
"epoch": 0.13446829509005676,
"grad_norm": 0.2981484532356262,
"learning_rate": 5.104712099416785e-05,
"loss": 1.4055,
"num_input_tokens_seen": 142868480,
"step": 545
},
{
"epoch": 0.13471502590673576,
"grad_norm": 0.43188732862472534,
"learning_rate": 5.0872620321864185e-05,
"loss": 1.2737,
"num_input_tokens_seen": 143130624,
"step": 546
},
{
"epoch": 0.13496175672341476,
"grad_norm": 0.2748105525970459,
"learning_rate": 5.0698109016957274e-05,
"loss": 1.5069,
"num_input_tokens_seen": 143392768,
"step": 547
},
{
"epoch": 0.13520848754009376,
"grad_norm": 0.5175468921661377,
"learning_rate": 5.052358920581229e-05,
"loss": 1.4274,
"num_input_tokens_seen": 143654912,
"step": 548
},
{
"epoch": 0.13545521835677277,
"grad_norm": 0.2804499864578247,
"learning_rate": 5.034906301489808e-05,
"loss": 1.1306,
"num_input_tokens_seen": 143917056,
"step": 549
},
{
"epoch": 0.13570194917345177,
"grad_norm": 0.48032549023628235,
"learning_rate": 5.017453257076119e-05,
"loss": 1.5932,
"num_input_tokens_seen": 144179200,
"step": 550
},
{
"epoch": 0.13594867999013077,
"grad_norm": 0.718036413192749,
"learning_rate": 5e-05,
"loss": 1.1992,
"num_input_tokens_seen": 144441344,
"step": 551
},
{
"epoch": 0.13619541080680977,
"grad_norm": 0.8548443913459778,
"learning_rate": 4.9825467429238834e-05,
"loss": 1.9061,
"num_input_tokens_seen": 144703488,
"step": 552
},
{
"epoch": 0.13644214162348878,
"grad_norm": 0.4835663139820099,
"learning_rate": 4.965093698510193e-05,
"loss": 1.304,
"num_input_tokens_seen": 144965632,
"step": 553
},
{
"epoch": 0.13668887244016778,
"grad_norm": 0.46579673886299133,
"learning_rate": 4.947641079418773e-05,
"loss": 1.0379,
"num_input_tokens_seen": 145227776,
"step": 554
},
{
"epoch": 0.13693560325684678,
"grad_norm": 0.31949591636657715,
"learning_rate": 4.9301890983042744e-05,
"loss": 1.2834,
"num_input_tokens_seen": 145489920,
"step": 555
},
{
"epoch": 0.13718233407352579,
"grad_norm": 0.3905438482761383,
"learning_rate": 4.912737967813583e-05,
"loss": 1.2388,
"num_input_tokens_seen": 145752064,
"step": 556
},
{
"epoch": 0.1374290648902048,
"grad_norm": 0.40413108468055725,
"learning_rate": 4.895287900583216e-05,
"loss": 1.9645,
"num_input_tokens_seen": 146014208,
"step": 557
},
{
"epoch": 0.1376757957068838,
"grad_norm": 0.4360823929309845,
"learning_rate": 4.8778391092367346e-05,
"loss": 1.935,
"num_input_tokens_seen": 146276352,
"step": 558
},
{
"epoch": 0.1379225265235628,
"grad_norm": 0.3165574371814728,
"learning_rate": 4.860391806382157e-05,
"loss": 1.2331,
"num_input_tokens_seen": 146538496,
"step": 559
},
{
"epoch": 0.1381692573402418,
"grad_norm": 0.4741459786891937,
"learning_rate": 4.8429462046093585e-05,
"loss": 1.7303,
"num_input_tokens_seen": 146800640,
"step": 560
},
{
"epoch": 0.1384159881569208,
"grad_norm": 0.4940711557865143,
"learning_rate": 4.825502516487497e-05,
"loss": 1.7287,
"num_input_tokens_seen": 147062784,
"step": 561
},
{
"epoch": 0.1386627189735998,
"grad_norm": 0.37318700551986694,
"learning_rate": 4.8080609545624004e-05,
"loss": 1.2874,
"num_input_tokens_seen": 147324928,
"step": 562
},
{
"epoch": 0.1389094497902788,
"grad_norm": 0.4317730963230133,
"learning_rate": 4.790621731354003e-05,
"loss": 1.9204,
"num_input_tokens_seen": 147587072,
"step": 563
},
{
"epoch": 0.1391561806069578,
"grad_norm": 0.3825749158859253,
"learning_rate": 4.773185059353732e-05,
"loss": 1.836,
"num_input_tokens_seen": 147849216,
"step": 564
},
{
"epoch": 0.1394029114236368,
"grad_norm": 0.3591608703136444,
"learning_rate": 4.755751151021934e-05,
"loss": 1.4711,
"num_input_tokens_seen": 148111360,
"step": 565
},
{
"epoch": 0.1396496422403158,
"grad_norm": 0.5317450165748596,
"learning_rate": 4.738320218785281e-05,
"loss": 1.4428,
"num_input_tokens_seen": 148373504,
"step": 566
},
{
"epoch": 0.13989637305699482,
"grad_norm": 0.3051570653915405,
"learning_rate": 4.720892475034181e-05,
"loss": 1.8257,
"num_input_tokens_seen": 148635648,
"step": 567
},
{
"epoch": 0.14014310387367382,
"grad_norm": 0.40506741404533386,
"learning_rate": 4.703468132120193e-05,
"loss": 1.5873,
"num_input_tokens_seen": 148897792,
"step": 568
},
{
"epoch": 0.14038983469035282,
"grad_norm": 0.35452091693878174,
"learning_rate": 4.6860474023534335e-05,
"loss": 1.5223,
"num_input_tokens_seen": 149159936,
"step": 569
},
{
"epoch": 0.14063656550703182,
"grad_norm": 0.7315366864204407,
"learning_rate": 4.668630498000001e-05,
"loss": 1.6133,
"num_input_tokens_seen": 149422080,
"step": 570
},
{
"epoch": 0.14088329632371083,
"grad_norm": 0.42512834072113037,
"learning_rate": 4.6512176312793736e-05,
"loss": 1.62,
"num_input_tokens_seen": 149684224,
"step": 571
},
{
"epoch": 0.14113002714038983,
"grad_norm": 0.40341562032699585,
"learning_rate": 4.633809014361843e-05,
"loss": 1.2722,
"num_input_tokens_seen": 149946368,
"step": 572
},
{
"epoch": 0.14137675795706883,
"grad_norm": 0.6295319199562073,
"learning_rate": 4.616404859365907e-05,
"loss": 1.6756,
"num_input_tokens_seen": 150208512,
"step": 573
},
{
"epoch": 0.14162348877374784,
"grad_norm": 0.42949360609054565,
"learning_rate": 4.599005378355706e-05,
"loss": 1.5944,
"num_input_tokens_seen": 150470656,
"step": 574
},
{
"epoch": 0.14187021959042684,
"grad_norm": 0.5056847333908081,
"learning_rate": 4.5816107833384234e-05,
"loss": 1.8344,
"num_input_tokens_seen": 150732800,
"step": 575
},
{
"epoch": 0.14211695040710584,
"grad_norm": 0.46629583835601807,
"learning_rate": 4.564221286261709e-05,
"loss": 1.4963,
"num_input_tokens_seen": 150994944,
"step": 576
},
{
"epoch": 0.14236368122378484,
"grad_norm": 0.6094178557395935,
"learning_rate": 4.5468370990111006e-05,
"loss": 1.3047,
"num_input_tokens_seen": 151257088,
"step": 577
},
{
"epoch": 0.14261041204046385,
"grad_norm": 0.3319358229637146,
"learning_rate": 4.529458433407429e-05,
"loss": 0.9717,
"num_input_tokens_seen": 151519232,
"step": 578
},
{
"epoch": 0.14285714285714285,
"grad_norm": 0.5464443564414978,
"learning_rate": 4.512085501204253e-05,
"loss": 1.5817,
"num_input_tokens_seen": 151781376,
"step": 579
},
{
"epoch": 0.14310387367382185,
"grad_norm": 0.3190373182296753,
"learning_rate": 4.494718514085268e-05,
"loss": 0.8934,
"num_input_tokens_seen": 152043520,
"step": 580
},
{
"epoch": 0.14335060449050085,
"grad_norm": 0.5776032209396362,
"learning_rate": 4.477357683661734e-05,
"loss": 1.5868,
"num_input_tokens_seen": 152305664,
"step": 581
},
{
"epoch": 0.14359733530717986,
"grad_norm": 0.3422704339027405,
"learning_rate": 4.460003221469886e-05,
"loss": 1.3598,
"num_input_tokens_seen": 152567808,
"step": 582
},
{
"epoch": 0.14384406612385886,
"grad_norm": 0.4885745346546173,
"learning_rate": 4.442655338968373e-05,
"loss": 1.6365,
"num_input_tokens_seen": 152829952,
"step": 583
},
{
"epoch": 0.14409079694053786,
"grad_norm": 0.4098423719406128,
"learning_rate": 4.425314247535668e-05,
"loss": 1.6599,
"num_input_tokens_seen": 153092096,
"step": 584
},
{
"epoch": 0.14433752775721687,
"grad_norm": 0.4424959123134613,
"learning_rate": 4.407980158467495e-05,
"loss": 1.304,
"num_input_tokens_seen": 153354240,
"step": 585
},
{
"epoch": 0.14458425857389587,
"grad_norm": 0.6284481287002563,
"learning_rate": 4.390653282974264e-05,
"loss": 1.366,
"num_input_tokens_seen": 153616384,
"step": 586
},
{
"epoch": 0.14483098939057487,
"grad_norm": 0.4497601091861725,
"learning_rate": 4.373333832178478e-05,
"loss": 1.5778,
"num_input_tokens_seen": 153878528,
"step": 587
},
{
"epoch": 0.14507772020725387,
"grad_norm": 0.43979567289352417,
"learning_rate": 4.356022017112187e-05,
"loss": 1.8713,
"num_input_tokens_seen": 154140672,
"step": 588
},
{
"epoch": 0.14532445102393288,
"grad_norm": 0.49371346831321716,
"learning_rate": 4.3387180487143876e-05,
"loss": 1.4494,
"num_input_tokens_seen": 154402816,
"step": 589
},
{
"epoch": 0.14557118184061188,
"grad_norm": 0.48771700263023376,
"learning_rate": 4.321422137828479e-05,
"loss": 1.2336,
"num_input_tokens_seen": 154664960,
"step": 590
},
{
"epoch": 0.14581791265729088,
"grad_norm": 0.5311357975006104,
"learning_rate": 4.3041344951996746e-05,
"loss": 1.4141,
"num_input_tokens_seen": 154927104,
"step": 591
},
{
"epoch": 0.14606464347396989,
"grad_norm": 0.8907399773597717,
"learning_rate": 4.2868553314724425e-05,
"loss": 1.3688,
"num_input_tokens_seen": 155189248,
"step": 592
},
{
"epoch": 0.14631137429064892,
"grad_norm": 0.35455289483070374,
"learning_rate": 4.269584857187943e-05,
"loss": 1.237,
"num_input_tokens_seen": 155451392,
"step": 593
},
{
"epoch": 0.14655810510732792,
"grad_norm": 0.3664483428001404,
"learning_rate": 4.252323282781453e-05,
"loss": 1.1333,
"num_input_tokens_seen": 155713536,
"step": 594
},
{
"epoch": 0.14680483592400692,
"grad_norm": 0.5533236265182495,
"learning_rate": 4.23507081857981e-05,
"loss": 1.2187,
"num_input_tokens_seen": 155975680,
"step": 595
},
{
"epoch": 0.14705156674068592,
"grad_norm": 0.35301920771598816,
"learning_rate": 4.2178276747988446e-05,
"loss": 1.2276,
"num_input_tokens_seen": 156237824,
"step": 596
},
{
"epoch": 0.14729829755736493,
"grad_norm": 0.3042898178100586,
"learning_rate": 4.2005940615408264e-05,
"loss": 1.5472,
"num_input_tokens_seen": 156499968,
"step": 597
},
{
"epoch": 0.14754502837404393,
"grad_norm": 0.38035786151885986,
"learning_rate": 4.1833701887918904e-05,
"loss": 1.2387,
"num_input_tokens_seen": 156762112,
"step": 598
},
{
"epoch": 0.14779175919072293,
"grad_norm": 0.4864104390144348,
"learning_rate": 4.166156266419489e-05,
"loss": 1.5304,
"num_input_tokens_seen": 157024256,
"step": 599
},
{
"epoch": 0.14803849000740193,
"grad_norm": 0.5711836218833923,
"learning_rate": 4.1489525041698387e-05,
"loss": 1.4102,
"num_input_tokens_seen": 157286400,
"step": 600
},
{
"epoch": 0.14828522082408094,
"grad_norm": 0.43272215127944946,
"learning_rate": 4.131759111665349e-05,
"loss": 1.268,
"num_input_tokens_seen": 157548544,
"step": 601
},
{
"epoch": 0.14853195164075994,
"grad_norm": 0.31057846546173096,
"learning_rate": 4.114576298402084e-05,
"loss": 1.1202,
"num_input_tokens_seen": 157810688,
"step": 602
},
{
"epoch": 0.14877868245743894,
"grad_norm": 0.48547300696372986,
"learning_rate": 4.0974042737472006e-05,
"loss": 1.5392,
"num_input_tokens_seen": 158072832,
"step": 603
},
{
"epoch": 0.14902541327411795,
"grad_norm": 0.3425385653972626,
"learning_rate": 4.080243246936399e-05,
"loss": 1.4502,
"num_input_tokens_seen": 158334976,
"step": 604
},
{
"epoch": 0.14927214409079695,
"grad_norm": 0.43901634216308594,
"learning_rate": 4.063093427071376e-05,
"loss": 1.8348,
"num_input_tokens_seen": 158597120,
"step": 605
},
{
"epoch": 0.14951887490747595,
"grad_norm": 0.4542570412158966,
"learning_rate": 4.045955023117276e-05,
"loss": 1.7931,
"num_input_tokens_seen": 158859264,
"step": 606
},
{
"epoch": 0.14976560572415495,
"grad_norm": 0.39371147751808167,
"learning_rate": 4.028828243900141e-05,
"loss": 1.9931,
"num_input_tokens_seen": 159121408,
"step": 607
},
{
"epoch": 0.15001233654083396,
"grad_norm": 0.4978816509246826,
"learning_rate": 4.0117132981043693e-05,
"loss": 1.3801,
"num_input_tokens_seen": 159383552,
"step": 608
},
{
"epoch": 0.15025906735751296,
"grad_norm": 0.43954601883888245,
"learning_rate": 3.9946103942701777e-05,
"loss": 1.7816,
"num_input_tokens_seen": 159645696,
"step": 609
},
{
"epoch": 0.15050579817419196,
"grad_norm": 0.5910866260528564,
"learning_rate": 3.9775197407910485e-05,
"loss": 1.3484,
"num_input_tokens_seen": 159907840,
"step": 610
},
{
"epoch": 0.15075252899087097,
"grad_norm": 0.34320688247680664,
"learning_rate": 3.960441545911204e-05,
"loss": 1.4339,
"num_input_tokens_seen": 160169984,
"step": 611
},
{
"epoch": 0.15099925980754997,
"grad_norm": 0.41999390721321106,
"learning_rate": 3.943376017723057e-05,
"loss": 1.1897,
"num_input_tokens_seen": 160432128,
"step": 612
},
{
"epoch": 0.15124599062422897,
"grad_norm": 0.9088113903999329,
"learning_rate": 3.926323364164684e-05,
"loss": 1.622,
"num_input_tokens_seen": 160694272,
"step": 613
},
{
"epoch": 0.15149272144090797,
"grad_norm": 0.3928620517253876,
"learning_rate": 3.9092837930172884e-05,
"loss": 1.503,
"num_input_tokens_seen": 160956416,
"step": 614
},
{
"epoch": 0.15173945225758698,
"grad_norm": 0.5755316019058228,
"learning_rate": 3.892257511902664e-05,
"loss": 1.5733,
"num_input_tokens_seen": 161218560,
"step": 615
},
{
"epoch": 0.15198618307426598,
"grad_norm": 0.3288033604621887,
"learning_rate": 3.875244728280676e-05,
"loss": 0.8331,
"num_input_tokens_seen": 161480704,
"step": 616
},
{
"epoch": 0.15223291389094498,
"grad_norm": 0.3691161870956421,
"learning_rate": 3.858245649446721e-05,
"loss": 1.3793,
"num_input_tokens_seen": 161742848,
"step": 617
},
{
"epoch": 0.15247964470762398,
"grad_norm": 0.6293520927429199,
"learning_rate": 3.841260482529214e-05,
"loss": 1.2762,
"num_input_tokens_seen": 162004992,
"step": 618
},
{
"epoch": 0.152726375524303,
"grad_norm": 0.520823061466217,
"learning_rate": 3.82428943448705e-05,
"loss": 1.1906,
"num_input_tokens_seen": 162267136,
"step": 619
},
{
"epoch": 0.152973106340982,
"grad_norm": 0.3488697111606598,
"learning_rate": 3.807332712107097e-05,
"loss": 0.9492,
"num_input_tokens_seen": 162529280,
"step": 620
},
{
"epoch": 0.153219837157661,
"grad_norm": 0.508271336555481,
"learning_rate": 3.790390522001662e-05,
"loss": 1.359,
"num_input_tokens_seen": 162791424,
"step": 621
},
{
"epoch": 0.15346656797434,
"grad_norm": 0.7611494064331055,
"learning_rate": 3.773463070605987e-05,
"loss": 0.8415,
"num_input_tokens_seen": 163053568,
"step": 622
},
{
"epoch": 0.153713298791019,
"grad_norm": 0.7101661562919617,
"learning_rate": 3.756550564175727e-05,
"loss": 1.6923,
"num_input_tokens_seen": 163315712,
"step": 623
},
{
"epoch": 0.153960029607698,
"grad_norm": 0.2696649432182312,
"learning_rate": 3.739653208784432e-05,
"loss": 0.9171,
"num_input_tokens_seen": 163577856,
"step": 624
},
{
"epoch": 0.154206760424377,
"grad_norm": 0.47646230459213257,
"learning_rate": 3.7227712103210486e-05,
"loss": 1.1606,
"num_input_tokens_seen": 163840000,
"step": 625
},
{
"epoch": 0.154453491241056,
"grad_norm": 0.4178116023540497,
"learning_rate": 3.705904774487396e-05,
"loss": 1.5887,
"num_input_tokens_seen": 164102144,
"step": 626
},
{
"epoch": 0.154700222057735,
"grad_norm": 0.3736078441143036,
"learning_rate": 3.6890541067956776e-05,
"loss": 1.8497,
"num_input_tokens_seen": 164364288,
"step": 627
},
{
"epoch": 0.154946952874414,
"grad_norm": 0.3591687083244324,
"learning_rate": 3.6722194125659556e-05,
"loss": 1.5293,
"num_input_tokens_seen": 164626432,
"step": 628
},
{
"epoch": 0.15519368369109302,
"grad_norm": 0.5354999303817749,
"learning_rate": 3.655400896923672e-05,
"loss": 1.3981,
"num_input_tokens_seen": 164888576,
"step": 629
},
{
"epoch": 0.15544041450777202,
"grad_norm": 0.47634822130203247,
"learning_rate": 3.6385987647971285e-05,
"loss": 1.2345,
"num_input_tokens_seen": 165150720,
"step": 630
},
{
"epoch": 0.15568714532445102,
"grad_norm": 0.4649348855018616,
"learning_rate": 3.6218132209150045e-05,
"loss": 1.6408,
"num_input_tokens_seen": 165412864,
"step": 631
},
{
"epoch": 0.15593387614113002,
"grad_norm": 0.38776272535324097,
"learning_rate": 3.605044469803854e-05,
"loss": 1.5168,
"num_input_tokens_seen": 165675008,
"step": 632
},
{
"epoch": 0.15618060695780903,
"grad_norm": 0.4118279218673706,
"learning_rate": 3.588292715785617e-05,
"loss": 1.8121,
"num_input_tokens_seen": 165937152,
"step": 633
},
{
"epoch": 0.15642733777448803,
"grad_norm": 0.2985859215259552,
"learning_rate": 3.5715581629751326e-05,
"loss": 1.6041,
"num_input_tokens_seen": 166199296,
"step": 634
},
{
"epoch": 0.15667406859116703,
"grad_norm": 0.37588563561439514,
"learning_rate": 3.554841015277641e-05,
"loss": 0.9964,
"num_input_tokens_seen": 166461440,
"step": 635
},
{
"epoch": 0.15692079940784603,
"grad_norm": 0.5400651097297668,
"learning_rate": 3.5381414763863166e-05,
"loss": 1.292,
"num_input_tokens_seen": 166723584,
"step": 636
},
{
"epoch": 0.15716753022452504,
"grad_norm": 0.4089282751083374,
"learning_rate": 3.5214597497797684e-05,
"loss": 1.5548,
"num_input_tokens_seen": 166985728,
"step": 637
},
{
"epoch": 0.15741426104120404,
"grad_norm": 0.5228886008262634,
"learning_rate": 3.504796038719567e-05,
"loss": 1.5065,
"num_input_tokens_seen": 167247872,
"step": 638
},
{
"epoch": 0.15766099185788304,
"grad_norm": 0.6308531165122986,
"learning_rate": 3.488150546247778e-05,
"loss": 1.0718,
"num_input_tokens_seen": 167510016,
"step": 639
},
{
"epoch": 0.15790772267456205,
"grad_norm": 0.4296139180660248,
"learning_rate": 3.471523475184472e-05,
"loss": 1.8302,
"num_input_tokens_seen": 167772160,
"step": 640
},
{
"epoch": 0.15815445349124105,
"grad_norm": 0.5463234782218933,
"learning_rate": 3.4549150281252636e-05,
"loss": 1.5352,
"num_input_tokens_seen": 168034304,
"step": 641
},
{
"epoch": 0.15840118430792005,
"grad_norm": 0.4520261883735657,
"learning_rate": 3.438325407438837e-05,
"loss": 1.2395,
"num_input_tokens_seen": 168296448,
"step": 642
},
{
"epoch": 0.15864791512459905,
"grad_norm": 0.4640827178955078,
"learning_rate": 3.4217548152644885e-05,
"loss": 1.5161,
"num_input_tokens_seen": 168558592,
"step": 643
},
{
"epoch": 0.15889464594127806,
"grad_norm": 0.509616494178772,
"learning_rate": 3.40520345350965e-05,
"loss": 1.3259,
"num_input_tokens_seen": 168820736,
"step": 644
},
{
"epoch": 0.15914137675795706,
"grad_norm": 0.26751509308815,
"learning_rate": 3.388671523847445e-05,
"loss": 0.684,
"num_input_tokens_seen": 169082880,
"step": 645
},
{
"epoch": 0.15938810757463606,
"grad_norm": 0.4281842112541199,
"learning_rate": 3.372159227714218e-05,
"loss": 1.5933,
"num_input_tokens_seen": 169345024,
"step": 646
},
{
"epoch": 0.15963483839131506,
"grad_norm": 0.41694679856300354,
"learning_rate": 3.355666766307084e-05,
"loss": 1.2466,
"num_input_tokens_seen": 169607168,
"step": 647
},
{
"epoch": 0.15988156920799407,
"grad_norm": 0.33783477544784546,
"learning_rate": 3.339194340581485e-05,
"loss": 1.4749,
"num_input_tokens_seen": 169869312,
"step": 648
},
{
"epoch": 0.16012830002467307,
"grad_norm": 0.5284684300422668,
"learning_rate": 3.322742151248725e-05,
"loss": 1.3416,
"num_input_tokens_seen": 170131456,
"step": 649
},
{
"epoch": 0.16037503084135207,
"grad_norm": 0.36244672536849976,
"learning_rate": 3.3063103987735433e-05,
"loss": 1.4416,
"num_input_tokens_seen": 170393600,
"step": 650
},
{
"epoch": 0.16062176165803108,
"grad_norm": 0.4545578062534332,
"learning_rate": 3.289899283371657e-05,
"loss": 1.7422,
"num_input_tokens_seen": 170655744,
"step": 651
},
{
"epoch": 0.16086849247471008,
"grad_norm": 0.3529415726661682,
"learning_rate": 3.273509005007327e-05,
"loss": 1.0659,
"num_input_tokens_seen": 170917888,
"step": 652
},
{
"epoch": 0.16111522329138908,
"grad_norm": 0.533159077167511,
"learning_rate": 3.257139763390925e-05,
"loss": 1.8225,
"num_input_tokens_seen": 171180032,
"step": 653
},
{
"epoch": 0.16136195410806808,
"grad_norm": 0.34746259450912476,
"learning_rate": 3.240791757976491e-05,
"loss": 1.3835,
"num_input_tokens_seen": 171442176,
"step": 654
},
{
"epoch": 0.16160868492474711,
"grad_norm": 0.3530820608139038,
"learning_rate": 3.224465187959316e-05,
"loss": 1.5501,
"num_input_tokens_seen": 171704320,
"step": 655
},
{
"epoch": 0.16185541574142612,
"grad_norm": 0.487118124961853,
"learning_rate": 3.2081602522734986e-05,
"loss": 1.5581,
"num_input_tokens_seen": 171966464,
"step": 656
},
{
"epoch": 0.16210214655810512,
"grad_norm": 0.42287710309028625,
"learning_rate": 3.1918771495895396e-05,
"loss": 1.4477,
"num_input_tokens_seen": 172228608,
"step": 657
},
{
"epoch": 0.16234887737478412,
"grad_norm": 0.48304563760757446,
"learning_rate": 3.1756160783119016e-05,
"loss": 1.3314,
"num_input_tokens_seen": 172490752,
"step": 658
},
{
"epoch": 0.16259560819146313,
"grad_norm": 0.4121909439563751,
"learning_rate": 3.1593772365766105e-05,
"loss": 1.0765,
"num_input_tokens_seen": 172752896,
"step": 659
},
{
"epoch": 0.16284233900814213,
"grad_norm": 0.3886505663394928,
"learning_rate": 3.1431608222488275e-05,
"loss": 1.2887,
"num_input_tokens_seen": 173015040,
"step": 660
},
{
"epoch": 0.16308906982482113,
"grad_norm": 0.4908694326877594,
"learning_rate": 3.12696703292044e-05,
"loss": 1.7151,
"num_input_tokens_seen": 173277184,
"step": 661
},
{
"epoch": 0.16333580064150013,
"grad_norm": 0.40245357155799866,
"learning_rate": 3.110796065907665e-05,
"loss": 1.1706,
"num_input_tokens_seen": 173539328,
"step": 662
},
{
"epoch": 0.16358253145817914,
"grad_norm": 0.443495512008667,
"learning_rate": 3.09464811824863e-05,
"loss": 1.1397,
"num_input_tokens_seen": 173801472,
"step": 663
},
{
"epoch": 0.16382926227485814,
"grad_norm": 0.3620133101940155,
"learning_rate": 3.078523386700982e-05,
"loss": 1.5915,
"num_input_tokens_seen": 174063616,
"step": 664
},
{
"epoch": 0.16407599309153714,
"grad_norm": 0.4306368827819824,
"learning_rate": 3.062422067739485e-05,
"loss": 1.1994,
"num_input_tokens_seen": 174325760,
"step": 665
},
{
"epoch": 0.16432272390821615,
"grad_norm": 0.3241046369075775,
"learning_rate": 3.046344357553632e-05,
"loss": 1.0487,
"num_input_tokens_seen": 174587904,
"step": 666
},
{
"epoch": 0.16456945472489515,
"grad_norm": 0.6947260499000549,
"learning_rate": 3.0302904520452447e-05,
"loss": 1.4031,
"num_input_tokens_seen": 174850048,
"step": 667
},
{
"epoch": 0.16481618554157415,
"grad_norm": 0.4316936433315277,
"learning_rate": 3.0142605468260978e-05,
"loss": 1.661,
"num_input_tokens_seen": 175112192,
"step": 668
},
{
"epoch": 0.16506291635825315,
"grad_norm": 0.47495076060295105,
"learning_rate": 2.9982548372155263e-05,
"loss": 1.2845,
"num_input_tokens_seen": 175374336,
"step": 669
},
{
"epoch": 0.16530964717493216,
"grad_norm": 0.395973265171051,
"learning_rate": 2.9822735182380496e-05,
"loss": 1.0712,
"num_input_tokens_seen": 175636480,
"step": 670
},
{
"epoch": 0.16555637799161116,
"grad_norm": 0.5247637033462524,
"learning_rate": 2.9663167846209998e-05,
"loss": 1.626,
"num_input_tokens_seen": 175898624,
"step": 671
},
{
"epoch": 0.16580310880829016,
"grad_norm": 0.5624692440032959,
"learning_rate": 2.950384830792136e-05,
"loss": 1.354,
"num_input_tokens_seen": 176160768,
"step": 672
},
{
"epoch": 0.16604983962496916,
"grad_norm": 0.5763942003250122,
"learning_rate": 2.934477850877292e-05,
"loss": 1.2654,
"num_input_tokens_seen": 176422912,
"step": 673
},
{
"epoch": 0.16629657044164817,
"grad_norm": 0.40236273407936096,
"learning_rate": 2.918596038697995e-05,
"loss": 1.1814,
"num_input_tokens_seen": 176685056,
"step": 674
},
{
"epoch": 0.16654330125832717,
"grad_norm": 0.402811735868454,
"learning_rate": 2.9027395877691144e-05,
"loss": 0.8873,
"num_input_tokens_seen": 176947200,
"step": 675
},
{
"epoch": 0.16679003207500617,
"grad_norm": 0.3976457118988037,
"learning_rate": 2.886908691296504e-05,
"loss": 1.122,
"num_input_tokens_seen": 177209344,
"step": 676
},
{
"epoch": 0.16703676289168518,
"grad_norm": 0.34381216764450073,
"learning_rate": 2.8711035421746367e-05,
"loss": 1.777,
"num_input_tokens_seen": 177471488,
"step": 677
},
{
"epoch": 0.16728349370836418,
"grad_norm": 0.41561493277549744,
"learning_rate": 2.8553243329842714e-05,
"loss": 1.1863,
"num_input_tokens_seen": 177733632,
"step": 678
},
{
"epoch": 0.16753022452504318,
"grad_norm": 0.4907110631465912,
"learning_rate": 2.8395712559900877e-05,
"loss": 1.4543,
"num_input_tokens_seen": 177995776,
"step": 679
},
{
"epoch": 0.16777695534172218,
"grad_norm": 0.5273538827896118,
"learning_rate": 2.823844503138363e-05,
"loss": 1.1865,
"num_input_tokens_seen": 178257920,
"step": 680
},
{
"epoch": 0.1680236861584012,
"grad_norm": 0.3505066931247711,
"learning_rate": 2.8081442660546125e-05,
"loss": 1.2031,
"num_input_tokens_seen": 178520064,
"step": 681
},
{
"epoch": 0.1682704169750802,
"grad_norm": 0.37119948863983154,
"learning_rate": 2.7924707360412746e-05,
"loss": 1.5129,
"num_input_tokens_seen": 178782208,
"step": 682
},
{
"epoch": 0.1685171477917592,
"grad_norm": 0.48191511631011963,
"learning_rate": 2.776824104075364e-05,
"loss": 1.6263,
"num_input_tokens_seen": 179044352,
"step": 683
},
{
"epoch": 0.1687638786084382,
"grad_norm": 0.35735267400741577,
"learning_rate": 2.761204560806152e-05,
"loss": 1.1865,
"num_input_tokens_seen": 179306496,
"step": 684
},
{
"epoch": 0.1690106094251172,
"grad_norm": 0.44410082697868347,
"learning_rate": 2.7456122965528475e-05,
"loss": 1.6549,
"num_input_tokens_seen": 179568640,
"step": 685
},
{
"epoch": 0.1692573402417962,
"grad_norm": 0.47468581795692444,
"learning_rate": 2.7300475013022663e-05,
"loss": 0.9347,
"num_input_tokens_seen": 179830784,
"step": 686
},
{
"epoch": 0.1695040710584752,
"grad_norm": 1.0566141605377197,
"learning_rate": 2.7145103647065308e-05,
"loss": 1.8353,
"num_input_tokens_seen": 180092928,
"step": 687
},
{
"epoch": 0.1697508018751542,
"grad_norm": 0.4620446264743805,
"learning_rate": 2.699001076080742e-05,
"loss": 1.5695,
"num_input_tokens_seen": 180355072,
"step": 688
},
{
"epoch": 0.1699975326918332,
"grad_norm": 0.3748123049736023,
"learning_rate": 2.6835198244006927e-05,
"loss": 1.4859,
"num_input_tokens_seen": 180617216,
"step": 689
},
{
"epoch": 0.1702442635085122,
"grad_norm": 0.538343071937561,
"learning_rate": 2.668066798300545e-05,
"loss": 1.0841,
"num_input_tokens_seen": 180879360,
"step": 690
},
{
"epoch": 0.17049099432519121,
"grad_norm": 0.3192008137702942,
"learning_rate": 2.6526421860705473e-05,
"loss": 0.7964,
"num_input_tokens_seen": 181141504,
"step": 691
},
{
"epoch": 0.17073772514187022,
"grad_norm": 0.5199547410011292,
"learning_rate": 2.6372461756547306e-05,
"loss": 1.3402,
"num_input_tokens_seen": 181403648,
"step": 692
},
{
"epoch": 0.17098445595854922,
"grad_norm": 0.41002166271209717,
"learning_rate": 2.6218789546486234e-05,
"loss": 1.3859,
"num_input_tokens_seen": 181665792,
"step": 693
},
{
"epoch": 0.17123118677522822,
"grad_norm": 0.330893337726593,
"learning_rate": 2.6065407102969664e-05,
"loss": 1.5969,
"num_input_tokens_seen": 181927936,
"step": 694
},
{
"epoch": 0.17147791759190723,
"grad_norm": 0.3525722622871399,
"learning_rate": 2.591231629491423e-05,
"loss": 1.5899,
"num_input_tokens_seen": 182190080,
"step": 695
},
{
"epoch": 0.17172464840858623,
"grad_norm": 0.485008180141449,
"learning_rate": 2.575951898768315e-05,
"loss": 0.9098,
"num_input_tokens_seen": 182452224,
"step": 696
},
{
"epoch": 0.17197137922526523,
"grad_norm": 0.36693790555000305,
"learning_rate": 2.560701704306336e-05,
"loss": 1.4575,
"num_input_tokens_seen": 182714368,
"step": 697
},
{
"epoch": 0.17221811004194423,
"grad_norm": 0.4442266523838043,
"learning_rate": 2.545481231924296e-05,
"loss": 1.5569,
"num_input_tokens_seen": 182976512,
"step": 698
},
{
"epoch": 0.17246484085862324,
"grad_norm": 0.5508103966712952,
"learning_rate": 2.5302906670788462e-05,
"loss": 1.452,
"num_input_tokens_seen": 183238656,
"step": 699
},
{
"epoch": 0.17271157167530224,
"grad_norm": 0.36465567350387573,
"learning_rate": 2.5151301948622237e-05,
"loss": 1.302,
"num_input_tokens_seen": 183500800,
"step": 700
},
{
"epoch": 0.17295830249198124,
"grad_norm": 0.3768947720527649,
"learning_rate": 2.500000000000001e-05,
"loss": 1.0643,
"num_input_tokens_seen": 183762944,
"step": 701
},
{
"epoch": 0.17320503330866024,
"grad_norm": 0.4164443612098694,
"learning_rate": 2.4849002668488245e-05,
"loss": 1.3509,
"num_input_tokens_seen": 184025088,
"step": 702
},
{
"epoch": 0.17345176412533925,
"grad_norm": 0.2983926236629486,
"learning_rate": 2.469831179394182e-05,
"loss": 0.9228,
"num_input_tokens_seen": 184287232,
"step": 703
},
{
"epoch": 0.17369849494201825,
"grad_norm": 0.40506187081336975,
"learning_rate": 2.4547929212481435e-05,
"loss": 1.0096,
"num_input_tokens_seen": 184549376,
"step": 704
},
{
"epoch": 0.17394522575869725,
"grad_norm": 0.49655207991600037,
"learning_rate": 2.4397856756471432e-05,
"loss": 1.6969,
"num_input_tokens_seen": 184811520,
"step": 705
},
{
"epoch": 0.17419195657537626,
"grad_norm": 0.5349634289741516,
"learning_rate": 2.4248096254497288e-05,
"loss": 1.3747,
"num_input_tokens_seen": 185073664,
"step": 706
},
{
"epoch": 0.17443868739205526,
"grad_norm": 0.6262538433074951,
"learning_rate": 2.4098649531343497e-05,
"loss": 1.1831,
"num_input_tokens_seen": 185335808,
"step": 707
},
{
"epoch": 0.17468541820873426,
"grad_norm": 0.3409517705440521,
"learning_rate": 2.39495184079712e-05,
"loss": 0.8747,
"num_input_tokens_seen": 185597952,
"step": 708
},
{
"epoch": 0.17493214902541326,
"grad_norm": 0.5066428780555725,
"learning_rate": 2.3800704701496053e-05,
"loss": 1.3583,
"num_input_tokens_seen": 185860096,
"step": 709
},
{
"epoch": 0.17517887984209227,
"grad_norm": 0.5086933374404907,
"learning_rate": 2.3652210225166122e-05,
"loss": 1.2945,
"num_input_tokens_seen": 186122240,
"step": 710
},
{
"epoch": 0.17542561065877127,
"grad_norm": 0.42083799839019775,
"learning_rate": 2.350403678833976e-05,
"loss": 1.3138,
"num_input_tokens_seen": 186384384,
"step": 711
},
{
"epoch": 0.17567234147545027,
"grad_norm": 0.3579288125038147,
"learning_rate": 2.33561861964635e-05,
"loss": 1.1903,
"num_input_tokens_seen": 186646528,
"step": 712
},
{
"epoch": 0.17591907229212927,
"grad_norm": 0.33680543303489685,
"learning_rate": 2.3208660251050158e-05,
"loss": 1.4195,
"num_input_tokens_seen": 186908672,
"step": 713
},
{
"epoch": 0.17616580310880828,
"grad_norm": 0.6753774881362915,
"learning_rate": 2.3061460749656844e-05,
"loss": 1.4563,
"num_input_tokens_seen": 187170816,
"step": 714
},
{
"epoch": 0.17641253392548728,
"grad_norm": 0.4402250051498413,
"learning_rate": 2.2914589485863014e-05,
"loss": 1.3711,
"num_input_tokens_seen": 187432960,
"step": 715
},
{
"epoch": 0.17665926474216628,
"grad_norm": 0.41593843698501587,
"learning_rate": 2.2768048249248648e-05,
"loss": 1.8253,
"num_input_tokens_seen": 187695104,
"step": 716
},
{
"epoch": 0.17690599555884529,
"grad_norm": 0.4054698646068573,
"learning_rate": 2.2621838825372493e-05,
"loss": 1.5316,
"num_input_tokens_seen": 187957248,
"step": 717
},
{
"epoch": 0.17715272637552432,
"grad_norm": 0.2774104177951813,
"learning_rate": 2.247596299575022e-05,
"loss": 1.2301,
"num_input_tokens_seen": 188219392,
"step": 718
},
{
"epoch": 0.17739945719220332,
"grad_norm": 0.5608285665512085,
"learning_rate": 2.23304225378328e-05,
"loss": 1.4682,
"num_input_tokens_seen": 188481536,
"step": 719
},
{
"epoch": 0.17764618800888232,
"grad_norm": 0.30434879660606384,
"learning_rate": 2.218521922498476e-05,
"loss": 0.9433,
"num_input_tokens_seen": 188743680,
"step": 720
},
{
"epoch": 0.17789291882556132,
"grad_norm": 0.4088476002216339,
"learning_rate": 2.2040354826462668e-05,
"loss": 1.2791,
"num_input_tokens_seen": 189005824,
"step": 721
},
{
"epoch": 0.17813964964224033,
"grad_norm": 0.3614683151245117,
"learning_rate": 2.1895831107393484e-05,
"loss": 1.4412,
"num_input_tokens_seen": 189267968,
"step": 722
},
{
"epoch": 0.17838638045891933,
"grad_norm": 0.3683152198791504,
"learning_rate": 2.1751649828753106e-05,
"loss": 1.1826,
"num_input_tokens_seen": 189530112,
"step": 723
},
{
"epoch": 0.17863311127559833,
"grad_norm": 0.3462359309196472,
"learning_rate": 2.160781274734495e-05,
"loss": 1.4778,
"num_input_tokens_seen": 189792256,
"step": 724
},
{
"epoch": 0.17887984209227734,
"grad_norm": 0.43126240372657776,
"learning_rate": 2.1464321615778422e-05,
"loss": 1.7286,
"num_input_tokens_seen": 190054400,
"step": 725
},
{
"epoch": 0.17912657290895634,
"grad_norm": 0.554000973701477,
"learning_rate": 2.132117818244771e-05,
"loss": 1.5582,
"num_input_tokens_seen": 190316544,
"step": 726
},
{
"epoch": 0.17937330372563534,
"grad_norm": 0.36458858847618103,
"learning_rate": 2.117838419151034e-05,
"loss": 1.7551,
"num_input_tokens_seen": 190578688,
"step": 727
},
{
"epoch": 0.17962003454231434,
"grad_norm": 0.4858095943927765,
"learning_rate": 2.103594138286607e-05,
"loss": 1.8501,
"num_input_tokens_seen": 190840832,
"step": 728
},
{
"epoch": 0.17986676535899335,
"grad_norm": 0.47116655111312866,
"learning_rate": 2.0893851492135537e-05,
"loss": 1.5338,
"num_input_tokens_seen": 191102976,
"step": 729
},
{
"epoch": 0.18011349617567235,
"grad_norm": 0.43690940737724304,
"learning_rate": 2.0752116250639225e-05,
"loss": 1.0307,
"num_input_tokens_seen": 191365120,
"step": 730
},
{
"epoch": 0.18036022699235135,
"grad_norm": 0.5056291818618774,
"learning_rate": 2.061073738537635e-05,
"loss": 1.3162,
"num_input_tokens_seen": 191627264,
"step": 731
},
{
"epoch": 0.18060695780903036,
"grad_norm": 0.29009297490119934,
"learning_rate": 2.0469716619003725e-05,
"loss": 1.7578,
"num_input_tokens_seen": 191889408,
"step": 732
},
{
"epoch": 0.18085368862570936,
"grad_norm": 0.2661360204219818,
"learning_rate": 2.0329055669814934e-05,
"loss": 1.3534,
"num_input_tokens_seen": 192151552,
"step": 733
},
{
"epoch": 0.18110041944238836,
"grad_norm": 0.5041150450706482,
"learning_rate": 2.0188756251719203e-05,
"loss": 1.4008,
"num_input_tokens_seen": 192413696,
"step": 734
},
{
"epoch": 0.18134715025906736,
"grad_norm": 0.6888830661773682,
"learning_rate": 2.0048820074220715e-05,
"loss": 1.4928,
"num_input_tokens_seen": 192675840,
"step": 735
},
{
"epoch": 0.18159388107574637,
"grad_norm": 0.377041757106781,
"learning_rate": 1.9909248842397584e-05,
"loss": 1.2978,
"num_input_tokens_seen": 192937984,
"step": 736
},
{
"epoch": 0.18184061189242537,
"grad_norm": 0.5541605353355408,
"learning_rate": 1.977004425688126e-05,
"loss": 1.4159,
"num_input_tokens_seen": 193200128,
"step": 737
},
{
"epoch": 0.18208734270910437,
"grad_norm": 0.405954509973526,
"learning_rate": 1.9631208013835678e-05,
"loss": 1.3671,
"num_input_tokens_seen": 193462272,
"step": 738
},
{
"epoch": 0.18233407352578337,
"grad_norm": 0.21426331996917725,
"learning_rate": 1.9492741804936622e-05,
"loss": 1.0753,
"num_input_tokens_seen": 193724416,
"step": 739
},
{
"epoch": 0.18258080434246238,
"grad_norm": 0.4015602767467499,
"learning_rate": 1.9354647317351188e-05,
"loss": 1.907,
"num_input_tokens_seen": 193986560,
"step": 740
},
{
"epoch": 0.18282753515914138,
"grad_norm": 0.5001158118247986,
"learning_rate": 1.9216926233717085e-05,
"loss": 1.9134,
"num_input_tokens_seen": 194248704,
"step": 741
},
{
"epoch": 0.18307426597582038,
"grad_norm": 0.44799885153770447,
"learning_rate": 1.9079580232122303e-05,
"loss": 1.6095,
"num_input_tokens_seen": 194510848,
"step": 742
},
{
"epoch": 0.18332099679249939,
"grad_norm": 0.28924810886383057,
"learning_rate": 1.8942610986084486e-05,
"loss": 1.0634,
"num_input_tokens_seen": 194772992,
"step": 743
},
{
"epoch": 0.1835677276091784,
"grad_norm": 0.5791164636611938,
"learning_rate": 1.8806020164530702e-05,
"loss": 1.2595,
"num_input_tokens_seen": 195035136,
"step": 744
},
{
"epoch": 0.1838144584258574,
"grad_norm": 0.4224323332309723,
"learning_rate": 1.866980943177699e-05,
"loss": 1.4435,
"num_input_tokens_seen": 195297280,
"step": 745
},
{
"epoch": 0.1840611892425364,
"grad_norm": 0.26733824610710144,
"learning_rate": 1.8533980447508137e-05,
"loss": 1.3317,
"num_input_tokens_seen": 195559424,
"step": 746
},
{
"epoch": 0.1843079200592154,
"grad_norm": 0.35904163122177124,
"learning_rate": 1.8398534866757454e-05,
"loss": 1.5698,
"num_input_tokens_seen": 195821568,
"step": 747
},
{
"epoch": 0.1845546508758944,
"grad_norm": 0.4189493656158447,
"learning_rate": 1.8263474339886628e-05,
"loss": 1.6898,
"num_input_tokens_seen": 196083712,
"step": 748
},
{
"epoch": 0.1848013816925734,
"grad_norm": 0.44442206621170044,
"learning_rate": 1.8128800512565513e-05,
"loss": 1.5764,
"num_input_tokens_seen": 196345856,
"step": 749
},
{
"epoch": 0.1850481125092524,
"grad_norm": 0.5954610705375671,
"learning_rate": 1.7994515025752217e-05,
"loss": 1.1511,
"num_input_tokens_seen": 196608000,
"step": 750
},
{
"epoch": 0.1852948433259314,
"grad_norm": 0.43949389457702637,
"learning_rate": 1.7860619515673033e-05,
"loss": 1.7354,
"num_input_tokens_seen": 196870144,
"step": 751
},
{
"epoch": 0.1855415741426104,
"grad_norm": 0.5289075374603271,
"learning_rate": 1.7727115613802465e-05,
"loss": 1.6699,
"num_input_tokens_seen": 197132288,
"step": 752
},
{
"epoch": 0.1857883049592894,
"grad_norm": 0.2665049731731415,
"learning_rate": 1.7594004946843456e-05,
"loss": 1.4969,
"num_input_tokens_seen": 197394432,
"step": 753
},
{
"epoch": 0.18603503577596842,
"grad_norm": 0.49943655729293823,
"learning_rate": 1.746128913670746e-05,
"loss": 1.7965,
"num_input_tokens_seen": 197656576,
"step": 754
},
{
"epoch": 0.18628176659264742,
"grad_norm": 0.4090293347835541,
"learning_rate": 1.7328969800494726e-05,
"loss": 1.5285,
"num_input_tokens_seen": 197918720,
"step": 755
},
{
"epoch": 0.18652849740932642,
"grad_norm": 0.3579832911491394,
"learning_rate": 1.7197048550474643e-05,
"loss": 1.2835,
"num_input_tokens_seen": 198180864,
"step": 756
},
{
"epoch": 0.18677522822600542,
"grad_norm": 0.2599361836910248,
"learning_rate": 1.7065526994065973e-05,
"loss": 1.2545,
"num_input_tokens_seen": 198443008,
"step": 757
},
{
"epoch": 0.18702195904268443,
"grad_norm": 0.4274197518825531,
"learning_rate": 1.6934406733817414e-05,
"loss": 1.0542,
"num_input_tokens_seen": 198705152,
"step": 758
},
{
"epoch": 0.18726868985936343,
"grad_norm": 0.5360140204429626,
"learning_rate": 1.680368936738792e-05,
"loss": 1.6395,
"num_input_tokens_seen": 198967296,
"step": 759
},
{
"epoch": 0.18751542067604243,
"grad_norm": 0.5353621244430542,
"learning_rate": 1.667337648752738e-05,
"loss": 0.9602,
"num_input_tokens_seen": 199229440,
"step": 760
},
{
"epoch": 0.18776215149272144,
"grad_norm": 0.4723873436450958,
"learning_rate": 1.6543469682057106e-05,
"loss": 1.5746,
"num_input_tokens_seen": 199491584,
"step": 761
},
{
"epoch": 0.18800888230940044,
"grad_norm": 0.39472222328186035,
"learning_rate": 1.6413970533850498e-05,
"loss": 1.4711,
"num_input_tokens_seen": 199753728,
"step": 762
},
{
"epoch": 0.18825561312607944,
"grad_norm": 0.2996825575828552,
"learning_rate": 1.6284880620813848e-05,
"loss": 1.627,
"num_input_tokens_seen": 200015872,
"step": 763
},
{
"epoch": 0.18850234394275844,
"grad_norm": 0.27132153511047363,
"learning_rate": 1.615620151586697e-05,
"loss": 1.1888,
"num_input_tokens_seen": 200278016,
"step": 764
},
{
"epoch": 0.18874907475943745,
"grad_norm": 0.42808398604393005,
"learning_rate": 1.602793478692419e-05,
"loss": 1.8828,
"num_input_tokens_seen": 200540160,
"step": 765
},
{
"epoch": 0.18899580557611645,
"grad_norm": 0.403182715177536,
"learning_rate": 1.5900081996875083e-05,
"loss": 1.2262,
"num_input_tokens_seen": 200802304,
"step": 766
},
{
"epoch": 0.18924253639279545,
"grad_norm": 0.4309109151363373,
"learning_rate": 1.5772644703565565e-05,
"loss": 1.45,
"num_input_tokens_seen": 201064448,
"step": 767
},
{
"epoch": 0.18948926720947445,
"grad_norm": 0.417865514755249,
"learning_rate": 1.5645624459778856e-05,
"loss": 1.5417,
"num_input_tokens_seen": 201326592,
"step": 768
},
{
"epoch": 0.18973599802615346,
"grad_norm": 0.3549293875694275,
"learning_rate": 1.551902281321651e-05,
"loss": 1.3173,
"num_input_tokens_seen": 201588736,
"step": 769
},
{
"epoch": 0.18998272884283246,
"grad_norm": 0.35859763622283936,
"learning_rate": 1.5392841306479666e-05,
"loss": 1.1348,
"num_input_tokens_seen": 201850880,
"step": 770
},
{
"epoch": 0.19022945965951146,
"grad_norm": 0.4094098210334778,
"learning_rate": 1.526708147705013e-05,
"loss": 1.4399,
"num_input_tokens_seen": 202113024,
"step": 771
},
{
"epoch": 0.19047619047619047,
"grad_norm": 0.5043712258338928,
"learning_rate": 1.5141744857271778e-05,
"loss": 1.2487,
"num_input_tokens_seen": 202375168,
"step": 772
},
{
"epoch": 0.19072292129286947,
"grad_norm": 0.3936559557914734,
"learning_rate": 1.5016832974331724e-05,
"loss": 1.2476,
"num_input_tokens_seen": 202637312,
"step": 773
},
{
"epoch": 0.19096965210954847,
"grad_norm": 0.4496963620185852,
"learning_rate": 1.4892347350241881e-05,
"loss": 1.304,
"num_input_tokens_seen": 202899456,
"step": 774
},
{
"epoch": 0.19121638292622747,
"grad_norm": 0.3752399981021881,
"learning_rate": 1.4768289501820265e-05,
"loss": 1.5437,
"num_input_tokens_seen": 203161600,
"step": 775
},
{
"epoch": 0.19146311374290648,
"grad_norm": 0.4740826189517975,
"learning_rate": 1.4644660940672627e-05,
"loss": 1.4018,
"num_input_tokens_seen": 203423744,
"step": 776
},
{
"epoch": 0.19170984455958548,
"grad_norm": 0.6707932949066162,
"learning_rate": 1.4521463173173965e-05,
"loss": 1.3839,
"num_input_tokens_seen": 203685888,
"step": 777
},
{
"epoch": 0.19195657537626448,
"grad_norm": 0.9192182421684265,
"learning_rate": 1.439869770045018e-05,
"loss": 0.939,
"num_input_tokens_seen": 203948032,
"step": 778
},
{
"epoch": 0.19220330619294348,
"grad_norm": 0.505362868309021,
"learning_rate": 1.4276366018359844e-05,
"loss": 1.7479,
"num_input_tokens_seen": 204210176,
"step": 779
},
{
"epoch": 0.19245003700962252,
"grad_norm": 0.41583451628685,
"learning_rate": 1.4154469617475863e-05,
"loss": 1.8105,
"num_input_tokens_seen": 204472320,
"step": 780
},
{
"epoch": 0.19269676782630152,
"grad_norm": 0.33144688606262207,
"learning_rate": 1.4033009983067452e-05,
"loss": 1.6383,
"num_input_tokens_seen": 204734464,
"step": 781
},
{
"epoch": 0.19294349864298052,
"grad_norm": 0.4531012177467346,
"learning_rate": 1.3911988595081893e-05,
"loss": 1.7034,
"num_input_tokens_seen": 204996608,
"step": 782
},
{
"epoch": 0.19319022945965952,
"grad_norm": 0.4074859619140625,
"learning_rate": 1.3791406928126638e-05,
"loss": 1.2926,
"num_input_tokens_seen": 205258752,
"step": 783
},
{
"epoch": 0.19343696027633853,
"grad_norm": 0.5179128050804138,
"learning_rate": 1.367126645145121e-05,
"loss": 1.6544,
"num_input_tokens_seen": 205520896,
"step": 784
},
{
"epoch": 0.19368369109301753,
"grad_norm": 0.29768508672714233,
"learning_rate": 1.3551568628929434e-05,
"loss": 1.2235,
"num_input_tokens_seen": 205783040,
"step": 785
},
{
"epoch": 0.19393042190969653,
"grad_norm": 0.5028024315834045,
"learning_rate": 1.3432314919041478e-05,
"loss": 1.685,
"num_input_tokens_seen": 206045184,
"step": 786
},
{
"epoch": 0.19417715272637553,
"grad_norm": 0.33347153663635254,
"learning_rate": 1.3313506774856177e-05,
"loss": 1.2162,
"num_input_tokens_seen": 206307328,
"step": 787
},
{
"epoch": 0.19442388354305454,
"grad_norm": 0.4157863259315491,
"learning_rate": 1.3195145644013285e-05,
"loss": 1.4711,
"num_input_tokens_seen": 206569472,
"step": 788
},
{
"epoch": 0.19467061435973354,
"grad_norm": 0.42381206154823303,
"learning_rate": 1.3077232968705805e-05,
"loss": 1.3982,
"num_input_tokens_seen": 206831616,
"step": 789
},
{
"epoch": 0.19491734517641254,
"grad_norm": 0.41108641028404236,
"learning_rate": 1.29597701856625e-05,
"loss": 1.4016,
"num_input_tokens_seen": 207093760,
"step": 790
},
{
"epoch": 0.19516407599309155,
"grad_norm": 0.39059126377105713,
"learning_rate": 1.2842758726130283e-05,
"loss": 1.0198,
"num_input_tokens_seen": 207355904,
"step": 791
},
{
"epoch": 0.19541080680977055,
"grad_norm": 0.31544917821884155,
"learning_rate": 1.2726200015856892e-05,
"loss": 0.8597,
"num_input_tokens_seen": 207618048,
"step": 792
},
{
"epoch": 0.19565753762644955,
"grad_norm": 0.36484408378601074,
"learning_rate": 1.2610095475073414e-05,
"loss": 1.721,
"num_input_tokens_seen": 207880192,
"step": 793
},
{
"epoch": 0.19590426844312855,
"grad_norm": 0.4410194456577301,
"learning_rate": 1.2494446518477022e-05,
"loss": 1.5508,
"num_input_tokens_seen": 208142336,
"step": 794
},
{
"epoch": 0.19615099925980756,
"grad_norm": 0.3165090084075928,
"learning_rate": 1.2379254555213788e-05,
"loss": 1.254,
"num_input_tokens_seen": 208404480,
"step": 795
},
{
"epoch": 0.19639773007648656,
"grad_norm": 0.3774069547653198,
"learning_rate": 1.22645209888614e-05,
"loss": 1.1777,
"num_input_tokens_seen": 208666624,
"step": 796
},
{
"epoch": 0.19664446089316556,
"grad_norm": 0.35231688618659973,
"learning_rate": 1.2150247217412186e-05,
"loss": 1.3234,
"num_input_tokens_seen": 208928768,
"step": 797
},
{
"epoch": 0.19689119170984457,
"grad_norm": 0.3808956742286682,
"learning_rate": 1.203643463325596e-05,
"loss": 1.3179,
"num_input_tokens_seen": 209190912,
"step": 798
},
{
"epoch": 0.19713792252652357,
"grad_norm": 0.3891003131866455,
"learning_rate": 1.1923084623163172e-05,
"loss": 1.1893,
"num_input_tokens_seen": 209453056,
"step": 799
},
{
"epoch": 0.19738465334320257,
"grad_norm": 0.5867771506309509,
"learning_rate": 1.1810198568267905e-05,
"loss": 1.0442,
"num_input_tokens_seen": 209715200,
"step": 800
},
{
"epoch": 0.19763138415988157,
"grad_norm": 0.40049314498901367,
"learning_rate": 1.1697777844051105e-05,
"loss": 1.2473,
"num_input_tokens_seen": 209977344,
"step": 801
},
{
"epoch": 0.19787811497656058,
"grad_norm": 0.4540206491947174,
"learning_rate": 1.1585823820323843e-05,
"loss": 1.4841,
"num_input_tokens_seen": 210239488,
"step": 802
},
{
"epoch": 0.19812484579323958,
"grad_norm": 0.39836302399635315,
"learning_rate": 1.1474337861210543e-05,
"loss": 1.4644,
"num_input_tokens_seen": 210501632,
"step": 803
},
{
"epoch": 0.19837157660991858,
"grad_norm": 0.4528809189796448,
"learning_rate": 1.1363321325132447e-05,
"loss": 1.3358,
"num_input_tokens_seen": 210763776,
"step": 804
},
{
"epoch": 0.19861830742659758,
"grad_norm": 0.4948221445083618,
"learning_rate": 1.1252775564791024e-05,
"loss": 1.2837,
"num_input_tokens_seen": 211025920,
"step": 805
},
{
"epoch": 0.1988650382432766,
"grad_norm": 0.3994447886943817,
"learning_rate": 1.1142701927151456e-05,
"loss": 1.6242,
"num_input_tokens_seen": 211288064,
"step": 806
},
{
"epoch": 0.1991117690599556,
"grad_norm": 0.40876397490501404,
"learning_rate": 1.1033101753426283e-05,
"loss": 1.6753,
"num_input_tokens_seen": 211550208,
"step": 807
},
{
"epoch": 0.1993584998766346,
"grad_norm": 0.520258903503418,
"learning_rate": 1.0923976379059058e-05,
"loss": 1.3293,
"num_input_tokens_seen": 211812352,
"step": 808
},
{
"epoch": 0.1996052306933136,
"grad_norm": 0.39503636956214905,
"learning_rate": 1.0815327133708015e-05,
"loss": 0.9596,
"num_input_tokens_seen": 212074496,
"step": 809
},
{
"epoch": 0.1998519615099926,
"grad_norm": 0.36732059717178345,
"learning_rate": 1.0707155341229901e-05,
"loss": 1.3204,
"num_input_tokens_seen": 212336640,
"step": 810
},
{
"epoch": 0.2000986923266716,
"grad_norm": 0.397003710269928,
"learning_rate": 1.0599462319663905e-05,
"loss": 1.5278,
"num_input_tokens_seen": 212598784,
"step": 811
},
{
"epoch": 0.2003454231433506,
"grad_norm": 0.3989708125591278,
"learning_rate": 1.049224938121548e-05,
"loss": 1.7442,
"num_input_tokens_seen": 212860928,
"step": 812
},
{
"epoch": 0.2005921539600296,
"grad_norm": 0.384064257144928,
"learning_rate": 1.0385517832240471e-05,
"loss": 1.3603,
"num_input_tokens_seen": 213123072,
"step": 813
},
{
"epoch": 0.2008388847767086,
"grad_norm": 0.3299417495727539,
"learning_rate": 1.0279268973229089e-05,
"loss": 1.4073,
"num_input_tokens_seen": 213385216,
"step": 814
},
{
"epoch": 0.2010856155933876,
"grad_norm": 0.4555274546146393,
"learning_rate": 1.0173504098790187e-05,
"loss": 0.9654,
"num_input_tokens_seen": 213647360,
"step": 815
},
{
"epoch": 0.20133234641006661,
"grad_norm": 0.4112314283847809,
"learning_rate": 1.006822449763537e-05,
"loss": 1.6154,
"num_input_tokens_seen": 213909504,
"step": 816
},
{
"epoch": 0.20157907722674562,
"grad_norm": 0.5586087703704834,
"learning_rate": 9.963431452563332e-06,
"loss": 1.7266,
"num_input_tokens_seen": 214171648,
"step": 817
},
{
"epoch": 0.20182580804342462,
"grad_norm": 0.5256840586662292,
"learning_rate": 9.859126240444283e-06,
"loss": 1.1253,
"num_input_tokens_seen": 214433792,
"step": 818
},
{
"epoch": 0.20207253886010362,
"grad_norm": 0.4175276756286621,
"learning_rate": 9.755310132204298e-06,
"loss": 1.8384,
"num_input_tokens_seen": 214695936,
"step": 819
},
{
"epoch": 0.20231926967678263,
"grad_norm": 0.40371331572532654,
"learning_rate": 9.651984392809914e-06,
"loss": 1.5432,
"num_input_tokens_seen": 214958080,
"step": 820
},
{
"epoch": 0.20256600049346163,
"grad_norm": 0.5226199626922607,
"learning_rate": 9.549150281252633e-06,
"loss": 1.4658,
"num_input_tokens_seen": 215220224,
"step": 821
},
{
"epoch": 0.20281273131014063,
"grad_norm": 0.6090238690376282,
"learning_rate": 9.446809050533678e-06,
"loss": 1.6102,
"num_input_tokens_seen": 215482368,
"step": 822
},
{
"epoch": 0.20305946212681963,
"grad_norm": 0.48321157693862915,
"learning_rate": 9.344961947648623e-06,
"loss": 1.6008,
"num_input_tokens_seen": 215744512,
"step": 823
},
{
"epoch": 0.20330619294349864,
"grad_norm": 0.8070583343505859,
"learning_rate": 9.243610213572285e-06,
"loss": 1.6528,
"num_input_tokens_seen": 216006656,
"step": 824
},
{
"epoch": 0.20355292376017764,
"grad_norm": 0.4205542802810669,
"learning_rate": 9.142755083243576e-06,
"loss": 0.932,
"num_input_tokens_seen": 216268800,
"step": 825
},
{
"epoch": 0.20379965457685664,
"grad_norm": 0.40099719166755676,
"learning_rate": 9.042397785550405e-06,
"loss": 1.3213,
"num_input_tokens_seen": 216530944,
"step": 826
},
{
"epoch": 0.20404638539353565,
"grad_norm": 0.42739397287368774,
"learning_rate": 8.9425395433148e-06,
"loss": 1.2968,
"num_input_tokens_seen": 216793088,
"step": 827
},
{
"epoch": 0.20429311621021465,
"grad_norm": 0.41927245259284973,
"learning_rate": 8.843181573277902e-06,
"loss": 1.2249,
"num_input_tokens_seen": 217055232,
"step": 828
},
{
"epoch": 0.20453984702689365,
"grad_norm": 0.40233132243156433,
"learning_rate": 8.744325086085248e-06,
"loss": 1.3714,
"num_input_tokens_seen": 217317376,
"step": 829
},
{
"epoch": 0.20478657784357265,
"grad_norm": 0.4221806228160858,
"learning_rate": 8.645971286271904e-06,
"loss": 1.4538,
"num_input_tokens_seen": 217579520,
"step": 830
},
{
"epoch": 0.20503330866025166,
"grad_norm": 0.3155604600906372,
"learning_rate": 8.548121372247918e-06,
"loss": 1.381,
"num_input_tokens_seen": 217841664,
"step": 831
},
{
"epoch": 0.20528003947693066,
"grad_norm": 0.4414291977882385,
"learning_rate": 8.450776536283594e-06,
"loss": 1.8841,
"num_input_tokens_seen": 218103808,
"step": 832
},
{
"epoch": 0.20552677029360966,
"grad_norm": 0.3706108629703522,
"learning_rate": 8.353937964495029e-06,
"loss": 1.5019,
"num_input_tokens_seen": 218365952,
"step": 833
},
{
"epoch": 0.20577350111028866,
"grad_norm": 0.4128405749797821,
"learning_rate": 8.257606836829678e-06,
"loss": 1.2751,
"num_input_tokens_seen": 218628096,
"step": 834
},
{
"epoch": 0.20602023192696767,
"grad_norm": 0.34812140464782715,
"learning_rate": 8.16178432705192e-06,
"loss": 1.1729,
"num_input_tokens_seen": 218890240,
"step": 835
},
{
"epoch": 0.20626696274364667,
"grad_norm": 0.4723651111125946,
"learning_rate": 8.066471602728803e-06,
"loss": 1.4141,
"num_input_tokens_seen": 219152384,
"step": 836
},
{
"epoch": 0.20651369356032567,
"grad_norm": 0.676806628704071,
"learning_rate": 7.971669825215788e-06,
"loss": 1.3758,
"num_input_tokens_seen": 219414528,
"step": 837
},
{
"epoch": 0.20676042437700468,
"grad_norm": 0.45954978466033936,
"learning_rate": 7.877380149642626e-06,
"loss": 1.6613,
"num_input_tokens_seen": 219676672,
"step": 838
},
{
"epoch": 0.20700715519368368,
"grad_norm": 0.5440905690193176,
"learning_rate": 7.783603724899257e-06,
"loss": 1.2742,
"num_input_tokens_seen": 219938816,
"step": 839
},
{
"epoch": 0.20725388601036268,
"grad_norm": 0.5499442219734192,
"learning_rate": 7.690341693621805e-06,
"loss": 1.3345,
"num_input_tokens_seen": 220200960,
"step": 840
},
{
"epoch": 0.20750061682704168,
"grad_norm": 0.4645845592021942,
"learning_rate": 7.597595192178702e-06,
"loss": 1.1404,
"num_input_tokens_seen": 220463104,
"step": 841
},
{
"epoch": 0.20774734764372071,
"grad_norm": 0.41414812207221985,
"learning_rate": 7.505365350656812e-06,
"loss": 1.7001,
"num_input_tokens_seen": 220725248,
"step": 842
},
{
"epoch": 0.20799407846039972,
"grad_norm": 0.3073612451553345,
"learning_rate": 7.413653292847617e-06,
"loss": 0.9913,
"num_input_tokens_seen": 220987392,
"step": 843
},
{
"epoch": 0.20824080927707872,
"grad_norm": 0.5080744624137878,
"learning_rate": 7.322460136233622e-06,
"loss": 1.0587,
"num_input_tokens_seen": 221249536,
"step": 844
},
{
"epoch": 0.20848754009375772,
"grad_norm": 0.46083757281303406,
"learning_rate": 7.2317869919746705e-06,
"loss": 1.6628,
"num_input_tokens_seen": 221511680,
"step": 845
},
{
"epoch": 0.20873427091043673,
"grad_norm": 0.26140403747558594,
"learning_rate": 7.1416349648943894e-06,
"loss": 0.8193,
"num_input_tokens_seen": 221773824,
"step": 846
},
{
"epoch": 0.20898100172711573,
"grad_norm": 0.5622504949569702,
"learning_rate": 7.052005153466779e-06,
"loss": 1.0913,
"num_input_tokens_seen": 222035968,
"step": 847
},
{
"epoch": 0.20922773254379473,
"grad_norm": 0.36274051666259766,
"learning_rate": 6.962898649802823e-06,
"loss": 1.7564,
"num_input_tokens_seen": 222298112,
"step": 848
},
{
"epoch": 0.20947446336047373,
"grad_norm": 0.514798104763031,
"learning_rate": 6.874316539637127e-06,
"loss": 1.3341,
"num_input_tokens_seen": 222560256,
"step": 849
},
{
"epoch": 0.20972119417715274,
"grad_norm": 0.5033217668533325,
"learning_rate": 6.786259902314768e-06,
"loss": 1.3534,
"num_input_tokens_seen": 222822400,
"step": 850
},
{
"epoch": 0.20996792499383174,
"grad_norm": 0.28330883383750916,
"learning_rate": 6.698729810778065e-06,
"loss": 1.359,
"num_input_tokens_seen": 223084544,
"step": 851
},
{
"epoch": 0.21021465581051074,
"grad_norm": 0.44284510612487793,
"learning_rate": 6.611727331553586e-06,
"loss": 1.8436,
"num_input_tokens_seen": 223346688,
"step": 852
},
{
"epoch": 0.21046138662718974,
"grad_norm": 0.42221397161483765,
"learning_rate": 6.52525352473905e-06,
"loss": 1.298,
"num_input_tokens_seen": 223608832,
"step": 853
},
{
"epoch": 0.21070811744386875,
"grad_norm": 0.4385905861854553,
"learning_rate": 6.439309443990532e-06,
"loss": 1.8371,
"num_input_tokens_seen": 223870976,
"step": 854
},
{
"epoch": 0.21095484826054775,
"grad_norm": 0.6157058477401733,
"learning_rate": 6.353896136509524e-06,
"loss": 1.6388,
"num_input_tokens_seen": 224133120,
"step": 855
},
{
"epoch": 0.21120157907722675,
"grad_norm": 0.3624531328678131,
"learning_rate": 6.269014643030213e-06,
"loss": 1.2448,
"num_input_tokens_seen": 224395264,
"step": 856
},
{
"epoch": 0.21144830989390576,
"grad_norm": 0.37662044167518616,
"learning_rate": 6.184665997806832e-06,
"loss": 1.406,
"num_input_tokens_seen": 224657408,
"step": 857
},
{
"epoch": 0.21169504071058476,
"grad_norm": 0.3561292886734009,
"learning_rate": 6.100851228600973e-06,
"loss": 1.5924,
"num_input_tokens_seen": 224919552,
"step": 858
},
{
"epoch": 0.21194177152726376,
"grad_norm": 0.4230511784553528,
"learning_rate": 6.017571356669183e-06,
"loss": 1.4013,
"num_input_tokens_seen": 225181696,
"step": 859
},
{
"epoch": 0.21218850234394276,
"grad_norm": 0.3490230143070221,
"learning_rate": 5.934827396750392e-06,
"loss": 1.1493,
"num_input_tokens_seen": 225443840,
"step": 860
},
{
"epoch": 0.21243523316062177,
"grad_norm": 0.5331534743309021,
"learning_rate": 5.852620357053651e-06,
"loss": 1.7272,
"num_input_tokens_seen": 225705984,
"step": 861
},
{
"epoch": 0.21268196397730077,
"grad_norm": 0.5563039779663086,
"learning_rate": 5.770951239245803e-06,
"loss": 0.9588,
"num_input_tokens_seen": 225968128,
"step": 862
},
{
"epoch": 0.21292869479397977,
"grad_norm": 0.4433322250843048,
"learning_rate": 5.689821038439263e-06,
"loss": 1.6342,
"num_input_tokens_seen": 226230272,
"step": 863
},
{
"epoch": 0.21317542561065878,
"grad_norm": 0.32567211985588074,
"learning_rate": 5.6092307431799384e-06,
"loss": 1.6398,
"num_input_tokens_seen": 226492416,
"step": 864
},
{
"epoch": 0.21342215642733778,
"grad_norm": 0.4406616985797882,
"learning_rate": 5.529181335435124e-06,
"loss": 1.5042,
"num_input_tokens_seen": 226754560,
"step": 865
},
{
"epoch": 0.21366888724401678,
"grad_norm": 0.3349403738975525,
"learning_rate": 5.449673790581611e-06,
"loss": 1.0215,
"num_input_tokens_seen": 227016704,
"step": 866
},
{
"epoch": 0.21391561806069578,
"grad_norm": 0.35062626004219055,
"learning_rate": 5.370709077393721e-06,
"loss": 1.3084,
"num_input_tokens_seen": 227278848,
"step": 867
},
{
"epoch": 0.2141623488773748,
"grad_norm": 0.36387965083122253,
"learning_rate": 5.292288158031594e-06,
"loss": 1.1064,
"num_input_tokens_seen": 227540992,
"step": 868
},
{
"epoch": 0.2144090796940538,
"grad_norm": 0.4105871021747589,
"learning_rate": 5.214411988029355e-06,
"loss": 1.332,
"num_input_tokens_seen": 227803136,
"step": 869
},
{
"epoch": 0.2146558105107328,
"grad_norm": 0.4561648964881897,
"learning_rate": 5.137081516283581e-06,
"loss": 1.6019,
"num_input_tokens_seen": 228065280,
"step": 870
},
{
"epoch": 0.2149025413274118,
"grad_norm": 0.35186466574668884,
"learning_rate": 5.060297685041659e-06,
"loss": 1.3176,
"num_input_tokens_seen": 228327424,
"step": 871
},
{
"epoch": 0.2151492721440908,
"grad_norm": 0.5014945268630981,
"learning_rate": 4.984061429890324e-06,
"loss": 1.6415,
"num_input_tokens_seen": 228589568,
"step": 872
},
{
"epoch": 0.2153960029607698,
"grad_norm": 0.4358992874622345,
"learning_rate": 4.908373679744316e-06,
"loss": 1.8463,
"num_input_tokens_seen": 228851712,
"step": 873
},
{
"epoch": 0.2156427337774488,
"grad_norm": 0.4536201059818268,
"learning_rate": 4.833235356834959e-06,
"loss": 1.0817,
"num_input_tokens_seen": 229113856,
"step": 874
},
{
"epoch": 0.2158894645941278,
"grad_norm": 0.4896175265312195,
"learning_rate": 4.758647376699032e-06,
"loss": 1.5664,
"num_input_tokens_seen": 229376000,
"step": 875
},
{
"epoch": 0.2161361954108068,
"grad_norm": 0.5124403834342957,
"learning_rate": 4.684610648167503e-06,
"loss": 1.7831,
"num_input_tokens_seen": 229638144,
"step": 876
},
{
"epoch": 0.2163829262274858,
"grad_norm": 0.3705924153327942,
"learning_rate": 4.611126073354571e-06,
"loss": 1.2338,
"num_input_tokens_seen": 229900288,
"step": 877
},
{
"epoch": 0.21662965704416481,
"grad_norm": 0.4410094618797302,
"learning_rate": 4.538194547646574e-06,
"loss": 1.6064,
"num_input_tokens_seen": 230162432,
"step": 878
},
{
"epoch": 0.21687638786084382,
"grad_norm": 0.39419636130332947,
"learning_rate": 4.465816959691149e-06,
"loss": 1.374,
"num_input_tokens_seen": 230424576,
"step": 879
},
{
"epoch": 0.21712311867752282,
"grad_norm": 0.5444217920303345,
"learning_rate": 4.3939941913863525e-06,
"loss": 1.2999,
"num_input_tokens_seen": 230686720,
"step": 880
},
{
"epoch": 0.21736984949420182,
"grad_norm": 0.3695988059043884,
"learning_rate": 4.322727117869951e-06,
"loss": 1.6548,
"num_input_tokens_seen": 230948864,
"step": 881
},
{
"epoch": 0.21761658031088082,
"grad_norm": 0.44270896911621094,
"learning_rate": 4.2520166075087635e-06,
"loss": 0.963,
"num_input_tokens_seen": 231211008,
"step": 882
},
{
"epoch": 0.21786331112755983,
"grad_norm": 0.4120027422904968,
"learning_rate": 4.181863521888019e-06,
"loss": 1.4263,
"num_input_tokens_seen": 231473152,
"step": 883
},
{
"epoch": 0.21811004194423883,
"grad_norm": 0.22883374989032745,
"learning_rate": 4.112268715800943e-06,
"loss": 1.0965,
"num_input_tokens_seen": 231735296,
"step": 884
},
{
"epoch": 0.21835677276091783,
"grad_norm": 0.43632200360298157,
"learning_rate": 4.043233037238281e-06,
"loss": 1.0893,
"num_input_tokens_seen": 231997440,
"step": 885
},
{
"epoch": 0.21860350357759684,
"grad_norm": 0.35074254870414734,
"learning_rate": 3.974757327377981e-06,
"loss": 1.8799,
"num_input_tokens_seen": 232259584,
"step": 886
},
{
"epoch": 0.21885023439427584,
"grad_norm": 0.2914575934410095,
"learning_rate": 3.90684242057498e-06,
"loss": 1.4432,
"num_input_tokens_seen": 232521728,
"step": 887
},
{
"epoch": 0.21909696521095484,
"grad_norm": 0.347256064414978,
"learning_rate": 3.839489144350955e-06,
"loss": 1.5199,
"num_input_tokens_seen": 232783872,
"step": 888
},
{
"epoch": 0.21934369602763384,
"grad_norm": 0.31574973464012146,
"learning_rate": 3.772698319384349e-06,
"loss": 1.3739,
"num_input_tokens_seen": 233046016,
"step": 889
},
{
"epoch": 0.21959042684431285,
"grad_norm": 0.24996541440486908,
"learning_rate": 3.7064707595002635e-06,
"loss": 1.3053,
"num_input_tokens_seen": 233308160,
"step": 890
},
{
"epoch": 0.21983715766099185,
"grad_norm": 0.3357880413532257,
"learning_rate": 3.6408072716606346e-06,
"loss": 1.231,
"num_input_tokens_seen": 233570304,
"step": 891
},
{
"epoch": 0.22008388847767085,
"grad_norm": 0.31843653321266174,
"learning_rate": 3.575708655954324e-06,
"loss": 1.1161,
"num_input_tokens_seen": 233832448,
"step": 892
},
{
"epoch": 0.22033061929434986,
"grad_norm": 0.38734325766563416,
"learning_rate": 3.511175705587433e-06,
"loss": 1.1013,
"num_input_tokens_seen": 234094592,
"step": 893
},
{
"epoch": 0.22057735011102886,
"grad_norm": 0.3909737765789032,
"learning_rate": 3.4472092068735916e-06,
"loss": 1.75,
"num_input_tokens_seen": 234356736,
"step": 894
},
{
"epoch": 0.22082408092770786,
"grad_norm": 0.4034940004348755,
"learning_rate": 3.3838099392243916e-06,
"loss": 1.3209,
"num_input_tokens_seen": 234618880,
"step": 895
},
{
"epoch": 0.22107081174438686,
"grad_norm": 0.37450823187828064,
"learning_rate": 3.3209786751399187e-06,
"loss": 1.6501,
"num_input_tokens_seen": 234881024,
"step": 896
},
{
"epoch": 0.22131754256106587,
"grad_norm": 0.4845336675643921,
"learning_rate": 3.258716180199278e-06,
"loss": 1.0887,
"num_input_tokens_seen": 235143168,
"step": 897
},
{
"epoch": 0.22156427337774487,
"grad_norm": 0.2771964967250824,
"learning_rate": 3.197023213051337e-06,
"loss": 1.7267,
"num_input_tokens_seen": 235405312,
"step": 898
},
{
"epoch": 0.22181100419442387,
"grad_norm": 0.48373907804489136,
"learning_rate": 3.1359005254054273e-06,
"loss": 1.2309,
"num_input_tokens_seen": 235667456,
"step": 899
},
{
"epoch": 0.22205773501110287,
"grad_norm": 0.3501412272453308,
"learning_rate": 3.0753488620222037e-06,
"loss": 1.4436,
"num_input_tokens_seen": 235929600,
"step": 900
},
{
"epoch": 0.22230446582778188,
"grad_norm": 0.4575711488723755,
"learning_rate": 3.0153689607045845e-06,
"loss": 1.6343,
"num_input_tokens_seen": 236191744,
"step": 901
},
{
"epoch": 0.22255119664446088,
"grad_norm": 0.32950469851493835,
"learning_rate": 2.9559615522887273e-06,
"loss": 1.5041,
"num_input_tokens_seen": 236453888,
"step": 902
},
{
"epoch": 0.22279792746113988,
"grad_norm": 0.34429049491882324,
"learning_rate": 2.8971273606351658e-06,
"loss": 1.8894,
"num_input_tokens_seen": 236716032,
"step": 903
},
{
"epoch": 0.22304465827781889,
"grad_norm": 0.4798198640346527,
"learning_rate": 2.8388671026199522e-06,
"loss": 1.2334,
"num_input_tokens_seen": 236978176,
"step": 904
},
{
"epoch": 0.22329138909449792,
"grad_norm": 0.5834410190582275,
"learning_rate": 2.7811814881259503e-06,
"loss": 1.3716,
"num_input_tokens_seen": 237240320,
"step": 905
},
{
"epoch": 0.22353811991117692,
"grad_norm": 0.3931249678134918,
"learning_rate": 2.724071220034158e-06,
"loss": 1.7828,
"num_input_tokens_seen": 237502464,
"step": 906
},
{
"epoch": 0.22378485072785592,
"grad_norm": 0.48202821612358093,
"learning_rate": 2.667536994215186e-06,
"loss": 1.4329,
"num_input_tokens_seen": 237764608,
"step": 907
},
{
"epoch": 0.22403158154453492,
"grad_norm": 0.5228536128997803,
"learning_rate": 2.611579499520722e-06,
"loss": 1.4149,
"num_input_tokens_seen": 238026752,
"step": 908
},
{
"epoch": 0.22427831236121393,
"grad_norm": 0.43905991315841675,
"learning_rate": 2.5561994177751737e-06,
"loss": 1.1839,
"num_input_tokens_seen": 238288896,
"step": 909
},
{
"epoch": 0.22452504317789293,
"grad_norm": 0.5174866318702698,
"learning_rate": 2.501397423767382e-06,
"loss": 1.8725,
"num_input_tokens_seen": 238551040,
"step": 910
},
{
"epoch": 0.22477177399457193,
"grad_norm": 0.4453175663948059,
"learning_rate": 2.4471741852423237e-06,
"loss": 1.3905,
"num_input_tokens_seen": 238813184,
"step": 911
},
{
"epoch": 0.22501850481125094,
"grad_norm": 0.525449275970459,
"learning_rate": 2.3935303628930707e-06,
"loss": 1.7753,
"num_input_tokens_seen": 239075328,
"step": 912
},
{
"epoch": 0.22526523562792994,
"grad_norm": 0.4205642640590668,
"learning_rate": 2.340466610352654e-06,
"loss": 1.1492,
"num_input_tokens_seen": 239337472,
"step": 913
},
{
"epoch": 0.22551196644460894,
"grad_norm": 0.46652594208717346,
"learning_rate": 2.2879835741861586e-06,
"loss": 1.223,
"num_input_tokens_seen": 239599616,
"step": 914
},
{
"epoch": 0.22575869726128794,
"grad_norm": 0.23859062790870667,
"learning_rate": 2.2360818938828187e-06,
"loss": 1.2992,
"num_input_tokens_seen": 239861760,
"step": 915
},
{
"epoch": 0.22600542807796695,
"grad_norm": 0.44085046648979187,
"learning_rate": 2.1847622018482283e-06,
"loss": 1.6227,
"num_input_tokens_seen": 240123904,
"step": 916
},
{
"epoch": 0.22625215889464595,
"grad_norm": 0.49628278613090515,
"learning_rate": 2.134025123396638e-06,
"loss": 1.0806,
"num_input_tokens_seen": 240386048,
"step": 917
},
{
"epoch": 0.22649888971132495,
"grad_norm": 0.38931065797805786,
"learning_rate": 2.0838712767433375e-06,
"loss": 0.9679,
"num_input_tokens_seen": 240648192,
"step": 918
},
{
"epoch": 0.22674562052800395,
"grad_norm": 0.4007751941680908,
"learning_rate": 2.0343012729971243e-06,
"loss": 1.6495,
"num_input_tokens_seen": 240910336,
"step": 919
},
{
"epoch": 0.22699235134468296,
"grad_norm": 0.40981659293174744,
"learning_rate": 1.985315716152847e-06,
"loss": 1.7843,
"num_input_tokens_seen": 241172480,
"step": 920
},
{
"epoch": 0.22723908216136196,
"grad_norm": 0.33023422956466675,
"learning_rate": 1.9369152030840556e-06,
"loss": 2.0067,
"num_input_tokens_seen": 241434624,
"step": 921
},
{
"epoch": 0.22748581297804096,
"grad_norm": 0.4902541935443878,
"learning_rate": 1.8891003235357308e-06,
"loss": 1.6419,
"num_input_tokens_seen": 241696768,
"step": 922
},
{
"epoch": 0.22773254379471997,
"grad_norm": 0.406667023897171,
"learning_rate": 1.841871660117095e-06,
"loss": 1.5435,
"num_input_tokens_seen": 241958912,
"step": 923
},
{
"epoch": 0.22797927461139897,
"grad_norm": 0.4211887717247009,
"learning_rate": 1.7952297882945003e-06,
"loss": 1.563,
"num_input_tokens_seen": 242221056,
"step": 924
},
{
"epoch": 0.22822600542807797,
"grad_norm": 0.40413257479667664,
"learning_rate": 1.7491752763844293e-06,
"loss": 1.6204,
"num_input_tokens_seen": 242483200,
"step": 925
},
{
"epoch": 0.22847273624475697,
"grad_norm": 0.38972026109695435,
"learning_rate": 1.70370868554659e-06,
"loss": 1.2054,
"num_input_tokens_seen": 242745344,
"step": 926
},
{
"epoch": 0.22871946706143598,
"grad_norm": 0.3521660566329956,
"learning_rate": 1.6588305697770313e-06,
"loss": 1.086,
"num_input_tokens_seen": 243007488,
"step": 927
},
{
"epoch": 0.22896619787811498,
"grad_norm": 0.328576922416687,
"learning_rate": 1.6145414759014431e-06,
"loss": 1.1721,
"num_input_tokens_seen": 243269632,
"step": 928
},
{
"epoch": 0.22921292869479398,
"grad_norm": 0.36693474650382996,
"learning_rate": 1.5708419435684462e-06,
"loss": 1.4912,
"num_input_tokens_seen": 243531776,
"step": 929
},
{
"epoch": 0.22945965951147299,
"grad_norm": 0.4244510233402252,
"learning_rate": 1.5277325052430568e-06,
"loss": 1.9722,
"num_input_tokens_seen": 243793920,
"step": 930
},
{
"epoch": 0.229706390328152,
"grad_norm": 0.36215126514434814,
"learning_rate": 1.4852136862001764e-06,
"loss": 1.2516,
"num_input_tokens_seen": 244056064,
"step": 931
},
{
"epoch": 0.229953121144831,
"grad_norm": 0.3520113527774811,
"learning_rate": 1.4432860045182017e-06,
"loss": 1.4905,
"num_input_tokens_seen": 244318208,
"step": 932
},
{
"epoch": 0.23019985196151,
"grad_norm": 0.41491174697875977,
"learning_rate": 1.4019499710726913e-06,
"loss": 1.7869,
"num_input_tokens_seen": 244580352,
"step": 933
},
{
"epoch": 0.230446582778189,
"grad_norm": 0.47250011563301086,
"learning_rate": 1.3612060895301759e-06,
"loss": 1.4418,
"num_input_tokens_seen": 244842496,
"step": 934
},
{
"epoch": 0.230693313594868,
"grad_norm": 0.3612948954105377,
"learning_rate": 1.3210548563419856e-06,
"loss": 1.5529,
"num_input_tokens_seen": 245104640,
"step": 935
},
{
"epoch": 0.230940044411547,
"grad_norm": 0.2578742802143097,
"learning_rate": 1.2814967607382432e-06,
"loss": 0.6559,
"num_input_tokens_seen": 245366784,
"step": 936
},
{
"epoch": 0.231186775228226,
"grad_norm": 0.3276498019695282,
"learning_rate": 1.2425322847218368e-06,
"loss": 1.6959,
"num_input_tokens_seen": 245628928,
"step": 937
},
{
"epoch": 0.231433506044905,
"grad_norm": 0.36643821001052856,
"learning_rate": 1.2041619030626284e-06,
"loss": 1.7047,
"num_input_tokens_seen": 245891072,
"step": 938
},
{
"epoch": 0.231680236861584,
"grad_norm": 0.4881236255168915,
"learning_rate": 1.166386083291604e-06,
"loss": 1.5233,
"num_input_tokens_seen": 246153216,
"step": 939
},
{
"epoch": 0.231926967678263,
"grad_norm": 0.45456019043922424,
"learning_rate": 1.1292052856952062e-06,
"loss": 0.8762,
"num_input_tokens_seen": 246415360,
"step": 940
},
{
"epoch": 0.23217369849494202,
"grad_norm": 0.32440200448036194,
"learning_rate": 1.0926199633097157e-06,
"loss": 1.0543,
"num_input_tokens_seen": 246677504,
"step": 941
},
{
"epoch": 0.23242042931162102,
"grad_norm": 0.4651922881603241,
"learning_rate": 1.0566305619157502e-06,
"loss": 1.5887,
"num_input_tokens_seen": 246939648,
"step": 942
},
{
"epoch": 0.23266716012830002,
"grad_norm": 0.304892897605896,
"learning_rate": 1.0212375200327973e-06,
"loss": 1.0267,
"num_input_tokens_seen": 247201792,
"step": 943
},
{
"epoch": 0.23291389094497902,
"grad_norm": 0.48778969049453735,
"learning_rate": 9.864412689139123e-07,
"loss": 1.4165,
"num_input_tokens_seen": 247463936,
"step": 944
},
{
"epoch": 0.23316062176165803,
"grad_norm": 0.5654739737510681,
"learning_rate": 9.522422325404235e-07,
"loss": 1.4352,
"num_input_tokens_seen": 247726080,
"step": 945
},
{
"epoch": 0.23340735257833703,
"grad_norm": 0.43988433480262756,
"learning_rate": 9.186408276168013e-07,
"loss": 1.4513,
"num_input_tokens_seen": 247988224,
"step": 946
},
{
"epoch": 0.23365408339501603,
"grad_norm": 1.4060965776443481,
"learning_rate": 8.856374635655695e-07,
"loss": 1.7443,
"num_input_tokens_seen": 248250368,
"step": 947
},
{
"epoch": 0.23390081421169504,
"grad_norm": 0.4969773590564728,
"learning_rate": 8.53232542522292e-07,
"loss": 1.432,
"num_input_tokens_seen": 248512512,
"step": 948
},
{
"epoch": 0.23414754502837404,
"grad_norm": 0.3034925162792206,
"learning_rate": 8.214264593307098e-07,
"loss": 1.3612,
"num_input_tokens_seen": 248774656,
"step": 949
},
{
"epoch": 0.23439427584505304,
"grad_norm": 0.41453835368156433,
"learning_rate": 7.90219601537906e-07,
"loss": 1.7427,
"num_input_tokens_seen": 249036800,
"step": 950
},
{
"epoch": 0.23464100666173204,
"grad_norm": 0.4272692799568176,
"learning_rate": 7.596123493895991e-07,
"loss": 1.7807,
"num_input_tokens_seen": 249298944,
"step": 951
},
{
"epoch": 0.23488773747841105,
"grad_norm": 0.40432244539260864,
"learning_rate": 7.296050758254957e-07,
"loss": 1.2765,
"num_input_tokens_seen": 249561088,
"step": 952
},
{
"epoch": 0.23513446829509005,
"grad_norm": 0.4936521053314209,
"learning_rate": 7.001981464747565e-07,
"loss": 1.6568,
"num_input_tokens_seen": 249823232,
"step": 953
},
{
"epoch": 0.23538119911176905,
"grad_norm": 0.44063809514045715,
"learning_rate": 6.713919196515317e-07,
"loss": 1.3187,
"num_input_tokens_seen": 250085376,
"step": 954
},
{
"epoch": 0.23562792992844805,
"grad_norm": 0.5423821806907654,
"learning_rate": 6.431867463506048e-07,
"loss": 1.4531,
"num_input_tokens_seen": 250347520,
"step": 955
},
{
"epoch": 0.23587466074512706,
"grad_norm": 0.3687763214111328,
"learning_rate": 6.15582970243117e-07,
"loss": 1.841,
"num_input_tokens_seen": 250609664,
"step": 956
},
{
"epoch": 0.23612139156180606,
"grad_norm": 0.37120380997657776,
"learning_rate": 5.885809276723608e-07,
"loss": 1.5953,
"num_input_tokens_seen": 250871808,
"step": 957
},
{
"epoch": 0.23636812237848506,
"grad_norm": 0.29450616240501404,
"learning_rate": 5.621809476497098e-07,
"loss": 0.9224,
"num_input_tokens_seen": 251133952,
"step": 958
},
{
"epoch": 0.23661485319516407,
"grad_norm": 0.43965956568717957,
"learning_rate": 5.363833518505834e-07,
"loss": 1.2595,
"num_input_tokens_seen": 251396096,
"step": 959
},
{
"epoch": 0.23686158401184307,
"grad_norm": 0.4543672502040863,
"learning_rate": 5.111884546105506e-07,
"loss": 1.776,
"num_input_tokens_seen": 251658240,
"step": 960
},
{
"epoch": 0.23710831482852207,
"grad_norm": 0.3763900101184845,
"learning_rate": 4.865965629214819e-07,
"loss": 1.2479,
"num_input_tokens_seen": 251920384,
"step": 961
},
{
"epoch": 0.23735504564520107,
"grad_norm": 0.6350561380386353,
"learning_rate": 4.6260797642782014e-07,
"loss": 1.0469,
"num_input_tokens_seen": 252182528,
"step": 962
},
{
"epoch": 0.23760177646188008,
"grad_norm": 0.5649178624153137,
"learning_rate": 4.392229874229159e-07,
"loss": 1.1796,
"num_input_tokens_seen": 252444672,
"step": 963
},
{
"epoch": 0.23784850727855908,
"grad_norm": 0.40324440598487854,
"learning_rate": 4.1644188084548063e-07,
"loss": 1.8002,
"num_input_tokens_seen": 252706816,
"step": 964
},
{
"epoch": 0.23809523809523808,
"grad_norm": 0.32778018712997437,
"learning_rate": 3.9426493427611177e-07,
"loss": 0.8405,
"num_input_tokens_seen": 252968960,
"step": 965
},
{
"epoch": 0.23834196891191708,
"grad_norm": 0.3592855632305145,
"learning_rate": 3.7269241793390085e-07,
"loss": 1.2062,
"num_input_tokens_seen": 253231104,
"step": 966
},
{
"epoch": 0.23858869972859612,
"grad_norm": 0.6307246088981628,
"learning_rate": 3.517245946731529e-07,
"loss": 1.6272,
"num_input_tokens_seen": 253493248,
"step": 967
},
{
"epoch": 0.23883543054527512,
"grad_norm": 0.4334414303302765,
"learning_rate": 3.3136171998017775e-07,
"loss": 1.2055,
"num_input_tokens_seen": 253755392,
"step": 968
},
{
"epoch": 0.23908216136195412,
"grad_norm": 0.5530074238777161,
"learning_rate": 3.1160404197018154e-07,
"loss": 1.369,
"num_input_tokens_seen": 254017536,
"step": 969
},
{
"epoch": 0.23932889217863312,
"grad_norm": 0.9987282752990723,
"learning_rate": 2.924518013842303e-07,
"loss": 1.2334,
"num_input_tokens_seen": 254279680,
"step": 970
},
{
"epoch": 0.23957562299531213,
"grad_norm": 1.3839845657348633,
"learning_rate": 2.7390523158633554e-07,
"loss": 1.9204,
"num_input_tokens_seen": 254541824,
"step": 971
},
{
"epoch": 0.23982235381199113,
"grad_norm": 0.41010022163391113,
"learning_rate": 2.5596455856058963e-07,
"loss": 1.1164,
"num_input_tokens_seen": 254803968,
"step": 972
},
{
"epoch": 0.24006908462867013,
"grad_norm": 0.42800965905189514,
"learning_rate": 2.386300009084408e-07,
"loss": 1.5385,
"num_input_tokens_seen": 255066112,
"step": 973
},
{
"epoch": 0.24031581544534913,
"grad_norm": 0.3884003162384033,
"learning_rate": 2.219017698460002e-07,
"loss": 1.5113,
"num_input_tokens_seen": 255328256,
"step": 974
},
{
"epoch": 0.24056254626202814,
"grad_norm": 0.43167439103126526,
"learning_rate": 2.057800692014833e-07,
"loss": 1.3612,
"num_input_tokens_seen": 255590400,
"step": 975
},
{
"epoch": 0.24080927707870714,
"grad_norm": 0.27551332116127014,
"learning_rate": 1.9026509541272275e-07,
"loss": 1.5745,
"num_input_tokens_seen": 255852544,
"step": 976
},
{
"epoch": 0.24105600789538614,
"grad_norm": 0.619727373123169,
"learning_rate": 1.753570375247815e-07,
"loss": 0.9898,
"num_input_tokens_seen": 256114688,
"step": 977
},
{
"epoch": 0.24130273871206515,
"grad_norm": 0.39491206407546997,
"learning_rate": 1.6105607718764347e-07,
"loss": 1.7884,
"num_input_tokens_seen": 256376832,
"step": 978
},
{
"epoch": 0.24154946952874415,
"grad_norm": 0.34670019149780273,
"learning_rate": 1.4736238865398765e-07,
"loss": 1.5751,
"num_input_tokens_seen": 256638976,
"step": 979
},
{
"epoch": 0.24179620034542315,
"grad_norm": 0.34424442052841187,
"learning_rate": 1.342761387770952e-07,
"loss": 0.9909,
"num_input_tokens_seen": 256901120,
"step": 980
},
{
"epoch": 0.24204293116210215,
"grad_norm": 0.3579651415348053,
"learning_rate": 1.2179748700879012e-07,
"loss": 1.5647,
"num_input_tokens_seen": 257163264,
"step": 981
},
{
"epoch": 0.24228966197878116,
"grad_norm": 0.38628777861595154,
"learning_rate": 1.0992658539750178e-07,
"loss": 1.6755,
"num_input_tokens_seen": 257425408,
"step": 982
},
{
"epoch": 0.24253639279546016,
"grad_norm": 0.48262420296669006,
"learning_rate": 9.866357858642205e-08,
"loss": 1.0472,
"num_input_tokens_seen": 257687552,
"step": 983
},
{
"epoch": 0.24278312361213916,
"grad_norm": 0.5650882720947266,
"learning_rate": 8.800860381173448e-08,
"loss": 1.3565,
"num_input_tokens_seen": 257949696,
"step": 984
},
{
"epoch": 0.24302985442881817,
"grad_norm": 0.49615639448165894,
"learning_rate": 7.796179090094891e-08,
"loss": 1.511,
"num_input_tokens_seen": 258211840,
"step": 985
},
{
"epoch": 0.24327658524549717,
"grad_norm": 0.42988377809524536,
"learning_rate": 6.852326227130834e-08,
"loss": 1.1782,
"num_input_tokens_seen": 258473984,
"step": 986
},
{
"epoch": 0.24352331606217617,
"grad_norm": 0.36158114671707153,
"learning_rate": 5.969313292830125e-08,
"loss": 1.4792,
"num_input_tokens_seen": 258736128,
"step": 987
},
{
"epoch": 0.24377004687885517,
"grad_norm": 0.25366005301475525,
"learning_rate": 5.1471510464268236e-08,
"loss": 1.1013,
"num_input_tokens_seen": 258998272,
"step": 988
},
{
"epoch": 0.24401677769553418,
"grad_norm": 0.44153720140457153,
"learning_rate": 4.385849505708084e-08,
"loss": 1.5259,
"num_input_tokens_seen": 259260416,
"step": 989
},
{
"epoch": 0.24426350851221318,
"grad_norm": 0.17850150167942047,
"learning_rate": 3.685417946894254e-08,
"loss": 0.7834,
"num_input_tokens_seen": 259522560,
"step": 990
},
{
"epoch": 0.24451023932889218,
"grad_norm": 0.39674296975135803,
"learning_rate": 3.04586490452119e-08,
"loss": 1.5097,
"num_input_tokens_seen": 259784704,
"step": 991
},
{
"epoch": 0.24475697014557118,
"grad_norm": 0.5191229581832886,
"learning_rate": 2.467198171342e-08,
"loss": 1.7867,
"num_input_tokens_seen": 260046848,
"step": 992
},
{
"epoch": 0.2450037009622502,
"grad_norm": 0.43235430121421814,
"learning_rate": 1.949424798228239e-08,
"loss": 1.3496,
"num_input_tokens_seen": 260308992,
"step": 993
},
{
"epoch": 0.2452504317789292,
"grad_norm": 0.3359636068344116,
"learning_rate": 1.4925510940844156e-08,
"loss": 2.0862,
"num_input_tokens_seen": 260571136,
"step": 994
},
{
"epoch": 0.2454971625956082,
"grad_norm": 0.44033053517341614,
"learning_rate": 1.096582625772502e-08,
"loss": 1.5529,
"num_input_tokens_seen": 260833280,
"step": 995
},
{
"epoch": 0.2457438934122872,
"grad_norm": 0.3549114167690277,
"learning_rate": 7.615242180436522e-09,
"loss": 1.1346,
"num_input_tokens_seen": 261095424,
"step": 996
},
{
"epoch": 0.2459906242289662,
"grad_norm": 0.4625118672847748,
"learning_rate": 4.873799534788059e-09,
"loss": 0.8879,
"num_input_tokens_seen": 261357568,
"step": 997
},
{
"epoch": 0.2462373550456452,
"grad_norm": 0.3425498604774475,
"learning_rate": 2.741531724392843e-09,
"loss": 1.2615,
"num_input_tokens_seen": 261619712,
"step": 998
},
{
"epoch": 0.2464840858623242,
"grad_norm": 0.5017341375350952,
"learning_rate": 1.2184647302626583e-09,
"loss": 1.837,
"num_input_tokens_seen": 261881856,
"step": 999
},
{
"epoch": 0.2467308166790032,
"grad_norm": 0.47057420015335083,
"learning_rate": 3.0461711048035415e-10,
"loss": 1.6028,
"num_input_tokens_seen": 262144000,
"step": 1000
}
],
"logging_steps": 1.0,
"max_steps": 1000,
"num_input_tokens_seen": 262144000,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 484794433536000.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}