{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.2467308166790032,
"eval_steps": 500,
"global_step": 1000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0002467308166790032,
"grad_norm": 16.683284759521484,
"learning_rate": 0.0,
"loss": 2.5206,
"num_input_tokens_seen": 262144,
"step": 1
},
{
"epoch": 0.0004934616333580064,
"grad_norm": 19.499160766601562,
"learning_rate": 1.0000000000000001e-07,
"loss": 2.1037,
"num_input_tokens_seen": 524288,
"step": 2
},
{
"epoch": 0.0007401924500370096,
"grad_norm": 15.584578514099121,
"learning_rate": 2.0000000000000002e-07,
"loss": 2.1221,
"num_input_tokens_seen": 786432,
"step": 3
},
{
"epoch": 0.0009869232667160128,
"grad_norm": 18.40481185913086,
"learning_rate": 3.0000000000000004e-07,
"loss": 1.9157,
"num_input_tokens_seen": 1048576,
"step": 4
},
{
"epoch": 0.001233654083395016,
"grad_norm": 19.92998504638672,
"learning_rate": 4.0000000000000003e-07,
"loss": 2.6474,
"num_input_tokens_seen": 1310720,
"step": 5
},
{
"epoch": 0.0014803849000740192,
"grad_norm": 19.297449111938477,
"learning_rate": 5.000000000000001e-07,
"loss": 2.4133,
"num_input_tokens_seen": 1572864,
"step": 6
},
{
"epoch": 0.0017271157167530224,
"grad_norm": 22.468198776245117,
"learning_rate": 6.000000000000001e-07,
"loss": 2.6828,
"num_input_tokens_seen": 1835008,
"step": 7
},
{
"epoch": 0.0019738465334320256,
"grad_norm": 17.352039337158203,
"learning_rate": 7.000000000000001e-07,
"loss": 2.3061,
"num_input_tokens_seen": 2097152,
"step": 8
},
{
"epoch": 0.0022205773501110288,
"grad_norm": 12.8675537109375,
"learning_rate": 8.000000000000001e-07,
"loss": 2.0333,
"num_input_tokens_seen": 2359296,
"step": 9
},
{
"epoch": 0.002467308166790032,
"grad_norm": 14.969992637634277,
"learning_rate": 9.000000000000001e-07,
"loss": 2.5559,
"num_input_tokens_seen": 2621440,
"step": 10
},
{
"epoch": 0.002714038983469035,
"grad_norm": 15.295183181762695,
"learning_rate": 1.0000000000000002e-06,
"loss": 2.557,
"num_input_tokens_seen": 2883584,
"step": 11
},
{
"epoch": 0.0029607698001480384,
"grad_norm": 10.054173469543457,
"learning_rate": 1.1e-06,
"loss": 2.1582,
"num_input_tokens_seen": 3145728,
"step": 12
},
{
"epoch": 0.0032075006168270415,
"grad_norm": 10.735544204711914,
"learning_rate": 1.2000000000000002e-06,
"loss": 2.0169,
"num_input_tokens_seen": 3407872,
"step": 13
},
{
"epoch": 0.0034542314335060447,
"grad_norm": 11.899219512939453,
"learning_rate": 1.3e-06,
"loss": 2.4531,
"num_input_tokens_seen": 3670016,
"step": 14
},
{
"epoch": 0.003700962250185048,
"grad_norm": 13.826565742492676,
"learning_rate": 1.4000000000000001e-06,
"loss": 2.3567,
"num_input_tokens_seen": 3932160,
"step": 15
},
{
"epoch": 0.003947693066864051,
"grad_norm": 6.6798224449157715,
"learning_rate": 1.5e-06,
"loss": 1.6159,
"num_input_tokens_seen": 4194304,
"step": 16
},
{
"epoch": 0.004194423883543055,
"grad_norm": 8.633581161499023,
"learning_rate": 1.6000000000000001e-06,
"loss": 2.4096,
"num_input_tokens_seen": 4456448,
"step": 17
},
{
"epoch": 0.0044411547002220575,
"grad_norm": 7.755622863769531,
"learning_rate": 1.7000000000000002e-06,
"loss": 2.2198,
"num_input_tokens_seen": 4718592,
"step": 18
},
{
"epoch": 0.004687885516901061,
"grad_norm": 7.24108362197876,
"learning_rate": 1.8000000000000001e-06,
"loss": 2.195,
"num_input_tokens_seen": 4980736,
"step": 19
},
{
"epoch": 0.004934616333580064,
"grad_norm": 5.231243133544922,
"learning_rate": 1.9000000000000002e-06,
"loss": 1.9149,
"num_input_tokens_seen": 5242880,
"step": 20
},
{
"epoch": 0.0051813471502590676,
"grad_norm": 15.724678993225098,
"learning_rate": 2.0000000000000003e-06,
"loss": 2.4373,
"num_input_tokens_seen": 5505024,
"step": 21
},
{
"epoch": 0.00542807796693807,
"grad_norm": 4.023858547210693,
"learning_rate": 2.1000000000000002e-06,
"loss": 2.093,
"num_input_tokens_seen": 5767168,
"step": 22
},
{
"epoch": 0.005674808783617074,
"grad_norm": 3.049189329147339,
"learning_rate": 2.2e-06,
"loss": 2.3108,
"num_input_tokens_seen": 6029312,
"step": 23
},
{
"epoch": 0.005921539600296077,
"grad_norm": 4.3906354904174805,
"learning_rate": 2.3000000000000004e-06,
"loss": 2.0827,
"num_input_tokens_seen": 6291456,
"step": 24
},
{
"epoch": 0.00616827041697508,
"grad_norm": 3.7468109130859375,
"learning_rate": 2.4000000000000003e-06,
"loss": 2.312,
"num_input_tokens_seen": 6553600,
"step": 25
},
{
"epoch": 0.006415001233654083,
"grad_norm": 3.3470516204833984,
"learning_rate": 2.5e-06,
"loss": 2.5401,
"num_input_tokens_seen": 6815744,
"step": 26
},
{
"epoch": 0.006661732050333087,
"grad_norm": 2.9284937381744385,
"learning_rate": 2.6e-06,
"loss": 1.8949,
"num_input_tokens_seen": 7077888,
"step": 27
},
{
"epoch": 0.0069084628670120895,
"grad_norm": 3.228973388671875,
"learning_rate": 2.7000000000000004e-06,
"loss": 2.123,
"num_input_tokens_seen": 7340032,
"step": 28
},
{
"epoch": 0.007155193683691093,
"grad_norm": 2.123669385910034,
"learning_rate": 2.8000000000000003e-06,
"loss": 2.0715,
"num_input_tokens_seen": 7602176,
"step": 29
},
{
"epoch": 0.007401924500370096,
"grad_norm": 2.808058977127075,
"learning_rate": 2.9e-06,
"loss": 1.9762,
"num_input_tokens_seen": 7864320,
"step": 30
},
{
"epoch": 0.0076486553170490995,
"grad_norm": 2.9628617763519287,
"learning_rate": 3e-06,
"loss": 2.4305,
"num_input_tokens_seen": 8126464,
"step": 31
},
{
"epoch": 0.007895386133728102,
"grad_norm": 2.185053586959839,
"learning_rate": 3.1000000000000004e-06,
"loss": 2.3956,
"num_input_tokens_seen": 8388608,
"step": 32
},
{
"epoch": 0.008142116950407105,
"grad_norm": 1.7794125080108643,
"learning_rate": 3.2000000000000003e-06,
"loss": 2.3345,
"num_input_tokens_seen": 8650752,
"step": 33
},
{
"epoch": 0.00838884776708611,
"grad_norm": 1.2772505283355713,
"learning_rate": 3.3000000000000006e-06,
"loss": 2.173,
"num_input_tokens_seen": 8912896,
"step": 34
},
{
"epoch": 0.008635578583765112,
"grad_norm": 1.7572567462921143,
"learning_rate": 3.4000000000000005e-06,
"loss": 2.2358,
"num_input_tokens_seen": 9175040,
"step": 35
},
{
"epoch": 0.008882309400444115,
"grad_norm": 4.534196853637695,
"learning_rate": 3.5e-06,
"loss": 1.7227,
"num_input_tokens_seen": 9437184,
"step": 36
},
{
"epoch": 0.009129040217123118,
"grad_norm": 1.3536083698272705,
"learning_rate": 3.6000000000000003e-06,
"loss": 1.5496,
"num_input_tokens_seen": 9699328,
"step": 37
},
{
"epoch": 0.009375771033802122,
"grad_norm": 2.4008381366729736,
"learning_rate": 3.7e-06,
"loss": 2.3225,
"num_input_tokens_seen": 9961472,
"step": 38
},
{
"epoch": 0.009622501850481125,
"grad_norm": 1.4890753030776978,
"learning_rate": 3.8000000000000005e-06,
"loss": 2.2718,
"num_input_tokens_seen": 10223616,
"step": 39
},
{
"epoch": 0.009869232667160128,
"grad_norm": 22.986099243164062,
"learning_rate": 3.900000000000001e-06,
"loss": 1.8245,
"num_input_tokens_seen": 10485760,
"step": 40
},
{
"epoch": 0.010115963483839132,
"grad_norm": 3.394990921020508,
"learning_rate": 4.000000000000001e-06,
"loss": 1.991,
"num_input_tokens_seen": 10747904,
"step": 41
},
{
"epoch": 0.010362694300518135,
"grad_norm": 1.8627941608428955,
"learning_rate": 4.1e-06,
"loss": 2.2176,
"num_input_tokens_seen": 11010048,
"step": 42
},
{
"epoch": 0.010609425117197138,
"grad_norm": 9.628961563110352,
"learning_rate": 4.2000000000000004e-06,
"loss": 2.1058,
"num_input_tokens_seen": 11272192,
"step": 43
},
{
"epoch": 0.01085615593387614,
"grad_norm": 1.0936553478240967,
"learning_rate": 4.3e-06,
"loss": 2.2169,
"num_input_tokens_seen": 11534336,
"step": 44
},
{
"epoch": 0.011102886750555145,
"grad_norm": 1.0321332216262817,
"learning_rate": 4.4e-06,
"loss": 1.7619,
"num_input_tokens_seen": 11796480,
"step": 45
},
{
"epoch": 0.011349617567234148,
"grad_norm": 19.57929039001465,
"learning_rate": 4.5e-06,
"loss": 2.0291,
"num_input_tokens_seen": 12058624,
"step": 46
},
{
"epoch": 0.01159634838391315,
"grad_norm": 1.3844197988510132,
"learning_rate": 4.600000000000001e-06,
"loss": 2.2314,
"num_input_tokens_seen": 12320768,
"step": 47
},
{
"epoch": 0.011843079200592153,
"grad_norm": 0.850957453250885,
"learning_rate": 4.7e-06,
"loss": 1.8677,
"num_input_tokens_seen": 12582912,
"step": 48
},
{
"epoch": 0.012089810017271158,
"grad_norm": 1.143416166305542,
"learning_rate": 4.800000000000001e-06,
"loss": 1.8022,
"num_input_tokens_seen": 12845056,
"step": 49
},
{
"epoch": 0.01233654083395016,
"grad_norm": 1.5641077756881714,
"learning_rate": 4.9000000000000005e-06,
"loss": 1.97,
"num_input_tokens_seen": 13107200,
"step": 50
},
{
"epoch": 0.012583271650629163,
"grad_norm": 1.3804495334625244,
"learning_rate": 5e-06,
"loss": 1.9272,
"num_input_tokens_seen": 13369344,
"step": 51
},
{
"epoch": 0.012830002467308166,
"grad_norm": 1.558229684829712,
"learning_rate": 5.1e-06,
"loss": 2.062,
"num_input_tokens_seen": 13631488,
"step": 52
},
{
"epoch": 0.01307673328398717,
"grad_norm": 11.860638618469238,
"learning_rate": 5.2e-06,
"loss": 2.3097,
"num_input_tokens_seen": 13893632,
"step": 53
},
{
"epoch": 0.013323464100666173,
"grad_norm": 0.9336137175559998,
"learning_rate": 5.300000000000001e-06,
"loss": 1.8883,
"num_input_tokens_seen": 14155776,
"step": 54
},
{
"epoch": 0.013570194917345176,
"grad_norm": 1.0054830312728882,
"learning_rate": 5.400000000000001e-06,
"loss": 2.1054,
"num_input_tokens_seen": 14417920,
"step": 55
},
{
"epoch": 0.013816925734024179,
"grad_norm": 0.8040982484817505,
"learning_rate": 5.500000000000001e-06,
"loss": 1.704,
"num_input_tokens_seen": 14680064,
"step": 56
},
{
"epoch": 0.014063656550703183,
"grad_norm": 0.8780809640884399,
"learning_rate": 5.600000000000001e-06,
"loss": 1.5234,
"num_input_tokens_seen": 14942208,
"step": 57
},
{
"epoch": 0.014310387367382186,
"grad_norm": 1.0134830474853516,
"learning_rate": 5.7e-06,
"loss": 1.8635,
"num_input_tokens_seen": 15204352,
"step": 58
},
{
"epoch": 0.014557118184061189,
"grad_norm": 1.2978655099868774,
"learning_rate": 5.8e-06,
"loss": 1.8941,
"num_input_tokens_seen": 15466496,
"step": 59
},
{
"epoch": 0.014803849000740192,
"grad_norm": 1.0360183715820312,
"learning_rate": 5.9e-06,
"loss": 1.5408,
"num_input_tokens_seen": 15728640,
"step": 60
},
{
"epoch": 0.015050579817419196,
"grad_norm": 1.1055400371551514,
"learning_rate": 6e-06,
"loss": 2.3942,
"num_input_tokens_seen": 15990784,
"step": 61
},
{
"epoch": 0.015297310634098199,
"grad_norm": 0.677260160446167,
"learning_rate": 6.1e-06,
"loss": 2.2286,
"num_input_tokens_seen": 16252928,
"step": 62
},
{
"epoch": 0.015544041450777202,
"grad_norm": 2.7347521781921387,
"learning_rate": 6.200000000000001e-06,
"loss": 1.5486,
"num_input_tokens_seen": 16515072,
"step": 63
},
{
"epoch": 0.015790772267456205,
"grad_norm": 0.5401502847671509,
"learning_rate": 6.300000000000001e-06,
"loss": 2.0308,
"num_input_tokens_seen": 16777216,
"step": 64
},
{
"epoch": 0.016037503084135207,
"grad_norm": 1.037590503692627,
"learning_rate": 6.4000000000000006e-06,
"loss": 1.4798,
"num_input_tokens_seen": 17039360,
"step": 65
},
{
"epoch": 0.01628423390081421,
"grad_norm": 0.6973419189453125,
"learning_rate": 6.5000000000000004e-06,
"loss": 2.0788,
"num_input_tokens_seen": 17301504,
"step": 66
},
{
"epoch": 0.016530964717493216,
"grad_norm": 0.8023918271064758,
"learning_rate": 6.600000000000001e-06,
"loss": 1.7997,
"num_input_tokens_seen": 17563648,
"step": 67
},
{
"epoch": 0.01677769553417222,
"grad_norm": 0.6638779640197754,
"learning_rate": 6.700000000000001e-06,
"loss": 1.9026,
"num_input_tokens_seen": 17825792,
"step": 68
},
{
"epoch": 0.017024426350851222,
"grad_norm": 0.6551264524459839,
"learning_rate": 6.800000000000001e-06,
"loss": 1.7984,
"num_input_tokens_seen": 18087936,
"step": 69
},
{
"epoch": 0.017271157167530225,
"grad_norm": 1.490675687789917,
"learning_rate": 6.9e-06,
"loss": 1.7201,
"num_input_tokens_seen": 18350080,
"step": 70
},
{
"epoch": 0.017517887984209227,
"grad_norm": 1.0823466777801514,
"learning_rate": 7e-06,
"loss": 1.7211,
"num_input_tokens_seen": 18612224,
"step": 71
},
{
"epoch": 0.01776461880088823,
"grad_norm": 0.6104586124420166,
"learning_rate": 7.100000000000001e-06,
"loss": 1.1804,
"num_input_tokens_seen": 18874368,
"step": 72
},
{
"epoch": 0.018011349617567233,
"grad_norm": 0.562042236328125,
"learning_rate": 7.2000000000000005e-06,
"loss": 1.3951,
"num_input_tokens_seen": 19136512,
"step": 73
},
{
"epoch": 0.018258080434246236,
"grad_norm": 0.7759392261505127,
"learning_rate": 7.3e-06,
"loss": 2.0176,
"num_input_tokens_seen": 19398656,
"step": 74
},
{
"epoch": 0.018504811250925242,
"grad_norm": 1.0909373760223389,
"learning_rate": 7.4e-06,
"loss": 1.981,
"num_input_tokens_seen": 19660800,
"step": 75
},
{
"epoch": 0.018751542067604245,
"grad_norm": 1.069872498512268,
"learning_rate": 7.500000000000001e-06,
"loss": 1.6917,
"num_input_tokens_seen": 19922944,
"step": 76
},
{
"epoch": 0.018998272884283247,
"grad_norm": 0.7408702373504639,
"learning_rate": 7.600000000000001e-06,
"loss": 2.0653,
"num_input_tokens_seen": 20185088,
"step": 77
},
{
"epoch": 0.01924500370096225,
"grad_norm": 2.0848100185394287,
"learning_rate": 7.7e-06,
"loss": 1.704,
"num_input_tokens_seen": 20447232,
"step": 78
},
{
"epoch": 0.019491734517641253,
"grad_norm": 0.908286988735199,
"learning_rate": 7.800000000000002e-06,
"loss": 2.208,
"num_input_tokens_seen": 20709376,
"step": 79
},
{
"epoch": 0.019738465334320256,
"grad_norm": 0.746882975101471,
"learning_rate": 7.9e-06,
"loss": 1.6077,
"num_input_tokens_seen": 20971520,
"step": 80
},
{
"epoch": 0.01998519615099926,
"grad_norm": 1.8665310144424438,
"learning_rate": 8.000000000000001e-06,
"loss": 2.0049,
"num_input_tokens_seen": 21233664,
"step": 81
},
{
"epoch": 0.020231926967678265,
"grad_norm": 0.5606961846351624,
"learning_rate": 8.1e-06,
"loss": 2.0754,
"num_input_tokens_seen": 21495808,
"step": 82
},
{
"epoch": 0.020478657784357267,
"grad_norm": 8.728538513183594,
"learning_rate": 8.2e-06,
"loss": 1.8255,
"num_input_tokens_seen": 21757952,
"step": 83
},
{
"epoch": 0.02072538860103627,
"grad_norm": 1.3503004312515259,
"learning_rate": 8.3e-06,
"loss": 1.5528,
"num_input_tokens_seen": 22020096,
"step": 84
},
{
"epoch": 0.020972119417715273,
"grad_norm": 0.989362895488739,
"learning_rate": 8.400000000000001e-06,
"loss": 2.1408,
"num_input_tokens_seen": 22282240,
"step": 85
},
{
"epoch": 0.021218850234394276,
"grad_norm": 0.6346938610076904,
"learning_rate": 8.5e-06,
"loss": 2.0522,
"num_input_tokens_seen": 22544384,
"step": 86
},
{
"epoch": 0.02146558105107328,
"grad_norm": 0.9276885390281677,
"learning_rate": 8.6e-06,
"loss": 2.0185,
"num_input_tokens_seen": 22806528,
"step": 87
},
{
"epoch": 0.02171231186775228,
"grad_norm": 0.44476205110549927,
"learning_rate": 8.700000000000001e-06,
"loss": 2.1696,
"num_input_tokens_seen": 23068672,
"step": 88
},
{
"epoch": 0.021959042684431284,
"grad_norm": 0.5806407332420349,
"learning_rate": 8.8e-06,
"loss": 2.0952,
"num_input_tokens_seen": 23330816,
"step": 89
},
{
"epoch": 0.02220577350111029,
"grad_norm": 0.3519119620323181,
"learning_rate": 8.900000000000001e-06,
"loss": 1.5856,
"num_input_tokens_seen": 23592960,
"step": 90
},
{
"epoch": 0.022452504317789293,
"grad_norm": 0.5435070991516113,
"learning_rate": 9e-06,
"loss": 1.5677,
"num_input_tokens_seen": 23855104,
"step": 91
},
{
"epoch": 0.022699235134468296,
"grad_norm": 0.5541613101959229,
"learning_rate": 9.100000000000001e-06,
"loss": 2.0635,
"num_input_tokens_seen": 24117248,
"step": 92
},
{
"epoch": 0.0229459659511473,
"grad_norm": 0.8948331475257874,
"learning_rate": 9.200000000000002e-06,
"loss": 1.386,
"num_input_tokens_seen": 24379392,
"step": 93
},
{
"epoch": 0.0231926967678263,
"grad_norm": 0.39605551958084106,
"learning_rate": 9.3e-06,
"loss": 1.3151,
"num_input_tokens_seen": 24641536,
"step": 94
},
{
"epoch": 0.023439427584505304,
"grad_norm": 0.4620722234249115,
"learning_rate": 9.4e-06,
"loss": 1.5472,
"num_input_tokens_seen": 24903680,
"step": 95
},
{
"epoch": 0.023686158401184307,
"grad_norm": 0.8512610197067261,
"learning_rate": 9.5e-06,
"loss": 1.918,
"num_input_tokens_seen": 25165824,
"step": 96
},
{
"epoch": 0.02393288921786331,
"grad_norm": 1.0007725954055786,
"learning_rate": 9.600000000000001e-06,
"loss": 2.0203,
"num_input_tokens_seen": 25427968,
"step": 97
},
{
"epoch": 0.024179620034542316,
"grad_norm": 0.558268129825592,
"learning_rate": 9.7e-06,
"loss": 1.7781,
"num_input_tokens_seen": 25690112,
"step": 98
},
{
"epoch": 0.02442635085122132,
"grad_norm": 0.33053046464920044,
"learning_rate": 9.800000000000001e-06,
"loss": 1.9374,
"num_input_tokens_seen": 25952256,
"step": 99
},
{
"epoch": 0.02467308166790032,
"grad_norm": 0.47462955117225647,
"learning_rate": 9.9e-06,
"loss": 1.7824,
"num_input_tokens_seen": 26214400,
"step": 100
},
{
"epoch": 0.024919812484579324,
"grad_norm": 0.5819123983383179,
"learning_rate": 1e-05,
"loss": 1.7157,
"num_input_tokens_seen": 26476544,
"step": 101
},
{
"epoch": 0.025166543301258327,
"grad_norm": 0.4024888575077057,
"learning_rate": 9.999969538288953e-06,
"loss": 1.6603,
"num_input_tokens_seen": 26738688,
"step": 102
},
{
"epoch": 0.02541327411793733,
"grad_norm": 0.4729028046131134,
"learning_rate": 9.999878153526974e-06,
"loss": 2.4554,
"num_input_tokens_seen": 27000832,
"step": 103
},
{
"epoch": 0.025660004934616332,
"grad_norm": 0.6632626056671143,
"learning_rate": 9.999725846827562e-06,
"loss": 1.8782,
"num_input_tokens_seen": 27262976,
"step": 104
},
{
"epoch": 0.025906735751295335,
"grad_norm": 1.9816948175430298,
"learning_rate": 9.999512620046523e-06,
"loss": 1.7047,
"num_input_tokens_seen": 27525120,
"step": 105
},
{
"epoch": 0.02615346656797434,
"grad_norm": 0.40769246220588684,
"learning_rate": 9.999238475781957e-06,
"loss": 1.6149,
"num_input_tokens_seen": 27787264,
"step": 106
},
{
"epoch": 0.026400197384653344,
"grad_norm": 0.3405158221721649,
"learning_rate": 9.998903417374228e-06,
"loss": 2.0906,
"num_input_tokens_seen": 28049408,
"step": 107
},
{
"epoch": 0.026646928201332347,
"grad_norm": 0.497395396232605,
"learning_rate": 9.998507448905917e-06,
"loss": 1.5876,
"num_input_tokens_seen": 28311552,
"step": 108
},
{
"epoch": 0.02689365901801135,
"grad_norm": 0.4290633499622345,
"learning_rate": 9.998050575201772e-06,
"loss": 1.5216,
"num_input_tokens_seen": 28573696,
"step": 109
},
{
"epoch": 0.027140389834690352,
"grad_norm": 0.3609262704849243,
"learning_rate": 9.997532801828659e-06,
"loss": 2.1408,
"num_input_tokens_seen": 28835840,
"step": 110
},
{
"epoch": 0.027387120651369355,
"grad_norm": 0.6255217790603638,
"learning_rate": 9.99695413509548e-06,
"loss": 1.1463,
"num_input_tokens_seen": 29097984,
"step": 111
},
{
"epoch": 0.027633851468048358,
"grad_norm": 0.6729506850242615,
"learning_rate": 9.996314582053106e-06,
"loss": 1.3884,
"num_input_tokens_seen": 29360128,
"step": 112
},
{
"epoch": 0.02788058228472736,
"grad_norm": 0.3117102086544037,
"learning_rate": 9.995614150494293e-06,
"loss": 1.6354,
"num_input_tokens_seen": 29622272,
"step": 113
},
{
"epoch": 0.028127313101406367,
"grad_norm": 0.7941350936889648,
"learning_rate": 9.994852848953574e-06,
"loss": 1.4103,
"num_input_tokens_seen": 29884416,
"step": 114
},
{
"epoch": 0.02837404391808537,
"grad_norm": 0.552463710308075,
"learning_rate": 9.994030686707171e-06,
"loss": 1.4375,
"num_input_tokens_seen": 30146560,
"step": 115
},
{
"epoch": 0.028620774734764372,
"grad_norm": 0.4415859878063202,
"learning_rate": 9.993147673772869e-06,
"loss": 1.554,
"num_input_tokens_seen": 30408704,
"step": 116
},
{
"epoch": 0.028867505551443375,
"grad_norm": 0.3809610903263092,
"learning_rate": 9.992203820909906e-06,
"loss": 1.5911,
"num_input_tokens_seen": 30670848,
"step": 117
},
{
"epoch": 0.029114236368122378,
"grad_norm": 0.3418121635913849,
"learning_rate": 9.991199139618828e-06,
"loss": 1.5202,
"num_input_tokens_seen": 30932992,
"step": 118
},
{
"epoch": 0.02936096718480138,
"grad_norm": 0.4744432270526886,
"learning_rate": 9.990133642141359e-06,
"loss": 1.8556,
"num_input_tokens_seen": 31195136,
"step": 119
},
{
"epoch": 0.029607698001480384,
"grad_norm": 0.5877406001091003,
"learning_rate": 9.989007341460251e-06,
"loss": 2.0399,
"num_input_tokens_seen": 31457280,
"step": 120
},
{
"epoch": 0.02985442881815939,
"grad_norm": 0.7880735397338867,
"learning_rate": 9.987820251299121e-06,
"loss": 1.7712,
"num_input_tokens_seen": 31719424,
"step": 121
},
{
"epoch": 0.030101159634838393,
"grad_norm": 0.5546642541885376,
"learning_rate": 9.98657238612229e-06,
"loss": 1.6735,
"num_input_tokens_seen": 31981568,
"step": 122
},
{
"epoch": 0.030347890451517395,
"grad_norm": 0.32245898246765137,
"learning_rate": 9.985263761134602e-06,
"loss": 1.7308,
"num_input_tokens_seen": 32243712,
"step": 123
},
{
"epoch": 0.030594621268196398,
"grad_norm": 0.7740073204040527,
"learning_rate": 9.983894392281237e-06,
"loss": 1.4024,
"num_input_tokens_seen": 32505856,
"step": 124
},
{
"epoch": 0.0308413520848754,
"grad_norm": 0.5259237885475159,
"learning_rate": 9.982464296247523e-06,
"loss": 1.8065,
"num_input_tokens_seen": 32768000,
"step": 125
},
{
"epoch": 0.031088082901554404,
"grad_norm": 1.6967214345932007,
"learning_rate": 9.980973490458728e-06,
"loss": 1.787,
"num_input_tokens_seen": 33030144,
"step": 126
},
{
"epoch": 0.031334813718233406,
"grad_norm": 0.3362340033054352,
"learning_rate": 9.979421993079853e-06,
"loss": 2.2669,
"num_input_tokens_seen": 33292288,
"step": 127
},
{
"epoch": 0.03158154453491241,
"grad_norm": 0.49716174602508545,
"learning_rate": 9.9778098230154e-06,
"loss": 1.9467,
"num_input_tokens_seen": 33554432,
"step": 128
},
{
"epoch": 0.03182827535159141,
"grad_norm": 0.31525981426239014,
"learning_rate": 9.976136999909156e-06,
"loss": 2.0196,
"num_input_tokens_seen": 33816576,
"step": 129
},
{
"epoch": 0.032075006168270415,
"grad_norm": 0.4798625707626343,
"learning_rate": 9.974403544143942e-06,
"loss": 1.4132,
"num_input_tokens_seen": 34078720,
"step": 130
},
{
"epoch": 0.03232173698494942,
"grad_norm": 0.3842827081680298,
"learning_rate": 9.972609476841368e-06,
"loss": 1.4007,
"num_input_tokens_seen": 34340864,
"step": 131
},
{
"epoch": 0.03256846780162842,
"grad_norm": 0.3742620348930359,
"learning_rate": 9.970754819861577e-06,
"loss": 1.8558,
"num_input_tokens_seen": 34603008,
"step": 132
},
{
"epoch": 0.03281519861830743,
"grad_norm": 0.2592817544937134,
"learning_rate": 9.968839595802982e-06,
"loss": 1.8881,
"num_input_tokens_seen": 34865152,
"step": 133
},
{
"epoch": 0.03306192943498643,
"grad_norm": 0.7180063128471375,
"learning_rate": 9.966863828001982e-06,
"loss": 1.4668,
"num_input_tokens_seen": 35127296,
"step": 134
},
{
"epoch": 0.033308660251665435,
"grad_norm": 0.5548637509346008,
"learning_rate": 9.964827540532685e-06,
"loss": 1.9784,
"num_input_tokens_seen": 35389440,
"step": 135
},
{
"epoch": 0.03355539106834444,
"grad_norm": 0.608799397945404,
"learning_rate": 9.962730758206612e-06,
"loss": 1.8939,
"num_input_tokens_seen": 35651584,
"step": 136
},
{
"epoch": 0.03380212188502344,
"grad_norm": 0.34416207671165466,
"learning_rate": 9.960573506572391e-06,
"loss": 1.8515,
"num_input_tokens_seen": 35913728,
"step": 137
},
{
"epoch": 0.034048852701702444,
"grad_norm": 0.37716981768608093,
"learning_rate": 9.958355811915452e-06,
"loss": 1.7956,
"num_input_tokens_seen": 36175872,
"step": 138
},
{
"epoch": 0.034295583518381446,
"grad_norm": 0.4558100402355194,
"learning_rate": 9.95607770125771e-06,
"loss": 2.0922,
"num_input_tokens_seen": 36438016,
"step": 139
},
{
"epoch": 0.03454231433506045,
"grad_norm": 0.9773036241531372,
"learning_rate": 9.953739202357219e-06,
"loss": 1.1874,
"num_input_tokens_seen": 36700160,
"step": 140
},
{
"epoch": 0.03478904515173945,
"grad_norm": 0.6007382869720459,
"learning_rate": 9.951340343707852e-06,
"loss": 1.6766,
"num_input_tokens_seen": 36962304,
"step": 141
},
{
"epoch": 0.035035775968418455,
"grad_norm": 0.47525718808174133,
"learning_rate": 9.948881154538946e-06,
"loss": 1.4628,
"num_input_tokens_seen": 37224448,
"step": 142
},
{
"epoch": 0.03528250678509746,
"grad_norm": 1.6111029386520386,
"learning_rate": 9.946361664814942e-06,
"loss": 1.6761,
"num_input_tokens_seen": 37486592,
"step": 143
},
{
"epoch": 0.03552923760177646,
"grad_norm": 0.510202944278717,
"learning_rate": 9.94378190523503e-06,
"loss": 1.5761,
"num_input_tokens_seen": 37748736,
"step": 144
},
{
"epoch": 0.03577596841845546,
"grad_norm": 0.3128513693809509,
"learning_rate": 9.941141907232766e-06,
"loss": 1.7816,
"num_input_tokens_seen": 38010880,
"step": 145
},
{
"epoch": 0.036022699235134466,
"grad_norm": 0.5524406433105469,
"learning_rate": 9.938441702975689e-06,
"loss": 2.3617,
"num_input_tokens_seen": 38273024,
"step": 146
},
{
"epoch": 0.03626943005181347,
"grad_norm": 0.28761568665504456,
"learning_rate": 9.93568132536494e-06,
"loss": 1.5824,
"num_input_tokens_seen": 38535168,
"step": 147
},
{
"epoch": 0.03651616086849247,
"grad_norm": 0.3261396288871765,
"learning_rate": 9.932860808034847e-06,
"loss": 1.4992,
"num_input_tokens_seen": 38797312,
"step": 148
},
{
"epoch": 0.03676289168517148,
"grad_norm": 1.7563891410827637,
"learning_rate": 9.929980185352525e-06,
"loss": 2.0062,
"num_input_tokens_seen": 39059456,
"step": 149
},
{
"epoch": 0.037009622501850484,
"grad_norm": 0.3231620192527771,
"learning_rate": 9.927039492417452e-06,
"loss": 1.9814,
"num_input_tokens_seen": 39321600,
"step": 150
},
{
"epoch": 0.037256353318529487,
"grad_norm": 0.5372762084007263,
"learning_rate": 9.924038765061042e-06,
"loss": 1.8328,
"num_input_tokens_seen": 39583744,
"step": 151
},
{
"epoch": 0.03750308413520849,
"grad_norm": 0.5384396314620972,
"learning_rate": 9.92097803984621e-06,
"loss": 1.9721,
"num_input_tokens_seen": 39845888,
"step": 152
},
{
"epoch": 0.03774981495188749,
"grad_norm": 0.3716285228729248,
"learning_rate": 9.91785735406693e-06,
"loss": 1.8848,
"num_input_tokens_seen": 40108032,
"step": 153
},
{
"epoch": 0.037996545768566495,
"grad_norm": 0.3303345739841461,
"learning_rate": 9.914676745747772e-06,
"loss": 1.83,
"num_input_tokens_seen": 40370176,
"step": 154
},
{
"epoch": 0.0382432765852455,
"grad_norm": 0.4025614559650421,
"learning_rate": 9.911436253643445e-06,
"loss": 1.9444,
"num_input_tokens_seen": 40632320,
"step": 155
},
{
"epoch": 0.0384900074019245,
"grad_norm": 0.3882172107696533,
"learning_rate": 9.908135917238321e-06,
"loss": 1.7647,
"num_input_tokens_seen": 40894464,
"step": 156
},
{
"epoch": 0.0387367382186035,
"grad_norm": 0.3332440257072449,
"learning_rate": 9.904775776745959e-06,
"loss": 1.5421,
"num_input_tokens_seen": 41156608,
"step": 157
},
{
"epoch": 0.038983469035282506,
"grad_norm": 0.4627978205680847,
"learning_rate": 9.901355873108611e-06,
"loss": 1.6268,
"num_input_tokens_seen": 41418752,
"step": 158
},
{
"epoch": 0.03923019985196151,
"grad_norm": 0.45848193764686584,
"learning_rate": 9.89787624799672e-06,
"loss": 1.8942,
"num_input_tokens_seen": 41680896,
"step": 159
},
{
"epoch": 0.03947693066864051,
"grad_norm": 0.34131988883018494,
"learning_rate": 9.894336943808426e-06,
"loss": 1.8634,
"num_input_tokens_seen": 41943040,
"step": 160
},
{
"epoch": 0.039723661485319514,
"grad_norm": 0.6125646233558655,
"learning_rate": 9.890738003669029e-06,
"loss": 1.7283,
"num_input_tokens_seen": 42205184,
"step": 161
},
{
"epoch": 0.03997039230199852,
"grad_norm": 0.29789456725120544,
"learning_rate": 9.887079471430481e-06,
"loss": 2.0546,
"num_input_tokens_seen": 42467328,
"step": 162
},
{
"epoch": 0.04021712311867752,
"grad_norm": 0.6001120209693909,
"learning_rate": 9.883361391670841e-06,
"loss": 1.9128,
"num_input_tokens_seen": 42729472,
"step": 163
},
{
"epoch": 0.04046385393535653,
"grad_norm": 0.403012752532959,
"learning_rate": 9.879583809693737e-06,
"loss": 1.7995,
"num_input_tokens_seen": 42991616,
"step": 164
},
{
"epoch": 0.04071058475203553,
"grad_norm": 0.4296472668647766,
"learning_rate": 9.875746771527817e-06,
"loss": 1.4867,
"num_input_tokens_seen": 43253760,
"step": 165
},
{
"epoch": 0.040957315568714535,
"grad_norm": 0.40064001083374023,
"learning_rate": 9.871850323926178e-06,
"loss": 1.5266,
"num_input_tokens_seen": 43515904,
"step": 166
},
{
"epoch": 0.04120404638539354,
"grad_norm": 0.43372851610183716,
"learning_rate": 9.867894514365802e-06,
"loss": 1.8959,
"num_input_tokens_seen": 43778048,
"step": 167
},
{
"epoch": 0.04145077720207254,
"grad_norm": 0.2620079815387726,
"learning_rate": 9.863879391046985e-06,
"loss": 2.2592,
"num_input_tokens_seen": 44040192,
"step": 168
},
{
"epoch": 0.04169750801875154,
"grad_norm": 0.5816200375556946,
"learning_rate": 9.859805002892733e-06,
"loss": 1.6409,
"num_input_tokens_seen": 44302336,
"step": 169
},
{
"epoch": 0.041944238835430546,
"grad_norm": 0.24126295745372772,
"learning_rate": 9.85567139954818e-06,
"loss": 1.5916,
"num_input_tokens_seen": 44564480,
"step": 170
},
{
"epoch": 0.04219096965210955,
"grad_norm": 0.44154781103134155,
"learning_rate": 9.851478631379982e-06,
"loss": 2.1419,
"num_input_tokens_seen": 44826624,
"step": 171
},
{
"epoch": 0.04243770046878855,
"grad_norm": 0.3429047763347626,
"learning_rate": 9.847226749475696e-06,
"loss": 1.2978,
"num_input_tokens_seen": 45088768,
"step": 172
},
{
"epoch": 0.042684431285467554,
"grad_norm": 0.4060891270637512,
"learning_rate": 9.842915805643156e-06,
"loss": 2.0514,
"num_input_tokens_seen": 45350912,
"step": 173
},
{
"epoch": 0.04293116210214656,
"grad_norm": 3.0130326747894287,
"learning_rate": 9.838545852409857e-06,
"loss": 1.9413,
"num_input_tokens_seen": 45613056,
"step": 174
},
{
"epoch": 0.04317789291882556,
"grad_norm": 0.3682909309864044,
"learning_rate": 9.834116943022299e-06,
"loss": 1.8323,
"num_input_tokens_seen": 45875200,
"step": 175
},
{
"epoch": 0.04342462373550456,
"grad_norm": 0.317842960357666,
"learning_rate": 9.829629131445342e-06,
"loss": 1.6821,
"num_input_tokens_seen": 46137344,
"step": 176
},
{
"epoch": 0.043671354552183565,
"grad_norm": 0.5329482555389404,
"learning_rate": 9.825082472361558e-06,
"loss": 2.0072,
"num_input_tokens_seen": 46399488,
"step": 177
},
{
"epoch": 0.04391808536886257,
"grad_norm": 0.36978521943092346,
"learning_rate": 9.82047702117055e-06,
"loss": 2.1558,
"num_input_tokens_seen": 46661632,
"step": 178
},
{
"epoch": 0.04416481618554157,
"grad_norm": 0.3222188949584961,
"learning_rate": 9.815812833988292e-06,
"loss": 2.1112,
"num_input_tokens_seen": 46923776,
"step": 179
},
{
"epoch": 0.04441154700222058,
"grad_norm": 0.4519665539264679,
"learning_rate": 9.811089967646427e-06,
"loss": 1.9106,
"num_input_tokens_seen": 47185920,
"step": 180
},
{
"epoch": 0.04465827781889958,
"grad_norm": 0.2261379510164261,
"learning_rate": 9.806308479691595e-06,
"loss": 1.7543,
"num_input_tokens_seen": 47448064,
"step": 181
},
{
"epoch": 0.044905008635578586,
"grad_norm": 0.4306648373603821,
"learning_rate": 9.801468428384716e-06,
"loss": 2.1505,
"num_input_tokens_seen": 47710208,
"step": 182
},
{
"epoch": 0.04515173945225759,
"grad_norm": 0.34030383825302124,
"learning_rate": 9.796569872700287e-06,
"loss": 1.7867,
"num_input_tokens_seen": 47972352,
"step": 183
},
{
"epoch": 0.04539847026893659,
"grad_norm": 0.29829004406929016,
"learning_rate": 9.791612872325667e-06,
"loss": 2.1088,
"num_input_tokens_seen": 48234496,
"step": 184
},
{
"epoch": 0.045645201085615594,
"grad_norm": 0.5674875974655151,
"learning_rate": 9.786597487660336e-06,
"loss": 1.7166,
"num_input_tokens_seen": 48496640,
"step": 185
},
{
"epoch": 0.0458919319022946,
"grad_norm": 1.0494624376296997,
"learning_rate": 9.781523779815178e-06,
"loss": 1.909,
"num_input_tokens_seen": 48758784,
"step": 186
},
{
"epoch": 0.0461386627189736,
"grad_norm": 0.5063501596450806,
"learning_rate": 9.776391810611719e-06,
"loss": 2.0148,
"num_input_tokens_seen": 49020928,
"step": 187
},
{
"epoch": 0.0463853935356526,
"grad_norm": 0.3112070560455322,
"learning_rate": 9.771201642581384e-06,
"loss": 1.6722,
"num_input_tokens_seen": 49283072,
"step": 188
},
{
"epoch": 0.046632124352331605,
"grad_norm": 0.2980418801307678,
"learning_rate": 9.765953338964736e-06,
"loss": 1.9106,
"num_input_tokens_seen": 49545216,
"step": 189
},
{
"epoch": 0.04687885516901061,
"grad_norm": 0.4022710919380188,
"learning_rate": 9.760646963710694e-06,
"loss": 1.8416,
"num_input_tokens_seen": 49807360,
"step": 190
},
{
"epoch": 0.04712558598568961,
"grad_norm": 0.4678567051887512,
"learning_rate": 9.755282581475769e-06,
"loss": 1.3391,
"num_input_tokens_seen": 50069504,
"step": 191
},
{
"epoch": 0.047372316802368614,
"grad_norm": 0.3067920506000519,
"learning_rate": 9.749860257623262e-06,
"loss": 1.817,
"num_input_tokens_seen": 50331648,
"step": 192
},
{
"epoch": 0.047619047619047616,
"grad_norm": 0.31706950068473816,
"learning_rate": 9.744380058222483e-06,
"loss": 1.5534,
"num_input_tokens_seen": 50593792,
"step": 193
},
{
"epoch": 0.04786577843572662,
"grad_norm": 0.3867824673652649,
"learning_rate": 9.73884205004793e-06,
"loss": 1.8822,
"num_input_tokens_seen": 50855936,
"step": 194
},
{
"epoch": 0.04811250925240563,
"grad_norm": 0.5505887866020203,
"learning_rate": 9.733246300578482e-06,
"loss": 1.5254,
"num_input_tokens_seen": 51118080,
"step": 195
},
{
"epoch": 0.04835924006908463,
"grad_norm": 0.3414452075958252,
"learning_rate": 9.727592877996585e-06,
"loss": 1.8025,
"num_input_tokens_seen": 51380224,
"step": 196
},
{
"epoch": 0.048605970885763634,
"grad_norm": 0.45754650235176086,
"learning_rate": 9.721881851187406e-06,
"loss": 2.2462,
"num_input_tokens_seen": 51642368,
"step": 197
},
{
"epoch": 0.04885270170244264,
"grad_norm": 0.265245258808136,
"learning_rate": 9.716113289738005e-06,
"loss": 2.0391,
"num_input_tokens_seen": 51904512,
"step": 198
},
{
"epoch": 0.04909943251912164,
"grad_norm": 0.3758782744407654,
"learning_rate": 9.710287263936485e-06,
"loss": 1.71,
"num_input_tokens_seen": 52166656,
"step": 199
},
{
"epoch": 0.04934616333580064,
"grad_norm": 0.3058117926120758,
"learning_rate": 9.704403844771128e-06,
"loss": 1.5908,
"num_input_tokens_seen": 52428800,
"step": 200
},
{
"epoch": 0.049592894152479645,
"grad_norm": 0.3393475413322449,
"learning_rate": 9.698463103929542e-06,
"loss": 1.4709,
"num_input_tokens_seen": 52690944,
"step": 201
},
{
"epoch": 0.04983962496915865,
"grad_norm": 0.33585816621780396,
"learning_rate": 9.69246511379778e-06,
"loss": 1.5541,
"num_input_tokens_seen": 52953088,
"step": 202
},
{
"epoch": 0.05008635578583765,
"grad_norm": 0.3068286180496216,
"learning_rate": 9.68640994745946e-06,
"loss": 2.0374,
"num_input_tokens_seen": 53215232,
"step": 203
},
{
"epoch": 0.050333086602516654,
"grad_norm": 0.3098278045654297,
"learning_rate": 9.680297678694867e-06,
"loss": 1.9932,
"num_input_tokens_seen": 53477376,
"step": 204
},
{
"epoch": 0.050579817419195656,
"grad_norm": 2.7050626277923584,
"learning_rate": 9.674128381980073e-06,
"loss": 1.3617,
"num_input_tokens_seen": 53739520,
"step": 205
},
{
"epoch": 0.05082654823587466,
"grad_norm": 0.22176742553710938,
"learning_rate": 9.667902132486009e-06,
"loss": 1.8249,
"num_input_tokens_seen": 54001664,
"step": 206
},
{
"epoch": 0.05107327905255366,
"grad_norm": 0.35358500480651855,
"learning_rate": 9.661619006077562e-06,
"loss": 1.8192,
"num_input_tokens_seen": 54263808,
"step": 207
},
{
"epoch": 0.051320009869232665,
"grad_norm": 0.6246964335441589,
"learning_rate": 9.655279079312643e-06,
"loss": 1.6647,
"num_input_tokens_seen": 54525952,
"step": 208
},
{
"epoch": 0.05156674068591167,
"grad_norm": 0.5339682102203369,
"learning_rate": 9.648882429441258e-06,
"loss": 1.9559,
"num_input_tokens_seen": 54788096,
"step": 209
},
{
"epoch": 0.05181347150259067,
"grad_norm": 0.4896355867385864,
"learning_rate": 9.642429134404568e-06,
"loss": 1.1909,
"num_input_tokens_seen": 55050240,
"step": 210
},
{
"epoch": 0.05206020231926968,
"grad_norm": 0.3802131414413452,
"learning_rate": 9.635919272833938e-06,
"loss": 2.1234,
"num_input_tokens_seen": 55312384,
"step": 211
},
{
"epoch": 0.05230693313594868,
"grad_norm": 0.3870851695537567,
"learning_rate": 9.629352924049975e-06,
"loss": 1.5652,
"num_input_tokens_seen": 55574528,
"step": 212
},
{
"epoch": 0.052553663952627686,
"grad_norm": 0.2589404881000519,
"learning_rate": 9.622730168061568e-06,
"loss": 1.5086,
"num_input_tokens_seen": 55836672,
"step": 213
},
{
"epoch": 0.05280039476930669,
"grad_norm": 0.34146517515182495,
"learning_rate": 9.616051085564905e-06,
"loss": 1.3068,
"num_input_tokens_seen": 56098816,
"step": 214
},
{
"epoch": 0.05304712558598569,
"grad_norm": 0.3951856195926666,
"learning_rate": 9.609315757942504e-06,
"loss": 1.4909,
"num_input_tokens_seen": 56360960,
"step": 215
},
{
"epoch": 0.053293856402664694,
"grad_norm": 0.23125781118869781,
"learning_rate": 9.602524267262202e-06,
"loss": 1.6771,
"num_input_tokens_seen": 56623104,
"step": 216
},
{
"epoch": 0.0535405872193437,
"grad_norm": 0.30795449018478394,
"learning_rate": 9.595676696276173e-06,
"loss": 1.7215,
"num_input_tokens_seen": 56885248,
"step": 217
},
{
"epoch": 0.0537873180360227,
"grad_norm": 0.2819620370864868,
"learning_rate": 9.588773128419907e-06,
"loss": 1.3582,
"num_input_tokens_seen": 57147392,
"step": 218
},
{
"epoch": 0.0540340488527017,
"grad_norm": 0.21246762573719025,
"learning_rate": 9.581813647811199e-06,
"loss": 1.6455,
"num_input_tokens_seen": 57409536,
"step": 219
},
{
"epoch": 0.054280779669380705,
"grad_norm": 0.2589544355869293,
"learning_rate": 9.574798339249124e-06,
"loss": 1.6292,
"num_input_tokens_seen": 57671680,
"step": 220
},
{
"epoch": 0.05452751048605971,
"grad_norm": 0.2522466778755188,
"learning_rate": 9.567727288213005e-06,
"loss": 1.8189,
"num_input_tokens_seen": 57933824,
"step": 221
},
{
"epoch": 0.05477424130273871,
"grad_norm": 0.2954379618167877,
"learning_rate": 9.560600580861366e-06,
"loss": 1.774,
"num_input_tokens_seen": 58195968,
"step": 222
},
{
"epoch": 0.05502097211941771,
"grad_norm": 0.3274562358856201,
"learning_rate": 9.553418304030886e-06,
"loss": 1.6534,
"num_input_tokens_seen": 58458112,
"step": 223
},
{
"epoch": 0.055267702936096716,
"grad_norm": 0.37323611974716187,
"learning_rate": 9.546180545235344e-06,
"loss": 1.8863,
"num_input_tokens_seen": 58720256,
"step": 224
},
{
"epoch": 0.05551443375277572,
"grad_norm": 0.28407496213912964,
"learning_rate": 9.538887392664544e-06,
"loss": 1.7969,
"num_input_tokens_seen": 58982400,
"step": 225
},
{
"epoch": 0.05576116456945472,
"grad_norm": 0.25206607580184937,
"learning_rate": 9.531538935183252e-06,
"loss": 1.842,
"num_input_tokens_seen": 59244544,
"step": 226
},
{
"epoch": 0.05600789538613373,
"grad_norm": 0.3008832037448883,
"learning_rate": 9.524135262330098e-06,
"loss": 2.0255,
"num_input_tokens_seen": 59506688,
"step": 227
},
{
"epoch": 0.056254626202812734,
"grad_norm": 0.2403050661087036,
"learning_rate": 9.516676464316505e-06,
"loss": 1.3322,
"num_input_tokens_seen": 59768832,
"step": 228
},
{
"epoch": 0.05650135701949174,
"grad_norm": 0.26147162914276123,
"learning_rate": 9.50916263202557e-06,
"loss": 1.8543,
"num_input_tokens_seen": 60030976,
"step": 229
},
{
"epoch": 0.05674808783617074,
"grad_norm": 0.32931873202323914,
"learning_rate": 9.501593857010968e-06,
"loss": 1.9961,
"num_input_tokens_seen": 60293120,
"step": 230
},
{
"epoch": 0.05699481865284974,
"grad_norm": 0.26853638887405396,
"learning_rate": 9.493970231495836e-06,
"loss": 1.6481,
"num_input_tokens_seen": 60555264,
"step": 231
},
{
"epoch": 0.057241549469528745,
"grad_norm": 0.33695557713508606,
"learning_rate": 9.486291848371642e-06,
"loss": 1.6796,
"num_input_tokens_seen": 60817408,
"step": 232
},
{
"epoch": 0.05748828028620775,
"grad_norm": 0.3412604331970215,
"learning_rate": 9.478558801197065e-06,
"loss": 1.4463,
"num_input_tokens_seen": 61079552,
"step": 233
},
{
"epoch": 0.05773501110288675,
"grad_norm": 0.3287171423435211,
"learning_rate": 9.470771184196842e-06,
"loss": 1.8843,
"num_input_tokens_seen": 61341696,
"step": 234
},
{
"epoch": 0.05798174191956575,
"grad_norm": 0.4994906187057495,
"learning_rate": 9.46292909226063e-06,
"loss": 1.6577,
"num_input_tokens_seen": 61603840,
"step": 235
},
{
"epoch": 0.058228472736244756,
"grad_norm": 0.3780612647533417,
"learning_rate": 9.45503262094184e-06,
"loss": 1.8985,
"num_input_tokens_seen": 61865984,
"step": 236
},
{
"epoch": 0.05847520355292376,
"grad_norm": 0.2917082607746124,
"learning_rate": 9.44708186645649e-06,
"loss": 1.6278,
"num_input_tokens_seen": 62128128,
"step": 237
},
{
"epoch": 0.05872193436960276,
"grad_norm": 0.2749132812023163,
"learning_rate": 9.439076925682006e-06,
"loss": 2.1182,
"num_input_tokens_seen": 62390272,
"step": 238
},
{
"epoch": 0.058968665186281764,
"grad_norm": 0.34079936146736145,
"learning_rate": 9.431017896156074e-06,
"loss": 1.7252,
"num_input_tokens_seen": 62652416,
"step": 239
},
{
"epoch": 0.05921539600296077,
"grad_norm": 0.2402135133743286,
"learning_rate": 9.42290487607542e-06,
"loss": 1.8629,
"num_input_tokens_seen": 62914560,
"step": 240
},
{
"epoch": 0.05946212681963977,
"grad_norm": 0.40557897090911865,
"learning_rate": 9.414737964294636e-06,
"loss": 1.4655,
"num_input_tokens_seen": 63176704,
"step": 241
},
{
"epoch": 0.05970885763631878,
"grad_norm": 0.31378883123397827,
"learning_rate": 9.406517260324962e-06,
"loss": 1.3033,
"num_input_tokens_seen": 63438848,
"step": 242
},
{
"epoch": 0.05995558845299778,
"grad_norm": 1.0957661867141724,
"learning_rate": 9.398242864333084e-06,
"loss": 1.6811,
"num_input_tokens_seen": 63700992,
"step": 243
},
{
"epoch": 0.060202319269676785,
"grad_norm": 0.5017796158790588,
"learning_rate": 9.389914877139903e-06,
"loss": 1.7252,
"num_input_tokens_seen": 63963136,
"step": 244
},
{
"epoch": 0.06044905008635579,
"grad_norm": 0.31879162788391113,
"learning_rate": 9.381533400219319e-06,
"loss": 1.6618,
"num_input_tokens_seen": 64225280,
"step": 245
},
{
"epoch": 0.06069578090303479,
"grad_norm": 0.28952914476394653,
"learning_rate": 9.37309853569698e-06,
"loss": 1.449,
"num_input_tokens_seen": 64487424,
"step": 246
},
{
"epoch": 0.06094251171971379,
"grad_norm": 0.2795778214931488,
"learning_rate": 9.364610386349048e-06,
"loss": 1.6841,
"num_input_tokens_seen": 64749568,
"step": 247
},
{
"epoch": 0.061189242536392796,
"grad_norm": 0.2345486432313919,
"learning_rate": 9.356069055600949e-06,
"loss": 1.686,
"num_input_tokens_seen": 65011712,
"step": 248
},
{
"epoch": 0.0614359733530718,
"grad_norm": 0.4272654056549072,
"learning_rate": 9.347474647526095e-06,
"loss": 1.5592,
"num_input_tokens_seen": 65273856,
"step": 249
},
{
"epoch": 0.0616827041697508,
"grad_norm": 0.28724414110183716,
"learning_rate": 9.338827266844643e-06,
"loss": 1.6606,
"num_input_tokens_seen": 65536000,
"step": 250
},
{
"epoch": 0.061929434986429804,
"grad_norm": 0.27960872650146484,
"learning_rate": 9.330127018922195e-06,
"loss": 1.5445,
"num_input_tokens_seen": 65798144,
"step": 251
},
{
"epoch": 0.06217616580310881,
"grad_norm": 0.2837168872356415,
"learning_rate": 9.321374009768525e-06,
"loss": 1.4314,
"num_input_tokens_seen": 66060288,
"step": 252
},
{
"epoch": 0.06242289661978781,
"grad_norm": 0.26491212844848633,
"learning_rate": 9.312568346036288e-06,
"loss": 1.3124,
"num_input_tokens_seen": 66322432,
"step": 253
},
{
"epoch": 0.06266962743646681,
"grad_norm": 0.39029908180236816,
"learning_rate": 9.30371013501972e-06,
"loss": 1.6831,
"num_input_tokens_seen": 66584576,
"step": 254
},
{
"epoch": 0.06291635825314582,
"grad_norm": 0.26802682876586914,
"learning_rate": 9.294799484653323e-06,
"loss": 1.7579,
"num_input_tokens_seen": 66846720,
"step": 255
},
{
"epoch": 0.06316308906982482,
"grad_norm": 0.2546183168888092,
"learning_rate": 9.285836503510562e-06,
"loss": 1.6593,
"num_input_tokens_seen": 67108864,
"step": 256
},
{
"epoch": 0.06340981988650382,
"grad_norm": 0.24312745034694672,
"learning_rate": 9.276821300802535e-06,
"loss": 2.0922,
"num_input_tokens_seen": 67371008,
"step": 257
},
{
"epoch": 0.06365655070318282,
"grad_norm": 0.4153420925140381,
"learning_rate": 9.267753986376638e-06,
"loss": 1.5677,
"num_input_tokens_seen": 67633152,
"step": 258
},
{
"epoch": 0.06390328151986183,
"grad_norm": 0.20946179330348969,
"learning_rate": 9.25863467071524e-06,
"loss": 1.8557,
"num_input_tokens_seen": 67895296,
"step": 259
},
{
"epoch": 0.06415001233654083,
"grad_norm": 0.3546552360057831,
"learning_rate": 9.24946346493432e-06,
"loss": 1.6333,
"num_input_tokens_seen": 68157440,
"step": 260
},
{
"epoch": 0.06439674315321983,
"grad_norm": 0.34967514872550964,
"learning_rate": 9.24024048078213e-06,
"loss": 2.1472,
"num_input_tokens_seen": 68419584,
"step": 261
},
{
"epoch": 0.06464347396989883,
"grad_norm": 0.304616242647171,
"learning_rate": 9.230965830637821e-06,
"loss": 1.8176,
"num_input_tokens_seen": 68681728,
"step": 262
},
{
"epoch": 0.06489020478657784,
"grad_norm": 0.23437748849391937,
"learning_rate": 9.221639627510076e-06,
"loss": 1.9167,
"num_input_tokens_seen": 68943872,
"step": 263
},
{
"epoch": 0.06513693560325684,
"grad_norm": 0.26866498589515686,
"learning_rate": 9.21226198503574e-06,
"loss": 1.6385,
"num_input_tokens_seen": 69206016,
"step": 264
},
{
"epoch": 0.06538366641993584,
"grad_norm": 0.278683066368103,
"learning_rate": 9.202833017478421e-06,
"loss": 1.7856,
"num_input_tokens_seen": 69468160,
"step": 265
},
{
"epoch": 0.06563039723661486,
"grad_norm": 0.3804785907268524,
"learning_rate": 9.193352839727122e-06,
"loss": 1.7004,
"num_input_tokens_seen": 69730304,
"step": 266
},
{
"epoch": 0.06587712805329386,
"grad_norm": 0.24755661189556122,
"learning_rate": 9.18382156729481e-06,
"loss": 1.7464,
"num_input_tokens_seen": 69992448,
"step": 267
},
{
"epoch": 0.06612385886997287,
"grad_norm": 0.31840574741363525,
"learning_rate": 9.174239316317034e-06,
"loss": 1.4537,
"num_input_tokens_seen": 70254592,
"step": 268
},
{
"epoch": 0.06637058968665187,
"grad_norm": 0.2087324857711792,
"learning_rate": 9.164606203550498e-06,
"loss": 1.8524,
"num_input_tokens_seen": 70516736,
"step": 269
},
{
"epoch": 0.06661732050333087,
"grad_norm": 0.5688844919204712,
"learning_rate": 9.154922346371641e-06,
"loss": 1.868,
"num_input_tokens_seen": 70778880,
"step": 270
},
{
"epoch": 0.06686405132000987,
"grad_norm": 0.28839144110679626,
"learning_rate": 9.145187862775208e-06,
"loss": 1.8603,
"num_input_tokens_seen": 71041024,
"step": 271
},
{
"epoch": 0.06711078213668888,
"grad_norm": 0.3128485083580017,
"learning_rate": 9.13540287137281e-06,
"loss": 1.3797,
"num_input_tokens_seen": 71303168,
"step": 272
},
{
"epoch": 0.06735751295336788,
"grad_norm": 0.3282206952571869,
"learning_rate": 9.125567491391476e-06,
"loss": 1.5653,
"num_input_tokens_seen": 71565312,
"step": 273
},
{
"epoch": 0.06760424377004688,
"grad_norm": 0.4496432840824127,
"learning_rate": 9.115681842672211e-06,
"loss": 1.6801,
"num_input_tokens_seen": 71827456,
"step": 274
},
{
"epoch": 0.06785097458672588,
"grad_norm": 0.32268252968788147,
"learning_rate": 9.10574604566852e-06,
"loss": 2.2858,
"num_input_tokens_seen": 72089600,
"step": 275
},
{
"epoch": 0.06809770540340489,
"grad_norm": 0.2562284767627716,
"learning_rate": 9.09576022144496e-06,
"loss": 1.4747,
"num_input_tokens_seen": 72351744,
"step": 276
},
{
"epoch": 0.06834443622008389,
"grad_norm": 0.23939964175224304,
"learning_rate": 9.085724491675642e-06,
"loss": 1.5631,
"num_input_tokens_seen": 72613888,
"step": 277
},
{
"epoch": 0.06859116703676289,
"grad_norm": 0.4609747529029846,
"learning_rate": 9.07563897864277e-06,
"loss": 1.7264,
"num_input_tokens_seen": 72876032,
"step": 278
},
{
"epoch": 0.0688378978534419,
"grad_norm": 0.3924141824245453,
"learning_rate": 9.065503805235139e-06,
"loss": 1.4072,
"num_input_tokens_seen": 73138176,
"step": 279
},
{
"epoch": 0.0690846286701209,
"grad_norm": 0.29724472761154175,
"learning_rate": 9.055319094946633e-06,
"loss": 1.3118,
"num_input_tokens_seen": 73400320,
"step": 280
},
{
"epoch": 0.0693313594867999,
"grad_norm": 0.4114074110984802,
"learning_rate": 9.045084971874738e-06,
"loss": 1.9738,
"num_input_tokens_seen": 73662464,
"step": 281
},
{
"epoch": 0.0695780903034789,
"grad_norm": 0.23386453092098236,
"learning_rate": 9.03480156071901e-06,
"loss": 1.8462,
"num_input_tokens_seen": 73924608,
"step": 282
},
{
"epoch": 0.0698248211201579,
"grad_norm": 0.22296760976314545,
"learning_rate": 9.02446898677957e-06,
"loss": 1.7928,
"num_input_tokens_seen": 74186752,
"step": 283
},
{
"epoch": 0.07007155193683691,
"grad_norm": 0.2875213027000427,
"learning_rate": 9.014087375955574e-06,
"loss": 1.7923,
"num_input_tokens_seen": 74448896,
"step": 284
},
{
"epoch": 0.07031828275351591,
"grad_norm": 0.19052010774612427,
"learning_rate": 9.003656854743667e-06,
"loss": 1.528,
"num_input_tokens_seen": 74711040,
"step": 285
},
{
"epoch": 0.07056501357019491,
"grad_norm": 0.3048911392688751,
"learning_rate": 8.993177550236464e-06,
"loss": 1.831,
"num_input_tokens_seen": 74973184,
"step": 286
},
{
"epoch": 0.07081174438687392,
"grad_norm": 0.17250804603099823,
"learning_rate": 8.982649590120982e-06,
"loss": 1.1951,
"num_input_tokens_seen": 75235328,
"step": 287
},
{
"epoch": 0.07105847520355292,
"grad_norm": 0.3263763189315796,
"learning_rate": 8.972073102677091e-06,
"loss": 1.7192,
"num_input_tokens_seen": 75497472,
"step": 288
},
{
"epoch": 0.07130520602023192,
"grad_norm": 0.3392772376537323,
"learning_rate": 8.961448216775955e-06,
"loss": 1.9796,
"num_input_tokens_seen": 75759616,
"step": 289
},
{
"epoch": 0.07155193683691093,
"grad_norm": 0.3611414432525635,
"learning_rate": 8.950775061878453e-06,
"loss": 2.015,
"num_input_tokens_seen": 76021760,
"step": 290
},
{
"epoch": 0.07179866765358993,
"grad_norm": 0.1821010261774063,
"learning_rate": 8.94005376803361e-06,
"loss": 1.3525,
"num_input_tokens_seen": 76283904,
"step": 291
},
{
"epoch": 0.07204539847026893,
"grad_norm": 0.5545079112052917,
"learning_rate": 8.92928446587701e-06,
"loss": 1.6759,
"num_input_tokens_seen": 76546048,
"step": 292
},
{
"epoch": 0.07229212928694793,
"grad_norm": 0.3202209174633026,
"learning_rate": 8.9184672866292e-06,
"loss": 1.4154,
"num_input_tokens_seen": 76808192,
"step": 293
},
{
"epoch": 0.07253886010362694,
"grad_norm": 0.2715050280094147,
"learning_rate": 8.907602362094094e-06,
"loss": 1.6875,
"num_input_tokens_seen": 77070336,
"step": 294
},
{
"epoch": 0.07278559092030594,
"grad_norm": 0.2807007133960724,
"learning_rate": 8.896689824657371e-06,
"loss": 1.7927,
"num_input_tokens_seen": 77332480,
"step": 295
},
{
"epoch": 0.07303232173698494,
"grad_norm": 0.2917633354663849,
"learning_rate": 8.885729807284855e-06,
"loss": 1.5705,
"num_input_tokens_seen": 77594624,
"step": 296
},
{
"epoch": 0.07327905255366396,
"grad_norm": 0.3167378604412079,
"learning_rate": 8.874722443520898e-06,
"loss": 1.9123,
"num_input_tokens_seen": 77856768,
"step": 297
},
{
"epoch": 0.07352578337034296,
"grad_norm": 0.30874526500701904,
"learning_rate": 8.863667867486756e-06,
"loss": 1.7957,
"num_input_tokens_seen": 78118912,
"step": 298
},
{
"epoch": 0.07377251418702196,
"grad_norm": 0.32000553607940674,
"learning_rate": 8.852566213878947e-06,
"loss": 1.9255,
"num_input_tokens_seen": 78381056,
"step": 299
},
{
"epoch": 0.07401924500370097,
"grad_norm": 0.3279455602169037,
"learning_rate": 8.841417617967618e-06,
"loss": 1.8232,
"num_input_tokens_seen": 78643200,
"step": 300
},
{
"epoch": 0.07426597582037997,
"grad_norm": 0.2578760087490082,
"learning_rate": 8.83022221559489e-06,
"loss": 1.5326,
"num_input_tokens_seen": 78905344,
"step": 301
},
{
"epoch": 0.07451270663705897,
"grad_norm": 0.21257418394088745,
"learning_rate": 8.818980143173212e-06,
"loss": 1.529,
"num_input_tokens_seen": 79167488,
"step": 302
},
{
"epoch": 0.07475943745373798,
"grad_norm": 0.3379907011985779,
"learning_rate": 8.807691537683685e-06,
"loss": 1.6849,
"num_input_tokens_seen": 79429632,
"step": 303
},
{
"epoch": 0.07500616827041698,
"grad_norm": 0.23352430760860443,
"learning_rate": 8.796356536674404e-06,
"loss": 1.5998,
"num_input_tokens_seen": 79691776,
"step": 304
},
{
"epoch": 0.07525289908709598,
"grad_norm": 0.4021840989589691,
"learning_rate": 8.784975278258783e-06,
"loss": 1.7475,
"num_input_tokens_seen": 79953920,
"step": 305
},
{
"epoch": 0.07549962990377498,
"grad_norm": 0.3122968375682831,
"learning_rate": 8.773547901113862e-06,
"loss": 1.8813,
"num_input_tokens_seen": 80216064,
"step": 306
},
{
"epoch": 0.07574636072045399,
"grad_norm": 0.25700807571411133,
"learning_rate": 8.762074544478622e-06,
"loss": 2.0497,
"num_input_tokens_seen": 80478208,
"step": 307
},
{
"epoch": 0.07599309153713299,
"grad_norm": 0.6370308995246887,
"learning_rate": 8.750555348152299e-06,
"loss": 1.6946,
"num_input_tokens_seen": 80740352,
"step": 308
},
{
"epoch": 0.07623982235381199,
"grad_norm": 0.22553326189517975,
"learning_rate": 8.73899045249266e-06,
"loss": 1.902,
"num_input_tokens_seen": 81002496,
"step": 309
},
{
"epoch": 0.076486553170491,
"grad_norm": 0.23086640238761902,
"learning_rate": 8.727379998414311e-06,
"loss": 1.629,
"num_input_tokens_seen": 81264640,
"step": 310
},
{
"epoch": 0.07673328398717,
"grad_norm": 0.4032231271266937,
"learning_rate": 8.715724127386971e-06,
"loss": 1.805,
"num_input_tokens_seen": 81526784,
"step": 311
},
{
"epoch": 0.076980014803849,
"grad_norm": 0.31547385454177856,
"learning_rate": 8.70402298143375e-06,
"loss": 1.6519,
"num_input_tokens_seen": 81788928,
"step": 312
},
{
"epoch": 0.077226745620528,
"grad_norm": 0.2974226176738739,
"learning_rate": 8.692276703129421e-06,
"loss": 1.6291,
"num_input_tokens_seen": 82051072,
"step": 313
},
{
"epoch": 0.077473476437207,
"grad_norm": 0.2320721596479416,
"learning_rate": 8.680485435598674e-06,
"loss": 1.5294,
"num_input_tokens_seen": 82313216,
"step": 314
},
{
"epoch": 0.07772020725388601,
"grad_norm": 0.26393061876296997,
"learning_rate": 8.668649322514382e-06,
"loss": 1.6567,
"num_input_tokens_seen": 82575360,
"step": 315
},
{
"epoch": 0.07796693807056501,
"grad_norm": 0.31006282567977905,
"learning_rate": 8.656768508095853e-06,
"loss": 1.3175,
"num_input_tokens_seen": 82837504,
"step": 316
},
{
"epoch": 0.07821366888724401,
"grad_norm": 0.30799600481987,
"learning_rate": 8.644843137107058e-06,
"loss": 1.6641,
"num_input_tokens_seen": 83099648,
"step": 317
},
{
"epoch": 0.07846039970392302,
"grad_norm": 0.2402356117963791,
"learning_rate": 8.632873354854881e-06,
"loss": 1.6,
"num_input_tokens_seen": 83361792,
"step": 318
},
{
"epoch": 0.07870713052060202,
"grad_norm": 0.2880563735961914,
"learning_rate": 8.620859307187339e-06,
"loss": 1.3359,
"num_input_tokens_seen": 83623936,
"step": 319
},
{
"epoch": 0.07895386133728102,
"grad_norm": 0.20638631284236908,
"learning_rate": 8.608801140491811e-06,
"loss": 1.3001,
"num_input_tokens_seen": 83886080,
"step": 320
},
{
"epoch": 0.07920059215396003,
"grad_norm": 0.18959663808345795,
"learning_rate": 8.596699001693257e-06,
"loss": 1.862,
"num_input_tokens_seen": 84148224,
"step": 321
},
{
"epoch": 0.07944732297063903,
"grad_norm": 0.24531440436840057,
"learning_rate": 8.584553038252415e-06,
"loss": 1.6415,
"num_input_tokens_seen": 84410368,
"step": 322
},
{
"epoch": 0.07969405378731803,
"grad_norm": 0.2331283986568451,
"learning_rate": 8.572363398164017e-06,
"loss": 1.5621,
"num_input_tokens_seen": 84672512,
"step": 323
},
{
"epoch": 0.07994078460399703,
"grad_norm": 0.19714899361133575,
"learning_rate": 8.560130229954985e-06,
"loss": 1.9105,
"num_input_tokens_seen": 84934656,
"step": 324
},
{
"epoch": 0.08018751542067604,
"grad_norm": 0.37617865204811096,
"learning_rate": 8.547853682682605e-06,
"loss": 1.4315,
"num_input_tokens_seen": 85196800,
"step": 325
},
{
"epoch": 0.08043424623735504,
"grad_norm": 0.38534656167030334,
"learning_rate": 8.535533905932739e-06,
"loss": 1.8205,
"num_input_tokens_seen": 85458944,
"step": 326
},
{
"epoch": 0.08068097705403404,
"grad_norm": 0.3376696705818176,
"learning_rate": 8.523171049817974e-06,
"loss": 1.668,
"num_input_tokens_seen": 85721088,
"step": 327
},
{
"epoch": 0.08092770787071306,
"grad_norm": 0.3171237111091614,
"learning_rate": 8.510765264975813e-06,
"loss": 1.474,
"num_input_tokens_seen": 85983232,
"step": 328
},
{
"epoch": 0.08117443868739206,
"grad_norm": 0.40522676706314087,
"learning_rate": 8.498316702566828e-06,
"loss": 1.828,
"num_input_tokens_seen": 86245376,
"step": 329
},
{
"epoch": 0.08142116950407106,
"grad_norm": 0.29329410195350647,
"learning_rate": 8.485825514272824e-06,
"loss": 1.5659,
"num_input_tokens_seen": 86507520,
"step": 330
},
{
"epoch": 0.08166790032075007,
"grad_norm": 0.457561194896698,
"learning_rate": 8.473291852294986e-06,
"loss": 2.0202,
"num_input_tokens_seen": 86769664,
"step": 331
},
{
"epoch": 0.08191463113742907,
"grad_norm": 0.3520510792732239,
"learning_rate": 8.460715869352035e-06,
"loss": 1.5418,
"num_input_tokens_seen": 87031808,
"step": 332
},
{
"epoch": 0.08216136195410807,
"grad_norm": 0.24725475907325745,
"learning_rate": 8.44809771867835e-06,
"loss": 1.5912,
"num_input_tokens_seen": 87293952,
"step": 333
},
{
"epoch": 0.08240809277078708,
"grad_norm": 0.27952858805656433,
"learning_rate": 8.435437554022116e-06,
"loss": 2.0136,
"num_input_tokens_seen": 87556096,
"step": 334
},
{
"epoch": 0.08265482358746608,
"grad_norm": 0.3019633889198303,
"learning_rate": 8.422735529643445e-06,
"loss": 1.7385,
"num_input_tokens_seen": 87818240,
"step": 335
},
{
"epoch": 0.08290155440414508,
"grad_norm": 0.27266594767570496,
"learning_rate": 8.409991800312493e-06,
"loss": 1.2272,
"num_input_tokens_seen": 88080384,
"step": 336
},
{
"epoch": 0.08314828522082408,
"grad_norm": 0.23454353213310242,
"learning_rate": 8.397206521307584e-06,
"loss": 2.0053,
"num_input_tokens_seen": 88342528,
"step": 337
},
{
"epoch": 0.08339501603750309,
"grad_norm": 0.4828662574291229,
"learning_rate": 8.384379848413304e-06,
"loss": 1.9389,
"num_input_tokens_seen": 88604672,
"step": 338
},
{
"epoch": 0.08364174685418209,
"grad_norm": 0.3016226887702942,
"learning_rate": 8.371511937918616e-06,
"loss": 1.7783,
"num_input_tokens_seen": 88866816,
"step": 339
},
{
"epoch": 0.08388847767086109,
"grad_norm": 0.2821120619773865,
"learning_rate": 8.358602946614952e-06,
"loss": 2.0129,
"num_input_tokens_seen": 89128960,
"step": 340
},
{
"epoch": 0.0841352084875401,
"grad_norm": 0.26417189836502075,
"learning_rate": 8.345653031794292e-06,
"loss": 1.8609,
"num_input_tokens_seen": 89391104,
"step": 341
},
{
"epoch": 0.0843819393042191,
"grad_norm": 0.22533980011940002,
"learning_rate": 8.332662351247262e-06,
"loss": 1.9865,
"num_input_tokens_seen": 89653248,
"step": 342
},
{
"epoch": 0.0846286701208981,
"grad_norm": 0.21896155178546906,
"learning_rate": 8.319631063261209e-06,
"loss": 1.5262,
"num_input_tokens_seen": 89915392,
"step": 343
},
{
"epoch": 0.0848754009375771,
"grad_norm": 0.3828951120376587,
"learning_rate": 8.30655932661826e-06,
"loss": 1.8122,
"num_input_tokens_seen": 90177536,
"step": 344
},
{
"epoch": 0.0851221317542561,
"grad_norm": 0.2111983597278595,
"learning_rate": 8.293447300593402e-06,
"loss": 1.7962,
"num_input_tokens_seen": 90439680,
"step": 345
},
{
"epoch": 0.08536886257093511,
"grad_norm": 0.34530311822891235,
"learning_rate": 8.280295144952537e-06,
"loss": 1.8614,
"num_input_tokens_seen": 90701824,
"step": 346
},
{
"epoch": 0.08561559338761411,
"grad_norm": 0.3356333374977112,
"learning_rate": 8.267103019950529e-06,
"loss": 1.688,
"num_input_tokens_seen": 90963968,
"step": 347
},
{
"epoch": 0.08586232420429311,
"grad_norm": 0.17506998777389526,
"learning_rate": 8.253871086329255e-06,
"loss": 1.1625,
"num_input_tokens_seen": 91226112,
"step": 348
},
{
"epoch": 0.08610905502097212,
"grad_norm": 0.26492053270339966,
"learning_rate": 8.240599505315656e-06,
"loss": 1.8381,
"num_input_tokens_seen": 91488256,
"step": 349
},
{
"epoch": 0.08635578583765112,
"grad_norm": 0.4436925947666168,
"learning_rate": 8.227288438619754e-06,
"loss": 1.2551,
"num_input_tokens_seen": 91750400,
"step": 350
},
{
"epoch": 0.08660251665433012,
"grad_norm": 0.29770198464393616,
"learning_rate": 8.213938048432697e-06,
"loss": 2.0214,
"num_input_tokens_seen": 92012544,
"step": 351
},
{
"epoch": 0.08684924747100913,
"grad_norm": 0.2671870291233063,
"learning_rate": 8.200548497424779e-06,
"loss": 1.8595,
"num_input_tokens_seen": 92274688,
"step": 352
},
{
"epoch": 0.08709597828768813,
"grad_norm": 0.3322468101978302,
"learning_rate": 8.18711994874345e-06,
"loss": 1.8408,
"num_input_tokens_seen": 92536832,
"step": 353
},
{
"epoch": 0.08734270910436713,
"grad_norm": 0.2707277834415436,
"learning_rate": 8.173652566011339e-06,
"loss": 1.8141,
"num_input_tokens_seen": 92798976,
"step": 354
},
{
"epoch": 0.08758943992104613,
"grad_norm": 0.3420428931713104,
"learning_rate": 8.160146513324256e-06,
"loss": 1.7292,
"num_input_tokens_seen": 93061120,
"step": 355
},
{
"epoch": 0.08783617073772514,
"grad_norm": 0.34296715259552,
"learning_rate": 8.146601955249187e-06,
"loss": 1.5302,
"num_input_tokens_seen": 93323264,
"step": 356
},
{
"epoch": 0.08808290155440414,
"grad_norm": 0.2963867783546448,
"learning_rate": 8.133019056822303e-06,
"loss": 1.6297,
"num_input_tokens_seen": 93585408,
"step": 357
},
{
"epoch": 0.08832963237108314,
"grad_norm": 0.2812798321247101,
"learning_rate": 8.119397983546932e-06,
"loss": 1.7256,
"num_input_tokens_seen": 93847552,
"step": 358
},
{
"epoch": 0.08857636318776216,
"grad_norm": 0.20225989818572998,
"learning_rate": 8.105738901391553e-06,
"loss": 1.5714,
"num_input_tokens_seen": 94109696,
"step": 359
},
{
"epoch": 0.08882309400444116,
"grad_norm": 0.3172900974750519,
"learning_rate": 8.092041976787772e-06,
"loss": 1.8771,
"num_input_tokens_seen": 94371840,
"step": 360
},
{
"epoch": 0.08906982482112016,
"grad_norm": 0.2378884255886078,
"learning_rate": 8.078307376628292e-06,
"loss": 1.739,
"num_input_tokens_seen": 94633984,
"step": 361
},
{
"epoch": 0.08931655563779917,
"grad_norm": 0.6169587969779968,
"learning_rate": 8.064535268264883e-06,
"loss": 1.5751,
"num_input_tokens_seen": 94896128,
"step": 362
},
{
"epoch": 0.08956328645447817,
"grad_norm": 0.2723526060581207,
"learning_rate": 8.05072581950634e-06,
"loss": 1.3679,
"num_input_tokens_seen": 95158272,
"step": 363
},
{
"epoch": 0.08981001727115717,
"grad_norm": 0.20204123854637146,
"learning_rate": 8.036879198616434e-06,
"loss": 1.3856,
"num_input_tokens_seen": 95420416,
"step": 364
},
{
"epoch": 0.09005674808783617,
"grad_norm": 0.3206169009208679,
"learning_rate": 8.022995574311876e-06,
"loss": 1.8262,
"num_input_tokens_seen": 95682560,
"step": 365
},
{
"epoch": 0.09030347890451518,
"grad_norm": 0.32242709398269653,
"learning_rate": 8.009075115760243e-06,
"loss": 1.7613,
"num_input_tokens_seen": 95944704,
"step": 366
},
{
"epoch": 0.09055020972119418,
"grad_norm": 0.2581716477870941,
"learning_rate": 7.99511799257793e-06,
"loss": 1.1449,
"num_input_tokens_seen": 96206848,
"step": 367
},
{
"epoch": 0.09079694053787318,
"grad_norm": 0.23341234028339386,
"learning_rate": 7.981124374828079e-06,
"loss": 1.7955,
"num_input_tokens_seen": 96468992,
"step": 368
},
{
"epoch": 0.09104367135455219,
"grad_norm": 0.3861933648586273,
"learning_rate": 7.967094433018508e-06,
"loss": 1.6175,
"num_input_tokens_seen": 96731136,
"step": 369
},
{
"epoch": 0.09129040217123119,
"grad_norm": 0.1654781550168991,
"learning_rate": 7.953028338099628e-06,
"loss": 1.7449,
"num_input_tokens_seen": 96993280,
"step": 370
},
{
"epoch": 0.09153713298791019,
"grad_norm": 0.28304633498191833,
"learning_rate": 7.938926261462366e-06,
"loss": 1.74,
"num_input_tokens_seen": 97255424,
"step": 371
},
{
"epoch": 0.0917838638045892,
"grad_norm": 0.4599458873271942,
"learning_rate": 7.92478837493608e-06,
"loss": 1.8308,
"num_input_tokens_seen": 97517568,
"step": 372
},
{
"epoch": 0.0920305946212682,
"grad_norm": 0.2703869342803955,
"learning_rate": 7.910614850786448e-06,
"loss": 2.0039,
"num_input_tokens_seen": 97779712,
"step": 373
},
{
"epoch": 0.0922773254379472,
"grad_norm": 0.4541519284248352,
"learning_rate": 7.896405861713393e-06,
"loss": 1.8527,
"num_input_tokens_seen": 98041856,
"step": 374
},
{
"epoch": 0.0925240562546262,
"grad_norm": 0.32874220609664917,
"learning_rate": 7.882161580848966e-06,
"loss": 1.3467,
"num_input_tokens_seen": 98304000,
"step": 375
},
{
"epoch": 0.0927707870713052,
"grad_norm": 0.39634859561920166,
"learning_rate": 7.86788218175523e-06,
"loss": 1.8598,
"num_input_tokens_seen": 98566144,
"step": 376
},
{
"epoch": 0.09301751788798421,
"grad_norm": 0.222642183303833,
"learning_rate": 7.85356783842216e-06,
"loss": 1.9958,
"num_input_tokens_seen": 98828288,
"step": 377
},
{
"epoch": 0.09326424870466321,
"grad_norm": 0.17015564441680908,
"learning_rate": 7.839218725265507e-06,
"loss": 1.4093,
"num_input_tokens_seen": 99090432,
"step": 378
},
{
"epoch": 0.09351097952134221,
"grad_norm": 0.2415931075811386,
"learning_rate": 7.82483501712469e-06,
"loss": 1.6474,
"num_input_tokens_seen": 99352576,
"step": 379
},
{
"epoch": 0.09375771033802122,
"grad_norm": 0.37360304594039917,
"learning_rate": 7.810416889260653e-06,
"loss": 1.6654,
"num_input_tokens_seen": 99614720,
"step": 380
},
{
"epoch": 0.09400444115470022,
"grad_norm": 0.8946660161018372,
"learning_rate": 7.795964517353734e-06,
"loss": 2.0143,
"num_input_tokens_seen": 99876864,
"step": 381
},
{
"epoch": 0.09425117197137922,
"grad_norm": 0.2802148759365082,
"learning_rate": 7.781478077501526e-06,
"loss": 1.8721,
"num_input_tokens_seen": 100139008,
"step": 382
},
{
"epoch": 0.09449790278805822,
"grad_norm": 0.44011056423187256,
"learning_rate": 7.76695774621672e-06,
"loss": 1.9178,
"num_input_tokens_seen": 100401152,
"step": 383
},
{
"epoch": 0.09474463360473723,
"grad_norm": 0.20782537758350372,
"learning_rate": 7.752403700424978e-06,
"loss": 2.0922,
"num_input_tokens_seen": 100663296,
"step": 384
},
{
"epoch": 0.09499136442141623,
"grad_norm": 0.2332392930984497,
"learning_rate": 7.737816117462752e-06,
"loss": 1.332,
"num_input_tokens_seen": 100925440,
"step": 385
},
{
"epoch": 0.09523809523809523,
"grad_norm": 0.333558589220047,
"learning_rate": 7.723195175075136e-06,
"loss": 1.7392,
"num_input_tokens_seen": 101187584,
"step": 386
},
{
"epoch": 0.09548482605477424,
"grad_norm": 0.48137280344963074,
"learning_rate": 7.7085410514137e-06,
"loss": 1.6437,
"num_input_tokens_seen": 101449728,
"step": 387
},
{
"epoch": 0.09573155687145324,
"grad_norm": 1.1044962406158447,
"learning_rate": 7.693853925034316e-06,
"loss": 1.336,
"num_input_tokens_seen": 101711872,
"step": 388
},
{
"epoch": 0.09597828768813224,
"grad_norm": 0.29922980070114136,
"learning_rate": 7.679133974894984e-06,
"loss": 1.7313,
"num_input_tokens_seen": 101974016,
"step": 389
},
{
"epoch": 0.09622501850481126,
"grad_norm": 0.18246975541114807,
"learning_rate": 7.66438138035365e-06,
"loss": 1.6867,
"num_input_tokens_seen": 102236160,
"step": 390
},
{
"epoch": 0.09647174932149026,
"grad_norm": 0.38045111298561096,
"learning_rate": 7.649596321166024e-06,
"loss": 1.8284,
"num_input_tokens_seen": 102498304,
"step": 391
},
{
"epoch": 0.09671848013816926,
"grad_norm": 0.35603606700897217,
"learning_rate": 7.634778977483389e-06,
"loss": 1.6896,
"num_input_tokens_seen": 102760448,
"step": 392
},
{
"epoch": 0.09696521095484827,
"grad_norm": 0.3689684569835663,
"learning_rate": 7.619929529850397e-06,
"loss": 1.8039,
"num_input_tokens_seen": 103022592,
"step": 393
},
{
"epoch": 0.09721194177152727,
"grad_norm": 0.23540283739566803,
"learning_rate": 7.605048159202884e-06,
"loss": 1.5043,
"num_input_tokens_seen": 103284736,
"step": 394
},
{
"epoch": 0.09745867258820627,
"grad_norm": 0.20255284011363983,
"learning_rate": 7.590135046865652e-06,
"loss": 1.7616,
"num_input_tokens_seen": 103546880,
"step": 395
},
{
"epoch": 0.09770540340488527,
"grad_norm": 0.356922447681427,
"learning_rate": 7.575190374550272e-06,
"loss": 1.6655,
"num_input_tokens_seen": 103809024,
"step": 396
},
{
"epoch": 0.09795213422156428,
"grad_norm": 0.227988600730896,
"learning_rate": 7.560214324352858e-06,
"loss": 1.8386,
"num_input_tokens_seen": 104071168,
"step": 397
},
{
"epoch": 0.09819886503824328,
"grad_norm": 0.20468367636203766,
"learning_rate": 7.545207078751858e-06,
"loss": 1.7277,
"num_input_tokens_seen": 104333312,
"step": 398
},
{
"epoch": 0.09844559585492228,
"grad_norm": 0.2872495651245117,
"learning_rate": 7.530168820605819e-06,
"loss": 1.5949,
"num_input_tokens_seen": 104595456,
"step": 399
},
{
"epoch": 0.09869232667160129,
"grad_norm": 0.3156526982784271,
"learning_rate": 7.515099733151177e-06,
"loss": 1.7387,
"num_input_tokens_seen": 104857600,
"step": 400
},
{
"epoch": 0.09893905748828029,
"grad_norm": 0.22332081198692322,
"learning_rate": 7.500000000000001e-06,
"loss": 1.814,
"num_input_tokens_seen": 105119744,
"step": 401
},
{
"epoch": 0.09918578830495929,
"grad_norm": 0.2750714421272278,
"learning_rate": 7.484869805137778e-06,
"loss": 1.7958,
"num_input_tokens_seen": 105381888,
"step": 402
},
{
"epoch": 0.0994325191216383,
"grad_norm": 0.30154484510421753,
"learning_rate": 7.469709332921155e-06,
"loss": 1.7635,
"num_input_tokens_seen": 105644032,
"step": 403
},
{
"epoch": 0.0996792499383173,
"grad_norm": 0.32109102606773376,
"learning_rate": 7.454518768075705e-06,
"loss": 2.0223,
"num_input_tokens_seen": 105906176,
"step": 404
},
{
"epoch": 0.0999259807549963,
"grad_norm": 0.24551673233509064,
"learning_rate": 7.4392982956936644e-06,
"loss": 1.9326,
"num_input_tokens_seen": 106168320,
"step": 405
},
{
"epoch": 0.1001727115716753,
"grad_norm": 0.15664780139923096,
"learning_rate": 7.424048101231687e-06,
"loss": 1.5685,
"num_input_tokens_seen": 106430464,
"step": 406
},
{
"epoch": 0.1004194423883543,
"grad_norm": 0.32335466146469116,
"learning_rate": 7.408768370508577e-06,
"loss": 1.6535,
"num_input_tokens_seen": 106692608,
"step": 407
},
{
"epoch": 0.10066617320503331,
"grad_norm": 0.2537672221660614,
"learning_rate": 7.393459289703035e-06,
"loss": 1.972,
"num_input_tokens_seen": 106954752,
"step": 408
},
{
"epoch": 0.10091290402171231,
"grad_norm": 0.35351064801216125,
"learning_rate": 7.378121045351378e-06,
"loss": 1.7374,
"num_input_tokens_seen": 107216896,
"step": 409
},
{
"epoch": 0.10115963483839131,
"grad_norm": 0.23979292809963226,
"learning_rate": 7.362753824345271e-06,
"loss": 1.4493,
"num_input_tokens_seen": 107479040,
"step": 410
},
{
"epoch": 0.10140636565507032,
"grad_norm": 0.31650328636169434,
"learning_rate": 7.347357813929455e-06,
"loss": 1.4775,
"num_input_tokens_seen": 107741184,
"step": 411
},
{
"epoch": 0.10165309647174932,
"grad_norm": 0.3928271234035492,
"learning_rate": 7.3319332016994575e-06,
"loss": 1.5661,
"num_input_tokens_seen": 108003328,
"step": 412
},
{
"epoch": 0.10189982728842832,
"grad_norm": 0.23190626502037048,
"learning_rate": 7.31648017559931e-06,
"loss": 1.5938,
"num_input_tokens_seen": 108265472,
"step": 413
},
{
"epoch": 0.10214655810510732,
"grad_norm": 0.3870755136013031,
"learning_rate": 7.300998923919259e-06,
"loss": 1.9648,
"num_input_tokens_seen": 108527616,
"step": 414
},
{
"epoch": 0.10239328892178633,
"grad_norm": 0.2155226618051529,
"learning_rate": 7.285489635293472e-06,
"loss": 1.6274,
"num_input_tokens_seen": 108789760,
"step": 415
},
{
"epoch": 0.10264001973846533,
"grad_norm": 0.4175530970096588,
"learning_rate": 7.269952498697734e-06,
"loss": 1.3114,
"num_input_tokens_seen": 109051904,
"step": 416
},
{
"epoch": 0.10288675055514433,
"grad_norm": 0.2762328088283539,
"learning_rate": 7.254387703447154e-06,
"loss": 1.4719,
"num_input_tokens_seen": 109314048,
"step": 417
},
{
"epoch": 0.10313348137182334,
"grad_norm": 0.3410496711730957,
"learning_rate": 7.238795439193849e-06,
"loss": 1.4193,
"num_input_tokens_seen": 109576192,
"step": 418
},
{
"epoch": 0.10338021218850234,
"grad_norm": 0.25268110632896423,
"learning_rate": 7.223175895924638e-06,
"loss": 1.5439,
"num_input_tokens_seen": 109838336,
"step": 419
},
{
"epoch": 0.10362694300518134,
"grad_norm": 0.23334471881389618,
"learning_rate": 7.207529263958727e-06,
"loss": 1.8237,
"num_input_tokens_seen": 110100480,
"step": 420
},
{
"epoch": 0.10387367382186036,
"grad_norm": 0.2891489863395691,
"learning_rate": 7.191855733945388e-06,
"loss": 1.5014,
"num_input_tokens_seen": 110362624,
"step": 421
},
{
"epoch": 0.10412040463853936,
"grad_norm": 0.21520870923995972,
"learning_rate": 7.176155496861639e-06,
"loss": 1.6988,
"num_input_tokens_seen": 110624768,
"step": 422
},
{
"epoch": 0.10436713545521836,
"grad_norm": 0.6229656934738159,
"learning_rate": 7.160428744009913e-06,
"loss": 1.4534,
"num_input_tokens_seen": 110886912,
"step": 423
},
{
"epoch": 0.10461386627189737,
"grad_norm": 0.2720300257205963,
"learning_rate": 7.1446756670157306e-06,
"loss": 1.88,
"num_input_tokens_seen": 111149056,
"step": 424
},
{
"epoch": 0.10486059708857637,
"grad_norm": 0.37459537386894226,
"learning_rate": 7.128896457825364e-06,
"loss": 1.5503,
"num_input_tokens_seen": 111411200,
"step": 425
},
{
"epoch": 0.10510732790525537,
"grad_norm": 0.1883237063884735,
"learning_rate": 7.113091308703498e-06,
"loss": 1.6568,
"num_input_tokens_seen": 111673344,
"step": 426
},
{
"epoch": 0.10535405872193437,
"grad_norm": 0.2526542842388153,
"learning_rate": 7.0972604122308865e-06,
"loss": 1.9429,
"num_input_tokens_seen": 111935488,
"step": 427
},
{
"epoch": 0.10560078953861338,
"grad_norm": 0.14260494709014893,
"learning_rate": 7.081403961302007e-06,
"loss": 1.5881,
"num_input_tokens_seen": 112197632,
"step": 428
},
{
"epoch": 0.10584752035529238,
"grad_norm": 0.3512221574783325,
"learning_rate": 7.06552214912271e-06,
"loss": 1.6881,
"num_input_tokens_seen": 112459776,
"step": 429
},
{
"epoch": 0.10609425117197138,
"grad_norm": 0.3587039113044739,
"learning_rate": 7.049615169207864e-06,
"loss": 1.9665,
"num_input_tokens_seen": 112721920,
"step": 430
},
{
"epoch": 0.10634098198865038,
"grad_norm": 0.4645310044288635,
"learning_rate": 7.033683215379002e-06,
"loss": 1.667,
"num_input_tokens_seen": 112984064,
"step": 431
},
{
"epoch": 0.10658771280532939,
"grad_norm": 0.1917354017496109,
"learning_rate": 7.0177264817619514e-06,
"loss": 1.2185,
"num_input_tokens_seen": 113246208,
"step": 432
},
{
"epoch": 0.10683444362200839,
"grad_norm": 0.376539945602417,
"learning_rate": 7.0017451627844765e-06,
"loss": 1.4775,
"num_input_tokens_seen": 113508352,
"step": 433
},
{
"epoch": 0.1070811744386874,
"grad_norm": 0.3137195408344269,
"learning_rate": 6.985739453173903e-06,
"loss": 1.9464,
"num_input_tokens_seen": 113770496,
"step": 434
},
{
"epoch": 0.1073279052553664,
"grad_norm": 0.42510855197906494,
"learning_rate": 6.9697095479547564e-06,
"loss": 1.4709,
"num_input_tokens_seen": 114032640,
"step": 435
},
{
"epoch": 0.1075746360720454,
"grad_norm": 0.3661745488643646,
"learning_rate": 6.953655642446368e-06,
"loss": 1.5647,
"num_input_tokens_seen": 114294784,
"step": 436
},
{
"epoch": 0.1078213668887244,
"grad_norm": 0.3004036843776703,
"learning_rate": 6.9375779322605154e-06,
"loss": 1.5687,
"num_input_tokens_seen": 114556928,
"step": 437
},
{
"epoch": 0.1080680977054034,
"grad_norm": 0.3142424523830414,
"learning_rate": 6.921476613299018e-06,
"loss": 1.8035,
"num_input_tokens_seen": 114819072,
"step": 438
},
{
"epoch": 0.10831482852208241,
"grad_norm": 0.2967626452445984,
"learning_rate": 6.905351881751372e-06,
"loss": 1.789,
"num_input_tokens_seen": 115081216,
"step": 439
},
{
"epoch": 0.10856155933876141,
"grad_norm": 0.23613819479942322,
"learning_rate": 6.889203934092337e-06,
"loss": 1.6938,
"num_input_tokens_seen": 115343360,
"step": 440
},
{
"epoch": 0.10880829015544041,
"grad_norm": 0.3341924548149109,
"learning_rate": 6.873032967079562e-06,
"loss": 1.6346,
"num_input_tokens_seen": 115605504,
"step": 441
},
{
"epoch": 0.10905502097211942,
"grad_norm": 0.17042715847492218,
"learning_rate": 6.856839177751175e-06,
"loss": 1.7458,
"num_input_tokens_seen": 115867648,
"step": 442
},
{
"epoch": 0.10930175178879842,
"grad_norm": 0.25520455837249756,
"learning_rate": 6.840622763423391e-06,
"loss": 2.0416,
"num_input_tokens_seen": 116129792,
"step": 443
},
{
"epoch": 0.10954848260547742,
"grad_norm": 0.28210514783859253,
"learning_rate": 6.824383921688098e-06,
"loss": 1.5153,
"num_input_tokens_seen": 116391936,
"step": 444
},
{
"epoch": 0.10979521342215642,
"grad_norm": 0.31353867053985596,
"learning_rate": 6.808122850410461e-06,
"loss": 1.8329,
"num_input_tokens_seen": 116654080,
"step": 445
},
{
"epoch": 0.11004194423883543,
"grad_norm": 0.35945144295692444,
"learning_rate": 6.7918397477265e-06,
"loss": 1.9674,
"num_input_tokens_seen": 116916224,
"step": 446
},
{
"epoch": 0.11028867505551443,
"grad_norm": 0.1974448412656784,
"learning_rate": 6.775534812040686e-06,
"loss": 1.5327,
"num_input_tokens_seen": 117178368,
"step": 447
},
{
"epoch": 0.11053540587219343,
"grad_norm": 0.22307898104190826,
"learning_rate": 6.759208242023509e-06,
"loss": 1.3609,
"num_input_tokens_seen": 117440512,
"step": 448
},
{
"epoch": 0.11078213668887243,
"grad_norm": 0.2500917613506317,
"learning_rate": 6.7428602366090764e-06,
"loss": 1.5072,
"num_input_tokens_seen": 117702656,
"step": 449
},
{
"epoch": 0.11102886750555144,
"grad_norm": 0.27835747599601746,
"learning_rate": 6.7264909949926735e-06,
"loss": 1.7833,
"num_input_tokens_seen": 117964800,
"step": 450
},
{
"epoch": 0.11127559832223044,
"grad_norm": 0.3439980745315552,
"learning_rate": 6.710100716628345e-06,
"loss": 1.5402,
"num_input_tokens_seen": 118226944,
"step": 451
},
{
"epoch": 0.11152232913890944,
"grad_norm": 0.22713187336921692,
"learning_rate": 6.693689601226458e-06,
"loss": 1.4622,
"num_input_tokens_seen": 118489088,
"step": 452
},
{
"epoch": 0.11176905995558846,
"grad_norm": 0.4418184459209442,
"learning_rate": 6.677257848751276e-06,
"loss": 1.8627,
"num_input_tokens_seen": 118751232,
"step": 453
},
{
"epoch": 0.11201579077226746,
"grad_norm": 0.192154660820961,
"learning_rate": 6.6608056594185166e-06,
"loss": 1.4465,
"num_input_tokens_seen": 119013376,
"step": 454
},
{
"epoch": 0.11226252158894647,
"grad_norm": 0.15196916460990906,
"learning_rate": 6.644333233692917e-06,
"loss": 2.0922,
"num_input_tokens_seen": 119275520,
"step": 455
},
{
"epoch": 0.11250925240562547,
"grad_norm": 0.3137149512767792,
"learning_rate": 6.627840772285784e-06,
"loss": 1.5899,
"num_input_tokens_seen": 119537664,
"step": 456
},
{
"epoch": 0.11275598322230447,
"grad_norm": 0.26558390259742737,
"learning_rate": 6.611328476152557e-06,
"loss": 1.4563,
"num_input_tokens_seen": 119799808,
"step": 457
},
{
"epoch": 0.11300271403898347,
"grad_norm": 0.20857472717761993,
"learning_rate": 6.594796546490351e-06,
"loss": 2.042,
"num_input_tokens_seen": 120061952,
"step": 458
},
{
"epoch": 0.11324944485566248,
"grad_norm": 0.3639008700847626,
"learning_rate": 6.578245184735513e-06,
"loss": 1.7995,
"num_input_tokens_seen": 120324096,
"step": 459
},
{
"epoch": 0.11349617567234148,
"grad_norm": 0.3296775221824646,
"learning_rate": 6.561674592561164e-06,
"loss": 1.6771,
"num_input_tokens_seen": 120586240,
"step": 460
},
{
"epoch": 0.11374290648902048,
"grad_norm": 0.37127259373664856,
"learning_rate": 6.545084971874738e-06,
"loss": 1.941,
"num_input_tokens_seen": 120848384,
"step": 461
},
{
"epoch": 0.11398963730569948,
"grad_norm": 0.24075454473495483,
"learning_rate": 6.5284765248155295e-06,
"loss": 1.5899,
"num_input_tokens_seen": 121110528,
"step": 462
},
{
"epoch": 0.11423636812237849,
"grad_norm": 0.23995226621627808,
"learning_rate": 6.5118494537522235e-06,
"loss": 1.5628,
"num_input_tokens_seen": 121372672,
"step": 463
},
{
"epoch": 0.11448309893905749,
"grad_norm": 0.31708386540412903,
"learning_rate": 6.495203961280434e-06,
"loss": 1.5741,
"num_input_tokens_seen": 121634816,
"step": 464
},
{
"epoch": 0.11472982975573649,
"grad_norm": 0.33891844749450684,
"learning_rate": 6.4785402502202345e-06,
"loss": 1.8145,
"num_input_tokens_seen": 121896960,
"step": 465
},
{
"epoch": 0.1149765605724155,
"grad_norm": 0.29311618208885193,
"learning_rate": 6.461858523613684e-06,
"loss": 1.4072,
"num_input_tokens_seen": 122159104,
"step": 466
},
{
"epoch": 0.1152232913890945,
"grad_norm": 0.3098562955856323,
"learning_rate": 6.445158984722358e-06,
"loss": 1.4241,
"num_input_tokens_seen": 122421248,
"step": 467
},
{
"epoch": 0.1154700222057735,
"grad_norm": 0.2898317277431488,
"learning_rate": 6.428441837024868e-06,
"loss": 1.9755,
"num_input_tokens_seen": 122683392,
"step": 468
},
{
"epoch": 0.1157167530224525,
"grad_norm": 0.24661053717136383,
"learning_rate": 6.411707284214384e-06,
"loss": 1.9215,
"num_input_tokens_seen": 122945536,
"step": 469
},
{
"epoch": 0.1159634838391315,
"grad_norm": 0.19443027675151825,
"learning_rate": 6.3949555301961474e-06,
"loss": 1.7465,
"num_input_tokens_seen": 123207680,
"step": 470
},
{
"epoch": 0.11621021465581051,
"grad_norm": 0.3199927508831024,
"learning_rate": 6.378186779084996e-06,
"loss": 1.4374,
"num_input_tokens_seen": 123469824,
"step": 471
},
{
"epoch": 0.11645694547248951,
"grad_norm": 0.21007393300533295,
"learning_rate": 6.361401235202872e-06,
"loss": 1.8806,
"num_input_tokens_seen": 123731968,
"step": 472
},
{
"epoch": 0.11670367628916851,
"grad_norm": 0.3228000998497009,
"learning_rate": 6.344599103076329e-06,
"loss": 1.3583,
"num_input_tokens_seen": 123994112,
"step": 473
},
{
"epoch": 0.11695040710584752,
"grad_norm": 0.4336913824081421,
"learning_rate": 6.327780587434045e-06,
"loss": 1.5847,
"num_input_tokens_seen": 124256256,
"step": 474
},
{
"epoch": 0.11719713792252652,
"grad_norm": 0.28673404455184937,
"learning_rate": 6.310945893204324e-06,
"loss": 1.2474,
"num_input_tokens_seen": 124518400,
"step": 475
},
{
"epoch": 0.11744386873920552,
"grad_norm": 0.22935543954372406,
"learning_rate": 6.294095225512604e-06,
"loss": 1.5185,
"num_input_tokens_seen": 124780544,
"step": 476
},
{
"epoch": 0.11769059955588453,
"grad_norm": 0.22382254898548126,
"learning_rate": 6.277228789678953e-06,
"loss": 1.6705,
"num_input_tokens_seen": 125042688,
"step": 477
},
{
"epoch": 0.11793733037256353,
"grad_norm": 0.5391249060630798,
"learning_rate": 6.26034679121557e-06,
"loss": 1.682,
"num_input_tokens_seen": 125304832,
"step": 478
},
{
"epoch": 0.11818406118924253,
"grad_norm": 0.28263115882873535,
"learning_rate": 6.243449435824276e-06,
"loss": 2.1149,
"num_input_tokens_seen": 125566976,
"step": 479
},
{
"epoch": 0.11843079200592153,
"grad_norm": 0.24054385721683502,
"learning_rate": 6.2265369293940135e-06,
"loss": 1.801,
"num_input_tokens_seen": 125829120,
"step": 480
},
{
"epoch": 0.11867752282260054,
"grad_norm": 0.23487120866775513,
"learning_rate": 6.209609477998339e-06,
"loss": 1.6175,
"num_input_tokens_seen": 126091264,
"step": 481
},
{
"epoch": 0.11892425363927954,
"grad_norm": 0.30739638209342957,
"learning_rate": 6.192667287892905e-06,
"loss": 1.9698,
"num_input_tokens_seen": 126353408,
"step": 482
},
{
"epoch": 0.11917098445595854,
"grad_norm": 0.3312475085258484,
"learning_rate": 6.17571056551295e-06,
"loss": 2.1531,
"num_input_tokens_seen": 126615552,
"step": 483
},
{
"epoch": 0.11941771527263756,
"grad_norm": 0.3214891254901886,
"learning_rate": 6.158739517470786e-06,
"loss": 1.7976,
"num_input_tokens_seen": 126877696,
"step": 484
},
{
"epoch": 0.11966444608931656,
"grad_norm": 0.19428040087223053,
"learning_rate": 6.141754350553279e-06,
"loss": 1.5015,
"num_input_tokens_seen": 127139840,
"step": 485
},
{
"epoch": 0.11991117690599556,
"grad_norm": 0.18812116980552673,
"learning_rate": 6.124755271719326e-06,
"loss": 1.2669,
"num_input_tokens_seen": 127401984,
"step": 486
},
{
"epoch": 0.12015790772267457,
"grad_norm": 0.2792108654975891,
"learning_rate": 6.107742488097338e-06,
"loss": 1.6827,
"num_input_tokens_seen": 127664128,
"step": 487
},
{
"epoch": 0.12040463853935357,
"grad_norm": 0.32973822951316833,
"learning_rate": 6.090716206982714e-06,
"loss": 1.4348,
"num_input_tokens_seen": 127926272,
"step": 488
},
{
"epoch": 0.12065136935603257,
"grad_norm": 0.23311680555343628,
"learning_rate": 6.073676635835317e-06,
"loss": 1.7046,
"num_input_tokens_seen": 128188416,
"step": 489
},
{
"epoch": 0.12089810017271158,
"grad_norm": 0.4876193106174469,
"learning_rate": 6.056623982276945e-06,
"loss": 0.9838,
"num_input_tokens_seen": 128450560,
"step": 490
},
{
"epoch": 0.12114483098939058,
"grad_norm": 0.23447804152965546,
"learning_rate": 6.039558454088796e-06,
"loss": 1.5899,
"num_input_tokens_seen": 128712704,
"step": 491
},
{
"epoch": 0.12139156180606958,
"grad_norm": 0.31559622287750244,
"learning_rate": 6.022480259208951e-06,
"loss": 1.8169,
"num_input_tokens_seen": 128974848,
"step": 492
},
{
"epoch": 0.12163829262274858,
"grad_norm": 0.228166401386261,
"learning_rate": 6.005389605729824e-06,
"loss": 1.7,
"num_input_tokens_seen": 129236992,
"step": 493
},
{
"epoch": 0.12188502343942759,
"grad_norm": 0.25788137316703796,
"learning_rate": 5.988286701895631e-06,
"loss": 1.8412,
"num_input_tokens_seen": 129499136,
"step": 494
},
{
"epoch": 0.12213175425610659,
"grad_norm": 0.23868145048618317,
"learning_rate": 5.97117175609986e-06,
"loss": 1.4791,
"num_input_tokens_seen": 129761280,
"step": 495
},
{
"epoch": 0.12237848507278559,
"grad_norm": 0.22958144545555115,
"learning_rate": 5.954044976882725e-06,
"loss": 1.6004,
"num_input_tokens_seen": 130023424,
"step": 496
},
{
"epoch": 0.1226252158894646,
"grad_norm": 0.24195703864097595,
"learning_rate": 5.936906572928625e-06,
"loss": 1.8709,
"num_input_tokens_seen": 130285568,
"step": 497
},
{
"epoch": 0.1228719467061436,
"grad_norm": 0.35294461250305176,
"learning_rate": 5.919756753063601e-06,
"loss": 1.8445,
"num_input_tokens_seen": 130547712,
"step": 498
},
{
"epoch": 0.1231186775228226,
"grad_norm": 0.4138323664665222,
"learning_rate": 5.902595726252801e-06,
"loss": 1.9917,
"num_input_tokens_seen": 130809856,
"step": 499
},
{
"epoch": 0.1233654083395016,
"grad_norm": 0.7690160870552063,
"learning_rate": 5.885423701597918e-06,
"loss": 2.0276,
"num_input_tokens_seen": 131072000,
"step": 500
},
{
"epoch": 0.1236121391561806,
"grad_norm": 0.28807610273361206,
"learning_rate": 5.8682408883346535e-06,
"loss": 1.952,
"num_input_tokens_seen": 131334144,
"step": 501
},
{
"epoch": 0.12385886997285961,
"grad_norm": 0.35294869542121887,
"learning_rate": 5.851047495830163e-06,
"loss": 1.5884,
"num_input_tokens_seen": 131596288,
"step": 502
},
{
"epoch": 0.12410560078953861,
"grad_norm": 0.20024876296520233,
"learning_rate": 5.8338437335805124e-06,
"loss": 1.497,
"num_input_tokens_seen": 131858432,
"step": 503
},
{
"epoch": 0.12435233160621761,
"grad_norm": 0.3332088887691498,
"learning_rate": 5.816629811208112e-06,
"loss": 1.8099,
"num_input_tokens_seen": 132120576,
"step": 504
},
{
"epoch": 0.12459906242289662,
"grad_norm": 0.21323569118976593,
"learning_rate": 5.799405938459175e-06,
"loss": 2.0373,
"num_input_tokens_seen": 132382720,
"step": 505
},
{
"epoch": 0.12484579323957562,
"grad_norm": 0.2117518186569214,
"learning_rate": 5.782172325201155e-06,
"loss": 1.8806,
"num_input_tokens_seen": 132644864,
"step": 506
},
{
"epoch": 0.12509252405625462,
"grad_norm": 0.29583173990249634,
"learning_rate": 5.764929181420191e-06,
"loss": 1.7029,
"num_input_tokens_seen": 132907008,
"step": 507
},
{
"epoch": 0.12533925487293363,
"grad_norm": 0.3036397695541382,
"learning_rate": 5.747676717218549e-06,
"loss": 1.6968,
"num_input_tokens_seen": 133169152,
"step": 508
},
{
"epoch": 0.12558598568961263,
"grad_norm": 0.4999355971813202,
"learning_rate": 5.730415142812059e-06,
"loss": 1.9457,
"num_input_tokens_seen": 133431296,
"step": 509
},
{
"epoch": 0.12583271650629163,
"grad_norm": 0.2996709942817688,
"learning_rate": 5.7131446685275595e-06,
"loss": 1.8654,
"num_input_tokens_seen": 133693440,
"step": 510
},
{
"epoch": 0.12607944732297063,
"grad_norm": 0.3077060282230377,
"learning_rate": 5.695865504800328e-06,
"loss": 1.8855,
"num_input_tokens_seen": 133955584,
"step": 511
},
{
"epoch": 0.12632617813964964,
"grad_norm": 0.29848551750183105,
"learning_rate": 5.678577862171523e-06,
"loss": 1.8403,
"num_input_tokens_seen": 134217728,
"step": 512
},
{
"epoch": 0.12657290895632864,
"grad_norm": 0.3802272081375122,
"learning_rate": 5.661281951285613e-06,
"loss": 1.7932,
"num_input_tokens_seen": 134479872,
"step": 513
},
{
"epoch": 0.12681963977300764,
"grad_norm": 0.2726668417453766,
"learning_rate": 5.643977982887815e-06,
"loss": 1.399,
"num_input_tokens_seen": 134742016,
"step": 514
},
{
"epoch": 0.12706637058968664,
"grad_norm": 0.3101379871368408,
"learning_rate": 5.626666167821522e-06,
"loss": 1.917,
"num_input_tokens_seen": 135004160,
"step": 515
},
{
"epoch": 0.12731310140636565,
"grad_norm": 0.1991398185491562,
"learning_rate": 5.609346717025738e-06,
"loss": 1.6612,
"num_input_tokens_seen": 135266304,
"step": 516
},
{
"epoch": 0.12755983222304465,
"grad_norm": 0.2825518250465393,
"learning_rate": 5.592019841532507e-06,
"loss": 1.7776,
"num_input_tokens_seen": 135528448,
"step": 517
},
{
"epoch": 0.12780656303972365,
"grad_norm": 0.21650439500808716,
"learning_rate": 5.5746857524643335e-06,
"loss": 1.5227,
"num_input_tokens_seen": 135790592,
"step": 518
},
{
"epoch": 0.12805329385640266,
"grad_norm": 0.272237092256546,
"learning_rate": 5.557344661031628e-06,
"loss": 1.8114,
"num_input_tokens_seen": 136052736,
"step": 519
},
{
"epoch": 0.12830002467308166,
"grad_norm": 0.3208904564380646,
"learning_rate": 5.539996778530114e-06,
"loss": 1.4108,
"num_input_tokens_seen": 136314880,
"step": 520
},
{
"epoch": 0.12854675548976066,
"grad_norm": 0.21720334887504578,
"learning_rate": 5.522642316338268e-06,
"loss": 2.0095,
"num_input_tokens_seen": 136577024,
"step": 521
},
{
"epoch": 0.12879348630643966,
"grad_norm": 0.3421568274497986,
"learning_rate": 5.505281485914732e-06,
"loss": 1.8559,
"num_input_tokens_seen": 136839168,
"step": 522
},
{
"epoch": 0.12904021712311867,
"grad_norm": 0.2509710192680359,
"learning_rate": 5.487914498795748e-06,
"loss": 1.6357,
"num_input_tokens_seen": 137101312,
"step": 523
},
{
"epoch": 0.12928694793979767,
"grad_norm": 0.19334466755390167,
"learning_rate": 5.470541566592573e-06,
"loss": 1.1662,
"num_input_tokens_seen": 137363456,
"step": 524
},
{
"epoch": 0.12953367875647667,
"grad_norm": 0.22243958711624146,
"learning_rate": 5.453162900988902e-06,
"loss": 1.4116,
"num_input_tokens_seen": 137625600,
"step": 525
},
{
"epoch": 0.12978040957315568,
"grad_norm": 0.37442705035209656,
"learning_rate": 5.435778713738292e-06,
"loss": 2.0153,
"num_input_tokens_seen": 137887744,
"step": 526
},
{
"epoch": 0.13002714038983468,
"grad_norm": 0.24719154834747314,
"learning_rate": 5.41838921666158e-06,
"loss": 1.2343,
"num_input_tokens_seen": 138149888,
"step": 527
},
{
"epoch": 0.13027387120651368,
"grad_norm": 0.313684344291687,
"learning_rate": 5.400994621644294e-06,
"loss": 1.83,
"num_input_tokens_seen": 138412032,
"step": 528
},
{
"epoch": 0.13052060202319268,
"grad_norm": 0.3713296353816986,
"learning_rate": 5.383595140634093e-06,
"loss": 1.9503,
"num_input_tokens_seen": 138674176,
"step": 529
},
{
"epoch": 0.13076733283987169,
"grad_norm": 0.2469317466020584,
"learning_rate": 5.366190985638159e-06,
"loss": 1.1653,
"num_input_tokens_seen": 138936320,
"step": 530
},
{
"epoch": 0.13101406365655072,
"grad_norm": 0.3134791851043701,
"learning_rate": 5.348782368720627e-06,
"loss": 1.4074,
"num_input_tokens_seen": 139198464,
"step": 531
},
{
"epoch": 0.13126079447322972,
"grad_norm": 0.2104741483926773,
"learning_rate": 5.3313695020000026e-06,
"loss": 1.9757,
"num_input_tokens_seen": 139460608,
"step": 532
},
{
"epoch": 0.13150752528990872,
"grad_norm": 0.2387029379606247,
"learning_rate": 5.3139525976465675e-06,
"loss": 1.6213,
"num_input_tokens_seen": 139722752,
"step": 533
},
{
"epoch": 0.13175425610658772,
"grad_norm": 0.371168315410614,
"learning_rate": 5.296531867879809e-06,
"loss": 1.8211,
"num_input_tokens_seen": 139984896,
"step": 534
},
{
"epoch": 0.13200098692326673,
"grad_norm": 0.4188603162765503,
"learning_rate": 5.27910752496582e-06,
"loss": 1.7739,
"num_input_tokens_seen": 140247040,
"step": 535
},
{
"epoch": 0.13224771773994573,
"grad_norm": 0.38428500294685364,
"learning_rate": 5.2616797812147205e-06,
"loss": 1.8766,
"num_input_tokens_seen": 140509184,
"step": 536
},
{
"epoch": 0.13249444855662473,
"grad_norm": 0.27660655975341797,
"learning_rate": 5.244248848978067e-06,
"loss": 1.7614,
"num_input_tokens_seen": 140771328,
"step": 537
},
{
"epoch": 0.13274117937330374,
"grad_norm": 0.1527780443429947,
"learning_rate": 5.226814940646268e-06,
"loss": 1.2256,
"num_input_tokens_seen": 141033472,
"step": 538
},
{
"epoch": 0.13298791018998274,
"grad_norm": 0.264778733253479,
"learning_rate": 5.209378268645998e-06,
"loss": 1.4941,
"num_input_tokens_seen": 141295616,
"step": 539
},
{
"epoch": 0.13323464100666174,
"grad_norm": 0.3885688781738281,
"learning_rate": 5.1919390454376e-06,
"loss": 1.9089,
"num_input_tokens_seen": 141557760,
"step": 540
},
{
"epoch": 0.13348137182334074,
"grad_norm": 0.2816999554634094,
"learning_rate": 5.174497483512506e-06,
"loss": 1.6554,
"num_input_tokens_seen": 141819904,
"step": 541
},
{
"epoch": 0.13372810264001975,
"grad_norm": 0.2949479818344116,
"learning_rate": 5.157053795390642e-06,
"loss": 1.5687,
"num_input_tokens_seen": 142082048,
"step": 542
},
{
"epoch": 0.13397483345669875,
"grad_norm": 0.1631646454334259,
"learning_rate": 5.139608193617846e-06,
"loss": 1.117,
"num_input_tokens_seen": 142344192,
"step": 543
},
{
"epoch": 0.13422156427337775,
"grad_norm": 0.23090092837810516,
"learning_rate": 5.1221608907632665e-06,
"loss": 1.8237,
"num_input_tokens_seen": 142606336,
"step": 544
},
{
"epoch": 0.13446829509005676,
"grad_norm": 0.20347069203853607,
"learning_rate": 5.1047120994167855e-06,
"loss": 1.7979,
"num_input_tokens_seen": 142868480,
"step": 545
},
{
"epoch": 0.13471502590673576,
"grad_norm": 0.2656711935997009,
"learning_rate": 5.087262032186418e-06,
"loss": 1.8041,
"num_input_tokens_seen": 143130624,
"step": 546
},
{
"epoch": 0.13496175672341476,
"grad_norm": 0.40291059017181396,
"learning_rate": 5.069810901695727e-06,
"loss": 1.9054,
"num_input_tokens_seen": 143392768,
"step": 547
},
{
"epoch": 0.13520848754009376,
"grad_norm": 0.6440242528915405,
"learning_rate": 5.05235892058123e-06,
"loss": 1.5505,
"num_input_tokens_seen": 143654912,
"step": 548
},
{
"epoch": 0.13545521835677277,
"grad_norm": 0.2078663855791092,
"learning_rate": 5.034906301489808e-06,
"loss": 1.6307,
"num_input_tokens_seen": 143917056,
"step": 549
},
{
"epoch": 0.13570194917345177,
"grad_norm": 0.3324054777622223,
"learning_rate": 5.0174532570761194e-06,
"loss": 1.7519,
"num_input_tokens_seen": 144179200,
"step": 550
},
{
"epoch": 0.13594867999013077,
"grad_norm": 0.20945605635643005,
"learning_rate": 5e-06,
"loss": 1.7034,
"num_input_tokens_seen": 144441344,
"step": 551
},
{
"epoch": 0.13619541080680977,
"grad_norm": 0.3655446469783783,
"learning_rate": 4.982546742923883e-06,
"loss": 2.0479,
"num_input_tokens_seen": 144703488,
"step": 552
},
{
"epoch": 0.13644214162348878,
"grad_norm": 0.3826795518398285,
"learning_rate": 4.965093698510192e-06,
"loss": 1.454,
"num_input_tokens_seen": 144965632,
"step": 553
},
{
"epoch": 0.13668887244016778,
"grad_norm": 0.30656811594963074,
"learning_rate": 4.9476410794187726e-06,
"loss": 1.5023,
"num_input_tokens_seen": 145227776,
"step": 554
},
{
"epoch": 0.13693560325684678,
"grad_norm": 0.23264142870903015,
"learning_rate": 4.9301890983042744e-06,
"loss": 1.4307,
"num_input_tokens_seen": 145489920,
"step": 555
},
{
"epoch": 0.13718233407352579,
"grad_norm": 0.2861672341823578,
"learning_rate": 4.9127379678135825e-06,
"loss": 1.4308,
"num_input_tokens_seen": 145752064,
"step": 556
},
{
"epoch": 0.1374290648902048,
"grad_norm": 0.31259533762931824,
"learning_rate": 4.895287900583216e-06,
"loss": 2.0955,
"num_input_tokens_seen": 146014208,
"step": 557
},
{
"epoch": 0.1376757957068838,
"grad_norm": 0.33701300621032715,
"learning_rate": 4.877839109236735e-06,
"loss": 2.0646,
"num_input_tokens_seen": 146276352,
"step": 558
},
{
"epoch": 0.1379225265235628,
"grad_norm": 0.23440125584602356,
"learning_rate": 4.860391806382157e-06,
"loss": 1.3577,
"num_input_tokens_seen": 146538496,
"step": 559
},
{
"epoch": 0.1381692573402418,
"grad_norm": 0.39793065190315247,
"learning_rate": 4.842946204609359e-06,
"loss": 1.9013,
"num_input_tokens_seen": 146800640,
"step": 560
},
{
"epoch": 0.1384159881569208,
"grad_norm": 0.3499162495136261,
"learning_rate": 4.825502516487497e-06,
"loss": 1.9023,
"num_input_tokens_seen": 147062784,
"step": 561
},
{
"epoch": 0.1386627189735998,
"grad_norm": 0.28828251361846924,
"learning_rate": 4.8080609545624004e-06,
"loss": 1.446,
"num_input_tokens_seen": 147324928,
"step": 562
},
{
"epoch": 0.1389094497902788,
"grad_norm": 0.3065829575061798,
"learning_rate": 4.7906217313540035e-06,
"loss": 2.0544,
"num_input_tokens_seen": 147587072,
"step": 563
},
{
"epoch": 0.1391561806069578,
"grad_norm": 0.3566630780696869,
"learning_rate": 4.7731850593537316e-06,
"loss": 1.9503,
"num_input_tokens_seen": 147849216,
"step": 564
},
{
"epoch": 0.1394029114236368,
"grad_norm": 0.2425328940153122,
"learning_rate": 4.755751151021934e-06,
"loss": 1.7033,
"num_input_tokens_seen": 148111360,
"step": 565
},
{
"epoch": 0.1396496422403158,
"grad_norm": 0.2376437932252884,
"learning_rate": 4.738320218785281e-06,
"loss": 1.6898,
"num_input_tokens_seen": 148373504,
"step": 566
},
{
"epoch": 0.13989637305699482,
"grad_norm": 0.19275647401809692,
"learning_rate": 4.720892475034181e-06,
"loss": 2.0209,
"num_input_tokens_seen": 148635648,
"step": 567
},
{
"epoch": 0.14014310387367382,
"grad_norm": 0.3049376308917999,
"learning_rate": 4.703468132120193e-06,
"loss": 1.7352,
"num_input_tokens_seen": 148897792,
"step": 568
},
{
"epoch": 0.14038983469035282,
"grad_norm": 0.24015147984027863,
"learning_rate": 4.686047402353433e-06,
"loss": 1.6749,
"num_input_tokens_seen": 149159936,
"step": 569
},
{
"epoch": 0.14063656550703182,
"grad_norm": 0.3943753242492676,
"learning_rate": 4.668630498000001e-06,
"loss": 1.977,
"num_input_tokens_seen": 149422080,
"step": 570
},
{
"epoch": 0.14088329632371083,
"grad_norm": 0.18447957932949066,
"learning_rate": 4.651217631279374e-06,
"loss": 1.8602,
"num_input_tokens_seen": 149684224,
"step": 571
},
{
"epoch": 0.14113002714038983,
"grad_norm": 0.20968987047672272,
"learning_rate": 4.6338090143618435e-06,
"loss": 1.5058,
"num_input_tokens_seen": 149946368,
"step": 572
},
{
"epoch": 0.14137675795706883,
"grad_norm": 0.27084267139434814,
"learning_rate": 4.6164048593659076e-06,
"loss": 1.8903,
"num_input_tokens_seen": 150208512,
"step": 573
},
{
"epoch": 0.14162348877374784,
"grad_norm": 0.33901768922805786,
"learning_rate": 4.5990053783557066e-06,
"loss": 1.81,
"num_input_tokens_seen": 150470656,
"step": 574
},
{
"epoch": 0.14187021959042684,
"grad_norm": 0.38979673385620117,
"learning_rate": 4.581610783338424e-06,
"loss": 2.0141,
"num_input_tokens_seen": 150732800,
"step": 575
},
{
"epoch": 0.14211695040710584,
"grad_norm": 0.3750932216644287,
"learning_rate": 4.564221286261709e-06,
"loss": 1.6991,
"num_input_tokens_seen": 150994944,
"step": 576
},
{
"epoch": 0.14236368122378484,
"grad_norm": 0.2790539264678955,
"learning_rate": 4.546837099011101e-06,
"loss": 1.5295,
"num_input_tokens_seen": 151257088,
"step": 577
},
{
"epoch": 0.14261041204046385,
"grad_norm": 0.17121604084968567,
"learning_rate": 4.529458433407429e-06,
"loss": 1.3758,
"num_input_tokens_seen": 151519232,
"step": 578
},
{
"epoch": 0.14285714285714285,
"grad_norm": 1.2501734495162964,
"learning_rate": 4.512085501204254e-06,
"loss": 1.8476,
"num_input_tokens_seen": 151781376,
"step": 579
},
{
"epoch": 0.14310387367382185,
"grad_norm": 0.22766587138175964,
"learning_rate": 4.494718514085269e-06,
"loss": 1.0005,
"num_input_tokens_seen": 152043520,
"step": 580
},
{
"epoch": 0.14335060449050085,
"grad_norm": 0.46749159693717957,
"learning_rate": 4.477357683661734e-06,
"loss": 1.7644,
"num_input_tokens_seen": 152305664,
"step": 581
},
{
"epoch": 0.14359733530717986,
"grad_norm": 0.5128108859062195,
"learning_rate": 4.460003221469886e-06,
"loss": 1.6494,
"num_input_tokens_seen": 152567808,
"step": 582
},
{
"epoch": 0.14384406612385886,
"grad_norm": 0.35547828674316406,
"learning_rate": 4.442655338968373e-06,
"loss": 1.9043,
"num_input_tokens_seen": 152829952,
"step": 583
},
{
"epoch": 0.14409079694053786,
"grad_norm": 0.2643188536167145,
"learning_rate": 4.425314247535668e-06,
"loss": 1.8414,
"num_input_tokens_seen": 153092096,
"step": 584
},
{
"epoch": 0.14433752775721687,
"grad_norm": 0.36946919560432434,
"learning_rate": 4.4079801584674955e-06,
"loss": 1.6937,
"num_input_tokens_seen": 153354240,
"step": 585
},
{
"epoch": 0.14458425857389587,
"grad_norm": 0.30119383335113525,
"learning_rate": 4.390653282974264e-06,
"loss": 1.6792,
"num_input_tokens_seen": 153616384,
"step": 586
},
{
"epoch": 0.14483098939057487,
"grad_norm": 0.30609673261642456,
"learning_rate": 4.373333832178478e-06,
"loss": 1.7094,
"num_input_tokens_seen": 153878528,
"step": 587
},
{
"epoch": 0.14507772020725387,
"grad_norm": 0.32530122995376587,
"learning_rate": 4.356022017112187e-06,
"loss": 1.9982,
"num_input_tokens_seen": 154140672,
"step": 588
},
{
"epoch": 0.14532445102393288,
"grad_norm": 0.39551761746406555,
"learning_rate": 4.3387180487143875e-06,
"loss": 2.07,
"num_input_tokens_seen": 154402816,
"step": 589
},
{
"epoch": 0.14557118184061188,
"grad_norm": 0.45149853825569153,
"learning_rate": 4.321422137828479e-06,
"loss": 1.4199,
"num_input_tokens_seen": 154664960,
"step": 590
},
{
"epoch": 0.14581791265729088,
"grad_norm": 0.3984176814556122,
"learning_rate": 4.304134495199675e-06,
"loss": 1.5707,
"num_input_tokens_seen": 154927104,
"step": 591
},
{
"epoch": 0.14606464347396989,
"grad_norm": 0.3942200839519501,
"learning_rate": 4.286855331472442e-06,
"loss": 1.6478,
"num_input_tokens_seen": 155189248,
"step": 592
},
{
"epoch": 0.14631137429064892,
"grad_norm": 0.1897418200969696,
"learning_rate": 4.269584857187942e-06,
"loss": 1.4291,
"num_input_tokens_seen": 155451392,
"step": 593
},
{
"epoch": 0.14655810510732792,
"grad_norm": 0.262803852558136,
"learning_rate": 4.2523232827814534e-06,
"loss": 1.2587,
"num_input_tokens_seen": 155713536,
"step": 594
},
{
"epoch": 0.14680483592400692,
"grad_norm": 0.6395058631896973,
"learning_rate": 4.23507081857981e-06,
"loss": 1.3483,
"num_input_tokens_seen": 155975680,
"step": 595
},
{
"epoch": 0.14705156674068592,
"grad_norm": 0.25592687726020813,
"learning_rate": 4.217827674798845e-06,
"loss": 1.615,
"num_input_tokens_seen": 156237824,
"step": 596
},
{
"epoch": 0.14729829755736493,
"grad_norm": 0.22586092352867126,
"learning_rate": 4.200594061540827e-06,
"loss": 1.6813,
"num_input_tokens_seen": 156499968,
"step": 597
},
{
"epoch": 0.14754502837404393,
"grad_norm": 0.2651527225971222,
"learning_rate": 4.183370188791891e-06,
"loss": 1.416,
"num_input_tokens_seen": 156762112,
"step": 598
},
{
"epoch": 0.14779175919072293,
"grad_norm": 0.33769476413726807,
"learning_rate": 4.166156266419489e-06,
"loss": 1.7654,
"num_input_tokens_seen": 157024256,
"step": 599
},
{
"epoch": 0.14803849000740193,
"grad_norm": 0.3942760229110718,
"learning_rate": 4.148952504169839e-06,
"loss": 1.666,
"num_input_tokens_seen": 157286400,
"step": 600
},
{
"epoch": 0.14828522082408094,
"grad_norm": 0.315070241689682,
"learning_rate": 4.131759111665349e-06,
"loss": 1.6868,
"num_input_tokens_seen": 157548544,
"step": 601
},
{
"epoch": 0.14853195164075994,
"grad_norm": 0.30992016196250916,
"learning_rate": 4.114576298402085e-06,
"loss": 1.2569,
"num_input_tokens_seen": 157810688,
"step": 602
},
{
"epoch": 0.14877868245743894,
"grad_norm": 0.15681593120098114,
"learning_rate": 4.0974042737472005e-06,
"loss": 1.8132,
"num_input_tokens_seen": 158072832,
"step": 603
},
{
"epoch": 0.14902541327411795,
"grad_norm": 0.3081078827381134,
"learning_rate": 4.0802432469364e-06,
"loss": 1.8482,
"num_input_tokens_seen": 158334976,
"step": 604
},
{
"epoch": 0.14927214409079695,
"grad_norm": 0.2770363688468933,
"learning_rate": 4.063093427071376e-06,
"loss": 2.1245,
"num_input_tokens_seen": 158597120,
"step": 605
},
{
"epoch": 0.14951887490747595,
"grad_norm": 0.22125285863876343,
"learning_rate": 4.045955023117276e-06,
"loss": 2.0896,
"num_input_tokens_seen": 158859264,
"step": 606
},
{
"epoch": 0.14976560572415495,
"grad_norm": 0.2769063115119934,
"learning_rate": 4.028828243900141e-06,
"loss": 2.1542,
"num_input_tokens_seen": 159121408,
"step": 607
},
{
"epoch": 0.15001233654083396,
"grad_norm": 0.26015493273735046,
"learning_rate": 4.0117132981043695e-06,
"loss": 1.6067,
"num_input_tokens_seen": 159383552,
"step": 608
},
{
"epoch": 0.15025906735751296,
"grad_norm": 0.41176754236221313,
"learning_rate": 3.994610394270178e-06,
"loss": 1.942,
"num_input_tokens_seen": 159645696,
"step": 609
},
{
"epoch": 0.15050579817419196,
"grad_norm": 0.2698221206665039,
"learning_rate": 3.977519740791049e-06,
"loss": 1.8544,
"num_input_tokens_seen": 159907840,
"step": 610
},
{
"epoch": 0.15075252899087097,
"grad_norm": 0.21741293370723724,
"learning_rate": 3.960441545911205e-06,
"loss": 1.5599,
"num_input_tokens_seen": 160169984,
"step": 611
},
{
"epoch": 0.15099925980754997,
"grad_norm": 0.3728959560394287,
"learning_rate": 3.943376017723058e-06,
"loss": 1.3439,
"num_input_tokens_seen": 160432128,
"step": 612
},
{
"epoch": 0.15124599062422897,
"grad_norm": 0.36028945446014404,
"learning_rate": 3.926323364164684e-06,
"loss": 1.9217,
"num_input_tokens_seen": 160694272,
"step": 613
},
{
"epoch": 0.15149272144090797,
"grad_norm": 0.24226897954940796,
"learning_rate": 3.909283793017289e-06,
"loss": 1.6554,
"num_input_tokens_seen": 160956416,
"step": 614
},
{
"epoch": 0.15173945225758698,
"grad_norm": 0.41064637899398804,
"learning_rate": 3.892257511902664e-06,
"loss": 1.8607,
"num_input_tokens_seen": 161218560,
"step": 615
},
{
"epoch": 0.15198618307426598,
"grad_norm": 0.2579009234905243,
"learning_rate": 3.875244728280676e-06,
"loss": 1.4227,
"num_input_tokens_seen": 161480704,
"step": 616
},
{
"epoch": 0.15223291389094498,
"grad_norm": 0.26726585626602173,
"learning_rate": 3.8582456494467214e-06,
"loss": 1.5324,
"num_input_tokens_seen": 161742848,
"step": 617
},
{
"epoch": 0.15247964470762398,
"grad_norm": 0.25933149456977844,
"learning_rate": 3.841260482529215e-06,
"loss": 1.7812,
"num_input_tokens_seen": 162004992,
"step": 618
},
{
"epoch": 0.152726375524303,
"grad_norm": 0.4113217294216156,
"learning_rate": 3.82428943448705e-06,
"loss": 1.3672,
"num_input_tokens_seen": 162267136,
"step": 619
},
{
"epoch": 0.152973106340982,
"grad_norm": 0.20348288118839264,
"learning_rate": 3.8073327121070968e-06,
"loss": 1.0549,
"num_input_tokens_seen": 162529280,
"step": 620
},
{
"epoch": 0.153219837157661,
"grad_norm": 0.2775270640850067,
"learning_rate": 3.790390522001662e-06,
"loss": 1.5691,
"num_input_tokens_seen": 162791424,
"step": 621
},
{
"epoch": 0.15346656797434,
"grad_norm": 0.2500717043876648,
"learning_rate": 3.7734630706059873e-06,
"loss": 1.2996,
"num_input_tokens_seen": 163053568,
"step": 622
},
{
"epoch": 0.153713298791019,
"grad_norm": 0.4124068021774292,
"learning_rate": 3.756550564175727e-06,
"loss": 2.1117,
"num_input_tokens_seen": 163315712,
"step": 623
},
{
"epoch": 0.153960029607698,
"grad_norm": 0.20559923350811005,
"learning_rate": 3.7396532087844318e-06,
"loss": 1.1094,
"num_input_tokens_seen": 163577856,
"step": 624
},
{
"epoch": 0.154206760424377,
"grad_norm": 0.33458614349365234,
"learning_rate": 3.7227712103210485e-06,
"loss": 1.5924,
"num_input_tokens_seen": 163840000,
"step": 625
},
{
"epoch": 0.154453491241056,
"grad_norm": 0.27552902698516846,
"learning_rate": 3.705904774487396e-06,
"loss": 1.7425,
"num_input_tokens_seen": 164102144,
"step": 626
},
{
"epoch": 0.154700222057735,
"grad_norm": 0.2488897293806076,
"learning_rate": 3.6890541067956775e-06,
"loss": 2.0084,
"num_input_tokens_seen": 164364288,
"step": 627
},
{
"epoch": 0.154946952874414,
"grad_norm": 0.3026079535484314,
"learning_rate": 3.672219412565956e-06,
"loss": 1.7328,
"num_input_tokens_seen": 164626432,
"step": 628
},
{
"epoch": 0.15519368369109302,
"grad_norm": 0.3371894061565399,
"learning_rate": 3.655400896923672e-06,
"loss": 1.5856,
"num_input_tokens_seen": 164888576,
"step": 629
},
{
"epoch": 0.15544041450777202,
"grad_norm": 0.3499716520309448,
"learning_rate": 3.6385987647971287e-06,
"loss": 1.4779,
"num_input_tokens_seen": 165150720,
"step": 630
},
{
"epoch": 0.15568714532445102,
"grad_norm": 0.32119953632354736,
"learning_rate": 3.6218132209150047e-06,
"loss": 1.8302,
"num_input_tokens_seen": 165412864,
"step": 631
},
{
"epoch": 0.15593387614113002,
"grad_norm": 0.240133598446846,
"learning_rate": 3.6050444698038547e-06,
"loss": 1.6325,
"num_input_tokens_seen": 165675008,
"step": 632
},
{
"epoch": 0.15618060695780903,
"grad_norm": 0.25198155641555786,
"learning_rate": 3.5882927157856175e-06,
"loss": 2.0047,
"num_input_tokens_seen": 165937152,
"step": 633
},
{
"epoch": 0.15642733777448803,
"grad_norm": 0.19772285223007202,
"learning_rate": 3.571558162975133e-06,
"loss": 1.7201,
"num_input_tokens_seen": 166199296,
"step": 634
},
{
"epoch": 0.15667406859116703,
"grad_norm": 0.32129037380218506,
"learning_rate": 3.5548410152776414e-06,
"loss": 1.4001,
"num_input_tokens_seen": 166461440,
"step": 635
},
{
"epoch": 0.15692079940784603,
"grad_norm": 0.41397202014923096,
"learning_rate": 3.538141476386317e-06,
"loss": 1.7114,
"num_input_tokens_seen": 166723584,
"step": 636
},
{
"epoch": 0.15716753022452504,
"grad_norm": 0.3888271450996399,
"learning_rate": 3.521459749779769e-06,
"loss": 1.7102,
"num_input_tokens_seen": 166985728,
"step": 637
},
{
"epoch": 0.15741426104120404,
"grad_norm": 0.3346923887729645,
"learning_rate": 3.5047960387195673e-06,
"loss": 1.7585,
"num_input_tokens_seen": 167247872,
"step": 638
},
{
"epoch": 0.15766099185788304,
"grad_norm": 0.3371780216693878,
"learning_rate": 3.488150546247778e-06,
"loss": 1.532,
"num_input_tokens_seen": 167510016,
"step": 639
},
{
"epoch": 0.15790772267456205,
"grad_norm": 0.3255195915699005,
"learning_rate": 3.471523475184472e-06,
"loss": 2.0643,
"num_input_tokens_seen": 167772160,
"step": 640
},
{
"epoch": 0.15815445349124105,
"grad_norm": 0.3299016058444977,
"learning_rate": 3.4549150281252635e-06,
"loss": 1.7138,
"num_input_tokens_seen": 168034304,
"step": 641
},
{
"epoch": 0.15840118430792005,
"grad_norm": 0.4465411901473999,
"learning_rate": 3.4383254074388373e-06,
"loss": 1.4068,
"num_input_tokens_seen": 168296448,
"step": 642
},
{
"epoch": 0.15864791512459905,
"grad_norm": 0.2777688801288605,
"learning_rate": 3.4217548152644887e-06,
"loss": 1.7274,
"num_input_tokens_seen": 168558592,
"step": 643
},
{
"epoch": 0.15889464594127806,
"grad_norm": 0.3787316083908081,
"learning_rate": 3.40520345350965e-06,
"loss": 1.4899,
"num_input_tokens_seen": 168820736,
"step": 644
},
{
"epoch": 0.15914137675795706,
"grad_norm": 0.19709822535514832,
"learning_rate": 3.3886715238474454e-06,
"loss": 1.138,
"num_input_tokens_seen": 169082880,
"step": 645
},
{
"epoch": 0.15938810757463606,
"grad_norm": 0.5791448950767517,
"learning_rate": 3.372159227714218e-06,
"loss": 1.7617,
"num_input_tokens_seen": 169345024,
"step": 646
},
{
"epoch": 0.15963483839131506,
"grad_norm": 0.2917760908603668,
"learning_rate": 3.355666766307084e-06,
"loss": 1.4254,
"num_input_tokens_seen": 169607168,
"step": 647
},
{
"epoch": 0.15988156920799407,
"grad_norm": 0.2395298182964325,
"learning_rate": 3.339194340581485e-06,
"loss": 1.613,
"num_input_tokens_seen": 169869312,
"step": 648
},
{
"epoch": 0.16012830002467307,
"grad_norm": 0.4135853946208954,
"learning_rate": 3.322742151248726e-06,
"loss": 1.7439,
"num_input_tokens_seen": 170131456,
"step": 649
},
{
"epoch": 0.16037503084135207,
"grad_norm": 0.2076270878314972,
"learning_rate": 3.3063103987735433e-06,
"loss": 1.6445,
"num_input_tokens_seen": 170393600,
"step": 650
},
{
"epoch": 0.16062176165803108,
"grad_norm": 0.3565100133419037,
"learning_rate": 3.289899283371657e-06,
"loss": 1.8746,
"num_input_tokens_seen": 170655744,
"step": 651
},
{
"epoch": 0.16086849247471008,
"grad_norm": 0.15975721180438995,
"learning_rate": 3.273509005007327e-06,
"loss": 1.5286,
"num_input_tokens_seen": 170917888,
"step": 652
},
{
"epoch": 0.16111522329138908,
"grad_norm": 0.378402441740036,
"learning_rate": 3.2571397633909252e-06,
"loss": 2.028,
"num_input_tokens_seen": 171180032,
"step": 653
},
{
"epoch": 0.16136195410806808,
"grad_norm": 0.23541276156902313,
"learning_rate": 3.2407917579764914e-06,
"loss": 1.5504,
"num_input_tokens_seen": 171442176,
"step": 654
},
{
"epoch": 0.16160868492474711,
"grad_norm": 0.20868459343910217,
"learning_rate": 3.224465187959316e-06,
"loss": 2.0618,
"num_input_tokens_seen": 171704320,
"step": 655
},
{
"epoch": 0.16185541574142612,
"grad_norm": 0.258038192987442,
"learning_rate": 3.2081602522734987e-06,
"loss": 1.7872,
"num_input_tokens_seen": 171966464,
"step": 656
},
{
"epoch": 0.16210214655810512,
"grad_norm": 0.4574372470378876,
"learning_rate": 3.1918771495895395e-06,
"loss": 1.6086,
"num_input_tokens_seen": 172228608,
"step": 657
},
{
"epoch": 0.16234887737478412,
"grad_norm": 0.4050358533859253,
"learning_rate": 3.1756160783119015e-06,
"loss": 1.4888,
"num_input_tokens_seen": 172490752,
"step": 658
},
{
"epoch": 0.16259560819146313,
"grad_norm": 0.26240259408950806,
"learning_rate": 3.1593772365766107e-06,
"loss": 1.7043,
"num_input_tokens_seen": 172752896,
"step": 659
},
{
"epoch": 0.16284233900814213,
"grad_norm": 0.20817944407463074,
"learning_rate": 3.1431608222488276e-06,
"loss": 1.4598,
"num_input_tokens_seen": 173015040,
"step": 660
},
{
"epoch": 0.16308906982482113,
"grad_norm": 0.44060733914375305,
"learning_rate": 3.12696703292044e-06,
"loss": 1.898,
"num_input_tokens_seen": 173277184,
"step": 661
},
{
"epoch": 0.16333580064150013,
"grad_norm": 0.28915196657180786,
"learning_rate": 3.110796065907665e-06,
"loss": 1.2809,
"num_input_tokens_seen": 173539328,
"step": 662
},
{
"epoch": 0.16358253145817914,
"grad_norm": 0.28369492292404175,
"learning_rate": 3.09464811824863e-06,
"loss": 1.8475,
"num_input_tokens_seen": 173801472,
"step": 663
},
{
"epoch": 0.16382926227485814,
"grad_norm": 0.21826131641864777,
"learning_rate": 3.078523386700982e-06,
"loss": 1.7563,
"num_input_tokens_seen": 174063616,
"step": 664
},
{
"epoch": 0.16407599309153714,
"grad_norm": 0.3093124330043793,
"learning_rate": 3.0624220677394854e-06,
"loss": 1.7989,
"num_input_tokens_seen": 174325760,
"step": 665
},
{
"epoch": 0.16432272390821615,
"grad_norm": 0.23655392229557037,
"learning_rate": 3.0463443575536324e-06,
"loss": 1.1725,
"num_input_tokens_seen": 174587904,
"step": 666
},
{
"epoch": 0.16456945472489515,
"grad_norm": 0.30597802996635437,
"learning_rate": 3.030290452045245e-06,
"loss": 1.5523,
"num_input_tokens_seen": 174850048,
"step": 667
},
{
"epoch": 0.16481618554157415,
"grad_norm": 0.32336390018463135,
"learning_rate": 3.0142605468260976e-06,
"loss": 1.8207,
"num_input_tokens_seen": 175112192,
"step": 668
},
{
"epoch": 0.16506291635825315,
"grad_norm": 0.33342787623405457,
"learning_rate": 2.9982548372155264e-06,
"loss": 1.4246,
"num_input_tokens_seen": 175374336,
"step": 669
},
{
"epoch": 0.16530964717493216,
"grad_norm": 0.2869754433631897,
"learning_rate": 2.98227351823805e-06,
"loss": 1.287,
"num_input_tokens_seen": 175636480,
"step": 670
},
{
"epoch": 0.16555637799161116,
"grad_norm": 0.29800844192504883,
"learning_rate": 2.966316784621e-06,
"loss": 1.8511,
"num_input_tokens_seen": 175898624,
"step": 671
},
{
"epoch": 0.16580310880829016,
"grad_norm": 0.4779532551765442,
"learning_rate": 2.9503848307921363e-06,
"loss": 1.4982,
"num_input_tokens_seen": 176160768,
"step": 672
},
{
"epoch": 0.16604983962496916,
"grad_norm": 0.1853656768798828,
"learning_rate": 2.934477850877292e-06,
"loss": 1.5068,
"num_input_tokens_seen": 176422912,
"step": 673
},
{
"epoch": 0.16629657044164817,
"grad_norm": 0.24669089913368225,
"learning_rate": 2.918596038697995e-06,
"loss": 1.5875,
"num_input_tokens_seen": 176685056,
"step": 674
},
{
"epoch": 0.16654330125832717,
"grad_norm": 0.32669490575790405,
"learning_rate": 2.9027395877691143e-06,
"loss": 1.1128,
"num_input_tokens_seen": 176947200,
"step": 675
},
{
"epoch": 0.16679003207500617,
"grad_norm": 0.22396613657474518,
"learning_rate": 2.886908691296504e-06,
"loss": 1.3264,
"num_input_tokens_seen": 177209344,
"step": 676
},
{
"epoch": 0.16703676289168518,
"grad_norm": 0.22605319321155548,
"learning_rate": 2.871103542174637e-06,
"loss": 1.9182,
"num_input_tokens_seen": 177471488,
"step": 677
},
{
"epoch": 0.16728349370836418,
"grad_norm": 0.3146352171897888,
"learning_rate": 2.8553243329842715e-06,
"loss": 1.5042,
"num_input_tokens_seen": 177733632,
"step": 678
},
{
"epoch": 0.16753022452504318,
"grad_norm": 0.32827454805374146,
"learning_rate": 2.839571255990088e-06,
"loss": 1.6748,
"num_input_tokens_seen": 177995776,
"step": 679
},
{
"epoch": 0.16777695534172218,
"grad_norm": 0.19679811596870422,
"learning_rate": 2.8238445031383634e-06,
"loss": 1.485,
"num_input_tokens_seen": 178257920,
"step": 680
},
{
"epoch": 0.1680236861584012,
"grad_norm": 0.2624945044517517,
"learning_rate": 2.8081442660546126e-06,
"loss": 1.6265,
"num_input_tokens_seen": 178520064,
"step": 681
},
{
"epoch": 0.1682704169750802,
"grad_norm": 0.21925391256809235,
"learning_rate": 2.7924707360412743e-06,
"loss": 1.7237,
"num_input_tokens_seen": 178782208,
"step": 682
},
{
"epoch": 0.1685171477917592,
"grad_norm": 0.5328143239021301,
"learning_rate": 2.776824104075364e-06,
"loss": 1.8024,
"num_input_tokens_seen": 179044352,
"step": 683
},
{
"epoch": 0.1687638786084382,
"grad_norm": 0.383619099855423,
"learning_rate": 2.761204560806152e-06,
"loss": 1.5762,
"num_input_tokens_seen": 179306496,
"step": 684
},
{
"epoch": 0.1690106094251172,
"grad_norm": 0.36523357033729553,
"learning_rate": 2.7456122965528475e-06,
"loss": 1.916,
"num_input_tokens_seen": 179568640,
"step": 685
},
{
"epoch": 0.1692573402417962,
"grad_norm": 0.37342143058776855,
"learning_rate": 2.7300475013022666e-06,
"loss": 1.0616,
"num_input_tokens_seen": 179830784,
"step": 686
},
{
"epoch": 0.1695040710584752,
"grad_norm": 0.3579748570919037,
"learning_rate": 2.714510364706531e-06,
"loss": 2.0594,
"num_input_tokens_seen": 180092928,
"step": 687
},
{
"epoch": 0.1697508018751542,
"grad_norm": 0.3356163203716278,
"learning_rate": 2.699001076080742e-06,
"loss": 1.7452,
"num_input_tokens_seen": 180355072,
"step": 688
},
{
"epoch": 0.1699975326918332,
"grad_norm": 0.26012998819351196,
"learning_rate": 2.683519824400693e-06,
"loss": 1.6249,
"num_input_tokens_seen": 180617216,
"step": 689
},
{
"epoch": 0.1702442635085122,
"grad_norm": 0.45150065422058105,
"learning_rate": 2.6680667983005446e-06,
"loss": 1.4417,
"num_input_tokens_seen": 180879360,
"step": 690
},
{
"epoch": 0.17049099432519121,
"grad_norm": 0.30424007773399353,
"learning_rate": 2.6526421860705474e-06,
"loss": 1.407,
"num_input_tokens_seen": 181141504,
"step": 691
},
{
"epoch": 0.17073772514187022,
"grad_norm": 0.4288609027862549,
"learning_rate": 2.637246175654731e-06,
"loss": 1.565,
"num_input_tokens_seen": 181403648,
"step": 692
},
{
"epoch": 0.17098445595854922,
"grad_norm": 0.2482622265815735,
"learning_rate": 2.6218789546486235e-06,
"loss": 1.8121,
"num_input_tokens_seen": 181665792,
"step": 693
},
{
"epoch": 0.17123118677522822,
"grad_norm": 0.22211559116840363,
"learning_rate": 2.6065407102969664e-06,
"loss": 1.7575,
"num_input_tokens_seen": 181927936,
"step": 694
},
{
"epoch": 0.17147791759190723,
"grad_norm": 0.1990126669406891,
"learning_rate": 2.5912316294914232e-06,
"loss": 1.7779,
"num_input_tokens_seen": 182190080,
"step": 695
},
{
"epoch": 0.17172464840858623,
"grad_norm": 0.3231181800365448,
"learning_rate": 2.5759518987683154e-06,
"loss": 1.292,
"num_input_tokens_seen": 182452224,
"step": 696
},
{
"epoch": 0.17197137922526523,
"grad_norm": 0.2569955587387085,
"learning_rate": 2.560701704306336e-06,
"loss": 1.6064,
"num_input_tokens_seen": 182714368,
"step": 697
},
{
"epoch": 0.17221811004194423,
"grad_norm": 0.27727434039115906,
"learning_rate": 2.545481231924296e-06,
"loss": 1.7567,
"num_input_tokens_seen": 182976512,
"step": 698
},
{
"epoch": 0.17246484085862324,
"grad_norm": 0.4798392057418823,
"learning_rate": 2.5302906670788463e-06,
"loss": 1.6045,
"num_input_tokens_seen": 183238656,
"step": 699
},
{
"epoch": 0.17271157167530224,
"grad_norm": 0.21941442787647247,
"learning_rate": 2.5151301948622235e-06,
"loss": 1.5115,
"num_input_tokens_seen": 183500800,
"step": 700
},
{
"epoch": 0.17295830249198124,
"grad_norm": 0.26305386424064636,
"learning_rate": 2.5000000000000015e-06,
"loss": 1.2139,
"num_input_tokens_seen": 183762944,
"step": 701
},
{
"epoch": 0.17320503330866024,
"grad_norm": 0.2462892383337021,
"learning_rate": 2.484900266848825e-06,
"loss": 1.5875,
"num_input_tokens_seen": 184025088,
"step": 702
},
{
"epoch": 0.17345176412533925,
"grad_norm": 0.21067021787166595,
"learning_rate": 2.469831179394182e-06,
"loss": 1.3405,
"num_input_tokens_seen": 184287232,
"step": 703
},
{
"epoch": 0.17369849494201825,
"grad_norm": 0.23109757900238037,
"learning_rate": 2.4547929212481436e-06,
"loss": 1.606,
"num_input_tokens_seen": 184549376,
"step": 704
},
{
"epoch": 0.17394522575869725,
"grad_norm": 0.4015589654445648,
"learning_rate": 2.4397856756471435e-06,
"loss": 1.8624,
"num_input_tokens_seen": 184811520,
"step": 705
},
{
"epoch": 0.17419195657537626,
"grad_norm": 0.3183045983314514,
"learning_rate": 2.424809625449729e-06,
"loss": 1.5761,
"num_input_tokens_seen": 185073664,
"step": 706
},
{
"epoch": 0.17443868739205526,
"grad_norm": 0.3008684515953064,
"learning_rate": 2.40986495313435e-06,
"loss": 1.4587,
"num_input_tokens_seen": 185335808,
"step": 707
},
{
"epoch": 0.17468541820873426,
"grad_norm": 0.24359382688999176,
"learning_rate": 2.39495184079712e-06,
"loss": 1.5966,
"num_input_tokens_seen": 185597952,
"step": 708
},
{
"epoch": 0.17493214902541326,
"grad_norm": 0.3360385596752167,
"learning_rate": 2.380070470149605e-06,
"loss": 1.53,
"num_input_tokens_seen": 185860096,
"step": 709
},
{
"epoch": 0.17517887984209227,
"grad_norm": 0.37896421551704407,
"learning_rate": 2.3652210225166122e-06,
"loss": 1.4273,
"num_input_tokens_seen": 186122240,
"step": 710
},
{
"epoch": 0.17542561065877127,
"grad_norm": 0.22193817794322968,
"learning_rate": 2.3504036788339763e-06,
"loss": 1.4629,
"num_input_tokens_seen": 186384384,
"step": 711
},
{
"epoch": 0.17567234147545027,
"grad_norm": 0.23270967602729797,
"learning_rate": 2.3356186196463497e-06,
"loss": 1.4425,
"num_input_tokens_seen": 186646528,
"step": 712
},
{
"epoch": 0.17591907229212927,
"grad_norm": 0.2334495335817337,
"learning_rate": 2.320866025105016e-06,
"loss": 1.558,
"num_input_tokens_seen": 186908672,
"step": 713
},
{
"epoch": 0.17616580310880828,
"grad_norm": 0.29328563809394836,
"learning_rate": 2.3061460749656844e-06,
"loss": 1.7001,
"num_input_tokens_seen": 187170816,
"step": 714
},
{
"epoch": 0.17641253392548728,
"grad_norm": 0.3238964378833771,
"learning_rate": 2.2914589485863015e-06,
"loss": 1.7355,
"num_input_tokens_seen": 187432960,
"step": 715
},
{
"epoch": 0.17665926474216628,
"grad_norm": 0.5925378799438477,
"learning_rate": 2.2768048249248648e-06,
"loss": 2.0788,
"num_input_tokens_seen": 187695104,
"step": 716
},
{
"epoch": 0.17690599555884529,
"grad_norm": 0.24551017582416534,
"learning_rate": 2.2621838825372496e-06,
"loss": 1.6617,
"num_input_tokens_seen": 187957248,
"step": 717
},
{
"epoch": 0.17715272637552432,
"grad_norm": 0.19846408069133759,
"learning_rate": 2.2475962995750224e-06,
"loss": 1.5413,
"num_input_tokens_seen": 188219392,
"step": 718
},
{
"epoch": 0.17739945719220332,
"grad_norm": 0.2510528266429901,
"learning_rate": 2.23304225378328e-06,
"loss": 1.7186,
"num_input_tokens_seen": 188481536,
"step": 719
},
{
"epoch": 0.17764618800888232,
"grad_norm": 0.2695014476776123,
"learning_rate": 2.218521922498476e-06,
"loss": 1.3308,
"num_input_tokens_seen": 188743680,
"step": 720
},
{
"epoch": 0.17789291882556132,
"grad_norm": 0.3821446895599365,
"learning_rate": 2.204035482646267e-06,
"loss": 1.6929,
"num_input_tokens_seen": 189005824,
"step": 721
},
{
"epoch": 0.17813964964224033,
"grad_norm": 0.3116302788257599,
"learning_rate": 2.1895831107393485e-06,
"loss": 1.5853,
"num_input_tokens_seen": 189267968,
"step": 722
},
{
"epoch": 0.17838638045891933,
"grad_norm": 0.9760715961456299,
"learning_rate": 2.175164982875311e-06,
"loss": 1.4186,
"num_input_tokens_seen": 189530112,
"step": 723
},
{
"epoch": 0.17863311127559833,
"grad_norm": 0.2930695414543152,
"learning_rate": 2.1607812747344955e-06,
"loss": 1.9299,
"num_input_tokens_seen": 189792256,
"step": 724
},
{
"epoch": 0.17887984209227734,
"grad_norm": 0.27560484409332275,
"learning_rate": 2.146432161577842e-06,
"loss": 1.869,
"num_input_tokens_seen": 190054400,
"step": 725
},
{
"epoch": 0.17912657290895634,
"grad_norm": 0.4233398735523224,
"learning_rate": 2.132117818244771e-06,
"loss": 1.8978,
"num_input_tokens_seen": 190316544,
"step": 726
},
{
"epoch": 0.17937330372563534,
"grad_norm": 0.21294540166854858,
"learning_rate": 2.1178384191510344e-06,
"loss": 1.9182,
"num_input_tokens_seen": 190578688,
"step": 727
},
{
"epoch": 0.17962003454231434,
"grad_norm": 0.3538358211517334,
"learning_rate": 2.103594138286607e-06,
"loss": 1.9857,
"num_input_tokens_seen": 190840832,
"step": 728
},
{
"epoch": 0.17986676535899335,
"grad_norm": 0.34079834818840027,
"learning_rate": 2.0893851492135536e-06,
"loss": 1.6827,
"num_input_tokens_seen": 191102976,
"step": 729
},
{
"epoch": 0.18011349617567235,
"grad_norm": 0.28942787647247314,
"learning_rate": 2.075211625063923e-06,
"loss": 1.5478,
"num_input_tokens_seen": 191365120,
"step": 730
},
{
"epoch": 0.18036022699235135,
"grad_norm": 0.33003339171409607,
"learning_rate": 2.061073738537635e-06,
"loss": 1.6105,
"num_input_tokens_seen": 191627264,
"step": 731
},
{
"epoch": 0.18060695780903036,
"grad_norm": 0.20433729887008667,
"learning_rate": 2.046971661900373e-06,
"loss": 1.8698,
"num_input_tokens_seen": 191889408,
"step": 732
},
{
"epoch": 0.18085368862570936,
"grad_norm": 0.1890942007303238,
"learning_rate": 2.0329055669814936e-06,
"loss": 1.4596,
"num_input_tokens_seen": 192151552,
"step": 733
},
{
"epoch": 0.18110041944238836,
"grad_norm": 0.34075161814689636,
"learning_rate": 2.0188756251719204e-06,
"loss": 1.5388,
"num_input_tokens_seen": 192413696,
"step": 734
},
{
"epoch": 0.18134715025906736,
"grad_norm": 0.465636283159256,
"learning_rate": 2.0048820074220716e-06,
"loss": 1.6769,
"num_input_tokens_seen": 192675840,
"step": 735
},
{
"epoch": 0.18159388107574637,
"grad_norm": 0.3639475405216217,
"learning_rate": 1.990924884239758e-06,
"loss": 1.4542,
"num_input_tokens_seen": 192937984,
"step": 736
},
{
"epoch": 0.18184061189242537,
"grad_norm": 0.22438043355941772,
"learning_rate": 1.977004425688126e-06,
"loss": 1.7826,
"num_input_tokens_seen": 193200128,
"step": 737
},
{
"epoch": 0.18208734270910437,
"grad_norm": 0.23819977045059204,
"learning_rate": 1.9631208013835677e-06,
"loss": 1.5294,
"num_input_tokens_seen": 193462272,
"step": 738
},
{
"epoch": 0.18233407352578337,
"grad_norm": 0.16036422550678253,
"learning_rate": 1.9492741804936623e-06,
"loss": 1.3988,
"num_input_tokens_seen": 193724416,
"step": 739
},
{
"epoch": 0.18258080434246238,
"grad_norm": 0.19281713664531708,
"learning_rate": 1.9354647317351187e-06,
"loss": 2.13,
"num_input_tokens_seen": 193986560,
"step": 740
},
{
"epoch": 0.18282753515914138,
"grad_norm": 0.441250741481781,
"learning_rate": 1.9216926233717087e-06,
"loss": 2.1448,
"num_input_tokens_seen": 194248704,
"step": 741
},
{
"epoch": 0.18307426597582038,
"grad_norm": 0.299956351518631,
"learning_rate": 1.90795802321223e-06,
"loss": 1.7517,
"num_input_tokens_seen": 194510848,
"step": 742
},
{
"epoch": 0.18332099679249939,
"grad_norm": 0.2056249976158142,
"learning_rate": 1.8942610986084487e-06,
"loss": 1.2246,
"num_input_tokens_seen": 194772992,
"step": 743
},
{
"epoch": 0.1835677276091784,
"grad_norm": 0.21236024796962738,
"learning_rate": 1.8806020164530702e-06,
"loss": 1.4881,
"num_input_tokens_seen": 195035136,
"step": 744
},
{
"epoch": 0.1838144584258574,
"grad_norm": 0.2904950976371765,
"learning_rate": 1.8669809431776991e-06,
"loss": 1.6151,
"num_input_tokens_seen": 195297280,
"step": 745
},
{
"epoch": 0.1840611892425364,
"grad_norm": 0.21807019412517548,
"learning_rate": 1.8533980447508138e-06,
"loss": 1.6726,
"num_input_tokens_seen": 195559424,
"step": 746
},
{
"epoch": 0.1843079200592154,
"grad_norm": 0.23090478777885437,
"learning_rate": 1.8398534866757455e-06,
"loss": 1.7856,
"num_input_tokens_seen": 195821568,
"step": 747
},
{
"epoch": 0.1845546508758944,
"grad_norm": 0.28993329405784607,
"learning_rate": 1.8263474339886628e-06,
"loss": 1.8773,
"num_input_tokens_seen": 196083712,
"step": 748
},
{
"epoch": 0.1848013816925734,
"grad_norm": 0.3971545696258545,
"learning_rate": 1.8128800512565514e-06,
"loss": 1.7139,
"num_input_tokens_seen": 196345856,
"step": 749
},
{
"epoch": 0.1850481125092524,
"grad_norm": 0.19998975098133087,
"learning_rate": 1.799451502575222e-06,
"loss": 1.6208,
"num_input_tokens_seen": 196608000,
"step": 750
},
{
"epoch": 0.1852948433259314,
"grad_norm": 0.293244868516922,
"learning_rate": 1.7860619515673034e-06,
"loss": 1.9268,
"num_input_tokens_seen": 196870144,
"step": 751
},
{
"epoch": 0.1855415741426104,
"grad_norm": 0.3308587372303009,
"learning_rate": 1.7727115613802465e-06,
"loss": 1.9633,
"num_input_tokens_seen": 197132288,
"step": 752
},
{
"epoch": 0.1857883049592894,
"grad_norm": 0.2106206715106964,
"learning_rate": 1.7594004946843458e-06,
"loss": 1.8514,
"num_input_tokens_seen": 197394432,
"step": 753
},
{
"epoch": 0.18603503577596842,
"grad_norm": 0.3629756271839142,
"learning_rate": 1.746128913670746e-06,
"loss": 2.0288,
"num_input_tokens_seen": 197656576,
"step": 754
},
{
"epoch": 0.18628176659264742,
"grad_norm": 0.2693224549293518,
"learning_rate": 1.7328969800494727e-06,
"loss": 1.7449,
"num_input_tokens_seen": 197918720,
"step": 755
},
{
"epoch": 0.18652849740932642,
"grad_norm": 0.2536444365978241,
"learning_rate": 1.7197048550474643e-06,
"loss": 1.6386,
"num_input_tokens_seen": 198180864,
"step": 756
},
{
"epoch": 0.18677522822600542,
"grad_norm": 0.19451206922531128,
"learning_rate": 1.7065526994065973e-06,
"loss": 1.6479,
"num_input_tokens_seen": 198443008,
"step": 757
},
{
"epoch": 0.18702195904268443,
"grad_norm": 0.2774448096752167,
"learning_rate": 1.6934406733817417e-06,
"loss": 1.3525,
"num_input_tokens_seen": 198705152,
"step": 758
},
{
"epoch": 0.18726868985936343,
"grad_norm": 0.4685295820236206,
"learning_rate": 1.680368936738792e-06,
"loss": 1.9418,
"num_input_tokens_seen": 198967296,
"step": 759
},
{
"epoch": 0.18751542067604243,
"grad_norm": 0.2218584567308426,
"learning_rate": 1.6673376487527382e-06,
"loss": 1.376,
"num_input_tokens_seen": 199229440,
"step": 760
},
{
"epoch": 0.18776215149272144,
"grad_norm": 0.356768935918808,
"learning_rate": 1.6543469682057105e-06,
"loss": 1.865,
"num_input_tokens_seen": 199491584,
"step": 761
},
{
"epoch": 0.18800888230940044,
"grad_norm": 0.2825476825237274,
"learning_rate": 1.6413970533850498e-06,
"loss": 1.6937,
"num_input_tokens_seen": 199753728,
"step": 762
},
{
"epoch": 0.18825561312607944,
"grad_norm": 0.17668165266513824,
"learning_rate": 1.6284880620813847e-06,
"loss": 1.7994,
"num_input_tokens_seen": 200015872,
"step": 763
},
{
"epoch": 0.18850234394275844,
"grad_norm": 0.18310214579105377,
"learning_rate": 1.6156201515866971e-06,
"loss": 1.5417,
"num_input_tokens_seen": 200278016,
"step": 764
},
{
"epoch": 0.18874907475943745,
"grad_norm": 0.3639572262763977,
"learning_rate": 1.6027934786924187e-06,
"loss": 2.0172,
"num_input_tokens_seen": 200540160,
"step": 765
},
{
"epoch": 0.18899580557611645,
"grad_norm": 0.22743955254554749,
"learning_rate": 1.5900081996875083e-06,
"loss": 1.453,
"num_input_tokens_seen": 200802304,
"step": 766
},
{
"epoch": 0.18924253639279545,
"grad_norm": 0.2889941930770874,
"learning_rate": 1.5772644703565564e-06,
"loss": 1.5592,
"num_input_tokens_seen": 201064448,
"step": 767
},
{
"epoch": 0.18948926720947445,
"grad_norm": 0.2820563018321991,
"learning_rate": 1.5645624459778858e-06,
"loss": 1.8922,
"num_input_tokens_seen": 201326592,
"step": 768
},
{
"epoch": 0.18973599802615346,
"grad_norm": 0.1855258345603943,
"learning_rate": 1.551902281321651e-06,
"loss": 1.6053,
"num_input_tokens_seen": 201588736,
"step": 769
},
{
"epoch": 0.18998272884283246,
"grad_norm": 0.18336619436740875,
"learning_rate": 1.5392841306479667e-06,
"loss": 1.5118,
"num_input_tokens_seen": 201850880,
"step": 770
},
{
"epoch": 0.19022945965951146,
"grad_norm": 0.34779712557792664,
"learning_rate": 1.5267081477050132e-06,
"loss": 1.5625,
"num_input_tokens_seen": 202113024,
"step": 771
},
{
"epoch": 0.19047619047619047,
"grad_norm": 0.30039459466934204,
"learning_rate": 1.514174485727178e-06,
"loss": 1.5418,
"num_input_tokens_seen": 202375168,
"step": 772
},
{
"epoch": 0.19072292129286947,
"grad_norm": 0.2960957884788513,
"learning_rate": 1.5016832974331725e-06,
"loss": 1.3917,
"num_input_tokens_seen": 202637312,
"step": 773
},
{
"epoch": 0.19096965210954847,
"grad_norm": 0.2889593541622162,
"learning_rate": 1.489234735024188e-06,
"loss": 1.5424,
"num_input_tokens_seen": 202899456,
"step": 774
},
{
"epoch": 0.19121638292622747,
"grad_norm": 0.2686896324157715,
"learning_rate": 1.4768289501820265e-06,
"loss": 1.6948,
"num_input_tokens_seen": 203161600,
"step": 775
},
{
"epoch": 0.19146311374290648,
"grad_norm": 0.35694169998168945,
"learning_rate": 1.4644660940672628e-06,
"loss": 1.5278,
"num_input_tokens_seen": 203423744,
"step": 776
},
{
"epoch": 0.19170984455958548,
"grad_norm": 0.2902432382106781,
"learning_rate": 1.4521463173173966e-06,
"loss": 1.643,
"num_input_tokens_seen": 203685888,
"step": 777
},
{
"epoch": 0.19195657537626448,
"grad_norm": 0.45950496196746826,
"learning_rate": 1.4398697700450181e-06,
"loss": 1.2811,
"num_input_tokens_seen": 203948032,
"step": 778
},
{
"epoch": 0.19220330619294348,
"grad_norm": 0.28215292096138,
"learning_rate": 1.4276366018359845e-06,
"loss": 1.968,
"num_input_tokens_seen": 204210176,
"step": 779
},
{
"epoch": 0.19245003700962252,
"grad_norm": 0.26845288276672363,
"learning_rate": 1.4154469617475864e-06,
"loss": 1.9697,
"num_input_tokens_seen": 204472320,
"step": 780
},
{
"epoch": 0.19269676782630152,
"grad_norm": 0.2510419189929962,
"learning_rate": 1.4033009983067454e-06,
"loss": 1.7787,
"num_input_tokens_seen": 204734464,
"step": 781
},
{
"epoch": 0.19294349864298052,
"grad_norm": 0.39068034291267395,
"learning_rate": 1.3911988595081894e-06,
"loss": 1.8188,
"num_input_tokens_seen": 204996608,
"step": 782
},
{
"epoch": 0.19319022945965952,
"grad_norm": 0.32934966683387756,
"learning_rate": 1.3791406928126638e-06,
"loss": 1.4715,
"num_input_tokens_seen": 205258752,
"step": 783
},
{
"epoch": 0.19343696027633853,
"grad_norm": 0.4297356605529785,
"learning_rate": 1.3671266451451209e-06,
"loss": 2.0179,
"num_input_tokens_seen": 205520896,
"step": 784
},
{
"epoch": 0.19368369109301753,
"grad_norm": 0.20103523135185242,
"learning_rate": 1.3551568628929434e-06,
"loss": 1.5002,
"num_input_tokens_seen": 205783040,
"step": 785
},
{
"epoch": 0.19393042190969653,
"grad_norm": 0.5125095248222351,
"learning_rate": 1.3432314919041478e-06,
"loss": 1.8507,
"num_input_tokens_seen": 206045184,
"step": 786
},
{
"epoch": 0.19417715272637553,
"grad_norm": 0.3016910254955292,
"learning_rate": 1.3313506774856177e-06,
"loss": 1.3569,
"num_input_tokens_seen": 206307328,
"step": 787
},
{
"epoch": 0.19442388354305454,
"grad_norm": 0.25727003812789917,
"learning_rate": 1.3195145644013286e-06,
"loss": 1.6336,
"num_input_tokens_seen": 206569472,
"step": 788
},
{
"epoch": 0.19467061435973354,
"grad_norm": 0.25804463028907776,
"learning_rate": 1.3077232968705805e-06,
"loss": 1.5826,
"num_input_tokens_seen": 206831616,
"step": 789
},
{
"epoch": 0.19491734517641254,
"grad_norm": 0.2707713842391968,
"learning_rate": 1.2959770185662502e-06,
"loss": 1.5577,
"num_input_tokens_seen": 207093760,
"step": 790
},
{
"epoch": 0.19516407599309155,
"grad_norm": 0.22968651354312897,
"learning_rate": 1.2842758726130283e-06,
"loss": 1.5968,
"num_input_tokens_seen": 207355904,
"step": 791
},
{
"epoch": 0.19541080680977055,
"grad_norm": 0.14831578731536865,
"learning_rate": 1.2726200015856893e-06,
"loss": 1.0922,
"num_input_tokens_seen": 207618048,
"step": 792
},
{
"epoch": 0.19565753762644955,
"grad_norm": 0.405351847410202,
"learning_rate": 1.2610095475073415e-06,
"loss": 1.9769,
"num_input_tokens_seen": 207880192,
"step": 793
},
{
"epoch": 0.19590426844312855,
"grad_norm": 0.37764203548431396,
"learning_rate": 1.2494446518477022e-06,
"loss": 1.6718,
"num_input_tokens_seen": 208142336,
"step": 794
},
{
"epoch": 0.19615099925980756,
"grad_norm": 0.21938475966453552,
"learning_rate": 1.2379254555213788e-06,
"loss": 1.5984,
"num_input_tokens_seen": 208404480,
"step": 795
},
{
"epoch": 0.19639773007648656,
"grad_norm": 0.260897159576416,
"learning_rate": 1.22645209888614e-06,
"loss": 1.3849,
"num_input_tokens_seen": 208666624,
"step": 796
},
{
"epoch": 0.19664446089316556,
"grad_norm": 0.3183205723762512,
"learning_rate": 1.2150247217412186e-06,
"loss": 1.6641,
"num_input_tokens_seen": 208928768,
"step": 797
},
{
"epoch": 0.19689119170984457,
"grad_norm": 0.25011545419692993,
"learning_rate": 1.203643463325596e-06,
"loss": 1.485,
"num_input_tokens_seen": 209190912,
"step": 798
},
{
"epoch": 0.19713792252652357,
"grad_norm": 0.29727256298065186,
"learning_rate": 1.1923084623163172e-06,
"loss": 1.3779,
"num_input_tokens_seen": 209453056,
"step": 799
},
{
"epoch": 0.19738465334320257,
"grad_norm": 0.3542685806751251,
"learning_rate": 1.1810198568267906e-06,
"loss": 1.2787,
"num_input_tokens_seen": 209715200,
"step": 800
},
{
"epoch": 0.19763138415988157,
"grad_norm": 0.3065374791622162,
"learning_rate": 1.1697777844051105e-06,
"loss": 1.3865,
"num_input_tokens_seen": 209977344,
"step": 801
},
{
"epoch": 0.19787811497656058,
"grad_norm": 0.24071305990219116,
"learning_rate": 1.1585823820323845e-06,
"loss": 1.7097,
"num_input_tokens_seen": 210239488,
"step": 802
},
{
"epoch": 0.19812484579323958,
"grad_norm": 0.30029404163360596,
"learning_rate": 1.1474337861210543e-06,
"loss": 1.6166,
"num_input_tokens_seen": 210501632,
"step": 803
},
{
"epoch": 0.19837157660991858,
"grad_norm": 0.3482155501842499,
"learning_rate": 1.136332132513245e-06,
"loss": 1.4857,
"num_input_tokens_seen": 210763776,
"step": 804
},
{
"epoch": 0.19861830742659758,
"grad_norm": 0.33486634492874146,
"learning_rate": 1.1252775564791023e-06,
"loss": 1.4237,
"num_input_tokens_seen": 211025920,
"step": 805
},
{
"epoch": 0.1988650382432766,
"grad_norm": 0.3351602852344513,
"learning_rate": 1.1142701927151456e-06,
"loss": 1.8177,
"num_input_tokens_seen": 211288064,
"step": 806
},
{
"epoch": 0.1991117690599556,
"grad_norm": 0.3947872221469879,
"learning_rate": 1.1033101753426285e-06,
"loss": 1.8403,
"num_input_tokens_seen": 211550208,
"step": 807
},
{
"epoch": 0.1993584998766346,
"grad_norm": 0.35190752148628235,
"learning_rate": 1.0923976379059059e-06,
"loss": 1.6172,
"num_input_tokens_seen": 211812352,
"step": 808
},
{
"epoch": 0.1996052306933136,
"grad_norm": 0.23227426409721375,
"learning_rate": 1.0815327133708015e-06,
"loss": 1.1447,
"num_input_tokens_seen": 212074496,
"step": 809
},
{
"epoch": 0.1998519615099926,
"grad_norm": 0.30079352855682373,
"learning_rate": 1.0707155341229902e-06,
"loss": 1.9032,
"num_input_tokens_seen": 212336640,
"step": 810
},
{
"epoch": 0.2000986923266716,
"grad_norm": 0.20834387838840485,
"learning_rate": 1.0599462319663906e-06,
"loss": 1.8074,
"num_input_tokens_seen": 212598784,
"step": 811
},
{
"epoch": 0.2003454231433506,
"grad_norm": 0.43393853306770325,
"learning_rate": 1.049224938121548e-06,
"loss": 1.8503,
"num_input_tokens_seen": 212860928,
"step": 812
},
{
"epoch": 0.2005921539600296,
"grad_norm": 0.31511902809143066,
"learning_rate": 1.0385517832240472e-06,
"loss": 1.512,
"num_input_tokens_seen": 213123072,
"step": 813
},
{
"epoch": 0.2008388847767086,
"grad_norm": 0.17316994071006775,
"learning_rate": 1.0279268973229089e-06,
"loss": 1.5999,
"num_input_tokens_seen": 213385216,
"step": 814
},
{
"epoch": 0.2010856155933876,
"grad_norm": 0.24697916209697723,
"learning_rate": 1.0173504098790188e-06,
"loss": 1.272,
"num_input_tokens_seen": 213647360,
"step": 815
},
{
"epoch": 0.20133234641006661,
"grad_norm": 0.2564072906970978,
"learning_rate": 1.006822449763537e-06,
"loss": 2.1296,
"num_input_tokens_seen": 213909504,
"step": 816
},
{
"epoch": 0.20157907722674562,
"grad_norm": 0.26823318004608154,
"learning_rate": 9.963431452563331e-07,
"loss": 1.9528,
"num_input_tokens_seen": 214171648,
"step": 817
},
{
"epoch": 0.20182580804342462,
"grad_norm": 0.2205478847026825,
"learning_rate": 9.859126240444284e-07,
"loss": 1.4545,
"num_input_tokens_seen": 214433792,
"step": 818
},
{
"epoch": 0.20207253886010362,
"grad_norm": 0.45528295636177063,
"learning_rate": 9.7553101322043e-07,
"loss": 1.9818,
"num_input_tokens_seen": 214695936,
"step": 819
},
{
"epoch": 0.20231926967678263,
"grad_norm": 0.2815514802932739,
"learning_rate": 9.651984392809916e-07,
"loss": 1.6919,
"num_input_tokens_seen": 214958080,
"step": 820
},
{
"epoch": 0.20256600049346163,
"grad_norm": 0.31144043803215027,
"learning_rate": 9.549150281252633e-07,
"loss": 1.7837,
"num_input_tokens_seen": 215220224,
"step": 821
},
{
"epoch": 0.20281273131014063,
"grad_norm": 0.3299436569213867,
"learning_rate": 9.446809050533679e-07,
"loss": 2.0261,
"num_input_tokens_seen": 215482368,
"step": 822
},
{
"epoch": 0.20305946212681963,
"grad_norm": 0.31391656398773193,
"learning_rate": 9.344961947648624e-07,
"loss": 1.7735,
"num_input_tokens_seen": 215744512,
"step": 823
},
{
"epoch": 0.20330619294349864,
"grad_norm": 0.37549975514411926,
"learning_rate": 9.243610213572285e-07,
"loss": 1.9061,
"num_input_tokens_seen": 216006656,
"step": 824
},
{
"epoch": 0.20355292376017764,
"grad_norm": 0.3471549451351166,
"learning_rate": 9.142755083243577e-07,
"loss": 1.0835,
"num_input_tokens_seen": 216268800,
"step": 825
},
{
"epoch": 0.20379965457685664,
"grad_norm": 0.21472056210041046,
"learning_rate": 9.042397785550405e-07,
"loss": 1.487,
"num_input_tokens_seen": 216530944,
"step": 826
},
{
"epoch": 0.20404638539353565,
"grad_norm": 0.3580244183540344,
"learning_rate": 8.942539543314799e-07,
"loss": 1.5566,
"num_input_tokens_seen": 216793088,
"step": 827
},
{
"epoch": 0.20429311621021465,
"grad_norm": 0.771636962890625,
"learning_rate": 8.843181573277904e-07,
"loss": 1.4313,
"num_input_tokens_seen": 217055232,
"step": 828
},
{
"epoch": 0.20453984702689365,
"grad_norm": 0.2665698230266571,
"learning_rate": 8.744325086085248e-07,
"loss": 1.5071,
"num_input_tokens_seen": 217317376,
"step": 829
},
{
"epoch": 0.20478657784357265,
"grad_norm": 0.5237531065940857,
"learning_rate": 8.645971286271903e-07,
"loss": 1.7021,
"num_input_tokens_seen": 217579520,
"step": 830
},
{
"epoch": 0.20503330866025166,
"grad_norm": 0.3260135054588318,
"learning_rate": 8.54812137224792e-07,
"loss": 1.5491,
"num_input_tokens_seen": 217841664,
"step": 831
},
{
"epoch": 0.20528003947693066,
"grad_norm": 0.37850940227508545,
"learning_rate": 8.450776536283594e-07,
"loss": 1.9982,
"num_input_tokens_seen": 218103808,
"step": 832
},
{
"epoch": 0.20552677029360966,
"grad_norm": 0.3043404221534729,
"learning_rate": 8.353937964495029e-07,
"loss": 1.6239,
"num_input_tokens_seen": 218365952,
"step": 833
},
{
"epoch": 0.20577350111028866,
"grad_norm": 0.2891063988208771,
"learning_rate": 8.25760683682968e-07,
"loss": 1.4132,
"num_input_tokens_seen": 218628096,
"step": 834
},
{
"epoch": 0.20602023192696767,
"grad_norm": 0.2345696985721588,
"learning_rate": 8.161784327051919e-07,
"loss": 1.6094,
"num_input_tokens_seen": 218890240,
"step": 835
},
{
"epoch": 0.20626696274364667,
"grad_norm": 0.3723156750202179,
"learning_rate": 8.066471602728804e-07,
"loss": 1.6819,
"num_input_tokens_seen": 219152384,
"step": 836
},
{
"epoch": 0.20651369356032567,
"grad_norm": 0.44934532046318054,
"learning_rate": 7.971669825215789e-07,
"loss": 1.6014,
"num_input_tokens_seen": 219414528,
"step": 837
},
{
"epoch": 0.20676042437700468,
"grad_norm": 0.25686654448509216,
"learning_rate": 7.877380149642628e-07,
"loss": 1.8622,
"num_input_tokens_seen": 219676672,
"step": 838
},
{
"epoch": 0.20700715519368368,
"grad_norm": 0.3707871735095978,
"learning_rate": 7.783603724899258e-07,
"loss": 1.5101,
"num_input_tokens_seen": 219938816,
"step": 839
},
{
"epoch": 0.20725388601036268,
"grad_norm": 0.4154548943042755,
"learning_rate": 7.690341693621805e-07,
"loss": 1.7454,
"num_input_tokens_seen": 220200960,
"step": 840
},
{
"epoch": 0.20750061682704168,
"grad_norm": 0.3507881462574005,
"learning_rate": 7.597595192178702e-07,
"loss": 1.5214,
"num_input_tokens_seen": 220463104,
"step": 841
},
{
"epoch": 0.20774734764372071,
"grad_norm": 0.32315999269485474,
"learning_rate": 7.505365350656813e-07,
"loss": 1.8847,
"num_input_tokens_seen": 220725248,
"step": 842
},
{
"epoch": 0.20799407846039972,
"grad_norm": 0.2104586958885193,
"learning_rate": 7.413653292847617e-07,
"loss": 1.3325,
"num_input_tokens_seen": 220987392,
"step": 843
},
{
"epoch": 0.20824080927707872,
"grad_norm": 0.2483946830034256,
"learning_rate": 7.322460136233622e-07,
"loss": 1.4646,
"num_input_tokens_seen": 221249536,
"step": 844
},
{
"epoch": 0.20848754009375772,
"grad_norm": 0.3510516285896301,
"learning_rate": 7.23178699197467e-07,
"loss": 1.8601,
"num_input_tokens_seen": 221511680,
"step": 845
},
{
"epoch": 0.20873427091043673,
"grad_norm": 0.22424964606761932,
"learning_rate": 7.141634964894389e-07,
"loss": 1.422,
"num_input_tokens_seen": 221773824,
"step": 846
},
{
"epoch": 0.20898100172711573,
"grad_norm": 0.36059847474098206,
"learning_rate": 7.052005153466779e-07,
"loss": 1.2802,
"num_input_tokens_seen": 222035968,
"step": 847
},
{
"epoch": 0.20922773254379473,
"grad_norm": 0.29703643918037415,
"learning_rate": 6.962898649802824e-07,
"loss": 1.9207,
"num_input_tokens_seen": 222298112,
"step": 848
},
{
"epoch": 0.20947446336047373,
"grad_norm": 0.3677685856819153,
"learning_rate": 6.874316539637127e-07,
"loss": 1.5452,
"num_input_tokens_seen": 222560256,
"step": 849
},
{
"epoch": 0.20972119417715274,
"grad_norm": 0.6440513730049133,
"learning_rate": 6.786259902314768e-07,
"loss": 1.4732,
"num_input_tokens_seen": 222822400,
"step": 850
},
{
"epoch": 0.20996792499383174,
"grad_norm": 0.2577255070209503,
"learning_rate": 6.698729810778065e-07,
"loss": 1.7592,
"num_input_tokens_seen": 223084544,
"step": 851
},
{
"epoch": 0.21021465581051074,
"grad_norm": 0.287110835313797,
"learning_rate": 6.611727331553585e-07,
"loss": 2.0263,
"num_input_tokens_seen": 223346688,
"step": 852
},
{
"epoch": 0.21046138662718974,
"grad_norm": 0.2498570680618286,
"learning_rate": 6.52525352473905e-07,
"loss": 1.7688,
"num_input_tokens_seen": 223608832,
"step": 853
},
{
"epoch": 0.21070811744386875,
"grad_norm": 0.36089619994163513,
"learning_rate": 6.439309443990532e-07,
"loss": 2.0014,
"num_input_tokens_seen": 223870976,
"step": 854
},
{
"epoch": 0.21095484826054775,
"grad_norm": 1.3907390832901,
"learning_rate": 6.353896136509524e-07,
"loss": 1.8,
"num_input_tokens_seen": 224133120,
"step": 855
},
{
"epoch": 0.21120157907722675,
"grad_norm": 0.2767269015312195,
"learning_rate": 6.269014643030214e-07,
"loss": 1.4085,
"num_input_tokens_seen": 224395264,
"step": 856
},
{
"epoch": 0.21144830989390576,
"grad_norm": 0.20977024734020233,
"learning_rate": 6.184665997806832e-07,
"loss": 1.8731,
"num_input_tokens_seen": 224657408,
"step": 857
},
{
"epoch": 0.21169504071058476,
"grad_norm": 0.2735670804977417,
"learning_rate": 6.100851228600974e-07,
"loss": 1.7069,
"num_input_tokens_seen": 224919552,
"step": 858
},
{
"epoch": 0.21194177152726376,
"grad_norm": 0.3599633574485779,
"learning_rate": 6.017571356669183e-07,
"loss": 1.6557,
"num_input_tokens_seen": 225181696,
"step": 859
},
{
"epoch": 0.21218850234394276,
"grad_norm": 0.25999823212623596,
"learning_rate": 5.934827396750392e-07,
"loss": 1.2838,
"num_input_tokens_seen": 225443840,
"step": 860
},
{
"epoch": 0.21243523316062177,
"grad_norm": 0.33764758706092834,
"learning_rate": 5.852620357053651e-07,
"loss": 1.955,
"num_input_tokens_seen": 225705984,
"step": 861
},
{
"epoch": 0.21268196397730077,
"grad_norm": 0.15850548446178436,
"learning_rate": 5.770951239245803e-07,
"loss": 1.1359,
"num_input_tokens_seen": 225968128,
"step": 862
},
{
"epoch": 0.21292869479397977,
"grad_norm": 0.3201885223388672,
"learning_rate": 5.689821038439264e-07,
"loss": 1.7928,
"num_input_tokens_seen": 226230272,
"step": 863
},
{
"epoch": 0.21317542561065878,
"grad_norm": 0.25651001930236816,
"learning_rate": 5.609230743179939e-07,
"loss": 1.7624,
"num_input_tokens_seen": 226492416,
"step": 864
},
{
"epoch": 0.21342215642733778,
"grad_norm": 0.2685423493385315,
"learning_rate": 5.529181335435124e-07,
"loss": 1.747,
"num_input_tokens_seen": 226754560,
"step": 865
},
{
"epoch": 0.21366888724401678,
"grad_norm": 0.24144497513771057,
"learning_rate": 5.449673790581611e-07,
"loss": 1.2972,
"num_input_tokens_seen": 227016704,
"step": 866
},
{
"epoch": 0.21391561806069578,
"grad_norm": 0.2833223342895508,
"learning_rate": 5.370709077393721e-07,
"loss": 1.5515,
"num_input_tokens_seen": 227278848,
"step": 867
},
{
"epoch": 0.2141623488773748,
"grad_norm": 0.5597326159477234,
"learning_rate": 5.292288158031595e-07,
"loss": 1.2957,
"num_input_tokens_seen": 227540992,
"step": 868
},
{
"epoch": 0.2144090796940538,
"grad_norm": 0.2911970913410187,
"learning_rate": 5.214411988029355e-07,
"loss": 1.5322,
"num_input_tokens_seen": 227803136,
"step": 869
},
{
"epoch": 0.2146558105107328,
"grad_norm": 0.27730849385261536,
"learning_rate": 5.137081516283582e-07,
"loss": 2.0445,
"num_input_tokens_seen": 228065280,
"step": 870
},
{
"epoch": 0.2149025413274118,
"grad_norm": 0.23569044470787048,
"learning_rate": 5.06029768504166e-07,
"loss": 1.4844,
"num_input_tokens_seen": 228327424,
"step": 871
},
{
"epoch": 0.2151492721440908,
"grad_norm": 0.34393903613090515,
"learning_rate": 4.984061429890324e-07,
"loss": 1.7995,
"num_input_tokens_seen": 228589568,
"step": 872
},
{
"epoch": 0.2153960029607698,
"grad_norm": 0.3047172725200653,
"learning_rate": 4.908373679744316e-07,
"loss": 1.9499,
"num_input_tokens_seen": 228851712,
"step": 873
},
{
"epoch": 0.2156427337774488,
"grad_norm": 0.3547765612602234,
"learning_rate": 4.833235356834959e-07,
"loss": 1.2096,
"num_input_tokens_seen": 229113856,
"step": 874
},
{
"epoch": 0.2158894645941278,
"grad_norm": 0.28206881880760193,
"learning_rate": 4.758647376699033e-07,
"loss": 1.8047,
"num_input_tokens_seen": 229376000,
"step": 875
},
{
"epoch": 0.2161361954108068,
"grad_norm": 0.37405312061309814,
"learning_rate": 4.6846106481675035e-07,
"loss": 1.967,
"num_input_tokens_seen": 229638144,
"step": 876
},
{
"epoch": 0.2163829262274858,
"grad_norm": 0.28268763422966003,
"learning_rate": 4.6111260733545714e-07,
"loss": 1.413,
"num_input_tokens_seen": 229900288,
"step": 877
},
{
"epoch": 0.21662965704416481,
"grad_norm": 0.33675679564476013,
"learning_rate": 4.538194547646574e-07,
"loss": 1.8347,
"num_input_tokens_seen": 230162432,
"step": 878
},
{
"epoch": 0.21687638786084382,
"grad_norm": 0.29510554671287537,
"learning_rate": 4.4658169596911493e-07,
"loss": 1.7789,
"num_input_tokens_seen": 230424576,
"step": 879
},
{
"epoch": 0.21712311867752282,
"grad_norm": 0.22556479275226593,
"learning_rate": 4.3939941913863525e-07,
"loss": 1.4442,
"num_input_tokens_seen": 230686720,
"step": 880
},
{
"epoch": 0.21736984949420182,
"grad_norm": 0.25110024213790894,
"learning_rate": 4.322727117869951e-07,
"loss": 1.8965,
"num_input_tokens_seen": 230948864,
"step": 881
},
{
"epoch": 0.21761658031088082,
"grad_norm": 0.28704214096069336,
"learning_rate": 4.2520166075087635e-07,
"loss": 1.2111,
"num_input_tokens_seen": 231211008,
"step": 882
},
{
"epoch": 0.21786331112755983,
"grad_norm": 0.3127775490283966,
"learning_rate": 4.1818635218880186e-07,
"loss": 1.5499,
"num_input_tokens_seen": 231473152,
"step": 883
},
{
"epoch": 0.21811004194423883,
"grad_norm": 0.14734895527362823,
"learning_rate": 4.112268715800943e-07,
"loss": 1.2497,
"num_input_tokens_seen": 231735296,
"step": 884
},
{
"epoch": 0.21835677276091783,
"grad_norm": 0.29640600085258484,
"learning_rate": 4.043233037238281e-07,
"loss": 1.7869,
"num_input_tokens_seen": 231997440,
"step": 885
},
{
"epoch": 0.21860350357759684,
"grad_norm": 0.2300645112991333,
"learning_rate": 3.9747573273779816e-07,
"loss": 2.0367,
"num_input_tokens_seen": 232259584,
"step": 886
},
{
"epoch": 0.21885023439427584,
"grad_norm": 0.22962304949760437,
"learning_rate": 3.90684242057498e-07,
"loss": 1.5592,
"num_input_tokens_seen": 232521728,
"step": 887
},
{
"epoch": 0.21909696521095484,
"grad_norm": 0.2504030466079712,
"learning_rate": 3.8394891443509554e-07,
"loss": 1.6542,
"num_input_tokens_seen": 232783872,
"step": 888
},
{
"epoch": 0.21934369602763384,
"grad_norm": 0.2758059799671173,
"learning_rate": 3.772698319384349e-07,
"loss": 1.8074,
"num_input_tokens_seen": 233046016,
"step": 889
},
{
"epoch": 0.21959042684431285,
"grad_norm": 0.1838909238576889,
"learning_rate": 3.7064707595002636e-07,
"loss": 1.418,
"num_input_tokens_seen": 233308160,
"step": 890
},
{
"epoch": 0.21983715766099185,
"grad_norm": 0.23375551402568817,
"learning_rate": 3.6408072716606346e-07,
"loss": 1.3544,
"num_input_tokens_seen": 233570304,
"step": 891
},
{
"epoch": 0.22008388847767085,
"grad_norm": 0.26496320962905884,
"learning_rate": 3.575708655954324e-07,
"loss": 1.2265,
"num_input_tokens_seen": 233832448,
"step": 892
},
{
"epoch": 0.22033061929434986,
"grad_norm": 0.19440144300460815,
"learning_rate": 3.511175705587433e-07,
"loss": 1.3137,
"num_input_tokens_seen": 234094592,
"step": 893
},
{
"epoch": 0.22057735011102886,
"grad_norm": 0.33545535802841187,
"learning_rate": 3.4472092068735917e-07,
"loss": 1.8718,
"num_input_tokens_seen": 234356736,
"step": 894
},
{
"epoch": 0.22082408092770786,
"grad_norm": 0.2817319631576538,
"learning_rate": 3.3838099392243915e-07,
"loss": 1.5016,
"num_input_tokens_seen": 234618880,
"step": 895
},
{
"epoch": 0.22107081174438686,
"grad_norm": 0.2984430193901062,
"learning_rate": 3.320978675139919e-07,
"loss": 1.779,
"num_input_tokens_seen": 234881024,
"step": 896
},
{
"epoch": 0.22131754256106587,
"grad_norm": 0.23902887105941772,
"learning_rate": 3.258716180199278e-07,
"loss": 1.3892,
"num_input_tokens_seen": 235143168,
"step": 897
},
{
"epoch": 0.22156427337774487,
"grad_norm": 0.20809443295001984,
"learning_rate": 3.1970232130513365e-07,
"loss": 1.8955,
"num_input_tokens_seen": 235405312,
"step": 898
},
{
"epoch": 0.22181100419442387,
"grad_norm": 0.6240611672401428,
"learning_rate": 3.135900525405428e-07,
"loss": 1.4981,
"num_input_tokens_seen": 235667456,
"step": 899
},
{
"epoch": 0.22205773501110287,
"grad_norm": 0.24363017082214355,
"learning_rate": 3.0753488620222037e-07,
"loss": 1.8729,
"num_input_tokens_seen": 235929600,
"step": 900
},
{
"epoch": 0.22230446582778188,
"grad_norm": 0.27527663111686707,
"learning_rate": 3.015368960704584e-07,
"loss": 2.0414,
"num_input_tokens_seen": 236191744,
"step": 901
},
{
"epoch": 0.22255119664446088,
"grad_norm": 0.2439730167388916,
"learning_rate": 2.9559615522887275e-07,
"loss": 1.706,
"num_input_tokens_seen": 236453888,
"step": 902
},
{
"epoch": 0.22279792746113988,
"grad_norm": 0.25705206394195557,
"learning_rate": 2.8971273606351656e-07,
"loss": 2.0021,
"num_input_tokens_seen": 236716032,
"step": 903
},
{
"epoch": 0.22304465827781889,
"grad_norm": 0.34864532947540283,
"learning_rate": 2.838867102619952e-07,
"loss": 1.7085,
"num_input_tokens_seen": 236978176,
"step": 904
},
{
"epoch": 0.22329138909449792,
"grad_norm": 0.4523051083087921,
"learning_rate": 2.7811814881259503e-07,
"loss": 1.4751,
"num_input_tokens_seen": 237240320,
"step": 905
},
{
"epoch": 0.22353811991117692,
"grad_norm": 0.33270108699798584,
"learning_rate": 2.724071220034158e-07,
"loss": 1.9118,
"num_input_tokens_seen": 237502464,
"step": 906
},
{
"epoch": 0.22378485072785592,
"grad_norm": 0.35357242822647095,
"learning_rate": 2.6675369942151864e-07,
"loss": 1.8829,
"num_input_tokens_seen": 237764608,
"step": 907
},
{
"epoch": 0.22403158154453492,
"grad_norm": 0.38699132204055786,
"learning_rate": 2.611579499520722e-07,
"loss": 1.6583,
"num_input_tokens_seen": 238026752,
"step": 908
},
{
"epoch": 0.22427831236121393,
"grad_norm": 0.37224283814430237,
"learning_rate": 2.556199417775174e-07,
"loss": 1.3216,
"num_input_tokens_seen": 238288896,
"step": 909
},
{
"epoch": 0.22452504317789293,
"grad_norm": 0.32813823223114014,
"learning_rate": 2.5013974237673824e-07,
"loss": 2.0885,
"num_input_tokens_seen": 238551040,
"step": 910
},
{
"epoch": 0.22477177399457193,
"grad_norm": 0.3540266752243042,
"learning_rate": 2.447174185242324e-07,
"loss": 1.6199,
"num_input_tokens_seen": 238813184,
"step": 911
},
{
"epoch": 0.22501850481125094,
"grad_norm": 0.22869907319545746,
"learning_rate": 2.3935303628930705e-07,
"loss": 2.0078,
"num_input_tokens_seen": 239075328,
"step": 912
},
{
"epoch": 0.22526523562792994,
"grad_norm": 0.31733882427215576,
"learning_rate": 2.3404666103526542e-07,
"loss": 1.334,
"num_input_tokens_seen": 239337472,
"step": 913
},
{
"epoch": 0.22551196644460894,
"grad_norm": 0.27598369121551514,
"learning_rate": 2.287983574186159e-07,
"loss": 1.3499,
"num_input_tokens_seen": 239599616,
"step": 914
},
{
"epoch": 0.22575869726128794,
"grad_norm": 0.1539166122674942,
"learning_rate": 2.2360818938828189e-07,
"loss": 1.4926,
"num_input_tokens_seen": 239861760,
"step": 915
},
{
"epoch": 0.22600542807796695,
"grad_norm": 0.2620428800582886,
"learning_rate": 2.1847622018482283e-07,
"loss": 1.8938,
"num_input_tokens_seen": 240123904,
"step": 916
},
{
"epoch": 0.22625215889464595,
"grad_norm": 0.36652788519859314,
"learning_rate": 2.134025123396638e-07,
"loss": 1.3867,
"num_input_tokens_seen": 240386048,
"step": 917
},
{
"epoch": 0.22649888971132495,
"grad_norm": 0.306090384721756,
"learning_rate": 2.083871276743338e-07,
"loss": 1.5576,
"num_input_tokens_seen": 240648192,
"step": 918
},
{
"epoch": 0.22674562052800395,
"grad_norm": 0.31827080249786377,
"learning_rate": 2.0343012729971244e-07,
"loss": 1.8278,
"num_input_tokens_seen": 240910336,
"step": 919
},
{
"epoch": 0.22699235134468296,
"grad_norm": 0.3130633533000946,
"learning_rate": 1.9853157161528468e-07,
"loss": 1.999,
"num_input_tokens_seen": 241172480,
"step": 920
},
{
"epoch": 0.22723908216136196,
"grad_norm": 0.2340356707572937,
"learning_rate": 1.9369152030840553e-07,
"loss": 2.1723,
"num_input_tokens_seen": 241434624,
"step": 921
},
{
"epoch": 0.22748581297804096,
"grad_norm": 0.4273170530796051,
"learning_rate": 1.8891003235357307e-07,
"loss": 1.7863,
"num_input_tokens_seen": 241696768,
"step": 922
},
{
"epoch": 0.22773254379471997,
"grad_norm": 0.2784411907196045,
"learning_rate": 1.841871660117095e-07,
"loss": 1.7997,
"num_input_tokens_seen": 241958912,
"step": 923
},
{
"epoch": 0.22797927461139897,
"grad_norm": 0.27668172121047974,
"learning_rate": 1.7952297882945e-07,
"loss": 1.7699,
"num_input_tokens_seen": 242221056,
"step": 924
},
{
"epoch": 0.22822600542807797,
"grad_norm": 0.27039194107055664,
"learning_rate": 1.7491752763844294e-07,
"loss": 1.7443,
"num_input_tokens_seen": 242483200,
"step": 925
},
{
"epoch": 0.22847273624475697,
"grad_norm": 0.24253779649734497,
"learning_rate": 1.7037086855465902e-07,
"loss": 1.4137,
"num_input_tokens_seen": 242745344,
"step": 926
},
{
"epoch": 0.22871946706143598,
"grad_norm": 0.262689471244812,
"learning_rate": 1.6588305697770313e-07,
"loss": 1.5132,
"num_input_tokens_seen": 243007488,
"step": 927
},
{
"epoch": 0.22896619787811498,
"grad_norm": 0.21166115999221802,
"learning_rate": 1.6145414759014433e-07,
"loss": 1.3376,
"num_input_tokens_seen": 243269632,
"step": 928
},
{
"epoch": 0.22921292869479398,
"grad_norm": 0.27536365389823914,
"learning_rate": 1.5708419435684463e-07,
"loss": 1.8721,
"num_input_tokens_seen": 243531776,
"step": 929
},
{
"epoch": 0.22945965951147299,
"grad_norm": 0.3286449611186981,
"learning_rate": 1.5277325052430569e-07,
"loss": 2.1457,
"num_input_tokens_seen": 243793920,
"step": 930
},
{
"epoch": 0.229706390328152,
"grad_norm": 0.2047201246023178,
"learning_rate": 1.4852136862001766e-07,
"loss": 1.4811,
"num_input_tokens_seen": 244056064,
"step": 931
},
{
"epoch": 0.229953121144831,
"grad_norm": 0.22693318128585815,
"learning_rate": 1.4432860045182019e-07,
"loss": 1.9241,
"num_input_tokens_seen": 244318208,
"step": 932
},
{
"epoch": 0.23019985196151,
"grad_norm": 0.583653450012207,
"learning_rate": 1.4019499710726913e-07,
"loss": 1.9567,
"num_input_tokens_seen": 244580352,
"step": 933
},
{
"epoch": 0.230446582778189,
"grad_norm": 0.3117119073867798,
"learning_rate": 1.3612060895301759e-07,
"loss": 1.6888,
"num_input_tokens_seen": 244842496,
"step": 934
},
{
"epoch": 0.230693313594868,
"grad_norm": 0.26275816559791565,
"learning_rate": 1.3210548563419857e-07,
"loss": 1.7499,
"num_input_tokens_seen": 245104640,
"step": 935
},
{
"epoch": 0.230940044411547,
"grad_norm": 0.18302679061889648,
"learning_rate": 1.2814967607382433e-07,
"loss": 1.1345,
"num_input_tokens_seen": 245366784,
"step": 936
},
{
"epoch": 0.231186775228226,
"grad_norm": 0.2012418955564499,
"learning_rate": 1.2425322847218368e-07,
"loss": 1.8273,
"num_input_tokens_seen": 245628928,
"step": 937
},
{
"epoch": 0.231433506044905,
"grad_norm": 0.23460544645786285,
"learning_rate": 1.2041619030626283e-07,
"loss": 1.8274,
"num_input_tokens_seen": 245891072,
"step": 938
},
{
"epoch": 0.231680236861584,
"grad_norm": 0.4306367337703705,
"learning_rate": 1.166386083291604e-07,
"loss": 1.6486,
"num_input_tokens_seen": 246153216,
"step": 939
},
{
"epoch": 0.231926967678263,
"grad_norm": 0.2089300900697708,
"learning_rate": 1.1292052856952063e-07,
"loss": 1.3933,
"num_input_tokens_seen": 246415360,
"step": 940
},
{
"epoch": 0.23217369849494202,
"grad_norm": 0.23853234946727753,
"learning_rate": 1.0926199633097156e-07,
"loss": 1.4156,
"num_input_tokens_seen": 246677504,
"step": 941
},
{
"epoch": 0.23242042931162102,
"grad_norm": 0.24944059550762177,
"learning_rate": 1.0566305619157502e-07,
"loss": 1.7874,
"num_input_tokens_seen": 246939648,
"step": 942
},
{
"epoch": 0.23266716012830002,
"grad_norm": 0.1825234442949295,
"learning_rate": 1.0212375200327973e-07,
"loss": 1.3472,
"num_input_tokens_seen": 247201792,
"step": 943
},
{
"epoch": 0.23291389094497902,
"grad_norm": 0.3297896981239319,
"learning_rate": 9.864412689139124e-08,
"loss": 1.6156,
"num_input_tokens_seen": 247463936,
"step": 944
},
{
"epoch": 0.23316062176165803,
"grad_norm": 0.30966678261756897,
"learning_rate": 9.522422325404234e-08,
"loss": 1.7015,
"num_input_tokens_seen": 247726080,
"step": 945
},
{
"epoch": 0.23340735257833703,
"grad_norm": 0.2826879024505615,
"learning_rate": 9.186408276168012e-08,
"loss": 1.757,
"num_input_tokens_seen": 247988224,
"step": 946
},
{
"epoch": 0.23365408339501603,
"grad_norm": 0.43711355328559875,
"learning_rate": 8.856374635655696e-08,
"loss": 1.8845,
"num_input_tokens_seen": 248250368,
"step": 947
},
{
"epoch": 0.23390081421169504,
"grad_norm": 0.16611160337924957,
"learning_rate": 8.53232542522292e-08,
"loss": 1.5545,
"num_input_tokens_seen": 248512512,
"step": 948
},
{
"epoch": 0.23414754502837404,
"grad_norm": 0.25117266178131104,
"learning_rate": 8.214264593307097e-08,
"loss": 1.5059,
"num_input_tokens_seen": 248774656,
"step": 949
},
{
"epoch": 0.23439427584505304,
"grad_norm": 0.31824541091918945,
"learning_rate": 7.90219601537906e-08,
"loss": 1.937,
"num_input_tokens_seen": 249036800,
"step": 950
},
{
"epoch": 0.23464100666173204,
"grad_norm": 0.3191492259502411,
"learning_rate": 7.59612349389599e-08,
"loss": 1.9477,
"num_input_tokens_seen": 249298944,
"step": 951
},
{
"epoch": 0.23488773747841105,
"grad_norm": 0.15992209315299988,
"learning_rate": 7.296050758254958e-08,
"loss": 1.7433,
"num_input_tokens_seen": 249561088,
"step": 952
},
{
"epoch": 0.23513446829509005,
"grad_norm": 0.27538275718688965,
"learning_rate": 7.001981464747565e-08,
"loss": 1.8585,
"num_input_tokens_seen": 249823232,
"step": 953
},
{
"epoch": 0.23538119911176905,
"grad_norm": 0.35428425669670105,
"learning_rate": 6.713919196515317e-08,
"loss": 1.8342,
"num_input_tokens_seen": 250085376,
"step": 954
},
{
"epoch": 0.23562792992844805,
"grad_norm": 0.3533397912979126,
"learning_rate": 6.431867463506047e-08,
"loss": 1.6695,
"num_input_tokens_seen": 250347520,
"step": 955
},
{
"epoch": 0.23587466074512706,
"grad_norm": 0.32426542043685913,
"learning_rate": 6.15582970243117e-08,
"loss": 2.0652,
"num_input_tokens_seen": 250609664,
"step": 956
},
{
"epoch": 0.23612139156180606,
"grad_norm": 0.22531364858150482,
"learning_rate": 5.8858092767236084e-08,
"loss": 1.7251,
"num_input_tokens_seen": 250871808,
"step": 957
},
{
"epoch": 0.23636812237848506,
"grad_norm": 0.2263481318950653,
"learning_rate": 5.621809476497098e-08,
"loss": 1.1222,
"num_input_tokens_seen": 251133952,
"step": 958
},
{
"epoch": 0.23661485319516407,
"grad_norm": 0.30886295437812805,
"learning_rate": 5.363833518505834e-08,
"loss": 1.8128,
"num_input_tokens_seen": 251396096,
"step": 959
},
{
"epoch": 0.23686158401184307,
"grad_norm": 0.3169589936733246,
"learning_rate": 5.111884546105506e-08,
"loss": 1.9473,
"num_input_tokens_seen": 251658240,
"step": 960
},
{
"epoch": 0.23710831482852207,
"grad_norm": 0.1736677587032318,
"learning_rate": 4.865965629214819e-08,
"loss": 1.4546,
"num_input_tokens_seen": 251920384,
"step": 961
},
{
"epoch": 0.23735504564520107,
"grad_norm": 0.23293748497962952,
"learning_rate": 4.626079764278202e-08,
"loss": 1.2854,
"num_input_tokens_seen": 252182528,
"step": 962
},
{
"epoch": 0.23760177646188008,
"grad_norm": 0.3253999352455139,
"learning_rate": 4.392229874229159e-08,
"loss": 1.5582,
"num_input_tokens_seen": 252444672,
"step": 963
},
{
"epoch": 0.23784850727855908,
"grad_norm": 0.35785549879074097,
"learning_rate": 4.164418808454806e-08,
"loss": 1.9794,
"num_input_tokens_seen": 252706816,
"step": 964
},
{
"epoch": 0.23809523809523808,
"grad_norm": 0.22039461135864258,
"learning_rate": 3.9426493427611177e-08,
"loss": 1.288,
"num_input_tokens_seen": 252968960,
"step": 965
},
{
"epoch": 0.23834196891191708,
"grad_norm": 0.24973900616168976,
"learning_rate": 3.726924179339009e-08,
"loss": 1.3325,
"num_input_tokens_seen": 253231104,
"step": 966
},
{
"epoch": 0.23858869972859612,
"grad_norm": 0.3697846531867981,
"learning_rate": 3.517245946731529e-08,
"loss": 1.75,
"num_input_tokens_seen": 253493248,
"step": 967
},
{
"epoch": 0.23883543054527512,
"grad_norm": 0.3277115225791931,
"learning_rate": 3.313617199801777e-08,
"loss": 1.598,
"num_input_tokens_seen": 253755392,
"step": 968
},
{
"epoch": 0.23908216136195412,
"grad_norm": 0.3776665925979614,
"learning_rate": 3.1160404197018155e-08,
"loss": 1.5441,
"num_input_tokens_seen": 254017536,
"step": 969
},
{
"epoch": 0.23932889217863312,
"grad_norm": 0.27291175723075867,
"learning_rate": 2.9245180138423033e-08,
"loss": 1.4075,
"num_input_tokens_seen": 254279680,
"step": 970
},
{
"epoch": 0.23957562299531213,
"grad_norm": 0.3305472135543823,
"learning_rate": 2.7390523158633552e-08,
"loss": 2.0716,
"num_input_tokens_seen": 254541824,
"step": 971
},
{
"epoch": 0.23982235381199113,
"grad_norm": 0.21496239304542542,
"learning_rate": 2.5596455856058966e-08,
"loss": 1.2715,
"num_input_tokens_seen": 254803968,
"step": 972
},
{
"epoch": 0.24006908462867013,
"grad_norm": 0.3123878538608551,
"learning_rate": 2.386300009084408e-08,
"loss": 1.6762,
"num_input_tokens_seen": 255066112,
"step": 973
},
{
"epoch": 0.24031581544534913,
"grad_norm": 0.24841855466365814,
"learning_rate": 2.219017698460002e-08,
"loss": 1.7913,
"num_input_tokens_seen": 255328256,
"step": 974
},
{
"epoch": 0.24056254626202814,
"grad_norm": 0.33678221702575684,
"learning_rate": 2.057800692014833e-08,
"loss": 1.5478,
"num_input_tokens_seen": 255590400,
"step": 975
},
{
"epoch": 0.24080927707870714,
"grad_norm": 0.20863652229309082,
"learning_rate": 1.9026509541272276e-08,
"loss": 1.7592,
"num_input_tokens_seen": 255852544,
"step": 976
},
{
"epoch": 0.24105600789538614,
"grad_norm": 0.2960647642612457,
"learning_rate": 1.753570375247815e-08,
"loss": 1.3895,
"num_input_tokens_seen": 256114688,
"step": 977
},
{
"epoch": 0.24130273871206515,
"grad_norm": 0.2581060230731964,
"learning_rate": 1.610560771876435e-08,
"loss": 1.9954,
"num_input_tokens_seen": 256376832,
"step": 978
},
{
"epoch": 0.24154946952874415,
"grad_norm": 0.25385424494743347,
"learning_rate": 1.4736238865398766e-08,
"loss": 1.7407,
"num_input_tokens_seen": 256638976,
"step": 979
},
{
"epoch": 0.24179620034542315,
"grad_norm": 0.13977891206741333,
"learning_rate": 1.3427613877709523e-08,
"loss": 1.3923,
"num_input_tokens_seen": 256901120,
"step": 980
},
{
"epoch": 0.24204293116210215,
"grad_norm": 0.26059940457344055,
"learning_rate": 1.2179748700879013e-08,
"loss": 1.7049,
"num_input_tokens_seen": 257163264,
"step": 981
},
{
"epoch": 0.24228966197878116,
"grad_norm": 0.23644216358661652,
"learning_rate": 1.0992658539750179e-08,
"loss": 1.8183,
"num_input_tokens_seen": 257425408,
"step": 982
},
{
"epoch": 0.24253639279546016,
"grad_norm": 0.39895159006118774,
"learning_rate": 9.866357858642206e-09,
"loss": 1.2922,
"num_input_tokens_seen": 257687552,
"step": 983
},
{
"epoch": 0.24278312361213916,
"grad_norm": 0.32731232047080994,
"learning_rate": 8.800860381173448e-09,
"loss": 1.5821,
"num_input_tokens_seen": 257949696,
"step": 984
},
{
"epoch": 0.24302985442881817,
"grad_norm": 0.36111322045326233,
"learning_rate": 7.796179090094891e-09,
"loss": 1.7553,
"num_input_tokens_seen": 258211840,
"step": 985
},
{
"epoch": 0.24327658524549717,
"grad_norm": 0.3253917992115021,
"learning_rate": 6.852326227130835e-09,
"loss": 1.3174,
"num_input_tokens_seen": 258473984,
"step": 986
},
{
"epoch": 0.24352331606217617,
"grad_norm": 0.2258145809173584,
"learning_rate": 5.969313292830126e-09,
"loss": 1.6489,
"num_input_tokens_seen": 258736128,
"step": 987
},
{
"epoch": 0.24377004687885517,
"grad_norm": 0.16833138465881348,
"learning_rate": 5.147151046426824e-09,
"loss": 1.2464,
"num_input_tokens_seen": 258998272,
"step": 988
},
{
"epoch": 0.24401677769553418,
"grad_norm": 0.45908233523368835,
"learning_rate": 4.385849505708084e-09,
"loss": 1.7759,
"num_input_tokens_seen": 259260416,
"step": 989
},
{
"epoch": 0.24426350851221318,
"grad_norm": 0.16207066178321838,
"learning_rate": 3.685417946894254e-09,
"loss": 1.3764,
"num_input_tokens_seen": 259522560,
"step": 990
},
{
"epoch": 0.24451023932889218,
"grad_norm": 0.21899637579917908,
"learning_rate": 3.0458649045211897e-09,
"loss": 1.9428,
"num_input_tokens_seen": 259784704,
"step": 991
},
{
"epoch": 0.24475697014557118,
"grad_norm": 0.3863295018672943,
"learning_rate": 2.4671981713420003e-09,
"loss": 1.942,
"num_input_tokens_seen": 260046848,
"step": 992
},
{
"epoch": 0.2450037009622502,
"grad_norm": 0.2451121211051941,
"learning_rate": 1.9494247982282386e-09,
"loss": 1.4942,
"num_input_tokens_seen": 260308992,
"step": 993
},
{
"epoch": 0.2452504317789292,
"grad_norm": 0.3088672161102295,
"learning_rate": 1.4925510940844157e-09,
"loss": 2.1903,
"num_input_tokens_seen": 260571136,
"step": 994
},
{
"epoch": 0.2454971625956082,
"grad_norm": 0.32511118054389954,
"learning_rate": 1.096582625772502e-09,
"loss": 1.7177,
"num_input_tokens_seen": 260833280,
"step": 995
},
{
"epoch": 0.2457438934122872,
"grad_norm": 0.23251359164714813,
"learning_rate": 7.615242180436521e-10,
"loss": 1.2304,
"num_input_tokens_seen": 261095424,
"step": 996
},
{
"epoch": 0.2459906242289662,
"grad_norm": 0.46146008372306824,
"learning_rate": 4.87379953478806e-10,
"loss": 1.1382,
"num_input_tokens_seen": 261357568,
"step": 997
},
{
"epoch": 0.2462373550456452,
"grad_norm": 0.2049003392457962,
"learning_rate": 2.741531724392843e-10,
"loss": 1.3837,
"num_input_tokens_seen": 261619712,
"step": 998
},
{
"epoch": 0.2464840858623242,
"grad_norm": 0.3614410161972046,
"learning_rate": 1.2184647302626585e-10,
"loss": 1.9884,
"num_input_tokens_seen": 261881856,
"step": 999
},
{
"epoch": 0.2467308166790032,
"grad_norm": 0.3488081097602844,
"learning_rate": 3.0461711048035415e-11,
"loss": 1.8701,
"num_input_tokens_seen": 262144000,
"step": 1000
}
],
"logging_steps": 1.0,
"max_steps": 1000,
"num_input_tokens_seen": 262144000,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 484794433536000.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}