{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.24832381425378694,
"eval_steps": 500,
"global_step": 1000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.00024832381425378696,
"grad_norm": 10.215656280517578,
"learning_rate": 0.0,
"loss": 2.4774,
"num_input_tokens_seen": 262144,
"step": 1
},
{
"epoch": 0.0004966476285075739,
"grad_norm": 11.095035552978516,
"learning_rate": 1.0000000000000001e-07,
"loss": 2.9956,
"num_input_tokens_seen": 524288,
"step": 2
},
{
"epoch": 0.0007449714427613609,
"grad_norm": 15.851714134216309,
"learning_rate": 2.0000000000000002e-07,
"loss": 3.5577,
"num_input_tokens_seen": 786432,
"step": 3
},
{
"epoch": 0.0009932952570151478,
"grad_norm": 13.334907531738281,
"learning_rate": 3.0000000000000004e-07,
"loss": 3.5265,
"num_input_tokens_seen": 1048576,
"step": 4
},
{
"epoch": 0.0012416190712689348,
"grad_norm": 13.653706550598145,
"learning_rate": 4.0000000000000003e-07,
"loss": 3.2742,
"num_input_tokens_seen": 1310720,
"step": 5
},
{
"epoch": 0.0014899428855227217,
"grad_norm": 10.820155143737793,
"learning_rate": 5.000000000000001e-07,
"loss": 2.7898,
"num_input_tokens_seen": 1572864,
"step": 6
},
{
"epoch": 0.0017382666997765085,
"grad_norm": 8.97152042388916,
"learning_rate": 6.000000000000001e-07,
"loss": 2.3872,
"num_input_tokens_seen": 1835008,
"step": 7
},
{
"epoch": 0.0019865905140302956,
"grad_norm": 8.390254020690918,
"learning_rate": 7.000000000000001e-07,
"loss": 2.4003,
"num_input_tokens_seen": 2097152,
"step": 8
},
{
"epoch": 0.0022349143282840824,
"grad_norm": 7.845387935638428,
"learning_rate": 8.000000000000001e-07,
"loss": 2.3165,
"num_input_tokens_seen": 2359296,
"step": 9
},
{
"epoch": 0.0024832381425378696,
"grad_norm": 10.649581909179688,
"learning_rate": 9.000000000000001e-07,
"loss": 3.193,
"num_input_tokens_seen": 2621440,
"step": 10
},
{
"epoch": 0.0027315619567916563,
"grad_norm": 9.258522033691406,
"learning_rate": 1.0000000000000002e-06,
"loss": 3.0098,
"num_input_tokens_seen": 2883584,
"step": 11
},
{
"epoch": 0.0029798857710454435,
"grad_norm": 8.765289306640625,
"learning_rate": 1.1e-06,
"loss": 2.5393,
"num_input_tokens_seen": 3145728,
"step": 12
},
{
"epoch": 0.00322820958529923,
"grad_norm": 10.003978729248047,
"learning_rate": 1.2000000000000002e-06,
"loss": 2.9451,
"num_input_tokens_seen": 3407872,
"step": 13
},
{
"epoch": 0.003476533399553017,
"grad_norm": 8.10649585723877,
"learning_rate": 1.3e-06,
"loss": 2.7237,
"num_input_tokens_seen": 3670016,
"step": 14
},
{
"epoch": 0.003724857213806804,
"grad_norm": 8.63984489440918,
"learning_rate": 1.4000000000000001e-06,
"loss": 2.922,
"num_input_tokens_seen": 3932160,
"step": 15
},
{
"epoch": 0.003973181028060591,
"grad_norm": 6.354504585266113,
"learning_rate": 1.5e-06,
"loss": 2.494,
"num_input_tokens_seen": 4194304,
"step": 16
},
{
"epoch": 0.004221504842314378,
"grad_norm": 6.988131046295166,
"learning_rate": 1.6000000000000001e-06,
"loss": 3.051,
"num_input_tokens_seen": 4456448,
"step": 17
},
{
"epoch": 0.004469828656568165,
"grad_norm": 6.21164608001709,
"learning_rate": 1.7000000000000002e-06,
"loss": 2.8812,
"num_input_tokens_seen": 4718592,
"step": 18
},
{
"epoch": 0.0047181524708219515,
"grad_norm": 7.339629173278809,
"learning_rate": 1.8000000000000001e-06,
"loss": 2.8418,
"num_input_tokens_seen": 4980736,
"step": 19
},
{
"epoch": 0.004966476285075739,
"grad_norm": 5.495388031005859,
"learning_rate": 1.9000000000000002e-06,
"loss": 2.4937,
"num_input_tokens_seen": 5242880,
"step": 20
},
{
"epoch": 0.005214800099329526,
"grad_norm": 4.960653305053711,
"learning_rate": 2.0000000000000003e-06,
"loss": 2.5324,
"num_input_tokens_seen": 5505024,
"step": 21
},
{
"epoch": 0.005463123913583313,
"grad_norm": 5.600866794586182,
"learning_rate": 2.1000000000000002e-06,
"loss": 2.9153,
"num_input_tokens_seen": 5767168,
"step": 22
},
{
"epoch": 0.005711447727837099,
"grad_norm": 4.116523265838623,
"learning_rate": 2.2e-06,
"loss": 2.4081,
"num_input_tokens_seen": 6029312,
"step": 23
},
{
"epoch": 0.005959771542090887,
"grad_norm": 3.069936990737915,
"learning_rate": 2.3000000000000004e-06,
"loss": 2.059,
"num_input_tokens_seen": 6291456,
"step": 24
},
{
"epoch": 0.006208095356344674,
"grad_norm": 5.050525188446045,
"learning_rate": 2.4000000000000003e-06,
"loss": 2.802,
"num_input_tokens_seen": 6553600,
"step": 25
},
{
"epoch": 0.00645641917059846,
"grad_norm": 4.871302604675293,
"learning_rate": 2.5e-06,
"loss": 2.5931,
"num_input_tokens_seen": 6815744,
"step": 26
},
{
"epoch": 0.006704742984852247,
"grad_norm": 3.577676773071289,
"learning_rate": 2.6e-06,
"loss": 2.6076,
"num_input_tokens_seen": 7077888,
"step": 27
},
{
"epoch": 0.006953066799106034,
"grad_norm": 4.384449481964111,
"learning_rate": 2.7000000000000004e-06,
"loss": 2.8547,
"num_input_tokens_seen": 7340032,
"step": 28
},
{
"epoch": 0.0072013906133598215,
"grad_norm": 2.9289448261260986,
"learning_rate": 2.8000000000000003e-06,
"loss": 2.1733,
"num_input_tokens_seen": 7602176,
"step": 29
},
{
"epoch": 0.007449714427613608,
"grad_norm": 3.34555983543396,
"learning_rate": 2.9e-06,
"loss": 2.4006,
"num_input_tokens_seen": 7864320,
"step": 30
},
{
"epoch": 0.007698038241867395,
"grad_norm": 2.8291356563568115,
"learning_rate": 3e-06,
"loss": 2.4605,
"num_input_tokens_seen": 8126464,
"step": 31
},
{
"epoch": 0.007946362056121183,
"grad_norm": 2.9072861671447754,
"learning_rate": 3.1000000000000004e-06,
"loss": 2.5204,
"num_input_tokens_seen": 8388608,
"step": 32
},
{
"epoch": 0.008194685870374968,
"grad_norm": 2.771606683731079,
"learning_rate": 3.2000000000000003e-06,
"loss": 2.4139,
"num_input_tokens_seen": 8650752,
"step": 33
},
{
"epoch": 0.008443009684628756,
"grad_norm": 2.580414295196533,
"learning_rate": 3.3000000000000006e-06,
"loss": 2.5677,
"num_input_tokens_seen": 8912896,
"step": 34
},
{
"epoch": 0.008691333498882544,
"grad_norm": 2.1037166118621826,
"learning_rate": 3.4000000000000005e-06,
"loss": 2.1773,
"num_input_tokens_seen": 9175040,
"step": 35
},
{
"epoch": 0.00893965731313633,
"grad_norm": 2.0104498863220215,
"learning_rate": 3.5e-06,
"loss": 2.1332,
"num_input_tokens_seen": 9437184,
"step": 36
},
{
"epoch": 0.009187981127390117,
"grad_norm": 1.954823613166809,
"learning_rate": 3.6000000000000003e-06,
"loss": 2.3993,
"num_input_tokens_seen": 9699328,
"step": 37
},
{
"epoch": 0.009436304941643903,
"grad_norm": 2.064880847930908,
"learning_rate": 3.7e-06,
"loss": 2.6064,
"num_input_tokens_seen": 9961472,
"step": 38
},
{
"epoch": 0.00968462875589769,
"grad_norm": 1.6983036994934082,
"learning_rate": 3.8000000000000005e-06,
"loss": 1.9788,
"num_input_tokens_seen": 10223616,
"step": 39
},
{
"epoch": 0.009932952570151478,
"grad_norm": 1.7680089473724365,
"learning_rate": 3.900000000000001e-06,
"loss": 2.3603,
"num_input_tokens_seen": 10485760,
"step": 40
},
{
"epoch": 0.010181276384405264,
"grad_norm": 1.5656706094741821,
"learning_rate": 4.000000000000001e-06,
"loss": 2.2015,
"num_input_tokens_seen": 10747904,
"step": 41
},
{
"epoch": 0.010429600198659052,
"grad_norm": 1.4727283716201782,
"learning_rate": 4.1e-06,
"loss": 2.0206,
"num_input_tokens_seen": 11010048,
"step": 42
},
{
"epoch": 0.010677924012912838,
"grad_norm": 1.2997132539749146,
"learning_rate": 4.2000000000000004e-06,
"loss": 1.7953,
"num_input_tokens_seen": 11272192,
"step": 43
},
{
"epoch": 0.010926247827166625,
"grad_norm": 1.439453363418579,
"learning_rate": 4.3e-06,
"loss": 2.124,
"num_input_tokens_seen": 11534336,
"step": 44
},
{
"epoch": 0.011174571641420413,
"grad_norm": 1.43881356716156,
"learning_rate": 4.4e-06,
"loss": 2.4577,
"num_input_tokens_seen": 11796480,
"step": 45
},
{
"epoch": 0.011422895455674199,
"grad_norm": 1.5415407419204712,
"learning_rate": 4.5e-06,
"loss": 2.3313,
"num_input_tokens_seen": 12058624,
"step": 46
},
{
"epoch": 0.011671219269927986,
"grad_norm": 1.2109966278076172,
"learning_rate": 4.600000000000001e-06,
"loss": 2.0387,
"num_input_tokens_seen": 12320768,
"step": 47
},
{
"epoch": 0.011919543084181774,
"grad_norm": 1.2937291860580444,
"learning_rate": 4.7e-06,
"loss": 2.3696,
"num_input_tokens_seen": 12582912,
"step": 48
},
{
"epoch": 0.01216786689843556,
"grad_norm": 1.2319666147232056,
"learning_rate": 4.800000000000001e-06,
"loss": 2.3702,
"num_input_tokens_seen": 12845056,
"step": 49
},
{
"epoch": 0.012416190712689347,
"grad_norm": 2.2272355556488037,
"learning_rate": 4.9000000000000005e-06,
"loss": 1.7134,
"num_input_tokens_seen": 13107200,
"step": 50
},
{
"epoch": 0.012664514526943133,
"grad_norm": 0.9094964265823364,
"learning_rate": 5e-06,
"loss": 1.8803,
"num_input_tokens_seen": 13369344,
"step": 51
},
{
"epoch": 0.01291283834119692,
"grad_norm": 1.3690983057022095,
"learning_rate": 5.1e-06,
"loss": 2.541,
"num_input_tokens_seen": 13631488,
"step": 52
},
{
"epoch": 0.013161162155450708,
"grad_norm": 0.8856329917907715,
"learning_rate": 5.2e-06,
"loss": 2.1698,
"num_input_tokens_seen": 13893632,
"step": 53
},
{
"epoch": 0.013409485969704494,
"grad_norm": 0.9775459170341492,
"learning_rate": 5.300000000000001e-06,
"loss": 2.0176,
"num_input_tokens_seen": 14155776,
"step": 54
},
{
"epoch": 0.013657809783958282,
"grad_norm": 0.8204602599143982,
"learning_rate": 5.400000000000001e-06,
"loss": 1.895,
"num_input_tokens_seen": 14417920,
"step": 55
},
{
"epoch": 0.013906133598212068,
"grad_norm": 0.9787035584449768,
"learning_rate": 5.500000000000001e-06,
"loss": 2.1046,
"num_input_tokens_seen": 14680064,
"step": 56
},
{
"epoch": 0.014154457412465855,
"grad_norm": 0.7377949357032776,
"learning_rate": 5.600000000000001e-06,
"loss": 1.9217,
"num_input_tokens_seen": 14942208,
"step": 57
},
{
"epoch": 0.014402781226719643,
"grad_norm": 0.8319304585456848,
"learning_rate": 5.7e-06,
"loss": 2.278,
"num_input_tokens_seen": 15204352,
"step": 58
},
{
"epoch": 0.014651105040973429,
"grad_norm": 0.8811312317848206,
"learning_rate": 5.8e-06,
"loss": 2.4527,
"num_input_tokens_seen": 15466496,
"step": 59
},
{
"epoch": 0.014899428855227216,
"grad_norm": 0.8537189364433289,
"learning_rate": 5.9e-06,
"loss": 2.0035,
"num_input_tokens_seen": 15728640,
"step": 60
},
{
"epoch": 0.015147752669481004,
"grad_norm": 0.8903886079788208,
"learning_rate": 6e-06,
"loss": 2.0959,
"num_input_tokens_seen": 15990784,
"step": 61
},
{
"epoch": 0.01539607648373479,
"grad_norm": 0.7955805659294128,
"learning_rate": 6.1e-06,
"loss": 1.9258,
"num_input_tokens_seen": 16252928,
"step": 62
},
{
"epoch": 0.015644400297988578,
"grad_norm": 0.7012743353843689,
"learning_rate": 6.200000000000001e-06,
"loss": 1.8984,
"num_input_tokens_seen": 16515072,
"step": 63
},
{
"epoch": 0.015892724112242365,
"grad_norm": 1.0720924139022827,
"learning_rate": 6.300000000000001e-06,
"loss": 2.3707,
"num_input_tokens_seen": 16777216,
"step": 64
},
{
"epoch": 0.01614104792649615,
"grad_norm": 0.7156981229782104,
"learning_rate": 6.4000000000000006e-06,
"loss": 2.0082,
"num_input_tokens_seen": 17039360,
"step": 65
},
{
"epoch": 0.016389371740749937,
"grad_norm": 1.379465103149414,
"learning_rate": 6.5000000000000004e-06,
"loss": 1.893,
"num_input_tokens_seen": 17301504,
"step": 66
},
{
"epoch": 0.016637695555003724,
"grad_norm": 0.6216750144958496,
"learning_rate": 6.600000000000001e-06,
"loss": 1.7876,
"num_input_tokens_seen": 17563648,
"step": 67
},
{
"epoch": 0.016886019369257512,
"grad_norm": 0.6770815253257751,
"learning_rate": 6.700000000000001e-06,
"loss": 1.813,
"num_input_tokens_seen": 17825792,
"step": 68
},
{
"epoch": 0.0171343431835113,
"grad_norm": 0.8068878054618835,
"learning_rate": 6.800000000000001e-06,
"loss": 1.9584,
"num_input_tokens_seen": 18087936,
"step": 69
},
{
"epoch": 0.017382666997765087,
"grad_norm": 0.642892062664032,
"learning_rate": 6.9e-06,
"loss": 1.5887,
"num_input_tokens_seen": 18350080,
"step": 70
},
{
"epoch": 0.01763099081201887,
"grad_norm": 0.6749325394630432,
"learning_rate": 7e-06,
"loss": 1.7739,
"num_input_tokens_seen": 18612224,
"step": 71
},
{
"epoch": 0.01787931462627266,
"grad_norm": 0.6361634135246277,
"learning_rate": 7.100000000000001e-06,
"loss": 1.8437,
"num_input_tokens_seen": 18874368,
"step": 72
},
{
"epoch": 0.018127638440526447,
"grad_norm": 0.8063591718673706,
"learning_rate": 7.2000000000000005e-06,
"loss": 1.7656,
"num_input_tokens_seen": 19136512,
"step": 73
},
{
"epoch": 0.018375962254780234,
"grad_norm": 0.608739972114563,
"learning_rate": 7.3e-06,
"loss": 2.0104,
"num_input_tokens_seen": 19398656,
"step": 74
},
{
"epoch": 0.018624286069034022,
"grad_norm": 0.6015883088111877,
"learning_rate": 7.4e-06,
"loss": 2.0805,
"num_input_tokens_seen": 19660800,
"step": 75
},
{
"epoch": 0.018872609883287806,
"grad_norm": 0.5737051367759705,
"learning_rate": 7.500000000000001e-06,
"loss": 1.9203,
"num_input_tokens_seen": 19922944,
"step": 76
},
{
"epoch": 0.019120933697541594,
"grad_norm": 0.5844286680221558,
"learning_rate": 7.600000000000001e-06,
"loss": 2.0768,
"num_input_tokens_seen": 20185088,
"step": 77
},
{
"epoch": 0.01936925751179538,
"grad_norm": 0.5958046913146973,
"learning_rate": 7.7e-06,
"loss": 1.5849,
"num_input_tokens_seen": 20447232,
"step": 78
},
{
"epoch": 0.01961758132604917,
"grad_norm": 0.7638349533081055,
"learning_rate": 7.800000000000002e-06,
"loss": 2.2179,
"num_input_tokens_seen": 20709376,
"step": 79
},
{
"epoch": 0.019865905140302956,
"grad_norm": 0.5547915697097778,
"learning_rate": 7.9e-06,
"loss": 1.8943,
"num_input_tokens_seen": 20971520,
"step": 80
},
{
"epoch": 0.02011422895455674,
"grad_norm": 0.45996469259262085,
"learning_rate": 8.000000000000001e-06,
"loss": 1.5404,
"num_input_tokens_seen": 21233664,
"step": 81
},
{
"epoch": 0.020362552768810528,
"grad_norm": 0.5060412883758545,
"learning_rate": 8.1e-06,
"loss": 1.7184,
"num_input_tokens_seen": 21495808,
"step": 82
},
{
"epoch": 0.020610876583064316,
"grad_norm": 0.5570970773696899,
"learning_rate": 8.2e-06,
"loss": 2.0638,
"num_input_tokens_seen": 21757952,
"step": 83
},
{
"epoch": 0.020859200397318103,
"grad_norm": 0.47680890560150146,
"learning_rate": 8.3e-06,
"loss": 1.4177,
"num_input_tokens_seen": 22020096,
"step": 84
},
{
"epoch": 0.02110752421157189,
"grad_norm": 0.5959429144859314,
"learning_rate": 8.400000000000001e-06,
"loss": 2.1659,
"num_input_tokens_seen": 22282240,
"step": 85
},
{
"epoch": 0.021355848025825675,
"grad_norm": 0.5277214646339417,
"learning_rate": 8.5e-06,
"loss": 1.8083,
"num_input_tokens_seen": 22544384,
"step": 86
},
{
"epoch": 0.021604171840079463,
"grad_norm": 0.5346829891204834,
"learning_rate": 8.6e-06,
"loss": 1.9323,
"num_input_tokens_seen": 22806528,
"step": 87
},
{
"epoch": 0.02185249565433325,
"grad_norm": 0.5096192359924316,
"learning_rate": 8.700000000000001e-06,
"loss": 1.401,
"num_input_tokens_seen": 23068672,
"step": 88
},
{
"epoch": 0.022100819468587038,
"grad_norm": 0.49696552753448486,
"learning_rate": 8.8e-06,
"loss": 1.4828,
"num_input_tokens_seen": 23330816,
"step": 89
},
{
"epoch": 0.022349143282840826,
"grad_norm": 0.5156976580619812,
"learning_rate": 8.900000000000001e-06,
"loss": 1.9016,
"num_input_tokens_seen": 23592960,
"step": 90
},
{
"epoch": 0.02259746709709461,
"grad_norm": 0.4339764714241028,
"learning_rate": 9e-06,
"loss": 1.3645,
"num_input_tokens_seen": 23855104,
"step": 91
},
{
"epoch": 0.022845790911348397,
"grad_norm": 0.5163502097129822,
"learning_rate": 9.100000000000001e-06,
"loss": 1.9044,
"num_input_tokens_seen": 24117248,
"step": 92
},
{
"epoch": 0.023094114725602185,
"grad_norm": 0.44321632385253906,
"learning_rate": 9.200000000000002e-06,
"loss": 1.7322,
"num_input_tokens_seen": 24379392,
"step": 93
},
{
"epoch": 0.023342438539855973,
"grad_norm": 0.5554404258728027,
"learning_rate": 9.3e-06,
"loss": 2.0536,
"num_input_tokens_seen": 24641536,
"step": 94
},
{
"epoch": 0.02359076235410976,
"grad_norm": 0.5148480534553528,
"learning_rate": 9.4e-06,
"loss": 1.9353,
"num_input_tokens_seen": 24903680,
"step": 95
},
{
"epoch": 0.023839086168363548,
"grad_norm": 0.4365761876106262,
"learning_rate": 9.5e-06,
"loss": 2.1485,
"num_input_tokens_seen": 25165824,
"step": 96
},
{
"epoch": 0.024087409982617332,
"grad_norm": 0.6153504252433777,
"learning_rate": 9.600000000000001e-06,
"loss": 2.3459,
"num_input_tokens_seen": 25427968,
"step": 97
},
{
"epoch": 0.02433573379687112,
"grad_norm": 0.7189298272132874,
"learning_rate": 9.7e-06,
"loss": 1.5111,
"num_input_tokens_seen": 25690112,
"step": 98
},
{
"epoch": 0.024584057611124907,
"grad_norm": 0.40012645721435547,
"learning_rate": 9.800000000000001e-06,
"loss": 2.0235,
"num_input_tokens_seen": 25952256,
"step": 99
},
{
"epoch": 0.024832381425378695,
"grad_norm": 1.3065471649169922,
"learning_rate": 9.9e-06,
"loss": 2.1165,
"num_input_tokens_seen": 26214400,
"step": 100
},
{
"epoch": 0.025080705239632482,
"grad_norm": 0.7975507378578186,
"learning_rate": 1e-05,
"loss": 1.722,
"num_input_tokens_seen": 26476544,
"step": 101
},
{
"epoch": 0.025329029053886266,
"grad_norm": 0.5044583678245544,
"learning_rate": 9.999969538288953e-06,
"loss": 2.0617,
"num_input_tokens_seen": 26738688,
"step": 102
},
{
"epoch": 0.025577352868140054,
"grad_norm": 0.6180616021156311,
"learning_rate": 9.999878153526974e-06,
"loss": 1.8041,
"num_input_tokens_seen": 27000832,
"step": 103
},
{
"epoch": 0.02582567668239384,
"grad_norm": 0.3513893485069275,
"learning_rate": 9.999725846827562e-06,
"loss": 2.0049,
"num_input_tokens_seen": 27262976,
"step": 104
},
{
"epoch": 0.02607400049664763,
"grad_norm": 0.5015475153923035,
"learning_rate": 9.999512620046523e-06,
"loss": 1.9308,
"num_input_tokens_seen": 27525120,
"step": 105
},
{
"epoch": 0.026322324310901417,
"grad_norm": 0.35490310192108154,
"learning_rate": 9.999238475781957e-06,
"loss": 1.7832,
"num_input_tokens_seen": 27787264,
"step": 106
},
{
"epoch": 0.0265706481251552,
"grad_norm": 0.4982094168663025,
"learning_rate": 9.998903417374228e-06,
"loss": 2.1659,
"num_input_tokens_seen": 28049408,
"step": 107
},
{
"epoch": 0.02681897193940899,
"grad_norm": 0.3601139783859253,
"learning_rate": 9.998507448905917e-06,
"loss": 1.8221,
"num_input_tokens_seen": 28311552,
"step": 108
},
{
"epoch": 0.027067295753662776,
"grad_norm": 0.47446349263191223,
"learning_rate": 9.998050575201772e-06,
"loss": 1.4861,
"num_input_tokens_seen": 28573696,
"step": 109
},
{
"epoch": 0.027315619567916564,
"grad_norm": 0.4510415196418762,
"learning_rate": 9.997532801828659e-06,
"loss": 1.8413,
"num_input_tokens_seen": 28835840,
"step": 110
},
{
"epoch": 0.02756394338217035,
"grad_norm": 0.34447139501571655,
"learning_rate": 9.99695413509548e-06,
"loss": 1.9358,
"num_input_tokens_seen": 29097984,
"step": 111
},
{
"epoch": 0.027812267196424136,
"grad_norm": 0.7382912039756775,
"learning_rate": 9.996314582053106e-06,
"loss": 2.0478,
"num_input_tokens_seen": 29360128,
"step": 112
},
{
"epoch": 0.028060591010677923,
"grad_norm": 0.4270409345626831,
"learning_rate": 9.995614150494293e-06,
"loss": 1.4107,
"num_input_tokens_seen": 29622272,
"step": 113
},
{
"epoch": 0.02830891482493171,
"grad_norm": 0.6236165761947632,
"learning_rate": 9.994852848953574e-06,
"loss": 1.9327,
"num_input_tokens_seen": 29884416,
"step": 114
},
{
"epoch": 0.0285572386391855,
"grad_norm": 0.44820812344551086,
"learning_rate": 9.994030686707171e-06,
"loss": 1.6324,
"num_input_tokens_seen": 30146560,
"step": 115
},
{
"epoch": 0.028805562453439286,
"grad_norm": 0.34471920132637024,
"learning_rate": 9.993147673772869e-06,
"loss": 2.0254,
"num_input_tokens_seen": 30408704,
"step": 116
},
{
"epoch": 0.02905388626769307,
"grad_norm": 0.4757576584815979,
"learning_rate": 9.992203820909906e-06,
"loss": 2.1909,
"num_input_tokens_seen": 30670848,
"step": 117
},
{
"epoch": 0.029302210081946858,
"grad_norm": 0.5577523112297058,
"learning_rate": 9.991199139618828e-06,
"loss": 1.7591,
"num_input_tokens_seen": 30932992,
"step": 118
},
{
"epoch": 0.029550533896200645,
"grad_norm": 0.4104521572589874,
"learning_rate": 9.990133642141359e-06,
"loss": 1.8473,
"num_input_tokens_seen": 31195136,
"step": 119
},
{
"epoch": 0.029798857710454433,
"grad_norm": 0.3846544921398163,
"learning_rate": 9.989007341460251e-06,
"loss": 1.786,
"num_input_tokens_seen": 31457280,
"step": 120
},
{
"epoch": 0.03004718152470822,
"grad_norm": 0.5335818529129028,
"learning_rate": 9.987820251299121e-06,
"loss": 2.5372,
"num_input_tokens_seen": 31719424,
"step": 121
},
{
"epoch": 0.030295505338962008,
"grad_norm": 0.5046745538711548,
"learning_rate": 9.98657238612229e-06,
"loss": 2.0604,
"num_input_tokens_seen": 31981568,
"step": 122
},
{
"epoch": 0.030543829153215792,
"grad_norm": 0.554063618183136,
"learning_rate": 9.985263761134602e-06,
"loss": 2.0795,
"num_input_tokens_seen": 32243712,
"step": 123
},
{
"epoch": 0.03079215296746958,
"grad_norm": 0.3117339611053467,
"learning_rate": 9.983894392281237e-06,
"loss": 1.7701,
"num_input_tokens_seen": 32505856,
"step": 124
},
{
"epoch": 0.031040476781723367,
"grad_norm": 0.42565709352493286,
"learning_rate": 9.982464296247523e-06,
"loss": 1.7209,
"num_input_tokens_seen": 32768000,
"step": 125
},
{
"epoch": 0.031288800595977155,
"grad_norm": 0.5742325782775879,
"learning_rate": 9.980973490458728e-06,
"loss": 1.5624,
"num_input_tokens_seen": 33030144,
"step": 126
},
{
"epoch": 0.03153712441023094,
"grad_norm": 0.4478677213191986,
"learning_rate": 9.979421993079853e-06,
"loss": 2.0519,
"num_input_tokens_seen": 33292288,
"step": 127
},
{
"epoch": 0.03178544822448473,
"grad_norm": 0.40014106035232544,
"learning_rate": 9.9778098230154e-06,
"loss": 1.7002,
"num_input_tokens_seen": 33554432,
"step": 128
},
{
"epoch": 0.032033772038738514,
"grad_norm": 0.37266790866851807,
"learning_rate": 9.976136999909156e-06,
"loss": 1.7795,
"num_input_tokens_seen": 33816576,
"step": 129
},
{
"epoch": 0.0322820958529923,
"grad_norm": 0.7558661103248596,
"learning_rate": 9.974403544143942e-06,
"loss": 1.5417,
"num_input_tokens_seen": 34078720,
"step": 130
},
{
"epoch": 0.03253041966724609,
"grad_norm": 0.42118939757347107,
"learning_rate": 9.972609476841368e-06,
"loss": 1.728,
"num_input_tokens_seen": 34340864,
"step": 131
},
{
"epoch": 0.032778743481499874,
"grad_norm": 0.45237675309181213,
"learning_rate": 9.970754819861577e-06,
"loss": 1.651,
"num_input_tokens_seen": 34603008,
"step": 132
},
{
"epoch": 0.033027067295753665,
"grad_norm": 0.42204225063323975,
"learning_rate": 9.968839595802982e-06,
"loss": 1.6179,
"num_input_tokens_seen": 34865152,
"step": 133
},
{
"epoch": 0.03327539111000745,
"grad_norm": 0.7443103790283203,
"learning_rate": 9.966863828001982e-06,
"loss": 2.0339,
"num_input_tokens_seen": 35127296,
"step": 134
},
{
"epoch": 0.03352371492426124,
"grad_norm": 0.46790680289268494,
"learning_rate": 9.964827540532685e-06,
"loss": 1.7902,
"num_input_tokens_seen": 35389440,
"step": 135
},
{
"epoch": 0.033772038738515024,
"grad_norm": 0.4609277546405792,
"learning_rate": 9.962730758206612e-06,
"loss": 1.7105,
"num_input_tokens_seen": 35651584,
"step": 136
},
{
"epoch": 0.03402036255276881,
"grad_norm": 0.4367188513278961,
"learning_rate": 9.960573506572391e-06,
"loss": 1.5516,
"num_input_tokens_seen": 35913728,
"step": 137
},
{
"epoch": 0.0342686863670226,
"grad_norm": 0.518988311290741,
"learning_rate": 9.958355811915452e-06,
"loss": 1.4072,
"num_input_tokens_seen": 36175872,
"step": 138
},
{
"epoch": 0.034517010181276384,
"grad_norm": 0.32017022371292114,
"learning_rate": 9.95607770125771e-06,
"loss": 1.7326,
"num_input_tokens_seen": 36438016,
"step": 139
},
{
"epoch": 0.034765333995530175,
"grad_norm": 0.4322156310081482,
"learning_rate": 9.953739202357219e-06,
"loss": 1.3485,
"num_input_tokens_seen": 36700160,
"step": 140
},
{
"epoch": 0.03501365780978396,
"grad_norm": 0.5065650939941406,
"learning_rate": 9.951340343707852e-06,
"loss": 1.8844,
"num_input_tokens_seen": 36962304,
"step": 141
},
{
"epoch": 0.03526198162403774,
"grad_norm": 0.46633732318878174,
"learning_rate": 9.948881154538946e-06,
"loss": 1.0832,
"num_input_tokens_seen": 37224448,
"step": 142
},
{
"epoch": 0.035510305438291534,
"grad_norm": 0.42399951815605164,
"learning_rate": 9.946361664814942e-06,
"loss": 2.0688,
"num_input_tokens_seen": 37486592,
"step": 143
},
{
"epoch": 0.03575862925254532,
"grad_norm": 0.6287701725959778,
"learning_rate": 9.94378190523503e-06,
"loss": 1.6954,
"num_input_tokens_seen": 37748736,
"step": 144
},
{
"epoch": 0.03600695306679911,
"grad_norm": 0.38575854897499084,
"learning_rate": 9.941141907232766e-06,
"loss": 1.6803,
"num_input_tokens_seen": 38010880,
"step": 145
},
{
"epoch": 0.03625527688105289,
"grad_norm": 0.5871158242225647,
"learning_rate": 9.938441702975689e-06,
"loss": 2.2275,
"num_input_tokens_seen": 38273024,
"step": 146
},
{
"epoch": 0.03650360069530668,
"grad_norm": 0.4878631830215454,
"learning_rate": 9.93568132536494e-06,
"loss": 1.9927,
"num_input_tokens_seen": 38535168,
"step": 147
},
{
"epoch": 0.03675192450956047,
"grad_norm": 0.6624669432640076,
"learning_rate": 9.932860808034847e-06,
"loss": 1.6445,
"num_input_tokens_seen": 38797312,
"step": 148
},
{
"epoch": 0.03700024832381425,
"grad_norm": 0.523557722568512,
"learning_rate": 9.929980185352525e-06,
"loss": 1.9669,
"num_input_tokens_seen": 39059456,
"step": 149
},
{
"epoch": 0.037248572138068044,
"grad_norm": 0.3871573805809021,
"learning_rate": 9.927039492417452e-06,
"loss": 1.57,
"num_input_tokens_seen": 39321600,
"step": 150
},
{
"epoch": 0.03749689595232183,
"grad_norm": 0.459677129983902,
"learning_rate": 9.924038765061042e-06,
"loss": 1.7627,
"num_input_tokens_seen": 39583744,
"step": 151
},
{
"epoch": 0.03774521976657561,
"grad_norm": 0.7199493646621704,
"learning_rate": 9.92097803984621e-06,
"loss": 1.6882,
"num_input_tokens_seen": 39845888,
"step": 152
},
{
"epoch": 0.0379935435808294,
"grad_norm": 0.4936988353729248,
"learning_rate": 9.91785735406693e-06,
"loss": 1.8768,
"num_input_tokens_seen": 40108032,
"step": 153
},
{
"epoch": 0.03824186739508319,
"grad_norm": 0.44318658113479614,
"learning_rate": 9.914676745747772e-06,
"loss": 1.6635,
"num_input_tokens_seen": 40370176,
"step": 154
},
{
"epoch": 0.03849019120933698,
"grad_norm": 0.438924103975296,
"learning_rate": 9.911436253643445e-06,
"loss": 1.6874,
"num_input_tokens_seen": 40632320,
"step": 155
},
{
"epoch": 0.03873851502359076,
"grad_norm": 0.37632086873054504,
"learning_rate": 9.908135917238321e-06,
"loss": 2.1325,
"num_input_tokens_seen": 40894464,
"step": 156
},
{
"epoch": 0.03898683883784455,
"grad_norm": 0.4754061698913574,
"learning_rate": 9.904775776745959e-06,
"loss": 1.6958,
"num_input_tokens_seen": 41156608,
"step": 157
},
{
"epoch": 0.03923516265209834,
"grad_norm": 0.629783570766449,
"learning_rate": 9.901355873108611e-06,
"loss": 1.6854,
"num_input_tokens_seen": 41418752,
"step": 158
},
{
"epoch": 0.03948348646635212,
"grad_norm": 0.4472093880176544,
"learning_rate": 9.89787624799672e-06,
"loss": 1.9023,
"num_input_tokens_seen": 41680896,
"step": 159
},
{
"epoch": 0.03973181028060591,
"grad_norm": 0.35216960310935974,
"learning_rate": 9.894336943808426e-06,
"loss": 1.9459,
"num_input_tokens_seen": 41943040,
"step": 160
},
{
"epoch": 0.0399801340948597,
"grad_norm": 0.34789201617240906,
"learning_rate": 9.890738003669029e-06,
"loss": 2.0254,
"num_input_tokens_seen": 42205184,
"step": 161
},
{
"epoch": 0.04022845790911348,
"grad_norm": 0.5008848309516907,
"learning_rate": 9.887079471430481e-06,
"loss": 1.7256,
"num_input_tokens_seen": 42467328,
"step": 162
},
{
"epoch": 0.04047678172336727,
"grad_norm": 0.2768523395061493,
"learning_rate": 9.883361391670841e-06,
"loss": 1.4073,
"num_input_tokens_seen": 42729472,
"step": 163
},
{
"epoch": 0.040725105537621056,
"grad_norm": 0.25057297945022583,
"learning_rate": 9.879583809693737e-06,
"loss": 1.8378,
"num_input_tokens_seen": 42991616,
"step": 164
},
{
"epoch": 0.04097342935187485,
"grad_norm": 0.34032392501831055,
"learning_rate": 9.875746771527817e-06,
"loss": 2.0236,
"num_input_tokens_seen": 43253760,
"step": 165
},
{
"epoch": 0.04122175316612863,
"grad_norm": 0.34935250878334045,
"learning_rate": 9.871850323926178e-06,
"loss": 1.25,
"num_input_tokens_seen": 43515904,
"step": 166
},
{
"epoch": 0.041470076980382416,
"grad_norm": 0.4536452889442444,
"learning_rate": 9.867894514365802e-06,
"loss": 1.9923,
"num_input_tokens_seen": 43778048,
"step": 167
},
{
"epoch": 0.04171840079463621,
"grad_norm": 0.38076460361480713,
"learning_rate": 9.863879391046985e-06,
"loss": 1.6626,
"num_input_tokens_seen": 44040192,
"step": 168
},
{
"epoch": 0.04196672460888999,
"grad_norm": 0.4140561521053314,
"learning_rate": 9.859805002892733e-06,
"loss": 1.7342,
"num_input_tokens_seen": 44302336,
"step": 169
},
{
"epoch": 0.04221504842314378,
"grad_norm": 0.5397564172744751,
"learning_rate": 9.85567139954818e-06,
"loss": 1.8625,
"num_input_tokens_seen": 44564480,
"step": 170
},
{
"epoch": 0.042463372237397566,
"grad_norm": 0.2765776216983795,
"learning_rate": 9.851478631379982e-06,
"loss": 1.9361,
"num_input_tokens_seen": 44826624,
"step": 171
},
{
"epoch": 0.04271169605165135,
"grad_norm": 0.3303300440311432,
"learning_rate": 9.847226749475696e-06,
"loss": 1.9308,
"num_input_tokens_seen": 45088768,
"step": 172
},
{
"epoch": 0.04296001986590514,
"grad_norm": 0.5077419281005859,
"learning_rate": 9.842915805643156e-06,
"loss": 1.8804,
"num_input_tokens_seen": 45350912,
"step": 173
},
{
"epoch": 0.043208343680158925,
"grad_norm": 0.5909125208854675,
"learning_rate": 9.838545852409857e-06,
"loss": 1.8626,
"num_input_tokens_seen": 45613056,
"step": 174
},
{
"epoch": 0.04345666749441272,
"grad_norm": 0.40436893701553345,
"learning_rate": 9.834116943022299e-06,
"loss": 1.3216,
"num_input_tokens_seen": 45875200,
"step": 175
},
{
"epoch": 0.0437049913086665,
"grad_norm": 0.2543845772743225,
"learning_rate": 9.829629131445342e-06,
"loss": 1.8692,
"num_input_tokens_seen": 46137344,
"step": 176
},
{
"epoch": 0.043953315122920285,
"grad_norm": 0.3154657185077667,
"learning_rate": 9.825082472361558e-06,
"loss": 1.8267,
"num_input_tokens_seen": 46399488,
"step": 177
},
{
"epoch": 0.044201638937174076,
"grad_norm": 0.2858025133609772,
"learning_rate": 9.82047702117055e-06,
"loss": 2.0557,
"num_input_tokens_seen": 46661632,
"step": 178
},
{
"epoch": 0.04444996275142786,
"grad_norm": 0.47114500403404236,
"learning_rate": 9.815812833988292e-06,
"loss": 1.8631,
"num_input_tokens_seen": 46923776,
"step": 179
},
{
"epoch": 0.04469828656568165,
"grad_norm": 0.3492976427078247,
"learning_rate": 9.811089967646427e-06,
"loss": 1.6999,
"num_input_tokens_seen": 47185920,
"step": 180
},
{
"epoch": 0.044946610379935435,
"grad_norm": 0.5230903625488281,
"learning_rate": 9.806308479691595e-06,
"loss": 1.9586,
"num_input_tokens_seen": 47448064,
"step": 181
},
{
"epoch": 0.04519493419418922,
"grad_norm": 0.3736109733581543,
"learning_rate": 9.801468428384716e-06,
"loss": 1.6724,
"num_input_tokens_seen": 47710208,
"step": 182
},
{
"epoch": 0.04544325800844301,
"grad_norm": 0.38272184133529663,
"learning_rate": 9.796569872700287e-06,
"loss": 2.0805,
"num_input_tokens_seen": 47972352,
"step": 183
},
{
"epoch": 0.045691581822696795,
"grad_norm": 0.3838115632534027,
"learning_rate": 9.791612872325667e-06,
"loss": 2.076,
"num_input_tokens_seen": 48234496,
"step": 184
},
{
"epoch": 0.045939905636950586,
"grad_norm": 0.39449867606163025,
"learning_rate": 9.786597487660336e-06,
"loss": 1.9271,
"num_input_tokens_seen": 48496640,
"step": 185
},
{
"epoch": 0.04618822945120437,
"grad_norm": 0.5604011416435242,
"learning_rate": 9.781523779815178e-06,
"loss": 2.0907,
"num_input_tokens_seen": 48758784,
"step": 186
},
{
"epoch": 0.04643655326545816,
"grad_norm": 0.756406307220459,
"learning_rate": 9.776391810611719e-06,
"loss": 1.8081,
"num_input_tokens_seen": 49020928,
"step": 187
},
{
"epoch": 0.046684877079711945,
"grad_norm": 0.3678906261920929,
"learning_rate": 9.771201642581384e-06,
"loss": 1.2397,
"num_input_tokens_seen": 49283072,
"step": 188
},
{
"epoch": 0.04693320089396573,
"grad_norm": 0.4872545599937439,
"learning_rate": 9.765953338964736e-06,
"loss": 1.3951,
"num_input_tokens_seen": 49545216,
"step": 189
},
{
"epoch": 0.04718152470821952,
"grad_norm": 0.46098363399505615,
"learning_rate": 9.760646963710694e-06,
"loss": 2.052,
"num_input_tokens_seen": 49807360,
"step": 190
},
{
"epoch": 0.047429848522473304,
"grad_norm": 0.675818681716919,
"learning_rate": 9.755282581475769e-06,
"loss": 1.7571,
"num_input_tokens_seen": 50069504,
"step": 191
},
{
"epoch": 0.047678172336727095,
"grad_norm": 0.6060460209846497,
"learning_rate": 9.749860257623262e-06,
"loss": 1.854,
"num_input_tokens_seen": 50331648,
"step": 192
},
{
"epoch": 0.04792649615098088,
"grad_norm": 0.47795215249061584,
"learning_rate": 9.744380058222483e-06,
"loss": 1.7057,
"num_input_tokens_seen": 50593792,
"step": 193
},
{
"epoch": 0.048174819965234664,
"grad_norm": 0.5348641872406006,
"learning_rate": 9.73884205004793e-06,
"loss": 1.8599,
"num_input_tokens_seen": 50855936,
"step": 194
},
{
"epoch": 0.048423143779488455,
"grad_norm": 0.4470706284046173,
"learning_rate": 9.733246300578482e-06,
"loss": 1.427,
"num_input_tokens_seen": 51118080,
"step": 195
},
{
"epoch": 0.04867146759374224,
"grad_norm": 0.35019198060035706,
"learning_rate": 9.727592877996585e-06,
"loss": 1.6146,
"num_input_tokens_seen": 51380224,
"step": 196
},
{
"epoch": 0.04891979140799603,
"grad_norm": 0.4542747139930725,
"learning_rate": 9.721881851187406e-06,
"loss": 1.8169,
"num_input_tokens_seen": 51642368,
"step": 197
},
{
"epoch": 0.049168115222249814,
"grad_norm": 0.5041635632514954,
"learning_rate": 9.716113289738005e-06,
"loss": 1.7434,
"num_input_tokens_seen": 51904512,
"step": 198
},
{
"epoch": 0.0494164390365036,
"grad_norm": 0.39864203333854675,
"learning_rate": 9.710287263936485e-06,
"loss": 1.5456,
"num_input_tokens_seen": 52166656,
"step": 199
},
{
"epoch": 0.04966476285075739,
"grad_norm": 0.4882585108280182,
"learning_rate": 9.704403844771128e-06,
"loss": 1.5535,
"num_input_tokens_seen": 52428800,
"step": 200
},
{
"epoch": 0.049913086665011173,
"grad_norm": 0.33271995186805725,
"learning_rate": 9.698463103929542e-06,
"loss": 1.7225,
"num_input_tokens_seen": 52690944,
"step": 201
},
{
"epoch": 0.050161410479264965,
"grad_norm": 0.5818848609924316,
"learning_rate": 9.69246511379778e-06,
"loss": 1.8262,
"num_input_tokens_seen": 52953088,
"step": 202
},
{
"epoch": 0.05040973429351875,
"grad_norm": 0.3519175946712494,
"learning_rate": 9.68640994745946e-06,
"loss": 1.8536,
"num_input_tokens_seen": 53215232,
"step": 203
},
{
"epoch": 0.05065805810777253,
"grad_norm": 0.30881020426750183,
"learning_rate": 9.680297678694867e-06,
"loss": 1.7819,
"num_input_tokens_seen": 53477376,
"step": 204
},
{
"epoch": 0.050906381922026324,
"grad_norm": 0.28387659788131714,
"learning_rate": 9.674128381980073e-06,
"loss": 1.6913,
"num_input_tokens_seen": 53739520,
"step": 205
},
{
"epoch": 0.05115470573628011,
"grad_norm": 0.42218366265296936,
"learning_rate": 9.667902132486009e-06,
"loss": 1.3839,
"num_input_tokens_seen": 54001664,
"step": 206
},
{
"epoch": 0.0514030295505339,
"grad_norm": 0.3479291796684265,
"learning_rate": 9.661619006077562e-06,
"loss": 1.7449,
"num_input_tokens_seen": 54263808,
"step": 207
},
{
"epoch": 0.05165135336478768,
"grad_norm": 0.41011831164360046,
"learning_rate": 9.655279079312643e-06,
"loss": 1.3836,
"num_input_tokens_seen": 54525952,
"step": 208
},
{
"epoch": 0.05189967717904147,
"grad_norm": 0.3655335605144501,
"learning_rate": 9.648882429441258e-06,
"loss": 2.2184,
"num_input_tokens_seen": 54788096,
"step": 209
},
{
"epoch": 0.05214800099329526,
"grad_norm": 0.4303296208381653,
"learning_rate": 9.642429134404568e-06,
"loss": 1.8072,
"num_input_tokens_seen": 55050240,
"step": 210
},
{
"epoch": 0.05239632480754904,
"grad_norm": 0.4718409776687622,
"learning_rate": 9.635919272833938e-06,
"loss": 1.7503,
"num_input_tokens_seen": 55312384,
"step": 211
},
{
"epoch": 0.052644648621802834,
"grad_norm": 0.41102349758148193,
"learning_rate": 9.629352924049975e-06,
"loss": 1.5696,
"num_input_tokens_seen": 55574528,
"step": 212
},
{
"epoch": 0.05289297243605662,
"grad_norm": 0.40920042991638184,
"learning_rate": 9.622730168061568e-06,
"loss": 1.354,
"num_input_tokens_seen": 55836672,
"step": 213
},
{
"epoch": 0.0531412962503104,
"grad_norm": 0.2790515124797821,
"learning_rate": 9.616051085564905e-06,
"loss": 1.6552,
"num_input_tokens_seen": 56098816,
"step": 214
},
{
"epoch": 0.05338962006456419,
"grad_norm": 0.708991289138794,
"learning_rate": 9.609315757942504e-06,
"loss": 1.4587,
"num_input_tokens_seen": 56360960,
"step": 215
},
{
"epoch": 0.05363794387881798,
"grad_norm": 0.5307122468948364,
"learning_rate": 9.602524267262202e-06,
"loss": 2.0549,
"num_input_tokens_seen": 56623104,
"step": 216
},
{
"epoch": 0.05388626769307177,
"grad_norm": 0.6132609248161316,
"learning_rate": 9.595676696276173e-06,
"loss": 1.8739,
"num_input_tokens_seen": 56885248,
"step": 217
},
{
"epoch": 0.05413459150732555,
"grad_norm": 0.4902276396751404,
"learning_rate": 9.588773128419907e-06,
"loss": 2.0105,
"num_input_tokens_seen": 57147392,
"step": 218
},
{
"epoch": 0.054382915321579336,
"grad_norm": 0.40454724431037903,
"learning_rate": 9.581813647811199e-06,
"loss": 1.9691,
"num_input_tokens_seen": 57409536,
"step": 219
},
{
"epoch": 0.05463123913583313,
"grad_norm": 0.5375818610191345,
"learning_rate": 9.574798339249124e-06,
"loss": 1.9857,
"num_input_tokens_seen": 57671680,
"step": 220
},
{
"epoch": 0.05487956295008691,
"grad_norm": 0.31554529070854187,
"learning_rate": 9.567727288213005e-06,
"loss": 1.7783,
"num_input_tokens_seen": 57933824,
"step": 221
},
{
"epoch": 0.0551278867643407,
"grad_norm": 0.5682997703552246,
"learning_rate": 9.560600580861366e-06,
"loss": 1.8144,
"num_input_tokens_seen": 58195968,
"step": 222
},
{
"epoch": 0.05537621057859449,
"grad_norm": 0.393583208322525,
"learning_rate": 9.553418304030886e-06,
"loss": 1.9838,
"num_input_tokens_seen": 58458112,
"step": 223
},
{
"epoch": 0.05562453439284827,
"grad_norm": 0.42172202467918396,
"learning_rate": 9.546180545235344e-06,
"loss": 1.7532,
"num_input_tokens_seen": 58720256,
"step": 224
},
{
"epoch": 0.05587285820710206,
"grad_norm": 0.4001232981681824,
"learning_rate": 9.538887392664544e-06,
"loss": 2.2716,
"num_input_tokens_seen": 58982400,
"step": 225
},
{
"epoch": 0.056121182021355846,
"grad_norm": 0.2886028289794922,
"learning_rate": 9.531538935183252e-06,
"loss": 1.5989,
"num_input_tokens_seen": 59244544,
"step": 226
},
{
"epoch": 0.05636950583560964,
"grad_norm": 0.49188050627708435,
"learning_rate": 9.524135262330098e-06,
"loss": 1.7318,
"num_input_tokens_seen": 59506688,
"step": 227
},
{
"epoch": 0.05661782964986342,
"grad_norm": 0.46622851490974426,
"learning_rate": 9.516676464316505e-06,
"loss": 1.4116,
"num_input_tokens_seen": 59768832,
"step": 228
},
{
"epoch": 0.056866153464117206,
"grad_norm": 0.26139262318611145,
"learning_rate": 9.50916263202557e-06,
"loss": 1.6799,
"num_input_tokens_seen": 60030976,
"step": 229
},
{
"epoch": 0.057114477278371,
"grad_norm": 0.4152681827545166,
"learning_rate": 9.501593857010968e-06,
"loss": 2.0024,
"num_input_tokens_seen": 60293120,
"step": 230
},
{
"epoch": 0.05736280109262478,
"grad_norm": 0.683701753616333,
"learning_rate": 9.493970231495836e-06,
"loss": 1.7395,
"num_input_tokens_seen": 60555264,
"step": 231
},
{
"epoch": 0.05761112490687857,
"grad_norm": 0.4206884503364563,
"learning_rate": 9.486291848371642e-06,
"loss": 1.4044,
"num_input_tokens_seen": 60817408,
"step": 232
},
{
"epoch": 0.057859448721132356,
"grad_norm": 0.3584212064743042,
"learning_rate": 9.478558801197065e-06,
"loss": 1.6407,
"num_input_tokens_seen": 61079552,
"step": 233
},
{
"epoch": 0.05810777253538614,
"grad_norm": 0.3988575041294098,
"learning_rate": 9.470771184196842e-06,
"loss": 1.8871,
"num_input_tokens_seen": 61341696,
"step": 234
},
{
"epoch": 0.05835609634963993,
"grad_norm": 0.6429465413093567,
"learning_rate": 9.46292909226063e-06,
"loss": 1.7096,
"num_input_tokens_seen": 61603840,
"step": 235
},
{
"epoch": 0.058604420163893715,
"grad_norm": 0.5112093091011047,
"learning_rate": 9.45503262094184e-06,
"loss": 1.878,
"num_input_tokens_seen": 61865984,
"step": 236
},
{
"epoch": 0.058852743978147506,
"grad_norm": 0.6004856824874878,
"learning_rate": 9.44708186645649e-06,
"loss": 1.73,
"num_input_tokens_seen": 62128128,
"step": 237
},
{
"epoch": 0.05910106779240129,
"grad_norm": 0.5572634339332581,
"learning_rate": 9.439076925682006e-06,
"loss": 1.601,
"num_input_tokens_seen": 62390272,
"step": 238
},
{
"epoch": 0.05934939160665508,
"grad_norm": 0.7834048271179199,
"learning_rate": 9.431017896156074e-06,
"loss": 1.8971,
"num_input_tokens_seen": 62652416,
"step": 239
},
{
"epoch": 0.059597715420908866,
"grad_norm": 0.5589991807937622,
"learning_rate": 9.42290487607542e-06,
"loss": 1.5294,
"num_input_tokens_seen": 62914560,
"step": 240
},
{
"epoch": 0.05984603923516265,
"grad_norm": 0.48503783345222473,
"learning_rate": 9.414737964294636e-06,
"loss": 1.5151,
"num_input_tokens_seen": 63176704,
"step": 241
},
{
"epoch": 0.06009436304941644,
"grad_norm": 0.39491578936576843,
"learning_rate": 9.406517260324962e-06,
"loss": 1.272,
"num_input_tokens_seen": 63438848,
"step": 242
},
{
"epoch": 0.060342686863670225,
"grad_norm": 0.5852888822555542,
"learning_rate": 9.398242864333084e-06,
"loss": 1.6314,
"num_input_tokens_seen": 63700992,
"step": 243
},
{
"epoch": 0.060591010677924016,
"grad_norm": 0.4342246949672699,
"learning_rate": 9.389914877139903e-06,
"loss": 1.8619,
"num_input_tokens_seen": 63963136,
"step": 244
},
{
"epoch": 0.0608393344921778,
"grad_norm": 0.45139801502227783,
"learning_rate": 9.381533400219319e-06,
"loss": 2.0767,
"num_input_tokens_seen": 64225280,
"step": 245
},
{
"epoch": 0.061087658306431585,
"grad_norm": 0.5253079533576965,
"learning_rate": 9.37309853569698e-06,
"loss": 1.7857,
"num_input_tokens_seen": 64487424,
"step": 246
},
{
"epoch": 0.061335982120685376,
"grad_norm": 0.5168160796165466,
"learning_rate": 9.364610386349048e-06,
"loss": 1.7157,
"num_input_tokens_seen": 64749568,
"step": 247
},
{
"epoch": 0.06158430593493916,
"grad_norm": 0.6876463890075684,
"learning_rate": 9.356069055600949e-06,
"loss": 1.9186,
"num_input_tokens_seen": 65011712,
"step": 248
},
{
"epoch": 0.06183262974919295,
"grad_norm": 0.5097318887710571,
"learning_rate": 9.347474647526095e-06,
"loss": 1.806,
"num_input_tokens_seen": 65273856,
"step": 249
},
{
"epoch": 0.062080953563446735,
"grad_norm": 0.4346969425678253,
"learning_rate": 9.338827266844643e-06,
"loss": 1.9837,
"num_input_tokens_seen": 65536000,
"step": 250
},
{
"epoch": 0.06232927737770052,
"grad_norm": 0.4577252268791199,
"learning_rate": 9.330127018922195e-06,
"loss": 1.9294,
"num_input_tokens_seen": 65798144,
"step": 251
},
{
"epoch": 0.06257760119195431,
"grad_norm": 0.5622639656066895,
"learning_rate": 9.321374009768525e-06,
"loss": 1.2883,
"num_input_tokens_seen": 66060288,
"step": 252
},
{
"epoch": 0.0628259250062081,
"grad_norm": 0.553583562374115,
"learning_rate": 9.312568346036288e-06,
"loss": 1.7474,
"num_input_tokens_seen": 66322432,
"step": 253
},
{
"epoch": 0.06307424882046188,
"grad_norm": 0.6473803520202637,
"learning_rate": 9.30371013501972e-06,
"loss": 1.7102,
"num_input_tokens_seen": 66584576,
"step": 254
},
{
"epoch": 0.06332257263471566,
"grad_norm": 0.6553588509559631,
"learning_rate": 9.294799484653323e-06,
"loss": 1.4839,
"num_input_tokens_seen": 66846720,
"step": 255
},
{
"epoch": 0.06357089644896946,
"grad_norm": 0.5461873412132263,
"learning_rate": 9.285836503510562e-06,
"loss": 2.0798,
"num_input_tokens_seen": 67108864,
"step": 256
},
{
"epoch": 0.06381922026322324,
"grad_norm": 0.5196068286895752,
"learning_rate": 9.276821300802535e-06,
"loss": 1.7274,
"num_input_tokens_seen": 67371008,
"step": 257
},
{
"epoch": 0.06406754407747703,
"grad_norm": 0.2947503924369812,
"learning_rate": 9.267753986376638e-06,
"loss": 1.8269,
"num_input_tokens_seen": 67633152,
"step": 258
},
{
"epoch": 0.06431586789173081,
"grad_norm": 0.41323122382164,
"learning_rate": 9.25863467071524e-06,
"loss": 1.3102,
"num_input_tokens_seen": 67895296,
"step": 259
},
{
"epoch": 0.0645641917059846,
"grad_norm": 0.5422347187995911,
"learning_rate": 9.24946346493432e-06,
"loss": 1.7149,
"num_input_tokens_seen": 68157440,
"step": 260
},
{
"epoch": 0.0648125155202384,
"grad_norm": 0.4071256220340729,
"learning_rate": 9.24024048078213e-06,
"loss": 1.8354,
"num_input_tokens_seen": 68419584,
"step": 261
},
{
"epoch": 0.06506083933449218,
"grad_norm": 0.31718364357948303,
"learning_rate": 9.230965830637821e-06,
"loss": 1.6514,
"num_input_tokens_seen": 68681728,
"step": 262
},
{
"epoch": 0.06530916314874596,
"grad_norm": 0.5854095816612244,
"learning_rate": 9.221639627510076e-06,
"loss": 1.1938,
"num_input_tokens_seen": 68943872,
"step": 263
},
{
"epoch": 0.06555748696299975,
"grad_norm": 0.28129681944847107,
"learning_rate": 9.21226198503574e-06,
"loss": 1.932,
"num_input_tokens_seen": 69206016,
"step": 264
},
{
"epoch": 0.06580581077725355,
"grad_norm": 0.39890119433403015,
"learning_rate": 9.202833017478421e-06,
"loss": 2.1795,
"num_input_tokens_seen": 69468160,
"step": 265
},
{
"epoch": 0.06605413459150733,
"grad_norm": 0.5349065661430359,
"learning_rate": 9.193352839727122e-06,
"loss": 1.7356,
"num_input_tokens_seen": 69730304,
"step": 266
},
{
"epoch": 0.06630245840576111,
"grad_norm": 0.5061510801315308,
"learning_rate": 9.18382156729481e-06,
"loss": 1.552,
"num_input_tokens_seen": 69992448,
"step": 267
},
{
"epoch": 0.0665507822200149,
"grad_norm": 0.5302243828773499,
"learning_rate": 9.174239316317034e-06,
"loss": 1.6972,
"num_input_tokens_seen": 70254592,
"step": 268
},
{
"epoch": 0.06679910603426868,
"grad_norm": 0.4849538803100586,
"learning_rate": 9.164606203550498e-06,
"loss": 1.8502,
"num_input_tokens_seen": 70516736,
"step": 269
},
{
"epoch": 0.06704742984852248,
"grad_norm": 0.7993111610412598,
"learning_rate": 9.154922346371641e-06,
"loss": 1.473,
"num_input_tokens_seen": 70778880,
"step": 270
},
{
"epoch": 0.06729575366277626,
"grad_norm": 0.2985432744026184,
"learning_rate": 9.145187862775208e-06,
"loss": 1.7854,
"num_input_tokens_seen": 71041024,
"step": 271
},
{
"epoch": 0.06754407747703005,
"grad_norm": 0.557628333568573,
"learning_rate": 9.13540287137281e-06,
"loss": 1.6575,
"num_input_tokens_seen": 71303168,
"step": 272
},
{
"epoch": 0.06779240129128383,
"grad_norm": 0.5635995268821716,
"learning_rate": 9.125567491391476e-06,
"loss": 1.7496,
"num_input_tokens_seen": 71565312,
"step": 273
},
{
"epoch": 0.06804072510553762,
"grad_norm": 0.5241788625717163,
"learning_rate": 9.115681842672211e-06,
"loss": 1.4799,
"num_input_tokens_seen": 71827456,
"step": 274
},
{
"epoch": 0.06828904891979141,
"grad_norm": 0.7073025107383728,
"learning_rate": 9.10574604566852e-06,
"loss": 2.0472,
"num_input_tokens_seen": 72089600,
"step": 275
},
{
"epoch": 0.0685373727340452,
"grad_norm": 0.3968837261199951,
"learning_rate": 9.09576022144496e-06,
"loss": 1.2316,
"num_input_tokens_seen": 72351744,
"step": 276
},
{
"epoch": 0.06878569654829898,
"grad_norm": 0.42373400926589966,
"learning_rate": 9.085724491675642e-06,
"loss": 1.3935,
"num_input_tokens_seen": 72613888,
"step": 277
},
{
"epoch": 0.06903402036255277,
"grad_norm": 0.3875352144241333,
"learning_rate": 9.07563897864277e-06,
"loss": 1.4776,
"num_input_tokens_seen": 72876032,
"step": 278
},
{
"epoch": 0.06928234417680655,
"grad_norm": 0.64447420835495,
"learning_rate": 9.065503805235139e-06,
"loss": 1.5065,
"num_input_tokens_seen": 73138176,
"step": 279
},
{
"epoch": 0.06953066799106035,
"grad_norm": 0.6507744789123535,
"learning_rate": 9.055319094946633e-06,
"loss": 1.7043,
"num_input_tokens_seen": 73400320,
"step": 280
},
{
"epoch": 0.06977899180531413,
"grad_norm": 0.6041735410690308,
"learning_rate": 9.045084971874738e-06,
"loss": 1.4371,
"num_input_tokens_seen": 73662464,
"step": 281
},
{
"epoch": 0.07002731561956792,
"grad_norm": 0.4756613075733185,
"learning_rate": 9.03480156071901e-06,
"loss": 1.5451,
"num_input_tokens_seen": 73924608,
"step": 282
},
{
"epoch": 0.0702756394338217,
"grad_norm": 0.6942863464355469,
"learning_rate": 9.02446898677957e-06,
"loss": 1.1622,
"num_input_tokens_seen": 74186752,
"step": 283
},
{
"epoch": 0.07052396324807549,
"grad_norm": 0.5789624452590942,
"learning_rate": 9.014087375955574e-06,
"loss": 2.2199,
"num_input_tokens_seen": 74448896,
"step": 284
},
{
"epoch": 0.07077228706232928,
"grad_norm": 0.5487517714500427,
"learning_rate": 9.003656854743667e-06,
"loss": 1.5385,
"num_input_tokens_seen": 74711040,
"step": 285
},
{
"epoch": 0.07102061087658307,
"grad_norm": 0.34135740995407104,
"learning_rate": 8.993177550236464e-06,
"loss": 1.5421,
"num_input_tokens_seen": 74973184,
"step": 286
},
{
"epoch": 0.07126893469083685,
"grad_norm": 0.4900202453136444,
"learning_rate": 8.982649590120982e-06,
"loss": 1.7945,
"num_input_tokens_seen": 75235328,
"step": 287
},
{
"epoch": 0.07151725850509064,
"grad_norm": 0.44072678685188293,
"learning_rate": 8.972073102677091e-06,
"loss": 1.8011,
"num_input_tokens_seen": 75497472,
"step": 288
},
{
"epoch": 0.07176558231934442,
"grad_norm": 0.4883013069629669,
"learning_rate": 8.961448216775955e-06,
"loss": 1.8165,
"num_input_tokens_seen": 75759616,
"step": 289
},
{
"epoch": 0.07201390613359822,
"grad_norm": 0.5138419270515442,
"learning_rate": 8.950775061878453e-06,
"loss": 1.5481,
"num_input_tokens_seen": 76021760,
"step": 290
},
{
"epoch": 0.072262229947852,
"grad_norm": 0.8689912557601929,
"learning_rate": 8.94005376803361e-06,
"loss": 1.6688,
"num_input_tokens_seen": 76283904,
"step": 291
},
{
"epoch": 0.07251055376210579,
"grad_norm": 0.6632862091064453,
"learning_rate": 8.92928446587701e-06,
"loss": 1.6316,
"num_input_tokens_seen": 76546048,
"step": 292
},
{
"epoch": 0.07275887757635957,
"grad_norm": 0.5113462209701538,
"learning_rate": 8.9184672866292e-06,
"loss": 1.8658,
"num_input_tokens_seen": 76808192,
"step": 293
},
{
"epoch": 0.07300720139061335,
"grad_norm": 0.6166042685508728,
"learning_rate": 8.907602362094094e-06,
"loss": 1.2848,
"num_input_tokens_seen": 77070336,
"step": 294
},
{
"epoch": 0.07325552520486715,
"grad_norm": 0.6267765164375305,
"learning_rate": 8.896689824657371e-06,
"loss": 1.5453,
"num_input_tokens_seen": 77332480,
"step": 295
},
{
"epoch": 0.07350384901912094,
"grad_norm": 0.5441874265670776,
"learning_rate": 8.885729807284855e-06,
"loss": 1.5841,
"num_input_tokens_seen": 77594624,
"step": 296
},
{
"epoch": 0.07375217283337472,
"grad_norm": 0.5888817310333252,
"learning_rate": 8.874722443520898e-06,
"loss": 1.9301,
"num_input_tokens_seen": 77856768,
"step": 297
},
{
"epoch": 0.0740004966476285,
"grad_norm": 0.8824830055236816,
"learning_rate": 8.863667867486756e-06,
"loss": 1.7341,
"num_input_tokens_seen": 78118912,
"step": 298
},
{
"epoch": 0.07424882046188229,
"grad_norm": 0.4109712541103363,
"learning_rate": 8.852566213878947e-06,
"loss": 1.7768,
"num_input_tokens_seen": 78381056,
"step": 299
},
{
"epoch": 0.07449714427613609,
"grad_norm": 0.4477173686027527,
"learning_rate": 8.841417617967618e-06,
"loss": 1.3926,
"num_input_tokens_seen": 78643200,
"step": 300
},
{
"epoch": 0.07474546809038987,
"grad_norm": 0.7767991423606873,
"learning_rate": 8.83022221559489e-06,
"loss": 1.3578,
"num_input_tokens_seen": 78905344,
"step": 301
},
{
"epoch": 0.07499379190464366,
"grad_norm": 0.5238236784934998,
"learning_rate": 8.818980143173212e-06,
"loss": 1.7547,
"num_input_tokens_seen": 79167488,
"step": 302
},
{
"epoch": 0.07524211571889744,
"grad_norm": 0.4706284999847412,
"learning_rate": 8.807691537683685e-06,
"loss": 2.224,
"num_input_tokens_seen": 79429632,
"step": 303
},
{
"epoch": 0.07549043953315122,
"grad_norm": 0.4068174362182617,
"learning_rate": 8.796356536674404e-06,
"loss": 1.8971,
"num_input_tokens_seen": 79691776,
"step": 304
},
{
"epoch": 0.07573876334740502,
"grad_norm": 0.36882147192955017,
"learning_rate": 8.784975278258783e-06,
"loss": 1.3618,
"num_input_tokens_seen": 79953920,
"step": 305
},
{
"epoch": 0.0759870871616588,
"grad_norm": 0.7638546228408813,
"learning_rate": 8.773547901113862e-06,
"loss": 1.4798,
"num_input_tokens_seen": 80216064,
"step": 306
},
{
"epoch": 0.07623541097591259,
"grad_norm": 0.7291756868362427,
"learning_rate": 8.762074544478622e-06,
"loss": 1.8117,
"num_input_tokens_seen": 80478208,
"step": 307
},
{
"epoch": 0.07648373479016637,
"grad_norm": 0.35935330390930176,
"learning_rate": 8.750555348152299e-06,
"loss": 1.7963,
"num_input_tokens_seen": 80740352,
"step": 308
},
{
"epoch": 0.07673205860442016,
"grad_norm": 0.41468292474746704,
"learning_rate": 8.73899045249266e-06,
"loss": 1.7334,
"num_input_tokens_seen": 81002496,
"step": 309
},
{
"epoch": 0.07698038241867396,
"grad_norm": 0.47698289155960083,
"learning_rate": 8.727379998414311e-06,
"loss": 1.5163,
"num_input_tokens_seen": 81264640,
"step": 310
},
{
"epoch": 0.07722870623292774,
"grad_norm": 0.3247212767601013,
"learning_rate": 8.715724127386971e-06,
"loss": 1.5166,
"num_input_tokens_seen": 81526784,
"step": 311
},
{
"epoch": 0.07747703004718152,
"grad_norm": 0.24329149723052979,
"learning_rate": 8.70402298143375e-06,
"loss": 1.1361,
"num_input_tokens_seen": 81788928,
"step": 312
},
{
"epoch": 0.07772535386143531,
"grad_norm": 0.4041454493999481,
"learning_rate": 8.692276703129421e-06,
"loss": 1.6928,
"num_input_tokens_seen": 82051072,
"step": 313
},
{
"epoch": 0.0779736776756891,
"grad_norm": 0.5690919160842896,
"learning_rate": 8.680485435598674e-06,
"loss": 1.4934,
"num_input_tokens_seen": 82313216,
"step": 314
},
{
"epoch": 0.07822200148994289,
"grad_norm": 0.5649420022964478,
"learning_rate": 8.668649322514382e-06,
"loss": 1.4087,
"num_input_tokens_seen": 82575360,
"step": 315
},
{
"epoch": 0.07847032530419668,
"grad_norm": 0.38145026564598083,
"learning_rate": 8.656768508095853e-06,
"loss": 1.9073,
"num_input_tokens_seen": 82837504,
"step": 316
},
{
"epoch": 0.07871864911845046,
"grad_norm": 0.7416847944259644,
"learning_rate": 8.644843137107058e-06,
"loss": 1.4074,
"num_input_tokens_seen": 83099648,
"step": 317
},
{
"epoch": 0.07896697293270424,
"grad_norm": 0.5791590213775635,
"learning_rate": 8.632873354854881e-06,
"loss": 1.8003,
"num_input_tokens_seen": 83361792,
"step": 318
},
{
"epoch": 0.07921529674695803,
"grad_norm": 0.5541636347770691,
"learning_rate": 8.620859307187339e-06,
"loss": 1.4524,
"num_input_tokens_seen": 83623936,
"step": 319
},
{
"epoch": 0.07946362056121183,
"grad_norm": 0.45152053236961365,
"learning_rate": 8.608801140491811e-06,
"loss": 1.9212,
"num_input_tokens_seen": 83886080,
"step": 320
},
{
"epoch": 0.07971194437546561,
"grad_norm": 0.7486585974693298,
"learning_rate": 8.596699001693257e-06,
"loss": 1.8341,
"num_input_tokens_seen": 84148224,
"step": 321
},
{
"epoch": 0.0799602681897194,
"grad_norm": 0.4018547832965851,
"learning_rate": 8.584553038252415e-06,
"loss": 1.6084,
"num_input_tokens_seen": 84410368,
"step": 322
},
{
"epoch": 0.08020859200397318,
"grad_norm": 0.5464489459991455,
"learning_rate": 8.572363398164017e-06,
"loss": 1.5227,
"num_input_tokens_seen": 84672512,
"step": 323
},
{
"epoch": 0.08045691581822696,
"grad_norm": 0.5198168754577637,
"learning_rate": 8.560130229954985e-06,
"loss": 1.6922,
"num_input_tokens_seen": 84934656,
"step": 324
},
{
"epoch": 0.08070523963248076,
"grad_norm": 0.48899322748184204,
"learning_rate": 8.547853682682605e-06,
"loss": 1.6412,
"num_input_tokens_seen": 85196800,
"step": 325
},
{
"epoch": 0.08095356344673454,
"grad_norm": 0.2365017831325531,
"learning_rate": 8.535533905932739e-06,
"loss": 1.5639,
"num_input_tokens_seen": 85458944,
"step": 326
},
{
"epoch": 0.08120188726098833,
"grad_norm": 0.4440731704235077,
"learning_rate": 8.523171049817974e-06,
"loss": 2.0067,
"num_input_tokens_seen": 85721088,
"step": 327
},
{
"epoch": 0.08145021107524211,
"grad_norm": 0.591474175453186,
"learning_rate": 8.510765264975813e-06,
"loss": 2.085,
"num_input_tokens_seen": 85983232,
"step": 328
},
{
"epoch": 0.0816985348894959,
"grad_norm": 0.8983985781669617,
"learning_rate": 8.498316702566828e-06,
"loss": 2.0327,
"num_input_tokens_seen": 86245376,
"step": 329
},
{
"epoch": 0.0819468587037497,
"grad_norm": 0.5473107695579529,
"learning_rate": 8.485825514272824e-06,
"loss": 1.3908,
"num_input_tokens_seen": 86507520,
"step": 330
},
{
"epoch": 0.08219518251800348,
"grad_norm": 0.4440311789512634,
"learning_rate": 8.473291852294986e-06,
"loss": 1.7376,
"num_input_tokens_seen": 86769664,
"step": 331
},
{
"epoch": 0.08244350633225726,
"grad_norm": 0.5826847553253174,
"learning_rate": 8.460715869352035e-06,
"loss": 1.7982,
"num_input_tokens_seen": 87031808,
"step": 332
},
{
"epoch": 0.08269183014651105,
"grad_norm": 0.4117896258831024,
"learning_rate": 8.44809771867835e-06,
"loss": 1.9127,
"num_input_tokens_seen": 87293952,
"step": 333
},
{
"epoch": 0.08294015396076483,
"grad_norm": 0.6492828726768494,
"learning_rate": 8.435437554022116e-06,
"loss": 1.4407,
"num_input_tokens_seen": 87556096,
"step": 334
},
{
"epoch": 0.08318847777501863,
"grad_norm": 0.47931548953056335,
"learning_rate": 8.422735529643445e-06,
"loss": 1.7312,
"num_input_tokens_seen": 87818240,
"step": 335
},
{
"epoch": 0.08343680158927241,
"grad_norm": 0.45937687158584595,
"learning_rate": 8.409991800312493e-06,
"loss": 1.3507,
"num_input_tokens_seen": 88080384,
"step": 336
},
{
"epoch": 0.0836851254035262,
"grad_norm": 0.34280064702033997,
"learning_rate": 8.397206521307584e-06,
"loss": 1.9152,
"num_input_tokens_seen": 88342528,
"step": 337
},
{
"epoch": 0.08393344921777998,
"grad_norm": 0.525225043296814,
"learning_rate": 8.384379848413304e-06,
"loss": 1.7714,
"num_input_tokens_seen": 88604672,
"step": 338
},
{
"epoch": 0.08418177303203377,
"grad_norm": 0.8012213706970215,
"learning_rate": 8.371511937918616e-06,
"loss": 1.6678,
"num_input_tokens_seen": 88866816,
"step": 339
},
{
"epoch": 0.08443009684628756,
"grad_norm": 0.49616944789886475,
"learning_rate": 8.358602946614952e-06,
"loss": 1.6751,
"num_input_tokens_seen": 89128960,
"step": 340
},
{
"epoch": 0.08467842066054135,
"grad_norm": 0.4202152490615845,
"learning_rate": 8.345653031794292e-06,
"loss": 1.595,
"num_input_tokens_seen": 89391104,
"step": 341
},
{
"epoch": 0.08492674447479513,
"grad_norm": 0.35994473099708557,
"learning_rate": 8.332662351247262e-06,
"loss": 1.8082,
"num_input_tokens_seen": 89653248,
"step": 342
},
{
"epoch": 0.08517506828904892,
"grad_norm": 0.3768995702266693,
"learning_rate": 8.319631063261209e-06,
"loss": 1.7495,
"num_input_tokens_seen": 89915392,
"step": 343
},
{
"epoch": 0.0854233921033027,
"grad_norm": 0.7302326560020447,
"learning_rate": 8.30655932661826e-06,
"loss": 1.335,
"num_input_tokens_seen": 90177536,
"step": 344
},
{
"epoch": 0.0856717159175565,
"grad_norm": 0.4386504888534546,
"learning_rate": 8.293447300593402e-06,
"loss": 1.7737,
"num_input_tokens_seen": 90439680,
"step": 345
},
{
"epoch": 0.08592003973181028,
"grad_norm": 0.7695857286453247,
"learning_rate": 8.280295144952537e-06,
"loss": 1.7856,
"num_input_tokens_seen": 90701824,
"step": 346
},
{
"epoch": 0.08616836354606407,
"grad_norm": 0.7740248441696167,
"learning_rate": 8.267103019950529e-06,
"loss": 2.0197,
"num_input_tokens_seen": 90963968,
"step": 347
},
{
"epoch": 0.08641668736031785,
"grad_norm": 3.4425835609436035,
"learning_rate": 8.253871086329255e-06,
"loss": 1.7337,
"num_input_tokens_seen": 91226112,
"step": 348
},
{
"epoch": 0.08666501117457164,
"grad_norm": 0.5034295916557312,
"learning_rate": 8.240599505315656e-06,
"loss": 1.269,
"num_input_tokens_seen": 91488256,
"step": 349
},
{
"epoch": 0.08691333498882543,
"grad_norm": 0.3939118981361389,
"learning_rate": 8.227288438619754e-06,
"loss": 1.7436,
"num_input_tokens_seen": 91750400,
"step": 350
},
{
"epoch": 0.08716165880307922,
"grad_norm": 0.6885347962379456,
"learning_rate": 8.213938048432697e-06,
"loss": 1.5946,
"num_input_tokens_seen": 92012544,
"step": 351
},
{
"epoch": 0.087409982617333,
"grad_norm": 0.4886651337146759,
"learning_rate": 8.200548497424779e-06,
"loss": 1.3196,
"num_input_tokens_seen": 92274688,
"step": 352
},
{
"epoch": 0.08765830643158679,
"grad_norm": 0.7113179564476013,
"learning_rate": 8.18711994874345e-06,
"loss": 2.1461,
"num_input_tokens_seen": 92536832,
"step": 353
},
{
"epoch": 0.08790663024584057,
"grad_norm": 0.8152286410331726,
"learning_rate": 8.173652566011339e-06,
"loss": 1.6922,
"num_input_tokens_seen": 92798976,
"step": 354
},
{
"epoch": 0.08815495406009437,
"grad_norm": 0.4894461929798126,
"learning_rate": 8.160146513324256e-06,
"loss": 1.8319,
"num_input_tokens_seen": 93061120,
"step": 355
},
{
"epoch": 0.08840327787434815,
"grad_norm": 0.5153379440307617,
"learning_rate": 8.146601955249187e-06,
"loss": 1.6654,
"num_input_tokens_seen": 93323264,
"step": 356
},
{
"epoch": 0.08865160168860194,
"grad_norm": 0.5455003380775452,
"learning_rate": 8.133019056822303e-06,
"loss": 1.8757,
"num_input_tokens_seen": 93585408,
"step": 357
},
{
"epoch": 0.08889992550285572,
"grad_norm": 0.6758735775947571,
"learning_rate": 8.119397983546932e-06,
"loss": 1.6944,
"num_input_tokens_seen": 93847552,
"step": 358
},
{
"epoch": 0.0891482493171095,
"grad_norm": 0.8125549554824829,
"learning_rate": 8.105738901391553e-06,
"loss": 1.4809,
"num_input_tokens_seen": 94109696,
"step": 359
},
{
"epoch": 0.0893965731313633,
"grad_norm": 0.5866984724998474,
"learning_rate": 8.092041976787772e-06,
"loss": 1.9641,
"num_input_tokens_seen": 94371840,
"step": 360
},
{
"epoch": 0.08964489694561709,
"grad_norm": 0.43463000655174255,
"learning_rate": 8.078307376628292e-06,
"loss": 1.5385,
"num_input_tokens_seen": 94633984,
"step": 361
},
{
"epoch": 0.08989322075987087,
"grad_norm": 0.7243896126747131,
"learning_rate": 8.064535268264883e-06,
"loss": 1.9196,
"num_input_tokens_seen": 94896128,
"step": 362
},
{
"epoch": 0.09014154457412465,
"grad_norm": 0.7336921095848083,
"learning_rate": 8.05072581950634e-06,
"loss": 1.6399,
"num_input_tokens_seen": 95158272,
"step": 363
},
{
"epoch": 0.09038986838837844,
"grad_norm": 0.4355528950691223,
"learning_rate": 8.036879198616434e-06,
"loss": 1.3215,
"num_input_tokens_seen": 95420416,
"step": 364
},
{
"epoch": 0.09063819220263224,
"grad_norm": 0.4627648591995239,
"learning_rate": 8.022995574311876e-06,
"loss": 1.7113,
"num_input_tokens_seen": 95682560,
"step": 365
},
{
"epoch": 0.09088651601688602,
"grad_norm": 1.2987405061721802,
"learning_rate": 8.009075115760243e-06,
"loss": 1.5487,
"num_input_tokens_seen": 95944704,
"step": 366
},
{
"epoch": 0.0911348398311398,
"grad_norm": 0.6013867855072021,
"learning_rate": 7.99511799257793e-06,
"loss": 1.4665,
"num_input_tokens_seen": 96206848,
"step": 367
},
{
"epoch": 0.09138316364539359,
"grad_norm": 0.7376968860626221,
"learning_rate": 7.981124374828079e-06,
"loss": 1.4543,
"num_input_tokens_seen": 96468992,
"step": 368
},
{
"epoch": 0.09163148745964737,
"grad_norm": 0.6412230730056763,
"learning_rate": 7.967094433018508e-06,
"loss": 1.2195,
"num_input_tokens_seen": 96731136,
"step": 369
},
{
"epoch": 0.09187981127390117,
"grad_norm": 0.3257865905761719,
"learning_rate": 7.953028338099628e-06,
"loss": 1.5656,
"num_input_tokens_seen": 96993280,
"step": 370
},
{
"epoch": 0.09212813508815496,
"grad_norm": 0.47559216618537903,
"learning_rate": 7.938926261462366e-06,
"loss": 1.6012,
"num_input_tokens_seen": 97255424,
"step": 371
},
{
"epoch": 0.09237645890240874,
"grad_norm": 0.45426198840141296,
"learning_rate": 7.92478837493608e-06,
"loss": 1.7031,
"num_input_tokens_seen": 97517568,
"step": 372
},
{
"epoch": 0.09262478271666252,
"grad_norm": 0.4942920506000519,
"learning_rate": 7.910614850786448e-06,
"loss": 1.6369,
"num_input_tokens_seen": 97779712,
"step": 373
},
{
"epoch": 0.09287310653091632,
"grad_norm": 0.5819427967071533,
"learning_rate": 7.896405861713393e-06,
"loss": 1.7998,
"num_input_tokens_seen": 98041856,
"step": 374
},
{
"epoch": 0.0931214303451701,
"grad_norm": 0.42620816826820374,
"learning_rate": 7.882161580848966e-06,
"loss": 1.6103,
"num_input_tokens_seen": 98304000,
"step": 375
},
{
"epoch": 0.09336975415942389,
"grad_norm": 0.6115924715995789,
"learning_rate": 7.86788218175523e-06,
"loss": 1.8329,
"num_input_tokens_seen": 98566144,
"step": 376
},
{
"epoch": 0.09361807797367767,
"grad_norm": 0.9807063341140747,
"learning_rate": 7.85356783842216e-06,
"loss": 1.3716,
"num_input_tokens_seen": 98828288,
"step": 377
},
{
"epoch": 0.09386640178793146,
"grad_norm": 0.5305372476577759,
"learning_rate": 7.839218725265507e-06,
"loss": 1.585,
"num_input_tokens_seen": 99090432,
"step": 378
},
{
"epoch": 0.09411472560218526,
"grad_norm": 0.7433108687400818,
"learning_rate": 7.82483501712469e-06,
"loss": 1.5462,
"num_input_tokens_seen": 99352576,
"step": 379
},
{
"epoch": 0.09436304941643904,
"grad_norm": 0.6521483659744263,
"learning_rate": 7.810416889260653e-06,
"loss": 2.1058,
"num_input_tokens_seen": 99614720,
"step": 380
},
{
"epoch": 0.09461137323069282,
"grad_norm": 0.6441096663475037,
"learning_rate": 7.795964517353734e-06,
"loss": 1.938,
"num_input_tokens_seen": 99876864,
"step": 381
},
{
"epoch": 0.09485969704494661,
"grad_norm": 0.4057358205318451,
"learning_rate": 7.781478077501526e-06,
"loss": 1.5177,
"num_input_tokens_seen": 100139008,
"step": 382
},
{
"epoch": 0.09510802085920039,
"grad_norm": 0.5994225144386292,
"learning_rate": 7.76695774621672e-06,
"loss": 1.6127,
"num_input_tokens_seen": 100401152,
"step": 383
},
{
"epoch": 0.09535634467345419,
"grad_norm": 0.6396727561950684,
"learning_rate": 7.752403700424978e-06,
"loss": 1.678,
"num_input_tokens_seen": 100663296,
"step": 384
},
{
"epoch": 0.09560466848770797,
"grad_norm": 0.647515594959259,
"learning_rate": 7.737816117462752e-06,
"loss": 1.7168,
"num_input_tokens_seen": 100925440,
"step": 385
},
{
"epoch": 0.09585299230196176,
"grad_norm": 0.5679896473884583,
"learning_rate": 7.723195175075136e-06,
"loss": 1.9103,
"num_input_tokens_seen": 101187584,
"step": 386
},
{
"epoch": 0.09610131611621554,
"grad_norm": 0.9553595185279846,
"learning_rate": 7.7085410514137e-06,
"loss": 1.934,
"num_input_tokens_seen": 101449728,
"step": 387
},
{
"epoch": 0.09634963993046933,
"grad_norm": 0.3686366677284241,
"learning_rate": 7.693853925034316e-06,
"loss": 2.0843,
"num_input_tokens_seen": 101711872,
"step": 388
},
{
"epoch": 0.09659796374472313,
"grad_norm": 0.528319239616394,
"learning_rate": 7.679133974894984e-06,
"loss": 2.077,
"num_input_tokens_seen": 101974016,
"step": 389
},
{
"epoch": 0.09684628755897691,
"grad_norm": 0.6407886147499084,
"learning_rate": 7.66438138035365e-06,
"loss": 1.634,
"num_input_tokens_seen": 102236160,
"step": 390
},
{
"epoch": 0.0970946113732307,
"grad_norm": 0.5554214715957642,
"learning_rate": 7.649596321166024e-06,
"loss": 1.4811,
"num_input_tokens_seen": 102498304,
"step": 391
},
{
"epoch": 0.09734293518748448,
"grad_norm": 0.5426738858222961,
"learning_rate": 7.634778977483389e-06,
"loss": 1.6908,
"num_input_tokens_seen": 102760448,
"step": 392
},
{
"epoch": 0.09759125900173826,
"grad_norm": 0.7456260323524475,
"learning_rate": 7.619929529850397e-06,
"loss": 1.8275,
"num_input_tokens_seen": 103022592,
"step": 393
},
{
"epoch": 0.09783958281599206,
"grad_norm": 0.5138922929763794,
"learning_rate": 7.605048159202884e-06,
"loss": 1.1156,
"num_input_tokens_seen": 103284736,
"step": 394
},
{
"epoch": 0.09808790663024584,
"grad_norm": 0.3001045882701874,
"learning_rate": 7.590135046865652e-06,
"loss": 1.198,
"num_input_tokens_seen": 103546880,
"step": 395
},
{
"epoch": 0.09833623044449963,
"grad_norm": 0.5880023241043091,
"learning_rate": 7.575190374550272e-06,
"loss": 1.4706,
"num_input_tokens_seen": 103809024,
"step": 396
},
{
"epoch": 0.09858455425875341,
"grad_norm": 0.3109273910522461,
"learning_rate": 7.560214324352858e-06,
"loss": 1.0782,
"num_input_tokens_seen": 104071168,
"step": 397
},
{
"epoch": 0.0988328780730072,
"grad_norm": 0.3918832540512085,
"learning_rate": 7.545207078751858e-06,
"loss": 1.3639,
"num_input_tokens_seen": 104333312,
"step": 398
},
{
"epoch": 0.099081201887261,
"grad_norm": 0.6184259653091431,
"learning_rate": 7.530168820605819e-06,
"loss": 2.0244,
"num_input_tokens_seen": 104595456,
"step": 399
},
{
"epoch": 0.09932952570151478,
"grad_norm": 0.41290906071662903,
"learning_rate": 7.515099733151177e-06,
"loss": 1.3445,
"num_input_tokens_seen": 104857600,
"step": 400
},
{
"epoch": 0.09957784951576856,
"grad_norm": 0.5972164273262024,
"learning_rate": 7.500000000000001e-06,
"loss": 1.7396,
"num_input_tokens_seen": 105119744,
"step": 401
},
{
"epoch": 0.09982617333002235,
"grad_norm": 0.49639347195625305,
"learning_rate": 7.484869805137778e-06,
"loss": 1.9205,
"num_input_tokens_seen": 105381888,
"step": 402
},
{
"epoch": 0.10007449714427613,
"grad_norm": 0.47210463881492615,
"learning_rate": 7.469709332921155e-06,
"loss": 1.434,
"num_input_tokens_seen": 105644032,
"step": 403
},
{
"epoch": 0.10032282095852993,
"grad_norm": 0.5069103837013245,
"learning_rate": 7.454518768075705e-06,
"loss": 1.5317,
"num_input_tokens_seen": 105906176,
"step": 404
},
{
"epoch": 0.10057114477278371,
"grad_norm": 0.6772238612174988,
"learning_rate": 7.4392982956936644e-06,
"loss": 1.9896,
"num_input_tokens_seen": 106168320,
"step": 405
},
{
"epoch": 0.1008194685870375,
"grad_norm": 0.5503749847412109,
"learning_rate": 7.424048101231687e-06,
"loss": 1.6349,
"num_input_tokens_seen": 106430464,
"step": 406
},
{
"epoch": 0.10106779240129128,
"grad_norm": 0.47426870465278625,
"learning_rate": 7.408768370508577e-06,
"loss": 1.221,
"num_input_tokens_seen": 106692608,
"step": 407
},
{
"epoch": 0.10131611621554507,
"grad_norm": 0.7207087874412537,
"learning_rate": 7.393459289703035e-06,
"loss": 1.9311,
"num_input_tokens_seen": 106954752,
"step": 408
},
{
"epoch": 0.10156444002979886,
"grad_norm": 0.6696469783782959,
"learning_rate": 7.378121045351378e-06,
"loss": 1.2549,
"num_input_tokens_seen": 107216896,
"step": 409
},
{
"epoch": 0.10181276384405265,
"grad_norm": 2.1489691734313965,
"learning_rate": 7.362753824345271e-06,
"loss": 1.8569,
"num_input_tokens_seen": 107479040,
"step": 410
},
{
"epoch": 0.10206108765830643,
"grad_norm": 0.6064687371253967,
"learning_rate": 7.347357813929455e-06,
"loss": 1.571,
"num_input_tokens_seen": 107741184,
"step": 411
},
{
"epoch": 0.10230941147256022,
"grad_norm": 0.651612401008606,
"learning_rate": 7.3319332016994575e-06,
"loss": 1.3514,
"num_input_tokens_seen": 108003328,
"step": 412
},
{
"epoch": 0.102557735286814,
"grad_norm": 0.2269715964794159,
"learning_rate": 7.31648017559931e-06,
"loss": 1.2449,
"num_input_tokens_seen": 108265472,
"step": 413
},
{
"epoch": 0.1028060591010678,
"grad_norm": 1.5659141540527344,
"learning_rate": 7.300998923919259e-06,
"loss": 1.8001,
"num_input_tokens_seen": 108527616,
"step": 414
},
{
"epoch": 0.10305438291532158,
"grad_norm": 0.7734239101409912,
"learning_rate": 7.285489635293472e-06,
"loss": 1.5909,
"num_input_tokens_seen": 108789760,
"step": 415
},
{
"epoch": 0.10330270672957537,
"grad_norm": 0.5222880244255066,
"learning_rate": 7.269952498697734e-06,
"loss": 1.7375,
"num_input_tokens_seen": 109051904,
"step": 416
},
{
"epoch": 0.10355103054382915,
"grad_norm": 0.45977625250816345,
"learning_rate": 7.254387703447154e-06,
"loss": 1.9382,
"num_input_tokens_seen": 109314048,
"step": 417
},
{
"epoch": 0.10379935435808293,
"grad_norm": 0.6067866683006287,
"learning_rate": 7.238795439193849e-06,
"loss": 1.5499,
"num_input_tokens_seen": 109576192,
"step": 418
},
{
"epoch": 0.10404767817233673,
"grad_norm": 1.0210500955581665,
"learning_rate": 7.223175895924638e-06,
"loss": 2.0725,
"num_input_tokens_seen": 109838336,
"step": 419
},
{
"epoch": 0.10429600198659052,
"grad_norm": 0.7185985445976257,
"learning_rate": 7.207529263958727e-06,
"loss": 1.9104,
"num_input_tokens_seen": 110100480,
"step": 420
},
{
"epoch": 0.1045443258008443,
"grad_norm": 0.9692136645317078,
"learning_rate": 7.191855733945388e-06,
"loss": 1.7128,
"num_input_tokens_seen": 110362624,
"step": 421
},
{
"epoch": 0.10479264961509809,
"grad_norm": 0.36912843585014343,
"learning_rate": 7.176155496861639e-06,
"loss": 1.5977,
"num_input_tokens_seen": 110624768,
"step": 422
},
{
"epoch": 0.10504097342935187,
"grad_norm": 0.7783231735229492,
"learning_rate": 7.160428744009913e-06,
"loss": 1.5989,
"num_input_tokens_seen": 110886912,
"step": 423
},
{
"epoch": 0.10528929724360567,
"grad_norm": 0.3686671853065491,
"learning_rate": 7.1446756670157306e-06,
"loss": 1.887,
"num_input_tokens_seen": 111149056,
"step": 424
},
{
"epoch": 0.10553762105785945,
"grad_norm": 0.47263607382774353,
"learning_rate": 7.128896457825364e-06,
"loss": 1.808,
"num_input_tokens_seen": 111411200,
"step": 425
},
{
"epoch": 0.10578594487211324,
"grad_norm": 0.543540358543396,
"learning_rate": 7.113091308703498e-06,
"loss": 1.7969,
"num_input_tokens_seen": 111673344,
"step": 426
},
{
"epoch": 0.10603426868636702,
"grad_norm": 0.672477126121521,
"learning_rate": 7.0972604122308865e-06,
"loss": 1.3285,
"num_input_tokens_seen": 111935488,
"step": 427
},
{
"epoch": 0.1062825925006208,
"grad_norm": 0.501299262046814,
"learning_rate": 7.081403961302007e-06,
"loss": 1.3305,
"num_input_tokens_seen": 112197632,
"step": 428
},
{
"epoch": 0.1065309163148746,
"grad_norm": 0.4477572441101074,
"learning_rate": 7.06552214912271e-06,
"loss": 1.1045,
"num_input_tokens_seen": 112459776,
"step": 429
},
{
"epoch": 0.10677924012912839,
"grad_norm": 0.34857606887817383,
"learning_rate": 7.049615169207864e-06,
"loss": 1.929,
"num_input_tokens_seen": 112721920,
"step": 430
},
{
"epoch": 0.10702756394338217,
"grad_norm": 0.6203132271766663,
"learning_rate": 7.033683215379002e-06,
"loss": 1.062,
"num_input_tokens_seen": 112984064,
"step": 431
},
{
"epoch": 0.10727588775763595,
"grad_norm": 0.5540516376495361,
"learning_rate": 7.0177264817619514e-06,
"loss": 1.7175,
"num_input_tokens_seen": 113246208,
"step": 432
},
{
"epoch": 0.10752421157188974,
"grad_norm": 0.4745628833770752,
"learning_rate": 7.0017451627844765e-06,
"loss": 1.8083,
"num_input_tokens_seen": 113508352,
"step": 433
},
{
"epoch": 0.10777253538614354,
"grad_norm": 0.6570994257926941,
"learning_rate": 6.985739453173903e-06,
"loss": 1.4396,
"num_input_tokens_seen": 113770496,
"step": 434
},
{
"epoch": 0.10802085920039732,
"grad_norm": 2.3753468990325928,
"learning_rate": 6.9697095479547564e-06,
"loss": 1.3002,
"num_input_tokens_seen": 114032640,
"step": 435
},
{
"epoch": 0.1082691830146511,
"grad_norm": 0.3089500367641449,
"learning_rate": 6.953655642446368e-06,
"loss": 1.589,
"num_input_tokens_seen": 114294784,
"step": 436
},
{
"epoch": 0.10851750682890489,
"grad_norm": 3.394044876098633,
"learning_rate": 6.9375779322605154e-06,
"loss": 1.1472,
"num_input_tokens_seen": 114556928,
"step": 437
},
{
"epoch": 0.10876583064315867,
"grad_norm": 0.334686279296875,
"learning_rate": 6.921476613299018e-06,
"loss": 1.9471,
"num_input_tokens_seen": 114819072,
"step": 438
},
{
"epoch": 0.10901415445741247,
"grad_norm": 0.7744854092597961,
"learning_rate": 6.905351881751372e-06,
"loss": 1.4141,
"num_input_tokens_seen": 115081216,
"step": 439
},
{
"epoch": 0.10926247827166626,
"grad_norm": 0.37837162613868713,
"learning_rate": 6.889203934092337e-06,
"loss": 1.6888,
"num_input_tokens_seen": 115343360,
"step": 440
},
{
"epoch": 0.10951080208592004,
"grad_norm": 0.6168341636657715,
"learning_rate": 6.873032967079562e-06,
"loss": 1.9082,
"num_input_tokens_seen": 115605504,
"step": 441
},
{
"epoch": 0.10975912590017382,
"grad_norm": 0.41162988543510437,
"learning_rate": 6.856839177751175e-06,
"loss": 1.5192,
"num_input_tokens_seen": 115867648,
"step": 442
},
{
"epoch": 0.11000744971442761,
"grad_norm": 0.5805467963218689,
"learning_rate": 6.840622763423391e-06,
"loss": 1.8317,
"num_input_tokens_seen": 116129792,
"step": 443
},
{
"epoch": 0.1102557735286814,
"grad_norm": 0.5233989953994751,
"learning_rate": 6.824383921688098e-06,
"loss": 1.273,
"num_input_tokens_seen": 116391936,
"step": 444
},
{
"epoch": 0.11050409734293519,
"grad_norm": 0.6940083503723145,
"learning_rate": 6.808122850410461e-06,
"loss": 1.5257,
"num_input_tokens_seen": 116654080,
"step": 445
},
{
"epoch": 0.11075242115718897,
"grad_norm": 0.680293619632721,
"learning_rate": 6.7918397477265e-06,
"loss": 1.7634,
"num_input_tokens_seen": 116916224,
"step": 446
},
{
"epoch": 0.11100074497144276,
"grad_norm": 0.680514931678772,
"learning_rate": 6.775534812040686e-06,
"loss": 1.9248,
"num_input_tokens_seen": 117178368,
"step": 447
},
{
"epoch": 0.11124906878569654,
"grad_norm": 0.6705589890480042,
"learning_rate": 6.759208242023509e-06,
"loss": 1.6782,
"num_input_tokens_seen": 117440512,
"step": 448
},
{
"epoch": 0.11149739259995034,
"grad_norm": 0.6143855452537537,
"learning_rate": 6.7428602366090764e-06,
"loss": 1.8117,
"num_input_tokens_seen": 117702656,
"step": 449
},
{
"epoch": 0.11174571641420412,
"grad_norm": 0.5517452359199524,
"learning_rate": 6.7264909949926735e-06,
"loss": 1.4715,
"num_input_tokens_seen": 117964800,
"step": 450
},
{
"epoch": 0.11199404022845791,
"grad_norm": 0.34088805317878723,
"learning_rate": 6.710100716628345e-06,
"loss": 1.6831,
"num_input_tokens_seen": 118226944,
"step": 451
},
{
"epoch": 0.11224236404271169,
"grad_norm": 0.5145544409751892,
"learning_rate": 6.693689601226458e-06,
"loss": 1.6246,
"num_input_tokens_seen": 118489088,
"step": 452
},
{
"epoch": 0.11249068785696548,
"grad_norm": 0.5433220267295837,
"learning_rate": 6.677257848751276e-06,
"loss": 1.517,
"num_input_tokens_seen": 118751232,
"step": 453
},
{
"epoch": 0.11273901167121927,
"grad_norm": 0.6025758385658264,
"learning_rate": 6.6608056594185166e-06,
"loss": 1.9205,
"num_input_tokens_seen": 119013376,
"step": 454
},
{
"epoch": 0.11298733548547306,
"grad_norm": 0.5882771015167236,
"learning_rate": 6.644333233692917e-06,
"loss": 1.6951,
"num_input_tokens_seen": 119275520,
"step": 455
},
{
"epoch": 0.11323565929972684,
"grad_norm": 0.6837276220321655,
"learning_rate": 6.627840772285784e-06,
"loss": 2.0879,
"num_input_tokens_seen": 119537664,
"step": 456
},
{
"epoch": 0.11348398311398063,
"grad_norm": 0.5370259284973145,
"learning_rate": 6.611328476152557e-06,
"loss": 1.6184,
"num_input_tokens_seen": 119799808,
"step": 457
},
{
"epoch": 0.11373230692823441,
"grad_norm": 0.4522857367992401,
"learning_rate": 6.594796546490351e-06,
"loss": 1.7773,
"num_input_tokens_seen": 120061952,
"step": 458
},
{
"epoch": 0.11398063074248821,
"grad_norm": 0.46354392170906067,
"learning_rate": 6.578245184735513e-06,
"loss": 1.5712,
"num_input_tokens_seen": 120324096,
"step": 459
},
{
"epoch": 0.114228954556742,
"grad_norm": 0.686725378036499,
"learning_rate": 6.561674592561164e-06,
"loss": 1.6311,
"num_input_tokens_seen": 120586240,
"step": 460
},
{
"epoch": 0.11447727837099578,
"grad_norm": 0.22250455617904663,
"learning_rate": 6.545084971874738e-06,
"loss": 1.7059,
"num_input_tokens_seen": 120848384,
"step": 461
},
{
"epoch": 0.11472560218524956,
"grad_norm": 0.5717688798904419,
"learning_rate": 6.5284765248155295e-06,
"loss": 1.9078,
"num_input_tokens_seen": 121110528,
"step": 462
},
{
"epoch": 0.11497392599950335,
"grad_norm": 0.6517221331596375,
"learning_rate": 6.5118494537522235e-06,
"loss": 1.923,
"num_input_tokens_seen": 121372672,
"step": 463
},
{
"epoch": 0.11522224981375714,
"grad_norm": 0.7577219009399414,
"learning_rate": 6.495203961280434e-06,
"loss": 1.5007,
"num_input_tokens_seen": 121634816,
"step": 464
},
{
"epoch": 0.11547057362801093,
"grad_norm": 1.1634546518325806,
"learning_rate": 6.4785402502202345e-06,
"loss": 1.7924,
"num_input_tokens_seen": 121896960,
"step": 465
},
{
"epoch": 0.11571889744226471,
"grad_norm": 0.5361213088035583,
"learning_rate": 6.461858523613684e-06,
"loss": 1.3429,
"num_input_tokens_seen": 122159104,
"step": 466
},
{
"epoch": 0.1159672212565185,
"grad_norm": 0.3826828598976135,
"learning_rate": 6.445158984722358e-06,
"loss": 1.4901,
"num_input_tokens_seen": 122421248,
"step": 467
},
{
"epoch": 0.11621554507077228,
"grad_norm": 0.5723513960838318,
"learning_rate": 6.428441837024868e-06,
"loss": 1.8145,
"num_input_tokens_seen": 122683392,
"step": 468
},
{
"epoch": 0.11646386888502608,
"grad_norm": 0.6699240803718567,
"learning_rate": 6.411707284214384e-06,
"loss": 1.6751,
"num_input_tokens_seen": 122945536,
"step": 469
},
{
"epoch": 0.11671219269927986,
"grad_norm": 0.35479190945625305,
"learning_rate": 6.3949555301961474e-06,
"loss": 1.4937,
"num_input_tokens_seen": 123207680,
"step": 470
},
{
"epoch": 0.11696051651353365,
"grad_norm": 0.7304947972297668,
"learning_rate": 6.378186779084996e-06,
"loss": 1.5881,
"num_input_tokens_seen": 123469824,
"step": 471
},
{
"epoch": 0.11720884032778743,
"grad_norm": 0.4155537784099579,
"learning_rate": 6.361401235202872e-06,
"loss": 1.5931,
"num_input_tokens_seen": 123731968,
"step": 472
},
{
"epoch": 0.11745716414204121,
"grad_norm": 0.5789989233016968,
"learning_rate": 6.344599103076329e-06,
"loss": 1.3299,
"num_input_tokens_seen": 123994112,
"step": 473
},
{
"epoch": 0.11770548795629501,
"grad_norm": 0.5815244913101196,
"learning_rate": 6.327780587434045e-06,
"loss": 1.7197,
"num_input_tokens_seen": 124256256,
"step": 474
},
{
"epoch": 0.1179538117705488,
"grad_norm": 0.7529072165489197,
"learning_rate": 6.310945893204324e-06,
"loss": 1.47,
"num_input_tokens_seen": 124518400,
"step": 475
},
{
"epoch": 0.11820213558480258,
"grad_norm": 0.7494714856147766,
"learning_rate": 6.294095225512604e-06,
"loss": 1.864,
"num_input_tokens_seen": 124780544,
"step": 476
},
{
"epoch": 0.11845045939905637,
"grad_norm": 0.3240576684474945,
"learning_rate": 6.277228789678953e-06,
"loss": 1.5745,
"num_input_tokens_seen": 125042688,
"step": 477
},
{
"epoch": 0.11869878321331016,
"grad_norm": 0.5607863068580627,
"learning_rate": 6.26034679121557e-06,
"loss": 1.9548,
"num_input_tokens_seen": 125304832,
"step": 478
},
{
"epoch": 0.11894710702756395,
"grad_norm": 0.5693123936653137,
"learning_rate": 6.243449435824276e-06,
"loss": 1.7745,
"num_input_tokens_seen": 125566976,
"step": 479
},
{
"epoch": 0.11919543084181773,
"grad_norm": 0.5455982685089111,
"learning_rate": 6.2265369293940135e-06,
"loss": 1.9634,
"num_input_tokens_seen": 125829120,
"step": 480
},
{
"epoch": 0.11944375465607152,
"grad_norm": 0.4890311658382416,
"learning_rate": 6.209609477998339e-06,
"loss": 2.1942,
"num_input_tokens_seen": 126091264,
"step": 481
},
{
"epoch": 0.1196920784703253,
"grad_norm": 0.5301004648208618,
"learning_rate": 6.192667287892905e-06,
"loss": 1.4093,
"num_input_tokens_seen": 126353408,
"step": 482
},
{
"epoch": 0.1199404022845791,
"grad_norm": 0.47906139492988586,
"learning_rate": 6.17571056551295e-06,
"loss": 1.7136,
"num_input_tokens_seen": 126615552,
"step": 483
},
{
"epoch": 0.12018872609883288,
"grad_norm": 0.6675156354904175,
"learning_rate": 6.158739517470786e-06,
"loss": 1.6023,
"num_input_tokens_seen": 126877696,
"step": 484
},
{
"epoch": 0.12043704991308667,
"grad_norm": 0.31835541129112244,
"learning_rate": 6.141754350553279e-06,
"loss": 1.5503,
"num_input_tokens_seen": 127139840,
"step": 485
},
{
"epoch": 0.12068537372734045,
"grad_norm": 0.7083136439323425,
"learning_rate": 6.124755271719326e-06,
"loss": 1.5225,
"num_input_tokens_seen": 127401984,
"step": 486
},
{
"epoch": 0.12093369754159423,
"grad_norm": 0.4737892746925354,
"learning_rate": 6.107742488097338e-06,
"loss": 1.4856,
"num_input_tokens_seen": 127664128,
"step": 487
},
{
"epoch": 0.12118202135584803,
"grad_norm": 0.403909832239151,
"learning_rate": 6.090716206982714e-06,
"loss": 1.5458,
"num_input_tokens_seen": 127926272,
"step": 488
},
{
"epoch": 0.12143034517010182,
"grad_norm": 0.6817101240158081,
"learning_rate": 6.073676635835317e-06,
"loss": 1.3755,
"num_input_tokens_seen": 128188416,
"step": 489
},
{
"epoch": 0.1216786689843556,
"grad_norm": 0.5082156658172607,
"learning_rate": 6.056623982276945e-06,
"loss": 1.8278,
"num_input_tokens_seen": 128450560,
"step": 490
},
{
"epoch": 0.12192699279860938,
"grad_norm": 0.4969983696937561,
"learning_rate": 6.039558454088796e-06,
"loss": 2.0705,
"num_input_tokens_seen": 128712704,
"step": 491
},
{
"epoch": 0.12217531661286317,
"grad_norm": 0.6239178776741028,
"learning_rate": 6.022480259208951e-06,
"loss": 1.5768,
"num_input_tokens_seen": 128974848,
"step": 492
},
{
"epoch": 0.12242364042711697,
"grad_norm": 0.5710934400558472,
"learning_rate": 6.005389605729824e-06,
"loss": 1.6925,
"num_input_tokens_seen": 129236992,
"step": 493
},
{
"epoch": 0.12267196424137075,
"grad_norm": 0.5775221586227417,
"learning_rate": 5.988286701895631e-06,
"loss": 1.924,
"num_input_tokens_seen": 129499136,
"step": 494
},
{
"epoch": 0.12292028805562454,
"grad_norm": 0.4340408146381378,
"learning_rate": 5.97117175609986e-06,
"loss": 1.791,
"num_input_tokens_seen": 129761280,
"step": 495
},
{
"epoch": 0.12316861186987832,
"grad_norm": 0.3268572986125946,
"learning_rate": 5.954044976882725e-06,
"loss": 1.7505,
"num_input_tokens_seen": 130023424,
"step": 496
},
{
"epoch": 0.1234169356841321,
"grad_norm": 0.43856051564216614,
"learning_rate": 5.936906572928625e-06,
"loss": 1.3188,
"num_input_tokens_seen": 130285568,
"step": 497
},
{
"epoch": 0.1236652594983859,
"grad_norm": 0.459693044424057,
"learning_rate": 5.919756753063601e-06,
"loss": 1.8807,
"num_input_tokens_seen": 130547712,
"step": 498
},
{
"epoch": 0.12391358331263969,
"grad_norm": 1.3583054542541504,
"learning_rate": 5.902595726252801e-06,
"loss": 1.5139,
"num_input_tokens_seen": 130809856,
"step": 499
},
{
"epoch": 0.12416190712689347,
"grad_norm": 0.4457927644252777,
"learning_rate": 5.885423701597918e-06,
"loss": 1.0955,
"num_input_tokens_seen": 131072000,
"step": 500
},
{
"epoch": 0.12441023094114725,
"grad_norm": 0.45763128995895386,
"learning_rate": 5.8682408883346535e-06,
"loss": 1.3018,
"num_input_tokens_seen": 131334144,
"step": 501
},
{
"epoch": 0.12465855475540104,
"grad_norm": 0.43406641483306885,
"learning_rate": 5.851047495830163e-06,
"loss": 1.8796,
"num_input_tokens_seen": 131596288,
"step": 502
},
{
"epoch": 0.12490687856965484,
"grad_norm": 0.6790747046470642,
"learning_rate": 5.8338437335805124e-06,
"loss": 1.7733,
"num_input_tokens_seen": 131858432,
"step": 503
},
{
"epoch": 0.12515520238390862,
"grad_norm": 0.469204306602478,
"learning_rate": 5.816629811208112e-06,
"loss": 1.7257,
"num_input_tokens_seen": 132120576,
"step": 504
},
{
"epoch": 0.1254035261981624,
"grad_norm": 0.29336878657341003,
"learning_rate": 5.799405938459175e-06,
"loss": 2.0604,
"num_input_tokens_seen": 132382720,
"step": 505
},
{
"epoch": 0.1256518500124162,
"grad_norm": 1.0650948286056519,
"learning_rate": 5.782172325201155e-06,
"loss": 1.4833,
"num_input_tokens_seen": 132644864,
"step": 506
},
{
"epoch": 0.12590017382667,
"grad_norm": 0.5020011067390442,
"learning_rate": 5.764929181420191e-06,
"loss": 1.9276,
"num_input_tokens_seen": 132907008,
"step": 507
},
{
"epoch": 0.12614849764092376,
"grad_norm": 0.623615562915802,
"learning_rate": 5.747676717218549e-06,
"loss": 1.6809,
"num_input_tokens_seen": 133169152,
"step": 508
},
{
"epoch": 0.12639682145517755,
"grad_norm": 0.6677452325820923,
"learning_rate": 5.730415142812059e-06,
"loss": 1.6377,
"num_input_tokens_seen": 133431296,
"step": 509
},
{
"epoch": 0.12664514526943133,
"grad_norm": 0.5016704201698303,
"learning_rate": 5.7131446685275595e-06,
"loss": 1.4934,
"num_input_tokens_seen": 133693440,
"step": 510
},
{
"epoch": 0.12689346908368512,
"grad_norm": 0.4135633707046509,
"learning_rate": 5.695865504800328e-06,
"loss": 1.6179,
"num_input_tokens_seen": 133955584,
"step": 511
},
{
"epoch": 0.12714179289793892,
"grad_norm": 0.7254384160041809,
"learning_rate": 5.678577862171523e-06,
"loss": 1.5534,
"num_input_tokens_seen": 134217728,
"step": 512
},
{
"epoch": 0.1273901167121927,
"grad_norm": 0.7331421971321106,
"learning_rate": 5.661281951285613e-06,
"loss": 1.7944,
"num_input_tokens_seen": 134479872,
"step": 513
},
{
"epoch": 0.1276384405264465,
"grad_norm": 0.6121946573257446,
"learning_rate": 5.643977982887815e-06,
"loss": 1.6006,
"num_input_tokens_seen": 134742016,
"step": 514
},
{
"epoch": 0.12788676434070026,
"grad_norm": 0.4421185255050659,
"learning_rate": 5.626666167821522e-06,
"loss": 1.5906,
"num_input_tokens_seen": 135004160,
"step": 515
},
{
"epoch": 0.12813508815495406,
"grad_norm": 0.3607276380062103,
"learning_rate": 5.609346717025738e-06,
"loss": 1.7194,
"num_input_tokens_seen": 135266304,
"step": 516
},
{
"epoch": 0.12838341196920786,
"grad_norm": 0.66265469789505,
"learning_rate": 5.592019841532507e-06,
"loss": 1.5176,
"num_input_tokens_seen": 135528448,
"step": 517
},
{
"epoch": 0.12863173578346163,
"grad_norm": 0.4719207286834717,
"learning_rate": 5.5746857524643335e-06,
"loss": 1.5677,
"num_input_tokens_seen": 135790592,
"step": 518
},
{
"epoch": 0.12888005959771542,
"grad_norm": 0.5282115936279297,
"learning_rate": 5.557344661031628e-06,
"loss": 1.6344,
"num_input_tokens_seen": 136052736,
"step": 519
},
{
"epoch": 0.1291283834119692,
"grad_norm": 0.3419008255004883,
"learning_rate": 5.539996778530114e-06,
"loss": 1.9051,
"num_input_tokens_seen": 136314880,
"step": 520
},
{
"epoch": 0.129376707226223,
"grad_norm": 0.7775862812995911,
"learning_rate": 5.522642316338268e-06,
"loss": 1.7739,
"num_input_tokens_seen": 136577024,
"step": 521
},
{
"epoch": 0.1296250310404768,
"grad_norm": 0.554291307926178,
"learning_rate": 5.505281485914732e-06,
"loss": 1.6473,
"num_input_tokens_seen": 136839168,
"step": 522
},
{
"epoch": 0.12987335485473056,
"grad_norm": 0.5523970127105713,
"learning_rate": 5.487914498795748e-06,
"loss": 1.5672,
"num_input_tokens_seen": 137101312,
"step": 523
},
{
"epoch": 0.13012167866898436,
"grad_norm": 0.4398843050003052,
"learning_rate": 5.470541566592573e-06,
"loss": 1.5184,
"num_input_tokens_seen": 137363456,
"step": 524
},
{
"epoch": 0.13037000248323813,
"grad_norm": 0.36770564317703247,
"learning_rate": 5.453162900988902e-06,
"loss": 1.272,
"num_input_tokens_seen": 137625600,
"step": 525
},
{
"epoch": 0.13061832629749193,
"grad_norm": 0.36852288246154785,
"learning_rate": 5.435778713738292e-06,
"loss": 1.914,
"num_input_tokens_seen": 137887744,
"step": 526
},
{
"epoch": 0.13086665011174572,
"grad_norm": 0.5804073810577393,
"learning_rate": 5.41838921666158e-06,
"loss": 1.3187,
"num_input_tokens_seen": 138149888,
"step": 527
},
{
"epoch": 0.1311149739259995,
"grad_norm": 0.4716220796108246,
"learning_rate": 5.400994621644294e-06,
"loss": 1.6562,
"num_input_tokens_seen": 138412032,
"step": 528
},
{
"epoch": 0.1313632977402533,
"grad_norm": 0.6712131500244141,
"learning_rate": 5.383595140634093e-06,
"loss": 1.6972,
"num_input_tokens_seen": 138674176,
"step": 529
},
{
"epoch": 0.1316116215545071,
"grad_norm": 0.6651138067245483,
"learning_rate": 5.366190985638159e-06,
"loss": 1.2614,
"num_input_tokens_seen": 138936320,
"step": 530
},
{
"epoch": 0.13185994536876086,
"grad_norm": 0.6065943241119385,
"learning_rate": 5.348782368720627e-06,
"loss": 1.8296,
"num_input_tokens_seen": 139198464,
"step": 531
},
{
"epoch": 0.13210826918301466,
"grad_norm": 0.5409244298934937,
"learning_rate": 5.3313695020000026e-06,
"loss": 1.6437,
"num_input_tokens_seen": 139460608,
"step": 532
},
{
"epoch": 0.13235659299726843,
"grad_norm": 0.742667555809021,
"learning_rate": 5.3139525976465675e-06,
"loss": 1.5862,
"num_input_tokens_seen": 139722752,
"step": 533
},
{
"epoch": 0.13260491681152223,
"grad_norm": 0.6471251845359802,
"learning_rate": 5.296531867879809e-06,
"loss": 1.6151,
"num_input_tokens_seen": 139984896,
"step": 534
},
{
"epoch": 0.13285324062577603,
"grad_norm": 0.643791675567627,
"learning_rate": 5.27910752496582e-06,
"loss": 1.7427,
"num_input_tokens_seen": 140247040,
"step": 535
},
{
"epoch": 0.1331015644400298,
"grad_norm": 0.7029093503952026,
"learning_rate": 5.2616797812147205e-06,
"loss": 1.6691,
"num_input_tokens_seen": 140509184,
"step": 536
},
{
"epoch": 0.1333498882542836,
"grad_norm": 0.7368614077568054,
"learning_rate": 5.244248848978067e-06,
"loss": 1.9763,
"num_input_tokens_seen": 140771328,
"step": 537
},
{
"epoch": 0.13359821206853736,
"grad_norm": 0.7032376527786255,
"learning_rate": 5.226814940646268e-06,
"loss": 1.7725,
"num_input_tokens_seen": 141033472,
"step": 538
},
{
"epoch": 0.13384653588279116,
"grad_norm": 0.47285741567611694,
"learning_rate": 5.209378268645998e-06,
"loss": 2.148,
"num_input_tokens_seen": 141295616,
"step": 539
},
{
"epoch": 0.13409485969704496,
"grad_norm": 0.6670664548873901,
"learning_rate": 5.1919390454376e-06,
"loss": 1.7662,
"num_input_tokens_seen": 141557760,
"step": 540
},
{
"epoch": 0.13434318351129873,
"grad_norm": 0.6090880632400513,
"learning_rate": 5.174497483512506e-06,
"loss": 1.2453,
"num_input_tokens_seen": 141819904,
"step": 541
},
{
"epoch": 0.13459150732555253,
"grad_norm": 0.48240217566490173,
"learning_rate": 5.157053795390642e-06,
"loss": 1.7743,
"num_input_tokens_seen": 142082048,
"step": 542
},
{
"epoch": 0.1348398311398063,
"grad_norm": 0.5195634961128235,
"learning_rate": 5.139608193617846e-06,
"loss": 1.8024,
"num_input_tokens_seen": 142344192,
"step": 543
},
{
"epoch": 0.1350881549540601,
"grad_norm": 0.5877946615219116,
"learning_rate": 5.1221608907632665e-06,
"loss": 1.6163,
"num_input_tokens_seen": 142606336,
"step": 544
},
{
"epoch": 0.1353364787683139,
"grad_norm": 0.6305245757102966,
"learning_rate": 5.1047120994167855e-06,
"loss": 1.5309,
"num_input_tokens_seen": 142868480,
"step": 545
},
{
"epoch": 0.13558480258256767,
"grad_norm": 0.591148853302002,
"learning_rate": 5.087262032186418e-06,
"loss": 1.531,
"num_input_tokens_seen": 143130624,
"step": 546
},
{
"epoch": 0.13583312639682146,
"grad_norm": 0.8955023884773254,
"learning_rate": 5.069810901695727e-06,
"loss": 1.7347,
"num_input_tokens_seen": 143392768,
"step": 547
},
{
"epoch": 0.13608145021107523,
"grad_norm": 0.5800215005874634,
"learning_rate": 5.05235892058123e-06,
"loss": 1.6724,
"num_input_tokens_seen": 143654912,
"step": 548
},
{
"epoch": 0.13632977402532903,
"grad_norm": 0.5544260144233704,
"learning_rate": 5.034906301489808e-06,
"loss": 1.5061,
"num_input_tokens_seen": 143917056,
"step": 549
},
{
"epoch": 0.13657809783958283,
"grad_norm": 0.5210686922073364,
"learning_rate": 5.0174532570761194e-06,
"loss": 1.3236,
"num_input_tokens_seen": 144179200,
"step": 550
},
{
"epoch": 0.1368264216538366,
"grad_norm": 0.39467668533325195,
"learning_rate": 5e-06,
"loss": 1.5716,
"num_input_tokens_seen": 144441344,
"step": 551
},
{
"epoch": 0.1370747454680904,
"grad_norm": 0.45690640807151794,
"learning_rate": 4.982546742923883e-06,
"loss": 1.5183,
"num_input_tokens_seen": 144703488,
"step": 552
},
{
"epoch": 0.13732306928234417,
"grad_norm": 0.34431830048561096,
"learning_rate": 4.965093698510192e-06,
"loss": 1.7103,
"num_input_tokens_seen": 144965632,
"step": 553
},
{
"epoch": 0.13757139309659797,
"grad_norm": 0.6095772385597229,
"learning_rate": 4.9476410794187726e-06,
"loss": 1.79,
"num_input_tokens_seen": 145227776,
"step": 554
},
{
"epoch": 0.13781971691085176,
"grad_norm": 0.5050289630889893,
"learning_rate": 4.9301890983042744e-06,
"loss": 1.6136,
"num_input_tokens_seen": 145489920,
"step": 555
},
{
"epoch": 0.13806804072510553,
"grad_norm": 0.6285063028335571,
"learning_rate": 4.9127379678135825e-06,
"loss": 1.5999,
"num_input_tokens_seen": 145752064,
"step": 556
},
{
"epoch": 0.13831636453935933,
"grad_norm": 0.6678712368011475,
"learning_rate": 4.895287900583216e-06,
"loss": 1.2344,
"num_input_tokens_seen": 146014208,
"step": 557
},
{
"epoch": 0.1385646883536131,
"grad_norm": 0.3648228347301483,
"learning_rate": 4.877839109236735e-06,
"loss": 1.5726,
"num_input_tokens_seen": 146276352,
"step": 558
},
{
"epoch": 0.1388130121678669,
"grad_norm": 0.5992112755775452,
"learning_rate": 4.860391806382157e-06,
"loss": 1.9771,
"num_input_tokens_seen": 146538496,
"step": 559
},
{
"epoch": 0.1390613359821207,
"grad_norm": 0.4710709750652313,
"learning_rate": 4.842946204609359e-06,
"loss": 1.8191,
"num_input_tokens_seen": 146800640,
"step": 560
},
{
"epoch": 0.13930965979637447,
"grad_norm": 0.5668407678604126,
"learning_rate": 4.825502516487497e-06,
"loss": 1.8406,
"num_input_tokens_seen": 147062784,
"step": 561
},
{
"epoch": 0.13955798361062827,
"grad_norm": 0.5589337944984436,
"learning_rate": 4.8080609545624004e-06,
"loss": 1.5411,
"num_input_tokens_seen": 147324928,
"step": 562
},
{
"epoch": 0.13980630742488204,
"grad_norm": 0.7744218707084656,
"learning_rate": 4.7906217313540035e-06,
"loss": 1.4392,
"num_input_tokens_seen": 147587072,
"step": 563
},
{
"epoch": 0.14005463123913584,
"grad_norm": 0.8113576769828796,
"learning_rate": 4.7731850593537316e-06,
"loss": 1.6712,
"num_input_tokens_seen": 147849216,
"step": 564
},
{
"epoch": 0.14030295505338963,
"grad_norm": 0.8065240979194641,
"learning_rate": 4.755751151021934e-06,
"loss": 1.7134,
"num_input_tokens_seen": 148111360,
"step": 565
},
{
"epoch": 0.1405512788676434,
"grad_norm": 0.6279537081718445,
"learning_rate": 4.738320218785281e-06,
"loss": 1.6341,
"num_input_tokens_seen": 148373504,
"step": 566
},
{
"epoch": 0.1407996026818972,
"grad_norm": 0.34301266074180603,
"learning_rate": 4.720892475034181e-06,
"loss": 1.7529,
"num_input_tokens_seen": 148635648,
"step": 567
},
{
"epoch": 0.14104792649615097,
"grad_norm": 0.3987272381782532,
"learning_rate": 4.703468132120193e-06,
"loss": 1.3504,
"num_input_tokens_seen": 148897792,
"step": 568
},
{
"epoch": 0.14129625031040477,
"grad_norm": 0.5643488168716431,
"learning_rate": 4.686047402353433e-06,
"loss": 1.6918,
"num_input_tokens_seen": 149159936,
"step": 569
},
{
"epoch": 0.14154457412465857,
"grad_norm": 1.1378772258758545,
"learning_rate": 4.668630498000001e-06,
"loss": 1.6232,
"num_input_tokens_seen": 149422080,
"step": 570
},
{
"epoch": 0.14179289793891234,
"grad_norm": 0.40794894099235535,
"learning_rate": 4.651217631279374e-06,
"loss": 1.2633,
"num_input_tokens_seen": 149684224,
"step": 571
},
{
"epoch": 0.14204122175316614,
"grad_norm": 0.40206679701805115,
"learning_rate": 4.6338090143618435e-06,
"loss": 1.7796,
"num_input_tokens_seen": 149946368,
"step": 572
},
{
"epoch": 0.1422895455674199,
"grad_norm": 0.6353849768638611,
"learning_rate": 4.6164048593659076e-06,
"loss": 1.5364,
"num_input_tokens_seen": 150208512,
"step": 573
},
{
"epoch": 0.1425378693816737,
"grad_norm": 0.5148465037345886,
"learning_rate": 4.5990053783557066e-06,
"loss": 1.5442,
"num_input_tokens_seen": 150470656,
"step": 574
},
{
"epoch": 0.1427861931959275,
"grad_norm": 0.5749762058258057,
"learning_rate": 4.581610783338424e-06,
"loss": 1.0381,
"num_input_tokens_seen": 150732800,
"step": 575
},
{
"epoch": 0.14303451701018127,
"grad_norm": 0.9002466201782227,
"learning_rate": 4.564221286261709e-06,
"loss": 1.4117,
"num_input_tokens_seen": 150994944,
"step": 576
},
{
"epoch": 0.14328284082443507,
"grad_norm": 0.3512033224105835,
"learning_rate": 4.546837099011101e-06,
"loss": 1.5649,
"num_input_tokens_seen": 151257088,
"step": 577
},
{
"epoch": 0.14353116463868884,
"grad_norm": 0.45879220962524414,
"learning_rate": 4.529458433407429e-06,
"loss": 1.4193,
"num_input_tokens_seen": 151519232,
"step": 578
},
{
"epoch": 0.14377948845294264,
"grad_norm": 0.7155461311340332,
"learning_rate": 4.512085501204254e-06,
"loss": 1.5547,
"num_input_tokens_seen": 151781376,
"step": 579
},
{
"epoch": 0.14402781226719644,
"grad_norm": 0.6171830892562866,
"learning_rate": 4.494718514085269e-06,
"loss": 1.2821,
"num_input_tokens_seen": 152043520,
"step": 580
},
{
"epoch": 0.1442761360814502,
"grad_norm": 0.5107810497283936,
"learning_rate": 4.477357683661734e-06,
"loss": 1.6026,
"num_input_tokens_seen": 152305664,
"step": 581
},
{
"epoch": 0.144524459895704,
"grad_norm": 0.6672670245170593,
"learning_rate": 4.460003221469886e-06,
"loss": 1.7716,
"num_input_tokens_seen": 152567808,
"step": 582
},
{
"epoch": 0.14477278370995778,
"grad_norm": 0.4775781035423279,
"learning_rate": 4.442655338968373e-06,
"loss": 1.3139,
"num_input_tokens_seen": 152829952,
"step": 583
},
{
"epoch": 0.14502110752421157,
"grad_norm": 0.665295422077179,
"learning_rate": 4.425314247535668e-06,
"loss": 1.2141,
"num_input_tokens_seen": 153092096,
"step": 584
},
{
"epoch": 0.14526943133846537,
"grad_norm": 0.49010828137397766,
"learning_rate": 4.4079801584674955e-06,
"loss": 1.3422,
"num_input_tokens_seen": 153354240,
"step": 585
},
{
"epoch": 0.14551775515271914,
"grad_norm": 0.4619687795639038,
"learning_rate": 4.390653282974264e-06,
"loss": 1.5652,
"num_input_tokens_seen": 153616384,
"step": 586
},
{
"epoch": 0.14576607896697294,
"grad_norm": 0.37380003929138184,
"learning_rate": 4.373333832178478e-06,
"loss": 1.8029,
"num_input_tokens_seen": 153878528,
"step": 587
},
{
"epoch": 0.1460144027812267,
"grad_norm": 0.5546239614486694,
"learning_rate": 4.356022017112187e-06,
"loss": 1.7681,
"num_input_tokens_seen": 154140672,
"step": 588
},
{
"epoch": 0.1462627265954805,
"grad_norm": 0.6969411969184875,
"learning_rate": 4.3387180487143875e-06,
"loss": 1.9277,
"num_input_tokens_seen": 154402816,
"step": 589
},
{
"epoch": 0.1465110504097343,
"grad_norm": 0.7840688824653625,
"learning_rate": 4.321422137828479e-06,
"loss": 1.6007,
"num_input_tokens_seen": 154664960,
"step": 590
},
{
"epoch": 0.14675937422398808,
"grad_norm": 0.6813507080078125,
"learning_rate": 4.304134495199675e-06,
"loss": 1.1834,
"num_input_tokens_seen": 154927104,
"step": 591
},
{
"epoch": 0.14700769803824187,
"grad_norm": 0.5060350298881531,
"learning_rate": 4.286855331472442e-06,
"loss": 1.4612,
"num_input_tokens_seen": 155189248,
"step": 592
},
{
"epoch": 0.14725602185249564,
"grad_norm": 0.5000078678131104,
"learning_rate": 4.269584857187942e-06,
"loss": 1.64,
"num_input_tokens_seen": 155451392,
"step": 593
},
{
"epoch": 0.14750434566674944,
"grad_norm": 0.5458803176879883,
"learning_rate": 4.2523232827814534e-06,
"loss": 2.1238,
"num_input_tokens_seen": 155713536,
"step": 594
},
{
"epoch": 0.14775266948100324,
"grad_norm": 1.3677194118499756,
"learning_rate": 4.23507081857981e-06,
"loss": 1.5499,
"num_input_tokens_seen": 155975680,
"step": 595
},
{
"epoch": 0.148000993295257,
"grad_norm": 0.5004885196685791,
"learning_rate": 4.217827674798845e-06,
"loss": 1.836,
"num_input_tokens_seen": 156237824,
"step": 596
},
{
"epoch": 0.1482493171095108,
"grad_norm": 1.3544896841049194,
"learning_rate": 4.200594061540827e-06,
"loss": 1.5202,
"num_input_tokens_seen": 156499968,
"step": 597
},
{
"epoch": 0.14849764092376458,
"grad_norm": 0.41308310627937317,
"learning_rate": 4.183370188791891e-06,
"loss": 1.4474,
"num_input_tokens_seen": 156762112,
"step": 598
},
{
"epoch": 0.14874596473801838,
"grad_norm": 0.5802090167999268,
"learning_rate": 4.166156266419489e-06,
"loss": 1.8016,
"num_input_tokens_seen": 157024256,
"step": 599
},
{
"epoch": 0.14899428855227217,
"grad_norm": 0.5909608602523804,
"learning_rate": 4.148952504169839e-06,
"loss": 1.5757,
"num_input_tokens_seen": 157286400,
"step": 600
},
{
"epoch": 0.14924261236652595,
"grad_norm": 0.41110658645629883,
"learning_rate": 4.131759111665349e-06,
"loss": 0.8013,
"num_input_tokens_seen": 157548544,
"step": 601
},
{
"epoch": 0.14949093618077974,
"grad_norm": 0.5430499911308289,
"learning_rate": 4.114576298402085e-06,
"loss": 1.9595,
"num_input_tokens_seen": 157810688,
"step": 602
},
{
"epoch": 0.1497392599950335,
"grad_norm": 0.7195497751235962,
"learning_rate": 4.0974042737472005e-06,
"loss": 1.5623,
"num_input_tokens_seen": 158072832,
"step": 603
},
{
"epoch": 0.1499875838092873,
"grad_norm": 0.4989107847213745,
"learning_rate": 4.0802432469364e-06,
"loss": 1.3031,
"num_input_tokens_seen": 158334976,
"step": 604
},
{
"epoch": 0.1502359076235411,
"grad_norm": 0.5433792471885681,
"learning_rate": 4.063093427071376e-06,
"loss": 1.5003,
"num_input_tokens_seen": 158597120,
"step": 605
},
{
"epoch": 0.15048423143779488,
"grad_norm": 0.3139537274837494,
"learning_rate": 4.045955023117276e-06,
"loss": 1.2943,
"num_input_tokens_seen": 158859264,
"step": 606
},
{
"epoch": 0.15073255525204868,
"grad_norm": 0.7295723557472229,
"learning_rate": 4.028828243900141e-06,
"loss": 1.9625,
"num_input_tokens_seen": 159121408,
"step": 607
},
{
"epoch": 0.15098087906630245,
"grad_norm": 0.7763230204582214,
"learning_rate": 4.0117132981043695e-06,
"loss": 1.8425,
"num_input_tokens_seen": 159383552,
"step": 608
},
{
"epoch": 0.15122920288055625,
"grad_norm": 0.5297269821166992,
"learning_rate": 3.994610394270178e-06,
"loss": 1.6922,
"num_input_tokens_seen": 159645696,
"step": 609
},
{
"epoch": 0.15147752669481004,
"grad_norm": 0.4328116476535797,
"learning_rate": 3.977519740791049e-06,
"loss": 0.9763,
"num_input_tokens_seen": 159907840,
"step": 610
},
{
"epoch": 0.15172585050906381,
"grad_norm": 0.6759671568870544,
"learning_rate": 3.960441545911205e-06,
"loss": 1.7297,
"num_input_tokens_seen": 160169984,
"step": 611
},
{
"epoch": 0.1519741743233176,
"grad_norm": 0.44700494408607483,
"learning_rate": 3.943376017723058e-06,
"loss": 1.9346,
"num_input_tokens_seen": 160432128,
"step": 612
},
{
"epoch": 0.15222249813757138,
"grad_norm": 0.8407000303268433,
"learning_rate": 3.926323364164684e-06,
"loss": 1.7382,
"num_input_tokens_seen": 160694272,
"step": 613
},
{
"epoch": 0.15247082195182518,
"grad_norm": 0.8016761541366577,
"learning_rate": 3.909283793017289e-06,
"loss": 1.7423,
"num_input_tokens_seen": 160956416,
"step": 614
},
{
"epoch": 0.15271914576607898,
"grad_norm": 0.414358526468277,
"learning_rate": 3.892257511902664e-06,
"loss": 1.4276,
"num_input_tokens_seen": 161218560,
"step": 615
},
{
"epoch": 0.15296746958033275,
"grad_norm": 1.0210763216018677,
"learning_rate": 3.875244728280676e-06,
"loss": 1.3653,
"num_input_tokens_seen": 161480704,
"step": 616
},
{
"epoch": 0.15321579339458655,
"grad_norm": 0.8063709735870361,
"learning_rate": 3.8582456494467214e-06,
"loss": 1.8222,
"num_input_tokens_seen": 161742848,
"step": 617
},
{
"epoch": 0.15346411720884032,
"grad_norm": 0.5376055836677551,
"learning_rate": 3.841260482529215e-06,
"loss": 1.7019,
"num_input_tokens_seen": 162004992,
"step": 618
},
{
"epoch": 0.15371244102309412,
"grad_norm": 0.5194859504699707,
"learning_rate": 3.82428943448705e-06,
"loss": 1.633,
"num_input_tokens_seen": 162267136,
"step": 619
},
{
"epoch": 0.1539607648373479,
"grad_norm": 0.5276075005531311,
"learning_rate": 3.8073327121070968e-06,
"loss": 1.6824,
"num_input_tokens_seen": 162529280,
"step": 620
},
{
"epoch": 0.15420908865160168,
"grad_norm": 0.7873098254203796,
"learning_rate": 3.790390522001662e-06,
"loss": 1.287,
"num_input_tokens_seen": 162791424,
"step": 621
},
{
"epoch": 0.15445741246585548,
"grad_norm": 0.6120622158050537,
"learning_rate": 3.7734630706059873e-06,
"loss": 1.8678,
"num_input_tokens_seen": 163053568,
"step": 622
},
{
"epoch": 0.15470573628010925,
"grad_norm": 0.5474843382835388,
"learning_rate": 3.756550564175727e-06,
"loss": 1.4913,
"num_input_tokens_seen": 163315712,
"step": 623
},
{
"epoch": 0.15495406009436305,
"grad_norm": 2.4813928604125977,
"learning_rate": 3.7396532087844318e-06,
"loss": 1.5975,
"num_input_tokens_seen": 163577856,
"step": 624
},
{
"epoch": 0.15520238390861685,
"grad_norm": 0.4030190408229828,
"learning_rate": 3.7227712103210485e-06,
"loss": 1.3417,
"num_input_tokens_seen": 163840000,
"step": 625
},
{
"epoch": 0.15545070772287062,
"grad_norm": 0.4890609085559845,
"learning_rate": 3.705904774487396e-06,
"loss": 1.6517,
"num_input_tokens_seen": 164102144,
"step": 626
},
{
"epoch": 0.15569903153712442,
"grad_norm": 0.4225277006626129,
"learning_rate": 3.6890541067956775e-06,
"loss": 1.9024,
"num_input_tokens_seen": 164364288,
"step": 627
},
{
"epoch": 0.1559473553513782,
"grad_norm": 0.3365168273448944,
"learning_rate": 3.672219412565956e-06,
"loss": 1.8831,
"num_input_tokens_seen": 164626432,
"step": 628
},
{
"epoch": 0.15619567916563198,
"grad_norm": 0.46206042170524597,
"learning_rate": 3.655400896923672e-06,
"loss": 1.5472,
"num_input_tokens_seen": 164888576,
"step": 629
},
{
"epoch": 0.15644400297988578,
"grad_norm": 0.5152633190155029,
"learning_rate": 3.6385987647971287e-06,
"loss": 1.9021,
"num_input_tokens_seen": 165150720,
"step": 630
},
{
"epoch": 0.15669232679413955,
"grad_norm": 0.6955782771110535,
"learning_rate": 3.6218132209150047e-06,
"loss": 1.5289,
"num_input_tokens_seen": 165412864,
"step": 631
},
{
"epoch": 0.15694065060839335,
"grad_norm": 0.8016681671142578,
"learning_rate": 3.6050444698038547e-06,
"loss": 1.2874,
"num_input_tokens_seen": 165675008,
"step": 632
},
{
"epoch": 0.15718897442264712,
"grad_norm": 0.32328078150749207,
"learning_rate": 3.5882927157856175e-06,
"loss": 1.9816,
"num_input_tokens_seen": 165937152,
"step": 633
},
{
"epoch": 0.15743729823690092,
"grad_norm": 0.4143851101398468,
"learning_rate": 3.571558162975133e-06,
"loss": 1.4726,
"num_input_tokens_seen": 166199296,
"step": 634
},
{
"epoch": 0.15768562205115472,
"grad_norm": 0.6780726909637451,
"learning_rate": 3.5548410152776414e-06,
"loss": 1.5232,
"num_input_tokens_seen": 166461440,
"step": 635
},
{
"epoch": 0.1579339458654085,
"grad_norm": 0.4231894314289093,
"learning_rate": 3.538141476386317e-06,
"loss": 1.7631,
"num_input_tokens_seen": 166723584,
"step": 636
},
{
"epoch": 0.15818226967966229,
"grad_norm": 0.4781966805458069,
"learning_rate": 3.521459749779769e-06,
"loss": 2.3027,
"num_input_tokens_seen": 166985728,
"step": 637
},
{
"epoch": 0.15843059349391606,
"grad_norm": 0.6974207162857056,
"learning_rate": 3.5047960387195673e-06,
"loss": 1.4963,
"num_input_tokens_seen": 167247872,
"step": 638
},
{
"epoch": 0.15867891730816985,
"grad_norm": 0.4340938627719879,
"learning_rate": 3.488150546247778e-06,
"loss": 1.6811,
"num_input_tokens_seen": 167510016,
"step": 639
},
{
"epoch": 0.15892724112242365,
"grad_norm": 0.6632753014564514,
"learning_rate": 3.471523475184472e-06,
"loss": 2.0226,
"num_input_tokens_seen": 167772160,
"step": 640
},
{
"epoch": 0.15917556493667742,
"grad_norm": 0.5399497747421265,
"learning_rate": 3.4549150281252635e-06,
"loss": 1.6728,
"num_input_tokens_seen": 168034304,
"step": 641
},
{
"epoch": 0.15942388875093122,
"grad_norm": 0.6213698387145996,
"learning_rate": 3.4383254074388373e-06,
"loss": 1.7282,
"num_input_tokens_seen": 168296448,
"step": 642
},
{
"epoch": 0.159672212565185,
"grad_norm": 0.8171265125274658,
"learning_rate": 3.4217548152644887e-06,
"loss": 1.7681,
"num_input_tokens_seen": 168558592,
"step": 643
},
{
"epoch": 0.1599205363794388,
"grad_norm": 0.36624467372894287,
"learning_rate": 3.40520345350965e-06,
"loss": 1.7461,
"num_input_tokens_seen": 168820736,
"step": 644
},
{
"epoch": 0.16016886019369259,
"grad_norm": 0.4452555179595947,
"learning_rate": 3.3886715238474454e-06,
"loss": 1.8516,
"num_input_tokens_seen": 169082880,
"step": 645
},
{
"epoch": 0.16041718400794636,
"grad_norm": 0.6282974481582642,
"learning_rate": 3.372159227714218e-06,
"loss": 1.589,
"num_input_tokens_seen": 169345024,
"step": 646
},
{
"epoch": 0.16066550782220015,
"grad_norm": 0.2544865012168884,
"learning_rate": 3.355666766307084e-06,
"loss": 1.4478,
"num_input_tokens_seen": 169607168,
"step": 647
},
{
"epoch": 0.16091383163645392,
"grad_norm": 0.7939302921295166,
"learning_rate": 3.339194340581485e-06,
"loss": 1.5844,
"num_input_tokens_seen": 169869312,
"step": 648
},
{
"epoch": 0.16116215545070772,
"grad_norm": 1.1076239347457886,
"learning_rate": 3.322742151248726e-06,
"loss": 1.8774,
"num_input_tokens_seen": 170131456,
"step": 649
},
{
"epoch": 0.16141047926496152,
"grad_norm": 0.5885612964630127,
"learning_rate": 3.3063103987735433e-06,
"loss": 1.8922,
"num_input_tokens_seen": 170393600,
"step": 650
},
{
"epoch": 0.1616588030792153,
"grad_norm": 0.8514074683189392,
"learning_rate": 3.289899283371657e-06,
"loss": 1.4842,
"num_input_tokens_seen": 170655744,
"step": 651
},
{
"epoch": 0.1619071268934691,
"grad_norm": 0.6668713092803955,
"learning_rate": 3.273509005007327e-06,
"loss": 1.538,
"num_input_tokens_seen": 170917888,
"step": 652
},
{
"epoch": 0.16215545070772286,
"grad_norm": 0.4702228009700775,
"learning_rate": 3.2571397633909252e-06,
"loss": 1.4504,
"num_input_tokens_seen": 171180032,
"step": 653
},
{
"epoch": 0.16240377452197666,
"grad_norm": 0.4500584900379181,
"learning_rate": 3.2407917579764914e-06,
"loss": 1.8598,
"num_input_tokens_seen": 171442176,
"step": 654
},
{
"epoch": 0.16265209833623046,
"grad_norm": 0.477428674697876,
"learning_rate": 3.224465187959316e-06,
"loss": 1.9269,
"num_input_tokens_seen": 171704320,
"step": 655
},
{
"epoch": 0.16290042215048423,
"grad_norm": 0.43787822127342224,
"learning_rate": 3.2081602522734987e-06,
"loss": 1.9365,
"num_input_tokens_seen": 171966464,
"step": 656
},
{
"epoch": 0.16314874596473802,
"grad_norm": 0.4032402038574219,
"learning_rate": 3.1918771495895395e-06,
"loss": 1.3458,
"num_input_tokens_seen": 172228608,
"step": 657
},
{
"epoch": 0.1633970697789918,
"grad_norm": 0.733826220035553,
"learning_rate": 3.1756160783119015e-06,
"loss": 1.4177,
"num_input_tokens_seen": 172490752,
"step": 658
},
{
"epoch": 0.1636453935932456,
"grad_norm": 0.34868526458740234,
"learning_rate": 3.1593772365766107e-06,
"loss": 1.6965,
"num_input_tokens_seen": 172752896,
"step": 659
},
{
"epoch": 0.1638937174074994,
"grad_norm": 0.8062167167663574,
"learning_rate": 3.1431608222488276e-06,
"loss": 1.7109,
"num_input_tokens_seen": 173015040,
"step": 660
},
{
"epoch": 0.16414204122175316,
"grad_norm": 0.6374398469924927,
"learning_rate": 3.12696703292044e-06,
"loss": 1.2467,
"num_input_tokens_seen": 173277184,
"step": 661
},
{
"epoch": 0.16439036503600696,
"grad_norm": 0.593471348285675,
"learning_rate": 3.110796065907665e-06,
"loss": 1.2338,
"num_input_tokens_seen": 173539328,
"step": 662
},
{
"epoch": 0.16463868885026073,
"grad_norm": 0.7001546621322632,
"learning_rate": 3.09464811824863e-06,
"loss": 1.944,
"num_input_tokens_seen": 173801472,
"step": 663
},
{
"epoch": 0.16488701266451453,
"grad_norm": 0.39311012625694275,
"learning_rate": 3.078523386700982e-06,
"loss": 1.8565,
"num_input_tokens_seen": 174063616,
"step": 664
},
{
"epoch": 0.16513533647876832,
"grad_norm": 0.5350921750068665,
"learning_rate": 3.0624220677394854e-06,
"loss": 1.7504,
"num_input_tokens_seen": 174325760,
"step": 665
},
{
"epoch": 0.1653836602930221,
"grad_norm": 0.38316506147384644,
"learning_rate": 3.0463443575536324e-06,
"loss": 1.6073,
"num_input_tokens_seen": 174587904,
"step": 666
},
{
"epoch": 0.1656319841072759,
"grad_norm": 0.7546458840370178,
"learning_rate": 3.030290452045245e-06,
"loss": 1.3026,
"num_input_tokens_seen": 174850048,
"step": 667
},
{
"epoch": 0.16588030792152966,
"grad_norm": 0.27675381302833557,
"learning_rate": 3.0142605468260976e-06,
"loss": 1.3484,
"num_input_tokens_seen": 175112192,
"step": 668
},
{
"epoch": 0.16612863173578346,
"grad_norm": 0.4194891154766083,
"learning_rate": 2.9982548372155264e-06,
"loss": 1.8019,
"num_input_tokens_seen": 175374336,
"step": 669
},
{
"epoch": 0.16637695555003726,
"grad_norm": 0.36900594830513,
"learning_rate": 2.98227351823805e-06,
"loss": 1.5313,
"num_input_tokens_seen": 175636480,
"step": 670
},
{
"epoch": 0.16662527936429103,
"grad_norm": 0.6106650829315186,
"learning_rate": 2.966316784621e-06,
"loss": 1.6545,
"num_input_tokens_seen": 175898624,
"step": 671
},
{
"epoch": 0.16687360317854483,
"grad_norm": 0.3975144624710083,
"learning_rate": 2.9503848307921363e-06,
"loss": 1.8634,
"num_input_tokens_seen": 176160768,
"step": 672
},
{
"epoch": 0.1671219269927986,
"grad_norm": 0.5684614777565002,
"learning_rate": 2.934477850877292e-06,
"loss": 1.618,
"num_input_tokens_seen": 176422912,
"step": 673
},
{
"epoch": 0.1673702508070524,
"grad_norm": 0.7491419315338135,
"learning_rate": 2.918596038697995e-06,
"loss": 1.8058,
"num_input_tokens_seen": 176685056,
"step": 674
},
{
"epoch": 0.1676185746213062,
"grad_norm": 0.36299964785575867,
"learning_rate": 2.9027395877691143e-06,
"loss": 1.6074,
"num_input_tokens_seen": 176947200,
"step": 675
},
{
"epoch": 0.16786689843555996,
"grad_norm": 1.1680899858474731,
"learning_rate": 2.886908691296504e-06,
"loss": 1.7054,
"num_input_tokens_seen": 177209344,
"step": 676
},
{
"epoch": 0.16811522224981376,
"grad_norm": 0.4961852729320526,
"learning_rate": 2.871103542174637e-06,
"loss": 1.8983,
"num_input_tokens_seen": 177471488,
"step": 677
},
{
"epoch": 0.16836354606406753,
"grad_norm": 0.42342936992645264,
"learning_rate": 2.8553243329842715e-06,
"loss": 1.6285,
"num_input_tokens_seen": 177733632,
"step": 678
},
{
"epoch": 0.16861186987832133,
"grad_norm": 0.6261987686157227,
"learning_rate": 2.839571255990088e-06,
"loss": 1.8729,
"num_input_tokens_seen": 177995776,
"step": 679
},
{
"epoch": 0.16886019369257513,
"grad_norm": 0.5163364410400391,
"learning_rate": 2.8238445031383634e-06,
"loss": 1.5881,
"num_input_tokens_seen": 178257920,
"step": 680
},
{
"epoch": 0.1691085175068289,
"grad_norm": 0.6004268527030945,
"learning_rate": 2.8081442660546126e-06,
"loss": 1.7253,
"num_input_tokens_seen": 178520064,
"step": 681
},
{
"epoch": 0.1693568413210827,
"grad_norm": 0.513282299041748,
"learning_rate": 2.7924707360412743e-06,
"loss": 1.3065,
"num_input_tokens_seen": 178782208,
"step": 682
},
{
"epoch": 0.16960516513533647,
"grad_norm": 0.508873701095581,
"learning_rate": 2.776824104075364e-06,
"loss": 1.4716,
"num_input_tokens_seen": 179044352,
"step": 683
},
{
"epoch": 0.16985348894959026,
"grad_norm": 0.43925005197525024,
"learning_rate": 2.761204560806152e-06,
"loss": 1.7001,
"num_input_tokens_seen": 179306496,
"step": 684
},
{
"epoch": 0.17010181276384406,
"grad_norm": 0.5974088907241821,
"learning_rate": 2.7456122965528475e-06,
"loss": 1.6872,
"num_input_tokens_seen": 179568640,
"step": 685
},
{
"epoch": 0.17035013657809783,
"grad_norm": 0.6642299890518188,
"learning_rate": 2.7300475013022666e-06,
"loss": 1.6835,
"num_input_tokens_seen": 179830784,
"step": 686
},
{
"epoch": 0.17059846039235163,
"grad_norm": 1.514657735824585,
"learning_rate": 2.714510364706531e-06,
"loss": 1.6302,
"num_input_tokens_seen": 180092928,
"step": 687
},
{
"epoch": 0.1708467842066054,
"grad_norm": 0.753200113773346,
"learning_rate": 2.699001076080742e-06,
"loss": 1.7994,
"num_input_tokens_seen": 180355072,
"step": 688
},
{
"epoch": 0.1710951080208592,
"grad_norm": 0.5442324876785278,
"learning_rate": 2.683519824400693e-06,
"loss": 1.1455,
"num_input_tokens_seen": 180617216,
"step": 689
},
{
"epoch": 0.171343431835113,
"grad_norm": 0.42106226086616516,
"learning_rate": 2.6680667983005446e-06,
"loss": 1.4338,
"num_input_tokens_seen": 180879360,
"step": 690
},
{
"epoch": 0.17159175564936677,
"grad_norm": 0.36291244626045227,
"learning_rate": 2.6526421860705474e-06,
"loss": 1.4588,
"num_input_tokens_seen": 181141504,
"step": 691
},
{
"epoch": 0.17184007946362057,
"grad_norm": 0.5528322458267212,
"learning_rate": 2.637246175654731e-06,
"loss": 1.972,
"num_input_tokens_seen": 181403648,
"step": 692
},
{
"epoch": 0.17208840327787434,
"grad_norm": 1.1730087995529175,
"learning_rate": 2.6218789546486235e-06,
"loss": 1.4716,
"num_input_tokens_seen": 181665792,
"step": 693
},
{
"epoch": 0.17233672709212813,
"grad_norm": 0.28138279914855957,
"learning_rate": 2.6065407102969664e-06,
"loss": 1.9811,
"num_input_tokens_seen": 181927936,
"step": 694
},
{
"epoch": 0.17258505090638193,
"grad_norm": 0.585191011428833,
"learning_rate": 2.5912316294914232e-06,
"loss": 1.413,
"num_input_tokens_seen": 182190080,
"step": 695
},
{
"epoch": 0.1728333747206357,
"grad_norm": 0.7064807415008545,
"learning_rate": 2.5759518987683154e-06,
"loss": 1.7552,
"num_input_tokens_seen": 182452224,
"step": 696
},
{
"epoch": 0.1730816985348895,
"grad_norm": 0.2733915448188782,
"learning_rate": 2.560701704306336e-06,
"loss": 1.5639,
"num_input_tokens_seen": 182714368,
"step": 697
},
{
"epoch": 0.17333002234914327,
"grad_norm": 0.37990668416023254,
"learning_rate": 2.545481231924296e-06,
"loss": 2.0107,
"num_input_tokens_seen": 182976512,
"step": 698
},
{
"epoch": 0.17357834616339707,
"grad_norm": 0.5895552039146423,
"learning_rate": 2.5302906670788463e-06,
"loss": 2.0406,
"num_input_tokens_seen": 183238656,
"step": 699
},
{
"epoch": 0.17382666997765087,
"grad_norm": 0.6895752549171448,
"learning_rate": 2.5151301948622235e-06,
"loss": 1.9766,
"num_input_tokens_seen": 183500800,
"step": 700
},
{
"epoch": 0.17407499379190464,
"grad_norm": 0.46613720059394836,
"learning_rate": 2.5000000000000015e-06,
"loss": 2.0084,
"num_input_tokens_seen": 183762944,
"step": 701
},
{
"epoch": 0.17432331760615843,
"grad_norm": 0.5650424957275391,
"learning_rate": 2.484900266848825e-06,
"loss": 1.7718,
"num_input_tokens_seen": 184025088,
"step": 702
},
{
"epoch": 0.1745716414204122,
"grad_norm": 0.5093001127243042,
"learning_rate": 2.469831179394182e-06,
"loss": 1.5874,
"num_input_tokens_seen": 184287232,
"step": 703
},
{
"epoch": 0.174819965234666,
"grad_norm": 0.7076382040977478,
"learning_rate": 2.4547929212481436e-06,
"loss": 1.5776,
"num_input_tokens_seen": 184549376,
"step": 704
},
{
"epoch": 0.1750682890489198,
"grad_norm": 0.7036752700805664,
"learning_rate": 2.4397856756471435e-06,
"loss": 1.0319,
"num_input_tokens_seen": 184811520,
"step": 705
},
{
"epoch": 0.17531661286317357,
"grad_norm": 0.6339607238769531,
"learning_rate": 2.424809625449729e-06,
"loss": 1.4841,
"num_input_tokens_seen": 185073664,
"step": 706
},
{
"epoch": 0.17556493667742737,
"grad_norm": 0.6772640347480774,
"learning_rate": 2.40986495313435e-06,
"loss": 1.398,
"num_input_tokens_seen": 185335808,
"step": 707
},
{
"epoch": 0.17581326049168114,
"grad_norm": 0.626737117767334,
"learning_rate": 2.39495184079712e-06,
"loss": 1.9448,
"num_input_tokens_seen": 185597952,
"step": 708
},
{
"epoch": 0.17606158430593494,
"grad_norm": 0.8285679221153259,
"learning_rate": 2.380070470149605e-06,
"loss": 1.596,
"num_input_tokens_seen": 185860096,
"step": 709
},
{
"epoch": 0.17630990812018874,
"grad_norm": 0.47030216455459595,
"learning_rate": 2.3652210225166122e-06,
"loss": 1.6729,
"num_input_tokens_seen": 186122240,
"step": 710
},
{
"epoch": 0.1765582319344425,
"grad_norm": 0.36715853214263916,
"learning_rate": 2.3504036788339763e-06,
"loss": 1.375,
"num_input_tokens_seen": 186384384,
"step": 711
},
{
"epoch": 0.1768065557486963,
"grad_norm": 0.5815637707710266,
"learning_rate": 2.3356186196463497e-06,
"loss": 1.6501,
"num_input_tokens_seen": 186646528,
"step": 712
},
{
"epoch": 0.17705487956295007,
"grad_norm": 0.5376741290092468,
"learning_rate": 2.320866025105016e-06,
"loss": 1.7278,
"num_input_tokens_seen": 186908672,
"step": 713
},
{
"epoch": 0.17730320337720387,
"grad_norm": 0.5446439385414124,
"learning_rate": 2.3061460749656844e-06,
"loss": 1.9617,
"num_input_tokens_seen": 187170816,
"step": 714
},
{
"epoch": 0.17755152719145767,
"grad_norm": 0.8022477030754089,
"learning_rate": 2.2914589485863015e-06,
"loss": 1.8491,
"num_input_tokens_seen": 187432960,
"step": 715
},
{
"epoch": 0.17779985100571144,
"grad_norm": 0.40645989775657654,
"learning_rate": 2.2768048249248648e-06,
"loss": 1.5563,
"num_input_tokens_seen": 187695104,
"step": 716
},
{
"epoch": 0.17804817481996524,
"grad_norm": 0.5382466316223145,
"learning_rate": 2.2621838825372496e-06,
"loss": 1.5754,
"num_input_tokens_seen": 187957248,
"step": 717
},
{
"epoch": 0.178296498634219,
"grad_norm": 0.6198011636734009,
"learning_rate": 2.2475962995750224e-06,
"loss": 1.8925,
"num_input_tokens_seen": 188219392,
"step": 718
},
{
"epoch": 0.1785448224484728,
"grad_norm": 0.4357577860355377,
"learning_rate": 2.23304225378328e-06,
"loss": 1.9635,
"num_input_tokens_seen": 188481536,
"step": 719
},
{
"epoch": 0.1787931462627266,
"grad_norm": 0.5042324662208557,
"learning_rate": 2.218521922498476e-06,
"loss": 1.4101,
"num_input_tokens_seen": 188743680,
"step": 720
},
{
"epoch": 0.17904147007698037,
"grad_norm": 0.5827696323394775,
"learning_rate": 2.204035482646267e-06,
"loss": 1.6445,
"num_input_tokens_seen": 189005824,
"step": 721
},
{
"epoch": 0.17928979389123417,
"grad_norm": 0.5384161472320557,
"learning_rate": 2.1895831107393485e-06,
"loss": 1.5907,
"num_input_tokens_seen": 189267968,
"step": 722
},
{
"epoch": 0.17953811770548794,
"grad_norm": 0.5630956292152405,
"learning_rate": 2.175164982875311e-06,
"loss": 1.4097,
"num_input_tokens_seen": 189530112,
"step": 723
},
{
"epoch": 0.17978644151974174,
"grad_norm": 0.43942683935165405,
"learning_rate": 2.1607812747344955e-06,
"loss": 1.393,
"num_input_tokens_seen": 189792256,
"step": 724
},
{
"epoch": 0.18003476533399554,
"grad_norm": 0.2822117209434509,
"learning_rate": 2.146432161577842e-06,
"loss": 1.4047,
"num_input_tokens_seen": 190054400,
"step": 725
},
{
"epoch": 0.1802830891482493,
"grad_norm": 0.7013232707977295,
"learning_rate": 2.132117818244771e-06,
"loss": 1.8612,
"num_input_tokens_seen": 190316544,
"step": 726
},
{
"epoch": 0.1805314129625031,
"grad_norm": 0.7166525721549988,
"learning_rate": 2.1178384191510344e-06,
"loss": 1.5652,
"num_input_tokens_seen": 190578688,
"step": 727
},
{
"epoch": 0.18077973677675688,
"grad_norm": 0.8852736949920654,
"learning_rate": 2.103594138286607e-06,
"loss": 1.8348,
"num_input_tokens_seen": 190840832,
"step": 728
},
{
"epoch": 0.18102806059101068,
"grad_norm": 0.7377064228057861,
"learning_rate": 2.0893851492135536e-06,
"loss": 1.5408,
"num_input_tokens_seen": 191102976,
"step": 729
},
{
"epoch": 0.18127638440526447,
"grad_norm": 0.31571272015571594,
"learning_rate": 2.075211625063923e-06,
"loss": 1.4516,
"num_input_tokens_seen": 191365120,
"step": 730
},
{
"epoch": 0.18152470821951824,
"grad_norm": 0.4377698004245758,
"learning_rate": 2.061073738537635e-06,
"loss": 1.8452,
"num_input_tokens_seen": 191627264,
"step": 731
},
{
"epoch": 0.18177303203377204,
"grad_norm": 0.6246635317802429,
"learning_rate": 2.046971661900373e-06,
"loss": 1.0213,
"num_input_tokens_seen": 191889408,
"step": 732
},
{
"epoch": 0.1820213558480258,
"grad_norm": 0.3734930157661438,
"learning_rate": 2.0329055669814936e-06,
"loss": 1.3397,
"num_input_tokens_seen": 192151552,
"step": 733
},
{
"epoch": 0.1822696796622796,
"grad_norm": 0.6088233590126038,
"learning_rate": 2.0188756251719204e-06,
"loss": 1.5803,
"num_input_tokens_seen": 192413696,
"step": 734
},
{
"epoch": 0.1825180034765334,
"grad_norm": 0.6044638156890869,
"learning_rate": 2.0048820074220716e-06,
"loss": 1.629,
"num_input_tokens_seen": 192675840,
"step": 735
},
{
"epoch": 0.18276632729078718,
"grad_norm": 0.5411587357521057,
"learning_rate": 1.990924884239758e-06,
"loss": 1.5093,
"num_input_tokens_seen": 192937984,
"step": 736
},
{
"epoch": 0.18301465110504098,
"grad_norm": 0.7263090014457703,
"learning_rate": 1.977004425688126e-06,
"loss": 1.6338,
"num_input_tokens_seen": 193200128,
"step": 737
},
{
"epoch": 0.18326297491929475,
"grad_norm": 0.5330092310905457,
"learning_rate": 1.9631208013835677e-06,
"loss": 1.6223,
"num_input_tokens_seen": 193462272,
"step": 738
},
{
"epoch": 0.18351129873354854,
"grad_norm": 0.8732761740684509,
"learning_rate": 1.9492741804936623e-06,
"loss": 1.81,
"num_input_tokens_seen": 193724416,
"step": 739
},
{
"epoch": 0.18375962254780234,
"grad_norm": 0.4876832067966461,
"learning_rate": 1.9354647317351187e-06,
"loss": 1.8895,
"num_input_tokens_seen": 193986560,
"step": 740
},
{
"epoch": 0.1840079463620561,
"grad_norm": 0.5221810340881348,
"learning_rate": 1.9216926233717087e-06,
"loss": 1.7537,
"num_input_tokens_seen": 194248704,
"step": 741
},
{
"epoch": 0.1842562701763099,
"grad_norm": 0.7166823744773865,
"learning_rate": 1.90795802321223e-06,
"loss": 1.6196,
"num_input_tokens_seen": 194510848,
"step": 742
},
{
"epoch": 0.1845045939905637,
"grad_norm": 0.5422106981277466,
"learning_rate": 1.8942610986084487e-06,
"loss": 1.7422,
"num_input_tokens_seen": 194772992,
"step": 743
},
{
"epoch": 0.18475291780481748,
"grad_norm": 0.5047271847724915,
"learning_rate": 1.8806020164530702e-06,
"loss": 1.1779,
"num_input_tokens_seen": 195035136,
"step": 744
},
{
"epoch": 0.18500124161907128,
"grad_norm": 0.6293126344680786,
"learning_rate": 1.8669809431776991e-06,
"loss": 1.923,
"num_input_tokens_seen": 195297280,
"step": 745
},
{
"epoch": 0.18524956543332505,
"grad_norm": 0.41052141785621643,
"learning_rate": 1.8533980447508138e-06,
"loss": 1.3557,
"num_input_tokens_seen": 195559424,
"step": 746
},
{
"epoch": 0.18549788924757885,
"grad_norm": 0.45604202151298523,
"learning_rate": 1.8398534866757455e-06,
"loss": 1.7005,
"num_input_tokens_seen": 195821568,
"step": 747
},
{
"epoch": 0.18574621306183264,
"grad_norm": 0.730992317199707,
"learning_rate": 1.8263474339886628e-06,
"loss": 2.0785,
"num_input_tokens_seen": 196083712,
"step": 748
},
{
"epoch": 0.1859945368760864,
"grad_norm": 0.5063241124153137,
"learning_rate": 1.8128800512565514e-06,
"loss": 2.1056,
"num_input_tokens_seen": 196345856,
"step": 749
},
{
"epoch": 0.1862428606903402,
"grad_norm": 0.2811354100704193,
"learning_rate": 1.799451502575222e-06,
"loss": 1.2958,
"num_input_tokens_seen": 196608000,
"step": 750
},
{
"epoch": 0.18649118450459398,
"grad_norm": 0.5314778089523315,
"learning_rate": 1.7860619515673034e-06,
"loss": 1.7808,
"num_input_tokens_seen": 196870144,
"step": 751
},
{
"epoch": 0.18673950831884778,
"grad_norm": 1.0328506231307983,
"learning_rate": 1.7727115613802465e-06,
"loss": 2.0584,
"num_input_tokens_seen": 197132288,
"step": 752
},
{
"epoch": 0.18698783213310158,
"grad_norm": 0.7677520513534546,
"learning_rate": 1.7594004946843458e-06,
"loss": 1.6937,
"num_input_tokens_seen": 197394432,
"step": 753
},
{
"epoch": 0.18723615594735535,
"grad_norm": 0.6427842974662781,
"learning_rate": 1.746128913670746e-06,
"loss": 1.8869,
"num_input_tokens_seen": 197656576,
"step": 754
},
{
"epoch": 0.18748447976160915,
"grad_norm": 1.0200343132019043,
"learning_rate": 1.7328969800494727e-06,
"loss": 1.796,
"num_input_tokens_seen": 197918720,
"step": 755
},
{
"epoch": 0.18773280357586292,
"grad_norm": 0.8236029148101807,
"learning_rate": 1.7197048550474643e-06,
"loss": 1.6866,
"num_input_tokens_seen": 198180864,
"step": 756
},
{
"epoch": 0.18798112739011671,
"grad_norm": 1.2460148334503174,
"learning_rate": 1.7065526994065973e-06,
"loss": 1.3428,
"num_input_tokens_seen": 198443008,
"step": 757
},
{
"epoch": 0.1882294512043705,
"grad_norm": 0.4800860285758972,
"learning_rate": 1.6934406733817417e-06,
"loss": 1.4296,
"num_input_tokens_seen": 198705152,
"step": 758
},
{
"epoch": 0.18847777501862428,
"grad_norm": 0.9592916965484619,
"learning_rate": 1.680368936738792e-06,
"loss": 1.4531,
"num_input_tokens_seen": 198967296,
"step": 759
},
{
"epoch": 0.18872609883287808,
"grad_norm": 0.6796224117279053,
"learning_rate": 1.6673376487527382e-06,
"loss": 1.4354,
"num_input_tokens_seen": 199229440,
"step": 760
},
{
"epoch": 0.18897442264713185,
"grad_norm": 0.6231174468994141,
"learning_rate": 1.6543469682057105e-06,
"loss": 1.2075,
"num_input_tokens_seen": 199491584,
"step": 761
},
{
"epoch": 0.18922274646138565,
"grad_norm": 0.7283173203468323,
"learning_rate": 1.6413970533850498e-06,
"loss": 1.7713,
"num_input_tokens_seen": 199753728,
"step": 762
},
{
"epoch": 0.18947107027563945,
"grad_norm": 0.6808333396911621,
"learning_rate": 1.6284880620813847e-06,
"loss": 1.7076,
"num_input_tokens_seen": 200015872,
"step": 763
},
{
"epoch": 0.18971939408989322,
"grad_norm": 0.7591480612754822,
"learning_rate": 1.6156201515866971e-06,
"loss": 1.4767,
"num_input_tokens_seen": 200278016,
"step": 764
},
{
"epoch": 0.18996771790414702,
"grad_norm": 3.4821579456329346,
"learning_rate": 1.6027934786924187e-06,
"loss": 1.7879,
"num_input_tokens_seen": 200540160,
"step": 765
},
{
"epoch": 0.19021604171840079,
"grad_norm": 0.5467571020126343,
"learning_rate": 1.5900081996875083e-06,
"loss": 1.6919,
"num_input_tokens_seen": 200802304,
"step": 766
},
{
"epoch": 0.19046436553265458,
"grad_norm": 0.4840015172958374,
"learning_rate": 1.5772644703565564e-06,
"loss": 1.9006,
"num_input_tokens_seen": 201064448,
"step": 767
},
{
"epoch": 0.19071268934690838,
"grad_norm": 0.4922867715358734,
"learning_rate": 1.5645624459778858e-06,
"loss": 1.505,
"num_input_tokens_seen": 201326592,
"step": 768
},
{
"epoch": 0.19096101316116215,
"grad_norm": 0.8669015169143677,
"learning_rate": 1.551902281321651e-06,
"loss": 1.5521,
"num_input_tokens_seen": 201588736,
"step": 769
},
{
"epoch": 0.19120933697541595,
"grad_norm": 0.7580779194831848,
"learning_rate": 1.5392841306479667e-06,
"loss": 1.5425,
"num_input_tokens_seen": 201850880,
"step": 770
},
{
"epoch": 0.19145766078966972,
"grad_norm": 0.5388744473457336,
"learning_rate": 1.5267081477050132e-06,
"loss": 1.6623,
"num_input_tokens_seen": 202113024,
"step": 771
},
{
"epoch": 0.19170598460392352,
"grad_norm": 0.36039191484451294,
"learning_rate": 1.514174485727178e-06,
"loss": 1.9833,
"num_input_tokens_seen": 202375168,
"step": 772
},
{
"epoch": 0.19195430841817732,
"grad_norm": 0.7689981460571289,
"learning_rate": 1.5016832974331725e-06,
"loss": 1.9371,
"num_input_tokens_seen": 202637312,
"step": 773
},
{
"epoch": 0.1922026322324311,
"grad_norm": 0.510267436504364,
"learning_rate": 1.489234735024188e-06,
"loss": 1.7589,
"num_input_tokens_seen": 202899456,
"step": 774
},
{
"epoch": 0.19245095604668488,
"grad_norm": 1.7721202373504639,
"learning_rate": 1.4768289501820265e-06,
"loss": 1.8343,
"num_input_tokens_seen": 203161600,
"step": 775
},
{
"epoch": 0.19269927986093865,
"grad_norm": 0.35707518458366394,
"learning_rate": 1.4644660940672628e-06,
"loss": 1.3319,
"num_input_tokens_seen": 203423744,
"step": 776
},
{
"epoch": 0.19294760367519245,
"grad_norm": 0.39053982496261597,
"learning_rate": 1.4521463173173966e-06,
"loss": 1.4868,
"num_input_tokens_seen": 203685888,
"step": 777
},
{
"epoch": 0.19319592748944625,
"grad_norm": 1.0973213911056519,
"learning_rate": 1.4398697700450181e-06,
"loss": 1.2248,
"num_input_tokens_seen": 203948032,
"step": 778
},
{
"epoch": 0.19344425130370002,
"grad_norm": 0.4421512186527252,
"learning_rate": 1.4276366018359845e-06,
"loss": 1.513,
"num_input_tokens_seen": 204210176,
"step": 779
},
{
"epoch": 0.19369257511795382,
"grad_norm": 0.5916955471038818,
"learning_rate": 1.4154469617475864e-06,
"loss": 1.5881,
"num_input_tokens_seen": 204472320,
"step": 780
},
{
"epoch": 0.1939408989322076,
"grad_norm": 0.749496340751648,
"learning_rate": 1.4033009983067454e-06,
"loss": 2.0316,
"num_input_tokens_seen": 204734464,
"step": 781
},
{
"epoch": 0.1941892227464614,
"grad_norm": 0.4848249554634094,
"learning_rate": 1.3911988595081894e-06,
"loss": 1.5584,
"num_input_tokens_seen": 204996608,
"step": 782
},
{
"epoch": 0.19443754656071519,
"grad_norm": 0.5584369897842407,
"learning_rate": 1.3791406928126638e-06,
"loss": 1.6737,
"num_input_tokens_seen": 205258752,
"step": 783
},
{
"epoch": 0.19468587037496896,
"grad_norm": 0.7734516263008118,
"learning_rate": 1.3671266451451209e-06,
"loss": 1.7325,
"num_input_tokens_seen": 205520896,
"step": 784
},
{
"epoch": 0.19493419418922275,
"grad_norm": 0.4235021471977234,
"learning_rate": 1.3551568628929434e-06,
"loss": 1.7884,
"num_input_tokens_seen": 205783040,
"step": 785
},
{
"epoch": 0.19518251800347652,
"grad_norm": 0.5882217288017273,
"learning_rate": 1.3432314919041478e-06,
"loss": 1.5367,
"num_input_tokens_seen": 206045184,
"step": 786
},
{
"epoch": 0.19543084181773032,
"grad_norm": 0.382315456867218,
"learning_rate": 1.3313506774856177e-06,
"loss": 1.562,
"num_input_tokens_seen": 206307328,
"step": 787
},
{
"epoch": 0.19567916563198412,
"grad_norm": 0.5313207507133484,
"learning_rate": 1.3195145644013286e-06,
"loss": 0.9386,
"num_input_tokens_seen": 206569472,
"step": 788
},
{
"epoch": 0.1959274894462379,
"grad_norm": 0.5512835383415222,
"learning_rate": 1.3077232968705805e-06,
"loss": 1.3328,
"num_input_tokens_seen": 206831616,
"step": 789
},
{
"epoch": 0.1961758132604917,
"grad_norm": 0.6115958094596863,
"learning_rate": 1.2959770185662502e-06,
"loss": 1.6127,
"num_input_tokens_seen": 207093760,
"step": 790
},
{
"epoch": 0.19642413707474546,
"grad_norm": 1.8349878787994385,
"learning_rate": 1.2842758726130283e-06,
"loss": 1.3873,
"num_input_tokens_seen": 207355904,
"step": 791
},
{
"epoch": 0.19667246088899926,
"grad_norm": 0.6167082786560059,
"learning_rate": 1.2726200015856893e-06,
"loss": 1.6641,
"num_input_tokens_seen": 207618048,
"step": 792
},
{
"epoch": 0.19692078470325305,
"grad_norm": 0.27391648292541504,
"learning_rate": 1.2610095475073415e-06,
"loss": 1.7774,
"num_input_tokens_seen": 207880192,
"step": 793
},
{
"epoch": 0.19716910851750682,
"grad_norm": 0.4807204306125641,
"learning_rate": 1.2494446518477022e-06,
"loss": 1.5564,
"num_input_tokens_seen": 208142336,
"step": 794
},
{
"epoch": 0.19741743233176062,
"grad_norm": 0.3716016113758087,
"learning_rate": 1.2379254555213788e-06,
"loss": 1.6801,
"num_input_tokens_seen": 208404480,
"step": 795
},
{
"epoch": 0.1976657561460144,
"grad_norm": 0.6262674331665039,
"learning_rate": 1.22645209888614e-06,
"loss": 1.6579,
"num_input_tokens_seen": 208666624,
"step": 796
},
{
"epoch": 0.1979140799602682,
"grad_norm": 0.49892279505729675,
"learning_rate": 1.2150247217412186e-06,
"loss": 1.4113,
"num_input_tokens_seen": 208928768,
"step": 797
},
{
"epoch": 0.198162403774522,
"grad_norm": 0.28231680393218994,
"learning_rate": 1.203643463325596e-06,
"loss": 1.2046,
"num_input_tokens_seen": 209190912,
"step": 798
},
{
"epoch": 0.19841072758877576,
"grad_norm": 0.39441466331481934,
"learning_rate": 1.1923084623163172e-06,
"loss": 1.25,
"num_input_tokens_seen": 209453056,
"step": 799
},
{
"epoch": 0.19865905140302956,
"grad_norm": 0.8825608491897583,
"learning_rate": 1.1810198568267906e-06,
"loss": 1.8506,
"num_input_tokens_seen": 209715200,
"step": 800
},
{
"epoch": 0.19890737521728333,
"grad_norm": 0.3593462109565735,
"learning_rate": 1.1697777844051105e-06,
"loss": 0.9988,
"num_input_tokens_seen": 209977344,
"step": 801
},
{
"epoch": 0.19915569903153713,
"grad_norm": 0.42397427558898926,
"learning_rate": 1.1585823820323845e-06,
"loss": 1.5294,
"num_input_tokens_seen": 210239488,
"step": 802
},
{
"epoch": 0.19940402284579092,
"grad_norm": 0.5862644910812378,
"learning_rate": 1.1474337861210543e-06,
"loss": 1.8455,
"num_input_tokens_seen": 210501632,
"step": 803
},
{
"epoch": 0.1996523466600447,
"grad_norm": 0.7144994139671326,
"learning_rate": 1.136332132513245e-06,
"loss": 1.6364,
"num_input_tokens_seen": 210763776,
"step": 804
},
{
"epoch": 0.1999006704742985,
"grad_norm": 0.6182805299758911,
"learning_rate": 1.1252775564791023e-06,
"loss": 1.533,
"num_input_tokens_seen": 211025920,
"step": 805
},
{
"epoch": 0.20014899428855226,
"grad_norm": 2.216693878173828,
"learning_rate": 1.1142701927151456e-06,
"loss": 1.8744,
"num_input_tokens_seen": 211288064,
"step": 806
},
{
"epoch": 0.20039731810280606,
"grad_norm": 0.4277917444705963,
"learning_rate": 1.1033101753426285e-06,
"loss": 1.5751,
"num_input_tokens_seen": 211550208,
"step": 807
},
{
"epoch": 0.20064564191705986,
"grad_norm": 0.5398479104042053,
"learning_rate": 1.0923976379059059e-06,
"loss": 1.5959,
"num_input_tokens_seen": 211812352,
"step": 808
},
{
"epoch": 0.20089396573131363,
"grad_norm": 0.38512706756591797,
"learning_rate": 1.0815327133708015e-06,
"loss": 1.5632,
"num_input_tokens_seen": 212074496,
"step": 809
},
{
"epoch": 0.20114228954556743,
"grad_norm": 0.28112220764160156,
"learning_rate": 1.0707155341229902e-06,
"loss": 1.5852,
"num_input_tokens_seen": 212336640,
"step": 810
},
{
"epoch": 0.2013906133598212,
"grad_norm": 0.3636101186275482,
"learning_rate": 1.0599462319663906e-06,
"loss": 1.659,
"num_input_tokens_seen": 212598784,
"step": 811
},
{
"epoch": 0.201638937174075,
"grad_norm": 0.632411003112793,
"learning_rate": 1.049224938121548e-06,
"loss": 1.7422,
"num_input_tokens_seen": 212860928,
"step": 812
},
{
"epoch": 0.2018872609883288,
"grad_norm": 0.4831806719303131,
"learning_rate": 1.0385517832240472e-06,
"loss": 1.6995,
"num_input_tokens_seen": 213123072,
"step": 813
},
{
"epoch": 0.20213558480258256,
"grad_norm": 0.6856269836425781,
"learning_rate": 1.0279268973229089e-06,
"loss": 1.4744,
"num_input_tokens_seen": 213385216,
"step": 814
},
{
"epoch": 0.20238390861683636,
"grad_norm": 0.706402599811554,
"learning_rate": 1.0173504098790188e-06,
"loss": 1.806,
"num_input_tokens_seen": 213647360,
"step": 815
},
{
"epoch": 0.20263223243109013,
"grad_norm": 0.6212208271026611,
"learning_rate": 1.006822449763537e-06,
"loss": 1.4372,
"num_input_tokens_seen": 213909504,
"step": 816
},
{
"epoch": 0.20288055624534393,
"grad_norm": 0.4880361258983612,
"learning_rate": 9.963431452563331e-07,
"loss": 1.7287,
"num_input_tokens_seen": 214171648,
"step": 817
},
{
"epoch": 0.20312888005959773,
"grad_norm": 0.5508049726486206,
"learning_rate": 9.859126240444284e-07,
"loss": 1.4213,
"num_input_tokens_seen": 214433792,
"step": 818
},
{
"epoch": 0.2033772038738515,
"grad_norm": 0.40056681632995605,
"learning_rate": 9.7553101322043e-07,
"loss": 1.6258,
"num_input_tokens_seen": 214695936,
"step": 819
},
{
"epoch": 0.2036255276881053,
"grad_norm": 0.5356477499008179,
"learning_rate": 9.651984392809916e-07,
"loss": 1.5275,
"num_input_tokens_seen": 214958080,
"step": 820
},
{
"epoch": 0.20387385150235907,
"grad_norm": 0.7192147374153137,
"learning_rate": 9.549150281252633e-07,
"loss": 1.675,
"num_input_tokens_seen": 215220224,
"step": 821
},
{
"epoch": 0.20412217531661286,
"grad_norm": 0.6497499942779541,
"learning_rate": 9.446809050533679e-07,
"loss": 2.0607,
"num_input_tokens_seen": 215482368,
"step": 822
},
{
"epoch": 0.20437049913086666,
"grad_norm": 0.6447398662567139,
"learning_rate": 9.344961947648624e-07,
"loss": 1.3855,
"num_input_tokens_seen": 215744512,
"step": 823
},
{
"epoch": 0.20461882294512043,
"grad_norm": 1.1430552005767822,
"learning_rate": 9.243610213572285e-07,
"loss": 1.6178,
"num_input_tokens_seen": 216006656,
"step": 824
},
{
"epoch": 0.20486714675937423,
"grad_norm": 0.6385335922241211,
"learning_rate": 9.142755083243577e-07,
"loss": 1.5856,
"num_input_tokens_seen": 216268800,
"step": 825
},
{
"epoch": 0.205115470573628,
"grad_norm": 1.0095096826553345,
"learning_rate": 9.042397785550405e-07,
"loss": 1.6267,
"num_input_tokens_seen": 216530944,
"step": 826
},
{
"epoch": 0.2053637943878818,
"grad_norm": 0.7737463712692261,
"learning_rate": 8.942539543314799e-07,
"loss": 1.3607,
"num_input_tokens_seen": 216793088,
"step": 827
},
{
"epoch": 0.2056121182021356,
"grad_norm": 0.6710413694381714,
"learning_rate": 8.843181573277904e-07,
"loss": 1.7918,
"num_input_tokens_seen": 217055232,
"step": 828
},
{
"epoch": 0.20586044201638937,
"grad_norm": 0.5380450487136841,
"learning_rate": 8.744325086085248e-07,
"loss": 1.8734,
"num_input_tokens_seen": 217317376,
"step": 829
},
{
"epoch": 0.20610876583064316,
"grad_norm": 0.769334614276886,
"learning_rate": 8.645971286271903e-07,
"loss": 1.4816,
"num_input_tokens_seen": 217579520,
"step": 830
},
{
"epoch": 0.20635708964489693,
"grad_norm": 0.49632272124290466,
"learning_rate": 8.54812137224792e-07,
"loss": 1.7042,
"num_input_tokens_seen": 217841664,
"step": 831
},
{
"epoch": 0.20660541345915073,
"grad_norm": 0.30258211493492126,
"learning_rate": 8.450776536283594e-07,
"loss": 1.5544,
"num_input_tokens_seen": 218103808,
"step": 832
},
{
"epoch": 0.20685373727340453,
"grad_norm": 0.5039138793945312,
"learning_rate": 8.353937964495029e-07,
"loss": 1.6497,
"num_input_tokens_seen": 218365952,
"step": 833
},
{
"epoch": 0.2071020610876583,
"grad_norm": 0.5678662657737732,
"learning_rate": 8.25760683682968e-07,
"loss": 1.9249,
"num_input_tokens_seen": 218628096,
"step": 834
},
{
"epoch": 0.2073503849019121,
"grad_norm": 0.7532708644866943,
"learning_rate": 8.161784327051919e-07,
"loss": 1.5489,
"num_input_tokens_seen": 218890240,
"step": 835
},
{
"epoch": 0.20759870871616587,
"grad_norm": 0.3733159303665161,
"learning_rate": 8.066471602728804e-07,
"loss": 2.1036,
"num_input_tokens_seen": 219152384,
"step": 836
},
{
"epoch": 0.20784703253041967,
"grad_norm": 0.52272629737854,
"learning_rate": 7.971669825215789e-07,
"loss": 1.5633,
"num_input_tokens_seen": 219414528,
"step": 837
},
{
"epoch": 0.20809535634467347,
"grad_norm": 0.6050902605056763,
"learning_rate": 7.877380149642628e-07,
"loss": 1.4245,
"num_input_tokens_seen": 219676672,
"step": 838
},
{
"epoch": 0.20834368015892724,
"grad_norm": 0.6302130222320557,
"learning_rate": 7.783603724899258e-07,
"loss": 1.3682,
"num_input_tokens_seen": 219938816,
"step": 839
},
{
"epoch": 0.20859200397318103,
"grad_norm": 0.2699492871761322,
"learning_rate": 7.690341693621805e-07,
"loss": 1.344,
"num_input_tokens_seen": 220200960,
"step": 840
},
{
"epoch": 0.2088403277874348,
"grad_norm": 0.7535718679428101,
"learning_rate": 7.597595192178702e-07,
"loss": 1.751,
"num_input_tokens_seen": 220463104,
"step": 841
},
{
"epoch": 0.2090886516016886,
"grad_norm": 0.9196535348892212,
"learning_rate": 7.505365350656813e-07,
"loss": 1.6558,
"num_input_tokens_seen": 220725248,
"step": 842
},
{
"epoch": 0.2093369754159424,
"grad_norm": 0.6124866604804993,
"learning_rate": 7.413653292847617e-07,
"loss": 1.4843,
"num_input_tokens_seen": 220987392,
"step": 843
},
{
"epoch": 0.20958529923019617,
"grad_norm": 0.830053448677063,
"learning_rate": 7.322460136233622e-07,
"loss": 1.583,
"num_input_tokens_seen": 221249536,
"step": 844
},
{
"epoch": 0.20983362304444997,
"grad_norm": 0.7714657783508301,
"learning_rate": 7.23178699197467e-07,
"loss": 1.763,
"num_input_tokens_seen": 221511680,
"step": 845
},
{
"epoch": 0.21008194685870374,
"grad_norm": 0.3930104970932007,
"learning_rate": 7.141634964894389e-07,
"loss": 1.381,
"num_input_tokens_seen": 221773824,
"step": 846
},
{
"epoch": 0.21033027067295754,
"grad_norm": 0.6444476246833801,
"learning_rate": 7.052005153466779e-07,
"loss": 1.7138,
"num_input_tokens_seen": 222035968,
"step": 847
},
{
"epoch": 0.21057859448721133,
"grad_norm": 0.7626250982284546,
"learning_rate": 6.962898649802824e-07,
"loss": 1.3164,
"num_input_tokens_seen": 222298112,
"step": 848
},
{
"epoch": 0.2108269183014651,
"grad_norm": 0.5761337876319885,
"learning_rate": 6.874316539637127e-07,
"loss": 1.4375,
"num_input_tokens_seen": 222560256,
"step": 849
},
{
"epoch": 0.2110752421157189,
"grad_norm": 0.5399318337440491,
"learning_rate": 6.786259902314768e-07,
"loss": 1.5317,
"num_input_tokens_seen": 222822400,
"step": 850
},
{
"epoch": 0.21132356592997267,
"grad_norm": 0.6818238496780396,
"learning_rate": 6.698729810778065e-07,
"loss": 1.5059,
"num_input_tokens_seen": 223084544,
"step": 851
},
{
"epoch": 0.21157188974422647,
"grad_norm": 0.5061662197113037,
"learning_rate": 6.611727331553585e-07,
"loss": 1.7645,
"num_input_tokens_seen": 223346688,
"step": 852
},
{
"epoch": 0.21182021355848027,
"grad_norm": 0.7583996057510376,
"learning_rate": 6.52525352473905e-07,
"loss": 1.7479,
"num_input_tokens_seen": 223608832,
"step": 853
},
{
"epoch": 0.21206853737273404,
"grad_norm": 0.4888990819454193,
"learning_rate": 6.439309443990532e-07,
"loss": 1.4111,
"num_input_tokens_seen": 223870976,
"step": 854
},
{
"epoch": 0.21231686118698784,
"grad_norm": 0.5152997374534607,
"learning_rate": 6.353896136509524e-07,
"loss": 1.4035,
"num_input_tokens_seen": 224133120,
"step": 855
},
{
"epoch": 0.2125651850012416,
"grad_norm": 0.8078181147575378,
"learning_rate": 6.269014643030214e-07,
"loss": 1.5448,
"num_input_tokens_seen": 224395264,
"step": 856
},
{
"epoch": 0.2128135088154954,
"grad_norm": 0.38689664006233215,
"learning_rate": 6.184665997806832e-07,
"loss": 1.5432,
"num_input_tokens_seen": 224657408,
"step": 857
},
{
"epoch": 0.2130618326297492,
"grad_norm": 0.5154587626457214,
"learning_rate": 6.100851228600974e-07,
"loss": 1.6356,
"num_input_tokens_seen": 224919552,
"step": 858
},
{
"epoch": 0.21331015644400297,
"grad_norm": 0.7129220366477966,
"learning_rate": 6.017571356669183e-07,
"loss": 1.4454,
"num_input_tokens_seen": 225181696,
"step": 859
},
{
"epoch": 0.21355848025825677,
"grad_norm": 0.4863054156303406,
"learning_rate": 5.934827396750392e-07,
"loss": 1.6839,
"num_input_tokens_seen": 225443840,
"step": 860
},
{
"epoch": 0.21380680407251054,
"grad_norm": 0.461911678314209,
"learning_rate": 5.852620357053651e-07,
"loss": 1.4632,
"num_input_tokens_seen": 225705984,
"step": 861
},
{
"epoch": 0.21405512788676434,
"grad_norm": 0.4267142415046692,
"learning_rate": 5.770951239245803e-07,
"loss": 1.5694,
"num_input_tokens_seen": 225968128,
"step": 862
},
{
"epoch": 0.21430345170101814,
"grad_norm": 0.4966636300086975,
"learning_rate": 5.689821038439264e-07,
"loss": 1.5516,
"num_input_tokens_seen": 226230272,
"step": 863
},
{
"epoch": 0.2145517755152719,
"grad_norm": 0.5140483379364014,
"learning_rate": 5.609230743179939e-07,
"loss": 1.3778,
"num_input_tokens_seen": 226492416,
"step": 864
},
{
"epoch": 0.2148000993295257,
"grad_norm": 0.6450092196464539,
"learning_rate": 5.529181335435124e-07,
"loss": 1.3893,
"num_input_tokens_seen": 226754560,
"step": 865
},
{
"epoch": 0.21504842314377948,
"grad_norm": 0.5973859429359436,
"learning_rate": 5.449673790581611e-07,
"loss": 1.5149,
"num_input_tokens_seen": 227016704,
"step": 866
},
{
"epoch": 0.21529674695803327,
"grad_norm": 1.7493269443511963,
"learning_rate": 5.370709077393721e-07,
"loss": 1.5618,
"num_input_tokens_seen": 227278848,
"step": 867
},
{
"epoch": 0.21554507077228707,
"grad_norm": 0.4318649172782898,
"learning_rate": 5.292288158031595e-07,
"loss": 1.1686,
"num_input_tokens_seen": 227540992,
"step": 868
},
{
"epoch": 0.21579339458654084,
"grad_norm": 0.3847927749156952,
"learning_rate": 5.214411988029355e-07,
"loss": 1.7999,
"num_input_tokens_seen": 227803136,
"step": 869
},
{
"epoch": 0.21604171840079464,
"grad_norm": 0.596502423286438,
"learning_rate": 5.137081516283582e-07,
"loss": 1.5796,
"num_input_tokens_seen": 228065280,
"step": 870
},
{
"epoch": 0.2162900422150484,
"grad_norm": 0.6176612377166748,
"learning_rate": 5.06029768504166e-07,
"loss": 1.7902,
"num_input_tokens_seen": 228327424,
"step": 871
},
{
"epoch": 0.2165383660293022,
"grad_norm": 0.6072035431861877,
"learning_rate": 4.984061429890324e-07,
"loss": 1.7899,
"num_input_tokens_seen": 228589568,
"step": 872
},
{
"epoch": 0.216786689843556,
"grad_norm": 0.3984832167625427,
"learning_rate": 4.908373679744316e-07,
"loss": 1.9725,
"num_input_tokens_seen": 228851712,
"step": 873
},
{
"epoch": 0.21703501365780978,
"grad_norm": 0.7739282250404358,
"learning_rate": 4.833235356834959e-07,
"loss": 1.8129,
"num_input_tokens_seen": 229113856,
"step": 874
},
{
"epoch": 0.21728333747206358,
"grad_norm": 1.2236608266830444,
"learning_rate": 4.758647376699033e-07,
"loss": 1.7323,
"num_input_tokens_seen": 229376000,
"step": 875
},
{
"epoch": 0.21753166128631735,
"grad_norm": 0.541125476360321,
"learning_rate": 4.6846106481675035e-07,
"loss": 1.4017,
"num_input_tokens_seen": 229638144,
"step": 876
},
{
"epoch": 0.21777998510057114,
"grad_norm": 0.6922534704208374,
"learning_rate": 4.6111260733545714e-07,
"loss": 1.4963,
"num_input_tokens_seen": 229900288,
"step": 877
},
{
"epoch": 0.21802830891482494,
"grad_norm": 0.5298182368278503,
"learning_rate": 4.538194547646574e-07,
"loss": 1.2116,
"num_input_tokens_seen": 230162432,
"step": 878
},
{
"epoch": 0.2182766327290787,
"grad_norm": 0.6427643299102783,
"learning_rate": 4.4658169596911493e-07,
"loss": 1.4518,
"num_input_tokens_seen": 230424576,
"step": 879
},
{
"epoch": 0.2185249565433325,
"grad_norm": 0.5809391736984253,
"learning_rate": 4.3939941913863525e-07,
"loss": 1.5274,
"num_input_tokens_seen": 230686720,
"step": 880
},
{
"epoch": 0.21877328035758628,
"grad_norm": 0.7898019552230835,
"learning_rate": 4.322727117869951e-07,
"loss": 1.4542,
"num_input_tokens_seen": 230948864,
"step": 881
},
{
"epoch": 0.21902160417184008,
"grad_norm": 0.3369694650173187,
"learning_rate": 4.2520166075087635e-07,
"loss": 1.7991,
"num_input_tokens_seen": 231211008,
"step": 882
},
{
"epoch": 0.21926992798609388,
"grad_norm": 0.9025521874427795,
"learning_rate": 4.1818635218880186e-07,
"loss": 1.5178,
"num_input_tokens_seen": 231473152,
"step": 883
},
{
"epoch": 0.21951825180034765,
"grad_norm": 0.5789931416511536,
"learning_rate": 4.112268715800943e-07,
"loss": 1.7304,
"num_input_tokens_seen": 231735296,
"step": 884
},
{
"epoch": 0.21976657561460144,
"grad_norm": 0.6976189613342285,
"learning_rate": 4.043233037238281e-07,
"loss": 1.6488,
"num_input_tokens_seen": 231997440,
"step": 885
},
{
"epoch": 0.22001489942885522,
"grad_norm": 0.49333396553993225,
"learning_rate": 3.9747573273779816e-07,
"loss": 1.2766,
"num_input_tokens_seen": 232259584,
"step": 886
},
{
"epoch": 0.220263223243109,
"grad_norm": 0.44112178683280945,
"learning_rate": 3.90684242057498e-07,
"loss": 2.2126,
"num_input_tokens_seen": 232521728,
"step": 887
},
{
"epoch": 0.2205115470573628,
"grad_norm": 0.4824700951576233,
"learning_rate": 3.8394891443509554e-07,
"loss": 1.4844,
"num_input_tokens_seen": 232783872,
"step": 888
},
{
"epoch": 0.22075987087161658,
"grad_norm": 0.9592105746269226,
"learning_rate": 3.772698319384349e-07,
"loss": 1.4548,
"num_input_tokens_seen": 233046016,
"step": 889
},
{
"epoch": 0.22100819468587038,
"grad_norm": 0.6596522927284241,
"learning_rate": 3.7064707595002636e-07,
"loss": 1.9353,
"num_input_tokens_seen": 233308160,
"step": 890
},
{
"epoch": 0.22125651850012415,
"grad_norm": 0.5860907435417175,
"learning_rate": 3.6408072716606346e-07,
"loss": 1.7116,
"num_input_tokens_seen": 233570304,
"step": 891
},
{
"epoch": 0.22150484231437795,
"grad_norm": 0.5942659974098206,
"learning_rate": 3.575708655954324e-07,
"loss": 1.5085,
"num_input_tokens_seen": 233832448,
"step": 892
},
{
"epoch": 0.22175316612863175,
"grad_norm": 0.45090603828430176,
"learning_rate": 3.511175705587433e-07,
"loss": 2.0349,
"num_input_tokens_seen": 234094592,
"step": 893
},
{
"epoch": 0.22200148994288552,
"grad_norm": 0.4501633942127228,
"learning_rate": 3.4472092068735917e-07,
"loss": 1.6038,
"num_input_tokens_seen": 234356736,
"step": 894
},
{
"epoch": 0.22224981375713931,
"grad_norm": 0.6762533783912659,
"learning_rate": 3.3838099392243915e-07,
"loss": 1.9386,
"num_input_tokens_seen": 234618880,
"step": 895
},
{
"epoch": 0.22249813757139308,
"grad_norm": 0.447121798992157,
"learning_rate": 3.320978675139919e-07,
"loss": 1.4892,
"num_input_tokens_seen": 234881024,
"step": 896
},
{
"epoch": 0.22274646138564688,
"grad_norm": 0.45953160524368286,
"learning_rate": 3.258716180199278e-07,
"loss": 1.8319,
"num_input_tokens_seen": 235143168,
"step": 897
},
{
"epoch": 0.22299478519990068,
"grad_norm": 0.6440572142601013,
"learning_rate": 3.1970232130513365e-07,
"loss": 1.515,
"num_input_tokens_seen": 235405312,
"step": 898
},
{
"epoch": 0.22324310901415445,
"grad_norm": 0.5581231117248535,
"learning_rate": 3.135900525405428e-07,
"loss": 1.4006,
"num_input_tokens_seen": 235667456,
"step": 899
},
{
"epoch": 0.22349143282840825,
"grad_norm": 0.8089503645896912,
"learning_rate": 3.0753488620222037e-07,
"loss": 1.9274,
"num_input_tokens_seen": 235929600,
"step": 900
},
{
"epoch": 0.22373975664266202,
"grad_norm": 0.3819998502731323,
"learning_rate": 3.015368960704584e-07,
"loss": 1.5894,
"num_input_tokens_seen": 236191744,
"step": 901
},
{
"epoch": 0.22398808045691582,
"grad_norm": 0.27509671449661255,
"learning_rate": 2.9559615522887275e-07,
"loss": 1.4556,
"num_input_tokens_seen": 236453888,
"step": 902
},
{
"epoch": 0.22423640427116961,
"grad_norm": 0.4570426344871521,
"learning_rate": 2.8971273606351656e-07,
"loss": 1.6122,
"num_input_tokens_seen": 236716032,
"step": 903
},
{
"epoch": 0.22448472808542339,
"grad_norm": 0.557512104511261,
"learning_rate": 2.838867102619952e-07,
"loss": 1.3736,
"num_input_tokens_seen": 236978176,
"step": 904
},
{
"epoch": 0.22473305189967718,
"grad_norm": 0.9721599221229553,
"learning_rate": 2.7811814881259503e-07,
"loss": 1.5408,
"num_input_tokens_seen": 237240320,
"step": 905
},
{
"epoch": 0.22498137571393095,
"grad_norm": 0.34721148014068604,
"learning_rate": 2.724071220034158e-07,
"loss": 1.3573,
"num_input_tokens_seen": 237502464,
"step": 906
},
{
"epoch": 0.22522969952818475,
"grad_norm": 0.5350072979927063,
"learning_rate": 2.6675369942151864e-07,
"loss": 1.7557,
"num_input_tokens_seen": 237764608,
"step": 907
},
{
"epoch": 0.22547802334243855,
"grad_norm": 0.5631807446479797,
"learning_rate": 2.611579499520722e-07,
"loss": 1.7951,
"num_input_tokens_seen": 238026752,
"step": 908
},
{
"epoch": 0.22572634715669232,
"grad_norm": 0.731895387172699,
"learning_rate": 2.556199417775174e-07,
"loss": 1.3442,
"num_input_tokens_seen": 238288896,
"step": 909
},
{
"epoch": 0.22597467097094612,
"grad_norm": 0.5491323471069336,
"learning_rate": 2.5013974237673824e-07,
"loss": 1.932,
"num_input_tokens_seen": 238551040,
"step": 910
},
{
"epoch": 0.2262229947851999,
"grad_norm": 0.3442050814628601,
"learning_rate": 2.447174185242324e-07,
"loss": 1.6905,
"num_input_tokens_seen": 238813184,
"step": 911
},
{
"epoch": 0.22647131859945369,
"grad_norm": 0.7306151390075684,
"learning_rate": 2.3935303628930705e-07,
"loss": 1.4804,
"num_input_tokens_seen": 239075328,
"step": 912
},
{
"epoch": 0.22671964241370748,
"grad_norm": 0.5416999459266663,
"learning_rate": 2.3404666103526542e-07,
"loss": 1.6624,
"num_input_tokens_seen": 239337472,
"step": 913
},
{
"epoch": 0.22696796622796125,
"grad_norm": 0.37114378809928894,
"learning_rate": 2.287983574186159e-07,
"loss": 1.8494,
"num_input_tokens_seen": 239599616,
"step": 914
},
{
"epoch": 0.22721629004221505,
"grad_norm": 0.6947669386863708,
"learning_rate": 2.2360818938828189e-07,
"loss": 1.5021,
"num_input_tokens_seen": 239861760,
"step": 915
},
{
"epoch": 0.22746461385646882,
"grad_norm": 0.5789608955383301,
"learning_rate": 2.1847622018482283e-07,
"loss": 1.9683,
"num_input_tokens_seen": 240123904,
"step": 916
},
{
"epoch": 0.22771293767072262,
"grad_norm": 0.6739006638526917,
"learning_rate": 2.134025123396638e-07,
"loss": 2.1382,
"num_input_tokens_seen": 240386048,
"step": 917
},
{
"epoch": 0.22796126148497642,
"grad_norm": 0.7561383247375488,
"learning_rate": 2.083871276743338e-07,
"loss": 1.423,
"num_input_tokens_seen": 240648192,
"step": 918
},
{
"epoch": 0.2282095852992302,
"grad_norm": 0.7975893616676331,
"learning_rate": 2.0343012729971244e-07,
"loss": 1.4102,
"num_input_tokens_seen": 240910336,
"step": 919
},
{
"epoch": 0.228457909113484,
"grad_norm": 0.2659681737422943,
"learning_rate": 1.9853157161528468e-07,
"loss": 1.215,
"num_input_tokens_seen": 241172480,
"step": 920
},
{
"epoch": 0.22870623292773776,
"grad_norm": 0.3225981593132019,
"learning_rate": 1.9369152030840553e-07,
"loss": 1.7634,
"num_input_tokens_seen": 241434624,
"step": 921
},
{
"epoch": 0.22895455674199156,
"grad_norm": 0.7171019911766052,
"learning_rate": 1.8891003235357307e-07,
"loss": 1.7829,
"num_input_tokens_seen": 241696768,
"step": 922
},
{
"epoch": 0.22920288055624535,
"grad_norm": 0.5440613031387329,
"learning_rate": 1.841871660117095e-07,
"loss": 1.3598,
"num_input_tokens_seen": 241958912,
"step": 923
},
{
"epoch": 0.22945120437049912,
"grad_norm": 0.359244167804718,
"learning_rate": 1.7952297882945e-07,
"loss": 1.5261,
"num_input_tokens_seen": 242221056,
"step": 924
},
{
"epoch": 0.22969952818475292,
"grad_norm": 0.7204708456993103,
"learning_rate": 1.7491752763844294e-07,
"loss": 1.7172,
"num_input_tokens_seen": 242483200,
"step": 925
},
{
"epoch": 0.2299478519990067,
"grad_norm": 0.4921867847442627,
"learning_rate": 1.7037086855465902e-07,
"loss": 1.8377,
"num_input_tokens_seen": 242745344,
"step": 926
},
{
"epoch": 0.2301961758132605,
"grad_norm": 0.8633773326873779,
"learning_rate": 1.6588305697770313e-07,
"loss": 1.4806,
"num_input_tokens_seen": 243007488,
"step": 927
},
{
"epoch": 0.2304444996275143,
"grad_norm": 0.5622784495353699,
"learning_rate": 1.6145414759014433e-07,
"loss": 1.5533,
"num_input_tokens_seen": 243269632,
"step": 928
},
{
"epoch": 0.23069282344176806,
"grad_norm": 0.5299321413040161,
"learning_rate": 1.5708419435684463e-07,
"loss": 1.3532,
"num_input_tokens_seen": 243531776,
"step": 929
},
{
"epoch": 0.23094114725602186,
"grad_norm": 0.6882240772247314,
"learning_rate": 1.5277325052430569e-07,
"loss": 1.7824,
"num_input_tokens_seen": 243793920,
"step": 930
},
{
"epoch": 0.23118947107027563,
"grad_norm": 0.431749552488327,
"learning_rate": 1.4852136862001766e-07,
"loss": 1.5325,
"num_input_tokens_seen": 244056064,
"step": 931
},
{
"epoch": 0.23143779488452942,
"grad_norm": 0.5995936393737793,
"learning_rate": 1.4432860045182019e-07,
"loss": 1.855,
"num_input_tokens_seen": 244318208,
"step": 932
},
{
"epoch": 0.23168611869878322,
"grad_norm": 0.7233447432518005,
"learning_rate": 1.4019499710726913e-07,
"loss": 1.6507,
"num_input_tokens_seen": 244580352,
"step": 933
},
{
"epoch": 0.231934442513037,
"grad_norm": 0.54053795337677,
"learning_rate": 1.3612060895301759e-07,
"loss": 1.8987,
"num_input_tokens_seen": 244842496,
"step": 934
},
{
"epoch": 0.2321827663272908,
"grad_norm": 0.3651261031627655,
"learning_rate": 1.3210548563419857e-07,
"loss": 1.4466,
"num_input_tokens_seen": 245104640,
"step": 935
},
{
"epoch": 0.23243109014154456,
"grad_norm": 0.597426176071167,
"learning_rate": 1.2814967607382433e-07,
"loss": 1.6555,
"num_input_tokens_seen": 245366784,
"step": 936
},
{
"epoch": 0.23267941395579836,
"grad_norm": 0.4557691216468811,
"learning_rate": 1.2425322847218368e-07,
"loss": 1.3507,
"num_input_tokens_seen": 245628928,
"step": 937
},
{
"epoch": 0.23292773777005216,
"grad_norm": 0.5856966376304626,
"learning_rate": 1.2041619030626283e-07,
"loss": 1.5985,
"num_input_tokens_seen": 245891072,
"step": 938
},
{
"epoch": 0.23317606158430593,
"grad_norm": 0.5499093532562256,
"learning_rate": 1.166386083291604e-07,
"loss": 1.9703,
"num_input_tokens_seen": 246153216,
"step": 939
},
{
"epoch": 0.23342438539855973,
"grad_norm": 0.5963953733444214,
"learning_rate": 1.1292052856952063e-07,
"loss": 1.5363,
"num_input_tokens_seen": 246415360,
"step": 940
},
{
"epoch": 0.2336727092128135,
"grad_norm": 0.5307541489601135,
"learning_rate": 1.0926199633097156e-07,
"loss": 1.2944,
"num_input_tokens_seen": 246677504,
"step": 941
},
{
"epoch": 0.2339210330270673,
"grad_norm": 0.5369905233383179,
"learning_rate": 1.0566305619157502e-07,
"loss": 1.933,
"num_input_tokens_seen": 246939648,
"step": 942
},
{
"epoch": 0.2341693568413211,
"grad_norm": 1.9757884740829468,
"learning_rate": 1.0212375200327973e-07,
"loss": 1.6174,
"num_input_tokens_seen": 247201792,
"step": 943
},
{
"epoch": 0.23441768065557486,
"grad_norm": 0.5642454028129578,
"learning_rate": 9.864412689139124e-08,
"loss": 1.2,
"num_input_tokens_seen": 247463936,
"step": 944
},
{
"epoch": 0.23466600446982866,
"grad_norm": 0.316659152507782,
"learning_rate": 9.522422325404234e-08,
"loss": 1.8678,
"num_input_tokens_seen": 247726080,
"step": 945
},
{
"epoch": 0.23491432828408243,
"grad_norm": 0.4380717873573303,
"learning_rate": 9.186408276168012e-08,
"loss": 1.4458,
"num_input_tokens_seen": 247988224,
"step": 946
},
{
"epoch": 0.23516265209833623,
"grad_norm": 0.5788005590438843,
"learning_rate": 8.856374635655696e-08,
"loss": 1.5094,
"num_input_tokens_seen": 248250368,
"step": 947
},
{
"epoch": 0.23541097591259003,
"grad_norm": 0.6096988320350647,
"learning_rate": 8.53232542522292e-08,
"loss": 1.5393,
"num_input_tokens_seen": 248512512,
"step": 948
},
{
"epoch": 0.2356592997268438,
"grad_norm": 0.5101311802864075,
"learning_rate": 8.214264593307097e-08,
"loss": 1.6412,
"num_input_tokens_seen": 248774656,
"step": 949
},
{
"epoch": 0.2359076235410976,
"grad_norm": 0.4224371314048767,
"learning_rate": 7.90219601537906e-08,
"loss": 1.4671,
"num_input_tokens_seen": 249036800,
"step": 950
},
{
"epoch": 0.23615594735535136,
"grad_norm": 0.5831036567687988,
"learning_rate": 7.59612349389599e-08,
"loss": 1.4406,
"num_input_tokens_seen": 249298944,
"step": 951
},
{
"epoch": 0.23640427116960516,
"grad_norm": 0.7039021849632263,
"learning_rate": 7.296050758254958e-08,
"loss": 1.7773,
"num_input_tokens_seen": 249561088,
"step": 952
},
{
"epoch": 0.23665259498385896,
"grad_norm": 0.5700446367263794,
"learning_rate": 7.001981464747565e-08,
"loss": 1.4874,
"num_input_tokens_seen": 249823232,
"step": 953
},
{
"epoch": 0.23690091879811273,
"grad_norm": 0.6082141995429993,
"learning_rate": 6.713919196515317e-08,
"loss": 1.5502,
"num_input_tokens_seen": 250085376,
"step": 954
},
{
"epoch": 0.23714924261236653,
"grad_norm": 0.5807713866233826,
"learning_rate": 6.431867463506047e-08,
"loss": 1.6924,
"num_input_tokens_seen": 250347520,
"step": 955
},
{
"epoch": 0.23739756642662033,
"grad_norm": 0.6795780658721924,
"learning_rate": 6.15582970243117e-08,
"loss": 1.6181,
"num_input_tokens_seen": 250609664,
"step": 956
},
{
"epoch": 0.2376458902408741,
"grad_norm": 0.5901414155960083,
"learning_rate": 5.8858092767236084e-08,
"loss": 1.6287,
"num_input_tokens_seen": 250871808,
"step": 957
},
{
"epoch": 0.2378942140551279,
"grad_norm": 0.258759081363678,
"learning_rate": 5.621809476497098e-08,
"loss": 1.5631,
"num_input_tokens_seen": 251133952,
"step": 958
},
{
"epoch": 0.23814253786938167,
"grad_norm": 0.793224036693573,
"learning_rate": 5.363833518505834e-08,
"loss": 1.6065,
"num_input_tokens_seen": 251396096,
"step": 959
},
{
"epoch": 0.23839086168363546,
"grad_norm": 0.541074275970459,
"learning_rate": 5.111884546105506e-08,
"loss": 1.7784,
"num_input_tokens_seen": 251658240,
"step": 960
},
{
"epoch": 0.23863918549788926,
"grad_norm": 0.6888409852981567,
"learning_rate": 4.865965629214819e-08,
"loss": 1.4175,
"num_input_tokens_seen": 251920384,
"step": 961
},
{
"epoch": 0.23888750931214303,
"grad_norm": 0.5229154825210571,
"learning_rate": 4.626079764278202e-08,
"loss": 1.7039,
"num_input_tokens_seen": 252182528,
"step": 962
},
{
"epoch": 0.23913583312639683,
"grad_norm": 0.43334439396858215,
"learning_rate": 4.392229874229159e-08,
"loss": 1.6586,
"num_input_tokens_seen": 252444672,
"step": 963
},
{
"epoch": 0.2393841569406506,
"grad_norm": 0.5404706597328186,
"learning_rate": 4.164418808454806e-08,
"loss": 1.5866,
"num_input_tokens_seen": 252706816,
"step": 964
},
{
"epoch": 0.2396324807549044,
"grad_norm": 0.31539681553840637,
"learning_rate": 3.9426493427611177e-08,
"loss": 1.2445,
"num_input_tokens_seen": 252968960,
"step": 965
},
{
"epoch": 0.2398808045691582,
"grad_norm": 0.767345130443573,
"learning_rate": 3.726924179339009e-08,
"loss": 1.6509,
"num_input_tokens_seen": 253231104,
"step": 966
},
{
"epoch": 0.24012912838341197,
"grad_norm": 0.5601321458816528,
"learning_rate": 3.517245946731529e-08,
"loss": 1.6218,
"num_input_tokens_seen": 253493248,
"step": 967
},
{
"epoch": 0.24037745219766576,
"grad_norm": 0.6398811936378479,
"learning_rate": 3.313617199801777e-08,
"loss": 1.8179,
"num_input_tokens_seen": 253755392,
"step": 968
},
{
"epoch": 0.24062577601191953,
"grad_norm": 0.3753248155117035,
"learning_rate": 3.1160404197018155e-08,
"loss": 1.6866,
"num_input_tokens_seen": 254017536,
"step": 969
},
{
"epoch": 0.24087409982617333,
"grad_norm": 0.7169692516326904,
"learning_rate": 2.9245180138423033e-08,
"loss": 1.6297,
"num_input_tokens_seen": 254279680,
"step": 970
},
{
"epoch": 0.24112242364042713,
"grad_norm": 0.501290500164032,
"learning_rate": 2.7390523158633552e-08,
"loss": 1.3324,
"num_input_tokens_seen": 254541824,
"step": 971
},
{
"epoch": 0.2413707474546809,
"grad_norm": 0.4676488935947418,
"learning_rate": 2.5596455856058966e-08,
"loss": 1.3584,
"num_input_tokens_seen": 254803968,
"step": 972
},
{
"epoch": 0.2416190712689347,
"grad_norm": 0.40131956338882446,
"learning_rate": 2.386300009084408e-08,
"loss": 1.0066,
"num_input_tokens_seen": 255066112,
"step": 973
},
{
"epoch": 0.24186739508318847,
"grad_norm": 0.45223063230514526,
"learning_rate": 2.219017698460002e-08,
"loss": 1.7419,
"num_input_tokens_seen": 255328256,
"step": 974
},
{
"epoch": 0.24211571889744227,
"grad_norm": 0.48078829050064087,
"learning_rate": 2.057800692014833e-08,
"loss": 1.7454,
"num_input_tokens_seen": 255590400,
"step": 975
},
{
"epoch": 0.24236404271169606,
"grad_norm": 0.3801480531692505,
"learning_rate": 1.9026509541272276e-08,
"loss": 1.542,
"num_input_tokens_seen": 255852544,
"step": 976
},
{
"epoch": 0.24261236652594984,
"grad_norm": 0.3696509301662445,
"learning_rate": 1.753570375247815e-08,
"loss": 1.8456,
"num_input_tokens_seen": 256114688,
"step": 977
},
{
"epoch": 0.24286069034020363,
"grad_norm": 0.3742285966873169,
"learning_rate": 1.610560771876435e-08,
"loss": 1.3322,
"num_input_tokens_seen": 256376832,
"step": 978
},
{
"epoch": 0.2431090141544574,
"grad_norm": 0.9268859624862671,
"learning_rate": 1.4736238865398766e-08,
"loss": 1.9955,
"num_input_tokens_seen": 256638976,
"step": 979
},
{
"epoch": 0.2433573379687112,
"grad_norm": 0.7358083128929138,
"learning_rate": 1.3427613877709523e-08,
"loss": 1.6458,
"num_input_tokens_seen": 256901120,
"step": 980
},
{
"epoch": 0.243605661782965,
"grad_norm": 0.4992476999759674,
"learning_rate": 1.2179748700879013e-08,
"loss": 1.7698,
"num_input_tokens_seen": 257163264,
"step": 981
},
{
"epoch": 0.24385398559721877,
"grad_norm": 0.7254119515419006,
"learning_rate": 1.0992658539750179e-08,
"loss": 2.0594,
"num_input_tokens_seen": 257425408,
"step": 982
},
{
"epoch": 0.24410230941147257,
"grad_norm": 0.30243805050849915,
"learning_rate": 9.866357858642206e-09,
"loss": 1.6328,
"num_input_tokens_seen": 257687552,
"step": 983
},
{
"epoch": 0.24435063322572634,
"grad_norm": 0.3717341721057892,
"learning_rate": 8.800860381173448e-09,
"loss": 1.7149,
"num_input_tokens_seen": 257949696,
"step": 984
},
{
"epoch": 0.24459895703998014,
"grad_norm": 0.6060426831245422,
"learning_rate": 7.796179090094891e-09,
"loss": 1.4753,
"num_input_tokens_seen": 258211840,
"step": 985
},
{
"epoch": 0.24484728085423393,
"grad_norm": 0.5001364350318909,
"learning_rate": 6.852326227130835e-09,
"loss": 1.6607,
"num_input_tokens_seen": 258473984,
"step": 986
},
{
"epoch": 0.2450956046684877,
"grad_norm": 0.41138938069343567,
"learning_rate": 5.969313292830126e-09,
"loss": 1.5122,
"num_input_tokens_seen": 258736128,
"step": 987
},
{
"epoch": 0.2453439284827415,
"grad_norm": 0.5986543893814087,
"learning_rate": 5.147151046426824e-09,
"loss": 1.3974,
"num_input_tokens_seen": 258998272,
"step": 988
},
{
"epoch": 0.24559225229699527,
"grad_norm": 0.3890846073627472,
"learning_rate": 4.385849505708084e-09,
"loss": 1.7548,
"num_input_tokens_seen": 259260416,
"step": 989
},
{
"epoch": 0.24584057611124907,
"grad_norm": 0.5105006694793701,
"learning_rate": 3.685417946894254e-09,
"loss": 1.4885,
"num_input_tokens_seen": 259522560,
"step": 990
},
{
"epoch": 0.24608889992550287,
"grad_norm": 0.5231227278709412,
"learning_rate": 3.0458649045211897e-09,
"loss": 1.488,
"num_input_tokens_seen": 259784704,
"step": 991
},
{
"epoch": 0.24633722373975664,
"grad_norm": 0.5039011240005493,
"learning_rate": 2.4671981713420003e-09,
"loss": 2.0,
"num_input_tokens_seen": 260046848,
"step": 992
},
{
"epoch": 0.24658554755401044,
"grad_norm": 0.8183209300041199,
"learning_rate": 1.9494247982282386e-09,
"loss": 1.5233,
"num_input_tokens_seen": 260308992,
"step": 993
},
{
"epoch": 0.2468338713682642,
"grad_norm": 0.34673362970352173,
"learning_rate": 1.4925510940844157e-09,
"loss": 1.2279,
"num_input_tokens_seen": 260571136,
"step": 994
},
{
"epoch": 0.247082195182518,
"grad_norm": 0.6855907440185547,
"learning_rate": 1.096582625772502e-09,
"loss": 1.3346,
"num_input_tokens_seen": 260833280,
"step": 995
},
{
"epoch": 0.2473305189967718,
"grad_norm": 0.5691891312599182,
"learning_rate": 7.615242180436521e-10,
"loss": 1.3504,
"num_input_tokens_seen": 261095424,
"step": 996
},
{
"epoch": 0.24757884281102557,
"grad_norm": 0.49807822704315186,
"learning_rate": 4.87379953478806e-10,
"loss": 2.1568,
"num_input_tokens_seen": 261357568,
"step": 997
},
{
"epoch": 0.24782716662527937,
"grad_norm": 0.4466933310031891,
"learning_rate": 2.741531724392843e-10,
"loss": 1.4098,
"num_input_tokens_seen": 261619712,
"step": 998
},
{
"epoch": 0.24807549043953314,
"grad_norm": 0.6643083691596985,
"learning_rate": 1.2184647302626585e-10,
"loss": 1.6152,
"num_input_tokens_seen": 261881856,
"step": 999
},
{
"epoch": 0.24832381425378694,
"grad_norm": 0.5675874352455139,
"learning_rate": 3.0461711048035415e-11,
"loss": 1.2485,
"num_input_tokens_seen": 262144000,
"step": 1000
}
],
"logging_steps": 1.0,
"max_steps": 1000,
"num_input_tokens_seen": 262144000,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 418759311360000.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}