{
"best_metric": 0.30803602933883667,
"best_model_checkpoint": "./opt_trained/checkpoint-258",
"epoch": 12.0,
"eval_steps": 500,
"global_step": 516,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.023391812865497075,
"grad_norm": 1.4262398481369019,
"learning_rate": 9.995238095238095e-05,
"loss": 2.4722,
"step": 1
},
{
"epoch": 0.04678362573099415,
"grad_norm": 12.206507682800293,
"learning_rate": 9.990476190476191e-05,
"loss": 3.176,
"step": 2
},
{
"epoch": 0.07017543859649122,
"grad_norm": 11.578840255737305,
"learning_rate": 9.985714285714287e-05,
"loss": 2.8396,
"step": 3
},
{
"epoch": 0.0935672514619883,
"grad_norm": 9.648161888122559,
"learning_rate": 9.980952380952382e-05,
"loss": 2.7336,
"step": 4
},
{
"epoch": 0.11695906432748537,
"grad_norm": 3.8782691955566406,
"learning_rate": 9.976190476190477e-05,
"loss": 2.6955,
"step": 5
},
{
"epoch": 0.14035087719298245,
"grad_norm": 12.644487380981445,
"learning_rate": 9.971428571428571e-05,
"loss": 2.6405,
"step": 6
},
{
"epoch": 0.16374269005847952,
"grad_norm": 11.057122230529785,
"learning_rate": 9.966666666666667e-05,
"loss": 2.7113,
"step": 7
},
{
"epoch": 0.1871345029239766,
"grad_norm": 4.860190391540527,
"learning_rate": 9.961904761904762e-05,
"loss": 2.7076,
"step": 8
},
{
"epoch": 0.21052631578947367,
"grad_norm": 4.317215442657471,
"learning_rate": 9.957142857142858e-05,
"loss": 2.6377,
"step": 9
},
{
"epoch": 0.23391812865497075,
"grad_norm": 3.3068416118621826,
"learning_rate": 9.952380952380953e-05,
"loss": 2.5995,
"step": 10
},
{
"epoch": 0.2573099415204678,
"grad_norm": 1.2752724885940552,
"learning_rate": 9.947619047619048e-05,
"loss": 2.6285,
"step": 11
},
{
"epoch": 0.2807017543859649,
"grad_norm": 8.849737167358398,
"learning_rate": 9.942857142857144e-05,
"loss": 2.6217,
"step": 12
},
{
"epoch": 0.30409356725146197,
"grad_norm": 5.594025611877441,
"learning_rate": 9.938095238095238e-05,
"loss": 2.6265,
"step": 13
},
{
"epoch": 0.32748538011695905,
"grad_norm": 3.581617593765259,
"learning_rate": 9.933333333333334e-05,
"loss": 2.5984,
"step": 14
},
{
"epoch": 0.3508771929824561,
"grad_norm": 5.33600378036499,
"learning_rate": 9.92857142857143e-05,
"loss": 2.6071,
"step": 15
},
{
"epoch": 0.3742690058479532,
"grad_norm": 4.4274983406066895,
"learning_rate": 9.923809523809524e-05,
"loss": 2.5908,
"step": 16
},
{
"epoch": 0.39766081871345027,
"grad_norm": 4.5507307052612305,
"learning_rate": 9.91904761904762e-05,
"loss": 2.5616,
"step": 17
},
{
"epoch": 0.42105263157894735,
"grad_norm": 3.189161777496338,
"learning_rate": 9.914285714285715e-05,
"loss": 2.5784,
"step": 18
},
{
"epoch": 0.4444444444444444,
"grad_norm": 2.706615924835205,
"learning_rate": 9.909523809523809e-05,
"loss": 2.5413,
"step": 19
},
{
"epoch": 0.4678362573099415,
"grad_norm": 2.146662712097168,
"learning_rate": 9.904761904761905e-05,
"loss": 2.6019,
"step": 20
},
{
"epoch": 0.49122807017543857,
"grad_norm": 3.2252964973449707,
"learning_rate": 9.900000000000001e-05,
"loss": 2.5914,
"step": 21
},
{
"epoch": 0.5146198830409356,
"grad_norm": 2.8859879970550537,
"learning_rate": 9.895238095238095e-05,
"loss": 2.6133,
"step": 22
},
{
"epoch": 0.5380116959064327,
"grad_norm": 2.5647897720336914,
"learning_rate": 9.890476190476191e-05,
"loss": 2.5425,
"step": 23
},
{
"epoch": 0.5614035087719298,
"grad_norm": 3.0347073078155518,
"learning_rate": 9.885714285714286e-05,
"loss": 2.5732,
"step": 24
},
{
"epoch": 0.5847953216374269,
"grad_norm": 1.8412858247756958,
"learning_rate": 9.880952380952381e-05,
"loss": 2.5776,
"step": 25
},
{
"epoch": 0.6081871345029239,
"grad_norm": 3.1820366382598877,
"learning_rate": 9.876190476190477e-05,
"loss": 2.5566,
"step": 26
},
{
"epoch": 0.631578947368421,
"grad_norm": 2.4613687992095947,
"learning_rate": 9.871428571428572e-05,
"loss": 2.5113,
"step": 27
},
{
"epoch": 0.6549707602339181,
"grad_norm": 5.942374229431152,
"learning_rate": 9.866666666666668e-05,
"loss": 2.575,
"step": 28
},
{
"epoch": 0.6783625730994152,
"grad_norm": 7.427689075469971,
"learning_rate": 9.861904761904762e-05,
"loss": 2.5692,
"step": 29
},
{
"epoch": 0.7017543859649122,
"grad_norm": 3.163085699081421,
"learning_rate": 9.857142857142858e-05,
"loss": 2.5192,
"step": 30
},
{
"epoch": 0.7251461988304093,
"grad_norm": 3.421778678894043,
"learning_rate": 9.852380952380952e-05,
"loss": 2.5533,
"step": 31
},
{
"epoch": 0.7485380116959064,
"grad_norm": 2.486320734024048,
"learning_rate": 9.847619047619048e-05,
"loss": 2.515,
"step": 32
},
{
"epoch": 0.7719298245614035,
"grad_norm": 3.2825722694396973,
"learning_rate": 9.842857142857144e-05,
"loss": 2.4994,
"step": 33
},
{
"epoch": 0.7953216374269005,
"grad_norm": 3.5643672943115234,
"learning_rate": 9.838095238095238e-05,
"loss": 2.5327,
"step": 34
},
{
"epoch": 0.8187134502923976,
"grad_norm": 3.3127200603485107,
"learning_rate": 9.833333333333333e-05,
"loss": 2.6251,
"step": 35
},
{
"epoch": 0.8421052631578947,
"grad_norm": 3.187095880508423,
"learning_rate": 9.828571428571429e-05,
"loss": 2.5233,
"step": 36
},
{
"epoch": 0.8654970760233918,
"grad_norm": 3.3743860721588135,
"learning_rate": 9.823809523809525e-05,
"loss": 2.4999,
"step": 37
},
{
"epoch": 0.8888888888888888,
"grad_norm": 2.3684120178222656,
"learning_rate": 9.81904761904762e-05,
"loss": 2.5302,
"step": 38
},
{
"epoch": 0.9122807017543859,
"grad_norm": 3.7091619968414307,
"learning_rate": 9.814285714285715e-05,
"loss": 2.5003,
"step": 39
},
{
"epoch": 0.935672514619883,
"grad_norm": 4.230418682098389,
"learning_rate": 9.80952380952381e-05,
"loss": 2.5379,
"step": 40
},
{
"epoch": 0.9590643274853801,
"grad_norm": 3.5879616737365723,
"learning_rate": 9.804761904761905e-05,
"loss": 2.5652,
"step": 41
},
{
"epoch": 0.9824561403508771,
"grad_norm": 2.621013879776001,
"learning_rate": 9.8e-05,
"loss": 2.5704,
"step": 42
},
{
"epoch": 1.0,
"grad_norm": 1.3536667823791504,
"learning_rate": 9.795238095238097e-05,
"loss": 1.9135,
"step": 43
},
{
"epoch": 1.0,
"eval_loss": 0.31598106026649475,
"eval_runtime": 3.4639,
"eval_samples_per_second": 62.358,
"eval_steps_per_second": 15.59,
"step": 43
},
{
"epoch": 1.023391812865497,
"grad_norm": 2.768101215362549,
"learning_rate": 9.790476190476191e-05,
"loss": 2.4537,
"step": 44
},
{
"epoch": 1.0467836257309941,
"grad_norm": 2.276510000228882,
"learning_rate": 9.785714285714286e-05,
"loss": 2.4703,
"step": 45
},
{
"epoch": 1.0701754385964912,
"grad_norm": 3.3676953315734863,
"learning_rate": 9.780952380952382e-05,
"loss": 2.5157,
"step": 46
},
{
"epoch": 1.0935672514619883,
"grad_norm": 2.5821385383605957,
"learning_rate": 9.776190476190476e-05,
"loss": 2.5257,
"step": 47
},
{
"epoch": 1.1169590643274854,
"grad_norm": 2.9606378078460693,
"learning_rate": 9.771428571428572e-05,
"loss": 2.4374,
"step": 48
},
{
"epoch": 1.1403508771929824,
"grad_norm": 3.44348406791687,
"learning_rate": 9.766666666666668e-05,
"loss": 2.4795,
"step": 49
},
{
"epoch": 1.1637426900584795,
"grad_norm": 2.2815728187561035,
"learning_rate": 9.761904761904762e-05,
"loss": 2.4883,
"step": 50
},
{
"epoch": 1.1871345029239766,
"grad_norm": 2.4586877822875977,
"learning_rate": 9.757142857142858e-05,
"loss": 2.4754,
"step": 51
},
{
"epoch": 1.2105263157894737,
"grad_norm": 2.3498520851135254,
"learning_rate": 9.752380952380953e-05,
"loss": 2.4607,
"step": 52
},
{
"epoch": 1.2339181286549707,
"grad_norm": 2.0736541748046875,
"learning_rate": 9.747619047619047e-05,
"loss": 2.4729,
"step": 53
},
{
"epoch": 1.2573099415204678,
"grad_norm": 3.3633615970611572,
"learning_rate": 9.742857142857143e-05,
"loss": 2.4651,
"step": 54
},
{
"epoch": 1.280701754385965,
"grad_norm": 2.9030282497406006,
"learning_rate": 9.738095238095239e-05,
"loss": 2.4837,
"step": 55
},
{
"epoch": 1.304093567251462,
"grad_norm": 3.896803855895996,
"learning_rate": 9.733333333333335e-05,
"loss": 2.53,
"step": 56
},
{
"epoch": 1.327485380116959,
"grad_norm": 2.2422099113464355,
"learning_rate": 9.728571428571429e-05,
"loss": 2.4911,
"step": 57
},
{
"epoch": 1.3508771929824561,
"grad_norm": 3.7257282733917236,
"learning_rate": 9.723809523809524e-05,
"loss": 2.5117,
"step": 58
},
{
"epoch": 1.3742690058479532,
"grad_norm": 4.366884231567383,
"learning_rate": 9.71904761904762e-05,
"loss": 2.5469,
"step": 59
},
{
"epoch": 1.3976608187134503,
"grad_norm": 2.365870714187622,
"learning_rate": 9.714285714285715e-05,
"loss": 2.522,
"step": 60
},
{
"epoch": 1.4210526315789473,
"grad_norm": 2.6924397945404053,
"learning_rate": 9.70952380952381e-05,
"loss": 2.5042,
"step": 61
},
{
"epoch": 1.4444444444444444,
"grad_norm": 2.8113691806793213,
"learning_rate": 9.704761904761905e-05,
"loss": 2.5424,
"step": 62
},
{
"epoch": 1.4678362573099415,
"grad_norm": 2.295470952987671,
"learning_rate": 9.7e-05,
"loss": 2.5205,
"step": 63
},
{
"epoch": 1.4912280701754386,
"grad_norm": 4.694986343383789,
"learning_rate": 9.695238095238096e-05,
"loss": 2.503,
"step": 64
},
{
"epoch": 1.5146198830409356,
"grad_norm": 2.4416017532348633,
"learning_rate": 9.69047619047619e-05,
"loss": 2.5223,
"step": 65
},
{
"epoch": 1.5380116959064327,
"grad_norm": 4.63900899887085,
"learning_rate": 9.685714285714286e-05,
"loss": 2.4851,
"step": 66
},
{
"epoch": 1.5614035087719298,
"grad_norm": 2.8212132453918457,
"learning_rate": 9.680952380952382e-05,
"loss": 2.5191,
"step": 67
},
{
"epoch": 1.5847953216374269,
"grad_norm": 2.3161513805389404,
"learning_rate": 9.676190476190476e-05,
"loss": 2.4891,
"step": 68
},
{
"epoch": 1.608187134502924,
"grad_norm": 2.707838535308838,
"learning_rate": 9.671428571428572e-05,
"loss": 2.4927,
"step": 69
},
{
"epoch": 1.631578947368421,
"grad_norm": 1.9635908603668213,
"learning_rate": 9.666666666666667e-05,
"loss": 2.4834,
"step": 70
},
{
"epoch": 1.654970760233918,
"grad_norm": 2.5164906978607178,
"learning_rate": 9.661904761904763e-05,
"loss": 2.4375,
"step": 71
},
{
"epoch": 1.6783625730994152,
"grad_norm": 2.799863576889038,
"learning_rate": 9.657142857142858e-05,
"loss": 2.4754,
"step": 72
},
{
"epoch": 1.7017543859649122,
"grad_norm": 3.619720935821533,
"learning_rate": 9.652380952380953e-05,
"loss": 2.511,
"step": 73
},
{
"epoch": 1.7251461988304093,
"grad_norm": 3.7119972705841064,
"learning_rate": 9.647619047619047e-05,
"loss": 2.4723,
"step": 74
},
{
"epoch": 1.7485380116959064,
"grad_norm": 4.552167892456055,
"learning_rate": 9.642857142857143e-05,
"loss": 2.4928,
"step": 75
},
{
"epoch": 1.7719298245614035,
"grad_norm": 2.3658089637756348,
"learning_rate": 9.638095238095238e-05,
"loss": 2.5233,
"step": 76
},
{
"epoch": 1.7953216374269005,
"grad_norm": 2.2602102756500244,
"learning_rate": 9.633333333333335e-05,
"loss": 2.493,
"step": 77
},
{
"epoch": 1.8187134502923976,
"grad_norm": 2.2393336296081543,
"learning_rate": 9.628571428571429e-05,
"loss": 2.4743,
"step": 78
},
{
"epoch": 1.8421052631578947,
"grad_norm": 4.036160469055176,
"learning_rate": 9.623809523809524e-05,
"loss": 2.4973,
"step": 79
},
{
"epoch": 1.8654970760233918,
"grad_norm": 2.1540491580963135,
"learning_rate": 9.61904761904762e-05,
"loss": 2.5163,
"step": 80
},
{
"epoch": 1.8888888888888888,
"grad_norm": 3.3976409435272217,
"learning_rate": 9.614285714285714e-05,
"loss": 2.4244,
"step": 81
},
{
"epoch": 1.912280701754386,
"grad_norm": 2.306898832321167,
"learning_rate": 9.60952380952381e-05,
"loss": 2.4487,
"step": 82
},
{
"epoch": 1.935672514619883,
"grad_norm": 2.8536524772644043,
"learning_rate": 9.604761904761906e-05,
"loss": 2.4441,
"step": 83
},
{
"epoch": 1.95906432748538,
"grad_norm": 2.7067620754241943,
"learning_rate": 9.6e-05,
"loss": 2.5898,
"step": 84
},
{
"epoch": 1.9824561403508771,
"grad_norm": 2.6597952842712402,
"learning_rate": 9.595238095238096e-05,
"loss": 2.4564,
"step": 85
},
{
"epoch": 2.0,
"grad_norm": 3.054412603378296,
"learning_rate": 9.59047619047619e-05,
"loss": 1.8442,
"step": 86
},
{
"epoch": 2.0,
"eval_loss": 0.31219348311424255,
"eval_runtime": 3.4985,
"eval_samples_per_second": 61.74,
"eval_steps_per_second": 15.435,
"step": 86
},
{
"epoch": 2.023391812865497,
"grad_norm": 2.8484723567962646,
"learning_rate": 9.585714285714285e-05,
"loss": 2.4903,
"step": 87
},
{
"epoch": 2.046783625730994,
"grad_norm": 3.8947722911834717,
"learning_rate": 9.580952380952382e-05,
"loss": 2.486,
"step": 88
},
{
"epoch": 2.0701754385964914,
"grad_norm": 4.0316386222839355,
"learning_rate": 9.576190476190477e-05,
"loss": 2.4559,
"step": 89
},
{
"epoch": 2.0935672514619883,
"grad_norm": 2.2441468238830566,
"learning_rate": 9.571428571428573e-05,
"loss": 2.4548,
"step": 90
},
{
"epoch": 2.116959064327485,
"grad_norm": 4.1536359786987305,
"learning_rate": 9.566666666666667e-05,
"loss": 2.4341,
"step": 91
},
{
"epoch": 2.1403508771929824,
"grad_norm": 5.011383533477783,
"learning_rate": 9.561904761904761e-05,
"loss": 2.4233,
"step": 92
},
{
"epoch": 2.1637426900584797,
"grad_norm": 2.0493083000183105,
"learning_rate": 9.557142857142857e-05,
"loss": 2.4691,
"step": 93
},
{
"epoch": 2.1871345029239766,
"grad_norm": 4.27736759185791,
"learning_rate": 9.552380952380953e-05,
"loss": 2.4666,
"step": 94
},
{
"epoch": 2.2105263157894735,
"grad_norm": 3.795992136001587,
"learning_rate": 9.547619047619049e-05,
"loss": 2.4617,
"step": 95
},
{
"epoch": 2.2339181286549707,
"grad_norm": 2.451568365097046,
"learning_rate": 9.542857142857143e-05,
"loss": 2.4485,
"step": 96
},
{
"epoch": 2.257309941520468,
"grad_norm": 2.0920753479003906,
"learning_rate": 9.538095238095238e-05,
"loss": 2.4411,
"step": 97
},
{
"epoch": 2.280701754385965,
"grad_norm": 3.969087600708008,
"learning_rate": 9.533333333333334e-05,
"loss": 2.47,
"step": 98
},
{
"epoch": 2.3040935672514617,
"grad_norm": 3.598999500274658,
"learning_rate": 9.52857142857143e-05,
"loss": 2.4431,
"step": 99
},
{
"epoch": 2.327485380116959,
"grad_norm": 1.720585584640503,
"learning_rate": 9.523809523809524e-05,
"loss": 2.4784,
"step": 100
},
{
"epoch": 2.3508771929824563,
"grad_norm": 2.879593849182129,
"learning_rate": 9.51904761904762e-05,
"loss": 2.4321,
"step": 101
},
{
"epoch": 2.374269005847953,
"grad_norm": 1.448943853378296,
"learning_rate": 9.514285714285714e-05,
"loss": 2.4054,
"step": 102
},
{
"epoch": 2.39766081871345,
"grad_norm": 2.534806489944458,
"learning_rate": 9.50952380952381e-05,
"loss": 2.4682,
"step": 103
},
{
"epoch": 2.4210526315789473,
"grad_norm": 2.2084572315216064,
"learning_rate": 9.504761904761905e-05,
"loss": 2.4459,
"step": 104
},
{
"epoch": 2.4444444444444446,
"grad_norm": 2.6770689487457275,
"learning_rate": 9.5e-05,
"loss": 2.4461,
"step": 105
},
{
"epoch": 2.4678362573099415,
"grad_norm": 2.4759695529937744,
"learning_rate": 9.495238095238096e-05,
"loss": 2.4658,
"step": 106
},
{
"epoch": 2.4912280701754383,
"grad_norm": 2.4558472633361816,
"learning_rate": 9.490476190476191e-05,
"loss": 2.4309,
"step": 107
},
{
"epoch": 2.5146198830409356,
"grad_norm": 2.584268808364868,
"learning_rate": 9.485714285714287e-05,
"loss": 2.4985,
"step": 108
},
{
"epoch": 2.538011695906433,
"grad_norm": 3.1202220916748047,
"learning_rate": 9.480952380952381e-05,
"loss": 2.4503,
"step": 109
},
{
"epoch": 2.56140350877193,
"grad_norm": 3.697275161743164,
"learning_rate": 9.476190476190476e-05,
"loss": 2.4021,
"step": 110
},
{
"epoch": 2.5847953216374266,
"grad_norm": 2.602041482925415,
"learning_rate": 9.471428571428573e-05,
"loss": 2.5068,
"step": 111
},
{
"epoch": 2.608187134502924,
"grad_norm": 3.9455575942993164,
"learning_rate": 9.466666666666667e-05,
"loss": 2.4394,
"step": 112
},
{
"epoch": 2.6315789473684212,
"grad_norm": 3.7310194969177246,
"learning_rate": 9.461904761904762e-05,
"loss": 2.4865,
"step": 113
},
{
"epoch": 2.654970760233918,
"grad_norm": 4.220344066619873,
"learning_rate": 9.457142857142858e-05,
"loss": 2.4982,
"step": 114
},
{
"epoch": 2.678362573099415,
"grad_norm": 1.7343074083328247,
"learning_rate": 9.452380952380952e-05,
"loss": 2.4484,
"step": 115
},
{
"epoch": 2.7017543859649122,
"grad_norm": 3.8400301933288574,
"learning_rate": 9.447619047619048e-05,
"loss": 2.5013,
"step": 116
},
{
"epoch": 2.7251461988304095,
"grad_norm": 5.945042133331299,
"learning_rate": 9.442857142857144e-05,
"loss": 2.5052,
"step": 117
},
{
"epoch": 2.7485380116959064,
"grad_norm": 2.8387110233306885,
"learning_rate": 9.438095238095238e-05,
"loss": 2.5203,
"step": 118
},
{
"epoch": 2.7719298245614032,
"grad_norm": 3.0027477741241455,
"learning_rate": 9.433333333333334e-05,
"loss": 2.4686,
"step": 119
},
{
"epoch": 2.7953216374269005,
"grad_norm": 4.645102024078369,
"learning_rate": 9.428571428571429e-05,
"loss": 2.5442,
"step": 120
},
{
"epoch": 2.818713450292398,
"grad_norm": 3.3703556060791016,
"learning_rate": 9.423809523809524e-05,
"loss": 2.4585,
"step": 121
},
{
"epoch": 2.8421052631578947,
"grad_norm": 2.3330347537994385,
"learning_rate": 9.41904761904762e-05,
"loss": 2.4219,
"step": 122
},
{
"epoch": 2.8654970760233915,
"grad_norm": 4.309964179992676,
"learning_rate": 9.414285714285715e-05,
"loss": 2.5121,
"step": 123
},
{
"epoch": 2.888888888888889,
"grad_norm": 4.484703540802002,
"learning_rate": 9.40952380952381e-05,
"loss": 2.5512,
"step": 124
},
{
"epoch": 2.912280701754386,
"grad_norm": 3.8170957565307617,
"learning_rate": 9.404761904761905e-05,
"loss": 2.4961,
"step": 125
},
{
"epoch": 2.935672514619883,
"grad_norm": 3.1967039108276367,
"learning_rate": 9.4e-05,
"loss": 2.4039,
"step": 126
},
{
"epoch": 2.95906432748538,
"grad_norm": 3.305359363555908,
"learning_rate": 9.395238095238095e-05,
"loss": 2.4389,
"step": 127
},
{
"epoch": 2.982456140350877,
"grad_norm": 3.0017144680023193,
"learning_rate": 9.390476190476191e-05,
"loss": 2.4839,
"step": 128
},
{
"epoch": 3.0,
"grad_norm": 1.907825231552124,
"learning_rate": 9.385714285714287e-05,
"loss": 1.7774,
"step": 129
},
{
"epoch": 3.0,
"eval_loss": 0.3095262944698334,
"eval_runtime": 3.5049,
"eval_samples_per_second": 61.627,
"eval_steps_per_second": 15.407,
"step": 129
},
{
"epoch": 3.023391812865497,
"grad_norm": 2.4059183597564697,
"learning_rate": 9.380952380952381e-05,
"loss": 2.4009,
"step": 130
},
{
"epoch": 3.046783625730994,
"grad_norm": 2.391845226287842,
"learning_rate": 9.376190476190476e-05,
"loss": 2.4191,
"step": 131
},
{
"epoch": 3.0701754385964914,
"grad_norm": 3.139408826828003,
"learning_rate": 9.371428571428572e-05,
"loss": 2.391,
"step": 132
},
{
"epoch": 3.0935672514619883,
"grad_norm": 3.1654696464538574,
"learning_rate": 9.366666666666668e-05,
"loss": 2.3909,
"step": 133
},
{
"epoch": 3.116959064327485,
"grad_norm": 1.5735834836959839,
"learning_rate": 9.361904761904763e-05,
"loss": 2.4318,
"step": 134
},
{
"epoch": 3.1403508771929824,
"grad_norm": 2.855710029602051,
"learning_rate": 9.357142857142858e-05,
"loss": 2.3899,
"step": 135
},
{
"epoch": 3.1637426900584797,
"grad_norm": 2.6927037239074707,
"learning_rate": 9.352380952380952e-05,
"loss": 2.4136,
"step": 136
},
{
"epoch": 3.1871345029239766,
"grad_norm": 2.0338966846466064,
"learning_rate": 9.347619047619048e-05,
"loss": 2.3805,
"step": 137
},
{
"epoch": 3.2105263157894735,
"grad_norm": 3.746018409729004,
"learning_rate": 9.342857142857143e-05,
"loss": 2.4159,
"step": 138
},
{
"epoch": 3.2339181286549707,
"grad_norm": 2.1957383155822754,
"learning_rate": 9.338095238095238e-05,
"loss": 2.4393,
"step": 139
},
{
"epoch": 3.257309941520468,
"grad_norm": 2.063310384750366,
"learning_rate": 9.333333333333334e-05,
"loss": 2.3983,
"step": 140
},
{
"epoch": 3.280701754385965,
"grad_norm": 1.9041073322296143,
"learning_rate": 9.328571428571429e-05,
"loss": 2.4438,
"step": 141
},
{
"epoch": 3.3040935672514617,
"grad_norm": 2.0360188484191895,
"learning_rate": 9.323809523809525e-05,
"loss": 2.4025,
"step": 142
},
{
"epoch": 3.327485380116959,
"grad_norm": 2.820622444152832,
"learning_rate": 9.319047619047619e-05,
"loss": 2.4353,
"step": 143
},
{
"epoch": 3.3508771929824563,
"grad_norm": 2.129838705062866,
"learning_rate": 9.314285714285715e-05,
"loss": 2.4528,
"step": 144
},
{
"epoch": 3.374269005847953,
"grad_norm": 2.2732796669006348,
"learning_rate": 9.309523809523811e-05,
"loss": 2.4082,
"step": 145
},
{
"epoch": 3.39766081871345,
"grad_norm": 2.280691623687744,
"learning_rate": 9.304761904761905e-05,
"loss": 2.4521,
"step": 146
},
{
"epoch": 3.4210526315789473,
"grad_norm": 2.549703598022461,
"learning_rate": 9.300000000000001e-05,
"loss": 2.4777,
"step": 147
},
{
"epoch": 3.4444444444444446,
"grad_norm": 1.628199815750122,
"learning_rate": 9.295238095238096e-05,
"loss": 2.3744,
"step": 148
},
{
"epoch": 3.4678362573099415,
"grad_norm": 2.7309587001800537,
"learning_rate": 9.29047619047619e-05,
"loss": 2.4787,
"step": 149
},
{
"epoch": 3.4912280701754383,
"grad_norm": 2.2030370235443115,
"learning_rate": 9.285714285714286e-05,
"loss": 2.4376,
"step": 150
},
{
"epoch": 3.5146198830409356,
"grad_norm": 2.3776726722717285,
"learning_rate": 9.280952380952382e-05,
"loss": 2.4595,
"step": 151
},
{
"epoch": 3.538011695906433,
"grad_norm": 3.5721945762634277,
"learning_rate": 9.276190476190476e-05,
"loss": 2.4642,
"step": 152
},
{
"epoch": 3.56140350877193,
"grad_norm": 3.3094141483306885,
"learning_rate": 9.271428571428572e-05,
"loss": 2.4229,
"step": 153
},
{
"epoch": 3.5847953216374266,
"grad_norm": 2.197993278503418,
"learning_rate": 9.266666666666666e-05,
"loss": 2.3844,
"step": 154
},
{
"epoch": 3.608187134502924,
"grad_norm": 3.706082582473755,
"learning_rate": 9.261904761904762e-05,
"loss": 2.4332,
"step": 155
},
{
"epoch": 3.6315789473684212,
"grad_norm": 2.791156053543091,
"learning_rate": 9.257142857142858e-05,
"loss": 2.448,
"step": 156
},
{
"epoch": 3.654970760233918,
"grad_norm": 4.078625679016113,
"learning_rate": 9.252380952380953e-05,
"loss": 2.4481,
"step": 157
},
{
"epoch": 3.678362573099415,
"grad_norm": 2.616171360015869,
"learning_rate": 9.247619047619048e-05,
"loss": 2.4338,
"step": 158
},
{
"epoch": 3.7017543859649122,
"grad_norm": 3.2940022945404053,
"learning_rate": 9.242857142857143e-05,
"loss": 2.4327,
"step": 159
},
{
"epoch": 3.7251461988304095,
"grad_norm": 5.047934532165527,
"learning_rate": 9.238095238095239e-05,
"loss": 2.4671,
"step": 160
},
{
"epoch": 3.7485380116959064,
"grad_norm": 2.0488369464874268,
"learning_rate": 9.233333333333333e-05,
"loss": 2.4354,
"step": 161
},
{
"epoch": 3.7719298245614032,
"grad_norm": 5.099597454071045,
"learning_rate": 9.228571428571429e-05,
"loss": 2.4336,
"step": 162
},
{
"epoch": 3.7953216374269005,
"grad_norm": 3.812992572784424,
"learning_rate": 9.223809523809525e-05,
"loss": 2.4586,
"step": 163
},
{
"epoch": 3.818713450292398,
"grad_norm": 3.237950086593628,
"learning_rate": 9.21904761904762e-05,
"loss": 2.4583,
"step": 164
},
{
"epoch": 3.8421052631578947,
"grad_norm": 5.422150135040283,
"learning_rate": 9.214285714285714e-05,
"loss": 2.4427,
"step": 165
},
{
"epoch": 3.8654970760233915,
"grad_norm": 3.7532801628112793,
"learning_rate": 9.20952380952381e-05,
"loss": 2.4103,
"step": 166
},
{
"epoch": 3.888888888888889,
"grad_norm": 3.172743320465088,
"learning_rate": 9.204761904761906e-05,
"loss": 2.4427,
"step": 167
},
{
"epoch": 3.912280701754386,
"grad_norm": 5.759428977966309,
"learning_rate": 9.200000000000001e-05,
"loss": 2.4811,
"step": 168
},
{
"epoch": 3.935672514619883,
"grad_norm": 2.9414196014404297,
"learning_rate": 9.195238095238096e-05,
"loss": 2.4602,
"step": 169
},
{
"epoch": 3.95906432748538,
"grad_norm": 4.5333662033081055,
"learning_rate": 9.19047619047619e-05,
"loss": 2.4738,
"step": 170
},
{
"epoch": 3.982456140350877,
"grad_norm": 3.230017900466919,
"learning_rate": 9.185714285714286e-05,
"loss": 2.4134,
"step": 171
},
{
"epoch": 4.0,
"grad_norm": 1.4366929531097412,
"learning_rate": 9.18095238095238e-05,
"loss": 1.8099,
"step": 172
},
{
"epoch": 4.0,
"eval_loss": 0.31109535694122314,
"eval_runtime": 3.5128,
"eval_samples_per_second": 61.489,
"eval_steps_per_second": 15.372,
"step": 172
},
{
"epoch": 4.023391812865497,
"grad_norm": 4.1390509605407715,
"learning_rate": 9.176190476190476e-05,
"loss": 2.4107,
"step": 173
},
{
"epoch": 4.046783625730994,
"grad_norm": 4.0564985275268555,
"learning_rate": 9.171428571428572e-05,
"loss": 2.3704,
"step": 174
},
{
"epoch": 4.0701754385964914,
"grad_norm": 3.3425748348236084,
"learning_rate": 9.166666666666667e-05,
"loss": 2.3986,
"step": 175
},
{
"epoch": 4.093567251461988,
"grad_norm": 3.1439638137817383,
"learning_rate": 9.161904761904763e-05,
"loss": 2.3725,
"step": 176
},
{
"epoch": 4.116959064327485,
"grad_norm": 2.531954765319824,
"learning_rate": 9.157142857142857e-05,
"loss": 2.4327,
"step": 177
},
{
"epoch": 4.140350877192983,
"grad_norm": 3.1837306022644043,
"learning_rate": 9.152380952380953e-05,
"loss": 2.3846,
"step": 178
},
{
"epoch": 4.16374269005848,
"grad_norm": 3.197889804840088,
"learning_rate": 9.147619047619049e-05,
"loss": 2.3887,
"step": 179
},
{
"epoch": 4.187134502923977,
"grad_norm": 2.6710753440856934,
"learning_rate": 9.142857142857143e-05,
"loss": 2.4217,
"step": 180
},
{
"epoch": 4.2105263157894735,
"grad_norm": 2.70224928855896,
"learning_rate": 9.138095238095239e-05,
"loss": 2.4039,
"step": 181
},
{
"epoch": 4.23391812865497,
"grad_norm": 2.7430830001831055,
"learning_rate": 9.133333333333334e-05,
"loss": 2.4032,
"step": 182
},
{
"epoch": 4.257309941520468,
"grad_norm": 2.428302764892578,
"learning_rate": 9.128571428571428e-05,
"loss": 2.394,
"step": 183
},
{
"epoch": 4.280701754385965,
"grad_norm": 4.183929920196533,
"learning_rate": 9.123809523809524e-05,
"loss": 2.3949,
"step": 184
},
{
"epoch": 4.304093567251462,
"grad_norm": 2.312898874282837,
"learning_rate": 9.11904761904762e-05,
"loss": 2.4049,
"step": 185
},
{
"epoch": 4.3274853801169595,
"grad_norm": 2.6628363132476807,
"learning_rate": 9.114285714285716e-05,
"loss": 2.3866,
"step": 186
},
{
"epoch": 4.350877192982456,
"grad_norm": 2.258638381958008,
"learning_rate": 9.10952380952381e-05,
"loss": 2.3608,
"step": 187
},
{
"epoch": 4.374269005847953,
"grad_norm": 2.543140172958374,
"learning_rate": 9.104761904761904e-05,
"loss": 2.4382,
"step": 188
},
{
"epoch": 4.39766081871345,
"grad_norm": 2.7995975017547607,
"learning_rate": 9.1e-05,
"loss": 2.3809,
"step": 189
},
{
"epoch": 4.421052631578947,
"grad_norm": 2.617652654647827,
"learning_rate": 9.095238095238096e-05,
"loss": 2.3999,
"step": 190
},
{
"epoch": 4.444444444444445,
"grad_norm": 2.1277976036071777,
"learning_rate": 9.09047619047619e-05,
"loss": 2.3982,
"step": 191
},
{
"epoch": 4.4678362573099415,
"grad_norm": 3.09194016456604,
"learning_rate": 9.085714285714286e-05,
"loss": 2.4261,
"step": 192
},
{
"epoch": 4.491228070175438,
"grad_norm": 2.2097442150115967,
"learning_rate": 9.080952380952381e-05,
"loss": 2.4457,
"step": 193
},
{
"epoch": 4.514619883040936,
"grad_norm": 2.3321805000305176,
"learning_rate": 9.076190476190477e-05,
"loss": 2.3977,
"step": 194
},
{
"epoch": 4.538011695906433,
"grad_norm": 2.72891902923584,
"learning_rate": 9.071428571428571e-05,
"loss": 2.4663,
"step": 195
},
{
"epoch": 4.56140350877193,
"grad_norm": 2.142327070236206,
"learning_rate": 9.066666666666667e-05,
"loss": 2.4171,
"step": 196
},
{
"epoch": 4.584795321637427,
"grad_norm": 2.173485517501831,
"learning_rate": 9.061904761904763e-05,
"loss": 2.4219,
"step": 197
},
{
"epoch": 4.6081871345029235,
"grad_norm": 2.439042568206787,
"learning_rate": 9.057142857142857e-05,
"loss": 2.3669,
"step": 198
},
{
"epoch": 4.631578947368421,
"grad_norm": 3.0206170082092285,
"learning_rate": 9.052380952380953e-05,
"loss": 2.4399,
"step": 199
},
{
"epoch": 4.654970760233918,
"grad_norm": 2.1517748832702637,
"learning_rate": 9.047619047619048e-05,
"loss": 2.4484,
"step": 200
},
{
"epoch": 4.678362573099415,
"grad_norm": 3.037619113922119,
"learning_rate": 9.042857142857143e-05,
"loss": 2.44,
"step": 201
},
{
"epoch": 4.701754385964913,
"grad_norm": 2.569814920425415,
"learning_rate": 9.03809523809524e-05,
"loss": 2.3586,
"step": 202
},
{
"epoch": 4.7251461988304095,
"grad_norm": 2.983091115951538,
"learning_rate": 9.033333333333334e-05,
"loss": 2.3362,
"step": 203
},
{
"epoch": 4.748538011695906,
"grad_norm": 2.2038419246673584,
"learning_rate": 9.028571428571428e-05,
"loss": 2.4397,
"step": 204
},
{
"epoch": 4.771929824561403,
"grad_norm": 1.9491034746170044,
"learning_rate": 9.023809523809524e-05,
"loss": 2.3986,
"step": 205
},
{
"epoch": 4.7953216374269,
"grad_norm": 1.9633510112762451,
"learning_rate": 9.019047619047619e-05,
"loss": 2.4017,
"step": 206
},
{
"epoch": 4.818713450292398,
"grad_norm": 5.065985679626465,
"learning_rate": 9.014285714285716e-05,
"loss": 2.4338,
"step": 207
},
{
"epoch": 4.842105263157895,
"grad_norm": 2.1132020950317383,
"learning_rate": 9.00952380952381e-05,
"loss": 2.3536,
"step": 208
},
{
"epoch": 4.8654970760233915,
"grad_norm": 4.070071697235107,
"learning_rate": 9.004761904761905e-05,
"loss": 2.4411,
"step": 209
},
{
"epoch": 4.888888888888889,
"grad_norm": 3.169827699661255,
"learning_rate": 9e-05,
"loss": 2.4099,
"step": 210
},
{
"epoch": 4.912280701754386,
"grad_norm": 2.128169059753418,
"learning_rate": 8.995238095238095e-05,
"loss": 2.4253,
"step": 211
},
{
"epoch": 4.935672514619883,
"grad_norm": 4.3199076652526855,
"learning_rate": 8.990476190476191e-05,
"loss": 2.4334,
"step": 212
},
{
"epoch": 4.95906432748538,
"grad_norm": 2.538106679916382,
"learning_rate": 8.985714285714287e-05,
"loss": 2.4465,
"step": 213
},
{
"epoch": 4.982456140350877,
"grad_norm": 4.168552398681641,
"learning_rate": 8.980952380952381e-05,
"loss": 2.3854,
"step": 214
},
{
"epoch": 5.0,
"grad_norm": 3.4958627223968506,
"learning_rate": 8.976190476190477e-05,
"loss": 1.8025,
"step": 215
},
{
"epoch": 5.0,
"eval_loss": 0.30952247977256775,
"eval_runtime": 3.5036,
"eval_samples_per_second": 61.651,
"eval_steps_per_second": 15.413,
"step": 215
},
{
"epoch": 5.023391812865497,
"grad_norm": 2.416876792907715,
"learning_rate": 8.971428571428571e-05,
"loss": 2.3852,
"step": 216
},
{
"epoch": 5.046783625730994,
"grad_norm": 3.6754486560821533,
"learning_rate": 8.966666666666666e-05,
"loss": 2.4351,
"step": 217
},
{
"epoch": 5.0701754385964914,
"grad_norm": 3.146263599395752,
"learning_rate": 8.961904761904762e-05,
"loss": 2.4231,
"step": 218
},
{
"epoch": 5.093567251461988,
"grad_norm": 2.729116439819336,
"learning_rate": 8.957142857142858e-05,
"loss": 2.3589,
"step": 219
},
{
"epoch": 5.116959064327485,
"grad_norm": 6.3045477867126465,
"learning_rate": 8.952380952380953e-05,
"loss": 2.3972,
"step": 220
},
{
"epoch": 5.140350877192983,
"grad_norm": 4.70556116104126,
"learning_rate": 8.947619047619048e-05,
"loss": 2.3614,
"step": 221
},
{
"epoch": 5.16374269005848,
"grad_norm": 4.070709705352783,
"learning_rate": 8.942857142857142e-05,
"loss": 2.3298,
"step": 222
},
{
"epoch": 5.187134502923977,
"grad_norm": 5.384773254394531,
"learning_rate": 8.938095238095238e-05,
"loss": 2.4206,
"step": 223
},
{
"epoch": 5.2105263157894735,
"grad_norm": 2.430166721343994,
"learning_rate": 8.933333333333334e-05,
"loss": 2.4027,
"step": 224
},
{
"epoch": 5.23391812865497,
"grad_norm": 2.5975165367126465,
"learning_rate": 8.92857142857143e-05,
"loss": 2.4035,
"step": 225
},
{
"epoch": 5.257309941520468,
"grad_norm": 2.0980706214904785,
"learning_rate": 8.923809523809524e-05,
"loss": 2.3119,
"step": 226
},
{
"epoch": 5.280701754385965,
"grad_norm": 3.480513095855713,
"learning_rate": 8.919047619047619e-05,
"loss": 2.3753,
"step": 227
},
{
"epoch": 5.304093567251462,
"grad_norm": 2.642237901687622,
"learning_rate": 8.914285714285715e-05,
"loss": 2.3885,
"step": 228
},
{
"epoch": 5.3274853801169595,
"grad_norm": 3.1167006492614746,
"learning_rate": 8.909523809523809e-05,
"loss": 2.3668,
"step": 229
},
{
"epoch": 5.350877192982456,
"grad_norm": 2.7304916381835938,
"learning_rate": 8.904761904761905e-05,
"loss": 2.3474,
"step": 230
},
{
"epoch": 5.374269005847953,
"grad_norm": 2.4452908039093018,
"learning_rate": 8.900000000000001e-05,
"loss": 2.3501,
"step": 231
},
{
"epoch": 5.39766081871345,
"grad_norm": 3.0729784965515137,
"learning_rate": 8.895238095238095e-05,
"loss": 2.4057,
"step": 232
},
{
"epoch": 5.421052631578947,
"grad_norm": 2.315704345703125,
"learning_rate": 8.890476190476191e-05,
"loss": 2.3416,
"step": 233
},
{
"epoch": 5.444444444444445,
"grad_norm": 2.8339195251464844,
"learning_rate": 8.885714285714286e-05,
"loss": 2.3419,
"step": 234
},
{
"epoch": 5.4678362573099415,
"grad_norm": 4.628677845001221,
"learning_rate": 8.880952380952381e-05,
"loss": 2.3926,
"step": 235
},
{
"epoch": 5.491228070175438,
"grad_norm": 3.533191204071045,
"learning_rate": 8.876190476190477e-05,
"loss": 2.4157,
"step": 236
},
{
"epoch": 5.514619883040936,
"grad_norm": 2.3621981143951416,
"learning_rate": 8.871428571428572e-05,
"loss": 2.3415,
"step": 237
},
{
"epoch": 5.538011695906433,
"grad_norm": 1.851884365081787,
"learning_rate": 8.866666666666668e-05,
"loss": 2.3789,
"step": 238
},
{
"epoch": 5.56140350877193,
"grad_norm": 2.5211541652679443,
"learning_rate": 8.861904761904762e-05,
"loss": 2.3055,
"step": 239
},
{
"epoch": 5.584795321637427,
"grad_norm": 3.753197431564331,
"learning_rate": 8.857142857142857e-05,
"loss": 2.3666,
"step": 240
},
{
"epoch": 5.6081871345029235,
"grad_norm": 2.3129889965057373,
"learning_rate": 8.852380952380954e-05,
"loss": 2.3692,
"step": 241
},
{
"epoch": 5.631578947368421,
"grad_norm": 2.3051323890686035,
"learning_rate": 8.847619047619048e-05,
"loss": 2.4062,
"step": 242
},
{
"epoch": 5.654970760233918,
"grad_norm": 2.1116273403167725,
"learning_rate": 8.842857142857143e-05,
"loss": 2.3403,
"step": 243
},
{
"epoch": 5.678362573099415,
"grad_norm": 2.010376214981079,
"learning_rate": 8.838095238095239e-05,
"loss": 2.2977,
"step": 244
},
{
"epoch": 5.701754385964913,
"grad_norm": 1.641919732093811,
"learning_rate": 8.833333333333333e-05,
"loss": 2.3663,
"step": 245
},
{
"epoch": 5.7251461988304095,
"grad_norm": 2.6189534664154053,
"learning_rate": 8.828571428571429e-05,
"loss": 2.3651,
"step": 246
},
{
"epoch": 5.748538011695906,
"grad_norm": 2.2570645809173584,
"learning_rate": 8.823809523809525e-05,
"loss": 2.3802,
"step": 247
},
{
"epoch": 5.771929824561403,
"grad_norm": 2.970245599746704,
"learning_rate": 8.819047619047619e-05,
"loss": 2.3843,
"step": 248
},
{
"epoch": 5.7953216374269,
"grad_norm": 3.3681797981262207,
"learning_rate": 8.814285714285715e-05,
"loss": 2.3726,
"step": 249
},
{
"epoch": 5.818713450292398,
"grad_norm": 2.2378933429718018,
"learning_rate": 8.80952380952381e-05,
"loss": 2.431,
"step": 250
},
{
"epoch": 5.842105263157895,
"grad_norm": 3.3535726070404053,
"learning_rate": 8.804761904761905e-05,
"loss": 2.3884,
"step": 251
},
{
"epoch": 5.8654970760233915,
"grad_norm": 2.3131027221679688,
"learning_rate": 8.800000000000001e-05,
"loss": 2.4059,
"step": 252
},
{
"epoch": 5.888888888888889,
"grad_norm": 2.942883253097534,
"learning_rate": 8.795238095238096e-05,
"loss": 2.4084,
"step": 253
},
{
"epoch": 5.912280701754386,
"grad_norm": 2.5141403675079346,
"learning_rate": 8.790476190476191e-05,
"loss": 2.408,
"step": 254
},
{
"epoch": 5.935672514619883,
"grad_norm": 2.602572202682495,
"learning_rate": 8.785714285714286e-05,
"loss": 2.3673,
"step": 255
},
{
"epoch": 5.95906432748538,
"grad_norm": 2.1632885932922363,
"learning_rate": 8.78095238095238e-05,
"loss": 2.3696,
"step": 256
},
{
"epoch": 5.982456140350877,
"grad_norm": 4.0230302810668945,
"learning_rate": 8.776190476190476e-05,
"loss": 2.3337,
"step": 257
},
{
"epoch": 6.0,
"grad_norm": 1.374912977218628,
"learning_rate": 8.771428571428572e-05,
"loss": 1.7329,
"step": 258
},
{
"epoch": 6.0,
"eval_loss": 0.30803602933883667,
"eval_runtime": 3.4815,
"eval_samples_per_second": 62.042,
"eval_steps_per_second": 15.511,
"step": 258
},
{
"epoch": 6.023391812865497,
"grad_norm": 3.084089517593384,
"learning_rate": 8.766666666666668e-05,
"loss": 2.3514,
"step": 259
},
{
"epoch": 6.046783625730994,
"grad_norm": 3.1659111976623535,
"learning_rate": 8.761904761904762e-05,
"loss": 2.3374,
"step": 260
},
{
"epoch": 6.0701754385964914,
"grad_norm": 2.125979423522949,
"learning_rate": 8.757142857142857e-05,
"loss": 2.3415,
"step": 261
},
{
"epoch": 6.093567251461988,
"grad_norm": 3.6721763610839844,
"learning_rate": 8.752380952380953e-05,
"loss": 2.3566,
"step": 262
},
{
"epoch": 6.116959064327485,
"grad_norm": 3.2312450408935547,
"learning_rate": 8.747619047619047e-05,
"loss": 2.299,
"step": 263
},
{
"epoch": 6.140350877192983,
"grad_norm": 5.158683776855469,
"learning_rate": 8.742857142857144e-05,
"loss": 2.3623,
"step": 264
},
{
"epoch": 6.16374269005848,
"grad_norm": 3.5738871097564697,
"learning_rate": 8.738095238095239e-05,
"loss": 2.3442,
"step": 265
},
{
"epoch": 6.187134502923977,
"grad_norm": 3.4018423557281494,
"learning_rate": 8.733333333333333e-05,
"loss": 2.3432,
"step": 266
},
{
"epoch": 6.2105263157894735,
"grad_norm": 3.736330509185791,
"learning_rate": 8.728571428571429e-05,
"loss": 2.3612,
"step": 267
},
{
"epoch": 6.23391812865497,
"grad_norm": 2.5863962173461914,
"learning_rate": 8.723809523809524e-05,
"loss": 2.339,
"step": 268
},
{
"epoch": 6.257309941520468,
"grad_norm": 3.0965137481689453,
"learning_rate": 8.71904761904762e-05,
"loss": 2.3152,
"step": 269
},
{
"epoch": 6.280701754385965,
"grad_norm": 3.284605026245117,
"learning_rate": 8.714285714285715e-05,
"loss": 2.3969,
"step": 270
},
{
"epoch": 6.304093567251462,
"grad_norm": 2.4946165084838867,
"learning_rate": 8.70952380952381e-05,
"loss": 2.2957,
"step": 271
},
{
"epoch": 6.3274853801169595,
"grad_norm": 3.2509989738464355,
"learning_rate": 8.704761904761906e-05,
"loss": 2.3151,
"step": 272
},
{
"epoch": 6.350877192982456,
"grad_norm": 3.7346384525299072,
"learning_rate": 8.7e-05,
"loss": 2.313,
"step": 273
},
{
"epoch": 6.374269005847953,
"grad_norm": 2.4033422470092773,
"learning_rate": 8.695238095238095e-05,
"loss": 2.3143,
"step": 274
},
{
"epoch": 6.39766081871345,
"grad_norm": 3.5691659450531006,
"learning_rate": 8.690476190476192e-05,
"loss": 2.347,
"step": 275
},
{
"epoch": 6.421052631578947,
"grad_norm": 3.408168315887451,
"learning_rate": 8.685714285714286e-05,
"loss": 2.3762,
"step": 276
},
{
"epoch": 6.444444444444445,
"grad_norm": 2.701221227645874,
"learning_rate": 8.680952380952382e-05,
"loss": 2.3781,
"step": 277
},
{
"epoch": 6.4678362573099415,
"grad_norm": 3.2413136959075928,
"learning_rate": 8.676190476190477e-05,
"loss": 2.3683,
"step": 278
},
{
"epoch": 6.491228070175438,
"grad_norm": 3.9076733589172363,
"learning_rate": 8.671428571428571e-05,
"loss": 2.3921,
"step": 279
},
{
"epoch": 6.514619883040936,
"grad_norm": 2.388068199157715,
"learning_rate": 8.666666666666667e-05,
"loss": 2.3121,
"step": 280
},
{
"epoch": 6.538011695906433,
"grad_norm": 2.6373026371002197,
"learning_rate": 8.661904761904763e-05,
"loss": 2.3386,
"step": 281
},
{
"epoch": 6.56140350877193,
"grad_norm": 3.8755855560302734,
"learning_rate": 8.657142857142858e-05,
"loss": 2.392,
"step": 282
},
{
"epoch": 6.584795321637427,
"grad_norm": 2.8325467109680176,
"learning_rate": 8.652380952380953e-05,
"loss": 2.3382,
"step": 283
},
{
"epoch": 6.6081871345029235,
"grad_norm": 3.6729838848114014,
"learning_rate": 8.647619047619047e-05,
"loss": 2.373,
"step": 284
},
{
"epoch": 6.631578947368421,
"grad_norm": 3.537522077560425,
"learning_rate": 8.642857142857143e-05,
"loss": 2.3239,
"step": 285
},
{
"epoch": 6.654970760233918,
"grad_norm": 3.385708808898926,
"learning_rate": 8.638095238095239e-05,
"loss": 2.3445,
"step": 286
},
{
"epoch": 6.678362573099415,
"grad_norm": 2.634990692138672,
"learning_rate": 8.633333333333334e-05,
"loss": 2.3395,
"step": 287
},
{
"epoch": 6.701754385964913,
"grad_norm": 3.4988627433776855,
"learning_rate": 8.62857142857143e-05,
"loss": 2.3774,
"step": 288
},
{
"epoch": 6.7251461988304095,
"grad_norm": 2.7816081047058105,
"learning_rate": 8.623809523809524e-05,
"loss": 2.3252,
"step": 289
},
{
"epoch": 6.748538011695906,
"grad_norm": 2.6087560653686523,
"learning_rate": 8.61904761904762e-05,
"loss": 2.3386,
"step": 290
},
{
"epoch": 6.771929824561403,
"grad_norm": 2.98740553855896,
"learning_rate": 8.614285714285714e-05,
"loss": 2.3114,
"step": 291
},
{
"epoch": 6.7953216374269,
"grad_norm": 2.4232819080352783,
"learning_rate": 8.60952380952381e-05,
"loss": 2.2925,
"step": 292
},
{
"epoch": 6.818713450292398,
"grad_norm": 3.8474879264831543,
"learning_rate": 8.604761904761906e-05,
"loss": 2.3508,
"step": 293
},
{
"epoch": 6.842105263157895,
"grad_norm": 3.4887146949768066,
"learning_rate": 8.6e-05,
"loss": 2.3654,
"step": 294
},
{
"epoch": 6.8654970760233915,
"grad_norm": 2.830552101135254,
"learning_rate": 8.595238095238096e-05,
"loss": 2.3511,
"step": 295
},
{
"epoch": 6.888888888888889,
"grad_norm": 3.660485029220581,
"learning_rate": 8.59047619047619e-05,
"loss": 2.3656,
"step": 296
},
{
"epoch": 6.912280701754386,
"grad_norm": 3.242731809616089,
"learning_rate": 8.585714285714286e-05,
"loss": 2.368,
"step": 297
},
{
"epoch": 6.935672514619883,
"grad_norm": 2.875051975250244,
"learning_rate": 8.580952380952382e-05,
"loss": 2.3471,
"step": 298
},
{
"epoch": 6.95906432748538,
"grad_norm": 2.302536725997925,
"learning_rate": 8.576190476190477e-05,
"loss": 2.3601,
"step": 299
},
{
"epoch": 6.982456140350877,
"grad_norm": 4.208285808563232,
"learning_rate": 8.571428571428571e-05,
"loss": 2.3197,
"step": 300
},
{
"epoch": 7.0,
"grad_norm": 1.7053803205490112,
"learning_rate": 8.566666666666667e-05,
"loss": 1.762,
"step": 301
},
{
"epoch": 7.0,
"eval_loss": 0.31038928031921387,
"eval_runtime": 3.469,
"eval_samples_per_second": 62.266,
"eval_steps_per_second": 15.567,
"step": 301
},
{
"epoch": 7.023391812865497,
"grad_norm": 3.534348964691162,
"learning_rate": 8.561904761904762e-05,
"loss": 2.3135,
"step": 302
},
{
"epoch": 7.046783625730994,
"grad_norm": 2.444737434387207,
"learning_rate": 8.557142857142857e-05,
"loss": 2.3049,
"step": 303
},
{
"epoch": 7.0701754385964914,
"grad_norm": 2.388345241546631,
"learning_rate": 8.552380952380953e-05,
"loss": 2.2772,
"step": 304
},
{
"epoch": 7.093567251461988,
"grad_norm": 3.2836976051330566,
"learning_rate": 8.547619047619048e-05,
"loss": 2.2813,
"step": 305
},
{
"epoch": 7.116959064327485,
"grad_norm": 2.2374956607818604,
"learning_rate": 8.542857142857144e-05,
"loss": 2.257,
"step": 306
},
{
"epoch": 7.140350877192983,
"grad_norm": 2.89320969581604,
"learning_rate": 8.538095238095238e-05,
"loss": 2.3366,
"step": 307
},
{
"epoch": 7.16374269005848,
"grad_norm": 2.2934420108795166,
"learning_rate": 8.533333333333334e-05,
"loss": 2.2922,
"step": 308
},
{
"epoch": 7.187134502923977,
"grad_norm": 2.66508150100708,
"learning_rate": 8.52857142857143e-05,
"loss": 2.2824,
"step": 309
},
{
"epoch": 7.2105263157894735,
"grad_norm": 2.668550729751587,
"learning_rate": 8.523809523809524e-05,
"loss": 2.3064,
"step": 310
},
{
"epoch": 7.23391812865497,
"grad_norm": 2.6760103702545166,
"learning_rate": 8.51904761904762e-05,
"loss": 2.2928,
"step": 311
},
{
"epoch": 7.257309941520468,
"grad_norm": 2.7909204959869385,
"learning_rate": 8.514285714285714e-05,
"loss": 2.2599,
"step": 312
},
{
"epoch": 7.280701754385965,
"grad_norm": 2.3334522247314453,
"learning_rate": 8.509523809523809e-05,
"loss": 2.2526,
"step": 313
},
{
"epoch": 7.304093567251462,
"grad_norm": 3.7709286212921143,
"learning_rate": 8.504761904761905e-05,
"loss": 2.3113,
"step": 314
},
{
"epoch": 7.3274853801169595,
"grad_norm": 2.955559730529785,
"learning_rate": 8.5e-05,
"loss": 2.3101,
"step": 315
},
{
"epoch": 7.350877192982456,
"grad_norm": 2.5498976707458496,
"learning_rate": 8.495238095238096e-05,
"loss": 2.2774,
"step": 316
},
{
"epoch": 7.374269005847953,
"grad_norm": 2.2549405097961426,
"learning_rate": 8.490476190476191e-05,
"loss": 2.3074,
"step": 317
},
{
"epoch": 7.39766081871345,
"grad_norm": 3.085324764251709,
"learning_rate": 8.485714285714285e-05,
"loss": 2.2579,
"step": 318
},
{
"epoch": 7.421052631578947,
"grad_norm": 3.924325942993164,
"learning_rate": 8.480952380952381e-05,
"loss": 2.2452,
"step": 319
},
{
"epoch": 7.444444444444445,
"grad_norm": 2.55413818359375,
"learning_rate": 8.476190476190477e-05,
"loss": 2.3174,
"step": 320
},
{
"epoch": 7.4678362573099415,
"grad_norm": 3.082507371902466,
"learning_rate": 8.471428571428573e-05,
"loss": 2.3215,
"step": 321
},
{
"epoch": 7.491228070175438,
"grad_norm": 3.0043070316314697,
"learning_rate": 8.466666666666667e-05,
"loss": 2.2896,
"step": 322
},
{
"epoch": 7.514619883040936,
"grad_norm": 2.590393304824829,
"learning_rate": 8.461904761904762e-05,
"loss": 2.3028,
"step": 323
},
{
"epoch": 7.538011695906433,
"grad_norm": 2.350078821182251,
"learning_rate": 8.457142857142858e-05,
"loss": 2.2711,
"step": 324
},
{
"epoch": 7.56140350877193,
"grad_norm": 2.553441047668457,
"learning_rate": 8.452380952380952e-05,
"loss": 2.3428,
"step": 325
},
{
"epoch": 7.584795321637427,
"grad_norm": 2.492304801940918,
"learning_rate": 8.447619047619048e-05,
"loss": 2.3009,
"step": 326
},
{
"epoch": 7.6081871345029235,
"grad_norm": 3.329228162765503,
"learning_rate": 8.442857142857144e-05,
"loss": 2.3234,
"step": 327
},
{
"epoch": 7.631578947368421,
"grad_norm": 2.6081371307373047,
"learning_rate": 8.438095238095238e-05,
"loss": 2.3028,
"step": 328
},
{
"epoch": 7.654970760233918,
"grad_norm": 2.6931183338165283,
"learning_rate": 8.433333333333334e-05,
"loss": 2.2992,
"step": 329
},
{
"epoch": 7.678362573099415,
"grad_norm": 2.0675008296966553,
"learning_rate": 8.428571428571429e-05,
"loss": 2.3682,
"step": 330
},
{
"epoch": 7.701754385964913,
"grad_norm": 2.3411147594451904,
"learning_rate": 8.423809523809524e-05,
"loss": 2.2903,
"step": 331
},
{
"epoch": 7.7251461988304095,
"grad_norm": 3.0642454624176025,
"learning_rate": 8.41904761904762e-05,
"loss": 2.3214,
"step": 332
},
{
"epoch": 7.748538011695906,
"grad_norm": 3.137528896331787,
"learning_rate": 8.414285714285715e-05,
"loss": 2.3448,
"step": 333
},
{
"epoch": 7.771929824561403,
"grad_norm": 1.907626748085022,
"learning_rate": 8.40952380952381e-05,
"loss": 2.3362,
"step": 334
},
{
"epoch": 7.7953216374269,
"grad_norm": 2.599119186401367,
"learning_rate": 8.404761904761905e-05,
"loss": 2.2995,
"step": 335
},
{
"epoch": 7.818713450292398,
"grad_norm": 2.1835620403289795,
"learning_rate": 8.4e-05,
"loss": 2.3506,
"step": 336
},
{
"epoch": 7.842105263157895,
"grad_norm": 2.6803245544433594,
"learning_rate": 8.395238095238095e-05,
"loss": 2.2936,
"step": 337
},
{
"epoch": 7.8654970760233915,
"grad_norm": 2.978414297103882,
"learning_rate": 8.390476190476191e-05,
"loss": 2.3327,
"step": 338
},
{
"epoch": 7.888888888888889,
"grad_norm": 3.149514675140381,
"learning_rate": 8.385714285714286e-05,
"loss": 2.2973,
"step": 339
},
{
"epoch": 7.912280701754386,
"grad_norm": 2.3366856575012207,
"learning_rate": 8.380952380952382e-05,
"loss": 2.3136,
"step": 340
},
{
"epoch": 7.935672514619883,
"grad_norm": 2.9807534217834473,
"learning_rate": 8.376190476190476e-05,
"loss": 2.2854,
"step": 341
},
{
"epoch": 7.95906432748538,
"grad_norm": 2.4020419120788574,
"learning_rate": 8.371428571428572e-05,
"loss": 2.3807,
"step": 342
},
{
"epoch": 7.982456140350877,
"grad_norm": 3.0339105129241943,
"learning_rate": 8.366666666666668e-05,
"loss": 2.3336,
"step": 343
},
{
"epoch": 8.0,
"grad_norm": 1.9973078966140747,
"learning_rate": 8.361904761904762e-05,
"loss": 1.7577,
"step": 344
},
{
"epoch": 8.0,
"eval_loss": 0.3090885281562805,
"eval_runtime": 3.4516,
"eval_samples_per_second": 62.58,
"eval_steps_per_second": 15.645,
"step": 344
},
{
"epoch": 8.023391812865498,
"grad_norm": 2.9275968074798584,
"learning_rate": 8.357142857142858e-05,
"loss": 2.278,
"step": 345
},
{
"epoch": 8.046783625730994,
"grad_norm": 2.7276556491851807,
"learning_rate": 8.352380952380952e-05,
"loss": 2.2362,
"step": 346
},
{
"epoch": 8.070175438596491,
"grad_norm": 4.678555011749268,
"learning_rate": 8.347619047619048e-05,
"loss": 2.2201,
"step": 347
},
{
"epoch": 8.093567251461987,
"grad_norm": 4.674102306365967,
"learning_rate": 8.342857142857143e-05,
"loss": 2.3193,
"step": 348
},
{
"epoch": 8.116959064327485,
"grad_norm": 3.837069272994995,
"learning_rate": 8.338095238095239e-05,
"loss": 2.2565,
"step": 349
},
{
"epoch": 8.140350877192983,
"grad_norm": 3.5084426403045654,
"learning_rate": 8.333333333333334e-05,
"loss": 2.2948,
"step": 350
},
{
"epoch": 8.163742690058479,
"grad_norm": 2.2970736026763916,
"learning_rate": 8.328571428571429e-05,
"loss": 2.2341,
"step": 351
},
{
"epoch": 8.187134502923977,
"grad_norm": 3.0149991512298584,
"learning_rate": 8.323809523809523e-05,
"loss": 2.2382,
"step": 352
},
{
"epoch": 8.210526315789474,
"grad_norm": 3.103729724884033,
"learning_rate": 8.319047619047619e-05,
"loss": 2.2195,
"step": 353
},
{
"epoch": 8.23391812865497,
"grad_norm": 3.5624403953552246,
"learning_rate": 8.314285714285715e-05,
"loss": 2.2907,
"step": 354
},
{
"epoch": 8.257309941520468,
"grad_norm": 3.337334156036377,
"learning_rate": 8.309523809523811e-05,
"loss": 2.2537,
"step": 355
},
{
"epoch": 8.280701754385966,
"grad_norm": 2.8476223945617676,
"learning_rate": 8.304761904761905e-05,
"loss": 2.2899,
"step": 356
},
{
"epoch": 8.304093567251462,
"grad_norm": 3.415135622024536,
"learning_rate": 8.3e-05,
"loss": 2.2165,
"step": 357
},
{
"epoch": 8.32748538011696,
"grad_norm": 3.373770236968994,
"learning_rate": 8.295238095238096e-05,
"loss": 2.2417,
"step": 358
},
{
"epoch": 8.350877192982455,
"grad_norm": 2.926384925842285,
"learning_rate": 8.29047619047619e-05,
"loss": 2.2356,
"step": 359
},
{
"epoch": 8.374269005847953,
"grad_norm": 3.749796152114868,
"learning_rate": 8.285714285714287e-05,
"loss": 2.2539,
"step": 360
},
{
"epoch": 8.397660818713451,
"grad_norm": 2.3710129261016846,
"learning_rate": 8.280952380952382e-05,
"loss": 2.2066,
"step": 361
},
{
"epoch": 8.421052631578947,
"grad_norm": 2.417635440826416,
"learning_rate": 8.276190476190476e-05,
"loss": 2.2212,
"step": 362
},
{
"epoch": 8.444444444444445,
"grad_norm": 2.252788543701172,
"learning_rate": 8.271428571428572e-05,
"loss": 2.202,
"step": 363
},
{
"epoch": 8.46783625730994,
"grad_norm": 2.487130641937256,
"learning_rate": 8.266666666666667e-05,
"loss": 2.2888,
"step": 364
},
{
"epoch": 8.491228070175438,
"grad_norm": 3.3258721828460693,
"learning_rate": 8.261904761904762e-05,
"loss": 2.2453,
"step": 365
},
{
"epoch": 8.514619883040936,
"grad_norm": 3.4949452877044678,
"learning_rate": 8.257142857142858e-05,
"loss": 2.2536,
"step": 366
},
{
"epoch": 8.538011695906432,
"grad_norm": 2.923041343688965,
"learning_rate": 8.252380952380953e-05,
"loss": 2.2622,
"step": 367
},
{
"epoch": 8.56140350877193,
"grad_norm": 4.235454559326172,
"learning_rate": 8.247619047619049e-05,
"loss": 2.2604,
"step": 368
},
{
"epoch": 8.584795321637428,
"grad_norm": 3.192513942718506,
"learning_rate": 8.242857142857143e-05,
"loss": 2.26,
"step": 369
},
{
"epoch": 8.608187134502923,
"grad_norm": 3.3745954036712646,
"learning_rate": 8.238095238095238e-05,
"loss": 2.3249,
"step": 370
},
{
"epoch": 8.631578947368421,
"grad_norm": 2.696607828140259,
"learning_rate": 8.233333333333333e-05,
"loss": 2.2771,
"step": 371
},
{
"epoch": 8.654970760233919,
"grad_norm": 3.3591575622558594,
"learning_rate": 8.228571428571429e-05,
"loss": 2.2722,
"step": 372
},
{
"epoch": 8.678362573099415,
"grad_norm": 2.7265303134918213,
"learning_rate": 8.223809523809525e-05,
"loss": 2.3042,
"step": 373
},
{
"epoch": 8.701754385964913,
"grad_norm": 3.1058547496795654,
"learning_rate": 8.21904761904762e-05,
"loss": 2.2676,
"step": 374
},
{
"epoch": 8.725146198830409,
"grad_norm": 3.8888497352600098,
"learning_rate": 8.214285714285714e-05,
"loss": 2.2778,
"step": 375
},
{
"epoch": 8.748538011695906,
"grad_norm": 2.25227427482605,
"learning_rate": 8.20952380952381e-05,
"loss": 2.2814,
"step": 376
},
{
"epoch": 8.771929824561404,
"grad_norm": 4.398237228393555,
"learning_rate": 8.204761904761906e-05,
"loss": 2.317,
"step": 377
},
{
"epoch": 8.7953216374269,
"grad_norm": 3.410182237625122,
"learning_rate": 8.2e-05,
"loss": 2.3045,
"step": 378
},
{
"epoch": 8.818713450292398,
"grad_norm": 2.4230539798736572,
"learning_rate": 8.195238095238096e-05,
"loss": 2.3019,
"step": 379
},
{
"epoch": 8.842105263157894,
"grad_norm": 3.115366220474243,
"learning_rate": 8.19047619047619e-05,
"loss": 2.2107,
"step": 380
},
{
"epoch": 8.865497076023392,
"grad_norm": 4.855501174926758,
"learning_rate": 8.185714285714286e-05,
"loss": 2.2712,
"step": 381
},
{
"epoch": 8.88888888888889,
"grad_norm": 2.77266001701355,
"learning_rate": 8.180952380952381e-05,
"loss": 2.2569,
"step": 382
},
{
"epoch": 8.912280701754385,
"grad_norm": 2.3498106002807617,
"learning_rate": 8.176190476190477e-05,
"loss": 2.2951,
"step": 383
},
{
"epoch": 8.935672514619883,
"grad_norm": 3.1771063804626465,
"learning_rate": 8.171428571428572e-05,
"loss": 2.2536,
"step": 384
},
{
"epoch": 8.95906432748538,
"grad_norm": 2.889338970184326,
"learning_rate": 8.166666666666667e-05,
"loss": 2.2749,
"step": 385
},
{
"epoch": 8.982456140350877,
"grad_norm": 5.354519367218018,
"learning_rate": 8.161904761904763e-05,
"loss": 2.322,
"step": 386
},
{
"epoch": 9.0,
"grad_norm": 3.0712740421295166,
"learning_rate": 8.157142857142857e-05,
"loss": 1.7048,
"step": 387
},
{
"epoch": 9.0,
"eval_loss": 0.3110826313495636,
"eval_runtime": 3.4732,
"eval_samples_per_second": 62.191,
"eval_steps_per_second": 15.548,
"step": 387
},
{
"epoch": 9.023391812865498,
"grad_norm": 4.209856033325195,
"learning_rate": 8.152380952380953e-05,
"loss": 2.2796,
"step": 388
},
{
"epoch": 9.046783625730994,
"grad_norm": 4.553707122802734,
"learning_rate": 8.147619047619049e-05,
"loss": 2.1917,
"step": 389
},
{
"epoch": 9.070175438596491,
"grad_norm": 2.235180377960205,
"learning_rate": 8.142857142857143e-05,
"loss": 2.2586,
"step": 390
},
{
"epoch": 9.093567251461987,
"grad_norm": 3.6155264377593994,
"learning_rate": 8.138095238095238e-05,
"loss": 2.2361,
"step": 391
},
{
"epoch": 9.116959064327485,
"grad_norm": 3.4730417728424072,
"learning_rate": 8.133333333333334e-05,
"loss": 2.2257,
"step": 392
},
{
"epoch": 9.140350877192983,
"grad_norm": 3.765535831451416,
"learning_rate": 8.128571428571428e-05,
"loss": 2.1642,
"step": 393
},
{
"epoch": 9.163742690058479,
"grad_norm": 3.1897642612457275,
"learning_rate": 8.123809523809525e-05,
"loss": 2.2229,
"step": 394
},
{
"epoch": 9.187134502923977,
"grad_norm": 3.8044841289520264,
"learning_rate": 8.11904761904762e-05,
"loss": 2.1882,
"step": 395
},
{
"epoch": 9.210526315789474,
"grad_norm": 3.561450719833374,
"learning_rate": 8.114285714285714e-05,
"loss": 2.1809,
"step": 396
},
{
"epoch": 9.23391812865497,
"grad_norm": 3.2488150596618652,
"learning_rate": 8.10952380952381e-05,
"loss": 2.2111,
"step": 397
},
{
"epoch": 9.257309941520468,
"grad_norm": 3.5442261695861816,
"learning_rate": 8.104761904761905e-05,
"loss": 2.1749,
"step": 398
},
{
"epoch": 9.280701754385966,
"grad_norm": 2.66875958442688,
"learning_rate": 8.1e-05,
"loss": 2.1776,
"step": 399
},
{
"epoch": 9.304093567251462,
"grad_norm": 2.672934055328369,
"learning_rate": 8.095238095238096e-05,
"loss": 2.1938,
"step": 400
},
{
"epoch": 9.32748538011696,
"grad_norm": 2.8488197326660156,
"learning_rate": 8.090476190476191e-05,
"loss": 2.1875,
"step": 401
},
{
"epoch": 9.350877192982455,
"grad_norm": 3.4410758018493652,
"learning_rate": 8.085714285714287e-05,
"loss": 2.2253,
"step": 402
},
{
"epoch": 9.374269005847953,
"grad_norm": 3.20196270942688,
"learning_rate": 8.080952380952381e-05,
"loss": 2.2043,
"step": 403
},
{
"epoch": 9.397660818713451,
"grad_norm": 3.485410213470459,
"learning_rate": 8.076190476190475e-05,
"loss": 2.2155,
"step": 404
},
{
"epoch": 9.421052631578947,
"grad_norm": 3.2198355197906494,
"learning_rate": 8.071428571428573e-05,
"loss": 2.2339,
"step": 405
},
{
"epoch": 9.444444444444445,
"grad_norm": 4.449166774749756,
"learning_rate": 8.066666666666667e-05,
"loss": 2.2157,
"step": 406
},
{
"epoch": 9.46783625730994,
"grad_norm": 3.3024957180023193,
"learning_rate": 8.061904761904763e-05,
"loss": 2.2373,
"step": 407
},
{
"epoch": 9.491228070175438,
"grad_norm": 4.262597560882568,
"learning_rate": 8.057142857142857e-05,
"loss": 2.2118,
"step": 408
},
{
"epoch": 9.514619883040936,
"grad_norm": 3.014378070831299,
"learning_rate": 8.052380952380952e-05,
"loss": 2.1513,
"step": 409
},
{
"epoch": 9.538011695906432,
"grad_norm": 2.3644843101501465,
"learning_rate": 8.047619047619048e-05,
"loss": 2.2105,
"step": 410
},
{
"epoch": 9.56140350877193,
"grad_norm": 3.573030471801758,
"learning_rate": 8.042857142857144e-05,
"loss": 2.2014,
"step": 411
},
{
"epoch": 9.584795321637428,
"grad_norm": 3.49285626411438,
"learning_rate": 8.03809523809524e-05,
"loss": 2.2258,
"step": 412
},
{
"epoch": 9.608187134502923,
"grad_norm": 2.701261281967163,
"learning_rate": 8.033333333333334e-05,
"loss": 2.1926,
"step": 413
},
{
"epoch": 9.631578947368421,
"grad_norm": 3.1829402446746826,
"learning_rate": 8.028571428571428e-05,
"loss": 2.1833,
"step": 414
},
{
"epoch": 9.654970760233919,
"grad_norm": 3.5617990493774414,
"learning_rate": 8.023809523809524e-05,
"loss": 2.2629,
"step": 415
},
{
"epoch": 9.678362573099415,
"grad_norm": 3.1133735179901123,
"learning_rate": 8.01904761904762e-05,
"loss": 2.2344,
"step": 416
},
{
"epoch": 9.701754385964913,
"grad_norm": 3.0228543281555176,
"learning_rate": 8.014285714285715e-05,
"loss": 2.2076,
"step": 417
},
{
"epoch": 9.725146198830409,
"grad_norm": 4.193742752075195,
"learning_rate": 8.00952380952381e-05,
"loss": 2.2325,
"step": 418
},
{
"epoch": 9.748538011695906,
"grad_norm": 4.473887920379639,
"learning_rate": 8.004761904761905e-05,
"loss": 2.2241,
"step": 419
},
{
"epoch": 9.771929824561404,
"grad_norm": 3.292799234390259,
"learning_rate": 8e-05,
"loss": 2.2584,
"step": 420
},
{
"epoch": 9.7953216374269,
"grad_norm": 6.8649516105651855,
"learning_rate": 7.995238095238095e-05,
"loss": 2.2649,
"step": 421
},
{
"epoch": 9.818713450292398,
"grad_norm": 3.9372193813323975,
"learning_rate": 7.990476190476191e-05,
"loss": 2.3003,
"step": 422
},
{
"epoch": 9.842105263157894,
"grad_norm": 3.112377405166626,
"learning_rate": 7.985714285714287e-05,
"loss": 2.2106,
"step": 423
},
{
"epoch": 9.865497076023392,
"grad_norm": 2.902355909347534,
"learning_rate": 7.980952380952381e-05,
"loss": 2.2946,
"step": 424
},
{
"epoch": 9.88888888888889,
"grad_norm": 2.473977565765381,
"learning_rate": 7.976190476190477e-05,
"loss": 2.2175,
"step": 425
},
{
"epoch": 9.912280701754385,
"grad_norm": 4.093216419219971,
"learning_rate": 7.971428571428572e-05,
"loss": 2.2092,
"step": 426
},
{
"epoch": 9.935672514619883,
"grad_norm": 2.5776782035827637,
"learning_rate": 7.966666666666666e-05,
"loss": 2.219,
"step": 427
},
{
"epoch": 9.95906432748538,
"grad_norm": 3.246060371398926,
"learning_rate": 7.961904761904763e-05,
"loss": 2.239,
"step": 428
},
{
"epoch": 9.982456140350877,
"grad_norm": 2.8515846729278564,
"learning_rate": 7.957142857142858e-05,
"loss": 2.2154,
"step": 429
},
{
"epoch": 10.0,
"grad_norm": 2.685945510864258,
"learning_rate": 7.952380952380952e-05,
"loss": 1.6512,
"step": 430
},
{
"epoch": 10.0,
"eval_loss": 0.3121136426925659,
"eval_runtime": 3.5135,
"eval_samples_per_second": 61.478,
"eval_steps_per_second": 15.369,
"step": 430
},
{
"epoch": 10.023391812865498,
"grad_norm": 2.626622200012207,
"learning_rate": 7.947619047619048e-05,
"loss": 2.1368,
"step": 431
},
{
"epoch": 10.046783625730994,
"grad_norm": 3.062572479248047,
"learning_rate": 7.942857142857143e-05,
"loss": 2.1211,
"step": 432
},
{
"epoch": 10.070175438596491,
"grad_norm": 3.3042075634002686,
"learning_rate": 7.938095238095238e-05,
"loss": 2.1274,
"step": 433
},
{
"epoch": 10.093567251461987,
"grad_norm": 3.31258225440979,
"learning_rate": 7.933333333333334e-05,
"loss": 2.1523,
"step": 434
},
{
"epoch": 10.116959064327485,
"grad_norm": 3.294790506362915,
"learning_rate": 7.928571428571429e-05,
"loss": 2.1665,
"step": 435
},
{
"epoch": 10.140350877192983,
"grad_norm": 2.905733585357666,
"learning_rate": 7.923809523809524e-05,
"loss": 2.1074,
"step": 436
},
{
"epoch": 10.163742690058479,
"grad_norm": 3.4812822341918945,
"learning_rate": 7.919047619047619e-05,
"loss": 2.1628,
"step": 437
},
{
"epoch": 10.187134502923977,
"grad_norm": 3.2400975227355957,
"learning_rate": 7.914285714285715e-05,
"loss": 2.1655,
"step": 438
},
{
"epoch": 10.210526315789474,
"grad_norm": 3.523585081100464,
"learning_rate": 7.90952380952381e-05,
"loss": 2.1843,
"step": 439
},
{
"epoch": 10.23391812865497,
"grad_norm": 3.4338159561157227,
"learning_rate": 7.904761904761905e-05,
"loss": 2.0997,
"step": 440
},
{
"epoch": 10.257309941520468,
"grad_norm": 2.533015251159668,
"learning_rate": 7.900000000000001e-05,
"loss": 2.1057,
"step": 441
},
{
"epoch": 10.280701754385966,
"grad_norm": 4.4363322257995605,
"learning_rate": 7.895238095238095e-05,
"loss": 2.164,
"step": 442
},
{
"epoch": 10.304093567251462,
"grad_norm": 4.817160606384277,
"learning_rate": 7.89047619047619e-05,
"loss": 2.1277,
"step": 443
},
{
"epoch": 10.32748538011696,
"grad_norm": 3.683321475982666,
"learning_rate": 7.885714285714286e-05,
"loss": 2.1394,
"step": 444
},
{
"epoch": 10.350877192982455,
"grad_norm": 4.308437347412109,
"learning_rate": 7.880952380952382e-05,
"loss": 2.1484,
"step": 445
},
{
"epoch": 10.374269005847953,
"grad_norm": 5.58378791809082,
"learning_rate": 7.876190476190477e-05,
"loss": 2.1494,
"step": 446
},
{
"epoch": 10.397660818713451,
"grad_norm": 3.057953357696533,
"learning_rate": 7.871428571428572e-05,
"loss": 2.1407,
"step": 447
},
{
"epoch": 10.421052631578947,
"grad_norm": 5.560679912567139,
"learning_rate": 7.866666666666666e-05,
"loss": 2.133,
"step": 448
},
{
"epoch": 10.444444444444445,
"grad_norm": 3.6114635467529297,
"learning_rate": 7.861904761904762e-05,
"loss": 2.177,
"step": 449
},
{
"epoch": 10.46783625730994,
"grad_norm": 4.344070911407471,
"learning_rate": 7.857142857142858e-05,
"loss": 2.15,
"step": 450
},
{
"epoch": 10.491228070175438,
"grad_norm": 5.889255046844482,
"learning_rate": 7.852380952380954e-05,
"loss": 2.1536,
"step": 451
},
{
"epoch": 10.514619883040936,
"grad_norm": 2.920121192932129,
"learning_rate": 7.847619047619048e-05,
"loss": 2.1646,
"step": 452
},
{
"epoch": 10.538011695906432,
"grad_norm": 4.948288440704346,
"learning_rate": 7.842857142857143e-05,
"loss": 2.1893,
"step": 453
},
{
"epoch": 10.56140350877193,
"grad_norm": 3.4220526218414307,
"learning_rate": 7.838095238095239e-05,
"loss": 2.1845,
"step": 454
},
{
"epoch": 10.584795321637428,
"grad_norm": 5.378625869750977,
"learning_rate": 7.833333333333333e-05,
"loss": 2.1713,
"step": 455
},
{
"epoch": 10.608187134502923,
"grad_norm": 6.9921159744262695,
"learning_rate": 7.828571428571429e-05,
"loss": 2.1645,
"step": 456
},
{
"epoch": 10.631578947368421,
"grad_norm": 3.1062328815460205,
"learning_rate": 7.823809523809525e-05,
"loss": 2.1829,
"step": 457
},
{
"epoch": 10.654970760233919,
"grad_norm": 3.9356822967529297,
"learning_rate": 7.819047619047619e-05,
"loss": 2.1961,
"step": 458
},
{
"epoch": 10.678362573099415,
"grad_norm": 2.3222548961639404,
"learning_rate": 7.814285714285715e-05,
"loss": 2.1581,
"step": 459
},
{
"epoch": 10.701754385964913,
"grad_norm": 5.491628646850586,
"learning_rate": 7.80952380952381e-05,
"loss": 2.1506,
"step": 460
},
{
"epoch": 10.725146198830409,
"grad_norm": 3.369358777999878,
"learning_rate": 7.804761904761905e-05,
"loss": 2.1679,
"step": 461
},
{
"epoch": 10.748538011695906,
"grad_norm": 6.5891523361206055,
"learning_rate": 7.800000000000001e-05,
"loss": 2.1454,
"step": 462
},
{
"epoch": 10.771929824561404,
"grad_norm": 7.308892726898193,
"learning_rate": 7.795238095238096e-05,
"loss": 2.2065,
"step": 463
},
{
"epoch": 10.7953216374269,
"grad_norm": 3.553459882736206,
"learning_rate": 7.790476190476192e-05,
"loss": 2.1726,
"step": 464
},
{
"epoch": 10.818713450292398,
"grad_norm": 7.7826642990112305,
"learning_rate": 7.785714285714286e-05,
"loss": 2.1967,
"step": 465
},
{
"epoch": 10.842105263157894,
"grad_norm": 10.483582496643066,
"learning_rate": 7.78095238095238e-05,
"loss": 2.2286,
"step": 466
},
{
"epoch": 10.865497076023392,
"grad_norm": 4.64753532409668,
"learning_rate": 7.776190476190476e-05,
"loss": 2.1643,
"step": 467
},
{
"epoch": 10.88888888888889,
"grad_norm": 4.482329845428467,
"learning_rate": 7.771428571428572e-05,
"loss": 2.1681,
"step": 468
},
{
"epoch": 10.912280701754385,
"grad_norm": 8.45293140411377,
"learning_rate": 7.766666666666667e-05,
"loss": 2.1633,
"step": 469
},
{
"epoch": 10.935672514619883,
"grad_norm": 5.571048736572266,
"learning_rate": 7.761904761904762e-05,
"loss": 2.2138,
"step": 470
},
{
"epoch": 10.95906432748538,
"grad_norm": 3.7479379177093506,
"learning_rate": 7.757142857142857e-05,
"loss": 2.1603,
"step": 471
},
{
"epoch": 10.982456140350877,
"grad_norm": 6.011244297027588,
"learning_rate": 7.752380952380953e-05,
"loss": 2.22,
"step": 472
},
{
"epoch": 11.0,
"grad_norm": 4.298096179962158,
"learning_rate": 7.747619047619049e-05,
"loss": 1.6397,
"step": 473
},
{
"epoch": 11.0,
"eval_loss": 0.3189237415790558,
"eval_runtime": 3.4973,
"eval_samples_per_second": 61.762,
"eval_steps_per_second": 15.441,
"step": 473
},
{
"epoch": 11.023391812865498,
"grad_norm": 3.59027361869812,
"learning_rate": 7.742857142857143e-05,
"loss": 2.0829,
"step": 474
},
{
"epoch": 11.046783625730994,
"grad_norm": 4.3692851066589355,
"learning_rate": 7.738095238095239e-05,
"loss": 2.0586,
"step": 475
},
{
"epoch": 11.070175438596491,
"grad_norm": 4.701996803283691,
"learning_rate": 7.733333333333333e-05,
"loss": 2.1107,
"step": 476
},
{
"epoch": 11.093567251461987,
"grad_norm": 3.7033441066741943,
"learning_rate": 7.728571428571429e-05,
"loss": 2.0621,
"step": 477
},
{
"epoch": 11.116959064327485,
"grad_norm": 5.6654887199401855,
"learning_rate": 7.723809523809524e-05,
"loss": 2.0648,
"step": 478
},
{
"epoch": 11.140350877192983,
"grad_norm": 3.5010275840759277,
"learning_rate": 7.71904761904762e-05,
"loss": 2.0696,
"step": 479
},
{
"epoch": 11.163742690058479,
"grad_norm": 5.883917808532715,
"learning_rate": 7.714285714285715e-05,
"loss": 2.0749,
"step": 480
},
{
"epoch": 11.187134502923977,
"grad_norm": 5.297660827636719,
"learning_rate": 7.70952380952381e-05,
"loss": 2.0188,
"step": 481
},
{
"epoch": 11.210526315789474,
"grad_norm": 3.6153805255889893,
"learning_rate": 7.704761904761904e-05,
"loss": 2.0778,
"step": 482
},
{
"epoch": 11.23391812865497,
"grad_norm": 5.116675853729248,
"learning_rate": 7.7e-05,
"loss": 2.0994,
"step": 483
},
{
"epoch": 11.257309941520468,
"grad_norm": 2.987740993499756,
"learning_rate": 7.695238095238096e-05,
"loss": 2.0525,
"step": 484
},
{
"epoch": 11.280701754385966,
"grad_norm": 4.422389984130859,
"learning_rate": 7.690476190476192e-05,
"loss": 2.0637,
"step": 485
},
{
"epoch": 11.304093567251462,
"grad_norm": 3.5934934616088867,
"learning_rate": 7.685714285714286e-05,
"loss": 2.1044,
"step": 486
},
{
"epoch": 11.32748538011696,
"grad_norm": 5.785375118255615,
"learning_rate": 7.680952380952381e-05,
"loss": 2.0194,
"step": 487
},
{
"epoch": 11.350877192982455,
"grad_norm": 4.386531352996826,
"learning_rate": 7.676190476190477e-05,
"loss": 2.0181,
"step": 488
},
{
"epoch": 11.374269005847953,
"grad_norm": 3.4447247982025146,
"learning_rate": 7.671428571428571e-05,
"loss": 2.0698,
"step": 489
},
{
"epoch": 11.397660818713451,
"grad_norm": 5.470218658447266,
"learning_rate": 7.666666666666667e-05,
"loss": 2.0635,
"step": 490
},
{
"epoch": 11.421052631578947,
"grad_norm": 3.2394590377807617,
"learning_rate": 7.661904761904763e-05,
"loss": 2.0613,
"step": 491
},
{
"epoch": 11.444444444444445,
"grad_norm": 7.506831169128418,
"learning_rate": 7.657142857142857e-05,
"loss": 2.0938,
"step": 492
},
{
"epoch": 11.46783625730994,
"grad_norm": 8.396202087402344,
"learning_rate": 7.652380952380953e-05,
"loss": 2.0775,
"step": 493
},
{
"epoch": 11.491228070175438,
"grad_norm": 3.5975074768066406,
"learning_rate": 7.647619047619048e-05,
"loss": 2.1297,
"step": 494
},
{
"epoch": 11.514619883040936,
"grad_norm": 7.446574687957764,
"learning_rate": 7.642857142857143e-05,
"loss": 2.1108,
"step": 495
},
{
"epoch": 11.538011695906432,
"grad_norm": 9.395698547363281,
"learning_rate": 7.638095238095239e-05,
"loss": 2.1134,
"step": 496
},
{
"epoch": 11.56140350877193,
"grad_norm": 4.919811725616455,
"learning_rate": 7.633333333333334e-05,
"loss": 2.0989,
"step": 497
},
{
"epoch": 11.584795321637428,
"grad_norm": 5.478214740753174,
"learning_rate": 7.62857142857143e-05,
"loss": 2.1026,
"step": 498
},
{
"epoch": 11.608187134502923,
"grad_norm": 7.014881134033203,
"learning_rate": 7.623809523809524e-05,
"loss": 2.0747,
"step": 499
},
{
"epoch": 11.631578947368421,
"grad_norm": 3.6512677669525146,
"learning_rate": 7.619047619047618e-05,
"loss": 2.1082,
"step": 500
},
{
"epoch": 11.654970760233919,
"grad_norm": 5.103152751922607,
"learning_rate": 7.614285714285714e-05,
"loss": 2.0812,
"step": 501
},
{
"epoch": 11.678362573099415,
"grad_norm": 5.7634477615356445,
"learning_rate": 7.60952380952381e-05,
"loss": 2.0911,
"step": 502
},
{
"epoch": 11.701754385964913,
"grad_norm": 3.540539264678955,
"learning_rate": 7.604761904761906e-05,
"loss": 2.0894,
"step": 503
},
{
"epoch": 11.725146198830409,
"grad_norm": 3.773231267929077,
"learning_rate": 7.6e-05,
"loss": 2.0756,
"step": 504
},
{
"epoch": 11.748538011695906,
"grad_norm": 4.254147052764893,
"learning_rate": 7.595238095238095e-05,
"loss": 2.0937,
"step": 505
},
{
"epoch": 11.771929824561404,
"grad_norm": 3.7697649002075195,
"learning_rate": 7.590476190476191e-05,
"loss": 2.1034,
"step": 506
},
{
"epoch": 11.7953216374269,
"grad_norm": 4.8648295402526855,
"learning_rate": 7.585714285714287e-05,
"loss": 2.106,
"step": 507
},
{
"epoch": 11.818713450292398,
"grad_norm": 4.606119155883789,
"learning_rate": 7.580952380952381e-05,
"loss": 2.1035,
"step": 508
},
{
"epoch": 11.842105263157894,
"grad_norm": 3.0035810470581055,
"learning_rate": 7.576190476190477e-05,
"loss": 2.0961,
"step": 509
},
{
"epoch": 11.865497076023392,
"grad_norm": 5.411649227142334,
"learning_rate": 7.571428571428571e-05,
"loss": 2.0972,
"step": 510
},
{
"epoch": 11.88888888888889,
"grad_norm": 5.5969157218933105,
"learning_rate": 7.566666666666667e-05,
"loss": 2.1224,
"step": 511
},
{
"epoch": 11.912280701754385,
"grad_norm": 3.6775734424591064,
"learning_rate": 7.561904761904762e-05,
"loss": 2.0929,
"step": 512
},
{
"epoch": 11.935672514619883,
"grad_norm": 4.247641563415527,
"learning_rate": 7.557142857142857e-05,
"loss": 2.1077,
"step": 513
},
{
"epoch": 11.95906432748538,
"grad_norm": 3.0920655727386475,
"learning_rate": 7.552380952380953e-05,
"loss": 2.0521,
"step": 514
},
{
"epoch": 11.982456140350877,
"grad_norm": 3.7392711639404297,
"learning_rate": 7.547619047619048e-05,
"loss": 2.1152,
"step": 515
},
{
"epoch": 12.0,
"grad_norm": 2.820730209350586,
"learning_rate": 7.542857142857144e-05,
"loss": 1.5974,
"step": 516
},
{
"epoch": 12.0,
"eval_loss": 0.32489120960235596,
"eval_runtime": 3.4774,
"eval_samples_per_second": 62.116,
"eval_steps_per_second": 15.529,
"step": 516
}
],
"logging_steps": 1,
"max_steps": 2100,
"num_input_tokens_seen": 0,
"num_train_epochs": 50,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.581592674192589e+16,
"train_batch_size": 12,
"trial_name": null,
"trial_params": null
}