{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.4036326942482341,
"eval_steps": 20,
"global_step": 400,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0010090817356205853,
"grad_norm": 48.721710205078125,
"learning_rate": 8e-08,
"loss": 0.8681,
"step": 1
},
{
"epoch": 0.0020181634712411706,
"grad_norm": 44.42015075683594,
"learning_rate": 1.6e-07,
"loss": 0.6615,
"step": 2
},
{
"epoch": 0.0030272452068617556,
"grad_norm": 42.49191665649414,
"learning_rate": 2.4e-07,
"loss": 0.6742,
"step": 3
},
{
"epoch": 0.004036326942482341,
"grad_norm": 40.36923599243164,
"learning_rate": 3.2e-07,
"loss": 0.7824,
"step": 4
},
{
"epoch": 0.005045408678102927,
"grad_norm": 57.19468688964844,
"learning_rate": 4e-07,
"loss": 1.0009,
"step": 5
},
{
"epoch": 0.006054490413723511,
"grad_norm": 48.78745651245117,
"learning_rate": 4.8e-07,
"loss": 0.7613,
"step": 6
},
{
"epoch": 0.007063572149344097,
"grad_norm": 37.92594528198242,
"learning_rate": 5.6e-07,
"loss": 0.6615,
"step": 7
},
{
"epoch": 0.008072653884964682,
"grad_norm": 37.87478256225586,
"learning_rate": 6.4e-07,
"loss": 0.6779,
"step": 8
},
{
"epoch": 0.009081735620585268,
"grad_norm": 43.225589752197266,
"learning_rate": 7.2e-07,
"loss": 0.796,
"step": 9
},
{
"epoch": 0.010090817356205853,
"grad_norm": 43.1926383972168,
"learning_rate": 8e-07,
"loss": 0.8526,
"step": 10
},
{
"epoch": 0.011099899091826439,
"grad_norm": 42.08029556274414,
"learning_rate": 8.799999999999999e-07,
"loss": 0.6605,
"step": 11
},
{
"epoch": 0.012108980827447022,
"grad_norm": 49.14653396606445,
"learning_rate": 9.6e-07,
"loss": 0.6774,
"step": 12
},
{
"epoch": 0.013118062563067608,
"grad_norm": 47.238338470458984,
"learning_rate": 1.04e-06,
"loss": 0.7313,
"step": 13
},
{
"epoch": 0.014127144298688193,
"grad_norm": 37.998931884765625,
"learning_rate": 1.12e-06,
"loss": 0.6244,
"step": 14
},
{
"epoch": 0.015136226034308779,
"grad_norm": 37.945064544677734,
"learning_rate": 1.2e-06,
"loss": 0.5214,
"step": 15
},
{
"epoch": 0.016145307769929364,
"grad_norm": 33.03001403808594,
"learning_rate": 1.28e-06,
"loss": 0.5042,
"step": 16
},
{
"epoch": 0.017154389505549948,
"grad_norm": 32.11518096923828,
"learning_rate": 1.3600000000000001e-06,
"loss": 0.3594,
"step": 17
},
{
"epoch": 0.018163471241170535,
"grad_norm": 30.64830207824707,
"learning_rate": 1.44e-06,
"loss": 0.3533,
"step": 18
},
{
"epoch": 0.01917255297679112,
"grad_norm": 41.34456253051758,
"learning_rate": 1.5199999999999998e-06,
"loss": 0.6093,
"step": 19
},
{
"epoch": 0.020181634712411706,
"grad_norm": 35.51686477661133,
"learning_rate": 1.6e-06,
"loss": 0.3241,
"step": 20
},
{
"epoch": 0.020181634712411706,
"eval_accuracy": 0.7758229284903518,
"eval_loss": 0.3856516480445862,
"eval_runtime": 63.0132,
"eval_samples_per_second": 27.962,
"eval_steps_per_second": 3.507,
"step": 20
},
{
"epoch": 0.02119071644803229,
"grad_norm": 36.574764251708984,
"learning_rate": 1.6799999999999998e-06,
"loss": 0.3611,
"step": 21
},
{
"epoch": 0.022199798183652877,
"grad_norm": 42.98014831542969,
"learning_rate": 1.7599999999999999e-06,
"loss": 0.5187,
"step": 22
},
{
"epoch": 0.02320887991927346,
"grad_norm": 32.25895690917969,
"learning_rate": 1.84e-06,
"loss": 0.3201,
"step": 23
},
{
"epoch": 0.024217961654894045,
"grad_norm": 45.763553619384766,
"learning_rate": 1.92e-06,
"loss": 0.3977,
"step": 24
},
{
"epoch": 0.025227043390514632,
"grad_norm": 41.3906135559082,
"learning_rate": 2e-06,
"loss": 0.3863,
"step": 25
},
{
"epoch": 0.026236125126135216,
"grad_norm": 55.49371337890625,
"learning_rate": 2.08e-06,
"loss": 0.4371,
"step": 26
},
{
"epoch": 0.027245206861755803,
"grad_norm": 49.18912124633789,
"learning_rate": 2.16e-06,
"loss": 0.4072,
"step": 27
},
{
"epoch": 0.028254288597376387,
"grad_norm": 53.16490173339844,
"learning_rate": 2.24e-06,
"loss": 0.5366,
"step": 28
},
{
"epoch": 0.029263370332996974,
"grad_norm": 58.15668869018555,
"learning_rate": 2.32e-06,
"loss": 0.5429,
"step": 29
},
{
"epoch": 0.030272452068617558,
"grad_norm": 32.979671478271484,
"learning_rate": 2.4e-06,
"loss": 0.2946,
"step": 30
},
{
"epoch": 0.03128153380423814,
"grad_norm": 40.131385803222656,
"learning_rate": 2.48e-06,
"loss": 0.298,
"step": 31
},
{
"epoch": 0.03229061553985873,
"grad_norm": 45.999542236328125,
"learning_rate": 2.56e-06,
"loss": 0.27,
"step": 32
},
{
"epoch": 0.033299697275479316,
"grad_norm": 32.2443962097168,
"learning_rate": 2.64e-06,
"loss": 0.2337,
"step": 33
},
{
"epoch": 0.034308779011099896,
"grad_norm": 28.031023025512695,
"learning_rate": 2.7200000000000002e-06,
"loss": 0.1835,
"step": 34
},
{
"epoch": 0.035317860746720484,
"grad_norm": 22.68515396118164,
"learning_rate": 2.8e-06,
"loss": 0.1522,
"step": 35
},
{
"epoch": 0.03632694248234107,
"grad_norm": 38.576019287109375,
"learning_rate": 2.88e-06,
"loss": 0.3371,
"step": 36
},
{
"epoch": 0.03733602421796166,
"grad_norm": 23.268003463745117,
"learning_rate": 2.96e-06,
"loss": 0.161,
"step": 37
},
{
"epoch": 0.03834510595358224,
"grad_norm": 43.69157409667969,
"learning_rate": 3.0399999999999997e-06,
"loss": 0.4692,
"step": 38
},
{
"epoch": 0.039354187689202826,
"grad_norm": 33.641780853271484,
"learning_rate": 3.1199999999999998e-06,
"loss": 0.3815,
"step": 39
},
{
"epoch": 0.04036326942482341,
"grad_norm": 32.81157302856445,
"learning_rate": 3.2e-06,
"loss": 0.3342,
"step": 40
},
{
"epoch": 0.04036326942482341,
"eval_accuracy": 0.8359818388195233,
"eval_loss": 0.30243048071861267,
"eval_runtime": 62.0758,
"eval_samples_per_second": 28.385,
"eval_steps_per_second": 3.56,
"step": 40
},
{
"epoch": 0.04137235116044399,
"grad_norm": 38.70158004760742,
"learning_rate": 3.2799999999999995e-06,
"loss": 0.3523,
"step": 41
},
{
"epoch": 0.04238143289606458,
"grad_norm": 36.827938079833984,
"learning_rate": 3.3599999999999996e-06,
"loss": 0.3339,
"step": 42
},
{
"epoch": 0.04339051463168517,
"grad_norm": 40.072757720947266,
"learning_rate": 3.4399999999999997e-06,
"loss": 0.3718,
"step": 43
},
{
"epoch": 0.044399596367305755,
"grad_norm": 17.283546447753906,
"learning_rate": 3.5199999999999998e-06,
"loss": 0.1344,
"step": 44
},
{
"epoch": 0.045408678102926335,
"grad_norm": 38.25215148925781,
"learning_rate": 3.6e-06,
"loss": 0.3276,
"step": 45
},
{
"epoch": 0.04641775983854692,
"grad_norm": 31.07087516784668,
"learning_rate": 3.68e-06,
"loss": 0.3091,
"step": 46
},
{
"epoch": 0.04742684157416751,
"grad_norm": 29.617158889770508,
"learning_rate": 3.7599999999999996e-06,
"loss": 0.2113,
"step": 47
},
{
"epoch": 0.04843592330978809,
"grad_norm": 37.85402297973633,
"learning_rate": 3.84e-06,
"loss": 0.3061,
"step": 48
},
{
"epoch": 0.04944500504540868,
"grad_norm": 92.3378677368164,
"learning_rate": 3.92e-06,
"loss": 0.8413,
"step": 49
},
{
"epoch": 0.050454086781029264,
"grad_norm": 25.68213653564453,
"learning_rate": 4e-06,
"loss": 0.2466,
"step": 50
},
{
"epoch": 0.05146316851664985,
"grad_norm": 22.958641052246094,
"learning_rate": 4.08e-06,
"loss": 0.2127,
"step": 51
},
{
"epoch": 0.05247225025227043,
"grad_norm": 26.674156188964844,
"learning_rate": 4.16e-06,
"loss": 0.5098,
"step": 52
},
{
"epoch": 0.05348133198789102,
"grad_norm": 18.660629272460938,
"learning_rate": 4.24e-06,
"loss": 0.2416,
"step": 53
},
{
"epoch": 0.054490413723511606,
"grad_norm": 20.310880661010742,
"learning_rate": 4.32e-06,
"loss": 0.3086,
"step": 54
},
{
"epoch": 0.055499495459132187,
"grad_norm": 20.308359146118164,
"learning_rate": 4.4e-06,
"loss": 0.3001,
"step": 55
},
{
"epoch": 0.056508577194752774,
"grad_norm": 13.419134140014648,
"learning_rate": 4.48e-06,
"loss": 0.1779,
"step": 56
},
{
"epoch": 0.05751765893037336,
"grad_norm": 14.848164558410645,
"learning_rate": 4.5599999999999995e-06,
"loss": 0.1883,
"step": 57
},
{
"epoch": 0.05852674066599395,
"grad_norm": 23.216663360595703,
"learning_rate": 4.64e-06,
"loss": 0.3184,
"step": 58
},
{
"epoch": 0.05953582240161453,
"grad_norm": 65.48796081542969,
"learning_rate": 4.72e-06,
"loss": 0.3397,
"step": 59
},
{
"epoch": 0.060544904137235116,
"grad_norm": 97.63284301757812,
"learning_rate": 4.8e-06,
"loss": 0.3936,
"step": 60
},
{
"epoch": 0.060544904137235116,
"eval_accuracy": 0.7474460839954598,
"eval_loss": 0.4690244793891907,
"eval_runtime": 62.2233,
"eval_samples_per_second": 28.317,
"eval_steps_per_second": 3.552,
"step": 60
},
{
"epoch": 0.0615539858728557,
"grad_norm": 210.90390014648438,
"learning_rate": 4.88e-06,
"loss": 0.6895,
"step": 61
},
{
"epoch": 0.06256306760847628,
"grad_norm": 46.58690643310547,
"learning_rate": 4.96e-06,
"loss": 0.29,
"step": 62
},
{
"epoch": 0.06357214934409687,
"grad_norm": 31.453752517700195,
"learning_rate": 5.04e-06,
"loss": 0.3071,
"step": 63
},
{
"epoch": 0.06458123107971746,
"grad_norm": 20.283885955810547,
"learning_rate": 5.12e-06,
"loss": 0.3571,
"step": 64
},
{
"epoch": 0.06559031281533804,
"grad_norm": 42.37957763671875,
"learning_rate": 5.2e-06,
"loss": 0.5532,
"step": 65
},
{
"epoch": 0.06659939455095863,
"grad_norm": 21.637910842895508,
"learning_rate": 5.28e-06,
"loss": 0.262,
"step": 66
},
{
"epoch": 0.06760847628657922,
"grad_norm": 18.54781723022461,
"learning_rate": 5.36e-06,
"loss": 0.2257,
"step": 67
},
{
"epoch": 0.06861755802219979,
"grad_norm": 20.521879196166992,
"learning_rate": 5.4400000000000004e-06,
"loss": 0.3857,
"step": 68
},
{
"epoch": 0.06962663975782038,
"grad_norm": 29.46757698059082,
"learning_rate": 5.52e-06,
"loss": 0.1029,
"step": 69
},
{
"epoch": 0.07063572149344097,
"grad_norm": 29.944223403930664,
"learning_rate": 5.6e-06,
"loss": 0.2712,
"step": 70
},
{
"epoch": 0.07164480322906155,
"grad_norm": 24.391925811767578,
"learning_rate": 5.68e-06,
"loss": 0.3976,
"step": 71
},
{
"epoch": 0.07265388496468214,
"grad_norm": 18.12702751159668,
"learning_rate": 5.76e-06,
"loss": 0.3472,
"step": 72
},
{
"epoch": 0.07366296670030273,
"grad_norm": 10.216832160949707,
"learning_rate": 5.84e-06,
"loss": 0.2769,
"step": 73
},
{
"epoch": 0.07467204843592332,
"grad_norm": 11.782180786132812,
"learning_rate": 5.92e-06,
"loss": 0.3737,
"step": 74
},
{
"epoch": 0.07568113017154389,
"grad_norm": 33.01165771484375,
"learning_rate": 6e-06,
"loss": 0.2062,
"step": 75
},
{
"epoch": 0.07669021190716448,
"grad_norm": 16.44459342956543,
"learning_rate": 6.079999999999999e-06,
"loss": 0.4451,
"step": 76
},
{
"epoch": 0.07769929364278506,
"grad_norm": 8.65665340423584,
"learning_rate": 6.1599999999999995e-06,
"loss": 0.2799,
"step": 77
},
{
"epoch": 0.07870837537840565,
"grad_norm": 9.738832473754883,
"learning_rate": 6.2399999999999995e-06,
"loss": 0.2986,
"step": 78
},
{
"epoch": 0.07971745711402624,
"grad_norm": 5.850776195526123,
"learning_rate": 6.32e-06,
"loss": 0.2548,
"step": 79
},
{
"epoch": 0.08072653884964683,
"grad_norm": 11.245881080627441,
"learning_rate": 6.4e-06,
"loss": 0.3249,
"step": 80
},
{
"epoch": 0.08072653884964683,
"eval_accuracy": 0.8569807037457434,
"eval_loss": 0.2707608640193939,
"eval_runtime": 62.6893,
"eval_samples_per_second": 28.107,
"eval_steps_per_second": 3.525,
"step": 80
},
{
"epoch": 0.08173562058526741,
"grad_norm": 9.138853073120117,
"learning_rate": 6.48e-06,
"loss": 0.2931,
"step": 81
},
{
"epoch": 0.08274470232088799,
"grad_norm": 7.408812999725342,
"learning_rate": 6.559999999999999e-06,
"loss": 0.276,
"step": 82
},
{
"epoch": 0.08375378405650857,
"grad_norm": 5.13024377822876,
"learning_rate": 6.639999999999999e-06,
"loss": 0.1842,
"step": 83
},
{
"epoch": 0.08476286579212916,
"grad_norm": 5.102560043334961,
"learning_rate": 6.719999999999999e-06,
"loss": 0.2041,
"step": 84
},
{
"epoch": 0.08577194752774975,
"grad_norm": 8.902843475341797,
"learning_rate": 6.799999999999999e-06,
"loss": 0.3015,
"step": 85
},
{
"epoch": 0.08678102926337034,
"grad_norm": 8.0729398727417,
"learning_rate": 6.879999999999999e-06,
"loss": 0.3637,
"step": 86
},
{
"epoch": 0.08779011099899092,
"grad_norm": 7.424192428588867,
"learning_rate": 6.9599999999999994e-06,
"loss": 0.1691,
"step": 87
},
{
"epoch": 0.08879919273461151,
"grad_norm": 19.509435653686523,
"learning_rate": 7.0399999999999995e-06,
"loss": 0.3888,
"step": 88
},
{
"epoch": 0.08980827447023208,
"grad_norm": 13.97599983215332,
"learning_rate": 7.12e-06,
"loss": 0.2415,
"step": 89
},
{
"epoch": 0.09081735620585267,
"grad_norm": 14.023358345031738,
"learning_rate": 7.2e-06,
"loss": 0.2889,
"step": 90
},
{
"epoch": 0.09182643794147326,
"grad_norm": 15.243273735046387,
"learning_rate": 7.28e-06,
"loss": 0.4061,
"step": 91
},
{
"epoch": 0.09283551967709384,
"grad_norm": 9.57010555267334,
"learning_rate": 7.36e-06,
"loss": 0.2501,
"step": 92
},
{
"epoch": 0.09384460141271443,
"grad_norm": 20.639694213867188,
"learning_rate": 7.44e-06,
"loss": 0.2877,
"step": 93
},
{
"epoch": 0.09485368314833502,
"grad_norm": 75.29905700683594,
"learning_rate": 7.519999999999999e-06,
"loss": 0.4536,
"step": 94
},
{
"epoch": 0.0958627648839556,
"grad_norm": 340.7889709472656,
"learning_rate": 7.599999999999999e-06,
"loss": 0.4443,
"step": 95
},
{
"epoch": 0.09687184661957618,
"grad_norm": 16.866931915283203,
"learning_rate": 7.68e-06,
"loss": 0.292,
"step": 96
},
{
"epoch": 0.09788092835519677,
"grad_norm": 12.300322532653809,
"learning_rate": 7.76e-06,
"loss": 0.2191,
"step": 97
},
{
"epoch": 0.09889001009081735,
"grad_norm": 18.15094566345215,
"learning_rate": 7.84e-06,
"loss": 0.1821,
"step": 98
},
{
"epoch": 0.09989909182643794,
"grad_norm": 31.187374114990234,
"learning_rate": 7.92e-06,
"loss": 0.1823,
"step": 99
},
{
"epoch": 0.10090817356205853,
"grad_norm": 64.1741943359375,
"learning_rate": 8e-06,
"loss": 0.5276,
"step": 100
},
{
"epoch": 0.10090817356205853,
"eval_accuracy": 0.8467650397275823,
"eval_loss": 0.28612375259399414,
"eval_runtime": 62.505,
"eval_samples_per_second": 28.19,
"eval_steps_per_second": 3.536,
"step": 100
},
{
"epoch": 0.10191725529767912,
"grad_norm": 12.569557189941406,
"learning_rate": 7.999975135834775e-06,
"loss": 0.3077,
"step": 101
},
{
"epoch": 0.1029263370332997,
"grad_norm": 7.0805277824401855,
"learning_rate": 7.999900543648217e-06,
"loss": 0.1759,
"step": 102
},
{
"epoch": 0.10393541876892028,
"grad_norm": 9.746481895446777,
"learning_rate": 7.999776224367659e-06,
"loss": 0.2064,
"step": 103
},
{
"epoch": 0.10494450050454086,
"grad_norm": 6.783945560455322,
"learning_rate": 7.999602179538651e-06,
"loss": 0.3028,
"step": 104
},
{
"epoch": 0.10595358224016145,
"grad_norm": 4.683195114135742,
"learning_rate": 7.999378411324933e-06,
"loss": 0.1767,
"step": 105
},
{
"epoch": 0.10696266397578204,
"grad_norm": 6.255279064178467,
"learning_rate": 7.999104922508408e-06,
"loss": 0.2984,
"step": 106
},
{
"epoch": 0.10797174571140263,
"grad_norm": 5.866703987121582,
"learning_rate": 7.99878171648911e-06,
"loss": 0.2472,
"step": 107
},
{
"epoch": 0.10898082744702321,
"grad_norm": 5.773608207702637,
"learning_rate": 7.998408797285167e-06,
"loss": 0.225,
"step": 108
},
{
"epoch": 0.1099899091826438,
"grad_norm": 10.58280086517334,
"learning_rate": 7.99798616953274e-06,
"loss": 0.3962,
"step": 109
},
{
"epoch": 0.11099899091826437,
"grad_norm": 5.545925140380859,
"learning_rate": 7.997513838485971e-06,
"loss": 0.1597,
"step": 110
},
{
"epoch": 0.11200807265388496,
"grad_norm": 7.84637975692749,
"learning_rate": 7.99699181001692e-06,
"loss": 0.346,
"step": 111
},
{
"epoch": 0.11301715438950555,
"grad_norm": 5.827946662902832,
"learning_rate": 7.996420090615486e-06,
"loss": 0.2507,
"step": 112
},
{
"epoch": 0.11402623612512613,
"grad_norm": 8.125126838684082,
"learning_rate": 7.995798687389334e-06,
"loss": 0.2496,
"step": 113
},
{
"epoch": 0.11503531786074672,
"grad_norm": 6.233455181121826,
"learning_rate": 7.9951276080638e-06,
"loss": 0.2129,
"step": 114
},
{
"epoch": 0.11604439959636731,
"grad_norm": 8.677657127380371,
"learning_rate": 7.994406860981797e-06,
"loss": 0.3926,
"step": 115
},
{
"epoch": 0.1170534813319879,
"grad_norm": 6.164268493652344,
"learning_rate": 7.99363645510371e-06,
"loss": 0.2335,
"step": 116
},
{
"epoch": 0.11806256306760847,
"grad_norm": 5.605584144592285,
"learning_rate": 7.992816400007294e-06,
"loss": 0.2357,
"step": 117
},
{
"epoch": 0.11907164480322906,
"grad_norm": 8.481727600097656,
"learning_rate": 7.991946705887537e-06,
"loss": 0.4145,
"step": 118
},
{
"epoch": 0.12008072653884964,
"grad_norm": 7.141845226287842,
"learning_rate": 7.99102738355655e-06,
"loss": 0.3807,
"step": 119
},
{
"epoch": 0.12108980827447023,
"grad_norm": 22.80964469909668,
"learning_rate": 7.990058444443424e-06,
"loss": 0.259,
"step": 120
},
{
"epoch": 0.12108980827447023,
"eval_accuracy": 0.7729852440408627,
"eval_loss": 0.44569578766822815,
"eval_runtime": 62.4809,
"eval_samples_per_second": 28.201,
"eval_steps_per_second": 3.537,
"step": 120
},
{
"epoch": 0.12209889001009082,
"grad_norm": 56.96179962158203,
"learning_rate": 7.989039900594089e-06,
"loss": 0.4721,
"step": 121
},
{
"epoch": 0.1231079717457114,
"grad_norm": 9.661979675292969,
"learning_rate": 7.987971764671168e-06,
"loss": 0.2308,
"step": 122
},
{
"epoch": 0.124117053481332,
"grad_norm": 20.324970245361328,
"learning_rate": 7.986854049953814e-06,
"loss": 0.4019,
"step": 123
},
{
"epoch": 0.12512613521695257,
"grad_norm": 7.678678512573242,
"learning_rate": 7.98568677033755e-06,
"loss": 0.1287,
"step": 124
},
{
"epoch": 0.12613521695257315,
"grad_norm": 5.97120475769043,
"learning_rate": 7.984469940334089e-06,
"loss": 0.3056,
"step": 125
},
{
"epoch": 0.12714429868819374,
"grad_norm": 4.4789509773254395,
"learning_rate": 7.983203575071166e-06,
"loss": 0.156,
"step": 126
},
{
"epoch": 0.12815338042381433,
"grad_norm": 7.833422660827637,
"learning_rate": 7.981887690292338e-06,
"loss": 0.3012,
"step": 127
},
{
"epoch": 0.12916246215943492,
"grad_norm": 4.627663612365723,
"learning_rate": 7.980522302356792e-06,
"loss": 0.1701,
"step": 128
},
{
"epoch": 0.1301715438950555,
"grad_norm": 23.75330924987793,
"learning_rate": 7.979107428239143e-06,
"loss": 0.5042,
"step": 129
},
{
"epoch": 0.1311806256306761,
"grad_norm": 8.093461036682129,
"learning_rate": 7.977643085529227e-06,
"loss": 0.3362,
"step": 130
},
{
"epoch": 0.13218970736629668,
"grad_norm": 8.479804992675781,
"learning_rate": 7.97612929243187e-06,
"loss": 0.2005,
"step": 131
},
{
"epoch": 0.13319878910191726,
"grad_norm": 8.889948844909668,
"learning_rate": 7.974566067766671e-06,
"loss": 0.4108,
"step": 132
},
{
"epoch": 0.13420787083753785,
"grad_norm": 6.473036289215088,
"learning_rate": 7.972953430967771e-06,
"loss": 0.3489,
"step": 133
},
{
"epoch": 0.13521695257315844,
"grad_norm": 5.9677863121032715,
"learning_rate": 7.971291402083606e-06,
"loss": 0.2707,
"step": 134
},
{
"epoch": 0.136226034308779,
"grad_norm": 9.331769943237305,
"learning_rate": 7.969580001776653e-06,
"loss": 0.4528,
"step": 135
},
{
"epoch": 0.13723511604439959,
"grad_norm": 6.988079071044922,
"learning_rate": 7.96781925132318e-06,
"loss": 0.3895,
"step": 136
},
{
"epoch": 0.13824419778002017,
"grad_norm": 5.6570281982421875,
"learning_rate": 7.966009172612988e-06,
"loss": 0.2324,
"step": 137
},
{
"epoch": 0.13925327951564076,
"grad_norm": 7.507450103759766,
"learning_rate": 7.964149788149122e-06,
"loss": 0.3709,
"step": 138
},
{
"epoch": 0.14026236125126135,
"grad_norm": 6.912793159484863,
"learning_rate": 7.962241121047602e-06,
"loss": 0.2004,
"step": 139
},
{
"epoch": 0.14127144298688193,
"grad_norm": 7.456505298614502,
"learning_rate": 7.960283195037138e-06,
"loss": 0.1852,
"step": 140
},
{
"epoch": 0.14127144298688193,
"eval_accuracy": 0.8382519863791147,
"eval_loss": 0.30151110887527466,
"eval_runtime": 59.0753,
"eval_samples_per_second": 29.826,
"eval_steps_per_second": 3.741,
"step": 140
},
{
"epoch": 0.14228052472250252,
"grad_norm": 5.073808670043945,
"learning_rate": 7.958276034458826e-06,
"loss": 0.2379,
"step": 141
},
{
"epoch": 0.1432896064581231,
"grad_norm": 10.782764434814453,
"learning_rate": 7.956219664265852e-06,
"loss": 0.1299,
"step": 142
},
{
"epoch": 0.1442986881937437,
"grad_norm": 5.916919708251953,
"learning_rate": 7.95411411002318e-06,
"loss": 0.2939,
"step": 143
},
{
"epoch": 0.14530776992936428,
"grad_norm": 4.290775775909424,
"learning_rate": 7.951959397907236e-06,
"loss": 0.2175,
"step": 144
},
{
"epoch": 0.14631685166498487,
"grad_norm": 6.01127290725708,
"learning_rate": 7.949755554705577e-06,
"loss": 0.2962,
"step": 145
},
{
"epoch": 0.14732593340060546,
"grad_norm": 5.430018901824951,
"learning_rate": 7.947502607816566e-06,
"loss": 0.3358,
"step": 146
},
{
"epoch": 0.14833501513622604,
"grad_norm": 2.893122911453247,
"learning_rate": 7.945200585249022e-06,
"loss": 0.0949,
"step": 147
},
{
"epoch": 0.14934409687184663,
"grad_norm": 5.385425567626953,
"learning_rate": 7.942849515621881e-06,
"loss": 0.314,
"step": 148
},
{
"epoch": 0.1503531786074672,
"grad_norm": 4.384002208709717,
"learning_rate": 7.940449428163837e-06,
"loss": 0.2312,
"step": 149
},
{
"epoch": 0.15136226034308778,
"grad_norm": 5.955286979675293,
"learning_rate": 7.938000352712972e-06,
"loss": 0.1949,
"step": 150
},
{
"epoch": 0.15237134207870837,
"grad_norm": 6.1054487228393555,
"learning_rate": 7.935502319716397e-06,
"loss": 0.3697,
"step": 151
},
{
"epoch": 0.15338042381432895,
"grad_norm": 3.2285423278808594,
"learning_rate": 7.932955360229862e-06,
"loss": 0.1118,
"step": 152
},
{
"epoch": 0.15438950554994954,
"grad_norm": 7.409282684326172,
"learning_rate": 7.930359505917381e-06,
"loss": 0.4898,
"step": 153
},
{
"epoch": 0.15539858728557013,
"grad_norm": 4.591885566711426,
"learning_rate": 7.927714789050827e-06,
"loss": 0.2288,
"step": 154
},
{
"epoch": 0.15640766902119072,
"grad_norm": 5.779257297515869,
"learning_rate": 7.925021242509538e-06,
"loss": 0.2563,
"step": 155
},
{
"epoch": 0.1574167507568113,
"grad_norm": 4.496645927429199,
"learning_rate": 7.92227889977991e-06,
"loss": 0.2096,
"step": 156
},
{
"epoch": 0.1584258324924319,
"grad_norm": 6.980607509613037,
"learning_rate": 7.919487794954972e-06,
"loss": 0.3658,
"step": 157
},
{
"epoch": 0.15943491422805248,
"grad_norm": 5.738483905792236,
"learning_rate": 7.91664796273397e-06,
"loss": 0.3123,
"step": 158
},
{
"epoch": 0.16044399596367306,
"grad_norm": 6.03507137298584,
"learning_rate": 7.913759438421932e-06,
"loss": 0.3345,
"step": 159
},
{
"epoch": 0.16145307769929365,
"grad_norm": 31.2845458984375,
"learning_rate": 7.910822257929234e-06,
"loss": 0.1122,
"step": 160
},
{
"epoch": 0.16145307769929365,
"eval_accuracy": 0.8518728717366629,
"eval_loss": 0.38947635889053345,
"eval_runtime": 62.3612,
"eval_samples_per_second": 28.255,
"eval_steps_per_second": 3.544,
"step": 160
},
{
"epoch": 0.16246215943491424,
"grad_norm": 6.148342609405518,
"learning_rate": 7.907836457771143e-06,
"loss": 0.2418,
"step": 161
},
{
"epoch": 0.16347124117053483,
"grad_norm": 38.332550048828125,
"learning_rate": 7.904802075067377e-06,
"loss": 0.6998,
"step": 162
},
{
"epoch": 0.16448032290615539,
"grad_norm": 8.149801254272461,
"learning_rate": 7.901719147541628e-06,
"loss": 0.1255,
"step": 163
},
{
"epoch": 0.16548940464177597,
"grad_norm": 19.77474021911621,
"learning_rate": 7.898587713521109e-06,
"loss": 0.6065,
"step": 164
},
{
"epoch": 0.16649848637739656,
"grad_norm": 5.291502952575684,
"learning_rate": 7.895407811936064e-06,
"loss": 0.1659,
"step": 165
},
{
"epoch": 0.16750756811301715,
"grad_norm": 5.293869495391846,
"learning_rate": 7.892179482319294e-06,
"loss": 0.2073,
"step": 166
},
{
"epoch": 0.16851664984863773,
"grad_norm": 6.291193962097168,
"learning_rate": 7.88890276480566e-06,
"loss": 0.3597,
"step": 167
},
{
"epoch": 0.16952573158425832,
"grad_norm": 5.216683864593506,
"learning_rate": 7.885577700131584e-06,
"loss": 0.2888,
"step": 168
},
{
"epoch": 0.1705348133198789,
"grad_norm": 3.573047637939453,
"learning_rate": 7.882204329634543e-06,
"loss": 0.1343,
"step": 169
},
{
"epoch": 0.1715438950554995,
"grad_norm": 5.618640899658203,
"learning_rate": 7.878782695252562e-06,
"loss": 0.2477,
"step": 170
},
{
"epoch": 0.17255297679112008,
"grad_norm": 6.469369411468506,
"learning_rate": 7.875312839523677e-06,
"loss": 0.3563,
"step": 171
},
{
"epoch": 0.17356205852674067,
"grad_norm": 7.982197284698486,
"learning_rate": 7.871794805585425e-06,
"loss": 0.2748,
"step": 172
},
{
"epoch": 0.17457114026236126,
"grad_norm": 8.727740287780762,
"learning_rate": 7.868228637174292e-06,
"loss": 0.2539,
"step": 173
},
{
"epoch": 0.17558022199798184,
"grad_norm": 9.52997875213623,
"learning_rate": 7.86461437862518e-06,
"loss": 0.2352,
"step": 174
},
{
"epoch": 0.17658930373360243,
"grad_norm": 8.447443008422852,
"learning_rate": 7.86095207487085e-06,
"loss": 0.3129,
"step": 175
},
{
"epoch": 0.17759838546922302,
"grad_norm": 7.929843425750732,
"learning_rate": 7.857241771441364e-06,
"loss": 0.3138,
"step": 176
},
{
"epoch": 0.17860746720484358,
"grad_norm": 7.068841934204102,
"learning_rate": 7.853483514463521e-06,
"loss": 0.1881,
"step": 177
},
{
"epoch": 0.17961654894046417,
"grad_norm": 179.09567260742188,
"learning_rate": 7.849677350660282e-06,
"loss": 1.6187,
"step": 178
},
{
"epoch": 0.18062563067608475,
"grad_norm": 14.378893852233887,
"learning_rate": 7.84582332735019e-06,
"loss": 0.44,
"step": 179
},
{
"epoch": 0.18163471241170534,
"grad_norm": 10.581100463867188,
"learning_rate": 7.841921492446781e-06,
"loss": 0.1962,
"step": 180
},
{
"epoch": 0.18163471241170534,
"eval_accuracy": 0.8229284903518729,
"eval_loss": 0.2842705547809601,
"eval_runtime": 63.0695,
"eval_samples_per_second": 27.937,
"eval_steps_per_second": 3.504,
"step": 180
},
{
"epoch": 0.18264379414732593,
"grad_norm": 13.833659172058105,
"learning_rate": 7.837971894457989e-06,
"loss": 0.4059,
"step": 181
},
{
"epoch": 0.18365287588294651,
"grad_norm": 6.910711288452148,
"learning_rate": 7.833974582485544e-06,
"loss": 0.3115,
"step": 182
},
{
"epoch": 0.1846619576185671,
"grad_norm": 4.3872599601745605,
"learning_rate": 7.829929606224356e-06,
"loss": 0.1654,
"step": 183
},
{
"epoch": 0.1856710393541877,
"grad_norm": 8.301304817199707,
"learning_rate": 7.825837015961904e-06,
"loss": 0.3223,
"step": 184
},
{
"epoch": 0.18668012108980828,
"grad_norm": 6.772916316986084,
"learning_rate": 7.82169686257761e-06,
"loss": 0.207,
"step": 185
},
{
"epoch": 0.18768920282542886,
"grad_norm": 10.749194145202637,
"learning_rate": 7.817509197542204e-06,
"loss": 0.3322,
"step": 186
},
{
"epoch": 0.18869828456104945,
"grad_norm": 18.030078887939453,
"learning_rate": 7.813274072917081e-06,
"loss": 0.3675,
"step": 187
},
{
"epoch": 0.18970736629667004,
"grad_norm": 15.416492462158203,
"learning_rate": 7.80899154135366e-06,
"loss": 0.2049,
"step": 188
},
{
"epoch": 0.19071644803229063,
"grad_norm": 24.75461769104004,
"learning_rate": 7.80466165609273e-06,
"loss": 0.36,
"step": 189
},
{
"epoch": 0.1917255297679112,
"grad_norm": 11.484724044799805,
"learning_rate": 7.800284470963781e-06,
"loss": 0.2014,
"step": 190
},
{
"epoch": 0.1927346115035318,
"grad_norm": 19.571962356567383,
"learning_rate": 7.795860040384339e-06,
"loss": 0.5687,
"step": 191
},
{
"epoch": 0.19374369323915236,
"grad_norm": 6.390493392944336,
"learning_rate": 7.791388419359292e-06,
"loss": 0.2563,
"step": 192
},
{
"epoch": 0.19475277497477295,
"grad_norm": 9.572249412536621,
"learning_rate": 7.786869663480201e-06,
"loss": 0.3565,
"step": 193
},
{
"epoch": 0.19576185671039353,
"grad_norm": 8.426647186279297,
"learning_rate": 7.782303828924613e-06,
"loss": 0.5456,
"step": 194
},
{
"epoch": 0.19677093844601412,
"grad_norm": 14.882564544677734,
"learning_rate": 7.77769097245536e-06,
"loss": 0.2976,
"step": 195
},
{
"epoch": 0.1977800201816347,
"grad_norm": 10.191393852233887,
"learning_rate": 7.773031151419853e-06,
"loss": 0.4641,
"step": 196
},
{
"epoch": 0.1987891019172553,
"grad_norm": 7.604254245758057,
"learning_rate": 7.768324423749376e-06,
"loss": 0.2364,
"step": 197
},
{
"epoch": 0.19979818365287588,
"grad_norm": 9.080754280090332,
"learning_rate": 7.763570847958354e-06,
"loss": 0.1754,
"step": 198
},
{
"epoch": 0.20080726538849647,
"grad_norm": 10.90381908416748,
"learning_rate": 7.758770483143633e-06,
"loss": 0.1799,
"step": 199
},
{
"epoch": 0.20181634712411706,
"grad_norm": 12.508880615234375,
"learning_rate": 7.753923388983747e-06,
"loss": 0.2855,
"step": 200
},
{
"epoch": 0.20181634712411706,
"eval_accuracy": 0.840522133938706,
"eval_loss": 0.29473429918289185,
"eval_runtime": 62.4643,
"eval_samples_per_second": 28.208,
"eval_steps_per_second": 3.538,
"step": 200
},
{
"epoch": 0.20282542885973764,
"grad_norm": 20.691980361938477,
"learning_rate": 7.749029625738169e-06,
"loss": 0.1942,
"step": 201
},
{
"epoch": 0.20383451059535823,
"grad_norm": 10.143714904785156,
"learning_rate": 7.744089254246569e-06,
"loss": 0.2001,
"step": 202
},
{
"epoch": 0.20484359233097882,
"grad_norm": 17.92678451538086,
"learning_rate": 7.739102335928053e-06,
"loss": 0.3823,
"step": 203
},
{
"epoch": 0.2058526740665994,
"grad_norm": 12.694001197814941,
"learning_rate": 7.734068932780405e-06,
"loss": 0.274,
"step": 204
},
{
"epoch": 0.20686175580222,
"grad_norm": 12.30357837677002,
"learning_rate": 7.728989107379303e-06,
"loss": 0.3017,
"step": 205
},
{
"epoch": 0.20787083753784055,
"grad_norm": 7.952809810638428,
"learning_rate": 7.72386292287756e-06,
"loss": 0.14,
"step": 206
},
{
"epoch": 0.20887991927346114,
"grad_norm": 69.02944946289062,
"learning_rate": 7.718690443004324e-06,
"loss": 0.4633,
"step": 207
},
{
"epoch": 0.20988900100908173,
"grad_norm": 92.96024322509766,
"learning_rate": 7.71347173206429e-06,
"loss": 1.1202,
"step": 208
},
{
"epoch": 0.21089808274470231,
"grad_norm": 83.07585906982422,
"learning_rate": 7.708206854936908e-06,
"loss": 0.3752,
"step": 209
},
{
"epoch": 0.2119071644803229,
"grad_norm": 10.343903541564941,
"learning_rate": 7.702895877075563e-06,
"loss": 0.3543,
"step": 210
},
{
"epoch": 0.2129162462159435,
"grad_norm": 34.449188232421875,
"learning_rate": 7.697538864506767e-06,
"loss": 0.5488,
"step": 211
},
{
"epoch": 0.21392532795156408,
"grad_norm": 5.609644412994385,
"learning_rate": 7.692135883829349e-06,
"loss": 0.342,
"step": 212
},
{
"epoch": 0.21493440968718466,
"grad_norm": 6.129890441894531,
"learning_rate": 7.686687002213609e-06,
"loss": 0.1932,
"step": 213
},
{
"epoch": 0.21594349142280525,
"grad_norm": 17.425050735473633,
"learning_rate": 7.681192287400491e-06,
"loss": 0.4084,
"step": 214
},
{
"epoch": 0.21695257315842584,
"grad_norm": 24.733633041381836,
"learning_rate": 7.675651807700748e-06,
"loss": 0.2141,
"step": 215
},
{
"epoch": 0.21796165489404642,
"grad_norm": 33.499794006347656,
"learning_rate": 7.670065631994078e-06,
"loss": 0.5658,
"step": 216
},
{
"epoch": 0.218970736629667,
"grad_norm": 9.948688507080078,
"learning_rate": 7.664433829728277e-06,
"loss": 0.2564,
"step": 217
},
{
"epoch": 0.2199798183652876,
"grad_norm": 9.255337715148926,
"learning_rate": 7.658756470918382e-06,
"loss": 0.2294,
"step": 218
},
{
"epoch": 0.2209889001009082,
"grad_norm": 30.673681259155273,
"learning_rate": 7.65303362614578e-06,
"loss": 0.4334,
"step": 219
},
{
"epoch": 0.22199798183652875,
"grad_norm": 5.19248628616333,
"learning_rate": 7.647265366557355e-06,
"loss": 0.1593,
"step": 220
},
{
"epoch": 0.22199798183652875,
"eval_accuracy": 0.8382519863791147,
"eval_loss": 0.29325050115585327,
"eval_runtime": 62.9477,
"eval_samples_per_second": 27.991,
"eval_steps_per_second": 3.511,
"step": 220
},
{
"epoch": 0.22300706357214933,
"grad_norm": 5.615384101867676,
"learning_rate": 7.641451763864587e-06,
"loss": 0.225,
"step": 221
},
{
"epoch": 0.22401614530776992,
"grad_norm": 6.7938032150268555,
"learning_rate": 7.63559289034266e-06,
"loss": 0.2151,
"step": 222
},
{
"epoch": 0.2250252270433905,
"grad_norm": 2.8786470890045166,
"learning_rate": 7.629688818829577e-06,
"loss": 0.1089,
"step": 223
},
{
"epoch": 0.2260343087790111,
"grad_norm": 7.671812057495117,
"learning_rate": 7.623739622725244e-06,
"loss": 0.289,
"step": 224
},
{
"epoch": 0.22704339051463168,
"grad_norm": 5.496854782104492,
"learning_rate": 7.617745375990556e-06,
"loss": 0.24,
"step": 225
},
{
"epoch": 0.22805247225025227,
"grad_norm": 6.458908557891846,
"learning_rate": 7.611706153146485e-06,
"loss": 0.3702,
"step": 226
},
{
"epoch": 0.22906155398587286,
"grad_norm": 10.48363971710205,
"learning_rate": 7.605622029273148e-06,
"loss": 0.1634,
"step": 227
},
{
"epoch": 0.23007063572149344,
"grad_norm": 32.09809875488281,
"learning_rate": 7.599493080008873e-06,
"loss": 0.7119,
"step": 228
},
{
"epoch": 0.23107971745711403,
"grad_norm": 43.372257232666016,
"learning_rate": 7.5933193815492675e-06,
"loss": 0.4775,
"step": 229
},
{
"epoch": 0.23208879919273462,
"grad_norm": 21.043750762939453,
"learning_rate": 7.587101010646259e-06,
"loss": 0.3401,
"step": 230
},
{
"epoch": 0.2330978809283552,
"grad_norm": 13.805304527282715,
"learning_rate": 7.58083804460715e-06,
"loss": 0.1592,
"step": 231
},
{
"epoch": 0.2341069626639758,
"grad_norm": 6.074002265930176,
"learning_rate": 7.574530561293649e-06,
"loss": 0.3836,
"step": 232
},
{
"epoch": 0.23511604439959638,
"grad_norm": 4.921712875366211,
"learning_rate": 7.5681786391209105e-06,
"loss": 0.2796,
"step": 233
},
{
"epoch": 0.23612512613521694,
"grad_norm": 5.469928741455078,
"learning_rate": 7.561782357056557e-06,
"loss": 0.2877,
"step": 234
},
{
"epoch": 0.23713420787083753,
"grad_norm": 5.309055328369141,
"learning_rate": 7.555341794619694e-06,
"loss": 0.2664,
"step": 235
},
{
"epoch": 0.23814328960645811,
"grad_norm": 55.794979095458984,
"learning_rate": 7.548857031879926e-06,
"loss": 0.1148,
"step": 236
},
{
"epoch": 0.2391523713420787,
"grad_norm": 207.49688720703125,
"learning_rate": 7.5423281494563595e-06,
"loss": 0.3024,
"step": 237
},
{
"epoch": 0.2401614530776993,
"grad_norm": 13.331132888793945,
"learning_rate": 7.535755228516601e-06,
"loss": 0.353,
"step": 238
},
{
"epoch": 0.24117053481331988,
"grad_norm": 9.264041900634766,
"learning_rate": 7.529138350775745e-06,
"loss": 0.3563,
"step": 239
},
{
"epoch": 0.24217961654894046,
"grad_norm": 4.584170818328857,
"learning_rate": 7.522477598495363e-06,
"loss": 0.2153,
"step": 240
},
{
"epoch": 0.24217961654894046,
"eval_accuracy": 0.8376844494892168,
"eval_loss": 0.2747989892959595,
"eval_runtime": 62.479,
"eval_samples_per_second": 28.201,
"eval_steps_per_second": 3.537,
"step": 240
},
{
"epoch": 0.24318869828456105,
"grad_norm": 4.465677738189697,
"learning_rate": 7.515773054482478e-06,
"loss": 0.2503,
"step": 241
},
{
"epoch": 0.24419778002018164,
"grad_norm": 4.346131324768066,
"learning_rate": 7.509024802088534e-06,
"loss": 0.2141,
"step": 242
},
{
"epoch": 0.24520686175580222,
"grad_norm": 7.381392002105713,
"learning_rate": 7.502232925208363e-06,
"loss": 0.2984,
"step": 243
},
{
"epoch": 0.2462159434914228,
"grad_norm": 4.441753387451172,
"learning_rate": 7.49539750827914e-06,
"loss": 0.2058,
"step": 244
},
{
"epoch": 0.2472250252270434,
"grad_norm": 7.497750282287598,
"learning_rate": 7.488518636279331e-06,
"loss": 0.4537,
"step": 245
},
{
"epoch": 0.248234106962664,
"grad_norm": 9.178194046020508,
"learning_rate": 7.4815963947276436e-06,
"loss": 0.2963,
"step": 246
},
{
"epoch": 0.24924318869828457,
"grad_norm": 7.65831184387207,
"learning_rate": 7.474630869681954e-06,
"loss": 0.3374,
"step": 247
},
{
"epoch": 0.25025227043390513,
"grad_norm": 4.4074482917785645,
"learning_rate": 7.467622147738246e-06,
"loss": 0.176,
"step": 248
},
{
"epoch": 0.2512613521695257,
"grad_norm": 3.8703436851501465,
"learning_rate": 7.4605703160295315e-06,
"loss": 0.1294,
"step": 249
},
{
"epoch": 0.2522704339051463,
"grad_norm": 14.600114822387695,
"learning_rate": 7.453475462224763e-06,
"loss": 0.2959,
"step": 250
},
{
"epoch": 0.2532795156407669,
"grad_norm": 7.458588600158691,
"learning_rate": 7.44633767452775e-06,
"loss": 0.3291,
"step": 251
},
{
"epoch": 0.2542885973763875,
"grad_norm": 13.63289737701416,
"learning_rate": 7.439157041676058e-06,
"loss": 0.4166,
"step": 252
},
{
"epoch": 0.25529767911200807,
"grad_norm": 8.358410835266113,
"learning_rate": 7.431933652939908e-06,
"loss": 0.257,
"step": 253
},
{
"epoch": 0.25630676084762866,
"grad_norm": 5.803023338317871,
"learning_rate": 7.424667598121065e-06,
"loss": 0.2334,
"step": 254
},
{
"epoch": 0.25731584258324924,
"grad_norm": 5.000588893890381,
"learning_rate": 7.4173589675517245e-06,
"loss": 0.2193,
"step": 255
},
{
"epoch": 0.25832492431886983,
"grad_norm": 5.990160942077637,
"learning_rate": 7.410007852093384e-06,
"loss": 0.31,
"step": 256
},
{
"epoch": 0.2593340060544904,
"grad_norm": 6.175328731536865,
"learning_rate": 7.40261434313572e-06,
"loss": 0.307,
"step": 257
},
{
"epoch": 0.260343087790111,
"grad_norm": 6.047603130340576,
"learning_rate": 7.395178532595444e-06,
"loss": 0.223,
"step": 258
},
{
"epoch": 0.2613521695257316,
"grad_norm": 5.447187423706055,
"learning_rate": 7.387700512915168e-06,
"loss": 0.0597,
"step": 259
},
{
"epoch": 0.2623612512613522,
"grad_norm": 6.375972747802734,
"learning_rate": 7.380180377062251e-06,
"loss": 0.398,
"step": 260
},
{
"epoch": 0.2623612512613522,
"eval_accuracy": 0.8320090805902384,
"eval_loss": 0.2793748378753662,
"eval_runtime": 62.5384,
"eval_samples_per_second": 28.175,
"eval_steps_per_second": 3.534,
"step": 260
},
{
"epoch": 0.26337033299697277,
"grad_norm": 5.560431003570557,
"learning_rate": 7.372618218527644e-06,
"loss": 0.2861,
"step": 261
},
{
"epoch": 0.26437941473259335,
"grad_norm": 5.246089935302734,
"learning_rate": 7.365014131324725e-06,
"loss": 0.1859,
"step": 262
},
{
"epoch": 0.26538849646821394,
"grad_norm": 7.6310625076293945,
"learning_rate": 7.3573682099881345e-06,
"loss": 0.3164,
"step": 263
},
{
"epoch": 0.26639757820383453,
"grad_norm": 6.964735507965088,
"learning_rate": 7.349680549572598e-06,
"loss": 0.3655,
"step": 264
},
{
"epoch": 0.2674066599394551,
"grad_norm": 4.680076599121094,
"learning_rate": 7.3419512456517455e-06,
"loss": 0.1872,
"step": 265
},
{
"epoch": 0.2684157416750757,
"grad_norm": 3.2980797290802,
"learning_rate": 7.3341803943169214e-06,
"loss": 0.0983,
"step": 266
},
{
"epoch": 0.2694248234106963,
"grad_norm": 9.682284355163574,
"learning_rate": 7.326368092175993e-06,
"loss": 0.1637,
"step": 267
},
{
"epoch": 0.2704339051463169,
"grad_norm": 58.10804748535156,
"learning_rate": 7.3185144363521435e-06,
"loss": 1.2893,
"step": 268
},
{
"epoch": 0.2714429868819374,
"grad_norm": 3.9088146686553955,
"learning_rate": 7.310619524482673e-06,
"loss": 0.1103,
"step": 269
},
{
"epoch": 0.272452068617558,
"grad_norm": 8.596792221069336,
"learning_rate": 7.302683454717778e-06,
"loss": 0.4524,
"step": 270
},
{
"epoch": 0.2734611503531786,
"grad_norm": 18.94976234436035,
"learning_rate": 7.294706325719331e-06,
"loss": 0.3868,
"step": 271
},
{
"epoch": 0.27447023208879917,
"grad_norm": 28.618101119995117,
"learning_rate": 7.28668823665966e-06,
"loss": 1.2269,
"step": 272
},
{
"epoch": 0.27547931382441976,
"grad_norm": 7.027772903442383,
"learning_rate": 7.2786292872203125e-06,
"loss": 0.2546,
"step": 273
},
{
"epoch": 0.27648839556004035,
"grad_norm": 4.971069812774658,
"learning_rate": 7.270529577590812e-06,
"loss": 0.2229,
"step": 274
},
{
"epoch": 0.27749747729566093,
"grad_norm": 7.468736171722412,
"learning_rate": 7.262389208467417e-06,
"loss": 0.3655,
"step": 275
},
{
"epoch": 0.2785065590312815,
"grad_norm": 7.4164509773254395,
"learning_rate": 7.2542082810518696e-06,
"loss": 0.3047,
"step": 276
},
{
"epoch": 0.2795156407669021,
"grad_norm": 6.213995933532715,
"learning_rate": 7.245986897050137e-06,
"loss": 0.2879,
"step": 277
},
{
"epoch": 0.2805247225025227,
"grad_norm": 6.1668548583984375,
"learning_rate": 7.237725158671141e-06,
"loss": 0.3405,
"step": 278
},
{
"epoch": 0.2815338042381433,
"grad_norm": 11.742606163024902,
"learning_rate": 7.229423168625498e-06,
"loss": 0.3894,
"step": 279
},
{
"epoch": 0.28254288597376387,
"grad_norm": 5.1043477058410645,
"learning_rate": 7.2210810301242345e-06,
"loss": 0.199,
"step": 280
},
{
"epoch": 0.28254288597376387,
"eval_accuracy": 0.8382519863791147,
"eval_loss": 0.28534775972366333,
"eval_runtime": 62.6946,
"eval_samples_per_second": 28.104,
"eval_steps_per_second": 3.525,
"step": 280
},
{
"epoch": 0.28355196770938446,
"grad_norm": 5.588554859161377,
"learning_rate": 7.212698846877503e-06,
"loss": 0.347,
"step": 281
},
{
"epoch": 0.28456104944500504,
"grad_norm": 4.220617294311523,
"learning_rate": 7.204276723093301e-06,
"loss": 0.2267,
"step": 282
},
{
"epoch": 0.28557013118062563,
"grad_norm": 36.09522247314453,
"learning_rate": 7.195814763476164e-06,
"loss": 0.6504,
"step": 283
},
{
"epoch": 0.2865792129162462,
"grad_norm": 5.161518096923828,
"learning_rate": 7.187313073225876e-06,
"loss": 0.2736,
"step": 284
},
{
"epoch": 0.2875882946518668,
"grad_norm": 5.858831882476807,
"learning_rate": 7.178771758036152e-06,
"loss": 0.3758,
"step": 285
},
{
"epoch": 0.2885973763874874,
"grad_norm": 14.579549789428711,
"learning_rate": 7.170190924093326e-06,
"loss": 0.3338,
"step": 286
},
{
"epoch": 0.289606458123108,
"grad_norm": 6.289809703826904,
"learning_rate": 7.161570678075037e-06,
"loss": 0.4051,
"step": 287
},
{
"epoch": 0.29061553985872857,
"grad_norm": 4.26041841506958,
"learning_rate": 7.152911127148893e-06,
"loss": 0.2365,
"step": 288
},
{
"epoch": 0.29162462159434915,
"grad_norm": 3.567761182785034,
"learning_rate": 7.1442123789711495e-06,
"loss": 0.1612,
"step": 289
},
{
"epoch": 0.29263370332996974,
"grad_norm": 4.908886432647705,
"learning_rate": 7.135474541685359e-06,
"loss": 0.2345,
"step": 290
},
{
"epoch": 0.29364278506559033,
"grad_norm": 5.828519821166992,
"learning_rate": 7.126697723921041e-06,
"loss": 0.3646,
"step": 291
},
{
"epoch": 0.2946518668012109,
"grad_norm": 5.6283674240112305,
"learning_rate": 7.117882034792315e-06,
"loss": 0.3539,
"step": 292
},
{
"epoch": 0.2956609485368315,
"grad_norm": 2.5755410194396973,
"learning_rate": 7.109027583896559e-06,
"loss": 0.072,
"step": 293
},
{
"epoch": 0.2966700302724521,
"grad_norm": 5.203614234924316,
"learning_rate": 7.1001344813130355e-06,
"loss": 0.2994,
"step": 294
},
{
"epoch": 0.2976791120080727,
"grad_norm": 4.4349822998046875,
"learning_rate": 7.0912028376015315e-06,
"loss": 0.1816,
"step": 295
},
{
"epoch": 0.29868819374369326,
"grad_norm": 3.131836175918579,
"learning_rate": 7.082232763800982e-06,
"loss": 0.1107,
"step": 296
},
{
"epoch": 0.29969727547931385,
"grad_norm": 7.76261568069458,
"learning_rate": 7.073224371428083e-06,
"loss": 0.3865,
"step": 297
},
{
"epoch": 0.3007063572149344,
"grad_norm": 8.260970115661621,
"learning_rate": 7.064177772475912e-06,
"loss": 0.3415,
"step": 298
},
{
"epoch": 0.30171543895055497,
"grad_norm": 7.274144172668457,
"learning_rate": 7.055093079412536e-06,
"loss": 0.406,
"step": 299
},
{
"epoch": 0.30272452068617556,
"grad_norm": 6.949483394622803,
"learning_rate": 7.04597040517961e-06,
"loss": 0.2363,
"step": 300
},
{
"epoch": 0.30272452068617556,
"eval_accuracy": 0.8422247446083996,
"eval_loss": 0.29199618101119995,
"eval_runtime": 62.2656,
"eval_samples_per_second": 28.298,
"eval_steps_per_second": 3.549,
"step": 300
},
{
"epoch": 0.30373360242179614,
"grad_norm": 11.153295516967773,
"learning_rate": 7.036809863190972e-06,
"loss": 0.367,
"step": 301
},
{
"epoch": 0.30474268415741673,
"grad_norm": 7.238802909851074,
"learning_rate": 7.027611567331239e-06,
"loss": 0.3071,
"step": 302
},
{
"epoch": 0.3057517658930373,
"grad_norm": 5.204061031341553,
"learning_rate": 7.018375631954384e-06,
"loss": 0.1991,
"step": 303
},
{
"epoch": 0.3067608476286579,
"grad_norm": 4.6721930503845215,
"learning_rate": 7.0091021718823185e-06,
"loss": 0.1833,
"step": 304
},
{
"epoch": 0.3077699293642785,
"grad_norm": 4.843674182891846,
"learning_rate": 6.999791302403463e-06,
"loss": 0.2464,
"step": 305
},
{
"epoch": 0.3087790110998991,
"grad_norm": 5.238533973693848,
"learning_rate": 6.990443139271317e-06,
"loss": 0.2551,
"step": 306
},
{
"epoch": 0.30978809283551967,
"grad_norm": 20.923328399658203,
"learning_rate": 6.981057798703019e-06,
"loss": 0.7206,
"step": 307
},
{
"epoch": 0.31079717457114026,
"grad_norm": 4.751791954040527,
"learning_rate": 6.971635397377895e-06,
"loss": 0.2031,
"step": 308
},
{
"epoch": 0.31180625630676084,
"grad_norm": 9.50137996673584,
"learning_rate": 6.962176052436019e-06,
"loss": 0.4901,
"step": 309
},
{
"epoch": 0.31281533804238143,
"grad_norm": 10.486673355102539,
"learning_rate": 6.952679881476746e-06,
"loss": 0.5644,
"step": 310
},
{
"epoch": 0.313824419778002,
"grad_norm": 8.85893726348877,
"learning_rate": 6.94314700255726e-06,
"loss": 0.5932,
"step": 311
},
{
"epoch": 0.3148335015136226,
"grad_norm": 33.33445358276367,
"learning_rate": 6.933577534191101e-06,
"loss": 1.3013,
"step": 312
},
{
"epoch": 0.3158425832492432,
"grad_norm": 29.183631896972656,
"learning_rate": 6.923971595346686e-06,
"loss": 0.4947,
"step": 313
},
{
"epoch": 0.3168516649848638,
"grad_norm": 7.223900318145752,
"learning_rate": 6.914329305445844e-06,
"loss": 0.3102,
"step": 314
},
{
"epoch": 0.31786074672048437,
"grad_norm": 4.909074306488037,
"learning_rate": 6.904650784362317e-06,
"loss": 0.223,
"step": 315
},
{
"epoch": 0.31886982845610495,
"grad_norm": 5.134761333465576,
"learning_rate": 6.89493615242028e-06,
"loss": 0.2572,
"step": 316
},
{
"epoch": 0.31987891019172554,
"grad_norm": 4.783396244049072,
"learning_rate": 6.885185530392841e-06,
"loss": 0.2743,
"step": 317
},
{
"epoch": 0.32088799192734613,
"grad_norm": 6.382033824920654,
"learning_rate": 6.875399039500535e-06,
"loss": 0.3093,
"step": 318
},
{
"epoch": 0.3218970736629667,
"grad_norm": 7.4557318687438965,
"learning_rate": 6.865576801409828e-06,
"loss": 0.4611,
"step": 319
},
{
"epoch": 0.3229061553985873,
"grad_norm": 9.1113862991333,
"learning_rate": 6.855718938231597e-06,
"loss": 0.5509,
"step": 320
},
{
"epoch": 0.3229061553985873,
"eval_accuracy": 0.8422247446083996,
"eval_loss": 0.2692955434322357,
"eval_runtime": 62.6677,
"eval_samples_per_second": 28.117,
"eval_steps_per_second": 3.527,
"step": 320
},
{
"epoch": 0.3239152371342079,
"grad_norm": 4.367307186126709,
"learning_rate": 6.845825572519606e-06,
"loss": 0.2284,
"step": 321
},
{
"epoch": 0.3249243188698285,
"grad_norm": 4.980352878570557,
"learning_rate": 6.8358968272689995e-06,
"loss": 0.2197,
"step": 322
},
{
"epoch": 0.32593340060544906,
"grad_norm": 3.6899354457855225,
"learning_rate": 6.825932825914758e-06,
"loss": 0.1665,
"step": 323
},
{
"epoch": 0.32694248234106965,
"grad_norm": 5.287685394287109,
"learning_rate": 6.815933692330168e-06,
"loss": 0.2254,
"step": 324
},
{
"epoch": 0.32795156407669024,
"grad_norm": 4.298030376434326,
"learning_rate": 6.805899550825285e-06,
"loss": 0.1869,
"step": 325
},
{
"epoch": 0.32896064581231077,
"grad_norm": 6.648251533508301,
"learning_rate": 6.795830526145385e-06,
"loss": 0.438,
"step": 326
},
{
"epoch": 0.32996972754793136,
"grad_norm": 8.279413223266602,
"learning_rate": 6.785726743469415e-06,
"loss": 0.3674,
"step": 327
},
{
"epoch": 0.33097880928355194,
"grad_norm": 8.667623519897461,
"learning_rate": 6.775588328408435e-06,
"loss": 0.2876,
"step": 328
},
{
"epoch": 0.33198789101917253,
"grad_norm": 4.858876705169678,
"learning_rate": 6.765415407004061e-06,
"loss": 0.2051,
"step": 329
},
{
"epoch": 0.3329969727547931,
"grad_norm": 5.615779876708984,
"learning_rate": 6.75520810572689e-06,
"loss": 0.2162,
"step": 330
},
{
"epoch": 0.3340060544904137,
"grad_norm": 5.255247116088867,
"learning_rate": 6.744966551474935e-06,
"loss": 0.2493,
"step": 331
},
{
"epoch": 0.3350151362260343,
"grad_norm": 4.313990116119385,
"learning_rate": 6.734690871572044e-06,
"loss": 0.1736,
"step": 332
},
{
"epoch": 0.3360242179616549,
"grad_norm": 6.205388069152832,
"learning_rate": 6.72438119376632e-06,
"loss": 0.1372,
"step": 333
},
{
"epoch": 0.33703329969727547,
"grad_norm": 4.961912631988525,
"learning_rate": 6.714037646228529e-06,
"loss": 0.1083,
"step": 334
},
{
"epoch": 0.33804238143289606,
"grad_norm": 11.366336822509766,
"learning_rate": 6.703660357550507e-06,
"loss": 0.1794,
"step": 335
},
{
"epoch": 0.33905146316851664,
"grad_norm": 36.39216613769531,
"learning_rate": 6.693249456743565e-06,
"loss": 0.8015,
"step": 336
},
{
"epoch": 0.34006054490413723,
"grad_norm": 26.968290328979492,
"learning_rate": 6.682805073236883e-06,
"loss": 0.5109,
"step": 337
},
{
"epoch": 0.3410696266397578,
"grad_norm": 12.663142204284668,
"learning_rate": 6.672327336875899e-06,
"loss": 0.5224,
"step": 338
},
{
"epoch": 0.3420787083753784,
"grad_norm": 7.804121017456055,
"learning_rate": 6.661816377920695e-06,
"loss": 0.2021,
"step": 339
},
{
"epoch": 0.343087790110999,
"grad_norm": 8.033084869384766,
"learning_rate": 6.651272327044385e-06,
"loss": 0.2942,
"step": 340
},
{
"epoch": 0.343087790110999,
"eval_accuracy": 0.840522133938706,
"eval_loss": 0.2765989303588867,
"eval_runtime": 62.3633,
"eval_samples_per_second": 28.254,
"eval_steps_per_second": 3.544,
"step": 340
},
{
"epoch": 0.3440968718466196,
"grad_norm": 6.994205951690674,
"learning_rate": 6.640695315331476e-06,
"loss": 0.422,
"step": 341
},
{
"epoch": 0.34510595358224017,
"grad_norm": 6.707622051239014,
"learning_rate": 6.630085474276255e-06,
"loss": 0.4127,
"step": 342
},
{
"epoch": 0.34611503531786075,
"grad_norm": 2.8467178344726562,
"learning_rate": 6.619442935781141e-06,
"loss": 0.0992,
"step": 343
},
{
"epoch": 0.34712411705348134,
"grad_norm": 3.68339467048645,
"learning_rate": 6.608767832155051e-06,
"loss": 0.1661,
"step": 344
},
{
"epoch": 0.3481331987891019,
"grad_norm": 4.811255931854248,
"learning_rate": 6.598060296111755e-06,
"loss": 0.1805,
"step": 345
},
{
"epoch": 0.3491422805247225,
"grad_norm": 7.682583332061768,
"learning_rate": 6.58732046076823e-06,
"loss": 0.4899,
"step": 346
},
{
"epoch": 0.3501513622603431,
"grad_norm": 4.097416400909424,
"learning_rate": 6.5765484596429905e-06,
"loss": 0.1297,
"step": 347
},
{
"epoch": 0.3511604439959637,
"grad_norm": 6.541982650756836,
"learning_rate": 6.565744426654449e-06,
"loss": 0.2545,
"step": 348
},
{
"epoch": 0.3521695257315843,
"grad_norm": 6.773824214935303,
"learning_rate": 6.554908496119232e-06,
"loss": 0.2685,
"step": 349
},
{
"epoch": 0.35317860746720486,
"grad_norm": 5.670194625854492,
"learning_rate": 6.544040802750526e-06,
"loss": 0.3245,
"step": 350
},
{
"epoch": 0.35418768920282545,
"grad_norm": 5.502919673919678,
"learning_rate": 6.5331414816563914e-06,
"loss": 0.282,
"step": 351
},
{
"epoch": 0.35519677093844604,
"grad_norm": 6.195550918579102,
"learning_rate": 6.52221066833809e-06,
"loss": 0.3791,
"step": 352
},
{
"epoch": 0.3562058526740666,
"grad_norm": 6.497583866119385,
"learning_rate": 6.511248498688395e-06,
"loss": 0.3853,
"step": 353
},
{
"epoch": 0.35721493440968716,
"grad_norm": 4.045083045959473,
"learning_rate": 6.500255108989904e-06,
"loss": 0.1618,
"step": 354
},
{
"epoch": 0.35822401614530774,
"grad_norm": 4.328484058380127,
"learning_rate": 6.489230635913346e-06,
"loss": 0.2393,
"step": 355
},
{
"epoch": 0.35923309788092833,
"grad_norm": 7.9082183837890625,
"learning_rate": 6.478175216515884e-06,
"loss": 0.2272,
"step": 356
},
{
"epoch": 0.3602421796165489,
"grad_norm": 3.21086049079895,
"learning_rate": 6.467088988239402e-06,
"loss": 0.0689,
"step": 357
},
{
"epoch": 0.3612512613521695,
"grad_norm": 5.211169242858887,
"learning_rate": 6.455972088908807e-06,
"loss": 0.1841,
"step": 358
},
{
"epoch": 0.3622603430877901,
"grad_norm": 5.808556079864502,
"learning_rate": 6.444824656730311e-06,
"loss": 0.1076,
"step": 359
},
{
"epoch": 0.3632694248234107,
"grad_norm": 16.821155548095703,
"learning_rate": 6.43364683028971e-06,
"loss": 0.4606,
"step": 360
},
{
"epoch": 0.3632694248234107,
"eval_accuracy": 0.8484676503972758,
"eval_loss": 0.3792904019355774,
"eval_runtime": 62.8768,
"eval_samples_per_second": 28.023,
"eval_steps_per_second": 3.515,
"step": 360
},
{
"epoch": 0.36427850655903127,
"grad_norm": 12.411088943481445,
"learning_rate": 6.422438748550666e-06,
"loss": 0.2406,
"step": 361
},
{
"epoch": 0.36528758829465185,
"grad_norm": 13.036988258361816,
"learning_rate": 6.411200550852978e-06,
"loss": 0.3551,
"step": 362
},
{
"epoch": 0.36629667003027244,
"grad_norm": 13.768930435180664,
"learning_rate": 6.3999323769108485e-06,
"loss": 0.3561,
"step": 363
},
{
"epoch": 0.36730575176589303,
"grad_norm": 4.591919422149658,
"learning_rate": 6.388634366811145e-06,
"loss": 0.1434,
"step": 364
},
{
"epoch": 0.3683148335015136,
"grad_norm": 9.278558731079102,
"learning_rate": 6.377306661011664e-06,
"loss": 0.445,
"step": 365
},
{
"epoch": 0.3693239152371342,
"grad_norm": 12.307679176330566,
"learning_rate": 6.365949400339378e-06,
"loss": 0.3966,
"step": 366
},
{
"epoch": 0.3703329969727548,
"grad_norm": 5.971735000610352,
"learning_rate": 6.354562725988691e-06,
"loss": 0.2508,
"step": 367
},
{
"epoch": 0.3713420787083754,
"grad_norm": 5.919165134429932,
"learning_rate": 6.343146779519681e-06,
"loss": 0.287,
"step": 368
},
{
"epoch": 0.37235116044399597,
"grad_norm": 4.112932205200195,
"learning_rate": 6.331701702856335e-06,
"loss": 0.1366,
"step": 369
},
{
"epoch": 0.37336024217961655,
"grad_norm": 9.273783683776855,
"learning_rate": 6.3202276382847925e-06,
"loss": 0.4608,
"step": 370
},
{
"epoch": 0.37436932391523714,
"grad_norm": 6.013513088226318,
"learning_rate": 6.308724728451572e-06,
"loss": 0.1759,
"step": 371
},
{
"epoch": 0.3753784056508577,
"grad_norm": 4.636185169219971,
"learning_rate": 6.2971931163618e-06,
"loss": 0.1464,
"step": 372
},
{
"epoch": 0.3763874873864783,
"grad_norm": 9.652426719665527,
"learning_rate": 6.285632945377429e-06,
"loss": 0.3786,
"step": 373
},
{
"epoch": 0.3773965691220989,
"grad_norm": 4.286848545074463,
"learning_rate": 6.274044359215461e-06,
"loss": 0.1569,
"step": 374
},
{
"epoch": 0.3784056508577195,
"grad_norm": 6.9270853996276855,
"learning_rate": 6.2624275019461545e-06,
"loss": 0.3606,
"step": 375
},
{
"epoch": 0.3794147325933401,
"grad_norm": 4.69766092300415,
"learning_rate": 6.250782517991241e-06,
"loss": 0.1682,
"step": 376
},
{
"epoch": 0.38042381432896066,
"grad_norm": 4.445621967315674,
"learning_rate": 6.239109552122122e-06,
"loss": 0.1959,
"step": 377
},
{
"epoch": 0.38143289606458125,
"grad_norm": 8.952390670776367,
"learning_rate": 6.227408749458073e-06,
"loss": 0.3696,
"step": 378
},
{
"epoch": 0.38244197780020184,
"grad_norm": 8.195367813110352,
"learning_rate": 6.215680255464441e-06,
"loss": 0.3198,
"step": 379
},
{
"epoch": 0.3834510595358224,
"grad_norm": 14.417852401733398,
"learning_rate": 6.203924215950831e-06,
"loss": 0.6757,
"step": 380
},
{
"epoch": 0.3834510595358224,
"eval_accuracy": 0.8513053348467651,
"eval_loss": 0.29195961356163025,
"eval_runtime": 62.6263,
"eval_samples_per_second": 28.135,
"eval_steps_per_second": 3.529,
"step": 380
},
{
"epoch": 0.384460141271443,
"grad_norm": 10.577422142028809,
"learning_rate": 6.192140777069298e-06,
"loss": 0.3435,
"step": 381
},
{
"epoch": 0.3854692230070636,
"grad_norm": 5.178398132324219,
"learning_rate": 6.180330085312526e-06,
"loss": 0.2577,
"step": 382
},
{
"epoch": 0.38647830474268413,
"grad_norm": 5.634678363800049,
"learning_rate": 6.168492287512014e-06,
"loss": 0.2692,
"step": 383
},
{
"epoch": 0.3874873864783047,
"grad_norm": 7.128296852111816,
"learning_rate": 6.156627530836239e-06,
"loss": 0.2777,
"step": 384
},
{
"epoch": 0.3884964682139253,
"grad_norm": 6.315018653869629,
"learning_rate": 6.144735962788837e-06,
"loss": 0.3489,
"step": 385
},
{
"epoch": 0.3895055499495459,
"grad_norm": 3.5851023197174072,
"learning_rate": 6.132817731206765e-06,
"loss": 0.1926,
"step": 386
},
{
"epoch": 0.3905146316851665,
"grad_norm": 10.211600303649902,
"learning_rate": 6.120872984258462e-06,
"loss": 0.4683,
"step": 387
},
{
"epoch": 0.39152371342078707,
"grad_norm": 12.310287475585938,
"learning_rate": 6.108901870442009e-06,
"loss": 0.3685,
"step": 388
},
{
"epoch": 0.39253279515640765,
"grad_norm": 6.385642051696777,
"learning_rate": 6.096904538583283e-06,
"loss": 0.2559,
"step": 389
},
{
"epoch": 0.39354187689202824,
"grad_norm": 7.468835830688477,
"learning_rate": 6.084881137834103e-06,
"loss": 0.4374,
"step": 390
},
{
"epoch": 0.39455095862764883,
"grad_norm": 2.5986368656158447,
"learning_rate": 6.072831817670382e-06,
"loss": 0.0992,
"step": 391
},
{
"epoch": 0.3955600403632694,
"grad_norm": 2.776925802230835,
"learning_rate": 6.060756727890262e-06,
"loss": 0.1379,
"step": 392
},
{
"epoch": 0.39656912209889,
"grad_norm": 5.025798797607422,
"learning_rate": 6.04865601861226e-06,
"loss": 0.37,
"step": 393
},
{
"epoch": 0.3975782038345106,
"grad_norm": 4.2647857666015625,
"learning_rate": 6.036529840273388e-06,
"loss": 0.2485,
"step": 394
},
{
"epoch": 0.3985872855701312,
"grad_norm": 3.0298309326171875,
"learning_rate": 6.0243783436273e-06,
"loss": 0.1594,
"step": 395
},
{
"epoch": 0.39959636730575177,
"grad_norm": 5.6464338302612305,
"learning_rate": 6.012201679742408e-06,
"loss": 0.3974,
"step": 396
},
{
"epoch": 0.40060544904137235,
"grad_norm": 4.060618877410889,
"learning_rate": 6e-06,
"loss": 0.2286,
"step": 397
},
{
"epoch": 0.40161453077699294,
"grad_norm": 4.622409820556641,
"learning_rate": 5.987773456092368e-06,
"loss": 0.315,
"step": 398
},
{
"epoch": 0.4026236125126135,
"grad_norm": 5.471047401428223,
"learning_rate": 5.9755222000209165e-06,
"loss": 0.4354,
"step": 399
},
{
"epoch": 0.4036326942482341,
"grad_norm": 3.929957628250122,
"learning_rate": 5.963246384094273e-06,
"loss": 0.2409,
"step": 400
},
{
"epoch": 0.4036326942482341,
"eval_accuracy": 0.8376844494892168,
"eval_loss": 0.28469741344451904,
"eval_runtime": 62.4393,
"eval_samples_per_second": 28.219,
"eval_steps_per_second": 3.539,
"step": 400
}
],
"logging_steps": 1,
"max_steps": 991,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 20,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}