{
"best_metric": 1.1496680974960327,
"best_model_checkpoint": "square_run_min_loss/checkpoint-986",
"epoch": 35.0,
"eval_steps": 500,
"global_step": 2030,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.034482758620689655,
"grad_norm": 2.575118064880371,
"learning_rate": 9.852216748768474e-07,
"loss": 1.9523,
"step": 2
},
{
"epoch": 0.06896551724137931,
"grad_norm": 4.115417003631592,
"learning_rate": 1.970443349753695e-06,
"loss": 1.897,
"step": 4
},
{
"epoch": 0.10344827586206896,
"grad_norm": 2.1270015239715576,
"learning_rate": 2.955665024630542e-06,
"loss": 1.9272,
"step": 6
},
{
"epoch": 0.13793103448275862,
"grad_norm": 3.1353254318237305,
"learning_rate": 3.94088669950739e-06,
"loss": 1.9155,
"step": 8
},
{
"epoch": 0.1724137931034483,
"grad_norm": 2.0807693004608154,
"learning_rate": 4.926108374384237e-06,
"loss": 1.9557,
"step": 10
},
{
"epoch": 0.20689655172413793,
"grad_norm": 2.7346973419189453,
"learning_rate": 5.911330049261084e-06,
"loss": 1.9696,
"step": 12
},
{
"epoch": 0.2413793103448276,
"grad_norm": 2.41186261177063,
"learning_rate": 6.896551724137932e-06,
"loss": 1.927,
"step": 14
},
{
"epoch": 0.27586206896551724,
"grad_norm": 3.0956790447235107,
"learning_rate": 7.88177339901478e-06,
"loss": 1.9232,
"step": 16
},
{
"epoch": 0.3103448275862069,
"grad_norm": 2.850667953491211,
"learning_rate": 8.866995073891627e-06,
"loss": 1.9046,
"step": 18
},
{
"epoch": 0.3448275862068966,
"grad_norm": 2.605441093444824,
"learning_rate": 9.852216748768475e-06,
"loss": 1.904,
"step": 20
},
{
"epoch": 0.3793103448275862,
"grad_norm": 2.4900379180908203,
"learning_rate": 1.0837438423645322e-05,
"loss": 1.8945,
"step": 22
},
{
"epoch": 0.41379310344827586,
"grad_norm": 2.4673266410827637,
"learning_rate": 1.1822660098522168e-05,
"loss": 2.0077,
"step": 24
},
{
"epoch": 0.4482758620689655,
"grad_norm": 2.4353814125061035,
"learning_rate": 1.2807881773399016e-05,
"loss": 2.0005,
"step": 26
},
{
"epoch": 0.4827586206896552,
"grad_norm": 2.159104824066162,
"learning_rate": 1.3793103448275863e-05,
"loss": 1.8958,
"step": 28
},
{
"epoch": 0.5172413793103449,
"grad_norm": 2.747079849243164,
"learning_rate": 1.4778325123152711e-05,
"loss": 2.0059,
"step": 30
},
{
"epoch": 0.5517241379310345,
"grad_norm": 1.8319206237792969,
"learning_rate": 1.576354679802956e-05,
"loss": 1.8849,
"step": 32
},
{
"epoch": 0.5862068965517241,
"grad_norm": 2.87062668800354,
"learning_rate": 1.6748768472906403e-05,
"loss": 1.8649,
"step": 34
},
{
"epoch": 0.6206896551724138,
"grad_norm": 3.053698778152466,
"learning_rate": 1.7733990147783254e-05,
"loss": 1.8314,
"step": 36
},
{
"epoch": 0.6551724137931034,
"grad_norm": 2.7139945030212402,
"learning_rate": 1.8719211822660098e-05,
"loss": 1.8343,
"step": 38
},
{
"epoch": 0.6896551724137931,
"grad_norm": 2.85752534866333,
"learning_rate": 1.970443349753695e-05,
"loss": 1.9139,
"step": 40
},
{
"epoch": 0.7241379310344828,
"grad_norm": 2.415708303451538,
"learning_rate": 2.0689655172413793e-05,
"loss": 1.8513,
"step": 42
},
{
"epoch": 0.7586206896551724,
"grad_norm": 2.8618996143341064,
"learning_rate": 2.1674876847290644e-05,
"loss": 1.9395,
"step": 44
},
{
"epoch": 0.7931034482758621,
"grad_norm": 2.4992380142211914,
"learning_rate": 2.266009852216749e-05,
"loss": 1.9028,
"step": 46
},
{
"epoch": 0.8275862068965517,
"grad_norm": 1.5736284255981445,
"learning_rate": 2.3645320197044336e-05,
"loss": 1.8803,
"step": 48
},
{
"epoch": 0.8620689655172413,
"grad_norm": 2.486845016479492,
"learning_rate": 2.4630541871921184e-05,
"loss": 1.8354,
"step": 50
},
{
"epoch": 0.896551724137931,
"grad_norm": 2.764396905899048,
"learning_rate": 2.561576354679803e-05,
"loss": 1.8192,
"step": 52
},
{
"epoch": 0.9310344827586207,
"grad_norm": 1.9337031841278076,
"learning_rate": 2.660098522167488e-05,
"loss": 1.9035,
"step": 54
},
{
"epoch": 0.9655172413793104,
"grad_norm": 3.11140775680542,
"learning_rate": 2.7586206896551727e-05,
"loss": 1.9185,
"step": 56
},
{
"epoch": 1.0,
"grad_norm": 2.8587818145751953,
"learning_rate": 2.857142857142857e-05,
"loss": 1.934,
"step": 58
},
{
"epoch": 1.0,
"eval_accuracy": 0.20454545454545456,
"eval_f1_macro": 0.06638977430052481,
"eval_f1_micro": 0.20454545454545456,
"eval_f1_weighted": 0.09010661039058604,
"eval_loss": 1.877989649772644,
"eval_precision_macro": 0.17075892857142858,
"eval_precision_micro": 0.20454545454545456,
"eval_precision_weighted": 0.24153645833333334,
"eval_recall_macro": 0.15343915343915343,
"eval_recall_micro": 0.20454545454545456,
"eval_recall_weighted": 0.20454545454545456,
"eval_runtime": 1.8422,
"eval_samples_per_second": 71.652,
"eval_steps_per_second": 9.228,
"step": 58
},
{
"epoch": 1.0344827586206897,
"grad_norm": 2.1993751525878906,
"learning_rate": 2.9556650246305422e-05,
"loss": 1.8763,
"step": 60
},
{
"epoch": 1.0689655172413792,
"grad_norm": 3.214564561843872,
"learning_rate": 3.0541871921182266e-05,
"loss": 1.9158,
"step": 62
},
{
"epoch": 1.103448275862069,
"grad_norm": 2.2450172901153564,
"learning_rate": 3.152709359605912e-05,
"loss": 1.7313,
"step": 64
},
{
"epoch": 1.1379310344827587,
"grad_norm": 3.495370388031006,
"learning_rate": 3.251231527093596e-05,
"loss": 1.77,
"step": 66
},
{
"epoch": 1.1724137931034484,
"grad_norm": 2.372056007385254,
"learning_rate": 3.3497536945812806e-05,
"loss": 1.8228,
"step": 68
},
{
"epoch": 1.206896551724138,
"grad_norm": 2.4576425552368164,
"learning_rate": 3.4482758620689657e-05,
"loss": 1.9171,
"step": 70
},
{
"epoch": 1.2413793103448276,
"grad_norm": 2.064105987548828,
"learning_rate": 3.546798029556651e-05,
"loss": 1.9479,
"step": 72
},
{
"epoch": 1.2758620689655173,
"grad_norm": 1.6881994009017944,
"learning_rate": 3.645320197044335e-05,
"loss": 2.0201,
"step": 74
},
{
"epoch": 1.3103448275862069,
"grad_norm": 2.1131479740142822,
"learning_rate": 3.7438423645320196e-05,
"loss": 1.7073,
"step": 76
},
{
"epoch": 1.3448275862068966,
"grad_norm": 2.1655850410461426,
"learning_rate": 3.842364532019704e-05,
"loss": 1.8294,
"step": 78
},
{
"epoch": 1.3793103448275863,
"grad_norm": 2.9730122089385986,
"learning_rate": 3.94088669950739e-05,
"loss": 1.9662,
"step": 80
},
{
"epoch": 1.4137931034482758,
"grad_norm": 2.5296716690063477,
"learning_rate": 4.039408866995074e-05,
"loss": 1.8582,
"step": 82
},
{
"epoch": 1.4482758620689655,
"grad_norm": 2.682340145111084,
"learning_rate": 4.1379310344827587e-05,
"loss": 1.9321,
"step": 84
},
{
"epoch": 1.4827586206896552,
"grad_norm": 2.764066457748413,
"learning_rate": 4.236453201970443e-05,
"loss": 1.8595,
"step": 86
},
{
"epoch": 1.5172413793103448,
"grad_norm": 3.0940446853637695,
"learning_rate": 4.334975369458129e-05,
"loss": 2.0041,
"step": 88
},
{
"epoch": 1.5517241379310345,
"grad_norm": 2.3535633087158203,
"learning_rate": 4.433497536945813e-05,
"loss": 2.0397,
"step": 90
},
{
"epoch": 1.5862068965517242,
"grad_norm": 2.1643190383911133,
"learning_rate": 4.532019704433498e-05,
"loss": 1.8423,
"step": 92
},
{
"epoch": 1.6206896551724137,
"grad_norm": 2.2240335941314697,
"learning_rate": 4.630541871921182e-05,
"loss": 1.8856,
"step": 94
},
{
"epoch": 1.6551724137931034,
"grad_norm": 1.8210374116897583,
"learning_rate": 4.729064039408867e-05,
"loss": 1.8442,
"step": 96
},
{
"epoch": 1.6896551724137931,
"grad_norm": 2.3819000720977783,
"learning_rate": 4.827586206896552e-05,
"loss": 1.9759,
"step": 98
},
{
"epoch": 1.7241379310344827,
"grad_norm": 2.209657907485962,
"learning_rate": 4.926108374384237e-05,
"loss": 1.8977,
"step": 100
},
{
"epoch": 1.7586206896551724,
"grad_norm": 1.8794820308685303,
"learning_rate": 5.024630541871922e-05,
"loss": 1.8373,
"step": 102
},
{
"epoch": 1.793103448275862,
"grad_norm": 1.9075384140014648,
"learning_rate": 5.123152709359606e-05,
"loss": 1.9054,
"step": 104
},
{
"epoch": 1.8275862068965516,
"grad_norm": 2.074863910675049,
"learning_rate": 5.2216748768472914e-05,
"loss": 1.9374,
"step": 106
},
{
"epoch": 1.8620689655172413,
"grad_norm": 3.287247657775879,
"learning_rate": 5.320197044334976e-05,
"loss": 1.9644,
"step": 108
},
{
"epoch": 1.896551724137931,
"grad_norm": 2.7543118000030518,
"learning_rate": 5.41871921182266e-05,
"loss": 1.8498,
"step": 110
},
{
"epoch": 1.9310344827586206,
"grad_norm": 2.3745720386505127,
"learning_rate": 5.517241379310345e-05,
"loss": 1.8688,
"step": 112
},
{
"epoch": 1.9655172413793105,
"grad_norm": 1.6657403707504272,
"learning_rate": 5.61576354679803e-05,
"loss": 1.8744,
"step": 114
},
{
"epoch": 2.0,
"grad_norm": 1.9578406810760498,
"learning_rate": 5.714285714285714e-05,
"loss": 1.8145,
"step": 116
},
{
"epoch": 2.0,
"eval_accuracy": 0.17424242424242425,
"eval_f1_macro": 0.06911375661375661,
"eval_f1_micro": 0.17424242424242425,
"eval_f1_weighted": 0.07554713804713804,
"eval_loss": 1.882804274559021,
"eval_precision_macro": 0.06077694235588972,
"eval_precision_micro": 0.17424242424242425,
"eval_precision_weighted": 0.06578947368421052,
"eval_recall_macro": 0.1574829931972789,
"eval_recall_micro": 0.17424242424242425,
"eval_recall_weighted": 0.17424242424242425,
"eval_runtime": 1.6622,
"eval_samples_per_second": 79.414,
"eval_steps_per_second": 10.228,
"step": 116
},
{
"epoch": 2.0344827586206895,
"grad_norm": 3.171048879623413,
"learning_rate": 5.8128078817733986e-05,
"loss": 1.9026,
"step": 118
},
{
"epoch": 2.0689655172413794,
"grad_norm": 2.3211982250213623,
"learning_rate": 5.9113300492610844e-05,
"loss": 1.9666,
"step": 120
},
{
"epoch": 2.103448275862069,
"grad_norm": 2.0532777309417725,
"learning_rate": 6.0098522167487695e-05,
"loss": 1.8683,
"step": 122
},
{
"epoch": 2.1379310344827585,
"grad_norm": 2.8642830848693848,
"learning_rate": 6.108374384236453e-05,
"loss": 1.8174,
"step": 124
},
{
"epoch": 2.1724137931034484,
"grad_norm": 2.02150821685791,
"learning_rate": 6.206896551724138e-05,
"loss": 1.837,
"step": 126
},
{
"epoch": 2.206896551724138,
"grad_norm": 2.0854902267456055,
"learning_rate": 6.305418719211823e-05,
"loss": 1.8358,
"step": 128
},
{
"epoch": 2.2413793103448274,
"grad_norm": 2.257364511489868,
"learning_rate": 6.403940886699507e-05,
"loss": 1.7473,
"step": 130
},
{
"epoch": 2.2758620689655173,
"grad_norm": 2.777860641479492,
"learning_rate": 6.502463054187192e-05,
"loss": 2.0573,
"step": 132
},
{
"epoch": 2.310344827586207,
"grad_norm": 1.8564225435256958,
"learning_rate": 6.600985221674877e-05,
"loss": 1.7209,
"step": 134
},
{
"epoch": 2.344827586206897,
"grad_norm": 1.9192124605178833,
"learning_rate": 6.699507389162561e-05,
"loss": 1.8069,
"step": 136
},
{
"epoch": 2.3793103448275863,
"grad_norm": 2.5344839096069336,
"learning_rate": 6.798029556650246e-05,
"loss": 1.9476,
"step": 138
},
{
"epoch": 2.413793103448276,
"grad_norm": 2.2182462215423584,
"learning_rate": 6.896551724137931e-05,
"loss": 1.7292,
"step": 140
},
{
"epoch": 2.4482758620689653,
"grad_norm": 3.1175408363342285,
"learning_rate": 6.995073891625616e-05,
"loss": 1.8131,
"step": 142
},
{
"epoch": 2.4827586206896552,
"grad_norm": 2.5868771076202393,
"learning_rate": 7.093596059113302e-05,
"loss": 1.8445,
"step": 144
},
{
"epoch": 2.5172413793103448,
"grad_norm": 2.509629726409912,
"learning_rate": 7.192118226600985e-05,
"loss": 1.7201,
"step": 146
},
{
"epoch": 2.5517241379310347,
"grad_norm": 1.862570881843567,
"learning_rate": 7.29064039408867e-05,
"loss": 1.6859,
"step": 148
},
{
"epoch": 2.586206896551724,
"grad_norm": 2.31773042678833,
"learning_rate": 7.389162561576355e-05,
"loss": 1.754,
"step": 150
},
{
"epoch": 2.6206896551724137,
"grad_norm": 2.858905076980591,
"learning_rate": 7.487684729064039e-05,
"loss": 1.8169,
"step": 152
},
{
"epoch": 2.655172413793103,
"grad_norm": 4.044278621673584,
"learning_rate": 7.586206896551724e-05,
"loss": 1.871,
"step": 154
},
{
"epoch": 2.689655172413793,
"grad_norm": 2.7265326976776123,
"learning_rate": 7.684729064039408e-05,
"loss": 1.8314,
"step": 156
},
{
"epoch": 2.7241379310344827,
"grad_norm": 2.775766611099243,
"learning_rate": 7.783251231527095e-05,
"loss": 1.8483,
"step": 158
},
{
"epoch": 2.7586206896551726,
"grad_norm": 3.1784214973449707,
"learning_rate": 7.88177339901478e-05,
"loss": 1.7737,
"step": 160
},
{
"epoch": 2.793103448275862,
"grad_norm": 2.7301290035247803,
"learning_rate": 7.980295566502463e-05,
"loss": 1.8225,
"step": 162
},
{
"epoch": 2.8275862068965516,
"grad_norm": 3.0388286113739014,
"learning_rate": 8.078817733990148e-05,
"loss": 1.7341,
"step": 164
},
{
"epoch": 2.862068965517241,
"grad_norm": 2.806058883666992,
"learning_rate": 8.177339901477834e-05,
"loss": 1.9648,
"step": 166
},
{
"epoch": 2.896551724137931,
"grad_norm": 1.8368113040924072,
"learning_rate": 8.275862068965517e-05,
"loss": 1.7451,
"step": 168
},
{
"epoch": 2.9310344827586206,
"grad_norm": 2.3558056354522705,
"learning_rate": 8.374384236453202e-05,
"loss": 1.8633,
"step": 170
},
{
"epoch": 2.9655172413793105,
"grad_norm": 3.564084053039551,
"learning_rate": 8.472906403940886e-05,
"loss": 1.8561,
"step": 172
},
{
"epoch": 3.0,
"grad_norm": 2.9199440479278564,
"learning_rate": 8.571428571428571e-05,
"loss": 1.8527,
"step": 174
},
{
"epoch": 3.0,
"eval_accuracy": 0.3787878787878788,
"eval_f1_macro": 0.25029652886795745,
"eval_f1_micro": 0.3787878787878788,
"eval_f1_weighted": 0.30532939282939286,
"eval_loss": 1.7130813598632812,
"eval_precision_macro": 0.25725108225108223,
"eval_precision_micro": 0.3787878787878788,
"eval_precision_weighted": 0.30621556473829203,
"eval_recall_macro": 0.3094028722600151,
"eval_recall_micro": 0.3787878787878788,
"eval_recall_weighted": 0.3787878787878788,
"eval_runtime": 1.75,
"eval_samples_per_second": 75.429,
"eval_steps_per_second": 9.714,
"step": 174
},
{
"epoch": 3.0344827586206895,
"grad_norm": 2.774258613586426,
"learning_rate": 8.669950738916258e-05,
"loss": 1.8599,
"step": 176
},
{
"epoch": 3.0689655172413794,
"grad_norm": 2.9602231979370117,
"learning_rate": 8.768472906403941e-05,
"loss": 1.736,
"step": 178
},
{
"epoch": 3.103448275862069,
"grad_norm": 2.8377699851989746,
"learning_rate": 8.866995073891627e-05,
"loss": 1.5466,
"step": 180
},
{
"epoch": 3.1379310344827585,
"grad_norm": 2.192164182662964,
"learning_rate": 8.96551724137931e-05,
"loss": 1.7214,
"step": 182
},
{
"epoch": 3.1724137931034484,
"grad_norm": 3.274397134780884,
"learning_rate": 9.064039408866995e-05,
"loss": 1.7141,
"step": 184
},
{
"epoch": 3.206896551724138,
"grad_norm": 2.574033737182617,
"learning_rate": 9.16256157635468e-05,
"loss": 1.5941,
"step": 186
},
{
"epoch": 3.2413793103448274,
"grad_norm": 2.457569122314453,
"learning_rate": 9.261083743842364e-05,
"loss": 1.7504,
"step": 188
},
{
"epoch": 3.2758620689655173,
"grad_norm": 3.9681930541992188,
"learning_rate": 9.35960591133005e-05,
"loss": 1.7611,
"step": 190
},
{
"epoch": 3.310344827586207,
"grad_norm": 3.0164175033569336,
"learning_rate": 9.458128078817734e-05,
"loss": 1.8015,
"step": 192
},
{
"epoch": 3.344827586206897,
"grad_norm": 2.874098539352417,
"learning_rate": 9.55665024630542e-05,
"loss": 1.7201,
"step": 194
},
{
"epoch": 3.3793103448275863,
"grad_norm": 2.657230854034424,
"learning_rate": 9.655172413793105e-05,
"loss": 1.7127,
"step": 196
},
{
"epoch": 3.413793103448276,
"grad_norm": 3.1468231678009033,
"learning_rate": 9.753694581280788e-05,
"loss": 1.6399,
"step": 198
},
{
"epoch": 3.4482758620689653,
"grad_norm": 2.7480523586273193,
"learning_rate": 9.852216748768474e-05,
"loss": 1.5484,
"step": 200
},
{
"epoch": 3.4827586206896552,
"grad_norm": 3.981072187423706,
"learning_rate": 9.950738916256159e-05,
"loss": 1.9724,
"step": 202
},
{
"epoch": 3.5172413793103448,
"grad_norm": 3.0168025493621826,
"learning_rate": 9.994526546250684e-05,
"loss": 1.7675,
"step": 204
},
{
"epoch": 3.5517241379310347,
"grad_norm": 3.0766713619232178,
"learning_rate": 9.983579638752053e-05,
"loss": 1.7034,
"step": 206
},
{
"epoch": 3.586206896551724,
"grad_norm": 2.499846935272217,
"learning_rate": 9.972632731253421e-05,
"loss": 1.7265,
"step": 208
},
{
"epoch": 3.6206896551724137,
"grad_norm": 2.9722824096679688,
"learning_rate": 9.96168582375479e-05,
"loss": 1.7595,
"step": 210
},
{
"epoch": 3.655172413793103,
"grad_norm": 3.6022815704345703,
"learning_rate": 9.950738916256159e-05,
"loss": 2.0972,
"step": 212
},
{
"epoch": 3.689655172413793,
"grad_norm": 2.596907377243042,
"learning_rate": 9.939792008757527e-05,
"loss": 1.6429,
"step": 214
},
{
"epoch": 3.7241379310344827,
"grad_norm": 4.036109924316406,
"learning_rate": 9.928845101258894e-05,
"loss": 1.7363,
"step": 216
},
{
"epoch": 3.7586206896551726,
"grad_norm": 3.0401430130004883,
"learning_rate": 9.917898193760263e-05,
"loss": 1.7639,
"step": 218
},
{
"epoch": 3.793103448275862,
"grad_norm": 3.6351616382598877,
"learning_rate": 9.906951286261632e-05,
"loss": 1.8102,
"step": 220
},
{
"epoch": 3.8275862068965516,
"grad_norm": 1.9515084028244019,
"learning_rate": 9.896004378762999e-05,
"loss": 1.6084,
"step": 222
},
{
"epoch": 3.862068965517241,
"grad_norm": 3.5374197959899902,
"learning_rate": 9.885057471264369e-05,
"loss": 2.0067,
"step": 224
},
{
"epoch": 3.896551724137931,
"grad_norm": 3.3599019050598145,
"learning_rate": 9.874110563765738e-05,
"loss": 1.7673,
"step": 226
},
{
"epoch": 3.9310344827586206,
"grad_norm": 2.432833194732666,
"learning_rate": 9.863163656267105e-05,
"loss": 1.9012,
"step": 228
},
{
"epoch": 3.9655172413793105,
"grad_norm": 1.9806126356124878,
"learning_rate": 9.852216748768474e-05,
"loss": 1.862,
"step": 230
},
{
"epoch": 4.0,
"grad_norm": 3.215442419052124,
"learning_rate": 9.841269841269841e-05,
"loss": 1.6734,
"step": 232
},
{
"epoch": 4.0,
"eval_accuracy": 0.2803030303030303,
"eval_f1_macro": 0.1620519748179323,
"eval_f1_micro": 0.2803030303030303,
"eval_f1_weighted": 0.20874649385287686,
"eval_loss": 1.794013261795044,
"eval_precision_macro": 0.21449586012692806,
"eval_precision_micro": 0.2803030303030303,
"eval_precision_weighted": 0.2623532322340795,
"eval_recall_macro": 0.2075585789871504,
"eval_recall_micro": 0.2803030303030303,
"eval_recall_weighted": 0.2803030303030303,
"eval_runtime": 1.7188,
"eval_samples_per_second": 76.796,
"eval_steps_per_second": 9.89,
"step": 232
},
{
"epoch": 4.0344827586206895,
"grad_norm": 1.8983070850372314,
"learning_rate": 9.83032293377121e-05,
"loss": 1.8275,
"step": 234
},
{
"epoch": 4.068965517241379,
"grad_norm": 2.512375593185425,
"learning_rate": 9.819376026272578e-05,
"loss": 1.8915,
"step": 236
},
{
"epoch": 4.103448275862069,
"grad_norm": 2.564410924911499,
"learning_rate": 9.808429118773947e-05,
"loss": 1.6557,
"step": 238
},
{
"epoch": 4.137931034482759,
"grad_norm": 4.520391464233398,
"learning_rate": 9.797482211275315e-05,
"loss": 1.6821,
"step": 240
},
{
"epoch": 4.172413793103448,
"grad_norm": 2.6028499603271484,
"learning_rate": 9.786535303776684e-05,
"loss": 1.5911,
"step": 242
},
{
"epoch": 4.206896551724138,
"grad_norm": 2.6997692584991455,
"learning_rate": 9.775588396278051e-05,
"loss": 1.6449,
"step": 244
},
{
"epoch": 4.241379310344827,
"grad_norm": 5.493044376373291,
"learning_rate": 9.76464148877942e-05,
"loss": 1.6607,
"step": 246
},
{
"epoch": 4.275862068965517,
"grad_norm": 3.518596887588501,
"learning_rate": 9.753694581280788e-05,
"loss": 1.7582,
"step": 248
},
{
"epoch": 4.310344827586207,
"grad_norm": 5.089727401733398,
"learning_rate": 9.742747673782157e-05,
"loss": 1.64,
"step": 250
},
{
"epoch": 4.344827586206897,
"grad_norm": 2.1755893230438232,
"learning_rate": 9.731800766283526e-05,
"loss": 1.7475,
"step": 252
},
{
"epoch": 4.379310344827586,
"grad_norm": 3.20668363571167,
"learning_rate": 9.720853858784894e-05,
"loss": 1.683,
"step": 254
},
{
"epoch": 4.413793103448276,
"grad_norm": 1.785972237586975,
"learning_rate": 9.709906951286262e-05,
"loss": 1.6876,
"step": 256
},
{
"epoch": 4.448275862068965,
"grad_norm": 3.6471712589263916,
"learning_rate": 9.69896004378763e-05,
"loss": 2.1599,
"step": 258
},
{
"epoch": 4.482758620689655,
"grad_norm": 2.0776071548461914,
"learning_rate": 9.688013136288999e-05,
"loss": 2.009,
"step": 260
},
{
"epoch": 4.517241379310345,
"grad_norm": 2.757769823074341,
"learning_rate": 9.677066228790367e-05,
"loss": 2.0974,
"step": 262
},
{
"epoch": 4.551724137931035,
"grad_norm": 2.731898307800293,
"learning_rate": 9.666119321291736e-05,
"loss": 1.4793,
"step": 264
},
{
"epoch": 4.586206896551724,
"grad_norm": 2.585442543029785,
"learning_rate": 9.655172413793105e-05,
"loss": 1.8863,
"step": 266
},
{
"epoch": 4.620689655172414,
"grad_norm": 2.7498419284820557,
"learning_rate": 9.644225506294472e-05,
"loss": 1.6424,
"step": 268
},
{
"epoch": 4.655172413793103,
"grad_norm": 4.065057754516602,
"learning_rate": 9.63327859879584e-05,
"loss": 1.5362,
"step": 270
},
{
"epoch": 4.689655172413794,
"grad_norm": 3.4030730724334717,
"learning_rate": 9.622331691297209e-05,
"loss": 1.7119,
"step": 272
},
{
"epoch": 4.724137931034483,
"grad_norm": 3.3285632133483887,
"learning_rate": 9.611384783798576e-05,
"loss": 1.7643,
"step": 274
},
{
"epoch": 4.758620689655173,
"grad_norm": 3.027642011642456,
"learning_rate": 9.600437876299946e-05,
"loss": 1.7388,
"step": 276
},
{
"epoch": 4.793103448275862,
"grad_norm": 2.7076125144958496,
"learning_rate": 9.589490968801315e-05,
"loss": 1.7675,
"step": 278
},
{
"epoch": 4.827586206896552,
"grad_norm": 3.127922534942627,
"learning_rate": 9.578544061302682e-05,
"loss": 1.4597,
"step": 280
},
{
"epoch": 4.862068965517241,
"grad_norm": 4.732183456420898,
"learning_rate": 9.567597153804051e-05,
"loss": 1.7048,
"step": 282
},
{
"epoch": 4.896551724137931,
"grad_norm": 4.8749284744262695,
"learning_rate": 9.55665024630542e-05,
"loss": 1.8464,
"step": 284
},
{
"epoch": 4.931034482758621,
"grad_norm": 2.5762221813201904,
"learning_rate": 9.545703338806787e-05,
"loss": 1.9569,
"step": 286
},
{
"epoch": 4.9655172413793105,
"grad_norm": 3.143484354019165,
"learning_rate": 9.534756431308155e-05,
"loss": 1.7673,
"step": 288
},
{
"epoch": 5.0,
"grad_norm": 2.596794843673706,
"learning_rate": 9.523809523809524e-05,
"loss": 1.6408,
"step": 290
},
{
"epoch": 5.0,
"eval_accuracy": 0.3333333333333333,
"eval_f1_macro": 0.156998556998557,
"eval_f1_micro": 0.3333333333333333,
"eval_f1_weighted": 0.19651056014692378,
"eval_loss": 1.6808322668075562,
"eval_precision_macro": 0.14322344322344321,
"eval_precision_micro": 0.3333333333333333,
"eval_precision_weighted": 0.18583916083916083,
"eval_recall_macro": 0.27017384731670446,
"eval_recall_micro": 0.3333333333333333,
"eval_recall_weighted": 0.3333333333333333,
"eval_runtime": 1.7386,
"eval_samples_per_second": 75.923,
"eval_steps_per_second": 9.778,
"step": 290
},
{
"epoch": 5.0344827586206895,
"grad_norm": 3.2708096504211426,
"learning_rate": 9.512862616310893e-05,
"loss": 2.1016,
"step": 292
},
{
"epoch": 5.068965517241379,
"grad_norm": 2.2087342739105225,
"learning_rate": 9.501915708812261e-05,
"loss": 1.3591,
"step": 294
},
{
"epoch": 5.103448275862069,
"grad_norm": 3.294851779937744,
"learning_rate": 9.490968801313629e-05,
"loss": 1.8157,
"step": 296
},
{
"epoch": 5.137931034482759,
"grad_norm": 2.4422295093536377,
"learning_rate": 9.480021893814997e-05,
"loss": 1.4012,
"step": 298
},
{
"epoch": 5.172413793103448,
"grad_norm": 2.5836057662963867,
"learning_rate": 9.469074986316366e-05,
"loss": 1.6883,
"step": 300
},
{
"epoch": 5.206896551724138,
"grad_norm": 3.3566815853118896,
"learning_rate": 9.458128078817734e-05,
"loss": 1.4683,
"step": 302
},
{
"epoch": 5.241379310344827,
"grad_norm": 2.7103140354156494,
"learning_rate": 9.447181171319103e-05,
"loss": 1.7214,
"step": 304
},
{
"epoch": 5.275862068965517,
"grad_norm": 1.974759578704834,
"learning_rate": 9.436234263820472e-05,
"loss": 1.7763,
"step": 306
},
{
"epoch": 5.310344827586207,
"grad_norm": 3.063124418258667,
"learning_rate": 9.425287356321839e-05,
"loss": 1.6032,
"step": 308
},
{
"epoch": 5.344827586206897,
"grad_norm": 2.366842269897461,
"learning_rate": 9.414340448823208e-05,
"loss": 1.771,
"step": 310
},
{
"epoch": 5.379310344827586,
"grad_norm": 4.095522403717041,
"learning_rate": 9.403393541324576e-05,
"loss": 1.6587,
"step": 312
},
{
"epoch": 5.413793103448276,
"grad_norm": 3.9357569217681885,
"learning_rate": 9.392446633825945e-05,
"loss": 1.3663,
"step": 314
},
{
"epoch": 5.448275862068965,
"grad_norm": 4.434700965881348,
"learning_rate": 9.381499726327313e-05,
"loss": 1.5951,
"step": 316
},
{
"epoch": 5.482758620689655,
"grad_norm": 2.257758855819702,
"learning_rate": 9.370552818828682e-05,
"loss": 1.3281,
"step": 318
},
{
"epoch": 5.517241379310345,
"grad_norm": 2.474900960922241,
"learning_rate": 9.35960591133005e-05,
"loss": 1.3842,
"step": 320
},
{
"epoch": 5.551724137931035,
"grad_norm": 2.744278907775879,
"learning_rate": 9.348659003831418e-05,
"loss": 1.6494,
"step": 322
},
{
"epoch": 5.586206896551724,
"grad_norm": 3.2747697830200195,
"learning_rate": 9.337712096332787e-05,
"loss": 1.3476,
"step": 324
},
{
"epoch": 5.620689655172414,
"grad_norm": 3.0323941707611084,
"learning_rate": 9.326765188834154e-05,
"loss": 1.3379,
"step": 326
},
{
"epoch": 5.655172413793103,
"grad_norm": 2.5281591415405273,
"learning_rate": 9.315818281335524e-05,
"loss": 1.4997,
"step": 328
},
{
"epoch": 5.689655172413794,
"grad_norm": 4.624899864196777,
"learning_rate": 9.304871373836892e-05,
"loss": 1.5624,
"step": 330
},
{
"epoch": 5.724137931034483,
"grad_norm": 2.9435858726501465,
"learning_rate": 9.29392446633826e-05,
"loss": 1.3056,
"step": 332
},
{
"epoch": 5.758620689655173,
"grad_norm": 2.5763392448425293,
"learning_rate": 9.282977558839628e-05,
"loss": 1.5451,
"step": 334
},
{
"epoch": 5.793103448275862,
"grad_norm": 5.08242654800415,
"learning_rate": 9.272030651340997e-05,
"loss": 1.7072,
"step": 336
},
{
"epoch": 5.827586206896552,
"grad_norm": 2.7309770584106445,
"learning_rate": 9.261083743842364e-05,
"loss": 1.2524,
"step": 338
},
{
"epoch": 5.862068965517241,
"grad_norm": 5.227554798126221,
"learning_rate": 9.250136836343733e-05,
"loss": 1.5564,
"step": 340
},
{
"epoch": 5.896551724137931,
"grad_norm": 2.266554117202759,
"learning_rate": 9.239189928845102e-05,
"loss": 1.6249,
"step": 342
},
{
"epoch": 5.931034482758621,
"grad_norm": 3.1214962005615234,
"learning_rate": 9.22824302134647e-05,
"loss": 1.7091,
"step": 344
},
{
"epoch": 5.9655172413793105,
"grad_norm": 2.7962958812713623,
"learning_rate": 9.217296113847839e-05,
"loss": 1.7433,
"step": 346
},
{
"epoch": 6.0,
"grad_norm": 3.0703368186950684,
"learning_rate": 9.206349206349206e-05,
"loss": 1.5696,
"step": 348
},
{
"epoch": 6.0,
"eval_accuracy": 0.44696969696969696,
"eval_f1_macro": 0.31719822149201493,
"eval_f1_micro": 0.44696969696969696,
"eval_f1_weighted": 0.38023986322449504,
"eval_loss": 1.5061465501785278,
"eval_precision_macro": 0.38953973562669214,
"eval_precision_micro": 0.44696969696969696,
"eval_precision_weighted": 0.41856035269078745,
"eval_recall_macro": 0.3617989417989418,
"eval_recall_micro": 0.44696969696969696,
"eval_recall_weighted": 0.44696969696969696,
"eval_runtime": 1.7478,
"eval_samples_per_second": 75.524,
"eval_steps_per_second": 9.727,
"step": 348
},
{
"epoch": 6.0344827586206895,
"grad_norm": 3.7875027656555176,
"learning_rate": 9.195402298850575e-05,
"loss": 1.3364,
"step": 350
},
{
"epoch": 6.068965517241379,
"grad_norm": 3.290332078933716,
"learning_rate": 9.184455391351943e-05,
"loss": 1.4505,
"step": 352
},
{
"epoch": 6.103448275862069,
"grad_norm": 2.82051682472229,
"learning_rate": 9.173508483853312e-05,
"loss": 1.2286,
"step": 354
},
{
"epoch": 6.137931034482759,
"grad_norm": 2.4758310317993164,
"learning_rate": 9.16256157635468e-05,
"loss": 1.6257,
"step": 356
},
{
"epoch": 6.172413793103448,
"grad_norm": 2.9103426933288574,
"learning_rate": 9.151614668856049e-05,
"loss": 1.2839,
"step": 358
},
{
"epoch": 6.206896551724138,
"grad_norm": 3.811338424682617,
"learning_rate": 9.140667761357416e-05,
"loss": 1.5439,
"step": 360
},
{
"epoch": 6.241379310344827,
"grad_norm": 2.94343638420105,
"learning_rate": 9.129720853858785e-05,
"loss": 1.6658,
"step": 362
},
{
"epoch": 6.275862068965517,
"grad_norm": 3.5924301147460938,
"learning_rate": 9.118773946360154e-05,
"loss": 1.3065,
"step": 364
},
{
"epoch": 6.310344827586207,
"grad_norm": 2.893584728240967,
"learning_rate": 9.107827038861522e-05,
"loss": 1.7638,
"step": 366
},
{
"epoch": 6.344827586206897,
"grad_norm": 4.023632049560547,
"learning_rate": 9.096880131362891e-05,
"loss": 1.1021,
"step": 368
},
{
"epoch": 6.379310344827586,
"grad_norm": 2.928293228149414,
"learning_rate": 9.08593322386426e-05,
"loss": 1.3904,
"step": 370
},
{
"epoch": 6.413793103448276,
"grad_norm": 3.4317667484283447,
"learning_rate": 9.074986316365627e-05,
"loss": 1.3181,
"step": 372
},
{
"epoch": 6.448275862068965,
"grad_norm": 2.3280980587005615,
"learning_rate": 9.064039408866995e-05,
"loss": 1.0595,
"step": 374
},
{
"epoch": 6.482758620689655,
"grad_norm": 2.5254228115081787,
"learning_rate": 9.053092501368364e-05,
"loss": 1.2124,
"step": 376
},
{
"epoch": 6.517241379310345,
"grad_norm": 2.9616031646728516,
"learning_rate": 9.042145593869731e-05,
"loss": 1.4659,
"step": 378
},
{
"epoch": 6.551724137931035,
"grad_norm": 3.044532299041748,
"learning_rate": 9.031198686371101e-05,
"loss": 1.5561,
"step": 380
},
{
"epoch": 6.586206896551724,
"grad_norm": 4.845521450042725,
"learning_rate": 9.02025177887247e-05,
"loss": 1.3714,
"step": 382
},
{
"epoch": 6.620689655172414,
"grad_norm": 2.6942129135131836,
"learning_rate": 9.009304871373837e-05,
"loss": 1.0138,
"step": 384
},
{
"epoch": 6.655172413793103,
"grad_norm": 3.31146502494812,
"learning_rate": 8.998357963875206e-05,
"loss": 1.2017,
"step": 386
},
{
"epoch": 6.689655172413794,
"grad_norm": 4.051918029785156,
"learning_rate": 8.987411056376574e-05,
"loss": 1.6548,
"step": 388
},
{
"epoch": 6.724137931034483,
"grad_norm": 2.59892201423645,
"learning_rate": 8.976464148877942e-05,
"loss": 1.6622,
"step": 390
},
{
"epoch": 6.758620689655173,
"grad_norm": 3.7553224563598633,
"learning_rate": 8.96551724137931e-05,
"loss": 1.7852,
"step": 392
},
{
"epoch": 6.793103448275862,
"grad_norm": 3.6946635246276855,
"learning_rate": 8.95457033388068e-05,
"loss": 1.1174,
"step": 394
},
{
"epoch": 6.827586206896552,
"grad_norm": 4.1242194175720215,
"learning_rate": 8.943623426382048e-05,
"loss": 1.4384,
"step": 396
},
{
"epoch": 6.862068965517241,
"grad_norm": 2.6595194339752197,
"learning_rate": 8.932676518883416e-05,
"loss": 1.2971,
"step": 398
},
{
"epoch": 6.896551724137931,
"grad_norm": 9.484551429748535,
"learning_rate": 8.921729611384783e-05,
"loss": 1.1471,
"step": 400
},
{
"epoch": 6.931034482758621,
"grad_norm": 6.137091636657715,
"learning_rate": 8.910782703886152e-05,
"loss": 1.5765,
"step": 402
},
{
"epoch": 6.9655172413793105,
"grad_norm": 3.7353861331939697,
"learning_rate": 8.899835796387521e-05,
"loss": 1.4145,
"step": 404
},
{
"epoch": 7.0,
"grad_norm": 4.387484073638916,
"learning_rate": 8.888888888888889e-05,
"loss": 1.4543,
"step": 406
},
{
"epoch": 7.0,
"eval_accuracy": 0.5151515151515151,
"eval_f1_macro": 0.4112892443400918,
"eval_f1_micro": 0.5151515151515151,
"eval_f1_weighted": 0.4707506497953339,
"eval_loss": 1.3674346208572388,
"eval_precision_macro": 0.4076719576719577,
"eval_precision_micro": 0.5151515151515151,
"eval_precision_weighted": 0.46301247771836007,
"eval_recall_macro": 0.44794406651549507,
"eval_recall_micro": 0.5151515151515151,
"eval_recall_weighted": 0.5151515151515151,
"eval_runtime": 1.681,
"eval_samples_per_second": 78.524,
"eval_steps_per_second": 10.113,
"step": 406
},
{
"epoch": 7.0344827586206895,
"grad_norm": 5.052458763122559,
"learning_rate": 8.877941981390258e-05,
"loss": 1.1215,
"step": 408
},
{
"epoch": 7.068965517241379,
"grad_norm": 2.47261905670166,
"learning_rate": 8.866995073891627e-05,
"loss": 1.0094,
"step": 410
},
{
"epoch": 7.103448275862069,
"grad_norm": 3.871811866760254,
"learning_rate": 8.856048166392994e-05,
"loss": 1.3071,
"step": 412
},
{
"epoch": 7.137931034482759,
"grad_norm": 4.654438018798828,
"learning_rate": 8.845101258894362e-05,
"loss": 1.5312,
"step": 414
},
{
"epoch": 7.172413793103448,
"grad_norm": 5.699710845947266,
"learning_rate": 8.834154351395731e-05,
"loss": 1.376,
"step": 416
},
{
"epoch": 7.206896551724138,
"grad_norm": 5.32385778427124,
"learning_rate": 8.8232074438971e-05,
"loss": 1.3338,
"step": 418
},
{
"epoch": 7.241379310344827,
"grad_norm": 2.77158522605896,
"learning_rate": 8.812260536398468e-05,
"loss": 1.2957,
"step": 420
},
{
"epoch": 7.275862068965517,
"grad_norm": 2.6451570987701416,
"learning_rate": 8.801313628899837e-05,
"loss": 1.5218,
"step": 422
},
{
"epoch": 7.310344827586207,
"grad_norm": 5.987870216369629,
"learning_rate": 8.790366721401204e-05,
"loss": 1.5623,
"step": 424
},
{
"epoch": 7.344827586206897,
"grad_norm": 2.895263195037842,
"learning_rate": 8.779419813902573e-05,
"loss": 1.1866,
"step": 426
},
{
"epoch": 7.379310344827586,
"grad_norm": 5.6424360275268555,
"learning_rate": 8.768472906403941e-05,
"loss": 0.9502,
"step": 428
},
{
"epoch": 7.413793103448276,
"grad_norm": 9.878355026245117,
"learning_rate": 8.757525998905309e-05,
"loss": 1.5713,
"step": 430
},
{
"epoch": 7.448275862068965,
"grad_norm": 3.341871738433838,
"learning_rate": 8.746579091406679e-05,
"loss": 1.5982,
"step": 432
},
{
"epoch": 7.482758620689655,
"grad_norm": 5.839073181152344,
"learning_rate": 8.735632183908047e-05,
"loss": 1.6279,
"step": 434
},
{
"epoch": 7.517241379310345,
"grad_norm": 3.4865572452545166,
"learning_rate": 8.724685276409415e-05,
"loss": 1.5146,
"step": 436
},
{
"epoch": 7.551724137931035,
"grad_norm": 3.3559305667877197,
"learning_rate": 8.713738368910783e-05,
"loss": 1.2828,
"step": 438
},
{
"epoch": 7.586206896551724,
"grad_norm": 5.358764171600342,
"learning_rate": 8.702791461412152e-05,
"loss": 1.4213,
"step": 440
},
{
"epoch": 7.620689655172414,
"grad_norm": 3.7849018573760986,
"learning_rate": 8.691844553913519e-05,
"loss": 1.1776,
"step": 442
},
{
"epoch": 7.655172413793103,
"grad_norm": 5.408728122711182,
"learning_rate": 8.680897646414888e-05,
"loss": 1.2171,
"step": 444
},
{
"epoch": 7.689655172413794,
"grad_norm": 3.210322618484497,
"learning_rate": 8.669950738916258e-05,
"loss": 1.139,
"step": 446
},
{
"epoch": 7.724137931034483,
"grad_norm": 4.863003730773926,
"learning_rate": 8.659003831417625e-05,
"loss": 1.0228,
"step": 448
},
{
"epoch": 7.758620689655173,
"grad_norm": 2.4240548610687256,
"learning_rate": 8.648056923918994e-05,
"loss": 1.1159,
"step": 450
},
{
"epoch": 7.793103448275862,
"grad_norm": 4.020617961883545,
"learning_rate": 8.637110016420362e-05,
"loss": 1.6475,
"step": 452
},
{
"epoch": 7.827586206896552,
"grad_norm": 3.7977688312530518,
"learning_rate": 8.62616310892173e-05,
"loss": 1.0921,
"step": 454
},
{
"epoch": 7.862068965517241,
"grad_norm": 6.774123191833496,
"learning_rate": 8.615216201423098e-05,
"loss": 1.1647,
"step": 456
},
{
"epoch": 7.896551724137931,
"grad_norm": 3.9084153175354004,
"learning_rate": 8.604269293924467e-05,
"loss": 1.5981,
"step": 458
},
{
"epoch": 7.931034482758621,
"grad_norm": 3.0181827545166016,
"learning_rate": 8.593322386425835e-05,
"loss": 1.4641,
"step": 460
},
{
"epoch": 7.9655172413793105,
"grad_norm": 5.793522357940674,
"learning_rate": 8.582375478927204e-05,
"loss": 1.4984,
"step": 462
},
{
"epoch": 8.0,
"grad_norm": 3.726046085357666,
"learning_rate": 8.571428571428571e-05,
"loss": 1.2349,
"step": 464
},
{
"epoch": 8.0,
"eval_accuracy": 0.5,
"eval_f1_macro": 0.40237683828026144,
"eval_f1_micro": 0.5,
"eval_f1_weighted": 0.45497924943822293,
"eval_loss": 1.313656210899353,
"eval_precision_macro": 0.40499231950844855,
"eval_precision_micro": 0.5,
"eval_precision_weighted": 0.46061013359400454,
"eval_recall_macro": 0.4478835978835979,
"eval_recall_micro": 0.5,
"eval_recall_weighted": 0.5,
"eval_runtime": 1.7557,
"eval_samples_per_second": 75.184,
"eval_steps_per_second": 9.683,
"step": 464
},
{
"epoch": 8.03448275862069,
"grad_norm": 1.9977046251296997,
"learning_rate": 8.56048166392994e-05,
"loss": 1.0337,
"step": 466
},
{
"epoch": 8.068965517241379,
"grad_norm": 3.49200701713562,
"learning_rate": 8.549534756431309e-05,
"loss": 1.1619,
"step": 468
},
{
"epoch": 8.10344827586207,
"grad_norm": 6.4927825927734375,
"learning_rate": 8.538587848932677e-05,
"loss": 1.4676,
"step": 470
},
{
"epoch": 8.137931034482758,
"grad_norm": 1.9825376272201538,
"learning_rate": 8.527640941434046e-05,
"loss": 1.0621,
"step": 472
},
{
"epoch": 8.172413793103448,
"grad_norm": 3.2611658573150635,
"learning_rate": 8.516694033935414e-05,
"loss": 1.4685,
"step": 474
},
{
"epoch": 8.206896551724139,
"grad_norm": 6.502848148345947,
"learning_rate": 8.505747126436782e-05,
"loss": 1.7512,
"step": 476
},
{
"epoch": 8.241379310344827,
"grad_norm": 3.67802357673645,
"learning_rate": 8.49480021893815e-05,
"loss": 1.2843,
"step": 478
},
{
"epoch": 8.275862068965518,
"grad_norm": 3.7158236503601074,
"learning_rate": 8.483853311439519e-05,
"loss": 1.2995,
"step": 480
},
{
"epoch": 8.310344827586206,
"grad_norm": 2.446770191192627,
"learning_rate": 8.472906403940886e-05,
"loss": 1.2817,
"step": 482
},
{
"epoch": 8.344827586206897,
"grad_norm": 3.7978079319000244,
"learning_rate": 8.461959496442256e-05,
"loss": 1.1886,
"step": 484
},
{
"epoch": 8.379310344827585,
"grad_norm": 4.225602149963379,
"learning_rate": 8.451012588943625e-05,
"loss": 1.1122,
"step": 486
},
{
"epoch": 8.413793103448276,
"grad_norm": 4.149050712585449,
"learning_rate": 8.440065681444992e-05,
"loss": 1.2523,
"step": 488
},
{
"epoch": 8.448275862068966,
"grad_norm": 1.8350954055786133,
"learning_rate": 8.42911877394636e-05,
"loss": 1.2488,
"step": 490
},
{
"epoch": 8.482758620689655,
"grad_norm": 5.763144493103027,
"learning_rate": 8.418171866447729e-05,
"loss": 1.2622,
"step": 492
},
{
"epoch": 8.517241379310345,
"grad_norm": 3.465343952178955,
"learning_rate": 8.407224958949097e-05,
"loss": 1.1619,
"step": 494
},
{
"epoch": 8.551724137931034,
"grad_norm": 3.9785590171813965,
"learning_rate": 8.396278051450465e-05,
"loss": 1.3165,
"step": 496
},
{
"epoch": 8.586206896551724,
"grad_norm": 3.5696113109588623,
"learning_rate": 8.385331143951835e-05,
"loss": 1.2519,
"step": 498
},
{
"epoch": 8.620689655172415,
"grad_norm": 7.4920878410339355,
"learning_rate": 8.374384236453202e-05,
"loss": 1.1137,
"step": 500
},
{
"epoch": 8.655172413793103,
"grad_norm": 3.3087165355682373,
"learning_rate": 8.363437328954571e-05,
"loss": 1.0042,
"step": 502
},
{
"epoch": 8.689655172413794,
"grad_norm": 5.924715518951416,
"learning_rate": 8.35249042145594e-05,
"loss": 1.5938,
"step": 504
},
{
"epoch": 8.724137931034482,
"grad_norm": 4.854137897491455,
"learning_rate": 8.341543513957307e-05,
"loss": 1.6971,
"step": 506
},
{
"epoch": 8.758620689655173,
"grad_norm": 2.877127170562744,
"learning_rate": 8.330596606458676e-05,
"loss": 1.4602,
"step": 508
},
{
"epoch": 8.793103448275861,
"grad_norm": 4.012562274932861,
"learning_rate": 8.319649698960044e-05,
"loss": 1.1416,
"step": 510
},
{
"epoch": 8.827586206896552,
"grad_norm": 7.408483028411865,
"learning_rate": 8.308702791461413e-05,
"loss": 1.3352,
"step": 512
},
{
"epoch": 8.862068965517242,
"grad_norm": 3.3066165447235107,
"learning_rate": 8.297755883962781e-05,
"loss": 1.3075,
"step": 514
},
{
"epoch": 8.89655172413793,
"grad_norm": 5.024322509765625,
"learning_rate": 8.286808976464149e-05,
"loss": 1.3557,
"step": 516
},
{
"epoch": 8.931034482758621,
"grad_norm": 4.212279319763184,
"learning_rate": 8.275862068965517e-05,
"loss": 1.103,
"step": 518
},
{
"epoch": 8.96551724137931,
"grad_norm": 4.724401473999023,
"learning_rate": 8.264915161466886e-05,
"loss": 0.9081,
"step": 520
},
{
"epoch": 9.0,
"grad_norm": 4.074641704559326,
"learning_rate": 8.253968253968255e-05,
"loss": 1.2544,
"step": 522
},
{
"epoch": 9.0,
"eval_accuracy": 0.5075757575757576,
"eval_f1_macro": 0.42087451332734344,
"eval_f1_micro": 0.5075757575757576,
"eval_f1_weighted": 0.47479591512610375,
"eval_loss": 1.3322217464447021,
"eval_precision_macro": 0.4223733938019652,
"eval_precision_micro": 0.5075757575757576,
"eval_precision_weighted": 0.4737253487253487,
"eval_recall_macro": 0.447989417989418,
"eval_recall_micro": 0.5075757575757576,
"eval_recall_weighted": 0.5075757575757576,
"eval_runtime": 1.7338,
"eval_samples_per_second": 76.133,
"eval_steps_per_second": 9.805,
"step": 522
},
{
"epoch": 9.03448275862069,
"grad_norm": 3.3098955154418945,
"learning_rate": 8.243021346469623e-05,
"loss": 0.9961,
"step": 524
},
{
"epoch": 9.068965517241379,
"grad_norm": 4.669078826904297,
"learning_rate": 8.232074438970992e-05,
"loss": 0.8428,
"step": 526
},
{
"epoch": 9.10344827586207,
"grad_norm": 10.585116386413574,
"learning_rate": 8.221127531472359e-05,
"loss": 1.093,
"step": 528
},
{
"epoch": 9.137931034482758,
"grad_norm": 3.542888641357422,
"learning_rate": 8.210180623973728e-05,
"loss": 1.1071,
"step": 530
},
{
"epoch": 9.172413793103448,
"grad_norm": 3.5215578079223633,
"learning_rate": 8.199233716475096e-05,
"loss": 1.3982,
"step": 532
},
{
"epoch": 9.206896551724139,
"grad_norm": 2.9111316204071045,
"learning_rate": 8.188286808976464e-05,
"loss": 1.3159,
"step": 534
},
{
"epoch": 9.241379310344827,
"grad_norm": 3.4728190898895264,
"learning_rate": 8.177339901477834e-05,
"loss": 0.8367,
"step": 536
},
{
"epoch": 9.275862068965518,
"grad_norm": 7.045929908752441,
"learning_rate": 8.166392993979202e-05,
"loss": 1.1379,
"step": 538
},
{
"epoch": 9.310344827586206,
"grad_norm": 3.804302215576172,
"learning_rate": 8.15544608648057e-05,
"loss": 1.7455,
"step": 540
},
{
"epoch": 9.344827586206897,
"grad_norm": 5.53285551071167,
"learning_rate": 8.144499178981938e-05,
"loss": 0.8734,
"step": 542
},
{
"epoch": 9.379310344827585,
"grad_norm": 4.002763271331787,
"learning_rate": 8.133552271483307e-05,
"loss": 1.2031,
"step": 544
},
{
"epoch": 9.413793103448276,
"grad_norm": 3.3367197513580322,
"learning_rate": 8.122605363984674e-05,
"loss": 1.3156,
"step": 546
},
{
"epoch": 9.448275862068966,
"grad_norm": 6.759856700897217,
"learning_rate": 8.111658456486043e-05,
"loss": 1.5352,
"step": 548
},
{
"epoch": 9.482758620689655,
"grad_norm": 3.184265375137329,
"learning_rate": 8.100711548987413e-05,
"loss": 0.8941,
"step": 550
},
{
"epoch": 9.517241379310345,
"grad_norm": 5.55509090423584,
"learning_rate": 8.08976464148878e-05,
"loss": 1.1496,
"step": 552
},
{
"epoch": 9.551724137931034,
"grad_norm": 3.5079853534698486,
"learning_rate": 8.078817733990148e-05,
"loss": 1.0693,
"step": 554
},
{
"epoch": 9.586206896551724,
"grad_norm": 3.0940561294555664,
"learning_rate": 8.067870826491517e-05,
"loss": 1.0685,
"step": 556
},
{
"epoch": 9.620689655172415,
"grad_norm": 3.501739978790283,
"learning_rate": 8.056923918992884e-05,
"loss": 1.0207,
"step": 558
},
{
"epoch": 9.655172413793103,
"grad_norm": 2.481731414794922,
"learning_rate": 8.045977011494253e-05,
"loss": 0.7245,
"step": 560
},
{
"epoch": 9.689655172413794,
"grad_norm": 3.4049670696258545,
"learning_rate": 8.035030103995622e-05,
"loss": 1.4967,
"step": 562
},
{
"epoch": 9.724137931034482,
"grad_norm": 2.143556833267212,
"learning_rate": 8.02408319649699e-05,
"loss": 1.3994,
"step": 564
},
{
"epoch": 9.758620689655173,
"grad_norm": 3.563452959060669,
"learning_rate": 8.013136288998359e-05,
"loss": 1.3212,
"step": 566
},
{
"epoch": 9.793103448275861,
"grad_norm": 3.2039382457733154,
"learning_rate": 8.002189381499726e-05,
"loss": 0.9787,
"step": 568
},
{
"epoch": 9.827586206896552,
"grad_norm": 3.1124753952026367,
"learning_rate": 7.991242474001095e-05,
"loss": 0.9052,
"step": 570
},
{
"epoch": 9.862068965517242,
"grad_norm": 4.318131923675537,
"learning_rate": 7.980295566502463e-05,
"loss": 1.4156,
"step": 572
},
{
"epoch": 9.89655172413793,
"grad_norm": 3.689425230026245,
"learning_rate": 7.969348659003832e-05,
"loss": 1.1676,
"step": 574
},
{
"epoch": 9.931034482758621,
"grad_norm": 4.3956499099731445,
"learning_rate": 7.9584017515052e-05,
"loss": 1.1724,
"step": 576
},
{
"epoch": 9.96551724137931,
"grad_norm": 4.972238540649414,
"learning_rate": 7.947454844006569e-05,
"loss": 1.5352,
"step": 578
},
{
"epoch": 10.0,
"grad_norm": 4.761918544769287,
"learning_rate": 7.936507936507937e-05,
"loss": 1.206,
"step": 580
},
{
"epoch": 10.0,
"eval_accuracy": 0.4621212121212121,
"eval_f1_macro": 0.35546523346854747,
"eval_f1_micro": 0.4621212121212121,
"eval_f1_weighted": 0.4008642480388733,
"eval_loss": 1.3817532062530518,
"eval_precision_macro": 0.3931256296985198,
"eval_precision_micro": 0.4621212121212121,
"eval_precision_weighted": 0.43718074725398604,
"eval_recall_macro": 0.4129402872260015,
"eval_recall_micro": 0.4621212121212121,
"eval_recall_weighted": 0.4621212121212121,
"eval_runtime": 1.703,
"eval_samples_per_second": 77.512,
"eval_steps_per_second": 9.983,
"step": 580
},
{
"epoch": 10.03448275862069,
"grad_norm": 3.7589895725250244,
"learning_rate": 7.925561029009305e-05,
"loss": 1.096,
"step": 582
},
{
"epoch": 10.068965517241379,
"grad_norm": 5.561600685119629,
"learning_rate": 7.914614121510674e-05,
"loss": 1.381,
"step": 584
},
{
"epoch": 10.10344827586207,
"grad_norm": 4.066978454589844,
"learning_rate": 7.903667214012041e-05,
"loss": 1.1099,
"step": 586
},
{
"epoch": 10.137931034482758,
"grad_norm": 2.7694287300109863,
"learning_rate": 7.892720306513411e-05,
"loss": 0.8646,
"step": 588
},
{
"epoch": 10.172413793103448,
"grad_norm": 3.7669525146484375,
"learning_rate": 7.88177339901478e-05,
"loss": 1.02,
"step": 590
},
{
"epoch": 10.206896551724139,
"grad_norm": 13.383344650268555,
"learning_rate": 7.870826491516147e-05,
"loss": 1.7312,
"step": 592
},
{
"epoch": 10.241379310344827,
"grad_norm": 2.007171154022217,
"learning_rate": 7.859879584017516e-05,
"loss": 1.0665,
"step": 594
},
{
"epoch": 10.275862068965518,
"grad_norm": 4.019918441772461,
"learning_rate": 7.848932676518884e-05,
"loss": 1.2016,
"step": 596
},
{
"epoch": 10.310344827586206,
"grad_norm": 8.339564323425293,
"learning_rate": 7.837985769020251e-05,
"loss": 1.7457,
"step": 598
},
{
"epoch": 10.344827586206897,
"grad_norm": 2.7739768028259277,
"learning_rate": 7.82703886152162e-05,
"loss": 1.3515,
"step": 600
},
{
"epoch": 10.379310344827585,
"grad_norm": 5.876687526702881,
"learning_rate": 7.81609195402299e-05,
"loss": 0.9722,
"step": 602
},
{
"epoch": 10.413793103448276,
"grad_norm": 3.3724043369293213,
"learning_rate": 7.805145046524357e-05,
"loss": 0.9159,
"step": 604
},
{
"epoch": 10.448275862068966,
"grad_norm": 2.4745233058929443,
"learning_rate": 7.794198139025726e-05,
"loss": 1.0241,
"step": 606
},
{
"epoch": 10.482758620689655,
"grad_norm": 6.936390399932861,
"learning_rate": 7.783251231527095e-05,
"loss": 1.11,
"step": 608
},
{
"epoch": 10.517241379310345,
"grad_norm": 3.721477746963501,
"learning_rate": 7.772304324028462e-05,
"loss": 1.396,
"step": 610
},
{
"epoch": 10.551724137931034,
"grad_norm": 3.1573660373687744,
"learning_rate": 7.76135741652983e-05,
"loss": 0.925,
"step": 612
},
{
"epoch": 10.586206896551724,
"grad_norm": 3.663121461868286,
"learning_rate": 7.750410509031199e-05,
"loss": 1.0975,
"step": 614
},
{
"epoch": 10.620689655172415,
"grad_norm": 2.9894790649414062,
"learning_rate": 7.739463601532568e-05,
"loss": 1.3367,
"step": 616
},
{
"epoch": 10.655172413793103,
"grad_norm": 4.422184467315674,
"learning_rate": 7.728516694033936e-05,
"loss": 0.8371,
"step": 618
},
{
"epoch": 10.689655172413794,
"grad_norm": 3.6148533821105957,
"learning_rate": 7.717569786535304e-05,
"loss": 1.6326,
"step": 620
},
{
"epoch": 10.724137931034482,
"grad_norm": 2.576557159423828,
"learning_rate": 7.706622879036672e-05,
"loss": 1.0553,
"step": 622
},
{
"epoch": 10.758620689655173,
"grad_norm": 3.054694652557373,
"learning_rate": 7.695675971538041e-05,
"loss": 0.9281,
"step": 624
},
{
"epoch": 10.793103448275861,
"grad_norm": 5.310608863830566,
"learning_rate": 7.684729064039408e-05,
"loss": 1.1227,
"step": 626
},
{
"epoch": 10.827586206896552,
"grad_norm": 10.316115379333496,
"learning_rate": 7.673782156540778e-05,
"loss": 0.8636,
"step": 628
},
{
"epoch": 10.862068965517242,
"grad_norm": 2.8177926540374756,
"learning_rate": 7.662835249042147e-05,
"loss": 0.6684,
"step": 630
},
{
"epoch": 10.89655172413793,
"grad_norm": 8.028295516967773,
"learning_rate": 7.651888341543514e-05,
"loss": 0.6878,
"step": 632
},
{
"epoch": 10.931034482758621,
"grad_norm": 2.236884355545044,
"learning_rate": 7.640941434044883e-05,
"loss": 0.8953,
"step": 634
},
{
"epoch": 10.96551724137931,
"grad_norm": 3.125170946121216,
"learning_rate": 7.629994526546251e-05,
"loss": 0.7141,
"step": 636
},
{
"epoch": 11.0,
"grad_norm": 3.6889586448669434,
"learning_rate": 7.619047619047618e-05,
"loss": 1.0416,
"step": 638
},
{
"epoch": 11.0,
"eval_accuracy": 0.5606060606060606,
"eval_f1_macro": 0.46100235596487465,
"eval_f1_micro": 0.5606060606060606,
"eval_f1_weighted": 0.5249136795238745,
"eval_loss": 1.314196228981018,
"eval_precision_macro": 0.5218135772974483,
"eval_precision_micro": 0.5606060606060606,
"eval_precision_weighted": 0.5872401994102874,
"eval_recall_macro": 0.49511715797430084,
"eval_recall_micro": 0.5606060606060606,
"eval_recall_weighted": 0.5606060606060606,
"eval_runtime": 1.7117,
"eval_samples_per_second": 77.117,
"eval_steps_per_second": 9.932,
"step": 638
},
{
"epoch": 11.03448275862069,
"grad_norm": 10.25067138671875,
"learning_rate": 7.608100711548987e-05,
"loss": 1.0965,
"step": 640
},
{
"epoch": 11.068965517241379,
"grad_norm": 2.579963207244873,
"learning_rate": 7.597153804050357e-05,
"loss": 1.1408,
"step": 642
},
{
"epoch": 11.10344827586207,
"grad_norm": 3.9004721641540527,
"learning_rate": 7.586206896551724e-05,
"loss": 1.1872,
"step": 644
},
{
"epoch": 11.137931034482758,
"grad_norm": 3.8652117252349854,
"learning_rate": 7.575259989053093e-05,
"loss": 0.8976,
"step": 646
},
{
"epoch": 11.172413793103448,
"grad_norm": 4.144958972930908,
"learning_rate": 7.564313081554462e-05,
"loss": 0.7235,
"step": 648
},
{
"epoch": 11.206896551724139,
"grad_norm": 2.5177595615386963,
"learning_rate": 7.553366174055829e-05,
"loss": 0.8665,
"step": 650
},
{
"epoch": 11.241379310344827,
"grad_norm": 4.09409236907959,
"learning_rate": 7.542419266557197e-05,
"loss": 0.7202,
"step": 652
},
{
"epoch": 11.275862068965518,
"grad_norm": 6.234818458557129,
"learning_rate": 7.531472359058566e-05,
"loss": 0.9528,
"step": 654
},
{
"epoch": 11.310344827586206,
"grad_norm": 6.615692615509033,
"learning_rate": 7.520525451559935e-05,
"loss": 1.1562,
"step": 656
},
{
"epoch": 11.344827586206897,
"grad_norm": 3.72806978225708,
"learning_rate": 7.509578544061303e-05,
"loss": 1.2677,
"step": 658
},
{
"epoch": 11.379310344827585,
"grad_norm": 4.646265506744385,
"learning_rate": 7.498631636562672e-05,
"loss": 1.3499,
"step": 660
},
{
"epoch": 11.413793103448276,
"grad_norm": 2.4671194553375244,
"learning_rate": 7.487684729064039e-05,
"loss": 0.9052,
"step": 662
},
{
"epoch": 11.448275862068966,
"grad_norm": 3.04329514503479,
"learning_rate": 7.476737821565408e-05,
"loss": 1.2754,
"step": 664
},
{
"epoch": 11.482758620689655,
"grad_norm": 2.1641273498535156,
"learning_rate": 7.465790914066776e-05,
"loss": 0.9117,
"step": 666
},
{
"epoch": 11.517241379310345,
"grad_norm": 2.3654139041900635,
"learning_rate": 7.454844006568145e-05,
"loss": 0.9465,
"step": 668
},
{
"epoch": 11.551724137931034,
"grad_norm": 9.097860336303711,
"learning_rate": 7.443897099069514e-05,
"loss": 1.2733,
"step": 670
},
{
"epoch": 11.586206896551724,
"grad_norm": 4.378310680389404,
"learning_rate": 7.432950191570882e-05,
"loss": 0.9539,
"step": 672
},
{
"epoch": 11.620689655172415,
"grad_norm": 5.611904621124268,
"learning_rate": 7.42200328407225e-05,
"loss": 1.056,
"step": 674
},
{
"epoch": 11.655172413793103,
"grad_norm": 3.2342333793640137,
"learning_rate": 7.411056376573618e-05,
"loss": 0.9286,
"step": 676
},
{
"epoch": 11.689655172413794,
"grad_norm": 4.163808822631836,
"learning_rate": 7.400109469074985e-05,
"loss": 0.8409,
"step": 678
},
{
"epoch": 11.724137931034482,
"grad_norm": 3.296419382095337,
"learning_rate": 7.389162561576355e-05,
"loss": 0.9829,
"step": 680
},
{
"epoch": 11.758620689655173,
"grad_norm": 6.1151227951049805,
"learning_rate": 7.378215654077724e-05,
"loss": 1.2809,
"step": 682
},
{
"epoch": 11.793103448275861,
"grad_norm": 6.986661911010742,
"learning_rate": 7.367268746579091e-05,
"loss": 1.0579,
"step": 684
},
{
"epoch": 11.827586206896552,
"grad_norm": 3.160599946975708,
"learning_rate": 7.35632183908046e-05,
"loss": 0.805,
"step": 686
},
{
"epoch": 11.862068965517242,
"grad_norm": 2.7907633781433105,
"learning_rate": 7.345374931581829e-05,
"loss": 0.8927,
"step": 688
},
{
"epoch": 11.89655172413793,
"grad_norm": 6.923659324645996,
"learning_rate": 7.334428024083196e-05,
"loss": 1.1211,
"step": 690
},
{
"epoch": 11.931034482758621,
"grad_norm": 5.215874195098877,
"learning_rate": 7.323481116584564e-05,
"loss": 1.1497,
"step": 692
},
{
"epoch": 11.96551724137931,
"grad_norm": 3.4313321113586426,
"learning_rate": 7.312534209085934e-05,
"loss": 0.8678,
"step": 694
},
{
"epoch": 12.0,
"grad_norm": 4.915594100952148,
"learning_rate": 7.301587301587302e-05,
"loss": 1.1494,
"step": 696
},
{
"epoch": 12.0,
"eval_accuracy": 0.4772727272727273,
"eval_f1_macro": 0.41057591774355634,
"eval_f1_micro": 0.4772727272727273,
"eval_f1_weighted": 0.4652476264825862,
"eval_loss": 1.3793162107467651,
"eval_precision_macro": 0.46187641723356004,
"eval_precision_micro": 0.4772727272727273,
"eval_precision_weighted": 0.5255907287157288,
"eval_recall_macro": 0.4227286470143613,
"eval_recall_micro": 0.4772727272727273,
"eval_recall_weighted": 0.4772727272727273,
"eval_runtime": 1.691,
"eval_samples_per_second": 78.06,
"eval_steps_per_second": 10.053,
"step": 696
},
{
"epoch": 12.03448275862069,
"grad_norm": 6.37459659576416,
"learning_rate": 7.29064039408867e-05,
"loss": 1.2047,
"step": 698
},
{
"epoch": 12.068965517241379,
"grad_norm": 4.954467296600342,
"learning_rate": 7.279693486590039e-05,
"loss": 1.0239,
"step": 700
},
{
"epoch": 12.10344827586207,
"grad_norm": 2.187420129776001,
"learning_rate": 7.268746579091406e-05,
"loss": 0.5644,
"step": 702
},
{
"epoch": 12.137931034482758,
"grad_norm": 4.12716817855835,
"learning_rate": 7.257799671592775e-05,
"loss": 0.7901,
"step": 704
},
{
"epoch": 12.172413793103448,
"grad_norm": 4.0836873054504395,
"learning_rate": 7.246852764094144e-05,
"loss": 1.3414,
"step": 706
},
{
"epoch": 12.206896551724139,
"grad_norm": 4.051703929901123,
"learning_rate": 7.235905856595512e-05,
"loss": 1.1838,
"step": 708
},
{
"epoch": 12.241379310344827,
"grad_norm": 2.682950973510742,
"learning_rate": 7.224958949096881e-05,
"loss": 0.7483,
"step": 710
},
{
"epoch": 12.275862068965518,
"grad_norm": 2.350590944290161,
"learning_rate": 7.21401204159825e-05,
"loss": 0.6146,
"step": 712
},
{
"epoch": 12.310344827586206,
"grad_norm": 4.472531795501709,
"learning_rate": 7.203065134099617e-05,
"loss": 1.0984,
"step": 714
},
{
"epoch": 12.344827586206897,
"grad_norm": 3.710747480392456,
"learning_rate": 7.192118226600985e-05,
"loss": 0.7819,
"step": 716
},
{
"epoch": 12.379310344827585,
"grad_norm": 6.625567436218262,
"learning_rate": 7.181171319102354e-05,
"loss": 0.9725,
"step": 718
},
{
"epoch": 12.413793103448276,
"grad_norm": 5.959197998046875,
"learning_rate": 7.170224411603723e-05,
"loss": 1.0577,
"step": 720
},
{
"epoch": 12.448275862068966,
"grad_norm": 3.8905091285705566,
"learning_rate": 7.159277504105091e-05,
"loss": 0.7902,
"step": 722
},
{
"epoch": 12.482758620689655,
"grad_norm": 9.632880210876465,
"learning_rate": 7.14833059660646e-05,
"loss": 0.8034,
"step": 724
},
{
"epoch": 12.517241379310345,
"grad_norm": 2.0040698051452637,
"learning_rate": 7.137383689107827e-05,
"loss": 0.6375,
"step": 726
},
{
"epoch": 12.551724137931034,
"grad_norm": 3.397597074508667,
"learning_rate": 7.126436781609196e-05,
"loss": 0.9355,
"step": 728
},
{
"epoch": 12.586206896551724,
"grad_norm": 4.660778522491455,
"learning_rate": 7.115489874110563e-05,
"loss": 0.97,
"step": 730
},
{
"epoch": 12.620689655172415,
"grad_norm": 4.888659954071045,
"learning_rate": 7.104542966611933e-05,
"loss": 1.0236,
"step": 732
},
{
"epoch": 12.655172413793103,
"grad_norm": 2.9293081760406494,
"learning_rate": 7.093596059113302e-05,
"loss": 0.8456,
"step": 734
},
{
"epoch": 12.689655172413794,
"grad_norm": 7.08192253112793,
"learning_rate": 7.082649151614669e-05,
"loss": 1.3017,
"step": 736
},
{
"epoch": 12.724137931034482,
"grad_norm": 4.688408374786377,
"learning_rate": 7.071702244116037e-05,
"loss": 1.3321,
"step": 738
},
{
"epoch": 12.758620689655173,
"grad_norm": 6.150996685028076,
"learning_rate": 7.060755336617406e-05,
"loss": 1.5606,
"step": 740
},
{
"epoch": 12.793103448275861,
"grad_norm": 6.134938716888428,
"learning_rate": 7.049808429118773e-05,
"loss": 1.0207,
"step": 742
},
{
"epoch": 12.827586206896552,
"grad_norm": 4.253851890563965,
"learning_rate": 7.038861521620142e-05,
"loss": 0.637,
"step": 744
},
{
"epoch": 12.862068965517242,
"grad_norm": 2.356126546859741,
"learning_rate": 7.027914614121512e-05,
"loss": 0.5845,
"step": 746
},
{
"epoch": 12.89655172413793,
"grad_norm": 6.397014141082764,
"learning_rate": 7.016967706622879e-05,
"loss": 1.1618,
"step": 748
},
{
"epoch": 12.931034482758621,
"grad_norm": 2.425175666809082,
"learning_rate": 7.006020799124248e-05,
"loss": 0.8193,
"step": 750
},
{
"epoch": 12.96551724137931,
"grad_norm": 4.115069389343262,
"learning_rate": 6.995073891625616e-05,
"loss": 1.0236,
"step": 752
},
{
"epoch": 13.0,
"grad_norm": 3.485793113708496,
"learning_rate": 6.984126984126984e-05,
"loss": 0.7366,
"step": 754
},
{
"epoch": 13.0,
"eval_accuracy": 0.6515151515151515,
"eval_f1_macro": 0.5655740899926947,
"eval_f1_micro": 0.6515151515151515,
"eval_f1_weighted": 0.6382644888194571,
"eval_loss": 1.1935979127883911,
"eval_precision_macro": 0.5707684918779371,
"eval_precision_micro": 0.6515151515151515,
"eval_precision_weighted": 0.6445614261104243,
"eval_recall_macro": 0.5790249433106576,
"eval_recall_micro": 0.6515151515151515,
"eval_recall_weighted": 0.6515151515151515,
"eval_runtime": 1.6957,
"eval_samples_per_second": 77.845,
"eval_steps_per_second": 10.026,
"step": 754
},
{
"epoch": 13.03448275862069,
"grad_norm": 3.7799136638641357,
"learning_rate": 6.973180076628352e-05,
"loss": 0.5383,
"step": 756
},
{
"epoch": 13.068965517241379,
"grad_norm": 4.496936798095703,
"learning_rate": 6.962233169129721e-05,
"loss": 0.9326,
"step": 758
},
{
"epoch": 13.10344827586207,
"grad_norm": 2.7297375202178955,
"learning_rate": 6.95128626163109e-05,
"loss": 0.6216,
"step": 760
},
{
"epoch": 13.137931034482758,
"grad_norm": 4.5329179763793945,
"learning_rate": 6.940339354132458e-05,
"loss": 1.0146,
"step": 762
},
{
"epoch": 13.172413793103448,
"grad_norm": 4.96497917175293,
"learning_rate": 6.929392446633827e-05,
"loss": 0.8865,
"step": 764
},
{
"epoch": 13.206896551724139,
"grad_norm": 3.1082141399383545,
"learning_rate": 6.918445539135194e-05,
"loss": 0.8833,
"step": 766
},
{
"epoch": 13.241379310344827,
"grad_norm": 6.976032733917236,
"learning_rate": 6.907498631636563e-05,
"loss": 0.7684,
"step": 768
},
{
"epoch": 13.275862068965518,
"grad_norm": 1.962904691696167,
"learning_rate": 6.896551724137931e-05,
"loss": 0.3621,
"step": 770
},
{
"epoch": 13.310344827586206,
"grad_norm": 3.1863770484924316,
"learning_rate": 6.8856048166393e-05,
"loss": 0.3451,
"step": 772
},
{
"epoch": 13.344827586206897,
"grad_norm": 2.953831672668457,
"learning_rate": 6.874657909140669e-05,
"loss": 0.6924,
"step": 774
},
{
"epoch": 13.379310344827585,
"grad_norm": 4.513678073883057,
"learning_rate": 6.863711001642037e-05,
"loss": 0.6521,
"step": 776
},
{
"epoch": 13.413793103448276,
"grad_norm": 2.1181821823120117,
"learning_rate": 6.852764094143404e-05,
"loss": 0.7305,
"step": 778
},
{
"epoch": 13.448275862068966,
"grad_norm": 5.158665657043457,
"learning_rate": 6.841817186644773e-05,
"loss": 0.757,
"step": 780
},
{
"epoch": 13.482758620689655,
"grad_norm": 2.678704261779785,
"learning_rate": 6.830870279146142e-05,
"loss": 0.7164,
"step": 782
},
{
"epoch": 13.517241379310345,
"grad_norm": 6.794834613800049,
"learning_rate": 6.81992337164751e-05,
"loss": 1.0533,
"step": 784
},
{
"epoch": 13.551724137931034,
"grad_norm": 6.377530097961426,
"learning_rate": 6.808976464148879e-05,
"loss": 0.8787,
"step": 786
},
{
"epoch": 13.586206896551724,
"grad_norm": 3.35634183883667,
"learning_rate": 6.798029556650246e-05,
"loss": 0.5949,
"step": 788
},
{
"epoch": 13.620689655172415,
"grad_norm": 2.987086057662964,
"learning_rate": 6.787082649151615e-05,
"loss": 0.7907,
"step": 790
},
{
"epoch": 13.655172413793103,
"grad_norm": 7.851985454559326,
"learning_rate": 6.776135741652983e-05,
"loss": 1.0953,
"step": 792
},
{
"epoch": 13.689655172413794,
"grad_norm": 5.5945892333984375,
"learning_rate": 6.765188834154351e-05,
"loss": 0.5258,
"step": 794
},
{
"epoch": 13.724137931034482,
"grad_norm": 6.0871052742004395,
"learning_rate": 6.75424192665572e-05,
"loss": 1.6107,
"step": 796
},
{
"epoch": 13.758620689655173,
"grad_norm": 4.6772260665893555,
"learning_rate": 6.74329501915709e-05,
"loss": 1.1545,
"step": 798
},
{
"epoch": 13.793103448275861,
"grad_norm": 2.2203543186187744,
"learning_rate": 6.732348111658457e-05,
"loss": 0.8467,
"step": 800
},
{
"epoch": 13.827586206896552,
"grad_norm": 5.075698375701904,
"learning_rate": 6.721401204159825e-05,
"loss": 1.047,
"step": 802
},
{
"epoch": 13.862068965517242,
"grad_norm": 9.27517032623291,
"learning_rate": 6.710454296661194e-05,
"loss": 0.9641,
"step": 804
},
{
"epoch": 13.89655172413793,
"grad_norm": 3.1357972621917725,
"learning_rate": 6.699507389162561e-05,
"loss": 0.6859,
"step": 806
},
{
"epoch": 13.931034482758621,
"grad_norm": 9.246620178222656,
"learning_rate": 6.68856048166393e-05,
"loss": 1.2339,
"step": 808
},
{
"epoch": 13.96551724137931,
"grad_norm": 4.054126739501953,
"learning_rate": 6.677613574165298e-05,
"loss": 0.888,
"step": 810
},
{
"epoch": 14.0,
"grad_norm": 10.905935287475586,
"learning_rate": 6.666666666666667e-05,
"loss": 1.3729,
"step": 812
},
{
"epoch": 14.0,
"eval_accuracy": 0.6060606060606061,
"eval_f1_macro": 0.5151332211371547,
"eval_f1_micro": 0.6060606060606061,
"eval_f1_weighted": 0.5860558713061843,
"eval_loss": 1.2284520864486694,
"eval_precision_macro": 0.5714167385740136,
"eval_precision_micro": 0.6060606060606061,
"eval_precision_weighted": 0.6313951407182923,
"eval_recall_macro": 0.5224716553287982,
"eval_recall_micro": 0.6060606060606061,
"eval_recall_weighted": 0.6060606060606061,
"eval_runtime": 1.7058,
"eval_samples_per_second": 77.383,
"eval_steps_per_second": 9.966,
"step": 812
},
{
"epoch": 14.03448275862069,
"grad_norm": 5.283136367797852,
"learning_rate": 6.655719759168036e-05,
"loss": 0.7426,
"step": 814
},
{
"epoch": 14.068965517241379,
"grad_norm": 3.3359534740448,
"learning_rate": 6.644772851669404e-05,
"loss": 0.5729,
"step": 816
},
{
"epoch": 14.10344827586207,
"grad_norm": 2.1584465503692627,
"learning_rate": 6.633825944170772e-05,
"loss": 0.6175,
"step": 818
},
{
"epoch": 14.137931034482758,
"grad_norm": 4.724003791809082,
"learning_rate": 6.62287903667214e-05,
"loss": 0.5157,
"step": 820
},
{
"epoch": 14.172413793103448,
"grad_norm": 6.147416591644287,
"learning_rate": 6.611932129173509e-05,
"loss": 0.7852,
"step": 822
},
{
"epoch": 14.206896551724139,
"grad_norm": 5.155529499053955,
"learning_rate": 6.600985221674877e-05,
"loss": 0.8213,
"step": 824
},
{
"epoch": 14.241379310344827,
"grad_norm": 2.3989200592041016,
"learning_rate": 6.590038314176246e-05,
"loss": 0.4571,
"step": 826
},
{
"epoch": 14.275862068965518,
"grad_norm": 4.3873090744018555,
"learning_rate": 6.579091406677615e-05,
"loss": 1.1529,
"step": 828
},
{
"epoch": 14.310344827586206,
"grad_norm": 5.521265029907227,
"learning_rate": 6.568144499178982e-05,
"loss": 0.841,
"step": 830
},
{
"epoch": 14.344827586206897,
"grad_norm": 3.919457197189331,
"learning_rate": 6.55719759168035e-05,
"loss": 0.6576,
"step": 832
},
{
"epoch": 14.379310344827585,
"grad_norm": 5.222126007080078,
"learning_rate": 6.546250684181719e-05,
"loss": 0.7117,
"step": 834
},
{
"epoch": 14.413793103448276,
"grad_norm": 5.21601676940918,
"learning_rate": 6.535303776683088e-05,
"loss": 0.6117,
"step": 836
},
{
"epoch": 14.448275862068966,
"grad_norm": 3.0854995250701904,
"learning_rate": 6.524356869184456e-05,
"loss": 0.4594,
"step": 838
},
{
"epoch": 14.482758620689655,
"grad_norm": 4.040173053741455,
"learning_rate": 6.513409961685824e-05,
"loss": 0.6008,
"step": 840
},
{
"epoch": 14.517241379310345,
"grad_norm": 8.294901847839355,
"learning_rate": 6.502463054187192e-05,
"loss": 1.344,
"step": 842
},
{
"epoch": 14.551724137931034,
"grad_norm": 3.9648568630218506,
"learning_rate": 6.491516146688561e-05,
"loss": 0.9206,
"step": 844
},
{
"epoch": 14.586206896551724,
"grad_norm": 5.604014873504639,
"learning_rate": 6.480569239189928e-05,
"loss": 0.9679,
"step": 846
},
{
"epoch": 14.620689655172415,
"grad_norm": 4.383110523223877,
"learning_rate": 6.469622331691297e-05,
"loss": 0.8063,
"step": 848
},
{
"epoch": 14.655172413793103,
"grad_norm": 3.8494396209716797,
"learning_rate": 6.458675424192667e-05,
"loss": 1.0672,
"step": 850
},
{
"epoch": 14.689655172413794,
"grad_norm": 2.571416139602661,
"learning_rate": 6.447728516694034e-05,
"loss": 0.7218,
"step": 852
},
{
"epoch": 14.724137931034482,
"grad_norm": 13.170735359191895,
"learning_rate": 6.436781609195403e-05,
"loss": 0.7612,
"step": 854
},
{
"epoch": 14.758620689655173,
"grad_norm": 4.889159202575684,
"learning_rate": 6.425834701696771e-05,
"loss": 0.7591,
"step": 856
},
{
"epoch": 14.793103448275861,
"grad_norm": 4.082027435302734,
"learning_rate": 6.414887794198139e-05,
"loss": 0.9275,
"step": 858
},
{
"epoch": 14.827586206896552,
"grad_norm": 4.967465400695801,
"learning_rate": 6.403940886699507e-05,
"loss": 0.4512,
"step": 860
},
{
"epoch": 14.862068965517242,
"grad_norm": 8.991899490356445,
"learning_rate": 6.392993979200876e-05,
"loss": 0.8968,
"step": 862
},
{
"epoch": 14.89655172413793,
"grad_norm": 3.955719232559204,
"learning_rate": 6.382047071702244e-05,
"loss": 0.4046,
"step": 864
},
{
"epoch": 14.931034482758621,
"grad_norm": 8.865561485290527,
"learning_rate": 6.371100164203613e-05,
"loss": 0.8692,
"step": 866
},
{
"epoch": 14.96551724137931,
"grad_norm": 4.807842254638672,
"learning_rate": 6.360153256704982e-05,
"loss": 1.1433,
"step": 868
},
{
"epoch": 15.0,
"grad_norm": 3.4882490634918213,
"learning_rate": 6.349206349206349e-05,
"loss": 1.3638,
"step": 870
},
{
"epoch": 15.0,
"eval_accuracy": 0.6212121212121212,
"eval_f1_macro": 0.5388922283385812,
"eval_f1_micro": 0.6212121212121212,
"eval_f1_weighted": 0.6054871995244131,
"eval_loss": 1.1741819381713867,
"eval_precision_macro": 0.561660793349598,
"eval_precision_micro": 0.6212121212121212,
"eval_precision_weighted": 0.6333975796499517,
"eval_recall_macro": 0.5513378684807256,
"eval_recall_micro": 0.6212121212121212,
"eval_recall_weighted": 0.6212121212121212,
"eval_runtime": 1.6952,
"eval_samples_per_second": 77.867,
"eval_steps_per_second": 10.028,
"step": 870
},
{
"epoch": 15.03448275862069,
"grad_norm": 6.959043502807617,
"learning_rate": 6.338259441707718e-05,
"loss": 0.6661,
"step": 872
},
{
"epoch": 15.068965517241379,
"grad_norm": 4.410765647888184,
"learning_rate": 6.327312534209086e-05,
"loss": 0.6515,
"step": 874
},
{
"epoch": 15.10344827586207,
"grad_norm": 5.796648025512695,
"learning_rate": 6.316365626710455e-05,
"loss": 0.5223,
"step": 876
},
{
"epoch": 15.137931034482758,
"grad_norm": 7.7657904624938965,
"learning_rate": 6.305418719211823e-05,
"loss": 0.6789,
"step": 878
},
{
"epoch": 15.172413793103448,
"grad_norm": 2.134707450866699,
"learning_rate": 6.294471811713192e-05,
"loss": 0.3882,
"step": 880
},
{
"epoch": 15.206896551724139,
"grad_norm": 2.849294900894165,
"learning_rate": 6.283524904214559e-05,
"loss": 0.8977,
"step": 882
},
{
"epoch": 15.241379310344827,
"grad_norm": 3.2845396995544434,
"learning_rate": 6.272577996715928e-05,
"loss": 0.5675,
"step": 884
},
{
"epoch": 15.275862068965518,
"grad_norm": 7.925327777862549,
"learning_rate": 6.261631089217297e-05,
"loss": 1.0438,
"step": 886
},
{
"epoch": 15.310344827586206,
"grad_norm": 3.015355348587036,
"learning_rate": 6.250684181718665e-05,
"loss": 0.6477,
"step": 888
},
{
"epoch": 15.344827586206897,
"grad_norm": 4.829744815826416,
"learning_rate": 6.239737274220034e-05,
"loss": 0.8828,
"step": 890
},
{
"epoch": 15.379310344827585,
"grad_norm": 1.1450001001358032,
"learning_rate": 6.228790366721402e-05,
"loss": 0.59,
"step": 892
},
{
"epoch": 15.413793103448276,
"grad_norm": 3.852428436279297,
"learning_rate": 6.21784345922277e-05,
"loss": 0.6749,
"step": 894
},
{
"epoch": 15.448275862068966,
"grad_norm": 8.538708686828613,
"learning_rate": 6.206896551724138e-05,
"loss": 0.7512,
"step": 896
},
{
"epoch": 15.482758620689655,
"grad_norm": 1.829010248184204,
"learning_rate": 6.195949644225506e-05,
"loss": 0.5363,
"step": 898
},
{
"epoch": 15.517241379310345,
"grad_norm": 4.311962127685547,
"learning_rate": 6.185002736726874e-05,
"loss": 0.507,
"step": 900
},
{
"epoch": 15.551724137931034,
"grad_norm": 6.414646625518799,
"learning_rate": 6.174055829228244e-05,
"loss": 0.4162,
"step": 902
},
{
"epoch": 15.586206896551724,
"grad_norm": 4.423099517822266,
"learning_rate": 6.163108921729611e-05,
"loss": 0.8151,
"step": 904
},
{
"epoch": 15.620689655172415,
"grad_norm": 1.6412297487258911,
"learning_rate": 6.15216201423098e-05,
"loss": 0.4281,
"step": 906
},
{
"epoch": 15.655172413793103,
"grad_norm": 8.947938919067383,
"learning_rate": 6.141215106732349e-05,
"loss": 0.8403,
"step": 908
},
{
"epoch": 15.689655172413794,
"grad_norm": 8.341460227966309,
"learning_rate": 6.130268199233716e-05,
"loss": 0.7946,
"step": 910
},
{
"epoch": 15.724137931034482,
"grad_norm": 7.36137580871582,
"learning_rate": 6.119321291735085e-05,
"loss": 0.3792,
"step": 912
},
{
"epoch": 15.758620689655173,
"grad_norm": 4.560554504394531,
"learning_rate": 6.108374384236453e-05,
"loss": 0.7343,
"step": 914
},
{
"epoch": 15.793103448275861,
"grad_norm": 10.598052978515625,
"learning_rate": 6.097427476737821e-05,
"loss": 0.6625,
"step": 916
},
{
"epoch": 15.827586206896552,
"grad_norm": 3.1005873680114746,
"learning_rate": 6.0864805692391905e-05,
"loss": 0.4325,
"step": 918
},
{
"epoch": 15.862068965517242,
"grad_norm": 4.4200663566589355,
"learning_rate": 6.075533661740559e-05,
"loss": 0.9942,
"step": 920
},
{
"epoch": 15.89655172413793,
"grad_norm": 5.5699381828308105,
"learning_rate": 6.0645867542419264e-05,
"loss": 0.3725,
"step": 922
},
{
"epoch": 15.931034482758621,
"grad_norm": 4.177437782287598,
"learning_rate": 6.053639846743295e-05,
"loss": 0.5522,
"step": 924
},
{
"epoch": 15.96551724137931,
"grad_norm": 2.993516206741333,
"learning_rate": 6.042692939244664e-05,
"loss": 0.4704,
"step": 926
},
{
"epoch": 16.0,
"grad_norm": 4.057467937469482,
"learning_rate": 6.0317460317460316e-05,
"loss": 0.9063,
"step": 928
},
{
"epoch": 16.0,
"eval_accuracy": 0.5984848484848485,
"eval_f1_macro": 0.5079468662671953,
"eval_f1_micro": 0.5984848484848485,
"eval_f1_weighted": 0.5770103503469938,
"eval_loss": 1.2325303554534912,
"eval_precision_macro": 0.50772811148751,
"eval_precision_micro": 0.5984848484848485,
"eval_precision_weighted": 0.5714871385924017,
"eval_recall_macro": 0.52151171579743,
"eval_recall_micro": 0.5984848484848485,
"eval_recall_weighted": 0.5984848484848485,
"eval_runtime": 1.6873,
"eval_samples_per_second": 78.231,
"eval_steps_per_second": 10.075,
"step": 928
},
{
"epoch": 16.03448275862069,
"grad_norm": 1.7874680757522583,
"learning_rate": 6.0207991242474e-05,
"loss": 0.2892,
"step": 930
},
{
"epoch": 16.06896551724138,
"grad_norm": 3.2080960273742676,
"learning_rate": 6.0098522167487695e-05,
"loss": 0.7073,
"step": 932
},
{
"epoch": 16.103448275862068,
"grad_norm": 1.299979567527771,
"learning_rate": 5.998905309250137e-05,
"loss": 0.2762,
"step": 934
},
{
"epoch": 16.137931034482758,
"grad_norm": 2.3571527004241943,
"learning_rate": 5.9879584017515054e-05,
"loss": 0.5417,
"step": 936
},
{
"epoch": 16.17241379310345,
"grad_norm": 6.40372371673584,
"learning_rate": 5.977011494252874e-05,
"loss": 0.8834,
"step": 938
},
{
"epoch": 16.20689655172414,
"grad_norm": 3.0044682025909424,
"learning_rate": 5.966064586754242e-05,
"loss": 0.3184,
"step": 940
},
{
"epoch": 16.24137931034483,
"grad_norm": 10.370132446289062,
"learning_rate": 5.9551176792556106e-05,
"loss": 0.8784,
"step": 942
},
{
"epoch": 16.275862068965516,
"grad_norm": 4.730606555938721,
"learning_rate": 5.944170771756979e-05,
"loss": 0.8115,
"step": 944
},
{
"epoch": 16.310344827586206,
"grad_norm": 8.03954029083252,
"learning_rate": 5.933223864258347e-05,
"loss": 0.4177,
"step": 946
},
{
"epoch": 16.344827586206897,
"grad_norm": 4.975473403930664,
"learning_rate": 5.922276956759716e-05,
"loss": 0.4525,
"step": 948
},
{
"epoch": 16.379310344827587,
"grad_norm": 4.188839912414551,
"learning_rate": 5.9113300492610844e-05,
"loss": 0.8818,
"step": 950
},
{
"epoch": 16.413793103448278,
"grad_norm": 7.270519733428955,
"learning_rate": 5.900383141762452e-05,
"loss": 0.6999,
"step": 952
},
{
"epoch": 16.448275862068964,
"grad_norm": 3.524183988571167,
"learning_rate": 5.889436234263821e-05,
"loss": 0.9776,
"step": 954
},
{
"epoch": 16.482758620689655,
"grad_norm": 3.496511936187744,
"learning_rate": 5.878489326765189e-05,
"loss": 0.6665,
"step": 956
},
{
"epoch": 16.517241379310345,
"grad_norm": 8.359578132629395,
"learning_rate": 5.8675424192665575e-05,
"loss": 0.5769,
"step": 958
},
{
"epoch": 16.551724137931036,
"grad_norm": 1.640060305595398,
"learning_rate": 5.856595511767926e-05,
"loss": 0.4558,
"step": 960
},
{
"epoch": 16.586206896551722,
"grad_norm": 3.3751068115234375,
"learning_rate": 5.8456486042692934e-05,
"loss": 0.7395,
"step": 962
},
{
"epoch": 16.620689655172413,
"grad_norm": 6.145583629608154,
"learning_rate": 5.834701696770663e-05,
"loss": 0.5962,
"step": 964
},
{
"epoch": 16.655172413793103,
"grad_norm": 4.918842315673828,
"learning_rate": 5.823754789272031e-05,
"loss": 0.9492,
"step": 966
},
{
"epoch": 16.689655172413794,
"grad_norm": 8.429754257202148,
"learning_rate": 5.8128078817733986e-05,
"loss": 0.7788,
"step": 968
},
{
"epoch": 16.724137931034484,
"grad_norm": 1.9383467435836792,
"learning_rate": 5.801860974274768e-05,
"loss": 0.561,
"step": 970
},
{
"epoch": 16.75862068965517,
"grad_norm": 8.061017990112305,
"learning_rate": 5.7909140667761365e-05,
"loss": 0.5616,
"step": 972
},
{
"epoch": 16.79310344827586,
"grad_norm": 3.3627376556396484,
"learning_rate": 5.779967159277504e-05,
"loss": 0.4317,
"step": 974
},
{
"epoch": 16.82758620689655,
"grad_norm": 4.939265727996826,
"learning_rate": 5.7690202517788724e-05,
"loss": 0.7131,
"step": 976
},
{
"epoch": 16.862068965517242,
"grad_norm": 8.868173599243164,
"learning_rate": 5.758073344280242e-05,
"loss": 0.848,
"step": 978
},
{
"epoch": 16.896551724137932,
"grad_norm": 6.587118625640869,
"learning_rate": 5.747126436781609e-05,
"loss": 0.4767,
"step": 980
},
{
"epoch": 16.93103448275862,
"grad_norm": 5.797451972961426,
"learning_rate": 5.7361795292829776e-05,
"loss": 0.625,
"step": 982
},
{
"epoch": 16.96551724137931,
"grad_norm": 3.9016692638397217,
"learning_rate": 5.725232621784347e-05,
"loss": 0.6723,
"step": 984
},
{
"epoch": 17.0,
"grad_norm": 2.006951332092285,
"learning_rate": 5.714285714285714e-05,
"loss": 0.4584,
"step": 986
},
{
"epoch": 17.0,
"eval_accuracy": 0.6363636363636364,
"eval_f1_macro": 0.5514969546015317,
"eval_f1_micro": 0.6363636363636364,
"eval_f1_weighted": 0.6209731762304284,
"eval_loss": 1.1496680974960327,
"eval_precision_macro": 0.5676373113347903,
"eval_precision_micro": 0.6363636363636364,
"eval_precision_weighted": 0.6285885138826316,
"eval_recall_macro": 0.5575207860922147,
"eval_recall_micro": 0.6363636363636364,
"eval_recall_weighted": 0.6363636363636364,
"eval_runtime": 1.699,
"eval_samples_per_second": 77.694,
"eval_steps_per_second": 10.006,
"step": 986
},
{
"epoch": 17.03448275862069,
"grad_norm": 6.7209343910217285,
"learning_rate": 5.703338806787083e-05,
"loss": 0.5839,
"step": 988
},
{
"epoch": 17.06896551724138,
"grad_norm": 4.373691558837891,
"learning_rate": 5.6923918992884514e-05,
"loss": 0.455,
"step": 990
},
{
"epoch": 17.103448275862068,
"grad_norm": 7.231139183044434,
"learning_rate": 5.6814449917898194e-05,
"loss": 1.0114,
"step": 992
},
{
"epoch": 17.137931034482758,
"grad_norm": 7.747330665588379,
"learning_rate": 5.670498084291188e-05,
"loss": 0.9266,
"step": 994
},
{
"epoch": 17.17241379310345,
"grad_norm": 6.351534843444824,
"learning_rate": 5.6595511767925566e-05,
"loss": 0.4284,
"step": 996
},
{
"epoch": 17.20689655172414,
"grad_norm": 10.491331100463867,
"learning_rate": 5.6486042692939246e-05,
"loss": 0.4253,
"step": 998
},
{
"epoch": 17.24137931034483,
"grad_norm": 4.956140518188477,
"learning_rate": 5.637657361795293e-05,
"loss": 0.4558,
"step": 1000
},
{
"epoch": 17.275862068965516,
"grad_norm": 3.0368525981903076,
"learning_rate": 5.626710454296662e-05,
"loss": 1.0867,
"step": 1002
},
{
"epoch": 17.310344827586206,
"grad_norm": 1.7548032999038696,
"learning_rate": 5.61576354679803e-05,
"loss": 0.2738,
"step": 1004
},
{
"epoch": 17.344827586206897,
"grad_norm": 3.532531261444092,
"learning_rate": 5.6048166392993984e-05,
"loss": 0.344,
"step": 1006
},
{
"epoch": 17.379310344827587,
"grad_norm": 8.146524429321289,
"learning_rate": 5.593869731800766e-05,
"loss": 0.67,
"step": 1008
},
{
"epoch": 17.413793103448278,
"grad_norm": 11.48868465423584,
"learning_rate": 5.582922824302135e-05,
"loss": 0.5444,
"step": 1010
},
{
"epoch": 17.448275862068964,
"grad_norm": 4.138803958892822,
"learning_rate": 5.5719759168035036e-05,
"loss": 0.4858,
"step": 1012
},
{
"epoch": 17.482758620689655,
"grad_norm": 2.8997292518615723,
"learning_rate": 5.561029009304871e-05,
"loss": 0.2196,
"step": 1014
},
{
"epoch": 17.517241379310345,
"grad_norm": 7.666640281677246,
"learning_rate": 5.55008210180624e-05,
"loss": 0.7209,
"step": 1016
},
{
"epoch": 17.551724137931036,
"grad_norm": 1.5840152502059937,
"learning_rate": 5.539135194307609e-05,
"loss": 0.4648,
"step": 1018
},
{
"epoch": 17.586206896551722,
"grad_norm": 3.6097986698150635,
"learning_rate": 5.528188286808976e-05,
"loss": 0.5552,
"step": 1020
},
{
"epoch": 17.620689655172413,
"grad_norm": 9.492504119873047,
"learning_rate": 5.517241379310345e-05,
"loss": 0.9165,
"step": 1022
},
{
"epoch": 17.655172413793103,
"grad_norm": 8.897369384765625,
"learning_rate": 5.506294471811714e-05,
"loss": 0.5663,
"step": 1024
},
{
"epoch": 17.689655172413794,
"grad_norm": 3.185572862625122,
"learning_rate": 5.495347564313081e-05,
"loss": 0.5031,
"step": 1026
},
{
"epoch": 17.724137931034484,
"grad_norm": 2.46030855178833,
"learning_rate": 5.48440065681445e-05,
"loss": 0.5205,
"step": 1028
},
{
"epoch": 17.75862068965517,
"grad_norm": 1.1284418106079102,
"learning_rate": 5.473453749315819e-05,
"loss": 0.5335,
"step": 1030
},
{
"epoch": 17.79310344827586,
"grad_norm": 7.253750801086426,
"learning_rate": 5.4625068418171864e-05,
"loss": 0.655,
"step": 1032
},
{
"epoch": 17.82758620689655,
"grad_norm": 3.3334109783172607,
"learning_rate": 5.451559934318555e-05,
"loss": 0.3426,
"step": 1034
},
{
"epoch": 17.862068965517242,
"grad_norm": 4.30219841003418,
"learning_rate": 5.440613026819924e-05,
"loss": 0.5999,
"step": 1036
},
{
"epoch": 17.896551724137932,
"grad_norm": 6.265329360961914,
"learning_rate": 5.4296661193212916e-05,
"loss": 0.4511,
"step": 1038
},
{
"epoch": 17.93103448275862,
"grad_norm": 1.11660635471344,
"learning_rate": 5.41871921182266e-05,
"loss": 0.4022,
"step": 1040
},
{
"epoch": 17.96551724137931,
"grad_norm": 4.282124996185303,
"learning_rate": 5.407772304324029e-05,
"loss": 0.3705,
"step": 1042
},
{
"epoch": 18.0,
"grad_norm": 4.951636791229248,
"learning_rate": 5.396825396825397e-05,
"loss": 0.86,
"step": 1044
},
{
"epoch": 18.0,
"eval_accuracy": 0.5909090909090909,
"eval_f1_macro": 0.49254721886300834,
"eval_f1_micro": 0.5909090909090909,
"eval_f1_weighted": 0.5718522484072723,
"eval_loss": 1.2673066854476929,
"eval_precision_macro": 0.49684601113172544,
"eval_precision_micro": 0.5909090909090909,
"eval_precision_weighted": 0.568108028335301,
"eval_recall_macro": 0.5030990173847316,
"eval_recall_micro": 0.5909090909090909,
"eval_recall_weighted": 0.5909090909090909,
"eval_runtime": 1.6855,
"eval_samples_per_second": 78.316,
"eval_steps_per_second": 10.086,
"step": 1044
},
{
"epoch": 18.03448275862069,
"grad_norm": 5.494875907897949,
"learning_rate": 5.3858784893267654e-05,
"loss": 0.6363,
"step": 1046
},
{
"epoch": 18.06896551724138,
"grad_norm": 5.767899036407471,
"learning_rate": 5.374931581828134e-05,
"loss": 0.2099,
"step": 1048
},
{
"epoch": 18.103448275862068,
"grad_norm": 2.901460886001587,
"learning_rate": 5.363984674329502e-05,
"loss": 0.2682,
"step": 1050
},
{
"epoch": 18.137931034482758,
"grad_norm": 7.761710166931152,
"learning_rate": 5.3530377668308706e-05,
"loss": 0.3704,
"step": 1052
},
{
"epoch": 18.17241379310345,
"grad_norm": 4.4380879402160645,
"learning_rate": 5.342090859332239e-05,
"loss": 0.273,
"step": 1054
},
{
"epoch": 18.20689655172414,
"grad_norm": 7.681371688842773,
"learning_rate": 5.331143951833607e-05,
"loss": 0.5835,
"step": 1056
},
{
"epoch": 18.24137931034483,
"grad_norm": 10.872917175292969,
"learning_rate": 5.320197044334976e-05,
"loss": 0.529,
"step": 1058
},
{
"epoch": 18.275862068965516,
"grad_norm": 6.020653247833252,
"learning_rate": 5.3092501368363444e-05,
"loss": 0.3757,
"step": 1060
},
{
"epoch": 18.310344827586206,
"grad_norm": 7.052387714385986,
"learning_rate": 5.2983032293377124e-05,
"loss": 0.3641,
"step": 1062
},
{
"epoch": 18.344827586206897,
"grad_norm": 6.335596084594727,
"learning_rate": 5.287356321839081e-05,
"loss": 0.3232,
"step": 1064
},
{
"epoch": 18.379310344827587,
"grad_norm": 6.076163291931152,
"learning_rate": 5.276409414340448e-05,
"loss": 0.6439,
"step": 1066
},
{
"epoch": 18.413793103448278,
"grad_norm": 0.6590009331703186,
"learning_rate": 5.2654625068418176e-05,
"loss": 0.1923,
"step": 1068
},
{
"epoch": 18.448275862068964,
"grad_norm": 0.6382133364677429,
"learning_rate": 5.254515599343186e-05,
"loss": 0.567,
"step": 1070
},
{
"epoch": 18.482758620689655,
"grad_norm": 10.746706008911133,
"learning_rate": 5.2435686918445535e-05,
"loss": 0.6355,
"step": 1072
},
{
"epoch": 18.517241379310345,
"grad_norm": 6.357698917388916,
"learning_rate": 5.232621784345923e-05,
"loss": 0.4048,
"step": 1074
},
{
"epoch": 18.551724137931036,
"grad_norm": 4.186829566955566,
"learning_rate": 5.2216748768472914e-05,
"loss": 0.5531,
"step": 1076
},
{
"epoch": 18.586206896551722,
"grad_norm": 7.867221832275391,
"learning_rate": 5.2107279693486586e-05,
"loss": 0.3116,
"step": 1078
},
{
"epoch": 18.620689655172413,
"grad_norm": 3.410994052886963,
"learning_rate": 5.199781061850027e-05,
"loss": 0.4549,
"step": 1080
},
{
"epoch": 18.655172413793103,
"grad_norm": 4.2880706787109375,
"learning_rate": 5.1888341543513966e-05,
"loss": 0.5768,
"step": 1082
},
{
"epoch": 18.689655172413794,
"grad_norm": 5.609067440032959,
"learning_rate": 5.177887246852764e-05,
"loss": 0.2996,
"step": 1084
},
{
"epoch": 18.724137931034484,
"grad_norm": 7.243040561676025,
"learning_rate": 5.1669403393541325e-05,
"loss": 0.6335,
"step": 1086
},
{
"epoch": 18.75862068965517,
"grad_norm": 9.456660270690918,
"learning_rate": 5.155993431855502e-05,
"loss": 0.771,
"step": 1088
},
{
"epoch": 18.79310344827586,
"grad_norm": 3.2803101539611816,
"learning_rate": 5.145046524356869e-05,
"loss": 0.4275,
"step": 1090
},
{
"epoch": 18.82758620689655,
"grad_norm": 5.173320293426514,
"learning_rate": 5.1340996168582377e-05,
"loss": 0.5103,
"step": 1092
},
{
"epoch": 18.862068965517242,
"grad_norm": 9.215474128723145,
"learning_rate": 5.123152709359606e-05,
"loss": 0.4572,
"step": 1094
},
{
"epoch": 18.896551724137932,
"grad_norm": 3.3474466800689697,
"learning_rate": 5.112205801860974e-05,
"loss": 0.1896,
"step": 1096
},
{
"epoch": 18.93103448275862,
"grad_norm": 4.091315746307373,
"learning_rate": 5.101258894362343e-05,
"loss": 0.6855,
"step": 1098
},
{
"epoch": 18.96551724137931,
"grad_norm": 5.4540696144104,
"learning_rate": 5.0903119868637115e-05,
"loss": 0.2874,
"step": 1100
},
{
"epoch": 19.0,
"grad_norm": 0.8723583817481995,
"learning_rate": 5.0793650793650794e-05,
"loss": 0.2113,
"step": 1102
},
{
"epoch": 19.0,
"eval_accuracy": 0.6212121212121212,
"eval_f1_macro": 0.5179873730419885,
"eval_f1_micro": 0.6212121212121212,
"eval_f1_weighted": 0.5986162885696074,
"eval_loss": 1.2132326364517212,
"eval_precision_macro": 0.5386071190769648,
"eval_precision_micro": 0.6212121212121212,
"eval_precision_weighted": 0.6049018570893172,
"eval_recall_macro": 0.5257445200302343,
"eval_recall_micro": 0.6212121212121212,
"eval_recall_weighted": 0.6212121212121212,
"eval_runtime": 1.7051,
"eval_samples_per_second": 77.414,
"eval_steps_per_second": 9.97,
"step": 1102
},
{
"epoch": 19.03448275862069,
"grad_norm": 3.2095038890838623,
"learning_rate": 5.068418171866448e-05,
"loss": 0.3305,
"step": 1104
},
{
"epoch": 19.06896551724138,
"grad_norm": 7.022287368774414,
"learning_rate": 5.057471264367817e-05,
"loss": 0.6748,
"step": 1106
},
{
"epoch": 19.103448275862068,
"grad_norm": 8.976895332336426,
"learning_rate": 5.0465243568691846e-05,
"loss": 0.4676,
"step": 1108
},
{
"epoch": 19.137931034482758,
"grad_norm": 5.533504962921143,
"learning_rate": 5.035577449370553e-05,
"loss": 0.2232,
"step": 1110
},
{
"epoch": 19.17241379310345,
"grad_norm": 4.6383466720581055,
"learning_rate": 5.024630541871922e-05,
"loss": 0.4356,
"step": 1112
},
{
"epoch": 19.20689655172414,
"grad_norm": 0.5434678792953491,
"learning_rate": 5.01368363437329e-05,
"loss": 0.1359,
"step": 1114
},
{
"epoch": 19.24137931034483,
"grad_norm": 4.4346795082092285,
"learning_rate": 5.0027367268746584e-05,
"loss": 0.5523,
"step": 1116
},
{
"epoch": 19.275862068965516,
"grad_norm": 2.3363232612609863,
"learning_rate": 4.9917898193760264e-05,
"loss": 0.1374,
"step": 1118
},
{
"epoch": 19.310344827586206,
"grad_norm": 3.592006206512451,
"learning_rate": 4.980842911877395e-05,
"loss": 0.4274,
"step": 1120
},
{
"epoch": 19.344827586206897,
"grad_norm": 1.3552830219268799,
"learning_rate": 4.9698960043787636e-05,
"loss": 0.2087,
"step": 1122
},
{
"epoch": 19.379310344827587,
"grad_norm": 5.443765640258789,
"learning_rate": 4.9589490968801316e-05,
"loss": 0.3383,
"step": 1124
},
{
"epoch": 19.413793103448278,
"grad_norm": 8.5775785446167,
"learning_rate": 4.9480021893814995e-05,
"loss": 0.3641,
"step": 1126
},
{
"epoch": 19.448275862068964,
"grad_norm": 8.10037899017334,
"learning_rate": 4.937055281882869e-05,
"loss": 0.4428,
"step": 1128
},
{
"epoch": 19.482758620689655,
"grad_norm": 1.8419495820999146,
"learning_rate": 4.926108374384237e-05,
"loss": 0.4689,
"step": 1130
},
{
"epoch": 19.517241379310345,
"grad_norm": 8.350903511047363,
"learning_rate": 4.915161466885605e-05,
"loss": 0.1867,
"step": 1132
},
{
"epoch": 19.551724137931036,
"grad_norm": 0.38135936856269836,
"learning_rate": 4.904214559386973e-05,
"loss": 0.2662,
"step": 1134
},
{
"epoch": 19.586206896551722,
"grad_norm": 7.118171691894531,
"learning_rate": 4.893267651888342e-05,
"loss": 0.4822,
"step": 1136
},
{
"epoch": 19.620689655172413,
"grad_norm": 6.289510250091553,
"learning_rate": 4.88232074438971e-05,
"loss": 0.3416,
"step": 1138
},
{
"epoch": 19.655172413793103,
"grad_norm": 7.544029235839844,
"learning_rate": 4.8713738368910785e-05,
"loss": 0.2186,
"step": 1140
},
{
"epoch": 19.689655172413794,
"grad_norm": 5.812250137329102,
"learning_rate": 4.860426929392447e-05,
"loss": 0.9843,
"step": 1142
},
{
"epoch": 19.724137931034484,
"grad_norm": 5.986517429351807,
"learning_rate": 4.849480021893815e-05,
"loss": 0.4633,
"step": 1144
},
{
"epoch": 19.75862068965517,
"grad_norm": 11.750970840454102,
"learning_rate": 4.838533114395184e-05,
"loss": 0.2648,
"step": 1146
},
{
"epoch": 19.79310344827586,
"grad_norm": 1.8454886674880981,
"learning_rate": 4.827586206896552e-05,
"loss": 0.3346,
"step": 1148
},
{
"epoch": 19.82758620689655,
"grad_norm": 12.597700119018555,
"learning_rate": 4.81663929939792e-05,
"loss": 0.54,
"step": 1150
},
{
"epoch": 19.862068965517242,
"grad_norm": 8.612327575683594,
"learning_rate": 4.805692391899288e-05,
"loss": 0.3575,
"step": 1152
},
{
"epoch": 19.896551724137932,
"grad_norm": 3.5571696758270264,
"learning_rate": 4.7947454844006575e-05,
"loss": 0.5335,
"step": 1154
},
{
"epoch": 19.93103448275862,
"grad_norm": 5.682160377502441,
"learning_rate": 4.7837985769020255e-05,
"loss": 0.2374,
"step": 1156
},
{
"epoch": 19.96551724137931,
"grad_norm": 6.373334884643555,
"learning_rate": 4.7728516694033934e-05,
"loss": 0.6124,
"step": 1158
},
{
"epoch": 20.0,
"grad_norm": 0.5094832181930542,
"learning_rate": 4.761904761904762e-05,
"loss": 0.1168,
"step": 1160
},
{
"epoch": 20.0,
"eval_accuracy": 0.6136363636363636,
"eval_f1_macro": 0.5543005733250872,
"eval_f1_micro": 0.6136363636363636,
"eval_f1_weighted": 0.6069833060571026,
"eval_loss": 1.2442402839660645,
"eval_precision_macro": 0.5742089820117771,
"eval_precision_micro": 0.6136363636363636,
"eval_precision_weighted": 0.616364865879877,
"eval_recall_macro": 0.5517233560090703,
"eval_recall_micro": 0.6136363636363636,
"eval_recall_weighted": 0.6136363636363636,
"eval_runtime": 1.7788,
"eval_samples_per_second": 74.206,
"eval_steps_per_second": 9.557,
"step": 1160
},
{
"epoch": 20.03448275862069,
"grad_norm": 4.78882360458374,
"learning_rate": 4.7509578544061307e-05,
"loss": 0.142,
"step": 1162
},
{
"epoch": 20.06896551724138,
"grad_norm": 9.2319917678833,
"learning_rate": 4.7400109469074986e-05,
"loss": 0.2529,
"step": 1164
},
{
"epoch": 20.103448275862068,
"grad_norm": 9.061631202697754,
"learning_rate": 4.729064039408867e-05,
"loss": 0.6863,
"step": 1166
},
{
"epoch": 20.137931034482758,
"grad_norm": 2.0638833045959473,
"learning_rate": 4.718117131910236e-05,
"loss": 0.4872,
"step": 1168
},
{
"epoch": 20.17241379310345,
"grad_norm": 1.029740571975708,
"learning_rate": 4.707170224411604e-05,
"loss": 0.2375,
"step": 1170
},
{
"epoch": 20.20689655172414,
"grad_norm": 4.14223575592041,
"learning_rate": 4.6962233169129724e-05,
"loss": 0.6973,
"step": 1172
},
{
"epoch": 20.24137931034483,
"grad_norm": 5.068900108337402,
"learning_rate": 4.685276409414341e-05,
"loss": 0.7413,
"step": 1174
},
{
"epoch": 20.275862068965516,
"grad_norm": 2.607081413269043,
"learning_rate": 4.674329501915709e-05,
"loss": 0.454,
"step": 1176
},
{
"epoch": 20.310344827586206,
"grad_norm": 2.115675926208496,
"learning_rate": 4.663382594417077e-05,
"loss": 0.3017,
"step": 1178
},
{
"epoch": 20.344827586206897,
"grad_norm": 1.3065847158432007,
"learning_rate": 4.652435686918446e-05,
"loss": 0.4131,
"step": 1180
},
{
"epoch": 20.379310344827587,
"grad_norm": 10.156610488891602,
"learning_rate": 4.641488779419814e-05,
"loss": 0.6956,
"step": 1182
},
{
"epoch": 20.413793103448278,
"grad_norm": 6.317163944244385,
"learning_rate": 4.630541871921182e-05,
"loss": 0.1953,
"step": 1184
},
{
"epoch": 20.448275862068964,
"grad_norm": 4.764923095703125,
"learning_rate": 4.619594964422551e-05,
"loss": 0.1456,
"step": 1186
},
{
"epoch": 20.482758620689655,
"grad_norm": 3.1632251739501953,
"learning_rate": 4.6086480569239194e-05,
"loss": 0.2287,
"step": 1188
},
{
"epoch": 20.517241379310345,
"grad_norm": 12.108798027038574,
"learning_rate": 4.597701149425287e-05,
"loss": 0.2027,
"step": 1190
},
{
"epoch": 20.551724137931036,
"grad_norm": 11.341327667236328,
"learning_rate": 4.586754241926656e-05,
"loss": 0.4052,
"step": 1192
},
{
"epoch": 20.586206896551722,
"grad_norm": 5.067739009857178,
"learning_rate": 4.5758073344280246e-05,
"loss": 0.1575,
"step": 1194
},
{
"epoch": 20.620689655172413,
"grad_norm": 7.721131324768066,
"learning_rate": 4.5648604269293925e-05,
"loss": 0.4256,
"step": 1196
},
{
"epoch": 20.655172413793103,
"grad_norm": 10.028607368469238,
"learning_rate": 4.553913519430761e-05,
"loss": 0.5147,
"step": 1198
},
{
"epoch": 20.689655172413794,
"grad_norm": 1.6635549068450928,
"learning_rate": 4.54296661193213e-05,
"loss": 0.1427,
"step": 1200
},
{
"epoch": 20.724137931034484,
"grad_norm": 6.918335914611816,
"learning_rate": 4.532019704433498e-05,
"loss": 0.2659,
"step": 1202
},
{
"epoch": 20.75862068965517,
"grad_norm": 2.401365041732788,
"learning_rate": 4.5210727969348656e-05,
"loss": 0.3284,
"step": 1204
},
{
"epoch": 20.79310344827586,
"grad_norm": 3.1419484615325928,
"learning_rate": 4.510125889436235e-05,
"loss": 0.2873,
"step": 1206
},
{
"epoch": 20.82758620689655,
"grad_norm": 5.024045944213867,
"learning_rate": 4.499178981937603e-05,
"loss": 0.3941,
"step": 1208
},
{
"epoch": 20.862068965517242,
"grad_norm": 0.6695942878723145,
"learning_rate": 4.488232074438971e-05,
"loss": 0.103,
"step": 1210
},
{
"epoch": 20.896551724137932,
"grad_norm": 2.601149320602417,
"learning_rate": 4.47728516694034e-05,
"loss": 0.2325,
"step": 1212
},
{
"epoch": 20.93103448275862,
"grad_norm": 10.553409576416016,
"learning_rate": 4.466338259441708e-05,
"loss": 0.3237,
"step": 1214
},
{
"epoch": 20.96551724137931,
"grad_norm": 4.566164016723633,
"learning_rate": 4.455391351943076e-05,
"loss": 0.2796,
"step": 1216
},
{
"epoch": 21.0,
"grad_norm": 0.7345550060272217,
"learning_rate": 4.4444444444444447e-05,
"loss": 0.3149,
"step": 1218
},
{
"epoch": 21.0,
"eval_accuracy": 0.6287878787878788,
"eval_f1_macro": 0.5445690340153033,
"eval_f1_micro": 0.6287878787878788,
"eval_f1_weighted": 0.6145913352521732,
"eval_loss": 1.2900217771530151,
"eval_precision_macro": 0.5463030249320572,
"eval_precision_micro": 0.6287878787878788,
"eval_precision_weighted": 0.6119575429051235,
"eval_recall_macro": 0.5533938019652305,
"eval_recall_micro": 0.6287878787878788,
"eval_recall_weighted": 0.6287878787878788,
"eval_runtime": 1.6672,
"eval_samples_per_second": 79.174,
"eval_steps_per_second": 10.197,
"step": 1218
},
{
"epoch": 21.03448275862069,
"grad_norm": 6.113945007324219,
"learning_rate": 4.433497536945813e-05,
"loss": 0.1953,
"step": 1220
},
{
"epoch": 21.06896551724138,
"grad_norm": 6.228808879852295,
"learning_rate": 4.422550629447181e-05,
"loss": 0.1327,
"step": 1222
},
{
"epoch": 21.103448275862068,
"grad_norm": 2.212075710296631,
"learning_rate": 4.41160372194855e-05,
"loss": 0.0969,
"step": 1224
},
{
"epoch": 21.137931034482758,
"grad_norm": 0.48354583978652954,
"learning_rate": 4.4006568144499185e-05,
"loss": 0.3936,
"step": 1226
},
{
"epoch": 21.17241379310345,
"grad_norm": 20.688846588134766,
"learning_rate": 4.3897099069512864e-05,
"loss": 0.5529,
"step": 1228
},
{
"epoch": 21.20689655172414,
"grad_norm": 0.4432692527770996,
"learning_rate": 4.3787629994526544e-05,
"loss": 0.2458,
"step": 1230
},
{
"epoch": 21.24137931034483,
"grad_norm": 0.3409382402896881,
"learning_rate": 4.367816091954024e-05,
"loss": 0.0771,
"step": 1232
},
{
"epoch": 21.275862068965516,
"grad_norm": 3.208294153213501,
"learning_rate": 4.3568691844553916e-05,
"loss": 0.1512,
"step": 1234
},
{
"epoch": 21.310344827586206,
"grad_norm": 5.407877445220947,
"learning_rate": 4.3459222769567596e-05,
"loss": 0.1977,
"step": 1236
},
{
"epoch": 21.344827586206897,
"grad_norm": 6.478481292724609,
"learning_rate": 4.334975369458129e-05,
"loss": 0.3786,
"step": 1238
},
{
"epoch": 21.379310344827587,
"grad_norm": 12.132913589477539,
"learning_rate": 4.324028461959497e-05,
"loss": 0.2917,
"step": 1240
},
{
"epoch": 21.413793103448278,
"grad_norm": 3.9148740768432617,
"learning_rate": 4.313081554460865e-05,
"loss": 0.2521,
"step": 1242
},
{
"epoch": 21.448275862068964,
"grad_norm": 9.572196006774902,
"learning_rate": 4.3021346469622334e-05,
"loss": 0.5989,
"step": 1244
},
{
"epoch": 21.482758620689655,
"grad_norm": 14.302295684814453,
"learning_rate": 4.291187739463602e-05,
"loss": 0.3296,
"step": 1246
},
{
"epoch": 21.517241379310345,
"grad_norm": 3.571140766143799,
"learning_rate": 4.28024083196497e-05,
"loss": 0.2097,
"step": 1248
},
{
"epoch": 21.551724137931036,
"grad_norm": 20.992183685302734,
"learning_rate": 4.2692939244663386e-05,
"loss": 0.4902,
"step": 1250
},
{
"epoch": 21.586206896551722,
"grad_norm": 0.8532655239105225,
"learning_rate": 4.258347016967707e-05,
"loss": 0.3269,
"step": 1252
},
{
"epoch": 21.620689655172413,
"grad_norm": 6.140798091888428,
"learning_rate": 4.247400109469075e-05,
"loss": 0.3355,
"step": 1254
},
{
"epoch": 21.655172413793103,
"grad_norm": 3.988628625869751,
"learning_rate": 4.236453201970443e-05,
"loss": 0.3815,
"step": 1256
},
{
"epoch": 21.689655172413794,
"grad_norm": 3.6218347549438477,
"learning_rate": 4.2255062944718124e-05,
"loss": 0.3056,
"step": 1258
},
{
"epoch": 21.724137931034484,
"grad_norm": 2.697680711746216,
"learning_rate": 4.21455938697318e-05,
"loss": 0.2947,
"step": 1260
},
{
"epoch": 21.75862068965517,
"grad_norm": 0.8190564513206482,
"learning_rate": 4.203612479474548e-05,
"loss": 0.1653,
"step": 1262
},
{
"epoch": 21.79310344827586,
"grad_norm": 4.2845635414123535,
"learning_rate": 4.1926655719759176e-05,
"loss": 0.1636,
"step": 1264
},
{
"epoch": 21.82758620689655,
"grad_norm": 4.332767009735107,
"learning_rate": 4.1817186644772855e-05,
"loss": 0.2388,
"step": 1266
},
{
"epoch": 21.862068965517242,
"grad_norm": 8.83471393585205,
"learning_rate": 4.1707717569786535e-05,
"loss": 0.3177,
"step": 1268
},
{
"epoch": 21.896551724137932,
"grad_norm": 0.2615772485733032,
"learning_rate": 4.159824849480022e-05,
"loss": 0.3468,
"step": 1270
},
{
"epoch": 21.93103448275862,
"grad_norm": 1.6727783679962158,
"learning_rate": 4.148877941981391e-05,
"loss": 0.5736,
"step": 1272
},
{
"epoch": 21.96551724137931,
"grad_norm": 0.6974768042564392,
"learning_rate": 4.1379310344827587e-05,
"loss": 0.1321,
"step": 1274
},
{
"epoch": 22.0,
"grad_norm": 0.36256420612335205,
"learning_rate": 4.126984126984127e-05,
"loss": 0.0793,
"step": 1276
},
{
"epoch": 22.0,
"eval_accuracy": 0.6287878787878788,
"eval_f1_macro": 0.569184754221156,
"eval_f1_micro": 0.6287878787878788,
"eval_f1_weighted": 0.6209601528534142,
"eval_loss": 1.3289726972579956,
"eval_precision_macro": 0.5959995620321707,
"eval_precision_micro": 0.6287878787878788,
"eval_precision_weighted": 0.6358640227526097,
"eval_recall_macro": 0.5650793650793651,
"eval_recall_micro": 0.6287878787878788,
"eval_recall_weighted": 0.6287878787878788,
"eval_runtime": 1.7261,
"eval_samples_per_second": 76.472,
"eval_steps_per_second": 9.849,
"step": 1276
},
{
"epoch": 22.03448275862069,
"grad_norm": 1.9399197101593018,
"learning_rate": 4.116037219485496e-05,
"loss": 0.354,
"step": 1278
},
{
"epoch": 22.06896551724138,
"grad_norm": 0.5011938810348511,
"learning_rate": 4.105090311986864e-05,
"loss": 0.0993,
"step": 1280
},
{
"epoch": 22.103448275862068,
"grad_norm": 0.2772790193557739,
"learning_rate": 4.094143404488232e-05,
"loss": 0.2445,
"step": 1282
},
{
"epoch": 22.137931034482758,
"grad_norm": 11.734522819519043,
"learning_rate": 4.083196496989601e-05,
"loss": 0.5215,
"step": 1284
},
{
"epoch": 22.17241379310345,
"grad_norm": 5.657594680786133,
"learning_rate": 4.072249589490969e-05,
"loss": 0.2948,
"step": 1286
},
{
"epoch": 22.20689655172414,
"grad_norm": 5.29397439956665,
"learning_rate": 4.061302681992337e-05,
"loss": 0.367,
"step": 1288
},
{
"epoch": 22.24137931034483,
"grad_norm": 1.1745647192001343,
"learning_rate": 4.050355774493706e-05,
"loss": 0.1183,
"step": 1290
},
{
"epoch": 22.275862068965516,
"grad_norm": 10.664090156555176,
"learning_rate": 4.039408866995074e-05,
"loss": 0.1827,
"step": 1292
},
{
"epoch": 22.310344827586206,
"grad_norm": 2.1371960639953613,
"learning_rate": 4.028461959496442e-05,
"loss": 0.1411,
"step": 1294
},
{
"epoch": 22.344827586206897,
"grad_norm": 14.131455421447754,
"learning_rate": 4.017515051997811e-05,
"loss": 0.487,
"step": 1296
},
{
"epoch": 22.379310344827587,
"grad_norm": 0.5914010405540466,
"learning_rate": 4.0065681444991794e-05,
"loss": 0.0997,
"step": 1298
},
{
"epoch": 22.413793103448278,
"grad_norm": 8.857903480529785,
"learning_rate": 3.9956212370005474e-05,
"loss": 0.4644,
"step": 1300
},
{
"epoch": 22.448275862068964,
"grad_norm": 1.2613619565963745,
"learning_rate": 3.984674329501916e-05,
"loss": 0.0965,
"step": 1302
},
{
"epoch": 22.482758620689655,
"grad_norm": 4.910549640655518,
"learning_rate": 3.9737274220032846e-05,
"loss": 0.6465,
"step": 1304
},
{
"epoch": 22.517241379310345,
"grad_norm": 1.887235164642334,
"learning_rate": 3.9627805145046526e-05,
"loss": 0.1883,
"step": 1306
},
{
"epoch": 22.551724137931036,
"grad_norm": 5.57004451751709,
"learning_rate": 3.9518336070060205e-05,
"loss": 0.2007,
"step": 1308
},
{
"epoch": 22.586206896551722,
"grad_norm": 7.169775485992432,
"learning_rate": 3.94088669950739e-05,
"loss": 0.1982,
"step": 1310
},
{
"epoch": 22.620689655172413,
"grad_norm": 0.8967238068580627,
"learning_rate": 3.929939792008758e-05,
"loss": 0.1722,
"step": 1312
},
{
"epoch": 22.655172413793103,
"grad_norm": 3.6087558269500732,
"learning_rate": 3.918992884510126e-05,
"loss": 0.2057,
"step": 1314
},
{
"epoch": 22.689655172413794,
"grad_norm": 2.1949057579040527,
"learning_rate": 3.908045977011495e-05,
"loss": 0.5147,
"step": 1316
},
{
"epoch": 22.724137931034484,
"grad_norm": 0.6924113631248474,
"learning_rate": 3.897099069512863e-05,
"loss": 0.2777,
"step": 1318
},
{
"epoch": 22.75862068965517,
"grad_norm": 0.32273250818252563,
"learning_rate": 3.886152162014231e-05,
"loss": 0.0778,
"step": 1320
},
{
"epoch": 22.79310344827586,
"grad_norm": 3.3658881187438965,
"learning_rate": 3.8752052545155995e-05,
"loss": 0.1048,
"step": 1322
},
{
"epoch": 22.82758620689655,
"grad_norm": 6.8869194984436035,
"learning_rate": 3.864258347016968e-05,
"loss": 0.2943,
"step": 1324
},
{
"epoch": 22.862068965517242,
"grad_norm": 3.4153761863708496,
"learning_rate": 3.853311439518336e-05,
"loss": 0.5214,
"step": 1326
},
{
"epoch": 22.896551724137932,
"grad_norm": 22.111948013305664,
"learning_rate": 3.842364532019704e-05,
"loss": 0.1767,
"step": 1328
},
{
"epoch": 22.93103448275862,
"grad_norm": 1.5236107110977173,
"learning_rate": 3.831417624521073e-05,
"loss": 0.0809,
"step": 1330
},
{
"epoch": 22.96551724137931,
"grad_norm": 8.651114463806152,
"learning_rate": 3.820470717022441e-05,
"loss": 0.3069,
"step": 1332
},
{
"epoch": 23.0,
"grad_norm": 2.85559344291687,
"learning_rate": 3.809523809523809e-05,
"loss": 0.1761,
"step": 1334
},
{
"epoch": 23.0,
"eval_accuracy": 0.6212121212121212,
"eval_f1_macro": 0.5572308233638282,
"eval_f1_micro": 0.6212121212121212,
"eval_f1_weighted": 0.6032445763049211,
"eval_loss": 1.4283698797225952,
"eval_precision_macro": 0.6453514739229025,
"eval_precision_micro": 0.6212121212121212,
"eval_precision_weighted": 0.6562950937950938,
"eval_recall_macro": 0.5515646258503402,
"eval_recall_micro": 0.6212121212121212,
"eval_recall_weighted": 0.6212121212121212,
"eval_runtime": 1.7048,
"eval_samples_per_second": 77.427,
"eval_steps_per_second": 9.972,
"step": 1334
},
{
"epoch": 23.03448275862069,
"grad_norm": 10.627945899963379,
"learning_rate": 3.7985769020251785e-05,
"loss": 0.2271,
"step": 1336
},
{
"epoch": 23.06896551724138,
"grad_norm": 2.46520733833313,
"learning_rate": 3.7876299945265465e-05,
"loss": 0.0865,
"step": 1338
},
{
"epoch": 23.103448275862068,
"grad_norm": 14.325843811035156,
"learning_rate": 3.7766830870279144e-05,
"loss": 0.5699,
"step": 1340
},
{
"epoch": 23.137931034482758,
"grad_norm": 3.0035719871520996,
"learning_rate": 3.765736179529283e-05,
"loss": 0.2166,
"step": 1342
},
{
"epoch": 23.17241379310345,
"grad_norm": 7.00339412689209,
"learning_rate": 3.7547892720306517e-05,
"loss": 0.3422,
"step": 1344
},
{
"epoch": 23.20689655172414,
"grad_norm": 4.217713356018066,
"learning_rate": 3.7438423645320196e-05,
"loss": 0.1615,
"step": 1346
},
{
"epoch": 23.24137931034483,
"grad_norm": 3.0487077236175537,
"learning_rate": 3.732895457033388e-05,
"loss": 0.3146,
"step": 1348
},
{
"epoch": 23.275862068965516,
"grad_norm": 11.421404838562012,
"learning_rate": 3.721948549534757e-05,
"loss": 0.2691,
"step": 1350
},
{
"epoch": 23.310344827586206,
"grad_norm": 0.3206021189689636,
"learning_rate": 3.711001642036125e-05,
"loss": 0.0665,
"step": 1352
},
{
"epoch": 23.344827586206897,
"grad_norm": 1.73373544216156,
"learning_rate": 3.700054734537493e-05,
"loss": 0.154,
"step": 1354
},
{
"epoch": 23.379310344827587,
"grad_norm": 0.29473578929901123,
"learning_rate": 3.689107827038862e-05,
"loss": 0.344,
"step": 1356
},
{
"epoch": 23.413793103448278,
"grad_norm": 3.3459489345550537,
"learning_rate": 3.67816091954023e-05,
"loss": 0.1159,
"step": 1358
},
{
"epoch": 23.448275862068964,
"grad_norm": 1.0497227907180786,
"learning_rate": 3.667214012041598e-05,
"loss": 0.093,
"step": 1360
},
{
"epoch": 23.482758620689655,
"grad_norm": 9.836808204650879,
"learning_rate": 3.656267104542967e-05,
"loss": 0.1138,
"step": 1362
},
{
"epoch": 23.517241379310345,
"grad_norm": 0.22488915920257568,
"learning_rate": 3.645320197044335e-05,
"loss": 0.0985,
"step": 1364
},
{
"epoch": 23.551724137931036,
"grad_norm": 2.9141793251037598,
"learning_rate": 3.634373289545703e-05,
"loss": 0.1183,
"step": 1366
},
{
"epoch": 23.586206896551722,
"grad_norm": 7.301143646240234,
"learning_rate": 3.623426382047072e-05,
"loss": 0.1026,
"step": 1368
},
{
"epoch": 23.620689655172413,
"grad_norm": 2.478011131286621,
"learning_rate": 3.6124794745484404e-05,
"loss": 0.175,
"step": 1370
},
{
"epoch": 23.655172413793103,
"grad_norm": 0.43685877323150635,
"learning_rate": 3.601532567049808e-05,
"loss": 0.0807,
"step": 1372
},
{
"epoch": 23.689655172413794,
"grad_norm": 0.7601661086082458,
"learning_rate": 3.590585659551177e-05,
"loss": 0.2639,
"step": 1374
},
{
"epoch": 23.724137931034484,
"grad_norm": 3.5471153259277344,
"learning_rate": 3.5796387520525456e-05,
"loss": 0.1622,
"step": 1376
},
{
"epoch": 23.75862068965517,
"grad_norm": 5.943011283874512,
"learning_rate": 3.5686918445539135e-05,
"loss": 0.2493,
"step": 1378
},
{
"epoch": 23.79310344827586,
"grad_norm": 0.5235481858253479,
"learning_rate": 3.5577449370552815e-05,
"loss": 0.0738,
"step": 1380
},
{
"epoch": 23.82758620689655,
"grad_norm": 0.9073754549026489,
"learning_rate": 3.546798029556651e-05,
"loss": 0.2598,
"step": 1382
},
{
"epoch": 23.862068965517242,
"grad_norm": 0.35284557938575745,
"learning_rate": 3.535851122058019e-05,
"loss": 0.0684,
"step": 1384
},
{
"epoch": 23.896551724137932,
"grad_norm": 7.837213039398193,
"learning_rate": 3.5249042145593867e-05,
"loss": 0.1072,
"step": 1386
},
{
"epoch": 23.93103448275862,
"grad_norm": 10.070199966430664,
"learning_rate": 3.513957307060756e-05,
"loss": 0.2412,
"step": 1388
},
{
"epoch": 23.96551724137931,
"grad_norm": 22.912311553955078,
"learning_rate": 3.503010399562124e-05,
"loss": 0.379,
"step": 1390
},
{
"epoch": 24.0,
"grad_norm": 11.414230346679688,
"learning_rate": 3.492063492063492e-05,
"loss": 0.1714,
"step": 1392
},
{
"epoch": 24.0,
"eval_accuracy": 0.6287878787878788,
"eval_f1_macro": 0.5781647222832837,
"eval_f1_micro": 0.6287878787878788,
"eval_f1_weighted": 0.6343655897181453,
"eval_loss": 1.2994105815887451,
"eval_precision_macro": 0.5899192043967345,
"eval_precision_micro": 0.6287878787878788,
"eval_precision_weighted": 0.6461095703103377,
"eval_recall_macro": 0.5727739984882841,
"eval_recall_micro": 0.6287878787878788,
"eval_recall_weighted": 0.6287878787878788,
"eval_runtime": 1.7262,
"eval_samples_per_second": 76.469,
"eval_steps_per_second": 9.848,
"step": 1392
},
{
"epoch": 24.03448275862069,
"grad_norm": 1.8347530364990234,
"learning_rate": 3.4811165845648605e-05,
"loss": 0.0856,
"step": 1394
},
{
"epoch": 24.06896551724138,
"grad_norm": 0.23063726723194122,
"learning_rate": 3.470169677066229e-05,
"loss": 0.0486,
"step": 1396
},
{
"epoch": 24.103448275862068,
"grad_norm": 13.704474449157715,
"learning_rate": 3.459222769567597e-05,
"loss": 0.0688,
"step": 1398
},
{
"epoch": 24.137931034482758,
"grad_norm": 0.7486428022384644,
"learning_rate": 3.4482758620689657e-05,
"loss": 0.0726,
"step": 1400
},
{
"epoch": 24.17241379310345,
"grad_norm": 5.045259952545166,
"learning_rate": 3.437328954570334e-05,
"loss": 0.5948,
"step": 1402
},
{
"epoch": 24.20689655172414,
"grad_norm": 17.36922836303711,
"learning_rate": 3.426382047071702e-05,
"loss": 0.1763,
"step": 1404
},
{
"epoch": 24.24137931034483,
"grad_norm": 4.404787063598633,
"learning_rate": 3.415435139573071e-05,
"loss": 0.0887,
"step": 1406
},
{
"epoch": 24.275862068965516,
"grad_norm": 1.7757786512374878,
"learning_rate": 3.4044882320744395e-05,
"loss": 0.0816,
"step": 1408
},
{
"epoch": 24.310344827586206,
"grad_norm": 10.455934524536133,
"learning_rate": 3.3935413245758074e-05,
"loss": 0.3544,
"step": 1410
},
{
"epoch": 24.344827586206897,
"grad_norm": 1.8070427179336548,
"learning_rate": 3.3825944170771754e-05,
"loss": 0.3629,
"step": 1412
},
{
"epoch": 24.379310344827587,
"grad_norm": 12.575775146484375,
"learning_rate": 3.371647509578545e-05,
"loss": 0.1447,
"step": 1414
},
{
"epoch": 24.413793103448278,
"grad_norm": 17.860628128051758,
"learning_rate": 3.3607006020799126e-05,
"loss": 0.2491,
"step": 1416
},
{
"epoch": 24.448275862068964,
"grad_norm": 0.40125876665115356,
"learning_rate": 3.3497536945812806e-05,
"loss": 0.0505,
"step": 1418
},
{
"epoch": 24.482758620689655,
"grad_norm": 0.3735012710094452,
"learning_rate": 3.338806787082649e-05,
"loss": 0.0842,
"step": 1420
},
{
"epoch": 24.517241379310345,
"grad_norm": 10.689638137817383,
"learning_rate": 3.327859879584018e-05,
"loss": 0.4145,
"step": 1422
},
{
"epoch": 24.551724137931036,
"grad_norm": 0.5746628642082214,
"learning_rate": 3.316912972085386e-05,
"loss": 0.1959,
"step": 1424
},
{
"epoch": 24.586206896551722,
"grad_norm": 1.2556354999542236,
"learning_rate": 3.3059660645867544e-05,
"loss": 0.1955,
"step": 1426
},
{
"epoch": 24.620689655172413,
"grad_norm": 0.3656192421913147,
"learning_rate": 3.295019157088123e-05,
"loss": 0.0546,
"step": 1428
},
{
"epoch": 24.655172413793103,
"grad_norm": 0.373201847076416,
"learning_rate": 3.284072249589491e-05,
"loss": 0.0566,
"step": 1430
},
{
"epoch": 24.689655172413794,
"grad_norm": 10.170561790466309,
"learning_rate": 3.2731253420908596e-05,
"loss": 0.129,
"step": 1432
},
{
"epoch": 24.724137931034484,
"grad_norm": 0.8907169103622437,
"learning_rate": 3.262178434592228e-05,
"loss": 0.0553,
"step": 1434
},
{
"epoch": 24.75862068965517,
"grad_norm": 9.43267822265625,
"learning_rate": 3.251231527093596e-05,
"loss": 0.149,
"step": 1436
},
{
"epoch": 24.79310344827586,
"grad_norm": 8.570932388305664,
"learning_rate": 3.240284619594964e-05,
"loss": 0.1292,
"step": 1438
},
{
"epoch": 24.82758620689655,
"grad_norm": 13.18994426727295,
"learning_rate": 3.2293377120963334e-05,
"loss": 0.2692,
"step": 1440
},
{
"epoch": 24.862068965517242,
"grad_norm": 0.47455695271492004,
"learning_rate": 3.218390804597701e-05,
"loss": 0.41,
"step": 1442
},
{
"epoch": 24.896551724137932,
"grad_norm": 1.234464406967163,
"learning_rate": 3.207443897099069e-05,
"loss": 0.056,
"step": 1444
},
{
"epoch": 24.93103448275862,
"grad_norm": 1.0157136917114258,
"learning_rate": 3.196496989600438e-05,
"loss": 0.1239,
"step": 1446
},
{
"epoch": 24.96551724137931,
"grad_norm": 0.6129853129386902,
"learning_rate": 3.1855500821018065e-05,
"loss": 0.0641,
"step": 1448
},
{
"epoch": 25.0,
"grad_norm": 8.584732055664062,
"learning_rate": 3.1746031746031745e-05,
"loss": 0.465,
"step": 1450
},
{
"epoch": 25.0,
"eval_accuracy": 0.6136363636363636,
"eval_f1_macro": 0.558068783068783,
"eval_f1_micro": 0.6136363636363636,
"eval_f1_weighted": 0.6134259259259258,
"eval_loss": 1.4011425971984863,
"eval_precision_macro": 0.5662329205186348,
"eval_precision_micro": 0.6136363636363636,
"eval_precision_weighted": 0.6187895437895439,
"eval_recall_macro": 0.5556084656084657,
"eval_recall_micro": 0.6136363636363636,
"eval_recall_weighted": 0.6136363636363636,
"eval_runtime": 1.7373,
"eval_samples_per_second": 75.98,
"eval_steps_per_second": 9.785,
"step": 1450
},
{
"epoch": 25.03448275862069,
"grad_norm": 0.2674843966960907,
"learning_rate": 3.163656267104543e-05,
"loss": 0.063,
"step": 1452
},
{
"epoch": 25.06896551724138,
"grad_norm": 0.20413754880428314,
"learning_rate": 3.152709359605912e-05,
"loss": 0.2553,
"step": 1454
},
{
"epoch": 25.103448275862068,
"grad_norm": 1.4728457927703857,
"learning_rate": 3.1417624521072797e-05,
"loss": 0.0623,
"step": 1456
},
{
"epoch": 25.137931034482758,
"grad_norm": 1.164231300354004,
"learning_rate": 3.130815544608648e-05,
"loss": 0.0657,
"step": 1458
},
{
"epoch": 25.17241379310345,
"grad_norm": 0.4934793710708618,
"learning_rate": 3.119868637110017e-05,
"loss": 0.1067,
"step": 1460
},
{
"epoch": 25.20689655172414,
"grad_norm": 11.123279571533203,
"learning_rate": 3.108921729611385e-05,
"loss": 0.5103,
"step": 1462
},
{
"epoch": 25.24137931034483,
"grad_norm": 0.14121675491333008,
"learning_rate": 3.097974822112753e-05,
"loss": 0.0531,
"step": 1464
},
{
"epoch": 25.275862068965516,
"grad_norm": 0.2784970998764038,
"learning_rate": 3.087027914614122e-05,
"loss": 0.0509,
"step": 1466
},
{
"epoch": 25.310344827586206,
"grad_norm": 2.157268762588501,
"learning_rate": 3.07608100711549e-05,
"loss": 0.0852,
"step": 1468
},
{
"epoch": 25.344827586206897,
"grad_norm": 2.208381414413452,
"learning_rate": 3.065134099616858e-05,
"loss": 0.0847,
"step": 1470
},
{
"epoch": 25.379310344827587,
"grad_norm": 0.3056719899177551,
"learning_rate": 3.0541871921182266e-05,
"loss": 0.044,
"step": 1472
},
{
"epoch": 25.413793103448278,
"grad_norm": 0.2943095266819,
"learning_rate": 3.0432402846195952e-05,
"loss": 0.1533,
"step": 1474
},
{
"epoch": 25.448275862068964,
"grad_norm": 7.445497035980225,
"learning_rate": 3.0322933771209632e-05,
"loss": 0.254,
"step": 1476
},
{
"epoch": 25.482758620689655,
"grad_norm": 1.454938292503357,
"learning_rate": 3.021346469622332e-05,
"loss": 0.0573,
"step": 1478
},
{
"epoch": 25.517241379310345,
"grad_norm": 0.25220704078674316,
"learning_rate": 3.0103995621237e-05,
"loss": 0.0506,
"step": 1480
},
{
"epoch": 25.551724137931036,
"grad_norm": 1.2520936727523804,
"learning_rate": 2.9994526546250684e-05,
"loss": 0.0533,
"step": 1482
},
{
"epoch": 25.586206896551722,
"grad_norm": 0.5618668794631958,
"learning_rate": 2.988505747126437e-05,
"loss": 0.05,
"step": 1484
},
{
"epoch": 25.620689655172413,
"grad_norm": 6.944793701171875,
"learning_rate": 2.9775588396278053e-05,
"loss": 0.1004,
"step": 1486
},
{
"epoch": 25.655172413793103,
"grad_norm": 0.36061984300613403,
"learning_rate": 2.9666119321291736e-05,
"loss": 0.0442,
"step": 1488
},
{
"epoch": 25.689655172413794,
"grad_norm": 13.36592960357666,
"learning_rate": 2.9556650246305422e-05,
"loss": 0.1224,
"step": 1490
},
{
"epoch": 25.724137931034484,
"grad_norm": 1.499154806137085,
"learning_rate": 2.9447181171319105e-05,
"loss": 0.0552,
"step": 1492
},
{
"epoch": 25.75862068965517,
"grad_norm": 1.4718117713928223,
"learning_rate": 2.9337712096332788e-05,
"loss": 0.1021,
"step": 1494
},
{
"epoch": 25.79310344827586,
"grad_norm": 25.769798278808594,
"learning_rate": 2.9228243021346467e-05,
"loss": 0.6279,
"step": 1496
},
{
"epoch": 25.82758620689655,
"grad_norm": 1.972213625907898,
"learning_rate": 2.9118773946360157e-05,
"loss": 0.2089,
"step": 1498
},
{
"epoch": 25.862068965517242,
"grad_norm": 0.9450302124023438,
"learning_rate": 2.900930487137384e-05,
"loss": 0.0474,
"step": 1500
},
{
"epoch": 25.896551724137932,
"grad_norm": 2.7113711833953857,
"learning_rate": 2.889983579638752e-05,
"loss": 0.4025,
"step": 1502
},
{
"epoch": 25.93103448275862,
"grad_norm": 23.863096237182617,
"learning_rate": 2.879036672140121e-05,
"loss": 0.3943,
"step": 1504
},
{
"epoch": 25.96551724137931,
"grad_norm": 0.5137322545051575,
"learning_rate": 2.8680897646414888e-05,
"loss": 0.0574,
"step": 1506
},
{
"epoch": 26.0,
"grad_norm": 9.085604667663574,
"learning_rate": 2.857142857142857e-05,
"loss": 0.2203,
"step": 1508
},
{
"epoch": 26.0,
"eval_accuracy": 0.6287878787878788,
"eval_f1_macro": 0.5740664325639695,
"eval_f1_micro": 0.6287878787878788,
"eval_f1_weighted": 0.6265666159279011,
"eval_loss": 1.4700660705566406,
"eval_precision_macro": 0.6167028006681489,
"eval_precision_micro": 0.6287878787878788,
"eval_precision_weighted": 0.6552921047278235,
"eval_recall_macro": 0.5675888133030991,
"eval_recall_micro": 0.6287878787878788,
"eval_recall_weighted": 0.6287878787878788,
"eval_runtime": 1.7235,
"eval_samples_per_second": 76.588,
"eval_steps_per_second": 9.864,
"step": 1508
},
{
"epoch": 26.03448275862069,
"grad_norm": 4.422103404998779,
"learning_rate": 2.8461959496442257e-05,
"loss": 0.0811,
"step": 1510
},
{
"epoch": 26.06896551724138,
"grad_norm": 4.039704322814941,
"learning_rate": 2.835249042145594e-05,
"loss": 0.3148,
"step": 1512
},
{
"epoch": 26.103448275862068,
"grad_norm": 0.1570342779159546,
"learning_rate": 2.8243021346469623e-05,
"loss": 0.0437,
"step": 1514
},
{
"epoch": 26.137931034482758,
"grad_norm": 6.058873176574707,
"learning_rate": 2.813355227148331e-05,
"loss": 0.1608,
"step": 1516
},
{
"epoch": 26.17241379310345,
"grad_norm": 3.495460033416748,
"learning_rate": 2.8024083196496992e-05,
"loss": 0.0555,
"step": 1518
},
{
"epoch": 26.20689655172414,
"grad_norm": 7.322564601898193,
"learning_rate": 2.7914614121510675e-05,
"loss": 0.0886,
"step": 1520
},
{
"epoch": 26.24137931034483,
"grad_norm": 0.18538200855255127,
"learning_rate": 2.7805145046524354e-05,
"loss": 0.0562,
"step": 1522
},
{
"epoch": 26.275862068965516,
"grad_norm": 0.3719606101512909,
"learning_rate": 2.7695675971538044e-05,
"loss": 0.041,
"step": 1524
},
{
"epoch": 26.310344827586206,
"grad_norm": 0.2550795376300812,
"learning_rate": 2.7586206896551727e-05,
"loss": 0.1232,
"step": 1526
},
{
"epoch": 26.344827586206897,
"grad_norm": 0.3839069604873657,
"learning_rate": 2.7476737821565406e-05,
"loss": 0.204,
"step": 1528
},
{
"epoch": 26.379310344827587,
"grad_norm": 13.937886238098145,
"learning_rate": 2.7367268746579096e-05,
"loss": 0.4575,
"step": 1530
},
{
"epoch": 26.413793103448278,
"grad_norm": 3.547407865524292,
"learning_rate": 2.7257799671592775e-05,
"loss": 0.2855,
"step": 1532
},
{
"epoch": 26.448275862068964,
"grad_norm": 18.570228576660156,
"learning_rate": 2.7148330596606458e-05,
"loss": 0.1721,
"step": 1534
},
{
"epoch": 26.482758620689655,
"grad_norm": 0.25473734736442566,
"learning_rate": 2.7038861521620144e-05,
"loss": 0.0521,
"step": 1536
},
{
"epoch": 26.517241379310345,
"grad_norm": 3.582709312438965,
"learning_rate": 2.6929392446633827e-05,
"loss": 0.0713,
"step": 1538
},
{
"epoch": 26.551724137931036,
"grad_norm": 0.18364673852920532,
"learning_rate": 2.681992337164751e-05,
"loss": 0.219,
"step": 1540
},
{
"epoch": 26.586206896551722,
"grad_norm": 0.24068348109722137,
"learning_rate": 2.6710454296661196e-05,
"loss": 0.3014,
"step": 1542
},
{
"epoch": 26.620689655172413,
"grad_norm": 0.4433784782886505,
"learning_rate": 2.660098522167488e-05,
"loss": 0.0689,
"step": 1544
},
{
"epoch": 26.655172413793103,
"grad_norm": 0.49119868874549866,
"learning_rate": 2.6491516146688562e-05,
"loss": 0.0461,
"step": 1546
},
{
"epoch": 26.689655172413794,
"grad_norm": 5.021825313568115,
"learning_rate": 2.638204707170224e-05,
"loss": 0.2385,
"step": 1548
},
{
"epoch": 26.724137931034484,
"grad_norm": 0.26685020327568054,
"learning_rate": 2.627257799671593e-05,
"loss": 0.0427,
"step": 1550
},
{
"epoch": 26.75862068965517,
"grad_norm": 1.1268709897994995,
"learning_rate": 2.6163108921729614e-05,
"loss": 0.0586,
"step": 1552
},
{
"epoch": 26.79310344827586,
"grad_norm": 0.20585598051548004,
"learning_rate": 2.6053639846743293e-05,
"loss": 0.1416,
"step": 1554
},
{
"epoch": 26.82758620689655,
"grad_norm": 0.39748844504356384,
"learning_rate": 2.5944170771756983e-05,
"loss": 0.0484,
"step": 1556
},
{
"epoch": 26.862068965517242,
"grad_norm": 0.6568892598152161,
"learning_rate": 2.5834701696770662e-05,
"loss": 0.0427,
"step": 1558
},
{
"epoch": 26.896551724137932,
"grad_norm": 0.6816446185112,
"learning_rate": 2.5725232621784345e-05,
"loss": 0.3039,
"step": 1560
},
{
"epoch": 26.93103448275862,
"grad_norm": 0.5687500238418579,
"learning_rate": 2.561576354679803e-05,
"loss": 0.0555,
"step": 1562
},
{
"epoch": 26.96551724137931,
"grad_norm": 0.29769548773765564,
"learning_rate": 2.5506294471811714e-05,
"loss": 0.3812,
"step": 1564
},
{
"epoch": 27.0,
"grad_norm": 0.5284446477890015,
"learning_rate": 2.5396825396825397e-05,
"loss": 0.0574,
"step": 1566
},
{
"epoch": 27.0,
"eval_accuracy": 0.6363636363636364,
"eval_f1_macro": 0.5799920937164608,
"eval_f1_micro": 0.6363636363636364,
"eval_f1_weighted": 0.6352104119661408,
"eval_loss": 1.451111078262329,
"eval_precision_macro": 0.6073050209589084,
"eval_precision_micro": 0.6363636363636364,
"eval_precision_weighted": 0.65455105239588,
"eval_recall_macro": 0.5738397581254724,
"eval_recall_micro": 0.6363636363636364,
"eval_recall_weighted": 0.6363636363636364,
"eval_runtime": 1.6969,
"eval_samples_per_second": 77.788,
"eval_steps_per_second": 10.018,
"step": 1566
},
{
"epoch": 27.03448275862069,
"grad_norm": 5.421750068664551,
"learning_rate": 2.5287356321839083e-05,
"loss": 0.0798,
"step": 1568
},
{
"epoch": 27.06896551724138,
"grad_norm": 0.11500166356563568,
"learning_rate": 2.5177887246852766e-05,
"loss": 0.0314,
"step": 1570
},
{
"epoch": 27.103448275862068,
"grad_norm": 0.19810307025909424,
"learning_rate": 2.506841817186645e-05,
"loss": 0.2181,
"step": 1572
},
{
"epoch": 27.137931034482758,
"grad_norm": 0.21842528879642487,
"learning_rate": 2.4958949096880132e-05,
"loss": 0.041,
"step": 1574
},
{
"epoch": 27.17241379310345,
"grad_norm": 3.9452264308929443,
"learning_rate": 2.4849480021893818e-05,
"loss": 0.0789,
"step": 1576
},
{
"epoch": 27.20689655172414,
"grad_norm": 0.18447476625442505,
"learning_rate": 2.4740010946907498e-05,
"loss": 0.0376,
"step": 1578
},
{
"epoch": 27.24137931034483,
"grad_norm": 1.6792452335357666,
"learning_rate": 2.4630541871921184e-05,
"loss": 0.049,
"step": 1580
},
{
"epoch": 27.275862068965516,
"grad_norm": 0.5313147306442261,
"learning_rate": 2.4521072796934867e-05,
"loss": 0.0555,
"step": 1582
},
{
"epoch": 27.310344827586206,
"grad_norm": 0.2032177746295929,
"learning_rate": 2.441160372194855e-05,
"loss": 0.0846,
"step": 1584
},
{
"epoch": 27.344827586206897,
"grad_norm": 5.808465480804443,
"learning_rate": 2.4302134646962236e-05,
"loss": 0.308,
"step": 1586
},
{
"epoch": 27.379310344827587,
"grad_norm": 15.264229774475098,
"learning_rate": 2.419266557197592e-05,
"loss": 0.262,
"step": 1588
},
{
"epoch": 27.413793103448278,
"grad_norm": 0.4792754650115967,
"learning_rate": 2.40831964969896e-05,
"loss": 0.2704,
"step": 1590
},
{
"epoch": 27.448275862068964,
"grad_norm": 1.3727836608886719,
"learning_rate": 2.3973727422003288e-05,
"loss": 0.2656,
"step": 1592
},
{
"epoch": 27.482758620689655,
"grad_norm": 0.4460311532020569,
"learning_rate": 2.3864258347016967e-05,
"loss": 0.0396,
"step": 1594
},
{
"epoch": 27.517241379310345,
"grad_norm": 0.4063487946987152,
"learning_rate": 2.3754789272030653e-05,
"loss": 0.074,
"step": 1596
},
{
"epoch": 27.551724137931036,
"grad_norm": 0.8473443984985352,
"learning_rate": 2.3645320197044336e-05,
"loss": 0.0509,
"step": 1598
},
{
"epoch": 27.586206896551722,
"grad_norm": 0.139051616191864,
"learning_rate": 2.353585112205802e-05,
"loss": 0.0326,
"step": 1600
},
{
"epoch": 27.620689655172413,
"grad_norm": 8.467299461364746,
"learning_rate": 2.3426382047071705e-05,
"loss": 0.0749,
"step": 1602
},
{
"epoch": 27.655172413793103,
"grad_norm": 0.3194819688796997,
"learning_rate": 2.3316912972085385e-05,
"loss": 0.0845,
"step": 1604
},
{
"epoch": 27.689655172413794,
"grad_norm": 0.4060407280921936,
"learning_rate": 2.320744389709907e-05,
"loss": 0.0819,
"step": 1606
},
{
"epoch": 27.724137931034484,
"grad_norm": 0.14499817788600922,
"learning_rate": 2.3097974822112754e-05,
"loss": 0.0405,
"step": 1608
},
{
"epoch": 27.75862068965517,
"grad_norm": 0.17447809875011444,
"learning_rate": 2.2988505747126437e-05,
"loss": 0.0356,
"step": 1610
},
{
"epoch": 27.79310344827586,
"grad_norm": 3.4749953746795654,
"learning_rate": 2.2879036672140123e-05,
"loss": 0.0674,
"step": 1612
},
{
"epoch": 27.82758620689655,
"grad_norm": 0.12591542303562164,
"learning_rate": 2.2769567597153806e-05,
"loss": 0.1736,
"step": 1614
},
{
"epoch": 27.862068965517242,
"grad_norm": 0.7413695454597473,
"learning_rate": 2.266009852216749e-05,
"loss": 0.044,
"step": 1616
},
{
"epoch": 27.896551724137932,
"grad_norm": 0.27023714780807495,
"learning_rate": 2.2550629447181175e-05,
"loss": 0.0764,
"step": 1618
},
{
"epoch": 27.93103448275862,
"grad_norm": 0.7378279566764832,
"learning_rate": 2.2441160372194854e-05,
"loss": 0.1553,
"step": 1620
},
{
"epoch": 27.96551724137931,
"grad_norm": 12.137617111206055,
"learning_rate": 2.233169129720854e-05,
"loss": 0.5215,
"step": 1622
},
{
"epoch": 28.0,
"grad_norm": 0.11637034267187119,
"learning_rate": 2.2222222222222223e-05,
"loss": 0.0399,
"step": 1624
},
{
"epoch": 28.0,
"eval_accuracy": 0.6060606060606061,
"eval_f1_macro": 0.5674308896409629,
"eval_f1_micro": 0.6060606060606061,
"eval_f1_weighted": 0.6133032153841548,
"eval_loss": 1.4920562505722046,
"eval_precision_macro": 0.5933232665951561,
"eval_precision_micro": 0.6060606060606061,
"eval_precision_weighted": 0.6389820020318553,
"eval_recall_macro": 0.5644671201814059,
"eval_recall_micro": 0.6060606060606061,
"eval_recall_weighted": 0.6060606060606061,
"eval_runtime": 1.6837,
"eval_samples_per_second": 78.398,
"eval_steps_per_second": 10.097,
"step": 1624
},
{
"epoch": 28.03448275862069,
"grad_norm": 0.1886104792356491,
"learning_rate": 2.2112753147235906e-05,
"loss": 0.0641,
"step": 1626
},
{
"epoch": 28.06896551724138,
"grad_norm": 1.1970809698104858,
"learning_rate": 2.2003284072249592e-05,
"loss": 0.1138,
"step": 1628
},
{
"epoch": 28.103448275862068,
"grad_norm": 0.11415582150220871,
"learning_rate": 2.1893814997263272e-05,
"loss": 0.0359,
"step": 1630
},
{
"epoch": 28.137931034482758,
"grad_norm": 0.26719969511032104,
"learning_rate": 2.1784345922276958e-05,
"loss": 0.035,
"step": 1632
},
{
"epoch": 28.17241379310345,
"grad_norm": 7.150755882263184,
"learning_rate": 2.1674876847290644e-05,
"loss": 0.0961,
"step": 1634
},
{
"epoch": 28.20689655172414,
"grad_norm": 0.15368057787418365,
"learning_rate": 2.1565407772304324e-05,
"loss": 0.0322,
"step": 1636
},
{
"epoch": 28.24137931034483,
"grad_norm": 1.2831923961639404,
"learning_rate": 2.145593869731801e-05,
"loss": 0.0906,
"step": 1638
},
{
"epoch": 28.275862068965516,
"grad_norm": 0.44488441944122314,
"learning_rate": 2.1346469622331693e-05,
"loss": 0.0373,
"step": 1640
},
{
"epoch": 28.310344827586206,
"grad_norm": 0.1512623429298401,
"learning_rate": 2.1237000547345376e-05,
"loss": 0.0398,
"step": 1642
},
{
"epoch": 28.344827586206897,
"grad_norm": 15.000018119812012,
"learning_rate": 2.1127531472359062e-05,
"loss": 0.1686,
"step": 1644
},
{
"epoch": 28.379310344827587,
"grad_norm": 0.198665052652359,
"learning_rate": 2.101806239737274e-05,
"loss": 0.0306,
"step": 1646
},
{
"epoch": 28.413793103448278,
"grad_norm": 1.9664894342422485,
"learning_rate": 2.0908593322386428e-05,
"loss": 0.0556,
"step": 1648
},
{
"epoch": 28.448275862068964,
"grad_norm": 0.1294247806072235,
"learning_rate": 2.079912424740011e-05,
"loss": 0.0341,
"step": 1650
},
{
"epoch": 28.482758620689655,
"grad_norm": 0.22127756476402283,
"learning_rate": 2.0689655172413793e-05,
"loss": 0.0372,
"step": 1652
},
{
"epoch": 28.517241379310345,
"grad_norm": 0.16517043113708496,
"learning_rate": 2.058018609742748e-05,
"loss": 0.0316,
"step": 1654
},
{
"epoch": 28.551724137931036,
"grad_norm": 0.6368513107299805,
"learning_rate": 2.047071702244116e-05,
"loss": 0.0947,
"step": 1656
},
{
"epoch": 28.586206896551722,
"grad_norm": 0.39542946219444275,
"learning_rate": 2.0361247947454845e-05,
"loss": 0.0356,
"step": 1658
},
{
"epoch": 28.620689655172413,
"grad_norm": 0.3416825234889984,
"learning_rate": 2.025177887246853e-05,
"loss": 0.0625,
"step": 1660
},
{
"epoch": 28.655172413793103,
"grad_norm": 1.6610913276672363,
"learning_rate": 2.014230979748221e-05,
"loss": 0.0392,
"step": 1662
},
{
"epoch": 28.689655172413794,
"grad_norm": 0.4484691023826599,
"learning_rate": 2.0032840722495897e-05,
"loss": 0.0426,
"step": 1664
},
{
"epoch": 28.724137931034484,
"grad_norm": 9.614035606384277,
"learning_rate": 1.992337164750958e-05,
"loss": 0.0708,
"step": 1666
},
{
"epoch": 28.75862068965517,
"grad_norm": 0.17257235944271088,
"learning_rate": 1.9813902572523263e-05,
"loss": 0.0649,
"step": 1668
},
{
"epoch": 28.79310344827586,
"grad_norm": 0.8080697059631348,
"learning_rate": 1.970443349753695e-05,
"loss": 0.0337,
"step": 1670
},
{
"epoch": 28.82758620689655,
"grad_norm": 0.12455487251281738,
"learning_rate": 1.959496442255063e-05,
"loss": 0.0292,
"step": 1672
},
{
"epoch": 28.862068965517242,
"grad_norm": 4.5023369789123535,
"learning_rate": 1.9485495347564315e-05,
"loss": 0.0603,
"step": 1674
},
{
"epoch": 28.896551724137932,
"grad_norm": 7.025498390197754,
"learning_rate": 1.9376026272577998e-05,
"loss": 0.1706,
"step": 1676
},
{
"epoch": 28.93103448275862,
"grad_norm": 5.604184627532959,
"learning_rate": 1.926655719759168e-05,
"loss": 0.3358,
"step": 1678
},
{
"epoch": 28.96551724137931,
"grad_norm": 0.6545395255088806,
"learning_rate": 1.9157088122605367e-05,
"loss": 0.0469,
"step": 1680
},
{
"epoch": 29.0,
"grad_norm": 0.1356768012046814,
"learning_rate": 1.9047619047619046e-05,
"loss": 0.0269,
"step": 1682
},
{
"epoch": 29.0,
"eval_accuracy": 0.6287878787878788,
"eval_f1_macro": 0.5563089279356143,
"eval_f1_micro": 0.6287878787878788,
"eval_f1_weighted": 0.6282606190642379,
"eval_loss": 1.4752185344696045,
"eval_precision_macro": 0.5686201153488518,
"eval_precision_micro": 0.6287878787878788,
"eval_precision_weighted": 0.6350446686164523,
"eval_recall_macro": 0.5514965986394558,
"eval_recall_micro": 0.6287878787878788,
"eval_recall_weighted": 0.6287878787878788,
"eval_runtime": 1.7276,
"eval_samples_per_second": 76.408,
"eval_steps_per_second": 9.84,
"step": 1682
},
{
"epoch": 29.03448275862069,
"grad_norm": 0.24686339497566223,
"learning_rate": 1.8938149972632732e-05,
"loss": 0.1751,
"step": 1684
},
{
"epoch": 29.06896551724138,
"grad_norm": 14.179889678955078,
"learning_rate": 1.8828680897646415e-05,
"loss": 0.4523,
"step": 1686
},
{
"epoch": 29.103448275862068,
"grad_norm": 0.6621524691581726,
"learning_rate": 1.8719211822660098e-05,
"loss": 0.0353,
"step": 1688
},
{
"epoch": 29.137931034482758,
"grad_norm": 0.2671413719654083,
"learning_rate": 1.8609742747673784e-05,
"loss": 0.1784,
"step": 1690
},
{
"epoch": 29.17241379310345,
"grad_norm": 0.11761974543333054,
"learning_rate": 1.8500273672687464e-05,
"loss": 0.0333,
"step": 1692
},
{
"epoch": 29.20689655172414,
"grad_norm": 0.14111700654029846,
"learning_rate": 1.839080459770115e-05,
"loss": 0.0316,
"step": 1694
},
{
"epoch": 29.24137931034483,
"grad_norm": 0.12016208469867706,
"learning_rate": 1.8281335522714836e-05,
"loss": 0.0484,
"step": 1696
},
{
"epoch": 29.275862068965516,
"grad_norm": 0.16200877726078033,
"learning_rate": 1.8171866447728516e-05,
"loss": 0.0296,
"step": 1698
},
{
"epoch": 29.310344827586206,
"grad_norm": 0.16708754003047943,
"learning_rate": 1.8062397372742202e-05,
"loss": 0.0322,
"step": 1700
},
{
"epoch": 29.344827586206897,
"grad_norm": 0.1727229207754135,
"learning_rate": 1.7952928297755885e-05,
"loss": 0.0319,
"step": 1702
},
{
"epoch": 29.379310344827587,
"grad_norm": 4.953850269317627,
"learning_rate": 1.7843459222769568e-05,
"loss": 0.1111,
"step": 1704
},
{
"epoch": 29.413793103448278,
"grad_norm": 0.14302562177181244,
"learning_rate": 1.7733990147783254e-05,
"loss": 0.0494,
"step": 1706
},
{
"epoch": 29.448275862068964,
"grad_norm": 0.39635199308395386,
"learning_rate": 1.7624521072796933e-05,
"loss": 0.0708,
"step": 1708
},
{
"epoch": 29.482758620689655,
"grad_norm": 0.4933320879936218,
"learning_rate": 1.751505199781062e-05,
"loss": 0.0564,
"step": 1710
},
{
"epoch": 29.517241379310345,
"grad_norm": 3.0435335636138916,
"learning_rate": 1.7405582922824302e-05,
"loss": 0.0757,
"step": 1712
},
{
"epoch": 29.551724137931036,
"grad_norm": 0.11169429123401642,
"learning_rate": 1.7296113847837985e-05,
"loss": 0.0322,
"step": 1714
},
{
"epoch": 29.586206896551722,
"grad_norm": 0.19250567257404327,
"learning_rate": 1.718664477285167e-05,
"loss": 0.0339,
"step": 1716
},
{
"epoch": 29.620689655172413,
"grad_norm": 15.015722274780273,
"learning_rate": 1.7077175697865354e-05,
"loss": 0.1737,
"step": 1718
},
{
"epoch": 29.655172413793103,
"grad_norm": 0.593941330909729,
"learning_rate": 1.6967706622879037e-05,
"loss": 0.0301,
"step": 1720
},
{
"epoch": 29.689655172413794,
"grad_norm": 0.10976947844028473,
"learning_rate": 1.6858237547892723e-05,
"loss": 0.0442,
"step": 1722
},
{
"epoch": 29.724137931034484,
"grad_norm": 29.424667358398438,
"learning_rate": 1.6748768472906403e-05,
"loss": 0.1292,
"step": 1724
},
{
"epoch": 29.75862068965517,
"grad_norm": 0.1969483345746994,
"learning_rate": 1.663929939792009e-05,
"loss": 0.0394,
"step": 1726
},
{
"epoch": 29.79310344827586,
"grad_norm": 0.39891931414604187,
"learning_rate": 1.6529830322933772e-05,
"loss": 0.0378,
"step": 1728
},
{
"epoch": 29.82758620689655,
"grad_norm": 2.7617032527923584,
"learning_rate": 1.6420361247947455e-05,
"loss": 0.0351,
"step": 1730
},
{
"epoch": 29.862068965517242,
"grad_norm": 4.983090400695801,
"learning_rate": 1.631089217296114e-05,
"loss": 0.0676,
"step": 1732
},
{
"epoch": 29.896551724137932,
"grad_norm": 0.8957940340042114,
"learning_rate": 1.620142309797482e-05,
"loss": 0.0957,
"step": 1734
},
{
"epoch": 29.93103448275862,
"grad_norm": 0.19039399921894073,
"learning_rate": 1.6091954022988507e-05,
"loss": 0.0263,
"step": 1736
},
{
"epoch": 29.96551724137931,
"grad_norm": 0.091913141310215,
"learning_rate": 1.598248494800219e-05,
"loss": 0.0269,
"step": 1738
},
{
"epoch": 30.0,
"grad_norm": 0.10125374048948288,
"learning_rate": 1.5873015873015872e-05,
"loss": 0.0267,
"step": 1740
},
{
"epoch": 30.0,
"eval_accuracy": 0.6136363636363636,
"eval_f1_macro": 0.562093981777284,
"eval_f1_micro": 0.6136363636363636,
"eval_f1_weighted": 0.6142366070214389,
"eval_loss": 1.5352805852890015,
"eval_precision_macro": 0.5859041645331968,
"eval_precision_micro": 0.6136363636363636,
"eval_precision_weighted": 0.6323715424857898,
"eval_recall_macro": 0.5564550264550264,
"eval_recall_micro": 0.6136363636363636,
"eval_recall_weighted": 0.6136363636363636,
"eval_runtime": 1.6762,
"eval_samples_per_second": 78.751,
"eval_steps_per_second": 10.142,
"step": 1740
},
{
"epoch": 30.03448275862069,
"grad_norm": 0.1440192312002182,
"learning_rate": 1.576354679802956e-05,
"loss": 0.0348,
"step": 1742
},
{
"epoch": 30.06896551724138,
"grad_norm": 0.8625943064689636,
"learning_rate": 1.565407772304324e-05,
"loss": 0.0343,
"step": 1744
},
{
"epoch": 30.103448275862068,
"grad_norm": 0.2873701751232147,
"learning_rate": 1.5544608648056924e-05,
"loss": 0.0709,
"step": 1746
},
{
"epoch": 30.137931034482758,
"grad_norm": 3.517399787902832,
"learning_rate": 1.543513957307061e-05,
"loss": 0.0444,
"step": 1748
},
{
"epoch": 30.17241379310345,
"grad_norm": 0.18623045086860657,
"learning_rate": 1.532567049808429e-05,
"loss": 0.0292,
"step": 1750
},
{
"epoch": 30.20689655172414,
"grad_norm": 0.24593006074428558,
"learning_rate": 1.5216201423097976e-05,
"loss": 0.0281,
"step": 1752
},
{
"epoch": 30.24137931034483,
"grad_norm": 13.92945384979248,
"learning_rate": 1.510673234811166e-05,
"loss": 0.1024,
"step": 1754
},
{
"epoch": 30.275862068965516,
"grad_norm": 0.12701399624347687,
"learning_rate": 1.4997263273125342e-05,
"loss": 0.0546,
"step": 1756
},
{
"epoch": 30.310344827586206,
"grad_norm": 0.2253095507621765,
"learning_rate": 1.4887794198139026e-05,
"loss": 0.0313,
"step": 1758
},
{
"epoch": 30.344827586206897,
"grad_norm": 0.18442504107952118,
"learning_rate": 1.4778325123152711e-05,
"loss": 0.045,
"step": 1760
},
{
"epoch": 30.379310344827587,
"grad_norm": 0.8345377445220947,
"learning_rate": 1.4668856048166394e-05,
"loss": 0.0872,
"step": 1762
},
{
"epoch": 30.413793103448278,
"grad_norm": 0.16499698162078857,
"learning_rate": 1.4559386973180078e-05,
"loss": 0.0521,
"step": 1764
},
{
"epoch": 30.448275862068964,
"grad_norm": 29.77411460876465,
"learning_rate": 1.444991789819376e-05,
"loss": 0.075,
"step": 1766
},
{
"epoch": 30.482758620689655,
"grad_norm": 13.939007759094238,
"learning_rate": 1.4340448823207444e-05,
"loss": 0.2595,
"step": 1768
},
{
"epoch": 30.517241379310345,
"grad_norm": 0.110160693526268,
"learning_rate": 1.4230979748221129e-05,
"loss": 0.0255,
"step": 1770
},
{
"epoch": 30.551724137931036,
"grad_norm": 0.17738622426986694,
"learning_rate": 1.4121510673234811e-05,
"loss": 0.0263,
"step": 1772
},
{
"epoch": 30.586206896551722,
"grad_norm": 0.12670966982841492,
"learning_rate": 1.4012041598248496e-05,
"loss": 0.0309,
"step": 1774
},
{
"epoch": 30.620689655172413,
"grad_norm": 0.08485020697116852,
"learning_rate": 1.3902572523262177e-05,
"loss": 0.0237,
"step": 1776
},
{
"epoch": 30.655172413793103,
"grad_norm": 0.1434456706047058,
"learning_rate": 1.3793103448275863e-05,
"loss": 0.03,
"step": 1778
},
{
"epoch": 30.689655172413794,
"grad_norm": 0.9384951591491699,
"learning_rate": 1.3683634373289548e-05,
"loss": 0.0309,
"step": 1780
},
{
"epoch": 30.724137931034484,
"grad_norm": 1.0260897874832153,
"learning_rate": 1.3574165298303229e-05,
"loss": 0.0875,
"step": 1782
},
{
"epoch": 30.75862068965517,
"grad_norm": 0.11444691568613052,
"learning_rate": 1.3464696223316914e-05,
"loss": 0.318,
"step": 1784
},
{
"epoch": 30.79310344827586,
"grad_norm": 0.163078173995018,
"learning_rate": 1.3355227148330598e-05,
"loss": 0.0263,
"step": 1786
},
{
"epoch": 30.82758620689655,
"grad_norm": 30.215320587158203,
"learning_rate": 1.3245758073344281e-05,
"loss": 0.1381,
"step": 1788
},
{
"epoch": 30.862068965517242,
"grad_norm": 0.09894564747810364,
"learning_rate": 1.3136288998357965e-05,
"loss": 0.0427,
"step": 1790
},
{
"epoch": 30.896551724137932,
"grad_norm": 0.15414094924926758,
"learning_rate": 1.3026819923371647e-05,
"loss": 0.0362,
"step": 1792
},
{
"epoch": 30.93103448275862,
"grad_norm": 0.653509795665741,
"learning_rate": 1.2917350848385331e-05,
"loss": 0.0336,
"step": 1794
},
{
"epoch": 30.96551724137931,
"grad_norm": 0.1620655208826065,
"learning_rate": 1.2807881773399016e-05,
"loss": 0.0314,
"step": 1796
},
{
"epoch": 31.0,
"grad_norm": 0.39400652050971985,
"learning_rate": 1.2698412698412699e-05,
"loss": 0.1094,
"step": 1798
},
{
"epoch": 31.0,
"eval_accuracy": 0.6515151515151515,
"eval_f1_macro": 0.5912410632947628,
"eval_f1_micro": 0.6515151515151515,
"eval_f1_weighted": 0.6529064358109834,
"eval_loss": 1.5125658512115479,
"eval_precision_macro": 0.602827742217164,
"eval_precision_micro": 0.6515151515151515,
"eval_precision_weighted": 0.6604101388171674,
"eval_recall_macro": 0.5866893424036281,
"eval_recall_micro": 0.6515151515151515,
"eval_recall_weighted": 0.6515151515151515,
"eval_runtime": 1.7146,
"eval_samples_per_second": 76.988,
"eval_steps_per_second": 9.915,
"step": 1798
},
{
"epoch": 31.03448275862069,
"grad_norm": 0.20587500929832458,
"learning_rate": 1.2588943623426383e-05,
"loss": 0.0298,
"step": 1800
},
{
"epoch": 31.06896551724138,
"grad_norm": 0.27925363183021545,
"learning_rate": 1.2479474548440066e-05,
"loss": 0.0316,
"step": 1802
},
{
"epoch": 31.103448275862068,
"grad_norm": 0.110267773270607,
"learning_rate": 1.2370005473453749e-05,
"loss": 0.0252,
"step": 1804
},
{
"epoch": 31.137931034482758,
"grad_norm": 0.7436184883117676,
"learning_rate": 1.2260536398467433e-05,
"loss": 0.0288,
"step": 1806
},
{
"epoch": 31.17241379310345,
"grad_norm": 0.1437726467847824,
"learning_rate": 1.2151067323481118e-05,
"loss": 0.0645,
"step": 1808
},
{
"epoch": 31.20689655172414,
"grad_norm": 0.10505229979753494,
"learning_rate": 1.20415982484948e-05,
"loss": 0.7232,
"step": 1810
},
{
"epoch": 31.24137931034483,
"grad_norm": 0.2323630303144455,
"learning_rate": 1.1932129173508484e-05,
"loss": 0.031,
"step": 1812
},
{
"epoch": 31.275862068965516,
"grad_norm": 0.15194958448410034,
"learning_rate": 1.1822660098522168e-05,
"loss": 0.0301,
"step": 1814
},
{
"epoch": 31.310344827586206,
"grad_norm": 0.29873228073120117,
"learning_rate": 1.1713191023535853e-05,
"loss": 0.0289,
"step": 1816
},
{
"epoch": 31.344827586206897,
"grad_norm": 0.08876697719097137,
"learning_rate": 1.1603721948549535e-05,
"loss": 0.0267,
"step": 1818
},
{
"epoch": 31.379310344827587,
"grad_norm": 0.5476172566413879,
"learning_rate": 1.1494252873563218e-05,
"loss": 0.0398,
"step": 1820
},
{
"epoch": 31.413793103448278,
"grad_norm": 6.674413204193115,
"learning_rate": 1.1384783798576903e-05,
"loss": 0.0472,
"step": 1822
},
{
"epoch": 31.448275862068964,
"grad_norm": 0.09850436449050903,
"learning_rate": 1.1275314723590587e-05,
"loss": 0.0296,
"step": 1824
},
{
"epoch": 31.482758620689655,
"grad_norm": 0.5808874368667603,
"learning_rate": 1.116584564860427e-05,
"loss": 0.0507,
"step": 1826
},
{
"epoch": 31.517241379310345,
"grad_norm": 0.10986245423555374,
"learning_rate": 1.1056376573617953e-05,
"loss": 0.0271,
"step": 1828
},
{
"epoch": 31.551724137931036,
"grad_norm": 0.17757023870944977,
"learning_rate": 1.0946907498631636e-05,
"loss": 0.0306,
"step": 1830
},
{
"epoch": 31.586206896551722,
"grad_norm": 0.1803562492132187,
"learning_rate": 1.0837438423645322e-05,
"loss": 0.0263,
"step": 1832
},
{
"epoch": 31.620689655172413,
"grad_norm": 0.5573262572288513,
"learning_rate": 1.0727969348659005e-05,
"loss": 0.0277,
"step": 1834
},
{
"epoch": 31.655172413793103,
"grad_norm": 0.9371552467346191,
"learning_rate": 1.0618500273672688e-05,
"loss": 0.0396,
"step": 1836
},
{
"epoch": 31.689655172413794,
"grad_norm": 0.5467616319656372,
"learning_rate": 1.050903119868637e-05,
"loss": 0.035,
"step": 1838
},
{
"epoch": 31.724137931034484,
"grad_norm": 0.18951688706874847,
"learning_rate": 1.0399562123700055e-05,
"loss": 0.0292,
"step": 1840
},
{
"epoch": 31.75862068965517,
"grad_norm": 0.13354890048503876,
"learning_rate": 1.029009304871374e-05,
"loss": 0.0244,
"step": 1842
},
{
"epoch": 31.79310344827586,
"grad_norm": 0.436275452375412,
"learning_rate": 1.0180623973727423e-05,
"loss": 0.0301,
"step": 1844
},
{
"epoch": 31.82758620689655,
"grad_norm": 0.963607907295227,
"learning_rate": 1.0071154898741105e-05,
"loss": 0.0286,
"step": 1846
},
{
"epoch": 31.862068965517242,
"grad_norm": 5.803387641906738,
"learning_rate": 9.96168582375479e-06,
"loss": 0.0649,
"step": 1848
},
{
"epoch": 31.896551724137932,
"grad_norm": 0.3790993392467499,
"learning_rate": 9.852216748768475e-06,
"loss": 0.0327,
"step": 1850
},
{
"epoch": 31.93103448275862,
"grad_norm": 0.08771317452192307,
"learning_rate": 9.742747673782157e-06,
"loss": 0.0247,
"step": 1852
},
{
"epoch": 31.96551724137931,
"grad_norm": 0.15166075527668,
"learning_rate": 9.63327859879584e-06,
"loss": 0.0284,
"step": 1854
},
{
"epoch": 32.0,
"grad_norm": 0.09096769243478775,
"learning_rate": 9.523809523809523e-06,
"loss": 0.0243,
"step": 1856
},
{
"epoch": 32.0,
"eval_accuracy": 0.6590909090909091,
"eval_f1_macro": 0.5985429204941399,
"eval_f1_micro": 0.6590909090909091,
"eval_f1_weighted": 0.6563486864207485,
"eval_loss": 1.490037202835083,
"eval_precision_macro": 0.6103033255742615,
"eval_precision_micro": 0.6590909090909091,
"eval_precision_weighted": 0.6603901845281156,
"eval_recall_macro": 0.5934920634920635,
"eval_recall_micro": 0.6590909090909091,
"eval_recall_weighted": 0.6590909090909091,
"eval_runtime": 1.727,
"eval_samples_per_second": 76.433,
"eval_steps_per_second": 9.844,
"step": 1856
},
{
"epoch": 32.03448275862069,
"grad_norm": 0.18323098123073578,
"learning_rate": 9.414340448823208e-06,
"loss": 0.0304,
"step": 1858
},
{
"epoch": 32.06896551724138,
"grad_norm": 0.15102934837341309,
"learning_rate": 9.304871373836892e-06,
"loss": 0.0247,
"step": 1860
},
{
"epoch": 32.10344827586207,
"grad_norm": 0.151427760720253,
"learning_rate": 9.195402298850575e-06,
"loss": 0.0246,
"step": 1862
},
{
"epoch": 32.13793103448276,
"grad_norm": 0.10605157166719437,
"learning_rate": 9.085933223864258e-06,
"loss": 0.0282,
"step": 1864
},
{
"epoch": 32.172413793103445,
"grad_norm": 0.0751444399356842,
"learning_rate": 8.976464148877942e-06,
"loss": 0.023,
"step": 1866
},
{
"epoch": 32.206896551724135,
"grad_norm": 0.13312040269374847,
"learning_rate": 8.866995073891627e-06,
"loss": 0.0256,
"step": 1868
},
{
"epoch": 32.241379310344826,
"grad_norm": 0.10063087195158005,
"learning_rate": 8.75752599890531e-06,
"loss": 0.022,
"step": 1870
},
{
"epoch": 32.275862068965516,
"grad_norm": 0.11364129185676575,
"learning_rate": 8.648056923918993e-06,
"loss": 0.0968,
"step": 1872
},
{
"epoch": 32.310344827586206,
"grad_norm": 0.09259811043739319,
"learning_rate": 8.538587848932677e-06,
"loss": 0.0244,
"step": 1874
},
{
"epoch": 32.3448275862069,
"grad_norm": 3.0221097469329834,
"learning_rate": 8.429118773946362e-06,
"loss": 0.0491,
"step": 1876
},
{
"epoch": 32.37931034482759,
"grad_norm": 0.09685959666967392,
"learning_rate": 8.319649698960045e-06,
"loss": 0.0244,
"step": 1878
},
{
"epoch": 32.41379310344828,
"grad_norm": 0.13511282205581665,
"learning_rate": 8.210180623973727e-06,
"loss": 0.0271,
"step": 1880
},
{
"epoch": 32.44827586206897,
"grad_norm": 0.12241239845752716,
"learning_rate": 8.10071154898741e-06,
"loss": 0.0272,
"step": 1882
},
{
"epoch": 32.48275862068966,
"grad_norm": 0.1032300814986229,
"learning_rate": 7.991242474001095e-06,
"loss": 0.0229,
"step": 1884
},
{
"epoch": 32.51724137931034,
"grad_norm": 0.4652920961380005,
"learning_rate": 7.88177339901478e-06,
"loss": 0.0273,
"step": 1886
},
{
"epoch": 32.55172413793103,
"grad_norm": 0.28032049536705017,
"learning_rate": 7.772304324028462e-06,
"loss": 0.0298,
"step": 1888
},
{
"epoch": 32.58620689655172,
"grad_norm": 0.6975933313369751,
"learning_rate": 7.662835249042145e-06,
"loss": 0.0293,
"step": 1890
},
{
"epoch": 32.62068965517241,
"grad_norm": 0.1151973232626915,
"learning_rate": 7.55336617405583e-06,
"loss": 0.0233,
"step": 1892
},
{
"epoch": 32.6551724137931,
"grad_norm": 16.620948791503906,
"learning_rate": 7.443897099069513e-06,
"loss": 0.1329,
"step": 1894
},
{
"epoch": 32.689655172413794,
"grad_norm": 0.11886937916278839,
"learning_rate": 7.334428024083197e-06,
"loss": 0.0215,
"step": 1896
},
{
"epoch": 32.724137931034484,
"grad_norm": 0.699009120464325,
"learning_rate": 7.22495894909688e-06,
"loss": 0.0262,
"step": 1898
},
{
"epoch": 32.758620689655174,
"grad_norm": 0.5381059050559998,
"learning_rate": 7.115489874110564e-06,
"loss": 0.0339,
"step": 1900
},
{
"epoch": 32.793103448275865,
"grad_norm": 0.09165710210800171,
"learning_rate": 7.006020799124248e-06,
"loss": 0.0237,
"step": 1902
},
{
"epoch": 32.827586206896555,
"grad_norm": 0.09409786015748978,
"learning_rate": 6.896551724137932e-06,
"loss": 0.0376,
"step": 1904
},
{
"epoch": 32.86206896551724,
"grad_norm": 0.11274611204862595,
"learning_rate": 6.7870826491516145e-06,
"loss": 0.0268,
"step": 1906
},
{
"epoch": 32.89655172413793,
"grad_norm": 0.7047907710075378,
"learning_rate": 6.677613574165299e-06,
"loss": 0.0679,
"step": 1908
},
{
"epoch": 32.93103448275862,
"grad_norm": 0.11477731913328171,
"learning_rate": 6.568144499178983e-06,
"loss": 0.0208,
"step": 1910
},
{
"epoch": 32.96551724137931,
"grad_norm": 0.09211686998605728,
"learning_rate": 6.458675424192666e-06,
"loss": 0.2335,
"step": 1912
},
{
"epoch": 33.0,
"grad_norm": 0.13210824131965637,
"learning_rate": 6.349206349206349e-06,
"loss": 0.0366,
"step": 1914
},
{
"epoch": 33.0,
"eval_accuracy": 0.6893939393939394,
"eval_f1_macro": 0.6274757091289592,
"eval_f1_micro": 0.6893939393939394,
"eval_f1_weighted": 0.6850690417795446,
"eval_loss": 1.467975378036499,
"eval_precision_macro": 0.6369170238628366,
"eval_precision_micro": 0.6893939393939394,
"eval_precision_weighted": 0.6855083526713621,
"eval_recall_macro": 0.6241043083900227,
"eval_recall_micro": 0.6893939393939394,
"eval_recall_weighted": 0.6893939393939394,
"eval_runtime": 1.744,
"eval_samples_per_second": 75.687,
"eval_steps_per_second": 9.748,
"step": 1914
},
{
"epoch": 33.03448275862069,
"grad_norm": 0.13359691202640533,
"learning_rate": 6.239737274220033e-06,
"loss": 0.0275,
"step": 1916
},
{
"epoch": 33.06896551724138,
"grad_norm": 0.2554221749305725,
"learning_rate": 6.130268199233717e-06,
"loss": 0.0298,
"step": 1918
},
{
"epoch": 33.10344827586207,
"grad_norm": 0.1016729325056076,
"learning_rate": 6.0207991242474e-06,
"loss": 0.0271,
"step": 1920
},
{
"epoch": 33.13793103448276,
"grad_norm": 0.0994502529501915,
"learning_rate": 5.911330049261084e-06,
"loss": 0.0238,
"step": 1922
},
{
"epoch": 33.172413793103445,
"grad_norm": 0.08981093764305115,
"learning_rate": 5.801860974274768e-06,
"loss": 0.0206,
"step": 1924
},
{
"epoch": 33.206896551724135,
"grad_norm": 0.7163501977920532,
"learning_rate": 5.692391899288451e-06,
"loss": 0.0307,
"step": 1926
},
{
"epoch": 33.241379310344826,
"grad_norm": 2.008106231689453,
"learning_rate": 5.582922824302135e-06,
"loss": 0.0682,
"step": 1928
},
{
"epoch": 33.275862068965516,
"grad_norm": 0.15033219754695892,
"learning_rate": 5.473453749315818e-06,
"loss": 0.0248,
"step": 1930
},
{
"epoch": 33.310344827586206,
"grad_norm": 0.5542562007904053,
"learning_rate": 5.3639846743295025e-06,
"loss": 0.0236,
"step": 1932
},
{
"epoch": 33.3448275862069,
"grad_norm": 0.19591717422008514,
"learning_rate": 5.254515599343185e-06,
"loss": 0.0299,
"step": 1934
},
{
"epoch": 33.37931034482759,
"grad_norm": 0.12007873505353928,
"learning_rate": 5.14504652435687e-06,
"loss": 0.0249,
"step": 1936
},
{
"epoch": 33.41379310344828,
"grad_norm": 0.6954270005226135,
"learning_rate": 5.035577449370553e-06,
"loss": 0.0287,
"step": 1938
},
{
"epoch": 33.44827586206897,
"grad_norm": 1.665319800376892,
"learning_rate": 4.926108374384237e-06,
"loss": 0.0373,
"step": 1940
},
{
"epoch": 33.48275862068966,
"grad_norm": 0.8834956884384155,
"learning_rate": 4.81663929939792e-06,
"loss": 0.1,
"step": 1942
},
{
"epoch": 33.51724137931034,
"grad_norm": 0.09338194131851196,
"learning_rate": 4.707170224411604e-06,
"loss": 0.0281,
"step": 1944
},
{
"epoch": 33.55172413793103,
"grad_norm": 0.11382108926773071,
"learning_rate": 4.5977011494252875e-06,
"loss": 0.0255,
"step": 1946
},
{
"epoch": 33.58620689655172,
"grad_norm": 0.17863136529922485,
"learning_rate": 4.488232074438971e-06,
"loss": 0.0271,
"step": 1948
},
{
"epoch": 33.62068965517241,
"grad_norm": 0.7330889105796814,
"learning_rate": 4.378762999452655e-06,
"loss": 0.0244,
"step": 1950
},
{
"epoch": 33.6551724137931,
"grad_norm": 0.08643897622823715,
"learning_rate": 4.2692939244663386e-06,
"loss": 0.0217,
"step": 1952
},
{
"epoch": 33.689655172413794,
"grad_norm": 0.09883402287960052,
"learning_rate": 4.159824849480022e-06,
"loss": 0.0238,
"step": 1954
},
{
"epoch": 33.724137931034484,
"grad_norm": 0.11817536503076553,
"learning_rate": 4.050355774493705e-06,
"loss": 0.0254,
"step": 1956
},
{
"epoch": 33.758620689655174,
"grad_norm": 0.09555190801620483,
"learning_rate": 3.94088669950739e-06,
"loss": 0.0211,
"step": 1958
},
{
"epoch": 33.793103448275865,
"grad_norm": 18.8179931640625,
"learning_rate": 3.8314176245210725e-06,
"loss": 0.1301,
"step": 1960
},
{
"epoch": 33.827586206896555,
"grad_norm": 0.10722629725933075,
"learning_rate": 3.7219485495347566e-06,
"loss": 0.0984,
"step": 1962
},
{
"epoch": 33.86206896551724,
"grad_norm": 7.371537208557129,
"learning_rate": 3.61247947454844e-06,
"loss": 0.0517,
"step": 1964
},
{
"epoch": 33.89655172413793,
"grad_norm": 0.13865648210048676,
"learning_rate": 3.503010399562124e-06,
"loss": 0.0268,
"step": 1966
},
{
"epoch": 33.93103448275862,
"grad_norm": 0.08597588539123535,
"learning_rate": 3.3935413245758073e-06,
"loss": 0.0238,
"step": 1968
},
{
"epoch": 33.96551724137931,
"grad_norm": 0.10678116977214813,
"learning_rate": 3.2840722495894914e-06,
"loss": 0.0234,
"step": 1970
},
{
"epoch": 34.0,
"grad_norm": 0.17705120146274567,
"learning_rate": 3.1746031746031746e-06,
"loss": 0.0235,
"step": 1972
},
{
"epoch": 34.0,
"eval_accuracy": 0.6818181818181818,
"eval_f1_macro": 0.6216473911055191,
"eval_f1_micro": 0.6818181818181818,
"eval_f1_weighted": 0.6795081669559204,
"eval_loss": 1.477163553237915,
"eval_precision_macro": 0.6323631514971935,
"eval_precision_micro": 0.6818181818181818,
"eval_precision_weighted": 0.6836445631033983,
"eval_recall_macro": 0.6173015873015872,
"eval_recall_micro": 0.6818181818181818,
"eval_recall_weighted": 0.6818181818181818,
"eval_runtime": 1.697,
"eval_samples_per_second": 77.783,
"eval_steps_per_second": 10.018,
"step": 1972
},
{
"epoch": 34.03448275862069,
"grad_norm": 0.09892508387565613,
"learning_rate": 3.0651340996168583e-06,
"loss": 0.0225,
"step": 1974
},
{
"epoch": 34.06896551724138,
"grad_norm": 0.09665203094482422,
"learning_rate": 2.955665024630542e-06,
"loss": 0.0312,
"step": 1976
},
{
"epoch": 34.10344827586207,
"grad_norm": 0.09741330146789551,
"learning_rate": 2.8461959496442257e-06,
"loss": 0.0222,
"step": 1978
},
{
"epoch": 34.13793103448276,
"grad_norm": 0.08742016553878784,
"learning_rate": 2.736726874657909e-06,
"loss": 0.1584,
"step": 1980
},
{
"epoch": 34.172413793103445,
"grad_norm": 0.5281866192817688,
"learning_rate": 2.6272577996715927e-06,
"loss": 0.027,
"step": 1982
},
{
"epoch": 34.206896551724135,
"grad_norm": 0.12132374942302704,
"learning_rate": 2.5177887246852764e-06,
"loss": 0.0298,
"step": 1984
},
{
"epoch": 34.241379310344826,
"grad_norm": 0.0994863212108612,
"learning_rate": 2.40831964969896e-06,
"loss": 0.0251,
"step": 1986
},
{
"epoch": 34.275862068965516,
"grad_norm": 11.063030242919922,
"learning_rate": 2.2988505747126437e-06,
"loss": 0.0678,
"step": 1988
},
{
"epoch": 34.310344827586206,
"grad_norm": 0.30384892225265503,
"learning_rate": 2.1893814997263274e-06,
"loss": 0.0243,
"step": 1990
},
{
"epoch": 34.3448275862069,
"grad_norm": 0.09084097295999527,
"learning_rate": 2.079912424740011e-06,
"loss": 0.0826,
"step": 1992
},
{
"epoch": 34.37931034482759,
"grad_norm": 0.10241986811161041,
"learning_rate": 1.970443349753695e-06,
"loss": 0.0222,
"step": 1994
},
{
"epoch": 34.41379310344828,
"grad_norm": 0.0835302397608757,
"learning_rate": 1.8609742747673783e-06,
"loss": 0.026,
"step": 1996
},
{
"epoch": 34.44827586206897,
"grad_norm": 0.0980919674038887,
"learning_rate": 1.751505199781062e-06,
"loss": 0.0267,
"step": 1998
},
{
"epoch": 34.48275862068966,
"grad_norm": 0.15463948249816895,
"learning_rate": 1.6420361247947457e-06,
"loss": 0.0259,
"step": 2000
},
{
"epoch": 34.51724137931034,
"grad_norm": 5.89515495300293,
"learning_rate": 1.5325670498084292e-06,
"loss": 0.0348,
"step": 2002
},
{
"epoch": 34.55172413793103,
"grad_norm": 0.08560877293348312,
"learning_rate": 1.4230979748221129e-06,
"loss": 0.0227,
"step": 2004
},
{
"epoch": 34.58620689655172,
"grad_norm": 0.152249276638031,
"learning_rate": 1.3136288998357963e-06,
"loss": 0.0232,
"step": 2006
},
{
"epoch": 34.62068965517241,
"grad_norm": 0.13286259770393372,
"learning_rate": 1.20415982484948e-06,
"loss": 0.0227,
"step": 2008
},
{
"epoch": 34.6551724137931,
"grad_norm": 0.13158464431762695,
"learning_rate": 1.0946907498631637e-06,
"loss": 0.0221,
"step": 2010
},
{
"epoch": 34.689655172413794,
"grad_norm": 0.07431570440530777,
"learning_rate": 9.852216748768474e-07,
"loss": 0.0268,
"step": 2012
},
{
"epoch": 34.724137931034484,
"grad_norm": 0.11424873024225235,
"learning_rate": 8.75752599890531e-07,
"loss": 0.0236,
"step": 2014
},
{
"epoch": 34.758620689655174,
"grad_norm": 0.5630809664726257,
"learning_rate": 7.662835249042146e-07,
"loss": 0.0301,
"step": 2016
},
{
"epoch": 34.793103448275865,
"grad_norm": 0.15935018658638,
"learning_rate": 6.568144499178982e-07,
"loss": 0.0274,
"step": 2018
},
{
"epoch": 34.827586206896555,
"grad_norm": 0.08231520652770996,
"learning_rate": 5.473453749315819e-07,
"loss": 0.022,
"step": 2020
},
{
"epoch": 34.86206896551724,
"grad_norm": 0.28459644317626953,
"learning_rate": 4.378762999452655e-07,
"loss": 0.0256,
"step": 2022
},
{
"epoch": 34.89655172413793,
"grad_norm": 0.11006944626569748,
"learning_rate": 3.284072249589491e-07,
"loss": 0.0702,
"step": 2024
},
{
"epoch": 34.93103448275862,
"grad_norm": 1.2310376167297363,
"learning_rate": 2.1893814997263275e-07,
"loss": 0.025,
"step": 2026
},
{
"epoch": 34.96551724137931,
"grad_norm": 0.082310751080513,
"learning_rate": 1.0946907498631637e-07,
"loss": 0.0244,
"step": 2028
},
{
"epoch": 35.0,
"grad_norm": 0.20153437554836273,
"learning_rate": 0.0,
"loss": 0.0345,
"step": 2030
},
{
"epoch": 35.0,
"eval_accuracy": 0.696969696969697,
"eval_f1_macro": 0.6556072550024405,
"eval_f1_micro": 0.696969696969697,
"eval_f1_weighted": 0.6961160958984463,
"eval_loss": 1.4754124879837036,
"eval_precision_macro": 0.6722334315930374,
"eval_precision_micro": 0.696969696969697,
"eval_precision_weighted": 0.703819782620723,
"eval_recall_macro": 0.6479138321995465,
"eval_recall_micro": 0.696969696969697,
"eval_recall_weighted": 0.696969696969697,
"eval_runtime": 1.8533,
"eval_samples_per_second": 71.223,
"eval_steps_per_second": 9.173,
"step": 2030
},
{
"epoch": 35.0,
"step": 2030,
"total_flos": 1.2531016253190758e+18,
"train_loss": 0.7230312045223167,
"train_runtime": 345.4127,
"train_samples_per_second": 46.814,
"train_steps_per_second": 5.877
}
],
"logging_steps": 2,
"max_steps": 2030,
"num_input_tokens_seen": 0,
"num_train_epochs": 35,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.2531016253190758e+18,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}