{
"best_metric": 1.0619021654129028,
"best_model_checkpoint": "square_run_second_vote/checkpoint-464",
"epoch": 30.0,
"eval_steps": 500,
"global_step": 1740,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.034482758620689655,
"grad_norm": 10.100388526916504,
"learning_rate": 1.1494252873563219e-06,
"loss": 1.9216,
"step": 2
},
{
"epoch": 0.06896551724137931,
"grad_norm": 15.660673141479492,
"learning_rate": 2.2988505747126437e-06,
"loss": 1.9891,
"step": 4
},
{
"epoch": 0.10344827586206896,
"grad_norm": 13.529168128967285,
"learning_rate": 3.448275862068966e-06,
"loss": 2.0535,
"step": 6
},
{
"epoch": 0.13793103448275862,
"grad_norm": 12.513371467590332,
"learning_rate": 4.5977011494252875e-06,
"loss": 2.0399,
"step": 8
},
{
"epoch": 0.1724137931034483,
"grad_norm": 12.855453491210938,
"learning_rate": 5.747126436781609e-06,
"loss": 1.9636,
"step": 10
},
{
"epoch": 0.20689655172413793,
"grad_norm": 9.419121742248535,
"learning_rate": 6.896551724137932e-06,
"loss": 1.7951,
"step": 12
},
{
"epoch": 0.2413793103448276,
"grad_norm": 8.58608627319336,
"learning_rate": 8.045977011494253e-06,
"loss": 1.938,
"step": 14
},
{
"epoch": 0.27586206896551724,
"grad_norm": 15.212312698364258,
"learning_rate": 9.195402298850575e-06,
"loss": 1.947,
"step": 16
},
{
"epoch": 0.3103448275862069,
"grad_norm": 10.53651237487793,
"learning_rate": 1.0344827586206897e-05,
"loss": 2.001,
"step": 18
},
{
"epoch": 0.3448275862068966,
"grad_norm": 10.844449043273926,
"learning_rate": 1.1494252873563218e-05,
"loss": 1.6643,
"step": 20
},
{
"epoch": 0.3793103448275862,
"grad_norm": 12.122403144836426,
"learning_rate": 1.2643678160919542e-05,
"loss": 1.7857,
"step": 22
},
{
"epoch": 0.41379310344827586,
"grad_norm": 10.027009963989258,
"learning_rate": 1.3793103448275863e-05,
"loss": 2.0627,
"step": 24
},
{
"epoch": 0.4482758620689655,
"grad_norm": 10.017681121826172,
"learning_rate": 1.4942528735632185e-05,
"loss": 1.8997,
"step": 26
},
{
"epoch": 0.4827586206896552,
"grad_norm": 11.976802825927734,
"learning_rate": 1.6091954022988507e-05,
"loss": 1.9658,
"step": 28
},
{
"epoch": 0.5172413793103449,
"grad_norm": 17.975006103515625,
"learning_rate": 1.7241379310344828e-05,
"loss": 2.1084,
"step": 30
},
{
"epoch": 0.5517241379310345,
"grad_norm": 10.179977416992188,
"learning_rate": 1.839080459770115e-05,
"loss": 1.8625,
"step": 32
},
{
"epoch": 0.5862068965517241,
"grad_norm": 12.881868362426758,
"learning_rate": 1.9540229885057475e-05,
"loss": 2.0918,
"step": 34
},
{
"epoch": 0.6206896551724138,
"grad_norm": 11.166668891906738,
"learning_rate": 2.0689655172413793e-05,
"loss": 1.7992,
"step": 36
},
{
"epoch": 0.6551724137931034,
"grad_norm": 9.509991645812988,
"learning_rate": 2.183908045977012e-05,
"loss": 1.7821,
"step": 38
},
{
"epoch": 0.6896551724137931,
"grad_norm": 9.292908668518066,
"learning_rate": 2.2988505747126437e-05,
"loss": 1.8435,
"step": 40
},
{
"epoch": 0.7241379310344828,
"grad_norm": 9.034067153930664,
"learning_rate": 2.413793103448276e-05,
"loss": 1.7203,
"step": 42
},
{
"epoch": 0.7586206896551724,
"grad_norm": 10.527994155883789,
"learning_rate": 2.5287356321839083e-05,
"loss": 2.0411,
"step": 44
},
{
"epoch": 0.7931034482758621,
"grad_norm": 12.680684089660645,
"learning_rate": 2.6436781609195405e-05,
"loss": 1.7733,
"step": 46
},
{
"epoch": 0.8275862068965517,
"grad_norm": 11.482306480407715,
"learning_rate": 2.7586206896551727e-05,
"loss": 1.9162,
"step": 48
},
{
"epoch": 0.8620689655172413,
"grad_norm": 9.575610160827637,
"learning_rate": 2.8735632183908045e-05,
"loss": 1.9413,
"step": 50
},
{
"epoch": 0.896551724137931,
"grad_norm": 9.212911605834961,
"learning_rate": 2.988505747126437e-05,
"loss": 1.6267,
"step": 52
},
{
"epoch": 0.9310344827586207,
"grad_norm": 10.29886245727539,
"learning_rate": 3.103448275862069e-05,
"loss": 2.0705,
"step": 54
},
{
"epoch": 0.9655172413793104,
"grad_norm": 10.353363990783691,
"learning_rate": 3.218390804597701e-05,
"loss": 1.7528,
"step": 56
},
{
"epoch": 1.0,
"grad_norm": 10.195358276367188,
"learning_rate": 3.3333333333333335e-05,
"loss": 1.8754,
"step": 58
},
{
"epoch": 1.0,
"eval_accuracy": 0.2803030303030303,
"eval_f1_macro": 0.13852914120300855,
"eval_f1_micro": 0.2803030303030303,
"eval_f1_weighted": 0.17218993609140762,
"eval_loss": 1.7960734367370605,
"eval_precision_macro": 0.14259740259740258,
"eval_precision_micro": 0.2803030303030303,
"eval_precision_weighted": 0.16029384756657483,
"eval_recall_macro": 0.20982142857142858,
"eval_recall_micro": 0.2803030303030303,
"eval_recall_weighted": 0.2803030303030303,
"eval_runtime": 2.0654,
"eval_samples_per_second": 63.909,
"eval_steps_per_second": 8.231,
"step": 58
},
{
"epoch": 1.0344827586206897,
"grad_norm": 6.763228416442871,
"learning_rate": 3.4482758620689657e-05,
"loss": 1.556,
"step": 60
},
{
"epoch": 1.0689655172413792,
"grad_norm": 12.80778694152832,
"learning_rate": 3.563218390804598e-05,
"loss": 1.8112,
"step": 62
},
{
"epoch": 1.103448275862069,
"grad_norm": 5.85200309753418,
"learning_rate": 3.67816091954023e-05,
"loss": 1.6181,
"step": 64
},
{
"epoch": 1.1379310344827587,
"grad_norm": 13.358109474182129,
"learning_rate": 3.793103448275862e-05,
"loss": 1.5691,
"step": 66
},
{
"epoch": 1.1724137931034484,
"grad_norm": 13.59086799621582,
"learning_rate": 3.908045977011495e-05,
"loss": 2.028,
"step": 68
},
{
"epoch": 1.206896551724138,
"grad_norm": 12.442203521728516,
"learning_rate": 4.0229885057471265e-05,
"loss": 1.8991,
"step": 70
},
{
"epoch": 1.2413793103448276,
"grad_norm": 10.374919891357422,
"learning_rate": 4.1379310344827587e-05,
"loss": 1.7926,
"step": 72
},
{
"epoch": 1.2758620689655173,
"grad_norm": 8.41528034210205,
"learning_rate": 4.252873563218391e-05,
"loss": 1.9024,
"step": 74
},
{
"epoch": 1.3103448275862069,
"grad_norm": 8.301165580749512,
"learning_rate": 4.367816091954024e-05,
"loss": 1.5357,
"step": 76
},
{
"epoch": 1.3448275862068966,
"grad_norm": 10.928062438964844,
"learning_rate": 4.482758620689655e-05,
"loss": 1.7872,
"step": 78
},
{
"epoch": 1.3793103448275863,
"grad_norm": 12.0917329788208,
"learning_rate": 4.597701149425287e-05,
"loss": 1.9049,
"step": 80
},
{
"epoch": 1.4137931034482758,
"grad_norm": 10.680750846862793,
"learning_rate": 4.7126436781609195e-05,
"loss": 1.818,
"step": 82
},
{
"epoch": 1.4482758620689655,
"grad_norm": 10.11230754852295,
"learning_rate": 4.827586206896552e-05,
"loss": 1.7855,
"step": 84
},
{
"epoch": 1.4827586206896552,
"grad_norm": 15.761420249938965,
"learning_rate": 4.9425287356321845e-05,
"loss": 1.7779,
"step": 86
},
{
"epoch": 1.5172413793103448,
"grad_norm": 15.814207077026367,
"learning_rate": 5.057471264367817e-05,
"loss": 1.6615,
"step": 88
},
{
"epoch": 1.5517241379310345,
"grad_norm": 10.473943710327148,
"learning_rate": 5.172413793103449e-05,
"loss": 1.7123,
"step": 90
},
{
"epoch": 1.5862068965517242,
"grad_norm": 9.527724266052246,
"learning_rate": 5.287356321839081e-05,
"loss": 1.5118,
"step": 92
},
{
"epoch": 1.6206896551724137,
"grad_norm": 13.386604309082031,
"learning_rate": 5.402298850574713e-05,
"loss": 2.0467,
"step": 94
},
{
"epoch": 1.6551724137931034,
"grad_norm": 11.433362007141113,
"learning_rate": 5.517241379310345e-05,
"loss": 1.7908,
"step": 96
},
{
"epoch": 1.6896551724137931,
"grad_norm": 10.231264114379883,
"learning_rate": 5.632183908045977e-05,
"loss": 2.0643,
"step": 98
},
{
"epoch": 1.7241379310344827,
"grad_norm": 10.50631046295166,
"learning_rate": 5.747126436781609e-05,
"loss": 1.6573,
"step": 100
},
{
"epoch": 1.7586206896551724,
"grad_norm": 10.044520378112793,
"learning_rate": 5.862068965517241e-05,
"loss": 1.6,
"step": 102
},
{
"epoch": 1.793103448275862,
"grad_norm": 11.720151901245117,
"learning_rate": 5.977011494252874e-05,
"loss": 1.9807,
"step": 104
},
{
"epoch": 1.8275862068965516,
"grad_norm": 8.176350593566895,
"learning_rate": 6.091954022988506e-05,
"loss": 1.772,
"step": 106
},
{
"epoch": 1.8620689655172413,
"grad_norm": 10.523119926452637,
"learning_rate": 6.206896551724138e-05,
"loss": 1.5979,
"step": 108
},
{
"epoch": 1.896551724137931,
"grad_norm": 12.917177200317383,
"learning_rate": 6.32183908045977e-05,
"loss": 1.9565,
"step": 110
},
{
"epoch": 1.9310344827586206,
"grad_norm": 9.43907356262207,
"learning_rate": 6.436781609195403e-05,
"loss": 1.6814,
"step": 112
},
{
"epoch": 1.9655172413793105,
"grad_norm": 7.691906452178955,
"learning_rate": 6.551724137931034e-05,
"loss": 1.8594,
"step": 114
},
{
"epoch": 2.0,
"grad_norm": 12.988773345947266,
"learning_rate": 6.666666666666667e-05,
"loss": 2.0246,
"step": 116
},
{
"epoch": 2.0,
"eval_accuracy": 0.3106060606060606,
"eval_f1_macro": 0.22363870840750824,
"eval_f1_micro": 0.3106060606060606,
"eval_f1_weighted": 0.2483673419087291,
"eval_loss": 2.0137689113616943,
"eval_precision_macro": 0.2557566049675684,
"eval_precision_micro": 0.3106060606060606,
"eval_precision_weighted": 0.2691759790597,
"eval_recall_macro": 0.2841635338345864,
"eval_recall_micro": 0.3106060606060606,
"eval_recall_weighted": 0.3106060606060606,
"eval_runtime": 1.9498,
"eval_samples_per_second": 67.698,
"eval_steps_per_second": 8.719,
"step": 116
},
{
"epoch": 2.0344827586206895,
"grad_norm": 10.840887069702148,
"learning_rate": 6.781609195402298e-05,
"loss": 1.7498,
"step": 118
},
{
"epoch": 2.0689655172413794,
"grad_norm": 11.36091423034668,
"learning_rate": 6.896551724137931e-05,
"loss": 1.7945,
"step": 120
},
{
"epoch": 2.103448275862069,
"grad_norm": 9.655730247497559,
"learning_rate": 7.011494252873564e-05,
"loss": 1.6088,
"step": 122
},
{
"epoch": 2.1379310344827585,
"grad_norm": 9.597773551940918,
"learning_rate": 7.126436781609196e-05,
"loss": 1.6544,
"step": 124
},
{
"epoch": 2.1724137931034484,
"grad_norm": 8.65636157989502,
"learning_rate": 7.241379310344828e-05,
"loss": 1.4854,
"step": 126
},
{
"epoch": 2.206896551724138,
"grad_norm": 8.050541877746582,
"learning_rate": 7.35632183908046e-05,
"loss": 1.3232,
"step": 128
},
{
"epoch": 2.2413793103448274,
"grad_norm": 11.399789810180664,
"learning_rate": 7.471264367816091e-05,
"loss": 1.6108,
"step": 130
},
{
"epoch": 2.2758620689655173,
"grad_norm": 10.911064147949219,
"learning_rate": 7.586206896551724e-05,
"loss": 1.3609,
"step": 132
},
{
"epoch": 2.310344827586207,
"grad_norm": 10.229703903198242,
"learning_rate": 7.701149425287356e-05,
"loss": 1.4551,
"step": 134
},
{
"epoch": 2.344827586206897,
"grad_norm": 10.077006340026855,
"learning_rate": 7.81609195402299e-05,
"loss": 1.3409,
"step": 136
},
{
"epoch": 2.3793103448275863,
"grad_norm": 12.368110656738281,
"learning_rate": 7.931034482758621e-05,
"loss": 1.8165,
"step": 138
},
{
"epoch": 2.413793103448276,
"grad_norm": 16.638439178466797,
"learning_rate": 8.045977011494253e-05,
"loss": 1.6881,
"step": 140
},
{
"epoch": 2.4482758620689653,
"grad_norm": 10.63574504852295,
"learning_rate": 8.160919540229886e-05,
"loss": 1.3659,
"step": 142
},
{
"epoch": 2.4827586206896552,
"grad_norm": 12.455951690673828,
"learning_rate": 8.275862068965517e-05,
"loss": 1.3084,
"step": 144
},
{
"epoch": 2.5172413793103448,
"grad_norm": 7.663853168487549,
"learning_rate": 8.39080459770115e-05,
"loss": 1.3424,
"step": 146
},
{
"epoch": 2.5517241379310347,
"grad_norm": 8.313244819641113,
"learning_rate": 8.505747126436782e-05,
"loss": 1.2946,
"step": 148
},
{
"epoch": 2.586206896551724,
"grad_norm": 9.524584770202637,
"learning_rate": 8.620689655172413e-05,
"loss": 1.1958,
"step": 150
},
{
"epoch": 2.6206896551724137,
"grad_norm": 12.151946067810059,
"learning_rate": 8.735632183908047e-05,
"loss": 1.6999,
"step": 152
},
{
"epoch": 2.655172413793103,
"grad_norm": 9.74065113067627,
"learning_rate": 8.850574712643679e-05,
"loss": 1.5755,
"step": 154
},
{
"epoch": 2.689655172413793,
"grad_norm": 14.421919822692871,
"learning_rate": 8.96551724137931e-05,
"loss": 1.428,
"step": 156
},
{
"epoch": 2.7241379310344827,
"grad_norm": 11.11561393737793,
"learning_rate": 9.080459770114943e-05,
"loss": 1.4898,
"step": 158
},
{
"epoch": 2.7586206896551726,
"grad_norm": 13.893324851989746,
"learning_rate": 9.195402298850575e-05,
"loss": 1.901,
"step": 160
},
{
"epoch": 2.793103448275862,
"grad_norm": 8.19894027709961,
"learning_rate": 9.310344827586207e-05,
"loss": 1.4159,
"step": 162
},
{
"epoch": 2.8275862068965516,
"grad_norm": 10.198213577270508,
"learning_rate": 9.425287356321839e-05,
"loss": 1.6808,
"step": 164
},
{
"epoch": 2.862068965517241,
"grad_norm": 7.003215312957764,
"learning_rate": 9.540229885057472e-05,
"loss": 1.859,
"step": 166
},
{
"epoch": 2.896551724137931,
"grad_norm": 8.648194313049316,
"learning_rate": 9.655172413793105e-05,
"loss": 1.5442,
"step": 168
},
{
"epoch": 2.9310344827586206,
"grad_norm": 9.167272567749023,
"learning_rate": 9.770114942528736e-05,
"loss": 1.6219,
"step": 170
},
{
"epoch": 2.9655172413793105,
"grad_norm": 13.052528381347656,
"learning_rate": 9.885057471264369e-05,
"loss": 1.637,
"step": 172
},
{
"epoch": 3.0,
"grad_norm": 10.723739624023438,
"learning_rate": 0.0001,
"loss": 1.6189,
"step": 174
},
{
"epoch": 3.0,
"eval_accuracy": 0.38636363636363635,
"eval_f1_macro": 0.244379876244283,
"eval_f1_micro": 0.38636363636363635,
"eval_f1_weighted": 0.3195200593505679,
"eval_loss": 1.5038808584213257,
"eval_precision_macro": 0.2633030990173847,
"eval_precision_micro": 0.38636363636363635,
"eval_precision_weighted": 0.33009660092993426,
"eval_recall_macro": 0.28469611528822053,
"eval_recall_micro": 0.38636363636363635,
"eval_recall_weighted": 0.38636363636363635,
"eval_runtime": 1.953,
"eval_samples_per_second": 67.59,
"eval_steps_per_second": 8.705,
"step": 174
},
{
"epoch": 3.0344827586206895,
"grad_norm": 7.145821571350098,
"learning_rate": 9.987228607918264e-05,
"loss": 1.3525,
"step": 176
},
{
"epoch": 3.0689655172413794,
"grad_norm": 8.73121166229248,
"learning_rate": 9.974457215836527e-05,
"loss": 1.3759,
"step": 178
},
{
"epoch": 3.103448275862069,
"grad_norm": 11.502145767211914,
"learning_rate": 9.96168582375479e-05,
"loss": 1.3372,
"step": 180
},
{
"epoch": 3.1379310344827585,
"grad_norm": 8.244510650634766,
"learning_rate": 9.948914431673053e-05,
"loss": 1.5859,
"step": 182
},
{
"epoch": 3.1724137931034484,
"grad_norm": 9.313932418823242,
"learning_rate": 9.936143039591316e-05,
"loss": 1.4753,
"step": 184
},
{
"epoch": 3.206896551724138,
"grad_norm": 13.148027420043945,
"learning_rate": 9.92337164750958e-05,
"loss": 1.5913,
"step": 186
},
{
"epoch": 3.2413793103448274,
"grad_norm": 14.681943893432617,
"learning_rate": 9.910600255427843e-05,
"loss": 1.4892,
"step": 188
},
{
"epoch": 3.2758620689655173,
"grad_norm": 13.758460998535156,
"learning_rate": 9.897828863346104e-05,
"loss": 1.333,
"step": 190
},
{
"epoch": 3.310344827586207,
"grad_norm": 11.166180610656738,
"learning_rate": 9.885057471264369e-05,
"loss": 1.5819,
"step": 192
},
{
"epoch": 3.344827586206897,
"grad_norm": 9.059310913085938,
"learning_rate": 9.872286079182631e-05,
"loss": 0.9963,
"step": 194
},
{
"epoch": 3.3793103448275863,
"grad_norm": 9.729276657104492,
"learning_rate": 9.859514687100895e-05,
"loss": 1.8236,
"step": 196
},
{
"epoch": 3.413793103448276,
"grad_norm": 11.96312427520752,
"learning_rate": 9.846743295019157e-05,
"loss": 1.4265,
"step": 198
},
{
"epoch": 3.4482758620689653,
"grad_norm": 11.733696937561035,
"learning_rate": 9.833971902937422e-05,
"loss": 1.1957,
"step": 200
},
{
"epoch": 3.4827586206896552,
"grad_norm": 19.109695434570312,
"learning_rate": 9.821200510855683e-05,
"loss": 2.2991,
"step": 202
},
{
"epoch": 3.5172413793103448,
"grad_norm": 11.54105281829834,
"learning_rate": 9.808429118773947e-05,
"loss": 1.5837,
"step": 204
},
{
"epoch": 3.5517241379310347,
"grad_norm": 14.453530311584473,
"learning_rate": 9.79565772669221e-05,
"loss": 1.4211,
"step": 206
},
{
"epoch": 3.586206896551724,
"grad_norm": 12.695812225341797,
"learning_rate": 9.782886334610473e-05,
"loss": 1.0612,
"step": 208
},
{
"epoch": 3.6206896551724137,
"grad_norm": 9.610103607177734,
"learning_rate": 9.770114942528736e-05,
"loss": 1.2058,
"step": 210
},
{
"epoch": 3.655172413793103,
"grad_norm": 12.852982521057129,
"learning_rate": 9.757343550446999e-05,
"loss": 1.735,
"step": 212
},
{
"epoch": 3.689655172413793,
"grad_norm": 10.032821655273438,
"learning_rate": 9.744572158365262e-05,
"loss": 1.6214,
"step": 214
},
{
"epoch": 3.7241379310344827,
"grad_norm": 13.28847885131836,
"learning_rate": 9.731800766283526e-05,
"loss": 1.2198,
"step": 216
},
{
"epoch": 3.7586206896551726,
"grad_norm": 9.55737590789795,
"learning_rate": 9.719029374201787e-05,
"loss": 1.3811,
"step": 218
},
{
"epoch": 3.793103448275862,
"grad_norm": 12.440496444702148,
"learning_rate": 9.706257982120052e-05,
"loss": 1.2533,
"step": 220
},
{
"epoch": 3.8275862068965516,
"grad_norm": 9.515503883361816,
"learning_rate": 9.693486590038314e-05,
"loss": 1.1579,
"step": 222
},
{
"epoch": 3.862068965517241,
"grad_norm": 14.026083946228027,
"learning_rate": 9.680715197956578e-05,
"loss": 1.7226,
"step": 224
},
{
"epoch": 3.896551724137931,
"grad_norm": 10.697053909301758,
"learning_rate": 9.66794380587484e-05,
"loss": 1.1127,
"step": 226
},
{
"epoch": 3.9310344827586206,
"grad_norm": 11.028640747070312,
"learning_rate": 9.655172413793105e-05,
"loss": 1.3266,
"step": 228
},
{
"epoch": 3.9655172413793105,
"grad_norm": 15.626441955566406,
"learning_rate": 9.642401021711366e-05,
"loss": 1.9315,
"step": 230
},
{
"epoch": 4.0,
"grad_norm": 13.396081924438477,
"learning_rate": 9.62962962962963e-05,
"loss": 1.3445,
"step": 232
},
{
"epoch": 4.0,
"eval_accuracy": 0.4393939393939394,
"eval_f1_macro": 0.3286744239291642,
"eval_f1_micro": 0.4393939393939394,
"eval_f1_weighted": 0.38656078267083965,
"eval_loss": 1.3982452154159546,
"eval_precision_macro": 0.31856682769726247,
"eval_precision_micro": 0.4393939393939394,
"eval_precision_weighted": 0.37300139071878197,
"eval_recall_macro": 0.3695958646616541,
"eval_recall_micro": 0.4393939393939394,
"eval_recall_weighted": 0.4393939393939394,
"eval_runtime": 1.9654,
"eval_samples_per_second": 67.161,
"eval_steps_per_second": 8.65,
"step": 232
},
{
"epoch": 4.0344827586206895,
"grad_norm": 9.478927612304688,
"learning_rate": 9.616858237547893e-05,
"loss": 1.3661,
"step": 234
},
{
"epoch": 4.068965517241379,
"grad_norm": 9.680974006652832,
"learning_rate": 9.604086845466156e-05,
"loss": 1.0935,
"step": 236
},
{
"epoch": 4.103448275862069,
"grad_norm": 7.175812244415283,
"learning_rate": 9.591315453384419e-05,
"loss": 1.1414,
"step": 238
},
{
"epoch": 4.137931034482759,
"grad_norm": 9.398384094238281,
"learning_rate": 9.578544061302682e-05,
"loss": 1.1399,
"step": 240
},
{
"epoch": 4.172413793103448,
"grad_norm": 6.083748817443848,
"learning_rate": 9.565772669220945e-05,
"loss": 0.8365,
"step": 242
},
{
"epoch": 4.206896551724138,
"grad_norm": 7.871669292449951,
"learning_rate": 9.553001277139209e-05,
"loss": 0.747,
"step": 244
},
{
"epoch": 4.241379310344827,
"grad_norm": 12.75833797454834,
"learning_rate": 9.540229885057472e-05,
"loss": 1.1442,
"step": 246
},
{
"epoch": 4.275862068965517,
"grad_norm": 9.026577949523926,
"learning_rate": 9.527458492975735e-05,
"loss": 1.1016,
"step": 248
},
{
"epoch": 4.310344827586207,
"grad_norm": 11.575862884521484,
"learning_rate": 9.514687100893998e-05,
"loss": 1.6752,
"step": 250
},
{
"epoch": 4.344827586206897,
"grad_norm": 11.908188819885254,
"learning_rate": 9.501915708812261e-05,
"loss": 1.6481,
"step": 252
},
{
"epoch": 4.379310344827586,
"grad_norm": 11.563265800476074,
"learning_rate": 9.489144316730524e-05,
"loss": 1.2965,
"step": 254
},
{
"epoch": 4.413793103448276,
"grad_norm": 8.027771949768066,
"learning_rate": 9.476372924648788e-05,
"loss": 1.1308,
"step": 256
},
{
"epoch": 4.448275862068965,
"grad_norm": 8.491975784301758,
"learning_rate": 9.463601532567051e-05,
"loss": 0.9251,
"step": 258
},
{
"epoch": 4.482758620689655,
"grad_norm": 13.935683250427246,
"learning_rate": 9.450830140485314e-05,
"loss": 1.7397,
"step": 260
},
{
"epoch": 4.517241379310345,
"grad_norm": 14.531791687011719,
"learning_rate": 9.438058748403577e-05,
"loss": 1.67,
"step": 262
},
{
"epoch": 4.551724137931035,
"grad_norm": 9.794015884399414,
"learning_rate": 9.425287356321839e-05,
"loss": 1.0594,
"step": 264
},
{
"epoch": 4.586206896551724,
"grad_norm": 9.404264450073242,
"learning_rate": 9.412515964240103e-05,
"loss": 1.1629,
"step": 266
},
{
"epoch": 4.620689655172414,
"grad_norm": 11.694725036621094,
"learning_rate": 9.399744572158365e-05,
"loss": 1.2181,
"step": 268
},
{
"epoch": 4.655172413793103,
"grad_norm": 12.964648246765137,
"learning_rate": 9.38697318007663e-05,
"loss": 1.2802,
"step": 270
},
{
"epoch": 4.689655172413794,
"grad_norm": 9.874382972717285,
"learning_rate": 9.374201787994892e-05,
"loss": 1.124,
"step": 272
},
{
"epoch": 4.724137931034483,
"grad_norm": 11.962569236755371,
"learning_rate": 9.361430395913156e-05,
"loss": 1.6443,
"step": 274
},
{
"epoch": 4.758620689655173,
"grad_norm": 9.690713882446289,
"learning_rate": 9.348659003831418e-05,
"loss": 1.6067,
"step": 276
},
{
"epoch": 4.793103448275862,
"grad_norm": 11.790706634521484,
"learning_rate": 9.335887611749681e-05,
"loss": 1.29,
"step": 278
},
{
"epoch": 4.827586206896552,
"grad_norm": 8.600451469421387,
"learning_rate": 9.323116219667944e-05,
"loss": 1.696,
"step": 280
},
{
"epoch": 4.862068965517241,
"grad_norm": 7.481589317321777,
"learning_rate": 9.310344827586207e-05,
"loss": 1.1751,
"step": 282
},
{
"epoch": 4.896551724137931,
"grad_norm": 11.036040306091309,
"learning_rate": 9.29757343550447e-05,
"loss": 0.9911,
"step": 284
},
{
"epoch": 4.931034482758621,
"grad_norm": 10.061944961547852,
"learning_rate": 9.284802043422734e-05,
"loss": 1.4282,
"step": 286
},
{
"epoch": 4.9655172413793105,
"grad_norm": 8.946602821350098,
"learning_rate": 9.272030651340997e-05,
"loss": 1.4806,
"step": 288
},
{
"epoch": 5.0,
"grad_norm": 8.66653823852539,
"learning_rate": 9.25925925925926e-05,
"loss": 1.3387,
"step": 290
},
{
"epoch": 5.0,
"eval_accuracy": 0.5757575757575758,
"eval_f1_macro": 0.4401252312339328,
"eval_f1_micro": 0.5757575757575758,
"eval_f1_weighted": 0.5265262883858709,
"eval_loss": 1.1920479536056519,
"eval_precision_macro": 0.4314786059351277,
"eval_precision_micro": 0.5757575757575758,
"eval_precision_weighted": 0.5031131056946274,
"eval_recall_macro": 0.4682644110275689,
"eval_recall_micro": 0.5757575757575758,
"eval_recall_weighted": 0.5757575757575758,
"eval_runtime": 1.976,
"eval_samples_per_second": 66.801,
"eval_steps_per_second": 8.603,
"step": 290
},
{
"epoch": 5.0344827586206895,
"grad_norm": 7.769369602203369,
"learning_rate": 9.246487867177522e-05,
"loss": 1.0804,
"step": 292
},
{
"epoch": 5.068965517241379,
"grad_norm": 6.443802356719971,
"learning_rate": 9.233716475095786e-05,
"loss": 0.7804,
"step": 294
},
{
"epoch": 5.103448275862069,
"grad_norm": 5.448488712310791,
"learning_rate": 9.220945083014048e-05,
"loss": 1.0786,
"step": 296
},
{
"epoch": 5.137931034482759,
"grad_norm": 7.302280902862549,
"learning_rate": 9.208173690932313e-05,
"loss": 1.0511,
"step": 298
},
{
"epoch": 5.172413793103448,
"grad_norm": 9.579891204833984,
"learning_rate": 9.195402298850575e-05,
"loss": 1.3382,
"step": 300
},
{
"epoch": 5.206896551724138,
"grad_norm": 8.274906158447266,
"learning_rate": 9.182630906768839e-05,
"loss": 1.0457,
"step": 302
},
{
"epoch": 5.241379310344827,
"grad_norm": 9.329421997070312,
"learning_rate": 9.169859514687101e-05,
"loss": 1.076,
"step": 304
},
{
"epoch": 5.275862068965517,
"grad_norm": 8.050252914428711,
"learning_rate": 9.157088122605364e-05,
"loss": 0.744,
"step": 306
},
{
"epoch": 5.310344827586207,
"grad_norm": 9.972197532653809,
"learning_rate": 9.144316730523627e-05,
"loss": 0.9465,
"step": 308
},
{
"epoch": 5.344827586206897,
"grad_norm": 7.221776485443115,
"learning_rate": 9.13154533844189e-05,
"loss": 1.1189,
"step": 310
},
{
"epoch": 5.379310344827586,
"grad_norm": 12.02575969696045,
"learning_rate": 9.118773946360154e-05,
"loss": 1.242,
"step": 312
},
{
"epoch": 5.413793103448276,
"grad_norm": 10.018948554992676,
"learning_rate": 9.106002554278417e-05,
"loss": 0.8253,
"step": 314
},
{
"epoch": 5.448275862068965,
"grad_norm": 16.198856353759766,
"learning_rate": 9.09323116219668e-05,
"loss": 2.0836,
"step": 316
},
{
"epoch": 5.482758620689655,
"grad_norm": 15.048312187194824,
"learning_rate": 9.080459770114943e-05,
"loss": 1.583,
"step": 318
},
{
"epoch": 5.517241379310345,
"grad_norm": 9.893528938293457,
"learning_rate": 9.067688378033205e-05,
"loss": 1.5501,
"step": 320
},
{
"epoch": 5.551724137931035,
"grad_norm": 8.719097137451172,
"learning_rate": 9.05491698595147e-05,
"loss": 1.1545,
"step": 322
},
{
"epoch": 5.586206896551724,
"grad_norm": 5.774980545043945,
"learning_rate": 9.042145593869731e-05,
"loss": 0.7719,
"step": 324
},
{
"epoch": 5.620689655172414,
"grad_norm": 6.541261672973633,
"learning_rate": 9.029374201787996e-05,
"loss": 0.8762,
"step": 326
},
{
"epoch": 5.655172413793103,
"grad_norm": 8.278448104858398,
"learning_rate": 9.016602809706258e-05,
"loss": 0.9054,
"step": 328
},
{
"epoch": 5.689655172413794,
"grad_norm": 8.492527961730957,
"learning_rate": 9.003831417624522e-05,
"loss": 0.8369,
"step": 330
},
{
"epoch": 5.724137931034483,
"grad_norm": 10.219111442565918,
"learning_rate": 8.991060025542784e-05,
"loss": 1.2223,
"step": 332
},
{
"epoch": 5.758620689655173,
"grad_norm": 8.872493743896484,
"learning_rate": 8.978288633461047e-05,
"loss": 1.3745,
"step": 334
},
{
"epoch": 5.793103448275862,
"grad_norm": 7.07079553604126,
"learning_rate": 8.96551724137931e-05,
"loss": 0.8249,
"step": 336
},
{
"epoch": 5.827586206896552,
"grad_norm": 11.903797149658203,
"learning_rate": 8.952745849297573e-05,
"loss": 1.4273,
"step": 338
},
{
"epoch": 5.862068965517241,
"grad_norm": 7.793108940124512,
"learning_rate": 8.939974457215837e-05,
"loss": 0.8989,
"step": 340
},
{
"epoch": 5.896551724137931,
"grad_norm": 7.905730247497559,
"learning_rate": 8.9272030651341e-05,
"loss": 1.1299,
"step": 342
},
{
"epoch": 5.931034482758621,
"grad_norm": 8.985093116760254,
"learning_rate": 8.914431673052363e-05,
"loss": 1.356,
"step": 344
},
{
"epoch": 5.9655172413793105,
"grad_norm": 18.639375686645508,
"learning_rate": 8.901660280970626e-05,
"loss": 1.4211,
"step": 346
},
{
"epoch": 6.0,
"grad_norm": 12.543028831481934,
"learning_rate": 8.888888888888889e-05,
"loss": 1.1664,
"step": 348
},
{
"epoch": 6.0,
"eval_accuracy": 0.5075757575757576,
"eval_f1_macro": 0.41789107824630456,
"eval_f1_micro": 0.5075757575757576,
"eval_f1_weighted": 0.49881339629823973,
"eval_loss": 1.177772879600525,
"eval_precision_macro": 0.506774193548387,
"eval_precision_micro": 0.5075757575757576,
"eval_precision_weighted": 0.5861559139784946,
"eval_recall_macro": 0.43952067669172934,
"eval_recall_micro": 0.5075757575757576,
"eval_recall_weighted": 0.5075757575757576,
"eval_runtime": 1.9845,
"eval_samples_per_second": 66.515,
"eval_steps_per_second": 8.566,
"step": 348
},
{
"epoch": 6.0344827586206895,
"grad_norm": 10.878969192504883,
"learning_rate": 8.876117496807152e-05,
"loss": 1.2538,
"step": 350
},
{
"epoch": 6.068965517241379,
"grad_norm": 16.26839828491211,
"learning_rate": 8.863346104725416e-05,
"loss": 2.0077,
"step": 352
},
{
"epoch": 6.103448275862069,
"grad_norm": 4.248413562774658,
"learning_rate": 8.850574712643679e-05,
"loss": 0.6126,
"step": 354
},
{
"epoch": 6.137931034482759,
"grad_norm": 8.243701934814453,
"learning_rate": 8.837803320561942e-05,
"loss": 0.9125,
"step": 356
},
{
"epoch": 6.172413793103448,
"grad_norm": 10.138152122497559,
"learning_rate": 8.825031928480205e-05,
"loss": 0.7693,
"step": 358
},
{
"epoch": 6.206896551724138,
"grad_norm": 9.491286277770996,
"learning_rate": 8.812260536398468e-05,
"loss": 1.1705,
"step": 360
},
{
"epoch": 6.241379310344827,
"grad_norm": 10.823892593383789,
"learning_rate": 8.79948914431673e-05,
"loss": 1.4309,
"step": 362
},
{
"epoch": 6.275862068965517,
"grad_norm": 9.899202346801758,
"learning_rate": 8.786717752234995e-05,
"loss": 1.0038,
"step": 364
},
{
"epoch": 6.310344827586207,
"grad_norm": 8.473255157470703,
"learning_rate": 8.773946360153256e-05,
"loss": 1.1241,
"step": 366
},
{
"epoch": 6.344827586206897,
"grad_norm": 8.142688751220703,
"learning_rate": 8.761174968071521e-05,
"loss": 0.9774,
"step": 368
},
{
"epoch": 6.379310344827586,
"grad_norm": 8.222920417785645,
"learning_rate": 8.748403575989783e-05,
"loss": 1.105,
"step": 370
},
{
"epoch": 6.413793103448276,
"grad_norm": 8.64330768585205,
"learning_rate": 8.735632183908047e-05,
"loss": 0.7421,
"step": 372
},
{
"epoch": 6.448275862068965,
"grad_norm": 8.659346580505371,
"learning_rate": 8.722860791826309e-05,
"loss": 0.8458,
"step": 374
},
{
"epoch": 6.482758620689655,
"grad_norm": 7.194849967956543,
"learning_rate": 8.710089399744572e-05,
"loss": 0.6802,
"step": 376
},
{
"epoch": 6.517241379310345,
"grad_norm": 8.775270462036133,
"learning_rate": 8.697318007662835e-05,
"loss": 1.1627,
"step": 378
},
{
"epoch": 6.551724137931035,
"grad_norm": 9.507420539855957,
"learning_rate": 8.684546615581099e-05,
"loss": 0.9029,
"step": 380
},
{
"epoch": 6.586206896551724,
"grad_norm": 9.500436782836914,
"learning_rate": 8.671775223499362e-05,
"loss": 1.1927,
"step": 382
},
{
"epoch": 6.620689655172414,
"grad_norm": 2.656595230102539,
"learning_rate": 8.659003831417625e-05,
"loss": 0.8011,
"step": 384
},
{
"epoch": 6.655172413793103,
"grad_norm": 11.397368431091309,
"learning_rate": 8.646232439335888e-05,
"loss": 1.3364,
"step": 386
},
{
"epoch": 6.689655172413794,
"grad_norm": 9.544795036315918,
"learning_rate": 8.633461047254151e-05,
"loss": 0.9104,
"step": 388
},
{
"epoch": 6.724137931034483,
"grad_norm": 9.221898078918457,
"learning_rate": 8.620689655172413e-05,
"loss": 1.3124,
"step": 390
},
{
"epoch": 6.758620689655173,
"grad_norm": 8.60506534576416,
"learning_rate": 8.607918263090678e-05,
"loss": 0.8603,
"step": 392
},
{
"epoch": 6.793103448275862,
"grad_norm": 3.3715968132019043,
"learning_rate": 8.59514687100894e-05,
"loss": 0.8369,
"step": 394
},
{
"epoch": 6.827586206896552,
"grad_norm": 7.430781841278076,
"learning_rate": 8.582375478927204e-05,
"loss": 0.7418,
"step": 396
},
{
"epoch": 6.862068965517241,
"grad_norm": 11.155794143676758,
"learning_rate": 8.569604086845466e-05,
"loss": 1.2551,
"step": 398
},
{
"epoch": 6.896551724137931,
"grad_norm": 12.489243507385254,
"learning_rate": 8.55683269476373e-05,
"loss": 1.3012,
"step": 400
},
{
"epoch": 6.931034482758621,
"grad_norm": 10.465293884277344,
"learning_rate": 8.544061302681992e-05,
"loss": 1.122,
"step": 402
},
{
"epoch": 6.9655172413793105,
"grad_norm": 7.462335109710693,
"learning_rate": 8.531289910600255e-05,
"loss": 1.2381,
"step": 404
},
{
"epoch": 7.0,
"grad_norm": 8.508047103881836,
"learning_rate": 8.518518518518518e-05,
"loss": 1.1622,
"step": 406
},
{
"epoch": 7.0,
"eval_accuracy": 0.5378787878787878,
"eval_f1_macro": 0.45176980273325784,
"eval_f1_micro": 0.5378787878787878,
"eval_f1_weighted": 0.52507696437929,
"eval_loss": 1.1722992658615112,
"eval_precision_macro": 0.45142096229052747,
"eval_precision_micro": 0.5378787878787878,
"eval_precision_weighted": 0.5525594574507617,
"eval_recall_macro": 0.4867324561403509,
"eval_recall_micro": 0.5378787878787878,
"eval_recall_weighted": 0.5378787878787878,
"eval_runtime": 1.9976,
"eval_samples_per_second": 66.081,
"eval_steps_per_second": 8.51,
"step": 406
},
{
"epoch": 7.0344827586206895,
"grad_norm": 9.81274700164795,
"learning_rate": 8.505747126436782e-05,
"loss": 0.8411,
"step": 408
},
{
"epoch": 7.068965517241379,
"grad_norm": 9.398898124694824,
"learning_rate": 8.492975734355045e-05,
"loss": 1.0148,
"step": 410
},
{
"epoch": 7.103448275862069,
"grad_norm": 8.494013786315918,
"learning_rate": 8.480204342273308e-05,
"loss": 0.5251,
"step": 412
},
{
"epoch": 7.137931034482759,
"grad_norm": 8.39294147491455,
"learning_rate": 8.467432950191571e-05,
"loss": 0.6457,
"step": 414
},
{
"epoch": 7.172413793103448,
"grad_norm": 12.938483238220215,
"learning_rate": 8.454661558109834e-05,
"loss": 1.0682,
"step": 416
},
{
"epoch": 7.206896551724138,
"grad_norm": 8.100533485412598,
"learning_rate": 8.441890166028097e-05,
"loss": 1.0759,
"step": 418
},
{
"epoch": 7.241379310344827,
"grad_norm": 9.130753517150879,
"learning_rate": 8.42911877394636e-05,
"loss": 0.665,
"step": 420
},
{
"epoch": 7.275862068965517,
"grad_norm": 7.9572906494140625,
"learning_rate": 8.416347381864624e-05,
"loss": 0.5862,
"step": 422
},
{
"epoch": 7.310344827586207,
"grad_norm": 7.2201104164123535,
"learning_rate": 8.403575989782887e-05,
"loss": 0.8631,
"step": 424
},
{
"epoch": 7.344827586206897,
"grad_norm": 9.218771934509277,
"learning_rate": 8.39080459770115e-05,
"loss": 0.9256,
"step": 426
},
{
"epoch": 7.379310344827586,
"grad_norm": 10.637073516845703,
"learning_rate": 8.378033205619413e-05,
"loss": 0.5491,
"step": 428
},
{
"epoch": 7.413793103448276,
"grad_norm": 6.346437931060791,
"learning_rate": 8.365261813537676e-05,
"loss": 0.7555,
"step": 430
},
{
"epoch": 7.448275862068965,
"grad_norm": 5.5799384117126465,
"learning_rate": 8.35249042145594e-05,
"loss": 0.3958,
"step": 432
},
{
"epoch": 7.482758620689655,
"grad_norm": 11.625855445861816,
"learning_rate": 8.339719029374203e-05,
"loss": 0.7716,
"step": 434
},
{
"epoch": 7.517241379310345,
"grad_norm": 11.031689643859863,
"learning_rate": 8.326947637292465e-05,
"loss": 0.946,
"step": 436
},
{
"epoch": 7.551724137931035,
"grad_norm": 14.949385643005371,
"learning_rate": 8.314176245210729e-05,
"loss": 1.448,
"step": 438
},
{
"epoch": 7.586206896551724,
"grad_norm": 9.996256828308105,
"learning_rate": 8.301404853128991e-05,
"loss": 1.0621,
"step": 440
},
{
"epoch": 7.620689655172414,
"grad_norm": 12.930780410766602,
"learning_rate": 8.288633461047255e-05,
"loss": 1.5245,
"step": 442
},
{
"epoch": 7.655172413793103,
"grad_norm": 9.777554512023926,
"learning_rate": 8.275862068965517e-05,
"loss": 0.7166,
"step": 444
},
{
"epoch": 7.689655172413794,
"grad_norm": 15.452765464782715,
"learning_rate": 8.263090676883782e-05,
"loss": 0.81,
"step": 446
},
{
"epoch": 7.724137931034483,
"grad_norm": 5.021923542022705,
"learning_rate": 8.250319284802044e-05,
"loss": 0.4846,
"step": 448
},
{
"epoch": 7.758620689655173,
"grad_norm": 10.42216682434082,
"learning_rate": 8.237547892720307e-05,
"loss": 0.8314,
"step": 450
},
{
"epoch": 7.793103448275862,
"grad_norm": 10.091817855834961,
"learning_rate": 8.22477650063857e-05,
"loss": 0.7462,
"step": 452
},
{
"epoch": 7.827586206896552,
"grad_norm": 10.696878433227539,
"learning_rate": 8.212005108556833e-05,
"loss": 1.1465,
"step": 454
},
{
"epoch": 7.862068965517241,
"grad_norm": 13.578857421875,
"learning_rate": 8.199233716475096e-05,
"loss": 1.7135,
"step": 456
},
{
"epoch": 7.896551724137931,
"grad_norm": 8.861135482788086,
"learning_rate": 8.18646232439336e-05,
"loss": 0.9397,
"step": 458
},
{
"epoch": 7.931034482758621,
"grad_norm": 8.846278190612793,
"learning_rate": 8.173690932311623e-05,
"loss": 0.932,
"step": 460
},
{
"epoch": 7.9655172413793105,
"grad_norm": 13.574858665466309,
"learning_rate": 8.160919540229886e-05,
"loss": 1.6147,
"step": 462
},
{
"epoch": 8.0,
"grad_norm": 11.378049850463867,
"learning_rate": 8.148148148148148e-05,
"loss": 0.9827,
"step": 464
},
{
"epoch": 8.0,
"eval_accuracy": 0.6212121212121212,
"eval_f1_macro": 0.5083652151343768,
"eval_f1_micro": 0.6212121212121212,
"eval_f1_weighted": 0.6074328355724158,
"eval_loss": 1.0619021654129028,
"eval_precision_macro": 0.5036885527157723,
"eval_precision_micro": 0.6212121212121212,
"eval_precision_weighted": 0.61396284194398,
"eval_recall_macro": 0.5344924812030075,
"eval_recall_micro": 0.6212121212121212,
"eval_recall_weighted": 0.6212121212121212,
"eval_runtime": 1.9917,
"eval_samples_per_second": 66.275,
"eval_steps_per_second": 8.535,
"step": 464
},
{
"epoch": 8.03448275862069,
"grad_norm": 5.481504440307617,
"learning_rate": 8.135376756066412e-05,
"loss": 0.4917,
"step": 466
},
{
"epoch": 8.068965517241379,
"grad_norm": 7.311232566833496,
"learning_rate": 8.122605363984674e-05,
"loss": 0.7269,
"step": 468
},
{
"epoch": 8.10344827586207,
"grad_norm": 9.246880531311035,
"learning_rate": 8.109833971902938e-05,
"loss": 0.8737,
"step": 470
},
{
"epoch": 8.137931034482758,
"grad_norm": 8.400725364685059,
"learning_rate": 8.0970625798212e-05,
"loss": 0.5171,
"step": 472
},
{
"epoch": 8.172413793103448,
"grad_norm": 7.584453105926514,
"learning_rate": 8.084291187739465e-05,
"loss": 0.4877,
"step": 474
},
{
"epoch": 8.206896551724139,
"grad_norm": 3.6026341915130615,
"learning_rate": 8.071519795657727e-05,
"loss": 0.3995,
"step": 476
},
{
"epoch": 8.241379310344827,
"grad_norm": 11.562987327575684,
"learning_rate": 8.05874840357599e-05,
"loss": 0.7385,
"step": 478
},
{
"epoch": 8.275862068965518,
"grad_norm": 6.191165447235107,
"learning_rate": 8.045977011494253e-05,
"loss": 0.5029,
"step": 480
},
{
"epoch": 8.310344827586206,
"grad_norm": 6.5581583976745605,
"learning_rate": 8.033205619412516e-05,
"loss": 0.4527,
"step": 482
},
{
"epoch": 8.344827586206897,
"grad_norm": 5.58575963973999,
"learning_rate": 8.020434227330779e-05,
"loss": 0.505,
"step": 484
},
{
"epoch": 8.379310344827585,
"grad_norm": 7.359447956085205,
"learning_rate": 8.007662835249042e-05,
"loss": 0.3806,
"step": 486
},
{
"epoch": 8.413793103448276,
"grad_norm": 9.096012115478516,
"learning_rate": 7.994891443167306e-05,
"loss": 0.7836,
"step": 488
},
{
"epoch": 8.448275862068966,
"grad_norm": 5.758499622344971,
"learning_rate": 7.982120051085569e-05,
"loss": 0.6284,
"step": 490
},
{
"epoch": 8.482758620689655,
"grad_norm": 9.046234130859375,
"learning_rate": 7.969348659003832e-05,
"loss": 0.8215,
"step": 492
},
{
"epoch": 8.517241379310345,
"grad_norm": 10.501338958740234,
"learning_rate": 7.956577266922095e-05,
"loss": 0.7549,
"step": 494
},
{
"epoch": 8.551724137931034,
"grad_norm": 12.925354957580566,
"learning_rate": 7.943805874840358e-05,
"loss": 0.9301,
"step": 496
},
{
"epoch": 8.586206896551724,
"grad_norm": 9.080878257751465,
"learning_rate": 7.931034482758621e-05,
"loss": 0.9217,
"step": 498
},
{
"epoch": 8.620689655172415,
"grad_norm": 11.387174606323242,
"learning_rate": 7.918263090676885e-05,
"loss": 1.1331,
"step": 500
},
{
"epoch": 8.655172413793103,
"grad_norm": 4.864457130432129,
"learning_rate": 7.905491698595148e-05,
"loss": 0.3399,
"step": 502
},
{
"epoch": 8.689655172413794,
"grad_norm": 9.243449211120605,
"learning_rate": 7.892720306513411e-05,
"loss": 0.8055,
"step": 504
},
{
"epoch": 8.724137931034482,
"grad_norm": 13.595425605773926,
"learning_rate": 7.879948914431673e-05,
"loss": 1.2361,
"step": 506
},
{
"epoch": 8.758620689655173,
"grad_norm": 9.03889274597168,
"learning_rate": 7.867177522349937e-05,
"loss": 1.2648,
"step": 508
},
{
"epoch": 8.793103448275861,
"grad_norm": 10.691272735595703,
"learning_rate": 7.854406130268199e-05,
"loss": 0.8417,
"step": 510
},
{
"epoch": 8.827586206896552,
"grad_norm": 15.45632266998291,
"learning_rate": 7.841634738186464e-05,
"loss": 1.1186,
"step": 512
},
{
"epoch": 8.862068965517242,
"grad_norm": 10.35450267791748,
"learning_rate": 7.828863346104725e-05,
"loss": 1.0734,
"step": 514
},
{
"epoch": 8.89655172413793,
"grad_norm": 5.785059452056885,
"learning_rate": 7.81609195402299e-05,
"loss": 0.5601,
"step": 516
},
{
"epoch": 8.931034482758621,
"grad_norm": 8.064878463745117,
"learning_rate": 7.803320561941252e-05,
"loss": 0.8273,
"step": 518
},
{
"epoch": 8.96551724137931,
"grad_norm": 9.697986602783203,
"learning_rate": 7.790549169859515e-05,
"loss": 0.7774,
"step": 520
},
{
"epoch": 9.0,
"grad_norm": 14.628928184509277,
"learning_rate": 7.777777777777778e-05,
"loss": 1.3416,
"step": 522
},
{
"epoch": 9.0,
"eval_accuracy": 0.5,
"eval_f1_macro": 0.3996626928502454,
"eval_f1_micro": 0.5,
"eval_f1_weighted": 0.4689747329928523,
"eval_loss": 1.3995325565338135,
"eval_precision_macro": 0.4217794875689612,
"eval_precision_micro": 0.5,
"eval_precision_weighted": 0.5023755373635757,
"eval_recall_macro": 0.45092418546365914,
"eval_recall_micro": 0.5,
"eval_recall_weighted": 0.5,
"eval_runtime": 1.9802,
"eval_samples_per_second": 66.659,
"eval_steps_per_second": 8.585,
"step": 522
},
{
"epoch": 9.03448275862069,
"grad_norm": 8.5454683303833,
"learning_rate": 7.765006385696041e-05,
"loss": 0.7221,
"step": 524
},
{
"epoch": 9.068965517241379,
"grad_norm": 12.215627670288086,
"learning_rate": 7.752234993614304e-05,
"loss": 1.1389,
"step": 526
},
{
"epoch": 9.10344827586207,
"grad_norm": 7.772884368896484,
"learning_rate": 7.739463601532568e-05,
"loss": 0.5547,
"step": 528
},
{
"epoch": 9.137931034482758,
"grad_norm": 6.657981872558594,
"learning_rate": 7.726692209450831e-05,
"loss": 0.4854,
"step": 530
},
{
"epoch": 9.172413793103448,
"grad_norm": 4.549035549163818,
"learning_rate": 7.713920817369094e-05,
"loss": 0.4071,
"step": 532
},
{
"epoch": 9.206896551724139,
"grad_norm": 10.139058113098145,
"learning_rate": 7.701149425287356e-05,
"loss": 0.8507,
"step": 534
},
{
"epoch": 9.241379310344827,
"grad_norm": 9.346701622009277,
"learning_rate": 7.68837803320562e-05,
"loss": 0.5146,
"step": 536
},
{
"epoch": 9.275862068965518,
"grad_norm": 8.409459114074707,
"learning_rate": 7.675606641123882e-05,
"loss": 0.7852,
"step": 538
},
{
"epoch": 9.310344827586206,
"grad_norm": 10.624256134033203,
"learning_rate": 7.662835249042147e-05,
"loss": 0.7101,
"step": 540
},
{
"epoch": 9.344827586206897,
"grad_norm": 10.048233032226562,
"learning_rate": 7.650063856960408e-05,
"loss": 0.9228,
"step": 542
},
{
"epoch": 9.379310344827585,
"grad_norm": 7.511538982391357,
"learning_rate": 7.637292464878673e-05,
"loss": 0.4572,
"step": 544
},
{
"epoch": 9.413793103448276,
"grad_norm": 5.808769226074219,
"learning_rate": 7.624521072796935e-05,
"loss": 0.3693,
"step": 546
},
{
"epoch": 9.448275862068966,
"grad_norm": 7.7615156173706055,
"learning_rate": 7.611749680715198e-05,
"loss": 0.5247,
"step": 548
},
{
"epoch": 9.482758620689655,
"grad_norm": 13.348309516906738,
"learning_rate": 7.598978288633461e-05,
"loss": 1.0443,
"step": 550
},
{
"epoch": 9.517241379310345,
"grad_norm": 9.64974594116211,
"learning_rate": 7.586206896551724e-05,
"loss": 0.563,
"step": 552
},
{
"epoch": 9.551724137931034,
"grad_norm": 11.617941856384277,
"learning_rate": 7.573435504469987e-05,
"loss": 1.021,
"step": 554
},
{
"epoch": 9.586206896551724,
"grad_norm": 8.760254859924316,
"learning_rate": 7.56066411238825e-05,
"loss": 0.7207,
"step": 556
},
{
"epoch": 9.620689655172415,
"grad_norm": 9.84865951538086,
"learning_rate": 7.547892720306514e-05,
"loss": 0.4963,
"step": 558
},
{
"epoch": 9.655172413793103,
"grad_norm": 9.160222053527832,
"learning_rate": 7.535121328224777e-05,
"loss": 0.8536,
"step": 560
},
{
"epoch": 9.689655172413794,
"grad_norm": 6.880321502685547,
"learning_rate": 7.522349936143039e-05,
"loss": 1.1081,
"step": 562
},
{
"epoch": 9.724137931034482,
"grad_norm": 7.537998676300049,
"learning_rate": 7.509578544061303e-05,
"loss": 0.5926,
"step": 564
},
{
"epoch": 9.758620689655173,
"grad_norm": 10.582006454467773,
"learning_rate": 7.496807151979565e-05,
"loss": 0.9216,
"step": 566
},
{
"epoch": 9.793103448275861,
"grad_norm": 8.612391471862793,
"learning_rate": 7.48403575989783e-05,
"loss": 0.4138,
"step": 568
},
{
"epoch": 9.827586206896552,
"grad_norm": 9.592196464538574,
"learning_rate": 7.471264367816091e-05,
"loss": 0.7652,
"step": 570
},
{
"epoch": 9.862068965517242,
"grad_norm": 7.557244300842285,
"learning_rate": 7.458492975734356e-05,
"loss": 0.4475,
"step": 572
},
{
"epoch": 9.89655172413793,
"grad_norm": 12.449685096740723,
"learning_rate": 7.445721583652618e-05,
"loss": 0.7721,
"step": 574
},
{
"epoch": 9.931034482758621,
"grad_norm": 7.203516960144043,
"learning_rate": 7.432950191570882e-05,
"loss": 1.0207,
"step": 576
},
{
"epoch": 9.96551724137931,
"grad_norm": 4.533941745758057,
"learning_rate": 7.420178799489144e-05,
"loss": 0.5956,
"step": 578
},
{
"epoch": 10.0,
"grad_norm": 21.596515655517578,
"learning_rate": 7.407407407407407e-05,
"loss": 0.758,
"step": 580
},
{
"epoch": 10.0,
"eval_accuracy": 0.5984848484848485,
"eval_f1_macro": 0.5065544106205532,
"eval_f1_micro": 0.5984848484848485,
"eval_f1_weighted": 0.5835752856194464,
"eval_loss": 1.1693105697631836,
"eval_precision_macro": 0.5261672850958565,
"eval_precision_micro": 0.5984848484848485,
"eval_precision_weighted": 0.6030692476715204,
"eval_recall_macro": 0.5278665413533835,
"eval_recall_micro": 0.5984848484848485,
"eval_recall_weighted": 0.5984848484848485,
"eval_runtime": 2.0002,
"eval_samples_per_second": 65.992,
"eval_steps_per_second": 8.499,
"step": 580
},
{
"epoch": 10.03448275862069,
"grad_norm": 5.144809246063232,
"learning_rate": 7.39463601532567e-05,
"loss": 0.2258,
"step": 582
},
{
"epoch": 10.068965517241379,
"grad_norm": 8.242935180664062,
"learning_rate": 7.381864623243934e-05,
"loss": 0.45,
"step": 584
},
{
"epoch": 10.10344827586207,
"grad_norm": 3.4672491550445557,
"learning_rate": 7.369093231162197e-05,
"loss": 0.4987,
"step": 586
},
{
"epoch": 10.137931034482758,
"grad_norm": 1.9440521001815796,
"learning_rate": 7.35632183908046e-05,
"loss": 0.2707,
"step": 588
},
{
"epoch": 10.172413793103448,
"grad_norm": 11.417020797729492,
"learning_rate": 7.343550446998723e-05,
"loss": 0.6887,
"step": 590
},
{
"epoch": 10.206896551724139,
"grad_norm": 6.448490142822266,
"learning_rate": 7.330779054916986e-05,
"loss": 0.3351,
"step": 592
},
{
"epoch": 10.241379310344827,
"grad_norm": 21.801414489746094,
"learning_rate": 7.31800766283525e-05,
"loss": 0.8969,
"step": 594
},
{
"epoch": 10.275862068965518,
"grad_norm": 3.7770888805389404,
"learning_rate": 7.305236270753513e-05,
"loss": 0.3175,
"step": 596
},
{
"epoch": 10.310344827586206,
"grad_norm": 12.79362678527832,
"learning_rate": 7.292464878671776e-05,
"loss": 0.756,
"step": 598
},
{
"epoch": 10.344827586206897,
"grad_norm": 12.172070503234863,
"learning_rate": 7.279693486590039e-05,
"loss": 0.6943,
"step": 600
},
{
"epoch": 10.379310344827585,
"grad_norm": 11.363933563232422,
"learning_rate": 7.266922094508302e-05,
"loss": 0.7048,
"step": 602
},
{
"epoch": 10.413793103448276,
"grad_norm": 7.672904968261719,
"learning_rate": 7.254150702426565e-05,
"loss": 0.2959,
"step": 604
},
{
"epoch": 10.448275862068966,
"grad_norm": 9.661025047302246,
"learning_rate": 7.241379310344828e-05,
"loss": 0.4829,
"step": 606
},
{
"epoch": 10.482758620689655,
"grad_norm": 8.507035255432129,
"learning_rate": 7.22860791826309e-05,
"loss": 0.2263,
"step": 608
},
{
"epoch": 10.517241379310345,
"grad_norm": 6.187190055847168,
"learning_rate": 7.215836526181355e-05,
"loss": 0.5414,
"step": 610
},
{
"epoch": 10.551724137931034,
"grad_norm": 11.629401206970215,
"learning_rate": 7.203065134099617e-05,
"loss": 0.7474,
"step": 612
},
{
"epoch": 10.586206896551724,
"grad_norm": 7.820981025695801,
"learning_rate": 7.190293742017881e-05,
"loss": 0.4997,
"step": 614
},
{
"epoch": 10.620689655172415,
"grad_norm": 14.111104011535645,
"learning_rate": 7.177522349936143e-05,
"loss": 0.4296,
"step": 616
},
{
"epoch": 10.655172413793103,
"grad_norm": 10.963750839233398,
"learning_rate": 7.164750957854408e-05,
"loss": 0.4871,
"step": 618
},
{
"epoch": 10.689655172413794,
"grad_norm": 17.322771072387695,
"learning_rate": 7.151979565772669e-05,
"loss": 0.9553,
"step": 620
},
{
"epoch": 10.724137931034482,
"grad_norm": 9.986748695373535,
"learning_rate": 7.139208173690932e-05,
"loss": 0.5509,
"step": 622
},
{
"epoch": 10.758620689655173,
"grad_norm": 9.360352516174316,
"learning_rate": 7.126436781609196e-05,
"loss": 0.6703,
"step": 624
},
{
"epoch": 10.793103448275861,
"grad_norm": 3.3035664558410645,
"learning_rate": 7.113665389527459e-05,
"loss": 0.4547,
"step": 626
},
{
"epoch": 10.827586206896552,
"grad_norm": 8.55704116821289,
"learning_rate": 7.100893997445722e-05,
"loss": 0.7755,
"step": 628
},
{
"epoch": 10.862068965517242,
"grad_norm": 12.647590637207031,
"learning_rate": 7.088122605363985e-05,
"loss": 0.8421,
"step": 630
},
{
"epoch": 10.89655172413793,
"grad_norm": 4.907244682312012,
"learning_rate": 7.075351213282248e-05,
"loss": 0.3781,
"step": 632
},
{
"epoch": 10.931034482758621,
"grad_norm": 10.101003646850586,
"learning_rate": 7.062579821200511e-05,
"loss": 0.6021,
"step": 634
},
{
"epoch": 10.96551724137931,
"grad_norm": 15.203731536865234,
"learning_rate": 7.049808429118773e-05,
"loss": 0.9916,
"step": 636
},
{
"epoch": 11.0,
"grad_norm": 20.572900772094727,
"learning_rate": 7.037037037037038e-05,
"loss": 0.7758,
"step": 638
},
{
"epoch": 11.0,
"eval_accuracy": 0.6515151515151515,
"eval_f1_macro": 0.5490749541247198,
"eval_f1_micro": 0.6515151515151515,
"eval_f1_weighted": 0.6319976363055722,
"eval_loss": 1.0799717903137207,
"eval_precision_macro": 0.5729282057650646,
"eval_precision_micro": 0.6515151515151515,
"eval_precision_weighted": 0.6500887606871381,
"eval_recall_macro": 0.5710213032581454,
"eval_recall_micro": 0.6515151515151515,
"eval_recall_weighted": 0.6515151515151515,
"eval_runtime": 1.9879,
"eval_samples_per_second": 66.401,
"eval_steps_per_second": 8.552,
"step": 638
},
{
"epoch": 11.03448275862069,
"grad_norm": 6.214511871337891,
"learning_rate": 7.0242656449553e-05,
"loss": 0.4373,
"step": 640
},
{
"epoch": 11.068965517241379,
"grad_norm": 13.804490089416504,
"learning_rate": 7.011494252873564e-05,
"loss": 0.7242,
"step": 642
},
{
"epoch": 11.10344827586207,
"grad_norm": 7.258790969848633,
"learning_rate": 6.998722860791826e-05,
"loss": 0.3412,
"step": 644
},
{
"epoch": 11.137931034482758,
"grad_norm": 13.48838996887207,
"learning_rate": 6.98595146871009e-05,
"loss": 0.7093,
"step": 646
},
{
"epoch": 11.172413793103448,
"grad_norm": 3.5530500411987305,
"learning_rate": 6.973180076628352e-05,
"loss": 0.1557,
"step": 648
},
{
"epoch": 11.206896551724139,
"grad_norm": 5.173003196716309,
"learning_rate": 6.960408684546615e-05,
"loss": 0.2043,
"step": 650
},
{
"epoch": 11.241379310344827,
"grad_norm": 8.948324203491211,
"learning_rate": 6.947637292464879e-05,
"loss": 0.3718,
"step": 652
},
{
"epoch": 11.275862068965518,
"grad_norm": 13.394603729248047,
"learning_rate": 6.934865900383142e-05,
"loss": 0.4858,
"step": 654
},
{
"epoch": 11.310344827586206,
"grad_norm": 3.575120210647583,
"learning_rate": 6.922094508301405e-05,
"loss": 0.2133,
"step": 656
},
{
"epoch": 11.344827586206897,
"grad_norm": 6.710623264312744,
"learning_rate": 6.909323116219668e-05,
"loss": 0.4072,
"step": 658
},
{
"epoch": 11.379310344827585,
"grad_norm": 14.04590892791748,
"learning_rate": 6.896551724137931e-05,
"loss": 0.9938,
"step": 660
},
{
"epoch": 11.413793103448276,
"grad_norm": 7.6388444900512695,
"learning_rate": 6.883780332056194e-05,
"loss": 0.4864,
"step": 662
},
{
"epoch": 11.448275862068966,
"grad_norm": 6.261629104614258,
"learning_rate": 6.871008939974458e-05,
"loss": 0.3112,
"step": 664
},
{
"epoch": 11.482758620689655,
"grad_norm": 7.990070819854736,
"learning_rate": 6.858237547892721e-05,
"loss": 0.6052,
"step": 666
},
{
"epoch": 11.517241379310345,
"grad_norm": 5.1240949630737305,
"learning_rate": 6.845466155810984e-05,
"loss": 0.6029,
"step": 668
},
{
"epoch": 11.551724137931034,
"grad_norm": 3.3157217502593994,
"learning_rate": 6.832694763729247e-05,
"loss": 0.2027,
"step": 670
},
{
"epoch": 11.586206896551724,
"grad_norm": 9.071480751037598,
"learning_rate": 6.81992337164751e-05,
"loss": 0.257,
"step": 672
},
{
"epoch": 11.620689655172415,
"grad_norm": 13.492743492126465,
"learning_rate": 6.807151979565773e-05,
"loss": 0.4914,
"step": 674
},
{
"epoch": 11.655172413793103,
"grad_norm": 1.5881291627883911,
"learning_rate": 6.794380587484037e-05,
"loss": 0.3057,
"step": 676
},
{
"epoch": 11.689655172413794,
"grad_norm": 13.12863540649414,
"learning_rate": 6.781609195402298e-05,
"loss": 0.4456,
"step": 678
},
{
"epoch": 11.724137931034482,
"grad_norm": 15.156206130981445,
"learning_rate": 6.768837803320563e-05,
"loss": 0.4095,
"step": 680
},
{
"epoch": 11.758620689655173,
"grad_norm": 12.477018356323242,
"learning_rate": 6.756066411238825e-05,
"loss": 0.6728,
"step": 682
},
{
"epoch": 11.793103448275861,
"grad_norm": 6.536929607391357,
"learning_rate": 6.74329501915709e-05,
"loss": 0.3572,
"step": 684
},
{
"epoch": 11.827586206896552,
"grad_norm": 8.295530319213867,
"learning_rate": 6.730523627075351e-05,
"loss": 0.3506,
"step": 686
},
{
"epoch": 11.862068965517242,
"grad_norm": 2.183727979660034,
"learning_rate": 6.717752234993616e-05,
"loss": 0.3145,
"step": 688
},
{
"epoch": 11.89655172413793,
"grad_norm": 9.358091354370117,
"learning_rate": 6.704980842911877e-05,
"loss": 0.5877,
"step": 690
},
{
"epoch": 11.931034482758621,
"grad_norm": 7.218227386474609,
"learning_rate": 6.69220945083014e-05,
"loss": 0.4683,
"step": 692
},
{
"epoch": 11.96551724137931,
"grad_norm": 6.784433364868164,
"learning_rate": 6.679438058748404e-05,
"loss": 0.3328,
"step": 694
},
{
"epoch": 12.0,
"grad_norm": 4.573869228363037,
"learning_rate": 6.666666666666667e-05,
"loss": 0.2319,
"step": 696
},
{
"epoch": 12.0,
"eval_accuracy": 0.6742424242424242,
"eval_f1_macro": 0.546672932330827,
"eval_f1_micro": 0.6742424242424242,
"eval_f1_weighted": 0.6409964779372674,
"eval_loss": 1.1553009748458862,
"eval_precision_macro": 0.5816405209018068,
"eval_precision_micro": 0.6742424242424242,
"eval_precision_weighted": 0.6699464486928733,
"eval_recall_macro": 0.5711466165413533,
"eval_recall_micro": 0.6742424242424242,
"eval_recall_weighted": 0.6742424242424242,
"eval_runtime": 1.9872,
"eval_samples_per_second": 66.426,
"eval_steps_per_second": 8.555,
"step": 696
},
{
"epoch": 12.03448275862069,
"grad_norm": 1.5767076015472412,
"learning_rate": 6.65389527458493e-05,
"loss": 0.235,
"step": 698
},
{
"epoch": 12.068965517241379,
"grad_norm": 7.686869144439697,
"learning_rate": 6.641123882503193e-05,
"loss": 0.5692,
"step": 700
},
{
"epoch": 12.10344827586207,
"grad_norm": 13.89122200012207,
"learning_rate": 6.628352490421456e-05,
"loss": 0.4877,
"step": 702
},
{
"epoch": 12.137931034482758,
"grad_norm": 4.811250686645508,
"learning_rate": 6.61558109833972e-05,
"loss": 0.2121,
"step": 704
},
{
"epoch": 12.172413793103448,
"grad_norm": 2.0311198234558105,
"learning_rate": 6.602809706257981e-05,
"loss": 0.2037,
"step": 706
},
{
"epoch": 12.206896551724139,
"grad_norm": 7.1766133308410645,
"learning_rate": 6.590038314176246e-05,
"loss": 0.2169,
"step": 708
},
{
"epoch": 12.241379310344827,
"grad_norm": 5.523502826690674,
"learning_rate": 6.577266922094508e-05,
"loss": 1.335,
"step": 710
},
{
"epoch": 12.275862068965518,
"grad_norm": 16.38228416442871,
"learning_rate": 6.564495530012772e-05,
"loss": 0.7502,
"step": 712
},
{
"epoch": 12.310344827586206,
"grad_norm": 8.219087600708008,
"learning_rate": 6.551724137931034e-05,
"loss": 0.7137,
"step": 714
},
{
"epoch": 12.344827586206897,
"grad_norm": 4.552286148071289,
"learning_rate": 6.538952745849299e-05,
"loss": 0.2233,
"step": 716
},
{
"epoch": 12.379310344827585,
"grad_norm": 2.2111048698425293,
"learning_rate": 6.52618135376756e-05,
"loss": 0.1856,
"step": 718
},
{
"epoch": 12.413793103448276,
"grad_norm": 10.02652359008789,
"learning_rate": 6.513409961685824e-05,
"loss": 0.4808,
"step": 720
},
{
"epoch": 12.448275862068966,
"grad_norm": 4.486725807189941,
"learning_rate": 6.500638569604087e-05,
"loss": 0.4057,
"step": 722
},
{
"epoch": 12.482758620689655,
"grad_norm": 1.8516465425491333,
"learning_rate": 6.48786717752235e-05,
"loss": 0.1974,
"step": 724
},
{
"epoch": 12.517241379310345,
"grad_norm": 6.200466156005859,
"learning_rate": 6.475095785440613e-05,
"loss": 0.2568,
"step": 726
},
{
"epoch": 12.551724137931034,
"grad_norm": 16.36850929260254,
"learning_rate": 6.462324393358876e-05,
"loss": 0.5874,
"step": 728
},
{
"epoch": 12.586206896551724,
"grad_norm": 4.7899346351623535,
"learning_rate": 6.44955300127714e-05,
"loss": 0.3983,
"step": 730
},
{
"epoch": 12.620689655172415,
"grad_norm": 7.357030868530273,
"learning_rate": 6.436781609195403e-05,
"loss": 0.3513,
"step": 732
},
{
"epoch": 12.655172413793103,
"grad_norm": 5.966126441955566,
"learning_rate": 6.424010217113666e-05,
"loss": 0.4016,
"step": 734
},
{
"epoch": 12.689655172413794,
"grad_norm": 6.382541656494141,
"learning_rate": 6.411238825031929e-05,
"loss": 0.1682,
"step": 736
},
{
"epoch": 12.724137931034482,
"grad_norm": 8.352551460266113,
"learning_rate": 6.398467432950191e-05,
"loss": 0.3286,
"step": 738
},
{
"epoch": 12.758620689655173,
"grad_norm": 9.189525604248047,
"learning_rate": 6.385696040868455e-05,
"loss": 0.49,
"step": 740
},
{
"epoch": 12.793103448275861,
"grad_norm": 15.152857780456543,
"learning_rate": 6.372924648786717e-05,
"loss": 0.1309,
"step": 742
},
{
"epoch": 12.827586206896552,
"grad_norm": 3.6843764781951904,
"learning_rate": 6.360153256704982e-05,
"loss": 0.1974,
"step": 744
},
{
"epoch": 12.862068965517242,
"grad_norm": 10.768837928771973,
"learning_rate": 6.347381864623243e-05,
"loss": 0.8215,
"step": 746
},
{
"epoch": 12.89655172413793,
"grad_norm": 7.068192005157471,
"learning_rate": 6.334610472541508e-05,
"loss": 0.2304,
"step": 748
},
{
"epoch": 12.931034482758621,
"grad_norm": 12.653234481811523,
"learning_rate": 6.32183908045977e-05,
"loss": 0.5768,
"step": 750
},
{
"epoch": 12.96551724137931,
"grad_norm": 7.09431266784668,
"learning_rate": 6.309067688378033e-05,
"loss": 0.6238,
"step": 752
},
{
"epoch": 13.0,
"grad_norm": 7.309642791748047,
"learning_rate": 6.296296296296296e-05,
"loss": 0.3528,
"step": 754
},
{
"epoch": 13.0,
"eval_accuracy": 0.6893939393939394,
"eval_f1_macro": 0.5794293474965745,
"eval_f1_micro": 0.6893939393939394,
"eval_f1_weighted": 0.6711440544996694,
"eval_loss": 1.1685433387756348,
"eval_precision_macro": 0.5887005339770317,
"eval_precision_micro": 0.6893939393939394,
"eval_precision_weighted": 0.6752094679514034,
"eval_recall_macro": 0.5955043859649124,
"eval_recall_micro": 0.6893939393939394,
"eval_recall_weighted": 0.6893939393939394,
"eval_runtime": 1.9392,
"eval_samples_per_second": 68.07,
"eval_steps_per_second": 8.767,
"step": 754
},
{
"epoch": 13.03448275862069,
"grad_norm": 9.388544082641602,
"learning_rate": 6.283524904214559e-05,
"loss": 0.6725,
"step": 756
},
{
"epoch": 13.068965517241379,
"grad_norm": 15.097204208374023,
"learning_rate": 6.270753512132822e-05,
"loss": 0.6723,
"step": 758
},
{
"epoch": 13.10344827586207,
"grad_norm": 7.6261796951293945,
"learning_rate": 6.257982120051086e-05,
"loss": 0.3793,
"step": 760
},
{
"epoch": 13.137931034482758,
"grad_norm": 2.728222370147705,
"learning_rate": 6.245210727969349e-05,
"loss": 0.1291,
"step": 762
},
{
"epoch": 13.172413793103448,
"grad_norm": 7.391420841217041,
"learning_rate": 6.232439335887612e-05,
"loss": 0.4385,
"step": 764
},
{
"epoch": 13.206896551724139,
"grad_norm": 9.982900619506836,
"learning_rate": 6.219667943805875e-05,
"loss": 0.3353,
"step": 766
},
{
"epoch": 13.241379310344827,
"grad_norm": 7.945486545562744,
"learning_rate": 6.206896551724138e-05,
"loss": 0.1623,
"step": 768
},
{
"epoch": 13.275862068965518,
"grad_norm": 11.18921184539795,
"learning_rate": 6.194125159642401e-05,
"loss": 0.1834,
"step": 770
},
{
"epoch": 13.310344827586206,
"grad_norm": 1.6263447999954224,
"learning_rate": 6.181353767560665e-05,
"loss": 0.0572,
"step": 772
},
{
"epoch": 13.344827586206897,
"grad_norm": 8.16580581665039,
"learning_rate": 6.168582375478928e-05,
"loss": 0.3301,
"step": 774
},
{
"epoch": 13.379310344827585,
"grad_norm": 2.9834048748016357,
"learning_rate": 6.155810983397191e-05,
"loss": 0.1367,
"step": 776
},
{
"epoch": 13.413793103448276,
"grad_norm": 9.801024436950684,
"learning_rate": 6.143039591315454e-05,
"loss": 0.2472,
"step": 778
},
{
"epoch": 13.448275862068966,
"grad_norm": 13.256599426269531,
"learning_rate": 6.130268199233716e-05,
"loss": 0.6242,
"step": 780
},
{
"epoch": 13.482758620689655,
"grad_norm": 12.600865364074707,
"learning_rate": 6.11749680715198e-05,
"loss": 0.387,
"step": 782
},
{
"epoch": 13.517241379310345,
"grad_norm": 12.394946098327637,
"learning_rate": 6.104725415070242e-05,
"loss": 0.3973,
"step": 784
},
{
"epoch": 13.551724137931034,
"grad_norm": 16.835023880004883,
"learning_rate": 6.091954022988506e-05,
"loss": 0.5736,
"step": 786
},
{
"epoch": 13.586206896551724,
"grad_norm": 10.348804473876953,
"learning_rate": 6.0791826309067686e-05,
"loss": 0.3203,
"step": 788
},
{
"epoch": 13.620689655172415,
"grad_norm": 10.827553749084473,
"learning_rate": 6.0664112388250325e-05,
"loss": 0.3311,
"step": 790
},
{
"epoch": 13.655172413793103,
"grad_norm": 4.213782787322998,
"learning_rate": 6.053639846743295e-05,
"loss": 0.0665,
"step": 792
},
{
"epoch": 13.689655172413794,
"grad_norm": 5.154265403747559,
"learning_rate": 6.040868454661558e-05,
"loss": 0.3031,
"step": 794
},
{
"epoch": 13.724137931034482,
"grad_norm": 18.196022033691406,
"learning_rate": 6.028097062579821e-05,
"loss": 0.8001,
"step": 796
},
{
"epoch": 13.758620689655173,
"grad_norm": 3.6230857372283936,
"learning_rate": 6.0153256704980845e-05,
"loss": 0.1406,
"step": 798
},
{
"epoch": 13.793103448275861,
"grad_norm": 0.9606171250343323,
"learning_rate": 6.0025542784163477e-05,
"loss": 0.0544,
"step": 800
},
{
"epoch": 13.827586206896552,
"grad_norm": 13.326411247253418,
"learning_rate": 5.989782886334611e-05,
"loss": 0.2442,
"step": 802
},
{
"epoch": 13.862068965517242,
"grad_norm": 8.518882751464844,
"learning_rate": 5.977011494252874e-05,
"loss": 0.2051,
"step": 804
},
{
"epoch": 13.89655172413793,
"grad_norm": 4.24729585647583,
"learning_rate": 5.964240102171137e-05,
"loss": 0.4868,
"step": 806
},
{
"epoch": 13.931034482758621,
"grad_norm": 0.7721981406211853,
"learning_rate": 5.9514687100893996e-05,
"loss": 0.1428,
"step": 808
},
{
"epoch": 13.96551724137931,
"grad_norm": 11.86274242401123,
"learning_rate": 5.9386973180076635e-05,
"loss": 0.7225,
"step": 810
},
{
"epoch": 14.0,
"grad_norm": 14.592938423156738,
"learning_rate": 5.925925925925926e-05,
"loss": 0.6238,
"step": 812
},
{
"epoch": 14.0,
"eval_accuracy": 0.6439393939393939,
"eval_f1_macro": 0.5579426867483074,
"eval_f1_micro": 0.6439393939393939,
"eval_f1_weighted": 0.6285409584589912,
"eval_loss": 1.1781333684921265,
"eval_precision_macro": 0.5450511546824911,
"eval_precision_micro": 0.6439393939393939,
"eval_precision_weighted": 0.6277759353712432,
"eval_recall_macro": 0.5856046365914788,
"eval_recall_micro": 0.6439393939393939,
"eval_recall_weighted": 0.6439393939393939,
"eval_runtime": 1.9417,
"eval_samples_per_second": 67.982,
"eval_steps_per_second": 8.755,
"step": 812
},
{
"epoch": 14.03448275862069,
"grad_norm": 11.65465259552002,
"learning_rate": 5.91315453384419e-05,
"loss": 0.3351,
"step": 814
},
{
"epoch": 14.068965517241379,
"grad_norm": 9.413543701171875,
"learning_rate": 5.900383141762452e-05,
"loss": 0.3187,
"step": 816
},
{
"epoch": 14.10344827586207,
"grad_norm": 3.177682638168335,
"learning_rate": 5.887611749680716e-05,
"loss": 0.0781,
"step": 818
},
{
"epoch": 14.137931034482758,
"grad_norm": 9.098755836486816,
"learning_rate": 5.8748403575989787e-05,
"loss": 0.1169,
"step": 820
},
{
"epoch": 14.172413793103448,
"grad_norm": 1.7022143602371216,
"learning_rate": 5.862068965517241e-05,
"loss": 0.2369,
"step": 822
},
{
"epoch": 14.206896551724139,
"grad_norm": 6.126805782318115,
"learning_rate": 5.849297573435505e-05,
"loss": 0.3991,
"step": 824
},
{
"epoch": 14.241379310344827,
"grad_norm": 6.332644939422607,
"learning_rate": 5.8365261813537675e-05,
"loss": 0.1479,
"step": 826
},
{
"epoch": 14.275862068965518,
"grad_norm": 5.615021228790283,
"learning_rate": 5.823754789272031e-05,
"loss": 0.1755,
"step": 828
},
{
"epoch": 14.310344827586206,
"grad_norm": 12.874805450439453,
"learning_rate": 5.810983397190294e-05,
"loss": 0.4218,
"step": 830
},
{
"epoch": 14.344827586206897,
"grad_norm": 0.3466058671474457,
"learning_rate": 5.798212005108558e-05,
"loss": 0.1017,
"step": 832
},
{
"epoch": 14.379310344827585,
"grad_norm": 0.1405513435602188,
"learning_rate": 5.78544061302682e-05,
"loss": 0.0431,
"step": 834
},
{
"epoch": 14.413793103448276,
"grad_norm": 2.837080717086792,
"learning_rate": 5.7726692209450826e-05,
"loss": 0.1162,
"step": 836
},
{
"epoch": 14.448275862068966,
"grad_norm": 0.49279162287712097,
"learning_rate": 5.7598978288633465e-05,
"loss": 0.094,
"step": 838
},
{
"epoch": 14.482758620689655,
"grad_norm": 3.950249671936035,
"learning_rate": 5.747126436781609e-05,
"loss": 0.2088,
"step": 840
},
{
"epoch": 14.517241379310345,
"grad_norm": 9.883584022521973,
"learning_rate": 5.734355044699873e-05,
"loss": 0.2122,
"step": 842
},
{
"epoch": 14.551724137931034,
"grad_norm": 8.051910400390625,
"learning_rate": 5.721583652618135e-05,
"loss": 0.3278,
"step": 844
},
{
"epoch": 14.586206896551724,
"grad_norm": 7.80661678314209,
"learning_rate": 5.708812260536399e-05,
"loss": 0.1348,
"step": 846
},
{
"epoch": 14.620689655172415,
"grad_norm": 6.488097190856934,
"learning_rate": 5.6960408684546617e-05,
"loss": 0.3984,
"step": 848
},
{
"epoch": 14.655172413793103,
"grad_norm": 12.74706745147705,
"learning_rate": 5.683269476372924e-05,
"loss": 0.3044,
"step": 850
},
{
"epoch": 14.689655172413794,
"grad_norm": 7.486535549163818,
"learning_rate": 5.670498084291188e-05,
"loss": 0.3548,
"step": 852
},
{
"epoch": 14.724137931034482,
"grad_norm": 4.710317611694336,
"learning_rate": 5.6577266922094505e-05,
"loss": 0.131,
"step": 854
},
{
"epoch": 14.758620689655173,
"grad_norm": 0.45310112833976746,
"learning_rate": 5.644955300127714e-05,
"loss": 0.2907,
"step": 856
},
{
"epoch": 14.793103448275861,
"grad_norm": 11.309956550598145,
"learning_rate": 5.632183908045977e-05,
"loss": 0.3132,
"step": 858
},
{
"epoch": 14.827586206896552,
"grad_norm": 16.222618103027344,
"learning_rate": 5.6194125159642407e-05,
"loss": 0.3499,
"step": 860
},
{
"epoch": 14.862068965517242,
"grad_norm": 19.321205139160156,
"learning_rate": 5.606641123882503e-05,
"loss": 1.193,
"step": 862
},
{
"epoch": 14.89655172413793,
"grad_norm": 14.328798294067383,
"learning_rate": 5.593869731800766e-05,
"loss": 0.1844,
"step": 864
},
{
"epoch": 14.931034482758621,
"grad_norm": 14.36063289642334,
"learning_rate": 5.5810983397190295e-05,
"loss": 1.2338,
"step": 866
},
{
"epoch": 14.96551724137931,
"grad_norm": 2.4482498168945312,
"learning_rate": 5.5683269476372927e-05,
"loss": 0.0565,
"step": 868
},
{
"epoch": 15.0,
"grad_norm": 9.643209457397461,
"learning_rate": 5.555555555555556e-05,
"loss": 0.1869,
"step": 870
},
{
"epoch": 15.0,
"eval_accuracy": 0.6060606060606061,
"eval_f1_macro": 0.5145970477437697,
"eval_f1_micro": 0.6060606060606061,
"eval_f1_weighted": 0.5982909919119969,
"eval_loss": 1.2305328845977783,
"eval_precision_macro": 0.5032325523840949,
"eval_precision_micro": 0.6060606060606061,
"eval_precision_weighted": 0.6012548090624109,
"eval_recall_macro": 0.5369360902255639,
"eval_recall_micro": 0.6060606060606061,
"eval_recall_weighted": 0.6060606060606061,
"eval_runtime": 1.9435,
"eval_samples_per_second": 67.917,
"eval_steps_per_second": 8.747,
"step": 870
},
{
"epoch": 15.03448275862069,
"grad_norm": 6.357883453369141,
"learning_rate": 5.542784163473819e-05,
"loss": 0.3321,
"step": 872
},
{
"epoch": 15.068965517241379,
"grad_norm": 13.48095989227295,
"learning_rate": 5.530012771392082e-05,
"loss": 0.1638,
"step": 874
},
{
"epoch": 15.10344827586207,
"grad_norm": 9.955169677734375,
"learning_rate": 5.517241379310345e-05,
"loss": 0.2777,
"step": 876
},
{
"epoch": 15.137931034482758,
"grad_norm": 6.751859664916992,
"learning_rate": 5.504469987228608e-05,
"loss": 0.1799,
"step": 878
},
{
"epoch": 15.172413793103448,
"grad_norm": 4.653921127319336,
"learning_rate": 5.491698595146872e-05,
"loss": 0.2553,
"step": 880
},
{
"epoch": 15.206896551724139,
"grad_norm": 5.335418224334717,
"learning_rate": 5.478927203065134e-05,
"loss": 0.1899,
"step": 882
},
{
"epoch": 15.241379310344827,
"grad_norm": 5.713738441467285,
"learning_rate": 5.466155810983398e-05,
"loss": 0.4833,
"step": 884
},
{
"epoch": 15.275862068965518,
"grad_norm": 5.765931129455566,
"learning_rate": 5.4533844189016605e-05,
"loss": 0.1149,
"step": 886
},
{
"epoch": 15.310344827586206,
"grad_norm": 8.66435718536377,
"learning_rate": 5.440613026819924e-05,
"loss": 0.3092,
"step": 888
},
{
"epoch": 15.344827586206897,
"grad_norm": 2.088322877883911,
"learning_rate": 5.427841634738187e-05,
"loss": 0.1967,
"step": 890
},
{
"epoch": 15.379310344827585,
"grad_norm": 12.177457809448242,
"learning_rate": 5.415070242656451e-05,
"loss": 0.1026,
"step": 892
},
{
"epoch": 15.413793103448276,
"grad_norm": 10.64006233215332,
"learning_rate": 5.402298850574713e-05,
"loss": 0.2939,
"step": 894
},
{
"epoch": 15.448275862068966,
"grad_norm": 18.743099212646484,
"learning_rate": 5.3895274584929756e-05,
"loss": 0.2609,
"step": 896
},
{
"epoch": 15.482758620689655,
"grad_norm": 10.08409309387207,
"learning_rate": 5.3767560664112395e-05,
"loss": 0.1403,
"step": 898
},
{
"epoch": 15.517241379310345,
"grad_norm": 10.607285499572754,
"learning_rate": 5.363984674329502e-05,
"loss": 0.2563,
"step": 900
},
{
"epoch": 15.551724137931034,
"grad_norm": 2.387784004211426,
"learning_rate": 5.351213282247766e-05,
"loss": 0.0304,
"step": 902
},
{
"epoch": 15.586206896551724,
"grad_norm": 1.1395808458328247,
"learning_rate": 5.338441890166028e-05,
"loss": 0.0853,
"step": 904
},
{
"epoch": 15.620689655172415,
"grad_norm": 7.147764682769775,
"learning_rate": 5.325670498084292e-05,
"loss": 0.3563,
"step": 906
},
{
"epoch": 15.655172413793103,
"grad_norm": 4.958852767944336,
"learning_rate": 5.3128991060025547e-05,
"loss": 0.1273,
"step": 908
},
{
"epoch": 15.689655172413794,
"grad_norm": 6.212413311004639,
"learning_rate": 5.300127713920817e-05,
"loss": 0.1748,
"step": 910
},
{
"epoch": 15.724137931034482,
"grad_norm": 1.9379197359085083,
"learning_rate": 5.287356321839081e-05,
"loss": 0.0297,
"step": 912
},
{
"epoch": 15.758620689655173,
"grad_norm": 1.3377583026885986,
"learning_rate": 5.2745849297573435e-05,
"loss": 0.0356,
"step": 914
},
{
"epoch": 15.793103448275861,
"grad_norm": 0.8058100938796997,
"learning_rate": 5.261813537675607e-05,
"loss": 0.1674,
"step": 916
},
{
"epoch": 15.827586206896552,
"grad_norm": 9.972665786743164,
"learning_rate": 5.24904214559387e-05,
"loss": 0.211,
"step": 918
},
{
"epoch": 15.862068965517242,
"grad_norm": 10.261574745178223,
"learning_rate": 5.236270753512134e-05,
"loss": 0.2202,
"step": 920
},
{
"epoch": 15.89655172413793,
"grad_norm": 4.782658576965332,
"learning_rate": 5.223499361430396e-05,
"loss": 0.1333,
"step": 922
},
{
"epoch": 15.931034482758621,
"grad_norm": 7.032027244567871,
"learning_rate": 5.2107279693486586e-05,
"loss": 0.2523,
"step": 924
},
{
"epoch": 15.96551724137931,
"grad_norm": 0.28720977902412415,
"learning_rate": 5.1979565772669225e-05,
"loss": 0.0323,
"step": 926
},
{
"epoch": 16.0,
"grad_norm": 9.86831283569336,
"learning_rate": 5.185185185185185e-05,
"loss": 0.1015,
"step": 928
},
{
"epoch": 16.0,
"eval_accuracy": 0.5909090909090909,
"eval_f1_macro": 0.501919425368257,
"eval_f1_micro": 0.5909090909090909,
"eval_f1_weighted": 0.5932466389209485,
"eval_loss": 1.3576174974441528,
"eval_precision_macro": 0.5439596385486938,
"eval_precision_micro": 0.5909090909090909,
"eval_precision_weighted": 0.6311543139507142,
"eval_recall_macro": 0.4959429824561404,
"eval_recall_micro": 0.5909090909090909,
"eval_recall_weighted": 0.5909090909090909,
"eval_runtime": 1.9838,
"eval_samples_per_second": 66.538,
"eval_steps_per_second": 8.569,
"step": 928
},
{
"epoch": 16.03448275862069,
"grad_norm": 2.869445323944092,
"learning_rate": 5.172413793103449e-05,
"loss": 0.3521,
"step": 930
},
{
"epoch": 16.06896551724138,
"grad_norm": 12.4180908203125,
"learning_rate": 5.159642401021711e-05,
"loss": 0.3301,
"step": 932
},
{
"epoch": 16.103448275862068,
"grad_norm": 3.6048808097839355,
"learning_rate": 5.146871008939975e-05,
"loss": 0.1339,
"step": 934
},
{
"epoch": 16.137931034482758,
"grad_norm": 0.6583413481712341,
"learning_rate": 5.1340996168582377e-05,
"loss": 0.3911,
"step": 936
},
{
"epoch": 16.17241379310345,
"grad_norm": 1.6497776508331299,
"learning_rate": 5.1213282247765e-05,
"loss": 0.2196,
"step": 938
},
{
"epoch": 16.20689655172414,
"grad_norm": 0.1662745475769043,
"learning_rate": 5.108556832694764e-05,
"loss": 0.0094,
"step": 940
},
{
"epoch": 16.24137931034483,
"grad_norm": 2.7962138652801514,
"learning_rate": 5.0957854406130265e-05,
"loss": 0.0605,
"step": 942
},
{
"epoch": 16.275862068965516,
"grad_norm": 5.833149433135986,
"learning_rate": 5.08301404853129e-05,
"loss": 0.2013,
"step": 944
},
{
"epoch": 16.310344827586206,
"grad_norm": 13.730070114135742,
"learning_rate": 5.070242656449553e-05,
"loss": 0.2639,
"step": 946
},
{
"epoch": 16.344827586206897,
"grad_norm": 5.876251220703125,
"learning_rate": 5.057471264367817e-05,
"loss": 0.3352,
"step": 948
},
{
"epoch": 16.379310344827587,
"grad_norm": 3.6209003925323486,
"learning_rate": 5.044699872286079e-05,
"loss": 0.1183,
"step": 950
},
{
"epoch": 16.413793103448278,
"grad_norm": 0.6604540944099426,
"learning_rate": 5.031928480204342e-05,
"loss": 0.0778,
"step": 952
},
{
"epoch": 16.448275862068964,
"grad_norm": 1.6417039632797241,
"learning_rate": 5.0191570881226055e-05,
"loss": 0.0523,
"step": 954
},
{
"epoch": 16.482758620689655,
"grad_norm": 4.871641159057617,
"learning_rate": 5.0063856960408687e-05,
"loss": 0.1488,
"step": 956
},
{
"epoch": 16.517241379310345,
"grad_norm": 0.6309295892715454,
"learning_rate": 4.993614303959132e-05,
"loss": 0.039,
"step": 958
},
{
"epoch": 16.551724137931036,
"grad_norm": 9.119877815246582,
"learning_rate": 4.980842911877395e-05,
"loss": 0.1025,
"step": 960
},
{
"epoch": 16.586206896551722,
"grad_norm": 1.3798292875289917,
"learning_rate": 4.968071519795658e-05,
"loss": 0.0307,
"step": 962
},
{
"epoch": 16.620689655172413,
"grad_norm": 0.3394834101200104,
"learning_rate": 4.955300127713921e-05,
"loss": 0.04,
"step": 964
},
{
"epoch": 16.655172413793103,
"grad_norm": 0.6206883788108826,
"learning_rate": 4.9425287356321845e-05,
"loss": 0.1483,
"step": 966
},
{
"epoch": 16.689655172413794,
"grad_norm": 9.715845108032227,
"learning_rate": 4.929757343550448e-05,
"loss": 0.3286,
"step": 968
},
{
"epoch": 16.724137931034484,
"grad_norm": 1.0101009607315063,
"learning_rate": 4.916985951468711e-05,
"loss": 0.158,
"step": 970
},
{
"epoch": 16.75862068965517,
"grad_norm": 17.89195442199707,
"learning_rate": 4.904214559386973e-05,
"loss": 0.0978,
"step": 972
},
{
"epoch": 16.79310344827586,
"grad_norm": 3.084064245223999,
"learning_rate": 4.8914431673052365e-05,
"loss": 0.0461,
"step": 974
},
{
"epoch": 16.82758620689655,
"grad_norm": 0.12563039362430573,
"learning_rate": 4.8786717752234997e-05,
"loss": 0.0234,
"step": 976
},
{
"epoch": 16.862068965517242,
"grad_norm": 1.2776066064834595,
"learning_rate": 4.865900383141763e-05,
"loss": 0.0931,
"step": 978
},
{
"epoch": 16.896551724137932,
"grad_norm": 2.729607343673706,
"learning_rate": 4.853128991060026e-05,
"loss": 0.0507,
"step": 980
},
{
"epoch": 16.93103448275862,
"grad_norm": 0.35684946179389954,
"learning_rate": 4.840357598978289e-05,
"loss": 0.1429,
"step": 982
},
{
"epoch": 16.96551724137931,
"grad_norm": 4.096828460693359,
"learning_rate": 4.827586206896552e-05,
"loss": 0.321,
"step": 984
},
{
"epoch": 17.0,
"grad_norm": 0.25383704900741577,
"learning_rate": 4.814814814814815e-05,
"loss": 0.3809,
"step": 986
},
{
"epoch": 17.0,
"eval_accuracy": 0.6590909090909091,
"eval_f1_macro": 0.5666514454561888,
"eval_f1_micro": 0.6590909090909091,
"eval_f1_weighted": 0.652650863169293,
"eval_loss": 1.2998155355453491,
"eval_precision_macro": 0.5827908086311447,
"eval_precision_micro": 0.6590909090909091,
"eval_precision_weighted": 0.6885351168439403,
"eval_recall_macro": 0.5838189223057644,
"eval_recall_micro": 0.6590909090909091,
"eval_recall_weighted": 0.6590909090909091,
"eval_runtime": 2.7743,
"eval_samples_per_second": 47.58,
"eval_steps_per_second": 6.128,
"step": 986
},
{
"epoch": 17.03448275862069,
"grad_norm": 2.5796058177948,
"learning_rate": 4.802043422733078e-05,
"loss": 0.0584,
"step": 988
},
{
"epoch": 17.06896551724138,
"grad_norm": 0.15105970203876495,
"learning_rate": 4.789272030651341e-05,
"loss": 0.0102,
"step": 990
},
{
"epoch": 17.103448275862068,
"grad_norm": 1.5413386821746826,
"learning_rate": 4.776500638569604e-05,
"loss": 0.039,
"step": 992
},
{
"epoch": 17.137931034482758,
"grad_norm": 20.432376861572266,
"learning_rate": 4.7637292464878675e-05,
"loss": 0.1994,
"step": 994
},
{
"epoch": 17.17241379310345,
"grad_norm": 13.159724235534668,
"learning_rate": 4.7509578544061307e-05,
"loss": 0.1881,
"step": 996
},
{
"epoch": 17.20689655172414,
"grad_norm": 0.3454015552997589,
"learning_rate": 4.738186462324394e-05,
"loss": 0.0048,
"step": 998
},
{
"epoch": 17.24137931034483,
"grad_norm": 20.270763397216797,
"learning_rate": 4.725415070242657e-05,
"loss": 0.123,
"step": 1000
},
{
"epoch": 17.275862068965516,
"grad_norm": 22.238080978393555,
"learning_rate": 4.7126436781609195e-05,
"loss": 0.2302,
"step": 1002
},
{
"epoch": 17.310344827586206,
"grad_norm": 2.1673974990844727,
"learning_rate": 4.6998722860791827e-05,
"loss": 0.0187,
"step": 1004
},
{
"epoch": 17.344827586206897,
"grad_norm": 0.3968747556209564,
"learning_rate": 4.687100893997446e-05,
"loss": 0.0076,
"step": 1006
},
{
"epoch": 17.379310344827587,
"grad_norm": 0.5099461078643799,
"learning_rate": 4.674329501915709e-05,
"loss": 0.0134,
"step": 1008
},
{
"epoch": 17.413793103448278,
"grad_norm": 8.667265892028809,
"learning_rate": 4.661558109833972e-05,
"loss": 0.2712,
"step": 1010
},
{
"epoch": 17.448275862068964,
"grad_norm": 6.667606353759766,
"learning_rate": 4.648786717752235e-05,
"loss": 0.1044,
"step": 1012
},
{
"epoch": 17.482758620689655,
"grad_norm": 13.070887565612793,
"learning_rate": 4.6360153256704985e-05,
"loss": 0.3492,
"step": 1014
},
{
"epoch": 17.517241379310345,
"grad_norm": 4.142744064331055,
"learning_rate": 4.623243933588761e-05,
"loss": 0.0549,
"step": 1016
},
{
"epoch": 17.551724137931036,
"grad_norm": 0.10112213343381882,
"learning_rate": 4.610472541507024e-05,
"loss": 0.0567,
"step": 1018
},
{
"epoch": 17.586206896551722,
"grad_norm": 0.03555409982800484,
"learning_rate": 4.597701149425287e-05,
"loss": 0.0382,
"step": 1020
},
{
"epoch": 17.620689655172413,
"grad_norm": 0.5085342526435852,
"learning_rate": 4.5849297573435505e-05,
"loss": 0.0261,
"step": 1022
},
{
"epoch": 17.655172413793103,
"grad_norm": 1.362773060798645,
"learning_rate": 4.5721583652618137e-05,
"loss": 0.1,
"step": 1024
},
{
"epoch": 17.689655172413794,
"grad_norm": 2.903794050216675,
"learning_rate": 4.559386973180077e-05,
"loss": 0.0157,
"step": 1026
},
{
"epoch": 17.724137931034484,
"grad_norm": 0.29353153705596924,
"learning_rate": 4.54661558109834e-05,
"loss": 0.0185,
"step": 1028
},
{
"epoch": 17.75862068965517,
"grad_norm": 4.77042293548584,
"learning_rate": 4.5338441890166025e-05,
"loss": 0.0405,
"step": 1030
},
{
"epoch": 17.79310344827586,
"grad_norm": 2.864474296569824,
"learning_rate": 4.5210727969348656e-05,
"loss": 0.3131,
"step": 1032
},
{
"epoch": 17.82758620689655,
"grad_norm": 1.4102516174316406,
"learning_rate": 4.508301404853129e-05,
"loss": 0.1175,
"step": 1034
},
{
"epoch": 17.862068965517242,
"grad_norm": 5.741951942443848,
"learning_rate": 4.495530012771392e-05,
"loss": 0.3193,
"step": 1036
},
{
"epoch": 17.896551724137932,
"grad_norm": 0.3935476243495941,
"learning_rate": 4.482758620689655e-05,
"loss": 0.0074,
"step": 1038
},
{
"epoch": 17.93103448275862,
"grad_norm": 6.773815155029297,
"learning_rate": 4.469987228607918e-05,
"loss": 0.0759,
"step": 1040
},
{
"epoch": 17.96551724137931,
"grad_norm": 14.86123275756836,
"learning_rate": 4.4572158365261815e-05,
"loss": 0.1914,
"step": 1042
},
{
"epoch": 18.0,
"grad_norm": 5.217438697814941,
"learning_rate": 4.4444444444444447e-05,
"loss": 0.0887,
"step": 1044
},
{
"epoch": 18.0,
"eval_accuracy": 0.6666666666666666,
"eval_f1_macro": 0.5571781194602418,
"eval_f1_micro": 0.6666666666666666,
"eval_f1_weighted": 0.6489059749380235,
"eval_loss": 1.4153717756271362,
"eval_precision_macro": 0.5682278640899331,
"eval_precision_micro": 0.6666666666666666,
"eval_precision_weighted": 0.6518116175012726,
"eval_recall_macro": 0.568342731829574,
"eval_recall_micro": 0.6666666666666666,
"eval_recall_weighted": 0.6666666666666666,
"eval_runtime": 2.0979,
"eval_samples_per_second": 62.919,
"eval_steps_per_second": 8.103,
"step": 1044
},
{
"epoch": 18.03448275862069,
"grad_norm": 15.641746520996094,
"learning_rate": 4.431673052362708e-05,
"loss": 0.1409,
"step": 1046
},
{
"epoch": 18.06896551724138,
"grad_norm": 11.60824203491211,
"learning_rate": 4.418901660280971e-05,
"loss": 0.4299,
"step": 1048
},
{
"epoch": 18.103448275862068,
"grad_norm": 0.08567314594984055,
"learning_rate": 4.406130268199234e-05,
"loss": 0.0062,
"step": 1050
},
{
"epoch": 18.137931034482758,
"grad_norm": 0.12726525962352753,
"learning_rate": 4.393358876117497e-05,
"loss": 0.0031,
"step": 1052
},
{
"epoch": 18.17241379310345,
"grad_norm": 0.3815951645374298,
"learning_rate": 4.3805874840357605e-05,
"loss": 0.004,
"step": 1054
},
{
"epoch": 18.20689655172414,
"grad_norm": 17.277345657348633,
"learning_rate": 4.367816091954024e-05,
"loss": 0.1928,
"step": 1056
},
{
"epoch": 18.24137931034483,
"grad_norm": 0.14995414018630981,
"learning_rate": 4.355044699872286e-05,
"loss": 0.1884,
"step": 1058
},
{
"epoch": 18.275862068965516,
"grad_norm": 0.1205284371972084,
"learning_rate": 4.342273307790549e-05,
"loss": 0.1822,
"step": 1060
},
{
"epoch": 18.310344827586206,
"grad_norm": 0.35154199600219727,
"learning_rate": 4.3295019157088125e-05,
"loss": 0.019,
"step": 1062
},
{
"epoch": 18.344827586206897,
"grad_norm": 4.302131652832031,
"learning_rate": 4.3167305236270757e-05,
"loss": 0.0265,
"step": 1064
},
{
"epoch": 18.379310344827587,
"grad_norm": 1.0111949443817139,
"learning_rate": 4.303959131545339e-05,
"loss": 0.0126,
"step": 1066
},
{
"epoch": 18.413793103448278,
"grad_norm": 12.860360145568848,
"learning_rate": 4.291187739463602e-05,
"loss": 0.1918,
"step": 1068
},
{
"epoch": 18.448275862068964,
"grad_norm": 1.465510368347168,
"learning_rate": 4.278416347381865e-05,
"loss": 0.1672,
"step": 1070
},
{
"epoch": 18.482758620689655,
"grad_norm": 0.1530478149652481,
"learning_rate": 4.2656449553001277e-05,
"loss": 0.0624,
"step": 1072
},
{
"epoch": 18.517241379310345,
"grad_norm": 7.125781059265137,
"learning_rate": 4.252873563218391e-05,
"loss": 0.359,
"step": 1074
},
{
"epoch": 18.551724137931036,
"grad_norm": 3.524441957473755,
"learning_rate": 4.240102171136654e-05,
"loss": 0.0263,
"step": 1076
},
{
"epoch": 18.586206896551722,
"grad_norm": 2.0102035999298096,
"learning_rate": 4.227330779054917e-05,
"loss": 0.0602,
"step": 1078
},
{
"epoch": 18.620689655172413,
"grad_norm": 0.3677597939968109,
"learning_rate": 4.21455938697318e-05,
"loss": 0.0251,
"step": 1080
},
{
"epoch": 18.655172413793103,
"grad_norm": 19.12733268737793,
"learning_rate": 4.2017879948914435e-05,
"loss": 0.4779,
"step": 1082
},
{
"epoch": 18.689655172413794,
"grad_norm": 2.7869174480438232,
"learning_rate": 4.189016602809707e-05,
"loss": 0.1591,
"step": 1084
},
{
"epoch": 18.724137931034484,
"grad_norm": 2.577024459838867,
"learning_rate": 4.17624521072797e-05,
"loss": 0.0862,
"step": 1086
},
{
"epoch": 18.75862068965517,
"grad_norm": 0.28967535495758057,
"learning_rate": 4.163473818646232e-05,
"loss": 0.0645,
"step": 1088
},
{
"epoch": 18.79310344827586,
"grad_norm": 15.79033088684082,
"learning_rate": 4.1507024265644955e-05,
"loss": 0.1082,
"step": 1090
},
{
"epoch": 18.82758620689655,
"grad_norm": 0.5589704513549805,
"learning_rate": 4.1379310344827587e-05,
"loss": 0.0205,
"step": 1092
},
{
"epoch": 18.862068965517242,
"grad_norm": 0.6776816844940186,
"learning_rate": 4.125159642401022e-05,
"loss": 0.0067,
"step": 1094
},
{
"epoch": 18.896551724137932,
"grad_norm": 0.27798280119895935,
"learning_rate": 4.112388250319285e-05,
"loss": 0.0039,
"step": 1096
},
{
"epoch": 18.93103448275862,
"grad_norm": 12.297942161560059,
"learning_rate": 4.099616858237548e-05,
"loss": 0.151,
"step": 1098
},
{
"epoch": 18.96551724137931,
"grad_norm": 2.289623737335205,
"learning_rate": 4.086845466155811e-05,
"loss": 0.176,
"step": 1100
},
{
"epoch": 19.0,
"grad_norm": 11.227123260498047,
"learning_rate": 4.074074074074074e-05,
"loss": 0.1422,
"step": 1102
},
{
"epoch": 19.0,
"eval_accuracy": 0.6666666666666666,
"eval_f1_macro": 0.5608736126029359,
"eval_f1_micro": 0.6666666666666666,
"eval_f1_weighted": 0.647220323536113,
"eval_loss": 1.398878574371338,
"eval_precision_macro": 0.5671867606078133,
"eval_precision_micro": 0.6666666666666666,
"eval_precision_weighted": 0.6419607605313348,
"eval_recall_macro": 0.5695332080200501,
"eval_recall_micro": 0.6666666666666666,
"eval_recall_weighted": 0.6666666666666666,
"eval_runtime": 3.3441,
"eval_samples_per_second": 39.472,
"eval_steps_per_second": 5.084,
"step": 1102
},
{
"epoch": 19.03448275862069,
"grad_norm": 3.0859837532043457,
"learning_rate": 4.061302681992337e-05,
"loss": 0.0213,
"step": 1104
},
{
"epoch": 19.06896551724138,
"grad_norm": 12.9458589553833,
"learning_rate": 4.0485312899106e-05,
"loss": 0.1051,
"step": 1106
},
{
"epoch": 19.103448275862068,
"grad_norm": 0.20263761281967163,
"learning_rate": 4.035759897828863e-05,
"loss": 0.0172,
"step": 1108
},
{
"epoch": 19.137931034482758,
"grad_norm": 0.19154572486877441,
"learning_rate": 4.0229885057471265e-05,
"loss": 0.035,
"step": 1110
},
{
"epoch": 19.17241379310345,
"grad_norm": 0.30079570412635803,
"learning_rate": 4.0102171136653897e-05,
"loss": 0.0363,
"step": 1112
},
{
"epoch": 19.20689655172414,
"grad_norm": 0.17321471869945526,
"learning_rate": 3.997445721583653e-05,
"loss": 0.0143,
"step": 1114
},
{
"epoch": 19.24137931034483,
"grad_norm": 0.6283549666404724,
"learning_rate": 3.984674329501916e-05,
"loss": 0.0288,
"step": 1116
},
{
"epoch": 19.275862068965516,
"grad_norm": 0.014564587734639645,
"learning_rate": 3.971902937420179e-05,
"loss": 0.005,
"step": 1118
},
{
"epoch": 19.310344827586206,
"grad_norm": 0.0865383893251419,
"learning_rate": 3.959131545338442e-05,
"loss": 0.0134,
"step": 1120
},
{
"epoch": 19.344827586206897,
"grad_norm": 0.13445672392845154,
"learning_rate": 3.9463601532567055e-05,
"loss": 0.0104,
"step": 1122
},
{
"epoch": 19.379310344827587,
"grad_norm": 1.2037104368209839,
"learning_rate": 3.933588761174969e-05,
"loss": 0.0126,
"step": 1124
},
{
"epoch": 19.413793103448278,
"grad_norm": 0.8391666412353516,
"learning_rate": 3.920817369093232e-05,
"loss": 0.0176,
"step": 1126
},
{
"epoch": 19.448275862068964,
"grad_norm": 11.825739860534668,
"learning_rate": 3.908045977011495e-05,
"loss": 0.0852,
"step": 1128
},
{
"epoch": 19.482758620689655,
"grad_norm": 4.42478609085083,
"learning_rate": 3.8952745849297575e-05,
"loss": 0.077,
"step": 1130
},
{
"epoch": 19.517241379310345,
"grad_norm": 0.5317286849021912,
"learning_rate": 3.8825031928480207e-05,
"loss": 0.0204,
"step": 1132
},
{
"epoch": 19.551724137931036,
"grad_norm": 0.39885902404785156,
"learning_rate": 3.869731800766284e-05,
"loss": 0.0064,
"step": 1134
},
{
"epoch": 19.586206896551722,
"grad_norm": 0.8824494481086731,
"learning_rate": 3.856960408684547e-05,
"loss": 0.0184,
"step": 1136
},
{
"epoch": 19.620689655172413,
"grad_norm": 1.4008420705795288,
"learning_rate": 3.84418901660281e-05,
"loss": 0.0778,
"step": 1138
},
{
"epoch": 19.655172413793103,
"grad_norm": 0.5302988290786743,
"learning_rate": 3.831417624521073e-05,
"loss": 0.0935,
"step": 1140
},
{
"epoch": 19.689655172413794,
"grad_norm": 12.098783493041992,
"learning_rate": 3.8186462324393365e-05,
"loss": 0.4228,
"step": 1142
},
{
"epoch": 19.724137931034484,
"grad_norm": 2.2826879024505615,
"learning_rate": 3.805874840357599e-05,
"loss": 0.0317,
"step": 1144
},
{
"epoch": 19.75862068965517,
"grad_norm": 0.4077673852443695,
"learning_rate": 3.793103448275862e-05,
"loss": 0.0078,
"step": 1146
},
{
"epoch": 19.79310344827586,
"grad_norm": 6.110368728637695,
"learning_rate": 3.780332056194125e-05,
"loss": 0.1554,
"step": 1148
},
{
"epoch": 19.82758620689655,
"grad_norm": 0.027513781562447548,
"learning_rate": 3.7675606641123885e-05,
"loss": 0.0625,
"step": 1150
},
{
"epoch": 19.862068965517242,
"grad_norm": 6.546108245849609,
"learning_rate": 3.7547892720306517e-05,
"loss": 0.0988,
"step": 1152
},
{
"epoch": 19.896551724137932,
"grad_norm": 0.08968118578195572,
"learning_rate": 3.742017879948915e-05,
"loss": 0.002,
"step": 1154
},
{
"epoch": 19.93103448275862,
"grad_norm": 0.14957240223884583,
"learning_rate": 3.729246487867178e-05,
"loss": 0.0024,
"step": 1156
},
{
"epoch": 19.96551724137931,
"grad_norm": 21.937902450561523,
"learning_rate": 3.716475095785441e-05,
"loss": 0.4017,
"step": 1158
},
{
"epoch": 20.0,
"grad_norm": 0.162915900349617,
"learning_rate": 3.7037037037037037e-05,
"loss": 0.0037,
"step": 1160
},
{
"epoch": 20.0,
"eval_accuracy": 0.6212121212121212,
"eval_f1_macro": 0.5241622523135129,
"eval_f1_micro": 0.6212121212121212,
"eval_f1_weighted": 0.6077529051459533,
"eval_loss": 1.5133600234985352,
"eval_precision_macro": 0.5262823725981621,
"eval_precision_micro": 0.6212121212121212,
"eval_precision_weighted": 0.6092636895268474,
"eval_recall_macro": 0.537390350877193,
"eval_recall_micro": 0.6212121212121212,
"eval_recall_weighted": 0.6212121212121212,
"eval_runtime": 2.2507,
"eval_samples_per_second": 58.65,
"eval_steps_per_second": 7.553,
"step": 1160
},
{
"epoch": 20.03448275862069,
"grad_norm": 0.17806491255760193,
"learning_rate": 3.690932311621967e-05,
"loss": 0.0142,
"step": 1162
},
{
"epoch": 20.06896551724138,
"grad_norm": 0.30130231380462646,
"learning_rate": 3.67816091954023e-05,
"loss": 0.0266,
"step": 1164
},
{
"epoch": 20.103448275862068,
"grad_norm": 0.30467385053634644,
"learning_rate": 3.665389527458493e-05,
"loss": 0.0289,
"step": 1166
},
{
"epoch": 20.137931034482758,
"grad_norm": 6.654253959655762,
"learning_rate": 3.652618135376756e-05,
"loss": 0.1494,
"step": 1168
},
{
"epoch": 20.17241379310345,
"grad_norm": 29.355484008789062,
"learning_rate": 3.6398467432950195e-05,
"loss": 0.3555,
"step": 1170
},
{
"epoch": 20.20689655172414,
"grad_norm": 0.25190678238868713,
"learning_rate": 3.627075351213283e-05,
"loss": 0.0065,
"step": 1172
},
{
"epoch": 20.24137931034483,
"grad_norm": 7.903636455535889,
"learning_rate": 3.614303959131545e-05,
"loss": 0.0348,
"step": 1174
},
{
"epoch": 20.275862068965516,
"grad_norm": 1.2426676750183105,
"learning_rate": 3.601532567049808e-05,
"loss": 0.0348,
"step": 1176
},
{
"epoch": 20.310344827586206,
"grad_norm": 0.05831276252865791,
"learning_rate": 3.5887611749680715e-05,
"loss": 0.0171,
"step": 1178
},
{
"epoch": 20.344827586206897,
"grad_norm": 3.8727033138275146,
"learning_rate": 3.5759897828863347e-05,
"loss": 0.2391,
"step": 1180
},
{
"epoch": 20.379310344827587,
"grad_norm": 2.7642955780029297,
"learning_rate": 3.563218390804598e-05,
"loss": 0.0218,
"step": 1182
},
{
"epoch": 20.413793103448278,
"grad_norm": 28.808361053466797,
"learning_rate": 3.550446998722861e-05,
"loss": 0.1445,
"step": 1184
},
{
"epoch": 20.448275862068964,
"grad_norm": 0.25608864426612854,
"learning_rate": 3.537675606641124e-05,
"loss": 0.0038,
"step": 1186
},
{
"epoch": 20.482758620689655,
"grad_norm": 0.18029409646987915,
"learning_rate": 3.5249042145593867e-05,
"loss": 0.0034,
"step": 1188
},
{
"epoch": 20.517241379310345,
"grad_norm": 0.03822282329201698,
"learning_rate": 3.51213282247765e-05,
"loss": 0.0623,
"step": 1190
},
{
"epoch": 20.551724137931036,
"grad_norm": 0.24421456456184387,
"learning_rate": 3.499361430395913e-05,
"loss": 0.0054,
"step": 1192
},
{
"epoch": 20.586206896551722,
"grad_norm": 13.820355415344238,
"learning_rate": 3.486590038314176e-05,
"loss": 0.0834,
"step": 1194
},
{
"epoch": 20.620689655172413,
"grad_norm": 3.458310127258301,
"learning_rate": 3.473818646232439e-05,
"loss": 0.0236,
"step": 1196
},
{
"epoch": 20.655172413793103,
"grad_norm": 0.02704853191971779,
"learning_rate": 3.4610472541507025e-05,
"loss": 0.0033,
"step": 1198
},
{
"epoch": 20.689655172413794,
"grad_norm": 1.1011626720428467,
"learning_rate": 3.4482758620689657e-05,
"loss": 0.0165,
"step": 1200
},
{
"epoch": 20.724137931034484,
"grad_norm": 21.770254135131836,
"learning_rate": 3.435504469987229e-05,
"loss": 0.1164,
"step": 1202
},
{
"epoch": 20.75862068965517,
"grad_norm": 0.3167782425880432,
"learning_rate": 3.422733077905492e-05,
"loss": 0.0036,
"step": 1204
},
{
"epoch": 20.79310344827586,
"grad_norm": 0.057486578822135925,
"learning_rate": 3.409961685823755e-05,
"loss": 0.0029,
"step": 1206
},
{
"epoch": 20.82758620689655,
"grad_norm": 0.7752865552902222,
"learning_rate": 3.397190293742018e-05,
"loss": 0.1632,
"step": 1208
},
{
"epoch": 20.862068965517242,
"grad_norm": 1.2659624814987183,
"learning_rate": 3.3844189016602815e-05,
"loss": 0.0084,
"step": 1210
},
{
"epoch": 20.896551724137932,
"grad_norm": 0.0808514654636383,
"learning_rate": 3.371647509578545e-05,
"loss": 0.0011,
"step": 1212
},
{
"epoch": 20.93103448275862,
"grad_norm": 0.502754807472229,
"learning_rate": 3.358876117496808e-05,
"loss": 0.0062,
"step": 1214
},
{
"epoch": 20.96551724137931,
"grad_norm": 0.09211990982294083,
"learning_rate": 3.34610472541507e-05,
"loss": 0.0043,
"step": 1216
},
{
"epoch": 21.0,
"grad_norm": 0.22179439663887024,
"learning_rate": 3.3333333333333335e-05,
"loss": 0.0602,
"step": 1218
},
{
"epoch": 21.0,
"eval_accuracy": 0.6666666666666666,
"eval_f1_macro": 0.5660444695559945,
"eval_f1_micro": 0.6666666666666666,
"eval_f1_weighted": 0.6544395105724697,
"eval_loss": 1.5348953008651733,
"eval_precision_macro": 0.5710371698009604,
"eval_precision_micro": 0.6666666666666666,
"eval_precision_weighted": 0.6503411219319607,
"eval_recall_macro": 0.5670739348370927,
"eval_recall_micro": 0.6666666666666666,
"eval_recall_weighted": 0.6666666666666666,
"eval_runtime": 2.9914,
"eval_samples_per_second": 44.127,
"eval_steps_per_second": 5.683,
"step": 1218
},
{
"epoch": 21.03448275862069,
"grad_norm": 0.02936175838112831,
"learning_rate": 3.3205619412515967e-05,
"loss": 0.0007,
"step": 1220
},
{
"epoch": 21.06896551724138,
"grad_norm": 10.468208312988281,
"learning_rate": 3.30779054916986e-05,
"loss": 0.0762,
"step": 1222
},
{
"epoch": 21.103448275862068,
"grad_norm": 0.010613945312798023,
"learning_rate": 3.295019157088123e-05,
"loss": 0.0008,
"step": 1224
},
{
"epoch": 21.137931034482758,
"grad_norm": 0.8747249841690063,
"learning_rate": 3.282247765006386e-05,
"loss": 0.3968,
"step": 1226
},
{
"epoch": 21.17241379310345,
"grad_norm": 0.014937883242964745,
"learning_rate": 3.269476372924649e-05,
"loss": 0.0526,
"step": 1228
},
{
"epoch": 21.20689655172414,
"grad_norm": 0.29066944122314453,
"learning_rate": 3.256704980842912e-05,
"loss": 0.0051,
"step": 1230
},
{
"epoch": 21.24137931034483,
"grad_norm": 0.05553491786122322,
"learning_rate": 3.243933588761175e-05,
"loss": 0.0015,
"step": 1232
},
{
"epoch": 21.275862068965516,
"grad_norm": 0.1087840124964714,
"learning_rate": 3.231162196679438e-05,
"loss": 0.0035,
"step": 1234
},
{
"epoch": 21.310344827586206,
"grad_norm": 2.526913642883301,
"learning_rate": 3.218390804597701e-05,
"loss": 0.0228,
"step": 1236
},
{
"epoch": 21.344827586206897,
"grad_norm": 0.09952912479639053,
"learning_rate": 3.2056194125159645e-05,
"loss": 0.0025,
"step": 1238
},
{
"epoch": 21.379310344827587,
"grad_norm": 0.03265494853258133,
"learning_rate": 3.192848020434228e-05,
"loss": 0.0044,
"step": 1240
},
{
"epoch": 21.413793103448278,
"grad_norm": 5.046082973480225,
"learning_rate": 3.180076628352491e-05,
"loss": 0.058,
"step": 1242
},
{
"epoch": 21.448275862068964,
"grad_norm": 15.606002807617188,
"learning_rate": 3.167305236270754e-05,
"loss": 0.1473,
"step": 1244
},
{
"epoch": 21.482758620689655,
"grad_norm": 19.64705467224121,
"learning_rate": 3.1545338441890165e-05,
"loss": 0.1913,
"step": 1246
},
{
"epoch": 21.517241379310345,
"grad_norm": 4.021636009216309,
"learning_rate": 3.1417624521072797e-05,
"loss": 0.0729,
"step": 1248
},
{
"epoch": 21.551724137931036,
"grad_norm": 0.22693483531475067,
"learning_rate": 3.128991060025543e-05,
"loss": 0.0402,
"step": 1250
},
{
"epoch": 21.586206896551722,
"grad_norm": 0.1609225869178772,
"learning_rate": 3.116219667943806e-05,
"loss": 0.0025,
"step": 1252
},
{
"epoch": 21.620689655172413,
"grad_norm": 0.10900776833295822,
"learning_rate": 3.103448275862069e-05,
"loss": 0.014,
"step": 1254
},
{
"epoch": 21.655172413793103,
"grad_norm": 0.47104957699775696,
"learning_rate": 3.090676883780332e-05,
"loss": 0.0057,
"step": 1256
},
{
"epoch": 21.689655172413794,
"grad_norm": 2.721670150756836,
"learning_rate": 3.0779054916985955e-05,
"loss": 0.0277,
"step": 1258
},
{
"epoch": 21.724137931034484,
"grad_norm": 0.09523604065179825,
"learning_rate": 3.065134099616858e-05,
"loss": 0.0015,
"step": 1260
},
{
"epoch": 21.75862068965517,
"grad_norm": 4.91738748550415,
"learning_rate": 3.052362707535121e-05,
"loss": 0.0663,
"step": 1262
},
{
"epoch": 21.79310344827586,
"grad_norm": 0.12091673910617828,
"learning_rate": 3.0395913154533843e-05,
"loss": 0.0024,
"step": 1264
},
{
"epoch": 21.82758620689655,
"grad_norm": 11.164044380187988,
"learning_rate": 3.0268199233716475e-05,
"loss": 0.0475,
"step": 1266
},
{
"epoch": 21.862068965517242,
"grad_norm": 0.018948553130030632,
"learning_rate": 3.0140485312899107e-05,
"loss": 0.0597,
"step": 1268
},
{
"epoch": 21.896551724137932,
"grad_norm": 4.384052753448486,
"learning_rate": 3.0012771392081738e-05,
"loss": 0.1747,
"step": 1270
},
{
"epoch": 21.93103448275862,
"grad_norm": 8.05239486694336,
"learning_rate": 2.988505747126437e-05,
"loss": 0.0828,
"step": 1272
},
{
"epoch": 21.96551724137931,
"grad_norm": 0.07449853420257568,
"learning_rate": 2.9757343550446998e-05,
"loss": 0.0028,
"step": 1274
},
{
"epoch": 22.0,
"grad_norm": 6.600012302398682,
"learning_rate": 2.962962962962963e-05,
"loss": 0.0353,
"step": 1276
},
{
"epoch": 22.0,
"eval_accuracy": 0.7045454545454546,
"eval_f1_macro": 0.6136674105378829,
"eval_f1_micro": 0.7045454545454546,
"eval_f1_weighted": 0.6918989425351217,
"eval_loss": 1.448930263519287,
"eval_precision_macro": 0.6146465063758297,
"eval_precision_micro": 0.7045454545454546,
"eval_precision_weighted": 0.6908581184896975,
"eval_recall_macro": 0.6242167919799498,
"eval_recall_micro": 0.7045454545454546,
"eval_recall_weighted": 0.7045454545454546,
"eval_runtime": 2.3557,
"eval_samples_per_second": 56.034,
"eval_steps_per_second": 7.217,
"step": 1276
},
{
"epoch": 22.03448275862069,
"grad_norm": 0.022473517805337906,
"learning_rate": 2.950191570881226e-05,
"loss": 0.0027,
"step": 1278
},
{
"epoch": 22.06896551724138,
"grad_norm": 0.06254889816045761,
"learning_rate": 2.9374201787994893e-05,
"loss": 0.0087,
"step": 1280
},
{
"epoch": 22.103448275862068,
"grad_norm": 0.07748962938785553,
"learning_rate": 2.9246487867177525e-05,
"loss": 0.0042,
"step": 1282
},
{
"epoch": 22.137931034482758,
"grad_norm": 0.07348134368658066,
"learning_rate": 2.9118773946360157e-05,
"loss": 0.0018,
"step": 1284
},
{
"epoch": 22.17241379310345,
"grad_norm": 5.811524868011475,
"learning_rate": 2.899106002554279e-05,
"loss": 0.0482,
"step": 1286
},
{
"epoch": 22.20689655172414,
"grad_norm": 0.06995466351509094,
"learning_rate": 2.8863346104725413e-05,
"loss": 0.002,
"step": 1288
},
{
"epoch": 22.24137931034483,
"grad_norm": 0.033994317054748535,
"learning_rate": 2.8735632183908045e-05,
"loss": 0.0219,
"step": 1290
},
{
"epoch": 22.275862068965516,
"grad_norm": 0.03432360664010048,
"learning_rate": 2.8607918263090677e-05,
"loss": 0.0009,
"step": 1292
},
{
"epoch": 22.310344827586206,
"grad_norm": 0.01812071166932583,
"learning_rate": 2.8480204342273308e-05,
"loss": 0.0015,
"step": 1294
},
{
"epoch": 22.344827586206897,
"grad_norm": 1.0397570133209229,
"learning_rate": 2.835249042145594e-05,
"loss": 0.0078,
"step": 1296
},
{
"epoch": 22.379310344827587,
"grad_norm": 0.5670585036277771,
"learning_rate": 2.822477650063857e-05,
"loss": 0.0359,
"step": 1298
},
{
"epoch": 22.413793103448278,
"grad_norm": 0.03518284112215042,
"learning_rate": 2.8097062579821203e-05,
"loss": 0.001,
"step": 1300
},
{
"epoch": 22.448275862068964,
"grad_norm": 0.3894251585006714,
"learning_rate": 2.796934865900383e-05,
"loss": 0.0021,
"step": 1302
},
{
"epoch": 22.482758620689655,
"grad_norm": 0.12059098482131958,
"learning_rate": 2.7841634738186463e-05,
"loss": 0.0014,
"step": 1304
},
{
"epoch": 22.517241379310345,
"grad_norm": 3.6728742122650146,
"learning_rate": 2.7713920817369095e-05,
"loss": 0.0199,
"step": 1306
},
{
"epoch": 22.551724137931036,
"grad_norm": 0.029478440061211586,
"learning_rate": 2.7586206896551727e-05,
"loss": 0.0009,
"step": 1308
},
{
"epoch": 22.586206896551722,
"grad_norm": 0.31572234630584717,
"learning_rate": 2.745849297573436e-05,
"loss": 0.004,
"step": 1310
},
{
"epoch": 22.620689655172413,
"grad_norm": 0.04008958116173744,
"learning_rate": 2.733077905491699e-05,
"loss": 0.0017,
"step": 1312
},
{
"epoch": 22.655172413793103,
"grad_norm": 1.0323200225830078,
"learning_rate": 2.720306513409962e-05,
"loss": 0.0077,
"step": 1314
},
{
"epoch": 22.689655172413794,
"grad_norm": 0.032519642263650894,
"learning_rate": 2.7075351213282253e-05,
"loss": 0.0027,
"step": 1316
},
{
"epoch": 22.724137931034484,
"grad_norm": 2.1410605907440186,
"learning_rate": 2.6947637292464878e-05,
"loss": 0.0125,
"step": 1318
},
{
"epoch": 22.75862068965517,
"grad_norm": 0.016050921753048897,
"learning_rate": 2.681992337164751e-05,
"loss": 0.0331,
"step": 1320
},
{
"epoch": 22.79310344827586,
"grad_norm": 0.1428113877773285,
"learning_rate": 2.669220945083014e-05,
"loss": 0.0049,
"step": 1322
},
{
"epoch": 22.82758620689655,
"grad_norm": 0.1641322523355484,
"learning_rate": 2.6564495530012773e-05,
"loss": 0.002,
"step": 1324
},
{
"epoch": 22.862068965517242,
"grad_norm": 7.986566066741943,
"learning_rate": 2.6436781609195405e-05,
"loss": 0.0351,
"step": 1326
},
{
"epoch": 22.896551724137932,
"grad_norm": 0.08804414421319962,
"learning_rate": 2.6309067688378037e-05,
"loss": 0.0013,
"step": 1328
},
{
"epoch": 22.93103448275862,
"grad_norm": 7.736631870269775,
"learning_rate": 2.618135376756067e-05,
"loss": 0.0779,
"step": 1330
},
{
"epoch": 22.96551724137931,
"grad_norm": 0.015013402327895164,
"learning_rate": 2.6053639846743293e-05,
"loss": 0.0827,
"step": 1332
},
{
"epoch": 23.0,
"grad_norm": 0.01156703382730484,
"learning_rate": 2.5925925925925925e-05,
"loss": 0.001,
"step": 1334
},
{
"epoch": 23.0,
"eval_accuracy": 0.6666666666666666,
"eval_f1_macro": 0.5714664821494502,
"eval_f1_micro": 0.6666666666666666,
"eval_f1_weighted": 0.6541314709388933,
"eval_loss": 1.4781274795532227,
"eval_precision_macro": 0.5657452925012241,
"eval_precision_micro": 0.6666666666666666,
"eval_precision_weighted": 0.6448584800964208,
"eval_recall_macro": 0.580545112781955,
"eval_recall_micro": 0.6666666666666666,
"eval_recall_weighted": 0.6666666666666666,
"eval_runtime": 1.954,
"eval_samples_per_second": 67.553,
"eval_steps_per_second": 8.7,
"step": 1334
},
{
"epoch": 23.03448275862069,
"grad_norm": 0.006409560330212116,
"learning_rate": 2.5798212005108557e-05,
"loss": 0.0018,
"step": 1336
},
{
"epoch": 23.06896551724138,
"grad_norm": 2.2381739616394043,
"learning_rate": 2.5670498084291188e-05,
"loss": 0.0467,
"step": 1338
},
{
"epoch": 23.103448275862068,
"grad_norm": 0.5874573588371277,
"learning_rate": 2.554278416347382e-05,
"loss": 0.0048,
"step": 1340
},
{
"epoch": 23.137931034482758,
"grad_norm": 22.487873077392578,
"learning_rate": 2.541507024265645e-05,
"loss": 0.1,
"step": 1342
},
{
"epoch": 23.17241379310345,
"grad_norm": 0.020600179210305214,
"learning_rate": 2.5287356321839083e-05,
"loss": 0.025,
"step": 1344
},
{
"epoch": 23.20689655172414,
"grad_norm": 0.17713558673858643,
"learning_rate": 2.515964240102171e-05,
"loss": 0.0037,
"step": 1346
},
{
"epoch": 23.24137931034483,
"grad_norm": 0.19636306166648865,
"learning_rate": 2.5031928480204343e-05,
"loss": 0.0024,
"step": 1348
},
{
"epoch": 23.275862068965516,
"grad_norm": 0.5884350538253784,
"learning_rate": 2.4904214559386975e-05,
"loss": 0.0041,
"step": 1350
},
{
"epoch": 23.310344827586206,
"grad_norm": 0.07146797329187393,
"learning_rate": 2.4776500638569607e-05,
"loss": 0.0012,
"step": 1352
},
{
"epoch": 23.344827586206897,
"grad_norm": 0.11290651559829712,
"learning_rate": 2.464878671775224e-05,
"loss": 0.008,
"step": 1354
},
{
"epoch": 23.379310344827587,
"grad_norm": 0.09537604451179504,
"learning_rate": 2.4521072796934867e-05,
"loss": 0.0339,
"step": 1356
},
{
"epoch": 23.413793103448278,
"grad_norm": 0.27110394835472107,
"learning_rate": 2.4393358876117498e-05,
"loss": 0.0021,
"step": 1358
},
{
"epoch": 23.448275862068964,
"grad_norm": 0.2351570874452591,
"learning_rate": 2.426564495530013e-05,
"loss": 0.0015,
"step": 1360
},
{
"epoch": 23.482758620689655,
"grad_norm": 0.018089979887008667,
"learning_rate": 2.413793103448276e-05,
"loss": 0.0023,
"step": 1362
},
{
"epoch": 23.517241379310345,
"grad_norm": 0.12375891953706741,
"learning_rate": 2.401021711366539e-05,
"loss": 0.0035,
"step": 1364
},
{
"epoch": 23.551724137931036,
"grad_norm": 0.2663002908229828,
"learning_rate": 2.388250319284802e-05,
"loss": 0.0048,
"step": 1366
},
{
"epoch": 23.586206896551722,
"grad_norm": 0.031247558072209358,
"learning_rate": 2.3754789272030653e-05,
"loss": 0.092,
"step": 1368
},
{
"epoch": 23.620689655172413,
"grad_norm": 0.024893470108509064,
"learning_rate": 2.3627075351213285e-05,
"loss": 0.0008,
"step": 1370
},
{
"epoch": 23.655172413793103,
"grad_norm": 0.040011290460824966,
"learning_rate": 2.3499361430395913e-05,
"loss": 0.0017,
"step": 1372
},
{
"epoch": 23.689655172413794,
"grad_norm": 0.6574517488479614,
"learning_rate": 2.3371647509578545e-05,
"loss": 0.024,
"step": 1374
},
{
"epoch": 23.724137931034484,
"grad_norm": 0.02834693342447281,
"learning_rate": 2.3243933588761177e-05,
"loss": 0.0006,
"step": 1376
},
{
"epoch": 23.75862068965517,
"grad_norm": 1.0888547897338867,
"learning_rate": 2.3116219667943805e-05,
"loss": 0.006,
"step": 1378
},
{
"epoch": 23.79310344827586,
"grad_norm": 0.03572368249297142,
"learning_rate": 2.2988505747126437e-05,
"loss": 0.0028,
"step": 1380
},
{
"epoch": 23.82758620689655,
"grad_norm": 5.217251777648926,
"learning_rate": 2.2860791826309068e-05,
"loss": 0.1481,
"step": 1382
},
{
"epoch": 23.862068965517242,
"grad_norm": 0.4067634046077728,
"learning_rate": 2.27330779054917e-05,
"loss": 0.0045,
"step": 1384
},
{
"epoch": 23.896551724137932,
"grad_norm": 0.1273973435163498,
"learning_rate": 2.2605363984674328e-05,
"loss": 0.0013,
"step": 1386
},
{
"epoch": 23.93103448275862,
"grad_norm": 0.3500019907951355,
"learning_rate": 2.247765006385696e-05,
"loss": 0.0023,
"step": 1388
},
{
"epoch": 23.96551724137931,
"grad_norm": 2.0916292667388916,
"learning_rate": 2.234993614303959e-05,
"loss": 0.0074,
"step": 1390
},
{
"epoch": 24.0,
"grad_norm": 0.03820287436246872,
"learning_rate": 2.2222222222222223e-05,
"loss": 0.0007,
"step": 1392
},
{
"epoch": 24.0,
"eval_accuracy": 0.6590909090909091,
"eval_f1_macro": 0.5712607596935955,
"eval_f1_micro": 0.6590909090909091,
"eval_f1_weighted": 0.6510971028694909,
"eval_loss": 1.632594347000122,
"eval_precision_macro": 0.5870793772054277,
"eval_precision_micro": 0.6590909090909091,
"eval_precision_weighted": 0.664782083632351,
"eval_recall_macro": 0.5786027568922305,
"eval_recall_micro": 0.6590909090909091,
"eval_recall_weighted": 0.6590909090909091,
"eval_runtime": 1.9749,
"eval_samples_per_second": 66.84,
"eval_steps_per_second": 8.608,
"step": 1392
},
{
"epoch": 24.03448275862069,
"grad_norm": 0.07503662258386612,
"learning_rate": 2.2094508301404855e-05,
"loss": 0.0244,
"step": 1394
},
{
"epoch": 24.06896551724138,
"grad_norm": 27.82219123840332,
"learning_rate": 2.1966794380587487e-05,
"loss": 0.196,
"step": 1396
},
{
"epoch": 24.103448275862068,
"grad_norm": 0.029466254636645317,
"learning_rate": 2.183908045977012e-05,
"loss": 0.0007,
"step": 1398
},
{
"epoch": 24.137931034482758,
"grad_norm": 0.15405841171741486,
"learning_rate": 2.1711366538952747e-05,
"loss": 0.0015,
"step": 1400
},
{
"epoch": 24.17241379310345,
"grad_norm": 2.168177366256714,
"learning_rate": 2.1583652618135378e-05,
"loss": 0.0114,
"step": 1402
},
{
"epoch": 24.20689655172414,
"grad_norm": 1.049078345298767,
"learning_rate": 2.145593869731801e-05,
"loss": 0.0039,
"step": 1404
},
{
"epoch": 24.24137931034483,
"grad_norm": 0.10657618939876556,
"learning_rate": 2.1328224776500638e-05,
"loss": 0.0016,
"step": 1406
},
{
"epoch": 24.275862068965516,
"grad_norm": 2.1004295349121094,
"learning_rate": 2.120051085568327e-05,
"loss": 0.0427,
"step": 1408
},
{
"epoch": 24.310344827586206,
"grad_norm": 0.026559889316558838,
"learning_rate": 2.10727969348659e-05,
"loss": 0.0008,
"step": 1410
},
{
"epoch": 24.344827586206897,
"grad_norm": 0.2494554966688156,
"learning_rate": 2.0945083014048533e-05,
"loss": 0.0021,
"step": 1412
},
{
"epoch": 24.379310344827587,
"grad_norm": 0.5512819290161133,
"learning_rate": 2.081736909323116e-05,
"loss": 0.0093,
"step": 1414
},
{
"epoch": 24.413793103448278,
"grad_norm": 0.032853931188583374,
"learning_rate": 2.0689655172413793e-05,
"loss": 0.0006,
"step": 1416
},
{
"epoch": 24.448275862068964,
"grad_norm": 0.29787227511405945,
"learning_rate": 2.0561941251596425e-05,
"loss": 0.0037,
"step": 1418
},
{
"epoch": 24.482758620689655,
"grad_norm": 0.22131195664405823,
"learning_rate": 2.0434227330779057e-05,
"loss": 0.0086,
"step": 1420
},
{
"epoch": 24.517241379310345,
"grad_norm": 0.15168803930282593,
"learning_rate": 2.0306513409961685e-05,
"loss": 0.0017,
"step": 1422
},
{
"epoch": 24.551724137931036,
"grad_norm": 0.006662359926849604,
"learning_rate": 2.0178799489144317e-05,
"loss": 0.1826,
"step": 1424
},
{
"epoch": 24.586206896551722,
"grad_norm": 0.008878910914063454,
"learning_rate": 2.0051085568326948e-05,
"loss": 0.0096,
"step": 1426
},
{
"epoch": 24.620689655172413,
"grad_norm": 0.09360943734645844,
"learning_rate": 1.992337164750958e-05,
"loss": 0.0015,
"step": 1428
},
{
"epoch": 24.655172413793103,
"grad_norm": 0.13396118581295013,
"learning_rate": 1.979565772669221e-05,
"loss": 0.3838,
"step": 1430
},
{
"epoch": 24.689655172413794,
"grad_norm": 0.010010026395320892,
"learning_rate": 1.9667943805874843e-05,
"loss": 0.0005,
"step": 1432
},
{
"epoch": 24.724137931034484,
"grad_norm": 1.2189490795135498,
"learning_rate": 1.9540229885057475e-05,
"loss": 0.0052,
"step": 1434
},
{
"epoch": 24.75862068965517,
"grad_norm": 0.0254853293299675,
"learning_rate": 1.9412515964240103e-05,
"loss": 0.007,
"step": 1436
},
{
"epoch": 24.79310344827586,
"grad_norm": 5.6452765464782715,
"learning_rate": 1.9284802043422735e-05,
"loss": 0.0186,
"step": 1438
},
{
"epoch": 24.82758620689655,
"grad_norm": 0.12432337552309036,
"learning_rate": 1.9157088122605367e-05,
"loss": 0.0012,
"step": 1440
},
{
"epoch": 24.862068965517242,
"grad_norm": 0.06803122162818909,
"learning_rate": 1.9029374201787995e-05,
"loss": 0.001,
"step": 1442
},
{
"epoch": 24.896551724137932,
"grad_norm": 0.07736227661371231,
"learning_rate": 1.8901660280970627e-05,
"loss": 0.0007,
"step": 1444
},
{
"epoch": 24.93103448275862,
"grad_norm": 0.140510693192482,
"learning_rate": 1.8773946360153258e-05,
"loss": 0.0032,
"step": 1446
},
{
"epoch": 24.96551724137931,
"grad_norm": 0.1517113447189331,
"learning_rate": 1.864623243933589e-05,
"loss": 0.0142,
"step": 1448
},
{
"epoch": 25.0,
"grad_norm": 1.449758768081665,
"learning_rate": 1.8518518518518518e-05,
"loss": 0.0084,
"step": 1450
},
{
"epoch": 25.0,
"eval_accuracy": 0.6590909090909091,
"eval_f1_macro": 0.5684048691621123,
"eval_f1_micro": 0.6590909090909091,
"eval_f1_weighted": 0.6569451587148032,
"eval_loss": 1.5855995416641235,
"eval_precision_macro": 0.5662025575780552,
"eval_precision_micro": 0.6590909090909091,
"eval_precision_weighted": 0.6671501716534084,
"eval_recall_macro": 0.5802474937343358,
"eval_recall_micro": 0.6590909090909091,
"eval_recall_weighted": 0.6590909090909091,
"eval_runtime": 2.0013,
"eval_samples_per_second": 65.957,
"eval_steps_per_second": 8.495,
"step": 1450
},
{
"epoch": 25.03448275862069,
"grad_norm": 3.8146300315856934,
"learning_rate": 1.839080459770115e-05,
"loss": 0.116,
"step": 1452
},
{
"epoch": 25.06896551724138,
"grad_norm": 0.013178830966353416,
"learning_rate": 1.826309067688378e-05,
"loss": 0.0004,
"step": 1454
},
{
"epoch": 25.103448275862068,
"grad_norm": 0.04289768263697624,
"learning_rate": 1.8135376756066413e-05,
"loss": 0.001,
"step": 1456
},
{
"epoch": 25.137931034482758,
"grad_norm": 0.3486016094684601,
"learning_rate": 1.800766283524904e-05,
"loss": 0.0018,
"step": 1458
},
{
"epoch": 25.17241379310345,
"grad_norm": 0.101795494556427,
"learning_rate": 1.7879948914431673e-05,
"loss": 0.0487,
"step": 1460
},
{
"epoch": 25.20689655172414,
"grad_norm": 0.008328469470143318,
"learning_rate": 1.7752234993614305e-05,
"loss": 0.0003,
"step": 1462
},
{
"epoch": 25.24137931034483,
"grad_norm": 0.022965148091316223,
"learning_rate": 1.7624521072796933e-05,
"loss": 0.0007,
"step": 1464
},
{
"epoch": 25.275862068965516,
"grad_norm": 0.05658239498734474,
"learning_rate": 1.7496807151979565e-05,
"loss": 0.0012,
"step": 1466
},
{
"epoch": 25.310344827586206,
"grad_norm": 0.4014154374599457,
"learning_rate": 1.7369093231162197e-05,
"loss": 0.0046,
"step": 1468
},
{
"epoch": 25.344827586206897,
"grad_norm": 5.419172763824463,
"learning_rate": 1.7241379310344828e-05,
"loss": 0.0219,
"step": 1470
},
{
"epoch": 25.379310344827587,
"grad_norm": 0.022299258038401604,
"learning_rate": 1.711366538952746e-05,
"loss": 0.0007,
"step": 1472
},
{
"epoch": 25.413793103448278,
"grad_norm": 0.8597404360771179,
"learning_rate": 1.698595146871009e-05,
"loss": 0.0042,
"step": 1474
},
{
"epoch": 25.448275862068964,
"grad_norm": 0.056232478469610214,
"learning_rate": 1.6858237547892723e-05,
"loss": 0.0007,
"step": 1476
},
{
"epoch": 25.482758620689655,
"grad_norm": 0.8406282663345337,
"learning_rate": 1.673052362707535e-05,
"loss": 0.0036,
"step": 1478
},
{
"epoch": 25.517241379310345,
"grad_norm": 0.03363404422998428,
"learning_rate": 1.6602809706257983e-05,
"loss": 0.0014,
"step": 1480
},
{
"epoch": 25.551724137931036,
"grad_norm": 0.015727031975984573,
"learning_rate": 1.6475095785440615e-05,
"loss": 0.0008,
"step": 1482
},
{
"epoch": 25.586206896551722,
"grad_norm": 0.03139304369688034,
"learning_rate": 1.6347381864623247e-05,
"loss": 0.0017,
"step": 1484
},
{
"epoch": 25.620689655172413,
"grad_norm": 0.02697753719985485,
"learning_rate": 1.6219667943805875e-05,
"loss": 0.0008,
"step": 1486
},
{
"epoch": 25.655172413793103,
"grad_norm": 0.004935800563544035,
"learning_rate": 1.6091954022988507e-05,
"loss": 0.0006,
"step": 1488
},
{
"epoch": 25.689655172413794,
"grad_norm": 1.9771162271499634,
"learning_rate": 1.596424010217114e-05,
"loss": 0.0139,
"step": 1490
},
{
"epoch": 25.724137931034484,
"grad_norm": 0.014661573804914951,
"learning_rate": 1.583652618135377e-05,
"loss": 0.0004,
"step": 1492
},
{
"epoch": 25.75862068965517,
"grad_norm": 2.9789557456970215,
"learning_rate": 1.5708812260536398e-05,
"loss": 0.0784,
"step": 1494
},
{
"epoch": 25.79310344827586,
"grad_norm": 0.05912244692444801,
"learning_rate": 1.558109833971903e-05,
"loss": 0.002,
"step": 1496
},
{
"epoch": 25.82758620689655,
"grad_norm": 0.21335746347904205,
"learning_rate": 1.545338441890166e-05,
"loss": 0.002,
"step": 1498
},
{
"epoch": 25.862068965517242,
"grad_norm": 0.01072037685662508,
"learning_rate": 1.532567049808429e-05,
"loss": 0.0005,
"step": 1500
},
{
"epoch": 25.896551724137932,
"grad_norm": 0.04208362475037575,
"learning_rate": 1.5197956577266922e-05,
"loss": 0.0007,
"step": 1502
},
{
"epoch": 25.93103448275862,
"grad_norm": 0.026036394760012627,
"learning_rate": 1.5070242656449553e-05,
"loss": 0.0006,
"step": 1504
},
{
"epoch": 25.96551724137931,
"grad_norm": 0.08341384679079056,
"learning_rate": 1.4942528735632185e-05,
"loss": 0.0012,
"step": 1506
},
{
"epoch": 26.0,
"grad_norm": 0.0347595140337944,
"learning_rate": 1.4814814814814815e-05,
"loss": 0.0008,
"step": 1508
},
{
"epoch": 26.0,
"eval_accuracy": 0.6818181818181818,
"eval_f1_macro": 0.5826228878648233,
"eval_f1_micro": 0.6818181818181818,
"eval_f1_weighted": 0.6675219941348973,
"eval_loss": 1.5798661708831787,
"eval_precision_macro": 0.5849358974358975,
"eval_precision_micro": 0.6818181818181818,
"eval_precision_weighted": 0.6631993006993008,
"eval_recall_macro": 0.5884241854636592,
"eval_recall_micro": 0.6818181818181818,
"eval_recall_weighted": 0.6818181818181818,
"eval_runtime": 1.9704,
"eval_samples_per_second": 66.992,
"eval_steps_per_second": 8.628,
"step": 1508
},
{
"epoch": 26.03448275862069,
"grad_norm": 0.10061348974704742,
"learning_rate": 1.4687100893997447e-05,
"loss": 0.0221,
"step": 1510
},
{
"epoch": 26.06896551724138,
"grad_norm": 0.005162129178643227,
"learning_rate": 1.4559386973180078e-05,
"loss": 0.0003,
"step": 1512
},
{
"epoch": 26.103448275862068,
"grad_norm": 0.02409091778099537,
"learning_rate": 1.4431673052362707e-05,
"loss": 0.0004,
"step": 1514
},
{
"epoch": 26.137931034482758,
"grad_norm": 0.02003251574933529,
"learning_rate": 1.4303959131545338e-05,
"loss": 0.0009,
"step": 1516
},
{
"epoch": 26.17241379310345,
"grad_norm": 0.11420497298240662,
"learning_rate": 1.417624521072797e-05,
"loss": 0.0011,
"step": 1518
},
{
"epoch": 26.20689655172414,
"grad_norm": 0.03824389725923538,
"learning_rate": 1.4048531289910602e-05,
"loss": 0.0008,
"step": 1520
},
{
"epoch": 26.24137931034483,
"grad_norm": 0.00969379860907793,
"learning_rate": 1.3920817369093232e-05,
"loss": 0.0003,
"step": 1522
},
{
"epoch": 26.275862068965516,
"grad_norm": 1.241509199142456,
"learning_rate": 1.3793103448275863e-05,
"loss": 0.0038,
"step": 1524
},
{
"epoch": 26.310344827586206,
"grad_norm": 0.011483917012810707,
"learning_rate": 1.3665389527458495e-05,
"loss": 0.0011,
"step": 1526
},
{
"epoch": 26.344827586206897,
"grad_norm": 0.02756449580192566,
"learning_rate": 1.3537675606641127e-05,
"loss": 0.0007,
"step": 1528
},
{
"epoch": 26.379310344827587,
"grad_norm": 0.01528858207166195,
"learning_rate": 1.3409961685823755e-05,
"loss": 0.0003,
"step": 1530
},
{
"epoch": 26.413793103448278,
"grad_norm": 0.01113022118806839,
"learning_rate": 1.3282247765006387e-05,
"loss": 0.0005,
"step": 1532
},
{
"epoch": 26.448275862068964,
"grad_norm": 18.05221176147461,
"learning_rate": 1.3154533844189018e-05,
"loss": 0.1686,
"step": 1534
},
{
"epoch": 26.482758620689655,
"grad_norm": 0.009326491504907608,
"learning_rate": 1.3026819923371647e-05,
"loss": 0.0008,
"step": 1536
},
{
"epoch": 26.517241379310345,
"grad_norm": 0.04548182338476181,
"learning_rate": 1.2899106002554278e-05,
"loss": 0.0007,
"step": 1538
},
{
"epoch": 26.551724137931036,
"grad_norm": 0.008745373226702213,
"learning_rate": 1.277139208173691e-05,
"loss": 0.0004,
"step": 1540
},
{
"epoch": 26.586206896551722,
"grad_norm": 0.24436865746974945,
"learning_rate": 1.2643678160919542e-05,
"loss": 0.0021,
"step": 1542
},
{
"epoch": 26.620689655172413,
"grad_norm": 0.01390091422945261,
"learning_rate": 1.2515964240102172e-05,
"loss": 0.0008,
"step": 1544
},
{
"epoch": 26.655172413793103,
"grad_norm": 0.016228720545768738,
"learning_rate": 1.2388250319284803e-05,
"loss": 0.0009,
"step": 1546
},
{
"epoch": 26.689655172413794,
"grad_norm": 1.8321493864059448,
"learning_rate": 1.2260536398467433e-05,
"loss": 0.0245,
"step": 1548
},
{
"epoch": 26.724137931034484,
"grad_norm": 0.11651375144720078,
"learning_rate": 1.2132822477650065e-05,
"loss": 0.0012,
"step": 1550
},
{
"epoch": 26.75862068965517,
"grad_norm": 0.26734495162963867,
"learning_rate": 1.2005108556832695e-05,
"loss": 0.0306,
"step": 1552
},
{
"epoch": 26.79310344827586,
"grad_norm": 1.002111792564392,
"learning_rate": 1.1877394636015327e-05,
"loss": 0.0049,
"step": 1554
},
{
"epoch": 26.82758620689655,
"grad_norm": 0.014619125053286552,
"learning_rate": 1.1749680715197957e-05,
"loss": 0.0012,
"step": 1556
},
{
"epoch": 26.862068965517242,
"grad_norm": 0.18940378725528717,
"learning_rate": 1.1621966794380588e-05,
"loss": 0.0013,
"step": 1558
},
{
"epoch": 26.896551724137932,
"grad_norm": 0.002744639990851283,
"learning_rate": 1.1494252873563218e-05,
"loss": 0.0003,
"step": 1560
},
{
"epoch": 26.93103448275862,
"grad_norm": 0.14458243548870087,
"learning_rate": 1.136653895274585e-05,
"loss": 0.0017,
"step": 1562
},
{
"epoch": 26.96551724137931,
"grad_norm": 0.44180047512054443,
"learning_rate": 1.123882503192848e-05,
"loss": 0.0033,
"step": 1564
},
{
"epoch": 27.0,
"grad_norm": 1.8280788660049438,
"learning_rate": 1.1111111111111112e-05,
"loss": 0.0053,
"step": 1566
},
{
"epoch": 27.0,
"eval_accuracy": 0.6666666666666666,
"eval_f1_macro": 0.5719249734426743,
"eval_f1_micro": 0.6666666666666666,
"eval_f1_weighted": 0.6555785069889042,
"eval_loss": 1.5308274030685425,
"eval_precision_macro": 0.5667354028698567,
"eval_precision_micro": 0.6666666666666666,
"eval_precision_weighted": 0.652369328839917,
"eval_recall_macro": 0.5842575187969924,
"eval_recall_micro": 0.6666666666666666,
"eval_recall_weighted": 0.6666666666666666,
"eval_runtime": 1.9794,
"eval_samples_per_second": 66.687,
"eval_steps_per_second": 8.588,
"step": 1566
},
{
"epoch": 27.03448275862069,
"grad_norm": 0.028027068823575974,
"learning_rate": 1.0983397190293743e-05,
"loss": 0.0007,
"step": 1568
},
{
"epoch": 27.06896551724138,
"grad_norm": 0.017265431582927704,
"learning_rate": 1.0855683269476373e-05,
"loss": 0.0006,
"step": 1570
},
{
"epoch": 27.103448275862068,
"grad_norm": 0.004702151753008366,
"learning_rate": 1.0727969348659005e-05,
"loss": 0.0078,
"step": 1572
},
{
"epoch": 27.137931034482758,
"grad_norm": 1.1751693487167358,
"learning_rate": 1.0600255427841635e-05,
"loss": 0.0102,
"step": 1574
},
{
"epoch": 27.17241379310345,
"grad_norm": 0.042279455810785294,
"learning_rate": 1.0472541507024267e-05,
"loss": 0.001,
"step": 1576
},
{
"epoch": 27.20689655172414,
"grad_norm": 0.013697315938770771,
"learning_rate": 1.0344827586206897e-05,
"loss": 0.0005,
"step": 1578
},
{
"epoch": 27.24137931034483,
"grad_norm": 0.1087651401758194,
"learning_rate": 1.0217113665389528e-05,
"loss": 0.0009,
"step": 1580
},
{
"epoch": 27.275862068965516,
"grad_norm": 0.040765080600976944,
"learning_rate": 1.0089399744572158e-05,
"loss": 0.0015,
"step": 1582
},
{
"epoch": 27.310344827586206,
"grad_norm": 0.15862275660037994,
"learning_rate": 9.96168582375479e-06,
"loss": 0.0029,
"step": 1584
},
{
"epoch": 27.344827586206897,
"grad_norm": 0.014304181560873985,
"learning_rate": 9.833971902937422e-06,
"loss": 0.045,
"step": 1586
},
{
"epoch": 27.379310344827587,
"grad_norm": 0.4837823808193207,
"learning_rate": 9.706257982120052e-06,
"loss": 0.003,
"step": 1588
},
{
"epoch": 27.413793103448278,
"grad_norm": 0.00520712835714221,
"learning_rate": 9.578544061302683e-06,
"loss": 0.0008,
"step": 1590
},
{
"epoch": 27.448275862068964,
"grad_norm": 0.026446396484971046,
"learning_rate": 9.450830140485313e-06,
"loss": 0.0005,
"step": 1592
},
{
"epoch": 27.482758620689655,
"grad_norm": 0.021808737888932228,
"learning_rate": 9.323116219667945e-06,
"loss": 0.0008,
"step": 1594
},
{
"epoch": 27.517241379310345,
"grad_norm": 1.217366337776184,
"learning_rate": 9.195402298850575e-06,
"loss": 0.0255,
"step": 1596
},
{
"epoch": 27.551724137931036,
"grad_norm": 0.04630236327648163,
"learning_rate": 9.067688378033207e-06,
"loss": 0.0011,
"step": 1598
},
{
"epoch": 27.586206896551722,
"grad_norm": 0.029485022649168968,
"learning_rate": 8.939974457215837e-06,
"loss": 0.0004,
"step": 1600
},
{
"epoch": 27.620689655172413,
"grad_norm": 0.6011648178100586,
"learning_rate": 8.812260536398467e-06,
"loss": 0.0031,
"step": 1602
},
{
"epoch": 27.655172413793103,
"grad_norm": 0.016739701852202415,
"learning_rate": 8.684546615581098e-06,
"loss": 0.0005,
"step": 1604
},
{
"epoch": 27.689655172413794,
"grad_norm": 0.07989770919084549,
"learning_rate": 8.55683269476373e-06,
"loss": 0.0007,
"step": 1606
},
{
"epoch": 27.724137931034484,
"grad_norm": 0.009848535992205143,
"learning_rate": 8.429118773946362e-06,
"loss": 0.0004,
"step": 1608
},
{
"epoch": 27.75862068965517,
"grad_norm": 0.007213211618363857,
"learning_rate": 8.301404853128992e-06,
"loss": 0.0007,
"step": 1610
},
{
"epoch": 27.79310344827586,
"grad_norm": 0.08470471948385239,
"learning_rate": 8.173690932311623e-06,
"loss": 0.0017,
"step": 1612
},
{
"epoch": 27.82758620689655,
"grad_norm": 0.11105465888977051,
"learning_rate": 8.045977011494253e-06,
"loss": 0.001,
"step": 1614
},
{
"epoch": 27.862068965517242,
"grad_norm": 0.38511380553245544,
"learning_rate": 7.918263090676885e-06,
"loss": 0.0039,
"step": 1616
},
{
"epoch": 27.896551724137932,
"grad_norm": 0.07350783795118332,
"learning_rate": 7.790549169859515e-06,
"loss": 0.0011,
"step": 1618
},
{
"epoch": 27.93103448275862,
"grad_norm": 0.0037571904249489307,
"learning_rate": 7.662835249042145e-06,
"loss": 0.0004,
"step": 1620
},
{
"epoch": 27.96551724137931,
"grad_norm": 0.283407986164093,
"learning_rate": 7.535121328224777e-06,
"loss": 0.0016,
"step": 1622
},
{
"epoch": 28.0,
"grad_norm": 0.025982772931456566,
"learning_rate": 7.4074074074074075e-06,
"loss": 0.0004,
"step": 1624
},
{
"epoch": 28.0,
"eval_accuracy": 0.6666666666666666,
"eval_f1_macro": 0.5732077187394248,
"eval_f1_micro": 0.6666666666666666,
"eval_f1_weighted": 0.6617322262037608,
"eval_loss": 1.5639015436172485,
"eval_precision_macro": 0.5684216325737007,
"eval_precision_micro": 0.6666666666666666,
"eval_precision_weighted": 0.667311206663744,
"eval_recall_macro": 0.5867167919799499,
"eval_recall_micro": 0.6666666666666666,
"eval_recall_weighted": 0.6666666666666666,
"eval_runtime": 2.0384,
"eval_samples_per_second": 64.757,
"eval_steps_per_second": 8.34,
"step": 1624
},
{
"epoch": 28.03448275862069,
"grad_norm": 0.23507054150104523,
"learning_rate": 7.279693486590039e-06,
"loss": 0.003,
"step": 1626
},
{
"epoch": 28.06896551724138,
"grad_norm": 0.0050230068154633045,
"learning_rate": 7.151979565772669e-06,
"loss": 0.0006,
"step": 1628
},
{
"epoch": 28.103448275862068,
"grad_norm": 0.1437883973121643,
"learning_rate": 7.024265644955301e-06,
"loss": 0.0018,
"step": 1630
},
{
"epoch": 28.137931034482758,
"grad_norm": 1.1382052898406982,
"learning_rate": 6.896551724137932e-06,
"loss": 0.0066,
"step": 1632
},
{
"epoch": 28.17241379310345,
"grad_norm": 0.00969479139894247,
"learning_rate": 6.768837803320563e-06,
"loss": 0.0004,
"step": 1634
},
{
"epoch": 28.20689655172414,
"grad_norm": 0.015258271247148514,
"learning_rate": 6.641123882503193e-06,
"loss": 0.0006,
"step": 1636
},
{
"epoch": 28.24137931034483,
"grad_norm": 0.057105790823698044,
"learning_rate": 6.513409961685823e-06,
"loss": 0.0007,
"step": 1638
},
{
"epoch": 28.275862068965516,
"grad_norm": 0.007530784234404564,
"learning_rate": 6.385696040868455e-06,
"loss": 0.0042,
"step": 1640
},
{
"epoch": 28.310344827586206,
"grad_norm": 0.05503424257040024,
"learning_rate": 6.257982120051086e-06,
"loss": 0.0009,
"step": 1642
},
{
"epoch": 28.344827586206897,
"grad_norm": 0.19767296314239502,
"learning_rate": 6.130268199233717e-06,
"loss": 0.1093,
"step": 1644
},
{
"epoch": 28.379310344827587,
"grad_norm": 0.023296566680073738,
"learning_rate": 6.0025542784163475e-06,
"loss": 0.001,
"step": 1646
},
{
"epoch": 28.413793103448278,
"grad_norm": 0.018203437328338623,
"learning_rate": 5.874840357598978e-06,
"loss": 0.0004,
"step": 1648
},
{
"epoch": 28.448275862068964,
"grad_norm": 0.042694464325904846,
"learning_rate": 5.747126436781609e-06,
"loss": 0.0007,
"step": 1650
},
{
"epoch": 28.482758620689655,
"grad_norm": 0.019331173971295357,
"learning_rate": 5.61941251596424e-06,
"loss": 0.0005,
"step": 1652
},
{
"epoch": 28.517241379310345,
"grad_norm": 0.03333083540201187,
"learning_rate": 5.491698595146872e-06,
"loss": 0.0011,
"step": 1654
},
{
"epoch": 28.551724137931036,
"grad_norm": 0.005839875899255276,
"learning_rate": 5.3639846743295025e-06,
"loss": 0.0006,
"step": 1656
},
{
"epoch": 28.586206896551722,
"grad_norm": 0.040753502398729324,
"learning_rate": 5.236270753512133e-06,
"loss": 0.0005,
"step": 1658
},
{
"epoch": 28.620689655172413,
"grad_norm": 0.0062615578062832355,
"learning_rate": 5.108556832694764e-06,
"loss": 0.0009,
"step": 1660
},
{
"epoch": 28.655172413793103,
"grad_norm": 0.024959621950984,
"learning_rate": 4.980842911877395e-06,
"loss": 0.0006,
"step": 1662
},
{
"epoch": 28.689655172413794,
"grad_norm": 0.006987102795392275,
"learning_rate": 4.853128991060026e-06,
"loss": 0.0007,
"step": 1664
},
{
"epoch": 28.724137931034484,
"grad_norm": 0.005346678197383881,
"learning_rate": 4.725415070242657e-06,
"loss": 0.0003,
"step": 1666
},
{
"epoch": 28.75862068965517,
"grad_norm": 0.08393517136573792,
"learning_rate": 4.5977011494252875e-06,
"loss": 0.0013,
"step": 1668
},
{
"epoch": 28.79310344827586,
"grad_norm": 0.01564914733171463,
"learning_rate": 4.469987228607918e-06,
"loss": 0.0206,
"step": 1670
},
{
"epoch": 28.82758620689655,
"grad_norm": 0.04356468468904495,
"learning_rate": 4.342273307790549e-06,
"loss": 0.0007,
"step": 1672
},
{
"epoch": 28.862068965517242,
"grad_norm": 0.056500744074583054,
"learning_rate": 4.214559386973181e-06,
"loss": 0.0011,
"step": 1674
},
{
"epoch": 28.896551724137932,
"grad_norm": 0.1394091099500656,
"learning_rate": 4.086845466155812e-06,
"loss": 0.0012,
"step": 1676
},
{
"epoch": 28.93103448275862,
"grad_norm": 8.025191307067871,
"learning_rate": 3.9591315453384425e-06,
"loss": 0.0235,
"step": 1678
},
{
"epoch": 28.96551724137931,
"grad_norm": 0.016247062012553215,
"learning_rate": 3.8314176245210725e-06,
"loss": 0.0005,
"step": 1680
},
{
"epoch": 29.0,
"grad_norm": 0.02568941004574299,
"learning_rate": 3.7037037037037037e-06,
"loss": 0.0007,
"step": 1682
},
{
"epoch": 29.0,
"eval_accuracy": 0.6742424242424242,
"eval_f1_macro": 0.5835078400423941,
"eval_f1_micro": 0.6742424242424242,
"eval_f1_weighted": 0.6678458659891735,
"eval_loss": 1.5345948934555054,
"eval_precision_macro": 0.5786037923941507,
"eval_precision_micro": 0.6742424242424242,
"eval_precision_weighted": 0.6702666872171518,
"eval_recall_macro": 0.5965382205513785,
"eval_recall_micro": 0.6742424242424242,
"eval_recall_weighted": 0.6742424242424242,
"eval_runtime": 2.0037,
"eval_samples_per_second": 65.879,
"eval_steps_per_second": 8.484,
"step": 1682
},
{
"epoch": 29.03448275862069,
"grad_norm": 0.007548394612967968,
"learning_rate": 3.5759897828863346e-06,
"loss": 0.0008,
"step": 1684
},
{
"epoch": 29.06896551724138,
"grad_norm": 0.11199589818716049,
"learning_rate": 3.448275862068966e-06,
"loss": 0.0011,
"step": 1686
},
{
"epoch": 29.103448275862068,
"grad_norm": 0.01992984302341938,
"learning_rate": 3.3205619412515967e-06,
"loss": 0.0005,
"step": 1688
},
{
"epoch": 29.137931034482758,
"grad_norm": 0.012301865965127945,
"learning_rate": 3.1928480204342275e-06,
"loss": 0.0006,
"step": 1690
},
{
"epoch": 29.17241379310345,
"grad_norm": 0.02116016112267971,
"learning_rate": 3.0651340996168583e-06,
"loss": 0.0097,
"step": 1692
},
{
"epoch": 29.20689655172414,
"grad_norm": 0.020142151042819023,
"learning_rate": 2.937420178799489e-06,
"loss": 0.0006,
"step": 1694
},
{
"epoch": 29.24137931034483,
"grad_norm": 0.042330626398324966,
"learning_rate": 2.80970625798212e-06,
"loss": 0.0009,
"step": 1696
},
{
"epoch": 29.275862068965516,
"grad_norm": 2.6495234966278076,
"learning_rate": 2.6819923371647512e-06,
"loss": 0.0576,
"step": 1698
},
{
"epoch": 29.310344827586206,
"grad_norm": 0.06072988361120224,
"learning_rate": 2.554278416347382e-06,
"loss": 0.0014,
"step": 1700
},
{
"epoch": 29.344827586206897,
"grad_norm": 0.19377738237380981,
"learning_rate": 2.426564495530013e-06,
"loss": 0.0018,
"step": 1702
},
{
"epoch": 29.379310344827587,
"grad_norm": 0.05790168046951294,
"learning_rate": 2.2988505747126437e-06,
"loss": 0.0007,
"step": 1704
},
{
"epoch": 29.413793103448278,
"grad_norm": 0.03023909032344818,
"learning_rate": 2.1711366538952746e-06,
"loss": 0.04,
"step": 1706
},
{
"epoch": 29.448275862068964,
"grad_norm": 0.039096005260944366,
"learning_rate": 2.043422733077906e-06,
"loss": 0.0009,
"step": 1708
},
{
"epoch": 29.482758620689655,
"grad_norm": 1.9999027252197266,
"learning_rate": 1.9157088122605362e-06,
"loss": 0.0383,
"step": 1710
},
{
"epoch": 29.517241379310345,
"grad_norm": 0.01942484639585018,
"learning_rate": 1.7879948914431673e-06,
"loss": 0.0004,
"step": 1712
},
{
"epoch": 29.551724137931036,
"grad_norm": 0.019709143787622452,
"learning_rate": 1.6602809706257983e-06,
"loss": 0.0006,
"step": 1714
},
{
"epoch": 29.586206896551722,
"grad_norm": 0.01487061008810997,
"learning_rate": 1.5325670498084292e-06,
"loss": 0.0003,
"step": 1716
},
{
"epoch": 29.620689655172413,
"grad_norm": 0.0076087117195129395,
"learning_rate": 1.40485312899106e-06,
"loss": 0.0003,
"step": 1718
},
{
"epoch": 29.655172413793103,
"grad_norm": 0.01488451100885868,
"learning_rate": 1.277139208173691e-06,
"loss": 0.0005,
"step": 1720
},
{
"epoch": 29.689655172413794,
"grad_norm": 0.00445834593847394,
"learning_rate": 1.1494252873563219e-06,
"loss": 0.0004,
"step": 1722
},
{
"epoch": 29.724137931034484,
"grad_norm": 0.01872408203780651,
"learning_rate": 1.021711366538953e-06,
"loss": 0.0005,
"step": 1724
},
{
"epoch": 29.75862068965517,
"grad_norm": 0.010615495964884758,
"learning_rate": 8.939974457215836e-07,
"loss": 0.0003,
"step": 1726
},
{
"epoch": 29.79310344827586,
"grad_norm": 0.015271478332579136,
"learning_rate": 7.662835249042146e-07,
"loss": 0.0005,
"step": 1728
},
{
"epoch": 29.82758620689655,
"grad_norm": 0.2127443104982376,
"learning_rate": 6.385696040868455e-07,
"loss": 0.002,
"step": 1730
},
{
"epoch": 29.862068965517242,
"grad_norm": 0.01297774724662304,
"learning_rate": 5.108556832694765e-07,
"loss": 0.0004,
"step": 1732
},
{
"epoch": 29.896551724137932,
"grad_norm": 0.010099313221871853,
"learning_rate": 3.831417624521073e-07,
"loss": 0.0064,
"step": 1734
},
{
"epoch": 29.93103448275862,
"grad_norm": 0.03221974894404411,
"learning_rate": 2.5542784163473823e-07,
"loss": 0.0005,
"step": 1736
},
{
"epoch": 29.96551724137931,
"grad_norm": 0.11465907841920853,
"learning_rate": 1.2771392081736911e-07,
"loss": 0.0009,
"step": 1738
},
{
"epoch": 30.0,
"grad_norm": 0.026466330513358116,
"learning_rate": 0.0,
"loss": 0.0004,
"step": 1740
},
{
"epoch": 30.0,
"eval_accuracy": 0.6742424242424242,
"eval_f1_macro": 0.5791488237085879,
"eval_f1_micro": 0.6742424242424242,
"eval_f1_weighted": 0.6661361118395798,
"eval_loss": 1.5231688022613525,
"eval_precision_macro": 0.5707257261879112,
"eval_precision_micro": 0.6742424242424242,
"eval_precision_weighted": 0.6628039716275012,
"eval_recall_macro": 0.5917763157894738,
"eval_recall_micro": 0.6742424242424242,
"eval_recall_weighted": 0.6742424242424242,
"eval_runtime": 2.1557,
"eval_samples_per_second": 61.234,
"eval_steps_per_second": 7.886,
"step": 1740
},
{
"epoch": 30.0,
"step": 1740,
"total_flos": 1.0740871074163507e+18,
"train_loss": 0.5156426236089975,
"train_runtime": 378.1612,
"train_samples_per_second": 36.651,
"train_steps_per_second": 4.601
}
],
"logging_steps": 2,
"max_steps": 1740,
"num_input_tokens_seen": 0,
"num_train_epochs": 30,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.0740871074163507e+18,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}