{
"best_metric": 1.3132938146591187,
"best_model_checkpoint": "WinKawaks/vit-tiny-patch16-224/checkpoint-319",
"epoch": 25.0,
"eval_steps": 500,
"global_step": 725,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.06896551724137931,
"grad_norm": 50.05537033081055,
"learning_rate": 2.7397260273972604e-06,
"loss": 2.392,
"step": 2
},
{
"epoch": 0.13793103448275862,
"grad_norm": 21.303316116333008,
"learning_rate": 5.479452054794521e-06,
"loss": 1.993,
"step": 4
},
{
"epoch": 0.20689655172413793,
"grad_norm": 29.138675689697266,
"learning_rate": 8.21917808219178e-06,
"loss": 1.9911,
"step": 6
},
{
"epoch": 0.27586206896551724,
"grad_norm": 24.99004364013672,
"learning_rate": 1.0958904109589042e-05,
"loss": 2.1828,
"step": 8
},
{
"epoch": 0.3448275862068966,
"grad_norm": 29.86865234375,
"learning_rate": 1.3698630136986302e-05,
"loss": 1.8818,
"step": 10
},
{
"epoch": 0.41379310344827586,
"grad_norm": 33.12383270263672,
"learning_rate": 1.643835616438356e-05,
"loss": 2.0859,
"step": 12
},
{
"epoch": 0.4827586206896552,
"grad_norm": 25.435964584350586,
"learning_rate": 1.9178082191780822e-05,
"loss": 2.0565,
"step": 14
},
{
"epoch": 0.5517241379310345,
"grad_norm": 21.697423934936523,
"learning_rate": 2.1917808219178083e-05,
"loss": 1.9561,
"step": 16
},
{
"epoch": 0.6206896551724138,
"grad_norm": 21.292062759399414,
"learning_rate": 2.4657534246575342e-05,
"loss": 1.8509,
"step": 18
},
{
"epoch": 0.6896551724137931,
"grad_norm": 25.63444709777832,
"learning_rate": 2.7397260273972603e-05,
"loss": 1.9259,
"step": 20
},
{
"epoch": 0.7586206896551724,
"grad_norm": 12.96942138671875,
"learning_rate": 3.0136986301369862e-05,
"loss": 1.8952,
"step": 22
},
{
"epoch": 0.8275862068965517,
"grad_norm": 13.86652660369873,
"learning_rate": 3.287671232876712e-05,
"loss": 1.9227,
"step": 24
},
{
"epoch": 0.896551724137931,
"grad_norm": 28.36823081970215,
"learning_rate": 3.561643835616438e-05,
"loss": 1.8958,
"step": 26
},
{
"epoch": 0.9655172413793104,
"grad_norm": 18.257474899291992,
"learning_rate": 3.8356164383561644e-05,
"loss": 1.9719,
"step": 28
},
{
"epoch": 1.0,
"eval_accuracy": 0.2196969696969697,
"eval_f1_macro": 0.09493363779078065,
"eval_f1_micro": 0.2196969696969697,
"eval_f1_weighted": 0.11546937910574274,
"eval_loss": 1.9209331274032593,
"eval_precision_macro": 0.08906952611553096,
"eval_precision_micro": 0.2196969696969697,
"eval_precision_weighted": 0.10509758602978943,
"eval_recall_macro": 0.17224489795918368,
"eval_recall_micro": 0.2196969696969697,
"eval_recall_weighted": 0.2196969696969697,
"eval_runtime": 2.2105,
"eval_samples_per_second": 59.716,
"eval_steps_per_second": 7.691,
"step": 29
},
{
"epoch": 1.0344827586206897,
"grad_norm": 28.444780349731445,
"learning_rate": 4.1095890410958905e-05,
"loss": 1.8292,
"step": 30
},
{
"epoch": 1.103448275862069,
"grad_norm": 16.733516693115234,
"learning_rate": 4.383561643835617e-05,
"loss": 1.8189,
"step": 32
},
{
"epoch": 1.1724137931034484,
"grad_norm": 16.255664825439453,
"learning_rate": 4.657534246575342e-05,
"loss": 1.8846,
"step": 34
},
{
"epoch": 1.2413793103448276,
"grad_norm": 15.87771987915039,
"learning_rate": 4.9315068493150684e-05,
"loss": 1.9816,
"step": 36
},
{
"epoch": 1.3103448275862069,
"grad_norm": 20.748777389526367,
"learning_rate": 5.2054794520547945e-05,
"loss": 1.8633,
"step": 38
},
{
"epoch": 1.3793103448275863,
"grad_norm": 23.478870391845703,
"learning_rate": 5.479452054794521e-05,
"loss": 1.9196,
"step": 40
},
{
"epoch": 1.4482758620689655,
"grad_norm": 14.190969467163086,
"learning_rate": 5.753424657534247e-05,
"loss": 1.8152,
"step": 42
},
{
"epoch": 1.5172413793103448,
"grad_norm": 27.225452423095703,
"learning_rate": 6.0273972602739724e-05,
"loss": 1.879,
"step": 44
},
{
"epoch": 1.5862068965517242,
"grad_norm": 16.791467666625977,
"learning_rate": 6.301369863013699e-05,
"loss": 1.851,
"step": 46
},
{
"epoch": 1.6551724137931034,
"grad_norm": 17.566041946411133,
"learning_rate": 6.575342465753424e-05,
"loss": 1.8164,
"step": 48
},
{
"epoch": 1.7241379310344827,
"grad_norm": 24.76405906677246,
"learning_rate": 6.84931506849315e-05,
"loss": 2.1271,
"step": 50
},
{
"epoch": 1.793103448275862,
"grad_norm": 21.847782135009766,
"learning_rate": 7.123287671232876e-05,
"loss": 1.7536,
"step": 52
},
{
"epoch": 1.8620689655172413,
"grad_norm": 19.893037796020508,
"learning_rate": 7.397260273972603e-05,
"loss": 1.7816,
"step": 54
},
{
"epoch": 1.9310344827586206,
"grad_norm": 14.866455078125,
"learning_rate": 7.671232876712329e-05,
"loss": 1.5881,
"step": 56
},
{
"epoch": 2.0,
"grad_norm": 20.373685836791992,
"learning_rate": 7.945205479452055e-05,
"loss": 1.8717,
"step": 58
},
{
"epoch": 2.0,
"eval_accuracy": 0.19696969696969696,
"eval_f1_macro": 0.09534116676973821,
"eval_f1_micro": 0.19696969696969696,
"eval_f1_weighted": 0.10689689098780009,
"eval_loss": 2.0377984046936035,
"eval_precision_macro": 0.19963369963369962,
"eval_precision_micro": 0.19696969696969696,
"eval_precision_weighted": 0.266025641025641,
"eval_recall_macro": 0.17944066515495088,
"eval_recall_micro": 0.19696969696969696,
"eval_recall_weighted": 0.19696969696969696,
"eval_runtime": 2.1913,
"eval_samples_per_second": 60.238,
"eval_steps_per_second": 7.758,
"step": 58
},
{
"epoch": 2.0689655172413794,
"grad_norm": 21.545093536376953,
"learning_rate": 8.219178082191781e-05,
"loss": 1.752,
"step": 60
},
{
"epoch": 2.1379310344827585,
"grad_norm": 15.046628952026367,
"learning_rate": 8.493150684931507e-05,
"loss": 1.7137,
"step": 62
},
{
"epoch": 2.206896551724138,
"grad_norm": 21.083383560180664,
"learning_rate": 8.767123287671233e-05,
"loss": 1.7003,
"step": 64
},
{
"epoch": 2.2758620689655173,
"grad_norm": 37.4631462097168,
"learning_rate": 9.041095890410958e-05,
"loss": 1.9736,
"step": 66
},
{
"epoch": 2.344827586206897,
"grad_norm": 19.171171188354492,
"learning_rate": 9.315068493150684e-05,
"loss": 1.7332,
"step": 68
},
{
"epoch": 2.413793103448276,
"grad_norm": 16.743974685668945,
"learning_rate": 9.58904109589041e-05,
"loss": 1.819,
"step": 70
},
{
"epoch": 2.4827586206896552,
"grad_norm": 20.756649017333984,
"learning_rate": 9.863013698630137e-05,
"loss": 1.9201,
"step": 72
},
{
"epoch": 2.5517241379310347,
"grad_norm": 17.703954696655273,
"learning_rate": 9.984662576687117e-05,
"loss": 1.6147,
"step": 74
},
{
"epoch": 2.6206896551724137,
"grad_norm": 32.903743743896484,
"learning_rate": 9.95398773006135e-05,
"loss": 2.0663,
"step": 76
},
{
"epoch": 2.689655172413793,
"grad_norm": 23.617671966552734,
"learning_rate": 9.923312883435584e-05,
"loss": 1.9193,
"step": 78
},
{
"epoch": 2.7586206896551726,
"grad_norm": 30.0602970123291,
"learning_rate": 9.892638036809816e-05,
"loss": 1.7659,
"step": 80
},
{
"epoch": 2.8275862068965516,
"grad_norm": 22.806859970092773,
"learning_rate": 9.861963190184049e-05,
"loss": 1.4494,
"step": 82
},
{
"epoch": 2.896551724137931,
"grad_norm": 16.40064811706543,
"learning_rate": 9.831288343558283e-05,
"loss": 1.6664,
"step": 84
},
{
"epoch": 2.9655172413793105,
"grad_norm": 24.703716278076172,
"learning_rate": 9.800613496932515e-05,
"loss": 1.9326,
"step": 86
},
{
"epoch": 3.0,
"eval_accuracy": 0.3939393939393939,
"eval_f1_macro": 0.2290203682353352,
"eval_f1_micro": 0.3939393939393939,
"eval_f1_weighted": 0.2938602829468567,
"eval_loss": 1.7679647207260132,
"eval_precision_macro": 0.21510735641170425,
"eval_precision_micro": 0.3939393939393939,
"eval_precision_weighted": 0.268160089800406,
"eval_recall_macro": 0.30038548752834465,
"eval_recall_micro": 0.3939393939393939,
"eval_recall_weighted": 0.3939393939393939,
"eval_runtime": 2.411,
"eval_samples_per_second": 54.749,
"eval_steps_per_second": 7.051,
"step": 87
},
{
"epoch": 3.0344827586206895,
"grad_norm": 34.48782730102539,
"learning_rate": 9.76993865030675e-05,
"loss": 1.9289,
"step": 88
},
{
"epoch": 3.103448275862069,
"grad_norm": 20.898527145385742,
"learning_rate": 9.739263803680982e-05,
"loss": 1.5641,
"step": 90
},
{
"epoch": 3.1724137931034484,
"grad_norm": 20.089338302612305,
"learning_rate": 9.708588957055215e-05,
"loss": 1.6451,
"step": 92
},
{
"epoch": 3.2413793103448274,
"grad_norm": 21.63447380065918,
"learning_rate": 9.677914110429448e-05,
"loss": 1.5942,
"step": 94
},
{
"epoch": 3.310344827586207,
"grad_norm": 18.611299514770508,
"learning_rate": 9.647239263803681e-05,
"loss": 1.4619,
"step": 96
},
{
"epoch": 3.3793103448275863,
"grad_norm": 14.333738327026367,
"learning_rate": 9.616564417177915e-05,
"loss": 1.6001,
"step": 98
},
{
"epoch": 3.4482758620689653,
"grad_norm": 16.133724212646484,
"learning_rate": 9.585889570552147e-05,
"loss": 1.3741,
"step": 100
},
{
"epoch": 3.5172413793103448,
"grad_norm": 29.28097915649414,
"learning_rate": 9.555214723926381e-05,
"loss": 1.6927,
"step": 102
},
{
"epoch": 3.586206896551724,
"grad_norm": 17.335416793823242,
"learning_rate": 9.524539877300614e-05,
"loss": 1.1759,
"step": 104
},
{
"epoch": 3.655172413793103,
"grad_norm": 25.239665985107422,
"learning_rate": 9.493865030674846e-05,
"loss": 1.4696,
"step": 106
},
{
"epoch": 3.7241379310344827,
"grad_norm": 19.4155330657959,
"learning_rate": 9.46319018404908e-05,
"loss": 1.0003,
"step": 108
},
{
"epoch": 3.793103448275862,
"grad_norm": 20.292905807495117,
"learning_rate": 9.432515337423313e-05,
"loss": 1.3851,
"step": 110
},
{
"epoch": 3.862068965517241,
"grad_norm": 31.247602462768555,
"learning_rate": 9.401840490797547e-05,
"loss": 1.6519,
"step": 112
},
{
"epoch": 3.9310344827586206,
"grad_norm": 25.56925392150879,
"learning_rate": 9.37116564417178e-05,
"loss": 1.4111,
"step": 114
},
{
"epoch": 4.0,
"grad_norm": 23.311908721923828,
"learning_rate": 9.340490797546013e-05,
"loss": 1.2873,
"step": 116
},
{
"epoch": 4.0,
"eval_accuracy": 0.44696969696969696,
"eval_f1_macro": 0.35023855102631046,
"eval_f1_micro": 0.44696969696969696,
"eval_f1_weighted": 0.40824342918240536,
"eval_loss": 1.5892395973205566,
"eval_precision_macro": 0.4831240188383045,
"eval_precision_micro": 0.44696969696969696,
"eval_precision_weighted": 0.513986013986014,
"eval_recall_macro": 0.36461829176114885,
"eval_recall_micro": 0.44696969696969696,
"eval_recall_weighted": 0.44696969696969696,
"eval_runtime": 2.1869,
"eval_samples_per_second": 60.359,
"eval_steps_per_second": 7.774,
"step": 116
},
{
"epoch": 4.068965517241379,
"grad_norm": 31.80891227722168,
"learning_rate": 9.309815950920246e-05,
"loss": 1.2896,
"step": 118
},
{
"epoch": 4.137931034482759,
"grad_norm": 22.716781616210938,
"learning_rate": 9.279141104294478e-05,
"loss": 1.2486,
"step": 120
},
{
"epoch": 4.206896551724138,
"grad_norm": 23.80237579345703,
"learning_rate": 9.248466257668712e-05,
"loss": 1.3835,
"step": 122
},
{
"epoch": 4.275862068965517,
"grad_norm": 18.24744987487793,
"learning_rate": 9.217791411042945e-05,
"loss": 0.9575,
"step": 124
},
{
"epoch": 4.344827586206897,
"grad_norm": 22.790117263793945,
"learning_rate": 9.187116564417179e-05,
"loss": 1.3711,
"step": 126
},
{
"epoch": 4.413793103448276,
"grad_norm": 15.804698944091797,
"learning_rate": 9.156441717791411e-05,
"loss": 1.4226,
"step": 128
},
{
"epoch": 4.482758620689655,
"grad_norm": 30.167261123657227,
"learning_rate": 9.125766871165644e-05,
"loss": 1.7736,
"step": 130
},
{
"epoch": 4.551724137931035,
"grad_norm": 15.975086212158203,
"learning_rate": 9.095092024539878e-05,
"loss": 1.4395,
"step": 132
},
{
"epoch": 4.620689655172414,
"grad_norm": 23.406415939331055,
"learning_rate": 9.06441717791411e-05,
"loss": 1.6491,
"step": 134
},
{
"epoch": 4.689655172413794,
"grad_norm": 30.07583236694336,
"learning_rate": 9.033742331288344e-05,
"loss": 1.747,
"step": 136
},
{
"epoch": 4.758620689655173,
"grad_norm": 20.77846336364746,
"learning_rate": 9.003067484662577e-05,
"loss": 1.6648,
"step": 138
},
{
"epoch": 4.827586206896552,
"grad_norm": 11.187516212463379,
"learning_rate": 8.972392638036811e-05,
"loss": 1.1932,
"step": 140
},
{
"epoch": 4.896551724137931,
"grad_norm": 15.845014572143555,
"learning_rate": 8.941717791411043e-05,
"loss": 1.4271,
"step": 142
},
{
"epoch": 4.9655172413793105,
"grad_norm": 15.977095603942871,
"learning_rate": 8.911042944785276e-05,
"loss": 1.3997,
"step": 144
},
{
"epoch": 5.0,
"eval_accuracy": 0.5,
"eval_f1_macro": 0.34814087704047136,
"eval_f1_micro": 0.5,
"eval_f1_weighted": 0.4244756131292643,
"eval_loss": 1.4773013591766357,
"eval_precision_macro": 0.34626430480089015,
"eval_precision_micro": 0.5,
"eval_precision_weighted": 0.41194737757930666,
"eval_recall_macro": 0.4051700680272109,
"eval_recall_micro": 0.5,
"eval_recall_weighted": 0.5,
"eval_runtime": 2.2186,
"eval_samples_per_second": 59.497,
"eval_steps_per_second": 7.662,
"step": 145
},
{
"epoch": 5.0344827586206895,
"grad_norm": 18.65735626220703,
"learning_rate": 8.88036809815951e-05,
"loss": 1.6883,
"step": 146
},
{
"epoch": 5.103448275862069,
"grad_norm": 18.897695541381836,
"learning_rate": 8.849693251533742e-05,
"loss": 1.3035,
"step": 148
},
{
"epoch": 5.172413793103448,
"grad_norm": 20.19015884399414,
"learning_rate": 8.819018404907976e-05,
"loss": 1.1881,
"step": 150
},
{
"epoch": 5.241379310344827,
"grad_norm": 18.574827194213867,
"learning_rate": 8.788343558282209e-05,
"loss": 1.0471,
"step": 152
},
{
"epoch": 5.310344827586207,
"grad_norm": 15.6314115524292,
"learning_rate": 8.757668711656443e-05,
"loss": 1.2302,
"step": 154
},
{
"epoch": 5.379310344827586,
"grad_norm": 20.703832626342773,
"learning_rate": 8.726993865030675e-05,
"loss": 1.1877,
"step": 156
},
{
"epoch": 5.448275862068965,
"grad_norm": 28.778301239013672,
"learning_rate": 8.696319018404908e-05,
"loss": 1.5936,
"step": 158
},
{
"epoch": 5.517241379310345,
"grad_norm": 25.026071548461914,
"learning_rate": 8.665644171779142e-05,
"loss": 1.0255,
"step": 160
},
{
"epoch": 5.586206896551724,
"grad_norm": 17.47880744934082,
"learning_rate": 8.634969325153374e-05,
"loss": 1.3967,
"step": 162
},
{
"epoch": 5.655172413793103,
"grad_norm": 20.092926025390625,
"learning_rate": 8.604294478527608e-05,
"loss": 1.2006,
"step": 164
},
{
"epoch": 5.724137931034483,
"grad_norm": 17.11634635925293,
"learning_rate": 8.573619631901841e-05,
"loss": 1.1081,
"step": 166
},
{
"epoch": 5.793103448275862,
"grad_norm": 21.26296043395996,
"learning_rate": 8.542944785276073e-05,
"loss": 1.0949,
"step": 168
},
{
"epoch": 5.862068965517241,
"grad_norm": 31.731033325195312,
"learning_rate": 8.512269938650307e-05,
"loss": 1.3502,
"step": 170
},
{
"epoch": 5.931034482758621,
"grad_norm": 25.80246353149414,
"learning_rate": 8.48159509202454e-05,
"loss": 1.5673,
"step": 172
},
{
"epoch": 6.0,
"grad_norm": 23.60406494140625,
"learning_rate": 8.450920245398774e-05,
"loss": 1.7041,
"step": 174
},
{
"epoch": 6.0,
"eval_accuracy": 0.5378787878787878,
"eval_f1_macro": 0.4265559579914266,
"eval_f1_micro": 0.5378787878787878,
"eval_f1_weighted": 0.5005041399030636,
"eval_loss": 1.4405734539031982,
"eval_precision_macro": 0.5010893868036724,
"eval_precision_micro": 0.5378787878787878,
"eval_precision_weighted": 0.5628245491881856,
"eval_recall_macro": 0.4529024943310657,
"eval_recall_micro": 0.5378787878787878,
"eval_recall_weighted": 0.5378787878787878,
"eval_runtime": 2.215,
"eval_samples_per_second": 59.594,
"eval_steps_per_second": 7.675,
"step": 174
},
{
"epoch": 6.068965517241379,
"grad_norm": 20.430105209350586,
"learning_rate": 8.420245398773006e-05,
"loss": 1.0018,
"step": 176
},
{
"epoch": 6.137931034482759,
"grad_norm": 14.565896987915039,
"learning_rate": 8.38957055214724e-05,
"loss": 1.02,
"step": 178
},
{
"epoch": 6.206896551724138,
"grad_norm": 21.370939254760742,
"learning_rate": 8.358895705521473e-05,
"loss": 1.1403,
"step": 180
},
{
"epoch": 6.275862068965517,
"grad_norm": 16.42601776123047,
"learning_rate": 8.328220858895705e-05,
"loss": 1.4774,
"step": 182
},
{
"epoch": 6.344827586206897,
"grad_norm": 18.844532012939453,
"learning_rate": 8.297546012269939e-05,
"loss": 1.1568,
"step": 184
},
{
"epoch": 6.413793103448276,
"grad_norm": 12.009956359863281,
"learning_rate": 8.266871165644172e-05,
"loss": 0.8588,
"step": 186
},
{
"epoch": 6.482758620689655,
"grad_norm": 21.886213302612305,
"learning_rate": 8.236196319018406e-05,
"loss": 0.8875,
"step": 188
},
{
"epoch": 6.551724137931035,
"grad_norm": 16.68918800354004,
"learning_rate": 8.205521472392638e-05,
"loss": 1.2283,
"step": 190
},
{
"epoch": 6.620689655172414,
"grad_norm": 19.978803634643555,
"learning_rate": 8.174846625766872e-05,
"loss": 1.0841,
"step": 192
},
{
"epoch": 6.689655172413794,
"grad_norm": 16.348190307617188,
"learning_rate": 8.144171779141105e-05,
"loss": 0.9849,
"step": 194
},
{
"epoch": 6.758620689655173,
"grad_norm": 21.0911865234375,
"learning_rate": 8.113496932515337e-05,
"loss": 1.6812,
"step": 196
},
{
"epoch": 6.827586206896552,
"grad_norm": 17.614559173583984,
"learning_rate": 8.082822085889571e-05,
"loss": 1.0849,
"step": 198
},
{
"epoch": 6.896551724137931,
"grad_norm": 19.585248947143555,
"learning_rate": 8.052147239263804e-05,
"loss": 0.9886,
"step": 200
},
{
"epoch": 6.9655172413793105,
"grad_norm": 19.765750885009766,
"learning_rate": 8.021472392638038e-05,
"loss": 1.1863,
"step": 202
},
{
"epoch": 7.0,
"eval_accuracy": 0.5681818181818182,
"eval_f1_macro": 0.4758501564659626,
"eval_f1_micro": 0.5681818181818182,
"eval_f1_weighted": 0.5400274091359906,
"eval_loss": 1.3679978847503662,
"eval_precision_macro": 0.5558842701699845,
"eval_precision_micro": 0.5681818181818182,
"eval_precision_weighted": 0.603162194071285,
"eval_recall_macro": 0.4831443688586546,
"eval_recall_micro": 0.5681818181818182,
"eval_recall_weighted": 0.5681818181818182,
"eval_runtime": 2.1971,
"eval_samples_per_second": 60.078,
"eval_steps_per_second": 7.737,
"step": 203
},
{
"epoch": 7.0344827586206895,
"grad_norm": 12.333309173583984,
"learning_rate": 7.99079754601227e-05,
"loss": 0.8463,
"step": 204
},
{
"epoch": 7.103448275862069,
"grad_norm": 21.288188934326172,
"learning_rate": 7.960122699386503e-05,
"loss": 0.9617,
"step": 206
},
{
"epoch": 7.172413793103448,
"grad_norm": 23.408567428588867,
"learning_rate": 7.929447852760737e-05,
"loss": 1.2719,
"step": 208
},
{
"epoch": 7.241379310344827,
"grad_norm": 16.838363647460938,
"learning_rate": 7.898773006134969e-05,
"loss": 0.9394,
"step": 210
},
{
"epoch": 7.310344827586207,
"grad_norm": 19.406648635864258,
"learning_rate": 7.868098159509203e-05,
"loss": 1.0913,
"step": 212
},
{
"epoch": 7.379310344827586,
"grad_norm": 25.161184310913086,
"learning_rate": 7.837423312883436e-05,
"loss": 1.029,
"step": 214
},
{
"epoch": 7.448275862068965,
"grad_norm": 24.920177459716797,
"learning_rate": 7.80674846625767e-05,
"loss": 1.3211,
"step": 216
},
{
"epoch": 7.517241379310345,
"grad_norm": 22.075044631958008,
"learning_rate": 7.776073619631902e-05,
"loss": 1.226,
"step": 218
},
{
"epoch": 7.586206896551724,
"grad_norm": 17.07358169555664,
"learning_rate": 7.745398773006135e-05,
"loss": 0.8096,
"step": 220
},
{
"epoch": 7.655172413793103,
"grad_norm": 23.150299072265625,
"learning_rate": 7.714723926380369e-05,
"loss": 1.0163,
"step": 222
},
{
"epoch": 7.724137931034483,
"grad_norm": 19.737802505493164,
"learning_rate": 7.684049079754601e-05,
"loss": 1.0773,
"step": 224
},
{
"epoch": 7.793103448275862,
"grad_norm": 25.407928466796875,
"learning_rate": 7.653374233128835e-05,
"loss": 1.2907,
"step": 226
},
{
"epoch": 7.862068965517241,
"grad_norm": 14.86108112335205,
"learning_rate": 7.622699386503068e-05,
"loss": 0.9851,
"step": 228
},
{
"epoch": 7.931034482758621,
"grad_norm": 16.41703987121582,
"learning_rate": 7.5920245398773e-05,
"loss": 1.405,
"step": 230
},
{
"epoch": 8.0,
"grad_norm": 20.147233963012695,
"learning_rate": 7.561349693251534e-05,
"loss": 0.9817,
"step": 232
},
{
"epoch": 8.0,
"eval_accuracy": 0.5227272727272727,
"eval_f1_macro": 0.43986070618723677,
"eval_f1_micro": 0.5227272727272727,
"eval_f1_weighted": 0.4968812272383701,
"eval_loss": 1.3514596223831177,
"eval_precision_macro": 0.44448009061813915,
"eval_precision_micro": 0.5227272727272727,
"eval_precision_weighted": 0.5088011746058729,
"eval_recall_macro": 0.47222222222222215,
"eval_recall_micro": 0.5227272727272727,
"eval_recall_weighted": 0.5227272727272727,
"eval_runtime": 2.1836,
"eval_samples_per_second": 60.451,
"eval_steps_per_second": 7.785,
"step": 232
},
{
"epoch": 8.068965517241379,
"grad_norm": 19.058284759521484,
"learning_rate": 7.530674846625767e-05,
"loss": 0.7432,
"step": 234
},
{
"epoch": 8.137931034482758,
"grad_norm": 15.031048774719238,
"learning_rate": 7.500000000000001e-05,
"loss": 0.9929,
"step": 236
},
{
"epoch": 8.206896551724139,
"grad_norm": 22.36937713623047,
"learning_rate": 7.469325153374233e-05,
"loss": 1.121,
"step": 238
},
{
"epoch": 8.275862068965518,
"grad_norm": 20.049163818359375,
"learning_rate": 7.438650306748467e-05,
"loss": 1.0773,
"step": 240
},
{
"epoch": 8.344827586206897,
"grad_norm": 17.548959732055664,
"learning_rate": 7.4079754601227e-05,
"loss": 0.6477,
"step": 242
},
{
"epoch": 8.413793103448276,
"grad_norm": 25.496204376220703,
"learning_rate": 7.377300613496932e-05,
"loss": 0.8577,
"step": 244
},
{
"epoch": 8.482758620689655,
"grad_norm": 22.851713180541992,
"learning_rate": 7.346625766871166e-05,
"loss": 1.0034,
"step": 246
},
{
"epoch": 8.551724137931034,
"grad_norm": 26.218107223510742,
"learning_rate": 7.315950920245399e-05,
"loss": 0.7915,
"step": 248
},
{
"epoch": 8.620689655172415,
"grad_norm": 18.867645263671875,
"learning_rate": 7.285276073619633e-05,
"loss": 1.0731,
"step": 250
},
{
"epoch": 8.689655172413794,
"grad_norm": 16.624637603759766,
"learning_rate": 7.254601226993865e-05,
"loss": 0.706,
"step": 252
},
{
"epoch": 8.758620689655173,
"grad_norm": 26.590402603149414,
"learning_rate": 7.223926380368099e-05,
"loss": 1.1713,
"step": 254
},
{
"epoch": 8.827586206896552,
"grad_norm": 26.262710571289062,
"learning_rate": 7.193251533742332e-05,
"loss": 0.8558,
"step": 256
},
{
"epoch": 8.89655172413793,
"grad_norm": 24.299407958984375,
"learning_rate": 7.162576687116564e-05,
"loss": 1.2063,
"step": 258
},
{
"epoch": 8.96551724137931,
"grad_norm": 14.688630104064941,
"learning_rate": 7.131901840490798e-05,
"loss": 0.617,
"step": 260
},
{
"epoch": 9.0,
"eval_accuracy": 0.5909090909090909,
"eval_f1_macro": 0.48949553001277135,
"eval_f1_micro": 0.5909090909090909,
"eval_f1_weighted": 0.5554842002399473,
"eval_loss": 1.3866709470748901,
"eval_precision_macro": 0.5135558290637433,
"eval_precision_micro": 0.5909090909090909,
"eval_precision_weighted": 0.5775828309138267,
"eval_recall_macro": 0.5183068783068784,
"eval_recall_micro": 0.5909090909090909,
"eval_recall_weighted": 0.5909090909090909,
"eval_runtime": 2.1836,
"eval_samples_per_second": 60.452,
"eval_steps_per_second": 7.785,
"step": 261
},
{
"epoch": 9.03448275862069,
"grad_norm": 14.76333236694336,
"learning_rate": 7.101226993865031e-05,
"loss": 0.5944,
"step": 262
},
{
"epoch": 9.10344827586207,
"grad_norm": 24.155582427978516,
"learning_rate": 7.070552147239265e-05,
"loss": 0.7582,
"step": 264
},
{
"epoch": 9.172413793103448,
"grad_norm": 28.48207664489746,
"learning_rate": 7.039877300613497e-05,
"loss": 0.8912,
"step": 266
},
{
"epoch": 9.241379310344827,
"grad_norm": 10.216355323791504,
"learning_rate": 7.00920245398773e-05,
"loss": 0.6462,
"step": 268
},
{
"epoch": 9.310344827586206,
"grad_norm": 26.633636474609375,
"learning_rate": 6.978527607361964e-05,
"loss": 0.9824,
"step": 270
},
{
"epoch": 9.379310344827585,
"grad_norm": 24.09172821044922,
"learning_rate": 6.947852760736196e-05,
"loss": 0.6394,
"step": 272
},
{
"epoch": 9.448275862068966,
"grad_norm": 27.006250381469727,
"learning_rate": 6.91717791411043e-05,
"loss": 0.9335,
"step": 274
},
{
"epoch": 9.517241379310345,
"grad_norm": 22.846731185913086,
"learning_rate": 6.886503067484663e-05,
"loss": 0.6089,
"step": 276
},
{
"epoch": 9.586206896551724,
"grad_norm": 19.58112144470215,
"learning_rate": 6.855828220858897e-05,
"loss": 0.7781,
"step": 278
},
{
"epoch": 9.655172413793103,
"grad_norm": 13.92150592803955,
"learning_rate": 6.825153374233129e-05,
"loss": 0.5359,
"step": 280
},
{
"epoch": 9.724137931034482,
"grad_norm": 31.10985565185547,
"learning_rate": 6.794478527607362e-05,
"loss": 1.1988,
"step": 282
},
{
"epoch": 9.793103448275861,
"grad_norm": 23.35214614868164,
"learning_rate": 6.763803680981596e-05,
"loss": 0.9214,
"step": 284
},
{
"epoch": 9.862068965517242,
"grad_norm": 28.431663513183594,
"learning_rate": 6.733128834355828e-05,
"loss": 0.7769,
"step": 286
},
{
"epoch": 9.931034482758621,
"grad_norm": 26.589706420898438,
"learning_rate": 6.702453987730062e-05,
"loss": 0.7962,
"step": 288
},
{
"epoch": 10.0,
"grad_norm": 31.327491760253906,
"learning_rate": 6.671779141104295e-05,
"loss": 1.0365,
"step": 290
},
{
"epoch": 10.0,
"eval_accuracy": 0.5378787878787878,
"eval_f1_macro": 0.43132712141928736,
"eval_f1_micro": 0.5378787878787878,
"eval_f1_weighted": 0.49605830250991545,
"eval_loss": 1.4607229232788086,
"eval_precision_macro": 0.4370555865025911,
"eval_precision_micro": 0.5378787878787878,
"eval_precision_weighted": 0.4996555594942692,
"eval_recall_macro": 0.46741496598639454,
"eval_recall_micro": 0.5378787878787878,
"eval_recall_weighted": 0.5378787878787878,
"eval_runtime": 2.1864,
"eval_samples_per_second": 60.372,
"eval_steps_per_second": 7.775,
"step": 290
},
{
"epoch": 10.068965517241379,
"grad_norm": 35.857444763183594,
"learning_rate": 6.641104294478529e-05,
"loss": 1.1976,
"step": 292
},
{
"epoch": 10.137931034482758,
"grad_norm": 19.138635635375977,
"learning_rate": 6.610429447852761e-05,
"loss": 0.775,
"step": 294
},
{
"epoch": 10.206896551724139,
"grad_norm": 28.3044490814209,
"learning_rate": 6.579754601226994e-05,
"loss": 0.7642,
"step": 296
},
{
"epoch": 10.275862068965518,
"grad_norm": 20.905742645263672,
"learning_rate": 6.549079754601228e-05,
"loss": 0.5362,
"step": 298
},
{
"epoch": 10.344827586206897,
"grad_norm": 18.387508392333984,
"learning_rate": 6.51840490797546e-05,
"loss": 0.9838,
"step": 300
},
{
"epoch": 10.413793103448276,
"grad_norm": 13.722414016723633,
"learning_rate": 6.487730061349694e-05,
"loss": 0.446,
"step": 302
},
{
"epoch": 10.482758620689655,
"grad_norm": 20.892261505126953,
"learning_rate": 6.457055214723927e-05,
"loss": 0.7612,
"step": 304
},
{
"epoch": 10.551724137931034,
"grad_norm": 13.154946327209473,
"learning_rate": 6.426380368098159e-05,
"loss": 0.7933,
"step": 306
},
{
"epoch": 10.620689655172415,
"grad_norm": 16.057727813720703,
"learning_rate": 6.395705521472393e-05,
"loss": 0.522,
"step": 308
},
{
"epoch": 10.689655172413794,
"grad_norm": 19.725608825683594,
"learning_rate": 6.365030674846626e-05,
"loss": 0.7614,
"step": 310
},
{
"epoch": 10.758620689655173,
"grad_norm": 24.844079971313477,
"learning_rate": 6.33435582822086e-05,
"loss": 0.7933,
"step": 312
},
{
"epoch": 10.827586206896552,
"grad_norm": 18.63338279724121,
"learning_rate": 6.303680981595092e-05,
"loss": 0.7279,
"step": 314
},
{
"epoch": 10.89655172413793,
"grad_norm": 21.24047088623047,
"learning_rate": 6.273006134969326e-05,
"loss": 0.5442,
"step": 316
},
{
"epoch": 10.96551724137931,
"grad_norm": 33.37268829345703,
"learning_rate": 6.242331288343559e-05,
"loss": 0.6815,
"step": 318
},
{
"epoch": 11.0,
"eval_accuracy": 0.5909090909090909,
"eval_f1_macro": 0.49620898933890345,
"eval_f1_micro": 0.5909090909090909,
"eval_f1_weighted": 0.5663560659976746,
"eval_loss": 1.3132938146591187,
"eval_precision_macro": 0.5087159863945577,
"eval_precision_micro": 0.5909090909090909,
"eval_precision_weighted": 0.5741680194805194,
"eval_recall_macro": 0.5132879818594104,
"eval_recall_micro": 0.5909090909090909,
"eval_recall_weighted": 0.5909090909090909,
"eval_runtime": 2.1795,
"eval_samples_per_second": 60.565,
"eval_steps_per_second": 7.8,
"step": 319
},
{
"epoch": 11.03448275862069,
"grad_norm": 16.19150733947754,
"learning_rate": 6.211656441717791e-05,
"loss": 0.644,
"step": 320
},
{
"epoch": 11.10344827586207,
"grad_norm": 20.564546585083008,
"learning_rate": 6.180981595092025e-05,
"loss": 0.4998,
"step": 322
},
{
"epoch": 11.172413793103448,
"grad_norm": 19.2364444732666,
"learning_rate": 6.150306748466258e-05,
"loss": 0.7469,
"step": 324
},
{
"epoch": 11.241379310344827,
"grad_norm": 11.9139404296875,
"learning_rate": 6.119631901840492e-05,
"loss": 0.3421,
"step": 326
},
{
"epoch": 11.310344827586206,
"grad_norm": 15.564549446105957,
"learning_rate": 6.088957055214725e-05,
"loss": 0.454,
"step": 328
},
{
"epoch": 11.379310344827585,
"grad_norm": 15.790903091430664,
"learning_rate": 6.058282208588958e-05,
"loss": 0.5501,
"step": 330
},
{
"epoch": 11.448275862068966,
"grad_norm": 20.395984649658203,
"learning_rate": 6.02760736196319e-05,
"loss": 0.7396,
"step": 332
},
{
"epoch": 11.517241379310345,
"grad_norm": 13.017558097839355,
"learning_rate": 5.996932515337423e-05,
"loss": 0.3902,
"step": 334
},
{
"epoch": 11.586206896551724,
"grad_norm": 16.60504150390625,
"learning_rate": 5.9662576687116564e-05,
"loss": 0.4817,
"step": 336
},
{
"epoch": 11.655172413793103,
"grad_norm": 26.98207664489746,
"learning_rate": 5.93558282208589e-05,
"loss": 0.7637,
"step": 338
},
{
"epoch": 11.724137931034482,
"grad_norm": 27.36790657043457,
"learning_rate": 5.9049079754601235e-05,
"loss": 0.7137,
"step": 340
},
{
"epoch": 11.793103448275861,
"grad_norm": 21.537046432495117,
"learning_rate": 5.874233128834357e-05,
"loss": 0.5965,
"step": 342
},
{
"epoch": 11.862068965517242,
"grad_norm": 23.125181198120117,
"learning_rate": 5.8435582822085886e-05,
"loss": 0.5142,
"step": 344
},
{
"epoch": 11.931034482758621,
"grad_norm": 24.718408584594727,
"learning_rate": 5.812883435582822e-05,
"loss": 0.7012,
"step": 346
},
{
"epoch": 12.0,
"grad_norm": 10.676513671875,
"learning_rate": 5.782208588957055e-05,
"loss": 0.4153,
"step": 348
},
{
"epoch": 12.0,
"eval_accuracy": 0.5909090909090909,
"eval_f1_macro": 0.5082184346733783,
"eval_f1_micro": 0.5909090909090909,
"eval_f1_weighted": 0.5734953480846786,
"eval_loss": 1.3527586460113525,
"eval_precision_macro": 0.518512557765101,
"eval_precision_micro": 0.5909090909090909,
"eval_precision_weighted": 0.5819735429220373,
"eval_recall_macro": 0.5201889644746788,
"eval_recall_micro": 0.5909090909090909,
"eval_recall_weighted": 0.5909090909090909,
"eval_runtime": 2.1855,
"eval_samples_per_second": 60.399,
"eval_steps_per_second": 7.779,
"step": 348
},
{
"epoch": 12.068965517241379,
"grad_norm": 9.67835521697998,
"learning_rate": 5.751533742331289e-05,
"loss": 0.341,
"step": 350
},
{
"epoch": 12.137931034482758,
"grad_norm": 26.32645034790039,
"learning_rate": 5.720858895705522e-05,
"loss": 0.6657,
"step": 352
},
{
"epoch": 12.206896551724139,
"grad_norm": 26.893024444580078,
"learning_rate": 5.6901840490797555e-05,
"loss": 0.6414,
"step": 354
},
{
"epoch": 12.275862068965518,
"grad_norm": 20.325834274291992,
"learning_rate": 5.6595092024539874e-05,
"loss": 0.5375,
"step": 356
},
{
"epoch": 12.344827586206897,
"grad_norm": 14.722708702087402,
"learning_rate": 5.6288343558282206e-05,
"loss": 0.3507,
"step": 358
},
{
"epoch": 12.413793103448276,
"grad_norm": 16.164493560791016,
"learning_rate": 5.598159509202454e-05,
"loss": 0.3948,
"step": 360
},
{
"epoch": 12.482758620689655,
"grad_norm": 20.786996841430664,
"learning_rate": 5.567484662576688e-05,
"loss": 0.4795,
"step": 362
},
{
"epoch": 12.551724137931034,
"grad_norm": 14.129579544067383,
"learning_rate": 5.536809815950921e-05,
"loss": 0.2681,
"step": 364
},
{
"epoch": 12.620689655172415,
"grad_norm": 14.56933879852295,
"learning_rate": 5.506134969325154e-05,
"loss": 0.5054,
"step": 366
},
{
"epoch": 12.689655172413794,
"grad_norm": 30.525728225708008,
"learning_rate": 5.475460122699386e-05,
"loss": 0.5441,
"step": 368
},
{
"epoch": 12.758620689655173,
"grad_norm": 26.576383590698242,
"learning_rate": 5.4447852760736193e-05,
"loss": 0.7958,
"step": 370
},
{
"epoch": 12.827586206896552,
"grad_norm": 15.745062828063965,
"learning_rate": 5.4141104294478526e-05,
"loss": 0.4405,
"step": 372
},
{
"epoch": 12.89655172413793,
"grad_norm": 27.86982536315918,
"learning_rate": 5.3834355828220865e-05,
"loss": 0.5312,
"step": 374
},
{
"epoch": 12.96551724137931,
"grad_norm": 14.46611213684082,
"learning_rate": 5.35276073619632e-05,
"loss": 0.3396,
"step": 376
},
{
"epoch": 13.0,
"eval_accuracy": 0.5909090909090909,
"eval_f1_macro": 0.53715778106022,
"eval_f1_micro": 0.5909090909090909,
"eval_f1_weighted": 0.5830308957803414,
"eval_loss": 1.385578989982605,
"eval_precision_macro": 0.5622709610159853,
"eval_precision_micro": 0.5909090909090909,
"eval_precision_weighted": 0.6017770445498773,
"eval_recall_macro": 0.5387226001511716,
"eval_recall_micro": 0.5909090909090909,
"eval_recall_weighted": 0.5909090909090909,
"eval_runtime": 2.1898,
"eval_samples_per_second": 60.279,
"eval_steps_per_second": 7.763,
"step": 377
},
{
"epoch": 13.03448275862069,
"grad_norm": 17.2612361907959,
"learning_rate": 5.322085889570553e-05,
"loss": 0.3936,
"step": 378
},
{
"epoch": 13.10344827586207,
"grad_norm": 14.51452350616455,
"learning_rate": 5.291411042944786e-05,
"loss": 0.3816,
"step": 380
},
{
"epoch": 13.172413793103448,
"grad_norm": 11.573400497436523,
"learning_rate": 5.260736196319018e-05,
"loss": 0.3908,
"step": 382
},
{
"epoch": 13.241379310344827,
"grad_norm": 18.94368553161621,
"learning_rate": 5.230061349693251e-05,
"loss": 0.5848,
"step": 384
},
{
"epoch": 13.310344827586206,
"grad_norm": 18.253276824951172,
"learning_rate": 5.1993865030674845e-05,
"loss": 0.2693,
"step": 386
},
{
"epoch": 13.379310344827585,
"grad_norm": 12.632643699645996,
"learning_rate": 5.1687116564417185e-05,
"loss": 0.4368,
"step": 388
},
{
"epoch": 13.448275862068966,
"grad_norm": 14.242535591125488,
"learning_rate": 5.138036809815952e-05,
"loss": 0.3045,
"step": 390
},
{
"epoch": 13.517241379310345,
"grad_norm": 10.820467948913574,
"learning_rate": 5.107361963190185e-05,
"loss": 0.191,
"step": 392
},
{
"epoch": 13.586206896551724,
"grad_norm": 18.95819091796875,
"learning_rate": 5.076687116564417e-05,
"loss": 0.5458,
"step": 394
},
{
"epoch": 13.655172413793103,
"grad_norm": 21.91457748413086,
"learning_rate": 5.04601226993865e-05,
"loss": 0.7368,
"step": 396
},
{
"epoch": 13.724137931034482,
"grad_norm": 28.396440505981445,
"learning_rate": 5.015337423312883e-05,
"loss": 0.6519,
"step": 398
},
{
"epoch": 13.793103448275861,
"grad_norm": 16.6456356048584,
"learning_rate": 4.984662576687117e-05,
"loss": 0.5557,
"step": 400
},
{
"epoch": 13.862068965517242,
"grad_norm": 19.1430606842041,
"learning_rate": 4.9539877300613504e-05,
"loss": 0.6529,
"step": 402
},
{
"epoch": 13.931034482758621,
"grad_norm": 26.434980392456055,
"learning_rate": 4.923312883435583e-05,
"loss": 0.6911,
"step": 404
},
{
"epoch": 14.0,
"grad_norm": 30.586454391479492,
"learning_rate": 4.892638036809816e-05,
"loss": 0.5415,
"step": 406
},
{
"epoch": 14.0,
"eval_accuracy": 0.5909090909090909,
"eval_f1_macro": 0.5132147277659137,
"eval_f1_micro": 0.5909090909090909,
"eval_f1_weighted": 0.5795267633305671,
"eval_loss": 1.4251549243927002,
"eval_precision_macro": 0.5222798718196696,
"eval_precision_micro": 0.5909090909090909,
"eval_precision_weighted": 0.5892947366820627,
"eval_recall_macro": 0.5254724111866969,
"eval_recall_micro": 0.5909090909090909,
"eval_recall_weighted": 0.5909090909090909,
"eval_runtime": 2.1927,
"eval_samples_per_second": 60.199,
"eval_steps_per_second": 7.753,
"step": 406
},
{
"epoch": 14.068965517241379,
"grad_norm": 22.47860336303711,
"learning_rate": 4.8619631901840495e-05,
"loss": 0.2761,
"step": 408
},
{
"epoch": 14.137931034482758,
"grad_norm": 6.592859745025635,
"learning_rate": 4.831288343558282e-05,
"loss": 0.2115,
"step": 410
},
{
"epoch": 14.206896551724139,
"grad_norm": 11.651180267333984,
"learning_rate": 4.800613496932516e-05,
"loss": 0.2898,
"step": 412
},
{
"epoch": 14.275862068965518,
"grad_norm": 24.14730453491211,
"learning_rate": 4.769938650306749e-05,
"loss": 0.468,
"step": 414
},
{
"epoch": 14.344827586206897,
"grad_norm": 22.79234504699707,
"learning_rate": 4.739263803680982e-05,
"loss": 0.3194,
"step": 416
},
{
"epoch": 14.413793103448276,
"grad_norm": 19.285917282104492,
"learning_rate": 4.708588957055215e-05,
"loss": 0.3259,
"step": 418
},
{
"epoch": 14.482758620689655,
"grad_norm": 15.568497657775879,
"learning_rate": 4.677914110429448e-05,
"loss": 0.229,
"step": 420
},
{
"epoch": 14.551724137931034,
"grad_norm": 6.664709091186523,
"learning_rate": 4.647239263803681e-05,
"loss": 0.5116,
"step": 422
},
{
"epoch": 14.620689655172415,
"grad_norm": 27.09947967529297,
"learning_rate": 4.616564417177914e-05,
"loss": 0.3526,
"step": 424
},
{
"epoch": 14.689655172413794,
"grad_norm": 19.279560089111328,
"learning_rate": 4.585889570552148e-05,
"loss": 0.327,
"step": 426
},
{
"epoch": 14.758620689655173,
"grad_norm": 14.68875789642334,
"learning_rate": 4.5552147239263805e-05,
"loss": 0.1932,
"step": 428
},
{
"epoch": 14.827586206896552,
"grad_norm": 10.188983917236328,
"learning_rate": 4.524539877300614e-05,
"loss": 0.3491,
"step": 430
},
{
"epoch": 14.89655172413793,
"grad_norm": 30.82689094543457,
"learning_rate": 4.493865030674847e-05,
"loss": 0.5371,
"step": 432
},
{
"epoch": 14.96551724137931,
"grad_norm": 25.854101181030273,
"learning_rate": 4.4631901840490795e-05,
"loss": 0.4421,
"step": 434
},
{
"epoch": 15.0,
"eval_accuracy": 0.6136363636363636,
"eval_f1_macro": 0.5574127938548423,
"eval_f1_micro": 0.6136363636363636,
"eval_f1_weighted": 0.6086044486243096,
"eval_loss": 1.4080591201782227,
"eval_precision_macro": 0.5752992105933282,
"eval_precision_micro": 0.6136363636363636,
"eval_precision_weighted": 0.6148821098687408,
"eval_recall_macro": 0.5531670445956159,
"eval_recall_micro": 0.6136363636363636,
"eval_recall_weighted": 0.6136363636363636,
"eval_runtime": 2.1892,
"eval_samples_per_second": 60.296,
"eval_steps_per_second": 7.765,
"step": 435
},
{
"epoch": 15.03448275862069,
"grad_norm": 10.050946235656738,
"learning_rate": 4.432515337423313e-05,
"loss": 0.3007,
"step": 436
},
{
"epoch": 15.10344827586207,
"grad_norm": 12.756734848022461,
"learning_rate": 4.4018404907975466e-05,
"loss": 0.3396,
"step": 438
},
{
"epoch": 15.172413793103448,
"grad_norm": 25.455589294433594,
"learning_rate": 4.371165644171779e-05,
"loss": 0.3914,
"step": 440
},
{
"epoch": 15.241379310344827,
"grad_norm": 13.843463897705078,
"learning_rate": 4.3404907975460124e-05,
"loss": 0.3308,
"step": 442
},
{
"epoch": 15.310344827586206,
"grad_norm": 23.339752197265625,
"learning_rate": 4.309815950920246e-05,
"loss": 0.4584,
"step": 444
},
{
"epoch": 15.379310344827585,
"grad_norm": 8.64341926574707,
"learning_rate": 4.279141104294479e-05,
"loss": 0.1732,
"step": 446
},
{
"epoch": 15.448275862068966,
"grad_norm": 6.727046012878418,
"learning_rate": 4.2484662576687115e-05,
"loss": 0.2713,
"step": 448
},
{
"epoch": 15.517241379310345,
"grad_norm": 30.494932174682617,
"learning_rate": 4.2177914110429454e-05,
"loss": 0.2759,
"step": 450
},
{
"epoch": 15.586206896551724,
"grad_norm": 25.142616271972656,
"learning_rate": 4.1871165644171786e-05,
"loss": 0.2707,
"step": 452
},
{
"epoch": 15.655172413793103,
"grad_norm": 27.547733306884766,
"learning_rate": 4.156441717791411e-05,
"loss": 0.5069,
"step": 454
},
{
"epoch": 15.724137931034482,
"grad_norm": 19.634178161621094,
"learning_rate": 4.1257668711656444e-05,
"loss": 0.3872,
"step": 456
},
{
"epoch": 15.793103448275861,
"grad_norm": 21.880495071411133,
"learning_rate": 4.0950920245398776e-05,
"loss": 0.2277,
"step": 458
},
{
"epoch": 15.862068965517242,
"grad_norm": 15.454160690307617,
"learning_rate": 4.06441717791411e-05,
"loss": 0.3267,
"step": 460
},
{
"epoch": 15.931034482758621,
"grad_norm": 15.292703628540039,
"learning_rate": 4.033742331288344e-05,
"loss": 0.2948,
"step": 462
},
{
"epoch": 16.0,
"grad_norm": 24.81329917907715,
"learning_rate": 4.0030674846625773e-05,
"loss": 0.2893,
"step": 464
},
{
"epoch": 16.0,
"eval_accuracy": 0.5984848484848485,
"eval_f1_macro": 0.512701663933191,
"eval_f1_micro": 0.5984848484848485,
"eval_f1_weighted": 0.5832852686300961,
"eval_loss": 1.5284953117370605,
"eval_precision_macro": 0.5059085452362763,
"eval_precision_micro": 0.5984848484848485,
"eval_precision_weighted": 0.5752329251259732,
"eval_recall_macro": 0.5253136810279667,
"eval_recall_micro": 0.5984848484848485,
"eval_recall_weighted": 0.5984848484848485,
"eval_runtime": 2.1964,
"eval_samples_per_second": 60.099,
"eval_steps_per_second": 7.74,
"step": 464
},
{
"epoch": 16.06896551724138,
"grad_norm": 19.42568588256836,
"learning_rate": 3.97239263803681e-05,
"loss": 0.1854,
"step": 466
},
{
"epoch": 16.137931034482758,
"grad_norm": 12.055990219116211,
"learning_rate": 3.941717791411043e-05,
"loss": 0.2725,
"step": 468
},
{
"epoch": 16.20689655172414,
"grad_norm": 22.306148529052734,
"learning_rate": 3.9110429447852764e-05,
"loss": 0.2066,
"step": 470
},
{
"epoch": 16.275862068965516,
"grad_norm": 15.890237808227539,
"learning_rate": 3.880368098159509e-05,
"loss": 0.3127,
"step": 472
},
{
"epoch": 16.344827586206897,
"grad_norm": 17.045835494995117,
"learning_rate": 3.849693251533742e-05,
"loss": 0.3555,
"step": 474
},
{
"epoch": 16.413793103448278,
"grad_norm": 15.841018676757812,
"learning_rate": 3.819018404907976e-05,
"loss": 0.3553,
"step": 476
},
{
"epoch": 16.482758620689655,
"grad_norm": 11.002803802490234,
"learning_rate": 3.7883435582822086e-05,
"loss": 0.3044,
"step": 478
},
{
"epoch": 16.551724137931036,
"grad_norm": 8.885010719299316,
"learning_rate": 3.757668711656442e-05,
"loss": 0.2396,
"step": 480
},
{
"epoch": 16.620689655172413,
"grad_norm": 20.58298110961914,
"learning_rate": 3.726993865030675e-05,
"loss": 0.2576,
"step": 482
},
{
"epoch": 16.689655172413794,
"grad_norm": 18.69637107849121,
"learning_rate": 3.696319018404908e-05,
"loss": 0.3052,
"step": 484
},
{
"epoch": 16.75862068965517,
"grad_norm": 7.023503303527832,
"learning_rate": 3.665644171779141e-05,
"loss": 0.1449,
"step": 486
},
{
"epoch": 16.82758620689655,
"grad_norm": 18.077198028564453,
"learning_rate": 3.634969325153375e-05,
"loss": 0.3661,
"step": 488
},
{
"epoch": 16.896551724137932,
"grad_norm": 17.02280616760254,
"learning_rate": 3.6042944785276074e-05,
"loss": 0.2259,
"step": 490
},
{
"epoch": 16.96551724137931,
"grad_norm": 20.76211929321289,
"learning_rate": 3.5736196319018406e-05,
"loss": 0.2403,
"step": 492
},
{
"epoch": 17.0,
"eval_accuracy": 0.6287878787878788,
"eval_f1_macro": 0.5395048301452242,
"eval_f1_micro": 0.6287878787878788,
"eval_f1_weighted": 0.6065423604608557,
"eval_loss": 1.4820140600204468,
"eval_precision_macro": 0.58078231292517,
"eval_precision_micro": 0.6287878787878788,
"eval_precision_weighted": 0.6380321067821069,
"eval_recall_macro": 0.5459561602418744,
"eval_recall_micro": 0.6287878787878788,
"eval_recall_weighted": 0.6287878787878788,
"eval_runtime": 2.1863,
"eval_samples_per_second": 60.376,
"eval_steps_per_second": 7.776,
"step": 493
},
{
"epoch": 17.03448275862069,
"grad_norm": 20.38401985168457,
"learning_rate": 3.542944785276074e-05,
"loss": 0.3407,
"step": 494
},
{
"epoch": 17.103448275862068,
"grad_norm": 5.897765636444092,
"learning_rate": 3.512269938650307e-05,
"loss": 0.1841,
"step": 496
},
{
"epoch": 17.17241379310345,
"grad_norm": 15.455453872680664,
"learning_rate": 3.4815950920245396e-05,
"loss": 0.1269,
"step": 498
},
{
"epoch": 17.24137931034483,
"grad_norm": 27.898658752441406,
"learning_rate": 3.4509202453987735e-05,
"loss": 0.2314,
"step": 500
},
{
"epoch": 17.310344827586206,
"grad_norm": 19.497142791748047,
"learning_rate": 3.420245398773007e-05,
"loss": 0.2743,
"step": 502
},
{
"epoch": 17.379310344827587,
"grad_norm": 15.271653175354004,
"learning_rate": 3.3895705521472393e-05,
"loss": 0.1961,
"step": 504
},
{
"epoch": 17.448275862068964,
"grad_norm": 25.726585388183594,
"learning_rate": 3.3588957055214726e-05,
"loss": 0.3415,
"step": 506
},
{
"epoch": 17.517241379310345,
"grad_norm": 22.30027198791504,
"learning_rate": 3.328220858895706e-05,
"loss": 0.2365,
"step": 508
},
{
"epoch": 17.586206896551722,
"grad_norm": 27.83454132080078,
"learning_rate": 3.2975460122699384e-05,
"loss": 0.323,
"step": 510
},
{
"epoch": 17.655172413793103,
"grad_norm": 17.994998931884766,
"learning_rate": 3.266871165644172e-05,
"loss": 0.2189,
"step": 512
},
{
"epoch": 17.724137931034484,
"grad_norm": 15.599747657775879,
"learning_rate": 3.2361963190184055e-05,
"loss": 0.2235,
"step": 514
},
{
"epoch": 17.79310344827586,
"grad_norm": 19.07125473022461,
"learning_rate": 3.205521472392638e-05,
"loss": 0.2625,
"step": 516
},
{
"epoch": 17.862068965517242,
"grad_norm": 5.794015407562256,
"learning_rate": 3.174846625766871e-05,
"loss": 0.1962,
"step": 518
},
{
"epoch": 17.93103448275862,
"grad_norm": 27.05267906188965,
"learning_rate": 3.1441717791411045e-05,
"loss": 0.4204,
"step": 520
},
{
"epoch": 18.0,
"grad_norm": 12.915875434875488,
"learning_rate": 3.113496932515337e-05,
"loss": 0.1087,
"step": 522
},
{
"epoch": 18.0,
"eval_accuracy": 0.6060606060606061,
"eval_f1_macro": 0.5319546411035773,
"eval_f1_micro": 0.6060606060606061,
"eval_f1_weighted": 0.6009282162872589,
"eval_loss": 1.3999419212341309,
"eval_precision_macro": 0.5611877440448869,
"eval_precision_micro": 0.6060606060606061,
"eval_precision_weighted": 0.6210855415400871,
"eval_recall_macro": 0.5260619803476947,
"eval_recall_micro": 0.6060606060606061,
"eval_recall_weighted": 0.6060606060606061,
"eval_runtime": 2.1908,
"eval_samples_per_second": 60.251,
"eval_steps_per_second": 7.76,
"step": 522
},
{
"epoch": 18.06896551724138,
"grad_norm": 17.82192039489746,
"learning_rate": 3.0828220858895703e-05,
"loss": 0.1255,
"step": 524
},
{
"epoch": 18.137931034482758,
"grad_norm": 13.459417343139648,
"learning_rate": 3.052147239263804e-05,
"loss": 0.1728,
"step": 526
},
{
"epoch": 18.20689655172414,
"grad_norm": 19.81383514404297,
"learning_rate": 3.0214723926380368e-05,
"loss": 0.1743,
"step": 528
},
{
"epoch": 18.275862068965516,
"grad_norm": 17.316072463989258,
"learning_rate": 2.99079754601227e-05,
"loss": 0.2115,
"step": 530
},
{
"epoch": 18.344827586206897,
"grad_norm": 25.32339096069336,
"learning_rate": 2.9601226993865033e-05,
"loss": 0.387,
"step": 532
},
{
"epoch": 18.413793103448278,
"grad_norm": 10.883082389831543,
"learning_rate": 2.9294478527607362e-05,
"loss": 0.0874,
"step": 534
},
{
"epoch": 18.482758620689655,
"grad_norm": 25.40140151977539,
"learning_rate": 2.8987730061349694e-05,
"loss": 0.3103,
"step": 536
},
{
"epoch": 18.551724137931036,
"grad_norm": 13.151557922363281,
"learning_rate": 2.8680981595092026e-05,
"loss": 0.277,
"step": 538
},
{
"epoch": 18.620689655172413,
"grad_norm": 16.688093185424805,
"learning_rate": 2.837423312883436e-05,
"loss": 0.1661,
"step": 540
},
{
"epoch": 18.689655172413794,
"grad_norm": 3.376065492630005,
"learning_rate": 2.8067484662576688e-05,
"loss": 0.2142,
"step": 542
},
{
"epoch": 18.75862068965517,
"grad_norm": 20.72919464111328,
"learning_rate": 2.776073619631902e-05,
"loss": 0.2779,
"step": 544
},
{
"epoch": 18.82758620689655,
"grad_norm": 3.795419454574585,
"learning_rate": 2.7453987730061353e-05,
"loss": 0.0706,
"step": 546
},
{
"epoch": 18.896551724137932,
"grad_norm": 18.60194969177246,
"learning_rate": 2.714723926380368e-05,
"loss": 0.0951,
"step": 548
},
{
"epoch": 18.96551724137931,
"grad_norm": 17.101329803466797,
"learning_rate": 2.6840490797546014e-05,
"loss": 0.2619,
"step": 550
},
{
"epoch": 19.0,
"eval_accuracy": 0.6136363636363636,
"eval_f1_macro": 0.561813814539783,
"eval_f1_micro": 0.6136363636363636,
"eval_f1_weighted": 0.6037404914018973,
"eval_loss": 1.440819263458252,
"eval_precision_macro": 0.6154006028203717,
"eval_precision_micro": 0.6136363636363636,
"eval_precision_weighted": 0.6224909931745618,
"eval_recall_macro": 0.5501284958427816,
"eval_recall_micro": 0.6136363636363636,
"eval_recall_weighted": 0.6136363636363636,
"eval_runtime": 2.201,
"eval_samples_per_second": 59.973,
"eval_steps_per_second": 7.724,
"step": 551
},
{
"epoch": 19.03448275862069,
"grad_norm": 13.738136291503906,
"learning_rate": 2.6533742331288346e-05,
"loss": 0.1927,
"step": 552
},
{
"epoch": 19.103448275862068,
"grad_norm": 17.96263885498047,
"learning_rate": 2.6226993865030675e-05,
"loss": 0.1388,
"step": 554
},
{
"epoch": 19.17241379310345,
"grad_norm": 25.091278076171875,
"learning_rate": 2.5920245398773008e-05,
"loss": 0.1476,
"step": 556
},
{
"epoch": 19.24137931034483,
"grad_norm": 14.9843168258667,
"learning_rate": 2.561349693251534e-05,
"loss": 0.116,
"step": 558
},
{
"epoch": 19.310344827586206,
"grad_norm": 13.588825225830078,
"learning_rate": 2.530674846625767e-05,
"loss": 0.1187,
"step": 560
},
{
"epoch": 19.379310344827587,
"grad_norm": 8.29517650604248,
"learning_rate": 2.5e-05,
"loss": 0.1494,
"step": 562
},
{
"epoch": 19.448275862068964,
"grad_norm": 24.074113845825195,
"learning_rate": 2.469325153374233e-05,
"loss": 0.1549,
"step": 564
},
{
"epoch": 19.517241379310345,
"grad_norm": 7.5761213302612305,
"learning_rate": 2.4386503067484666e-05,
"loss": 0.0937,
"step": 566
},
{
"epoch": 19.586206896551722,
"grad_norm": 9.566593170166016,
"learning_rate": 2.4079754601226995e-05,
"loss": 0.1075,
"step": 568
},
{
"epoch": 19.655172413793103,
"grad_norm": 14.346840858459473,
"learning_rate": 2.3773006134969324e-05,
"loss": 0.112,
"step": 570
},
{
"epoch": 19.724137931034484,
"grad_norm": 22.044532775878906,
"learning_rate": 2.346625766871166e-05,
"loss": 0.1831,
"step": 572
},
{
"epoch": 19.79310344827586,
"grad_norm": 10.464526176452637,
"learning_rate": 2.315950920245399e-05,
"loss": 0.1353,
"step": 574
},
{
"epoch": 19.862068965517242,
"grad_norm": 6.597527980804443,
"learning_rate": 2.285276073619632e-05,
"loss": 0.073,
"step": 576
},
{
"epoch": 19.93103448275862,
"grad_norm": 3.7595765590667725,
"learning_rate": 2.2546012269938653e-05,
"loss": 0.0686,
"step": 578
},
{
"epoch": 20.0,
"grad_norm": 4.094130516052246,
"learning_rate": 2.2239263803680982e-05,
"loss": 0.1154,
"step": 580
},
{
"epoch": 20.0,
"eval_accuracy": 0.6287878787878788,
"eval_f1_macro": 0.5401653994931305,
"eval_f1_micro": 0.6287878787878788,
"eval_f1_weighted": 0.6089890501655209,
"eval_loss": 1.4516006708145142,
"eval_precision_macro": 0.5538492063492063,
"eval_precision_micro": 0.6287878787878788,
"eval_precision_weighted": 0.6144570707070707,
"eval_recall_macro": 0.5491912320483749,
"eval_recall_micro": 0.6287878787878788,
"eval_recall_weighted": 0.6287878787878788,
"eval_runtime": 2.2073,
"eval_samples_per_second": 59.802,
"eval_steps_per_second": 7.702,
"step": 580
},
{
"epoch": 20.06896551724138,
"grad_norm": 9.36839485168457,
"learning_rate": 2.1932515337423315e-05,
"loss": 0.0687,
"step": 582
},
{
"epoch": 20.137931034482758,
"grad_norm": 24.775922775268555,
"learning_rate": 2.1625766871165647e-05,
"loss": 0.1334,
"step": 584
},
{
"epoch": 20.20689655172414,
"grad_norm": 23.269336700439453,
"learning_rate": 2.1319018404907976e-05,
"loss": 0.1813,
"step": 586
},
{
"epoch": 20.275862068965516,
"grad_norm": 5.708396911621094,
"learning_rate": 2.1012269938650308e-05,
"loss": 0.0841,
"step": 588
},
{
"epoch": 20.344827586206897,
"grad_norm": 11.590498924255371,
"learning_rate": 2.0705521472392637e-05,
"loss": 0.116,
"step": 590
},
{
"epoch": 20.413793103448278,
"grad_norm": 18.878385543823242,
"learning_rate": 2.039877300613497e-05,
"loss": 0.1187,
"step": 592
},
{
"epoch": 20.482758620689655,
"grad_norm": 15.631240844726562,
"learning_rate": 2.0092024539877302e-05,
"loss": 0.1785,
"step": 594
},
{
"epoch": 20.551724137931036,
"grad_norm": 9.58936595916748,
"learning_rate": 1.978527607361963e-05,
"loss": 0.0336,
"step": 596
},
{
"epoch": 20.620689655172413,
"grad_norm": 15.450642585754395,
"learning_rate": 1.9478527607361967e-05,
"loss": 0.1695,
"step": 598
},
{
"epoch": 20.689655172413794,
"grad_norm": 4.292616844177246,
"learning_rate": 1.9171779141104296e-05,
"loss": 0.0633,
"step": 600
},
{
"epoch": 20.75862068965517,
"grad_norm": 4.748676776885986,
"learning_rate": 1.8865030674846625e-05,
"loss": 0.0889,
"step": 602
},
{
"epoch": 20.82758620689655,
"grad_norm": 16.53461265563965,
"learning_rate": 1.855828220858896e-05,
"loss": 0.1608,
"step": 604
},
{
"epoch": 20.896551724137932,
"grad_norm": 26.134490966796875,
"learning_rate": 1.825153374233129e-05,
"loss": 0.1693,
"step": 606
},
{
"epoch": 20.96551724137931,
"grad_norm": 20.084346771240234,
"learning_rate": 1.7944785276073618e-05,
"loss": 0.1367,
"step": 608
},
{
"epoch": 21.0,
"eval_accuracy": 0.6136363636363636,
"eval_f1_macro": 0.5254125588472093,
"eval_f1_micro": 0.6136363636363636,
"eval_f1_weighted": 0.5942474496633104,
"eval_loss": 1.530592441558838,
"eval_precision_macro": 0.5321410615528263,
"eval_precision_micro": 0.6136363636363636,
"eval_precision_weighted": 0.5922574157868276,
"eval_recall_macro": 0.5339984882842026,
"eval_recall_micro": 0.6136363636363636,
"eval_recall_weighted": 0.6136363636363636,
"eval_runtime": 2.2038,
"eval_samples_per_second": 59.896,
"eval_steps_per_second": 7.714,
"step": 609
},
{
"epoch": 21.03448275862069,
"grad_norm": 4.850620746612549,
"learning_rate": 1.7638036809815954e-05,
"loss": 0.0614,
"step": 610
},
{
"epoch": 21.103448275862068,
"grad_norm": 23.208776473999023,
"learning_rate": 1.7331288343558283e-05,
"loss": 0.1307,
"step": 612
},
{
"epoch": 21.17241379310345,
"grad_norm": 8.065655708312988,
"learning_rate": 1.7024539877300612e-05,
"loss": 0.0389,
"step": 614
},
{
"epoch": 21.24137931034483,
"grad_norm": 9.520572662353516,
"learning_rate": 1.6717791411042948e-05,
"loss": 0.1722,
"step": 616
},
{
"epoch": 21.310344827586206,
"grad_norm": 26.25343894958496,
"learning_rate": 1.6411042944785277e-05,
"loss": 0.2112,
"step": 618
},
{
"epoch": 21.379310344827587,
"grad_norm": 6.016479015350342,
"learning_rate": 1.6104294478527606e-05,
"loss": 0.1577,
"step": 620
},
{
"epoch": 21.448275862068964,
"grad_norm": 28.95755386352539,
"learning_rate": 1.579754601226994e-05,
"loss": 0.1363,
"step": 622
},
{
"epoch": 21.517241379310345,
"grad_norm": 5.472126483917236,
"learning_rate": 1.549079754601227e-05,
"loss": 0.0527,
"step": 624
},
{
"epoch": 21.586206896551722,
"grad_norm": 12.595699310302734,
"learning_rate": 1.5184049079754603e-05,
"loss": 0.1044,
"step": 626
},
{
"epoch": 21.655172413793103,
"grad_norm": 13.575519561767578,
"learning_rate": 1.4877300613496933e-05,
"loss": 0.1545,
"step": 628
},
{
"epoch": 21.724137931034484,
"grad_norm": 1.717926025390625,
"learning_rate": 1.4570552147239264e-05,
"loss": 0.0754,
"step": 630
},
{
"epoch": 21.79310344827586,
"grad_norm": 15.80093765258789,
"learning_rate": 1.4263803680981596e-05,
"loss": 0.1765,
"step": 632
},
{
"epoch": 21.862068965517242,
"grad_norm": 8.399004936218262,
"learning_rate": 1.3957055214723927e-05,
"loss": 0.0478,
"step": 634
},
{
"epoch": 21.93103448275862,
"grad_norm": 9.28877067565918,
"learning_rate": 1.3650306748466258e-05,
"loss": 0.0561,
"step": 636
},
{
"epoch": 22.0,
"grad_norm": 13.288439750671387,
"learning_rate": 1.334355828220859e-05,
"loss": 0.0839,
"step": 638
},
{
"epoch": 22.0,
"eval_accuracy": 0.5833333333333334,
"eval_f1_macro": 0.515415984334813,
"eval_f1_micro": 0.5833333333333334,
"eval_f1_weighted": 0.575588621142538,
"eval_loss": 1.6396534442901611,
"eval_precision_macro": 0.5274346580737558,
"eval_precision_micro": 0.5833333333333334,
"eval_precision_weighted": 0.5895078605604921,
"eval_recall_macro": 0.5252003023431595,
"eval_recall_micro": 0.5833333333333334,
"eval_recall_weighted": 0.5833333333333334,
"eval_runtime": 2.1936,
"eval_samples_per_second": 60.176,
"eval_steps_per_second": 7.75,
"step": 638
},
{
"epoch": 22.06896551724138,
"grad_norm": 5.867281436920166,
"learning_rate": 1.303680981595092e-05,
"loss": 0.0999,
"step": 640
},
{
"epoch": 22.137931034482758,
"grad_norm": 3.7638766765594482,
"learning_rate": 1.2730061349693251e-05,
"loss": 0.0418,
"step": 642
},
{
"epoch": 22.20689655172414,
"grad_norm": 1.9534434080123901,
"learning_rate": 1.2423312883435584e-05,
"loss": 0.0351,
"step": 644
},
{
"epoch": 22.275862068965516,
"grad_norm": 1.0892353057861328,
"learning_rate": 1.2116564417177914e-05,
"loss": 0.0272,
"step": 646
},
{
"epoch": 22.344827586206897,
"grad_norm": 16.817415237426758,
"learning_rate": 1.1809815950920245e-05,
"loss": 0.2253,
"step": 648
},
{
"epoch": 22.413793103448278,
"grad_norm": 24.870695114135742,
"learning_rate": 1.1503067484662577e-05,
"loss": 0.1316,
"step": 650
},
{
"epoch": 22.482758620689655,
"grad_norm": 22.584014892578125,
"learning_rate": 1.119631901840491e-05,
"loss": 0.2264,
"step": 652
},
{
"epoch": 22.551724137931036,
"grad_norm": 8.62193775177002,
"learning_rate": 1.0889570552147239e-05,
"loss": 0.0336,
"step": 654
},
{
"epoch": 22.620689655172413,
"grad_norm": 7.243905067443848,
"learning_rate": 1.0582822085889571e-05,
"loss": 0.0435,
"step": 656
},
{
"epoch": 22.689655172413794,
"grad_norm": 1.8948745727539062,
"learning_rate": 1.0276073619631903e-05,
"loss": 0.0655,
"step": 658
},
{
"epoch": 22.75862068965517,
"grad_norm": 5.095564842224121,
"learning_rate": 9.969325153374232e-06,
"loss": 0.042,
"step": 660
},
{
"epoch": 22.82758620689655,
"grad_norm": 25.0085506439209,
"learning_rate": 9.662576687116565e-06,
"loss": 0.1186,
"step": 662
},
{
"epoch": 22.896551724137932,
"grad_norm": 4.847318172454834,
"learning_rate": 9.355828220858897e-06,
"loss": 0.0377,
"step": 664
},
{
"epoch": 22.96551724137931,
"grad_norm": 39.400447845458984,
"learning_rate": 9.049079754601228e-06,
"loss": 0.1818,
"step": 666
},
{
"epoch": 23.0,
"eval_accuracy": 0.6515151515151515,
"eval_f1_macro": 0.565634487061558,
"eval_f1_micro": 0.6515151515151515,
"eval_f1_weighted": 0.6358752918681777,
"eval_loss": 1.641618251800537,
"eval_precision_macro": 0.584759718380408,
"eval_precision_micro": 0.6515151515151515,
"eval_precision_weighted": 0.6455531040170539,
"eval_recall_macro": 0.5695616024187452,
"eval_recall_micro": 0.6515151515151515,
"eval_recall_weighted": 0.6515151515151515,
"eval_runtime": 2.2051,
"eval_samples_per_second": 59.861,
"eval_steps_per_second": 7.709,
"step": 667
},
{
"epoch": 23.03448275862069,
"grad_norm": 11.35802936553955,
"learning_rate": 8.742331288343558e-06,
"loss": 0.0513,
"step": 668
},
{
"epoch": 23.103448275862068,
"grad_norm": 1.9400774240493774,
"learning_rate": 8.435582822085889e-06,
"loss": 0.0571,
"step": 670
},
{
"epoch": 23.17241379310345,
"grad_norm": 3.928626775741577,
"learning_rate": 8.128834355828221e-06,
"loss": 0.034,
"step": 672
},
{
"epoch": 23.24137931034483,
"grad_norm": 5.0317511558532715,
"learning_rate": 7.822085889570554e-06,
"loss": 0.0833,
"step": 674
},
{
"epoch": 23.310344827586206,
"grad_norm": 12.943672180175781,
"learning_rate": 7.5153374233128836e-06,
"loss": 0.0575,
"step": 676
},
{
"epoch": 23.379310344827587,
"grad_norm": 1.5223954916000366,
"learning_rate": 7.208588957055215e-06,
"loss": 0.0237,
"step": 678
},
{
"epoch": 23.448275862068964,
"grad_norm": 21.462011337280273,
"learning_rate": 6.901840490797547e-06,
"loss": 0.0785,
"step": 680
},
{
"epoch": 23.517241379310345,
"grad_norm": 13.98965072631836,
"learning_rate": 6.595092024539877e-06,
"loss": 0.0597,
"step": 682
},
{
"epoch": 23.586206896551722,
"grad_norm": 13.07774829864502,
"learning_rate": 6.288343558282209e-06,
"loss": 0.0498,
"step": 684
},
{
"epoch": 23.655172413793103,
"grad_norm": 2.752511501312256,
"learning_rate": 5.98159509202454e-06,
"loss": 0.028,
"step": 686
},
{
"epoch": 23.724137931034484,
"grad_norm": 3.4312055110931396,
"learning_rate": 5.674846625766871e-06,
"loss": 0.0531,
"step": 688
},
{
"epoch": 23.79310344827586,
"grad_norm": 20.920682907104492,
"learning_rate": 5.368098159509203e-06,
"loss": 0.1892,
"step": 690
},
{
"epoch": 23.862068965517242,
"grad_norm": 1.7730119228363037,
"learning_rate": 5.061349693251534e-06,
"loss": 0.0409,
"step": 692
},
{
"epoch": 23.93103448275862,
"grad_norm": 8.248014450073242,
"learning_rate": 4.7546012269938654e-06,
"loss": 0.0499,
"step": 694
},
{
"epoch": 24.0,
"grad_norm": 13.629621505737305,
"learning_rate": 4.447852760736196e-06,
"loss": 0.0781,
"step": 696
},
{
"epoch": 24.0,
"eval_accuracy": 0.6212121212121212,
"eval_f1_macro": 0.5392658545631691,
"eval_f1_micro": 0.6212121212121212,
"eval_f1_weighted": 0.607896474480917,
"eval_loss": 1.6025735139846802,
"eval_precision_macro": 0.5523980652552082,
"eval_precision_micro": 0.6212121212121212,
"eval_precision_weighted": 0.6118077193077193,
"eval_recall_macro": 0.5412169312169313,
"eval_recall_micro": 0.6212121212121212,
"eval_recall_weighted": 0.6212121212121212,
"eval_runtime": 2.1909,
"eval_samples_per_second": 60.25,
"eval_steps_per_second": 7.76,
"step": 696
},
{
"epoch": 24.06896551724138,
"grad_norm": 2.504154920578003,
"learning_rate": 4.141104294478528e-06,
"loss": 0.0335,
"step": 698
},
{
"epoch": 24.137931034482758,
"grad_norm": 17.774227142333984,
"learning_rate": 3.834355828220859e-06,
"loss": 0.1496,
"step": 700
},
{
"epoch": 24.20689655172414,
"grad_norm": 2.7488608360290527,
"learning_rate": 3.52760736196319e-06,
"loss": 0.0266,
"step": 702
},
{
"epoch": 24.275862068965516,
"grad_norm": 7.259423732757568,
"learning_rate": 3.2208588957055217e-06,
"loss": 0.0863,
"step": 704
},
{
"epoch": 24.344827586206897,
"grad_norm": 7.789644718170166,
"learning_rate": 2.914110429447853e-06,
"loss": 0.0491,
"step": 706
},
{
"epoch": 24.413793103448278,
"grad_norm": 1.2346043586730957,
"learning_rate": 2.607361963190184e-06,
"loss": 0.0136,
"step": 708
},
{
"epoch": 24.482758620689655,
"grad_norm": 5.063798904418945,
"learning_rate": 2.3006134969325154e-06,
"loss": 0.0189,
"step": 710
},
{
"epoch": 24.551724137931036,
"grad_norm": 6.230537414550781,
"learning_rate": 1.9938650306748465e-06,
"loss": 0.0431,
"step": 712
},
{
"epoch": 24.620689655172413,
"grad_norm": 15.741507530212402,
"learning_rate": 1.687116564417178e-06,
"loss": 0.19,
"step": 714
},
{
"epoch": 24.689655172413794,
"grad_norm": 12.17914867401123,
"learning_rate": 1.3803680981595093e-06,
"loss": 0.0662,
"step": 716
},
{
"epoch": 24.75862068965517,
"grad_norm": 1.5245462656021118,
"learning_rate": 1.0736196319018406e-06,
"loss": 0.0579,
"step": 718
},
{
"epoch": 24.82758620689655,
"grad_norm": 3.734255790710449,
"learning_rate": 7.668711656441718e-07,
"loss": 0.0337,
"step": 720
},
{
"epoch": 24.896551724137932,
"grad_norm": 25.646230697631836,
"learning_rate": 4.601226993865031e-07,
"loss": 0.1464,
"step": 722
},
{
"epoch": 24.96551724137931,
"grad_norm": 11.875574111938477,
"learning_rate": 1.5337423312883438e-07,
"loss": 0.0792,
"step": 724
},
{
"epoch": 25.0,
"eval_accuracy": 0.6287878787878788,
"eval_f1_macro": 0.5494495726426264,
"eval_f1_micro": 0.6287878787878788,
"eval_f1_weighted": 0.6179503958679277,
"eval_loss": 1.599715232849121,
"eval_precision_macro": 0.5716202716202715,
"eval_precision_micro": 0.6287878787878788,
"eval_precision_weighted": 0.6297404683768321,
"eval_recall_macro": 0.5480196523053665,
"eval_recall_micro": 0.6287878787878788,
"eval_recall_weighted": 0.6287878787878788,
"eval_runtime": 2.2024,
"eval_samples_per_second": 59.933,
"eval_steps_per_second": 7.719,
"step": 725
},
{
"epoch": 25.0,
"step": 725,
"total_flos": 5.76425379898368e+16,
"train_loss": 0.7207291752639515,
"train_runtime": 641.8354,
"train_samples_per_second": 17.995,
"train_steps_per_second": 1.13
}
],
"logging_steps": 2,
"max_steps": 725,
"num_input_tokens_seen": 0,
"num_train_epochs": 25,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5.76425379898368e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}