{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 20,
"global_step": 348,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0,
"eval_accuracy": 0.7376237623762376,
"eval_f1": 0.11666666666666667,
"eval_loss": 0.595770537853241,
"eval_precision": 0.5,
"eval_recall": 0.0660377358490566,
"eval_runtime": 16.3774,
"eval_samples_per_second": 6.472,
"eval_steps_per_second": 0.244,
"step": 0
},
{
"epoch": 0.0028735632183908046,
"grad_norm": 2.3969345092773438,
"learning_rate": 5.714285714285715e-07,
"loss": 0.7755,
"step": 1
},
{
"epoch": 0.005747126436781609,
"grad_norm": 2.4285762310028076,
"learning_rate": 1.142857142857143e-06,
"loss": 0.7738,
"step": 2
},
{
"epoch": 0.008620689655172414,
"grad_norm": 2.5854382514953613,
"learning_rate": 1.7142857142857145e-06,
"loss": 0.7642,
"step": 3
},
{
"epoch": 0.011494252873563218,
"grad_norm": 2.7728841304779053,
"learning_rate": 2.285714285714286e-06,
"loss": 0.7878,
"step": 4
},
{
"epoch": 0.014367816091954023,
"grad_norm": 2.6096584796905518,
"learning_rate": 2.8571428571428573e-06,
"loss": 0.7831,
"step": 5
},
{
"epoch": 0.017241379310344827,
"grad_norm": 2.421935558319092,
"learning_rate": 3.428571428571429e-06,
"loss": 0.7508,
"step": 6
},
{
"epoch": 0.020114942528735632,
"grad_norm": 2.579468250274658,
"learning_rate": 4.000000000000001e-06,
"loss": 0.774,
"step": 7
},
{
"epoch": 0.022988505747126436,
"grad_norm": 2.328829288482666,
"learning_rate": 4.571428571428572e-06,
"loss": 0.7371,
"step": 8
},
{
"epoch": 0.02586206896551724,
"grad_norm": 2.5165493488311768,
"learning_rate": 5.142857142857142e-06,
"loss": 0.7402,
"step": 9
},
{
"epoch": 0.028735632183908046,
"grad_norm": 2.5865883827209473,
"learning_rate": 5.7142857142857145e-06,
"loss": 0.7545,
"step": 10
},
{
"epoch": 0.031609195402298854,
"grad_norm": 2.6125497817993164,
"learning_rate": 6.285714285714286e-06,
"loss": 0.7422,
"step": 11
},
{
"epoch": 0.034482758620689655,
"grad_norm": 2.3310539722442627,
"learning_rate": 6.857142857142858e-06,
"loss": 0.7349,
"step": 12
},
{
"epoch": 0.03735632183908046,
"grad_norm": 2.4077725410461426,
"learning_rate": 7.428571428571429e-06,
"loss": 0.7256,
"step": 13
},
{
"epoch": 0.040229885057471264,
"grad_norm": 2.2570087909698486,
"learning_rate": 8.000000000000001e-06,
"loss": 0.7078,
"step": 14
},
{
"epoch": 0.04310344827586207,
"grad_norm": 2.242189407348633,
"learning_rate": 8.571428571428571e-06,
"loss": 0.686,
"step": 15
},
{
"epoch": 0.04597701149425287,
"grad_norm": 2.1012094020843506,
"learning_rate": 9.142857142857144e-06,
"loss": 0.7336,
"step": 16
},
{
"epoch": 0.04885057471264368,
"grad_norm": 1.9637709856033325,
"learning_rate": 9.714285714285715e-06,
"loss": 0.7025,
"step": 17
},
{
"epoch": 0.05172413793103448,
"grad_norm": 2.24760365486145,
"learning_rate": 1.0285714285714285e-05,
"loss": 0.6973,
"step": 18
},
{
"epoch": 0.05459770114942529,
"grad_norm": 1.9541856050491333,
"learning_rate": 1.0857142857142858e-05,
"loss": 0.6621,
"step": 19
},
{
"epoch": 0.05747126436781609,
"grad_norm": 1.9117401838302612,
"learning_rate": 1.1428571428571429e-05,
"loss": 0.6808,
"step": 20
},
{
"epoch": 0.05747126436781609,
"eval_accuracy": 0.7400990099009901,
"eval_f1": 0.11764705882352941,
"eval_loss": 0.57041996717453,
"eval_precision": 0.5384615384615384,
"eval_recall": 0.0660377358490566,
"eval_runtime": 16.3739,
"eval_samples_per_second": 6.474,
"eval_steps_per_second": 0.244,
"step": 20
},
{
"epoch": 0.0603448275862069,
"grad_norm": 2.0838022232055664,
"learning_rate": 1.2e-05,
"loss": 0.6775,
"step": 21
},
{
"epoch": 0.06321839080459771,
"grad_norm": 1.8101274967193604,
"learning_rate": 1.2571428571428572e-05,
"loss": 0.6722,
"step": 22
},
{
"epoch": 0.06609195402298851,
"grad_norm": 1.6239138841629028,
"learning_rate": 1.3142857142857145e-05,
"loss": 0.6205,
"step": 23
},
{
"epoch": 0.06896551724137931,
"grad_norm": 1.581398606300354,
"learning_rate": 1.3714285714285716e-05,
"loss": 0.6114,
"step": 24
},
{
"epoch": 0.07183908045977011,
"grad_norm": 1.7053773403167725,
"learning_rate": 1.4285714285714287e-05,
"loss": 0.6215,
"step": 25
},
{
"epoch": 0.07471264367816093,
"grad_norm": 1.6182948350906372,
"learning_rate": 1.4857142857142858e-05,
"loss": 0.6062,
"step": 26
},
{
"epoch": 0.07758620689655173,
"grad_norm": 1.4925391674041748,
"learning_rate": 1.542857142857143e-05,
"loss": 0.5855,
"step": 27
},
{
"epoch": 0.08045977011494253,
"grad_norm": 1.4214599132537842,
"learning_rate": 1.6000000000000003e-05,
"loss": 0.5605,
"step": 28
},
{
"epoch": 0.08333333333333333,
"grad_norm": 1.5156348943710327,
"learning_rate": 1.6571428571428574e-05,
"loss": 0.5704,
"step": 29
},
{
"epoch": 0.08620689655172414,
"grad_norm": 1.218806266784668,
"learning_rate": 1.7142857142857142e-05,
"loss": 0.544,
"step": 30
},
{
"epoch": 0.08908045977011494,
"grad_norm": 1.1243335008621216,
"learning_rate": 1.7714285714285717e-05,
"loss": 0.5393,
"step": 31
},
{
"epoch": 0.09195402298850575,
"grad_norm": 1.1667400598526,
"learning_rate": 1.8285714285714288e-05,
"loss": 0.5428,
"step": 32
},
{
"epoch": 0.09482758620689655,
"grad_norm": 1.4552701711654663,
"learning_rate": 1.885714285714286e-05,
"loss": 0.5218,
"step": 33
},
{
"epoch": 0.09770114942528736,
"grad_norm": 1.2645764350891113,
"learning_rate": 1.942857142857143e-05,
"loss": 0.5398,
"step": 34
},
{
"epoch": 0.10057471264367816,
"grad_norm": 1.2053171396255493,
"learning_rate": 2e-05,
"loss": 0.5286,
"step": 35
},
{
"epoch": 0.10344827586206896,
"grad_norm": 1.3919466733932495,
"learning_rate": 1.9999496293646753e-05,
"loss": 0.5388,
"step": 36
},
{
"epoch": 0.10632183908045977,
"grad_norm": 1.0895131826400757,
"learning_rate": 1.999798522533102e-05,
"loss": 0.5138,
"step": 37
},
{
"epoch": 0.10919540229885058,
"grad_norm": 1.1765999794006348,
"learning_rate": 1.9995466947279753e-05,
"loss": 0.4781,
"step": 38
},
{
"epoch": 0.11206896551724138,
"grad_norm": 1.329185128211975,
"learning_rate": 1.9991941713187477e-05,
"loss": 0.4821,
"step": 39
},
{
"epoch": 0.11494252873563218,
"grad_norm": 1.8006553649902344,
"learning_rate": 1.9987409878190752e-05,
"loss": 0.4764,
"step": 40
},
{
"epoch": 0.11494252873563218,
"eval_accuracy": 0.7574257425742574,
"eval_f1": 0.5882352941176471,
"eval_loss": 0.4768294095993042,
"eval_precision": 0.5303030303030303,
"eval_recall": 0.660377358490566,
"eval_runtime": 17.5901,
"eval_samples_per_second": 6.026,
"eval_steps_per_second": 0.227,
"step": 40
},
{
"epoch": 0.11781609195402298,
"grad_norm": 1.338883876800537,
"learning_rate": 1.99818718988324e-05,
"loss": 0.474,
"step": 41
},
{
"epoch": 0.1206896551724138,
"grad_norm": 1.211243987083435,
"learning_rate": 1.9975328333015497e-05,
"loss": 0.4644,
"step": 42
},
{
"epoch": 0.1235632183908046,
"grad_norm": 1.1209361553192139,
"learning_rate": 1.9967779839947172e-05,
"loss": 0.4748,
"step": 43
},
{
"epoch": 0.12643678160919541,
"grad_norm": 1.2608532905578613,
"learning_rate": 1.9959227180072216e-05,
"loss": 0.4162,
"step": 44
},
{
"epoch": 0.12931034482758622,
"grad_norm": 1.2170352935791016,
"learning_rate": 1.9949671214996448e-05,
"loss": 0.4661,
"step": 45
},
{
"epoch": 0.13218390804597702,
"grad_norm": 1.0206319093704224,
"learning_rate": 1.993911290739993e-05,
"loss": 0.4412,
"step": 46
},
{
"epoch": 0.13505747126436782,
"grad_norm": 1.0381529331207275,
"learning_rate": 1.992755332093999e-05,
"loss": 0.3974,
"step": 47
},
{
"epoch": 0.13793103448275862,
"grad_norm": 1.6168910264968872,
"learning_rate": 1.9914993620144055e-05,
"loss": 0.4304,
"step": 48
},
{
"epoch": 0.14080459770114942,
"grad_norm": 1.1238210201263428,
"learning_rate": 1.990143507029234e-05,
"loss": 0.4392,
"step": 49
},
{
"epoch": 0.14367816091954022,
"grad_norm": 1.129978895187378,
"learning_rate": 1.9886879037290385e-05,
"loss": 0.4034,
"step": 50
},
{
"epoch": 0.14655172413793102,
"grad_norm": 1.1116799116134644,
"learning_rate": 1.9871326987531453e-05,
"loss": 0.3649,
"step": 51
},
{
"epoch": 0.14942528735632185,
"grad_norm": 1.2544704675674438,
"learning_rate": 1.98547804877488e-05,
"loss": 0.3968,
"step": 52
},
{
"epoch": 0.15229885057471265,
"grad_norm": 1.2192368507385254,
"learning_rate": 1.983724120485783e-05,
"loss": 0.4372,
"step": 53
},
{
"epoch": 0.15517241379310345,
"grad_norm": 1.0894333124160767,
"learning_rate": 1.9818710905788195e-05,
"loss": 0.399,
"step": 54
},
{
"epoch": 0.15804597701149425,
"grad_norm": 1.7048630714416504,
"learning_rate": 1.9799191457305767e-05,
"loss": 0.4092,
"step": 55
},
{
"epoch": 0.16091954022988506,
"grad_norm": 1.6605204343795776,
"learning_rate": 1.977868482582459e-05,
"loss": 0.3609,
"step": 56
},
{
"epoch": 0.16379310344827586,
"grad_norm": 1.3857808113098145,
"learning_rate": 1.9757193077208776e-05,
"loss": 0.4289,
"step": 57
},
{
"epoch": 0.16666666666666666,
"grad_norm": 1.4144093990325928,
"learning_rate": 1.9734718376564386e-05,
"loss": 0.3706,
"step": 58
},
{
"epoch": 0.16954022988505746,
"grad_norm": 1.634936809539795,
"learning_rate": 1.9711262988021322e-05,
"loss": 0.4022,
"step": 59
},
{
"epoch": 0.1724137931034483,
"grad_norm": 1.4971150159835815,
"learning_rate": 1.968682927450523e-05,
"loss": 0.4099,
"step": 60
},
{
"epoch": 0.1724137931034483,
"eval_accuracy": 0.801980198019802,
"eval_f1": 0.6153846153846154,
"eval_loss": 0.4052402079105377,
"eval_precision": 0.6274509803921569,
"eval_recall": 0.6037735849056604,
"eval_runtime": 16.8779,
"eval_samples_per_second": 6.28,
"eval_steps_per_second": 0.237,
"step": 60
},
{
"epoch": 0.1752873563218391,
"grad_norm": 1.365148901939392,
"learning_rate": 1.9661419697499455e-05,
"loss": 0.3956,
"step": 61
},
{
"epoch": 0.1781609195402299,
"grad_norm": 1.4328665733337402,
"learning_rate": 1.9635036816797072e-05,
"loss": 0.4113,
"step": 62
},
{
"epoch": 0.1810344827586207,
"grad_norm": 1.5210320949554443,
"learning_rate": 1.960768329024301e-05,
"loss": 0.3848,
"step": 63
},
{
"epoch": 0.1839080459770115,
"grad_norm": 1.6699368953704834,
"learning_rate": 1.957936187346628e-05,
"loss": 0.3515,
"step": 64
},
{
"epoch": 0.1867816091954023,
"grad_norm": 1.5539981126785278,
"learning_rate": 1.955007541960241e-05,
"loss": 0.416,
"step": 65
},
{
"epoch": 0.1896551724137931,
"grad_norm": 1.75135338306427,
"learning_rate": 1.9519826879005964e-05,
"loss": 0.4134,
"step": 66
},
{
"epoch": 0.1925287356321839,
"grad_norm": 2.172255516052246,
"learning_rate": 1.948861929895336e-05,
"loss": 0.4107,
"step": 67
},
{
"epoch": 0.19540229885057472,
"grad_norm": 1.7174351215362549,
"learning_rate": 1.945645582333587e-05,
"loss": 0.3827,
"step": 68
},
{
"epoch": 0.19827586206896552,
"grad_norm": 2.1698808670043945,
"learning_rate": 1.9423339692342885e-05,
"loss": 0.3816,
"step": 69
},
{
"epoch": 0.20114942528735633,
"grad_norm": 1.6693840026855469,
"learning_rate": 1.9389274242135528e-05,
"loss": 0.3552,
"step": 70
},
{
"epoch": 0.20402298850574713,
"grad_norm": 2.666456937789917,
"learning_rate": 1.9354262904510544e-05,
"loss": 0.4101,
"step": 71
},
{
"epoch": 0.20689655172413793,
"grad_norm": 1.946183681488037,
"learning_rate": 1.9318309206554567e-05,
"loss": 0.3497,
"step": 72
},
{
"epoch": 0.20977011494252873,
"grad_norm": 1.5909761190414429,
"learning_rate": 1.9281416770288806e-05,
"loss": 0.3599,
"step": 73
},
{
"epoch": 0.21264367816091953,
"grad_norm": 1.677031397819519,
"learning_rate": 1.924358931230418e-05,
"loss": 0.3452,
"step": 74
},
{
"epoch": 0.21551724137931033,
"grad_norm": 2.067196846008301,
"learning_rate": 1.920483064338687e-05,
"loss": 0.3568,
"step": 75
},
{
"epoch": 0.21839080459770116,
"grad_norm": 2.4643149375915527,
"learning_rate": 1.9165144668134426e-05,
"loss": 0.3731,
"step": 76
},
{
"epoch": 0.22126436781609196,
"grad_norm": 3.8383142948150635,
"learning_rate": 1.9124535384562423e-05,
"loss": 0.3795,
"step": 77
},
{
"epoch": 0.22413793103448276,
"grad_norm": 3.8934013843536377,
"learning_rate": 1.9083006883701688e-05,
"loss": 0.3726,
"step": 78
},
{
"epoch": 0.22701149425287356,
"grad_norm": 2.358133554458618,
"learning_rate": 1.904056334918617e-05,
"loss": 0.3108,
"step": 79
},
{
"epoch": 0.22988505747126436,
"grad_norm": 1.4917255640029907,
"learning_rate": 1.8997209056831462e-05,
"loss": 0.346,
"step": 80
},
{
"epoch": 0.22988505747126436,
"eval_accuracy": 0.8366336633663366,
"eval_f1": 0.6826923076923077,
"eval_loss": 0.3761025071144104,
"eval_precision": 0.696078431372549,
"eval_recall": 0.6698113207547169,
"eval_runtime": 16.4137,
"eval_samples_per_second": 6.458,
"eval_steps_per_second": 0.244,
"step": 80
},
{
"epoch": 0.23275862068965517,
"grad_norm": 2.120394706726074,
"learning_rate": 1.8952948374204066e-05,
"loss": 0.4094,
"step": 81
},
{
"epoch": 0.23563218390804597,
"grad_norm": 1.4860031604766846,
"learning_rate": 1.8907785760181392e-05,
"loss": 0.3428,
"step": 82
},
{
"epoch": 0.23850574712643677,
"grad_norm": 2.5330207347869873,
"learning_rate": 1.8861725764502557e-05,
"loss": 0.3856,
"step": 83
},
{
"epoch": 0.2413793103448276,
"grad_norm": 1.8291746377944946,
"learning_rate": 1.881477302731006e-05,
"loss": 0.3613,
"step": 84
},
{
"epoch": 0.2442528735632184,
"grad_norm": 2.459777355194092,
"learning_rate": 1.87669322786823e-05,
"loss": 0.3607,
"step": 85
},
{
"epoch": 0.2471264367816092,
"grad_norm": 1.6540309190750122,
"learning_rate": 1.8718208338157082e-05,
"loss": 0.3602,
"step": 86
},
{
"epoch": 0.25,
"grad_norm": 1.5972404479980469,
"learning_rate": 1.866860611424609e-05,
"loss": 0.3323,
"step": 87
},
{
"epoch": 0.25287356321839083,
"grad_norm": 3.115286111831665,
"learning_rate": 1.8618130603940386e-05,
"loss": 0.3163,
"step": 88
},
{
"epoch": 0.2557471264367816,
"grad_norm": 2.2507760524749756,
"learning_rate": 1.856678689220701e-05,
"loss": 0.3266,
"step": 89
},
{
"epoch": 0.25862068965517243,
"grad_norm": 3.702547073364258,
"learning_rate": 1.851458015147673e-05,
"loss": 0.3787,
"step": 90
},
{
"epoch": 0.2614942528735632,
"grad_norm": 2.5684680938720703,
"learning_rate": 1.846151564112294e-05,
"loss": 0.3113,
"step": 91
},
{
"epoch": 0.26436781609195403,
"grad_norm": 1.9638134241104126,
"learning_rate": 1.840759870693184e-05,
"loss": 0.3725,
"step": 92
},
{
"epoch": 0.2672413793103448,
"grad_norm": 2.0520501136779785,
"learning_rate": 1.8352834780563888e-05,
"loss": 0.372,
"step": 93
},
{
"epoch": 0.27011494252873564,
"grad_norm": 2.0618882179260254,
"learning_rate": 1.8297229379006614e-05,
"loss": 0.2768,
"step": 94
},
{
"epoch": 0.27298850574712646,
"grad_norm": 2.1019253730773926,
"learning_rate": 1.8240788104018824e-05,
"loss": 0.3435,
"step": 95
},
{
"epoch": 0.27586206896551724,
"grad_norm": 4.472621440887451,
"learning_rate": 1.8183516641566278e-05,
"loss": 0.3146,
"step": 96
},
{
"epoch": 0.27873563218390807,
"grad_norm": 2.1167545318603516,
"learning_rate": 1.8125420761248878e-05,
"loss": 0.3804,
"step": 97
},
{
"epoch": 0.28160919540229884,
"grad_norm": 1.9784914255142212,
"learning_rate": 1.806650631571943e-05,
"loss": 0.3514,
"step": 98
},
{
"epoch": 0.28448275862068967,
"grad_norm": 2.407283306121826,
"learning_rate": 1.8006779240094024e-05,
"loss": 0.301,
"step": 99
},
{
"epoch": 0.28735632183908044,
"grad_norm": 1.7405933141708374,
"learning_rate": 1.7946245551354156e-05,
"loss": 0.2929,
"step": 100
},
{
"epoch": 0.28735632183908044,
"eval_accuracy": 0.8366336633663366,
"eval_f1": 0.6886792452830188,
"eval_loss": 0.3663554787635803,
"eval_precision": 0.6886792452830188,
"eval_recall": 0.6886792452830188,
"eval_runtime": 16.6875,
"eval_samples_per_second": 6.352,
"eval_steps_per_second": 0.24,
"step": 100
},
{
"epoch": 0.29022988505747127,
"grad_norm": 2.5615780353546143,
"learning_rate": 1.7884911347740556e-05,
"loss": 0.3328,
"step": 101
},
{
"epoch": 0.29310344827586204,
"grad_norm": 2.6771366596221924,
"learning_rate": 1.782278280813882e-05,
"loss": 0.3584,
"step": 102
},
{
"epoch": 0.2959770114942529,
"grad_norm": 2.411428451538086,
"learning_rate": 1.775986619145697e-05,
"loss": 0.3463,
"step": 103
},
{
"epoch": 0.2988505747126437,
"grad_norm": 1.991093397140503,
"learning_rate": 1.7696167835994927e-05,
"loss": 0.2636,
"step": 104
},
{
"epoch": 0.3017241379310345,
"grad_norm": 2.979641914367676,
"learning_rate": 1.7631694158805945e-05,
"loss": 0.2833,
"step": 105
},
{
"epoch": 0.3045977011494253,
"grad_norm": 2.2680490016937256,
"learning_rate": 1.7566451655050197e-05,
"loss": 0.2915,
"step": 106
},
{
"epoch": 0.3074712643678161,
"grad_norm": 2.6766483783721924,
"learning_rate": 1.7500446897340408e-05,
"loss": 0.3172,
"step": 107
},
{
"epoch": 0.3103448275862069,
"grad_norm": 2.766521692276001,
"learning_rate": 1.7433686535079736e-05,
"loss": 0.3347,
"step": 108
},
{
"epoch": 0.3132183908045977,
"grad_norm": 2.0438175201416016,
"learning_rate": 1.736617729379191e-05,
"loss": 0.3315,
"step": 109
},
{
"epoch": 0.3160919540229885,
"grad_norm": 2.5706722736358643,
"learning_rate": 1.7297925974443675e-05,
"loss": 0.2903,
"step": 110
},
{
"epoch": 0.31896551724137934,
"grad_norm": 6.830801010131836,
"learning_rate": 1.7228939452759666e-05,
"loss": 0.372,
"step": 111
},
{
"epoch": 0.3218390804597701,
"grad_norm": 2.375408411026001,
"learning_rate": 1.7159224678529734e-05,
"loss": 0.2875,
"step": 112
},
{
"epoch": 0.32471264367816094,
"grad_norm": 2.542205572128296,
"learning_rate": 1.7088788674908817e-05,
"loss": 0.3327,
"step": 113
},
{
"epoch": 0.3275862068965517,
"grad_norm": 2.645517587661743,
"learning_rate": 1.7017638537709426e-05,
"loss": 0.3225,
"step": 114
},
{
"epoch": 0.33045977011494254,
"grad_norm": 3.9527246952056885,
"learning_rate": 1.6945781434686783e-05,
"loss": 0.3683,
"step": 115
},
{
"epoch": 0.3333333333333333,
"grad_norm": 3.478126049041748,
"learning_rate": 1.6873224604816753e-05,
"loss": 0.3448,
"step": 116
},
{
"epoch": 0.33620689655172414,
"grad_norm": 4.451388359069824,
"learning_rate": 1.679997535756657e-05,
"loss": 0.2846,
"step": 117
},
{
"epoch": 0.3390804597701149,
"grad_norm": 2.1524624824523926,
"learning_rate": 1.672604107215848e-05,
"loss": 0.273,
"step": 118
},
{
"epoch": 0.34195402298850575,
"grad_norm": 2.950127601623535,
"learning_rate": 1.6651429196826337e-05,
"loss": 0.3886,
"step": 119
},
{
"epoch": 0.3448275862068966,
"grad_norm": 3.5067648887634277,
"learning_rate": 1.6576147248065268e-05,
"loss": 0.3822,
"step": 120
},
{
"epoch": 0.3448275862068966,
"eval_accuracy": 0.8514851485148515,
"eval_f1": 0.7297297297297297,
"eval_loss": 0.35510221123695374,
"eval_precision": 0.6982758620689655,
"eval_recall": 0.7641509433962265,
"eval_runtime": 16.7999,
"eval_samples_per_second": 6.31,
"eval_steps_per_second": 0.238,
"step": 120
},
{
"epoch": 0.34770114942528735,
"grad_norm": 4.016488552093506,
"learning_rate": 1.6500202809874446e-05,
"loss": 0.3354,
"step": 121
},
{
"epoch": 0.3505747126436782,
"grad_norm": 2.8852052688598633,
"learning_rate": 1.6423603532993074e-05,
"loss": 0.3143,
"step": 122
},
{
"epoch": 0.35344827586206895,
"grad_norm": 3.2638821601867676,
"learning_rate": 1.634635713412964e-05,
"loss": 0.3273,
"step": 123
},
{
"epoch": 0.3563218390804598,
"grad_norm": 2.6508638858795166,
"learning_rate": 1.626847139518452e-05,
"loss": 0.3539,
"step": 124
},
{
"epoch": 0.35919540229885055,
"grad_norm": 3.1945559978485107,
"learning_rate": 1.618995416246601e-05,
"loss": 0.3577,
"step": 125
},
{
"epoch": 0.3620689655172414,
"grad_norm": 3.6181535720825195,
"learning_rate": 1.6110813345899914e-05,
"loss": 0.3194,
"step": 126
},
{
"epoch": 0.3649425287356322,
"grad_norm": 3.1202199459075928,
"learning_rate": 1.6031056918232642e-05,
"loss": 0.3014,
"step": 127
},
{
"epoch": 0.367816091954023,
"grad_norm": 3.069596529006958,
"learning_rate": 1.595069291422807e-05,
"loss": 0.2934,
"step": 128
},
{
"epoch": 0.3706896551724138,
"grad_norm": 2.2219157218933105,
"learning_rate": 1.586972942985807e-05,
"loss": 0.2801,
"step": 129
},
{
"epoch": 0.3735632183908046,
"grad_norm": 2.305501699447632,
"learning_rate": 1.5788174621486936e-05,
"loss": 0.2895,
"step": 130
},
{
"epoch": 0.3764367816091954,
"grad_norm": 2.0006630420684814,
"learning_rate": 1.570603670504969e-05,
"loss": 0.2614,
"step": 131
},
{
"epoch": 0.3793103448275862,
"grad_norm": 2.4915575981140137,
"learning_rate": 1.5623323955224404e-05,
"loss": 0.3215,
"step": 132
},
{
"epoch": 0.382183908045977,
"grad_norm": 2.3519020080566406,
"learning_rate": 1.5540044704598588e-05,
"loss": 0.291,
"step": 133
},
{
"epoch": 0.3850574712643678,
"grad_norm": 2.4606783390045166,
"learning_rate": 1.5456207342829777e-05,
"loss": 0.3641,
"step": 134
},
{
"epoch": 0.3879310344827586,
"grad_norm": 2.4096121788024902,
"learning_rate": 1.5371820315800316e-05,
"loss": 0.3071,
"step": 135
},
{
"epoch": 0.39080459770114945,
"grad_norm": 2.9145760536193848,
"learning_rate": 1.5286892124766546e-05,
"loss": 0.2811,
"step": 136
},
{
"epoch": 0.3936781609195402,
"grad_norm": 1.954529881477356,
"learning_rate": 1.5201431325502332e-05,
"loss": 0.3137,
"step": 137
},
{
"epoch": 0.39655172413793105,
"grad_norm": 3.2401654720306396,
"learning_rate": 1.5115446527437193e-05,
"loss": 0.3052,
"step": 138
},
{
"epoch": 0.3994252873563218,
"grad_norm": 1.9580965042114258,
"learning_rate": 1.5028946392788934e-05,
"loss": 0.2898,
"step": 139
},
{
"epoch": 0.40229885057471265,
"grad_norm": 3.239868640899658,
"learning_rate": 1.4941939635691036e-05,
"loss": 0.3955,
"step": 140
},
{
"epoch": 0.40229885057471265,
"eval_accuracy": 0.8589108910891089,
"eval_f1": 0.7135678391959799,
"eval_loss": 0.34418779611587524,
"eval_precision": 0.7634408602150538,
"eval_recall": 0.6698113207547169,
"eval_runtime": 16.4081,
"eval_samples_per_second": 6.46,
"eval_steps_per_second": 0.244,
"step": 140
},
{
"epoch": 0.4051724137931034,
"grad_norm": 4.790642738342285,
"learning_rate": 1.4854435021314766e-05,
"loss": 0.3939,
"step": 141
},
{
"epoch": 0.40804597701149425,
"grad_norm": 1.9184012413024902,
"learning_rate": 1.4766441364986162e-05,
"loss": 0.2496,
"step": 142
},
{
"epoch": 0.4109195402298851,
"grad_norm": 3.910475969314575,
"learning_rate": 1.467796753129797e-05,
"loss": 0.3206,
"step": 143
},
{
"epoch": 0.41379310344827586,
"grad_norm": 5.272514820098877,
"learning_rate": 1.4589022433216616e-05,
"loss": 0.3299,
"step": 144
},
{
"epoch": 0.4166666666666667,
"grad_norm": 4.5233941078186035,
"learning_rate": 1.4499615031184297e-05,
"loss": 0.3089,
"step": 145
},
{
"epoch": 0.41954022988505746,
"grad_norm": 3.6632847785949707,
"learning_rate": 1.4409754332216303e-05,
"loss": 0.3436,
"step": 146
},
{
"epoch": 0.4224137931034483,
"grad_norm": 2.548621892929077,
"learning_rate": 1.431944938899363e-05,
"loss": 0.3434,
"step": 147
},
{
"epoch": 0.42528735632183906,
"grad_norm": 2.903876304626465,
"learning_rate": 1.4228709298950998e-05,
"loss": 0.2672,
"step": 148
},
{
"epoch": 0.4281609195402299,
"grad_norm": 4.375998020172119,
"learning_rate": 1.4137543203360382e-05,
"loss": 0.2741,
"step": 149
},
{
"epoch": 0.43103448275862066,
"grad_norm": 4.29938268661499,
"learning_rate": 1.4045960286410093e-05,
"loss": 0.3264,
"step": 150
},
{
"epoch": 0.4339080459770115,
"grad_norm": 2.3729660511016846,
"learning_rate": 1.395396977427955e-05,
"loss": 0.3135,
"step": 151
},
{
"epoch": 0.4367816091954023,
"grad_norm": 4.978923797607422,
"learning_rate": 1.3861580934209832e-05,
"loss": 0.3174,
"step": 152
},
{
"epoch": 0.4396551724137931,
"grad_norm": 2.483069658279419,
"learning_rate": 1.376880307357009e-05,
"loss": 0.2683,
"step": 153
},
{
"epoch": 0.4425287356321839,
"grad_norm": 2.2193140983581543,
"learning_rate": 1.3675645538919884e-05,
"loss": 0.257,
"step": 154
},
{
"epoch": 0.4454022988505747,
"grad_norm": 2.5293915271759033,
"learning_rate": 1.3582117715067628e-05,
"loss": 0.3204,
"step": 155
},
{
"epoch": 0.4482758620689655,
"grad_norm": 5.482168674468994,
"learning_rate": 1.3488229024125142e-05,
"loss": 0.3334,
"step": 156
},
{
"epoch": 0.4511494252873563,
"grad_norm": 3.027650833129883,
"learning_rate": 1.3393988924558445e-05,
"loss": 0.2637,
"step": 157
},
{
"epoch": 0.4540229885057471,
"grad_norm": 5.8336262702941895,
"learning_rate": 1.3299406910234917e-05,
"loss": 0.3285,
"step": 158
},
{
"epoch": 0.45689655172413796,
"grad_norm": 3.0736873149871826,
"learning_rate": 1.3204492509466862e-05,
"loss": 0.3032,
"step": 159
},
{
"epoch": 0.45977011494252873,
"grad_norm": 4.618353366851807,
"learning_rate": 1.3109255284051615e-05,
"loss": 0.34,
"step": 160
},
{
"epoch": 0.45977011494252873,
"eval_accuracy": 0.8613861386138614,
"eval_f1": 0.7431192660550459,
"eval_loss": 0.3399461805820465,
"eval_precision": 0.7232142857142857,
"eval_recall": 0.7641509433962265,
"eval_runtime": 16.589,
"eval_samples_per_second": 6.39,
"eval_steps_per_second": 0.241,
"step": 160
},
{
"epoch": 0.46264367816091956,
"grad_norm": 1.9849704504013062,
"learning_rate": 1.3013704828308276e-05,
"loss": 0.2715,
"step": 161
},
{
"epoch": 0.46551724137931033,
"grad_norm": 3.116058111190796,
"learning_rate": 1.2917850768111171e-05,
"loss": 0.3093,
"step": 162
},
{
"epoch": 0.46839080459770116,
"grad_norm": 3.0582470893859863,
"learning_rate": 1.282170275992012e-05,
"loss": 0.3502,
"step": 163
},
{
"epoch": 0.47126436781609193,
"grad_norm": 5.603944301605225,
"learning_rate": 1.2725270489807637e-05,
"loss": 0.3453,
"step": 164
},
{
"epoch": 0.47413793103448276,
"grad_norm": 3.758445978164673,
"learning_rate": 1.2628563672483147e-05,
"loss": 0.3542,
"step": 165
},
{
"epoch": 0.47701149425287354,
"grad_norm": 2.5959854125976562,
"learning_rate": 1.2531592050314308e-05,
"loss": 0.3346,
"step": 166
},
{
"epoch": 0.47988505747126436,
"grad_norm": 2.4197230339050293,
"learning_rate": 1.2434365392345553e-05,
"loss": 0.2817,
"step": 167
},
{
"epoch": 0.4827586206896552,
"grad_norm": 2.748478412628174,
"learning_rate": 1.2336893493313946e-05,
"loss": 0.3511,
"step": 168
},
{
"epoch": 0.48563218390804597,
"grad_norm": 3.117748975753784,
"learning_rate": 1.223918617266245e-05,
"loss": 0.3724,
"step": 169
},
{
"epoch": 0.4885057471264368,
"grad_norm": 2.6176235675811768,
"learning_rate": 1.2141253273550698e-05,
"loss": 0.3454,
"step": 170
},
{
"epoch": 0.49137931034482757,
"grad_norm": 2.648324966430664,
"learning_rate": 1.2043104661863386e-05,
"loss": 0.2713,
"step": 171
},
{
"epoch": 0.4942528735632184,
"grad_norm": 2.6428864002227783,
"learning_rate": 1.1944750225216363e-05,
"loss": 0.2618,
"step": 172
},
{
"epoch": 0.49712643678160917,
"grad_norm": 2.6188530921936035,
"learning_rate": 1.1846199871960557e-05,
"loss": 0.3443,
"step": 173
},
{
"epoch": 0.5,
"grad_norm": 2.8425047397613525,
"learning_rate": 1.1747463530183781e-05,
"loss": 0.3323,
"step": 174
},
{
"epoch": 0.5028735632183908,
"grad_norm": 3.0745861530303955,
"learning_rate": 1.1648551146710557e-05,
"loss": 0.2854,
"step": 175
},
{
"epoch": 0.5057471264367817,
"grad_norm": 3.5819411277770996,
"learning_rate": 1.1549472686100079e-05,
"loss": 0.3851,
"step": 176
},
{
"epoch": 0.5086206896551724,
"grad_norm": 2.7981903553009033,
"learning_rate": 1.145023812964237e-05,
"loss": 0.288,
"step": 177
},
{
"epoch": 0.5114942528735632,
"grad_norm": 2.600273847579956,
"learning_rate": 1.1350857474352734e-05,
"loss": 0.2982,
"step": 178
},
{
"epoch": 0.514367816091954,
"grad_norm": 2.207815170288086,
"learning_rate": 1.1251340731964664e-05,
"loss": 0.2697,
"step": 179
},
{
"epoch": 0.5172413793103449,
"grad_norm": 3.180569887161255,
"learning_rate": 1.1151697927921242e-05,
"loss": 0.2897,
"step": 180
},
{
"epoch": 0.5172413793103449,
"eval_accuracy": 0.8613861386138614,
"eval_f1": 0.7333333333333333,
"eval_loss": 0.3244224786758423,
"eval_precision": 0.7403846153846154,
"eval_recall": 0.7264150943396226,
"eval_runtime": 16.4883,
"eval_samples_per_second": 6.429,
"eval_steps_per_second": 0.243,
"step": 180
},
{
"epoch": 0.5201149425287356,
"grad_norm": 4.220306396484375,
"learning_rate": 1.1051939100365154e-05,
"loss": 0.254,
"step": 181
},
{
"epoch": 0.5229885057471264,
"grad_norm": 2.567113161087036,
"learning_rate": 1.0952074299127451e-05,
"loss": 0.2826,
"step": 182
},
{
"epoch": 0.5258620689655172,
"grad_norm": 2.881145715713501,
"learning_rate": 1.0852113584715103e-05,
"loss": 0.2882,
"step": 183
},
{
"epoch": 0.5287356321839081,
"grad_norm": 2.523293972015381,
"learning_rate": 1.0752067027297486e-05,
"loss": 0.3224,
"step": 184
},
{
"epoch": 0.5316091954022989,
"grad_norm": 3.2287135124206543,
"learning_rate": 1.065194470569193e-05,
"loss": 0.3054,
"step": 185
},
{
"epoch": 0.5344827586206896,
"grad_norm": 2.670992851257324,
"learning_rate": 1.0551756706348331e-05,
"loss": 0.3404,
"step": 186
},
{
"epoch": 0.5373563218390804,
"grad_norm": 2.8072383403778076,
"learning_rate": 1.0451513122333042e-05,
"loss": 0.3289,
"step": 187
},
{
"epoch": 0.5402298850574713,
"grad_norm": 2.7612991333007812,
"learning_rate": 1.035122405231209e-05,
"loss": 0.2705,
"step": 188
},
{
"epoch": 0.5431034482758621,
"grad_norm": 3.2553176879882812,
"learning_rate": 1.0250899599533833e-05,
"loss": 0.3088,
"step": 189
},
{
"epoch": 0.5459770114942529,
"grad_norm": 3.1975409984588623,
"learning_rate": 1.0150549870811108e-05,
"loss": 0.4086,
"step": 190
},
{
"epoch": 0.5488505747126436,
"grad_norm": 3.0886051654815674,
"learning_rate": 1.0050184975503104e-05,
"loss": 0.3177,
"step": 191
},
{
"epoch": 0.5517241379310345,
"grad_norm": 4.40545654296875,
"learning_rate": 9.949815024496901e-06,
"loss": 0.346,
"step": 192
},
{
"epoch": 0.5545977011494253,
"grad_norm": 4.658652305603027,
"learning_rate": 9.849450129188895e-06,
"loss": 0.2821,
"step": 193
},
{
"epoch": 0.5574712643678161,
"grad_norm": 7.797382831573486,
"learning_rate": 9.74910040046617e-06,
"loss": 0.3194,
"step": 194
},
{
"epoch": 0.5603448275862069,
"grad_norm": 3.3024234771728516,
"learning_rate": 9.648775947687914e-06,
"loss": 0.2912,
"step": 195
},
{
"epoch": 0.5632183908045977,
"grad_norm": 2.7685601711273193,
"learning_rate": 9.548486877666963e-06,
"loss": 0.3019,
"step": 196
},
{
"epoch": 0.5660919540229885,
"grad_norm": 3.0222320556640625,
"learning_rate": 9.448243293651676e-06,
"loss": 0.2963,
"step": 197
},
{
"epoch": 0.5689655172413793,
"grad_norm": 3.1808133125305176,
"learning_rate": 9.348055294308074e-06,
"loss": 0.3107,
"step": 198
},
{
"epoch": 0.5718390804597702,
"grad_norm": 2.3709192276000977,
"learning_rate": 9.247932972702514e-06,
"loss": 0.294,
"step": 199
},
{
"epoch": 0.5747126436781609,
"grad_norm": 2.9564688205718994,
"learning_rate": 9.147886415284903e-06,
"loss": 0.2599,
"step": 200
},
{
"epoch": 0.5747126436781609,
"eval_accuracy": 0.8638613861386139,
"eval_f1": 0.7417840375586855,
"eval_loss": 0.32250022888183594,
"eval_precision": 0.7383177570093458,
"eval_recall": 0.7452830188679245,
"eval_runtime": 16.3331,
"eval_samples_per_second": 6.49,
"eval_steps_per_second": 0.245,
"step": 200
},
{
"epoch": 0.5775862068965517,
"grad_norm": 3.761861801147461,
"learning_rate": 9.047925700872552e-06,
"loss": 0.3089,
"step": 201
},
{
"epoch": 0.5804597701149425,
"grad_norm": 2.631603240966797,
"learning_rate": 8.948060899634846e-06,
"loss": 0.2943,
"step": 202
},
{
"epoch": 0.5833333333333334,
"grad_norm": 2.613485336303711,
"learning_rate": 8.848302072078762e-06,
"loss": 0.2866,
"step": 203
},
{
"epoch": 0.5862068965517241,
"grad_norm": 2.1991889476776123,
"learning_rate": 8.748659268035339e-06,
"loss": 0.2749,
"step": 204
},
{
"epoch": 0.5890804597701149,
"grad_norm": 2.7884528636932373,
"learning_rate": 8.649142525647271e-06,
"loss": 0.3378,
"step": 205
},
{
"epoch": 0.5919540229885057,
"grad_norm": 4.045213222503662,
"learning_rate": 8.549761870357633e-06,
"loss": 0.3398,
"step": 206
},
{
"epoch": 0.5948275862068966,
"grad_norm": 2.83146595954895,
"learning_rate": 8.450527313899923e-06,
"loss": 0.2951,
"step": 207
},
{
"epoch": 0.5977011494252874,
"grad_norm": 2.664379358291626,
"learning_rate": 8.351448853289448e-06,
"loss": 0.3452,
"step": 208
},
{
"epoch": 0.6005747126436781,
"grad_norm": 2.2802395820617676,
"learning_rate": 8.25253646981622e-06,
"loss": 0.2993,
"step": 209
},
{
"epoch": 0.603448275862069,
"grad_norm": 2.1689095497131348,
"learning_rate": 8.153800128039441e-06,
"loss": 0.2847,
"step": 210
},
{
"epoch": 0.6063218390804598,
"grad_norm": 3.4026296138763428,
"learning_rate": 8.05524977478364e-06,
"loss": 0.3192,
"step": 211
},
{
"epoch": 0.6091954022988506,
"grad_norm": 3.272836923599243,
"learning_rate": 7.956895338136618e-06,
"loss": 0.3129,
"step": 212
},
{
"epoch": 0.6120689655172413,
"grad_norm": 4.036294460296631,
"learning_rate": 7.858746726449309e-06,
"loss": 0.3199,
"step": 213
},
{
"epoch": 0.6149425287356322,
"grad_norm": 2.3944919109344482,
"learning_rate": 7.760813827337555e-06,
"loss": 0.2513,
"step": 214
},
{
"epoch": 0.617816091954023,
"grad_norm": 4.011943340301514,
"learning_rate": 7.663106506686057e-06,
"loss": 0.3026,
"step": 215
},
{
"epoch": 0.6206896551724138,
"grad_norm": 2.424299716949463,
"learning_rate": 7.565634607654453e-06,
"loss": 0.3029,
"step": 216
},
{
"epoch": 0.6235632183908046,
"grad_norm": 5.497297286987305,
"learning_rate": 7.468407949685695e-06,
"loss": 0.3601,
"step": 217
},
{
"epoch": 0.6264367816091954,
"grad_norm": 2.9515810012817383,
"learning_rate": 7.371436327516854e-06,
"loss": 0.328,
"step": 218
},
{
"epoch": 0.6293103448275862,
"grad_norm": 3.359109878540039,
"learning_rate": 7.274729510192367e-06,
"loss": 0.3104,
"step": 219
},
{
"epoch": 0.632183908045977,
"grad_norm": 3.1870110034942627,
"learning_rate": 7.1782972400798825e-06,
"loss": 0.34,
"step": 220
},
{
"epoch": 0.632183908045977,
"eval_accuracy": 0.8688118811881188,
"eval_f1": 0.7464114832535885,
"eval_loss": 0.31775230169296265,
"eval_precision": 0.7572815533980582,
"eval_recall": 0.7358490566037735,
"eval_runtime": 17.2507,
"eval_samples_per_second": 6.145,
"eval_steps_per_second": 0.232,
"step": 220
},
{
"epoch": 0.6350574712643678,
"grad_norm": 2.9183595180511475,
"learning_rate": 7.082149231888833e-06,
"loss": 0.2827,
"step": 221
},
{
"epoch": 0.6379310344827587,
"grad_norm": 4.59254789352417,
"learning_rate": 6.986295171691727e-06,
"loss": 0.3284,
"step": 222
},
{
"epoch": 0.6408045977011494,
"grad_norm": 2.9099929332733154,
"learning_rate": 6.890744715948388e-06,
"loss": 0.2972,
"step": 223
},
{
"epoch": 0.6436781609195402,
"grad_norm": 2.407136917114258,
"learning_rate": 6.795507490533142e-06,
"loss": 0.2973,
"step": 224
},
{
"epoch": 0.646551724137931,
"grad_norm": 2.786597728729248,
"learning_rate": 6.700593089765086e-06,
"loss": 0.3426,
"step": 225
},
{
"epoch": 0.6494252873563219,
"grad_norm": 2.642282485961914,
"learning_rate": 6.606011075441556e-06,
"loss": 0.3201,
"step": 226
},
{
"epoch": 0.6522988505747126,
"grad_norm": 2.1382575035095215,
"learning_rate": 6.511770975874862e-06,
"loss": 0.2767,
"step": 227
},
{
"epoch": 0.6551724137931034,
"grad_norm": 4.03010892868042,
"learning_rate": 6.417882284932373e-06,
"loss": 0.2742,
"step": 228
},
{
"epoch": 0.6580459770114943,
"grad_norm": 2.754526138305664,
"learning_rate": 6.324354461080121e-06,
"loss": 0.2672,
"step": 229
},
{
"epoch": 0.6609195402298851,
"grad_norm": 2.4932737350463867,
"learning_rate": 6.231196926429913e-06,
"loss": 0.2835,
"step": 230
},
{
"epoch": 0.6637931034482759,
"grad_norm": 4.364743232727051,
"learning_rate": 6.138419065790169e-06,
"loss": 0.3079,
"step": 231
},
{
"epoch": 0.6666666666666666,
"grad_norm": 2.9517085552215576,
"learning_rate": 6.046030225720456e-06,
"loss": 0.2422,
"step": 232
},
{
"epoch": 0.6695402298850575,
"grad_norm": 2.8469698429107666,
"learning_rate": 5.95403971358991e-06,
"loss": 0.2641,
"step": 233
},
{
"epoch": 0.6724137931034483,
"grad_norm": 3.698885440826416,
"learning_rate": 5.86245679663962e-06,
"loss": 0.3572,
"step": 234
},
{
"epoch": 0.6752873563218391,
"grad_norm": 2.205653667449951,
"learning_rate": 5.7712907010490036e-06,
"loss": 0.252,
"step": 235
},
{
"epoch": 0.6781609195402298,
"grad_norm": 3.1082050800323486,
"learning_rate": 5.680550611006372e-06,
"loss": 0.3062,
"step": 236
},
{
"epoch": 0.6810344827586207,
"grad_norm": 4.51684045791626,
"learning_rate": 5.590245667783701e-06,
"loss": 0.281,
"step": 237
},
{
"epoch": 0.6839080459770115,
"grad_norm": 2.53916335105896,
"learning_rate": 5.5003849688157075e-06,
"loss": 0.3273,
"step": 238
},
{
"epoch": 0.6867816091954023,
"grad_norm": 3.1908535957336426,
"learning_rate": 5.4109775667833866e-06,
"loss": 0.3091,
"step": 239
},
{
"epoch": 0.6896551724137931,
"grad_norm": 2.92702579498291,
"learning_rate": 5.322032468702037e-06,
"loss": 0.2969,
"step": 240
},
{
"epoch": 0.6896551724137931,
"eval_accuracy": 0.8564356435643564,
"eval_f1": 0.7289719626168224,
"eval_loss": 0.3177642822265625,
"eval_precision": 0.7222222222222222,
"eval_recall": 0.7358490566037735,
"eval_runtime": 16.3833,
"eval_samples_per_second": 6.47,
"eval_steps_per_second": 0.244,
"step": 240
},
{
"epoch": 0.6925287356321839,
"grad_norm": 2.631377696990967,
"learning_rate": 5.233558635013842e-06,
"loss": 0.3108,
"step": 241
},
{
"epoch": 0.6954022988505747,
"grad_norm": 2.5703439712524414,
"learning_rate": 5.145564978685234e-06,
"loss": 0.2965,
"step": 242
},
{
"epoch": 0.6982758620689655,
"grad_norm": 5.4829254150390625,
"learning_rate": 5.058060364308965e-06,
"loss": 0.3302,
"step": 243
},
{
"epoch": 0.7011494252873564,
"grad_norm": 4.077176570892334,
"learning_rate": 4.971053607211069e-06,
"loss": 0.3833,
"step": 244
},
{
"epoch": 0.7040229885057471,
"grad_norm": 3.5669100284576416,
"learning_rate": 4.884553472562809e-06,
"loss": 0.3203,
"step": 245
},
{
"epoch": 0.7068965517241379,
"grad_norm": 2.850348472595215,
"learning_rate": 4.7985686744976714e-06,
"loss": 0.2846,
"step": 246
},
{
"epoch": 0.7097701149425287,
"grad_norm": 3.9147164821624756,
"learning_rate": 4.713107875233459e-06,
"loss": 0.3315,
"step": 247
},
{
"epoch": 0.7126436781609196,
"grad_norm": 3.5606236457824707,
"learning_rate": 4.628179684199685e-06,
"loss": 0.3101,
"step": 248
},
{
"epoch": 0.7155172413793104,
"grad_norm": 2.9054181575775146,
"learning_rate": 4.543792657170228e-06,
"loss": 0.3086,
"step": 249
},
{
"epoch": 0.7183908045977011,
"grad_norm": 2.9038889408111572,
"learning_rate": 4.459955295401415e-06,
"loss": 0.2979,
"step": 250
},
{
"epoch": 0.7212643678160919,
"grad_norm": 2.952456474304199,
"learning_rate": 4.376676044775601e-06,
"loss": 0.3221,
"step": 251
},
{
"epoch": 0.7241379310344828,
"grad_norm": 2.3187882900238037,
"learning_rate": 4.293963294950313e-06,
"loss": 0.296,
"step": 252
},
{
"epoch": 0.7270114942528736,
"grad_norm": 4.975540637969971,
"learning_rate": 4.211825378513066e-06,
"loss": 0.3873,
"step": 253
},
{
"epoch": 0.7298850574712644,
"grad_norm": 2.431337833404541,
"learning_rate": 4.130270570141931e-06,
"loss": 0.3386,
"step": 254
},
{
"epoch": 0.7327586206896551,
"grad_norm": 3.258333921432495,
"learning_rate": 4.0493070857719305e-06,
"loss": 0.3402,
"step": 255
},
{
"epoch": 0.735632183908046,
"grad_norm": 2.442279577255249,
"learning_rate": 3.968943081767358e-06,
"loss": 0.2268,
"step": 256
},
{
"epoch": 0.7385057471264368,
"grad_norm": 3.3889667987823486,
"learning_rate": 3.889186654100089e-06,
"loss": 0.3008,
"step": 257
},
{
"epoch": 0.7413793103448276,
"grad_norm": 2.3388214111328125,
"learning_rate": 3.81004583753399e-06,
"loss": 0.3399,
"step": 258
},
{
"epoch": 0.7442528735632183,
"grad_norm": 2.363194704055786,
"learning_rate": 3.7315286048154862e-06,
"loss": 0.2797,
"step": 259
},
{
"epoch": 0.7471264367816092,
"grad_norm": 3.6801648139953613,
"learning_rate": 3.6536428658703594e-06,
"loss": 0.3179,
"step": 260
},
{
"epoch": 0.7471264367816092,
"eval_accuracy": 0.8663366336633663,
"eval_f1": 0.7452830188679245,
"eval_loss": 0.3128357529640198,
"eval_precision": 0.7452830188679245,
"eval_recall": 0.7452830188679245,
"eval_runtime": 16.9937,
"eval_samples_per_second": 6.238,
"eval_steps_per_second": 0.235,
"step": 260
},
{
"epoch": 0.75,
"grad_norm": 3.5845208168029785,
"learning_rate": 3.576396467006925e-06,
"loss": 0.3121,
"step": 261
},
{
"epoch": 0.7528735632183908,
"grad_norm": 2.5115549564361572,
"learning_rate": 3.4997971901255588e-06,
"loss": 0.2695,
"step": 262
},
{
"epoch": 0.7557471264367817,
"grad_norm": 3.1949312686920166,
"learning_rate": 3.4238527519347353e-06,
"loss": 0.29,
"step": 263
},
{
"epoch": 0.7586206896551724,
"grad_norm": 3.134657859802246,
"learning_rate": 3.3485708031736698e-06,
"loss": 0.2959,
"step": 264
},
{
"epoch": 0.7614942528735632,
"grad_norm": 2.359828472137451,
"learning_rate": 3.2739589278415252e-06,
"loss": 0.299,
"step": 265
},
{
"epoch": 0.764367816091954,
"grad_norm": 2.662598133087158,
"learning_rate": 3.2000246424334315e-06,
"loss": 0.2887,
"step": 266
},
{
"epoch": 0.7672413793103449,
"grad_norm": 2.822681188583374,
"learning_rate": 3.1267753951832523e-06,
"loss": 0.3337,
"step": 267
},
{
"epoch": 0.7701149425287356,
"grad_norm": 3.435675859451294,
"learning_rate": 3.0542185653132216e-06,
"loss": 0.2431,
"step": 268
},
{
"epoch": 0.7729885057471264,
"grad_norm": 3.8508052825927734,
"learning_rate": 2.982361462290575e-06,
"loss": 0.3625,
"step": 269
},
{
"epoch": 0.7758620689655172,
"grad_norm": 3.24882435798645,
"learning_rate": 2.9112113250911844e-06,
"loss": 0.3255,
"step": 270
},
{
"epoch": 0.7787356321839081,
"grad_norm": 3.215721368789673,
"learning_rate": 2.8407753214702694e-06,
"loss": 0.3055,
"step": 271
},
{
"epoch": 0.7816091954022989,
"grad_norm": 3.5768065452575684,
"learning_rate": 2.7710605472403373e-06,
"loss": 0.2593,
"step": 272
},
{
"epoch": 0.7844827586206896,
"grad_norm": 3.4842770099639893,
"learning_rate": 2.702074025556327e-06,
"loss": 0.3183,
"step": 273
},
{
"epoch": 0.7873563218390804,
"grad_norm": 2.8685038089752197,
"learning_rate": 2.6338227062080924e-06,
"loss": 0.2674,
"step": 274
},
{
"epoch": 0.7902298850574713,
"grad_norm": 3.008521318435669,
"learning_rate": 2.566313464920265e-06,
"loss": 0.2944,
"step": 275
},
{
"epoch": 0.7931034482758621,
"grad_norm": 2.9339377880096436,
"learning_rate": 2.4995531026595952e-06,
"loss": 0.295,
"step": 276
},
{
"epoch": 0.7959770114942529,
"grad_norm": 4.123067378997803,
"learning_rate": 2.4335483449498053e-06,
"loss": 0.2295,
"step": 277
},
{
"epoch": 0.7988505747126436,
"grad_norm": 2.862365245819092,
"learning_rate": 2.3683058411940563e-06,
"loss": 0.2967,
"step": 278
},
{
"epoch": 0.8017241379310345,
"grad_norm": 4.078983783721924,
"learning_rate": 2.3038321640050763e-06,
"loss": 0.3005,
"step": 279
},
{
"epoch": 0.8045977011494253,
"grad_norm": 4.147453308105469,
"learning_rate": 2.2401338085430326e-06,
"loss": 0.2901,
"step": 280
},
{
"epoch": 0.8045977011494253,
"eval_accuracy": 0.8638613861386139,
"eval_f1": 0.7417840375586855,
"eval_loss": 0.3146108090877533,
"eval_precision": 0.7383177570093458,
"eval_recall": 0.7452830188679245,
"eval_runtime": 16.7331,
"eval_samples_per_second": 6.335,
"eval_steps_per_second": 0.239,
"step": 280
},
{
"epoch": 0.8074712643678161,
"grad_norm": 2.4641005992889404,
"learning_rate": 2.177217191861183e-06,
"loss": 0.2452,
"step": 281
},
{
"epoch": 0.8103448275862069,
"grad_norm": 3.1481075286865234,
"learning_rate": 2.115088652259446e-06,
"loss": 0.3355,
"step": 282
},
{
"epoch": 0.8132183908045977,
"grad_norm": 2.2011497020721436,
"learning_rate": 2.053754448645846e-06,
"loss": 0.2256,
"step": 283
},
{
"epoch": 0.8160919540229885,
"grad_norm": 3.1297502517700195,
"learning_rate": 1.9932207599059782e-06,
"loss": 0.2899,
"step": 284
},
{
"epoch": 0.8189655172413793,
"grad_norm": 2.566171646118164,
"learning_rate": 1.933493684280574e-06,
"loss": 0.2527,
"step": 285
},
{
"epoch": 0.8218390804597702,
"grad_norm": 3.0499560832977295,
"learning_rate": 1.8745792387511241e-06,
"loss": 0.2979,
"step": 286
},
{
"epoch": 0.8247126436781609,
"grad_norm": 3.5081562995910645,
"learning_rate": 1.8164833584337216e-06,
"loss": 0.2766,
"step": 287
},
{
"epoch": 0.8275862068965517,
"grad_norm": 3.2664620876312256,
"learning_rate": 1.75921189598118e-06,
"loss": 0.3008,
"step": 288
},
{
"epoch": 0.8304597701149425,
"grad_norm": 3.314521551132202,
"learning_rate": 1.7027706209933903e-06,
"loss": 0.3387,
"step": 289
},
{
"epoch": 0.8333333333333334,
"grad_norm": 2.5149619579315186,
"learning_rate": 1.6471652194361131e-06,
"loss": 0.3032,
"step": 290
},
{
"epoch": 0.8362068965517241,
"grad_norm": 3.847849130630493,
"learning_rate": 1.5924012930681643e-06,
"loss": 0.3218,
"step": 291
},
{
"epoch": 0.8390804597701149,
"grad_norm": 6.620360374450684,
"learning_rate": 1.5384843588770626e-06,
"loss": 0.3464,
"step": 292
},
{
"epoch": 0.8419540229885057,
"grad_norm": 3.233356475830078,
"learning_rate": 1.4854198485232696e-06,
"loss": 0.2475,
"step": 293
},
{
"epoch": 0.8448275862068966,
"grad_norm": 4.268490314483643,
"learning_rate": 1.433213107792991e-06,
"loss": 0.3646,
"step": 294
},
{
"epoch": 0.8477011494252874,
"grad_norm": 3.641005754470825,
"learning_rate": 1.3818693960596186e-06,
"loss": 0.3347,
"step": 295
},
{
"epoch": 0.8505747126436781,
"grad_norm": 2.945902109146118,
"learning_rate": 1.3313938857539133e-06,
"loss": 0.2806,
"step": 296
},
{
"epoch": 0.853448275862069,
"grad_norm": 3.552212715148926,
"learning_rate": 1.2817916618429194e-06,
"loss": 0.2993,
"step": 297
},
{
"epoch": 0.8563218390804598,
"grad_norm": 3.9987523555755615,
"learning_rate": 1.2330677213177034e-06,
"loss": 0.2611,
"step": 298
},
{
"epoch": 0.8591954022988506,
"grad_norm": 4.93873405456543,
"learning_rate": 1.1852269726899423e-06,
"loss": 0.3131,
"step": 299
},
{
"epoch": 0.8620689655172413,
"grad_norm": 2.6833460330963135,
"learning_rate": 1.138274235497443e-06,
"loss": 0.2587,
"step": 300
},
{
"epoch": 0.8620689655172413,
"eval_accuracy": 0.8638613861386139,
"eval_f1": 0.7417840375586855,
"eval_loss": 0.3137795925140381,
"eval_precision": 0.7383177570093458,
"eval_recall": 0.7452830188679245,
"eval_runtime": 16.8912,
"eval_samples_per_second": 6.275,
"eval_steps_per_second": 0.237,
"step": 300
},
{
"epoch": 0.8649425287356322,
"grad_norm": 2.3745739459991455,
"learning_rate": 1.0922142398186097e-06,
"loss": 0.2735,
"step": 301
},
{
"epoch": 0.867816091954023,
"grad_norm": 3.9790706634521484,
"learning_rate": 1.0470516257959351e-06,
"loss": 0.2966,
"step": 302
},
{
"epoch": 0.8706896551724138,
"grad_norm": 2.3996686935424805,
"learning_rate": 1.00279094316854e-06,
"loss": 0.2725,
"step": 303
},
{
"epoch": 0.8735632183908046,
"grad_norm": 3.1654207706451416,
"learning_rate": 9.594366508138352e-07,
"loss": 0.2983,
"step": 304
},
{
"epoch": 0.8764367816091954,
"grad_norm": 4.094039440155029,
"learning_rate": 9.169931162983137e-07,
"loss": 0.2797,
"step": 305
},
{
"epoch": 0.8793103448275862,
"grad_norm": 2.8442471027374268,
"learning_rate": 8.754646154375801e-07,
"loss": 0.2584,
"step": 306
},
{
"epoch": 0.882183908045977,
"grad_norm": 3.6936562061309814,
"learning_rate": 8.348553318655795e-07,
"loss": 0.3164,
"step": 307
},
{
"epoch": 0.8850574712643678,
"grad_norm": 2.954345703125,
"learning_rate": 7.951693566131325e-07,
"loss": 0.3144,
"step": 308
},
{
"epoch": 0.8879310344827587,
"grad_norm": 2.597691774368286,
"learning_rate": 7.564106876958188e-07,
"loss": 0.2971,
"step": 309
},
{
"epoch": 0.8908045977011494,
"grad_norm": 3.1714141368865967,
"learning_rate": 7.185832297111939e-07,
"loss": 0.391,
"step": 310
},
{
"epoch": 0.8936781609195402,
"grad_norm": 2.410207748413086,
"learning_rate": 6.816907934454353e-07,
"loss": 0.2538,
"step": 311
},
{
"epoch": 0.896551724137931,
"grad_norm": 2.8893043994903564,
"learning_rate": 6.457370954894582e-07,
"loss": 0.2312,
"step": 312
},
{
"epoch": 0.8994252873563219,
"grad_norm": 2.990267276763916,
"learning_rate": 6.107257578644721e-07,
"loss": 0.27,
"step": 313
},
{
"epoch": 0.9022988505747126,
"grad_norm": 2.6063694953918457,
"learning_rate": 5.766603076571164e-07,
"loss": 0.2675,
"step": 314
},
{
"epoch": 0.9051724137931034,
"grad_norm": 2.8116941452026367,
"learning_rate": 5.43544176664137e-07,
"loss": 0.2846,
"step": 315
},
{
"epoch": 0.9080459770114943,
"grad_norm": 2.8802504539489746,
"learning_rate": 5.113807010466432e-07,
"loss": 0.2898,
"step": 316
},
{
"epoch": 0.9109195402298851,
"grad_norm": 3.1726322174072266,
"learning_rate": 4.801731209940375e-07,
"loss": 0.2796,
"step": 317
},
{
"epoch": 0.9137931034482759,
"grad_norm": 3.0099661350250244,
"learning_rate": 4.499245803975927e-07,
"loss": 0.2649,
"step": 318
},
{
"epoch": 0.9166666666666666,
"grad_norm": 3.7728664875030518,
"learning_rate": 4.206381265337189e-07,
"loss": 0.3021,
"step": 319
},
{
"epoch": 0.9195402298850575,
"grad_norm": 3.430644989013672,
"learning_rate": 3.9231670975699354e-07,
"loss": 0.326,
"step": 320
},
{
"epoch": 0.9195402298850575,
"eval_accuracy": 0.8589108910891089,
"eval_f1": 0.7348837209302326,
"eval_loss": 0.31510937213897705,
"eval_precision": 0.7247706422018348,
"eval_recall": 0.7452830188679245,
"eval_runtime": 17.0364,
"eval_samples_per_second": 6.222,
"eval_steps_per_second": 0.235,
"step": 320
},
{
"epoch": 0.9224137931034483,
"grad_norm": 4.415366172790527,
"learning_rate": 3.649631832029288e-07,
"loss": 0.3382,
"step": 321
},
{
"epoch": 0.9252873563218391,
"grad_norm": 2.5626957416534424,
"learning_rate": 3.385803025005463e-07,
"loss": 0.2792,
"step": 322
},
{
"epoch": 0.9281609195402298,
"grad_norm": 3.4501123428344727,
"learning_rate": 3.1317072549477246e-07,
"loss": 0.3482,
"step": 323
},
{
"epoch": 0.9310344827586207,
"grad_norm": 2.9147861003875732,
"learning_rate": 2.887370119786792e-07,
"loss": 0.2824,
"step": 324
},
{
"epoch": 0.9339080459770115,
"grad_norm": 3.037773847579956,
"learning_rate": 2.6528162343561593e-07,
"loss": 0.3257,
"step": 325
},
{
"epoch": 0.9367816091954023,
"grad_norm": 3.1816771030426025,
"learning_rate": 2.4280692279122554e-07,
"loss": 0.2645,
"step": 326
},
{
"epoch": 0.9396551724137931,
"grad_norm": 2.802854537963867,
"learning_rate": 2.2131517417540937e-07,
"loss": 0.2947,
"step": 327
},
{
"epoch": 0.9425287356321839,
"grad_norm": 2.949431896209717,
"learning_rate": 2.00808542694233e-07,
"loss": 0.2907,
"step": 328
},
{
"epoch": 0.9454022988505747,
"grad_norm": 2.371004581451416,
"learning_rate": 1.8128909421180506e-07,
"loss": 0.2558,
"step": 329
},
{
"epoch": 0.9482758620689655,
"grad_norm": 2.651993989944458,
"learning_rate": 1.6275879514217052e-07,
"loss": 0.3132,
"step": 330
},
{
"epoch": 0.9511494252873564,
"grad_norm": 3.5036203861236572,
"learning_rate": 1.4521951225120345e-07,
"loss": 0.3745,
"step": 331
},
{
"epoch": 0.9540229885057471,
"grad_norm": 2.3972132205963135,
"learning_rate": 1.2867301246854757e-07,
"loss": 0.2746,
"step": 332
},
{
"epoch": 0.9568965517241379,
"grad_norm": 2.4108810424804688,
"learning_rate": 1.1312096270961525e-07,
"loss": 0.2656,
"step": 333
},
{
"epoch": 0.9597701149425287,
"grad_norm": 2.761547565460205,
"learning_rate": 9.856492970766296e-08,
"loss": 0.3047,
"step": 334
},
{
"epoch": 0.9626436781609196,
"grad_norm": 2.521554470062256,
"learning_rate": 8.50063798559475e-08,
"loss": 0.2753,
"step": 335
},
{
"epoch": 0.9655172413793104,
"grad_norm": 3.3763246536254883,
"learning_rate": 7.244667906001202e-08,
"loss": 0.2594,
"step": 336
},
{
"epoch": 0.9683908045977011,
"grad_norm": 3.34621000289917,
"learning_rate": 6.088709260007153e-08,
"loss": 0.2837,
"step": 337
},
{
"epoch": 0.9712643678160919,
"grad_norm": 2.6902542114257812,
"learning_rate": 5.032878500355498e-08,
"loss": 0.2849,
"step": 338
},
{
"epoch": 0.9741379310344828,
"grad_norm": 4.887283802032471,
"learning_rate": 4.07728199277857e-08,
"loss": 0.3055,
"step": 339
},
{
"epoch": 0.9770114942528736,
"grad_norm": 4.528458118438721,
"learning_rate": 3.2220160052828245e-08,
"loss": 0.3475,
"step": 340
},
{
"epoch": 0.9770114942528736,
"eval_accuracy": 0.8638613861386139,
"eval_f1": 0.7417840375586855,
"eval_loss": 0.3143324553966522,
"eval_precision": 0.7383177570093458,
"eval_recall": 0.7452830188679245,
"eval_runtime": 16.6976,
"eval_samples_per_second": 6.348,
"eval_steps_per_second": 0.24,
"step": 340
},
{
"epoch": 0.9798850574712644,
"grad_norm": 2.665673017501831,
"learning_rate": 2.467166698450485e-08,
"loss": 0.2825,
"step": 341
},
{
"epoch": 0.9827586206896551,
"grad_norm": 3.8746824264526367,
"learning_rate": 1.812810116760044e-08,
"loss": 0.2802,
"step": 342
},
{
"epoch": 0.985632183908046,
"grad_norm": 4.368228912353516,
"learning_rate": 1.2590121809247235e-08,
"loss": 0.3322,
"step": 343
},
{
"epoch": 0.9885057471264368,
"grad_norm": 2.3723561763763428,
"learning_rate": 8.05828681252452e-09,
"loss": 0.2589,
"step": 344
},
{
"epoch": 0.9913793103448276,
"grad_norm": 3.3824245929718018,
"learning_rate": 4.5330527202480656e-09,
"loss": 0.3456,
"step": 345
},
{
"epoch": 0.9942528735632183,
"grad_norm": 2.772489070892334,
"learning_rate": 2.014774668979147e-09,
"loss": 0.2756,
"step": 346
},
{
"epoch": 0.9971264367816092,
"grad_norm": 4.0318827629089355,
"learning_rate": 5.037063532498109e-10,
"loss": 0.303,
"step": 347
},
{
"epoch": 1.0,
"grad_norm": 3.210477113723755,
"learning_rate": 0.0,
"loss": 0.2905,
"step": 348
}
],
"logging_steps": 1,
"max_steps": 348,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.0991834484860518e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}