{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.999163179916318,
"eval_steps": 20,
"global_step": 796,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0,
"eval_accuracy": 0.7339955849889624,
"eval_f1": 0.2445141065830721,
"eval_loss": 0.5994934439659119,
"eval_precision": 0.6,
"eval_recall": 0.15354330708661418,
"eval_runtime": 52.2305,
"eval_samples_per_second": 5.303,
"eval_steps_per_second": 0.172,
"step": 0
},
{
"epoch": 0.0012552301255230125,
"grad_norm": 1.9609311819076538,
"learning_rate": 2.5000000000000004e-07,
"loss": 0.6978,
"step": 1
},
{
"epoch": 0.002510460251046025,
"grad_norm": 1.9798370599746704,
"learning_rate": 5.000000000000001e-07,
"loss": 0.6725,
"step": 2
},
{
"epoch": 0.0037656903765690376,
"grad_norm": 2.634610652923584,
"learning_rate": 7.5e-07,
"loss": 0.7798,
"step": 3
},
{
"epoch": 0.00502092050209205,
"grad_norm": 1.9435328245162964,
"learning_rate": 1.0000000000000002e-06,
"loss": 0.6777,
"step": 4
},
{
"epoch": 0.006276150627615063,
"grad_norm": 2.0514488220214844,
"learning_rate": 1.25e-06,
"loss": 0.7849,
"step": 5
},
{
"epoch": 0.007531380753138075,
"grad_norm": 3.1893956661224365,
"learning_rate": 1.5e-06,
"loss": 0.7281,
"step": 6
},
{
"epoch": 0.008786610878661089,
"grad_norm": 2.2875595092773438,
"learning_rate": 1.75e-06,
"loss": 0.7267,
"step": 7
},
{
"epoch": 0.0100418410041841,
"grad_norm": 1.7282941341400146,
"learning_rate": 2.0000000000000003e-06,
"loss": 0.642,
"step": 8
},
{
"epoch": 0.011297071129707114,
"grad_norm": 1.9356372356414795,
"learning_rate": 2.25e-06,
"loss": 0.6923,
"step": 9
},
{
"epoch": 0.012552301255230125,
"grad_norm": 2.0085136890411377,
"learning_rate": 2.5e-06,
"loss": 0.6507,
"step": 10
},
{
"epoch": 0.013807531380753139,
"grad_norm": 2.1001739501953125,
"learning_rate": 2.7500000000000004e-06,
"loss": 0.6162,
"step": 11
},
{
"epoch": 0.01506276150627615,
"grad_norm": 2.053370714187622,
"learning_rate": 3e-06,
"loss": 0.674,
"step": 12
},
{
"epoch": 0.016317991631799162,
"grad_norm": 2.3665823936462402,
"learning_rate": 3.2500000000000002e-06,
"loss": 0.6931,
"step": 13
},
{
"epoch": 0.017573221757322177,
"grad_norm": 1.99113929271698,
"learning_rate": 3.5e-06,
"loss": 0.7218,
"step": 14
},
{
"epoch": 0.01882845188284519,
"grad_norm": 1.8170547485351562,
"learning_rate": 3.7500000000000005e-06,
"loss": 0.6488,
"step": 15
},
{
"epoch": 0.0200836820083682,
"grad_norm": 1.8335210084915161,
"learning_rate": 4.000000000000001e-06,
"loss": 0.6467,
"step": 16
},
{
"epoch": 0.021338912133891212,
"grad_norm": 1.6069227457046509,
"learning_rate": 4.25e-06,
"loss": 0.6395,
"step": 17
},
{
"epoch": 0.022594142259414227,
"grad_norm": 1.7745310068130493,
"learning_rate": 4.5e-06,
"loss": 0.6905,
"step": 18
},
{
"epoch": 0.02384937238493724,
"grad_norm": 2.1716341972351074,
"learning_rate": 4.75e-06,
"loss": 0.653,
"step": 19
},
{
"epoch": 0.02510460251046025,
"grad_norm": 1.9034003019332886,
"learning_rate": 5e-06,
"loss": 0.7266,
"step": 20
},
{
"epoch": 0.02510460251046025,
"eval_accuracy": 0.7384105960264901,
"eval_f1": 0.26625386996904027,
"eval_loss": 0.5908851027488708,
"eval_precision": 0.6231884057971014,
"eval_recall": 0.16929133858267717,
"eval_runtime": 52.6006,
"eval_samples_per_second": 5.266,
"eval_steps_per_second": 0.171,
"step": 20
},
{
"epoch": 0.026359832635983262,
"grad_norm": 1.7850794792175293,
"learning_rate": 5.2500000000000006e-06,
"loss": 0.65,
"step": 21
},
{
"epoch": 0.027615062761506277,
"grad_norm": 1.765768051147461,
"learning_rate": 5.500000000000001e-06,
"loss": 0.6715,
"step": 22
},
{
"epoch": 0.02887029288702929,
"grad_norm": 1.8612842559814453,
"learning_rate": 5.75e-06,
"loss": 0.6614,
"step": 23
},
{
"epoch": 0.0301255230125523,
"grad_norm": 1.8554290533065796,
"learning_rate": 6e-06,
"loss": 0.641,
"step": 24
},
{
"epoch": 0.03138075313807531,
"grad_norm": 1.8867026567459106,
"learning_rate": 6.25e-06,
"loss": 0.6224,
"step": 25
},
{
"epoch": 0.032635983263598324,
"grad_norm": 1.8916778564453125,
"learning_rate": 6.5000000000000004e-06,
"loss": 0.6471,
"step": 26
},
{
"epoch": 0.033891213389121336,
"grad_norm": 1.7810847759246826,
"learning_rate": 6.750000000000001e-06,
"loss": 0.6197,
"step": 27
},
{
"epoch": 0.035146443514644354,
"grad_norm": 1.6231898069381714,
"learning_rate": 7e-06,
"loss": 0.6362,
"step": 28
},
{
"epoch": 0.036401673640167366,
"grad_norm": 1.7416272163391113,
"learning_rate": 7.25e-06,
"loss": 0.6425,
"step": 29
},
{
"epoch": 0.03765690376569038,
"grad_norm": 1.6757186651229858,
"learning_rate": 7.500000000000001e-06,
"loss": 0.5854,
"step": 30
},
{
"epoch": 0.03891213389121339,
"grad_norm": 1.9119001626968384,
"learning_rate": 7.75e-06,
"loss": 0.5457,
"step": 31
},
{
"epoch": 0.0401673640167364,
"grad_norm": 1.576582908630371,
"learning_rate": 8.000000000000001e-06,
"loss": 0.5516,
"step": 32
},
{
"epoch": 0.04142259414225941,
"grad_norm": 1.5435791015625,
"learning_rate": 8.25e-06,
"loss": 0.5458,
"step": 33
},
{
"epoch": 0.042677824267782424,
"grad_norm": 1.8229247331619263,
"learning_rate": 8.5e-06,
"loss": 0.6167,
"step": 34
},
{
"epoch": 0.043933054393305436,
"grad_norm": 2.217472791671753,
"learning_rate": 8.750000000000001e-06,
"loss": 0.6588,
"step": 35
},
{
"epoch": 0.045188284518828455,
"grad_norm": 1.8096412420272827,
"learning_rate": 9e-06,
"loss": 0.6595,
"step": 36
},
{
"epoch": 0.046443514644351466,
"grad_norm": 1.8887217044830322,
"learning_rate": 9.250000000000001e-06,
"loss": 0.6004,
"step": 37
},
{
"epoch": 0.04769874476987448,
"grad_norm": 1.55510413646698,
"learning_rate": 9.5e-06,
"loss": 0.5685,
"step": 38
},
{
"epoch": 0.04895397489539749,
"grad_norm": 1.6017107963562012,
"learning_rate": 9.75e-06,
"loss": 0.497,
"step": 39
},
{
"epoch": 0.0502092050209205,
"grad_norm": 2.4420340061187744,
"learning_rate": 1e-05,
"loss": 0.674,
"step": 40
},
{
"epoch": 0.0502092050209205,
"eval_accuracy": 0.7516556291390728,
"eval_f1": 0.3553008595988539,
"eval_loss": 0.5497225522994995,
"eval_precision": 0.6526315789473685,
"eval_recall": 0.2440944881889764,
"eval_runtime": 53.1059,
"eval_samples_per_second": 5.216,
"eval_steps_per_second": 0.169,
"step": 40
},
{
"epoch": 0.05146443514644351,
"grad_norm": 1.6397217512130737,
"learning_rate": 1.025e-05,
"loss": 0.5409,
"step": 41
},
{
"epoch": 0.052719665271966525,
"grad_norm": 1.616377353668213,
"learning_rate": 1.0500000000000001e-05,
"loss": 0.5902,
"step": 42
},
{
"epoch": 0.05397489539748954,
"grad_norm": 1.5098403692245483,
"learning_rate": 1.075e-05,
"loss": 0.5468,
"step": 43
},
{
"epoch": 0.055230125523012555,
"grad_norm": 2.074469566345215,
"learning_rate": 1.1000000000000001e-05,
"loss": 0.5969,
"step": 44
},
{
"epoch": 0.056485355648535567,
"grad_norm": 1.646159291267395,
"learning_rate": 1.125e-05,
"loss": 0.5629,
"step": 45
},
{
"epoch": 0.05774058577405858,
"grad_norm": 1.7806731462478638,
"learning_rate": 1.15e-05,
"loss": 0.565,
"step": 46
},
{
"epoch": 0.05899581589958159,
"grad_norm": 1.5080231428146362,
"learning_rate": 1.1750000000000001e-05,
"loss": 0.5349,
"step": 47
},
{
"epoch": 0.0602510460251046,
"grad_norm": 1.4364521503448486,
"learning_rate": 1.2e-05,
"loss": 0.5335,
"step": 48
},
{
"epoch": 0.06150627615062761,
"grad_norm": 1.3679174184799194,
"learning_rate": 1.2250000000000001e-05,
"loss": 0.5727,
"step": 49
},
{
"epoch": 0.06276150627615062,
"grad_norm": 2.1571309566497803,
"learning_rate": 1.25e-05,
"loss": 0.473,
"step": 50
},
{
"epoch": 0.06401673640167364,
"grad_norm": 1.4333269596099854,
"learning_rate": 1.275e-05,
"loss": 0.5388,
"step": 51
},
{
"epoch": 0.06527196652719665,
"grad_norm": 1.5196632146835327,
"learning_rate": 1.3000000000000001e-05,
"loss": 0.5264,
"step": 52
},
{
"epoch": 0.06652719665271967,
"grad_norm": 1.491036057472229,
"learning_rate": 1.325e-05,
"loss": 0.4879,
"step": 53
},
{
"epoch": 0.06778242677824267,
"grad_norm": 2.134739875793457,
"learning_rate": 1.3500000000000001e-05,
"loss": 0.563,
"step": 54
},
{
"epoch": 0.06903765690376569,
"grad_norm": 1.4758329391479492,
"learning_rate": 1.375e-05,
"loss": 0.4958,
"step": 55
},
{
"epoch": 0.07029288702928871,
"grad_norm": 1.4116944074630737,
"learning_rate": 1.4e-05,
"loss": 0.5011,
"step": 56
},
{
"epoch": 0.07154811715481171,
"grad_norm": 2.1076178550720215,
"learning_rate": 1.425e-05,
"loss": 0.5847,
"step": 57
},
{
"epoch": 0.07280334728033473,
"grad_norm": 1.6353111267089844,
"learning_rate": 1.45e-05,
"loss": 0.573,
"step": 58
},
{
"epoch": 0.07405857740585774,
"grad_norm": 1.8148682117462158,
"learning_rate": 1.4750000000000003e-05,
"loss": 0.4973,
"step": 59
},
{
"epoch": 0.07531380753138076,
"grad_norm": 1.46212637424469,
"learning_rate": 1.5000000000000002e-05,
"loss": 0.5187,
"step": 60
},
{
"epoch": 0.07531380753138076,
"eval_accuracy": 0.7759381898454746,
"eval_f1": 0.5333333333333333,
"eval_loss": 0.4896416962146759,
"eval_precision": 0.6408839779005525,
"eval_recall": 0.4566929133858268,
"eval_runtime": 52.5243,
"eval_samples_per_second": 5.274,
"eval_steps_per_second": 0.171,
"step": 60
},
{
"epoch": 0.07656903765690376,
"grad_norm": 1.7205400466918945,
"learning_rate": 1.525e-05,
"loss": 0.4742,
"step": 61
},
{
"epoch": 0.07782426778242678,
"grad_norm": 1.6081304550170898,
"learning_rate": 1.55e-05,
"loss": 0.4835,
"step": 62
},
{
"epoch": 0.0790794979079498,
"grad_norm": 1.903638482093811,
"learning_rate": 1.575e-05,
"loss": 0.525,
"step": 63
},
{
"epoch": 0.0803347280334728,
"grad_norm": 1.9859038591384888,
"learning_rate": 1.6000000000000003e-05,
"loss": 0.5183,
"step": 64
},
{
"epoch": 0.08158995815899582,
"grad_norm": 1.567049264907837,
"learning_rate": 1.6250000000000002e-05,
"loss": 0.4662,
"step": 65
},
{
"epoch": 0.08284518828451883,
"grad_norm": 1.9365341663360596,
"learning_rate": 1.65e-05,
"loss": 0.5389,
"step": 66
},
{
"epoch": 0.08410041841004184,
"grad_norm": 2.623508930206299,
"learning_rate": 1.675e-05,
"loss": 0.4868,
"step": 67
},
{
"epoch": 0.08535564853556485,
"grad_norm": 2.0028135776519775,
"learning_rate": 1.7e-05,
"loss": 0.5124,
"step": 68
},
{
"epoch": 0.08661087866108787,
"grad_norm": 1.8613665103912354,
"learning_rate": 1.7250000000000003e-05,
"loss": 0.4848,
"step": 69
},
{
"epoch": 0.08786610878661087,
"grad_norm": 2.0450963973999023,
"learning_rate": 1.7500000000000002e-05,
"loss": 0.4746,
"step": 70
},
{
"epoch": 0.08912133891213389,
"grad_norm": 2.370429515838623,
"learning_rate": 1.775e-05,
"loss": 0.4939,
"step": 71
},
{
"epoch": 0.09037656903765691,
"grad_norm": 2.433497905731201,
"learning_rate": 1.8e-05,
"loss": 0.3848,
"step": 72
},
{
"epoch": 0.09163179916317991,
"grad_norm": 2.043933868408203,
"learning_rate": 1.825e-05,
"loss": 0.4812,
"step": 73
},
{
"epoch": 0.09288702928870293,
"grad_norm": 3.1938834190368652,
"learning_rate": 1.8500000000000002e-05,
"loss": 0.4042,
"step": 74
},
{
"epoch": 0.09414225941422594,
"grad_norm": 2.2702953815460205,
"learning_rate": 1.8750000000000002e-05,
"loss": 0.4561,
"step": 75
},
{
"epoch": 0.09539748953974896,
"grad_norm": 2.0371036529541016,
"learning_rate": 1.9e-05,
"loss": 0.4152,
"step": 76
},
{
"epoch": 0.09665271966527196,
"grad_norm": 2.1548779010772705,
"learning_rate": 1.925e-05,
"loss": 0.4596,
"step": 77
},
{
"epoch": 0.09790794979079498,
"grad_norm": 2.221036195755005,
"learning_rate": 1.95e-05,
"loss": 0.3985,
"step": 78
},
{
"epoch": 0.099163179916318,
"grad_norm": 2.6363329887390137,
"learning_rate": 1.9750000000000002e-05,
"loss": 0.4098,
"step": 79
},
{
"epoch": 0.100418410041841,
"grad_norm": 3.167816162109375,
"learning_rate": 2e-05,
"loss": 0.4811,
"step": 80
},
{
"epoch": 0.100418410041841,
"eval_accuracy": 0.7958057395143487,
"eval_f1": 0.5605700712589073,
"eval_loss": 0.4410252869129181,
"eval_precision": 0.7065868263473054,
"eval_recall": 0.4645669291338583,
"eval_runtime": 52.3433,
"eval_samples_per_second": 5.292,
"eval_steps_per_second": 0.172,
"step": 80
},
{
"epoch": 0.10167364016736402,
"grad_norm": 2.885763168334961,
"learning_rate": 1.9999903740631467e-05,
"loss": 0.4426,
"step": 81
},
{
"epoch": 0.10292887029288703,
"grad_norm": 3.0578081607818604,
"learning_rate": 1.9999614964379037e-05,
"loss": 0.3756,
"step": 82
},
{
"epoch": 0.10418410041841004,
"grad_norm": 2.6213362216949463,
"learning_rate": 1.9999133676802198e-05,
"loss": 0.3389,
"step": 83
},
{
"epoch": 0.10543933054393305,
"grad_norm": 2.924165964126587,
"learning_rate": 1.9998459887166635e-05,
"loss": 0.4445,
"step": 84
},
{
"epoch": 0.10669456066945607,
"grad_norm": 3.2457644939422607,
"learning_rate": 1.999759360844406e-05,
"loss": 0.3953,
"step": 85
},
{
"epoch": 0.10794979079497909,
"grad_norm": 3.2740261554718018,
"learning_rate": 1.9996534857311967e-05,
"loss": 0.3838,
"step": 86
},
{
"epoch": 0.10920502092050209,
"grad_norm": 2.938350200653076,
"learning_rate": 1.999528365415329e-05,
"loss": 0.3549,
"step": 87
},
{
"epoch": 0.11046025104602511,
"grad_norm": 3.5133864879608154,
"learning_rate": 1.9993840023056045e-05,
"loss": 0.3628,
"step": 88
},
{
"epoch": 0.11171548117154811,
"grad_norm": 4.392985820770264,
"learning_rate": 1.9992203991812823e-05,
"loss": 0.3628,
"step": 89
},
{
"epoch": 0.11297071129707113,
"grad_norm": 4.105576515197754,
"learning_rate": 1.9990375591920304e-05,
"loss": 0.3535,
"step": 90
},
{
"epoch": 0.11422594142259414,
"grad_norm": 3.1926333904266357,
"learning_rate": 1.9988354858578603e-05,
"loss": 0.3401,
"step": 91
},
{
"epoch": 0.11548117154811716,
"grad_norm": 3.768364191055298,
"learning_rate": 1.9986141830690626e-05,
"loss": 0.3704,
"step": 92
},
{
"epoch": 0.11673640167364016,
"grad_norm": 7.29379940032959,
"learning_rate": 1.9983736550861306e-05,
"loss": 0.4349,
"step": 93
},
{
"epoch": 0.11799163179916318,
"grad_norm": 2.7373130321502686,
"learning_rate": 1.9981139065396786e-05,
"loss": 0.3264,
"step": 94
},
{
"epoch": 0.1192468619246862,
"grad_norm": 6.39415979385376,
"learning_rate": 1.9978349424303532e-05,
"loss": 0.3619,
"step": 95
},
{
"epoch": 0.1205020920502092,
"grad_norm": 3.354396343231201,
"learning_rate": 1.9975367681287358e-05,
"loss": 0.338,
"step": 96
},
{
"epoch": 0.12175732217573222,
"grad_norm": 4.022518634796143,
"learning_rate": 1.99721938937524e-05,
"loss": 0.3851,
"step": 97
},
{
"epoch": 0.12301255230125523,
"grad_norm": 3.3857271671295166,
"learning_rate": 1.9968828122800022e-05,
"loss": 0.352,
"step": 98
},
{
"epoch": 0.12426778242677824,
"grad_norm": 3.325467824935913,
"learning_rate": 1.9965270433227623e-05,
"loss": 0.3588,
"step": 99
},
{
"epoch": 0.12552301255230125,
"grad_norm": 3.3111820220947266,
"learning_rate": 1.9961520893527385e-05,
"loss": 0.2811,
"step": 100
},
{
"epoch": 0.12552301255230125,
"eval_accuracy": 0.8101545253863135,
"eval_f1": 0.5825242718446602,
"eval_loss": 0.4248526096343994,
"eval_precision": 0.759493670886076,
"eval_recall": 0.47244094488188976,
"eval_runtime": 53.343,
"eval_samples_per_second": 5.193,
"eval_steps_per_second": 0.169,
"step": 100
},
{
"epoch": 0.12677824267782425,
"grad_norm": 3.9941301345825195,
"learning_rate": 1.9957579575884978e-05,
"loss": 0.3364,
"step": 101
},
{
"epoch": 0.1280334728033473,
"grad_norm": 3.4411442279815674,
"learning_rate": 1.995344655617815e-05,
"loss": 0.3481,
"step": 102
},
{
"epoch": 0.1292887029288703,
"grad_norm": 3.1757569313049316,
"learning_rate": 1.9949121913975275e-05,
"loss": 0.3447,
"step": 103
},
{
"epoch": 0.1305439330543933,
"grad_norm": 5.263054847717285,
"learning_rate": 1.994460573253382e-05,
"loss": 0.3705,
"step": 104
},
{
"epoch": 0.13179916317991633,
"grad_norm": 4.291073322296143,
"learning_rate": 1.9939898098798736e-05,
"loss": 0.349,
"step": 105
},
{
"epoch": 0.13305439330543933,
"grad_norm": 4.688785076141357,
"learning_rate": 1.9934999103400797e-05,
"loss": 0.2573,
"step": 106
},
{
"epoch": 0.13430962343096234,
"grad_norm": 3.483659505844116,
"learning_rate": 1.992990884065484e-05,
"loss": 0.2812,
"step": 107
},
{
"epoch": 0.13556485355648534,
"grad_norm": 5.222522258758545,
"learning_rate": 1.9924627408557963e-05,
"loss": 0.3208,
"step": 108
},
{
"epoch": 0.13682008368200838,
"grad_norm": 3.1946051120758057,
"learning_rate": 1.991915490878763e-05,
"loss": 0.4041,
"step": 109
},
{
"epoch": 0.13807531380753138,
"grad_norm": 2.834019899368286,
"learning_rate": 1.9913491446699715e-05,
"loss": 0.2989,
"step": 110
},
{
"epoch": 0.13933054393305438,
"grad_norm": 4.4058380126953125,
"learning_rate": 1.9907637131326475e-05,
"loss": 0.3247,
"step": 111
},
{
"epoch": 0.14058577405857742,
"grad_norm": 4.437101364135742,
"learning_rate": 1.9901592075374447e-05,
"loss": 0.3487,
"step": 112
},
{
"epoch": 0.14184100418410042,
"grad_norm": 3.1267802715301514,
"learning_rate": 1.989535639522229e-05,
"loss": 0.2741,
"step": 113
},
{
"epoch": 0.14309623430962343,
"grad_norm": 3.8325576782226562,
"learning_rate": 1.988893021091853e-05,
"loss": 0.385,
"step": 114
},
{
"epoch": 0.14435146443514643,
"grad_norm": 4.569618225097656,
"learning_rate": 1.9882313646179247e-05,
"loss": 0.3595,
"step": 115
},
{
"epoch": 0.14560669456066946,
"grad_norm": 5.401278972625732,
"learning_rate": 1.9875506828385723e-05,
"loss": 0.2875,
"step": 116
},
{
"epoch": 0.14686192468619247,
"grad_norm": 3.8402180671691895,
"learning_rate": 1.9868509888581945e-05,
"loss": 0.3079,
"step": 117
},
{
"epoch": 0.14811715481171547,
"grad_norm": 3.5551564693450928,
"learning_rate": 1.986132296147212e-05,
"loss": 0.3157,
"step": 118
},
{
"epoch": 0.1493723849372385,
"grad_norm": 9.136929512023926,
"learning_rate": 1.9853946185418056e-05,
"loss": 0.4209,
"step": 119
},
{
"epoch": 0.1506276150627615,
"grad_norm": 3.290203332901001,
"learning_rate": 1.9846379702436518e-05,
"loss": 0.2959,
"step": 120
},
{
"epoch": 0.1506276150627615,
"eval_accuracy": 0.8211920529801324,
"eval_f1": 0.6197183098591549,
"eval_loss": 0.37509337067604065,
"eval_precision": 0.7674418604651163,
"eval_recall": 0.5196850393700787,
"eval_runtime": 52.5731,
"eval_samples_per_second": 5.269,
"eval_steps_per_second": 0.171,
"step": 120
},
{
"epoch": 0.15188284518828452,
"grad_norm": 4.515352725982666,
"learning_rate": 1.983862365819648e-05,
"loss": 0.3283,
"step": 121
},
{
"epoch": 0.15313807531380752,
"grad_norm": 3.97063946723938,
"learning_rate": 1.9830678202016324e-05,
"loss": 0.3505,
"step": 122
},
{
"epoch": 0.15439330543933055,
"grad_norm": 4.553818225860596,
"learning_rate": 1.982254348686097e-05,
"loss": 0.313,
"step": 123
},
{
"epoch": 0.15564853556485356,
"grad_norm": 3.5846359729766846,
"learning_rate": 1.981421966933893e-05,
"loss": 0.35,
"step": 124
},
{
"epoch": 0.15690376569037656,
"grad_norm": 5.479614734649658,
"learning_rate": 1.9805706909699283e-05,
"loss": 0.3134,
"step": 125
},
{
"epoch": 0.1581589958158996,
"grad_norm": 3.6926157474517822,
"learning_rate": 1.9797005371828603e-05,
"loss": 0.3659,
"step": 126
},
{
"epoch": 0.1594142259414226,
"grad_norm": 4.4174957275390625,
"learning_rate": 1.97881152232478e-05,
"loss": 0.3069,
"step": 127
},
{
"epoch": 0.1606694560669456,
"grad_norm": 2.855861186981201,
"learning_rate": 1.9779036635108892e-05,
"loss": 0.2748,
"step": 128
},
{
"epoch": 0.1619246861924686,
"grad_norm": 3.4113943576812744,
"learning_rate": 1.976976978219171e-05,
"loss": 0.2942,
"step": 129
},
{
"epoch": 0.16317991631799164,
"grad_norm": 2.8706114292144775,
"learning_rate": 1.9760314842900537e-05,
"loss": 0.26,
"step": 130
},
{
"epoch": 0.16443514644351465,
"grad_norm": 3.3289883136749268,
"learning_rate": 1.975067199926067e-05,
"loss": 0.2942,
"step": 131
},
{
"epoch": 0.16569037656903765,
"grad_norm": 3.1963343620300293,
"learning_rate": 1.9740841436914917e-05,
"loss": 0.3404,
"step": 132
},
{
"epoch": 0.16694560669456068,
"grad_norm": 4.106410026550293,
"learning_rate": 1.9730823345120024e-05,
"loss": 0.3645,
"step": 133
},
{
"epoch": 0.1682008368200837,
"grad_norm": 3.587475299835205,
"learning_rate": 1.9720617916743022e-05,
"loss": 0.2905,
"step": 134
},
{
"epoch": 0.1694560669456067,
"grad_norm": 4.643335819244385,
"learning_rate": 1.971022534825754e-05,
"loss": 0.3199,
"step": 135
},
{
"epoch": 0.1707112970711297,
"grad_norm": 3.8745625019073486,
"learning_rate": 1.9699645839739987e-05,
"loss": 0.3276,
"step": 136
},
{
"epoch": 0.17196652719665273,
"grad_norm": 4.443915367126465,
"learning_rate": 1.9688879594865726e-05,
"loss": 0.1989,
"step": 137
},
{
"epoch": 0.17322175732217573,
"grad_norm": 3.165154218673706,
"learning_rate": 1.9677926820905143e-05,
"loss": 0.2877,
"step": 138
},
{
"epoch": 0.17447698744769874,
"grad_norm": 3.396127462387085,
"learning_rate": 1.9666787728719664e-05,
"loss": 0.2869,
"step": 139
},
{
"epoch": 0.17573221757322174,
"grad_norm": 4.032714366912842,
"learning_rate": 1.9655462532757677e-05,
"loss": 0.336,
"step": 140
},
{
"epoch": 0.17573221757322174,
"eval_accuracy": 0.8278145695364238,
"eval_f1": 0.6060606060606061,
"eval_loss": 0.3764040172100067,
"eval_precision": 0.8450704225352113,
"eval_recall": 0.47244094488188976,
"eval_runtime": 52.6872,
"eval_samples_per_second": 5.257,
"eval_steps_per_second": 0.171,
"step": 140
},
{
"epoch": 0.17698744769874478,
"grad_norm": 2.6727371215820312,
"learning_rate": 1.9643951451050428e-05,
"loss": 0.2636,
"step": 141
},
{
"epoch": 0.17824267782426778,
"grad_norm": 3.8816864490509033,
"learning_rate": 1.9632254705207813e-05,
"loss": 0.3208,
"step": 142
},
{
"epoch": 0.17949790794979079,
"grad_norm": 3.4616892337799072,
"learning_rate": 1.9620372520414098e-05,
"loss": 0.3218,
"step": 143
},
{
"epoch": 0.18075313807531382,
"grad_norm": 4.056252479553223,
"learning_rate": 1.9608305125423608e-05,
"loss": 0.2844,
"step": 144
},
{
"epoch": 0.18200836820083682,
"grad_norm": 5.902234077453613,
"learning_rate": 1.9596052752556308e-05,
"loss": 0.2497,
"step": 145
},
{
"epoch": 0.18326359832635983,
"grad_norm": 4.0488996505737305,
"learning_rate": 1.958361563769333e-05,
"loss": 0.2764,
"step": 146
},
{
"epoch": 0.18451882845188283,
"grad_norm": 4.619633197784424,
"learning_rate": 1.957099402027244e-05,
"loss": 0.3775,
"step": 147
},
{
"epoch": 0.18577405857740587,
"grad_norm": 4.491790294647217,
"learning_rate": 1.9558188143283425e-05,
"loss": 0.4185,
"step": 148
},
{
"epoch": 0.18702928870292887,
"grad_norm": 9.393437385559082,
"learning_rate": 1.954519825326341e-05,
"loss": 0.292,
"step": 149
},
{
"epoch": 0.18828451882845187,
"grad_norm": 9.774816513061523,
"learning_rate": 1.9532024600292115e-05,
"loss": 0.341,
"step": 150
},
{
"epoch": 0.1895397489539749,
"grad_norm": 9.051419258117676,
"learning_rate": 1.9518667437987045e-05,
"loss": 0.3125,
"step": 151
},
{
"epoch": 0.1907949790794979,
"grad_norm": 4.726169586181641,
"learning_rate": 1.9505127023498603e-05,
"loss": 0.3283,
"step": 152
},
{
"epoch": 0.19205020920502092,
"grad_norm": 3.818352222442627,
"learning_rate": 1.9491403617505134e-05,
"loss": 0.2696,
"step": 153
},
{
"epoch": 0.19330543933054392,
"grad_norm": 4.901086330413818,
"learning_rate": 1.9477497484207922e-05,
"loss": 0.2927,
"step": 154
},
{
"epoch": 0.19456066945606695,
"grad_norm": 2.7958414554595947,
"learning_rate": 1.9463408891326088e-05,
"loss": 0.2544,
"step": 155
},
{
"epoch": 0.19581589958158996,
"grad_norm": 3.541666030883789,
"learning_rate": 1.9449138110091444e-05,
"loss": 0.2723,
"step": 156
},
{
"epoch": 0.19707112970711296,
"grad_norm": 4.369930744171143,
"learning_rate": 1.9434685415243267e-05,
"loss": 0.3121,
"step": 157
},
{
"epoch": 0.198326359832636,
"grad_norm": 4.061751842498779,
"learning_rate": 1.9420051085023006e-05,
"loss": 0.3238,
"step": 158
},
{
"epoch": 0.199581589958159,
"grad_norm": 5.1077446937561035,
"learning_rate": 1.940523540116895e-05,
"loss": 0.2935,
"step": 159
},
{
"epoch": 0.200836820083682,
"grad_norm": 3.7316763401031494,
"learning_rate": 1.9390238648910765e-05,
"loss": 0.3239,
"step": 160
},
{
"epoch": 0.200836820083682,
"eval_accuracy": 0.8200883002207505,
"eval_f1": 0.5788113695090439,
"eval_loss": 0.3607686161994934,
"eval_precision": 0.8421052631578947,
"eval_recall": 0.4409448818897638,
"eval_runtime": 53.735,
"eval_samples_per_second": 5.155,
"eval_steps_per_second": 0.167,
"step": 160
},
{
"epoch": 0.202092050209205,
"grad_norm": 4.719494819641113,
"learning_rate": 1.9375061116964032e-05,
"loss": 0.3164,
"step": 161
},
{
"epoch": 0.20334728033472804,
"grad_norm": 3.245194435119629,
"learning_rate": 1.935970309752469e-05,
"loss": 0.2923,
"step": 162
},
{
"epoch": 0.20460251046025105,
"grad_norm": 4.244296550750732,
"learning_rate": 1.9344164886263375e-05,
"loss": 0.2891,
"step": 163
},
{
"epoch": 0.20585774058577405,
"grad_norm": 5.457589149475098,
"learning_rate": 1.932844678231977e-05,
"loss": 0.3057,
"step": 164
},
{
"epoch": 0.20711297071129708,
"grad_norm": 4.439499378204346,
"learning_rate": 1.9312549088296838e-05,
"loss": 0.2107,
"step": 165
},
{
"epoch": 0.2083682008368201,
"grad_norm": 5.0200653076171875,
"learning_rate": 1.929647211025497e-05,
"loss": 0.2859,
"step": 166
},
{
"epoch": 0.2096234309623431,
"grad_norm": 3.7708117961883545,
"learning_rate": 1.9280216157706113e-05,
"loss": 0.2816,
"step": 167
},
{
"epoch": 0.2108786610878661,
"grad_norm": 3.947610855102539,
"learning_rate": 1.9263781543607817e-05,
"loss": 0.2431,
"step": 168
},
{
"epoch": 0.21213389121338913,
"grad_norm": 3.28195858001709,
"learning_rate": 1.9247168584357195e-05,
"loss": 0.296,
"step": 169
},
{
"epoch": 0.21338912133891214,
"grad_norm": 3.6983871459960938,
"learning_rate": 1.923037759978484e-05,
"loss": 0.3003,
"step": 170
},
{
"epoch": 0.21464435146443514,
"grad_norm": 4.456281661987305,
"learning_rate": 1.921340891314867e-05,
"loss": 0.2493,
"step": 171
},
{
"epoch": 0.21589958158995817,
"grad_norm": 3.2370941638946533,
"learning_rate": 1.9196262851127695e-05,
"loss": 0.2353,
"step": 172
},
{
"epoch": 0.21715481171548118,
"grad_norm": 2.977496862411499,
"learning_rate": 1.9178939743815735e-05,
"loss": 0.3062,
"step": 173
},
{
"epoch": 0.21841004184100418,
"grad_norm": 5.293909072875977,
"learning_rate": 1.9161439924715063e-05,
"loss": 0.2646,
"step": 174
},
{
"epoch": 0.2196652719665272,
"grad_norm": 3.4083428382873535,
"learning_rate": 1.9143763730729987e-05,
"loss": 0.2305,
"step": 175
},
{
"epoch": 0.22092050209205022,
"grad_norm": 2.7759830951690674,
"learning_rate": 1.9125911502160365e-05,
"loss": 0.2554,
"step": 176
},
{
"epoch": 0.22217573221757322,
"grad_norm": 3.9626009464263916,
"learning_rate": 1.9107883582695043e-05,
"loss": 0.2789,
"step": 177
},
{
"epoch": 0.22343096234309623,
"grad_norm": 3.340153932571411,
"learning_rate": 1.9089680319405252e-05,
"loss": 0.2874,
"step": 178
},
{
"epoch": 0.22468619246861923,
"grad_norm": 3.277308702468872,
"learning_rate": 1.9071302062737915e-05,
"loss": 0.1978,
"step": 179
},
{
"epoch": 0.22594142259414227,
"grad_norm": 5.420035362243652,
"learning_rate": 1.905274916650891e-05,
"loss": 0.2767,
"step": 180
},
{
"epoch": 0.22594142259414227,
"eval_accuracy": 0.8543046357615894,
"eval_f1": 0.7066666666666667,
"eval_loss": 0.3361983299255371,
"eval_precision": 0.8112244897959183,
"eval_recall": 0.6259842519685039,
"eval_runtime": 51.9639,
"eval_samples_per_second": 5.331,
"eval_steps_per_second": 0.173,
"step": 180
},
{
"epoch": 0.22719665271966527,
"grad_norm": 6.328350067138672,
"learning_rate": 1.903402198789625e-05,
"loss": 0.3489,
"step": 181
},
{
"epoch": 0.22845188284518828,
"grad_norm": 3.141185760498047,
"learning_rate": 1.9015120887433215e-05,
"loss": 0.3043,
"step": 182
},
{
"epoch": 0.2297071129707113,
"grad_norm": 3.633781909942627,
"learning_rate": 1.8996046229001407e-05,
"loss": 0.3081,
"step": 183
},
{
"epoch": 0.2309623430962343,
"grad_norm": 3.6458773612976074,
"learning_rate": 1.897679837982373e-05,
"loss": 0.2259,
"step": 184
},
{
"epoch": 0.23221757322175732,
"grad_norm": 7.069568634033203,
"learning_rate": 1.895737771045736e-05,
"loss": 0.347,
"step": 185
},
{
"epoch": 0.23347280334728032,
"grad_norm": 5.888752460479736,
"learning_rate": 1.8937784594786562e-05,
"loss": 0.2361,
"step": 186
},
{
"epoch": 0.23472803347280335,
"grad_norm": 3.553389072418213,
"learning_rate": 1.8918019410015527e-05,
"loss": 0.2504,
"step": 187
},
{
"epoch": 0.23598326359832636,
"grad_norm": 3.6231913566589355,
"learning_rate": 1.8898082536661097e-05,
"loss": 0.2558,
"step": 188
},
{
"epoch": 0.23723849372384936,
"grad_norm": 3.3635237216949463,
"learning_rate": 1.887797435854543e-05,
"loss": 0.2867,
"step": 189
},
{
"epoch": 0.2384937238493724,
"grad_norm": 4.0168538093566895,
"learning_rate": 1.885769526278865e-05,
"loss": 0.3195,
"step": 190
},
{
"epoch": 0.2397489539748954,
"grad_norm": 4.260074615478516,
"learning_rate": 1.8837245639801332e-05,
"loss": 0.2861,
"step": 191
},
{
"epoch": 0.2410041841004184,
"grad_norm": 3.299710988998413,
"learning_rate": 1.8816625883277044e-05,
"loss": 0.2454,
"step": 192
},
{
"epoch": 0.2422594142259414,
"grad_norm": 4.68196439743042,
"learning_rate": 1.8795836390184727e-05,
"loss": 0.2976,
"step": 193
},
{
"epoch": 0.24351464435146444,
"grad_norm": 4.414516925811768,
"learning_rate": 1.8774877560761082e-05,
"loss": 0.2814,
"step": 194
},
{
"epoch": 0.24476987447698745,
"grad_norm": 4.108029365539551,
"learning_rate": 1.8753749798502845e-05,
"loss": 0.2478,
"step": 195
},
{
"epoch": 0.24602510460251045,
"grad_norm": 3.553065061569214,
"learning_rate": 1.8732453510159025e-05,
"loss": 0.2221,
"step": 196
},
{
"epoch": 0.24728033472803349,
"grad_norm": 3.1897339820861816,
"learning_rate": 1.871098910572308e-05,
"loss": 0.3001,
"step": 197
},
{
"epoch": 0.2485355648535565,
"grad_norm": 4.416936874389648,
"learning_rate": 1.8689356998425007e-05,
"loss": 0.3109,
"step": 198
},
{
"epoch": 0.2497907949790795,
"grad_norm": 3.162482976913452,
"learning_rate": 1.8667557604723404e-05,
"loss": 0.3104,
"step": 199
},
{
"epoch": 0.2510460251046025,
"grad_norm": 3.864384651184082,
"learning_rate": 1.864559134429745e-05,
"loss": 0.276,
"step": 200
},
{
"epoch": 0.2510460251046025,
"eval_accuracy": 0.8388520971302428,
"eval_f1": 0.6439024390243903,
"eval_loss": 0.3405630886554718,
"eval_precision": 0.8461538461538461,
"eval_recall": 0.5196850393700787,
"eval_runtime": 52.2841,
"eval_samples_per_second": 5.298,
"eval_steps_per_second": 0.172,
"step": 200
},
{
"epoch": 0.25230125523012553,
"grad_norm": 3.1597511768341064,
"learning_rate": 1.8623458640038817e-05,
"loss": 0.2417,
"step": 201
},
{
"epoch": 0.2535564853556485,
"grad_norm": 4.373691558837891,
"learning_rate": 1.8601159918043533e-05,
"loss": 0.2408,
"step": 202
},
{
"epoch": 0.25481171548117154,
"grad_norm": 3.5026726722717285,
"learning_rate": 1.857869560760377e-05,
"loss": 0.281,
"step": 203
},
{
"epoch": 0.2560669456066946,
"grad_norm": 4.196898460388184,
"learning_rate": 1.85560661411996e-05,
"loss": 0.2201,
"step": 204
},
{
"epoch": 0.25732217573221755,
"grad_norm": 3.8971402645111084,
"learning_rate": 1.8533271954490655e-05,
"loss": 0.2692,
"step": 205
},
{
"epoch": 0.2585774058577406,
"grad_norm": 6.247049808502197,
"learning_rate": 1.8510313486307734e-05,
"loss": 0.2653,
"step": 206
},
{
"epoch": 0.2598326359832636,
"grad_norm": 5.326446056365967,
"learning_rate": 1.848719117864437e-05,
"loss": 0.2857,
"step": 207
},
{
"epoch": 0.2610878661087866,
"grad_norm": 3.6153714656829834,
"learning_rate": 1.846390547664831e-05,
"loss": 0.262,
"step": 208
},
{
"epoch": 0.2623430962343096,
"grad_norm": 6.952093124389648,
"learning_rate": 1.8440456828612946e-05,
"loss": 0.2807,
"step": 209
},
{
"epoch": 0.26359832635983266,
"grad_norm": 3.8363044261932373,
"learning_rate": 1.841684568596869e-05,
"loss": 0.2604,
"step": 210
},
{
"epoch": 0.26485355648535563,
"grad_norm": 3.643761396408081,
"learning_rate": 1.8393072503274277e-05,
"loss": 0.2796,
"step": 211
},
{
"epoch": 0.26610878661087867,
"grad_norm": 3.259951114654541,
"learning_rate": 1.836913773820802e-05,
"loss": 0.2724,
"step": 212
},
{
"epoch": 0.2673640167364017,
"grad_norm": 4.189282417297363,
"learning_rate": 1.834504185155899e-05,
"loss": 0.2455,
"step": 213
},
{
"epoch": 0.2686192468619247,
"grad_norm": 4.426260948181152,
"learning_rate": 1.832078530721816e-05,
"loss": 0.2975,
"step": 214
},
{
"epoch": 0.2698744769874477,
"grad_norm": 4.503783226013184,
"learning_rate": 1.829636857216945e-05,
"loss": 0.2852,
"step": 215
},
{
"epoch": 0.2711297071129707,
"grad_norm": 4.618401527404785,
"learning_rate": 1.8271792116480767e-05,
"loss": 0.3006,
"step": 216
},
{
"epoch": 0.2723849372384937,
"grad_norm": 5.484090805053711,
"learning_rate": 1.8247056413294927e-05,
"loss": 0.3397,
"step": 217
},
{
"epoch": 0.27364016736401675,
"grad_norm": 4.215097427368164,
"learning_rate": 1.8222161938820564e-05,
"loss": 0.312,
"step": 218
},
{
"epoch": 0.27489539748953973,
"grad_norm": 2.8045787811279297,
"learning_rate": 1.8197109172322958e-05,
"loss": 0.2896,
"step": 219
},
{
"epoch": 0.27615062761506276,
"grad_norm": 3.3742685317993164,
"learning_rate": 1.8171898596114804e-05,
"loss": 0.2715,
"step": 220
},
{
"epoch": 0.27615062761506276,
"eval_accuracy": 0.8410596026490066,
"eval_f1": 0.6587677725118484,
"eval_loss": 0.3223263919353485,
"eval_precision": 0.8273809523809523,
"eval_recall": 0.547244094488189,
"eval_runtime": 52.2317,
"eval_samples_per_second": 5.303,
"eval_steps_per_second": 0.172,
"step": 220
},
{
"epoch": 0.2774058577405858,
"grad_norm": 2.477954149246216,
"learning_rate": 1.8146530695546934e-05,
"loss": 0.2171,
"step": 221
},
{
"epoch": 0.27866108786610877,
"grad_norm": 3.73885440826416,
"learning_rate": 1.8121005958998968e-05,
"loss": 0.3282,
"step": 222
},
{
"epoch": 0.2799163179916318,
"grad_norm": 3.0945334434509277,
"learning_rate": 1.8095324877869902e-05,
"loss": 0.2823,
"step": 223
},
{
"epoch": 0.28117154811715483,
"grad_norm": 3.98866868019104,
"learning_rate": 1.8069487946568675e-05,
"loss": 0.3008,
"step": 224
},
{
"epoch": 0.2824267782426778,
"grad_norm": 3.4288768768310547,
"learning_rate": 1.804349566250462e-05,
"loss": 0.2644,
"step": 225
},
{
"epoch": 0.28368200836820084,
"grad_norm": 3.3643836975097656,
"learning_rate": 1.801734852607791e-05,
"loss": 0.2543,
"step": 226
},
{
"epoch": 0.2849372384937239,
"grad_norm": 5.725021839141846,
"learning_rate": 1.799104704066991e-05,
"loss": 0.2827,
"step": 227
},
{
"epoch": 0.28619246861924685,
"grad_norm": 4.484889507293701,
"learning_rate": 1.79645917126335e-05,
"loss": 0.3096,
"step": 228
},
{
"epoch": 0.2874476987447699,
"grad_norm": 5.622531414031982,
"learning_rate": 1.7937983051283312e-05,
"loss": 0.3283,
"step": 229
},
{
"epoch": 0.28870292887029286,
"grad_norm": 4.898491382598877,
"learning_rate": 1.7911221568885935e-05,
"loss": 0.2316,
"step": 230
},
{
"epoch": 0.2899581589958159,
"grad_norm": 4.367154121398926,
"learning_rate": 1.7884307780650047e-05,
"loss": 0.2739,
"step": 231
},
{
"epoch": 0.29121338912133893,
"grad_norm": 4.016841888427734,
"learning_rate": 1.7857242204716497e-05,
"loss": 0.2375,
"step": 232
},
{
"epoch": 0.2924686192468619,
"grad_norm": 3.615976333618164,
"learning_rate": 1.783002536214834e-05,
"loss": 0.2644,
"step": 233
},
{
"epoch": 0.29372384937238494,
"grad_norm": 5.212274074554443,
"learning_rate": 1.780265777692079e-05,
"loss": 0.3412,
"step": 234
},
{
"epoch": 0.29497907949790797,
"grad_norm": 3.3848087787628174,
"learning_rate": 1.7775139975911143e-05,
"loss": 0.2489,
"step": 235
},
{
"epoch": 0.29623430962343095,
"grad_norm": 5.973453998565674,
"learning_rate": 1.7747472488888622e-05,
"loss": 0.2657,
"step": 236
},
{
"epoch": 0.297489539748954,
"grad_norm": 4.158175468444824,
"learning_rate": 1.77196558485042e-05,
"loss": 0.2951,
"step": 237
},
{
"epoch": 0.298744769874477,
"grad_norm": 3.3108043670654297,
"learning_rate": 1.7691690590280325e-05,
"loss": 0.26,
"step": 238
},
{
"epoch": 0.3,
"grad_norm": 4.062819480895996,
"learning_rate": 1.7663577252600612e-05,
"loss": 0.2535,
"step": 239
},
{
"epoch": 0.301255230125523,
"grad_norm": 4.0478339195251465,
"learning_rate": 1.763531637669949e-05,
"loss": 0.2737,
"step": 240
},
{
"epoch": 0.301255230125523,
"eval_accuracy": 0.8520971302428256,
"eval_f1": 0.6995515695067265,
"eval_loss": 0.3201642632484436,
"eval_precision": 0.8125,
"eval_recall": 0.6141732283464567,
"eval_runtime": 52.8485,
"eval_samples_per_second": 5.241,
"eval_steps_per_second": 0.17,
"step": 240
},
{
"epoch": 0.302510460251046,
"grad_norm": 5.782260894775391,
"learning_rate": 1.760690850665177e-05,
"loss": 0.2356,
"step": 241
},
{
"epoch": 0.30376569037656903,
"grad_norm": 4.108422756195068,
"learning_rate": 1.7578354189362183e-05,
"loss": 0.2658,
"step": 242
},
{
"epoch": 0.30502092050209206,
"grad_norm": 2.872807264328003,
"learning_rate": 1.7549653974554835e-05,
"loss": 0.3048,
"step": 243
},
{
"epoch": 0.30627615062761504,
"grad_norm": 3.7681846618652344,
"learning_rate": 1.752080841476264e-05,
"loss": 0.2832,
"step": 244
},
{
"epoch": 0.3075313807531381,
"grad_norm": 6.7302069664001465,
"learning_rate": 1.7491818065316676e-05,
"loss": 0.2518,
"step": 245
},
{
"epoch": 0.3087866108786611,
"grad_norm": 7.851168155670166,
"learning_rate": 1.7462683484335477e-05,
"loss": 0.2188,
"step": 246
},
{
"epoch": 0.3100418410041841,
"grad_norm": 5.26230001449585,
"learning_rate": 1.7433405232714325e-05,
"loss": 0.2898,
"step": 247
},
{
"epoch": 0.3112970711297071,
"grad_norm": 3.618230104446411,
"learning_rate": 1.7403983874114422e-05,
"loss": 0.2303,
"step": 248
},
{
"epoch": 0.31255230125523015,
"grad_norm": 3.8040518760681152,
"learning_rate": 1.7374419974952045e-05,
"loss": 0.3179,
"step": 249
},
{
"epoch": 0.3138075313807531,
"grad_norm": 3.1975717544555664,
"learning_rate": 1.734471410438765e-05,
"loss": 0.2503,
"step": 250
},
{
"epoch": 0.31506276150627616,
"grad_norm": 2.8378207683563232,
"learning_rate": 1.731486683431491e-05,
"loss": 0.2424,
"step": 251
},
{
"epoch": 0.3163179916317992,
"grad_norm": 5.816548824310303,
"learning_rate": 1.728487873934969e-05,
"loss": 0.2567,
"step": 252
},
{
"epoch": 0.31757322175732217,
"grad_norm": 3.5895259380340576,
"learning_rate": 1.7254750396819008e-05,
"loss": 0.2762,
"step": 253
},
{
"epoch": 0.3188284518828452,
"grad_norm": 3.293178081512451,
"learning_rate": 1.7224482386749916e-05,
"loss": 0.2801,
"step": 254
},
{
"epoch": 0.3200836820083682,
"grad_norm": 3.76770281791687,
"learning_rate": 1.719407529185831e-05,
"loss": 0.2545,
"step": 255
},
{
"epoch": 0.3213389121338912,
"grad_norm": 3.1176042556762695,
"learning_rate": 1.7163529697537756e-05,
"loss": 0.2608,
"step": 256
},
{
"epoch": 0.32259414225941424,
"grad_norm": 3.789315700531006,
"learning_rate": 1.7132846191848167e-05,
"loss": 0.2708,
"step": 257
},
{
"epoch": 0.3238493723849372,
"grad_norm": 5.817142963409424,
"learning_rate": 1.7102025365504524e-05,
"loss": 0.3254,
"step": 258
},
{
"epoch": 0.32510460251046025,
"grad_norm": 4.174067497253418,
"learning_rate": 1.7071067811865477e-05,
"loss": 0.2826,
"step": 259
},
{
"epoch": 0.3263598326359833,
"grad_norm": 4.383941173553467,
"learning_rate": 1.7039974126921946e-05,
"loss": 0.3245,
"step": 260
},
{
"epoch": 0.3263598326359833,
"eval_accuracy": 0.8465783664459161,
"eval_f1": 0.6774941995359629,
"eval_loss": 0.30984166264533997,
"eval_precision": 0.8248587570621468,
"eval_recall": 0.5748031496062992,
"eval_runtime": 52.3032,
"eval_samples_per_second": 5.296,
"eval_steps_per_second": 0.172,
"step": 260
},
{
"epoch": 0.32761506276150626,
"grad_norm": 4.471529960632324,
"learning_rate": 1.7008744909285626e-05,
"loss": 0.2658,
"step": 261
},
{
"epoch": 0.3288702928870293,
"grad_norm": 4.479955673217773,
"learning_rate": 1.6977380760177467e-05,
"loss": 0.3076,
"step": 262
},
{
"epoch": 0.3301255230125523,
"grad_norm": 3.6632466316223145,
"learning_rate": 1.694588228341611e-05,
"loss": 0.2387,
"step": 263
},
{
"epoch": 0.3313807531380753,
"grad_norm": 3.813127040863037,
"learning_rate": 1.691425008540625e-05,
"loss": 0.2575,
"step": 264
},
{
"epoch": 0.33263598326359833,
"grad_norm": 3.7820916175842285,
"learning_rate": 1.6882484775126968e-05,
"loss": 0.2517,
"step": 265
},
{
"epoch": 0.33389121338912137,
"grad_norm": 3.487283229827881,
"learning_rate": 1.6850586964120005e-05,
"loss": 0.2898,
"step": 266
},
{
"epoch": 0.33514644351464434,
"grad_norm": 5.123818397521973,
"learning_rate": 1.6818557266477993e-05,
"loss": 0.2758,
"step": 267
},
{
"epoch": 0.3364016736401674,
"grad_norm": 3.208160400390625,
"learning_rate": 1.6786396298832622e-05,
"loss": 0.2846,
"step": 268
},
{
"epoch": 0.33765690376569035,
"grad_norm": 2.8521032333374023,
"learning_rate": 1.6754104680342783e-05,
"loss": 0.2573,
"step": 269
},
{
"epoch": 0.3389121338912134,
"grad_norm": 2.8169782161712646,
"learning_rate": 1.6721683032682637e-05,
"loss": 0.2259,
"step": 270
},
{
"epoch": 0.3401673640167364,
"grad_norm": 3.7779228687286377,
"learning_rate": 1.6689131980029647e-05,
"loss": 0.2947,
"step": 271
},
{
"epoch": 0.3414225941422594,
"grad_norm": 4.368408203125,
"learning_rate": 1.6656452149052568e-05,
"loss": 0.2654,
"step": 272
},
{
"epoch": 0.3426778242677824,
"grad_norm": 3.421369791030884,
"learning_rate": 1.662364416889938e-05,
"loss": 0.2921,
"step": 273
},
{
"epoch": 0.34393305439330546,
"grad_norm": 4.275522232055664,
"learning_rate": 1.6590708671185176e-05,
"loss": 0.2527,
"step": 274
},
{
"epoch": 0.34518828451882844,
"grad_norm": 3.0027596950531006,
"learning_rate": 1.6557646289979996e-05,
"loss": 0.2031,
"step": 275
},
{
"epoch": 0.34644351464435147,
"grad_norm": 3.2799339294433594,
"learning_rate": 1.6524457661796626e-05,
"loss": 0.2276,
"step": 276
},
{
"epoch": 0.3476987447698745,
"grad_norm": 3.7090659141540527,
"learning_rate": 1.6491143425578345e-05,
"loss": 0.2264,
"step": 277
},
{
"epoch": 0.3489539748953975,
"grad_norm": 6.081251621246338,
"learning_rate": 1.645770422268662e-05,
"loss": 0.3315,
"step": 278
},
{
"epoch": 0.3502092050209205,
"grad_norm": 5.695575714111328,
"learning_rate": 1.6424140696888765e-05,
"loss": 0.2948,
"step": 279
},
{
"epoch": 0.3514644351464435,
"grad_norm": 4.191822052001953,
"learning_rate": 1.639045349434554e-05,
"loss": 0.2868,
"step": 280
},
{
"epoch": 0.3514644351464435,
"eval_accuracy": 0.8432671081677704,
"eval_f1": 0.6830357142857143,
"eval_loss": 0.3159337043762207,
"eval_precision": 0.788659793814433,
"eval_recall": 0.6023622047244095,
"eval_runtime": 52.6769,
"eval_samples_per_second": 5.258,
"eval_steps_per_second": 0.171,
"step": 280
},
{
"epoch": 0.3527196652719665,
"grad_norm": 4.9057183265686035,
"learning_rate": 1.6356643263598716e-05,
"loss": 0.3545,
"step": 281
},
{
"epoch": 0.35397489539748955,
"grad_norm": 6.470303058624268,
"learning_rate": 1.6322710655558577e-05,
"loss": 0.3414,
"step": 282
},
{
"epoch": 0.35523012552301253,
"grad_norm": 3.9251017570495605,
"learning_rate": 1.6288656323491415e-05,
"loss": 0.2573,
"step": 283
},
{
"epoch": 0.35648535564853556,
"grad_norm": 4.604090213775635,
"learning_rate": 1.6254480923006924e-05,
"loss": 0.226,
"step": 284
},
{
"epoch": 0.3577405857740586,
"grad_norm": 6.23361873626709,
"learning_rate": 1.6220185112045606e-05,
"loss": 0.2693,
"step": 285
},
{
"epoch": 0.35899581589958157,
"grad_norm": 3.5196187496185303,
"learning_rate": 1.6185769550866073e-05,
"loss": 0.2104,
"step": 286
},
{
"epoch": 0.3602510460251046,
"grad_norm": 5.589550495147705,
"learning_rate": 1.6151234902032374e-05,
"loss": 0.3379,
"step": 287
},
{
"epoch": 0.36150627615062764,
"grad_norm": 3.052987813949585,
"learning_rate": 1.6116581830401193e-05,
"loss": 0.2646,
"step": 288
},
{
"epoch": 0.3627615062761506,
"grad_norm": 2.715062141418457,
"learning_rate": 1.60818110031091e-05,
"loss": 0.2731,
"step": 289
},
{
"epoch": 0.36401673640167365,
"grad_norm": 3.9851012229919434,
"learning_rate": 1.6046923089559667e-05,
"loss": 0.2482,
"step": 290
},
{
"epoch": 0.3652719665271967,
"grad_norm": 4.131580352783203,
"learning_rate": 1.6011918761410596e-05,
"loss": 0.2916,
"step": 291
},
{
"epoch": 0.36652719665271966,
"grad_norm": 5.364291667938232,
"learning_rate": 1.5976798692560796e-05,
"loss": 0.3029,
"step": 292
},
{
"epoch": 0.3677824267782427,
"grad_norm": 3.139458417892456,
"learning_rate": 1.5941563559137398e-05,
"loss": 0.2396,
"step": 293
},
{
"epoch": 0.36903765690376567,
"grad_norm": 3.1862568855285645,
"learning_rate": 1.5906214039482732e-05,
"loss": 0.2504,
"step": 294
},
{
"epoch": 0.3702928870292887,
"grad_norm": 3.489682912826538,
"learning_rate": 1.5870750814141296e-05,
"loss": 0.2214,
"step": 295
},
{
"epoch": 0.37154811715481173,
"grad_norm": 4.336936950683594,
"learning_rate": 1.5835174565846624e-05,
"loss": 0.3056,
"step": 296
},
{
"epoch": 0.3728033472803347,
"grad_norm": 3.281315803527832,
"learning_rate": 1.579948597950815e-05,
"loss": 0.2579,
"step": 297
},
{
"epoch": 0.37405857740585774,
"grad_norm": 7.08855676651001,
"learning_rate": 1.576368574219804e-05,
"loss": 0.295,
"step": 298
},
{
"epoch": 0.37531380753138077,
"grad_norm": 5.177116394042969,
"learning_rate": 1.5727774543137927e-05,
"loss": 0.2363,
"step": 299
},
{
"epoch": 0.37656903765690375,
"grad_norm": 2.4472217559814453,
"learning_rate": 1.5691753073685692e-05,
"loss": 0.2601,
"step": 300
},
{
"epoch": 0.37656903765690375,
"eval_accuracy": 0.8587196467991169,
"eval_f1": 0.7387755102040816,
"eval_loss": 0.31048765778541565,
"eval_precision": 0.7669491525423728,
"eval_recall": 0.7125984251968503,
"eval_runtime": 52.6769,
"eval_samples_per_second": 5.258,
"eval_steps_per_second": 0.171,
"step": 300
},
{
"epoch": 0.3778242677824268,
"grad_norm": 7.539090156555176,
"learning_rate": 1.565562202732211e-05,
"loss": 0.289,
"step": 301
},
{
"epoch": 0.3790794979079498,
"grad_norm": 7.3726420402526855,
"learning_rate": 1.561938209963753e-05,
"loss": 0.2752,
"step": 302
},
{
"epoch": 0.3803347280334728,
"grad_norm": 5.038547515869141,
"learning_rate": 1.5583033988318453e-05,
"loss": 0.2419,
"step": 303
},
{
"epoch": 0.3815899581589958,
"grad_norm": 3.0914595127105713,
"learning_rate": 1.554657839313413e-05,
"loss": 0.2324,
"step": 304
},
{
"epoch": 0.38284518828451886,
"grad_norm": 5.068948268890381,
"learning_rate": 1.5510016015923084e-05,
"loss": 0.2864,
"step": 305
},
{
"epoch": 0.38410041841004183,
"grad_norm": 4.331803321838379,
"learning_rate": 1.5473347560579576e-05,
"loss": 0.2247,
"step": 306
},
{
"epoch": 0.38535564853556487,
"grad_norm": 4.25094747543335,
"learning_rate": 1.5436573733040073e-05,
"loss": 0.2025,
"step": 307
},
{
"epoch": 0.38661087866108784,
"grad_norm": 6.317193984985352,
"learning_rate": 1.539969524126967e-05,
"loss": 0.2389,
"step": 308
},
{
"epoch": 0.3878661087866109,
"grad_norm": 5.176138401031494,
"learning_rate": 1.5362712795248423e-05,
"loss": 0.2235,
"step": 309
},
{
"epoch": 0.3891213389121339,
"grad_norm": 4.67032527923584,
"learning_rate": 1.5325627106957715e-05,
"loss": 0.2004,
"step": 310
},
{
"epoch": 0.3903765690376569,
"grad_norm": 7.408180236816406,
"learning_rate": 1.5288438890366534e-05,
"loss": 0.3133,
"step": 311
},
{
"epoch": 0.3916317991631799,
"grad_norm": 4.369890213012695,
"learning_rate": 1.5251148861417733e-05,
"loss": 0.2798,
"step": 312
},
{
"epoch": 0.39288702928870295,
"grad_norm": 6.916268348693848,
"learning_rate": 1.5213757738014234e-05,
"loss": 0.2518,
"step": 313
},
{
"epoch": 0.3941422594142259,
"grad_norm": 3.2595841884613037,
"learning_rate": 1.5176266240005225e-05,
"loss": 0.2666,
"step": 314
},
{
"epoch": 0.39539748953974896,
"grad_norm": 4.970115661621094,
"learning_rate": 1.513867508917229e-05,
"loss": 0.2762,
"step": 315
},
{
"epoch": 0.396652719665272,
"grad_norm": 3.959069013595581,
"learning_rate": 1.5100985009215519e-05,
"loss": 0.2324,
"step": 316
},
{
"epoch": 0.39790794979079497,
"grad_norm": 5.496798515319824,
"learning_rate": 1.5063196725739568e-05,
"loss": 0.283,
"step": 317
},
{
"epoch": 0.399163179916318,
"grad_norm": 4.346258640289307,
"learning_rate": 1.5025310966239701e-05,
"loss": 0.2182,
"step": 318
},
{
"epoch": 0.400418410041841,
"grad_norm": 7.267153263092041,
"learning_rate": 1.4987328460087778e-05,
"loss": 0.2261,
"step": 319
},
{
"epoch": 0.401673640167364,
"grad_norm": 4.095457077026367,
"learning_rate": 1.4949249938518203e-05,
"loss": 0.2597,
"step": 320
},
{
"epoch": 0.401673640167364,
"eval_accuracy": 0.8509933774834437,
"eval_f1": 0.6867749419953596,
"eval_loss": 0.31619083881378174,
"eval_precision": 0.8361581920903954,
"eval_recall": 0.5826771653543307,
"eval_runtime": 50.8111,
"eval_samples_per_second": 5.452,
"eval_steps_per_second": 0.177,
"step": 320
},
{
"epoch": 0.40292887029288704,
"grad_norm": 4.564698696136475,
"learning_rate": 1.491107613461387e-05,
"loss": 0.2494,
"step": 321
},
{
"epoch": 0.40418410041841,
"grad_norm": 3.54681134223938,
"learning_rate": 1.4872807783292027e-05,
"loss": 0.2396,
"step": 322
},
{
"epoch": 0.40543933054393305,
"grad_norm": 3.487334966659546,
"learning_rate": 1.4834445621290144e-05,
"loss": 0.2264,
"step": 323
},
{
"epoch": 0.4066945606694561,
"grad_norm": 4.941503047943115,
"learning_rate": 1.4795990387151719e-05,
"loss": 0.2566,
"step": 324
},
{
"epoch": 0.40794979079497906,
"grad_norm": 3.7651941776275635,
"learning_rate": 1.4757442821212058e-05,
"loss": 0.2159,
"step": 325
},
{
"epoch": 0.4092050209205021,
"grad_norm": 6.6421685218811035,
"learning_rate": 1.4718803665584038e-05,
"loss": 0.2367,
"step": 326
},
{
"epoch": 0.4104602510460251,
"grad_norm": 4.226874351501465,
"learning_rate": 1.4680073664143799e-05,
"loss": 0.2573,
"step": 327
},
{
"epoch": 0.4117154811715481,
"grad_norm": 5.6968536376953125,
"learning_rate": 1.464125356251644e-05,
"loss": 0.3498,
"step": 328
},
{
"epoch": 0.41297071129707114,
"grad_norm": 5.091569900512695,
"learning_rate": 1.4602344108061657e-05,
"loss": 0.2999,
"step": 329
},
{
"epoch": 0.41422594142259417,
"grad_norm": 3.757646083831787,
"learning_rate": 1.4563346049859348e-05,
"loss": 0.2588,
"step": 330
},
{
"epoch": 0.41548117154811715,
"grad_norm": 3.553725242614746,
"learning_rate": 1.4524260138695206e-05,
"loss": 0.3026,
"step": 331
},
{
"epoch": 0.4167364016736402,
"grad_norm": 4.0715765953063965,
"learning_rate": 1.4485087127046256e-05,
"loss": 0.3188,
"step": 332
},
{
"epoch": 0.41799163179916315,
"grad_norm": 3.9009945392608643,
"learning_rate": 1.4445827769066374e-05,
"loss": 0.2373,
"step": 333
},
{
"epoch": 0.4192468619246862,
"grad_norm": 3.4119412899017334,
"learning_rate": 1.4406482820571759e-05,
"loss": 0.2381,
"step": 334
},
{
"epoch": 0.4205020920502092,
"grad_norm": 7.349539756774902,
"learning_rate": 1.4367053039026392e-05,
"loss": 0.2528,
"step": 335
},
{
"epoch": 0.4217573221757322,
"grad_norm": 3.9228568077087402,
"learning_rate": 1.4327539183527447e-05,
"loss": 0.249,
"step": 336
},
{
"epoch": 0.42301255230125523,
"grad_norm": 5.134557247161865,
"learning_rate": 1.4287942014790677e-05,
"loss": 0.2908,
"step": 337
},
{
"epoch": 0.42426778242677826,
"grad_norm": 4.12131929397583,
"learning_rate": 1.4248262295135779e-05,
"loss": 0.2661,
"step": 338
},
{
"epoch": 0.42552301255230124,
"grad_norm": 3.757857322692871,
"learning_rate": 1.42085007884717e-05,
"loss": 0.2448,
"step": 339
},
{
"epoch": 0.42677824267782427,
"grad_norm": 3.9377548694610596,
"learning_rate": 1.4168658260281944e-05,
"loss": 0.287,
"step": 340
},
{
"epoch": 0.42677824267782427,
"eval_accuracy": 0.8532008830022075,
"eval_f1": 0.70509977827051,
"eval_loss": 0.29967617988586426,
"eval_precision": 0.8071065989847716,
"eval_recall": 0.6259842519685039,
"eval_runtime": 50.9044,
"eval_samples_per_second": 5.442,
"eval_steps_per_second": 0.177,
"step": 340
},
{
"epoch": 0.4280334728033473,
"grad_norm": 3.8355214595794678,
"learning_rate": 1.4128735477609839e-05,
"loss": 0.2409,
"step": 341
},
{
"epoch": 0.4292887029288703,
"grad_norm": 3.9827072620391846,
"learning_rate": 1.4088733209043748e-05,
"loss": 0.1978,
"step": 342
},
{
"epoch": 0.4305439330543933,
"grad_norm": 3.053262710571289,
"learning_rate": 1.4048652224702295e-05,
"loss": 0.215,
"step": 343
},
{
"epoch": 0.43179916317991635,
"grad_norm": 3.117565155029297,
"learning_rate": 1.400849329621953e-05,
"loss": 0.2652,
"step": 344
},
{
"epoch": 0.4330543933054393,
"grad_norm": 4.665426731109619,
"learning_rate": 1.3968257196730069e-05,
"loss": 0.3002,
"step": 345
},
{
"epoch": 0.43430962343096235,
"grad_norm": 3.6823060512542725,
"learning_rate": 1.3927944700854223e-05,
"loss": 0.2987,
"step": 346
},
{
"epoch": 0.43556485355648533,
"grad_norm": 3.018756628036499,
"learning_rate": 1.388755658468307e-05,
"loss": 0.2399,
"step": 347
},
{
"epoch": 0.43682008368200836,
"grad_norm": 2.53790283203125,
"learning_rate": 1.3847093625763517e-05,
"loss": 0.2733,
"step": 348
},
{
"epoch": 0.4380753138075314,
"grad_norm": 4.417150974273682,
"learning_rate": 1.3806556603083346e-05,
"loss": 0.2144,
"step": 349
},
{
"epoch": 0.4393305439330544,
"grad_norm": 6.118602275848389,
"learning_rate": 1.3765946297056192e-05,
"loss": 0.3063,
"step": 350
},
{
"epoch": 0.4405857740585774,
"grad_norm": 3.5751051902770996,
"learning_rate": 1.3725263489506542e-05,
"loss": 0.1951,
"step": 351
},
{
"epoch": 0.44184100418410044,
"grad_norm": 5.6558837890625,
"learning_rate": 1.3684508963654667e-05,
"loss": 0.3366,
"step": 352
},
{
"epoch": 0.4430962343096234,
"grad_norm": 3.0790345668792725,
"learning_rate": 1.364368350410155e-05,
"loss": 0.2517,
"step": 353
},
{
"epoch": 0.44435146443514645,
"grad_norm": 3.3675646781921387,
"learning_rate": 1.3602787896813787e-05,
"loss": 0.283,
"step": 354
},
{
"epoch": 0.4456066945606695,
"grad_norm": 3.162820339202881,
"learning_rate": 1.356182292910844e-05,
"loss": 0.2131,
"step": 355
},
{
"epoch": 0.44686192468619246,
"grad_norm": 2.9676196575164795,
"learning_rate": 1.3520789389637898e-05,
"loss": 0.2782,
"step": 356
},
{
"epoch": 0.4481171548117155,
"grad_norm": 5.9504008293151855,
"learning_rate": 1.347968806837468e-05,
"loss": 0.2663,
"step": 357
},
{
"epoch": 0.44937238493723847,
"grad_norm": 5.749334335327148,
"learning_rate": 1.3438519756596226e-05,
"loss": 0.2307,
"step": 358
},
{
"epoch": 0.4506276150627615,
"grad_norm": 5.305976390838623,
"learning_rate": 1.339728524686968e-05,
"loss": 0.2,
"step": 359
},
{
"epoch": 0.45188284518828453,
"grad_norm": 5.051678657531738,
"learning_rate": 1.335598533303662e-05,
"loss": 0.3115,
"step": 360
},
{
"epoch": 0.45188284518828453,
"eval_accuracy": 0.8543046357615894,
"eval_f1": 0.6986301369863014,
"eval_loss": 0.30281126499176025,
"eval_precision": 0.8315217391304348,
"eval_recall": 0.6023622047244095,
"eval_runtime": 53.4812,
"eval_samples_per_second": 5.179,
"eval_steps_per_second": 0.168,
"step": 360
},
{
"epoch": 0.4531380753138075,
"grad_norm": 3.419318914413452,
"learning_rate": 1.331462081019776e-05,
"loss": 0.2384,
"step": 361
},
{
"epoch": 0.45439330543933054,
"grad_norm": 3.9998960494995117,
"learning_rate": 1.327319247469768e-05,
"loss": 0.2815,
"step": 362
},
{
"epoch": 0.4556485355648536,
"grad_norm": 3.4446206092834473,
"learning_rate": 1.323170112410946e-05,
"loss": 0.272,
"step": 363
},
{
"epoch": 0.45690376569037655,
"grad_norm": 3.6744120121002197,
"learning_rate": 1.319014755721934e-05,
"loss": 0.2609,
"step": 364
},
{
"epoch": 0.4581589958158996,
"grad_norm": 4.846432209014893,
"learning_rate": 1.3148532574011342e-05,
"loss": 0.288,
"step": 365
},
{
"epoch": 0.4594142259414226,
"grad_norm": 5.032169818878174,
"learning_rate": 1.3106856975651866e-05,
"loss": 0.2614,
"step": 366
},
{
"epoch": 0.4606694560669456,
"grad_norm": 3.7232418060302734,
"learning_rate": 1.3065121564474268e-05,
"loss": 0.2498,
"step": 367
},
{
"epoch": 0.4619246861924686,
"grad_norm": 3.13726544380188,
"learning_rate": 1.3023327143963415e-05,
"loss": 0.2192,
"step": 368
},
{
"epoch": 0.46317991631799166,
"grad_norm": 5.035037994384766,
"learning_rate": 1.2981474518740217e-05,
"loss": 0.2971,
"step": 369
},
{
"epoch": 0.46443514644351463,
"grad_norm": 3.5766642093658447,
"learning_rate": 1.293956449454612e-05,
"loss": 0.3288,
"step": 370
},
{
"epoch": 0.46569037656903767,
"grad_norm": 2.6294803619384766,
"learning_rate": 1.2897597878227624e-05,
"loss": 0.236,
"step": 371
},
{
"epoch": 0.46694560669456064,
"grad_norm": 5.947935104370117,
"learning_rate": 1.285557547772072e-05,
"loss": 0.2743,
"step": 372
},
{
"epoch": 0.4682008368200837,
"grad_norm": 2.6133997440338135,
"learning_rate": 1.2813498102035357e-05,
"loss": 0.243,
"step": 373
},
{
"epoch": 0.4694560669456067,
"grad_norm": 3.549476146697998,
"learning_rate": 1.2771366561239865e-05,
"loss": 0.1827,
"step": 374
},
{
"epoch": 0.4707112970711297,
"grad_norm": 4.550835609436035,
"learning_rate": 1.2729181666445338e-05,
"loss": 0.2061,
"step": 375
},
{
"epoch": 0.4719665271966527,
"grad_norm": 4.819687843322754,
"learning_rate": 1.2686944229790044e-05,
"loss": 0.2638,
"step": 376
},
{
"epoch": 0.47322175732217575,
"grad_norm": 3.6842753887176514,
"learning_rate": 1.264465506442378e-05,
"loss": 0.2583,
"step": 377
},
{
"epoch": 0.47447698744769873,
"grad_norm": 7.268190860748291,
"learning_rate": 1.2602314984492222e-05,
"loss": 0.2948,
"step": 378
},
{
"epoch": 0.47573221757322176,
"grad_norm": 4.938054084777832,
"learning_rate": 1.2559924805121236e-05,
"loss": 0.2874,
"step": 379
},
{
"epoch": 0.4769874476987448,
"grad_norm": 8.232144355773926,
"learning_rate": 1.2517485342401201e-05,
"loss": 0.2654,
"step": 380
},
{
"epoch": 0.4769874476987448,
"eval_accuracy": 0.8543046357615894,
"eval_f1": 0.7013574660633484,
"eval_loss": 0.3007500171661377,
"eval_precision": 0.824468085106383,
"eval_recall": 0.610236220472441,
"eval_runtime": 52.7757,
"eval_samples_per_second": 5.249,
"eval_steps_per_second": 0.171,
"step": 380
},
{
"epoch": 0.47824267782426777,
"grad_norm": 4.203466892242432,
"learning_rate": 1.2474997413371294e-05,
"loss": 0.1688,
"step": 381
},
{
"epoch": 0.4794979079497908,
"grad_norm": 2.6223390102386475,
"learning_rate": 1.2432461836003762e-05,
"loss": 0.2678,
"step": 382
},
{
"epoch": 0.48075313807531384,
"grad_norm": 3.429694414138794,
"learning_rate": 1.238987942918817e-05,
"loss": 0.2859,
"step": 383
},
{
"epoch": 0.4820083682008368,
"grad_norm": 2.9112257957458496,
"learning_rate": 1.2347251012715629e-05,
"loss": 0.2242,
"step": 384
},
{
"epoch": 0.48326359832635984,
"grad_norm": 4.1868896484375,
"learning_rate": 1.2304577407263032e-05,
"loss": 0.2995,
"step": 385
},
{
"epoch": 0.4845188284518828,
"grad_norm": 3.6559159755706787,
"learning_rate": 1.2261859434377245e-05,
"loss": 0.2115,
"step": 386
},
{
"epoch": 0.48577405857740585,
"grad_norm": 4.471072673797607,
"learning_rate": 1.2219097916459284e-05,
"loss": 0.2012,
"step": 387
},
{
"epoch": 0.4870292887029289,
"grad_norm": 4.849166393280029,
"learning_rate": 1.2176293676748494e-05,
"loss": 0.2927,
"step": 388
},
{
"epoch": 0.48828451882845186,
"grad_norm": 4.391753196716309,
"learning_rate": 1.2133447539306689e-05,
"loss": 0.3133,
"step": 389
},
{
"epoch": 0.4895397489539749,
"grad_norm": 2.870288848876953,
"learning_rate": 1.2090560329002294e-05,
"loss": 0.212,
"step": 390
},
{
"epoch": 0.49079497907949793,
"grad_norm": 5.226803302764893,
"learning_rate": 1.2047632871494472e-05,
"loss": 0.224,
"step": 391
},
{
"epoch": 0.4920502092050209,
"grad_norm": 3.988142728805542,
"learning_rate": 1.200466599321721e-05,
"loss": 0.2408,
"step": 392
},
{
"epoch": 0.49330543933054394,
"grad_norm": 3.6183176040649414,
"learning_rate": 1.196166052136342e-05,
"loss": 0.2265,
"step": 393
},
{
"epoch": 0.49456066945606697,
"grad_norm": 4.849849700927734,
"learning_rate": 1.1918617283869021e-05,
"loss": 0.2457,
"step": 394
},
{
"epoch": 0.49581589958158995,
"grad_norm": 4.868863105773926,
"learning_rate": 1.1875537109396978e-05,
"loss": 0.2463,
"step": 395
},
{
"epoch": 0.497071129707113,
"grad_norm": 3.9498956203460693,
"learning_rate": 1.1832420827321374e-05,
"loss": 0.2655,
"step": 396
},
{
"epoch": 0.49832635983263596,
"grad_norm": 4.637706756591797,
"learning_rate": 1.1789269267711425e-05,
"loss": 0.2025,
"step": 397
},
{
"epoch": 0.499581589958159,
"grad_norm": 3.8872170448303223,
"learning_rate": 1.1746083261315505e-05,
"loss": 0.2401,
"step": 398
},
{
"epoch": 0.500836820083682,
"grad_norm": 3.0792970657348633,
"learning_rate": 1.1702863639545157e-05,
"loss": 0.2804,
"step": 399
},
{
"epoch": 0.502092050209205,
"grad_norm": 5.217683792114258,
"learning_rate": 1.165961123445908e-05,
"loss": 0.2443,
"step": 400
},
{
"epoch": 0.502092050209205,
"eval_accuracy": 0.8565121412803532,
"eval_f1": 0.7161572052401747,
"eval_loss": 0.2955167889595032,
"eval_precision": 0.803921568627451,
"eval_recall": 0.6456692913385826,
"eval_runtime": 51.6679,
"eval_samples_per_second": 5.361,
"eval_steps_per_second": 0.174,
"step": 400
},
{
"epoch": 0.5033472803347281,
"grad_norm": 3.5723674297332764,
"learning_rate": 1.1616326878747115e-05,
"loss": 0.2269,
"step": 401
},
{
"epoch": 0.5046025104602511,
"grad_norm": 3.2594027519226074,
"learning_rate": 1.1573011405714214e-05,
"loss": 0.2535,
"step": 402
},
{
"epoch": 0.505857740585774,
"grad_norm": 4.008416652679443,
"learning_rate": 1.1529665649264388e-05,
"loss": 0.2676,
"step": 403
},
{
"epoch": 0.507112970711297,
"grad_norm": 4.401585102081299,
"learning_rate": 1.1486290443884666e-05,
"loss": 0.2613,
"step": 404
},
{
"epoch": 0.5083682008368201,
"grad_norm": 4.80834436416626,
"learning_rate": 1.1442886624629035e-05,
"loss": 0.3053,
"step": 405
},
{
"epoch": 0.5096234309623431,
"grad_norm": 4.229012489318848,
"learning_rate": 1.1399455027102327e-05,
"loss": 0.241,
"step": 406
},
{
"epoch": 0.5108786610878661,
"grad_norm": 3.3926737308502197,
"learning_rate": 1.1355996487444178e-05,
"loss": 0.1874,
"step": 407
},
{
"epoch": 0.5121338912133891,
"grad_norm": 6.564388751983643,
"learning_rate": 1.131251184231291e-05,
"loss": 0.2087,
"step": 408
},
{
"epoch": 0.5133891213389121,
"grad_norm": 4.077323913574219,
"learning_rate": 1.1269001928869414e-05,
"loss": 0.2787,
"step": 409
},
{
"epoch": 0.5146443514644351,
"grad_norm": 4.169933319091797,
"learning_rate": 1.122546758476105e-05,
"loss": 0.2701,
"step": 410
},
{
"epoch": 0.5158995815899582,
"grad_norm": 4.548664093017578,
"learning_rate": 1.1181909648105511e-05,
"loss": 0.2711,
"step": 411
},
{
"epoch": 0.5171548117154812,
"grad_norm": 7.507246971130371,
"learning_rate": 1.1138328957474691e-05,
"loss": 0.2761,
"step": 412
},
{
"epoch": 0.5184100418410041,
"grad_norm": 4.293572425842285,
"learning_rate": 1.1094726351878549e-05,
"loss": 0.2451,
"step": 413
},
{
"epoch": 0.5196652719665272,
"grad_norm": 4.692587852478027,
"learning_rate": 1.1051102670748939e-05,
"loss": 0.2841,
"step": 414
},
{
"epoch": 0.5209205020920502,
"grad_norm": 3.3623526096343994,
"learning_rate": 1.1007458753923455e-05,
"loss": 0.245,
"step": 415
},
{
"epoch": 0.5221757322175732,
"grad_norm": 4.157695770263672,
"learning_rate": 1.0963795441629275e-05,
"loss": 0.2193,
"step": 416
},
{
"epoch": 0.5234309623430963,
"grad_norm": 5.14530086517334,
"learning_rate": 1.0920113574466975e-05,
"loss": 0.3033,
"step": 417
},
{
"epoch": 0.5246861924686193,
"grad_norm": 4.993683338165283,
"learning_rate": 1.0876413993394346e-05,
"loss": 0.2947,
"step": 418
},
{
"epoch": 0.5259414225941422,
"grad_norm": 6.235597610473633,
"learning_rate": 1.0832697539710197e-05,
"loss": 0.2765,
"step": 419
},
{
"epoch": 0.5271966527196653,
"grad_norm": 3.5556070804595947,
"learning_rate": 1.0788965055038179e-05,
"loss": 0.2743,
"step": 420
},
{
"epoch": 0.5271966527196653,
"eval_accuracy": 0.8543046357615894,
"eval_f1": 0.695852534562212,
"eval_loss": 0.30108267068862915,
"eval_precision": 0.8388888888888889,
"eval_recall": 0.594488188976378,
"eval_runtime": 53.2652,
"eval_samples_per_second": 5.2,
"eval_steps_per_second": 0.169,
"step": 420
},
{
"epoch": 0.5284518828451883,
"grad_norm": 3.676478624343872,
"learning_rate": 1.0745217381310562e-05,
"loss": 0.2493,
"step": 421
},
{
"epoch": 0.5297071129707113,
"grad_norm": 2.608015775680542,
"learning_rate": 1.0701455360752038e-05,
"loss": 0.2325,
"step": 422
},
{
"epoch": 0.5309623430962344,
"grad_norm": 3.949383497238159,
"learning_rate": 1.0657679835863497e-05,
"loss": 0.3002,
"step": 423
},
{
"epoch": 0.5322175732217573,
"grad_norm": 4.198700904846191,
"learning_rate": 1.0613891649405816e-05,
"loss": 0.1867,
"step": 424
},
{
"epoch": 0.5334728033472803,
"grad_norm": 4.353850364685059,
"learning_rate": 1.0570091644383631e-05,
"loss": 0.2605,
"step": 425
},
{
"epoch": 0.5347280334728034,
"grad_norm": 3.590062379837036,
"learning_rate": 1.0526280664029105e-05,
"loss": 0.2438,
"step": 426
},
{
"epoch": 0.5359832635983264,
"grad_norm": 3.3330376148223877,
"learning_rate": 1.0482459551785705e-05,
"loss": 0.1865,
"step": 427
},
{
"epoch": 0.5372384937238494,
"grad_norm": 3.8933749198913574,
"learning_rate": 1.0438629151291944e-05,
"loss": 0.3091,
"step": 428
},
{
"epoch": 0.5384937238493723,
"grad_norm": 3.451763153076172,
"learning_rate": 1.0394790306365154e-05,
"loss": 0.2454,
"step": 429
},
{
"epoch": 0.5397489539748954,
"grad_norm": 3.9449098110198975,
"learning_rate": 1.0350943860985249e-05,
"loss": 0.2863,
"step": 430
},
{
"epoch": 0.5410041841004184,
"grad_norm": 3.1234138011932373,
"learning_rate": 1.0307090659278453e-05,
"loss": 0.2382,
"step": 431
},
{
"epoch": 0.5422594142259414,
"grad_norm": 4.012730598449707,
"learning_rate": 1.0263231545501068e-05,
"loss": 0.1927,
"step": 432
},
{
"epoch": 0.5435146443514645,
"grad_norm": 3.3617918491363525,
"learning_rate": 1.0219367364023216e-05,
"loss": 0.2557,
"step": 433
},
{
"epoch": 0.5447698744769874,
"grad_norm": 3.0772593021392822,
"learning_rate": 1.0175498959312572e-05,
"loss": 0.2144,
"step": 434
},
{
"epoch": 0.5460251046025104,
"grad_norm": 3.1349897384643555,
"learning_rate": 1.013162717591813e-05,
"loss": 0.2583,
"step": 435
},
{
"epoch": 0.5472803347280335,
"grad_norm": 3.4480161666870117,
"learning_rate": 1.0087752858453923e-05,
"loss": 0.2406,
"step": 436
},
{
"epoch": 0.5485355648535565,
"grad_norm": 7.350139617919922,
"learning_rate": 1.0043876851582763e-05,
"loss": 0.2542,
"step": 437
},
{
"epoch": 0.5497907949790795,
"grad_norm": 3.588282346725464,
"learning_rate": 1e-05,
"loss": 0.2763,
"step": 438
},
{
"epoch": 0.5510460251046025,
"grad_norm": 4.360295295715332,
"learning_rate": 9.956123148417239e-06,
"loss": 0.3057,
"step": 439
},
{
"epoch": 0.5523012552301255,
"grad_norm": 2.705050468444824,
"learning_rate": 9.91224714154608e-06,
"loss": 0.2248,
"step": 440
},
{
"epoch": 0.5523012552301255,
"eval_accuracy": 0.8532008830022075,
"eval_f1": 0.6928406466512702,
"eval_loss": 0.3030659258365631,
"eval_precision": 0.8379888268156425,
"eval_recall": 0.5905511811023622,
"eval_runtime": 53.3187,
"eval_samples_per_second": 5.195,
"eval_steps_per_second": 0.169,
"step": 440
},
{
"epoch": 0.5535564853556485,
"grad_norm": 8.555684089660645,
"learning_rate": 9.86837282408187e-06,
"loss": 0.2456,
"step": 441
},
{
"epoch": 0.5548117154811716,
"grad_norm": 4.036064624786377,
"learning_rate": 9.82450104068743e-06,
"loss": 0.2197,
"step": 442
},
{
"epoch": 0.5560669456066946,
"grad_norm": 3.9498164653778076,
"learning_rate": 9.78063263597679e-06,
"loss": 0.2475,
"step": 443
},
{
"epoch": 0.5573221757322175,
"grad_norm": 5.120787620544434,
"learning_rate": 9.736768454498935e-06,
"loss": 0.2361,
"step": 444
},
{
"epoch": 0.5585774058577406,
"grad_norm": 3.8310952186584473,
"learning_rate": 9.692909340721549e-06,
"loss": 0.1948,
"step": 445
},
{
"epoch": 0.5598326359832636,
"grad_norm": 7.724740982055664,
"learning_rate": 9.649056139014754e-06,
"loss": 0.2821,
"step": 446
},
{
"epoch": 0.5610878661087866,
"grad_norm": 3.3741142749786377,
"learning_rate": 9.605209693634849e-06,
"loss": 0.2063,
"step": 447
},
{
"epoch": 0.5623430962343097,
"grad_norm": 4.161517143249512,
"learning_rate": 9.561370848708061e-06,
"loss": 0.2638,
"step": 448
},
{
"epoch": 0.5635983263598326,
"grad_norm": 4.094989776611328,
"learning_rate": 9.517540448214299e-06,
"loss": 0.2374,
"step": 449
},
{
"epoch": 0.5648535564853556,
"grad_norm": 5.402541160583496,
"learning_rate": 9.473719335970896e-06,
"loss": 0.2456,
"step": 450
},
{
"epoch": 0.5661087866108787,
"grad_norm": 3.0757880210876465,
"learning_rate": 9.429908355616372e-06,
"loss": 0.1735,
"step": 451
},
{
"epoch": 0.5673640167364017,
"grad_norm": 3.2094337940216064,
"learning_rate": 9.38610835059419e-06,
"loss": 0.1759,
"step": 452
},
{
"epoch": 0.5686192468619247,
"grad_norm": 6.4844489097595215,
"learning_rate": 9.342320164136506e-06,
"loss": 0.274,
"step": 453
},
{
"epoch": 0.5698744769874478,
"grad_norm": 3.759528636932373,
"learning_rate": 9.298544639247965e-06,
"loss": 0.2074,
"step": 454
},
{
"epoch": 0.5711297071129707,
"grad_norm": 4.877233028411865,
"learning_rate": 9.25478261868944e-06,
"loss": 0.2442,
"step": 455
},
{
"epoch": 0.5723849372384937,
"grad_norm": 4.176396369934082,
"learning_rate": 9.211034944961825e-06,
"loss": 0.2082,
"step": 456
},
{
"epoch": 0.5736401673640167,
"grad_norm": 4.096301078796387,
"learning_rate": 9.167302460289804e-06,
"loss": 0.2556,
"step": 457
},
{
"epoch": 0.5748953974895398,
"grad_norm": 3.9278416633605957,
"learning_rate": 9.123586006605658e-06,
"loss": 0.1989,
"step": 458
},
{
"epoch": 0.5761506276150627,
"grad_norm": 6.082350254058838,
"learning_rate": 9.079886425533026e-06,
"loss": 0.3095,
"step": 459
},
{
"epoch": 0.5774058577405857,
"grad_norm": 4.512117862701416,
"learning_rate": 9.036204558370725e-06,
"loss": 0.2149,
"step": 460
},
{
"epoch": 0.5774058577405857,
"eval_accuracy": 0.8609271523178808,
"eval_f1": 0.7307692307692307,
"eval_loss": 0.28679677844047546,
"eval_precision": 0.7990654205607477,
"eval_recall": 0.6732283464566929,
"eval_runtime": 52.9315,
"eval_samples_per_second": 5.233,
"eval_steps_per_second": 0.17,
"step": 460
},
{
"epoch": 0.5786610878661088,
"grad_norm": 5.169907569885254,
"learning_rate": 8.992541246076552e-06,
"loss": 0.2715,
"step": 461
},
{
"epoch": 0.5799163179916318,
"grad_norm": 7.113213539123535,
"learning_rate": 8.948897329251066e-06,
"loss": 0.2462,
"step": 462
},
{
"epoch": 0.5811715481171548,
"grad_norm": 6.822581768035889,
"learning_rate": 8.905273648121455e-06,
"loss": 0.2062,
"step": 463
},
{
"epoch": 0.5824267782426779,
"grad_norm": 4.880428314208984,
"learning_rate": 8.861671042525312e-06,
"loss": 0.2825,
"step": 464
},
{
"epoch": 0.5836820083682008,
"grad_norm": 4.777677536010742,
"learning_rate": 8.818090351894492e-06,
"loss": 0.2439,
"step": 465
},
{
"epoch": 0.5849372384937238,
"grad_norm": 4.444671154022217,
"learning_rate": 8.774532415238954e-06,
"loss": 0.2612,
"step": 466
},
{
"epoch": 0.5861924686192469,
"grad_norm": 4.537267208099365,
"learning_rate": 8.730998071130589e-06,
"loss": 0.2002,
"step": 467
},
{
"epoch": 0.5874476987447699,
"grad_norm": 2.443470001220703,
"learning_rate": 8.68748815768709e-06,
"loss": 0.205,
"step": 468
},
{
"epoch": 0.5887029288702929,
"grad_norm": 3.0930683612823486,
"learning_rate": 8.64400351255582e-06,
"loss": 0.1858,
"step": 469
},
{
"epoch": 0.5899581589958159,
"grad_norm": 4.16900634765625,
"learning_rate": 8.600544972897678e-06,
"loss": 0.2589,
"step": 470
},
{
"epoch": 0.5912133891213389,
"grad_norm": 3.5443623065948486,
"learning_rate": 8.55711337537097e-06,
"loss": 0.2245,
"step": 471
},
{
"epoch": 0.5924686192468619,
"grad_norm": 4.173429012298584,
"learning_rate": 8.513709556115335e-06,
"loss": 0.2116,
"step": 472
},
{
"epoch": 0.593723849372385,
"grad_norm": 4.342430114746094,
"learning_rate": 8.470334350735615e-06,
"loss": 0.2919,
"step": 473
},
{
"epoch": 0.594979079497908,
"grad_norm": 3.233147144317627,
"learning_rate": 8.42698859428579e-06,
"loss": 0.2359,
"step": 474
},
{
"epoch": 0.5962343096234309,
"grad_norm": 3.438584327697754,
"learning_rate": 8.383673121252887e-06,
"loss": 0.2049,
"step": 475
},
{
"epoch": 0.597489539748954,
"grad_norm": 6.184849739074707,
"learning_rate": 8.340388765540923e-06,
"loss": 0.2746,
"step": 476
},
{
"epoch": 0.598744769874477,
"grad_norm": 3.2504115104675293,
"learning_rate": 8.297136360454844e-06,
"loss": 0.2135,
"step": 477
},
{
"epoch": 0.6,
"grad_norm": 5.307207107543945,
"learning_rate": 8.253916738684497e-06,
"loss": 0.2292,
"step": 478
},
{
"epoch": 0.6012552301255231,
"grad_norm": 8.800402641296387,
"learning_rate": 8.21073073228858e-06,
"loss": 0.3836,
"step": 479
},
{
"epoch": 0.602510460251046,
"grad_norm": 3.201681613922119,
"learning_rate": 8.16757917267863e-06,
"loss": 0.1998,
"step": 480
},
{
"epoch": 0.602510460251046,
"eval_accuracy": 0.8587196467991169,
"eval_f1": 0.7117117117117117,
"eval_loss": 0.2975335419178009,
"eval_precision": 0.8315789473684211,
"eval_recall": 0.6220472440944882,
"eval_runtime": 52.1942,
"eval_samples_per_second": 5.307,
"eval_steps_per_second": 0.172,
"step": 480
},
{
"epoch": 0.603765690376569,
"grad_norm": 4.214263916015625,
"learning_rate": 8.124462890603027e-06,
"loss": 0.2576,
"step": 481
},
{
"epoch": 0.605020920502092,
"grad_norm": 4.688704490661621,
"learning_rate": 8.081382716130982e-06,
"loss": 0.2174,
"step": 482
},
{
"epoch": 0.6062761506276151,
"grad_norm": 4.108887672424316,
"learning_rate": 8.038339478636581e-06,
"loss": 0.2709,
"step": 483
},
{
"epoch": 0.6075313807531381,
"grad_norm": 3.246297597885132,
"learning_rate": 7.995334006782793e-06,
"loss": 0.2215,
"step": 484
},
{
"epoch": 0.608786610878661,
"grad_norm": 4.0921454429626465,
"learning_rate": 7.95236712850553e-06,
"loss": 0.2766,
"step": 485
},
{
"epoch": 0.6100418410041841,
"grad_norm": 3.6497576236724854,
"learning_rate": 7.909439670997706e-06,
"loss": 0.2337,
"step": 486
},
{
"epoch": 0.6112970711297071,
"grad_norm": 3.3551504611968994,
"learning_rate": 7.866552460693314e-06,
"loss": 0.208,
"step": 487
},
{
"epoch": 0.6125523012552301,
"grad_norm": 4.46877384185791,
"learning_rate": 7.823706323251512e-06,
"loss": 0.2493,
"step": 488
},
{
"epoch": 0.6138075313807532,
"grad_norm": 4.38779878616333,
"learning_rate": 7.78090208354072e-06,
"loss": 0.2635,
"step": 489
},
{
"epoch": 0.6150627615062761,
"grad_norm": 5.72041130065918,
"learning_rate": 7.738140565622758e-06,
"loss": 0.2348,
"step": 490
},
{
"epoch": 0.6163179916317991,
"grad_norm": 5.17399263381958,
"learning_rate": 7.69542259273697e-06,
"loss": 0.2208,
"step": 491
},
{
"epoch": 0.6175732217573222,
"grad_norm": 3.7989094257354736,
"learning_rate": 7.652748987284375e-06,
"loss": 0.256,
"step": 492
},
{
"epoch": 0.6188284518828452,
"grad_norm": 3.5693840980529785,
"learning_rate": 7.610120570811833e-06,
"loss": 0.2408,
"step": 493
},
{
"epoch": 0.6200836820083682,
"grad_norm": 3.3571958541870117,
"learning_rate": 7.567538163996237e-06,
"loss": 0.2263,
"step": 494
},
{
"epoch": 0.6213389121338913,
"grad_norm": 5.469134330749512,
"learning_rate": 7.525002586628707e-06,
"loss": 0.2335,
"step": 495
},
{
"epoch": 0.6225941422594142,
"grad_norm": 2.846597909927368,
"learning_rate": 7.4825146575988e-06,
"loss": 0.204,
"step": 496
},
{
"epoch": 0.6238493723849372,
"grad_norm": 6.401832103729248,
"learning_rate": 7.440075194878769e-06,
"loss": 0.2643,
"step": 497
},
{
"epoch": 0.6251046025104603,
"grad_norm": 4.148714065551758,
"learning_rate": 7.397685015507781e-06,
"loss": 0.1882,
"step": 498
},
{
"epoch": 0.6263598326359833,
"grad_norm": 3.8023147583007812,
"learning_rate": 7.355344935576221e-06,
"loss": 0.179,
"step": 499
},
{
"epoch": 0.6276150627615062,
"grad_norm": 3.7073490619659424,
"learning_rate": 7.313055770209961e-06,
"loss": 0.2459,
"step": 500
},
{
"epoch": 0.6276150627615062,
"eval_accuracy": 0.8509933774834437,
"eval_f1": 0.6882217090069284,
"eval_loss": 0.29779428243637085,
"eval_precision": 0.8324022346368715,
"eval_recall": 0.5866141732283464,
"eval_runtime": 52.9024,
"eval_samples_per_second": 5.236,
"eval_steps_per_second": 0.17,
"step": 500
},
{
"epoch": 0.6288702928870293,
"grad_norm": 4.248108863830566,
"learning_rate": 7.270818333554665e-06,
"loss": 0.2752,
"step": 501
},
{
"epoch": 0.6301255230125523,
"grad_norm": 3.575007677078247,
"learning_rate": 7.228633438760138e-06,
"loss": 0.238,
"step": 502
},
{
"epoch": 0.6313807531380753,
"grad_norm": 5.464937210083008,
"learning_rate": 7.186501897964644e-06,
"loss": 0.2215,
"step": 503
},
{
"epoch": 0.6326359832635984,
"grad_norm": 4.194279670715332,
"learning_rate": 7.144424522279283e-06,
"loss": 0.2387,
"step": 504
},
{
"epoch": 0.6338912133891214,
"grad_norm": 4.254185199737549,
"learning_rate": 7.102402121772378e-06,
"loss": 0.2539,
"step": 505
},
{
"epoch": 0.6351464435146443,
"grad_norm": 8.297247886657715,
"learning_rate": 7.060435505453884e-06,
"loss": 0.2829,
"step": 506
},
{
"epoch": 0.6364016736401673,
"grad_norm": 5.86175012588501,
"learning_rate": 7.018525481259787e-06,
"loss": 0.2731,
"step": 507
},
{
"epoch": 0.6376569037656904,
"grad_norm": 4.584890365600586,
"learning_rate": 6.976672856036586e-06,
"loss": 0.1941,
"step": 508
},
{
"epoch": 0.6389121338912134,
"grad_norm": 4.639788627624512,
"learning_rate": 6.934878435525736e-06,
"loss": 0.2143,
"step": 509
},
{
"epoch": 0.6401673640167364,
"grad_norm": 4.085133075714111,
"learning_rate": 6.893143024348137e-06,
"loss": 0.2428,
"step": 510
},
{
"epoch": 0.6414225941422594,
"grad_norm": 5.64658784866333,
"learning_rate": 6.851467425988663e-06,
"loss": 0.2014,
"step": 511
},
{
"epoch": 0.6426778242677824,
"grad_norm": 4.088956832885742,
"learning_rate": 6.809852442780664e-06,
"loss": 0.185,
"step": 512
},
{
"epoch": 0.6439330543933054,
"grad_norm": 5.6827921867370605,
"learning_rate": 6.768298875890541e-06,
"loss": 0.3019,
"step": 513
},
{
"epoch": 0.6451882845188285,
"grad_norm": 6.169975757598877,
"learning_rate": 6.726807525302319e-06,
"loss": 0.2872,
"step": 514
},
{
"epoch": 0.6464435146443515,
"grad_norm": 4.995835304260254,
"learning_rate": 6.685379189802241e-06,
"loss": 0.2611,
"step": 515
},
{
"epoch": 0.6476987447698744,
"grad_norm": 3.8299150466918945,
"learning_rate": 6.6440146669633855e-06,
"loss": 0.1959,
"step": 516
},
{
"epoch": 0.6489539748953975,
"grad_norm": 4.961380958557129,
"learning_rate": 6.602714753130322e-06,
"loss": 0.2274,
"step": 517
},
{
"epoch": 0.6502092050209205,
"grad_norm": 4.710041522979736,
"learning_rate": 6.561480243403776e-06,
"loss": 0.3025,
"step": 518
},
{
"epoch": 0.6514644351464435,
"grad_norm": 3.762503147125244,
"learning_rate": 6.520311931625325e-06,
"loss": 0.25,
"step": 519
},
{
"epoch": 0.6527196652719666,
"grad_norm": 3.377311944961548,
"learning_rate": 6.479210610362103e-06,
"loss": 0.1953,
"step": 520
},
{
"epoch": 0.6527196652719666,
"eval_accuracy": 0.8576158940397351,
"eval_f1": 0.7020785219399538,
"eval_loss": 0.29889100790023804,
"eval_precision": 0.8491620111731844,
"eval_recall": 0.5984251968503937,
"eval_runtime": 50.9239,
"eval_samples_per_second": 5.439,
"eval_steps_per_second": 0.177,
"step": 520
},
{
"epoch": 0.6539748953974895,
"grad_norm": 4.013047218322754,
"learning_rate": 6.4381770708915594e-06,
"loss": 0.2467,
"step": 521
},
{
"epoch": 0.6552301255230125,
"grad_norm": 3.758030652999878,
"learning_rate": 6.397212103186214e-06,
"loss": 0.2211,
"step": 522
},
{
"epoch": 0.6564853556485356,
"grad_norm": 3.948408603668213,
"learning_rate": 6.35631649589845e-06,
"loss": 0.188,
"step": 523
},
{
"epoch": 0.6577405857740586,
"grad_norm": 2.879676103591919,
"learning_rate": 6.315491036345338e-06,
"loss": 0.2009,
"step": 524
},
{
"epoch": 0.6589958158995816,
"grad_norm": 4.440194606781006,
"learning_rate": 6.274736510493462e-06,
"loss": 0.2608,
"step": 525
},
{
"epoch": 0.6602510460251046,
"grad_norm": 7.7365403175354,
"learning_rate": 6.23405370294381e-06,
"loss": 0.2733,
"step": 526
},
{
"epoch": 0.6615062761506276,
"grad_norm": 4.252779960632324,
"learning_rate": 6.1934433969166575e-06,
"loss": 0.1904,
"step": 527
},
{
"epoch": 0.6627615062761506,
"grad_norm": 3.153885841369629,
"learning_rate": 6.1529063742364844e-06,
"loss": 0.1833,
"step": 528
},
{
"epoch": 0.6640167364016737,
"grad_norm": 7.1857476234436035,
"learning_rate": 6.112443415316934e-06,
"loss": 0.2215,
"step": 529
},
{
"epoch": 0.6652719665271967,
"grad_norm": 2.9891092777252197,
"learning_rate": 6.072055299145778e-06,
"loss": 0.2156,
"step": 530
},
{
"epoch": 0.6665271966527196,
"grad_norm": 3.8794708251953125,
"learning_rate": 6.031742803269931e-06,
"loss": 0.251,
"step": 531
},
{
"epoch": 0.6677824267782427,
"grad_norm": 3.384833335876465,
"learning_rate": 5.991506703780475e-06,
"loss": 0.218,
"step": 532
},
{
"epoch": 0.6690376569037657,
"grad_norm": 3.629615306854248,
"learning_rate": 5.95134777529771e-06,
"loss": 0.2329,
"step": 533
},
{
"epoch": 0.6702928870292887,
"grad_norm": 3.8525490760803223,
"learning_rate": 5.911266790956258e-06,
"loss": 0.229,
"step": 534
},
{
"epoch": 0.6715481171548117,
"grad_norm": 4.439032077789307,
"learning_rate": 5.871264522390165e-06,
"loss": 0.2752,
"step": 535
},
{
"epoch": 0.6728033472803348,
"grad_norm": 3.8713276386260986,
"learning_rate": 5.831341739718055e-06,
"loss": 0.2427,
"step": 536
},
{
"epoch": 0.6740585774058577,
"grad_norm": 3.2495763301849365,
"learning_rate": 5.791499211528302e-06,
"loss": 0.2424,
"step": 537
},
{
"epoch": 0.6753138075313807,
"grad_norm": 4.471564292907715,
"learning_rate": 5.751737704864224e-06,
"loss": 0.1954,
"step": 538
},
{
"epoch": 0.6765690376569038,
"grad_norm": 4.963108539581299,
"learning_rate": 5.712057985209325e-06,
"loss": 0.2393,
"step": 539
},
{
"epoch": 0.6778242677824268,
"grad_norm": 5.474493503570557,
"learning_rate": 5.672460816472556e-06,
"loss": 0.3153,
"step": 540
},
{
"epoch": 0.6778242677824268,
"eval_accuracy": 0.8642384105960265,
"eval_f1": 0.7260579064587973,
"eval_loss": 0.2864134609699249,
"eval_precision": 0.8358974358974359,
"eval_recall": 0.6417322834645669,
"eval_runtime": 51.7722,
"eval_samples_per_second": 5.35,
"eval_steps_per_second": 0.174,
"step": 540
},
{
"epoch": 0.6790794979079497,
"grad_norm": 5.037370681762695,
"learning_rate": 5.632946960973611e-06,
"loss": 0.2517,
"step": 541
},
{
"epoch": 0.6803347280334728,
"grad_norm": 5.2865142822265625,
"learning_rate": 5.5935171794282426e-06,
"loss": 0.2962,
"step": 542
},
{
"epoch": 0.6815899581589958,
"grad_norm": 3.1313962936401367,
"learning_rate": 5.554172230933628e-06,
"loss": 0.1967,
"step": 543
},
{
"epoch": 0.6828451882845188,
"grad_norm": 4.365119934082031,
"learning_rate": 5.514912872953746e-06,
"loss": 0.2568,
"step": 544
},
{
"epoch": 0.6841004184100419,
"grad_norm": 2.9407169818878174,
"learning_rate": 5.4757398613047985e-06,
"loss": 0.2133,
"step": 545
},
{
"epoch": 0.6853556485355649,
"grad_norm": 4.410444259643555,
"learning_rate": 5.436653950140657e-06,
"loss": 0.2656,
"step": 546
},
{
"epoch": 0.6866108786610878,
"grad_norm": 5.113467216491699,
"learning_rate": 5.397655891938348e-06,
"loss": 0.2425,
"step": 547
},
{
"epoch": 0.6878661087866109,
"grad_norm": 3.6607089042663574,
"learning_rate": 5.35874643748356e-06,
"loss": 0.1942,
"step": 548
},
{
"epoch": 0.6891213389121339,
"grad_norm": 4.0803914070129395,
"learning_rate": 5.3199263358562e-06,
"loss": 0.1702,
"step": 549
},
{
"epoch": 0.6903765690376569,
"grad_norm": 7.306187629699707,
"learning_rate": 5.281196334415968e-06,
"loss": 0.1774,
"step": 550
},
{
"epoch": 0.69163179916318,
"grad_norm": 6.191274166107178,
"learning_rate": 5.2425571787879455e-06,
"loss": 0.2996,
"step": 551
},
{
"epoch": 0.6928870292887029,
"grad_norm": 4.841433048248291,
"learning_rate": 5.204009612848288e-06,
"loss": 0.3033,
"step": 552
},
{
"epoch": 0.6941422594142259,
"grad_norm": 4.5002899169921875,
"learning_rate": 5.165554378709857e-06,
"loss": 0.2149,
"step": 553
},
{
"epoch": 0.695397489539749,
"grad_norm": 3.8781685829162598,
"learning_rate": 5.127192216707974e-06,
"loss": 0.2828,
"step": 554
},
{
"epoch": 0.696652719665272,
"grad_norm": 4.068243980407715,
"learning_rate": 5.088923865386133e-06,
"loss": 0.1836,
"step": 555
},
{
"epoch": 0.697907949790795,
"grad_norm": 4.608306407928467,
"learning_rate": 5.050750061481799e-06,
"loss": 0.2514,
"step": 556
},
{
"epoch": 0.699163179916318,
"grad_norm": 5.066010475158691,
"learning_rate": 5.012671539912226e-06,
"loss": 0.2174,
"step": 557
},
{
"epoch": 0.700418410041841,
"grad_norm": 4.110201358795166,
"learning_rate": 4.9746890337603005e-06,
"loss": 0.1672,
"step": 558
},
{
"epoch": 0.701673640167364,
"grad_norm": 4.265486717224121,
"learning_rate": 4.936803274260434e-06,
"loss": 0.2198,
"step": 559
},
{
"epoch": 0.702928870292887,
"grad_norm": 4.03239107131958,
"learning_rate": 4.899014990784485e-06,
"loss": 0.2172,
"step": 560
},
{
"epoch": 0.702928870292887,
"eval_accuracy": 0.8443708609271523,
"eval_f1": 0.6483790523690773,
"eval_loss": 0.31904953718185425,
"eval_precision": 0.8843537414965986,
"eval_recall": 0.5118110236220472,
"eval_runtime": 51.3743,
"eval_samples_per_second": 5.392,
"eval_steps_per_second": 0.175,
"step": 560
},
{
"epoch": 0.7041841004184101,
"grad_norm": 4.523290157318115,
"learning_rate": 4.861324910827714e-06,
"loss": 0.2345,
"step": 561
},
{
"epoch": 0.705439330543933,
"grad_norm": 4.160706520080566,
"learning_rate": 4.8237337599947795e-06,
"loss": 0.2406,
"step": 562
},
{
"epoch": 0.706694560669456,
"grad_norm": 6.3733811378479,
"learning_rate": 4.786242261985772e-06,
"loss": 0.2486,
"step": 563
},
{
"epoch": 0.7079497907949791,
"grad_norm": 6.861822128295898,
"learning_rate": 4.748851138582269e-06,
"loss": 0.2124,
"step": 564
},
{
"epoch": 0.7092050209205021,
"grad_norm": 4.6429829597473145,
"learning_rate": 4.711561109633466e-06,
"loss": 0.2569,
"step": 565
},
{
"epoch": 0.7104602510460251,
"grad_norm": 3.4625086784362793,
"learning_rate": 4.674372893042287e-06,
"loss": 0.2174,
"step": 566
},
{
"epoch": 0.7117154811715481,
"grad_norm": 7.073486804962158,
"learning_rate": 4.63728720475158e-06,
"loss": 0.2145,
"step": 567
},
{
"epoch": 0.7129707112970711,
"grad_norm": 4.345584869384766,
"learning_rate": 4.6003047587303376e-06,
"loss": 0.1827,
"step": 568
},
{
"epoch": 0.7142259414225941,
"grad_norm": 4.599338054656982,
"learning_rate": 4.563426266959932e-06,
"loss": 0.2167,
"step": 569
},
{
"epoch": 0.7154811715481172,
"grad_norm": 3.284950017929077,
"learning_rate": 4.526652439420427e-06,
"loss": 0.1409,
"step": 570
},
{
"epoch": 0.7167364016736402,
"grad_norm": 3.3235983848571777,
"learning_rate": 4.489983984076918e-06,
"loss": 0.2377,
"step": 571
},
{
"epoch": 0.7179916317991631,
"grad_norm": 5.1698079109191895,
"learning_rate": 4.453421606865869e-06,
"loss": 0.2022,
"step": 572
},
{
"epoch": 0.7192468619246862,
"grad_norm": 5.351980209350586,
"learning_rate": 4.416966011681548e-06,
"loss": 0.1903,
"step": 573
},
{
"epoch": 0.7205020920502092,
"grad_norm": 2.702564239501953,
"learning_rate": 4.380617900362473e-06,
"loss": 0.1935,
"step": 574
},
{
"epoch": 0.7217573221757322,
"grad_norm": 3.770988702774048,
"learning_rate": 4.34437797267789e-06,
"loss": 0.175,
"step": 575
},
{
"epoch": 0.7230125523012553,
"grad_norm": 3.6061084270477295,
"learning_rate": 4.308246926314307e-06,
"loss": 0.2515,
"step": 576
},
{
"epoch": 0.7242677824267783,
"grad_norm": 5.896265983581543,
"learning_rate": 4.272225456862076e-06,
"loss": 0.2694,
"step": 577
},
{
"epoch": 0.7255230125523012,
"grad_norm": 7.105819225311279,
"learning_rate": 4.236314257801968e-06,
"loss": 0.3122,
"step": 578
},
{
"epoch": 0.7267782426778243,
"grad_norm": 5.691869735717773,
"learning_rate": 4.200514020491854e-06,
"loss": 0.2672,
"step": 579
},
{
"epoch": 0.7280334728033473,
"grad_norm": 3.698089838027954,
"learning_rate": 4.164825434153381e-06,
"loss": 0.2604,
"step": 580
},
{
"epoch": 0.7280334728033473,
"eval_accuracy": 0.8686534216335541,
"eval_f1": 0.7384615384615385,
"eval_loss": 0.28295037150382996,
"eval_precision": 0.835820895522388,
"eval_recall": 0.6614173228346457,
"eval_runtime": 53.3249,
"eval_samples_per_second": 5.195,
"eval_steps_per_second": 0.169,
"step": 580
},
{
"epoch": 0.7292887029288703,
"grad_norm": 5.295552730560303,
"learning_rate": 4.129249185858704e-06,
"loss": 0.2536,
"step": 581
},
{
"epoch": 0.7305439330543934,
"grad_norm": 3.8178629875183105,
"learning_rate": 4.093785960517269e-06,
"loss": 0.2233,
"step": 582
},
{
"epoch": 0.7317991631799163,
"grad_norm": 5.176862716674805,
"learning_rate": 4.0584364408626065e-06,
"loss": 0.3026,
"step": 583
},
{
"epoch": 0.7330543933054393,
"grad_norm": 6.326966762542725,
"learning_rate": 4.0232013074392065e-06,
"loss": 0.2652,
"step": 584
},
{
"epoch": 0.7343096234309623,
"grad_norm": 8.710590362548828,
"learning_rate": 3.988081238589406e-06,
"loss": 0.2439,
"step": 585
},
{
"epoch": 0.7355648535564854,
"grad_norm": 5.5283026695251465,
"learning_rate": 3.953076910440337e-06,
"loss": 0.2445,
"step": 586
},
{
"epoch": 0.7368200836820084,
"grad_norm": 4.787403583526611,
"learning_rate": 3.918188996890903e-06,
"loss": 0.2705,
"step": 587
},
{
"epoch": 0.7380753138075313,
"grad_norm": 6.294352054595947,
"learning_rate": 3.883418169598808e-06,
"loss": 0.2813,
"step": 588
},
{
"epoch": 0.7393305439330544,
"grad_norm": 4.5643415451049805,
"learning_rate": 3.84876509796763e-06,
"loss": 0.2417,
"step": 589
},
{
"epoch": 0.7405857740585774,
"grad_norm": 6.011057376861572,
"learning_rate": 3.814230449133928e-06,
"loss": 0.3062,
"step": 590
},
{
"epoch": 0.7418410041841004,
"grad_norm": 5.100391387939453,
"learning_rate": 3.7798148879543983e-06,
"loss": 0.2424,
"step": 591
},
{
"epoch": 0.7430962343096235,
"grad_norm": 3.619565963745117,
"learning_rate": 3.745519076993078e-06,
"loss": 0.262,
"step": 592
},
{
"epoch": 0.7443514644351464,
"grad_norm": 5.100575923919678,
"learning_rate": 3.7113436765085865e-06,
"loss": 0.2577,
"step": 593
},
{
"epoch": 0.7456066945606694,
"grad_norm": 6.600237846374512,
"learning_rate": 3.6772893444414226e-06,
"loss": 0.2571,
"step": 594
},
{
"epoch": 0.7468619246861925,
"grad_norm": 4.155444145202637,
"learning_rate": 3.643356736401289e-06,
"loss": 0.2558,
"step": 595
},
{
"epoch": 0.7481171548117155,
"grad_norm": 3.4668867588043213,
"learning_rate": 3.609546505654462e-06,
"loss": 0.1694,
"step": 596
},
{
"epoch": 0.7493723849372385,
"grad_norm": 4.315099239349365,
"learning_rate": 3.5758593031112364e-06,
"loss": 0.2029,
"step": 597
},
{
"epoch": 0.7506276150627615,
"grad_norm": 4.79595422744751,
"learning_rate": 3.5422957773133804e-06,
"loss": 0.2165,
"step": 598
},
{
"epoch": 0.7518828451882845,
"grad_norm": 3.9190430641174316,
"learning_rate": 3.5088565744216574e-06,
"loss": 0.2107,
"step": 599
},
{
"epoch": 0.7531380753138075,
"grad_norm": 4.860176086425781,
"learning_rate": 3.475542338203377e-06,
"loss": 0.2671,
"step": 600
},
{
"epoch": 0.7531380753138075,
"eval_accuracy": 0.8565121412803532,
"eval_f1": 0.6976744186046512,
"eval_loss": 0.29695039987564087,
"eval_precision": 0.8522727272727273,
"eval_recall": 0.5905511811023622,
"eval_runtime": 51.86,
"eval_samples_per_second": 5.341,
"eval_steps_per_second": 0.174,
"step": 600
},
{
"epoch": 0.7543933054393306,
"grad_norm": 5.352249622344971,
"learning_rate": 3.4423537100200068e-06,
"loss": 0.2344,
"step": 601
},
{
"epoch": 0.7556485355648536,
"grad_norm": 4.358277797698975,
"learning_rate": 3.4092913288148254e-06,
"loss": 0.2084,
"step": 602
},
{
"epoch": 0.7569037656903765,
"grad_norm": 4.106777667999268,
"learning_rate": 3.3763558311006207e-06,
"loss": 0.2017,
"step": 603
},
{
"epoch": 0.7581589958158996,
"grad_norm": 6.909910678863525,
"learning_rate": 3.343547850947434e-06,
"loss": 0.3994,
"step": 604
},
{
"epoch": 0.7594142259414226,
"grad_norm": 4.24434757232666,
"learning_rate": 3.310868019970356e-06,
"loss": 0.2443,
"step": 605
},
{
"epoch": 0.7606694560669456,
"grad_norm": 4.735110759735107,
"learning_rate": 3.2783169673173666e-06,
"loss": 0.2305,
"step": 606
},
{
"epoch": 0.7619246861924687,
"grad_norm": 4.206883907318115,
"learning_rate": 3.2458953196572194e-06,
"loss": 0.2061,
"step": 607
},
{
"epoch": 0.7631799163179916,
"grad_norm": 2.6777400970458984,
"learning_rate": 3.2136037011673803e-06,
"loss": 0.1777,
"step": 608
},
{
"epoch": 0.7644351464435146,
"grad_norm": 3.7428863048553467,
"learning_rate": 3.181442733522008e-06,
"loss": 0.211,
"step": 609
},
{
"epoch": 0.7656903765690377,
"grad_norm": 5.144768714904785,
"learning_rate": 3.149413035879996e-06,
"loss": 0.2571,
"step": 610
},
{
"epoch": 0.7669456066945607,
"grad_norm": 6.341953754425049,
"learning_rate": 3.1175152248730343e-06,
"loss": 0.2649,
"step": 611
},
{
"epoch": 0.7682008368200837,
"grad_norm": 4.261897563934326,
"learning_rate": 3.085749914593752e-06,
"loss": 0.2207,
"step": 612
},
{
"epoch": 0.7694560669456066,
"grad_norm": 3.700777053833008,
"learning_rate": 3.0541177165838954e-06,
"loss": 0.2589,
"step": 613
},
{
"epoch": 0.7707112970711297,
"grad_norm": 5.691616535186768,
"learning_rate": 3.022619239822535e-06,
"loss": 0.2042,
"step": 614
},
{
"epoch": 0.7719665271966527,
"grad_norm": 2.532932996749878,
"learning_rate": 2.9912550907143766e-06,
"loss": 0.2177,
"step": 615
},
{
"epoch": 0.7732217573221757,
"grad_norm": 6.345351219177246,
"learning_rate": 2.9600258730780564e-06,
"loss": 0.2141,
"step": 616
},
{
"epoch": 0.7744769874476988,
"grad_norm": 4.4296793937683105,
"learning_rate": 2.9289321881345257e-06,
"loss": 0.2414,
"step": 617
},
{
"epoch": 0.7757322175732217,
"grad_norm": 5.737855911254883,
"learning_rate": 2.897974634495482e-06,
"loss": 0.2163,
"step": 618
},
{
"epoch": 0.7769874476987447,
"grad_norm": 3.722508430480957,
"learning_rate": 2.867153808151837e-06,
"loss": 0.2102,
"step": 619
},
{
"epoch": 0.7782426778242678,
"grad_norm": 3.9203503131866455,
"learning_rate": 2.8364703024622474e-06,
"loss": 0.2049,
"step": 620
},
{
"epoch": 0.7782426778242678,
"eval_accuracy": 0.8587196467991169,
"eval_f1": 0.7117117117117117,
"eval_loss": 0.28623539209365845,
"eval_precision": 0.8315789473684211,
"eval_recall": 0.6220472440944882,
"eval_runtime": 51.4017,
"eval_samples_per_second": 5.389,
"eval_steps_per_second": 0.175,
"step": 620
},
{
"epoch": 0.7794979079497908,
"grad_norm": 5.395325660705566,
"learning_rate": 2.8059247081416887e-06,
"loss": 0.2889,
"step": 621
},
{
"epoch": 0.7807531380753138,
"grad_norm": 3.7470452785491943,
"learning_rate": 2.7755176132500905e-06,
"loss": 0.2567,
"step": 622
},
{
"epoch": 0.7820083682008369,
"grad_norm": 3.5239267349243164,
"learning_rate": 2.745249603180996e-06,
"loss": 0.214,
"step": 623
},
{
"epoch": 0.7832635983263598,
"grad_norm": 3.5487923622131348,
"learning_rate": 2.7151212606503164e-06,
"loss": 0.2062,
"step": 624
},
{
"epoch": 0.7845188284518828,
"grad_norm": 3.3393709659576416,
"learning_rate": 2.6851331656850966e-06,
"loss": 0.1947,
"step": 625
},
{
"epoch": 0.7857740585774059,
"grad_norm": 2.6931920051574707,
"learning_rate": 2.6552858956123485e-06,
"loss": 0.2918,
"step": 626
},
{
"epoch": 0.7870292887029289,
"grad_norm": 3.748667001724243,
"learning_rate": 2.625580025047956e-06,
"loss": 0.2286,
"step": 627
},
{
"epoch": 0.7882845188284519,
"grad_norm": 3.2201664447784424,
"learning_rate": 2.5960161258855807e-06,
"loss": 0.2037,
"step": 628
},
{
"epoch": 0.7895397489539749,
"grad_norm": 3.861560821533203,
"learning_rate": 2.5665947672856774e-06,
"loss": 0.2023,
"step": 629
},
{
"epoch": 0.7907949790794979,
"grad_norm": 2.7438437938690186,
"learning_rate": 2.5373165156645263e-06,
"loss": 0.2018,
"step": 630
},
{
"epoch": 0.7920502092050209,
"grad_norm": 4.868431568145752,
"learning_rate": 2.5081819346833293e-06,
"loss": 0.2575,
"step": 631
},
{
"epoch": 0.793305439330544,
"grad_norm": 4.836085796356201,
"learning_rate": 2.4791915852373604e-06,
"loss": 0.206,
"step": 632
},
{
"epoch": 0.794560669456067,
"grad_norm": 3.095149278640747,
"learning_rate": 2.450346025445165e-06,
"loss": 0.173,
"step": 633
},
{
"epoch": 0.7958158995815899,
"grad_norm": 3.0923187732696533,
"learning_rate": 2.4216458106378194e-06,
"loss": 0.2016,
"step": 634
},
{
"epoch": 0.797071129707113,
"grad_norm": 5.667265892028809,
"learning_rate": 2.3930914933482317e-06,
"loss": 0.3038,
"step": 635
},
{
"epoch": 0.798326359832636,
"grad_norm": 4.840297222137451,
"learning_rate": 2.3646836233005133e-06,
"loss": 0.2346,
"step": 636
},
{
"epoch": 0.799581589958159,
"grad_norm": 3.6551482677459717,
"learning_rate": 2.3364227473993885e-06,
"loss": 0.2153,
"step": 637
},
{
"epoch": 0.800836820083682,
"grad_norm": 6.236753940582275,
"learning_rate": 2.3083094097196766e-06,
"loss": 0.3236,
"step": 638
},
{
"epoch": 0.802092050209205,
"grad_norm": 3.9249565601348877,
"learning_rate": 2.280344151495799e-06,
"loss": 0.1873,
"step": 639
},
{
"epoch": 0.803347280334728,
"grad_norm": 5.489711284637451,
"learning_rate": 2.252527511111381e-06,
"loss": 0.2972,
"step": 640
},
{
"epoch": 0.803347280334728,
"eval_accuracy": 0.8609271523178808,
"eval_f1": 0.7149321266968326,
"eval_loss": 0.2890259623527527,
"eval_precision": 0.8404255319148937,
"eval_recall": 0.6220472440944882,
"eval_runtime": 52.9255,
"eval_samples_per_second": 5.234,
"eval_steps_per_second": 0.17,
"step": 640
},
{
"epoch": 0.804602510460251,
"grad_norm": 4.235107421875,
"learning_rate": 2.224860024088863e-06,
"loss": 0.2522,
"step": 641
},
{
"epoch": 0.8058577405857741,
"grad_norm": 5.042692184448242,
"learning_rate": 2.197342223079212e-06,
"loss": 0.2259,
"step": 642
},
{
"epoch": 0.8071129707112971,
"grad_norm": 6.554571628570557,
"learning_rate": 2.1699746378516618e-06,
"loss": 0.3102,
"step": 643
},
{
"epoch": 0.80836820083682,
"grad_norm": 5.590906143188477,
"learning_rate": 2.1427577952835044e-06,
"loss": 0.242,
"step": 644
},
{
"epoch": 0.8096234309623431,
"grad_norm": 3.1682546138763428,
"learning_rate": 2.1156922193499573e-06,
"loss": 0.2016,
"step": 645
},
{
"epoch": 0.8108786610878661,
"grad_norm": 5.03267765045166,
"learning_rate": 2.088778431114068e-06,
"loss": 0.3032,
"step": 646
},
{
"epoch": 0.8121338912133891,
"grad_norm": 5.085690498352051,
"learning_rate": 2.06201694871669e-06,
"loss": 0.2572,
"step": 647
},
{
"epoch": 0.8133891213389122,
"grad_norm": 6.410571098327637,
"learning_rate": 2.0354082873665015e-06,
"loss": 0.2776,
"step": 648
},
{
"epoch": 0.8146443514644351,
"grad_norm": 2.672776699066162,
"learning_rate": 2.0089529593300916e-06,
"loss": 0.1729,
"step": 649
},
{
"epoch": 0.8158995815899581,
"grad_norm": 4.959750175476074,
"learning_rate": 1.9826514739220946e-06,
"loss": 0.2254,
"step": 650
},
{
"epoch": 0.8171548117154812,
"grad_norm": 4.0036725997924805,
"learning_rate": 1.956504337495384e-06,
"loss": 0.1674,
"step": 651
},
{
"epoch": 0.8184100418410042,
"grad_norm": 4.883665561676025,
"learning_rate": 1.9305120534313295e-06,
"loss": 0.1647,
"step": 652
},
{
"epoch": 0.8196652719665272,
"grad_norm": 5.845210075378418,
"learning_rate": 1.9046751221301018e-06,
"loss": 0.3202,
"step": 653
},
{
"epoch": 0.8209205020920503,
"grad_norm": 5.12738037109375,
"learning_rate": 1.8789940410010355e-06,
"loss": 0.1829,
"step": 654
},
{
"epoch": 0.8221757322175732,
"grad_norm": 3.904860019683838,
"learning_rate": 1.853469304453066e-06,
"loss": 0.2275,
"step": 655
},
{
"epoch": 0.8234309623430962,
"grad_norm": 4.157991409301758,
"learning_rate": 1.8281014038851963e-06,
"loss": 0.2335,
"step": 656
},
{
"epoch": 0.8246861924686193,
"grad_norm": 6.301884651184082,
"learning_rate": 1.802890827677045e-06,
"loss": 0.265,
"step": 657
},
{
"epoch": 0.8259414225941423,
"grad_norm": 4.426259994506836,
"learning_rate": 1.777838061179442e-06,
"loss": 0.2346,
"step": 658
},
{
"epoch": 0.8271966527196652,
"grad_norm": 3.300267457962036,
"learning_rate": 1.7529435867050771e-06,
"loss": 0.2312,
"step": 659
},
{
"epoch": 0.8284518828451883,
"grad_norm": 3.5169765949249268,
"learning_rate": 1.7282078835192362e-06,
"loss": 0.1953,
"step": 660
},
{
"epoch": 0.8284518828451883,
"eval_accuracy": 0.8609271523178808,
"eval_f1": 0.7136363636363636,
"eval_loss": 0.2910914123058319,
"eval_precision": 0.8440860215053764,
"eval_recall": 0.6181102362204725,
"eval_runtime": 52.6057,
"eval_samples_per_second": 5.266,
"eval_steps_per_second": 0.171,
"step": 660
},
{
"epoch": 0.8297071129707113,
"grad_norm": 3.4855308532714844,
"learning_rate": 1.703631427830551e-06,
"loss": 0.2036,
"step": 661
},
{
"epoch": 0.8309623430962343,
"grad_norm": 4.0676493644714355,
"learning_rate": 1.679214692781842e-06,
"loss": 0.2412,
"step": 662
},
{
"epoch": 0.8322175732217573,
"grad_norm": 4.750482559204102,
"learning_rate": 1.6549581484410105e-06,
"loss": 0.2193,
"step": 663
},
{
"epoch": 0.8334728033472804,
"grad_norm": 7.349670886993408,
"learning_rate": 1.6308622617919823e-06,
"loss": 0.2712,
"step": 664
},
{
"epoch": 0.8347280334728033,
"grad_norm": 3.89208984375,
"learning_rate": 1.606927496725722e-06,
"loss": 0.2761,
"step": 665
},
{
"epoch": 0.8359832635983263,
"grad_norm": 3.2538020610809326,
"learning_rate": 1.583154314031311e-06,
"loss": 0.1974,
"step": 666
},
{
"epoch": 0.8372384937238494,
"grad_norm": 4.290145397186279,
"learning_rate": 1.5595431713870569e-06,
"loss": 0.2562,
"step": 667
},
{
"epoch": 0.8384937238493724,
"grad_norm": 5.561316013336182,
"learning_rate": 1.5360945233516933e-06,
"loss": 0.2707,
"step": 668
},
{
"epoch": 0.8397489539748954,
"grad_norm": 4.019890308380127,
"learning_rate": 1.5128088213556324e-06,
"loss": 0.1786,
"step": 669
},
{
"epoch": 0.8410041841004184,
"grad_norm": 4.0204758644104,
"learning_rate": 1.489686513692269e-06,
"loss": 0.2377,
"step": 670
},
{
"epoch": 0.8422594142259414,
"grad_norm": 4.556098461151123,
"learning_rate": 1.4667280455093468e-06,
"loss": 0.2138,
"step": 671
},
{
"epoch": 0.8435146443514644,
"grad_norm": 5.089337348937988,
"learning_rate": 1.4439338588004005e-06,
"loss": 0.2272,
"step": 672
},
{
"epoch": 0.8447698744769875,
"grad_norm": 4.120019435882568,
"learning_rate": 1.4213043923962322e-06,
"loss": 0.2104,
"step": 673
},
{
"epoch": 0.8460251046025105,
"grad_norm": 3.5715548992156982,
"learning_rate": 1.3988400819564707e-06,
"loss": 0.2013,
"step": 674
},
{
"epoch": 0.8472803347280334,
"grad_norm": 4.313863277435303,
"learning_rate": 1.3765413599611832e-06,
"loss": 0.2265,
"step": 675
},
{
"epoch": 0.8485355648535565,
"grad_norm": 5.553587436676025,
"learning_rate": 1.3544086557025493e-06,
"loss": 0.2667,
"step": 676
},
{
"epoch": 0.8497907949790795,
"grad_norm": 4.209076404571533,
"learning_rate": 1.3324423952765974e-06,
"loss": 0.2375,
"step": 677
},
{
"epoch": 0.8510460251046025,
"grad_norm": 4.849112033843994,
"learning_rate": 1.3106430015749971e-06,
"loss": 0.2268,
"step": 678
},
{
"epoch": 0.8523012552301256,
"grad_norm": 4.489964485168457,
"learning_rate": 1.2890108942769253e-06,
"loss": 0.1934,
"step": 679
},
{
"epoch": 0.8535564853556485,
"grad_norm": 3.962709665298462,
"learning_rate": 1.2675464898409772e-06,
"loss": 0.24,
"step": 680
},
{
"epoch": 0.8535564853556485,
"eval_accuracy": 0.8653421633554084,
"eval_f1": 0.7288888888888889,
"eval_loss": 0.28242796659469604,
"eval_precision": 0.8367346938775511,
"eval_recall": 0.6456692913385826,
"eval_runtime": 53.7633,
"eval_samples_per_second": 5.152,
"eval_steps_per_second": 0.167,
"step": 680
},
{
"epoch": 0.8548117154811715,
"grad_norm": 4.079026699066162,
"learning_rate": 1.2462502014971579e-06,
"loss": 0.2389,
"step": 681
},
{
"epoch": 0.8560669456066946,
"grad_norm": 5.030835151672363,
"learning_rate": 1.2251224392389183e-06,
"loss": 0.296,
"step": 682
},
{
"epoch": 0.8573221757322176,
"grad_norm": 4.880098819732666,
"learning_rate": 1.2041636098152742e-06,
"loss": 0.2687,
"step": 683
},
{
"epoch": 0.8585774058577406,
"grad_norm": 3.222585439682007,
"learning_rate": 1.1833741167229584e-06,
"loss": 0.2378,
"step": 684
},
{
"epoch": 0.8598326359832636,
"grad_norm": 4.026751518249512,
"learning_rate": 1.1627543601986702e-06,
"loss": 0.1571,
"step": 685
},
{
"epoch": 0.8610878661087866,
"grad_norm": 4.304843902587891,
"learning_rate": 1.1423047372113538e-06,
"loss": 0.2836,
"step": 686
},
{
"epoch": 0.8623430962343096,
"grad_norm": 4.018265247344971,
"learning_rate": 1.1220256414545682e-06,
"loss": 0.2154,
"step": 687
},
{
"epoch": 0.8635983263598327,
"grad_norm": 5.214784622192383,
"learning_rate": 1.1019174633389073e-06,
"loss": 0.235,
"step": 688
},
{
"epoch": 0.8648535564853557,
"grad_norm": 3.2286956310272217,
"learning_rate": 1.0819805899844748e-06,
"loss": 0.1933,
"step": 689
},
{
"epoch": 0.8661087866108786,
"grad_norm": 4.96722936630249,
"learning_rate": 1.0622154052134392e-06,
"loss": 0.2189,
"step": 690
},
{
"epoch": 0.8673640167364016,
"grad_norm": 5.077961444854736,
"learning_rate": 1.042622289542642e-06,
"loss": 0.1864,
"step": 691
},
{
"epoch": 0.8686192468619247,
"grad_norm": 4.23472785949707,
"learning_rate": 1.0232016201762696e-06,
"loss": 0.286,
"step": 692
},
{
"epoch": 0.8698744769874477,
"grad_norm": 4.877628803253174,
"learning_rate": 1.0039537709985968e-06,
"loss": 0.23,
"step": 693
},
{
"epoch": 0.8711297071129707,
"grad_norm": 5.60928201675415,
"learning_rate": 9.848791125667867e-07,
"loss": 0.2311,
"step": 694
},
{
"epoch": 0.8723849372384938,
"grad_norm": 4.585776329040527,
"learning_rate": 9.659780121037533e-07,
"loss": 0.2079,
"step": 695
},
{
"epoch": 0.8736401673640167,
"grad_norm": 4.836354732513428,
"learning_rate": 9.472508334910946e-07,
"loss": 0.1449,
"step": 696
},
{
"epoch": 0.8748953974895397,
"grad_norm": 6.011707782745361,
"learning_rate": 9.286979372620885e-07,
"loss": 0.2135,
"step": 697
},
{
"epoch": 0.8761506276150628,
"grad_norm": 5.969391822814941,
"learning_rate": 9.103196805947522e-07,
"loss": 0.2964,
"step": 698
},
{
"epoch": 0.8774058577405858,
"grad_norm": 5.757129192352295,
"learning_rate": 8.92116417304958e-07,
"loss": 0.2444,
"step": 699
},
{
"epoch": 0.8786610878661087,
"grad_norm": 4.445234298706055,
"learning_rate": 8.740884978396358e-07,
"loss": 0.282,
"step": 700
},
{
"epoch": 0.8786610878661087,
"eval_accuracy": 0.8631346578366446,
"eval_f1": 0.7219730941704036,
"eval_loss": 0.2860436737537384,
"eval_precision": 0.8385416666666666,
"eval_recall": 0.6338582677165354,
"eval_runtime": 51.8577,
"eval_samples_per_second": 5.342,
"eval_steps_per_second": 0.174,
"step": 700
},
{
"epoch": 0.8799163179916318,
"grad_norm": 5.480141639709473,
"learning_rate": 8.562362692700121e-07,
"loss": 0.2179,
"step": 701
},
{
"epoch": 0.8811715481171548,
"grad_norm": 4.480906009674072,
"learning_rate": 8.385600752849387e-07,
"loss": 0.2641,
"step": 702
},
{
"epoch": 0.8824267782426778,
"grad_norm": 4.178032875061035,
"learning_rate": 8.210602561842696e-07,
"loss": 0.2121,
"step": 703
},
{
"epoch": 0.8836820083682009,
"grad_norm": 3.827854633331299,
"learning_rate": 8.037371488723078e-07,
"loss": 0.2251,
"step": 704
},
{
"epoch": 0.8849372384937239,
"grad_norm": 4.65687370300293,
"learning_rate": 7.865910868513316e-07,
"loss": 0.2668,
"step": 705
},
{
"epoch": 0.8861924686192468,
"grad_norm": 3.7226722240448,
"learning_rate": 7.69622400215162e-07,
"loss": 0.2322,
"step": 706
},
{
"epoch": 0.8874476987447699,
"grad_norm": 2.8721847534179688,
"learning_rate": 7.528314156428085e-07,
"loss": 0.2454,
"step": 707
},
{
"epoch": 0.8887029288702929,
"grad_norm": 6.672764301300049,
"learning_rate": 7.36218456392187e-07,
"loss": 0.2606,
"step": 708
},
{
"epoch": 0.8899581589958159,
"grad_norm": 8.37939167022705,
"learning_rate": 7.197838422938908e-07,
"loss": 0.1692,
"step": 709
},
{
"epoch": 0.891213389121339,
"grad_norm": 5.5875725746154785,
"learning_rate": 7.035278897450326e-07,
"loss": 0.2718,
"step": 710
},
{
"epoch": 0.8924686192468619,
"grad_norm": 3.284900188446045,
"learning_rate": 6.874509117031636e-07,
"loss": 0.2369,
"step": 711
},
{
"epoch": 0.8937238493723849,
"grad_norm": 3.2228903770446777,
"learning_rate": 6.715532176802298e-07,
"loss": 0.1676,
"step": 712
},
{
"epoch": 0.894979079497908,
"grad_norm": 4.319593906402588,
"learning_rate": 6.558351137366292e-07,
"loss": 0.2301,
"step": 713
},
{
"epoch": 0.896234309623431,
"grad_norm": 4.787438869476318,
"learning_rate": 6.402969024753147e-07,
"loss": 0.2494,
"step": 714
},
{
"epoch": 0.897489539748954,
"grad_norm": 5.25081205368042,
"learning_rate": 6.249388830359659e-07,
"loss": 0.1948,
"step": 715
},
{
"epoch": 0.8987447698744769,
"grad_norm": 4.489555835723877,
"learning_rate": 6.097613510892364e-07,
"loss": 0.3019,
"step": 716
},
{
"epoch": 0.9,
"grad_norm": 5.4843339920043945,
"learning_rate": 5.947645988310524e-07,
"loss": 0.2591,
"step": 717
},
{
"epoch": 0.901255230125523,
"grad_norm": 3.593355417251587,
"learning_rate": 5.799489149769943e-07,
"loss": 0.2489,
"step": 718
},
{
"epoch": 0.902510460251046,
"grad_norm": 4.679093837738037,
"learning_rate": 5.653145847567376e-07,
"loss": 0.3109,
"step": 719
},
{
"epoch": 0.9037656903765691,
"grad_norm": 3.6788761615753174,
"learning_rate": 5.508618899085583e-07,
"loss": 0.1931,
"step": 720
},
{
"epoch": 0.9037656903765691,
"eval_accuracy": 0.8620309050772627,
"eval_f1": 0.7178329571106095,
"eval_loss": 0.2884938418865204,
"eval_precision": 0.8412698412698413,
"eval_recall": 0.6259842519685039,
"eval_runtime": 51.1882,
"eval_samples_per_second": 5.411,
"eval_steps_per_second": 0.176,
"step": 720
},
{
"epoch": 0.905020920502092,
"grad_norm": 6.400424957275391,
"learning_rate": 5.365911086739117e-07,
"loss": 0.2357,
"step": 721
},
{
"epoch": 0.906276150627615,
"grad_norm": 4.213512897491455,
"learning_rate": 5.225025157920782e-07,
"loss": 0.2613,
"step": 722
},
{
"epoch": 0.9075313807531381,
"grad_norm": 4.268280506134033,
"learning_rate": 5.08596382494867e-07,
"loss": 0.1831,
"step": 723
},
{
"epoch": 0.9087866108786611,
"grad_norm": 5.073379039764404,
"learning_rate": 4.948729765014004e-07,
"loss": 0.2215,
"step": 724
},
{
"epoch": 0.9100418410041841,
"grad_norm": 3.750990867614746,
"learning_rate": 4.813325620129572e-07,
"loss": 0.2024,
"step": 725
},
{
"epoch": 0.9112970711297071,
"grad_norm": 3.3088490962982178,
"learning_rate": 4.6797539970788843e-07,
"loss": 0.2044,
"step": 726
},
{
"epoch": 0.9125523012552301,
"grad_norm": 4.00676965713501,
"learning_rate": 4.548017467365917e-07,
"loss": 0.1813,
"step": 727
},
{
"epoch": 0.9138075313807531,
"grad_norm": 4.088128089904785,
"learning_rate": 4.4181185671657634e-07,
"loss": 0.2087,
"step": 728
},
{
"epoch": 0.9150627615062762,
"grad_norm": 2.8981070518493652,
"learning_rate": 4.290059797275614e-07,
"loss": 0.1629,
"step": 729
},
{
"epoch": 0.9163179916317992,
"grad_norm": 3.5616722106933594,
"learning_rate": 4.163843623066732e-07,
"loss": 0.2354,
"step": 730
},
{
"epoch": 0.9175732217573221,
"grad_norm": 3.7056453227996826,
"learning_rate": 4.0394724744369564e-07,
"loss": 0.2432,
"step": 731
},
{
"epoch": 0.9188284518828452,
"grad_norm": 6.088983535766602,
"learning_rate": 3.916948745763938e-07,
"loss": 0.1824,
"step": 732
},
{
"epoch": 0.9200836820083682,
"grad_norm": 4.606024742126465,
"learning_rate": 3.7962747958590453e-07,
"loss": 0.2111,
"step": 733
},
{
"epoch": 0.9213389121338912,
"grad_norm": 3.6736326217651367,
"learning_rate": 3.6774529479219e-07,
"loss": 0.183,
"step": 734
},
{
"epoch": 0.9225941422594143,
"grad_norm": 4.102712154388428,
"learning_rate": 3.560485489495724e-07,
"loss": 0.2266,
"step": 735
},
{
"epoch": 0.9238493723849373,
"grad_norm": 3.6704976558685303,
"learning_rate": 3.445374672423252e-07,
"loss": 0.1764,
"step": 736
},
{
"epoch": 0.9251046025104602,
"grad_norm": 3.65004825592041,
"learning_rate": 3.3321227128033983e-07,
"loss": 0.183,
"step": 737
},
{
"epoch": 0.9263598326359833,
"grad_norm": 5.921749591827393,
"learning_rate": 3.2207317909485834e-07,
"loss": 0.3058,
"step": 738
},
{
"epoch": 0.9276150627615063,
"grad_norm": 3.211097240447998,
"learning_rate": 3.1112040513427646e-07,
"loss": 0.2254,
"step": 739
},
{
"epoch": 0.9288702928870293,
"grad_norm": 3.559563398361206,
"learning_rate": 3.0035416026001573e-07,
"loss": 0.2251,
"step": 740
},
{
"epoch": 0.9288702928870293,
"eval_accuracy": 0.8631346578366446,
"eval_f1": 0.7194570135746606,
"eval_loss": 0.2898021638393402,
"eval_precision": 0.8457446808510638,
"eval_recall": 0.6259842519685039,
"eval_runtime": 51.6937,
"eval_samples_per_second": 5.358,
"eval_steps_per_second": 0.174,
"step": 740
},
{
"epoch": 0.9301255230125522,
"grad_norm": 5.178525447845459,
"learning_rate": 2.8977465174246156e-07,
"loss": 0.2279,
"step": 741
},
{
"epoch": 0.9313807531380753,
"grad_norm": 3.763082504272461,
"learning_rate": 2.7938208325697825e-07,
"loss": 0.2488,
"step": 742
},
{
"epoch": 0.9326359832635983,
"grad_norm": 3.6214609146118164,
"learning_rate": 2.691766548799779e-07,
"loss": 0.2449,
"step": 743
},
{
"epoch": 0.9338912133891213,
"grad_norm": 4.081825256347656,
"learning_rate": 2.591585630850835e-07,
"loss": 0.2272,
"step": 744
},
{
"epoch": 0.9351464435146444,
"grad_norm": 6.377195835113525,
"learning_rate": 2.493280007393295e-07,
"loss": 0.2366,
"step": 745
},
{
"epoch": 0.9364016736401674,
"grad_norm": 6.9662981033325195,
"learning_rate": 2.3968515709946296e-07,
"loss": 0.2034,
"step": 746
},
{
"epoch": 0.9376569037656903,
"grad_norm": 2.801429510116577,
"learning_rate": 2.3023021780829158e-07,
"loss": 0.2079,
"step": 747
},
{
"epoch": 0.9389121338912134,
"grad_norm": 5.810364246368408,
"learning_rate": 2.2096336489111025e-07,
"loss": 0.2818,
"step": 748
},
{
"epoch": 0.9401673640167364,
"grad_norm": 6.120866775512695,
"learning_rate": 2.1188477675220142e-07,
"loss": 0.3222,
"step": 749
},
{
"epoch": 0.9414225941422594,
"grad_norm": 4.8275957107543945,
"learning_rate": 2.0299462817139902e-07,
"loss": 0.2819,
"step": 750
},
{
"epoch": 0.9426778242677825,
"grad_norm": 3.9053924083709717,
"learning_rate": 1.942930903007212e-07,
"loss": 0.1928,
"step": 751
},
{
"epoch": 0.9439330543933054,
"grad_norm": 3.453087091445923,
"learning_rate": 1.8578033066107392e-07,
"loss": 0.1989,
"step": 752
},
{
"epoch": 0.9451882845188284,
"grad_norm": 3.622796058654785,
"learning_rate": 1.7745651313903157e-07,
"loss": 0.1392,
"step": 753
},
{
"epoch": 0.9464435146443515,
"grad_norm": 3.5609803199768066,
"learning_rate": 1.693217979836792e-07,
"loss": 0.1935,
"step": 754
},
{
"epoch": 0.9476987447698745,
"grad_norm": 4.004082679748535,
"learning_rate": 1.6137634180352303e-07,
"loss": 0.2124,
"step": 755
},
{
"epoch": 0.9489539748953975,
"grad_norm": 3.635117292404175,
"learning_rate": 1.5362029756348373e-07,
"loss": 0.2254,
"step": 756
},
{
"epoch": 0.9502092050209205,
"grad_norm": 6.247368812561035,
"learning_rate": 1.4605381458194568e-07,
"loss": 0.3048,
"step": 757
},
{
"epoch": 0.9514644351464435,
"grad_norm": 7.196407794952393,
"learning_rate": 1.386770385278835e-07,
"loss": 0.2386,
"step": 758
},
{
"epoch": 0.9527196652719665,
"grad_norm": 4.880216121673584,
"learning_rate": 1.3149011141805669e-07,
"loss": 0.286,
"step": 759
},
{
"epoch": 0.9539748953974896,
"grad_norm": 3.848249912261963,
"learning_rate": 1.2449317161427942e-07,
"loss": 0.178,
"step": 760
},
{
"epoch": 0.9539748953974896,
"eval_accuracy": 0.8631346578366446,
"eval_f1": 0.7194570135746606,
"eval_loss": 0.2888832986354828,
"eval_precision": 0.8457446808510638,
"eval_recall": 0.6259842519685039,
"eval_runtime": 51.4816,
"eval_samples_per_second": 5.381,
"eval_steps_per_second": 0.175,
"step": 760
},
{
"epoch": 0.9552301255230126,
"grad_norm": 4.997644424438477,
"learning_rate": 1.1768635382075289e-07,
"loss": 0.2997,
"step": 761
},
{
"epoch": 0.9564853556485355,
"grad_norm": 4.796181678771973,
"learning_rate": 1.1106978908147381e-07,
"loss": 0.2601,
"step": 762
},
{
"epoch": 0.9577405857740586,
"grad_norm": 3.6467103958129883,
"learning_rate": 1.0464360477771218e-07,
"loss": 0.2264,
"step": 763
},
{
"epoch": 0.9589958158995816,
"grad_norm": 5.8963303565979,
"learning_rate": 9.840792462555426e-08,
"loss": 0.2699,
"step": 764
},
{
"epoch": 0.9602510460251046,
"grad_norm": 4.986123561859131,
"learning_rate": 9.236286867352785e-08,
"loss": 0.2784,
"step": 765
},
{
"epoch": 0.9615062761506277,
"grad_norm": 4.026951313018799,
"learning_rate": 8.650855330028629e-08,
"loss": 0.2525,
"step": 766
},
{
"epoch": 0.9627615062761506,
"grad_norm": 5.082536220550537,
"learning_rate": 8.08450912123715e-08,
"loss": 0.2589,
"step": 767
},
{
"epoch": 0.9640167364016736,
"grad_norm": 5.790295124053955,
"learning_rate": 7.53725914420378e-08,
"loss": 0.2811,
"step": 768
},
{
"epoch": 0.9652719665271966,
"grad_norm": 4.651604175567627,
"learning_rate": 7.009115934516253e-08,
"loss": 0.2499,
"step": 769
},
{
"epoch": 0.9665271966527197,
"grad_norm": 4.613510608673096,
"learning_rate": 6.500089659920661e-08,
"loss": 0.2495,
"step": 770
},
{
"epoch": 0.9677824267782427,
"grad_norm": 4.089534759521484,
"learning_rate": 6.010190120126602e-08,
"loss": 0.2421,
"step": 771
},
{
"epoch": 0.9690376569037656,
"grad_norm": 4.79866886138916,
"learning_rate": 5.539426746618337e-08,
"loss": 0.2493,
"step": 772
},
{
"epoch": 0.9702928870292887,
"grad_norm": 4.052529335021973,
"learning_rate": 5.0878086024727104e-08,
"loss": 0.245,
"step": 773
},
{
"epoch": 0.9715481171548117,
"grad_norm": 3.7741782665252686,
"learning_rate": 4.6553443821852893e-08,
"loss": 0.2205,
"step": 774
},
{
"epoch": 0.9728033472803347,
"grad_norm": 3.792848587036133,
"learning_rate": 4.2420424115025004e-08,
"loss": 0.2297,
"step": 775
},
{
"epoch": 0.9740585774058578,
"grad_norm": 4.39737606048584,
"learning_rate": 3.847910647261754e-08,
"loss": 0.1941,
"step": 776
},
{
"epoch": 0.9753138075313807,
"grad_norm": 4.301551342010498,
"learning_rate": 3.472956677238015e-08,
"loss": 0.2069,
"step": 777
},
{
"epoch": 0.9765690376569037,
"grad_norm": 3.8330047130584717,
"learning_rate": 3.117187719997805e-08,
"loss": 0.209,
"step": 778
},
{
"epoch": 0.9778242677824268,
"grad_norm": 3.5165398120880127,
"learning_rate": 2.780610624760094e-08,
"loss": 0.1973,
"step": 779
},
{
"epoch": 0.9790794979079498,
"grad_norm": 4.450705528259277,
"learning_rate": 2.4632318712646264e-08,
"loss": 0.2431,
"step": 780
},
{
"epoch": 0.9790794979079498,
"eval_accuracy": 0.8631346578366446,
"eval_f1": 0.7194570135746606,
"eval_loss": 0.2885710895061493,
"eval_precision": 0.8457446808510638,
"eval_recall": 0.6259842519685039,
"eval_runtime": 51.066,
"eval_samples_per_second": 5.424,
"eval_steps_per_second": 0.176,
"step": 780
},
{
"epoch": 0.9803347280334728,
"grad_norm": 5.469013690948486,
"learning_rate": 2.1650575696471332e-08,
"loss": 0.2557,
"step": 781
},
{
"epoch": 0.9815899581589959,
"grad_norm": 3.630676031112671,
"learning_rate": 1.8860934603215365e-08,
"loss": 0.1707,
"step": 782
},
{
"epoch": 0.9828451882845188,
"grad_norm": 2.949327230453491,
"learning_rate": 1.626344913869593e-08,
"loss": 0.1495,
"step": 783
},
{
"epoch": 0.9841004184100418,
"grad_norm": 4.833507537841797,
"learning_rate": 1.3858169309376446e-08,
"loss": 0.2054,
"step": 784
},
{
"epoch": 0.9853556485355649,
"grad_norm": 4.511420249938965,
"learning_rate": 1.1645141421399164e-08,
"loss": 0.2058,
"step": 785
},
{
"epoch": 0.9866108786610879,
"grad_norm": 4.471105098724365,
"learning_rate": 9.624408079699221e-09,
"loss": 0.2349,
"step": 786
},
{
"epoch": 0.9878661087866109,
"grad_norm": 4.504687309265137,
"learning_rate": 7.796008187177518e-09,
"loss": 0.2564,
"step": 787
},
{
"epoch": 0.9891213389121339,
"grad_norm": 4.099091053009033,
"learning_rate": 6.1599769439590896e-09,
"loss": 0.2413,
"step": 788
},
{
"epoch": 0.9903765690376569,
"grad_norm": 3.7011802196502686,
"learning_rate": 4.716345846711434e-09,
"loss": 0.2547,
"step": 789
},
{
"epoch": 0.9916317991631799,
"grad_norm": 4.092798233032227,
"learning_rate": 3.465142688036105e-09,
"loss": 0.2754,
"step": 790
},
{
"epoch": 0.992887029288703,
"grad_norm": 3.2027719020843506,
"learning_rate": 2.4063915559402463e-09,
"loss": 0.2205,
"step": 791
},
{
"epoch": 0.994142259414226,
"grad_norm": 4.241121768951416,
"learning_rate": 1.540112833366969e-09,
"loss": 0.2476,
"step": 792
},
{
"epoch": 0.9953974895397489,
"grad_norm": 3.92455792427063,
"learning_rate": 8.663231978034425e-10,
"loss": 0.2345,
"step": 793
},
{
"epoch": 0.9966527196652719,
"grad_norm": 5.164183616638184,
"learning_rate": 3.850356209633699e-10,
"loss": 0.2975,
"step": 794
},
{
"epoch": 0.997907949790795,
"grad_norm": 7.26465368270874,
"learning_rate": 9.625936853385753e-11,
"loss": 0.3067,
"step": 795
},
{
"epoch": 0.999163179916318,
"grad_norm": 4.6205949783325195,
"learning_rate": 0.0,
"loss": 0.2553,
"step": 796
}
],
"logging_steps": 1,
"max_steps": 796,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.917806194389156e+17,
"train_batch_size": 6,
"trial_name": null,
"trial_params": null
}