{
"best_global_step": 4662,
"best_metric": 0.9187405866743169,
"best_model_checkpoint": "./automod-model/model_versions/v4-masked/checkpoint-4662",
"epoch": 7.0,
"eval_steps": 500,
"global_step": 4662,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.015015015015015015,
"grad_norm": 17.097240447998047,
"learning_rate": 3.6e-07,
"loss": 2.9671,
"step": 10
},
{
"epoch": 0.03003003003003003,
"grad_norm": 26.079153060913086,
"learning_rate": 7.6e-07,
"loss": 3.0981,
"step": 20
},
{
"epoch": 0.04504504504504504,
"grad_norm": 80.04959106445312,
"learning_rate": 1.1600000000000001e-06,
"loss": 2.5492,
"step": 30
},
{
"epoch": 0.06006006006006006,
"grad_norm": 20.597259521484375,
"learning_rate": 1.56e-06,
"loss": 2.1283,
"step": 40
},
{
"epoch": 0.07507507507507508,
"grad_norm": 33.89236831665039,
"learning_rate": 1.9600000000000003e-06,
"loss": 1.821,
"step": 50
},
{
"epoch": 0.09009009009009009,
"grad_norm": 21.228269577026367,
"learning_rate": 2.3600000000000003e-06,
"loss": 1.0551,
"step": 60
},
{
"epoch": 0.10510510510510511,
"grad_norm": 29.945491790771484,
"learning_rate": 2.7600000000000003e-06,
"loss": 0.906,
"step": 70
},
{
"epoch": 0.12012012012012012,
"grad_norm": 21.597726821899414,
"learning_rate": 3.1600000000000002e-06,
"loss": 0.7201,
"step": 80
},
{
"epoch": 0.13513513513513514,
"grad_norm": 21.56252670288086,
"learning_rate": 3.5600000000000002e-06,
"loss": 0.3313,
"step": 90
},
{
"epoch": 0.15015015015015015,
"grad_norm": 9.781754493713379,
"learning_rate": 3.96e-06,
"loss": 0.3786,
"step": 100
},
{
"epoch": 0.16516516516516516,
"grad_norm": 13.030562400817871,
"learning_rate": 4.360000000000001e-06,
"loss": 0.3193,
"step": 110
},
{
"epoch": 0.18018018018018017,
"grad_norm": 18.68300437927246,
"learning_rate": 4.76e-06,
"loss": 0.2353,
"step": 120
},
{
"epoch": 0.19519519519519518,
"grad_norm": 12.98038101196289,
"learning_rate": 5.1600000000000006e-06,
"loss": 0.2529,
"step": 130
},
{
"epoch": 0.21021021021021022,
"grad_norm": 14.075587272644043,
"learning_rate": 5.560000000000001e-06,
"loss": 0.2724,
"step": 140
},
{
"epoch": 0.22522522522522523,
"grad_norm": 13.491477966308594,
"learning_rate": 5.9600000000000005e-06,
"loss": 0.2253,
"step": 150
},
{
"epoch": 0.24024024024024024,
"grad_norm": 21.4825439453125,
"learning_rate": 6.360000000000001e-06,
"loss": 0.1718,
"step": 160
},
{
"epoch": 0.2552552552552553,
"grad_norm": 12.049821853637695,
"learning_rate": 6.760000000000001e-06,
"loss": 0.2332,
"step": 170
},
{
"epoch": 0.2702702702702703,
"grad_norm": 13.302751541137695,
"learning_rate": 7.16e-06,
"loss": 0.1648,
"step": 180
},
{
"epoch": 0.2852852852852853,
"grad_norm": 9.489476203918457,
"learning_rate": 7.5600000000000005e-06,
"loss": 0.1742,
"step": 190
},
{
"epoch": 0.3003003003003003,
"grad_norm": 7.511724948883057,
"learning_rate": 7.960000000000002e-06,
"loss": 0.1392,
"step": 200
},
{
"epoch": 0.3153153153153153,
"grad_norm": 6.69696044921875,
"learning_rate": 8.36e-06,
"loss": 0.1368,
"step": 210
},
{
"epoch": 0.3303303303303303,
"grad_norm": 19.48908042907715,
"learning_rate": 8.76e-06,
"loss": 0.1694,
"step": 220
},
{
"epoch": 0.34534534534534533,
"grad_norm": 9.891258239746094,
"learning_rate": 9.16e-06,
"loss": 0.106,
"step": 230
},
{
"epoch": 0.36036036036036034,
"grad_norm": 6.785653114318848,
"learning_rate": 9.56e-06,
"loss": 0.2347,
"step": 240
},
{
"epoch": 0.37537537537537535,
"grad_norm": 11.124282836914062,
"learning_rate": 9.960000000000001e-06,
"loss": 0.1421,
"step": 250
},
{
"epoch": 0.39039039039039036,
"grad_norm": 13.130078315734863,
"learning_rate": 1.036e-05,
"loss": 0.1036,
"step": 260
},
{
"epoch": 0.40540540540540543,
"grad_norm": 6.414909362792969,
"learning_rate": 1.0760000000000002e-05,
"loss": 0.1492,
"step": 270
},
{
"epoch": 0.42042042042042044,
"grad_norm": 9.707856178283691,
"learning_rate": 1.1160000000000002e-05,
"loss": 0.1308,
"step": 280
},
{
"epoch": 0.43543543543543545,
"grad_norm": 7.036477565765381,
"learning_rate": 1.156e-05,
"loss": 0.1186,
"step": 290
},
{
"epoch": 0.45045045045045046,
"grad_norm": 5.08420991897583,
"learning_rate": 1.196e-05,
"loss": 0.1248,
"step": 300
},
{
"epoch": 0.46546546546546547,
"grad_norm": 6.691461563110352,
"learning_rate": 1.236e-05,
"loss": 0.1096,
"step": 310
},
{
"epoch": 0.4804804804804805,
"grad_norm": 8.506667137145996,
"learning_rate": 1.2760000000000001e-05,
"loss": 0.1377,
"step": 320
},
{
"epoch": 0.4954954954954955,
"grad_norm": 7.334710597991943,
"learning_rate": 1.3160000000000001e-05,
"loss": 0.1051,
"step": 330
},
{
"epoch": 0.5105105105105106,
"grad_norm": 12.647950172424316,
"learning_rate": 1.3560000000000002e-05,
"loss": 0.1136,
"step": 340
},
{
"epoch": 0.5255255255255256,
"grad_norm": 4.561657905578613,
"learning_rate": 1.396e-05,
"loss": 0.1091,
"step": 350
},
{
"epoch": 0.5405405405405406,
"grad_norm": 6.011996269226074,
"learning_rate": 1.4360000000000001e-05,
"loss": 0.0891,
"step": 360
},
{
"epoch": 0.5555555555555556,
"grad_norm": 9.521617889404297,
"learning_rate": 1.4760000000000001e-05,
"loss": 0.0702,
"step": 370
},
{
"epoch": 0.5705705705705706,
"grad_norm": 4.111408710479736,
"learning_rate": 1.516e-05,
"loss": 0.105,
"step": 380
},
{
"epoch": 0.5855855855855856,
"grad_norm": 3.560914993286133,
"learning_rate": 1.556e-05,
"loss": 0.0766,
"step": 390
},
{
"epoch": 0.6006006006006006,
"grad_norm": 2.4863393306732178,
"learning_rate": 1.5960000000000003e-05,
"loss": 0.0804,
"step": 400
},
{
"epoch": 0.6156156156156156,
"grad_norm": 8.447433471679688,
"learning_rate": 1.636e-05,
"loss": 0.0755,
"step": 410
},
{
"epoch": 0.6306306306306306,
"grad_norm": 6.882669448852539,
"learning_rate": 1.6760000000000002e-05,
"loss": 0.0712,
"step": 420
},
{
"epoch": 0.6456456456456456,
"grad_norm": 3.4703919887542725,
"learning_rate": 1.7160000000000002e-05,
"loss": 0.0546,
"step": 430
},
{
"epoch": 0.6606606606606606,
"grad_norm": 1.0200519561767578,
"learning_rate": 1.756e-05,
"loss": 0.0514,
"step": 440
},
{
"epoch": 0.6756756756756757,
"grad_norm": 5.531317710876465,
"learning_rate": 1.796e-05,
"loss": 0.0762,
"step": 450
},
{
"epoch": 0.6906906906906907,
"grad_norm": 0.6142644882202148,
"learning_rate": 1.8360000000000004e-05,
"loss": 0.0354,
"step": 460
},
{
"epoch": 0.7057057057057057,
"grad_norm": 6.253670692443848,
"learning_rate": 1.876e-05,
"loss": 0.0394,
"step": 470
},
{
"epoch": 0.7207207207207207,
"grad_norm": 6.93025016784668,
"learning_rate": 1.916e-05,
"loss": 0.0477,
"step": 480
},
{
"epoch": 0.7357357357357357,
"grad_norm": 6.35200309753418,
"learning_rate": 1.9560000000000002e-05,
"loss": 0.0327,
"step": 490
},
{
"epoch": 0.7507507507507507,
"grad_norm": 5.108675003051758,
"learning_rate": 1.9960000000000002e-05,
"loss": 0.0575,
"step": 500
},
{
"epoch": 0.7657657657657657,
"grad_norm": 4.811997413635254,
"learning_rate": 1.9962717481358742e-05,
"loss": 0.0545,
"step": 510
},
{
"epoch": 0.7807807807807807,
"grad_norm": 3.6220436096191406,
"learning_rate": 1.992129246064623e-05,
"loss": 0.0202,
"step": 520
},
{
"epoch": 0.7957957957957958,
"grad_norm": 6.347226142883301,
"learning_rate": 1.9879867439933723e-05,
"loss": 0.0431,
"step": 530
},
{
"epoch": 0.8108108108108109,
"grad_norm": 0.9845024347305298,
"learning_rate": 1.983844241922121e-05,
"loss": 0.0353,
"step": 540
},
{
"epoch": 0.8258258258258259,
"grad_norm": 6.9723663330078125,
"learning_rate": 1.97970173985087e-05,
"loss": 0.0495,
"step": 550
},
{
"epoch": 0.8408408408408409,
"grad_norm": 0.35112687945365906,
"learning_rate": 1.9755592377796192e-05,
"loss": 0.0249,
"step": 560
},
{
"epoch": 0.8558558558558559,
"grad_norm": 6.217226505279541,
"learning_rate": 1.971416735708368e-05,
"loss": 0.0367,
"step": 570
},
{
"epoch": 0.8708708708708709,
"grad_norm": 1.6721233129501343,
"learning_rate": 1.967274233637117e-05,
"loss": 0.0279,
"step": 580
},
{
"epoch": 0.8858858858858859,
"grad_norm": 6.501187324523926,
"learning_rate": 1.963131731565866e-05,
"loss": 0.037,
"step": 590
},
{
"epoch": 0.9009009009009009,
"grad_norm": 3.4928908348083496,
"learning_rate": 1.9589892294946147e-05,
"loss": 0.0341,
"step": 600
},
{
"epoch": 0.9159159159159159,
"grad_norm": 4.611274719238281,
"learning_rate": 1.954846727423364e-05,
"loss": 0.0475,
"step": 610
},
{
"epoch": 0.9309309309309309,
"grad_norm": 7.894728660583496,
"learning_rate": 1.9507042253521128e-05,
"loss": 0.0478,
"step": 620
},
{
"epoch": 0.9459459459459459,
"grad_norm": 4.463768005371094,
"learning_rate": 1.9465617232808617e-05,
"loss": 0.0303,
"step": 630
},
{
"epoch": 0.960960960960961,
"grad_norm": 3.8476738929748535,
"learning_rate": 1.9424192212096106e-05,
"loss": 0.0216,
"step": 640
},
{
"epoch": 0.975975975975976,
"grad_norm": 1.4353941679000854,
"learning_rate": 1.9382767191383598e-05,
"loss": 0.0115,
"step": 650
},
{
"epoch": 0.990990990990991,
"grad_norm": 0.5383431315422058,
"learning_rate": 1.9341342170671087e-05,
"loss": 0.0316,
"step": 660
},
{
"epoch": 1.0,
"eval_accuracy": 0.844590780809031,
"eval_f1": 0.9061312298314809,
"eval_loss": 0.21899376809597015,
"eval_roc_auc": 0.9366743383999476,
"eval_runtime": 7.3751,
"eval_samples_per_second": 720.673,
"eval_steps_per_second": 22.644,
"step": 666
},
{
"epoch": 1.006006006006006,
"grad_norm": 2.482923984527588,
"learning_rate": 1.9299917149958575e-05,
"loss": 0.0243,
"step": 670
},
{
"epoch": 1.021021021021021,
"grad_norm": 0.28770723938941956,
"learning_rate": 1.9258492129246067e-05,
"loss": 0.0174,
"step": 680
},
{
"epoch": 1.0360360360360361,
"grad_norm": 3.5773494243621826,
"learning_rate": 1.9217067108533556e-05,
"loss": 0.0317,
"step": 690
},
{
"epoch": 1.0510510510510511,
"grad_norm": 7.554081439971924,
"learning_rate": 1.9175642087821045e-05,
"loss": 0.0322,
"step": 700
},
{
"epoch": 1.0660660660660661,
"grad_norm": 0.5560117959976196,
"learning_rate": 1.9134217067108537e-05,
"loss": 0.0192,
"step": 710
},
{
"epoch": 1.0810810810810811,
"grad_norm": 2.8596460819244385,
"learning_rate": 1.9092792046396026e-05,
"loss": 0.0201,
"step": 720
},
{
"epoch": 1.0960960960960962,
"grad_norm": 2.245648145675659,
"learning_rate": 1.9051367025683514e-05,
"loss": 0.0206,
"step": 730
},
{
"epoch": 1.1111111111111112,
"grad_norm": 1.595831274986267,
"learning_rate": 1.9009942004971003e-05,
"loss": 0.0145,
"step": 740
},
{
"epoch": 1.1261261261261262,
"grad_norm": 3.3559751510620117,
"learning_rate": 1.8968516984258495e-05,
"loss": 0.0259,
"step": 750
},
{
"epoch": 1.1411411411411412,
"grad_norm": 8.872766494750977,
"learning_rate": 1.8927091963545984e-05,
"loss": 0.0316,
"step": 760
},
{
"epoch": 1.1561561561561562,
"grad_norm": 1.1749708652496338,
"learning_rate": 1.8885666942833473e-05,
"loss": 0.0335,
"step": 770
},
{
"epoch": 1.1711711711711712,
"grad_norm": 2.3856115341186523,
"learning_rate": 1.884424192212096e-05,
"loss": 0.0258,
"step": 780
},
{
"epoch": 1.1861861861861862,
"grad_norm": 0.8851059675216675,
"learning_rate": 1.880281690140845e-05,
"loss": 0.0135,
"step": 790
},
{
"epoch": 1.2012012012012012,
"grad_norm": 3.2970917224884033,
"learning_rate": 1.8761391880695942e-05,
"loss": 0.0234,
"step": 800
},
{
"epoch": 1.2162162162162162,
"grad_norm": 5.862252712249756,
"learning_rate": 1.871996685998343e-05,
"loss": 0.0265,
"step": 810
},
{
"epoch": 1.2312312312312312,
"grad_norm": 6.934352874755859,
"learning_rate": 1.867854183927092e-05,
"loss": 0.0209,
"step": 820
},
{
"epoch": 1.2462462462462462,
"grad_norm": 5.499032974243164,
"learning_rate": 1.8637116818558412e-05,
"loss": 0.0381,
"step": 830
},
{
"epoch": 1.2612612612612613,
"grad_norm": 1.9378541707992554,
"learning_rate": 1.85956917978459e-05,
"loss": 0.0355,
"step": 840
},
{
"epoch": 1.2762762762762763,
"grad_norm": 3.2807366847991943,
"learning_rate": 1.855426677713339e-05,
"loss": 0.0252,
"step": 850
},
{
"epoch": 1.2912912912912913,
"grad_norm": 0.08878616243600845,
"learning_rate": 1.851284175642088e-05,
"loss": 0.0184,
"step": 860
},
{
"epoch": 1.3063063063063063,
"grad_norm": 0.21245735883712769,
"learning_rate": 1.847141673570837e-05,
"loss": 0.0107,
"step": 870
},
{
"epoch": 1.3213213213213213,
"grad_norm": 0.23962010443210602,
"learning_rate": 1.842999171499586e-05,
"loss": 0.0236,
"step": 880
},
{
"epoch": 1.3363363363363363,
"grad_norm": 5.516748905181885,
"learning_rate": 1.838856669428335e-05,
"loss": 0.0175,
"step": 890
},
{
"epoch": 1.3513513513513513,
"grad_norm": 0.09256813675165176,
"learning_rate": 1.834714167357084e-05,
"loss": 0.0153,
"step": 900
},
{
"epoch": 1.3663663663663663,
"grad_norm": 4.04671049118042,
"learning_rate": 1.830571665285833e-05,
"loss": 0.0304,
"step": 910
},
{
"epoch": 1.3813813813813813,
"grad_norm": 0.05709666758775711,
"learning_rate": 1.8264291632145817e-05,
"loss": 0.0226,
"step": 920
},
{
"epoch": 1.3963963963963963,
"grad_norm": 2.605480194091797,
"learning_rate": 1.8222866611433306e-05,
"loss": 0.0191,
"step": 930
},
{
"epoch": 1.4114114114114114,
"grad_norm": 12.805084228515625,
"learning_rate": 1.8181441590720795e-05,
"loss": 0.023,
"step": 940
},
{
"epoch": 1.4264264264264264,
"grad_norm": 0.10644625872373581,
"learning_rate": 1.8140016570008287e-05,
"loss": 0.0138,
"step": 950
},
{
"epoch": 1.4414414414414414,
"grad_norm": 0.4645148515701294,
"learning_rate": 1.8098591549295776e-05,
"loss": 0.0207,
"step": 960
},
{
"epoch": 1.4564564564564564,
"grad_norm": 6.761512279510498,
"learning_rate": 1.8057166528583265e-05,
"loss": 0.0228,
"step": 970
},
{
"epoch": 1.4714714714714714,
"grad_norm": 0.7149978876113892,
"learning_rate": 1.8015741507870757e-05,
"loss": 0.0113,
"step": 980
},
{
"epoch": 1.4864864864864864,
"grad_norm": 4.93594217300415,
"learning_rate": 1.7974316487158245e-05,
"loss": 0.0156,
"step": 990
},
{
"epoch": 1.5015015015015014,
"grad_norm": 0.030841587111353874,
"learning_rate": 1.7932891466445734e-05,
"loss": 0.0224,
"step": 1000
},
{
"epoch": 1.5165165165165164,
"grad_norm": 3.3840746879577637,
"learning_rate": 1.7891466445733223e-05,
"loss": 0.027,
"step": 1010
},
{
"epoch": 1.5315315315315314,
"grad_norm": 0.03548554331064224,
"learning_rate": 1.7850041425020715e-05,
"loss": 0.0232,
"step": 1020
},
{
"epoch": 1.5465465465465464,
"grad_norm": 3.062281847000122,
"learning_rate": 1.7808616404308204e-05,
"loss": 0.0199,
"step": 1030
},
{
"epoch": 1.5615615615615615,
"grad_norm": 0.028117358684539795,
"learning_rate": 1.7767191383595693e-05,
"loss": 0.0196,
"step": 1040
},
{
"epoch": 1.5765765765765765,
"grad_norm": 1.8692775964736938,
"learning_rate": 1.7725766362883185e-05,
"loss": 0.0091,
"step": 1050
},
{
"epoch": 1.5915915915915915,
"grad_norm": 3.6717379093170166,
"learning_rate": 1.7684341342170673e-05,
"loss": 0.0081,
"step": 1060
},
{
"epoch": 1.6066066066066065,
"grad_norm": 0.24632543325424194,
"learning_rate": 1.7642916321458162e-05,
"loss": 0.0211,
"step": 1070
},
{
"epoch": 1.6216216216216215,
"grad_norm": 0.46948179602622986,
"learning_rate": 1.760149130074565e-05,
"loss": 0.0237,
"step": 1080
},
{
"epoch": 1.6366366366366365,
"grad_norm": 5.49431037902832,
"learning_rate": 1.756006628003314e-05,
"loss": 0.0292,
"step": 1090
},
{
"epoch": 1.6516516516516515,
"grad_norm": 1.7562233209609985,
"learning_rate": 1.7518641259320632e-05,
"loss": 0.0107,
"step": 1100
},
{
"epoch": 1.6666666666666665,
"grad_norm": 4.193264961242676,
"learning_rate": 1.747721623860812e-05,
"loss": 0.0088,
"step": 1110
},
{
"epoch": 1.6816816816816815,
"grad_norm": 2.0336873531341553,
"learning_rate": 1.743579121789561e-05,
"loss": 0.0159,
"step": 1120
},
{
"epoch": 1.6966966966966965,
"grad_norm": 0.03742986172437668,
"learning_rate": 1.7394366197183098e-05,
"loss": 0.0227,
"step": 1130
},
{
"epoch": 1.7117117117117115,
"grad_norm": 2.9706671237945557,
"learning_rate": 1.735294117647059e-05,
"loss": 0.0133,
"step": 1140
},
{
"epoch": 1.7267267267267268,
"grad_norm": 0.054504282772541046,
"learning_rate": 1.731151615575808e-05,
"loss": 0.018,
"step": 1150
},
{
"epoch": 1.7417417417417418,
"grad_norm": 0.3289143145084381,
"learning_rate": 1.7270091135045568e-05,
"loss": 0.0164,
"step": 1160
},
{
"epoch": 1.7567567567567568,
"grad_norm": 5.590298652648926,
"learning_rate": 1.722866611433306e-05,
"loss": 0.0266,
"step": 1170
},
{
"epoch": 1.7717717717717718,
"grad_norm": 3.789137601852417,
"learning_rate": 1.718724109362055e-05,
"loss": 0.0325,
"step": 1180
},
{
"epoch": 1.7867867867867868,
"grad_norm": 1.8751722574234009,
"learning_rate": 1.7145816072908037e-05,
"loss": 0.0303,
"step": 1190
},
{
"epoch": 1.8018018018018018,
"grad_norm": 0.3353935778141022,
"learning_rate": 1.710439105219553e-05,
"loss": 0.0328,
"step": 1200
},
{
"epoch": 1.8168168168168168,
"grad_norm": 2.441638231277466,
"learning_rate": 1.7062966031483018e-05,
"loss": 0.0105,
"step": 1210
},
{
"epoch": 1.8318318318318318,
"grad_norm": 1.8479589223861694,
"learning_rate": 1.7021541010770507e-05,
"loss": 0.0429,
"step": 1220
},
{
"epoch": 1.8468468468468469,
"grad_norm": 0.5461903214454651,
"learning_rate": 1.6980115990057996e-05,
"loss": 0.0208,
"step": 1230
},
{
"epoch": 1.8618618618618619,
"grad_norm": 0.17233674228191376,
"learning_rate": 1.6938690969345484e-05,
"loss": 0.0279,
"step": 1240
},
{
"epoch": 1.8768768768768769,
"grad_norm": 1.572726845741272,
"learning_rate": 1.6897265948632976e-05,
"loss": 0.0085,
"step": 1250
},
{
"epoch": 1.8918918918918919,
"grad_norm": 13.602294921875,
"learning_rate": 1.6855840927920465e-05,
"loss": 0.0304,
"step": 1260
},
{
"epoch": 1.906906906906907,
"grad_norm": 0.2937522530555725,
"learning_rate": 1.6814415907207954e-05,
"loss": 0.0182,
"step": 1270
},
{
"epoch": 1.921921921921922,
"grad_norm": 8.252511024475098,
"learning_rate": 1.6772990886495443e-05,
"loss": 0.0191,
"step": 1280
},
{
"epoch": 1.936936936936937,
"grad_norm": 3.633695602416992,
"learning_rate": 1.6731565865782935e-05,
"loss": 0.0209,
"step": 1290
},
{
"epoch": 1.951951951951952,
"grad_norm": 2.4778058528900146,
"learning_rate": 1.6690140845070424e-05,
"loss": 0.0283,
"step": 1300
},
{
"epoch": 1.966966966966967,
"grad_norm": 4.573657989501953,
"learning_rate": 1.6648715824357912e-05,
"loss": 0.0242,
"step": 1310
},
{
"epoch": 1.981981981981982,
"grad_norm": 10.803725242614746,
"learning_rate": 1.6607290803645404e-05,
"loss": 0.0154,
"step": 1320
},
{
"epoch": 1.996996996996997,
"grad_norm": 1.1839510202407837,
"learning_rate": 1.6565865782932893e-05,
"loss": 0.0164,
"step": 1330
},
{
"epoch": 2.0,
"eval_accuracy": 0.844590780809031,
"eval_f1": 0.9083219840871622,
"eval_loss": 0.21870091557502747,
"eval_roc_auc": 0.9382724799051716,
"eval_runtime": 7.4074,
"eval_samples_per_second": 717.526,
"eval_steps_per_second": 22.545,
"step": 1332
},
{
"epoch": 2.012012012012012,
"grad_norm": 2.07466197013855,
"learning_rate": 1.6524440762220382e-05,
"loss": 0.0341,
"step": 1340
},
{
"epoch": 2.027027027027027,
"grad_norm": 0.3642808794975281,
"learning_rate": 1.6483015741507874e-05,
"loss": 0.0068,
"step": 1350
},
{
"epoch": 2.042042042042042,
"grad_norm": 3.976308822631836,
"learning_rate": 1.6441590720795363e-05,
"loss": 0.0232,
"step": 1360
},
{
"epoch": 2.057057057057057,
"grad_norm": 0.7623342275619507,
"learning_rate": 1.640016570008285e-05,
"loss": 0.0055,
"step": 1370
},
{
"epoch": 2.0720720720720722,
"grad_norm": 2.239234685897827,
"learning_rate": 1.635874067937034e-05,
"loss": 0.0124,
"step": 1380
},
{
"epoch": 2.0870870870870872,
"grad_norm": 5.9343061447143555,
"learning_rate": 1.6317315658657832e-05,
"loss": 0.0116,
"step": 1390
},
{
"epoch": 2.1021021021021022,
"grad_norm": 2.6489689350128174,
"learning_rate": 1.627589063794532e-05,
"loss": 0.0126,
"step": 1400
},
{
"epoch": 2.1171171171171173,
"grad_norm": 1.19527006149292,
"learning_rate": 1.623446561723281e-05,
"loss": 0.0209,
"step": 1410
},
{
"epoch": 2.1321321321321323,
"grad_norm": 2.1353812217712402,
"learning_rate": 1.61930405965203e-05,
"loss": 0.0146,
"step": 1420
},
{
"epoch": 2.1471471471471473,
"grad_norm": 0.1424659639596939,
"learning_rate": 1.6151615575807787e-05,
"loss": 0.0266,
"step": 1430
},
{
"epoch": 2.1621621621621623,
"grad_norm": 4.604438304901123,
"learning_rate": 1.611019055509528e-05,
"loss": 0.0131,
"step": 1440
},
{
"epoch": 2.1771771771771773,
"grad_norm": 0.18747694790363312,
"learning_rate": 1.6068765534382768e-05,
"loss": 0.0166,
"step": 1450
},
{
"epoch": 2.1921921921921923,
"grad_norm": 0.10579628497362137,
"learning_rate": 1.6027340513670257e-05,
"loss": 0.0141,
"step": 1460
},
{
"epoch": 2.2072072072072073,
"grad_norm": 0.06953544914722443,
"learning_rate": 1.598591549295775e-05,
"loss": 0.0066,
"step": 1470
},
{
"epoch": 2.2222222222222223,
"grad_norm": 1.7048633098602295,
"learning_rate": 1.5944490472245238e-05,
"loss": 0.026,
"step": 1480
},
{
"epoch": 2.2372372372372373,
"grad_norm": 1.1399219036102295,
"learning_rate": 1.5903065451532727e-05,
"loss": 0.0178,
"step": 1490
},
{
"epoch": 2.2522522522522523,
"grad_norm": 0.9059074521064758,
"learning_rate": 1.586164043082022e-05,
"loss": 0.0122,
"step": 1500
},
{
"epoch": 2.2672672672672673,
"grad_norm": 0.6121827960014343,
"learning_rate": 1.5820215410107707e-05,
"loss": 0.007,
"step": 1510
},
{
"epoch": 2.2822822822822824,
"grad_norm": 4.591891288757324,
"learning_rate": 1.5778790389395196e-05,
"loss": 0.0189,
"step": 1520
},
{
"epoch": 2.2972972972972974,
"grad_norm": 6.620510101318359,
"learning_rate": 1.5737365368682688e-05,
"loss": 0.0214,
"step": 1530
},
{
"epoch": 2.3123123123123124,
"grad_norm": 3.0007002353668213,
"learning_rate": 1.5695940347970177e-05,
"loss": 0.0368,
"step": 1540
},
{
"epoch": 2.3273273273273274,
"grad_norm": 4.294153213500977,
"learning_rate": 1.5654515327257666e-05,
"loss": 0.0134,
"step": 1550
},
{
"epoch": 2.3423423423423424,
"grad_norm": 12.665278434753418,
"learning_rate": 1.5613090306545154e-05,
"loss": 0.0094,
"step": 1560
},
{
"epoch": 2.3573573573573574,
"grad_norm": 0.5548607707023621,
"learning_rate": 1.5571665285832643e-05,
"loss": 0.0145,
"step": 1570
},
{
"epoch": 2.3723723723723724,
"grad_norm": 5.412700176239014,
"learning_rate": 1.5530240265120132e-05,
"loss": 0.0199,
"step": 1580
},
{
"epoch": 2.3873873873873874,
"grad_norm": 2.9422519207000732,
"learning_rate": 1.5488815244407624e-05,
"loss": 0.0122,
"step": 1590
},
{
"epoch": 2.4024024024024024,
"grad_norm": 4.299681186676025,
"learning_rate": 1.5447390223695113e-05,
"loss": 0.0287,
"step": 1600
},
{
"epoch": 2.4174174174174174,
"grad_norm": 3.635817766189575,
"learning_rate": 1.54059652029826e-05,
"loss": 0.0145,
"step": 1610
},
{
"epoch": 2.4324324324324325,
"grad_norm": 0.06890556961297989,
"learning_rate": 1.5364540182270094e-05,
"loss": 0.0155,
"step": 1620
},
{
"epoch": 2.4474474474474475,
"grad_norm": 1.9247612953186035,
"learning_rate": 1.5323115161557582e-05,
"loss": 0.0208,
"step": 1630
},
{
"epoch": 2.4624624624624625,
"grad_norm": 0.0777619257569313,
"learning_rate": 1.528169014084507e-05,
"loss": 0.0194,
"step": 1640
},
{
"epoch": 2.4774774774774775,
"grad_norm": 0.09070724248886108,
"learning_rate": 1.524026512013256e-05,
"loss": 0.0106,
"step": 1650
},
{
"epoch": 2.4924924924924925,
"grad_norm": 3.718015670776367,
"learning_rate": 1.5198840099420052e-05,
"loss": 0.0186,
"step": 1660
},
{
"epoch": 2.5075075075075075,
"grad_norm": 0.07736914604902267,
"learning_rate": 1.515741507870754e-05,
"loss": 0.0061,
"step": 1670
},
{
"epoch": 2.5225225225225225,
"grad_norm": 7.326141834259033,
"learning_rate": 1.511599005799503e-05,
"loss": 0.0205,
"step": 1680
},
{
"epoch": 2.5375375375375375,
"grad_norm": 0.2035406529903412,
"learning_rate": 1.507456503728252e-05,
"loss": 0.0055,
"step": 1690
},
{
"epoch": 2.5525525525525525,
"grad_norm": 0.1642528474330902,
"learning_rate": 1.5033140016570009e-05,
"loss": 0.0247,
"step": 1700
},
{
"epoch": 2.5675675675675675,
"grad_norm": 0.04008019343018532,
"learning_rate": 1.4991714995857497e-05,
"loss": 0.0134,
"step": 1710
},
{
"epoch": 2.5825825825825826,
"grad_norm": 2.158572196960449,
"learning_rate": 1.495028997514499e-05,
"loss": 0.0139,
"step": 1720
},
{
"epoch": 2.5975975975975976,
"grad_norm": 6.9125494956970215,
"learning_rate": 1.4908864954432478e-05,
"loss": 0.0141,
"step": 1730
},
{
"epoch": 2.6126126126126126,
"grad_norm": 0.05439571291208267,
"learning_rate": 1.4867439933719967e-05,
"loss": 0.0172,
"step": 1740
},
{
"epoch": 2.6276276276276276,
"grad_norm": 1.7265421152114868,
"learning_rate": 1.4826014913007457e-05,
"loss": 0.029,
"step": 1750
},
{
"epoch": 2.6426426426426426,
"grad_norm": 0.15565641224384308,
"learning_rate": 1.4784589892294948e-05,
"loss": 0.0164,
"step": 1760
},
{
"epoch": 2.6576576576576576,
"grad_norm": 1.2956058979034424,
"learning_rate": 1.4743164871582437e-05,
"loss": 0.0225,
"step": 1770
},
{
"epoch": 2.6726726726726726,
"grad_norm": 0.5900393724441528,
"learning_rate": 1.4701739850869927e-05,
"loss": 0.0128,
"step": 1780
},
{
"epoch": 2.6876876876876876,
"grad_norm": 2.6349215507507324,
"learning_rate": 1.4660314830157416e-05,
"loss": 0.0048,
"step": 1790
},
{
"epoch": 2.7027027027027026,
"grad_norm": 4.905094146728516,
"learning_rate": 1.4618889809444905e-05,
"loss": 0.0148,
"step": 1800
},
{
"epoch": 2.7177177177177176,
"grad_norm": 0.15017619729042053,
"learning_rate": 1.4577464788732397e-05,
"loss": 0.022,
"step": 1810
},
{
"epoch": 2.7327327327327327,
"grad_norm": 0.6810951828956604,
"learning_rate": 1.4536039768019885e-05,
"loss": 0.0348,
"step": 1820
},
{
"epoch": 2.7477477477477477,
"grad_norm": 0.09275978058576584,
"learning_rate": 1.4494614747307374e-05,
"loss": 0.0103,
"step": 1830
},
{
"epoch": 2.7627627627627627,
"grad_norm": 0.9358335733413696,
"learning_rate": 1.4453189726594865e-05,
"loss": 0.007,
"step": 1840
},
{
"epoch": 2.7777777777777777,
"grad_norm": 0.21646398305892944,
"learning_rate": 1.4411764705882353e-05,
"loss": 0.0111,
"step": 1850
},
{
"epoch": 2.7927927927927927,
"grad_norm": 3.0269370079040527,
"learning_rate": 1.4370339685169842e-05,
"loss": 0.0064,
"step": 1860
},
{
"epoch": 2.8078078078078077,
"grad_norm": 0.03600875288248062,
"learning_rate": 1.4328914664457334e-05,
"loss": 0.0173,
"step": 1870
},
{
"epoch": 2.8228228228228227,
"grad_norm": 0.03154575824737549,
"learning_rate": 1.4287489643744823e-05,
"loss": 0.0057,
"step": 1880
},
{
"epoch": 2.8378378378378377,
"grad_norm": 2.526102304458618,
"learning_rate": 1.4246064623032312e-05,
"loss": 0.004,
"step": 1890
},
{
"epoch": 2.8528528528528527,
"grad_norm": 1.8955109119415283,
"learning_rate": 1.4204639602319802e-05,
"loss": 0.0161,
"step": 1900
},
{
"epoch": 2.8678678678678677,
"grad_norm": 0.41396257281303406,
"learning_rate": 1.4163214581607293e-05,
"loss": 0.0143,
"step": 1910
},
{
"epoch": 2.8828828828828827,
"grad_norm": 0.08320903033018112,
"learning_rate": 1.4121789560894781e-05,
"loss": 0.024,
"step": 1920
},
{
"epoch": 2.8978978978978978,
"grad_norm": 0.22456814348697662,
"learning_rate": 1.4080364540182272e-05,
"loss": 0.0111,
"step": 1930
},
{
"epoch": 2.9129129129129128,
"grad_norm": 0.3686913847923279,
"learning_rate": 1.403893951946976e-05,
"loss": 0.0244,
"step": 1940
},
{
"epoch": 2.9279279279279278,
"grad_norm": 0.21892797946929932,
"learning_rate": 1.399751449875725e-05,
"loss": 0.0176,
"step": 1950
},
{
"epoch": 2.942942942942943,
"grad_norm": 3.7241322994232178,
"learning_rate": 1.3956089478044741e-05,
"loss": 0.0103,
"step": 1960
},
{
"epoch": 2.957957957957958,
"grad_norm": 2.0452113151550293,
"learning_rate": 1.391466445733223e-05,
"loss": 0.039,
"step": 1970
},
{
"epoch": 2.972972972972973,
"grad_norm": 2.3977973461151123,
"learning_rate": 1.3873239436619719e-05,
"loss": 0.0106,
"step": 1980
},
{
"epoch": 2.987987987987988,
"grad_norm": 0.21678972244262695,
"learning_rate": 1.383181441590721e-05,
"loss": 0.0173,
"step": 1990
},
{
"epoch": 3.0,
"eval_accuracy": 0.8553151458137347,
"eval_f1": 0.9143430578453158,
"eval_loss": 0.21451762318611145,
"eval_roc_auc": 0.9423584374850139,
"eval_runtime": 7.3964,
"eval_samples_per_second": 718.591,
"eval_steps_per_second": 22.579,
"step": 1998
},
{
"epoch": 3.003003003003003,
"grad_norm": 1.4772316217422485,
"learning_rate": 1.3790389395194698e-05,
"loss": 0.0103,
"step": 2000
},
{
"epoch": 3.018018018018018,
"grad_norm": 0.7159767746925354,
"learning_rate": 1.3748964374482188e-05,
"loss": 0.0107,
"step": 2010
},
{
"epoch": 3.033033033033033,
"grad_norm": 0.16911160945892334,
"learning_rate": 1.3707539353769679e-05,
"loss": 0.0063,
"step": 2020
},
{
"epoch": 3.048048048048048,
"grad_norm": 0.11155136674642563,
"learning_rate": 1.3666114333057168e-05,
"loss": 0.0337,
"step": 2030
},
{
"epoch": 3.063063063063063,
"grad_norm": 0.32412320375442505,
"learning_rate": 1.3624689312344656e-05,
"loss": 0.0238,
"step": 2040
},
{
"epoch": 3.078078078078078,
"grad_norm": 0.03618387132883072,
"learning_rate": 1.3583264291632149e-05,
"loss": 0.0162,
"step": 2050
},
{
"epoch": 3.093093093093093,
"grad_norm": 0.7066205739974976,
"learning_rate": 1.3541839270919637e-05,
"loss": 0.0103,
"step": 2060
},
{
"epoch": 3.108108108108108,
"grad_norm": 0.11609063297510147,
"learning_rate": 1.3500414250207126e-05,
"loss": 0.0105,
"step": 2070
},
{
"epoch": 3.123123123123123,
"grad_norm": 0.32982802391052246,
"learning_rate": 1.3458989229494616e-05,
"loss": 0.0102,
"step": 2080
},
{
"epoch": 3.138138138138138,
"grad_norm": 3.4004058837890625,
"learning_rate": 1.3417564208782105e-05,
"loss": 0.0159,
"step": 2090
},
{
"epoch": 3.153153153153153,
"grad_norm": 6.178320407867432,
"learning_rate": 1.3376139188069594e-05,
"loss": 0.0232,
"step": 2100
},
{
"epoch": 3.1681681681681684,
"grad_norm": 1.1276676654815674,
"learning_rate": 1.3334714167357086e-05,
"loss": 0.0069,
"step": 2110
},
{
"epoch": 3.1831831831831834,
"grad_norm": 0.02346782386302948,
"learning_rate": 1.3293289146644575e-05,
"loss": 0.0059,
"step": 2120
},
{
"epoch": 3.1981981981981984,
"grad_norm": 4.738574028015137,
"learning_rate": 1.3251864125932064e-05,
"loss": 0.0202,
"step": 2130
},
{
"epoch": 3.2132132132132134,
"grad_norm": 6.053224086761475,
"learning_rate": 1.3210439105219554e-05,
"loss": 0.0198,
"step": 2140
},
{
"epoch": 3.2282282282282284,
"grad_norm": 3.8819286823272705,
"learning_rate": 1.3169014084507044e-05,
"loss": 0.0073,
"step": 2150
},
{
"epoch": 3.2432432432432434,
"grad_norm": 2.7025258541107178,
"learning_rate": 1.3127589063794533e-05,
"loss": 0.0213,
"step": 2160
},
{
"epoch": 3.2582582582582584,
"grad_norm": 3.3367843627929688,
"learning_rate": 1.3086164043082024e-05,
"loss": 0.01,
"step": 2170
},
{
"epoch": 3.2732732732732734,
"grad_norm": 0.6221895217895508,
"learning_rate": 1.3044739022369512e-05,
"loss": 0.0046,
"step": 2180
},
{
"epoch": 3.2882882882882885,
"grad_norm": 4.412055969238281,
"learning_rate": 1.3003314001657001e-05,
"loss": 0.0207,
"step": 2190
},
{
"epoch": 3.3033033033033035,
"grad_norm": 5.719205379486084,
"learning_rate": 1.2961888980944493e-05,
"loss": 0.01,
"step": 2200
},
{
"epoch": 3.3183183183183185,
"grad_norm": 1.341784119606018,
"learning_rate": 1.2920463960231982e-05,
"loss": 0.0079,
"step": 2210
},
{
"epoch": 3.3333333333333335,
"grad_norm": 0.6478399038314819,
"learning_rate": 1.287903893951947e-05,
"loss": 0.0104,
"step": 2220
},
{
"epoch": 3.3483483483483485,
"grad_norm": 1.90777587890625,
"learning_rate": 1.2837613918806961e-05,
"loss": 0.0063,
"step": 2230
},
{
"epoch": 3.3633633633633635,
"grad_norm": 0.18736779689788818,
"learning_rate": 1.279618889809445e-05,
"loss": 0.0142,
"step": 2240
},
{
"epoch": 3.3783783783783785,
"grad_norm": 1.921953797340393,
"learning_rate": 1.2754763877381939e-05,
"loss": 0.0056,
"step": 2250
},
{
"epoch": 3.3933933933933935,
"grad_norm": 0.2626243531703949,
"learning_rate": 1.271333885666943e-05,
"loss": 0.0135,
"step": 2260
},
{
"epoch": 3.4084084084084085,
"grad_norm": 2.2788760662078857,
"learning_rate": 1.267191383595692e-05,
"loss": 0.0085,
"step": 2270
},
{
"epoch": 3.4234234234234235,
"grad_norm": 0.39181146025657654,
"learning_rate": 1.2630488815244408e-05,
"loss": 0.0148,
"step": 2280
},
{
"epoch": 3.4384384384384385,
"grad_norm": 8.13824462890625,
"learning_rate": 1.2589063794531897e-05,
"loss": 0.0091,
"step": 2290
},
{
"epoch": 3.4534534534534536,
"grad_norm": 5.898563385009766,
"learning_rate": 1.2547638773819389e-05,
"loss": 0.0172,
"step": 2300
},
{
"epoch": 3.4684684684684686,
"grad_norm": 0.0695483535528183,
"learning_rate": 1.2506213753106878e-05,
"loss": 0.008,
"step": 2310
},
{
"epoch": 3.4834834834834836,
"grad_norm": 0.5626207590103149,
"learning_rate": 1.2464788732394367e-05,
"loss": 0.0191,
"step": 2320
},
{
"epoch": 3.4984984984984986,
"grad_norm": 0.9743795990943909,
"learning_rate": 1.2423363711681857e-05,
"loss": 0.0064,
"step": 2330
},
{
"epoch": 3.5135135135135136,
"grad_norm": 0.03379818797111511,
"learning_rate": 1.2381938690969346e-05,
"loss": 0.0063,
"step": 2340
},
{
"epoch": 3.5285285285285286,
"grad_norm": 2.3289623260498047,
"learning_rate": 1.2340513670256834e-05,
"loss": 0.0074,
"step": 2350
},
{
"epoch": 3.5435435435435436,
"grad_norm": 0.7672750949859619,
"learning_rate": 1.2299088649544327e-05,
"loss": 0.0137,
"step": 2360
},
{
"epoch": 3.5585585585585586,
"grad_norm": 0.019895190373063087,
"learning_rate": 1.2257663628831815e-05,
"loss": 0.0117,
"step": 2370
},
{
"epoch": 3.5735735735735736,
"grad_norm": 2.079782485961914,
"learning_rate": 1.2216238608119304e-05,
"loss": 0.0097,
"step": 2380
},
{
"epoch": 3.5885885885885886,
"grad_norm": 7.858700752258301,
"learning_rate": 1.2174813587406794e-05,
"loss": 0.0148,
"step": 2390
},
{
"epoch": 3.6036036036036037,
"grad_norm": 1.9548249244689941,
"learning_rate": 1.2133388566694285e-05,
"loss": 0.0132,
"step": 2400
},
{
"epoch": 3.6186186186186187,
"grad_norm": 3.6328041553497314,
"learning_rate": 1.2091963545981774e-05,
"loss": 0.0102,
"step": 2410
},
{
"epoch": 3.6336336336336337,
"grad_norm": 0.6240374445915222,
"learning_rate": 1.2050538525269264e-05,
"loss": 0.0082,
"step": 2420
},
{
"epoch": 3.6486486486486487,
"grad_norm": 0.9374043941497803,
"learning_rate": 1.2009113504556753e-05,
"loss": 0.0129,
"step": 2430
},
{
"epoch": 3.6636636636636637,
"grad_norm": 1.3998479843139648,
"learning_rate": 1.1967688483844242e-05,
"loss": 0.0112,
"step": 2440
},
{
"epoch": 3.6786786786786787,
"grad_norm": 6.0802507400512695,
"learning_rate": 1.1926263463131734e-05,
"loss": 0.0147,
"step": 2450
},
{
"epoch": 3.6936936936936937,
"grad_norm": 1.1334019899368286,
"learning_rate": 1.1884838442419222e-05,
"loss": 0.011,
"step": 2460
},
{
"epoch": 3.7087087087087087,
"grad_norm": 2.0716891288757324,
"learning_rate": 1.1843413421706711e-05,
"loss": 0.019,
"step": 2470
},
{
"epoch": 3.7237237237237237,
"grad_norm": 0.058690622448921204,
"learning_rate": 1.1801988400994202e-05,
"loss": 0.0142,
"step": 2480
},
{
"epoch": 3.7387387387387387,
"grad_norm": 0.4929686486721039,
"learning_rate": 1.176056338028169e-05,
"loss": 0.0098,
"step": 2490
},
{
"epoch": 3.7537537537537538,
"grad_norm": 0.36563634872436523,
"learning_rate": 1.1719138359569179e-05,
"loss": 0.0074,
"step": 2500
},
{
"epoch": 3.7687687687687688,
"grad_norm": 0.20172318816184998,
"learning_rate": 1.1677713338856671e-05,
"loss": 0.0076,
"step": 2510
},
{
"epoch": 3.7837837837837838,
"grad_norm": 1.6785756349563599,
"learning_rate": 1.163628831814416e-05,
"loss": 0.0087,
"step": 2520
},
{
"epoch": 3.798798798798799,
"grad_norm": 0.06056467816233635,
"learning_rate": 1.1594863297431649e-05,
"loss": 0.0071,
"step": 2530
},
{
"epoch": 3.813813813813814,
"grad_norm": 0.4795999526977539,
"learning_rate": 1.155343827671914e-05,
"loss": 0.0068,
"step": 2540
},
{
"epoch": 3.828828828828829,
"grad_norm": 1.2181782722473145,
"learning_rate": 1.151201325600663e-05,
"loss": 0.0088,
"step": 2550
},
{
"epoch": 3.843843843843844,
"grad_norm": 0.036575447767972946,
"learning_rate": 1.1470588235294118e-05,
"loss": 0.009,
"step": 2560
},
{
"epoch": 3.858858858858859,
"grad_norm": 0.7162961363792419,
"learning_rate": 1.1429163214581609e-05,
"loss": 0.011,
"step": 2570
},
{
"epoch": 3.873873873873874,
"grad_norm": 0.8698685765266418,
"learning_rate": 1.1387738193869098e-05,
"loss": 0.0063,
"step": 2580
},
{
"epoch": 3.888888888888889,
"grad_norm": 0.37558797001838684,
"learning_rate": 1.1346313173156586e-05,
"loss": 0.0064,
"step": 2590
},
{
"epoch": 3.903903903903904,
"grad_norm": 5.980177402496338,
"learning_rate": 1.1304888152444078e-05,
"loss": 0.0253,
"step": 2600
},
{
"epoch": 3.918918918918919,
"grad_norm": 0.07595256716012955,
"learning_rate": 1.1263463131731567e-05,
"loss": 0.0122,
"step": 2610
},
{
"epoch": 3.933933933933934,
"grad_norm": 3.3737852573394775,
"learning_rate": 1.1222038111019056e-05,
"loss": 0.0122,
"step": 2620
},
{
"epoch": 3.948948948948949,
"grad_norm": 0.67271488904953,
"learning_rate": 1.1180613090306546e-05,
"loss": 0.0112,
"step": 2630
},
{
"epoch": 3.963963963963964,
"grad_norm": 5.097310543060303,
"learning_rate": 1.1139188069594035e-05,
"loss": 0.0117,
"step": 2640
},
{
"epoch": 3.978978978978979,
"grad_norm": 0.0951702669262886,
"learning_rate": 1.1097763048881525e-05,
"loss": 0.0038,
"step": 2650
},
{
"epoch": 3.993993993993994,
"grad_norm": 2.21738862991333,
"learning_rate": 1.1056338028169016e-05,
"loss": 0.0095,
"step": 2660
},
{
"epoch": 4.0,
"eval_accuracy": 0.8568203198494826,
"eval_f1": 0.9156453809216656,
"eval_loss": 0.21418116986751556,
"eval_roc_auc": 0.9432802901372163,
"eval_runtime": 7.387,
"eval_samples_per_second": 719.51,
"eval_steps_per_second": 22.607,
"step": 2664
},
{
"epoch": 4.009009009009009,
"grad_norm": 1.2217696905136108,
"learning_rate": 1.1014913007456505e-05,
"loss": 0.0082,
"step": 2670
},
{
"epoch": 4.024024024024024,
"grad_norm": 0.018607912585139275,
"learning_rate": 1.0973487986743993e-05,
"loss": 0.0143,
"step": 2680
},
{
"epoch": 4.039039039039039,
"grad_norm": 0.2839476764202118,
"learning_rate": 1.0932062966031486e-05,
"loss": 0.0081,
"step": 2690
},
{
"epoch": 4.054054054054054,
"grad_norm": 0.2138938009738922,
"learning_rate": 1.0890637945318974e-05,
"loss": 0.0108,
"step": 2700
},
{
"epoch": 4.069069069069069,
"grad_norm": 1.584167718887329,
"learning_rate": 1.0849212924606463e-05,
"loss": 0.0131,
"step": 2710
},
{
"epoch": 4.084084084084084,
"grad_norm": 0.10289608687162399,
"learning_rate": 1.0807787903893953e-05,
"loss": 0.0078,
"step": 2720
},
{
"epoch": 4.099099099099099,
"grad_norm": 0.9854161739349365,
"learning_rate": 1.0766362883181442e-05,
"loss": 0.0073,
"step": 2730
},
{
"epoch": 4.114114114114114,
"grad_norm": 0.760006844997406,
"learning_rate": 1.0724937862468931e-05,
"loss": 0.0085,
"step": 2740
},
{
"epoch": 4.129129129129129,
"grad_norm": 2.729097604751587,
"learning_rate": 1.0683512841756423e-05,
"loss": 0.0086,
"step": 2750
},
{
"epoch": 4.1441441441441444,
"grad_norm": 0.011590493842959404,
"learning_rate": 1.0642087821043912e-05,
"loss": 0.0182,
"step": 2760
},
{
"epoch": 4.1591591591591595,
"grad_norm": 1.401490569114685,
"learning_rate": 1.06006628003314e-05,
"loss": 0.0052,
"step": 2770
},
{
"epoch": 4.1741741741741745,
"grad_norm": 9.527831077575684,
"learning_rate": 1.0559237779618891e-05,
"loss": 0.0058,
"step": 2780
},
{
"epoch": 4.1891891891891895,
"grad_norm": 0.16799849271774292,
"learning_rate": 1.051781275890638e-05,
"loss": 0.0019,
"step": 2790
},
{
"epoch": 4.2042042042042045,
"grad_norm": 0.15852217376232147,
"learning_rate": 1.047638773819387e-05,
"loss": 0.0215,
"step": 2800
},
{
"epoch": 4.2192192192192195,
"grad_norm": 0.03776758536696434,
"learning_rate": 1.043496271748136e-05,
"loss": 0.0087,
"step": 2810
},
{
"epoch": 4.2342342342342345,
"grad_norm": 0.038916442543268204,
"learning_rate": 1.039353769676885e-05,
"loss": 0.0048,
"step": 2820
},
{
"epoch": 4.2492492492492495,
"grad_norm": 0.1437731832265854,
"learning_rate": 1.0352112676056338e-05,
"loss": 0.007,
"step": 2830
},
{
"epoch": 4.2642642642642645,
"grad_norm": 0.5450798273086548,
"learning_rate": 1.031068765534383e-05,
"loss": 0.0061,
"step": 2840
},
{
"epoch": 4.2792792792792795,
"grad_norm": 0.19095036387443542,
"learning_rate": 1.0269262634631319e-05,
"loss": 0.003,
"step": 2850
},
{
"epoch": 4.2942942942942945,
"grad_norm": 0.08140315115451813,
"learning_rate": 1.0227837613918808e-05,
"loss": 0.0088,
"step": 2860
},
{
"epoch": 4.3093093093093096,
"grad_norm": 6.2226667404174805,
"learning_rate": 1.0186412593206298e-05,
"loss": 0.0038,
"step": 2870
},
{
"epoch": 4.324324324324325,
"grad_norm": 0.4789693355560303,
"learning_rate": 1.0144987572493787e-05,
"loss": 0.0087,
"step": 2880
},
{
"epoch": 4.33933933933934,
"grad_norm": 0.22297614812850952,
"learning_rate": 1.0103562551781276e-05,
"loss": 0.0104,
"step": 2890
},
{
"epoch": 4.354354354354355,
"grad_norm": 3.0525765419006348,
"learning_rate": 1.0062137531068768e-05,
"loss": 0.0095,
"step": 2900
},
{
"epoch": 4.36936936936937,
"grad_norm": 0.9432957768440247,
"learning_rate": 1.0020712510356256e-05,
"loss": 0.0063,
"step": 2910
},
{
"epoch": 4.384384384384385,
"grad_norm": 0.20507968962192535,
"learning_rate": 9.979287489643745e-06,
"loss": 0.0081,
"step": 2920
},
{
"epoch": 4.3993993993994,
"grad_norm": 0.13641659915447235,
"learning_rate": 9.937862468931236e-06,
"loss": 0.0162,
"step": 2930
},
{
"epoch": 4.414414414414415,
"grad_norm": 0.16560953855514526,
"learning_rate": 9.896437448218726e-06,
"loss": 0.0084,
"step": 2940
},
{
"epoch": 4.42942942942943,
"grad_norm": 0.11404027044773102,
"learning_rate": 9.855012427506215e-06,
"loss": 0.0104,
"step": 2950
},
{
"epoch": 4.444444444444445,
"grad_norm": 0.016587508842349052,
"learning_rate": 9.813587406793704e-06,
"loss": 0.0116,
"step": 2960
},
{
"epoch": 4.45945945945946,
"grad_norm": 0.623416006565094,
"learning_rate": 9.772162386081194e-06,
"loss": 0.0105,
"step": 2970
},
{
"epoch": 4.474474474474475,
"grad_norm": 0.0888320803642273,
"learning_rate": 9.730737365368683e-06,
"loss": 0.0063,
"step": 2980
},
{
"epoch": 4.48948948948949,
"grad_norm": 1.374616026878357,
"learning_rate": 9.689312344656173e-06,
"loss": 0.0077,
"step": 2990
},
{
"epoch": 4.504504504504505,
"grad_norm": 0.029000842943787575,
"learning_rate": 9.647887323943664e-06,
"loss": 0.0098,
"step": 3000
},
{
"epoch": 4.51951951951952,
"grad_norm": 2.8579955101013184,
"learning_rate": 9.606462303231152e-06,
"loss": 0.0043,
"step": 3010
},
{
"epoch": 4.534534534534535,
"grad_norm": 3.9038970470428467,
"learning_rate": 9.565037282518643e-06,
"loss": 0.0194,
"step": 3020
},
{
"epoch": 4.54954954954955,
"grad_norm": 0.4741935431957245,
"learning_rate": 9.523612261806132e-06,
"loss": 0.0065,
"step": 3030
},
{
"epoch": 4.564564564564565,
"grad_norm": 1.3021470308303833,
"learning_rate": 9.482187241093622e-06,
"loss": 0.0048,
"step": 3040
},
{
"epoch": 4.57957957957958,
"grad_norm": 0.17900153994560242,
"learning_rate": 9.44076222038111e-06,
"loss": 0.0103,
"step": 3050
},
{
"epoch": 4.594594594594595,
"grad_norm": 0.8498497605323792,
"learning_rate": 9.399337199668601e-06,
"loss": 0.0084,
"step": 3060
},
{
"epoch": 4.60960960960961,
"grad_norm": 0.6373823285102844,
"learning_rate": 9.35791217895609e-06,
"loss": 0.0061,
"step": 3070
},
{
"epoch": 4.624624624624625,
"grad_norm": 0.9934857487678528,
"learning_rate": 9.31648715824358e-06,
"loss": 0.0069,
"step": 3080
},
{
"epoch": 4.63963963963964,
"grad_norm": 0.12153011560440063,
"learning_rate": 9.275062137531069e-06,
"loss": 0.0166,
"step": 3090
},
{
"epoch": 4.654654654654655,
"grad_norm": 0.3886626660823822,
"learning_rate": 9.23363711681856e-06,
"loss": 0.0124,
"step": 3100
},
{
"epoch": 4.66966966966967,
"grad_norm": 3.08005690574646,
"learning_rate": 9.192212096106048e-06,
"loss": 0.0126,
"step": 3110
},
{
"epoch": 4.684684684684685,
"grad_norm": 2.155261278152466,
"learning_rate": 9.150787075393539e-06,
"loss": 0.0189,
"step": 3120
},
{
"epoch": 4.6996996996997,
"grad_norm": 3.455352544784546,
"learning_rate": 9.109362054681027e-06,
"loss": 0.0043,
"step": 3130
},
{
"epoch": 4.714714714714715,
"grad_norm": 0.19982695579528809,
"learning_rate": 9.067937033968518e-06,
"loss": 0.0144,
"step": 3140
},
{
"epoch": 4.72972972972973,
"grad_norm": 0.28114157915115356,
"learning_rate": 9.026512013256007e-06,
"loss": 0.0081,
"step": 3150
},
{
"epoch": 4.744744744744745,
"grad_norm": 3.1953513622283936,
"learning_rate": 8.985086992543497e-06,
"loss": 0.0125,
"step": 3160
},
{
"epoch": 4.75975975975976,
"grad_norm": 9.547475814819336,
"learning_rate": 8.943661971830987e-06,
"loss": 0.0107,
"step": 3170
},
{
"epoch": 4.774774774774775,
"grad_norm": 0.3070663809776306,
"learning_rate": 8.902236951118476e-06,
"loss": 0.0094,
"step": 3180
},
{
"epoch": 4.78978978978979,
"grad_norm": 1.0773664712905884,
"learning_rate": 8.860811930405967e-06,
"loss": 0.0151,
"step": 3190
},
{
"epoch": 4.804804804804805,
"grad_norm": 0.05860641971230507,
"learning_rate": 8.819386909693455e-06,
"loss": 0.0091,
"step": 3200
},
{
"epoch": 4.81981981981982,
"grad_norm": 0.1941542774438858,
"learning_rate": 8.777961888980944e-06,
"loss": 0.0081,
"step": 3210
},
{
"epoch": 4.834834834834835,
"grad_norm": 0.0734286680817604,
"learning_rate": 8.736536868268435e-06,
"loss": 0.0055,
"step": 3220
},
{
"epoch": 4.84984984984985,
"grad_norm": 0.18101628124713898,
"learning_rate": 8.695111847555925e-06,
"loss": 0.0103,
"step": 3230
},
{
"epoch": 4.864864864864865,
"grad_norm": 0.027951765805482864,
"learning_rate": 8.653686826843414e-06,
"loss": 0.0034,
"step": 3240
},
{
"epoch": 4.87987987987988,
"grad_norm": 0.8830456137657166,
"learning_rate": 8.612261806130904e-06,
"loss": 0.0152,
"step": 3250
},
{
"epoch": 4.894894894894895,
"grad_norm": 0.04218738153576851,
"learning_rate": 8.570836785418395e-06,
"loss": 0.0141,
"step": 3260
},
{
"epoch": 4.90990990990991,
"grad_norm": 0.07865418493747711,
"learning_rate": 8.529411764705883e-06,
"loss": 0.0039,
"step": 3270
},
{
"epoch": 4.924924924924925,
"grad_norm": 2.8771729469299316,
"learning_rate": 8.487986743993372e-06,
"loss": 0.0092,
"step": 3280
},
{
"epoch": 4.93993993993994,
"grad_norm": 0.7154805064201355,
"learning_rate": 8.446561723280862e-06,
"loss": 0.0191,
"step": 3290
},
{
"epoch": 4.954954954954955,
"grad_norm": 1.2003430128097534,
"learning_rate": 8.405136702568351e-06,
"loss": 0.0165,
"step": 3300
},
{
"epoch": 4.96996996996997,
"grad_norm": 0.19323813915252686,
"learning_rate": 8.363711681855842e-06,
"loss": 0.0157,
"step": 3310
},
{
"epoch": 4.984984984984985,
"grad_norm": 0.06954075396060944,
"learning_rate": 8.322286661143332e-06,
"loss": 0.0116,
"step": 3320
},
{
"epoch": 5.0,
"grad_norm": 0.9249535202980042,
"learning_rate": 8.28086164043082e-06,
"loss": 0.0151,
"step": 3330
},
{
"epoch": 5.0,
"eval_accuracy": 0.8613358419567263,
"eval_f1": 0.9182426116344287,
"eval_loss": 0.21194002032279968,
"eval_roc_auc": 0.9441613070085622,
"eval_runtime": 7.442,
"eval_samples_per_second": 714.187,
"eval_steps_per_second": 22.44,
"step": 3330
},
{
"epoch": 5.015015015015015,
"grad_norm": 1.1550884246826172,
"learning_rate": 8.239436619718311e-06,
"loss": 0.0097,
"step": 3340
},
{
"epoch": 5.03003003003003,
"grad_norm": 3.4667506217956543,
"learning_rate": 8.1980115990058e-06,
"loss": 0.0076,
"step": 3350
},
{
"epoch": 5.045045045045045,
"grad_norm": 0.022501222789287567,
"learning_rate": 8.15658657829329e-06,
"loss": 0.0174,
"step": 3360
},
{
"epoch": 5.06006006006006,
"grad_norm": 0.2297457605600357,
"learning_rate": 8.11516155758078e-06,
"loss": 0.0121,
"step": 3370
},
{
"epoch": 5.075075075075075,
"grad_norm": 1.9849693775177002,
"learning_rate": 8.07373653686827e-06,
"loss": 0.0069,
"step": 3380
},
{
"epoch": 5.09009009009009,
"grad_norm": 3.121706962585449,
"learning_rate": 8.032311516155758e-06,
"loss": 0.0069,
"step": 3390
},
{
"epoch": 5.105105105105105,
"grad_norm": 0.28827306628227234,
"learning_rate": 7.990886495443249e-06,
"loss": 0.0037,
"step": 3400
},
{
"epoch": 5.12012012012012,
"grad_norm": 0.26308292150497437,
"learning_rate": 7.949461474730738e-06,
"loss": 0.003,
"step": 3410
},
{
"epoch": 5.135135135135135,
"grad_norm": 1.9140377044677734,
"learning_rate": 7.908036454018228e-06,
"loss": 0.0051,
"step": 3420
},
{
"epoch": 5.15015015015015,
"grad_norm": 0.019069673493504524,
"learning_rate": 7.866611433305717e-06,
"loss": 0.0087,
"step": 3430
},
{
"epoch": 5.165165165165165,
"grad_norm": 0.09611427038908005,
"learning_rate": 7.825186412593207e-06,
"loss": 0.0042,
"step": 3440
},
{
"epoch": 5.18018018018018,
"grad_norm": 0.04649114981293678,
"learning_rate": 7.783761391880696e-06,
"loss": 0.0075,
"step": 3450
},
{
"epoch": 5.195195195195195,
"grad_norm": 0.7499408721923828,
"learning_rate": 7.742336371168186e-06,
"loss": 0.0064,
"step": 3460
},
{
"epoch": 5.21021021021021,
"grad_norm": 0.08816402405500412,
"learning_rate": 7.700911350455675e-06,
"loss": 0.0052,
"step": 3470
},
{
"epoch": 5.225225225225225,
"grad_norm": 0.6211869120597839,
"learning_rate": 7.659486329743165e-06,
"loss": 0.0123,
"step": 3480
},
{
"epoch": 5.24024024024024,
"grad_norm": 2.1001930236816406,
"learning_rate": 7.618061309030655e-06,
"loss": 0.0053,
"step": 3490
},
{
"epoch": 5.255255255255255,
"grad_norm": 5.740816593170166,
"learning_rate": 7.576636288318145e-06,
"loss": 0.0064,
"step": 3500
},
{
"epoch": 5.27027027027027,
"grad_norm": 3.0299530029296875,
"learning_rate": 7.535211267605634e-06,
"loss": 0.007,
"step": 3510
},
{
"epoch": 5.285285285285285,
"grad_norm": 0.03936946764588356,
"learning_rate": 7.493786246893125e-06,
"loss": 0.015,
"step": 3520
},
{
"epoch": 5.3003003003003,
"grad_norm": 0.11476922035217285,
"learning_rate": 7.4523612261806134e-06,
"loss": 0.0218,
"step": 3530
},
{
"epoch": 5.315315315315315,
"grad_norm": 0.18106217682361603,
"learning_rate": 7.410936205468103e-06,
"loss": 0.0034,
"step": 3540
},
{
"epoch": 5.33033033033033,
"grad_norm": 3.0002665519714355,
"learning_rate": 7.3695111847555935e-06,
"loss": 0.0104,
"step": 3550
},
{
"epoch": 5.345345345345345,
"grad_norm": 0.30738332867622375,
"learning_rate": 7.328086164043082e-06,
"loss": 0.0148,
"step": 3560
},
{
"epoch": 5.36036036036036,
"grad_norm": 0.021894821897149086,
"learning_rate": 7.286661143330573e-06,
"loss": 0.0178,
"step": 3570
},
{
"epoch": 5.375375375375375,
"grad_norm": 8.09498405456543,
"learning_rate": 7.245236122618062e-06,
"loss": 0.0114,
"step": 3580
},
{
"epoch": 5.39039039039039,
"grad_norm": 0.6404626369476318,
"learning_rate": 7.203811101905551e-06,
"loss": 0.0145,
"step": 3590
},
{
"epoch": 5.405405405405405,
"grad_norm": 0.25979307293891907,
"learning_rate": 7.162386081193041e-06,
"loss": 0.0134,
"step": 3600
},
{
"epoch": 5.42042042042042,
"grad_norm": 4.571927547454834,
"learning_rate": 7.120961060480531e-06,
"loss": 0.0146,
"step": 3610
},
{
"epoch": 5.435435435435435,
"grad_norm": 0.42414915561676025,
"learning_rate": 7.0795360397680206e-06,
"loss": 0.002,
"step": 3620
},
{
"epoch": 5.45045045045045,
"grad_norm": 0.13032342493534088,
"learning_rate": 7.03811101905551e-06,
"loss": 0.0042,
"step": 3630
},
{
"epoch": 5.465465465465465,
"grad_norm": 4.202887535095215,
"learning_rate": 6.996685998343001e-06,
"loss": 0.0068,
"step": 3640
},
{
"epoch": 5.48048048048048,
"grad_norm": 1.762563943862915,
"learning_rate": 6.955260977630489e-06,
"loss": 0.0082,
"step": 3650
},
{
"epoch": 5.495495495495495,
"grad_norm": 0.03661961853504181,
"learning_rate": 6.913835956917979e-06,
"loss": 0.0176,
"step": 3660
},
{
"epoch": 5.51051051051051,
"grad_norm": 0.043978456407785416,
"learning_rate": 6.872410936205469e-06,
"loss": 0.0027,
"step": 3670
},
{
"epoch": 5.525525525525525,
"grad_norm": 0.4701463282108307,
"learning_rate": 6.830985915492958e-06,
"loss": 0.0135,
"step": 3680
},
{
"epoch": 5.54054054054054,
"grad_norm": 2.628462076187134,
"learning_rate": 6.7895608947804485e-06,
"loss": 0.0068,
"step": 3690
},
{
"epoch": 5.555555555555555,
"grad_norm": 0.09407415241003036,
"learning_rate": 6.748135874067938e-06,
"loss": 0.0167,
"step": 3700
},
{
"epoch": 5.57057057057057,
"grad_norm": 0.7712174654006958,
"learning_rate": 6.706710853355427e-06,
"loss": 0.0116,
"step": 3710
},
{
"epoch": 5.585585585585585,
"grad_norm": 0.07994990795850754,
"learning_rate": 6.665285832642917e-06,
"loss": 0.0077,
"step": 3720
},
{
"epoch": 5.6006006006006,
"grad_norm": 0.057626720517873764,
"learning_rate": 6.623860811930406e-06,
"loss": 0.0168,
"step": 3730
},
{
"epoch": 5.615615615615615,
"grad_norm": 3.927039861679077,
"learning_rate": 6.582435791217896e-06,
"loss": 0.0073,
"step": 3740
},
{
"epoch": 5.63063063063063,
"grad_norm": 0.04220356419682503,
"learning_rate": 6.541010770505386e-06,
"loss": 0.0126,
"step": 3750
},
{
"epoch": 5.645645645645645,
"grad_norm": 0.30007079243659973,
"learning_rate": 6.499585749792875e-06,
"loss": 0.0092,
"step": 3760
},
{
"epoch": 5.66066066066066,
"grad_norm": 0.011833683587610722,
"learning_rate": 6.458160729080365e-06,
"loss": 0.0077,
"step": 3770
},
{
"epoch": 5.675675675675675,
"grad_norm": 0.1129905954003334,
"learning_rate": 6.416735708367855e-06,
"loss": 0.0019,
"step": 3780
},
{
"epoch": 5.6906906906906904,
"grad_norm": 0.40713539719581604,
"learning_rate": 6.3753106876553436e-06,
"loss": 0.0112,
"step": 3790
},
{
"epoch": 5.7057057057057055,
"grad_norm": 0.6869781017303467,
"learning_rate": 6.333885666942834e-06,
"loss": 0.0044,
"step": 3800
},
{
"epoch": 5.7207207207207205,
"grad_norm": 2.5867929458618164,
"learning_rate": 6.292460646230324e-06,
"loss": 0.0098,
"step": 3810
},
{
"epoch": 5.7357357357357355,
"grad_norm": 0.9201477766036987,
"learning_rate": 6.251035625517813e-06,
"loss": 0.0093,
"step": 3820
},
{
"epoch": 5.7507507507507505,
"grad_norm": 1.0136293172836304,
"learning_rate": 6.209610604805303e-06,
"loss": 0.0166,
"step": 3830
},
{
"epoch": 5.7657657657657655,
"grad_norm": 6.813093662261963,
"learning_rate": 6.168185584092793e-06,
"loss": 0.0066,
"step": 3840
},
{
"epoch": 5.7807807807807805,
"grad_norm": 0.02762005664408207,
"learning_rate": 6.126760563380282e-06,
"loss": 0.0039,
"step": 3850
},
{
"epoch": 5.7957957957957955,
"grad_norm": 2.779971122741699,
"learning_rate": 6.0853355426677715e-06,
"loss": 0.023,
"step": 3860
},
{
"epoch": 5.8108108108108105,
"grad_norm": 5.017881393432617,
"learning_rate": 6.043910521955262e-06,
"loss": 0.0131,
"step": 3870
},
{
"epoch": 5.8258258258258255,
"grad_norm": 0.02220802940428257,
"learning_rate": 6.002485501242751e-06,
"loss": 0.0069,
"step": 3880
},
{
"epoch": 5.8408408408408405,
"grad_norm": 0.02759486250579357,
"learning_rate": 5.961060480530241e-06,
"loss": 0.0026,
"step": 3890
},
{
"epoch": 5.8558558558558556,
"grad_norm": 0.3462941646575928,
"learning_rate": 5.919635459817731e-06,
"loss": 0.0092,
"step": 3900
},
{
"epoch": 5.870870870870871,
"grad_norm": 0.22936759889125824,
"learning_rate": 5.8782104391052195e-06,
"loss": 0.0043,
"step": 3910
},
{
"epoch": 5.885885885885886,
"grad_norm": 0.4390968978404999,
"learning_rate": 5.83678541839271e-06,
"loss": 0.0052,
"step": 3920
},
{
"epoch": 5.900900900900901,
"grad_norm": 0.6948964595794678,
"learning_rate": 5.7953603976801995e-06,
"loss": 0.0047,
"step": 3930
},
{
"epoch": 5.915915915915916,
"grad_norm": 0.47237828373908997,
"learning_rate": 5.753935376967689e-06,
"loss": 0.0031,
"step": 3940
},
{
"epoch": 5.930930930930931,
"grad_norm": 0.18768872320652008,
"learning_rate": 5.712510356255179e-06,
"loss": 0.007,
"step": 3950
},
{
"epoch": 5.945945945945946,
"grad_norm": 0.328825443983078,
"learning_rate": 5.671085335542669e-06,
"loss": 0.0109,
"step": 3960
},
{
"epoch": 5.960960960960961,
"grad_norm": 0.2502267360687256,
"learning_rate": 5.629660314830158e-06,
"loss": 0.0078,
"step": 3970
},
{
"epoch": 5.975975975975976,
"grad_norm": 1.2609971761703491,
"learning_rate": 5.588235294117647e-06,
"loss": 0.0162,
"step": 3980
},
{
"epoch": 5.990990990990991,
"grad_norm": 0.03614773973822594,
"learning_rate": 5.546810273405138e-06,
"loss": 0.0038,
"step": 3990
},
{
"epoch": 6.0,
"eval_accuracy": 0.863593603010348,
"eval_f1": 0.9184759166067578,
"eval_loss": 0.21304447948932648,
"eval_roc_auc": 0.9443766695354825,
"eval_runtime": 7.4272,
"eval_samples_per_second": 715.612,
"eval_steps_per_second": 22.485,
"step": 3996
},
{
"epoch": 6.006006006006006,
"grad_norm": 0.12015757709741592,
"learning_rate": 5.505385252692627e-06,
"loss": 0.0053,
"step": 4000
},
{
"epoch": 6.021021021021021,
"grad_norm": 4.061729907989502,
"learning_rate": 5.463960231980116e-06,
"loss": 0.0082,
"step": 4010
},
{
"epoch": 6.036036036036036,
"grad_norm": 0.02439393475651741,
"learning_rate": 5.422535211267607e-06,
"loss": 0.0034,
"step": 4020
},
{
"epoch": 6.051051051051051,
"grad_norm": 0.24121862649917603,
"learning_rate": 5.381110190555095e-06,
"loss": 0.0049,
"step": 4030
},
{
"epoch": 6.066066066066066,
"grad_norm": 0.1391245424747467,
"learning_rate": 5.339685169842586e-06,
"loss": 0.0032,
"step": 4040
},
{
"epoch": 6.081081081081081,
"grad_norm": 1.9418208599090576,
"learning_rate": 5.2982601491300745e-06,
"loss": 0.0099,
"step": 4050
},
{
"epoch": 6.096096096096096,
"grad_norm": 0.706449568271637,
"learning_rate": 5.256835128417564e-06,
"loss": 0.0088,
"step": 4060
},
{
"epoch": 6.111111111111111,
"grad_norm": 0.6291218996047974,
"learning_rate": 5.2154101077050546e-06,
"loss": 0.0065,
"step": 4070
},
{
"epoch": 6.126126126126126,
"grad_norm": 0.024717414751648903,
"learning_rate": 5.173985086992543e-06,
"loss": 0.0044,
"step": 4080
},
{
"epoch": 6.141141141141141,
"grad_norm": 0.2399992197751999,
"learning_rate": 5.132560066280034e-06,
"loss": 0.0024,
"step": 4090
},
{
"epoch": 6.156156156156156,
"grad_norm": 0.02905816026031971,
"learning_rate": 5.091135045567523e-06,
"loss": 0.0051,
"step": 4100
},
{
"epoch": 6.171171171171171,
"grad_norm": 1.4288544654846191,
"learning_rate": 5.049710024855012e-06,
"loss": 0.008,
"step": 4110
},
{
"epoch": 6.186186186186186,
"grad_norm": 0.4185940623283386,
"learning_rate": 5.0082850041425025e-06,
"loss": 0.0063,
"step": 4120
},
{
"epoch": 6.201201201201201,
"grad_norm": 1.1119290590286255,
"learning_rate": 4.966859983429992e-06,
"loss": 0.0112,
"step": 4130
},
{
"epoch": 6.216216216216216,
"grad_norm": 0.015655577182769775,
"learning_rate": 4.925434962717482e-06,
"loss": 0.0067,
"step": 4140
},
{
"epoch": 6.231231231231231,
"grad_norm": 3.4215376377105713,
"learning_rate": 4.884009942004971e-06,
"loss": 0.0076,
"step": 4150
},
{
"epoch": 6.246246246246246,
"grad_norm": 0.013630112633109093,
"learning_rate": 4.842584921292461e-06,
"loss": 0.0025,
"step": 4160
},
{
"epoch": 6.261261261261261,
"grad_norm": 0.8447656035423279,
"learning_rate": 4.8011599005799504e-06,
"loss": 0.0065,
"step": 4170
},
{
"epoch": 6.276276276276276,
"grad_norm": 0.1344449371099472,
"learning_rate": 4.75973487986744e-06,
"loss": 0.0084,
"step": 4180
},
{
"epoch": 6.291291291291291,
"grad_norm": 2.4140686988830566,
"learning_rate": 4.71830985915493e-06,
"loss": 0.0034,
"step": 4190
},
{
"epoch": 6.306306306306306,
"grad_norm": 0.021810833364725113,
"learning_rate": 4.67688483844242e-06,
"loss": 0.0029,
"step": 4200
},
{
"epoch": 6.321321321321321,
"grad_norm": 0.031651388853788376,
"learning_rate": 4.63545981772991e-06,
"loss": 0.0045,
"step": 4210
},
{
"epoch": 6.336336336336337,
"grad_norm": 0.2835276424884796,
"learning_rate": 4.594034797017398e-06,
"loss": 0.017,
"step": 4220
},
{
"epoch": 6.351351351351352,
"grad_norm": 0.08151044696569443,
"learning_rate": 4.552609776304889e-06,
"loss": 0.0017,
"step": 4230
},
{
"epoch": 6.366366366366367,
"grad_norm": 0.6036203503608704,
"learning_rate": 4.511184755592378e-06,
"loss": 0.0029,
"step": 4240
},
{
"epoch": 6.381381381381382,
"grad_norm": 0.13752371072769165,
"learning_rate": 4.469759734879868e-06,
"loss": 0.0033,
"step": 4250
},
{
"epoch": 6.396396396396397,
"grad_norm": 0.14697255194187164,
"learning_rate": 4.4283347141673576e-06,
"loss": 0.0083,
"step": 4260
},
{
"epoch": 6.411411411411412,
"grad_norm": 4.969063758850098,
"learning_rate": 4.386909693454847e-06,
"loss": 0.0092,
"step": 4270
},
{
"epoch": 6.426426426426427,
"grad_norm": 0.26685309410095215,
"learning_rate": 4.345484672742337e-06,
"loss": 0.0101,
"step": 4280
},
{
"epoch": 6.441441441441442,
"grad_norm": 0.030430737882852554,
"learning_rate": 4.304059652029826e-06,
"loss": 0.003,
"step": 4290
},
{
"epoch": 6.456456456456457,
"grad_norm": 0.5854690074920654,
"learning_rate": 4.262634631317316e-06,
"loss": 0.0043,
"step": 4300
},
{
"epoch": 6.471471471471472,
"grad_norm": 0.02023538574576378,
"learning_rate": 4.2212096106048055e-06,
"loss": 0.0093,
"step": 4310
},
{
"epoch": 6.486486486486487,
"grad_norm": 0.02463540807366371,
"learning_rate": 4.179784589892295e-06,
"loss": 0.0023,
"step": 4320
},
{
"epoch": 6.501501501501502,
"grad_norm": 0.25623008608818054,
"learning_rate": 4.138359569179785e-06,
"loss": 0.0083,
"step": 4330
},
{
"epoch": 6.516516516516517,
"grad_norm": 4.038478851318359,
"learning_rate": 4.096934548467274e-06,
"loss": 0.0076,
"step": 4340
},
{
"epoch": 6.531531531531532,
"grad_norm": 3.8557655811309814,
"learning_rate": 4.055509527754764e-06,
"loss": 0.0117,
"step": 4350
},
{
"epoch": 6.546546546546547,
"grad_norm": 0.7435543537139893,
"learning_rate": 4.014084507042254e-06,
"loss": 0.0076,
"step": 4360
},
{
"epoch": 6.561561561561562,
"grad_norm": 0.22783824801445007,
"learning_rate": 3.972659486329744e-06,
"loss": 0.0047,
"step": 4370
},
{
"epoch": 6.576576576576577,
"grad_norm": 4.226299285888672,
"learning_rate": 3.931234465617233e-06,
"loss": 0.0121,
"step": 4380
},
{
"epoch": 6.591591591591592,
"grad_norm": 0.0962708368897438,
"learning_rate": 3.889809444904723e-06,
"loss": 0.0088,
"step": 4390
},
{
"epoch": 6.606606606606607,
"grad_norm": 2.5976169109344482,
"learning_rate": 3.848384424192213e-06,
"loss": 0.0067,
"step": 4400
},
{
"epoch": 6.621621621621622,
"grad_norm": 0.026430007070302963,
"learning_rate": 3.806959403479702e-06,
"loss": 0.0029,
"step": 4410
},
{
"epoch": 6.636636636636637,
"grad_norm": 0.3470136523246765,
"learning_rate": 3.765534382767192e-06,
"loss": 0.0047,
"step": 4420
},
{
"epoch": 6.651651651651652,
"grad_norm": 0.1191195622086525,
"learning_rate": 3.7241093620546814e-06,
"loss": 0.0025,
"step": 4430
},
{
"epoch": 6.666666666666667,
"grad_norm": 0.15261198580265045,
"learning_rate": 3.682684341342171e-06,
"loss": 0.015,
"step": 4440
},
{
"epoch": 6.681681681681682,
"grad_norm": 1.6035737991333008,
"learning_rate": 3.64125932062966e-06,
"loss": 0.0128,
"step": 4450
},
{
"epoch": 6.696696696696697,
"grad_norm": 0.059660863131284714,
"learning_rate": 3.5998342999171506e-06,
"loss": 0.0032,
"step": 4460
},
{
"epoch": 6.711711711711712,
"grad_norm": 0.0641784518957138,
"learning_rate": 3.5584092792046398e-06,
"loss": 0.0105,
"step": 4470
},
{
"epoch": 6.726726726726727,
"grad_norm": 0.028454996645450592,
"learning_rate": 3.5169842584921293e-06,
"loss": 0.0033,
"step": 4480
},
{
"epoch": 6.741741741741742,
"grad_norm": 0.8149629235267639,
"learning_rate": 3.4755592377796194e-06,
"loss": 0.0031,
"step": 4490
},
{
"epoch": 6.756756756756757,
"grad_norm": 0.023963823914527893,
"learning_rate": 3.434134217067109e-06,
"loss": 0.0098,
"step": 4500
},
{
"epoch": 6.771771771771772,
"grad_norm": 0.24265415966510773,
"learning_rate": 3.392709196354598e-06,
"loss": 0.0071,
"step": 4510
},
{
"epoch": 6.786786786786787,
"grad_norm": 0.018639802932739258,
"learning_rate": 3.351284175642088e-06,
"loss": 0.0072,
"step": 4520
},
{
"epoch": 6.801801801801802,
"grad_norm": 2.4583423137664795,
"learning_rate": 3.3098591549295777e-06,
"loss": 0.0175,
"step": 4530
},
{
"epoch": 6.816816816816817,
"grad_norm": 0.10339108109474182,
"learning_rate": 3.2684341342170673e-06,
"loss": 0.0162,
"step": 4540
},
{
"epoch": 6.831831831831832,
"grad_norm": 0.41008320450782776,
"learning_rate": 3.2270091135045573e-06,
"loss": 0.0034,
"step": 4550
},
{
"epoch": 6.846846846846847,
"grad_norm": 0.056475620716810226,
"learning_rate": 3.185584092792047e-06,
"loss": 0.0092,
"step": 4560
},
{
"epoch": 6.861861861861862,
"grad_norm": 0.3767000138759613,
"learning_rate": 3.144159072079536e-06,
"loss": 0.0097,
"step": 4570
},
{
"epoch": 6.876876876876877,
"grad_norm": 0.08370574563741684,
"learning_rate": 3.102734051367026e-06,
"loss": 0.0063,
"step": 4580
},
{
"epoch": 6.891891891891892,
"grad_norm": 0.25333690643310547,
"learning_rate": 3.0613090306545157e-06,
"loss": 0.0031,
"step": 4590
},
{
"epoch": 6.906906906906907,
"grad_norm": 0.06466072797775269,
"learning_rate": 3.0198840099420052e-06,
"loss": 0.0091,
"step": 4600
},
{
"epoch": 6.921921921921922,
"grad_norm": 0.45550736784935,
"learning_rate": 2.9784589892294944e-06,
"loss": 0.0063,
"step": 4610
},
{
"epoch": 6.936936936936937,
"grad_norm": 0.2586912512779236,
"learning_rate": 2.937033968516985e-06,
"loss": 0.0038,
"step": 4620
},
{
"epoch": 6.951951951951952,
"grad_norm": 0.020179102197289467,
"learning_rate": 2.895608947804474e-06,
"loss": 0.0084,
"step": 4630
},
{
"epoch": 6.966966966966967,
"grad_norm": 0.016084246337413788,
"learning_rate": 2.8541839270919636e-06,
"loss": 0.0112,
"step": 4640
},
{
"epoch": 6.981981981981982,
"grad_norm": 2.5298447608947754,
"learning_rate": 2.8127589063794536e-06,
"loss": 0.0085,
"step": 4650
},
{
"epoch": 6.996996996996997,
"grad_norm": 1.6728397607803345,
"learning_rate": 2.771333885666943e-06,
"loss": 0.0092,
"step": 4660
},
{
"epoch": 7.0,
"eval_accuracy": 0.8630291627469426,
"eval_f1": 0.9187405866743169,
"eval_loss": 0.21907110512256622,
"eval_roc_auc": 0.9451891020630947,
"eval_runtime": 7.3889,
"eval_samples_per_second": 719.318,
"eval_steps_per_second": 22.601,
"step": 4662
}
],
"logging_steps": 10,
"max_steps": 5328,
"num_input_tokens_seen": 0,
"num_train_epochs": 8,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 4368118206154752.0,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}