{
"best_global_step": 1200,
"best_metric": 0.01394818,
"best_model_checkpoint": "/home/serusr01/new/output/v48-20251225-153152/checkpoint-1200",
"epoch": 0.5176876617773943,
"eval_steps": 100,
"global_step": 1200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"acc": 0.5208333333333334,
"epoch": 0.0004314063848144953,
"grad_norm": 144.0,
"learning_rate": 5.999997244730563e-06,
"loss": 1.0161079168319702,
"step": 1
},
{
"acc": 0.8512820512820513,
"epoch": 0.0021570319240724763,
"grad_norm": 12.3125,
"learning_rate": 5.9999311185171385e-06,
"loss": 0.40382882952690125,
"step": 5
},
{
"acc": 0.8622047244094488,
"epoch": 0.004314063848144953,
"grad_norm": 18.75,
"learning_rate": 5.999724477231659e-06,
"loss": 0.35493576526641846,
"step": 10
},
{
"acc": 0.8986083499005965,
"epoch": 0.0064710957722174285,
"grad_norm": 17.0,
"learning_rate": 5.9993800856327355e-06,
"loss": 0.29610111713409426,
"step": 15
},
{
"acc": 0.8972868217054264,
"epoch": 0.008628127696289905,
"grad_norm": 9.125,
"learning_rate": 5.998897959535169e-06,
"loss": 0.302515435218811,
"step": 20
},
{
"acc": 0.876953125,
"epoch": 0.010785159620362382,
"grad_norm": 14.9375,
"learning_rate": 5.998278121078668e-06,
"loss": 0.3312162160873413,
"step": 25
},
{
"acc": 0.8975903614457831,
"epoch": 0.012942191544434857,
"grad_norm": 13.375,
"learning_rate": 5.997520598726825e-06,
"loss": 0.30696473121643064,
"step": 30
},
{
"acc": 0.8742632612966601,
"epoch": 0.015099223468507334,
"grad_norm": 13.25,
"learning_rate": 5.996625427265816e-06,
"loss": 0.36511917114257814,
"step": 35
},
{
"acc": 0.8631790744466801,
"epoch": 0.01725625539257981,
"grad_norm": 9.3125,
"learning_rate": 5.995592647802802e-06,
"loss": 0.338372015953064,
"step": 40
},
{
"acc": 0.8964803312629399,
"epoch": 0.019413287316652286,
"grad_norm": 6.0,
"learning_rate": 5.9944223077640325e-06,
"loss": 0.32863070964813235,
"step": 45
},
{
"acc": 0.8957915831663327,
"epoch": 0.021570319240724764,
"grad_norm": 7.96875,
"learning_rate": 5.993114460892682e-06,
"loss": 0.27804901599884035,
"step": 50
},
{
"acc": 0.8773006134969326,
"epoch": 0.02372735116479724,
"grad_norm": 8.9375,
"learning_rate": 5.9916691672463725e-06,
"loss": 0.31299686431884766,
"step": 55
},
{
"acc": 0.9154929577464789,
"epoch": 0.025884383088869714,
"grad_norm": 9.1875,
"learning_rate": 5.990086493194415e-06,
"loss": 0.2637639045715332,
"step": 60
},
{
"acc": 0.8927125506072875,
"epoch": 0.028041415012942193,
"grad_norm": 9.5,
"learning_rate": 5.988366511414766e-06,
"loss": 0.30198609828948975,
"step": 65
},
{
"acc": 0.9156118143459916,
"epoch": 0.030198446937014668,
"grad_norm": 9.0625,
"learning_rate": 5.986509300890691e-06,
"loss": 0.24096782207489015,
"step": 70
},
{
"acc": 0.8798449612403101,
"epoch": 0.032355478861087146,
"grad_norm": 7.4375,
"learning_rate": 5.984514946907133e-06,
"loss": 0.2953530550003052,
"step": 75
},
{
"acc": 0.8818737270875764,
"epoch": 0.03451251078515962,
"grad_norm": 15.9375,
"learning_rate": 5.982383541046798e-06,
"loss": 0.3139867067337036,
"step": 80
},
{
"acc": 0.9300567107750473,
"epoch": 0.036669542709232096,
"grad_norm": 6.5,
"learning_rate": 5.98011518118595e-06,
"loss": 0.20404579639434814,
"step": 85
},
{
"acc": 0.9036885245901639,
"epoch": 0.03882657463330457,
"grad_norm": 9.75,
"learning_rate": 5.977709971489917e-06,
"loss": 0.2777871131896973,
"step": 90
},
{
"acc": 0.8767676767676768,
"epoch": 0.040983606557377046,
"grad_norm": 12.3125,
"learning_rate": 5.975168022408304e-06,
"loss": 0.31117370128631594,
"step": 95
},
{
"acc": 0.9202334630350194,
"epoch": 0.04314063848144953,
"grad_norm": 5.9375,
"learning_rate": 5.972489450669929e-06,
"loss": 0.23061840534210204,
"step": 100
},
{
"epoch": 0.04314063848144953,
"eval_acc": 0.9116072164099185,
"eval_loss": 0.031446851789951324,
"eval_mrr": 0.8872438524590165,
"eval_ndcg": 0.9145527118078347,
"eval_runtime": 92.9513,
"eval_samples_per_second": 21.0,
"eval_steps_per_second": 10.5,
"step": 100
},
{
"acc": 0.9188118811881189,
"epoch": 0.045297670405522,
"grad_norm": 10.6875,
"learning_rate": 5.969674379277452e-06,
"loss": 0.253974175453186,
"step": 105
},
{
"acc": 0.916030534351145,
"epoch": 0.04745470232959448,
"grad_norm": 8.5,
"learning_rate": 5.9667229375017345e-06,
"loss": 0.21623244285583496,
"step": 110
},
{
"acc": 0.9288461538461539,
"epoch": 0.04961173425366695,
"grad_norm": 9.4375,
"learning_rate": 5.9636352608759e-06,
"loss": 0.2198498487472534,
"step": 115
},
{
"acc": 0.8913480885311871,
"epoch": 0.05176876617773943,
"grad_norm": 10.1875,
"learning_rate": 5.960411491189113e-06,
"loss": 0.3081125497817993,
"step": 120
},
{
"acc": 0.8951612903225806,
"epoch": 0.05392579810181191,
"grad_norm": 10.125,
"learning_rate": 5.957051776480063e-06,
"loss": 0.2795823335647583,
"step": 125
},
{
"acc": 0.91,
"epoch": 0.056082830025884385,
"grad_norm": 10.375,
"learning_rate": 5.953556271030172e-06,
"loss": 0.24584596157073973,
"step": 130
},
{
"acc": 0.9195402298850575,
"epoch": 0.05823986194995686,
"grad_norm": 10.0,
"learning_rate": 5.949925135356506e-06,
"loss": 0.20374622344970703,
"step": 135
},
{
"acc": 0.9190283400809717,
"epoch": 0.060396893874029335,
"grad_norm": 13.25,
"learning_rate": 5.946158536204403e-06,
"loss": 0.2638064384460449,
"step": 140
},
{
"acc": 0.9001956947162426,
"epoch": 0.06255392579810182,
"grad_norm": 7.21875,
"learning_rate": 5.942256646539821e-06,
"loss": 0.2462130069732666,
"step": 145
},
{
"acc": 0.9201520912547528,
"epoch": 0.06471095772217429,
"grad_norm": 7.21875,
"learning_rate": 5.9382196455413914e-06,
"loss": 0.25420570373535156,
"step": 150
},
{
"acc": 0.9288702928870293,
"epoch": 0.06686798964624677,
"grad_norm": 10.1875,
"learning_rate": 5.9340477185921895e-06,
"loss": 0.2146221160888672,
"step": 155
},
{
"acc": 0.904397705544933,
"epoch": 0.06902502157031924,
"grad_norm": 27.875,
"learning_rate": 5.9297410572712245e-06,
"loss": 0.2619572877883911,
"step": 160
},
{
"acc": 0.903041825095057,
"epoch": 0.07118205349439172,
"grad_norm": 8.1875,
"learning_rate": 5.925299859344643e-06,
"loss": 0.2647380352020264,
"step": 165
},
{
"acc": 0.9238095238095239,
"epoch": 0.07333908541846419,
"grad_norm": 9.75,
"learning_rate": 5.9207243287566444e-06,
"loss": 0.2059628963470459,
"step": 170
},
{
"acc": 0.9080234833659491,
"epoch": 0.07549611734253667,
"grad_norm": 19.0,
"learning_rate": 5.916014675620117e-06,
"loss": 0.22357699871063233,
"step": 175
},
{
"acc": 0.9107883817427386,
"epoch": 0.07765314926660914,
"grad_norm": 12.125,
"learning_rate": 5.911171116206986e-06,
"loss": 0.23458666801452638,
"step": 180
},
{
"acc": 0.9092664092664092,
"epoch": 0.07981018119068162,
"grad_norm": 10.3125,
"learning_rate": 5.9061938729382915e-06,
"loss": 0.2300126552581787,
"step": 185
},
{
"acc": 0.924901185770751,
"epoch": 0.08196721311475409,
"grad_norm": 12.25,
"learning_rate": 5.901083174373961e-06,
"loss": 0.21468648910522461,
"step": 190
},
{
"acc": 0.9180327868852459,
"epoch": 0.08412424503882658,
"grad_norm": 9.375,
"learning_rate": 5.895839255202328e-06,
"loss": 0.2179854154586792,
"step": 195
},
{
"acc": 0.935871743486974,
"epoch": 0.08628127696289906,
"grad_norm": 11.9375,
"learning_rate": 5.8904623562293435e-06,
"loss": 0.22542862892150878,
"step": 200
},
{
"epoch": 0.08628127696289906,
"eval_acc": 0.9252821484471538,
"eval_loss": 0.02712932601571083,
"eval_mrr": 0.9001439305230289,
"eval_ndcg": 0.9243608674894062,
"eval_runtime": 85.9276,
"eval_samples_per_second": 22.717,
"eval_steps_per_second": 11.358,
"step": 200
},
{
"acc": 0.9358178053830227,
"epoch": 0.08843830888697153,
"grad_norm": 5.96875,
"learning_rate": 5.884952724367524e-06,
"loss": 0.20223827362060548,
"step": 205
},
{
"acc": 0.9375,
"epoch": 0.090595340811044,
"grad_norm": 7.1875,
"learning_rate": 5.879310612624611e-06,
"loss": 0.19913212060928345,
"step": 210
},
{
"acc": 0.9217557251908397,
"epoch": 0.09275237273511648,
"grad_norm": 18.5,
"learning_rate": 5.873536280091955e-06,
"loss": 0.21275293827056885,
"step": 215
},
{
"acc": 0.9144050104384134,
"epoch": 0.09490940465918896,
"grad_norm": 14.375,
"learning_rate": 5.867629991932611e-06,
"loss": 0.23634729385375977,
"step": 220
},
{
"acc": 0.9239766081871345,
"epoch": 0.09706643658326143,
"grad_norm": 13.125,
"learning_rate": 5.861592019369172e-06,
"loss": 0.2160120725631714,
"step": 225
},
{
"acc": 0.8967611336032388,
"epoch": 0.0992234685073339,
"grad_norm": 12.0625,
"learning_rate": 5.855422639671309e-06,
"loss": 0.25560975074768066,
"step": 230
},
{
"acc": 0.9405940594059405,
"epoch": 0.10138050043140638,
"grad_norm": 11.3125,
"learning_rate": 5.849122136143034e-06,
"loss": 0.20292723178863525,
"step": 235
},
{
"acc": 0.9289827255278311,
"epoch": 0.10353753235547886,
"grad_norm": 16.125,
"learning_rate": 5.842690798109697e-06,
"loss": 0.19165314435958863,
"step": 240
},
{
"acc": 0.9137931034482759,
"epoch": 0.10569456427955133,
"grad_norm": 14.375,
"learning_rate": 5.8361289209047004e-06,
"loss": 0.22526559829711915,
"step": 245
},
{
"acc": 0.9123505976095617,
"epoch": 0.10785159620362382,
"grad_norm": 11.375,
"learning_rate": 5.829436805855932e-06,
"loss": 0.1998949646949768,
"step": 250
},
{
"acc": 0.9298245614035088,
"epoch": 0.1100086281276963,
"grad_norm": 9.3125,
"learning_rate": 5.82261476027193e-06,
"loss": 0.211327862739563,
"step": 255
},
{
"acc": 0.9357429718875502,
"epoch": 0.11216566005176877,
"grad_norm": 23.375,
"learning_rate": 5.8156630974277715e-06,
"loss": 0.21703736782073973,
"step": 260
},
{
"acc": 0.9092783505154639,
"epoch": 0.11432269197584125,
"grad_norm": 17.875,
"learning_rate": 5.808582136550686e-06,
"loss": 0.2739748954772949,
"step": 265
},
{
"acc": 0.9468302658486708,
"epoch": 0.11647972389991372,
"grad_norm": 9.9375,
"learning_rate": 5.8013722028053985e-06,
"loss": 0.14410748481750488,
"step": 270
},
{
"acc": 0.9310344827586207,
"epoch": 0.1186367558239862,
"grad_norm": 8.5,
"learning_rate": 5.794033627279193e-06,
"loss": 0.1934072494506836,
"step": 275
},
{
"acc": 0.9155470249520153,
"epoch": 0.12079378774805867,
"grad_norm": 11.4375,
"learning_rate": 5.786566746966714e-06,
"loss": 0.20481424331665038,
"step": 280
},
{
"acc": 0.9216494845360824,
"epoch": 0.12295081967213115,
"grad_norm": 12.5,
"learning_rate": 5.778971904754485e-06,
"loss": 0.22041404247283936,
"step": 285
},
{
"acc": 0.917864476386037,
"epoch": 0.12510785159620363,
"grad_norm": 19.375,
"learning_rate": 5.771249449405169e-06,
"loss": 0.20896604061126708,
"step": 290
},
{
"acc": 0.950592885375494,
"epoch": 0.1272648835202761,
"grad_norm": 16.875,
"learning_rate": 5.763399735541551e-06,
"loss": 0.1629479169845581,
"step": 295
},
{
"acc": 0.9405737704918032,
"epoch": 0.12942191544434858,
"grad_norm": 13.25,
"learning_rate": 5.755423123630251e-06,
"loss": 0.19808459281921387,
"step": 300
},
{
"epoch": 0.12942191544434858,
"eval_acc": 0.9337671966389324,
"eval_loss": 0.024512404575943947,
"eval_mrr": 0.9131787909836067,
"eval_ndcg": 0.9342680843294187,
"eval_runtime": 79.6383,
"eval_samples_per_second": 24.511,
"eval_steps_per_second": 12.255,
"step": 300
},
{
"acc": 0.9308943089430894,
"epoch": 0.13157894736842105,
"grad_norm": 14.125,
"learning_rate": 5.747319979965173e-06,
"loss": 0.20357720851898192,
"step": 305
},
{
"acc": 0.9433962264150944,
"epoch": 0.13373597929249353,
"grad_norm": 14.6875,
"learning_rate": 5.739090676650683e-06,
"loss": 0.15039076805114746,
"step": 310
},
{
"acc": 0.9392712550607287,
"epoch": 0.135893011216566,
"grad_norm": 8.625,
"learning_rate": 5.730735591584529e-06,
"loss": 0.16305069923400878,
"step": 315
},
{
"acc": 0.9192307692307692,
"epoch": 0.13805004314063848,
"grad_norm": 12.75,
"learning_rate": 5.722255108440474e-06,
"loss": 0.2407283067703247,
"step": 320
},
{
"acc": 0.9298597194388778,
"epoch": 0.14020707506471095,
"grad_norm": 12.125,
"learning_rate": 5.713649616650687e-06,
"loss": 0.2122425317764282,
"step": 325
},
{
"acc": 0.9352226720647774,
"epoch": 0.14236410698878343,
"grad_norm": 11.125,
"learning_rate": 5.7049195113878585e-06,
"loss": 0.19069076776504518,
"step": 330
},
{
"acc": 0.9189765458422174,
"epoch": 0.14452113891285592,
"grad_norm": 17.75,
"learning_rate": 5.696065193547054e-06,
"loss": 0.22681286334991455,
"step": 335
},
{
"acc": 0.943579766536965,
"epoch": 0.14667817083692838,
"grad_norm": 14.8125,
"learning_rate": 5.6870870697273e-06,
"loss": 0.1799636721611023,
"step": 340
},
{
"acc": 0.9392712550607287,
"epoch": 0.14883520276100087,
"grad_norm": 8.0625,
"learning_rate": 5.677985552212919e-06,
"loss": 0.21220760345458983,
"step": 345
},
{
"acc": 0.9386138613861386,
"epoch": 0.15099223468507333,
"grad_norm": 15.0625,
"learning_rate": 5.668761058954594e-06,
"loss": 0.19215874671936034,
"step": 350
},
{
"acc": 0.9487704918032787,
"epoch": 0.15314926660914582,
"grad_norm": 9.75,
"learning_rate": 5.659414013550172e-06,
"loss": 0.17874677181243898,
"step": 355
},
{
"acc": 0.9449901768172888,
"epoch": 0.15530629853321828,
"grad_norm": 15.1875,
"learning_rate": 5.649944845225219e-06,
"loss": 0.17652757167816163,
"step": 360
},
{
"acc": 0.9281314168377823,
"epoch": 0.15746333045729077,
"grad_norm": 28.25,
"learning_rate": 5.6403539888133056e-06,
"loss": 0.2142080545425415,
"step": 365
},
{
"acc": 0.950920245398773,
"epoch": 0.15962036238136323,
"grad_norm": 12.0625,
"learning_rate": 5.63064188473604e-06,
"loss": 0.17437468767166137,
"step": 370
},
{
"acc": 0.9422680412371134,
"epoch": 0.16177739430543572,
"grad_norm": 19.75,
"learning_rate": 5.6208089789828435e-06,
"loss": 0.18122278451919555,
"step": 375
},
{
"acc": 0.9439071566731141,
"epoch": 0.16393442622950818,
"grad_norm": 13.8125,
"learning_rate": 5.610855723090466e-06,
"loss": 0.18091570138931273,
"step": 380
},
{
"acc": 0.923728813559322,
"epoch": 0.16609145815358067,
"grad_norm": 29.875,
"learning_rate": 5.600782574122259e-06,
"loss": 0.24442174434661865,
"step": 385
},
{
"acc": 0.9310344827586207,
"epoch": 0.16824849007765316,
"grad_norm": 16.75,
"learning_rate": 5.590589994647182e-06,
"loss": 0.20708949565887452,
"step": 390
},
{
"acc": 0.9395161290322581,
"epoch": 0.17040552200172562,
"grad_norm": 8.0625,
"learning_rate": 5.58027845271856e-06,
"loss": 0.21145825386047362,
"step": 395
},
{
"acc": 0.9618473895582329,
"epoch": 0.1725625539257981,
"grad_norm": 8.5,
"learning_rate": 5.569848421852592e-06,
"loss": 0.12078192234039306,
"step": 400
},
{
"epoch": 0.1725625539257981,
"eval_acc": 0.9410989373094983,
"eval_loss": 0.02222530171275139,
"eval_mrr": 0.9228056693989071,
"eval_ndcg": 0.9416691570915218,
"eval_runtime": 87.651,
"eval_samples_per_second": 22.27,
"eval_steps_per_second": 11.135,
"step": 400
},
{
"acc": 0.951310861423221,
"epoch": 0.17471958584987057,
"grad_norm": 9.25,
"learning_rate": 5.559300381006607e-06,
"loss": 0.1467392086982727,
"step": 405
},
{
"acc": 0.9362139917695473,
"epoch": 0.17687661777394306,
"grad_norm": 9.5625,
"learning_rate": 5.548634814557066e-06,
"loss": 0.16467268466949464,
"step": 410
},
{
"acc": 0.9474671669793621,
"epoch": 0.17903364969801552,
"grad_norm": 21.0,
"learning_rate": 5.537852212277326e-06,
"loss": 0.16810253858566285,
"step": 415
},
{
"acc": 0.9329388560157791,
"epoch": 0.181190681622088,
"grad_norm": 21.75,
"learning_rate": 5.5269530693151425e-06,
"loss": 0.15131351947784424,
"step": 420
},
{
"acc": 0.9265536723163842,
"epoch": 0.18334771354616047,
"grad_norm": 26.25,
"learning_rate": 5.5159378861699356e-06,
"loss": 0.2086423635482788,
"step": 425
},
{
"acc": 0.9393939393939394,
"epoch": 0.18550474547023296,
"grad_norm": 9.875,
"learning_rate": 5.504807168669804e-06,
"loss": 0.19425034523010254,
"step": 430
},
{
"acc": 0.9387351778656127,
"epoch": 0.18766177739430542,
"grad_norm": 40.0,
"learning_rate": 5.4935614279482984e-06,
"loss": 0.1988288640975952,
"step": 435
},
{
"acc": 0.9350912778904665,
"epoch": 0.1898188093183779,
"grad_norm": 29.125,
"learning_rate": 5.482201180420952e-06,
"loss": 0.1814996600151062,
"step": 440
},
{
"acc": 0.9528688524590164,
"epoch": 0.1919758412424504,
"grad_norm": 17.25,
"learning_rate": 5.4707269477615584e-06,
"loss": 0.1311618208885193,
"step": 445
},
{
"acc": 0.9548872180451128,
"epoch": 0.19413287316652286,
"grad_norm": 21.375,
"learning_rate": 5.4591392568782275e-06,
"loss": 0.15495063066482545,
"step": 450
},
{
"acc": 0.9633204633204633,
"epoch": 0.19628990509059535,
"grad_norm": 14.8125,
"learning_rate": 5.447438639889178e-06,
"loss": 0.12518519163131714,
"step": 455
},
{
"acc": 0.9411764705882353,
"epoch": 0.1984469370146678,
"grad_norm": 17.875,
"learning_rate": 5.435625634098311e-06,
"loss": 0.17223405838012695,
"step": 460
},
{
"acc": 0.9486166007905138,
"epoch": 0.2006039689387403,
"grad_norm": 19.0,
"learning_rate": 5.423700781970527e-06,
"loss": 0.14980016946792601,
"step": 465
},
{
"acc": 0.9354838709677419,
"epoch": 0.20276100086281276,
"grad_norm": 23.0,
"learning_rate": 5.411664631106827e-06,
"loss": 0.1699580192565918,
"step": 470
},
{
"acc": 0.9409368635437881,
"epoch": 0.20491803278688525,
"grad_norm": 11.6875,
"learning_rate": 5.399517734219159e-06,
"loss": 0.2162698268890381,
"step": 475
},
{
"acc": 0.9535353535353536,
"epoch": 0.2070750647109577,
"grad_norm": 16.625,
"learning_rate": 5.387260649105032e-06,
"loss": 0.15655677318572997,
"step": 480
},
{
"acc": 0.9541984732824428,
"epoch": 0.2092320966350302,
"grad_norm": 14.4375,
"learning_rate": 5.374893938621913e-06,
"loss": 0.15797934532165528,
"step": 485
},
{
"acc": 0.9343629343629344,
"epoch": 0.21138912855910266,
"grad_norm": 6.59375,
"learning_rate": 5.362418170661375e-06,
"loss": 0.2039581060409546,
"step": 490
},
{
"acc": 0.9559386973180076,
"epoch": 0.21354616048317515,
"grad_norm": 16.625,
"learning_rate": 5.3498339181230125e-06,
"loss": 0.1275785207748413,
"step": 495
},
{
"acc": 0.9521988527724665,
"epoch": 0.21570319240724764,
"grad_norm": 8.1875,
"learning_rate": 5.3371417588881456e-06,
"loss": 0.16547932624816894,
"step": 500
},
{
"epoch": 0.21570319240724764,
"eval_acc": 0.9440645852211879,
"eval_loss": 0.0200980044901371,
"eval_mrr": 0.9341749609679937,
"eval_ndcg": 0.9502093836313069,
"eval_runtime": 89.2845,
"eval_samples_per_second": 21.863,
"eval_steps_per_second": 10.931,
"step": 500
},
{
"acc": 0.9288617886178862,
"epoch": 0.2178602243313201,
"grad_norm": 17.0,
"learning_rate": 5.324342275793272e-06,
"loss": 0.22141244411468505,
"step": 505
},
{
"acc": 0.9502982107355865,
"epoch": 0.2200172562553926,
"grad_norm": 11.0,
"learning_rate": 5.3114360566033085e-06,
"loss": 0.13880524635314942,
"step": 510
},
{
"acc": 0.9284253578732107,
"epoch": 0.22217428817946505,
"grad_norm": 10.125,
"learning_rate": 5.298423693984598e-06,
"loss": 0.22597496509552,
"step": 515
},
{
"acc": 0.9553398058252427,
"epoch": 0.22433132010353754,
"grad_norm": 20.75,
"learning_rate": 5.285305785477699e-06,
"loss": 0.16823456287384034,
"step": 520
},
{
"acc": 0.9405940594059405,
"epoch": 0.22648835202761,
"grad_norm": 9.875,
"learning_rate": 5.272082933469936e-06,
"loss": 0.18091850280761718,
"step": 525
},
{
"acc": 0.9634888438133874,
"epoch": 0.2286453839516825,
"grad_norm": 9.6875,
"learning_rate": 5.258755745167744e-06,
"loss": 0.13490262031555175,
"step": 530
},
{
"acc": 0.9320754716981132,
"epoch": 0.23080241587575495,
"grad_norm": 18.625,
"learning_rate": 5.245324832568787e-06,
"loss": 0.19664840698242186,
"step": 535
},
{
"acc": 0.9470588235294117,
"epoch": 0.23295944779982744,
"grad_norm": 10.3125,
"learning_rate": 5.2317908124338475e-06,
"loss": 0.14916815757751464,
"step": 540
},
{
"acc": 0.9350912778904665,
"epoch": 0.2351164797238999,
"grad_norm": 12.0625,
"learning_rate": 5.21815430625851e-06,
"loss": 0.1888782262802124,
"step": 545
},
{
"acc": 0.9365079365079365,
"epoch": 0.2372735116479724,
"grad_norm": 14.6875,
"learning_rate": 5.204415940244618e-06,
"loss": 0.18109892606735228,
"step": 550
},
{
"acc": 0.9479768786127167,
"epoch": 0.23943054357204488,
"grad_norm": 35.0,
"learning_rate": 5.1905763452715215e-06,
"loss": 0.150074303150177,
"step": 555
},
{
"acc": 0.9408284023668639,
"epoch": 0.24158757549611734,
"grad_norm": 10.0625,
"learning_rate": 5.176636156867102e-06,
"loss": 0.13501374721527098,
"step": 560
},
{
"acc": 0.9490196078431372,
"epoch": 0.24374460742018983,
"grad_norm": 11.5625,
"learning_rate": 5.162596015178593e-06,
"loss": 0.14429715871810914,
"step": 565
},
{
"acc": 0.961038961038961,
"epoch": 0.2459016393442623,
"grad_norm": 4.53125,
"learning_rate": 5.14845656494318e-06,
"loss": 0.10108070373535157,
"step": 570
},
{
"acc": 0.9427402862985685,
"epoch": 0.24805867126833478,
"grad_norm": 13.5,
"learning_rate": 5.134218455458396e-06,
"loss": 0.16440529823303224,
"step": 575
},
{
"acc": 0.9512670565302144,
"epoch": 0.25021570319240727,
"grad_norm": 23.0,
"learning_rate": 5.119882340552303e-06,
"loss": 0.15792548656463623,
"step": 580
},
{
"acc": 0.9310344827586207,
"epoch": 0.25237273511647973,
"grad_norm": 20.375,
"learning_rate": 5.105448878553472e-06,
"loss": 0.17942277193069459,
"step": 585
},
{
"acc": 0.9586614173228346,
"epoch": 0.2545297670405522,
"grad_norm": 6.21875,
"learning_rate": 5.0909187322607434e-06,
"loss": 0.1267695426940918,
"step": 590
},
{
"acc": 0.9421157684630739,
"epoch": 0.25668679896462465,
"grad_norm": 10.8125,
"learning_rate": 5.076292568912801e-06,
"loss": 0.17552590370178223,
"step": 595
},
{
"acc": 0.9418837675350702,
"epoch": 0.25884383088869717,
"grad_norm": 15.5625,
"learning_rate": 5.061571060157525e-06,
"loss": 0.1582822322845459,
"step": 600
},
{
"epoch": 0.25884383088869717,
"eval_acc": 0.9523848751956504,
"eval_loss": 0.01805831491947174,
"eval_mrr": 0.9341017759562842,
"eval_ndcg": 0.9502097307474537,
"eval_runtime": 80.1164,
"eval_samples_per_second": 24.365,
"eval_steps_per_second": 12.182,
"step": 600
},
{
"acc": 0.9516806722689075,
"epoch": 0.26100086281276963,
"grad_norm": 9.5625,
"learning_rate": 5.04675488202115e-06,
"loss": 0.1585369348526001,
"step": 605
},
{
"acc": 0.967479674796748,
"epoch": 0.2631578947368421,
"grad_norm": 14.375,
"learning_rate": 5.031844714877224e-06,
"loss": 0.14685235023498536,
"step": 610
},
{
"acc": 0.9669902912621359,
"epoch": 0.2653149266609146,
"grad_norm": 11.875,
"learning_rate": 5.016841243415359e-06,
"loss": 0.09848676323890686,
"step": 615
},
{
"acc": 0.9692622950819673,
"epoch": 0.26747195858498707,
"grad_norm": 11.875,
"learning_rate": 5.001745156609801e-06,
"loss": 0.09488856792449951,
"step": 620
},
{
"acc": 0.962,
"epoch": 0.26962899050905953,
"grad_norm": 8.625,
"learning_rate": 4.9865571476877775e-06,
"loss": 0.10182394981384277,
"step": 625
},
{
"acc": 0.9538152610441767,
"epoch": 0.271786022433132,
"grad_norm": 13.0,
"learning_rate": 4.9712779140976725e-06,
"loss": 0.14965349435806274,
"step": 630
},
{
"acc": 0.9563567362428842,
"epoch": 0.2739430543572045,
"grad_norm": 15.5625,
"learning_rate": 4.9559081574769965e-06,
"loss": 0.14634023904800414,
"step": 635
},
{
"acc": 0.9402390438247012,
"epoch": 0.27610008628127697,
"grad_norm": 19.875,
"learning_rate": 4.9404485836201695e-06,
"loss": 0.15552909374237062,
"step": 640
},
{
"acc": 0.9487666034155597,
"epoch": 0.27825711820534943,
"grad_norm": 38.25,
"learning_rate": 4.924899902446105e-06,
"loss": 0.16740797758102416,
"step": 645
},
{
"acc": 0.9620253164556962,
"epoch": 0.2804141501294219,
"grad_norm": 8.9375,
"learning_rate": 4.909262827965613e-06,
"loss": 0.13096400499343872,
"step": 650
},
{
"acc": 0.9373737373737374,
"epoch": 0.2825711820534944,
"grad_norm": 23.375,
"learning_rate": 4.893538078248613e-06,
"loss": 0.19330756664276122,
"step": 655
},
{
"acc": 0.9327902240325866,
"epoch": 0.28472821397756687,
"grad_norm": 31.25,
"learning_rate": 4.877726375391156e-06,
"loss": 0.17176212072372438,
"step": 660
},
{
"acc": 0.9481037924151696,
"epoch": 0.28688524590163933,
"grad_norm": 17.125,
"learning_rate": 4.861828445482269e-06,
"loss": 0.15666646957397462,
"step": 665
},
{
"acc": 0.9557344064386318,
"epoch": 0.28904227782571185,
"grad_norm": 15.875,
"learning_rate": 4.8458450185706095e-06,
"loss": 0.12006416320800781,
"step": 670
},
{
"acc": 0.9530612244897959,
"epoch": 0.2911993097497843,
"grad_norm": 13.375,
"learning_rate": 4.829776828630942e-06,
"loss": 0.12216727733612061,
"step": 675
},
{
"acc": 0.9454545454545454,
"epoch": 0.29335634167385677,
"grad_norm": 12.8125,
"learning_rate": 4.813624613530434e-06,
"loss": 0.1579872488975525,
"step": 680
},
{
"acc": 0.94831013916501,
"epoch": 0.29551337359792923,
"grad_norm": 10.75,
"learning_rate": 4.79738911499477e-06,
"loss": 0.14849933385848998,
"step": 685
},
{
"acc": 0.958904109589041,
"epoch": 0.29767040552200175,
"grad_norm": 9.125,
"learning_rate": 4.781071078574092e-06,
"loss": 0.12549154758453368,
"step": 690
},
{
"acc": 0.943359375,
"epoch": 0.2998274374460742,
"grad_norm": 19.25,
"learning_rate": 4.764671253608765e-06,
"loss": 0.11965863704681397,
"step": 695
},
{
"acc": 0.9474747474747475,
"epoch": 0.30198446937014667,
"grad_norm": 24.5,
"learning_rate": 4.748190393194964e-06,
"loss": 0.15644127130508423,
"step": 700
},
{
"epoch": 0.30198446937014667,
"eval_acc": 0.9549386275640498,
"eval_loss": 0.017001556232571602,
"eval_mrr": 0.9429937548790007,
"eval_ndcg": 0.9569733975457528,
"eval_runtime": 87.9286,
"eval_samples_per_second": 22.2,
"eval_steps_per_second": 11.1,
"step": 700
},
{
"acc": 0.9433962264150944,
"epoch": 0.30414150129421913,
"grad_norm": 14.875,
"learning_rate": 4.731629254150091e-06,
"loss": 0.1656881332397461,
"step": 705
},
{
"acc": 0.9484536082474226,
"epoch": 0.30629853321829165,
"grad_norm": 16.875,
"learning_rate": 4.714988596978023e-06,
"loss": 0.16551544666290283,
"step": 710
},
{
"acc": 0.9601593625498008,
"epoch": 0.3084555651423641,
"grad_norm": 13.0625,
"learning_rate": 4.698269185834188e-06,
"loss": 0.13289109468460084,
"step": 715
},
{
"acc": 0.9621928166351607,
"epoch": 0.31061259706643657,
"grad_norm": 6.625,
"learning_rate": 4.681471788490473e-06,
"loss": 0.13107165098190307,
"step": 720
},
{
"acc": 0.9474708171206225,
"epoch": 0.3127696289905091,
"grad_norm": 17.125,
"learning_rate": 4.664597176299972e-06,
"loss": 0.15164241790771485,
"step": 725
},
{
"acc": 0.9386138613861386,
"epoch": 0.31492666091458155,
"grad_norm": 13.25,
"learning_rate": 4.647646124161557e-06,
"loss": 0.14998191595077515,
"step": 730
},
{
"acc": 0.9588014981273408,
"epoch": 0.317083692838654,
"grad_norm": 17.0,
"learning_rate": 4.6306194104843005e-06,
"loss": 0.1452179431915283,
"step": 735
},
{
"acc": 0.9697542533081286,
"epoch": 0.31924072476272647,
"grad_norm": 21.75,
"learning_rate": 4.613517817151725e-06,
"loss": 0.0863348662853241,
"step": 740
},
{
"acc": 0.9271653543307087,
"epoch": 0.321397756686799,
"grad_norm": 22.25,
"learning_rate": 4.596342129485904e-06,
"loss": 0.2219313383102417,
"step": 745
},
{
"acc": 0.948559670781893,
"epoch": 0.32355478861087145,
"grad_norm": 17.25,
"learning_rate": 4.579093136211394e-06,
"loss": 0.14628000259399415,
"step": 750
},
{
"acc": 0.9550321199143469,
"epoch": 0.3257118205349439,
"grad_norm": 19.375,
"learning_rate": 4.561771629419018e-06,
"loss": 0.14832402467727662,
"step": 755
},
{
"acc": 0.9540918163672655,
"epoch": 0.32786885245901637,
"grad_norm": 24.875,
"learning_rate": 4.544378404529493e-06,
"loss": 0.16552494764328002,
"step": 760
},
{
"acc": 0.9468302658486708,
"epoch": 0.3300258843830889,
"grad_norm": 11.75,
"learning_rate": 4.526914260256897e-06,
"loss": 0.18186469078063966,
"step": 765
},
{
"acc": 0.9342915811088296,
"epoch": 0.33218291630716135,
"grad_norm": 29.0,
"learning_rate": 4.509379998572003e-06,
"loss": 0.189409601688385,
"step": 770
},
{
"acc": 0.9652509652509652,
"epoch": 0.3343399482312338,
"grad_norm": 8.125,
"learning_rate": 4.491776424665441e-06,
"loss": 0.11067872047424317,
"step": 775
},
{
"acc": 0.9634408602150538,
"epoch": 0.3364969801553063,
"grad_norm": 19.125,
"learning_rate": 4.474104346910724e-06,
"loss": 0.12605880498886107,
"step": 780
},
{
"acc": 0.9620493358633776,
"epoch": 0.3386540120793788,
"grad_norm": 16.625,
"learning_rate": 4.4563645768271375e-06,
"loss": 0.1279581904411316,
"step": 785
},
{
"acc": 0.9608610567514677,
"epoch": 0.34081104400345125,
"grad_norm": 26.125,
"learning_rate": 4.438557929042457e-06,
"loss": 0.13559631109237671,
"step": 790
},
{
"acc": 0.963265306122449,
"epoch": 0.3429680759275237,
"grad_norm": 16.75,
"learning_rate": 4.4206852212555544e-06,
"loss": 0.11860382556915283,
"step": 795
},
{
"acc": 0.9424184261036468,
"epoch": 0.3451251078515962,
"grad_norm": 27.25,
"learning_rate": 4.402747274198838e-06,
"loss": 0.1454222559928894,
"step": 800
},
{
"epoch": 0.3451251078515962,
"eval_acc": 0.9581514128017135,
"eval_loss": 0.01614278368651867,
"eval_mrr": 0.9435499609679938,
"eval_ndcg": 0.9573436789903933,
"eval_runtime": 106.3602,
"eval_samples_per_second": 18.353,
"eval_steps_per_second": 9.176,
"step": 800
},
{
"acc": 0.9533468559837728,
"epoch": 0.3472821397756687,
"grad_norm": 23.125,
"learning_rate": 4.384744911600571e-06,
"loss": 0.15178160667419432,
"step": 805
},
{
"acc": 0.9653767820773931,
"epoch": 0.34943917169974115,
"grad_norm": 16.625,
"learning_rate": 4.36667896014704e-06,
"loss": 0.12023615837097168,
"step": 810
},
{
"acc": 0.9449715370018975,
"epoch": 0.3515962036238136,
"grad_norm": 15.0625,
"learning_rate": 4.3485502494445945e-06,
"loss": 0.1382935166358948,
"step": 815
},
{
"acc": 0.9700598802395209,
"epoch": 0.3537532355478861,
"grad_norm": 4.75,
"learning_rate": 4.330359611981552e-06,
"loss": 0.09947049617767334,
"step": 820
},
{
"acc": 0.9504132231404959,
"epoch": 0.3559102674719586,
"grad_norm": 11.4375,
"learning_rate": 4.312107883089969e-06,
"loss": 0.15309990644454957,
"step": 825
},
{
"acc": 0.9606625258799172,
"epoch": 0.35806729939603105,
"grad_norm": 13.375,
"learning_rate": 4.293795900907278e-06,
"loss": 0.1290311336517334,
"step": 830
},
{
"acc": 0.9569471624266145,
"epoch": 0.36022433132010356,
"grad_norm": 13.875,
"learning_rate": 4.275424506337804e-06,
"loss": 0.1386420726776123,
"step": 835
},
{
"acc": 0.9567779960707269,
"epoch": 0.362381363244176,
"grad_norm": 15.0,
"learning_rate": 4.256994543014147e-06,
"loss": 0.13683913946151732,
"step": 840
},
{
"acc": 0.978515625,
"epoch": 0.3645383951682485,
"grad_norm": 10.875,
"learning_rate": 4.2385068572584416e-06,
"loss": 0.07557401657104493,
"step": 845
},
{
"acc": 0.9506903353057199,
"epoch": 0.36669542709232095,
"grad_norm": 21.875,
"learning_rate": 4.2199622980434965e-06,
"loss": 0.15320024490356446,
"step": 850
},
{
"acc": 0.9474708171206225,
"epoch": 0.36885245901639346,
"grad_norm": 30.875,
"learning_rate": 4.2013617169537995e-06,
"loss": 0.15146974325180054,
"step": 855
},
{
"acc": 0.936127744510978,
"epoch": 0.3710094909404659,
"grad_norm": 13.9375,
"learning_rate": 4.182705968146426e-06,
"loss": 0.18005824089050293,
"step": 860
},
{
"acc": 0.9661354581673307,
"epoch": 0.3731665228645384,
"grad_norm": 11.0625,
"learning_rate": 4.163995908311802e-06,
"loss": 0.10441750288009644,
"step": 865
},
{
"acc": 0.9671179883945842,
"epoch": 0.37532355478861085,
"grad_norm": 11.625,
"learning_rate": 4.145232396634372e-06,
"loss": 0.11835912466049195,
"step": 870
},
{
"acc": 0.9686888454011742,
"epoch": 0.37748058671268336,
"grad_norm": 8.1875,
"learning_rate": 4.126416294753141e-06,
"loss": 0.08334695696830749,
"step": 875
},
{
"acc": 0.9533898305084746,
"epoch": 0.3796376186367558,
"grad_norm": 15.0625,
"learning_rate": 4.1075484667221095e-06,
"loss": 0.1570521354675293,
"step": 880
},
{
"acc": 0.9628865979381444,
"epoch": 0.3817946505608283,
"grad_norm": 11.875,
"learning_rate": 4.088629778970591e-06,
"loss": 0.10125092267990113,
"step": 885
},
{
"acc": 0.9517102615694165,
"epoch": 0.3839516824849008,
"grad_norm": 10.3125,
"learning_rate": 4.06966110026343e-06,
"loss": 0.15835769176483155,
"step": 890
},
{
"acc": 0.9445506692160612,
"epoch": 0.38610871440897326,
"grad_norm": 51.0,
"learning_rate": 4.050643301661107e-06,
"loss": 0.17581632137298583,
"step": 895
},
{
"acc": 0.9544554455445544,
"epoch": 0.3882657463330457,
"grad_norm": 17.75,
"learning_rate": 4.0315772564797325e-06,
"loss": 0.16853252649307252,
"step": 900
},
{
"epoch": 0.3882657463330457,
"eval_acc": 0.9614465771480353,
"eval_loss": 0.015336191281676292,
"eval_mrr": 0.9436902322404371,
"eval_ndcg": 0.9575083647474146,
"eval_runtime": 79.2201,
"eval_samples_per_second": 24.64,
"eval_steps_per_second": 12.32,
"step": 900
},
{
"acc": 0.9625984251968503,
"epoch": 0.3904227782571182,
"grad_norm": 9.5625,
"learning_rate": 4.012463840250949e-06,
"loss": 0.11725049018859864,
"step": 905
},
{
"acc": 0.9578313253012049,
"epoch": 0.3925798101811907,
"grad_norm": 26.125,
"learning_rate": 3.993303930681726e-06,
"loss": 0.11894035339355469,
"step": 910
},
{
"acc": 0.952,
"epoch": 0.39473684210526316,
"grad_norm": 26.375,
"learning_rate": 3.974098407614051e-06,
"loss": 0.14351377487182618,
"step": 915
},
{
"acc": 0.9588477366255144,
"epoch": 0.3968938740293356,
"grad_norm": 33.75,
"learning_rate": 3.954848152984529e-06,
"loss": 0.12398861646652222,
"step": 920
},
{
"acc": 0.9679358717434869,
"epoch": 0.3990509059534081,
"grad_norm": 10.4375,
"learning_rate": 3.935554050783885e-06,
"loss": 0.10695126056671142,
"step": 925
},
{
"acc": 0.9646365422396856,
"epoch": 0.4012079378774806,
"grad_norm": 9.5,
"learning_rate": 3.916216987016363e-06,
"loss": 0.11947808265686036,
"step": 930
},
{
"acc": 0.9588235294117647,
"epoch": 0.40336496980155306,
"grad_norm": 19.375,
"learning_rate": 3.8968378496590485e-06,
"loss": 0.12318050861358643,
"step": 935
},
{
"acc": 0.9699812382739212,
"epoch": 0.4055220017256255,
"grad_norm": 9.5625,
"learning_rate": 3.877417528621087e-06,
"loss": 0.08819143176078796,
"step": 940
},
{
"acc": 0.9529411764705882,
"epoch": 0.40767903364969804,
"grad_norm": 25.0,
"learning_rate": 3.8579569157028195e-06,
"loss": 0.1318346619606018,
"step": 945
},
{
"acc": 0.9395085066162571,
"epoch": 0.4098360655737705,
"grad_norm": 14.875,
"learning_rate": 3.838456904554829e-06,
"loss": 0.14923615455627443,
"step": 950
},
{
"acc": 0.9635974304068522,
"epoch": 0.41199309749784296,
"grad_norm": 11.25,
"learning_rate": 3.8189183906369035e-06,
"loss": 0.10555909872055054,
"step": 955
},
{
"acc": 0.9612244897959183,
"epoch": 0.4141501294219154,
"grad_norm": 27.0,
"learning_rate": 3.799342271176918e-06,
"loss": 0.11741663217544555,
"step": 960
},
{
"acc": 0.9591397849462365,
"epoch": 0.41630716134598794,
"grad_norm": 11.875,
"learning_rate": 3.7797294451296307e-06,
"loss": 0.1104978084564209,
"step": 965
},
{
"acc": 0.9538461538461539,
"epoch": 0.4184641932700604,
"grad_norm": 32.0,
"learning_rate": 3.7600808131354033e-06,
"loss": 0.12546448707580565,
"step": 970
},
{
"acc": 0.9642147117296223,
"epoch": 0.42062122519413286,
"grad_norm": 17.375,
"learning_rate": 3.740397277478841e-06,
"loss": 0.15062413215637208,
"step": 975
},
{
"acc": 0.9447731755424064,
"epoch": 0.4227782571182053,
"grad_norm": 20.25,
"learning_rate": 3.720679742047358e-06,
"loss": 0.19598883390426636,
"step": 980
},
{
"acc": 0.9592668024439919,
"epoch": 0.42493528904227784,
"grad_norm": 8.4375,
"learning_rate": 3.700929112289674e-06,
"loss": 0.11623775959014893,
"step": 985
},
{
"acc": 0.9693486590038314,
"epoch": 0.4270923209663503,
"grad_norm": 17.625,
"learning_rate": 3.681146295174234e-06,
"loss": 0.1038408637046814,
"step": 990
},
{
"acc": 0.9513184584178499,
"epoch": 0.42924935289042276,
"grad_norm": 6.40625,
"learning_rate": 3.6613321991475553e-06,
"loss": 0.16103934049606322,
"step": 995
},
{
"acc": 0.9710982658959537,
"epoch": 0.4314063848144953,
"grad_norm": 35.5,
"learning_rate": 3.6414877340925163e-06,
"loss": 0.1310266375541687,
"step": 1000
},
{
"epoch": 0.4314063848144953,
"eval_acc": 0.9601285114095065,
"eval_loss": 0.01538186427205801,
"eval_mrr": 0.9503037177985949,
"eval_ndcg": 0.9625558802620603,
"eval_runtime": 84.007,
"eval_samples_per_second": 23.236,
"eval_steps_per_second": 11.618,
"step": 1000
},
{
"acc": 0.9592668024439919,
"epoch": 0.43356341673856774,
"grad_norm": 14.5625,
"learning_rate": 3.6216138112865695e-06,
"loss": 0.11078298091888428,
"step": 1005
},
{
"acc": 0.9509433962264151,
"epoch": 0.4357204486626402,
"grad_norm": 31.75,
"learning_rate": 3.601711343359897e-06,
"loss": 0.12666620016098024,
"step": 1010
},
{
"acc": 0.958984375,
"epoch": 0.43787748058671266,
"grad_norm": 7.84375,
"learning_rate": 3.5817812442535008e-06,
"loss": 0.10231037139892578,
"step": 1015
},
{
"acc": 0.9519038076152304,
"epoch": 0.4400345125107852,
"grad_norm": 34.75,
"learning_rate": 3.561824429177234e-06,
"loss": 0.14617985486984253,
"step": 1020
},
{
"acc": 0.9489603024574669,
"epoch": 0.44219154443485764,
"grad_norm": 24.75,
"learning_rate": 3.541841814567774e-06,
"loss": 0.15405564308166503,
"step": 1025
},
{
"acc": 0.9458413926499033,
"epoch": 0.4443485763589301,
"grad_norm": 25.875,
"learning_rate": 3.521834318046539e-06,
"loss": 0.1635340690612793,
"step": 1030
},
{
"acc": 0.964509394572025,
"epoch": 0.44650560828300256,
"grad_norm": 10.9375,
"learning_rate": 3.5018028583775472e-06,
"loss": 0.10313594341278076,
"step": 1035
},
{
"acc": 0.9542857142857143,
"epoch": 0.4486626402070751,
"grad_norm": 23.375,
"learning_rate": 3.481748355425229e-06,
"loss": 0.13693466186523437,
"step": 1040
},
{
"acc": 0.9563567362428842,
"epoch": 0.45081967213114754,
"grad_norm": 22.875,
"learning_rate": 3.4616717301121857e-06,
"loss": 0.123587965965271,
"step": 1045
},
{
"acc": 0.9590163934426229,
"epoch": 0.45297670405522,
"grad_norm": 12.5,
"learning_rate": 3.441573904376899e-06,
"loss": 0.13192965984344482,
"step": 1050
},
{
"acc": 0.9732824427480916,
"epoch": 0.4551337359792925,
"grad_norm": 20.875,
"learning_rate": 3.4214558011313937e-06,
"loss": 0.09249483346939087,
"step": 1055
},
{
"acc": 0.9580838323353293,
"epoch": 0.457290767903365,
"grad_norm": 26.75,
"learning_rate": 3.4013183442188606e-06,
"loss": 0.11342880725860596,
"step": 1060
},
{
"acc": 0.9686274509803922,
"epoch": 0.45944779982743744,
"grad_norm": 22.375,
"learning_rate": 3.381162458371229e-06,
"loss": 0.08873859643936158,
"step": 1065
},
{
"acc": 0.969758064516129,
"epoch": 0.4616048317515099,
"grad_norm": 39.0,
"learning_rate": 3.3609890691667005e-06,
"loss": 0.0837608277797699,
"step": 1070
},
{
"acc": 0.962671905697446,
"epoch": 0.4637618636755824,
"grad_norm": 16.375,
"learning_rate": 3.340799102987251e-06,
"loss": 0.11619801521301269,
"step": 1075
},
{
"acc": 0.9606003752345216,
"epoch": 0.4659188955996549,
"grad_norm": 15.0625,
"learning_rate": 3.320593486976088e-06,
"loss": 0.10996119976043701,
"step": 1080
},
{
"acc": 0.955193482688391,
"epoch": 0.46807592752372734,
"grad_norm": 24.5,
"learning_rate": 3.300373148995072e-06,
"loss": 0.12069922685623169,
"step": 1085
},
{
"acc": 0.9691991786447639,
"epoch": 0.4702329594477998,
"grad_norm": 7.90625,
"learning_rate": 3.280139017582113e-06,
"loss": 0.14299554824829103,
"step": 1090
},
{
"acc": 0.9743589743589743,
"epoch": 0.4723899913718723,
"grad_norm": 10.875,
"learning_rate": 3.2598920219085285e-06,
"loss": 0.08163526654243469,
"step": 1095
},
{
"acc": 0.9682242990654205,
"epoch": 0.4745470232959448,
"grad_norm": 9.25,
"learning_rate": 3.2396330917363754e-06,
"loss": 0.108160400390625,
"step": 1100
},
{
"epoch": 0.4745470232959448,
"eval_acc": 0.964494604168383,
"eval_loss": 0.014143780805170536,
"eval_mrr": 0.9522601971116316,
"eval_ndcg": 0.963948349172127,
"eval_runtime": 88.1934,
"eval_samples_per_second": 22.133,
"eval_steps_per_second": 11.067,
"step": 1100
},
{
"acc": 0.9725490196078431,
"epoch": 0.47670405522001724,
"grad_norm": 11.875,
"learning_rate": 3.219363157375755e-06,
"loss": 0.08103461265563965,
"step": 1105
},
{
"acc": 0.97678916827853,
"epoch": 0.47886108714408976,
"grad_norm": 26.5,
"learning_rate": 3.1990831496420897e-06,
"loss": 0.08858168721199036,
"step": 1110
},
{
"acc": 0.9602385685884692,
"epoch": 0.4810181190681622,
"grad_norm": 15.6875,
"learning_rate": 3.1787939998133853e-06,
"loss": 0.11988803148269653,
"step": 1115
},
{
"acc": 0.9718875502008032,
"epoch": 0.4831751509922347,
"grad_norm": 19.0,
"learning_rate": 3.158496639587459e-06,
"loss": 0.09709318280220032,
"step": 1120
},
{
"acc": 0.948559670781893,
"epoch": 0.48533218291630714,
"grad_norm": 17.75,
"learning_rate": 3.1381920010391566e-06,
"loss": 0.1310401439666748,
"step": 1125
},
{
"acc": 0.9658444022770398,
"epoch": 0.48748921484037966,
"grad_norm": 27.875,
"learning_rate": 3.1178810165775532e-06,
"loss": 0.11905833482742309,
"step": 1130
},
{
"acc": 0.967680608365019,
"epoch": 0.4896462467644521,
"grad_norm": 14.9375,
"learning_rate": 3.0975646189031345e-06,
"loss": 0.09188791513442993,
"step": 1135
},
{
"acc": 0.9701195219123506,
"epoch": 0.4918032786885246,
"grad_norm": 14.3125,
"learning_rate": 3.0772437409649664e-06,
"loss": 0.09571850299835205,
"step": 1140
},
{
"acc": 0.9606741573033708,
"epoch": 0.49396031061259704,
"grad_norm": 16.875,
"learning_rate": 3.056919315917851e-06,
"loss": 0.1439572811126709,
"step": 1145
},
{
"acc": 0.9613152804642167,
"epoch": 0.49611734253666956,
"grad_norm": 23.625,
"learning_rate": 3.0365922770794798e-06,
"loss": 0.11582168340682983,
"step": 1150
},
{
"acc": 0.980276134122288,
"epoch": 0.498274374460742,
"grad_norm": 18.0,
"learning_rate": 3.016263557887571e-06,
"loss": 0.0658139169216156,
"step": 1155
},
{
"acc": 0.9581673306772909,
"epoch": 0.5004314063848145,
"grad_norm": 9.1875,
"learning_rate": 2.995934091857007e-06,
"loss": 0.11524436473846436,
"step": 1160
},
{
"acc": 0.9642105263157895,
"epoch": 0.502588438308887,
"grad_norm": 28.125,
"learning_rate": 2.975604812536964e-06,
"loss": 0.10523393154144287,
"step": 1165
},
{
"acc": 0.9673076923076923,
"epoch": 0.5047454702329595,
"grad_norm": 14.75,
"learning_rate": 2.9552766534680456e-06,
"loss": 0.10329176187515259,
"step": 1170
},
{
"acc": 0.9471544715447154,
"epoch": 0.5069025021570319,
"grad_norm": 22.5,
"learning_rate": 2.9349505481394128e-06,
"loss": 0.15873119831085206,
"step": 1175
},
{
"acc": 0.9680638722554891,
"epoch": 0.5090595340811044,
"grad_norm": 14.25,
"learning_rate": 2.914627429945915e-06,
"loss": 0.10048485994338989,
"step": 1180
},
{
"acc": 0.9631067961165048,
"epoch": 0.5112165660051768,
"grad_norm": 38.25,
"learning_rate": 2.894308232145232e-06,
"loss": 0.10857952833175659,
"step": 1185
},
{
"acc": 0.9545454545454546,
"epoch": 0.5133735979292493,
"grad_norm": 23.125,
"learning_rate": 2.8739938878150124e-06,
"loss": 0.11859817504882812,
"step": 1190
},
{
"acc": 0.9759036144578314,
"epoch": 0.5155306298533219,
"grad_norm": 14.8125,
"learning_rate": 2.8536853298100302e-06,
"loss": 0.06508604288101197,
"step": 1195
},
{
"acc": 0.9620253164556962,
"epoch": 0.5176876617773943,
"grad_norm": 10.6875,
"learning_rate": 2.833383490719347e-06,
"loss": 0.13556112051010133,
"step": 1200
},
{
"epoch": 0.5176876617773943,
"eval_acc": 0.964824120603015,
"eval_loss": 0.013948180712759495,
"eval_mrr": 0.9515539617486339,
"eval_ndcg": 0.9634276661273489,
"eval_runtime": 79.2148,
"eval_samples_per_second": 24.642,
"eval_steps_per_second": 12.321,
"step": 1200
}
],
"logging_steps": 5,
"max_steps": 2318,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 400,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 3.559934048421806e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}