{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 19.45945945945946,
"eval_steps": 500,
"global_step": 540,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.36036036036036034,
"grad_norm": 4.098762512207031,
"learning_rate": 9.259259259259259e-07,
"logits/chosen": -2.332144260406494,
"logits/rejected": -2.3385167121887207,
"logps/chosen": -80.89369201660156,
"logps/rejected": -70.11573791503906,
"loss": 0.6929,
"rewards/accuracies": 0.4625000059604645,
"rewards/chosen": 0.00037987352698110044,
"rewards/margins": 0.004227532539516687,
"rewards/rejected": -0.003847658634185791,
"step": 10
},
{
"epoch": 0.7207207207207207,
"grad_norm": 3.641324281692505,
"learning_rate": 1.8518518518518519e-06,
"logits/chosen": -2.323789119720459,
"logits/rejected": -2.351041793823242,
"logps/chosen": -73.2725601196289,
"logps/rejected": -81.80250549316406,
"loss": 0.6932,
"rewards/accuracies": 0.5874999761581421,
"rewards/chosen": 0.0012983012711629272,
"rewards/margins": 0.004207946360111237,
"rewards/rejected": -0.0029096449725329876,
"step": 20
},
{
"epoch": 1.0810810810810811,
"grad_norm": 3.9276015758514404,
"learning_rate": 2.7777777777777783e-06,
"logits/chosen": -2.3353028297424316,
"logits/rejected": -2.3445916175842285,
"logps/chosen": -69.34381103515625,
"logps/rejected": -74.37530517578125,
"loss": 0.6941,
"rewards/accuracies": 0.4375,
"rewards/chosen": -0.01035231165587902,
"rewards/margins": -0.006389107555150986,
"rewards/rejected": -0.00396320316940546,
"step": 30
},
{
"epoch": 1.4414414414414414,
"grad_norm": 4.809000492095947,
"learning_rate": 3.7037037037037037e-06,
"logits/chosen": -2.343184232711792,
"logits/rejected": -2.360262393951416,
"logps/chosen": -77.91002655029297,
"logps/rejected": -76.27156066894531,
"loss": 0.6902,
"rewards/accuracies": 0.5625,
"rewards/chosen": -0.008437180891633034,
"rewards/margins": 0.008391124196350574,
"rewards/rejected": -0.016828304156661034,
"step": 40
},
{
"epoch": 1.8018018018018018,
"grad_norm": 4.504117012023926,
"learning_rate": 4.62962962962963e-06,
"logits/chosen": -2.3394973278045654,
"logits/rejected": -2.3635268211364746,
"logps/chosen": -83.62376403808594,
"logps/rejected": -267.64569091796875,
"loss": 0.6851,
"rewards/accuracies": 0.48750001192092896,
"rewards/chosen": 0.01695835217833519,
"rewards/margins": 0.14291717112064362,
"rewards/rejected": -0.12595881521701813,
"step": 50
},
{
"epoch": 2.1621621621621623,
"grad_norm": 4.033559322357178,
"learning_rate": 4.998119881260576e-06,
"logits/chosen": -2.32966685295105,
"logits/rejected": -2.3370490074157715,
"logps/chosen": -78.54629516601562,
"logps/rejected": -82.67992401123047,
"loss": 0.6767,
"rewards/accuracies": 0.637499988079071,
"rewards/chosen": -0.03485359251499176,
"rewards/margins": 0.035757362842559814,
"rewards/rejected": -0.07061095535755157,
"step": 60
},
{
"epoch": 2.5225225225225225,
"grad_norm": 4.979142189025879,
"learning_rate": 4.9866405060165044e-06,
"logits/chosen": -2.364291191101074,
"logits/rejected": -2.376107931137085,
"logps/chosen": -70.61842346191406,
"logps/rejected": -81.80282592773438,
"loss": 0.6636,
"rewards/accuracies": 0.675000011920929,
"rewards/chosen": -0.06025966256856918,
"rewards/margins": 0.03742799907922745,
"rewards/rejected": -0.09768766909837723,
"step": 70
},
{
"epoch": 2.8828828828828827,
"grad_norm": 4.0428690910339355,
"learning_rate": 4.964774158361991e-06,
"logits/chosen": -2.3341965675354004,
"logits/rejected": -2.3440909385681152,
"logps/chosen": -86.3591537475586,
"logps/rejected": -77.45347595214844,
"loss": 0.6519,
"rewards/accuracies": 0.7124999761581421,
"rewards/chosen": -0.09531830251216888,
"rewards/margins": 0.09854079782962799,
"rewards/rejected": -0.19385910034179688,
"step": 80
},
{
"epoch": 3.2432432432432434,
"grad_norm": 4.31919527053833,
"learning_rate": 4.93261217644956e-06,
"logits/chosen": -2.351658821105957,
"logits/rejected": -2.3437318801879883,
"logps/chosen": -77.31346130371094,
"logps/rejected": -80.43277740478516,
"loss": 0.6243,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": -0.1381106823682785,
"rewards/margins": 0.16527524590492249,
"rewards/rejected": -0.3033859133720398,
"step": 90
},
{
"epoch": 3.6036036036036037,
"grad_norm": 5.029748439788818,
"learning_rate": 4.8902889044347e-06,
"logits/chosen": -2.3354241847991943,
"logits/rejected": -2.358518600463867,
"logps/chosen": -75.03588104248047,
"logps/rejected": -86.44483947753906,
"loss": 0.6025,
"rewards/accuracies": 0.699999988079071,
"rewards/chosen": -0.22239580750465393,
"rewards/margins": 0.1877792775630951,
"rewards/rejected": -0.41017502546310425,
"step": 100
},
{
"epoch": 3.963963963963964,
"grad_norm": 4.6208648681640625,
"learning_rate": 4.837981131305475e-06,
"logits/chosen": -2.3195366859436035,
"logits/rejected": -2.3129196166992188,
"logps/chosen": -72.09532928466797,
"logps/rejected": -73.18878936767578,
"loss": 0.5955,
"rewards/accuracies": 0.875,
"rewards/chosen": -0.22240504622459412,
"rewards/margins": 0.22803232073783875,
"rewards/rejected": -0.45043739676475525,
"step": 110
},
{
"epoch": 4.324324324324325,
"grad_norm": 4.163040637969971,
"learning_rate": 4.775907352415367e-06,
"logits/chosen": -2.3427720069885254,
"logits/rejected": -2.3731276988983154,
"logps/chosen": -85.9415283203125,
"logps/rejected": -93.53765869140625,
"loss": 0.5506,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": -0.24449148774147034,
"rewards/margins": 0.3563767373561859,
"rewards/rejected": -0.6008682250976562,
"step": 120
},
{
"epoch": 4.684684684684685,
"grad_norm": 4.228254795074463,
"learning_rate": 4.70432685680402e-06,
"logits/chosen": -2.336733341217041,
"logits/rejected": -2.3446521759033203,
"logps/chosen": -81.07231140136719,
"logps/rejected": -90.82849884033203,
"loss": 0.5248,
"rewards/accuracies": 0.8125,
"rewards/chosen": -0.005195322446525097,
"rewards/margins": 0.6936509609222412,
"rewards/rejected": -0.6988462209701538,
"step": 130
},
{
"epoch": 5.045045045045045,
"grad_norm": 4.454960346221924,
"learning_rate": 4.623538644118244e-06,
"logits/chosen": -2.3331754207611084,
"logits/rejected": -2.3434836864471436,
"logps/chosen": -83.67604064941406,
"logps/rejected": -82.92774200439453,
"loss": 0.5288,
"rewards/accuracies": 0.875,
"rewards/chosen": -0.2716079652309418,
"rewards/margins": 0.4593236446380615,
"rewards/rejected": -0.7309316396713257,
"step": 140
},
{
"epoch": 5.405405405405405,
"grad_norm": 5.223482608795166,
"learning_rate": 4.533880175657419e-06,
"logits/chosen": -2.362809658050537,
"logits/rejected": -2.3657679557800293,
"logps/chosen": -73.20018768310547,
"logps/rejected": -85.37998962402344,
"loss": 0.4682,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": -0.3221462368965149,
"rewards/margins": 0.5968301892280579,
"rewards/rejected": -0.9189764261245728,
"step": 150
},
{
"epoch": 5.7657657657657655,
"grad_norm": 4.905521869659424,
"learning_rate": 4.435725964760331e-06,
"logits/chosen": -2.3808655738830566,
"logits/rejected": -2.368286609649658,
"logps/chosen": -68.88943481445312,
"logps/rejected": -82.69029235839844,
"loss": 0.4586,
"rewards/accuracies": 0.875,
"rewards/chosen": -0.3172217011451721,
"rewards/margins": 0.7665462493896484,
"rewards/rejected": -1.0837678909301758,
"step": 160
},
{
"epoch": 6.126126126126126,
"grad_norm": 5.399628162384033,
"learning_rate": 4.329486012421531e-06,
"logits/chosen": -2.365935802459717,
"logits/rejected": -2.363004684448242,
"logps/chosen": -70.47642517089844,
"logps/rejected": -84.02542877197266,
"loss": 0.4462,
"rewards/accuracies": 0.8374999761581421,
"rewards/chosen": -0.45835933089256287,
"rewards/margins": 0.8438631892204285,
"rewards/rejected": -1.302222490310669,
"step": 170
},
{
"epoch": 6.486486486486487,
"grad_norm": 4.843445777893066,
"learning_rate": 4.215604094671835e-06,
"logits/chosen": -2.357231855392456,
"logits/rejected": -2.360239028930664,
"logps/chosen": -78.67561340332031,
"logps/rejected": -88.39659118652344,
"loss": 0.3976,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": -0.4842923581600189,
"rewards/margins": 0.8027322888374329,
"rewards/rejected": -1.2870244979858398,
"step": 180
},
{
"epoch": 6.846846846846847,
"grad_norm": 4.972764015197754,
"learning_rate": 4.094555908876765e-06,
"logits/chosen": -2.3751468658447266,
"logits/rejected": -2.3993237018585205,
"logps/chosen": -73.63652038574219,
"logps/rejected": -278.0970458984375,
"loss": 0.3959,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": -0.4291106164455414,
"rewards/margins": 0.9967883229255676,
"rewards/rejected": -1.4258991479873657,
"step": 190
},
{
"epoch": 7.207207207207207,
"grad_norm": 5.071193218231201,
"learning_rate": 3.966847086696045e-06,
"logits/chosen": -2.3572330474853516,
"logits/rejected": -2.357269763946533,
"logps/chosen": -84.92713928222656,
"logps/rejected": -98.15062713623047,
"loss": 0.3544,
"rewards/accuracies": 0.9375,
"rewards/chosen": -0.5852295756340027,
"rewards/margins": 1.2983506917953491,
"rewards/rejected": -1.883580207824707,
"step": 200
},
{
"epoch": 7.5675675675675675,
"grad_norm": 5.1891655921936035,
"learning_rate": 3.833011082004229e-06,
"logits/chosen": -2.368424892425537,
"logits/rejected": -2.378568649291992,
"logps/chosen": -72.57874298095703,
"logps/rejected": -84.37443542480469,
"loss": 0.3421,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": -0.48721614480018616,
"rewards/margins": 1.2057541608810425,
"rewards/rejected": -1.6929700374603271,
"step": 210
},
{
"epoch": 7.927927927927928,
"grad_norm": 5.771843433380127,
"learning_rate": 3.693606942594873e-06,
"logits/chosen": -2.3891513347625732,
"logits/rejected": -2.4053854942321777,
"logps/chosen": -75.97737121582031,
"logps/rejected": -97.49588012695312,
"loss": 0.3211,
"rewards/accuracies": 0.8374999761581421,
"rewards/chosen": -0.6163657903671265,
"rewards/margins": 1.1816037893295288,
"rewards/rejected": -1.7979698181152344,
"step": 220
},
{
"epoch": 8.288288288288289,
"grad_norm": 5.1563029289245605,
"learning_rate": 3.549216974976073e-06,
"logits/chosen": -2.4075605869293213,
"logits/rejected": -2.406411647796631,
"logps/chosen": -82.80142974853516,
"logps/rejected": -96.36463928222656,
"loss": 0.2848,
"rewards/accuracies": 0.9750000238418579,
"rewards/chosen": -0.8106307983398438,
"rewards/margins": 1.647127389907837,
"rewards/rejected": -2.4577584266662598,
"step": 230
},
{
"epoch": 8.64864864864865,
"grad_norm": 5.483398914337158,
"learning_rate": 3.400444312011776e-06,
"logits/chosen": -2.3797879219055176,
"logits/rejected": -2.362518787384033,
"logps/chosen": -82.14349365234375,
"logps/rejected": -97.63994598388672,
"loss": 0.278,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": -0.9469457864761353,
"rewards/margins": 1.488023281097412,
"rewards/rejected": -2.434968948364258,
"step": 240
},
{
"epoch": 9.00900900900901,
"grad_norm": 5.042275905609131,
"learning_rate": 3.2479103935691047e-06,
"logits/chosen": -2.3207201957702637,
"logits/rejected": -2.341810941696167,
"logps/chosen": -85.28227233886719,
"logps/rejected": -116.27372741699219,
"loss": 0.2494,
"rewards/accuracies": 0.9375,
"rewards/chosen": -1.0121912956237793,
"rewards/margins": 1.997532606124878,
"rewards/rejected": -3.0097243785858154,
"step": 250
},
{
"epoch": 9.36936936936937,
"grad_norm": 5.468939781188965,
"learning_rate": 3.092252370695298e-06,
"logits/chosen": -2.3408374786376953,
"logits/rejected": -2.366006851196289,
"logps/chosen": -72.05101013183594,
"logps/rejected": -102.21392822265625,
"loss": 0.2457,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": -1.0319160223007202,
"rewards/margins": 1.8719971179962158,
"rewards/rejected": -2.9039134979248047,
"step": 260
},
{
"epoch": 9.72972972972973,
"grad_norm": 6.745687007904053,
"learning_rate": 2.9341204441673267e-06,
"logits/chosen": -2.327451467514038,
"logits/rejected": -2.3463993072509766,
"logps/chosen": -86.53431701660156,
"logps/rejected": -116.40992736816406,
"loss": 0.2059,
"rewards/accuracies": 0.9750000238418579,
"rewards/chosen": -1.3356889486312866,
"rewards/margins": 1.9991543292999268,
"rewards/rejected": -3.334843397140503,
"step": 270
},
{
"epoch": 10.09009009009009,
"grad_norm": 5.230775833129883,
"learning_rate": 2.7741751485313295e-06,
"logits/chosen": -2.3630144596099854,
"logits/rejected": -2.3630847930908203,
"logps/chosen": -76.57563018798828,
"logps/rejected": -99.20953369140625,
"loss": 0.2034,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": -1.2317285537719727,
"rewards/margins": 1.8740953207015991,
"rewards/rejected": -3.1058237552642822,
"step": 280
},
{
"epoch": 10.45045045045045,
"grad_norm": 6.581757545471191,
"learning_rate": 2.6130845929767662e-06,
"logits/chosen": -2.3247475624084473,
"logits/rejected": -2.3450474739074707,
"logps/chosen": -83.9271240234375,
"logps/rejected": -109.54156494140625,
"loss": 0.174,
"rewards/accuracies": 0.987500011920929,
"rewards/chosen": -1.4542334079742432,
"rewards/margins": 2.3127167224884033,
"rewards/rejected": -3.7669498920440674,
"step": 290
},
{
"epoch": 10.81081081081081,
"grad_norm": 5.604727745056152,
"learning_rate": 2.4515216705704396e-06,
"logits/chosen": -2.279327869415283,
"logits/rejected": -2.319913387298584,
"logps/chosen": -78.63652801513672,
"logps/rejected": -115.9185562133789,
"loss": 0.1831,
"rewards/accuracies": 0.9624999761581421,
"rewards/chosen": -1.3030173778533936,
"rewards/margins": 2.5270209312438965,
"rewards/rejected": -3.830038547515869,
"step": 300
},
{
"epoch": 11.17117117117117,
"grad_norm": 4.8863606452941895,
"learning_rate": 2.290161247507733e-06,
"logits/chosen": -2.273766040802002,
"logits/rejected": -2.3243603706359863,
"logps/chosen": -90.69010925292969,
"logps/rejected": -131.49423217773438,
"loss": 0.1513,
"rewards/accuracies": 0.987500011920929,
"rewards/chosen": -1.566362738609314,
"rewards/margins": 3.1073012351989746,
"rewards/rejected": -4.673664093017578,
"step": 310
},
{
"epoch": 11.531531531531531,
"grad_norm": 5.772294521331787,
"learning_rate": 2.129677344121879e-06,
"logits/chosen": -2.302643299102783,
"logits/rejected": -2.3125722408294678,
"logps/chosen": -78.91960144042969,
"logps/rejected": -103.53559875488281,
"loss": 0.1624,
"rewards/accuracies": 0.987500011920929,
"rewards/chosen": -1.6784160137176514,
"rewards/margins": 2.4029908180236816,
"rewards/rejected": -4.081407070159912,
"step": 320
},
{
"epoch": 11.891891891891891,
"grad_norm": 5.915937423706055,
"learning_rate": 1.970740319426474e-06,
"logits/chosen": -2.275726795196533,
"logits/rejected": -2.302337169647217,
"logps/chosen": -99.52557373046875,
"logps/rejected": -122.73197174072266,
"loss": 0.1348,
"rewards/accuracies": 0.987500011920929,
"rewards/chosen": -1.9647445678710938,
"rewards/margins": 2.8115408420562744,
"rewards/rejected": -4.776285648345947,
"step": 330
},
{
"epoch": 12.252252252252251,
"grad_norm": 5.65620231628418,
"learning_rate": 1.8140140709517467e-06,
"logits/chosen": -2.274402379989624,
"logits/rejected": -2.2855653762817383,
"logps/chosen": -86.69510650634766,
"logps/rejected": -116.1146469116211,
"loss": 0.1366,
"rewards/accuracies": 0.9624999761581421,
"rewards/chosen": -1.855790376663208,
"rewards/margins": 2.536457061767578,
"rewards/rejected": -4.392247200012207,
"step": 340
},
{
"epoch": 12.612612612612612,
"grad_norm": 4.642848491668701,
"learning_rate": 1.6601532615711452e-06,
"logits/chosen": -2.2652974128723145,
"logits/rejected": -2.285008192062378,
"logps/chosen": -89.00364685058594,
"logps/rejected": -126.0003890991211,
"loss": 0.1216,
"rewards/accuracies": 0.9750000238418579,
"rewards/chosen": -1.8776098489761353,
"rewards/margins": 2.8089137077331543,
"rewards/rejected": -4.6865234375,
"step": 350
},
{
"epoch": 12.972972972972974,
"grad_norm": 4.52380895614624,
"learning_rate": 1.509800584902108e-06,
"logits/chosen": -2.263986349105835,
"logits/rejected": -2.2855420112609863,
"logps/chosen": -91.05010986328125,
"logps/rejected": -133.76361083984375,
"loss": 0.1076,
"rewards/accuracies": 1.0,
"rewards/chosen": -2.4504590034484863,
"rewards/margins": 3.495802402496338,
"rewards/rejected": -5.946260929107666,
"step": 360
},
{
"epoch": 13.333333333333334,
"grad_norm": 4.304037094116211,
"learning_rate": 1.3635840807037487e-06,
"logits/chosen": -2.261019229888916,
"logits/rejected": -2.264559268951416,
"logps/chosen": -93.27009582519531,
"logps/rejected": -118.05653381347656,
"loss": 0.1072,
"rewards/accuracies": 0.9750000238418579,
"rewards/chosen": -2.1757514476776123,
"rewards/margins": 3.157127857208252,
"rewards/rejected": -5.332879066467285,
"step": 370
},
{
"epoch": 13.693693693693694,
"grad_norm": 5.501009941101074,
"learning_rate": 1.2221145114853172e-06,
"logits/chosen": -2.211054563522339,
"logits/rejected": -2.22572660446167,
"logps/chosen": -90.1929702758789,
"logps/rejected": -138.2399139404297,
"loss": 0.0889,
"rewards/accuracies": 1.0,
"rewards/chosen": -2.5342886447906494,
"rewards/margins": 3.2334110736846924,
"rewards/rejected": -5.767699241638184,
"step": 380
},
{
"epoch": 14.054054054054054,
"grad_norm": 5.037735939025879,
"learning_rate": 1.085982811283654e-06,
"logits/chosen": -2.2411131858825684,
"logits/rejected": -2.261753559112549,
"logps/chosen": -98.27137756347656,
"logps/rejected": -134.04779052734375,
"loss": 0.0971,
"rewards/accuracies": 1.0,
"rewards/chosen": -2.593488931655884,
"rewards/margins": 3.3826117515563965,
"rewards/rejected": -5.976100444793701,
"step": 390
},
{
"epoch": 14.414414414414415,
"grad_norm": 5.258338451385498,
"learning_rate": 9.557576172663577e-07,
"logits/chosen": -2.244196653366089,
"logits/rejected": -2.2605862617492676,
"logps/chosen": -93.3480224609375,
"logps/rejected": -145.70272827148438,
"loss": 0.0913,
"rewards/accuracies": 0.9750000238418579,
"rewards/chosen": -2.5507616996765137,
"rewards/margins": 3.9265968799591064,
"rewards/rejected": -6.477358341217041,
"step": 400
},
{
"epoch": 14.774774774774775,
"grad_norm": 4.47573184967041,
"learning_rate": 8.319828944714508e-07,
"logits/chosen": -2.26932954788208,
"logits/rejected": -2.274758815765381,
"logps/chosen": -102.62467956542969,
"logps/rejected": -132.93116760253906,
"loss": 0.0805,
"rewards/accuracies": 0.9750000238418579,
"rewards/chosen": -2.376272201538086,
"rewards/margins": 3.278709888458252,
"rewards/rejected": -5.654982089996338,
"step": 410
},
{
"epoch": 15.135135135135135,
"grad_norm": 4.006404399871826,
"learning_rate": 7.151756636052529e-07,
"logits/chosen": -2.240022659301758,
"logits/rejected": -2.2303688526153564,
"logps/chosen": -112.4423828125,
"logps/rejected": -143.22732543945312,
"loss": 0.0796,
"rewards/accuracies": 0.987500011920929,
"rewards/chosen": -2.971478223800659,
"rewards/margins": 3.4153618812561035,
"rewards/rejected": -6.3868408203125,
"step": 420
},
{
"epoch": 15.495495495495495,
"grad_norm": 4.521777629852295,
"learning_rate": 6.058238413897052e-07,
"logits/chosen": -2.1890573501586914,
"logits/rejected": -2.241264820098877,
"logps/chosen": -111.05755615234375,
"logps/rejected": -146.14109802246094,
"loss": 0.0724,
"rewards/accuracies": 1.0,
"rewards/chosen": -3.0282044410705566,
"rewards/margins": 3.6133179664611816,
"rewards/rejected": -6.641521453857422,
"step": 430
},
{
"epoch": 15.855855855855856,
"grad_norm": 4.783228397369385,
"learning_rate": 5.043842024802675e-07,
"logits/chosen": -2.1972153186798096,
"logits/rejected": -2.192469358444214,
"logps/chosen": -102.45082092285156,
"logps/rejected": -138.3446502685547,
"loss": 0.0723,
"rewards/accuracies": 1.0,
"rewards/chosen": -2.7581396102905273,
"rewards/margins": 3.8694820404052734,
"rewards/rejected": -6.627622127532959,
"step": 440
},
{
"epoch": 16.216216216216218,
"grad_norm": 4.521092891693115,
"learning_rate": 4.1128047146765936e-07,
"logits/chosen": -2.1869163513183594,
"logits/rejected": -2.182262897491455,
"logps/chosen": -104.68853759765625,
"logps/rejected": -145.3485870361328,
"loss": 0.075,
"rewards/accuracies": 0.987500011920929,
"rewards/chosen": -2.978736400604248,
"rewards/margins": 3.886040449142456,
"rewards/rejected": -6.864776611328125,
"step": 450
},
{
"epoch": 16.576576576576578,
"grad_norm": 4.271122932434082,
"learning_rate": 3.269015529333805e-07,
"logits/chosen": -2.2191543579101562,
"logits/rejected": -2.2385966777801514,
"logps/chosen": -91.767578125,
"logps/rejected": -135.73141479492188,
"loss": 0.0701,
"rewards/accuracies": 0.987500011920929,
"rewards/chosen": -2.994217872619629,
"rewards/margins": 3.757829189300537,
"rewards/rejected": -6.752047061920166,
"step": 460
},
{
"epoch": 16.936936936936938,
"grad_norm": 4.7400712966918945,
"learning_rate": 2.515999069522676e-07,
"logits/chosen": -2.174427032470703,
"logits/rejected": -2.211961507797241,
"logps/chosen": -100.193603515625,
"logps/rejected": -139.8927001953125,
"loss": 0.0682,
"rewards/accuracies": 0.9750000238418579,
"rewards/chosen": -3.3388049602508545,
"rewards/margins": 3.942704677581787,
"rewards/rejected": -7.281510353088379,
"step": 470
},
{
"epoch": 17.2972972972973,
"grad_norm": 4.059471130371094,
"learning_rate": 1.8569007682777417e-07,
"logits/chosen": -2.172400951385498,
"logits/rejected": -2.2151694297790527,
"logps/chosen": -109.02021789550781,
"logps/rejected": -408.2373962402344,
"loss": 0.0601,
"rewards/accuracies": 1.0,
"rewards/chosen": -3.6841540336608887,
"rewards/margins": 10.677932739257812,
"rewards/rejected": -14.362088203430176,
"step": 480
},
{
"epoch": 17.65765765765766,
"grad_norm": 3.554147243499756,
"learning_rate": 1.2944737520980883e-07,
"logits/chosen": -2.2005763053894043,
"logits/rejected": -2.234318494796753,
"logps/chosen": -105.55352783203125,
"logps/rejected": -156.26739501953125,
"loss": 0.0667,
"rewards/accuracies": 0.987500011920929,
"rewards/chosen": -3.3424956798553467,
"rewards/margins": 3.9150681495666504,
"rewards/rejected": -7.257563591003418,
"step": 490
},
{
"epoch": 18.01801801801802,
"grad_norm": 4.700870990753174,
"learning_rate": 8.310673408334496e-08,
"logits/chosen": -2.2091004848480225,
"logits/rejected": -2.2198190689086914,
"logps/chosen": -123.39643859863281,
"logps/rejected": -159.49868774414062,
"loss": 0.0679,
"rewards/accuracies": 1.0,
"rewards/chosen": -3.437526226043701,
"rewards/margins": 3.751330852508545,
"rewards/rejected": -7.188857078552246,
"step": 500
},
{
"epoch": 18.37837837837838,
"grad_norm": 4.196156024932861,
"learning_rate": 4.6861723431538273e-08,
"logits/chosen": -2.166625738143921,
"logits/rejected": -2.182325839996338,
"logps/chosen": -105.13529968261719,
"logps/rejected": -160.1414031982422,
"loss": 0.0599,
"rewards/accuracies": 1.0,
"rewards/chosen": -3.3666820526123047,
"rewards/margins": 4.2599992752075195,
"rewards/rejected": -7.626680850982666,
"step": 510
},
{
"epoch": 18.73873873873874,
"grad_norm": 4.409626483917236,
"learning_rate": 2.0863742672497244e-08,
"logits/chosen": -2.2174572944641113,
"logits/rejected": -2.229252815246582,
"logps/chosen": -111.33894348144531,
"logps/rejected": -138.5430145263672,
"loss": 0.0639,
"rewards/accuracies": 1.0,
"rewards/chosen": -2.91447114944458,
"rewards/margins": 3.7303109169006348,
"rewards/rejected": -6.644782066345215,
"step": 520
},
{
"epoch": 19.0990990990991,
"grad_norm": 4.13588285446167,
"learning_rate": 5.221388247169945e-09,
"logits/chosen": -2.207942485809326,
"logits/rejected": -2.219350814819336,
"logps/chosen": -100.70853424072266,
"logps/rejected": -141.94883728027344,
"loss": 0.0683,
"rewards/accuracies": 1.0,
"rewards/chosen": -3.074063777923584,
"rewards/margins": 3.970613956451416,
"rewards/rejected": -7.044677734375,
"step": 530
},
{
"epoch": 19.45945945945946,
"grad_norm": 3.9963550567626953,
"learning_rate": 0.0,
"logits/chosen": -2.2123262882232666,
"logits/rejected": -2.2226064205169678,
"logps/chosen": -111.34256744384766,
"logps/rejected": -149.1357421875,
"loss": 0.0683,
"rewards/accuracies": 1.0,
"rewards/chosen": -3.439054012298584,
"rewards/margins": 4.026246070861816,
"rewards/rejected": -7.4653000831604,
"step": 540
},
{
"epoch": 19.45945945945946,
"step": 540,
"total_flos": 1.9727302677684552e+18,
"train_loss": 0.2973077946239048,
"train_runtime": 4131.3591,
"train_samples_per_second": 8.573,
"train_steps_per_second": 0.131
}
],
"logging_steps": 10,
"max_steps": 540,
"num_input_tokens_seen": 0,
"num_train_epochs": 20,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.9727302677684552e+18,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}