{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 14.414414414414415,
"eval_steps": 500,
"global_step": 400,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.36036036036036034,
"grad_norm": 0.5300200581550598,
"learning_rate": 9.259259259259259e-07,
"logits/chosen": 1.7405741214752197,
"logits/rejected": 1.663368582725525,
"logps/chosen": -97.57881164550781,
"logps/rejected": -70.59793853759766,
"loss": 0.6938,
"rewards/accuracies": 0.4375,
"rewards/chosen": 0.0018835498485714197,
"rewards/margins": -0.0008800366776995361,
"rewards/rejected": 0.0027635858859866858,
"step": 10
},
{
"epoch": 0.7207207207207207,
"grad_norm": 0.4883837401866913,
"learning_rate": 1.8518518518518519e-06,
"logits/chosen": 1.8280715942382812,
"logits/rejected": 1.8215343952178955,
"logps/chosen": -90.60624694824219,
"logps/rejected": -79.04981994628906,
"loss": 0.6945,
"rewards/accuracies": 0.4749999940395355,
"rewards/chosen": 0.0014454321935772896,
"rewards/margins": -0.004174981266260147,
"rewards/rejected": 0.005620413459837437,
"step": 20
},
{
"epoch": 1.0810810810810811,
"grad_norm": 0.5554109215736389,
"learning_rate": 2.7777777777777783e-06,
"logits/chosen": 1.7980501651763916,
"logits/rejected": 1.841104507446289,
"logps/chosen": -80.78228759765625,
"logps/rejected": -85.08882141113281,
"loss": 0.693,
"rewards/accuracies": 0.4749999940395355,
"rewards/chosen": 0.0042568682692945,
"rewards/margins": -0.0038191028870642185,
"rewards/rejected": 0.008075973019003868,
"step": 30
},
{
"epoch": 1.4414414414414414,
"grad_norm": 0.537497341632843,
"learning_rate": 3.7037037037037037e-06,
"logits/chosen": 1.7320470809936523,
"logits/rejected": 1.7411377429962158,
"logps/chosen": -82.24813842773438,
"logps/rejected": -80.90709686279297,
"loss": 0.6952,
"rewards/accuracies": 0.42500001192092896,
"rewards/chosen": -0.00350201572291553,
"rewards/margins": -0.012091752141714096,
"rewards/rejected": 0.008589735254645348,
"step": 40
},
{
"epoch": 1.8018018018018018,
"grad_norm": 0.5023094415664673,
"learning_rate": 4.62962962962963e-06,
"logits/chosen": 1.8292573690414429,
"logits/rejected": 1.8632844686508179,
"logps/chosen": -85.98481750488281,
"logps/rejected": -86.14008331298828,
"loss": 0.6928,
"rewards/accuracies": 0.4124999940395355,
"rewards/chosen": 0.00168000184930861,
"rewards/margins": -0.011136507615447044,
"rewards/rejected": 0.01281650923192501,
"step": 50
},
{
"epoch": 2.1621621621621623,
"grad_norm": 0.4981901943683624,
"learning_rate": 4.998119881260576e-06,
"logits/chosen": 1.8536121845245361,
"logits/rejected": 1.7948782444000244,
"logps/chosen": -90.0439682006836,
"logps/rejected": -79.8309555053711,
"loss": 0.6914,
"rewards/accuracies": 0.48750001192092896,
"rewards/chosen": 0.0025153260212391615,
"rewards/margins": -0.0021209525875747204,
"rewards/rejected": 0.004636278375983238,
"step": 60
},
{
"epoch": 2.5225225225225225,
"grad_norm": 0.6208468079566956,
"learning_rate": 4.9866405060165044e-06,
"logits/chosen": 1.7845121622085571,
"logits/rejected": 1.8721017837524414,
"logps/chosen": -74.6461181640625,
"logps/rejected": -98.91078186035156,
"loss": 0.694,
"rewards/accuracies": 0.5249999761581421,
"rewards/chosen": 0.00022823773906566203,
"rewards/margins": 0.0024594543501734734,
"rewards/rejected": -0.00223121652379632,
"step": 70
},
{
"epoch": 2.8828828828828827,
"grad_norm": 0.5074446797370911,
"learning_rate": 4.964774158361991e-06,
"logits/chosen": 1.8522154092788696,
"logits/rejected": 1.8161392211914062,
"logps/chosen": -89.16864013671875,
"logps/rejected": -81.97349548339844,
"loss": 0.6911,
"rewards/accuracies": 0.5249999761581421,
"rewards/chosen": 0.006215038243681192,
"rewards/margins": 0.005521883722394705,
"rewards/rejected": 0.0006931538810022175,
"step": 80
},
{
"epoch": 3.2432432432432434,
"grad_norm": 0.6148263812065125,
"learning_rate": 4.93261217644956e-06,
"logits/chosen": 1.7915083169937134,
"logits/rejected": 1.7579513788223267,
"logps/chosen": -88.66618347167969,
"logps/rejected": -80.2238998413086,
"loss": 0.6918,
"rewards/accuracies": 0.574999988079071,
"rewards/chosen": 0.005774274934083223,
"rewards/margins": 0.016623441129922867,
"rewards/rejected": -0.010849165730178356,
"step": 90
},
{
"epoch": 3.6036036036036037,
"grad_norm": 0.6143240332603455,
"learning_rate": 4.8902889044347e-06,
"logits/chosen": 1.6886920928955078,
"logits/rejected": 1.8121178150177002,
"logps/chosen": -72.908203125,
"logps/rejected": -92.44017028808594,
"loss": 0.6911,
"rewards/accuracies": 0.48750001192092896,
"rewards/chosen": -0.0015121791511774063,
"rewards/margins": 0.0036420777905732393,
"rewards/rejected": -0.005154256708920002,
"step": 100
},
{
"epoch": 3.963963963963964,
"grad_norm": 0.7140340209007263,
"learning_rate": 4.837981131305475e-06,
"logits/chosen": 1.7481105327606201,
"logits/rejected": 1.723141074180603,
"logps/chosen": -80.63452911376953,
"logps/rejected": -72.64492797851562,
"loss": 0.6894,
"rewards/accuracies": 0.48750001192092896,
"rewards/chosen": -0.006246576085686684,
"rewards/margins": -0.0035218377597630024,
"rewards/rejected": -0.0027247383259236813,
"step": 110
},
{
"epoch": 4.324324324324325,
"grad_norm": 1.1645854711532593,
"learning_rate": 4.775907352415367e-06,
"logits/chosen": 1.7416290044784546,
"logits/rejected": 1.8237574100494385,
"logps/chosen": -89.01248931884766,
"logps/rejected": -92.31901550292969,
"loss": 0.6869,
"rewards/accuracies": 0.612500011920929,
"rewards/chosen": -0.008855113759636879,
"rewards/margins": 0.013151508755981922,
"rewards/rejected": -0.022006623446941376,
"step": 120
},
{
"epoch": 4.684684684684685,
"grad_norm": 0.6638664603233337,
"learning_rate": 4.70432685680402e-06,
"logits/chosen": 1.7124770879745483,
"logits/rejected": 1.7777938842773438,
"logps/chosen": -84.87271881103516,
"logps/rejected": -92.4839096069336,
"loss": 0.6855,
"rewards/accuracies": 0.574999988079071,
"rewards/chosen": -0.004437069408595562,
"rewards/margins": 0.019811339676380157,
"rewards/rejected": -0.024248410016298294,
"step": 130
},
{
"epoch": 5.045045045045045,
"grad_norm": 0.785169780254364,
"learning_rate": 4.623538644118244e-06,
"logits/chosen": 1.7838690280914307,
"logits/rejected": 1.8052574396133423,
"logps/chosen": -94.57842254638672,
"logps/rejected": -80.6390609741211,
"loss": 0.6836,
"rewards/accuracies": 0.48750001192092896,
"rewards/chosen": -0.026147600263357162,
"rewards/margins": 0.010936126112937927,
"rewards/rejected": -0.03708372637629509,
"step": 140
},
{
"epoch": 5.405405405405405,
"grad_norm": 0.745952844619751,
"learning_rate": 4.533880175657419e-06,
"logits/chosen": 1.7925735712051392,
"logits/rejected": 1.7987396717071533,
"logps/chosen": -82.4854507446289,
"logps/rejected": -77.50787353515625,
"loss": 0.6767,
"rewards/accuracies": 0.625,
"rewards/chosen": -0.013469865545630455,
"rewards/margins": 0.04023212194442749,
"rewards/rejected": -0.053701985627412796,
"step": 150
},
{
"epoch": 5.7657657657657655,
"grad_norm": 0.7885063290596008,
"learning_rate": 4.435725964760331e-06,
"logits/chosen": 1.7725191116333008,
"logits/rejected": 1.8345234394073486,
"logps/chosen": -78.7154541015625,
"logps/rejected": -80.75708770751953,
"loss": 0.6721,
"rewards/accuracies": 0.675000011920929,
"rewards/chosen": -0.018351273611187935,
"rewards/margins": 0.05368901416659355,
"rewards/rejected": -0.07204028964042664,
"step": 160
},
{
"epoch": 6.126126126126126,
"grad_norm": 0.7734187245368958,
"learning_rate": 4.329486012421531e-06,
"logits/chosen": 1.827528715133667,
"logits/rejected": 1.7939176559448242,
"logps/chosen": -75.33561706542969,
"logps/rejected": -71.34326934814453,
"loss": 0.6775,
"rewards/accuracies": 0.612500011920929,
"rewards/chosen": -0.038218818604946136,
"rewards/margins": 0.06159939616918564,
"rewards/rejected": -0.09981821477413177,
"step": 170
},
{
"epoch": 6.486486486486487,
"grad_norm": 0.7796682715415955,
"learning_rate": 4.215604094671835e-06,
"logits/chosen": 1.729288101196289,
"logits/rejected": 1.7497504949569702,
"logps/chosen": -83.02750396728516,
"logps/rejected": -78.53959655761719,
"loss": 0.6632,
"rewards/accuracies": 0.699999988079071,
"rewards/chosen": -0.03207743167877197,
"rewards/margins": 0.06858544796705246,
"rewards/rejected": -0.10066288709640503,
"step": 180
},
{
"epoch": 6.846846846846847,
"grad_norm": 0.8168752193450928,
"learning_rate": 4.094555908876765e-06,
"logits/chosen": 1.747865080833435,
"logits/rejected": 1.7993282079696655,
"logps/chosen": -78.75782775878906,
"logps/rejected": -85.8946304321289,
"loss": 0.668,
"rewards/accuracies": 0.612500011920929,
"rewards/chosen": -0.0766468197107315,
"rewards/margins": 0.045263100415468216,
"rewards/rejected": -0.12190990149974823,
"step": 190
},
{
"epoch": 7.207207207207207,
"grad_norm": 0.9816763997077942,
"learning_rate": 3.966847086696045e-06,
"logits/chosen": 1.792106032371521,
"logits/rejected": 1.7798125743865967,
"logps/chosen": -93.53439331054688,
"logps/rejected": -73.8904037475586,
"loss": 0.6582,
"rewards/accuracies": 0.6875,
"rewards/chosen": -0.06042450666427612,
"rewards/margins": 0.10538975894451141,
"rewards/rejected": -0.16581428050994873,
"step": 200
},
{
"epoch": 7.5675675675675675,
"grad_norm": 0.9053374528884888,
"learning_rate": 3.833011082004229e-06,
"logits/chosen": 1.7648969888687134,
"logits/rejected": 1.7725557088851929,
"logps/chosen": -79.61383056640625,
"logps/rejected": -71.8048095703125,
"loss": 0.6551,
"rewards/accuracies": 0.6875,
"rewards/chosen": -0.040038011968135834,
"rewards/margins": 0.11104954779148102,
"rewards/rejected": -0.15108755230903625,
"step": 210
},
{
"epoch": 7.927927927927928,
"grad_norm": 1.1332669258117676,
"learning_rate": 3.693606942594873e-06,
"logits/chosen": 1.8605678081512451,
"logits/rejected": 1.91280996799469,
"logps/chosen": -74.39783477783203,
"logps/rejected": -90.66893005371094,
"loss": 0.6565,
"rewards/accuracies": 0.637499988079071,
"rewards/chosen": -0.08056517690420151,
"rewards/margins": 0.0968993678689003,
"rewards/rejected": -0.1774645447731018,
"step": 220
},
{
"epoch": 8.288288288288289,
"grad_norm": 1.0496221780776978,
"learning_rate": 3.549216974976073e-06,
"logits/chosen": 1.8636070489883423,
"logits/rejected": 1.7996151447296143,
"logps/chosen": -89.9018783569336,
"logps/rejected": -74.14125061035156,
"loss": 0.6423,
"rewards/accuracies": 0.6875,
"rewards/chosen": -0.05598217993974686,
"rewards/margins": 0.14807265996932983,
"rewards/rejected": -0.2040548026561737,
"step": 230
},
{
"epoch": 8.64864864864865,
"grad_norm": 0.8506491780281067,
"learning_rate": 3.400444312011776e-06,
"logits/chosen": 1.7753232717514038,
"logits/rejected": 1.7647806406021118,
"logps/chosen": -88.50201416015625,
"logps/rejected": -81.80894470214844,
"loss": 0.6358,
"rewards/accuracies": 0.6625000238418579,
"rewards/chosen": -0.09656485170125961,
"rewards/margins": 0.11554913222789764,
"rewards/rejected": -0.21211397647857666,
"step": 240
},
{
"epoch": 9.00900900900901,
"grad_norm": 0.9554071426391602,
"learning_rate": 3.2479103935691047e-06,
"logits/chosen": 1.7528200149536133,
"logits/rejected": 1.7827808856964111,
"logps/chosen": -90.01268005371094,
"logps/rejected": -89.0714340209961,
"loss": 0.6374,
"rewards/accuracies": 0.675000011920929,
"rewards/chosen": -0.07412372529506683,
"rewards/margins": 0.1321595013141632,
"rewards/rejected": -0.20628324151039124,
"step": 250
},
{
"epoch": 9.36936936936937,
"grad_norm": 0.9363342523574829,
"learning_rate": 3.092252370695298e-06,
"logits/chosen": 1.6990101337432861,
"logits/rejected": 1.8281656503677368,
"logps/chosen": -71.15428161621094,
"logps/rejected": -79.72063446044922,
"loss": 0.6303,
"rewards/accuracies": 0.762499988079071,
"rewards/chosen": -0.08240491151809692,
"rewards/margins": 0.16954098641872406,
"rewards/rejected": -0.2519459128379822,
"step": 260
},
{
"epoch": 9.72972972972973,
"grad_norm": 1.023535966873169,
"learning_rate": 2.9341204441673267e-06,
"logits/chosen": 1.7418006658554077,
"logits/rejected": 1.7500028610229492,
"logps/chosen": -82.24595642089844,
"logps/rejected": -91.16307067871094,
"loss": 0.6321,
"rewards/accuracies": 0.6875,
"rewards/chosen": -0.13560767471790314,
"rewards/margins": 0.1204828992486,
"rewards/rejected": -0.25609058141708374,
"step": 270
},
{
"epoch": 10.09009009009009,
"grad_norm": 1.0393471717834473,
"learning_rate": 2.7741751485313295e-06,
"logits/chosen": 1.8437154293060303,
"logits/rejected": 1.8877627849578857,
"logps/chosen": -68.61182403564453,
"logps/rejected": -77.12557220458984,
"loss": 0.6109,
"rewards/accuracies": 0.737500011920929,
"rewards/chosen": -0.11025551706552505,
"rewards/margins": 0.14969798922538757,
"rewards/rejected": -0.25995349884033203,
"step": 280
},
{
"epoch": 10.45045045045045,
"grad_norm": 0.9554691910743713,
"learning_rate": 2.6130845929767662e-06,
"logits/chosen": 1.7998764514923096,
"logits/rejected": 1.796451210975647,
"logps/chosen": -72.6626205444336,
"logps/rejected": -74.15937042236328,
"loss": 0.6123,
"rewards/accuracies": 0.737500011920929,
"rewards/chosen": -0.13948099315166473,
"rewards/margins": 0.18039894104003906,
"rewards/rejected": -0.319879949092865,
"step": 290
},
{
"epoch": 10.81081081081081,
"grad_norm": 1.1667226552963257,
"learning_rate": 2.4515216705704396e-06,
"logits/chosen": 1.8437564373016357,
"logits/rejected": 1.9043811559677124,
"logps/chosen": -78.18916320800781,
"logps/rejected": -90.88524627685547,
"loss": 0.6218,
"rewards/accuracies": 0.7875000238418579,
"rewards/chosen": -0.12091793864965439,
"rewards/margins": 0.20490169525146484,
"rewards/rejected": -0.32581964135169983,
"step": 300
},
{
"epoch": 11.17117117117117,
"grad_norm": 1.2568660974502563,
"learning_rate": 2.290161247507733e-06,
"logits/chosen": 1.7295608520507812,
"logits/rejected": 1.8652616739273071,
"logps/chosen": -79.8567886352539,
"logps/rejected": -87.59798431396484,
"loss": 0.6067,
"rewards/accuracies": 0.8374999761581421,
"rewards/chosen": -0.10961383581161499,
"rewards/margins": 0.2835196554660797,
"rewards/rejected": -0.3931335210800171,
"step": 310
},
{
"epoch": 11.531531531531531,
"grad_norm": 1.115491271018982,
"learning_rate": 2.129677344121879e-06,
"logits/chosen": 1.803063988685608,
"logits/rejected": 1.820227861404419,
"logps/chosen": -74.64582061767578,
"logps/rejected": -72.14736938476562,
"loss": 0.6113,
"rewards/accuracies": 0.75,
"rewards/chosen": -0.09987158328294754,
"rewards/margins": 0.2059938907623291,
"rewards/rejected": -0.30586546659469604,
"step": 320
},
{
"epoch": 11.891891891891891,
"grad_norm": 0.9227492213249207,
"learning_rate": 1.970740319426474e-06,
"logits/chosen": 1.7561572790145874,
"logits/rejected": 1.789878249168396,
"logps/chosen": -85.27911376953125,
"logps/rejected": -77.4220962524414,
"loss": 0.5891,
"rewards/accuracies": 0.7875000238418579,
"rewards/chosen": -0.14200571179389954,
"rewards/margins": 0.22272682189941406,
"rewards/rejected": -0.3647325336933136,
"step": 330
},
{
"epoch": 12.252252252252251,
"grad_norm": 1.541570782661438,
"learning_rate": 1.8140140709517467e-06,
"logits/chosen": 1.7364771366119385,
"logits/rejected": 1.7182557582855225,
"logps/chosen": -76.05533599853516,
"logps/rejected": -81.50830078125,
"loss": 0.5994,
"rewards/accuracies": 0.75,
"rewards/chosen": -0.12165775150060654,
"rewards/margins": 0.19019995629787445,
"rewards/rejected": -0.3118577301502228,
"step": 340
},
{
"epoch": 12.612612612612612,
"grad_norm": 1.1125155687332153,
"learning_rate": 1.6601532615711452e-06,
"logits/chosen": 1.7223026752471924,
"logits/rejected": 1.8163812160491943,
"logps/chosen": -71.94869232177734,
"logps/rejected": -90.12442016601562,
"loss": 0.6013,
"rewards/accuracies": 0.7124999761581421,
"rewards/chosen": -0.16840626299381256,
"rewards/margins": 0.1776614487171173,
"rewards/rejected": -0.3460676968097687,
"step": 350
},
{
"epoch": 12.972972972972974,
"grad_norm": 1.0530396699905396,
"learning_rate": 1.509800584902108e-06,
"logits/chosen": 1.795819878578186,
"logits/rejected": 1.8446025848388672,
"logps/chosen": -82.24161529541016,
"logps/rejected": -79.51258850097656,
"loss": 0.5857,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": -0.13156726956367493,
"rewards/margins": 0.2468404471874237,
"rewards/rejected": -0.37840771675109863,
"step": 360
},
{
"epoch": 13.333333333333334,
"grad_norm": 1.0298306941986084,
"learning_rate": 1.3635840807037487e-06,
"logits/chosen": 1.8054157495498657,
"logits/rejected": 1.8246568441390991,
"logps/chosen": -85.40760803222656,
"logps/rejected": -78.77482604980469,
"loss": 0.5943,
"rewards/accuracies": 0.762499988079071,
"rewards/chosen": -0.10383795201778412,
"rewards/margins": 0.2493254393339157,
"rewards/rejected": -0.35316339135169983,
"step": 370
},
{
"epoch": 13.693693693693694,
"grad_norm": 1.0601342916488647,
"learning_rate": 1.2221145114853172e-06,
"logits/chosen": 1.6507200002670288,
"logits/rejected": 1.720569372177124,
"logps/chosen": -74.90982055664062,
"logps/rejected": -92.51995849609375,
"loss": 0.5789,
"rewards/accuracies": 0.7875000238418579,
"rewards/chosen": -0.13705268502235413,
"rewards/margins": 0.2740176022052765,
"rewards/rejected": -0.4110702872276306,
"step": 380
},
{
"epoch": 14.054054054054054,
"grad_norm": 1.1135063171386719,
"learning_rate": 1.085982811283654e-06,
"logits/chosen": 1.7133989334106445,
"logits/rejected": 1.7338130474090576,
"logps/chosen": -80.55327606201172,
"logps/rejected": -82.32875061035156,
"loss": 0.5847,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": -0.12969203293323517,
"rewards/margins": 0.26957422494888306,
"rewards/rejected": -0.39926621317863464,
"step": 390
},
{
"epoch": 14.414414414414415,
"grad_norm": 1.0821375846862793,
"learning_rate": 9.557576172663577e-07,
"logits/chosen": 1.7035369873046875,
"logits/rejected": 1.7184028625488281,
"logps/chosen": -80.99266052246094,
"logps/rejected": -85.05413818359375,
"loss": 0.583,
"rewards/accuracies": 0.824999988079071,
"rewards/chosen": -0.1366995871067047,
"rewards/margins": 0.268393874168396,
"rewards/rejected": -0.4050934910774231,
"step": 400
}
],
"logging_steps": 10,
"max_steps": 540,
"num_input_tokens_seen": 0,
"num_train_epochs": 20,
"save_steps": 50,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.35529556182093e+18,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}