{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9989094874591058,
  "eval_steps": 400,
  "global_step": 458,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.010905125408942203,
      "grad_norm": 64.1503528533801,
      "learning_rate": 8.695652173913042e-08,
      "logits/chosen": -10.94892692565918,
      "logits/rejected": -10.934142112731934,
      "logps/chosen": -0.8995465040206909,
      "logps/rejected": -0.8591210246086121,
      "loss": 5.7639,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": -8.995465278625488,
      "rewards/margins": -0.40425485372543335,
      "rewards/rejected": -8.59121036529541,
      "step": 5
    },
    {
      "epoch": 0.021810250817884406,
      "grad_norm": 111.15906224445196,
      "learning_rate": 1.7391304347826085e-07,
      "logits/chosen": -9.873212814331055,
      "logits/rejected": -9.899297714233398,
      "logps/chosen": -1.1384799480438232,
      "logps/rejected": -1.158919095993042,
      "loss": 6.1082,
      "rewards/accuracies": 0.518750011920929,
      "rewards/chosen": -11.384800910949707,
      "rewards/margins": 0.20439176261425018,
      "rewards/rejected": -11.589191436767578,
      "step": 10
    },
    {
      "epoch": 0.03271537622682661,
      "grad_norm": 96.8211465004871,
      "learning_rate": 2.608695652173913e-07,
      "logits/chosen": -10.422977447509766,
      "logits/rejected": -10.321990013122559,
      "logps/chosen": -0.8700820803642273,
      "logps/rejected": -0.840315043926239,
      "loss": 5.5576,
      "rewards/accuracies": 0.48124998807907104,
      "rewards/chosen": -8.700819969177246,
      "rewards/margins": -0.29766958951950073,
      "rewards/rejected": -8.40315055847168,
      "step": 15
    },
    {
      "epoch": 0.04362050163576881,
      "grad_norm": 59.01122752402013,
      "learning_rate": 3.478260869565217e-07,
      "logits/chosen": -9.897313117980957,
      "logits/rejected": -9.771203994750977,
      "logps/chosen": -0.9016021490097046,
      "logps/rejected": -0.9813264608383179,
      "loss": 5.4183,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -9.016023635864258,
      "rewards/margins": 0.797242283821106,
      "rewards/rejected": -9.813264846801758,
      "step": 20
    },
    {
      "epoch": 0.05452562704471101,
      "grad_norm": 77.6935326260568,
      "learning_rate": 4.3478260869565214e-07,
      "logits/chosen": -10.004715919494629,
      "logits/rejected": -10.082869529724121,
      "logps/chosen": -0.7554069757461548,
      "logps/rejected": -0.7040703296661377,
      "loss": 5.4042,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -7.554069519042969,
      "rewards/margins": -0.5133668780326843,
      "rewards/rejected": -7.040703773498535,
      "step": 25
    },
    {
      "epoch": 0.06543075245365322,
      "grad_norm": 53.25393883515416,
      "learning_rate": 5.217391304347826e-07,
      "logits/chosen": -9.147699356079102,
      "logits/rejected": -9.047504425048828,
      "logps/chosen": -0.6732769012451172,
      "logps/rejected": -0.7553219199180603,
      "loss": 5.079,
      "rewards/accuracies": 0.5562499761581421,
      "rewards/chosen": -6.732769012451172,
      "rewards/margins": 0.8204498291015625,
      "rewards/rejected": -7.553219795227051,
      "step": 30
    },
    {
      "epoch": 0.07633587786259542,
      "grad_norm": 49.376076232037846,
      "learning_rate": 6.08695652173913e-07,
      "logits/chosen": -9.227429389953613,
      "logits/rejected": -9.0322847366333,
      "logps/chosen": -0.6311928033828735,
      "logps/rejected": -0.6472190022468567,
      "loss": 4.9879,
      "rewards/accuracies": 0.543749988079071,
      "rewards/chosen": -6.311927795410156,
      "rewards/margins": 0.160261332988739,
      "rewards/rejected": -6.472189426422119,
      "step": 35
    },
    {
      "epoch": 0.08724100327153762,
      "grad_norm": 71.82957946446516,
      "learning_rate": 6.956521739130434e-07,
      "logits/chosen": -8.839519500732422,
      "logits/rejected": -8.598153114318848,
      "logps/chosen": -0.5917301177978516,
      "logps/rejected": -0.6095324158668518,
      "loss": 4.9341,
      "rewards/accuracies": 0.543749988079071,
      "rewards/chosen": -5.917301177978516,
      "rewards/margins": 0.1780225783586502,
      "rewards/rejected": -6.09532356262207,
      "step": 40
    },
    {
      "epoch": 0.09814612868047982,
      "grad_norm": 62.13252777111053,
      "learning_rate": 7.826086956521739e-07,
      "logits/chosen": -8.288870811462402,
      "logits/rejected": -8.250112533569336,
      "logps/chosen": -0.5544935464859009,
      "logps/rejected": -0.5844836235046387,
      "loss": 4.6875,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -5.5449347496032715,
      "rewards/margins": 0.29990124702453613,
      "rewards/rejected": -5.844836235046387,
      "step": 45
    },
    {
      "epoch": 0.10905125408942203,
      "grad_norm": 50.37809623047656,
      "learning_rate": 7.998139534493406e-07,
      "logits/chosen": -8.140437126159668,
      "logits/rejected": -7.910015106201172,
      "logps/chosen": -0.638617753982544,
      "logps/rejected": -0.7139040231704712,
      "loss": 4.5683,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -6.386178493499756,
      "rewards/margins": 0.7528623938560486,
      "rewards/rejected": -7.139040470123291,
      "step": 50
    },
    {
      "epoch": 0.11995637949836423,
      "grad_norm": 61.0124851348064,
      "learning_rate": 7.990584359406726e-07,
      "logits/chosen": -9.38193416595459,
      "logits/rejected": -9.200593948364258,
      "logps/chosen": -0.6021402478218079,
      "logps/rejected": -0.7378085255622864,
      "loss": 4.6702,
      "rewards/accuracies": 0.5562499761581421,
      "rewards/chosen": -6.021402359008789,
      "rewards/margins": 1.3566832542419434,
      "rewards/rejected": -7.378085136413574,
      "step": 55
    },
    {
      "epoch": 0.13086150490730644,
      "grad_norm": 58.34041763150138,
      "learning_rate": 7.977229168076815e-07,
      "logits/chosen": -8.994357109069824,
      "logits/rejected": -9.095643043518066,
      "logps/chosen": -0.6254938244819641,
      "logps/rejected": -0.7731696963310242,
      "loss": 4.4672,
      "rewards/accuracies": 0.606249988079071,
      "rewards/chosen": -6.254937648773193,
      "rewards/margins": 1.4767591953277588,
      "rewards/rejected": -7.731697082519531,
      "step": 60
    },
    {
      "epoch": 0.14176663031624864,
      "grad_norm": 54.06832565592651,
      "learning_rate": 7.95809337127655e-07,
      "logits/chosen": -9.56706428527832,
      "logits/rejected": -9.66978931427002,
      "logps/chosen": -0.6539489030838013,
      "logps/rejected": -0.7899436950683594,
      "loss": 4.4539,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -6.539488315582275,
      "rewards/margins": 1.359948992729187,
      "rewards/rejected": -7.899436950683594,
      "step": 65
    },
    {
      "epoch": 0.15267175572519084,
      "grad_norm": 61.12821185450308,
      "learning_rate": 7.933204781457008e-07,
      "logits/chosen": -9.499263763427734,
      "logits/rejected": -9.496051788330078,
      "logps/chosen": -0.6623054146766663,
      "logps/rejected": -0.7747452259063721,
      "loss": 4.4992,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": -6.623053550720215,
      "rewards/margins": 1.1243983507156372,
      "rewards/rejected": -7.747453212738037,
      "step": 70
    },
    {
      "epoch": 0.16357688113413305,
      "grad_norm": 51.861021015777716,
      "learning_rate": 7.902599572324151e-07,
      "logits/chosen": -9.391631126403809,
      "logits/rejected": -9.375069618225098,
      "logps/chosen": -0.6535338163375854,
      "logps/rejected": -0.8060415983200073,
      "loss": 4.4979,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -6.535338401794434,
      "rewards/margins": 1.5250780582427979,
      "rewards/rejected": -8.060416221618652,
      "step": 75
    },
    {
      "epoch": 0.17448200654307525,
      "grad_norm": 70.13525940500534,
      "learning_rate": 7.86632222626304e-07,
      "logits/chosen": -9.956920623779297,
      "logits/rejected": -9.78042984008789,
      "logps/chosen": -0.6164920926094055,
      "logps/rejected": -0.7590925097465515,
      "loss": 4.429,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": -6.164921760559082,
      "rewards/margins": 1.4260046482086182,
      "rewards/rejected": -7.590925693511963,
      "step": 80
    },
    {
      "epoch": 0.18538713195201745,
      "grad_norm": 73.7268648843484,
      "learning_rate": 7.824425469686014e-07,
      "logits/chosen": -9.828500747680664,
      "logits/rejected": -9.678921699523926,
      "logps/chosen": -0.6361075043678284,
      "logps/rejected": -0.7591807246208191,
      "loss": 4.438,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": -6.361075401306152,
      "rewards/margins": 1.2307318449020386,
      "rewards/rejected": -7.591805934906006,
      "step": 85
    },
    {
      "epoch": 0.19629225736095965,
      "grad_norm": 69.62622029011087,
      "learning_rate": 7.776970196398794e-07,
      "logits/chosen": -9.843175888061523,
      "logits/rejected": -9.792081832885742,
      "logps/chosen": -0.6243542432785034,
      "logps/rejected": -0.7286983132362366,
      "loss": 4.3737,
      "rewards/accuracies": 0.668749988079071,
      "rewards/chosen": -6.2435431480407715,
      "rewards/margins": 1.0434401035308838,
      "rewards/rejected": -7.286982536315918,
      "step": 90
    },
    {
      "epoch": 0.20719738276990185,
      "grad_norm": 66.25031731585119,
      "learning_rate": 7.724025379095878e-07,
      "logits/chosen": -10.194116592407227,
      "logits/rejected": -10.006689071655273,
      "logps/chosen": -0.6633736491203308,
      "logps/rejected": -0.85277259349823,
      "loss": 4.2355,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -6.633737087249756,
      "rewards/margins": 1.8939902782440186,
      "rewards/rejected": -8.527726173400879,
      "step": 95
    },
    {
      "epoch": 0.21810250817884405,
      "grad_norm": 75.08429330907302,
      "learning_rate": 7.665667969113885e-07,
      "logits/chosen": -9.845739364624023,
      "logits/rejected": -9.587841033935547,
      "logps/chosen": -0.6887632608413696,
      "logps/rejected": -0.872734546661377,
      "loss": 4.2625,
      "rewards/accuracies": 0.668749988079071,
      "rewards/chosen": -6.887633323669434,
      "rewards/margins": 1.839712381362915,
      "rewards/rejected": -8.72734546661377,
      "step": 100
    },
    {
      "epoch": 0.22900763358778625,
      "grad_norm": 69.02863249400193,
      "learning_rate": 7.601982784588541e-07,
      "logits/chosen": -10.62452220916748,
      "logits/rejected": -10.43299674987793,
      "logps/chosen": -0.7019127011299133,
      "logps/rejected": -0.7768973112106323,
      "loss": 4.2785,
      "rewards/accuracies": 0.6187499761581421,
      "rewards/chosen": -7.019126892089844,
      "rewards/margins": 0.7498467564582825,
      "rewards/rejected": -7.768974304199219,
      "step": 105
    },
    {
      "epoch": 0.23991275899672845,
      "grad_norm": 71.91219250814,
      "learning_rate": 7.533062387177842e-07,
      "logits/chosen": -10.65683650970459,
      "logits/rejected": -10.636886596679688,
      "logps/chosen": -0.7213658094406128,
      "logps/rejected": -0.8907195329666138,
      "loss": 4.1753,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -7.213658332824707,
      "rewards/margins": 1.6935373544692993,
      "rewards/rejected": -8.907196044921875,
      "step": 110
    },
    {
      "epoch": 0.25081788440567065,
      "grad_norm": 73.69796183664997,
      "learning_rate": 7.459006947530617e-07,
      "logits/chosen": -11.56556224822998,
      "logits/rejected": -11.49560260772705,
      "logps/chosen": -0.838672935962677,
      "logps/rejected": -1.0182416439056396,
      "loss": 4.0384,
      "rewards/accuracies": 0.668749988079071,
      "rewards/chosen": -8.38672924041748,
      "rewards/margins": 1.7956879138946533,
      "rewards/rejected": -10.182416915893555,
      "step": 115
    },
    {
      "epoch": 0.2617230098146129,
      "grad_norm": 94.92610924652415,
      "learning_rate": 7.379924099695959e-07,
      "logits/chosen": -11.925847053527832,
      "logits/rejected": -11.710810661315918,
      "logps/chosen": -0.8436716198921204,
      "logps/rejected": -1.0012091398239136,
      "loss": 4.113,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -8.436716079711914,
      "rewards/margins": 1.5753746032714844,
      "rewards/rejected": -10.012091636657715,
      "step": 120
    },
    {
      "epoch": 0.27262813522355506,
      "grad_norm": 113.62807962706827,
      "learning_rate": 7.29592878468518e-07,
      "logits/chosen": -13.345464706420898,
      "logits/rejected": -12.9557523727417,
      "logps/chosen": -0.8897609710693359,
      "logps/rejected": -1.0490643978118896,
      "loss": 3.9421,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -8.897610664367676,
      "rewards/margins": 1.5930334329605103,
      "rewards/rejected": -10.490643501281738,
      "step": 125
    },
    {
      "epoch": 0.2835332606324973,
      "grad_norm": 118.6845458542213,
      "learning_rate": 7.207143083413642e-07,
      "logits/chosen": -12.795938491821289,
      "logits/rejected": -12.771161079406738,
      "logps/chosen": -0.9606706500053406,
      "logps/rejected": -1.1983877420425415,
      "loss": 3.7793,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": -9.606706619262695,
      "rewards/margins": 2.3771698474884033,
      "rewards/rejected": -11.983877182006836,
      "step": 130
    },
    {
      "epoch": 0.29443838604143946,
      "grad_norm": 112.00779108354124,
      "learning_rate": 7.11369603926526e-07,
      "logits/chosen": -13.919995307922363,
      "logits/rejected": -14.025039672851562,
      "logps/chosen": -1.1236939430236816,
      "logps/rejected": -1.3567845821380615,
      "loss": 3.8057,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": -11.236939430236816,
      "rewards/margins": 2.3309078216552734,
      "rewards/rejected": -13.567845344543457,
      "step": 135
    },
    {
      "epoch": 0.3053435114503817,
      "grad_norm": 121.41466704799392,
      "learning_rate": 7.015723470537589e-07,
      "logits/chosen": -14.653108596801758,
      "logits/rejected": -14.910409927368164,
      "logps/chosen": -1.2697908878326416,
      "logps/rejected": -1.5090426206588745,
      "loss": 3.7783,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -12.69791030883789,
      "rewards/margins": 2.392517566680908,
      "rewards/rejected": -15.090426445007324,
      "step": 140
    },
    {
      "epoch": 0.31624863685932386,
      "grad_norm": 118.0007773777601,
      "learning_rate": 6.913367773040073e-07,
      "logits/chosen": -15.646504402160645,
      "logits/rejected": -15.359659194946289,
      "logps/chosen": -1.2195041179656982,
      "logps/rejected": -1.5088005065917969,
      "loss": 3.626,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": -12.195042610168457,
      "rewards/margins": 2.892961263656616,
      "rewards/rejected": -15.088003158569336,
      "step": 145
    },
    {
      "epoch": 0.3271537622682661,
      "grad_norm": 123.35286255999112,
      "learning_rate": 6.806777713132373e-07,
      "logits/chosen": -15.674619674682617,
      "logits/rejected": -15.5663423538208,
      "logps/chosen": -1.3831182718276978,
      "logps/rejected": -1.60293710231781,
      "loss": 3.76,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -13.831182479858398,
      "rewards/margins": 2.1981873512268066,
      "rewards/rejected": -16.029369354248047,
      "step": 150
    },
    {
      "epoch": 0.33805888767720826,
      "grad_norm": 131.11270501363964,
      "learning_rate": 6.69610821150358e-07,
      "logits/chosen": -15.65868854522705,
      "logits/rejected": -15.700967788696289,
      "logps/chosen": -1.3463469743728638,
      "logps/rejected": -1.6862995624542236,
      "loss": 3.4489,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -13.463468551635742,
      "rewards/margins": 3.3995232582092285,
      "rewards/rejected": -16.862991333007812,
      "step": 155
    },
    {
      "epoch": 0.3489640130861505,
      "grad_norm": 135.1222468933701,
      "learning_rate": 6.581520118006564e-07,
      "logits/chosen": -15.956563949584961,
      "logits/rejected": -16.274681091308594,
      "logps/chosen": -1.4811228513717651,
      "logps/rejected": -1.8506397008895874,
      "loss": 3.4319,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -14.81122875213623,
      "rewards/margins": 3.695168972015381,
      "rewards/rejected": -18.506397247314453,
      "step": 160
    },
    {
      "epoch": 0.35986913849509267,
      "grad_norm": 117.31734546028652,
      "learning_rate": 6.46317997787473e-07,
      "logits/chosen": -16.915264129638672,
      "logits/rejected": -17.207149505615234,
      "logps/chosen": -1.4308239221572876,
      "logps/rejected": -1.7504551410675049,
      "loss": 3.7315,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -14.30823802947998,
      "rewards/margins": 3.1963133811950684,
      "rewards/rejected": -17.504552841186523,
      "step": 165
    },
    {
      "epoch": 0.3707742639040349,
      "grad_norm": 135.9602274797816,
      "learning_rate": 6.341259789660969e-07,
      "logits/chosen": -16.72784996032715,
      "logits/rejected": -16.691822052001953,
      "logps/chosen": -1.5182801485061646,
      "logps/rejected": -1.922546148300171,
      "loss": 3.3263,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -15.182802200317383,
      "rewards/margins": 4.042659759521484,
      "rewards/rejected": -19.225460052490234,
      "step": 170
    },
    {
      "epoch": 0.3816793893129771,
      "grad_norm": 149.4264363196396,
      "learning_rate": 6.215936755250596e-07,
      "logits/chosen": -17.34011459350586,
      "logits/rejected": -17.421663284301758,
      "logps/chosen": -1.496767282485962,
      "logps/rejected": -1.85422682762146,
      "loss": 3.5029,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": -14.967672348022461,
      "rewards/margins": 3.574597120285034,
      "rewards/rejected": -18.54227066040039,
      "step": 175
    },
    {
      "epoch": 0.3925845147219193,
      "grad_norm": 116.14395125879456,
      "learning_rate": 6.087393022311666e-07,
      "logits/chosen": -16.09074592590332,
      "logits/rejected": -15.661008834838867,
      "logps/chosen": -1.6729342937469482,
      "logps/rejected": -2.0850119590759277,
      "loss": 3.3119,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": -16.729345321655273,
      "rewards/margins": 4.1207756996154785,
      "rewards/rejected": -20.85011863708496,
      "step": 180
    },
    {
      "epoch": 0.4034896401308615,
      "grad_norm": 302.66398320568743,
      "learning_rate": 5.955815419556934e-07,
      "logits/chosen": -17.55354118347168,
      "logits/rejected": -17.935550689697266,
      "logps/chosen": -1.8943233489990234,
      "logps/rejected": -2.240093231201172,
      "loss": 3.3613,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": -18.943233489990234,
      "rewards/margins": 3.4576992988586426,
      "rewards/rejected": -22.40093231201172,
      "step": 185
    },
    {
      "epoch": 0.4143947655398037,
      "grad_norm": 125.3361496668939,
      "learning_rate": 5.821395185202285e-07,
      "logits/chosen": -15.341293334960938,
      "logits/rejected": -15.44407844543457,
      "logps/chosen": -1.8814849853515625,
      "logps/rejected": -2.2990918159484863,
      "loss": 3.1174,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -18.814849853515625,
      "rewards/margins": 4.17606782913208,
      "rewards/rejected": -22.990917205810547,
      "step": 190
    },
    {
      "epoch": 0.42529989094874593,
      "grad_norm": 123.17711415812771,
      "learning_rate": 5.684327689016264e-07,
      "logits/chosen": -15.80346393585205,
      "logits/rejected": -15.893457412719727,
      "logps/chosen": -1.6922333240509033,
      "logps/rejected": -2.1358566284179688,
      "loss": 3.3943,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -16.922334671020508,
      "rewards/margins": 4.436235427856445,
      "rewards/rejected": -21.358566284179688,
      "step": 195
    },
    {
      "epoch": 0.4362050163576881,
      "grad_norm": 121.59808879397914,
      "learning_rate": 5.544812148364731e-07,
      "logits/chosen": -16.505403518676758,
      "logits/rejected": -16.2810001373291,
      "logps/chosen": -1.7912696599960327,
      "logps/rejected": -2.283963680267334,
      "loss": 3.3937,
      "rewards/accuracies": 0.768750011920929,
      "rewards/chosen": -17.912700653076172,
      "rewards/margins": 4.926938533782959,
      "rewards/rejected": -22.839635848999023,
      "step": 200
    },
    {
      "epoch": 0.44711014176663033,
      "grad_norm": 130.03760301081726,
      "learning_rate": 5.40305133866328e-07,
      "logits/chosen": -16.51849365234375,
      "logits/rejected": -16.17244529724121,
      "logps/chosen": -1.8261501789093018,
      "logps/rejected": -2.216474771499634,
      "loss": 3.3439,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": -18.26150131225586,
      "rewards/margins": 3.903247117996216,
      "rewards/rejected": -22.164745330810547,
      "step": 205
    },
    {
      "epoch": 0.4580152671755725,
      "grad_norm": 107.25457872042979,
      "learning_rate": 5.259251298658339e-07,
      "logits/chosen": -14.851339340209961,
      "logits/rejected": -14.814165115356445,
      "logps/chosen": -1.7905569076538086,
      "logps/rejected": -2.320906162261963,
      "loss": 3.1591,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -17.905569076538086,
      "rewards/margins": 5.303492546081543,
      "rewards/rejected": -23.209064483642578,
      "step": 210
    },
    {
      "epoch": 0.46892039258451473,
      "grad_norm": 129.6812695651334,
      "learning_rate": 5.113621030965238e-07,
      "logits/chosen": -16.215179443359375,
      "logits/rejected": -16.335643768310547,
      "logps/chosen": -1.8237041234970093,
      "logps/rejected": -2.207712411880493,
      "loss": 3.2475,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -18.23703956604004,
      "rewards/margins": 3.840085506439209,
      "rewards/rejected": -22.07712745666504,
      "step": 215
    },
    {
      "epoch": 0.4798255179934569,
      "grad_norm": 103.25783765301819,
      "learning_rate": 4.96637219829852e-07,
      "logits/chosen": -14.638501167297363,
      "logits/rejected": -14.695428848266602,
      "logps/chosen": -1.8283414840698242,
      "logps/rejected": -2.376028537750244,
      "loss": 3.3144,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -18.283416748046875,
      "rewards/margins": 5.47686767578125,
      "rewards/rejected": -23.760284423828125,
      "step": 220
    },
    {
      "epoch": 0.49073064340239914,
      "grad_norm": 132.036942227645,
      "learning_rate": 4.817718815835998e-07,
      "logits/chosen": -15.56849479675293,
      "logits/rejected": -15.743542671203613,
      "logps/chosen": -1.9234107732772827,
      "logps/rejected": -2.3087854385375977,
      "loss": 3.2906,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": -19.234111785888672,
      "rewards/margins": 3.8537425994873047,
      "rewards/rejected": -23.087854385375977,
      "step": 225
    },
    {
      "epoch": 0.5016357688113413,
      "grad_norm": 132.28323726857968,
      "learning_rate": 4.6678769401636887e-07,
      "logits/chosen": -15.513163566589355,
      "logits/rejected": -15.330879211425781,
      "logps/chosen": -1.868847131729126,
      "logps/rejected": -2.3147101402282715,
      "loss": 3.2416,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": -18.688472747802734,
      "rewards/margins": 4.4586310386657715,
      "rewards/rejected": -23.14710235595703,
      "step": 230
    },
    {
      "epoch": 0.5125408942202835,
      "grad_norm": 116.14658388209503,
      "learning_rate": 4.517064355253696e-07,
      "logits/chosen": -14.959882736206055,
      "logits/rejected": -14.972663879394531,
      "logps/chosen": -1.9306243658065796,
      "logps/rejected": -2.457444667816162,
      "loss": 2.9094,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -19.306243896484375,
      "rewards/margins": 5.268204689025879,
      "rewards/rejected": -24.574447631835938,
      "step": 235
    },
    {
      "epoch": 0.5234460196292258,
      "grad_norm": 125.188125846376,
      "learning_rate": 4.365500255931484e-07,
      "logits/chosen": -15.104351997375488,
      "logits/rejected": -15.062860488891602,
      "logps/chosen": -1.9797313213348389,
      "logps/rejected": -2.4472575187683105,
      "loss": 3.0593,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": -19.797313690185547,
      "rewards/margins": 4.675261497497559,
      "rewards/rejected": -24.472576141357422,
      "step": 240
    },
    {
      "epoch": 0.5343511450381679,
      "grad_norm": 135.15384009467002,
      "learning_rate": 4.213404929292575e-07,
      "logits/chosen": -15.561657905578613,
      "logits/rejected": -15.077102661132812,
      "logps/chosen": -2.040621280670166,
      "logps/rejected": -2.5414981842041016,
      "loss": 2.9158,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -20.40620994567871,
      "rewards/margins": 5.008772373199463,
      "rewards/rejected": -25.41498374938965,
      "step": 245
    },
    {
      "epoch": 0.5452562704471101,
      "grad_norm": 124.60869295390538,
      "learning_rate": 4.060999434531704e-07,
      "logits/chosen": -15.397348403930664,
      "logits/rejected": -15.1492280960083,
      "logps/chosen": -1.8068050146102905,
      "logps/rejected": -2.3928465843200684,
      "loss": 2.8634,
      "rewards/accuracies": 0.793749988079071,
      "rewards/chosen": -18.068050384521484,
      "rewards/margins": 5.860415935516357,
      "rewards/rejected": -23.928464889526367,
      "step": 250
    },
    {
      "epoch": 0.5561613958560524,
      "grad_norm": 129.14805119503163,
      "learning_rate": 3.908505281649805e-07,
      "logits/chosen": -14.593152046203613,
      "logits/rejected": -14.538106918334961,
      "logps/chosen": -1.871093988418579,
      "logps/rejected": -2.36173152923584,
      "loss": 2.9231,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -18.710941314697266,
      "rewards/margins": 4.906374454498291,
      "rewards/rejected": -23.617313385009766,
      "step": 255
    },
    {
      "epoch": 0.5670665212649946,
      "grad_norm": 119.36679390601662,
      "learning_rate": 3.756144109505764e-07,
      "logits/chosen": -15.056838989257812,
      "logits/rejected": -15.148704528808594,
      "logps/chosen": -1.8173519372940063,
      "logps/rejected": -2.4098479747772217,
      "loss": 2.8089,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -18.173521041870117,
      "rewards/margins": 5.924956321716309,
      "rewards/rejected": -24.098478317260742,
      "step": 260
    },
    {
      "epoch": 0.5779716466739367,
      "grad_norm": 119.03491048259538,
      "learning_rate": 3.6041373636809017e-07,
      "logits/chosen": -15.791028022766113,
      "logits/rejected": -15.623140335083008,
      "logps/chosen": -2.1847808361053467,
      "logps/rejected": -2.698356866836548,
      "loss": 3.1097,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -21.847808837890625,
      "rewards/margins": 5.135758399963379,
      "rewards/rejected": -26.983570098876953,
      "step": 265
    },
    {
      "epoch": 0.5888767720828789,
      "grad_norm": 119.59861773726368,
      "learning_rate": 3.4527059746243596e-07,
      "logits/chosen": -15.602317810058594,
      "logits/rejected": -15.679903030395508,
      "logps/chosen": -2.001981735229492,
      "logps/rejected": -2.4404501914978027,
      "loss": 3.1726,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -20.019817352294922,
      "rewards/margins": 4.384683132171631,
      "rewards/rejected": -24.404499053955078,
      "step": 270
    },
    {
      "epoch": 0.5997818974918212,
      "grad_norm": 143.27392642046235,
      "learning_rate": 3.302070036547201e-07,
      "logits/chosen": -15.435728073120117,
      "logits/rejected": -15.314961433410645,
      "logps/chosen": -2.168989896774292,
      "logps/rejected": -2.737366199493408,
      "loss": 3.0792,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -21.689899444580078,
      "rewards/margins": 5.683760643005371,
      "rewards/rejected": -27.3736572265625,
      "step": 275
    },
    {
      "epoch": 0.6106870229007634,
      "grad_norm": 114.06559627918872,
      "learning_rate": 3.152448487531915e-07,
      "logits/chosen": -15.740002632141113,
      "logits/rejected": -15.62346076965332,
      "logps/chosen": -1.9302055835723877,
      "logps/rejected": -2.405974864959717,
      "loss": 2.9702,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -19.302053451538086,
      "rewards/margins": 4.757693767547607,
      "rewards/rejected": -24.059749603271484,
      "step": 280
    },
    {
      "epoch": 0.6215921483097055,
      "grad_norm": 107.47439430691443,
      "learning_rate": 3.0040587913222725e-07,
      "logits/chosen": -14.441022872924805,
      "logits/rejected": -14.633172988891602,
      "logps/chosen": -1.8620984554290771,
      "logps/rejected": -2.345489263534546,
      "loss": 3.0044,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": -18.620983123779297,
      "rewards/margins": 4.833909034729004,
      "rewards/rejected": -23.45489501953125,
      "step": 285
    },
    {
      "epoch": 0.6324972737186477,
      "grad_norm": 154.10880275569383,
      "learning_rate": 2.857116621256018e-07,
      "logits/chosen": -15.512109756469727,
      "logits/rejected": -15.243085861206055,
      "logps/chosen": -1.9362213611602783,
      "logps/rejected": -2.402963399887085,
      "loss": 3.2181,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -19.36220932006836,
      "rewards/margins": 4.667425155639648,
      "rewards/rejected": -24.02963638305664,
      "step": 290
    },
    {
      "epoch": 0.64340239912759,
      "grad_norm": 112.54594135095479,
      "learning_rate": 2.7118355467997835e-07,
      "logits/chosen": -15.574625968933105,
      "logits/rejected": -15.6203031539917,
      "logps/chosen": -1.9695098400115967,
      "logps/rejected": -2.388803482055664,
      "loss": 2.9531,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -19.695100784301758,
      "rewards/margins": 4.192935943603516,
      "rewards/rejected": -23.888032913208008,
      "step": 295
    },
    {
      "epoch": 0.6543075245365322,
      "grad_norm": 131.96864010640354,
      "learning_rate": 2.5684267231418333e-07,
      "logits/chosen": -16.529542922973633,
      "logits/rejected": -16.21666145324707,
      "logps/chosen": -2.111536741256714,
      "logps/rejected": -2.6558353900909424,
      "loss": 2.8683,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": -21.11536979675293,
      "rewards/margins": 5.442984580993652,
      "rewards/rejected": -26.5583553314209,
      "step": 300
    },
    {
      "epoch": 0.6652126499454744,
      "grad_norm": 131.182887672942,
      "learning_rate": 2.427098584293759e-07,
      "logits/chosen": -15.954015731811523,
      "logits/rejected": -15.863182067871094,
      "logps/chosen": -2.109534978866577,
      "logps/rejected": -2.553351640701294,
      "loss": 2.9835,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -21.09535026550293,
      "rewards/margins": 4.438165187835693,
      "rewards/rejected": -25.533517837524414,
      "step": 305
    },
    {
      "epoch": 0.6761177753544165,
      "grad_norm": 117.78663856490914,
      "learning_rate": 2.2880565401472287e-07,
      "logits/chosen": -15.363965034484863,
      "logits/rejected": -15.453814506530762,
      "logps/chosen": -2.0754928588867188,
      "logps/rejected": -2.7302043437957764,
      "loss": 2.9351,
      "rewards/accuracies": 0.793749988079071,
      "rewards/chosen": -20.754932403564453,
      "rewards/margins": 6.547112464904785,
      "rewards/rejected": -27.302043914794922,
      "step": 310
    },
    {
      "epoch": 0.6870229007633588,
      "grad_norm": 102.64755949094341,
      "learning_rate": 2.151502677926042e-07,
      "logits/chosen": -15.314285278320312,
      "logits/rejected": -15.252153396606445,
      "logps/chosen": -2.0483040809631348,
      "logps/rejected": -2.753251552581787,
      "loss": 2.7283,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -20.483041763305664,
      "rewards/margins": 7.049475193023682,
      "rewards/rejected": -27.532512664794922,
      "step": 315
    },
| { | |
| "epoch": 0.697928026172301, | |
| "grad_norm": 101.22013067633767, | |
| "learning_rate": 2.0176354684674575e-07, | |
| "logits/chosen": -16.12703514099121, | |
| "logits/rejected": -15.803072929382324, | |
| "logps/chosen": -2.151562213897705, | |
| "logps/rejected": -2.721626043319702, | |
| "loss": 2.9477, | |
| "rewards/accuracies": 0.793749988079071, | |
| "rewards/chosen": -21.515621185302734, | |
| "rewards/margins": 5.700636863708496, | |
| "rewards/rejected": -27.216259002685547, | |
| "step": 320 | |
| }, | |
| { | |
| "epoch": 0.7088331515812432, | |
| "grad_norm": 125.44812959020139, | |
| "learning_rate": 1.8866494777596534e-07, | |
| "logits/chosen": -15.385473251342773, | |
| "logits/rejected": -15.099942207336426, | |
| "logps/chosen": -2.194483518600464, | |
| "logps/rejected": -2.6979141235351562, | |
| "loss": 3.0708, | |
| "rewards/accuracies": 0.7562500238418579, | |
| "rewards/chosen": -21.944835662841797, | |
| "rewards/margins": 5.034304141998291, | |
| "rewards/rejected": -26.979141235351562, | |
| "step": 325 | |
| }, | |
| { | |
| "epoch": 0.7197382769901853, | |
| "grad_norm": 95.2552912244021, | |
| "learning_rate": 1.7587350841546007e-07, | |
| "logits/chosen": -15.96777629852295, | |
| "logits/rejected": -16.115123748779297, | |
| "logps/chosen": -1.9400516748428345, | |
| "logps/rejected": -2.51167368888855, | |
| "loss": 2.8539, | |
| "rewards/accuracies": 0.8062499761581421, | |
| "rewards/chosen": -19.400516510009766, | |
| "rewards/margins": 5.716220855712891, | |
| "rewards/rejected": -25.11673927307129, | |
| "step": 330 | |
| }, | |
| { | |
| "epoch": 0.7306434023991276, | |
| "grad_norm": 128.737726612481, | |
| "learning_rate": 1.634078201667347e-07, | |
| "logits/chosen": -15.98449420928955, | |
| "logits/rejected": -16.243240356445312, | |
| "logps/chosen": -2.1536316871643066, | |
| "logps/rejected": -2.7533793449401855, | |
| "loss": 2.9433, | |
| "rewards/accuracies": 0.8125, | |
| "rewards/chosen": -21.53631591796875, | |
| "rewards/margins": 5.9974775314331055, | |
| "rewards/rejected": -27.53379249572754, | |
| "step": 335 | |
| }, | |
| { | |
| "epoch": 0.7415485278080698, | |
| "grad_norm": 150.4063701643456, | |
| "learning_rate": 1.512860009763891e-07, | |
| "logits/chosen": -15.605964660644531, | |
| "logits/rejected": -16.06555938720703, | |
| "logps/chosen": -1.8955068588256836, | |
| "logps/rejected": -2.390796422958374, | |
| "loss": 2.9364, | |
| "rewards/accuracies": 0.762499988079071, | |
| "rewards/chosen": -18.955068588256836, | |
| "rewards/margins": 4.952897071838379, | |
| "rewards/rejected": -23.9079647064209, | |
| "step": 340 | |
| }, | |
| { | |
| "epoch": 0.752453653217012, | |
| "grad_norm": 135.6732671225431, | |
| "learning_rate": 1.3952566900303602e-07, | |
| "logits/chosen": -16.359243392944336, | |
| "logits/rejected": -16.223447799682617, | |
| "logps/chosen": -2.149503469467163, | |
| "logps/rejected": -2.7064743041992188, | |
| "loss": 2.929, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": -21.49503517150879, | |
| "rewards/margins": 5.569708347320557, | |
| "rewards/rejected": -27.064743041992188, | |
| "step": 345 | |
| }, | |
| { | |
| "epoch": 0.7633587786259542, | |
| "grad_norm": 125.758255019621, | |
| "learning_rate": 1.2814391701062391e-07, | |
| "logits/chosen": -14.762056350708008, | |
| "logits/rejected": -14.951733589172363, | |
| "logps/chosen": -2.00016450881958, | |
| "logps/rejected": -2.528573989868164, | |
| "loss": 3.0167, | |
| "rewards/accuracies": 0.762499988079071, | |
| "rewards/chosen": -20.00164794921875, | |
| "rewards/margins": 5.284093379974365, | |
| "rewards/rejected": -25.285737991333008, | |
| "step": 350 | |
| }, | |
| { | |
| "epoch": 0.7742639040348964, | |
| "grad_norm": 123.77860608266465, | |
| "learning_rate": 1.17157287525381e-07, | |
| "logits/chosen": -14.972272872924805, | |
| "logits/rejected": -15.062446594238281, | |
| "logps/chosen": -2.0419890880584717, | |
| "logps/rejected": -2.7083749771118164, | |
| "loss": 3.1472, | |
| "rewards/accuracies": 0.793749988079071, | |
| "rewards/chosen": -20.419891357421875, | |
| "rewards/margins": 6.663858890533447, | |
| "rewards/rejected": -27.083751678466797, | |
| "step": 355 | |
| }, | |
| { | |
| "epoch": 0.7851690294438386, | |
| "grad_norm": 128.49716646084636, | |
| "learning_rate": 1.0658174879249e-07, | |
| "logits/chosen": -15.742474555969238, | |
| "logits/rejected": -15.778717041015625, | |
| "logps/chosen": -2.0881805419921875, | |
| "logps/rejected": -2.6481823921203613, | |
| "loss": 2.9629, | |
| "rewards/accuracies": 0.7250000238418579, | |
| "rewards/chosen": -20.881805419921875, | |
| "rewards/margins": 5.6000165939331055, | |
| "rewards/rejected": -26.481822967529297, | |
| "step": 360 | |
| }, | |
| { | |
| "epoch": 0.7960741548527808, | |
| "grad_norm": 105.81239577262055, | |
| "learning_rate": 9.643267156743626e-08, | |
| "logits/chosen": -15.465214729309082, | |
| "logits/rejected": -15.414011001586914, | |
| "logps/chosen": -2.0494282245635986, | |
| "logps/rejected": -2.6396915912628174, | |
| "loss": 2.9365, | |
| "rewards/accuracies": 0.7749999761581421, | |
| "rewards/chosen": -20.49428367614746, | |
| "rewards/margins": 5.9026336669921875, | |
| "rewards/rejected": -26.39691734313965, | |
| "step": 365 | |
| }, | |
| { | |
| "epoch": 0.806979280261723, | |
| "grad_norm": 106.05628455304455, | |
| "learning_rate": 8.672480677576266e-08, | |
| "logits/chosen": -15.015769958496094, | |
| "logits/rejected": -14.94062614440918, | |
| "logps/chosen": -2.0296902656555176, | |
| "logps/rejected": -2.648134231567383, | |
| "loss": 2.8337, | |
| "rewards/accuracies": 0.7749999761581421, | |
| "rewards/chosen": -20.29690170288086, | |
| "rewards/margins": 6.184438705444336, | |
| "rewards/rejected": -26.481342315673828, | |
| "step": 370 | |
| }, | |
| { | |
| "epoch": 0.8178844056706652, | |
| "grad_norm": 139.7590362680712, | |
| "learning_rate": 7.7472264073702e-08, | |
| "logits/chosen": -15.844355583190918, | |
| "logits/rejected": -15.914880752563477, | |
| "logps/chosen": -2.140533208847046, | |
| "logps/rejected": -2.731921434402466, | |
| "loss": 2.7732, | |
| "rewards/accuracies": 0.7749999761581421, | |
| "rewards/chosen": -21.405330657958984, | |
| "rewards/margins": 5.913882255554199, | |
| "rewards/rejected": -27.3192138671875, | |
| "step": 375 | |
| }, | |
| { | |
| "epoch": 0.8287895310796074, | |
| "grad_norm": 123.27756798269391, | |
| "learning_rate": 6.868849134084534e-08, | |
| "logits/chosen": -14.827451705932617, | |
| "logits/rejected": -14.896784782409668, | |
| "logps/chosen": -2.1212401390075684, | |
| "logps/rejected": -2.6644959449768066, | |
| "loss": 2.7211, | |
| "rewards/accuracies": 0.762499988079071, | |
| "rewards/chosen": -21.21240234375, | |
| "rewards/margins": 5.432553291320801, | |
| "rewards/rejected": -26.644954681396484, | |
| "step": 380 | |
| }, | |
| { | |
| "epoch": 0.8396946564885496, | |
| "grad_norm": 102.3035020753843, | |
| "learning_rate": 6.038625513465372e-08, | |
| "logits/chosen": -15.892682075500488, | |
| "logits/rejected": -15.998703002929688, | |
| "logps/chosen": -2.0303637981414795, | |
| "logps/rejected": -2.4150891304016113, | |
| "loss": 2.9474, | |
| "rewards/accuracies": 0.6875, | |
| "rewards/chosen": -20.303638458251953, | |
| "rewards/margins": 3.8472514152526855, | |
| "rewards/rejected": -24.15088653564453, | |
| "step": 385 | |
| }, | |
| { | |
| "epoch": 0.8505997818974919, | |
| "grad_norm": 110.56211402431573, | |
| "learning_rate": 5.2577622135220546e-08, | |
| "logits/chosen": -15.678448677062988, | |
| "logits/rejected": -15.579752922058105, | |
| "logps/chosen": -2.146989345550537, | |
| "logps/rejected": -2.7087996006011963, | |
| "loss": 2.8812, | |
| "rewards/accuracies": 0.8062499761581421, | |
| "rewards/chosen": -21.469894409179688, | |
| "rewards/margins": 5.618099689483643, | |
| "rewards/rejected": -27.087993621826172, | |
| "step": 390 | |
| }, | |
| { | |
| "epoch": 0.861504907306434, | |
| "grad_norm": 136.5376747108113, | |
| "learning_rate": 4.527394160725375e-08, | |
| "logits/chosen": -15.390423774719238, | |
| "logits/rejected": -15.318705558776855, | |
| "logps/chosen": -2.0882272720336914, | |
| "logps/rejected": -2.5445287227630615, | |
| "loss": 2.9208, | |
| "rewards/accuracies": 0.7562500238418579, | |
| "rewards/chosen": -20.882274627685547, | |
| "rewards/margins": 4.563012599945068, | |
| "rewards/rejected": -25.44528579711914, | |
| "step": 395 | |
| }, | |
| { | |
| "epoch": 0.8724100327153762, | |
| "grad_norm": 110.11074526975227, | |
| "learning_rate": 3.848582890476728e-08, | |
| "logits/chosen": -13.988882064819336, | |
| "logits/rejected": -14.337841987609863, | |
| "logps/chosen": -1.9440772533416748, | |
| "logps/rejected": -2.5067763328552246, | |
| "loss": 2.7931, | |
| "rewards/accuracies": 0.762499988079071, | |
| "rewards/chosen": -19.440773010253906, | |
| "rewards/margins": 5.626988410949707, | |
| "rewards/rejected": -25.067760467529297, | |
| "step": 400 | |
| }, | |
| { | |
| "epoch": 0.8724100327153762, | |
| "eval_logits/chosen": -13.542856216430664, | |
| "eval_logits/rejected": -13.604004859924316, | |
| "eval_logps/chosen": -1.9483120441436768, | |
| "eval_logps/rejected": -2.4730513095855713, | |
| "eval_loss": 2.762831449508667, | |
| "eval_rewards/accuracies": 0.7807376980781555, | |
| "eval_rewards/chosen": -19.48311996459961, | |
| "eval_rewards/margins": 5.247389316558838, | |
| "eval_rewards/rejected": -24.73050880432129, | |
| "eval_runtime": 66.0141, | |
| "eval_samples_per_second": 29.403, | |
| "eval_steps_per_second": 1.848, | |
| "step": 400 | |
| }, | |
| { | |
| "epoch": 0.8833151581243184, | |
| "grad_norm": 106.81510695706733, | |
| "learning_rate": 3.222315004245715e-08, | |
| "logits/chosen": -15.698140144348145, | |
| "logits/rejected": -15.643528938293457, | |
| "logps/chosen": -2.1120400428771973, | |
| "logps/rejected": -2.7991552352905273, | |
| "loss": 2.7377, | |
| "rewards/accuracies": 0.800000011920929, | |
| "rewards/chosen": -21.120403289794922, | |
| "rewards/margins": 6.871151924133301, | |
| "rewards/rejected": -27.991552352905273, | |
| "step": 405 | |
| }, | |
| { | |
| "epoch": 0.8942202835332607, | |
| "grad_norm": 127.02527681895255, | |
| "learning_rate": 2.6495007356185817e-08, | |
| "logits/chosen": -14.854022026062012, | |
| "logits/rejected": -15.155965805053711, | |
| "logps/chosen": -2.028907299041748, | |
| "logps/rejected": -2.589961528778076, | |
| "loss": 2.7722, | |
| "rewards/accuracies": 0.793749988079071, | |
| "rewards/chosen": -20.289072036743164, | |
| "rewards/margins": 5.610544204711914, | |
| "rewards/rejected": -25.899616241455078, | |
| "step": 410 | |
| }, | |
| { | |
| "epoch": 0.9051254089422028, | |
| "grad_norm": 118.59066087169252, | |
| "learning_rate": 2.1309726273417605e-08, | |
| "logits/chosen": -15.23388385772705, | |
| "logits/rejected": -15.077451705932617, | |
| "logps/chosen": -2.019240379333496, | |
| "logps/rejected": -2.622464895248413, | |
| "loss": 2.7385, | |
| "rewards/accuracies": 0.8062499761581421, | |
| "rewards/chosen": -20.192401885986328, | |
| "rewards/margins": 6.0322442054748535, | |
| "rewards/rejected": -26.224645614624023, | |
| "step": 415 | |
| }, | |
| { | |
| "epoch": 0.916030534351145, | |
| "grad_norm": 125.5340583771102, | |
| "learning_rate": 1.667484321283146e-08, | |
| "logits/chosen": -16.14217758178711, | |
| "logits/rejected": -15.975908279418945, | |
| "logps/chosen": -2.1546969413757324, | |
| "logps/rejected": -2.7095894813537598, | |
| "loss": 2.8351, | |
| "rewards/accuracies": 0.7875000238418579, | |
| "rewards/chosen": -21.54697036743164, | |
| "rewards/margins": 5.548920631408691, | |
| "rewards/rejected": -27.09589195251465, | |
| "step": 420 | |
| }, | |
| { | |
| "epoch": 0.9269356597600873, | |
| "grad_norm": 126.63955014497344, | |
| "learning_rate": 1.2597094630699156e-08, | |
| "logits/chosen": -16.528745651245117, | |
| "logits/rejected": -16.543407440185547, | |
| "logps/chosen": -1.9707733392715454, | |
| "logps/rejected": -2.48225474357605, | |
| "loss": 2.7718, | |
| "rewards/accuracies": 0.8125, | |
| "rewards/chosen": -19.707733154296875, | |
| "rewards/margins": 5.1148176193237305, | |
| "rewards/rejected": -24.822551727294922, | |
| "step": 425 | |
| }, | |
| { | |
| "epoch": 0.9378407851690295, | |
| "grad_norm": 141.60001641865918, | |
| "learning_rate": 9.082407229950017e-09, | |
| "logits/chosen": -16.1947021484375, | |
| "logits/rejected": -16.0598087310791, | |
| "logps/chosen": -2.160557985305786, | |
| "logps/rejected": -2.6326889991760254, | |
| "loss": 3.0444, | |
| "rewards/accuracies": 0.762499988079071, | |
| "rewards/chosen": -21.605579376220703, | |
| "rewards/margins": 4.721311092376709, | |
| "rewards/rejected": -26.326889038085938, | |
| "step": 430 | |
| }, | |
| { | |
| "epoch": 0.9487459105779716, | |
| "grad_norm": 114.72393277887394, | |
| "learning_rate": 6.1358893461505916e-09, | |
| "logits/chosen": -15.125299453735352, | |
| "logits/rejected": -15.178013801574707, | |
| "logps/chosen": -2.1593708992004395, | |
| "logps/rejected": -2.785780429840088, | |
| "loss": 2.7684, | |
| "rewards/accuracies": 0.78125, | |
| "rewards/chosen": -21.593708038330078, | |
| "rewards/margins": 6.264092445373535, | |
| "rewards/rejected": -27.857803344726562, | |
| "step": 435 | |
| }, | |
| { | |
| "epoch": 0.9596510359869138, | |
| "grad_norm": 120.19496730357073, | |
| "learning_rate": 3.761823522920426e-09, | |
| "logits/chosen": -15.59558391571045, | |
| "logits/rejected": -15.761117935180664, | |
| "logps/chosen": -2.194023847579956, | |
| "logps/rejected": -2.795337677001953, | |
| "loss": 2.8552, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": -21.940235137939453, | |
| "rewards/margins": 6.013139247894287, | |
| "rewards/rejected": -27.953380584716797, | |
| "step": 440 | |
| }, | |
| { | |
| "epoch": 0.9705561613958561, | |
| "grad_norm": 139.41969950436, | |
| "learning_rate": 1.963660287575086e-09, | |
| "logits/chosen": -14.188136100769043, | |
| "logits/rejected": -14.617622375488281, | |
| "logps/chosen": -2.016371011734009, | |
| "logps/rejected": -2.6547458171844482, | |
| "loss": 2.746, | |
| "rewards/accuracies": 0.8187500238418579, | |
| "rewards/chosen": -20.16370964050293, | |
| "rewards/margins": 6.383749961853027, | |
| "rewards/rejected": -26.547454833984375, | |
| "step": 445 | |
| }, | |
| { | |
| "epoch": 0.9814612868047983, | |
| "grad_norm": 121.93850210912117, | |
| "learning_rate": 7.44013136042465e-10, | |
| "logits/chosen": -15.886639595031738, | |
| "logits/rejected": -16.0024356842041, | |
| "logps/chosen": -2.115778684616089, | |
| "logps/rejected": -2.6602773666381836, | |
| "loss": 2.9859, | |
| "rewards/accuracies": 0.762499988079071, | |
| "rewards/chosen": -21.157787322998047, | |
| "rewards/margins": 5.444985389709473, | |
| "rewards/rejected": -26.602771759033203, | |
| "step": 450 | |
| }, | |
| { | |
| "epoch": 0.9923664122137404, | |
| "grad_norm": 122.9917872545958, | |
| "learning_rate": 1.0465473434155824e-10, | |
| "logits/chosen": -13.859865188598633, | |
| "logits/rejected": -13.9098539352417, | |
| "logps/chosen": -2.006869077682495, | |
| "logps/rejected": -2.4212822914123535, | |
| "loss": 3.0127, | |
| "rewards/accuracies": 0.7749999761581421, | |
| "rewards/chosen": -20.06869125366211, | |
| "rewards/margins": 4.144131660461426, | |
| "rewards/rejected": -24.21282386779785, | |
| "step": 455 | |
| }, | |
| { | |
| "epoch": 0.9989094874591058, | |
| "step": 458, | |
| "total_flos": 0.0, | |
| "train_loss": 3.523275049492782, | |
| "train_runtime": 6692.3835, | |
| "train_samples_per_second": 8.769, | |
| "train_steps_per_second": 0.068 | |
| } | |
| ], | |
| "logging_steps": 5, | |
| "max_steps": 458, | |
| "num_input_tokens_seen": 0, | |
| "num_train_epochs": 1, | |
| "save_steps": 1000000, | |
| "stateful_callbacks": { | |
| "TrainerControl": { | |
| "args": { | |
| "should_epoch_stop": false, | |
| "should_evaluate": false, | |
| "should_log": false, | |
| "should_save": true, | |
| "should_training_stop": true | |
| }, | |
| "attributes": {} | |
| } | |
| }, | |
| "total_flos": 0.0, | |
| "train_batch_size": 2, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |