{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.783681743316863,
  "eval_steps": 400,
  "global_step": 4400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.020271126314455847,
      "grad_norm": 85.69229125976562,
      "learning_rate": 3.310810810810811e-08,
      "logits/chosen": -1.2848942279815674,
      "logits/rejected": -1.2519584894180298,
      "logps/chosen": -304.9576110839844,
      "logps/rejected": -294.22052001953125,
      "loss": 0.6937,
      "rewards/accuracies": 0.4724999964237213,
      "rewards/chosen": -0.0019568712450563908,
      "rewards/margins": 0.0018780820537358522,
      "rewards/rejected": -0.0038349530659615993,
      "step": 50
    },
    {
      "epoch": 0.040542252628911694,
      "grad_norm": 84.80493927001953,
      "learning_rate": 6.689189189189188e-08,
      "logits/chosen": -1.310579776763916,
      "logits/rejected": -1.248593807220459,
      "logps/chosen": -300.7874450683594,
      "logps/rejected": -293.58013916015625,
      "loss": 0.6896,
      "rewards/accuracies": 0.5224999785423279,
      "rewards/chosen": 0.009982499293982983,
      "rewards/margins": 0.009593657217919827,
      "rewards/rejected": 0.00038884030072949827,
      "step": 100
    },
    {
      "epoch": 0.06081337894336754,
      "grad_norm": 77.98486328125,
      "learning_rate": 1.0067567567567567e-07,
      "logits/chosen": -1.2936054468154907,
      "logits/rejected": -1.231412410736084,
      "logps/chosen": -282.709716796875,
      "logps/rejected": -280.4858093261719,
      "loss": 0.6958,
      "rewards/accuracies": 0.4950000047683716,
      "rewards/chosen": 0.0011114570079371333,
      "rewards/margins": -0.0028824072796851397,
      "rewards/rejected": 0.0039938646368682384,
      "step": 150
    },
    {
      "epoch": 0.08108450525782339,
      "grad_norm": 77.83967590332031,
      "learning_rate": 1.3445945945945944e-07,
      "logits/chosen": -1.213567852973938,
      "logits/rejected": -1.1411467790603638,
      "logps/chosen": -286.0394287109375,
      "logps/rejected": -285.6759033203125,
      "loss": 0.6945,
      "rewards/accuracies": 0.5137500166893005,
      "rewards/chosen": 0.008986199274659157,
      "rewards/margins": -0.00035649247001856565,
      "rewards/rejected": 0.00934269092977047,
      "step": 200
    },
    {
      "epoch": 0.10135563157227924,
      "grad_norm": 91.26100158691406,
      "learning_rate": 1.6824324324324323e-07,
      "logits/chosen": -1.3186339139938354,
      "logits/rejected": -1.2554600238800049,
      "logps/chosen": -284.81689453125,
      "logps/rejected": -289.15362548828125,
      "loss": 0.6939,
      "rewards/accuracies": 0.5074999928474426,
      "rewards/chosen": 0.015378892421722412,
      "rewards/margins": 0.0011974199442192912,
      "rewards/rejected": 0.014181473292410374,
      "step": 250
    },
    {
      "epoch": 0.12162675788673508,
      "grad_norm": 82.71849822998047,
      "learning_rate": 2.02027027027027e-07,
      "logits/chosen": -1.2918212413787842,
      "logits/rejected": -1.2194230556488037,
      "logps/chosen": -294.4346008300781,
      "logps/rejected": -290.1015319824219,
      "loss": 0.69,
      "rewards/accuracies": 0.5337499976158142,
      "rewards/chosen": 0.031797949224710464,
      "rewards/margins": 0.009314099326729774,
      "rewards/rejected": 0.02248384803533554,
      "step": 300
    },
    {
      "epoch": 0.14189788420119093,
      "grad_norm": 80.30734252929688,
      "learning_rate": 2.3581081081081082e-07,
      "logits/chosen": -1.1948000192642212,
      "logits/rejected": -1.1280878782272339,
      "logps/chosen": -292.45068359375,
      "logps/rejected": -290.9993896484375,
      "loss": 0.6884,
      "rewards/accuracies": 0.5550000071525574,
      "rewards/chosen": 0.056668683886528015,
      "rewards/margins": 0.013978350907564163,
      "rewards/rejected": 0.04269032925367355,
      "step": 350
    },
    {
      "epoch": 0.16216901051564678,
      "grad_norm": 90.31956481933594,
      "learning_rate": 2.695945945945946e-07,
      "logits/chosen": -1.2479510307312012,
      "logits/rejected": -1.1317458152770996,
      "logps/chosen": -284.8790283203125,
      "logps/rejected": -286.545654296875,
      "loss": 0.6809,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": 0.08359939604997635,
      "rewards/margins": 0.03173583000898361,
      "rewards/rejected": 0.05186356604099274,
      "step": 400
    },
    {
      "epoch": 0.16216901051564678,
      "eval_logits/chosen": null,
      "eval_logits/rejected": null,
      "eval_logps/chosen": -298.5028381347656,
      "eval_logps/rejected": -308.0430908203125,
      "eval_loss": 0.6814237236976624,
      "eval_rewards/accuracies": 0.5817821621894836,
      "eval_rewards/chosen": 0.08737193793058395,
      "eval_rewards/margins": 0.03109372965991497,
      "eval_rewards/rejected": 0.05627821385860443,
      "eval_runtime": 415.88,
      "eval_samples_per_second": 6.071,
      "eval_steps_per_second": 6.071,
      "step": 400
    },
    {
      "epoch": 0.18244013683010263,
      "grad_norm": 91.4817123413086,
      "learning_rate": 3.0337837837837835e-07,
      "logits/chosen": -1.2706668376922607,
      "logits/rejected": -1.2054940462112427,
      "logps/chosen": -301.8001403808594,
      "logps/rejected": -288.0747375488281,
      "loss": 0.6786,
      "rewards/accuracies": 0.6050000190734863,
      "rewards/chosen": 0.12451739609241486,
      "rewards/margins": 0.03897064924240112,
      "rewards/rejected": 0.08554673194885254,
      "step": 450
    },
    {
      "epoch": 0.20271126314455848,
      "grad_norm": 90.96726989746094,
      "learning_rate": 3.371621621621621e-07,
      "logits/chosen": -1.2182999849319458,
      "logits/rejected": -1.1680446863174438,
      "logps/chosen": -287.1329650878906,
      "logps/rejected": -285.5204772949219,
      "loss": 0.6728,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": 0.15390531718730927,
      "rewards/margins": 0.05485306680202484,
      "rewards/rejected": 0.09905223548412323,
      "step": 500
    },
    {
      "epoch": 0.22298238945901433,
      "grad_norm": 100.8331298828125,
      "learning_rate": 3.7094594594594594e-07,
      "logits/chosen": -1.2811435461044312,
      "logits/rejected": -1.166648507118225,
      "logps/chosen": -287.2263488769531,
      "logps/rejected": -282.2992248535156,
      "loss": 0.6702,
      "rewards/accuracies": 0.6050000190734863,
      "rewards/chosen": 0.15076057612895966,
      "rewards/margins": 0.06141732633113861,
      "rewards/rejected": 0.08934321999549866,
      "step": 550
    },
    {
      "epoch": 0.24325351577347015,
      "grad_norm": 87.13448333740234,
      "learning_rate": 4.047297297297297e-07,
      "logits/chosen": -1.2385767698287964,
      "logits/rejected": -1.1927905082702637,
      "logps/chosen": -275.57989501953125,
      "logps/rejected": -279.74151611328125,
      "loss": 0.6604,
      "rewards/accuracies": 0.6112499833106995,
      "rewards/chosen": 0.15349659323692322,
      "rewards/margins": 0.08835410326719284,
      "rewards/rejected": 0.06514248996973038,
      "step": 600
    },
    {
      "epoch": 0.26352464208792603,
      "grad_norm": 66.59146881103516,
      "learning_rate": 4.385135135135135e-07,
      "logits/chosen": -1.2698534727096558,
      "logits/rejected": -1.1808619499206543,
      "logps/chosen": -289.853515625,
      "logps/rejected": -284.6557922363281,
      "loss": 0.6463,
      "rewards/accuracies": 0.6187499761581421,
      "rewards/chosen": 0.2137494683265686,
      "rewards/margins": 0.13295292854309082,
      "rewards/rejected": 0.08079654723405838,
      "step": 650
    },
    {
      "epoch": 0.28379576840238185,
      "grad_norm": 80.75248718261719,
      "learning_rate": 4.722972972972973e-07,
      "logits/chosen": -1.1937602758407593,
      "logits/rejected": -1.1288022994995117,
      "logps/chosen": -277.06024169921875,
      "logps/rejected": -276.2779235839844,
      "loss": 0.6333,
      "rewards/accuracies": 0.6412500143051147,
      "rewards/chosen": 0.23302273452281952,
      "rewards/margins": 0.17971622943878174,
      "rewards/rejected": 0.05330649018287659,
      "step": 700
    },
    {
      "epoch": 0.3040668947168377,
      "grad_norm": 89.37080383300781,
      "learning_rate": 4.993241213577651e-07,
      "logits/chosen": -1.2752529382705688,
      "logits/rejected": -1.2224916219711304,
      "logps/chosen": -272.2314758300781,
      "logps/rejected": -283.24261474609375,
      "loss": 0.6403,
      "rewards/accuracies": 0.6225000023841858,
      "rewards/chosen": 0.20178855955600739,
      "rewards/margins": 0.18336616456508636,
      "rewards/rejected": 0.018422363325953484,
      "step": 750
    },
    {
      "epoch": 0.32433802103129356,
      "grad_norm": 98.02351379394531,
      "learning_rate": 4.955692400120156e-07,
      "logits/chosen": -1.269060492515564,
      "logits/rejected": -1.2043280601501465,
      "logps/chosen": -276.89593505859375,
      "logps/rejected": -274.8650817871094,
      "loss": 0.6231,
      "rewards/accuracies": 0.6637499928474426,
      "rewards/chosen": 0.17858818173408508,
      "rewards/margins": 0.23695340752601624,
      "rewards/rejected": -0.05836523696780205,
      "step": 800
    },
    {
      "epoch": 0.32433802103129356,
      "eval_logits/chosen": null,
      "eval_logits/rejected": null,
      "eval_logps/chosen": -297.7666931152344,
      "eval_logps/rejected": -309.3333740234375,
      "eval_loss": 0.6285235285758972,
      "eval_rewards/accuracies": 0.6289108991622925,
      "eval_rewards/chosen": 0.1609875112771988,
      "eval_rewards/margins": 0.2337331473827362,
      "eval_rewards/rejected": -0.07274564355611801,
      "eval_runtime": 415.75,
      "eval_samples_per_second": 6.073,
      "eval_steps_per_second": 6.073,
      "step": 800
    },
    {
      "epoch": 0.3446091473457494,
      "grad_norm": 58.97211837768555,
      "learning_rate": 4.918143586662662e-07,
      "logits/chosen": -1.3953185081481934,
      "logits/rejected": -1.2967430353164673,
      "logps/chosen": -286.43359375,
      "logps/rejected": -286.7919006347656,
      "loss": 0.6211,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": 0.15985938906669617,
      "rewards/margins": 0.24788908660411835,
      "rewards/rejected": -0.08802969753742218,
      "step": 850
    },
    {
      "epoch": 0.36488027366020526,
      "grad_norm": 87.69761657714844,
      "learning_rate": 4.880594773205166e-07,
      "logits/chosen": -1.3257156610488892,
      "logits/rejected": -1.2500947713851929,
      "logps/chosen": -278.18524169921875,
      "logps/rejected": -292.2138977050781,
      "loss": 0.6206,
      "rewards/accuracies": 0.6349999904632568,
      "rewards/chosen": 0.12427999824285507,
      "rewards/margins": 0.2608846426010132,
      "rewards/rejected": -0.1366046667098999,
      "step": 900
    },
    {
      "epoch": 0.3851513999746611,
      "grad_norm": 82.38545227050781,
      "learning_rate": 4.843045959747672e-07,
      "logits/chosen": -1.357102394104004,
      "logits/rejected": -1.2402279376983643,
      "logps/chosen": -285.491455078125,
      "logps/rejected": -297.6613464355469,
      "loss": 0.615,
      "rewards/accuracies": 0.6449999809265137,
      "rewards/chosen": 0.1982826292514801,
      "rewards/margins": 0.31170952320098877,
      "rewards/rejected": -0.11342689394950867,
      "step": 950
    },
    {
      "epoch": 0.40542252628911696,
      "grad_norm": 99.3398666381836,
      "learning_rate": 4.805497146290177e-07,
      "logits/chosen": -1.3474574089050293,
      "logits/rejected": -1.2824286222457886,
      "logps/chosen": -286.2711181640625,
      "logps/rejected": -294.86309814453125,
      "loss": 0.6017,
      "rewards/accuracies": 0.6362500190734863,
      "rewards/chosen": 0.15238681435585022,
      "rewards/margins": 0.3537806570529938,
      "rewards/rejected": -0.20139385759830475,
      "step": 1000
    },
    {
      "epoch": 0.4256936526035728,
      "grad_norm": 70.56417083740234,
      "learning_rate": 4.7679483328326823e-07,
      "logits/chosen": -1.3699766397476196,
      "logits/rejected": -1.3327956199645996,
      "logps/chosen": -283.1145324707031,
      "logps/rejected": -289.5287170410156,
      "loss": 0.5955,
      "rewards/accuracies": 0.6675000190734863,
      "rewards/chosen": 0.09915956854820251,
      "rewards/margins": 0.372953861951828,
      "rewards/rejected": -0.2737942636013031,
      "step": 1050
    },
    {
      "epoch": 0.44596477891802866,
      "grad_norm": 56.053646087646484,
      "learning_rate": 4.7303995193751876e-07,
      "logits/chosen": -1.3838495016098022,
      "logits/rejected": -1.3065294027328491,
      "logps/chosen": -273.82110595703125,
      "logps/rejected": -283.1221923828125,
      "loss": 0.5867,
      "rewards/accuracies": 0.6712499856948853,
      "rewards/chosen": 0.07628773897886276,
      "rewards/margins": 0.4302709102630615,
      "rewards/rejected": -0.35398316383361816,
      "step": 1100
    },
    {
      "epoch": 0.4662359052324845,
      "grad_norm": 70.6491928100586,
      "learning_rate": 4.692850705917693e-07,
      "logits/chosen": -1.4128690958023071,
      "logits/rejected": -1.314226508140564,
      "logps/chosen": -284.3450622558594,
      "logps/rejected": -289.0674743652344,
      "loss": 0.5922,
      "rewards/accuracies": 0.6474999785423279,
      "rewards/chosen": 0.04309426248073578,
      "rewards/margins": 0.4344441890716553,
      "rewards/rejected": -0.3913499414920807,
      "step": 1150
    },
    {
      "epoch": 0.4865070315469403,
      "grad_norm": 75.06768798828125,
      "learning_rate": 4.655301892460198e-07,
      "logits/chosen": -1.421556830406189,
      "logits/rejected": -1.3319025039672852,
      "logps/chosen": -275.91021728515625,
      "logps/rejected": -287.9272155761719,
      "loss": 0.5675,
      "rewards/accuracies": 0.6837499737739563,
      "rewards/chosen": 0.018606889992952347,
      "rewards/margins": 0.5360429286956787,
      "rewards/rejected": -0.5174359679222107,
      "step": 1200
    },
    {
      "epoch": 0.4865070315469403,
      "eval_logits/chosen": null,
      "eval_logits/rejected": null,
      "eval_logps/chosen": -299.53399658203125,
      "eval_logps/rejected": -313.1133117675781,
      "eval_loss": 0.5999007225036621,
      "eval_rewards/accuracies": 0.6396039724349976,
      "eval_rewards/chosen": -0.015744315460324287,
      "eval_rewards/margins": 0.43499550223350525,
      "eval_rewards/rejected": -0.4507397711277008,
      "eval_runtime": 415.9657,
      "eval_samples_per_second": 6.07,
      "eval_steps_per_second": 6.07,
      "step": 1200
    },
    {
      "epoch": 0.5067781578613961,
      "grad_norm": 92.7549057006836,
      "learning_rate": 4.617753079002703e-07,
      "logits/chosen": -1.4393095970153809,
      "logits/rejected": -1.4011856317520142,
      "logps/chosen": -280.40020751953125,
      "logps/rejected": -284.593505859375,
      "loss": 0.5973,
      "rewards/accuracies": 0.6449999809265137,
      "rewards/chosen": -0.031209612265229225,
      "rewards/margins": 0.4549286663532257,
      "rewards/rejected": -0.4861382246017456,
      "step": 1250
    },
    {
      "epoch": 0.5270492841758521,
      "grad_norm": 96.96080017089844,
      "learning_rate": 4.5802042655452084e-07,
      "logits/chosen": -1.3751548528671265,
      "logits/rejected": -1.3570141792297363,
      "logps/chosen": -299.5054626464844,
      "logps/rejected": -295.8842468261719,
      "loss": 0.5954,
      "rewards/accuracies": 0.6637499928474426,
      "rewards/chosen": 0.0502300038933754,
      "rewards/margins": 0.4635014832019806,
      "rewards/rejected": -0.4132714867591858,
      "step": 1300
    },
    {
      "epoch": 0.5473204104903079,
      "grad_norm": 73.79745483398438,
      "learning_rate": 4.542655452087714e-07,
      "logits/chosen": -1.4204226732254028,
      "logits/rejected": -1.3422303199768066,
      "logps/chosen": -294.5567321777344,
      "logps/rejected": -296.5347900390625,
      "loss": 0.5707,
      "rewards/accuracies": 0.6899999976158142,
      "rewards/chosen": 0.08490865677595139,
      "rewards/margins": 0.5479007959365845,
      "rewards/rejected": -0.4629921615123749,
      "step": 1350
    },
    {
      "epoch": 0.5675915368047637,
      "grad_norm": 76.16097259521484,
      "learning_rate": 4.505106638630219e-07,
      "logits/chosen": -1.4311484098434448,
      "logits/rejected": -1.3465266227722168,
      "logps/chosen": -278.8095703125,
      "logps/rejected": -290.2147521972656,
      "loss": 0.5641,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": 0.0666603371500969,
      "rewards/margins": 0.5910163521766663,
      "rewards/rejected": -0.5243560075759888,
      "step": 1400
    },
    {
      "epoch": 0.5878626631192195,
      "grad_norm": 121.95441436767578,
      "learning_rate": 4.4675578251727244e-07,
      "logits/chosen": -1.4767857789993286,
      "logits/rejected": -1.4544174671173096,
      "logps/chosen": -286.6951904296875,
      "logps/rejected": -298.58074951171875,
      "loss": 0.5638,
      "rewards/accuracies": 0.6762499809265137,
      "rewards/chosen": -0.023380979895591736,
      "rewards/margins": 0.6335786581039429,
      "rewards/rejected": -0.6569597125053406,
      "step": 1450
    },
    {
      "epoch": 0.6081337894336754,
      "grad_norm": 71.31324768066406,
      "learning_rate": 4.430009011715229e-07,
      "logits/chosen": -1.50288724899292,
      "logits/rejected": -1.4406689405441284,
      "logps/chosen": -287.81842041015625,
      "logps/rejected": -287.2833557128906,
      "loss": 0.5803,
      "rewards/accuracies": 0.6762499809265137,
      "rewards/chosen": -0.0009390163468196988,
      "rewards/margins": 0.5071465969085693,
      "rewards/rejected": -0.5080856084823608,
      "step": 1500
    },
    {
      "epoch": 0.6284049157481313,
      "grad_norm": 98.81620788574219,
      "learning_rate": 4.3924601982577346e-07,
      "logits/chosen": -1.5208916664123535,
      "logits/rejected": -1.4801013469696045,
      "logps/chosen": -283.47320556640625,
      "logps/rejected": -284.3282470703125,
      "loss": 0.5546,
      "rewards/accuracies": 0.6825000047683716,
      "rewards/chosen": 0.03210862725973129,
      "rewards/margins": 0.6261197328567505,
      "rewards/rejected": -0.5940110683441162,
      "step": 1550
    },
    {
      "epoch": 0.6486760420625871,
      "grad_norm": 84.55746459960938,
      "learning_rate": 4.35491138480024e-07,
      "logits/chosen": -1.5223252773284912,
      "logits/rejected": -1.4359028339385986,
      "logps/chosen": -272.9998779296875,
      "logps/rejected": -277.84747314453125,
      "loss": 0.565,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": 0.024004172533750534,
      "rewards/margins": 0.5467111468315125,
      "rewards/rejected": -0.5227069854736328,
      "step": 1600
    },
    {
      "epoch": 0.6486760420625871,
      "eval_logits/chosen": null,
      "eval_logits/rejected": null,
      "eval_logps/chosen": -299.356201171875,
      "eval_logps/rejected": -314.2126159667969,
      "eval_loss": 0.5811572074890137,
      "eval_rewards/accuracies": 0.6594059467315674,
      "eval_rewards/chosen": 0.002034247387200594,
      "eval_rewards/margins": 0.562709391117096,
      "eval_rewards/rejected": -0.5606752038002014,
      "eval_runtime": 415.8683,
      "eval_samples_per_second": 6.072,
      "eval_steps_per_second": 6.072,
      "step": 1600
    },
    {
      "epoch": 0.6689471683770429,
      "grad_norm": 74.32270050048828,
      "learning_rate": 4.317362571342745e-07,
      "logits/chosen": -1.525417447090149,
      "logits/rejected": -1.4973794221878052,
      "logps/chosen": -294.22406005859375,
      "logps/rejected": -290.90863037109375,
      "loss": 0.5552,
      "rewards/accuracies": 0.6800000071525574,
      "rewards/chosen": 0.01900729537010193,
      "rewards/margins": 0.6314102411270142,
      "rewards/rejected": -0.6124029159545898,
      "step": 1650
    },
    {
      "epoch": 0.6892182946914988,
      "grad_norm": 65.72740173339844,
      "learning_rate": 4.2798137578852506e-07,
      "logits/chosen": -1.5028984546661377,
      "logits/rejected": -1.415768027305603,
      "logps/chosen": -285.7267150878906,
      "logps/rejected": -290.1302185058594,
      "loss": 0.5559,
      "rewards/accuracies": 0.6737499833106995,
      "rewards/chosen": 0.007276506628841162,
      "rewards/margins": 0.6502852439880371,
      "rewards/rejected": -0.643008828163147,
      "step": 1700
    },
    {
      "epoch": 0.7094894210059547,
      "grad_norm": 95.13706970214844,
      "learning_rate": 4.242264944427756e-07,
      "logits/chosen": -1.4592907428741455,
      "logits/rejected": -1.3769149780273438,
      "logps/chosen": -288.2720947265625,
      "logps/rejected": -305.466552734375,
      "loss": 0.5686,
      "rewards/accuracies": 0.6887500286102295,
      "rewards/chosen": 0.07554805278778076,
      "rewards/margins": 0.602150022983551,
      "rewards/rejected": -0.5266019701957703,
      "step": 1750
    },
    {
      "epoch": 0.7297605473204105,
      "grad_norm": 106.6552963256836,
      "learning_rate": 4.204716130970261e-07,
      "logits/chosen": -1.4575302600860596,
      "logits/rejected": -1.3902875185012817,
      "logps/chosen": -293.3002014160156,
      "logps/rejected": -302.8847351074219,
      "loss": 0.566,
      "rewards/accuracies": 0.6762499809265137,
      "rewards/chosen": 0.09604083001613617,
      "rewards/margins": 0.6465904712677002,
      "rewards/rejected": -0.5505496859550476,
      "step": 1800
    },
    {
      "epoch": 0.7500316736348663,
      "grad_norm": 65.49022674560547,
      "learning_rate": 4.1671673175127666e-07,
      "logits/chosen": -1.5301294326782227,
      "logits/rejected": -1.4222004413604736,
      "logps/chosen": -291.46368408203125,
      "logps/rejected": -299.5816650390625,
      "loss": 0.5524,
      "rewards/accuracies": 0.7074999809265137,
      "rewards/chosen": 0.13709893822669983,
      "rewards/margins": 0.6951863765716553,
      "rewards/rejected": -0.5580874085426331,
      "step": 1850
    },
    {
      "epoch": 0.7703027999493222,
      "grad_norm": 73.81986236572266,
      "learning_rate": 4.129618504055272e-07,
      "logits/chosen": -1.5067791938781738,
      "logits/rejected": -1.4256831407546997,
      "logps/chosen": -289.6808166503906,
      "logps/rejected": -292.93865966796875,
      "loss": 0.5678,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -0.05619234964251518,
      "rewards/margins": 0.6666356921195984,
      "rewards/rejected": -0.7228280901908875,
      "step": 1900
    },
    {
      "epoch": 0.790573926263778,
      "grad_norm": 92.40637969970703,
      "learning_rate": 4.092069690597777e-07,
      "logits/chosen": -1.4289398193359375,
      "logits/rejected": -1.3181583881378174,
      "logps/chosen": -284.9779052734375,
      "logps/rejected": -303.327392578125,
      "loss": 0.5482,
      "rewards/accuracies": 0.7149999737739563,
      "rewards/chosen": 0.0824815109372139,
      "rewards/margins": 0.6665127277374268,
      "rewards/rejected": -0.584031343460083,
      "step": 1950
    },
    {
      "epoch": 0.8108450525782339,
      "grad_norm": 93.90290069580078,
      "learning_rate": 4.054520877140282e-07,
      "logits/chosen": -1.4142777919769287,
      "logits/rejected": -1.3291970491409302,
      "logps/chosen": -279.107177734375,
      "logps/rejected": -290.64715576171875,
      "loss": 0.561,
      "rewards/accuracies": 0.6587499976158142,
      "rewards/chosen": 0.0036598199512809515,
      "rewards/margins": 0.6877757906913757,
      "rewards/rejected": -0.684116005897522,
      "step": 2000
    },
    {
      "epoch": 0.8108450525782339,
      "eval_logits/chosen": null,
      "eval_logits/rejected": null,
      "eval_logps/chosen": -299.9206848144531,
      "eval_logps/rejected": -315.6641540527344,
      "eval_loss": 0.5711494088172913,
      "eval_rewards/accuracies": 0.6673267483711243,
      "eval_rewards/chosen": -0.05441296845674515,
      "eval_rewards/margins": 0.6514108180999756,
      "eval_rewards/rejected": -0.7058237791061401,
      "eval_runtime": 416.0972,
      "eval_samples_per_second": 6.068,
      "eval_steps_per_second": 6.068,
      "step": 2000
    },
    {
      "epoch": 0.8311161788926897,
      "grad_norm": 72.61177062988281,
      "learning_rate": 4.0169720636827874e-07,
      "logits/chosen": -1.413093090057373,
      "logits/rejected": -1.3807673454284668,
      "logps/chosen": -278.2998046875,
      "logps/rejected": -291.52801513671875,
      "loss": 0.555,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.05374690890312195,
      "rewards/margins": 0.6846556663513184,
      "rewards/rejected": -0.7384025454521179,
      "step": 2050
    },
    {
      "epoch": 0.8513873052071456,
      "grad_norm": 67.99674224853516,
      "learning_rate": 3.9794232502252927e-07,
      "logits/chosen": -1.477223515510559,
      "logits/rejected": -1.382552146911621,
      "logps/chosen": -286.6242370605469,
      "logps/rejected": -294.1151428222656,
      "loss": 0.559,
      "rewards/accuracies": 0.6899999976158142,
      "rewards/chosen": 0.0026776748709380627,
      "rewards/margins": 0.6922184824943542,
      "rewards/rejected": -0.6895408630371094,
      "step": 2100
    },
    {
      "epoch": 0.8716584315216014,
      "grad_norm": 90.21094512939453,
      "learning_rate": 3.941874436767798e-07,
      "logits/chosen": -1.409265398979187,
      "logits/rejected": -1.355931043624878,
      "logps/chosen": -270.63671875,
      "logps/rejected": -273.80743408203125,
      "loss": 0.5555,
      "rewards/accuracies": 0.6725000143051147,
      "rewards/chosen": -0.1644386649131775,
      "rewards/margins": 0.705502986907959,
      "rewards/rejected": -0.8699417114257812,
      "step": 2150
    },
    {
      "epoch": 0.8919295578360573,
      "grad_norm": 86.49909973144531,
      "learning_rate": 3.9043256233103033e-07,
      "logits/chosen": -1.4334936141967773,
      "logits/rejected": -1.273209571838379,
      "logps/chosen": -278.0585021972656,
      "logps/rejected": -282.90301513671875,
      "loss": 0.556,
      "rewards/accuracies": 0.6837499737739563,
      "rewards/chosen": 0.02689175121486187,
      "rewards/margins": 0.6583454608917236,
      "rewards/rejected": -0.6314537525177002,
      "step": 2200
    },
    {
      "epoch": 0.9122006841505131,
      "grad_norm": 75.58121490478516,
      "learning_rate": 3.8667768098528087e-07,
      "logits/chosen": -1.4251587390899658,
      "logits/rejected": -1.3200613260269165,
      "logps/chosen": -280.7901611328125,
      "logps/rejected": -297.78741455078125,
      "loss": 0.547,
      "rewards/accuracies": 0.6825000047683716,
      "rewards/chosen": 0.030514003708958626,
      "rewards/margins": 0.7190001606941223,
      "rewards/rejected": -0.6884861588478088,
      "step": 2250
    },
    {
      "epoch": 0.932471810464969,
      "grad_norm": 93.07740783691406,
      "learning_rate": 3.829227996395314e-07,
      "logits/chosen": -1.3386443853378296,
      "logits/rejected": -1.2963026762008667,
      "logps/chosen": -291.5765686035156,
      "logps/rejected": -293.8046569824219,
      "loss": 0.5682,
      "rewards/accuracies": 0.6700000166893005,
      "rewards/chosen": 0.01123973447829485,
      "rewards/margins": 0.695156991481781,
      "rewards/rejected": -0.683917224407196,
      "step": 2300
    },
    {
      "epoch": 0.9527429367794248,
      "grad_norm": 67.15850067138672,
      "learning_rate": 3.7916791829378193e-07,
      "logits/chosen": -1.4805598258972168,
      "logits/rejected": -1.3885161876678467,
      "logps/chosen": -291.665283203125,
      "logps/rejected": -302.1773376464844,
      "loss": 0.5598,
      "rewards/accuracies": 0.6924999952316284,
      "rewards/chosen": -0.1170397400856018,
      "rewards/margins": 0.7286503314971924,
      "rewards/rejected": -0.8456900119781494,
      "step": 2350
    },
    {
      "epoch": 0.9730140630938806,
      "grad_norm": 79.25121307373047,
      "learning_rate": 3.7541303694803247e-07,
      "logits/chosen": -1.490938425064087,
      "logits/rejected": -1.4275964498519897,
      "logps/chosen": -273.4732666015625,
      "logps/rejected": -283.27557373046875,
      "loss": 0.529,
      "rewards/accuracies": 0.7174999713897705,
      "rewards/chosen": -0.04049629345536232,
      "rewards/margins": 0.8318525552749634,
      "rewards/rejected": -0.8723488450050354,
      "step": 2400
    },
    {
      "epoch": 0.9730140630938806,
      "eval_logits/chosen": null,
      "eval_logits/rejected": null,
      "eval_logps/chosen": -300.1016540527344,
      "eval_logps/rejected": -316.64300537109375,
      "eval_loss": 0.5641157031059265,
      "eval_rewards/accuracies": 0.6768316626548767,
      "eval_rewards/chosen": -0.07250940054655075,
      "eval_rewards/margins": 0.7312046885490417,
      "eval_rewards/rejected": -0.8037140369415283,
      "eval_runtime": 415.6098,
      "eval_samples_per_second": 6.075,
      "eval_steps_per_second": 6.075,
      "step": 2400
    },
    {
      "epoch": 0.9932851894083365,
      "grad_norm": 62.19463348388672,
      "learning_rate": 3.71658155602283e-07,
      "logits/chosen": -1.5025901794433594,
      "logits/rejected": -1.4539462327957153,
      "logps/chosen": -268.9754638671875,
      "logps/rejected": -291.55938720703125,
      "loss": 0.5229,
      "rewards/accuracies": 0.7212499976158142,
      "rewards/chosen": -0.08627080172300339,
      "rewards/margins": 0.9152739644050598,
      "rewards/rejected": -1.001544713973999,
      "step": 2450
    },
    {
      "epoch": 1.0133789433675409,
      "grad_norm": 54.7685661315918,
      "learning_rate": 3.679032742565335e-07,
      "logits/chosen": -1.5413084030151367,
      "logits/rejected": -1.4709727764129639,
      "logps/chosen": -277.89471435546875,
      "logps/rejected": -287.55975341796875,
      "loss": 0.4686,
      "rewards/accuracies": 0.7894073128700256,
      "rewards/chosen": 0.044554296880960464,
      "rewards/margins": 1.0038422346115112,
      "rewards/rejected": -0.9592878818511963,
      "step": 2500
    },
    {
      "epoch": 1.0336500696819968,
      "grad_norm": 57.75327682495117,
      "learning_rate": 3.64148392910784e-07,
      "logits/chosen": -1.5344880819320679,
      "logits/rejected": -1.4133801460266113,
      "logps/chosen": -282.7772521972656,
      "logps/rejected": -299.0523376464844,
      "loss": 0.4152,
      "rewards/accuracies": 0.8450000286102295,
      "rewards/chosen": 0.12068451195955276,
      "rewards/margins": 1.151495337486267,
      "rewards/rejected": -1.030810832977295,
      "step": 2550
    },
    {
      "epoch": 1.0539211959964525,
      "grad_norm": 65.04196166992188,
      "learning_rate": 3.603935115650345e-07,
      "logits/chosen": -1.570306658744812,
      "logits/rejected": -1.505313515663147,
      "logps/chosen": -277.1626892089844,
      "logps/rejected": -296.0166931152344,
      "loss": 0.4307,
      "rewards/accuracies": 0.8462499976158142,
      "rewards/chosen": 0.0085002351552248,
      "rewards/margins": 1.0881829261779785,
      "rewards/rejected": -1.07968270778656,
      "step": 2600
    },
    {
      "epoch": 1.0741923223109084,
      "grad_norm": 78.38238525390625,
      "learning_rate": 3.5663863021928503e-07,
      "logits/chosen": -1.675102710723877,
      "logits/rejected": -1.5891515016555786,
      "logps/chosen": -281.3057556152344,
      "logps/rejected": -308.35198974609375,
      "loss": 0.4227,
      "rewards/accuracies": 0.8475000262260437,
      "rewards/chosen": -0.032773420214653015,
      "rewards/margins": 1.1719969511032104,
      "rewards/rejected": -1.2047704458236694,
      "step": 2650
    },
    {
      "epoch": 1.0944634486253642,
      "grad_norm": 55.8791389465332,
      "learning_rate": 3.5288374887353556e-07,
      "logits/chosen": -1.6009550094604492,
      "logits/rejected": -1.5042208433151245,
      "logps/chosen": -279.0746765136719,
      "logps/rejected": -288.80792236328125,
      "loss": 0.3933,
      "rewards/accuracies": 0.8737499713897705,
      "rewards/chosen": 0.02033129520714283,
      "rewards/margins": 1.2630525827407837,
      "rewards/rejected": -1.2427213191986084,
      "step": 2700
    },
    {
      "epoch": 1.11473457493982,
      "grad_norm": 56.23662185668945,
      "learning_rate": 3.491288675277861e-07,
      "logits/chosen": -1.6724143028259277,
      "logits/rejected": -1.5931694507598877,
      "logps/chosen": -272.8004455566406,
      "logps/rejected": -282.1023254394531,
      "loss": 0.4183,
      "rewards/accuracies": 0.8612499833106995,
      "rewards/chosen": -0.01254047267138958,
      "rewards/margins": 1.2163379192352295,
      "rewards/rejected": -1.228878378868103,
      "step": 2750
    },
    {
      "epoch": 1.135005701254276,
      "grad_norm": 54.696922302246094,
      "learning_rate": 3.4537398618203663e-07,
      "logits/chosen": -1.6559054851531982,
      "logits/rejected": -1.5775338411331177,
      "logps/chosen": -284.3577880859375,
      "logps/rejected": -291.72021484375,
      "loss": 0.4216,
      "rewards/accuracies": 0.8450000286102295,
      "rewards/chosen": 0.12303484231233597,
      "rewards/margins": 1.1699702739715576,
      "rewards/rejected": -1.0469355583190918,
| "step": 2800 | |
| }, | |
| { | |
| "epoch": 1.135005701254276, | |
|       "eval_logits/chosen": null, | |
|       "eval_logits/rejected": null, | |
| "eval_logps/chosen": -300.6401062011719, | |
| "eval_logps/rejected": -318.04827880859375, | |
| "eval_loss": 0.5609720945358276, | |
| "eval_rewards/accuracies": 0.676435649394989, | |
| "eval_rewards/chosen": -0.12635362148284912, | |
| "eval_rewards/margins": 0.8178859949111938, | |
| "eval_rewards/rejected": -0.944239616394043, | |
| "eval_runtime": 415.4489, | |
| "eval_samples_per_second": 6.078, | |
| "eval_steps_per_second": 6.078, | |
| "step": 2800 | |
| }, | |
| { | |
| "epoch": 1.1552768275687317, | |
| "grad_norm": 76.64007568359375, | |
| "learning_rate": 3.4161910483628716e-07, | |
| "logits/chosen": -1.6990894079208374, | |
| "logits/rejected": -1.607717752456665, | |
| "logps/chosen": -280.3568420410156, | |
| "logps/rejected": -290.5302429199219, | |
| "loss": 0.412, | |
| "rewards/accuracies": 0.84375, | |
| "rewards/chosen": 0.03269599378108978, | |
| "rewards/margins": 1.2243117094039917, | |
| "rewards/rejected": -1.1916155815124512, | |
| "step": 2850 | |
| }, | |
| { | |
| "epoch": 1.1755479538831877, | |
| "grad_norm": 80.47607421875, | |
| "learning_rate": 3.378642234905377e-07, | |
| "logits/chosen": -1.6762568950653076, | |
| "logits/rejected": -1.6170974969863892, | |
| "logps/chosen": -289.716064453125, | |
| "logps/rejected": -306.2812194824219, | |
| "loss": 0.4166, | |
| "rewards/accuracies": 0.8587499856948853, | |
| "rewards/chosen": -0.08021766692399979, | |
| "rewards/margins": 1.212770938873291, | |
| "rewards/rejected": -1.2929885387420654, | |
| "step": 2900 | |
| }, | |
| { | |
| "epoch": 1.1958190801976434, | |
| "grad_norm": 64.71836853027344, | |
| "learning_rate": 3.3410934214478817e-07, | |
| "logits/chosen": -1.6764569282531738, | |
| "logits/rejected": -1.5992953777313232, | |
| "logps/chosen": -290.1752624511719, | |
| "logps/rejected": -298.4311218261719, | |
| "loss": 0.4022, | |
| "rewards/accuracies": 0.862500011920929, | |
| "rewards/chosen": 0.07498079538345337, | |
| "rewards/margins": 1.2821089029312134, | |
| "rewards/rejected": -1.2071280479431152, | |
| "step": 2950 | |
| }, | |
| { | |
| "epoch": 1.2160902065120993, | |
| "grad_norm": 58.280731201171875, | |
| "learning_rate": 3.303544607990387e-07, | |
| "logits/chosen": -1.6789518594741821, | |
| "logits/rejected": -1.6140516996383667, | |
| "logps/chosen": -291.8467102050781, | |
| "logps/rejected": -303.8788757324219, | |
| "loss": 0.3935, | |
| "rewards/accuracies": 0.862500011920929, | |
| "rewards/chosen": 0.05883245915174484, | |
| "rewards/margins": 1.2953184843063354, | |
| "rewards/rejected": -1.2364859580993652, | |
| "step": 3000 | |
| }, | |
| { | |
| "epoch": 1.2363613328265552, | |
| "grad_norm": 82.7146224975586, | |
| "learning_rate": 3.2659957945328924e-07, | |
| "logits/chosen": -1.6874868869781494, | |
| "logits/rejected": -1.6047091484069824, | |
| "logps/chosen": -278.9095458984375, | |
| "logps/rejected": -296.8002624511719, | |
| "loss": 0.4161, | |
| "rewards/accuracies": 0.8500000238418579, | |
| "rewards/chosen": -0.09684688597917557, | |
| "rewards/margins": 1.266066312789917, | |
| "rewards/rejected": -1.3629130125045776, | |
| "step": 3050 | |
| }, | |
| { | |
| "epoch": 1.256632459141011, | |
| "grad_norm": 49.19455337524414, | |
| "learning_rate": 3.2284469810753977e-07, | |
| "logits/chosen": -1.6933773756027222, | |
| "logits/rejected": -1.5838935375213623, | |
| "logps/chosen": -294.2673034667969, | |
| "logps/rejected": -305.1930236816406, | |
| "loss": 0.4016, | |
| "rewards/accuracies": 0.8650000095367432, | |
| "rewards/chosen": -0.06393048912286758, | |
| "rewards/margins": 1.2513405084609985, | |
| "rewards/rejected": -1.315271019935608, | |
| "step": 3100 | |
| }, | |
| { | |
| "epoch": 1.276903585455467, | |
| "grad_norm": 67.41085815429688, | |
| "learning_rate": 3.190898167617903e-07, | |
| "logits/chosen": -1.7262557744979858, | |
| "logits/rejected": -1.654240369796753, | |
| "logps/chosen": -280.0416259765625, | |
| "logps/rejected": -287.1568603515625, | |
| "loss": 0.4178, | |
| "rewards/accuracies": 0.8362500071525574, | |
| "rewards/chosen": -0.050981488078832626, | |
| "rewards/margins": 1.1942505836486816, | |
| "rewards/rejected": -1.245232105255127, | |
| "step": 3150 | |
| }, | |
| { | |
| "epoch": 1.2971747117699226, | |
| "grad_norm": 50.611080169677734, | |
| "learning_rate": 3.1533493541604084e-07, | |
| "logits/chosen": -1.7681493759155273, | |
| "logits/rejected": -1.6676533222198486, | |
| "logps/chosen": -292.8888854980469, | |
| "logps/rejected": -304.50030517578125, | |
| "loss": 0.4123, | |
| "rewards/accuracies": 0.8550000190734863, | |
| "rewards/chosen": 0.014382703229784966, | |
| "rewards/margins": 1.202967643737793, | |
| "rewards/rejected": -1.1885849237442017, | |
| "step": 3200 | |
| }, | |
| { | |
| "epoch": 1.2971747117699226, | |
|       "eval_logits/chosen": null, | |
|       "eval_logits/rejected": null, | |
| "eval_logps/chosen": -301.85089111328125, | |
| "eval_logps/rejected": -319.952880859375, | |
| "eval_loss": 0.5564723610877991, | |
| "eval_rewards/accuracies": 0.6760395765304565, | |
| "eval_rewards/chosen": -0.247432142496109, | |
| "eval_rewards/margins": 0.8872644901275635, | |
| "eval_rewards/rejected": -1.13469660282135, | |
| "eval_runtime": 415.8955, | |
| "eval_samples_per_second": 6.071, | |
| "eval_steps_per_second": 6.071, | |
| "step": 3200 | |
| }, | |
| { | |
| "epoch": 1.3174458380843785, | |
| "grad_norm": 42.45451354980469, | |
| "learning_rate": 3.1158005407029137e-07, | |
| "logits/chosen": -1.7138203382492065, | |
| "logits/rejected": -1.6301144361495972, | |
| "logps/chosen": -286.63836669921875, | |
| "logps/rejected": -297.8773193359375, | |
| "loss": 0.3941, | |
| "rewards/accuracies": 0.8650000095367432, | |
| "rewards/chosen": -0.07159855216741562, | |
| "rewards/margins": 1.34830904006958, | |
| "rewards/rejected": -1.419907569885254, | |
| "step": 3250 | |
| }, | |
| { | |
| "epoch": 1.3377169643988345, | |
| "grad_norm": 53.452518463134766, | |
| "learning_rate": 3.078251727245419e-07, | |
| "logits/chosen": -1.7930119037628174, | |
| "logits/rejected": -1.6965413093566895, | |
| "logps/chosen": -295.5643310546875, | |
| "logps/rejected": -311.3070373535156, | |
| "loss": 0.3959, | |
| "rewards/accuracies": 0.8525000214576721, | |
| "rewards/chosen": -0.20387369394302368, | |
| "rewards/margins": 1.3728365898132324, | |
| "rewards/rejected": -1.5767103433609009, | |
| "step": 3300 | |
| }, | |
| { | |
| "epoch": 1.3579880907132902, | |
| "grad_norm": 46.77083206176758, | |
| "learning_rate": 3.0407029137879244e-07, | |
| "logits/chosen": -1.7601673603057861, | |
| "logits/rejected": -1.704328179359436, | |
| "logps/chosen": -292.89984130859375, | |
| "logps/rejected": -305.4393005371094, | |
| "loss": 0.3861, | |
| "rewards/accuracies": 0.862500011920929, | |
| "rewards/chosen": -0.18237976729869843, | |
| "rewards/margins": 1.388597846031189, | |
| "rewards/rejected": -1.5709774494171143, | |
| "step": 3350 | |
| }, | |
| { | |
| "epoch": 1.3782592170277461, | |
| "grad_norm": 76.36799621582031, | |
| "learning_rate": 3.0031541003304297e-07, | |
| "logits/chosen": -1.7609264850616455, | |
| "logits/rejected": -1.6439956426620483, | |
| "logps/chosen": -284.9732360839844, | |
| "logps/rejected": -297.753662109375, | |
| "loss": 0.3842, | |
| "rewards/accuracies": 0.8462499976158142, | |
| "rewards/chosen": -0.0034372068475931883, | |
| "rewards/margins": 1.4692819118499756, | |
| "rewards/rejected": -1.472719430923462, | |
| "step": 3400 | |
| }, | |
| { | |
| "epoch": 1.3985303433422018, | |
| "grad_norm": 76.1081314086914, | |
| "learning_rate": 2.9656052868729345e-07, | |
| "logits/chosen": -1.7323936223983765, | |
| "logits/rejected": -1.6665867567062378, | |
| "logps/chosen": -275.983642578125, | |
| "logps/rejected": -287.6267395019531, | |
| "loss": 0.4022, | |
| "rewards/accuracies": 0.8412500023841858, | |
| "rewards/chosen": -0.03143583610653877, | |
| "rewards/margins": 1.3420977592468262, | |
| "rewards/rejected": -1.3735336065292358, | |
| "step": 3450 | |
| }, | |
| { | |
| "epoch": 1.4188014696566578, | |
| "grad_norm": 62.67765426635742, | |
| "learning_rate": 2.92805647341544e-07, | |
| "logits/chosen": -1.7675509452819824, | |
| "logits/rejected": -1.7154219150543213, | |
| "logps/chosen": -279.376708984375, | |
| "logps/rejected": -305.7305908203125, | |
| "loss": 0.3838, | |
| "rewards/accuracies": 0.8587499856948853, | |
| "rewards/chosen": -0.05659040808677673, | |
| "rewards/margins": 1.4510650634765625, | |
| "rewards/rejected": -1.5076556205749512, | |
| "step": 3500 | |
| }, | |
| { | |
| "epoch": 1.4390725959711137, | |
| "grad_norm": 78.5838851928711, | |
| "learning_rate": 2.890507659957945e-07, | |
| "logits/chosen": -1.8132127523422241, | |
| "logits/rejected": -1.6867140531539917, | |
| "logps/chosen": -282.88348388671875, | |
| "logps/rejected": -299.5718688964844, | |
| "loss": 0.3871, | |
| "rewards/accuracies": 0.862500011920929, | |
| "rewards/chosen": -0.12158777564764023, | |
| "rewards/margins": 1.45265531539917, | |
| "rewards/rejected": -1.5742430686950684, | |
| "step": 3550 | |
| }, | |
| { | |
| "epoch": 1.4593437222855694, | |
| "grad_norm": 54.62107467651367, | |
| "learning_rate": 2.8529588465004505e-07, | |
| "logits/chosen": -1.7798316478729248, | |
| "logits/rejected": -1.7143536806106567, | |
| "logps/chosen": -294.67694091796875, | |
| "logps/rejected": -312.669189453125, | |
| "loss": 0.3976, | |
| "rewards/accuracies": 0.8399999737739563, | |
| "rewards/chosen": -0.04213954880833626, | |
| "rewards/margins": 1.3824139833450317, | |
| "rewards/rejected": -1.4245535135269165, | |
| "step": 3600 | |
| }, | |
| { | |
| "epoch": 1.4593437222855694, | |
|       "eval_logits/chosen": null, | |
|       "eval_logits/rejected": null, | |
| "eval_logps/chosen": -302.8695983886719, | |
| "eval_logps/rejected": -321.6412353515625, | |
| "eval_loss": 0.5528350472450256, | |
| "eval_rewards/accuracies": 0.6760395765304565, | |
| "eval_rewards/chosen": -0.3493024706840515, | |
| "eval_rewards/margins": 0.9542282223701477, | |
| "eval_rewards/rejected": -1.3035306930541992, | |
| "eval_runtime": 415.2398, | |
| "eval_samples_per_second": 6.081, | |
| "eval_steps_per_second": 6.081, | |
| "step": 3600 | |
| }, | |
| { | |
| "epoch": 1.4796148486000253, | |
| "grad_norm": 70.0921859741211, | |
| "learning_rate": 2.815410033042956e-07, | |
| "logits/chosen": -1.7442843914031982, | |
| "logits/rejected": -1.6810210943222046, | |
| "logps/chosen": -276.6694641113281, | |
| "logps/rejected": -303.5418701171875, | |
| "loss": 0.3959, | |
| "rewards/accuracies": 0.8424999713897705, | |
| "rewards/chosen": -0.1733025759458542, | |
| "rewards/margins": 1.3963100910186768, | |
| "rewards/rejected": -1.569612741470337, | |
| "step": 3650 | |
| }, | |
| { | |
| "epoch": 1.499885974914481, | |
| "grad_norm": 81.23762512207031, | |
| "learning_rate": 2.777861219585461e-07, | |
| "logits/chosen": -1.7525570392608643, | |
| "logits/rejected": -1.730958104133606, | |
| "logps/chosen": -293.6806945800781, | |
| "logps/rejected": -300.5395202636719, | |
| "loss": 0.4084, | |
| "rewards/accuracies": 0.8362500071525574, | |
| "rewards/chosen": -0.22353293001651764, | |
| "rewards/margins": 1.2744700908660889, | |
| "rewards/rejected": -1.4980032444000244, | |
| "step": 3700 | |
| }, | |
| { | |
| "epoch": 1.520157101228937, | |
| "grad_norm": 54.750484466552734, | |
| "learning_rate": 2.7403124061279665e-07, | |
| "logits/chosen": -1.8778899908065796, | |
| "logits/rejected": -1.718734860420227, | |
| "logps/chosen": -290.9227294921875, | |
| "logps/rejected": -297.7557373046875, | |
| "loss": 0.3776, | |
| "rewards/accuracies": 0.8675000071525574, | |
| "rewards/chosen": -0.23451055586338043, | |
| "rewards/margins": 1.4131709337234497, | |
| "rewards/rejected": -1.647681474685669, | |
| "step": 3750 | |
| }, | |
| { | |
| "epoch": 1.540428227543393, | |
| "grad_norm": 67.8171157836914, | |
| "learning_rate": 2.702763592670472e-07, | |
| "logits/chosen": -1.8570775985717773, | |
| "logits/rejected": -1.7831798791885376, | |
| "logps/chosen": -294.40594482421875, | |
| "logps/rejected": -302.93975830078125, | |
| "loss": 0.3741, | |
| "rewards/accuracies": 0.8725000023841858, | |
| "rewards/chosen": -0.1573440581560135, | |
| "rewards/margins": 1.4768372774124146, | |
| "rewards/rejected": -1.6341813802719116, | |
| "step": 3800 | |
| }, | |
| { | |
| "epoch": 1.5606993538578489, | |
| "grad_norm": 64.60455322265625, | |
| "learning_rate": 2.665214779212977e-07, | |
| "logits/chosen": -1.8007545471191406, | |
| "logits/rejected": -1.6933119297027588, | |
| "logps/chosen": -283.2760009765625, | |
| "logps/rejected": -295.49053955078125, | |
| "loss": 0.391, | |
| "rewards/accuracies": 0.8525000214576721, | |
| "rewards/chosen": -0.20751281082630157, | |
| "rewards/margins": 1.4947165250778198, | |
| "rewards/rejected": -1.7022292613983154, | |
| "step": 3850 | |
| }, | |
| { | |
| "epoch": 1.5809704801723046, | |
| "grad_norm": 60.319190979003906, | |
| "learning_rate": 2.6276659657554825e-07, | |
| "logits/chosen": -1.7409034967422485, | |
| "logits/rejected": -1.6810635328292847, | |
| "logps/chosen": -282.11065673828125, | |
| "logps/rejected": -310.8985290527344, | |
| "loss": 0.3883, | |
| "rewards/accuracies": 0.8487499952316284, | |
| "rewards/chosen": -0.10306248068809509, | |
| "rewards/margins": 1.5504050254821777, | |
| "rewards/rejected": -1.6534672975540161, | |
| "step": 3900 | |
| }, | |
| { | |
| "epoch": 1.6012416064867603, | |
| "grad_norm": 54.668697357177734, | |
| "learning_rate": 2.590117152297987e-07, | |
| "logits/chosen": -1.7251437902450562, | |
| "logits/rejected": -1.6725904941558838, | |
| "logps/chosen": -290.4062194824219, | |
| "logps/rejected": -302.4581298828125, | |
| "loss": 0.3672, | |
| "rewards/accuracies": 0.8650000095367432, | |
| "rewards/chosen": -0.07678358256816864, | |
| "rewards/margins": 1.4900747537612915, | |
| "rewards/rejected": -1.5668585300445557, | |
| "step": 3950 | |
| }, | |
| { | |
| "epoch": 1.6215127328012162, | |
| "grad_norm": 67.85160064697266, | |
| "learning_rate": 2.552568338840492e-07, | |
| "logits/chosen": -1.8258453607559204, | |
| "logits/rejected": -1.7406154870986938, | |
| "logps/chosen": -291.4789123535156, | |
| "logps/rejected": -297.304443359375, | |
| "loss": 0.3902, | |
| "rewards/accuracies": 0.8712499737739563, | |
| "rewards/chosen": -0.0996013805270195, | |
| "rewards/margins": 1.4313582181930542, | |
| "rewards/rejected": -1.5309594869613647, | |
| "step": 4000 | |
| }, | |
| { | |
| "epoch": 1.6215127328012162, | |
|       "eval_logits/chosen": null, | |
|       "eval_logits/rejected": null, | |
| "eval_logps/chosen": -302.88287353515625, | |
| "eval_logps/rejected": -322.1484680175781, | |
| "eval_loss": 0.5519309639930725, | |
| "eval_rewards/accuracies": 0.6776237487792969, | |
| "eval_rewards/chosen": -0.35062944889068604, | |
| "eval_rewards/margins": 1.0036237239837646, | |
| "eval_rewards/rejected": -1.3542530536651611, | |
| "eval_runtime": 416.4674, | |
| "eval_samples_per_second": 6.063, | |
| "eval_steps_per_second": 6.063, | |
| "step": 4000 | |
| }, | |
| { | |
| "epoch": 1.6417838591156722, | |
| "grad_norm": 73.27534484863281, | |
| "learning_rate": 2.5150195253829974e-07, | |
| "logits/chosen": -1.7660610675811768, | |
| "logits/rejected": -1.7104580402374268, | |
| "logps/chosen": -285.9803771972656, | |
| "logps/rejected": -299.6733093261719, | |
| "loss": 0.3787, | |
| "rewards/accuracies": 0.8612499833106995, | |
| "rewards/chosen": -0.12544548511505127, | |
| "rewards/margins": 1.4188430309295654, | |
| "rewards/rejected": -1.5442882776260376, | |
| "step": 4050 | |
| }, | |
| { | |
| "epoch": 1.662054985430128, | |
| "grad_norm": 46.9147834777832, | |
| "learning_rate": 2.4774707119255033e-07, | |
| "logits/chosen": -1.8191289901733398, | |
| "logits/rejected": -1.6872003078460693, | |
| "logps/chosen": -290.36480712890625, | |
| "logps/rejected": -293.54962158203125, | |
| "loss": 0.3812, | |
| "rewards/accuracies": 0.8612499833106995, | |
| "rewards/chosen": -0.06606242805719376, | |
| "rewards/margins": 1.3529201745986938, | |
| "rewards/rejected": -1.4189825057983398, | |
| "step": 4100 | |
| }, | |
| { | |
| "epoch": 1.6823261117445838, | |
| "grad_norm": 58.750465393066406, | |
| "learning_rate": 2.4399218984680086e-07, | |
| "logits/chosen": -1.7732608318328857, | |
| "logits/rejected": -1.669424295425415, | |
| "logps/chosen": -288.0268249511719, | |
| "logps/rejected": -300.3560485839844, | |
| "loss": 0.3628, | |
| "rewards/accuracies": 0.8650000095367432, | |
| "rewards/chosen": -0.24312597513198853, | |
| "rewards/margins": 1.5965678691864014, | |
| "rewards/rejected": -1.8396939039230347, | |
| "step": 4150 | |
| }, | |
| { | |
| "epoch": 1.7025972380590395, | |
| "grad_norm": 69.84751892089844, | |
| "learning_rate": 2.4023730850105134e-07, | |
| "logits/chosen": -1.79349946975708, | |
| "logits/rejected": -1.7183064222335815, | |
| "logps/chosen": -282.3658142089844, | |
| "logps/rejected": -301.5096130371094, | |
| "loss": 0.3855, | |
| "rewards/accuracies": 0.8575000166893005, | |
| "rewards/chosen": -0.11784594506025314, | |
| "rewards/margins": 1.4399559497833252, | |
| "rewards/rejected": -1.5578017234802246, | |
| "step": 4200 | |
| }, | |
| { | |
| "epoch": 1.7228683643734954, | |
| "grad_norm": 67.40689086914062, | |
| "learning_rate": 2.364824271553019e-07, | |
| "logits/chosen": -1.798845648765564, | |
| "logits/rejected": -1.7296922206878662, | |
| "logps/chosen": -296.6754150390625, | |
| "logps/rejected": -309.8477478027344, | |
| "loss": 0.3876, | |
| "rewards/accuracies": 0.8412500023841858, | |
| "rewards/chosen": -0.1499800682067871, | |
| "rewards/margins": 1.4837852716445923, | |
| "rewards/rejected": -1.6337652206420898, | |
| "step": 4250 | |
| }, | |
| { | |
| "epoch": 1.7431394906879514, | |
| "grad_norm": 59.47840118408203, | |
| "learning_rate": 2.3272754580955238e-07, | |
| "logits/chosen": -1.8581206798553467, | |
| "logits/rejected": -1.695722222328186, | |
| "logps/chosen": -275.04913330078125, | |
| "logps/rejected": -299.3261413574219, | |
| "loss": 0.367, | |
| "rewards/accuracies": 0.8799999952316284, | |
| "rewards/chosen": -0.2011287659406662, | |
| "rewards/margins": 1.5775054693222046, | |
| "rewards/rejected": -1.7786343097686768, | |
| "step": 4300 | |
| }, | |
| { | |
| "epoch": 1.7634106170024073, | |
| "grad_norm": 68.79319763183594, | |
| "learning_rate": 2.2897266446380292e-07, | |
| "logits/chosen": -1.829512596130371, | |
| "logits/rejected": -1.7692248821258545, | |
| "logps/chosen": -292.6074523925781, | |
| "logps/rejected": -304.02276611328125, | |
| "loss": 0.3698, | |
| "rewards/accuracies": 0.8662499785423279, | |
| "rewards/chosen": -0.27817782759666443, | |
| "rewards/margins": 1.6162424087524414, | |
| "rewards/rejected": -1.8944201469421387, | |
| "step": 4350 | |
| }, | |
| { | |
| "epoch": 1.783681743316863, | |
| "grad_norm": 89.96156311035156, | |
| "learning_rate": 2.2521778311805345e-07, | |
| "logits/chosen": -1.8753999471664429, | |
| "logits/rejected": -1.7941206693649292, | |
| "logps/chosen": -275.5766296386719, | |
| "logps/rejected": -291.0476989746094, | |
| "loss": 0.3676, | |
| "rewards/accuracies": 0.8700000047683716, | |
| "rewards/chosen": -0.2780623733997345, | |
| "rewards/margins": 1.61216402053833, | |
| "rewards/rejected": -1.8902264833450317, | |
| "step": 4400 | |
| }, | |
| { | |
| "epoch": 1.783681743316863, | |
|       "eval_logits/chosen": null, | |
|       "eval_logits/rejected": null, | |
| "eval_logps/chosen": -303.3946838378906, | |
| "eval_logps/rejected": -322.9828186035156, | |
| "eval_loss": 0.5497509241104126, | |
| "eval_rewards/accuracies": 0.6796039342880249, | |
| "eval_rewards/chosen": -0.4018118381500244, | |
| "eval_rewards/margins": 1.0358790159225464, | |
| "eval_rewards/rejected": -1.4376907348632812, | |
| "eval_runtime": 416.5617, | |
| "eval_samples_per_second": 6.062, | |
| "eval_steps_per_second": 6.062, | |
| "step": 4400 | |
| } | |
| ], | |
| "logging_steps": 50, | |
| "max_steps": 7398, | |
| "num_input_tokens_seen": 0, | |
| "num_train_epochs": 3, | |
| "save_steps": 400, | |
| "stateful_callbacks": { | |
| "TrainerControl": { | |
| "args": { | |
| "should_epoch_stop": false, | |
| "should_evaluate": false, | |
| "should_log": false, | |
| "should_save": true, | |
| "should_training_stop": false | |
| }, | |
| "attributes": {} | |
| } | |
| }, | |
| "total_flos": 0.0, | |
| "train_batch_size": 1, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |