Invalid JSON:
Unexpected token 'N', ..."/chosen": NaN,
"... is not valid JSON
| { | |
| "best_global_step": null, | |
| "best_metric": null, | |
| "best_model_checkpoint": null, | |
| "epoch": 1.0, | |
| "eval_steps": 50, | |
| "global_step": 1449, | |
| "is_hyper_param_search": false, | |
| "is_local_process_zero": true, | |
| "is_world_process_zero": true, | |
| "log_history": [ | |
| { | |
| "epoch": 0.006904289289721239, | |
| "grad_norm": 117.87319946289062, | |
| "learning_rate": 4.968944099378881e-07, | |
| "logits/chosen": -1.6597025394439697, | |
| "logits/rejected": -1.4006378650665283, | |
| "logps/chosen": -401.00640869140625, | |
| "logps/rejected": -376.5096130371094, | |
| "loss": 0.698, | |
| "rewards/accuracies": 0.42500001192092896, | |
| "rewards/chosen": -0.004771101754158735, | |
| "rewards/margins": -0.0065481411293148994, | |
| "rewards/rejected": 0.001777039491571486, | |
| "step": 10 | |
| }, | |
| { | |
| "epoch": 0.013808578579442479, | |
| "grad_norm": 90.4378433227539, | |
| "learning_rate": 4.934437543133195e-07, | |
| "logits/chosen": NaN, | |
| "logits/rejected": NaN, | |
| "logps/chosen": -391.04168701171875, | |
| "logps/rejected": -373.75286865234375, | |
| "loss": 0.6922, | |
| "rewards/accuracies": 0.518750011920929, | |
| "rewards/chosen": 0.012870313599705696, | |
| "rewards/margins": 0.003891288535669446, | |
| "rewards/rejected": 0.00897902436554432, | |
| "step": 20 | |
| }, | |
| { | |
| "epoch": 0.02071286786916372, | |
| "grad_norm": 107.49934387207031, | |
| "learning_rate": 4.899930986887508e-07, | |
| "logits/chosen": -1.2349631786346436, | |
| "logits/rejected": -1.1878811120986938, | |
| "logps/chosen": -379.464599609375, | |
| "logps/rejected": -380.815185546875, | |
| "loss": 0.6913, | |
| "rewards/accuracies": 0.518750011920929, | |
| "rewards/chosen": 0.016194406896829605, | |
| "rewards/margins": 0.0065515548922121525, | |
| "rewards/rejected": 0.00964285247027874, | |
| "step": 30 | |
| }, | |
| { | |
| "epoch": 0.027617157158884957, | |
| "grad_norm": 103.74991607666016, | |
| "learning_rate": 4.865424430641822e-07, | |
| "logits/chosen": -1.433064579963684, | |
| "logits/rejected": -1.22975492477417, | |
| "logps/chosen": -411.400634765625, | |
| "logps/rejected": -369.98309326171875, | |
| "loss": 0.6929, | |
| "rewards/accuracies": 0.512499988079071, | |
| "rewards/chosen": 0.012084421701729298, | |
| "rewards/margins": 0.003508642315864563, | |
| "rewards/rejected": 0.008575777523219585, | |
| "step": 40 | |
| }, | |
| { | |
| "epoch": 0.0345214464486062, | |
| "grad_norm": 122.35696411132812, | |
| "learning_rate": 4.830917874396135e-07, | |
| "logits/chosen": -1.6217113733291626, | |
| "logits/rejected": -1.3987983465194702, | |
| "logps/chosen": -398.4120178222656, | |
| "logps/rejected": -421.19915771484375, | |
| "loss": 0.6945, | |
| "rewards/accuracies": 0.48124998807907104, | |
| "rewards/chosen": 0.026384565979242325, | |
| "rewards/margins": 0.0009822694119066, | |
| "rewards/rejected": 0.02540229819715023, | |
| "step": 50 | |
| }, | |
| { | |
| "epoch": 0.0345214464486062, | |
| "eval_logits/chosen": NaN, | |
| "eval_logits/rejected": -1.1866018772125244, | |
| "eval_logps/chosen": -413.12347412109375, | |
| "eval_logps/rejected": -386.6858825683594, | |
| "eval_loss": 0.6903728246688843, | |
| "eval_rewards/accuracies": 0.534844696521759, | |
| "eval_rewards/chosen": 0.034061625599861145, | |
| "eval_rewards/margins": 0.008847548626363277, | |
| "eval_rewards/rejected": 0.025214076042175293, | |
| "eval_runtime": 174.7974, | |
| "eval_samples_per_second": 6.814, | |
| "eval_steps_per_second": 6.814, | |
| "step": 50 | |
| }, | |
| { | |
| "epoch": 0.04142573573832744, | |
| "grad_norm": 93.54374694824219, | |
| "learning_rate": 4.796411318150448e-07, | |
| "logits/chosen": -1.348595142364502, | |
| "logits/rejected": -1.2194318771362305, | |
| "logps/chosen": -402.88262939453125, | |
| "logps/rejected": -371.10443115234375, | |
| "loss": 0.6856, | |
| "rewards/accuracies": 0.574999988079071, | |
| "rewards/chosen": 0.04310911148786545, | |
| "rewards/margins": 0.018718212842941284, | |
| "rewards/rejected": 0.024390896782279015, | |
| "step": 60 | |
| }, | |
| { | |
| "epoch": 0.04833002502804867, | |
| "grad_norm": 107.30802917480469, | |
| "learning_rate": 4.761904761904761e-07, | |
| "logits/chosen": -1.6407556533813477, | |
| "logits/rejected": -1.3553545475006104, | |
| "logps/chosen": -373.2159729003906, | |
| "logps/rejected": -349.29217529296875, | |
| "loss": 0.6962, | |
| "rewards/accuracies": 0.512499988079071, | |
| "rewards/chosen": 0.04530588164925575, | |
| "rewards/margins": -0.002548071090131998, | |
| "rewards/rejected": 0.04785395413637161, | |
| "step": 70 | |
| }, | |
| { | |
| "epoch": 0.055234314317769914, | |
| "grad_norm": 107.91950988769531, | |
| "learning_rate": 4.727398205659075e-07, | |
| "logits/chosen": -1.5417871475219727, | |
| "logits/rejected": -1.3636682033538818, | |
| "logps/chosen": -411.05743408203125, | |
| "logps/rejected": -385.8003845214844, | |
| "loss": 0.6882, | |
| "rewards/accuracies": 0.5625, | |
| "rewards/chosen": 0.08122040331363678, | |
| "rewards/margins": 0.014340770430862904, | |
| "rewards/rejected": 0.06687963753938675, | |
| "step": 80 | |
| }, | |
| { | |
| "epoch": 0.062138603607491155, | |
| "grad_norm": 91.66450500488281, | |
| "learning_rate": 4.6928916494133887e-07, | |
| "logits/chosen": -1.541373610496521, | |
| "logits/rejected": -1.3865256309509277, | |
| "logps/chosen": -416.6394958496094, | |
| "logps/rejected": -386.05023193359375, | |
| "loss": 0.6845, | |
| "rewards/accuracies": 0.5625, | |
| "rewards/chosen": 0.10198626667261124, | |
| "rewards/margins": 0.02344668284058571, | |
| "rewards/rejected": 0.07853958010673523, | |
| "step": 90 | |
| }, | |
| { | |
| "epoch": 0.0690428928972124, | |
| "grad_norm": 84.30016326904297, | |
| "learning_rate": 4.6583850931677014e-07, | |
| "logits/chosen": -1.452531337738037, | |
| "logits/rejected": -1.2693792581558228, | |
| "logps/chosen": -432.15301513671875, | |
| "logps/rejected": -382.9742126464844, | |
| "loss": 0.6756, | |
| "rewards/accuracies": 0.5625, | |
| "rewards/chosen": 0.12693843245506287, | |
| "rewards/margins": 0.04367733746767044, | |
| "rewards/rejected": 0.08326110243797302, | |
| "step": 100 | |
| }, | |
| { | |
| "epoch": 0.0690428928972124, | |
| "eval_logits/chosen": NaN, | |
| "eval_logits/rejected": -1.1412357091903687, | |
| "eval_logps/chosen": -412.13031005859375, | |
| "eval_logps/rejected": -385.9539794921875, | |
| "eval_loss": 0.6796301603317261, | |
| "eval_rewards/accuracies": 0.5751469135284424, | |
| "eval_rewards/chosen": 0.13337667286396027, | |
| "eval_rewards/margins": 0.034973062574863434, | |
| "eval_rewards/rejected": 0.09840361773967743, | |
| "eval_runtime": 174.9705, | |
| "eval_samples_per_second": 6.807, | |
| "eval_steps_per_second": 6.807, | |
| "step": 100 | |
| }, | |
| { | |
| "epoch": 0.07594718218693364, | |
| "grad_norm": 106.82269287109375, | |
| "learning_rate": 4.623878536922015e-07, | |
| "logits/chosen": -1.4040501117706299, | |
| "logits/rejected": -1.3599127531051636, | |
| "logps/chosen": -406.8678283691406, | |
| "logps/rejected": -398.7535095214844, | |
| "loss": 0.6888, | |
| "rewards/accuracies": 0.5375000238418579, | |
| "rewards/chosen": 0.14022807776927948, | |
| "rewards/margins": 0.01770814135670662, | |
| "rewards/rejected": 0.12251994758844376, | |
| "step": 110 | |
| }, | |
| { | |
| "epoch": 0.08285147147665488, | |
| "grad_norm": 115.4521713256836, | |
| "learning_rate": 4.5893719806763283e-07, | |
| "logits/chosen": -1.555271863937378, | |
| "logits/rejected": -1.3029377460479736, | |
| "logps/chosen": -389.0620422363281, | |
| "logps/rejected": -369.9129943847656, | |
| "loss": 0.685, | |
| "rewards/accuracies": 0.53125, | |
| "rewards/chosen": 0.14878275990486145, | |
| "rewards/margins": 0.026980455964803696, | |
| "rewards/rejected": 0.12180230766534805, | |
| "step": 120 | |
| }, | |
| { | |
| "epoch": 0.0897557607663761, | |
| "grad_norm": 117.05790710449219, | |
| "learning_rate": 4.5548654244306415e-07, | |
| "logits/chosen": -1.5591048002243042, | |
| "logits/rejected": -1.329421877861023, | |
| "logps/chosen": -406.03240966796875, | |
| "logps/rejected": -395.0228271484375, | |
| "loss": 0.683, | |
| "rewards/accuracies": 0.543749988079071, | |
| "rewards/chosen": 0.17079384624958038, | |
| "rewards/margins": 0.033137861639261246, | |
| "rewards/rejected": 0.13765597343444824, | |
| "step": 130 | |
| }, | |
| { | |
| "epoch": 0.09666005005609735, | |
| "grad_norm": 95.98699951171875, | |
| "learning_rate": 4.520358868184955e-07, | |
| "logits/chosen": -1.3720393180847168, | |
| "logits/rejected": -1.1874357461929321, | |
| "logps/chosen": -386.15765380859375, | |
| "logps/rejected": -347.72686767578125, | |
| "loss": 0.6762, | |
| "rewards/accuracies": 0.606249988079071, | |
| "rewards/chosen": 0.14972324669361115, | |
| "rewards/margins": 0.04324805364012718, | |
| "rewards/rejected": 0.10647518932819366, | |
| "step": 140 | |
| }, | |
| { | |
| "epoch": 0.10356433934581859, | |
| "grad_norm": 93.65694427490234, | |
| "learning_rate": 4.4858523119392685e-07, | |
| "logits/chosen": -1.3812742233276367, | |
| "logits/rejected": -1.2242071628570557, | |
| "logps/chosen": -407.5322570800781, | |
| "logps/rejected": -380.6641540527344, | |
| "loss": 0.6806, | |
| "rewards/accuracies": 0.5874999761581421, | |
| "rewards/chosen": 0.1826440393924713, | |
| "rewards/margins": 0.03333988040685654, | |
| "rewards/rejected": 0.14930415153503418, | |
| "step": 150 | |
| }, | |
| { | |
| "epoch": 0.10356433934581859, | |
| "eval_logits/chosen": NaN, | |
| "eval_logits/rejected": -1.134230613708496, | |
| "eval_logps/chosen": -411.6000671386719, | |
| "eval_logps/rejected": -385.556640625, | |
| "eval_loss": 0.6752930283546448, | |
| "eval_rewards/accuracies": 0.5894206762313843, | |
| "eval_rewards/chosen": 0.1863998919725418, | |
| "eval_rewards/margins": 0.04826309531927109, | |
| "eval_rewards/rejected": 0.13813677430152893, | |
| "eval_runtime": 175.5391, | |
| "eval_samples_per_second": 6.785, | |
| "eval_steps_per_second": 6.785, | |
| "step": 150 | |
| }, | |
| { | |
| "epoch": 0.11046862863553983, | |
| "grad_norm": 88.74897766113281, | |
| "learning_rate": 4.4513457556935817e-07, | |
| "logits/chosen": -1.3776233196258545, | |
| "logits/rejected": -1.0611753463745117, | |
| "logps/chosen": -353.5108337402344, | |
| "logps/rejected": -313.82452392578125, | |
| "loss": 0.6647, | |
| "rewards/accuracies": 0.637499988079071, | |
| "rewards/chosen": 0.16904719173908234, | |
| "rewards/margins": 0.07107650488615036, | |
| "rewards/rejected": 0.09797067940235138, | |
| "step": 160 | |
| }, | |
| { | |
| "epoch": 0.11737291792526107, | |
| "grad_norm": 88.10498809814453, | |
| "learning_rate": 4.416839199447895e-07, | |
| "logits/chosen": -1.5680519342422485, | |
| "logits/rejected": -1.441646933555603, | |
| "logps/chosen": -388.49346923828125, | |
| "logps/rejected": -345.51287841796875, | |
| "loss": 0.6763, | |
| "rewards/accuracies": 0.5687500238418579, | |
| "rewards/chosen": 0.2311728447675705, | |
| "rewards/margins": 0.05381856486201286, | |
| "rewards/rejected": 0.17735427618026733, | |
| "step": 170 | |
| }, | |
| { | |
| "epoch": 0.12427720721498231, | |
| "grad_norm": 116.68927764892578, | |
| "learning_rate": 4.3823326432022087e-07, | |
| "logits/chosen": -1.4206693172454834, | |
| "logits/rejected": -1.175628900527954, | |
| "logps/chosen": -421.7391052246094, | |
| "logps/rejected": -380.89300537109375, | |
| "loss": 0.6777, | |
| "rewards/accuracies": 0.581250011920929, | |
| "rewards/chosen": 0.25413596630096436, | |
| "rewards/margins": 0.04794498533010483, | |
| "rewards/rejected": 0.20619097352027893, | |
| "step": 180 | |
| }, | |
| { | |
| "epoch": 0.13118149650470354, | |
| "grad_norm": 100.38077545166016, | |
| "learning_rate": 4.3478260869565214e-07, | |
| "logits/chosen": -1.4726192951202393, | |
| "logits/rejected": -1.368575096130371, | |
| "logps/chosen": -413.97216796875, | |
| "logps/rejected": -382.13116455078125, | |
| "loss": 0.6751, | |
| "rewards/accuracies": 0.5625, | |
| "rewards/chosen": 0.2780989110469818, | |
| "rewards/margins": 0.06077272817492485, | |
| "rewards/rejected": 0.21732616424560547, | |
| "step": 190 | |
| }, | |
| { | |
| "epoch": 0.1380857857944248, | |
| "grad_norm": 102.20967102050781, | |
| "learning_rate": 4.313319530710835e-07, | |
| "logits/chosen": -1.1611400842666626, | |
| "logits/rejected": -0.9503771662712097, | |
| "logps/chosen": -409.99951171875, | |
| "logps/rejected": -378.935546875, | |
| "loss": 0.6825, | |
| "rewards/accuracies": 0.518750011920929, | |
| "rewards/chosen": 0.25466907024383545, | |
| "rewards/margins": 0.03956790268421173, | |
| "rewards/rejected": 0.21510115265846252, | |
| "step": 200 | |
| }, | |
| { | |
| "epoch": 0.1380857857944248, | |
| "eval_logits/chosen": NaN, | |
| "eval_logits/rejected": -1.082338571548462, | |
| "eval_logps/chosen": -410.69976806640625, | |
| "eval_logps/rejected": -384.83831787109375, | |
| "eval_loss": 0.670859694480896, | |
| "eval_rewards/accuracies": 0.5835432410240173, | |
| "eval_rewards/chosen": 0.27643224596977234, | |
| "eval_rewards/margins": 0.06646192073822021, | |
| "eval_rewards/rejected": 0.20997031033039093, | |
| "eval_runtime": 175.4317, | |
| "eval_samples_per_second": 6.789, | |
| "eval_steps_per_second": 6.789, | |
| "step": 200 | |
| }, | |
| { | |
| "epoch": 0.14499007508414602, | |
| "grad_norm": 90.83344268798828, | |
| "learning_rate": 4.278812974465148e-07, | |
| "logits/chosen": -1.4185205698013306, | |
| "logits/rejected": -1.2403172254562378, | |
| "logps/chosen": -382.5022888183594, | |
| "logps/rejected": -358.65582275390625, | |
| "loss": 0.6733, | |
| "rewards/accuracies": 0.5687500238418579, | |
| "rewards/chosen": 0.2773120105266571, | |
| "rewards/margins": 0.06518922746181488, | |
| "rewards/rejected": 0.21212276816368103, | |
| "step": 210 | |
| }, | |
| { | |
| "epoch": 0.15189436437386727, | |
| "grad_norm": 107.23486328125, | |
| "learning_rate": 4.2443064182194615e-07, | |
| "logits/chosen": -1.3033957481384277, | |
| "logits/rejected": -1.0996795892715454, | |
| "logps/chosen": -407.9463195800781, | |
| "logps/rejected": -421.99310302734375, | |
| "loss": 0.6744, | |
| "rewards/accuracies": 0.543749988079071, | |
| "rewards/chosen": 0.3146436810493469, | |
| "rewards/margins": 0.056582093238830566, | |
| "rewards/rejected": 0.25806158781051636, | |
| "step": 220 | |
| }, | |
| { | |
| "epoch": 0.1587986536635885, | |
| "grad_norm": 110.84426879882812, | |
| "learning_rate": 4.209799861973775e-07, | |
| "logits/chosen": -1.305482268333435, | |
| "logits/rejected": -1.1424553394317627, | |
| "logps/chosen": -403.65643310546875, | |
| "logps/rejected": -390.69427490234375, | |
| "loss": 0.6616, | |
| "rewards/accuracies": 0.5874999761581421, | |
| "rewards/chosen": 0.32808226346969604, | |
| "rewards/margins": 0.0847150906920433, | |
| "rewards/rejected": 0.24336715042591095, | |
| "step": 230 | |
| }, | |
| { | |
| "epoch": 0.16570294295330976, | |
| "grad_norm": 101.36908721923828, | |
| "learning_rate": 4.175293305728088e-07, | |
| "logits/chosen": -1.1226942539215088, | |
| "logits/rejected": -0.9961945414543152, | |
| "logps/chosen": -400.3923034667969, | |
| "logps/rejected": -379.03253173828125, | |
| "loss": 0.6891, | |
| "rewards/accuracies": 0.543749988079071, | |
| "rewards/chosen": 0.2640329897403717, | |
| "rewards/margins": 0.02379865013062954, | |
| "rewards/rejected": 0.24023433029651642, | |
| "step": 240 | |
| }, | |
| { | |
| "epoch": 0.17260723224303098, | |
| "grad_norm": 111.8565902709961, | |
| "learning_rate": 4.1407867494824017e-07, | |
| "logits/chosen": -1.4368869066238403, | |
| "logits/rejected": -1.1330538988113403, | |
| "logps/chosen": -404.5845642089844, | |
| "logps/rejected": -387.0537414550781, | |
| "loss": 0.6547, | |
| "rewards/accuracies": 0.59375, | |
| "rewards/chosen": 0.3308050334453583, | |
| "rewards/margins": 0.10226805508136749, | |
| "rewards/rejected": 0.22853699326515198, | |
| "step": 250 | |
| }, | |
| { | |
| "epoch": 0.17260723224303098, | |
| "eval_logits/chosen": NaN, | |
| "eval_logits/rejected": -1.0943695306777954, | |
| "eval_logps/chosen": -410.5004577636719, | |
| "eval_logps/rejected": -384.76116943359375, | |
| "eval_loss": 0.6669625639915466, | |
| "eval_rewards/accuracies": 0.5894206762313843, | |
| "eval_rewards/chosen": 0.2963607609272003, | |
| "eval_rewards/margins": 0.07867557555437088, | |
| "eval_rewards/rejected": 0.21768519282341003, | |
| "eval_runtime": 174.8859, | |
| "eval_samples_per_second": 6.81, | |
| "eval_steps_per_second": 6.81, | |
| "step": 250 | |
| }, | |
| { | |
| "epoch": 0.1795115215327522, | |
| "grad_norm": 83.30036926269531, | |
| "learning_rate": 4.106280193236715e-07, | |
| "logits/chosen": -1.3349380493164062, | |
| "logits/rejected": -1.1268976926803589, | |
| "logps/chosen": -408.28631591796875, | |
| "logps/rejected": -409.681884765625, | |
| "loss": 0.6812, | |
| "rewards/accuracies": 0.625, | |
| "rewards/chosen": 0.2973114848136902, | |
| "rewards/margins": 0.0491001270711422, | |
| "rewards/rejected": 0.2482113093137741, | |
| "step": 260 | |
| }, | |
| { | |
| "epoch": 0.18641581082247347, | |
| "grad_norm": 109.46472930908203, | |
| "learning_rate": 4.071773636991028e-07, | |
| "logits/chosen": -1.2926867008209229, | |
| "logits/rejected": -1.175357460975647, | |
| "logps/chosen": -437.421142578125, | |
| "logps/rejected": -408.375, | |
| "loss": 0.6695, | |
| "rewards/accuracies": 0.574999988079071, | |
| "rewards/chosen": 0.28953924775123596, | |
| "rewards/margins": 0.07461558282375336, | |
| "rewards/rejected": 0.2149236649274826, | |
| "step": 270 | |
| }, | |
| { | |
| "epoch": 0.1933201001121947, | |
| "grad_norm": 107.54530334472656, | |
| "learning_rate": 4.0372670807453413e-07, | |
| "logits/chosen": -1.3313913345336914, | |
| "logits/rejected": -1.216606855392456, | |
| "logps/chosen": -390.5809326171875, | |
| "logps/rejected": -385.1927795410156, | |
| "loss": 0.6815, | |
| "rewards/accuracies": 0.550000011920929, | |
| "rewards/chosen": 0.2860502302646637, | |
| "rewards/margins": 0.05519815534353256, | |
| "rewards/rejected": 0.23085205256938934, | |
| "step": 280 | |
| }, | |
| { | |
| "epoch": 0.20022438940191595, | |
| "grad_norm": 100.78239440917969, | |
| "learning_rate": 4.002760524499655e-07, | |
| "logits/chosen": -1.4627584218978882, | |
| "logits/rejected": -1.2909877300262451, | |
| "logps/chosen": -370.5834045410156, | |
| "logps/rejected": -371.1207275390625, | |
| "loss": 0.6749, | |
| "rewards/accuracies": 0.606249988079071, | |
| "rewards/chosen": 0.29502958059310913, | |
| "rewards/margins": 0.05848982185125351, | |
| "rewards/rejected": 0.23653972148895264, | |
| "step": 290 | |
| }, | |
| { | |
| "epoch": 0.20712867869163717, | |
| "grad_norm": 92.07577514648438, | |
| "learning_rate": 3.968253968253968e-07, | |
| "logits/chosen": -1.3123762607574463, | |
| "logits/rejected": -1.1357301473617554, | |
| "logps/chosen": -360.8210144042969, | |
| "logps/rejected": -342.74444580078125, | |
| "loss": 0.6613, | |
| "rewards/accuracies": 0.625, | |
| "rewards/chosen": 0.2599900960922241, | |
| "rewards/margins": 0.08143080025911331, | |
| "rewards/rejected": 0.17855927348136902, | |
| "step": 300 | |
| }, | |
| { | |
| "epoch": 0.20712867869163717, | |
| "eval_logits/chosen": NaN, | |
| "eval_logits/rejected": -1.1052350997924805, | |
| "eval_logps/chosen": -410.4856872558594, | |
| "eval_logps/rejected": -384.8339538574219, | |
| "eval_loss": 0.6639109253883362, | |
| "eval_rewards/accuracies": 0.5994962453842163, | |
| "eval_rewards/chosen": 0.2978414297103882, | |
| "eval_rewards/margins": 0.08743705600500107, | |
| "eval_rewards/rejected": 0.2104043960571289, | |
| "eval_runtime": 175.082, | |
| "eval_samples_per_second": 6.803, | |
| "eval_steps_per_second": 6.803, | |
| "step": 300 | |
| }, | |
| { | |
| "epoch": 0.21403296798135843, | |
| "grad_norm": 96.54574584960938, | |
| "learning_rate": 3.9337474120082815e-07, | |
| "logits/chosen": -1.4912551641464233, | |
| "logits/rejected": -1.2566022872924805, | |
| "logps/chosen": -447.9432067871094, | |
| "logps/rejected": -382.5755920410156, | |
| "loss": 0.6313, | |
| "rewards/accuracies": 0.6937500238418579, | |
| "rewards/chosen": 0.3629879057407379, | |
| "rewards/margins": 0.16798029839992523, | |
| "rewards/rejected": 0.19500760734081268, | |
| "step": 310 | |
| }, | |
| { | |
| "epoch": 0.22093725727107966, | |
| "grad_norm": 88.83377075195312, | |
| "learning_rate": 3.8992408557625947e-07, | |
| "logits/chosen": NaN, | |
| "logits/rejected": -1.2359377145767212, | |
| "logps/chosen": -397.00042724609375, | |
| "logps/rejected": -368.5748596191406, | |
| "loss": 0.6573, | |
| "rewards/accuracies": 0.6625000238418579, | |
| "rewards/chosen": 0.37342214584350586, | |
| "rewards/margins": 0.10562543570995331, | |
| "rewards/rejected": 0.26779669523239136, | |
| "step": 320 | |
| }, | |
| { | |
| "epoch": 0.22784154656080088, | |
| "grad_norm": 92.1037368774414, | |
| "learning_rate": 3.864734299516908e-07, | |
| "logits/chosen": -1.4307029247283936, | |
| "logits/rejected": -1.1998441219329834, | |
| "logps/chosen": -365.99481201171875, | |
| "logps/rejected": -349.28411865234375, | |
| "loss": 0.6768, | |
| "rewards/accuracies": 0.5625, | |
| "rewards/chosen": 0.30214032530784607, | |
| "rewards/margins": 0.06596145778894424, | |
| "rewards/rejected": 0.23617887496948242, | |
| "step": 330 | |
| }, | |
| { | |
| "epoch": 0.23474583585052214, | |
| "grad_norm": 102.44023895263672, | |
| "learning_rate": 3.8302277432712217e-07, | |
| "logits/chosen": -1.5167787075042725, | |
| "logits/rejected": -1.3095741271972656, | |
| "logps/chosen": -430.99078369140625, | |
| "logps/rejected": -414.8297424316406, | |
| "loss": 0.6712, | |
| "rewards/accuracies": 0.6312500238418579, | |
| "rewards/chosen": 0.34007564187049866, | |
| "rewards/margins": 0.0786903128027916, | |
| "rewards/rejected": 0.26138535141944885, | |
| "step": 340 | |
| }, | |
| { | |
| "epoch": 0.24165012514024337, | |
| "grad_norm": 99.54808044433594, | |
| "learning_rate": 3.7957211870255344e-07, | |
| "logits/chosen": -1.3461697101593018, | |
| "logits/rejected": -1.3173012733459473, | |
| "logps/chosen": -348.45855712890625, | |
| "logps/rejected": -328.32958984375, | |
| "loss": 0.6956, | |
| "rewards/accuracies": 0.53125, | |
| "rewards/chosen": 0.2547549903392792, | |
| "rewards/margins": 0.02365240827202797, | |
| "rewards/rejected": 0.23110255599021912, | |
| "step": 350 | |
| }, | |
| { | |
| "epoch": 0.24165012514024337, | |
| "eval_logits/chosen": NaN, | |
| "eval_logits/rejected": -1.1432876586914062, | |
| "eval_logps/chosen": -410.524658203125, | |
| "eval_logps/rejected": -384.90447998046875, | |
| "eval_loss": 0.6644947528839111, | |
| "eval_rewards/accuracies": 0.6087321639060974, | |
| "eval_rewards/chosen": 0.29394418001174927, | |
| "eval_rewards/margins": 0.09058759361505508, | |
| "eval_rewards/rejected": 0.20335662364959717, | |
| "eval_runtime": 174.1765, | |
| "eval_samples_per_second": 6.838, | |
| "eval_steps_per_second": 6.838, | |
| "step": 350 | |
| }, | |
| { | |
| "epoch": 0.24855441442996462, | |
| "grad_norm": 95.7793960571289, | |
| "learning_rate": 3.761214630779848e-07, | |
| "logits/chosen": -1.4167577028274536, | |
| "logits/rejected": -1.2274904251098633, | |
| "logps/chosen": -398.9777526855469, | |
| "logps/rejected": -375.252685546875, | |
| "loss": 0.6706, | |
| "rewards/accuracies": 0.5874999761581421, | |
| "rewards/chosen": 0.2948678433895111, | |
| "rewards/margins": 0.07842870056629181, | |
| "rewards/rejected": 0.2164391577243805, | |
| "step": 360 | |
| }, | |
| { | |
| "epoch": 0.2554587037196859, | |
| "grad_norm": 115.86205291748047, | |
| "learning_rate": 3.7267080745341613e-07, | |
| "logits/chosen": -1.475426435470581, | |
| "logits/rejected": -1.3543269634246826, | |
| "logps/chosen": -379.80987548828125, | |
| "logps/rejected": -393.3304138183594, | |
| "loss": 0.6962, | |
| "rewards/accuracies": 0.550000011920929, | |
| "rewards/chosen": 0.2728275656700134, | |
| "rewards/margins": 0.02714722231030464, | |
| "rewards/rejected": 0.2456803321838379, | |
| "step": 370 | |
| }, | |
| { | |
| "epoch": 0.2623629930094071, | |
| "grad_norm": 94.25291442871094, | |
| "learning_rate": 3.6922015182884745e-07, | |
| "logits/chosen": -1.3522151708602905, | |
| "logits/rejected": -1.1761189699172974, | |
| "logps/chosen": -410.3907165527344, | |
| "logps/rejected": -408.0509948730469, | |
| "loss": 0.6491, | |
| "rewards/accuracies": 0.581250011920929, | |
| "rewards/chosen": 0.3047269582748413, | |
| "rewards/margins": 0.1313890665769577, | |
| "rewards/rejected": 0.17333786189556122, | |
| "step": 380 | |
| }, | |
| { | |
| "epoch": 0.26926728229912833, | |
| "grad_norm": 87.04241943359375, | |
| "learning_rate": 3.657694962042788e-07, | |
| "logits/chosen": -1.4277892112731934, | |
| "logits/rejected": -1.2214381694793701, | |
| "logps/chosen": -401.0085754394531, | |
| "logps/rejected": -370.23626708984375, | |
| "loss": 0.6507, | |
| "rewards/accuracies": 0.625, | |
| "rewards/chosen": 0.30590149760246277, | |
| "rewards/margins": 0.11139049381017685, | |
| "rewards/rejected": 0.1945110410451889, | |
| "step": 390 | |
| }, | |
| { | |
| "epoch": 0.2761715715888496, | |
| "grad_norm": 92.03490447998047, | |
| "learning_rate": 3.6231884057971015e-07, | |
| "logits/chosen": -1.3768854141235352, | |
| "logits/rejected": -1.2312657833099365, | |
| "logps/chosen": -381.2527770996094, | |
| "logps/rejected": -368.76116943359375, | |
| "loss": 0.6524, | |
| "rewards/accuracies": 0.6000000238418579, | |
| "rewards/chosen": 0.30460983514785767, | |
| "rewards/margins": 0.1114836111664772, | |
| "rewards/rejected": 0.19312623143196106, | |
| "step": 400 | |
| }, | |
| { | |
| "epoch": 0.2761715715888496, | |
| "eval_logits/chosen": NaN, | |
| "eval_logits/rejected": -1.1455719470977783, | |
| "eval_logps/chosen": -410.375, | |
| "eval_logps/rejected": -384.88275146484375, | |
| "eval_loss": 0.6600396037101746, | |
| "eval_rewards/accuracies": 0.6112510561943054, | |
| "eval_rewards/chosen": 0.30890780687332153, | |
| "eval_rewards/margins": 0.10338202118873596, | |
| "eval_rewards/rejected": 0.20552580058574677, | |
| "eval_runtime": 173.7586, | |
| "eval_samples_per_second": 6.854, | |
| "eval_steps_per_second": 6.854, | |
| "step": 400 | |
| }, | |
| { | |
| "epoch": 0.28307586087857084, | |
| "grad_norm": 84.42977905273438, | |
| "learning_rate": 3.5886818495514147e-07, | |
| "logits/chosen": -1.5309593677520752, | |
| "logits/rejected": -1.3086028099060059, | |
| "logps/chosen": -412.37579345703125, | |
| "logps/rejected": -371.64691162109375, | |
| "loss": 0.6613, | |
| "rewards/accuracies": 0.5687500238418579, | |
| "rewards/chosen": 0.30601978302001953, | |
| "rewards/margins": 0.11061513423919678, | |
| "rewards/rejected": 0.19540461897850037, | |
| "step": 410 | |
| }, | |
| { | |
| "epoch": 0.28998015016829204, | |
| "grad_norm": 78.36170959472656, | |
| "learning_rate": 3.554175293305728e-07, | |
| "logits/chosen": -1.5359389781951904, | |
| "logits/rejected": NaN, | |
| "logps/chosen": -388.64056396484375, | |
| "logps/rejected": -351.0355529785156, | |
| "loss": 0.6426, | |
| "rewards/accuracies": 0.625, | |
| "rewards/chosen": 0.3550390601158142, | |
| "rewards/margins": 0.1474730670452118, | |
| "rewards/rejected": 0.20756597816944122, | |
| "step": 420 | |
| }, | |
| { | |
| "epoch": 0.2968844394580133, | |
| "grad_norm": 115.095458984375, | |
| "learning_rate": 3.5196687370600417e-07, | |
| "logits/chosen": -1.5559413433074951, | |
| "logits/rejected": -1.3967716693878174, | |
| "logps/chosen": -410.49249267578125, | |
| "logps/rejected": -402.0940246582031, | |
| "loss": 0.6704, | |
| "rewards/accuracies": 0.5874999761581421, | |
| "rewards/chosen": 0.3497038781642914, | |
| "rewards/margins": 0.09456796944141388, | |
| "rewards/rejected": 0.2551359534263611, | |
| "step": 430 | |
| }, | |
| { | |
| "epoch": 0.30378872874773455, | |
| "grad_norm": 90.03668212890625, | |
| "learning_rate": 3.4851621808143543e-07, | |
| "logits/chosen": -1.3578782081604004, | |
| "logits/rejected": -1.1276133060455322, | |
| "logps/chosen": -388.08343505859375, | |
| "logps/rejected": -370.1001281738281, | |
| "loss": 0.6763, | |
| "rewards/accuracies": 0.581250011920929, | |
| "rewards/chosen": 0.33041852712631226, | |
| "rewards/margins": 0.0695219561457634, | |
| "rewards/rejected": 0.26089656352996826, | |
| "step": 440 | |
| }, | |
| { | |
| "epoch": 0.31069301803745575, | |
| "grad_norm": 69.30516052246094, | |
| "learning_rate": 3.450655624568668e-07, | |
| "logits/chosen": -1.3265705108642578, | |
| "logits/rejected": -1.1799354553222656, | |
| "logps/chosen": -370.85882568359375, | |
| "logps/rejected": -352.0668029785156, | |
| "loss": 0.668, | |
| "rewards/accuracies": 0.574999988079071, | |
| "rewards/chosen": 0.32953041791915894, | |
| "rewards/margins": 0.09510184824466705, | |
| "rewards/rejected": 0.2344285249710083, | |
| "step": 450 | |
| }, | |
| { | |
| "epoch": 0.31069301803745575, | |
| "eval_logits/chosen": NaN, | |
| "eval_logits/rejected": -1.1227965354919434, | |
| "eval_logps/chosen": -409.8273620605469, | |
| "eval_logps/rejected": -384.4886474609375, | |
| "eval_loss": 0.6568603515625, | |
| "eval_rewards/accuracies": 0.6120907068252563, | |
| "eval_rewards/chosen": 0.36367088556289673, | |
| "eval_rewards/margins": 0.1187346950173378, | |
| "eval_rewards/rejected": 0.24493615329265594, | |
| "eval_runtime": 173.6419, | |
| "eval_samples_per_second": 6.859, | |
| "eval_steps_per_second": 6.859, | |
| "step": 450 | |
| }, | |
| { | |
| "epoch": 0.317597307327177, | |
| "grad_norm": 78.89984893798828, | |
| "learning_rate": 3.416149068322981e-07, | |
| "logits/chosen": NaN, | |
| "logits/rejected": -1.305570363998413, | |
| "logps/chosen": -416.13726806640625, | |
| "logps/rejected": -372.0033264160156, | |
| "loss": 0.6481, | |
| "rewards/accuracies": 0.612500011920929, | |
| "rewards/chosen": 0.4147353172302246, | |
| "rewards/margins": 0.1446826159954071, | |
| "rewards/rejected": 0.2700527012348175, | |
| "step": 460 | |
| }, | |
| { | |
| "epoch": 0.32450159661689826, | |
| "grad_norm": 103.23446655273438, | |
| "learning_rate": 3.3816425120772945e-07, | |
| "logits/chosen": -1.5534855127334595, | |
| "logits/rejected": NaN, | |
| "logps/chosen": -368.0318603515625, | |
| "logps/rejected": -359.0155944824219, | |
| "loss": 0.6966, | |
| "rewards/accuracies": 0.53125, | |
| "rewards/chosen": 0.3369843661785126, | |
| "rewards/margins": 0.03579873591661453, | |
| "rewards/rejected": 0.30118563771247864, | |
| "step": 470 | |
| }, | |
| { | |
| "epoch": 0.3314058859066195, | |
| "grad_norm": 78.23011016845703, | |
| "learning_rate": 3.347135955831608e-07, | |
| "logits/chosen": -1.5334765911102295, | |
| "logits/rejected": -1.240417242050171, | |
| "logps/chosen": -399.98333740234375, | |
| "logps/rejected": -373.2457580566406, | |
| "loss": 0.6489, | |
| "rewards/accuracies": 0.6312500238418579, | |
| "rewards/chosen": 0.35515138506889343, | |
| "rewards/margins": 0.13600310683250427, | |
| "rewards/rejected": 0.21914830803871155, | |
| "step": 480 | |
| }, | |
| { | |
| "epoch": 0.3383101751963407, | |
| "grad_norm": 101.94794464111328, | |
| "learning_rate": 3.312629399585921e-07, | |
| "logits/chosen": -1.6701557636260986, | |
| "logits/rejected": -1.3532097339630127, | |
| "logps/chosen": -370.28363037109375, | |
| "logps/rejected": -324.91845703125, | |
| "loss": 0.6381, | |
| "rewards/accuracies": 0.6312500238418579, | |
| "rewards/chosen": 0.3905836045742035, | |
| "rewards/margins": 0.1694364845752716, | |
| "rewards/rejected": 0.22114713490009308, | |
| "step": 490 | |
| }, | |
| { | |
| "epoch": 0.34521446448606197, | |
| "grad_norm": 133.3175811767578, | |
| "learning_rate": 3.2781228433402347e-07, | |
| "logits/chosen": -1.3769439458847046, | |
| "logits/rejected": -1.239466905593872, | |
| "logps/chosen": -426.37969970703125, | |
| "logps/rejected": -407.44061279296875, | |
| "loss": 0.6857, | |
| "rewards/accuracies": 0.5562499761581421, | |
| "rewards/chosen": 0.31460708379745483, | |
| "rewards/margins": 0.06561081111431122, | |
| "rewards/rejected": 0.24899625778198242, | |
| "step": 500 | |
| }, | |
| { | |
| "epoch": 0.34521446448606197, | |
| "eval_logits/chosen": null, | |
| "eval_logits/rejected": -1.135837197303772, | |
| "eval_logps/chosen": -409.88800048828125, | |
| "eval_logps/rejected": -384.6177978515625, | |
| "eval_loss": 0.6550039052963257, | |
| "eval_rewards/accuracies": 0.6045340299606323, | |
| "eval_rewards/chosen": 0.3576072156429291, | |
| "eval_rewards/margins": 0.1255844682455063, | |
| "eval_rewards/rejected": 0.2320227324962616, | |
| "eval_runtime": 174.8216, | |
| "eval_samples_per_second": 6.813, | |
| "eval_steps_per_second": 6.813, | |
| "step": 500 | |
| }, | |
| { | |
| "epoch": 0.3521187537757832, | |
| "grad_norm": 80.95104217529297, | |
| "learning_rate": 3.243616287094548e-07, | |
| "logits/chosen": -1.50025475025177, | |
| "logits/rejected": -1.2558667659759521, | |
| "logps/chosen": -384.44903564453125, | |
| "logps/rejected": -405.1640930175781, | |
| "loss": 0.6756, | |
| "rewards/accuracies": 0.5874999761581421, | |
| "rewards/chosen": 0.32988351583480835, | |
| "rewards/margins": 0.08733965456485748, | |
| "rewards/rejected": 0.24254386126995087, | |
| "step": 510 | |
| }, | |
| { | |
| "epoch": 0.3590230430655044, | |
| "grad_norm": 92.58390045166016, | |
| "learning_rate": 3.209109730848861e-07, | |
| "logits/chosen": -1.4795461893081665, | |
| "logits/rejected": -1.2118721008300781, | |
| "logps/chosen": -381.4693298339844, | |
| "logps/rejected": -409.9659729003906, | |
| "loss": 0.6841, | |
| "rewards/accuracies": 0.574999988079071, | |
| "rewards/chosen": 0.2862910032272339, | |
| "rewards/margins": 0.05146180838346481, | |
| "rewards/rejected": 0.23482923209667206, | |
| "step": 520 | |
| }, | |
| { | |
| "epoch": 0.3659273323552257, | |
| "grad_norm": 91.9266128540039, | |
| "learning_rate": 3.1746031746031743e-07, | |
| "logits/chosen": -1.3780999183654785, | |
| "logits/rejected": -1.1586534976959229, | |
| "logps/chosen": -426.71636962890625, | |
| "logps/rejected": -420.63250732421875, | |
| "loss": 0.6447, | |
| "rewards/accuracies": 0.637499988079071, | |
| "rewards/chosen": 0.3207171559333801, | |
| "rewards/margins": 0.14687559008598328, | |
| "rewards/rejected": 0.17384156584739685, | |
| "step": 530 | |
| }, | |
| { | |
| "epoch": 0.37283162164494693, | |
| "grad_norm": 107.24482727050781, | |
| "learning_rate": 3.140096618357488e-07, | |
| "logits/chosen": -1.5469199419021606, | |
| "logits/rejected": -1.3309047222137451, | |
| "logps/chosen": -423.83251953125, | |
| "logps/rejected": -390.2005310058594, | |
| "loss": 0.649, | |
| "rewards/accuracies": 0.59375, | |
| "rewards/chosen": 0.35180041193962097, | |
| "rewards/margins": 0.13371917605400085, | |
| "rewards/rejected": 0.2180812656879425, | |
| "step": 540 | |
| }, | |
| { | |
| "epoch": 0.3797359109346682, | |
| "grad_norm": 90.34844207763672, | |
| "learning_rate": 3.105590062111801e-07, | |
| "logits/chosen": -1.437620759010315, | |
| "logits/rejected": -1.2692636251449585, | |
| "logps/chosen": -375.0314025878906, | |
| "logps/rejected": -353.4309997558594, | |
| "loss": 0.6666, | |
| "rewards/accuracies": 0.6187499761581421, | |
| "rewards/chosen": 0.305647611618042, | |
| "rewards/margins": 0.09037254005670547, | |
| "rewards/rejected": 0.2152750939130783, | |
| "step": 550 | |
| }, | |
| { | |
| "epoch": 0.3797359109346682, | |
| "eval_logits/chosen": null, | |
| "eval_logits/rejected": -1.1454120874404907, | |
| "eval_logps/chosen": -410.1690673828125, | |
| "eval_logps/rejected": -384.90673828125, | |
| "eval_loss": 0.6544755697250366, | |
| "eval_rewards/accuracies": 0.6129302978515625, | |
| "eval_rewards/chosen": 0.3294997811317444, | |
| "eval_rewards/margins": 0.12637175619602203, | |
| "eval_rewards/rejected": 0.20312805473804474, | |
| "eval_runtime": 173.6702, | |
| "eval_samples_per_second": 6.858, | |
| "eval_steps_per_second": 6.858, | |
| "step": 550 | |
| }, | |
| { | |
| "epoch": 0.3866402002243894, | |
| "grad_norm": 85.39889526367188, | |
| "learning_rate": 3.0710835058661145e-07, | |
| "logits/chosen": -1.47105872631073, | |
| "logits/rejected": -1.2406635284423828, | |
| "logps/chosen": -343.89788818359375, | |
| "logps/rejected": -328.2047119140625, | |
| "loss": 0.6619, | |
| "rewards/accuracies": 0.581250011920929, | |
| "rewards/chosen": 0.2944934368133545, | |
| "rewards/margins": 0.11027739197015762, | |
| "rewards/rejected": 0.18421605229377747, | |
| "step": 560 | |
| }, | |
| { | |
| "epoch": 0.39354448951411064, | |
| "grad_norm": 96.35163116455078, | |
| "learning_rate": 3.036576949620428e-07, | |
| "logits/chosen": -1.636962890625, | |
| "logits/rejected": -1.3636503219604492, | |
| "logps/chosen": -404.2951965332031, | |
| "logps/rejected": -391.49615478515625, | |
| "loss": 0.6572, | |
| "rewards/accuracies": 0.606249988079071, | |
| "rewards/chosen": 0.3543318808078766, | |
| "rewards/margins": 0.14378124475479126, | |
| "rewards/rejected": 0.21055062115192413, | |
| "step": 570 | |
| }, | |
| { | |
| "epoch": 0.4004487788038319, | |
| "grad_norm": 99.60575866699219, | |
| "learning_rate": 3.002070393374741e-07, | |
| "logits/chosen": -1.3705114126205444, | |
| "logits/rejected": -1.0337412357330322, | |
| "logps/chosen": -356.34637451171875, | |
| "logps/rejected": -346.79766845703125, | |
| "loss": 0.6537, | |
| "rewards/accuracies": 0.59375, | |
| "rewards/chosen": 0.2836061120033264, | |
| "rewards/margins": 0.13429149985313416, | |
| "rewards/rejected": 0.14931461215019226, | |
| "step": 580 | |
| }, | |
| { | |
| "epoch": 0.4073530680935531, | |
| "grad_norm": 96.50724029541016, | |
| "learning_rate": 2.9675638371290547e-07, | |
| "logits/chosen": -1.4598503112792969, | |
| "logits/rejected": -1.2610881328582764, | |
| "logps/chosen": -410.27117919921875, | |
| "logps/rejected": -395.2956237792969, | |
| "loss": 0.6455, | |
| "rewards/accuracies": 0.5874999761581421, | |
| "rewards/chosen": 0.379031240940094, | |
| "rewards/margins": 0.15479955077171326, | |
| "rewards/rejected": 0.22423171997070312, | |
| "step": 590 | |
| }, | |
| { | |
| "epoch": 0.41425735738327435, | |
| "grad_norm": 104.0567626953125, | |
| "learning_rate": 2.9330572808833673e-07, | |
| "logits/chosen": -1.329871654510498, | |
| "logits/rejected": -1.1407099962234497, | |
| "logps/chosen": -380.8253173828125, | |
| "logps/rejected": -364.3226013183594, | |
| "loss": 0.6657, | |
| "rewards/accuracies": 0.606249988079071, | |
| "rewards/chosen": 0.31459444761276245, | |
| "rewards/margins": 0.11110667884349823, | |
| "rewards/rejected": 0.20348772406578064, | |
| "step": 600 | |
| }, | |
| { | |
| "epoch": 0.41425735738327435, | |
| "eval_logits/chosen": null, | |
| "eval_logits/rejected": -1.137957215309143, | |
| "eval_logps/chosen": -410.0342102050781, | |
| "eval_logps/rejected": -384.80487060546875, | |
| "eval_loss": 0.6545748114585876, | |
| "eval_rewards/accuracies": 0.6179680824279785, | |
| "eval_rewards/chosen": 0.34298890829086304, | |
| "eval_rewards/margins": 0.1296708583831787, | |
| "eval_rewards/rejected": 0.21331804990768433, | |
| "eval_runtime": 173.6763, | |
| "eval_samples_per_second": 6.858, | |
| "eval_steps_per_second": 6.858, | |
| "step": 600 | |
| }, | |
| { | |
| "epoch": 0.4211616466729956, | |
| "grad_norm": 100.69998168945312, | |
| "learning_rate": 2.898550724637681e-07, | |
| "logits/chosen": -1.3926723003387451, | |
| "logits/rejected": -1.302227258682251, | |
| "logps/chosen": -382.15863037109375, | |
| "logps/rejected": -377.8441467285156, | |
| "loss": 0.6654, | |
| "rewards/accuracies": 0.581250011920929, | |
| "rewards/chosen": 0.3457278609275818, | |
| "rewards/margins": 0.10164386034011841, | |
| "rewards/rejected": 0.24408404529094696, | |
| "step": 610 | |
| }, | |
| { | |
| "epoch": 0.42806593596271686, | |
| "grad_norm": 90.8844985961914, | |
| "learning_rate": 2.8640441683919943e-07, | |
| "logits/chosen": -1.4987343549728394, | |
| "logits/rejected": -1.3479670286178589, | |
| "logps/chosen": -374.33746337890625, | |
| "logps/rejected": -344.2901916503906, | |
| "loss": 0.6639, | |
| "rewards/accuracies": 0.59375, | |
| "rewards/chosen": 0.32695019245147705, | |
| "rewards/margins": 0.105133056640625, | |
| "rewards/rejected": 0.22181710600852966, | |
| "step": 620 | |
| }, | |
| { | |
| "epoch": 0.43497022525243806, | |
| "grad_norm": 100.92221069335938, | |
| "learning_rate": 2.8295376121463075e-07, | |
| "logits/chosen": -1.6069577932357788, | |
| "logits/rejected": -1.357591986656189, | |
| "logps/chosen": -391.8199157714844, | |
| "logps/rejected": -361.18756103515625, | |
| "loss": 0.6445, | |
| "rewards/accuracies": 0.637499988079071, | |
| "rewards/chosen": 0.3474247455596924, | |
| "rewards/margins": 0.15058517456054688, | |
| "rewards/rejected": 0.1968395859003067, | |
| "step": 630 | |
| }, | |
| { | |
| "epoch": 0.4418745145421593, | |
| "grad_norm": 90.86432647705078, | |
| "learning_rate": 2.7950310559006207e-07, | |
| "logits/chosen": -1.4821056127548218, | |
| "logits/rejected": -1.32998526096344, | |
| "logps/chosen": -390.75848388671875, | |
| "logps/rejected": -352.44219970703125, | |
| "loss": 0.6924, | |
| "rewards/accuracies": 0.574999988079071, | |
| "rewards/chosen": 0.2883882522583008, | |
| "rewards/margins": 0.04232420399785042, | |
| "rewards/rejected": 0.24606403708457947, | |
| "step": 640 | |
| }, | |
| { | |
| "epoch": 0.44877880383188057, | |
| "grad_norm": 99.2775650024414, | |
| "learning_rate": 2.7605244996549345e-07, | |
| "logits/chosen": -1.398462176322937, | |
| "logits/rejected": null, | |
| "logps/chosen": -411.4620056152344, | |
| "logps/rejected": -401.382080078125, | |
| "loss": 0.6576, | |
| "rewards/accuracies": 0.5687500238418579, | |
| "rewards/chosen": 0.3147468864917755, | |
| "rewards/margins": 0.1398230642080307, | |
| "rewards/rejected": 0.1749238222837448, | |
| "step": 650 | |
| }, | |
| { | |
| "epoch": 0.44877880383188057, | |
| "eval_logits/chosen": null, | |
| "eval_logits/rejected": -1.155210018157959, | |
| "eval_logps/chosen": -410.21453857421875, | |
| "eval_logps/rejected": -385.0280456542969, | |
| "eval_loss": 0.6529302000999451, | |
| "eval_rewards/accuracies": 0.6179680824279785, | |
| "eval_rewards/chosen": 0.3249528408050537, | |
| "eval_rewards/margins": 0.13395507633686066, | |
| "eval_rewards/rejected": 0.19099776446819305, | |
| "eval_runtime": 173.1301, | |
| "eval_samples_per_second": 6.879, | |
| "eval_steps_per_second": 6.879, | |
| "step": 650 | |
| }, | |
| { | |
| "epoch": 0.45568309312160177, | |
| "grad_norm": 92.97110748291016, | |
| "learning_rate": 2.7260179434092477e-07, | |
| "logits/chosen": -1.43343985080719, | |
| "logits/rejected": -1.3144687414169312, | |
| "logps/chosen": -399.0343933105469, | |
| "logps/rejected": -371.15106201171875, | |
| "loss": 0.6415, | |
| "rewards/accuracies": 0.643750011920929, | |
| "rewards/chosen": 0.3595283031463623, | |
| "rewards/margins": 0.15820467472076416, | |
| "rewards/rejected": 0.20132365822792053, | |
| "step": 660 | |
| }, | |
| { | |
| "epoch": 0.462587382411323, | |
| "grad_norm": 103.37547302246094, | |
| "learning_rate": 2.691511387163561e-07, | |
| "logits/chosen": -1.4777318239212036, | |
| "logits/rejected": -1.2895824909210205, | |
| "logps/chosen": -414.9844665527344, | |
| "logps/rejected": -380.60174560546875, | |
| "loss": 0.66, | |
| "rewards/accuracies": 0.625, | |
| "rewards/chosen": 0.34182053804397583, | |
| "rewards/margins": 0.12233030796051025, | |
| "rewards/rejected": 0.21949021518230438, | |
| "step": 670 | |
| }, | |
| { | |
| "epoch": 0.4694916717010443, | |
| "grad_norm": 96.86497497558594, | |
| "learning_rate": 2.6570048309178746e-07, | |
| "logits/chosen": -1.3753479719161987, | |
| "logits/rejected": -1.2074251174926758, | |
| "logps/chosen": -373.104248046875, | |
| "logps/rejected": -358.5914001464844, | |
| "loss": 0.6344, | |
| "rewards/accuracies": 0.6000000238418579, | |
| "rewards/chosen": 0.32691770792007446, | |
| "rewards/margins": 0.17708414793014526, | |
| "rewards/rejected": 0.1498335748910904, | |
| "step": 680 | |
| }, | |
| { | |
| "epoch": 0.47639596099076553, | |
| "grad_norm": 101.70399475097656, | |
| "learning_rate": 2.6224982746721873e-07, | |
| "logits/chosen": -1.4386012554168701, | |
| "logits/rejected": -1.2293193340301514, | |
| "logps/chosen": -370.0113220214844, | |
| "logps/rejected": -367.55242919921875, | |
| "loss": 0.6806, | |
| "rewards/accuracies": 0.59375, | |
| "rewards/chosen": 0.3327386975288391, | |
| "rewards/margins": 0.07743687927722931, | |
| "rewards/rejected": 0.2553017735481262, | |
| "step": 690 | |
| }, | |
| { | |
| "epoch": 0.48330025028048673, | |
| "grad_norm": 99.53569030761719, | |
| "learning_rate": 2.587991718426501e-07, | |
| "logits/chosen": -1.4941202402114868, | |
| "logits/rejected": -1.3218339681625366, | |
| "logps/chosen": -389.9266357421875, | |
| "logps/rejected": -346.59326171875, | |
| "loss": 0.6406, | |
| "rewards/accuracies": 0.668749988079071, | |
| "rewards/chosen": 0.3879491984844208, | |
| "rewards/margins": 0.1433873325586319, | |
| "rewards/rejected": 0.24456188082695007, | |
| "step": 700 | |
| }, | |
| { | |
| "epoch": 0.48330025028048673, | |
| "eval_logits/chosen": null, | |
| "eval_logits/rejected": -1.1317625045776367, | |
| "eval_logps/chosen": -409.8088684082031, | |
| "eval_logps/rejected": -384.7127685546875, | |
| "eval_loss": 0.6519591212272644, | |
| "eval_rewards/accuracies": 0.6196473836898804, | |
| "eval_rewards/chosen": 0.36551961302757263, | |
| "eval_rewards/margins": 0.14299476146697998, | |
| "eval_rewards/rejected": 0.22252482175827026, | |
| "eval_runtime": 173.3348, | |
| "eval_samples_per_second": 6.871, | |
| "eval_steps_per_second": 6.871, | |
| "step": 700 | |
| }, | |
| { | |
| "epoch": 0.490204539570208, | |
| "grad_norm": 97.43363189697266, | |
| "learning_rate": 2.553485162180814e-07, | |
| "logits/chosen": -1.5055333375930786, | |
| "logits/rejected": -1.2466411590576172, | |
| "logps/chosen": -424.33074951171875, | |
| "logps/rejected": -419.7621154785156, | |
| "loss": 0.6469, | |
| "rewards/accuracies": 0.6187499761581421, | |
| "rewards/chosen": 0.43039917945861816, | |
| "rewards/margins": 0.15889059007167816, | |
| "rewards/rejected": 0.2715086042881012, | |
| "step": 710 | |
| }, | |
| { | |
| "epoch": 0.49710882885992924, | |
| "grad_norm": 100.15609741210938, | |
| "learning_rate": 2.5189786059351275e-07, | |
| "logits/chosen": null, | |
| "logits/rejected": -1.3018251657485962, | |
| "logps/chosen": -383.4604187011719, | |
| "logps/rejected": -386.2461853027344, | |
| "loss": 0.6626, | |
| "rewards/accuracies": 0.59375, | |
| "rewards/chosen": 0.3409530222415924, | |
| "rewards/margins": 0.13614806532859802, | |
| "rewards/rejected": 0.20480497181415558, | |
| "step": 720 | |
| }, | |
| { | |
| "epoch": 0.5040131181496504, | |
| "grad_norm": 104.17349243164062, | |
| "learning_rate": 2.4844720496894407e-07, | |
| "logits/chosen": -1.3319756984710693, | |
| "logits/rejected": -1.1958295106887817, | |
| "logps/chosen": -379.02313232421875, | |
| "logps/rejected": -340.41943359375, | |
| "loss": 0.689, | |
| "rewards/accuracies": 0.53125, | |
| "rewards/chosen": 0.29082250595092773, | |
| "rewards/margins": 0.06135740876197815, | |
| "rewards/rejected": 0.22946509718894958, | |
| "step": 730 | |
| }, | |
| { | |
| "epoch": 0.5109174074393718, | |
| "grad_norm": 100.09748077392578, | |
| "learning_rate": 2.449965493443754e-07, | |
| "logits/chosen": -1.4177885055541992, | |
| "logits/rejected": -1.2871652841567993, | |
| "logps/chosen": -377.2490234375, | |
| "logps/rejected": -341.6965637207031, | |
| "loss": 0.6714, | |
| "rewards/accuracies": 0.5562499761581421, | |
| "rewards/chosen": 0.4028010368347168, | |
| "rewards/margins": 0.11562231928110123, | |
| "rewards/rejected": 0.28717875480651855, | |
| "step": 740 | |
| }, | |
| { | |
| "epoch": 0.517821696729093, | |
| "grad_norm": 103.06417846679688, | |
| "learning_rate": 2.4154589371980677e-07, | |
| "logits/chosen": -1.4369862079620361, | |
| "logits/rejected": -1.2680126428604126, | |
| "logps/chosen": -375.4537658691406, | |
| "logps/rejected": -380.01666259765625, | |
| "loss": 0.654, | |
| "rewards/accuracies": 0.65625, | |
| "rewards/chosen": 0.34813031554222107, | |
| "rewards/margins": 0.13279417157173157, | |
| "rewards/rejected": 0.2153361290693283, | |
| "step": 750 | |
| }, | |
| { | |
| "epoch": 0.517821696729093, | |
| "eval_logits/chosen": null, | |
| "eval_logits/rejected": -1.128820538520813, | |
| "eval_logps/chosen": -409.6957092285156, | |
| "eval_logps/rejected": -384.63482666015625, | |
| "eval_loss": 0.651150643825531, | |
| "eval_rewards/accuracies": 0.6062132716178894, | |
| "eval_rewards/chosen": 0.37683534622192383, | |
| "eval_rewards/margins": 0.14651274681091309, | |
| "eval_rewards/rejected": 0.23032261431217194, | |
| "eval_runtime": 173.301, | |
| "eval_samples_per_second": 6.872, | |
| "eval_steps_per_second": 6.872, | |
| "step": 750 | |
| }, | |
| { | |
| "epoch": 0.5247259860188142, | |
| "grad_norm": 109.10247802734375, | |
| "learning_rate": 2.3809523809523806e-07, | |
| "logits/chosen": -1.5034782886505127, | |
| "logits/rejected": -1.2673709392547607, | |
| "logps/chosen": -394.3305969238281, | |
| "logps/rejected": -371.8570251464844, | |
| "loss": 0.6226, | |
| "rewards/accuracies": 0.65625, | |
| "rewards/chosen": 0.3872641921043396, | |
| "rewards/margins": 0.20504610240459442, | |
| "rewards/rejected": 0.18221807479858398, | |
| "step": 760 | |
| }, | |
| { | |
| "epoch": 0.5316302753085355, | |
| "grad_norm": 96.4645767211914, | |
| "learning_rate": 2.3464458247066943e-07, | |
| "logits/chosen": -1.4274332523345947, | |
| "logits/rejected": -1.2091481685638428, | |
| "logps/chosen": -425.9805603027344, | |
| "logps/rejected": -407.9385681152344, | |
| "loss": 0.6542, | |
| "rewards/accuracies": 0.6187499761581421, | |
| "rewards/chosen": 0.40589994192123413, | |
| "rewards/margins": 0.16390803456306458, | |
| "rewards/rejected": 0.24199192225933075, | |
| "step": 770 | |
| }, | |
| { | |
| "epoch": 0.5385345645982567, | |
| "grad_norm": 111.85659790039062, | |
| "learning_rate": 2.3119392684610076e-07, | |
| "logits/chosen": null, | |
| "logits/rejected": null, | |
| "logps/chosen": -409.7381896972656, | |
| "logps/rejected": -390.08599853515625, | |
| "loss": 0.6571, | |
| "rewards/accuracies": 0.5874999761581421, | |
| "rewards/chosen": 0.4092329442501068, | |
| "rewards/margins": 0.14268992841243744, | |
| "rewards/rejected": 0.2665430009365082, | |
| "step": 780 | |
| }, | |
| { | |
| "epoch": 0.5454388538879779, | |
| "grad_norm": 107.36064910888672, | |
| "learning_rate": 2.2774327122153208e-07, | |
| "logits/chosen": -1.2583667039871216, | |
| "logits/rejected": -1.238249659538269, | |
| "logps/chosen": -377.5832214355469, | |
| "logps/rejected": -362.2899169921875, | |
| "loss": 0.6579, | |
| "rewards/accuracies": 0.6499999761581421, | |
| "rewards/chosen": 0.3105475902557373, | |
| "rewards/margins": 0.12654425203800201, | |
| "rewards/rejected": 0.1840033382177353, | |
| "step": 790 | |
| }, | |
| { | |
| "epoch": 0.5523431431776992, | |
| "grad_norm": 96.03706359863281, | |
| "learning_rate": 2.2429261559696343e-07, | |
| "logits/chosen": -1.3471505641937256, | |
| "logits/rejected": -1.0923124551773071, | |
| "logps/chosen": -367.17803955078125, | |
| "logps/rejected": -356.579833984375, | |
| "loss": 0.6456, | |
| "rewards/accuracies": 0.625, | |
| "rewards/chosen": 0.3641411364078522, | |
| "rewards/margins": 0.1728210747241974, | |
| "rewards/rejected": 0.1913200318813324, | |
| "step": 800 | |
| }, | |
| { | |
| "epoch": 0.5523431431776992, | |
| "eval_logits/chosen": null, | |
| "eval_logits/rejected": -1.1347825527191162, | |
| "eval_logps/chosen": -409.65533447265625, | |
| "eval_logps/rejected": -384.67364501953125, | |
| "eval_loss": 0.6488075256347656, | |
| "eval_rewards/accuracies": 0.6146095991134644, | |
| "eval_rewards/chosen": 0.3808766305446625, | |
| "eval_rewards/margins": 0.15444040298461914, | |
| "eval_rewards/rejected": 0.22643625736236572, | |
| "eval_runtime": 173.4914, | |
| "eval_samples_per_second": 6.865, | |
| "eval_steps_per_second": 6.865, | |
| "step": 800 | |
| }, | |
| { | |
| "epoch": 0.5592474324674204, | |
| "grad_norm": 106.62773132324219, | |
| "learning_rate": 2.2084195997239475e-07, | |
| "logits/chosen": -1.6777063608169556, | |
| "logits/rejected": -1.4368447065353394, | |
| "logps/chosen": -391.08135986328125, | |
| "logps/rejected": -382.46142578125, | |
| "loss": 0.6622, | |
| "rewards/accuracies": 0.5625, | |
| "rewards/chosen": 0.38126569986343384, | |
| "rewards/margins": 0.11523087322711945, | |
| "rewards/rejected": 0.2660347819328308, | |
| "step": 810 | |
| }, | |
| { | |
| "epoch": 0.5661517217571417, | |
| "grad_norm": 83.2169418334961, | |
| "learning_rate": 2.1739130434782607e-07, | |
| "logits/chosen": -1.4315296411514282, | |
| "logits/rejected": -1.306997537612915, | |
| "logps/chosen": -373.8775329589844, | |
| "logps/rejected": -349.00909423828125, | |
| "loss": 0.6473, | |
| "rewards/accuracies": 0.675000011920929, | |
| "rewards/chosen": 0.3956912159919739, | |
| "rewards/margins": 0.17349565029144287, | |
| "rewards/rejected": 0.2221955806016922, | |
| "step": 820 | |
| }, | |
| { | |
| "epoch": 0.5730560110468629, | |
| "grad_norm": 121.26054382324219, | |
| "learning_rate": 2.139406487232574e-07, | |
| "logits/chosen": -1.4633328914642334, | |
| "logits/rejected": -1.3243625164031982, | |
| "logps/chosen": -400.9346008300781, | |
| "logps/rejected": -395.54705810546875, | |
| "loss": 0.6725, | |
| "rewards/accuracies": 0.581250011920929, | |
| "rewards/chosen": 0.36117586493492126, | |
| "rewards/margins": 0.12074120342731476, | |
| "rewards/rejected": 0.2404346913099289, | |
| "step": 830 | |
| }, | |
| { | |
| "epoch": 0.5799603003365841, | |
| "grad_norm": 98.78184509277344, | |
| "learning_rate": 2.1048999309868874e-07, | |
| "logits/chosen": -1.5927917957305908, | |
| "logits/rejected": -1.3326520919799805, | |
| "logps/chosen": -395.4635009765625, | |
| "logps/rejected": -390.7279357910156, | |
| "loss": 0.7039, | |
| "rewards/accuracies": 0.543749988079071, | |
| "rewards/chosen": 0.2971932291984558, | |
| "rewards/margins": 0.03939943015575409, | |
| "rewards/rejected": 0.2577938139438629, | |
| "step": 840 | |
| }, | |
| { | |
| "epoch": 0.5868645896263054, | |
| "grad_norm": 83.3376235961914, | |
| "learning_rate": 2.0703933747412008e-07, | |
| "logits/chosen": -1.4211909770965576, | |
| "logits/rejected": -1.2685062885284424, | |
| "logps/chosen": -404.7445983886719, | |
| "logps/rejected": -383.61993408203125, | |
| "loss": 0.634, | |
| "rewards/accuracies": 0.6312500238418579, | |
| "rewards/chosen": 0.37307268381118774, | |
| "rewards/margins": 0.16583478450775146, | |
| "rewards/rejected": 0.2072378695011139, | |
| "step": 850 | |
| }, | |
| { | |
| "epoch": 0.5868645896263054, | |
| "eval_logits/chosen": null, | |
| "eval_logits/rejected": -1.1369266510009766, | |
| "eval_logps/chosen": -409.7511901855469, | |
| "eval_logps/rejected": -384.7900695800781, | |
| "eval_loss": 0.6476391553878784, | |
| "eval_rewards/accuracies": 0.6087321639060974, | |
| "eval_rewards/chosen": 0.3712875247001648, | |
| "eval_rewards/margins": 0.15649177134037018, | |
| "eval_rewards/rejected": 0.2147957682609558, | |
| "eval_runtime": 173.685, | |
| "eval_samples_per_second": 6.857, | |
| "eval_steps_per_second": 6.857, | |
| "step": 850 | |
| }, | |
| { | |
| "epoch": 0.5937688789160266, | |
| "grad_norm": 91.06554412841797, | |
| "learning_rate": 2.035886818495514e-07, | |
| "logits/chosen": -1.365419864654541, | |
| "logits/rejected": -1.2031277418136597, | |
| "logps/chosen": -363.187744140625, | |
| "logps/rejected": -333.2200622558594, | |
| "loss": 0.6489, | |
| "rewards/accuracies": 0.612500011920929, | |
| "rewards/chosen": 0.3162587881088257, | |
| "rewards/margins": 0.14883024990558624, | |
| "rewards/rejected": 0.16742852330207825, | |
| "step": 860 | |
| }, | |
| { | |
| "epoch": 0.6006731682057478, | |
| "grad_norm": 95.876220703125, | |
| "learning_rate": 2.0013802622498275e-07, | |
| "logits/chosen": null, | |
| "logits/rejected": -1.3413794040679932, | |
| "logps/chosen": -389.54595947265625, | |
| "logps/rejected": -358.7523498535156, | |
| "loss": 0.6445, | |
| "rewards/accuracies": 0.625, | |
| "rewards/chosen": 0.3688567280769348, | |
| "rewards/margins": 0.15345630049705505, | |
| "rewards/rejected": 0.21540041267871857, | |
| "step": 870 | |
| }, | |
| { | |
| "epoch": 0.6075774574954691, | |
| "grad_norm": 99.45269012451172, | |
| "learning_rate": 1.9668737060041408e-07, | |
| "logits/chosen": -1.4373767375946045, | |
| "logits/rejected": null, | |
| "logps/chosen": -378.1737365722656, | |
| "logps/rejected": -353.63250732421875, | |
| "loss": 0.6638, | |
| "rewards/accuracies": 0.612500011920929, | |
| "rewards/chosen": 0.33772993087768555, | |
| "rewards/margins": 0.11045277118682861, | |
| "rewards/rejected": 0.22727715969085693, | |
| "step": 880 | |
| }, | |
| { | |
| "epoch": 0.6144817467851903, | |
| "grad_norm": 94.84121704101562, | |
| "learning_rate": 1.932367149758454e-07, | |
| "logits/chosen": -1.5210530757904053, | |
| "logits/rejected": -1.3680551052093506, | |
| "logps/chosen": -404.3708190917969, | |
| "logps/rejected": -369.14068603515625, | |
| "loss": 0.6429, | |
| "rewards/accuracies": 0.6000000238418579, | |
| "rewards/chosen": 0.40372776985168457, | |
| "rewards/margins": 0.16238902509212494, | |
| "rewards/rejected": 0.2413388043642044, | |
| "step": 890 | |
| }, | |
| { | |
| "epoch": 0.6213860360749115, | |
| "grad_norm": 90.55352783203125, | |
| "learning_rate": 1.8978605935127672e-07, | |
| "logits/chosen": -1.4078751802444458, | |
| "logits/rejected": -1.2929816246032715, | |
| "logps/chosen": -353.96453857421875, | |
| "logps/rejected": -348.5416564941406, | |
| "loss": 0.66, | |
| "rewards/accuracies": 0.6812499761581421, | |
| "rewards/chosen": 0.34200939536094666, | |
| "rewards/margins": 0.11792133003473282, | |
| "rewards/rejected": 0.22408807277679443, | |
| "step": 900 | |
| }, | |
| { | |
| "epoch": 0.6213860360749115, | |
| "eval_logits/chosen": null, | |
| "eval_logits/rejected": -1.139318823814392, | |
| "eval_logps/chosen": -409.7491760253906, | |
| "eval_logps/rejected": -384.7567138671875, | |
| "eval_loss": 0.6495311856269836, | |
| "eval_rewards/accuracies": 0.6204869747161865, | |
| "eval_rewards/chosen": 0.3714897930622101, | |
| "eval_rewards/margins": 0.15335862338542938, | |
| "eval_rewards/rejected": 0.2181311994791031, | |
| "eval_runtime": 173.8621, | |
| "eval_samples_per_second": 6.85, | |
| "eval_steps_per_second": 6.85, | |
| "step": 900 | |
| }, | |
| { | |
| "epoch": 0.6282903253646328, | |
| "grad_norm": 84.32146453857422, | |
| "learning_rate": 1.8633540372670807e-07, | |
| "logits/chosen": null, | |
| "logits/rejected": -1.269124984741211, | |
| "logps/chosen": -383.15533447265625, | |
| "logps/rejected": -359.85833740234375, | |
| "loss": 0.6471, | |
| "rewards/accuracies": 0.637499988079071, | |
| "rewards/chosen": 0.38757017254829407, | |
| "rewards/margins": 0.15741722285747528, | |
| "rewards/rejected": 0.230152890086174, | |
| "step": 910 | |
| }, | |
| { | |
| "epoch": 0.635194614654354, | |
| "grad_norm": 109.78142547607422, | |
| "learning_rate": 1.828847481021394e-07, | |
| "logits/chosen": -1.4139900207519531, | |
| "logits/rejected": -1.3312866687774658, | |
| "logps/chosen": -381.28277587890625, | |
| "logps/rejected": -390.725830078125, | |
| "loss": 0.6325, | |
| "rewards/accuracies": 0.6312500238418579, | |
| "rewards/chosen": 0.3692389130592346, | |
| "rewards/margins": 0.17626893520355225, | |
| "rewards/rejected": 0.19296997785568237, | |
| "step": 920 | |
| }, | |
| { | |
| "epoch": 0.6420989039440752, | |
| "grad_norm": 107.46936798095703, | |
| "learning_rate": 1.7943409247757073e-07, | |
| "logits/chosen": -1.4537097215652466, | |
| "logits/rejected": -1.2186715602874756, | |
| "logps/chosen": -396.77392578125, | |
| "logps/rejected": -374.40618896484375, | |
| "loss": 0.6333, | |
| "rewards/accuracies": 0.6499999761581421, | |
| "rewards/chosen": 0.4124184548854828, | |
| "rewards/margins": 0.21722407639026642, | |
| "rewards/rejected": 0.19519445300102234, | |
| "step": 930 | |
| }, | |
| { | |
| "epoch": 0.6490031932337965, | |
| "grad_norm": 87.2940444946289, | |
| "learning_rate": 1.7598343685300208e-07, | |
| "logits/chosen": -1.4950411319732666, | |
| "logits/rejected": null, | |
| "logps/chosen": -382.4498596191406, | |
| "logps/rejected": -320.4793701171875, | |
| "loss": 0.6422, | |
| "rewards/accuracies": 0.6499999761581421, | |
| "rewards/chosen": 0.36269697546958923, | |
| "rewards/margins": 0.15182676911354065, | |
| "rewards/rejected": 0.2108701765537262, | |
| "step": 940 | |
| }, | |
| { | |
| "epoch": 0.6559074825235177, | |
| "grad_norm": 86.76087951660156, | |
| "learning_rate": 1.725327812284334e-07, | |
| "logits/chosen": -1.2916945219039917, | |
| "logits/rejected": -1.1377825736999512, | |
| "logps/chosen": -370.7120666503906, | |
| "logps/rejected": -355.99212646484375, | |
| "loss": 0.6847, | |
| "rewards/accuracies": 0.5249999761581421, | |
| "rewards/chosen": 0.3327692747116089, | |
| "rewards/margins": 0.07058731466531754, | |
| "rewards/rejected": 0.26218199729919434, | |
| "step": 950 | |
| }, | |
| { | |
| "epoch": 0.6559074825235177, | |
| "eval_logits/chosen": null, | |
| "eval_logits/rejected": -1.1284286975860596, | |
| "eval_logps/chosen": -409.7359313964844, | |
| "eval_logps/rejected": -384.813720703125, | |
| "eval_loss": 0.647310197353363, | |
| "eval_rewards/accuracies": 0.6078925132751465, | |
| "eval_rewards/chosen": 0.37281379103660583, | |
| "eval_rewards/margins": 0.16038304567337036, | |
| "eval_rewards/rejected": 0.21243073046207428, | |
| "eval_runtime": 173.875, | |
| "eval_samples_per_second": 6.85, | |
| "eval_steps_per_second": 6.85, | |
| "step": 950 | |
| }, | |
| { | |
| "epoch": 0.662811771813239, | |
| "grad_norm": 84.70791625976562, | |
| "learning_rate": 1.6908212560386473e-07, | |
| "logits/chosen": -1.2661709785461426, | |
| "logits/rejected": -1.1103928089141846, | |
| "logps/chosen": -381.80291748046875, | |
| "logps/rejected": -328.13555908203125, | |
| "loss": 0.6615, | |
| "rewards/accuracies": 0.5874999761581421, | |
| "rewards/chosen": 0.31213271617889404, | |
| "rewards/margins": 0.13148072361946106, | |
| "rewards/rejected": 0.18065199255943298, | |
| "step": 960 | |
| }, | |
| { | |
| "epoch": 0.6697160611029602, | |
| "grad_norm": 97.82742309570312, | |
| "learning_rate": 1.6563146997929605e-07, | |
| "logits/chosen": -1.4364001750946045, | |
| "logits/rejected": -1.3133249282836914, | |
| "logps/chosen": -417.947998046875, | |
| "logps/rejected": -395.54681396484375, | |
| "loss": 0.6598, | |
| "rewards/accuracies": 0.581250011920929, | |
| "rewards/chosen": 0.40047353506088257, | |
| "rewards/margins": 0.12428589165210724, | |
| "rewards/rejected": 0.2761876583099365, | |
| "step": 970 | |
| }, | |
| { | |
| "epoch": 0.6766203503926814, | |
| "grad_norm": 85.0315170288086, | |
| "learning_rate": 1.621808143547274e-07, | |
| "logits/chosen": -1.5771453380584717, | |
| "logits/rejected": -1.3503344058990479, | |
| "logps/chosen": -363.5293884277344, | |
| "logps/rejected": -334.8780212402344, | |
| "loss": 0.6364, | |
| "rewards/accuracies": 0.6312500238418579, | |
| "rewards/chosen": 0.3424316346645355, | |
| "rewards/margins": 0.15654315054416656, | |
| "rewards/rejected": 0.18588848412036896, | |
| "step": 980 | |
| }, | |
| { | |
| "epoch": 0.6835246396824027, | |
| "grad_norm": 126.81350708007812, | |
| "learning_rate": 1.5873015873015872e-07, | |
| "logits/chosen": -1.4660173654556274, | |
| "logits/rejected": -1.261212706565857, | |
| "logps/chosen": -408.24102783203125, | |
| "logps/rejected": -408.9168701171875, | |
| "loss": 0.6551, | |
| "rewards/accuracies": 0.675000011920929, | |
| "rewards/chosen": 0.3640829920768738, | |
| "rewards/margins": 0.13493572175502777, | |
| "rewards/rejected": 0.2291472852230072, | |
| "step": 990 | |
| }, | |
| { | |
| "epoch": 0.6904289289721239, | |
| "grad_norm": 101.64756774902344, | |
| "learning_rate": 1.5527950310559004e-07, | |
| "logits/chosen": -1.4593558311462402, | |
| "logits/rejected": -1.284190058708191, | |
| "logps/chosen": -428.12188720703125, | |
| "logps/rejected": -404.9797668457031, | |
| "loss": 0.6473, | |
| "rewards/accuracies": 0.606249988079071, | |
| "rewards/chosen": 0.4170054495334625, | |
| "rewards/margins": 0.16244842112064362, | |
| "rewards/rejected": 0.2545570433139801, | |
| "step": 1000 | |
| }, | |
| { | |
| "epoch": 0.6904289289721239, | |
| "eval_logits/chosen": null, | |
| "eval_logits/rejected": -1.1269783973693848, | |
| "eval_logps/chosen": -409.7304382324219, | |
| "eval_logps/rejected": -384.82025146484375, | |
| "eval_loss": 0.6481216549873352, | |
| "eval_rewards/accuracies": 0.6078925132751465, | |
| "eval_rewards/chosen": 0.3733593821525574, | |
| "eval_rewards/margins": 0.16157987713813782, | |
| "eval_rewards/rejected": 0.21177950501441956, | |
| "eval_runtime": 173.8531, | |
| "eval_samples_per_second": 6.851, | |
| "eval_steps_per_second": 6.851, | |
| "step": 1000 | |
| }, | |
| { | |
| "epoch": 0.6973332182618451, | |
| "grad_norm": 79.46784973144531, | |
| "learning_rate": 1.518288474810214e-07, | |
| "logits/chosen": -1.3219681978225708, | |
| "logits/rejected": -1.1236135959625244, | |
| "logps/chosen": -415.108642578125, | |
| "logps/rejected": -416.01629638671875, | |
| "loss": 0.636, | |
| "rewards/accuracies": 0.675000011920929, | |
| "rewards/chosen": 0.3638695180416107, | |
| "rewards/margins": 0.19566866755485535, | |
| "rewards/rejected": 0.16820085048675537, | |
| "step": 1010 | |
| }, | |
| { | |
| "epoch": 0.7042375075515664, | |
| "grad_norm": 98.8138198852539, | |
| "learning_rate": 1.4837819185645273e-07, | |
| "logits/chosen": -1.5049182176589966, | |
| "logits/rejected": -1.3138519525527954, | |
| "logps/chosen": -380.9891052246094, | |
| "logps/rejected": -373.53729248046875, | |
| "loss": 0.6605, | |
| "rewards/accuracies": 0.6312500238418579, | |
| "rewards/chosen": 0.35565224289894104, | |
| "rewards/margins": 0.1293681114912033, | |
| "rewards/rejected": 0.22628410160541534, | |
| "step": 1020 | |
| }, | |
| { | |
| "epoch": 0.7111417968412876, | |
| "grad_norm": 96.57266235351562, | |
| "learning_rate": 1.4492753623188405e-07, | |
| "logits/chosen": -1.3988029956817627, | |
| "logits/rejected": null, | |
| "logps/chosen": -406.1911926269531, | |
| "logps/rejected": -391.26300048828125, | |
| "loss": 0.6774, | |
| "rewards/accuracies": 0.550000011920929, | |
| "rewards/chosen": 0.3720816671848297, | |
| "rewards/margins": 0.09232926368713379, | |
| "rewards/rejected": 0.2797524034976959, | |
| "step": 1030 | |
| }, | |
| { | |
| "epoch": 0.7180460861310088, | |
| "grad_norm": 104.09722900390625, | |
| "learning_rate": 1.4147688060731538e-07, | |
| "logits/chosen": -1.5075652599334717, | |
| "logits/rejected": -1.2237682342529297, | |
| "logps/chosen": -392.244873046875, | |
| "logps/rejected": -347.1589660644531, | |
| "loss": 0.6265, | |
| "rewards/accuracies": 0.6875, | |
| "rewards/chosen": 0.3723699748516083, | |
| "rewards/margins": 0.1985810399055481, | |
| "rewards/rejected": 0.17378894984722137, | |
| "step": 1040 | |
| }, | |
| { | |
| "epoch": 0.7249503754207302, | |
| "grad_norm": 95.77185821533203, | |
| "learning_rate": 1.3802622498274672e-07, | |
| "logits/chosen": -1.3212801218032837, | |
| "logits/rejected": -1.1590490341186523, | |
| "logps/chosen": -377.89013671875, | |
| "logps/rejected": -363.0843811035156, | |
| "loss": 0.6494, | |
| "rewards/accuracies": 0.6499999761581421, | |
| "rewards/chosen": 0.33318403363227844, | |
| "rewards/margins": 0.15657344460487366, | |
| "rewards/rejected": 0.17661058902740479, | |
| "step": 1050 | |
| }, | |
| { | |
| "epoch": 0.7249503754207302, | |
| "eval_logits/chosen": null, | |
| "eval_logits/rejected": -1.1044600009918213, | |
| "eval_logps/chosen": -409.47552490234375, | |
| "eval_logps/rejected": -384.6523742675781, | |
| "eval_loss": 0.6458028554916382, | |
| "eval_rewards/accuracies": 0.6087321639060974, | |
| "eval_rewards/chosen": 0.39885976910591125, | |
| "eval_rewards/margins": 0.17029373347759247, | |
| "eval_rewards/rejected": 0.22856605052947998, | |
| "eval_runtime": 173.7412, | |
| "eval_samples_per_second": 6.855, | |
| "eval_steps_per_second": 6.855, | |
| "step": 1050 | |
| }, | |
| { | |
| "epoch": 0.7318546647104514, | |
| "grad_norm": 82.92915344238281, | |
| "learning_rate": 1.3457556935817804e-07, | |
| "logits/chosen": -1.450514316558838, | |
| "logits/rejected": -1.1329553127288818, | |
| "logps/chosen": -370.6721496582031, | |
| "logps/rejected": -376.0997314453125, | |
| "loss": 0.6139, | |
| "rewards/accuracies": 0.6625000238418579, | |
| "rewards/chosen": 0.4179357886314392, | |
| "rewards/margins": 0.23517921566963196, | |
| "rewards/rejected": 0.18275660276412964, | |
| "step": 1060 | |
| }, | |
| { | |
| "epoch": 0.7387589540001726, | |
| "grad_norm": 95.6248779296875, | |
| "learning_rate": 1.3112491373360937e-07, | |
| "logits/chosen": -1.4916682243347168, | |
| "logits/rejected": null, | |
| "logps/chosen": -399.62060546875, | |
| "logps/rejected": -378.38043212890625, | |
| "loss": 0.6628, | |
| "rewards/accuracies": 0.606249988079071, | |
| "rewards/chosen": 0.4076872766017914, | |
| "rewards/margins": 0.12090712785720825, | |
| "rewards/rejected": 0.2867801785469055, | |
| "step": 1070 | |
| }, | |
| { | |
| "epoch": 0.7456632432898939, | |
| "grad_norm": 87.97006225585938, | |
| "learning_rate": 1.276742581090407e-07, | |
| "logits/chosen": -1.2332895994186401, | |
| "logits/rejected": -1.0287469625473022, | |
| "logps/chosen": -379.1185302734375, | |
| "logps/rejected": -376.4650573730469, | |
| "loss": 0.6665, | |
| "rewards/accuracies": 0.59375, | |
| "rewards/chosen": 0.3581882119178772, | |
| "rewards/margins": 0.11071789264678955, | |
| "rewards/rejected": 0.24747030436992645, | |
| "step": 1080 | |
| }, | |
| { | |
| "epoch": 0.7525675325796151, | |
| "grad_norm": 88.0757064819336, | |
| "learning_rate": 1.2422360248447204e-07, | |
| "logits/chosen": -1.3982305526733398, | |
| "logits/rejected": -1.2792531251907349, | |
| "logps/chosen": -404.583984375, | |
| "logps/rejected": -371.7993469238281, | |
| "loss": 0.655, | |
| "rewards/accuracies": 0.637499988079071, | |
| "rewards/chosen": 0.36442241072654724, | |
| "rewards/margins": 0.13779766857624054, | |
| "rewards/rejected": 0.2266247272491455, | |
| "step": 1090 | |
| }, | |
| { | |
| "epoch": 0.7594718218693364, | |
| "grad_norm": 94.24323272705078, | |
| "learning_rate": 1.2077294685990338e-07, | |
| "logits/chosen": -1.3688199520111084, | |
| "logits/rejected": -1.2200040817260742, | |
| "logps/chosen": -402.47320556640625, | |
| "logps/rejected": -378.7218933105469, | |
| "loss": 0.6456, | |
| "rewards/accuracies": 0.6625000238418579, | |
| "rewards/chosen": 0.3827332854270935, | |
| "rewards/margins": 0.14937618374824524, | |
| "rewards/rejected": 0.23335710167884827, | |
| "step": 1100 | |
| }, | |
| { | |
| "epoch": 0.7594718218693364, | |
| "eval_logits/chosen": null, | |
| "eval_logits/rejected": -1.0987216234207153, | |
| "eval_logps/chosen": -409.48699951171875, | |
| "eval_logps/rejected": -384.6772766113281, | |
| "eval_loss": 0.6459550857543945, | |
| "eval_rewards/accuracies": 0.6204869747161865, | |
| "eval_rewards/chosen": 0.39770370721817017, | |
| "eval_rewards/margins": 0.1716250628232956, | |
| "eval_rewards/rejected": 0.22607862949371338, | |
| "eval_runtime": 173.2214, | |
| "eval_samples_per_second": 6.876, | |
| "eval_steps_per_second": 6.876, | |
| "step": 1100 | |
| }, | |
| { | |
| "epoch": 0.7663761111590576, | |
| "grad_norm": 97.47419738769531, | |
| "learning_rate": 1.1732229123533472e-07, | |
| "logits/chosen": -1.2847949266433716, | |
| "logits/rejected": -1.2520158290863037, | |
| "logps/chosen": -397.53900146484375, | |
| "logps/rejected": -373.2650451660156, | |
| "loss": 0.6825, | |
| "rewards/accuracies": 0.581250011920929, | |
| "rewards/chosen": 0.35880032181739807, | |
| "rewards/margins": 0.0934886708855629, | |
| "rewards/rejected": 0.26531165838241577, | |
| "step": 1110 | |
| }, | |
| { | |
| "epoch": 0.7732804004487788, | |
| "grad_norm": 118.96842956542969, | |
| "learning_rate": 1.1387163561076604e-07, | |
| "logits/chosen": -1.404904842376709, | |
| "logits/rejected": null, | |
| "logps/chosen": -397.9188537597656, | |
| "logps/rejected": -357.46038818359375, | |
| "loss": 0.6383, | |
| "rewards/accuracies": 0.625, | |
| "rewards/chosen": 0.3993021249771118, | |
| "rewards/margins": 0.18723976612091064, | |
| "rewards/rejected": 0.21206238865852356, | |
| "step": 1120 | |
| }, | |
| { | |
| "epoch": 0.7801846897385001, | |
| "grad_norm": 92.28319549560547, | |
| "learning_rate": 1.1042097998619737e-07, | |
| "logits/chosen": -1.4132840633392334, | |
| "logits/rejected": -1.1864253282546997, | |
| "logps/chosen": -376.830810546875, | |
| "logps/rejected": -378.5279846191406, | |
| "loss": 0.6474, | |
| "rewards/accuracies": 0.6312500238418579, | |
| "rewards/chosen": 0.38667160272598267, | |
| "rewards/margins": 0.1643599271774292, | |
| "rewards/rejected": 0.22231166064739227, | |
| "step": 1130 | |
| }, | |
| { | |
| "epoch": 0.7870889790282213, | |
| "grad_norm": 83.31596374511719, | |
| "learning_rate": 1.069703243616287e-07, | |
| "logits/chosen": -1.4797419309616089, | |
| "logits/rejected": -1.30462646484375, | |
| "logps/chosen": -376.4138488769531, | |
| "logps/rejected": -348.89801025390625, | |
| "loss": 0.661, | |
| "rewards/accuracies": 0.637499988079071, | |
| "rewards/chosen": 0.40063542127609253, | |
| "rewards/margins": 0.1296340525150299, | |
| "rewards/rejected": 0.27100133895874023, | |
| "step": 1140 | |
| }, | |
| { | |
| "epoch": 0.7939932683179425, | |
| "grad_norm": 103.56004333496094, | |
| "learning_rate": 1.0351966873706004e-07, | |
| "logits/chosen": -1.5351804494857788, | |
| "logits/rejected": -1.3304837942123413, | |
| "logps/chosen": -399.09283447265625, | |
| "logps/rejected": -388.9432678222656, | |
| "loss": 0.6282, | |
| "rewards/accuracies": 0.643750011920929, | |
| "rewards/chosen": 0.4186945855617523, | |
| "rewards/margins": 0.2270217388868332, | |
| "rewards/rejected": 0.19167284667491913, | |
| "step": 1150 | |
| }, | |
| { | |
| "epoch": 0.7939932683179425, | |
| "eval_logits/chosen": null, | |
| "eval_logits/rejected": -1.1050151586532593, | |
| "eval_logps/chosen": -409.507080078125, | |
| "eval_logps/rejected": -384.7345886230469, | |
| "eval_loss": 0.6440198421478271, | |
| "eval_rewards/accuracies": 0.6120907068252563, | |
| "eval_rewards/chosen": 0.3957003355026245, | |
| "eval_rewards/margins": 0.17536155879497528, | |
| "eval_rewards/rejected": 0.22033876180648804, | |
| "eval_runtime": 173.6491, | |
| "eval_samples_per_second": 6.859, | |
| "eval_steps_per_second": 6.859, | |
| "step": 1150 | |
| }, | |
| { | |
| "epoch": 0.8008975576076638, | |
| "grad_norm": 90.4014663696289, | |
| "learning_rate": 1.0006901311249138e-07, | |
| "logits/chosen": -1.5077835321426392, | |
| "logits/rejected": -1.2734102010726929, | |
| "logps/chosen": -379.27410888671875, | |
| "logps/rejected": -351.67279052734375, | |
| "loss": 0.6381, | |
| "rewards/accuracies": 0.6625000238418579, | |
| "rewards/chosen": 0.38737237453460693, | |
| "rewards/margins": 0.174201101064682, | |
| "rewards/rejected": 0.21317127346992493, | |
| "step": 1160 | |
| }, | |
| { | |
| "epoch": 0.807801846897385, | |
| "grad_norm": 66.83627319335938, | |
| "learning_rate": 9.66183574879227e-08, | |
| "logits/chosen": -1.3416311740875244, | |
| "logits/rejected": -1.2346522808074951, | |
| "logps/chosen": -397.44805908203125, | |
| "logps/rejected": -378.79144287109375, | |
| "loss": 0.6569, | |
| "rewards/accuracies": 0.612500011920929, | |
| "rewards/chosen": 0.3850553333759308, | |
| "rewards/margins": 0.15491603314876556, | |
| "rewards/rejected": 0.23013930022716522, | |
| "step": 1170 | |
| }, | |
| { | |
| "epoch": 0.8147061361871062, | |
| "grad_norm": 85.14424896240234, | |
| "learning_rate": 9.316770186335403e-08, | |
| "logits/chosen": -1.2797658443450928, | |
| "logits/rejected": -1.1257115602493286, | |
| "logps/chosen": -405.3745422363281, | |
| "logps/rejected": -366.5975036621094, | |
| "loss": 0.654, | |
| "rewards/accuracies": 0.581250011920929, | |
| "rewards/chosen": 0.35339251160621643, | |
| "rewards/margins": 0.14307786524295807, | |
| "rewards/rejected": 0.21031466126441956, | |
| "step": 1180 | |
| }, | |
| { | |
| "epoch": 0.8216104254768275, | |
| "grad_norm": 102.25609588623047, | |
| "learning_rate": 8.971704623878537e-08, | |
| "logits/chosen": -1.6227967739105225, | |
| "logits/rejected": -1.4342666864395142, | |
| "logps/chosen": -377.0340881347656, | |
| "logps/rejected": -383.2848815917969, | |
| "loss": 0.6524, | |
| "rewards/accuracies": 0.6312500238418579, | |
| "rewards/chosen": 0.3992713987827301, | |
| "rewards/margins": 0.14228931069374084, | |
| "rewards/rejected": 0.25698205828666687, | |
| "step": 1190 | |
| }, | |
| { | |
| "epoch": 0.8285147147665487, | |
| "grad_norm": 86.08372497558594, | |
| "learning_rate": 8.62663906142167e-08, | |
| "logits/chosen": -1.4563744068145752, | |
| "logits/rejected": -1.352936029434204, | |
| "logps/chosen": -397.0251770019531, | |
| "logps/rejected": -369.11883544921875, | |
| "loss": 0.657, | |
| "rewards/accuracies": 0.6000000238418579, | |
| "rewards/chosen": 0.38390326499938965, | |
| "rewards/margins": 0.14529408514499664, | |
| "rewards/rejected": 0.2386091649532318, | |
| "step": 1200 | |
| }, | |
| { | |
| "epoch": 0.8285147147665487, | |
| "eval_logits/chosen": null, | |
| "eval_logits/rejected": -1.105219841003418, | |
| "eval_logps/chosen": -409.50103759765625, | |
| "eval_logps/rejected": -384.6767883300781, | |
| "eval_loss": 0.6475593447685242, | |
| "eval_rewards/accuracies": 0.6095718145370483, | |
| "eval_rewards/chosen": 0.396301805973053, | |
| "eval_rewards/margins": 0.17017905414104462, | |
| "eval_rewards/rejected": 0.22612272202968597, | |
| "eval_runtime": 173.6541, | |
| "eval_samples_per_second": 6.858, | |
| "eval_steps_per_second": 6.858, | |
| "step": 1200 | |
| }, | |
| { | |
| "epoch": 0.83541900405627, | |
| "grad_norm": 103.7150650024414, | |
| "learning_rate": 8.281573498964802e-08, | |
| "logits/chosen": null, | |
| "logits/rejected": -1.2163610458374023, | |
| "logps/chosen": -372.20989990234375, | |
| "logps/rejected": -364.71295166015625, | |
| "loss": 0.7005, | |
| "rewards/accuracies": 0.48750001192092896, | |
| "rewards/chosen": 0.3188936710357666, | |
| "rewards/margins": 0.05130113288760185, | |
| "rewards/rejected": 0.26759251952171326, | |
| "step": 1210 | |
| }, | |
| { | |
| "epoch": 0.8423232933459912, | |
| "grad_norm": 94.80174255371094, | |
| "learning_rate": 7.936507936507936e-08, | |
| "logits/chosen": -1.2603299617767334, | |
| "logits/rejected": -1.2094240188598633, | |
| "logps/chosen": -381.99761962890625, | |
| "logps/rejected": -394.0637512207031, | |
| "loss": 0.6938, | |
| "rewards/accuracies": 0.53125, | |
| "rewards/chosen": 0.3386909067630768, | |
| "rewards/margins": 0.05538243055343628, | |
| "rewards/rejected": 0.2833084464073181, | |
| "step": 1220 | |
| }, | |
| { | |
| "epoch": 0.8492275826357124, | |
| "grad_norm": 96.21173858642578, | |
| "learning_rate": 7.59144237405107e-08, | |
| "logits/chosen": -1.2958943843841553, | |
| "logits/rejected": -1.0766475200653076, | |
| "logps/chosen": -389.5054626464844, | |
| "logps/rejected": -359.45501708984375, | |
| "loss": 0.6571, | |
| "rewards/accuracies": 0.5874999761581421, | |
| "rewards/chosen": 0.37096503376960754, | |
| "rewards/margins": 0.14743277430534363, | |
| "rewards/rejected": 0.22353224456310272, | |
| "step": 1230 | |
| }, | |
| { | |
| "epoch": 0.8561318719254337, | |
| "grad_norm": 111.18427276611328, | |
| "learning_rate": 7.246376811594203e-08, | |
| "logits/chosen": -1.4039971828460693, | |
| "logits/rejected": null, | |
| "logps/chosen": -395.5658264160156, | |
| "logps/rejected": -356.8921813964844, | |
| "loss": 0.6967, | |
| "rewards/accuracies": 0.5375000238418579, | |
| "rewards/chosen": 0.3431479334831238, | |
| "rewards/margins": 0.04705189913511276, | |
| "rewards/rejected": 0.2960960268974304, | |
| "step": 1240 | |
| }, | |
| { | |
| "epoch": 0.8630361612151549, | |
| "grad_norm": 85.78289031982422, | |
| "learning_rate": 6.901311249137336e-08, | |
| "logits/chosen": -1.4105193614959717, | |
| "logits/rejected": -1.2953484058380127, | |
| "logps/chosen": -370.317626953125, | |
| "logps/rejected": -343.9225158691406, | |
| "loss": 0.6743, | |
| "rewards/accuracies": 0.581250011920929, | |
| "rewards/chosen": 0.3631276488304138, | |
| "rewards/margins": 0.11619863659143448, | |
| "rewards/rejected": 0.24692897498607635, | |
| "step": 1250 | |
| }, | |
| { | |
| "epoch": 0.8630361612151549, | |
| "eval_logits/chosen": null, | |
| "eval_logits/rejected": -1.1084688901901245, | |
| "eval_logps/chosen": -409.56561279296875, | |
| "eval_logps/rejected": -384.7563171386719, | |
| "eval_loss": 0.6463225483894348, | |
| "eval_rewards/accuracies": 0.6146095991134644, | |
| "eval_rewards/chosen": 0.3898475766181946, | |
| "eval_rewards/margins": 0.17167842388153076, | |
| "eval_rewards/rejected": 0.2181691974401474, | |
| "eval_runtime": 173.5928, | |
| "eval_samples_per_second": 6.861, | |
| "eval_steps_per_second": 6.861, | |
| "step": 1250 | |
| }, | |
| { | |
| "epoch": 0.8699404505048761, | |
| "grad_norm": 88.3895492553711, | |
| "learning_rate": 6.556245686680468e-08, | |
| "logits/chosen": -1.4868072271347046, | |
| "logits/rejected": -1.3154561519622803, | |
| "logps/chosen": -389.04620361328125, | |
| "logps/rejected": -379.40338134765625, | |
| "loss": 0.6299, | |
| "rewards/accuracies": 0.637499988079071, | |
| "rewards/chosen": 0.3888487219810486, | |
| "rewards/margins": 0.20886433124542236, | |
| "rewards/rejected": 0.1799844205379486, | |
| "step": 1260 | |
| }, | |
| { | |
| "epoch": 0.8768447397945974, | |
| "grad_norm": 110.88898468017578, | |
| "learning_rate": 6.211180124223602e-08, | |
| "logits/chosen": -1.2930047512054443, | |
| "logits/rejected": null, | |
| "logps/chosen": -417.73126220703125, | |
| "logps/rejected": -387.33563232421875, | |
| "loss": 0.6686, | |
| "rewards/accuracies": 0.59375, | |
| "rewards/chosen": 0.3636248707771301, | |
| "rewards/margins": 0.12059865146875381, | |
| "rewards/rejected": 0.24302616715431213, | |
| "step": 1270 | |
| }, | |
| { | |
| "epoch": 0.8837490290843186, | |
| "grad_norm": 89.85807800292969, | |
| "learning_rate": 5.866114561766736e-08, | |
| "logits/chosen": -1.390554428100586, | |
| "logits/rejected": -1.328037977218628, | |
| "logps/chosen": -413.32244873046875, | |
| "logps/rejected": -387.0220947265625, | |
| "loss": 0.6563, | |
| "rewards/accuracies": 0.574999988079071, | |
| "rewards/chosen": 0.41810736060142517, | |
| "rewards/margins": 0.14793208241462708, | |
| "rewards/rejected": 0.2701753079891205, | |
| "step": 1280 | |
| }, | |
| { | |
| "epoch": 0.8906533183740398, | |
| "grad_norm": 102.83927917480469, | |
| "learning_rate": 5.5210489993098687e-08, | |
| "logits/chosen": -1.3396317958831787, | |
| "logits/rejected": -1.0842339992523193, | |
| "logps/chosen": -372.3958740234375, | |
| "logps/rejected": -378.26324462890625, | |
| "loss": 0.6655, | |
| "rewards/accuracies": 0.612500011920929, | |
| "rewards/chosen": 0.367808997631073, | |
| "rewards/margins": 0.13876868784427643, | |
| "rewards/rejected": 0.22904033958911896, | |
| "step": 1290 | |
| }, | |
| { | |
| "epoch": 0.8975576076637611, | |
| "grad_norm": 108.53854370117188, | |
| "learning_rate": 5.175983436853002e-08, | |
| "logits/chosen": -1.4352149963378906, | |
| "logits/rejected": -1.2435885667800903, | |
| "logps/chosen": -431.7513732910156, | |
| "logps/rejected": -421.14215087890625, | |
| "loss": 0.6502, | |
| "rewards/accuracies": 0.6187499761581421, | |
| "rewards/chosen": 0.40972501039505005, | |
| "rewards/margins": 0.16878488659858704, | |
| "rewards/rejected": 0.2409401386976242, | |
| "step": 1300 | |
| }, | |
| { | |
| "epoch": 0.8975576076637611, | |
| "eval_logits/chosen": null, | |
| "eval_logits/rejected": -1.114261269569397, | |
| "eval_logps/chosen": -409.6363220214844, | |
| "eval_logps/rejected": -384.7745056152344, | |
| "eval_loss": 0.6483097076416016, | |
| "eval_rewards/accuracies": 0.6120907068252563, | |
| "eval_rewards/chosen": 0.3827739357948303, | |
| "eval_rewards/margins": 0.16641728579998016, | |
| "eval_rewards/rejected": 0.21635663509368896, | |
| "eval_runtime": 173.3443, | |
| "eval_samples_per_second": 6.871, | |
| "eval_steps_per_second": 6.871, | |
| "step": 1300 | |
| }, | |
| { | |
| "epoch": 0.9044618969534823, | |
| "grad_norm": 91.35237121582031, | |
| "learning_rate": 4.830917874396135e-08, | |
| "logits/chosen": -1.2966923713684082, | |
| "logits/rejected": -1.116370439529419, | |
| "logps/chosen": -388.6182556152344, | |
| "logps/rejected": -388.0231628417969, | |
| "loss": 0.6535, | |
| "rewards/accuracies": 0.643750011920929, | |
| "rewards/chosen": 0.41747522354125977, | |
| "rewards/margins": 0.14344492554664612, | |
| "rewards/rejected": 0.27403026819229126, | |
| "step": 1310 | |
| }, | |
| { | |
| "epoch": 0.9113661862432035, | |
| "grad_norm": 122.95155334472656, | |
| "learning_rate": 4.4858523119392684e-08, | |
| "logits/chosen": -1.486899971961975, | |
| "logits/rejected": -1.4145976305007935, | |
| "logps/chosen": -389.6207580566406, | |
| "logps/rejected": -373.4767150878906, | |
| "loss": 0.6954, | |
| "rewards/accuracies": 0.606249988079071, | |
| "rewards/chosen": 0.37160056829452515, | |
| "rewards/margins": 0.07465855777263641, | |
| "rewards/rejected": 0.29694199562072754, | |
| "step": 1320 | |
| }, | |
| { | |
| "epoch": 0.9182704755329248, | |
| "grad_norm": 101.79618072509766, | |
| "learning_rate": 4.140786749482401e-08, | |
| "logits/chosen": -1.3558008670806885, | |
| "logits/rejected": -1.236149549484253, | |
| "logps/chosen": -407.06085205078125, | |
| "logps/rejected": -349.31304931640625, | |
| "loss": 0.6597, | |
| "rewards/accuracies": 0.637499988079071, | |
| "rewards/chosen": 0.34769049286842346, | |
| "rewards/margins": 0.1324794739484787, | |
| "rewards/rejected": 0.21521100401878357, | |
| "step": 1330 | |
| }, | |
| { | |
| "epoch": 0.925174764822646, | |
| "grad_norm": 81.09689331054688, | |
| "learning_rate": 3.795721187025535e-08, | |
| "logits/chosen": -1.5392885208129883, | |
| "logits/rejected": -1.367064118385315, | |
| "logps/chosen": -356.09393310546875, | |
| "logps/rejected": -319.89520263671875, | |
| "loss": 0.6443, | |
| "rewards/accuracies": 0.581250011920929, | |
| "rewards/chosen": 0.3483354449272156, | |
| "rewards/margins": 0.16505715250968933, | |
| "rewards/rejected": 0.18327829241752625, | |
| "step": 1340 | |
| }, | |
| { | |
| "epoch": 0.9320790541123674, | |
| "grad_norm": 100.64744567871094, | |
| "learning_rate": 3.450655624568668e-08, | |
| "logits/chosen": -1.4479949474334717, | |
| "logits/rejected": -1.1448233127593994, | |
| "logps/chosen": -375.9516296386719, | |
| "logps/rejected": -373.54180908203125, | |
| "loss": 0.627, | |
| "rewards/accuracies": 0.6812499761581421, | |
| "rewards/chosen": 0.3978195786476135, | |
| "rewards/margins": 0.2024550437927246, | |
| "rewards/rejected": 0.1953645646572113, | |
| "step": 1350 | |
| }, | |
| { | |
| "epoch": 0.9320790541123674, | |
| "eval_logits/chosen": null, | |
| "eval_logits/rejected": -1.1154719591140747, | |
| "eval_logps/chosen": -409.63140869140625, | |
| "eval_logps/rejected": -384.8327941894531, | |
| "eval_loss": 0.6454894542694092, | |
| "eval_rewards/accuracies": 0.6104114055633545, | |
| "eval_rewards/chosen": 0.3832691013813019, | |
| "eval_rewards/margins": 0.1727461814880371, | |
| "eval_rewards/rejected": 0.21052293479442596, | |
| "eval_runtime": 173.3889, | |
| "eval_samples_per_second": 6.869, | |
| "eval_steps_per_second": 6.869, | |
| "step": 1350 | |
| }, | |
| { | |
| "epoch": 0.9389833434020886, | |
| "grad_norm": 94.91573333740234, | |
| "learning_rate": 3.105590062111801e-08, | |
| "logits/chosen": -1.3383309841156006, | |
| "logits/rejected": -1.2590186595916748, | |
| "logps/chosen": -361.2745666503906, | |
| "logps/rejected": -342.0213623046875, | |
| "loss": 0.6599, | |
| "rewards/accuracies": 0.625, | |
| "rewards/chosen": 0.33325955271720886, | |
| "rewards/margins": 0.1604005992412567, | |
| "rewards/rejected": 0.17285892367362976, | |
| "step": 1360 | |
| }, | |
| { | |
| "epoch": 0.9458876326918098, | |
| "grad_norm": 87.90577697753906, | |
| "learning_rate": 2.7605244996549343e-08, | |
| "logits/chosen": -1.5077590942382812, | |
| "logits/rejected": -1.2809985876083374, | |
| "logps/chosen": -388.0267333984375, | |
| "logps/rejected": -360.5313720703125, | |
| "loss": 0.6284, | |
| "rewards/accuracies": 0.675000011920929, | |
| "rewards/chosen": 0.392368346452713, | |
| "rewards/margins": 0.18990589678287506, | |
| "rewards/rejected": 0.20246246457099915, | |
| "step": 1370 | |
| }, | |
| { | |
| "epoch": 0.9527919219815311, | |
| "grad_norm": 99.08484649658203, | |
| "learning_rate": 2.4154589371980675e-08, | |
| "logits/chosen": -1.5224372148513794, | |
| "logits/rejected": -1.38528311252594, | |
| "logps/chosen": -414.42901611328125, | |
| "logps/rejected": -400.0823059082031, | |
| "loss": 0.6683, | |
| "rewards/accuracies": 0.5687500238418579, | |
| "rewards/chosen": 0.3946588933467865, | |
| "rewards/margins": 0.11731584370136261, | |
| "rewards/rejected": 0.2773430645465851, | |
| "step": 1380 | |
| }, | |
| { | |
| "epoch": 0.9596962112712523, | |
| "grad_norm": 109.33934020996094, | |
| "learning_rate": 2.0703933747412006e-08, | |
| "logits/chosen": -1.316396951675415, | |
| "logits/rejected": -1.102836012840271, | |
| "logps/chosen": -387.43194580078125, | |
| "logps/rejected": -370.46539306640625, | |
| "loss": 0.6383, | |
| "rewards/accuracies": 0.6312500238418579, | |
| "rewards/chosen": 0.37366634607315063, | |
| "rewards/margins": 0.18556134402751923, | |
| "rewards/rejected": 0.18810497224330902, | |
| "step": 1390 | |
| }, | |
| { | |
| "epoch": 0.9666005005609735, | |
| "grad_norm": 113.74722290039062, | |
| "learning_rate": 1.725327812284334e-08, | |
| "logits/chosen": -1.435141682624817, | |
| "logits/rejected": null, | |
| "logps/chosen": -382.6386413574219, | |
| "logps/rejected": -358.0811767578125, | |
| "loss": 0.6845, | |
| "rewards/accuracies": 0.574999988079071, | |
| "rewards/chosen": 0.303692489862442, | |
| "rewards/margins": 0.08099554479122162, | |
| "rewards/rejected": 0.2226969450712204, | |
| "step": 1400 | |
| }, | |
| { | |
| "epoch": 0.9666005005609735, | |
| "eval_logits/chosen": null, | |
| "eval_logits/rejected": -1.1203211545944214, | |
| "eval_logps/chosen": -409.6943664550781, | |
| "eval_logps/rejected": -384.8702392578125, | |
| "eval_loss": 0.6463683247566223, | |
| "eval_rewards/accuracies": 0.6070529222488403, | |
| "eval_rewards/chosen": 0.3769714832305908, | |
| "eval_rewards/margins": 0.17019246518611908, | |
| "eval_rewards/rejected": 0.20677906274795532, | |
| "eval_runtime": 173.6978, | |
| "eval_samples_per_second": 6.857, | |
| "eval_steps_per_second": 6.857, | |
| "step": 1400 | |
| }, | |
| { | |
| "epoch": 0.9735047898506948, | |
| "grad_norm": 75.73210906982422, | |
| "learning_rate": 1.3802622498274672e-08, | |
| "logits/chosen": -1.4460961818695068, | |
| "logits/rejected": -1.2193866968154907, | |
| "logps/chosen": -385.35186767578125, | |
| "logps/rejected": -381.22784423828125, | |
| "loss": 0.6737, | |
| "rewards/accuracies": 0.5625, | |
| "rewards/chosen": 0.37897366285324097, | |
| "rewards/margins": 0.09797494113445282, | |
| "rewards/rejected": 0.28099876642227173, | |
| "step": 1410 | |
| }, | |
| { | |
| "epoch": 0.980409079140416, | |
| "grad_norm": 95.66902923583984, | |
| "learning_rate": 1.0351966873706003e-08, | |
| "logits/chosen": -1.525224208831787, | |
| "logits/rejected": -1.349152684211731, | |
| "logps/chosen": -374.0038146972656, | |
| "logps/rejected": -349.24755859375, | |
| "loss": 0.6468, | |
| "rewards/accuracies": 0.668749988079071, | |
| "rewards/chosen": 0.37688612937927246, | |
| "rewards/margins": 0.1724523901939392, | |
| "rewards/rejected": 0.20443375408649445, | |
| "step": 1420 | |
| }, | |
| { | |
| "epoch": 0.9873133684301372, | |
| "grad_norm": 84.95851135253906, | |
| "learning_rate": 6.901311249137336e-09, | |
| "logits/chosen": -1.6710094213485718, | |
| "logits/rejected": -1.3803582191467285, | |
| "logps/chosen": -413.56396484375, | |
| "logps/rejected": -392.18267822265625, | |
| "loss": 0.6433, | |
| "rewards/accuracies": 0.643750011920929, | |
| "rewards/chosen": 0.3816106915473938, | |
| "rewards/margins": 0.18318775296211243, | |
| "rewards/rejected": 0.19842293858528137, | |
| "step": 1430 | |
| }, | |
| { | |
| "epoch": 0.9942176577198585, | |
| "grad_norm": 91.50519561767578, | |
| "learning_rate": 3.450655624568668e-09, | |
| "logits/chosen": -1.3672393560409546, | |
| "logits/rejected": -1.2583658695220947, | |
| "logps/chosen": -423.9781799316406, | |
| "logps/rejected": -384.3447265625, | |
| "loss": 0.6292, | |
| "rewards/accuracies": 0.625, | |
| "rewards/chosen": 0.40440577268600464, | |
| "rewards/margins": 0.20975744724273682, | |
| "rewards/rejected": 0.1946483552455902, | |
| "step": 1440 | |
| } | |
| ], | |
| "logging_steps": 10, | |
| "max_steps": 1449, | |
| "num_input_tokens_seen": 0, | |
| "num_train_epochs": 1, | |
| "save_steps": 400, | |
| "stateful_callbacks": { | |
| "TrainerControl": { | |
| "args": { | |
| "should_epoch_stop": false, | |
| "should_evaluate": false, | |
| "should_log": false, | |
| "should_save": true, | |
| "should_training_stop": true | |
| }, | |
| "attributes": {} | |
| } | |
| }, | |
| "total_flos": 0.0, | |
| "train_batch_size": 1, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |