{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9552238805970149,
  "eval_steps": 500,
  "global_step": 2400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03980099502487562,
      "grad_norm": 1.5459058284759521,
      "learning_rate": 4.980474401576887e-07,
      "logits/chosen": -1.4838274717330933,
      "logits/rejected": -1.5693057775497437,
      "logps/chosen": -190.30685424804688,
      "logps/rejected": -190.12179565429688,
      "loss": 0.7107,
      "rewards/accuracies": 0.49312499165534973,
      "rewards/chosen": -0.8337415456771851,
      "rewards/margins": 0.0017802910879254341,
      "rewards/rejected": -0.8355217576026917,
      "step": 100
    },
    {
      "epoch": 0.07960199004975124,
      "grad_norm": 1.247958779335022,
      "learning_rate": 4.922202605502572e-07,
      "logits/chosen": -1.4577226638793945,
      "logits/rejected": -1.544972538948059,
      "logps/chosen": -191.1748504638672,
      "logps/rejected": -191.21063232421875,
      "loss": 0.7063,
      "rewards/accuracies": 0.5017187595367432,
      "rewards/chosen": -0.7915948629379272,
      "rewards/margins": 0.004952423740178347,
      "rewards/rejected": -0.7965472340583801,
      "step": 200
    },
    {
      "epoch": 0.11940298507462686,
      "grad_norm": 1.1431488990783691,
      "learning_rate": 4.82609484512869e-07,
      "logits/chosen": -1.4753738641738892,
      "logits/rejected": -1.5457732677459717,
      "logps/chosen": -189.42088317871094,
      "logps/rejected": -190.7891082763672,
      "loss": 0.7009,
      "rewards/accuracies": 0.5146874785423279,
      "rewards/chosen": -0.7856764197349548,
      "rewards/margins": 0.013686330057680607,
      "rewards/rejected": -0.7993627190589905,
      "step": 300
    },
    {
      "epoch": 0.15920398009950248,
      "grad_norm": 1.1773804426193237,
      "learning_rate": 4.6936523696827614e-07,
      "logits/chosen": -1.4836928844451904,
      "logits/rejected": -1.5391887426376343,
      "logps/chosen": -191.47879028320312,
      "logps/rejected": -192.72312927246094,
      "loss": 0.7031,
      "rewards/accuracies": 0.4935937523841858,
      "rewards/chosen": -0.7633911967277527,
      "rewards/margins": 0.00844556000083685,
      "rewards/rejected": -0.7718366384506226,
      "step": 400
    },
    {
      "epoch": 0.19900497512437812,
      "grad_norm": 1.1332217454910278,
      "learning_rate": 4.5269439940365644e-07,
      "logits/chosen": -1.5031734704971313,
      "logits/rejected": -1.5771981477737427,
      "logps/chosen": -188.72503662109375,
      "logps/rejected": -188.9233856201172,
      "loss": 0.6982,
      "rewards/accuracies": 0.5159375071525574,
      "rewards/chosen": -0.7517531514167786,
      "rewards/margins": 0.017128920182585716,
      "rewards/rejected": -0.7688820362091064,
      "step": 500
    },
    {
      "epoch": 0.23880597014925373,
      "grad_norm": 1.1783969402313232,
      "learning_rate": 4.328573782827409e-07,
      "logits/chosen": -1.4775351285934448,
      "logits/rejected": -1.5578263998031616,
      "logps/chosen": -187.82449340820312,
      "logps/rejected": -186.08935546875,
      "loss": 0.6956,
      "rewards/accuracies": 0.5301562547683716,
      "rewards/chosen": -0.7211830615997314,
      "rewards/margins": 0.02292461320757866,
      "rewards/rejected": -0.744107723236084,
      "step": 600
    },
    {
      "epoch": 0.27860696517412936,
      "grad_norm": 1.2275896072387695,
      "learning_rate": 4.1016403737218373e-07,
      "logits/chosen": -1.4353595972061157,
      "logits/rejected": -1.5325896739959717,
      "logps/chosen": -190.53240966796875,
      "logps/rejected": -186.2373504638672,
      "loss": 0.6918,
      "rewards/accuracies": 0.5306249856948853,
      "rewards/chosen": -0.7291114926338196,
      "rewards/margins": 0.02977893128991127,
      "rewards/rejected": -0.7588903903961182,
      "step": 700
    },
    {
      "epoch": 0.31840796019900497,
      "grad_norm": 1.2193249464035034,
      "learning_rate": 3.849688575211836e-07,
      "logits/chosen": -1.4531009197235107,
      "logits/rejected": -1.528568148612976,
      "logps/chosen": -193.5086669921875,
      "logps/rejected": -190.48219299316406,
      "loss": 0.6947,
      "rewards/accuracies": 0.5360937714576721,
      "rewards/chosen": -0.7271183729171753,
      "rewards/margins": 0.027006981894373894,
      "rewards/rejected": -0.7541253566741943,
      "step": 800
    },
    {
      "epoch": 0.3582089552238806,
      "grad_norm": 1.3636894226074219,
      "learning_rate": 3.576653995009154e-07,
      "logits/chosen": -1.4581520557403564,
      "logits/rejected": -1.5382471084594727,
      "logps/chosen": -186.9196319580078,
      "logps/rejected": -187.58917236328125,
      "loss": 0.6881,
      "rewards/accuracies": 0.538281261920929,
      "rewards/chosen": -0.7291906476020813,
      "rewards/margins": 0.039425306022167206,
      "rewards/rejected": -0.7686160802841187,
      "step": 900
    },
    {
      "epoch": 0.39800995024875624,
      "grad_norm": 1.2546725273132324,
      "learning_rate": 3.286801563968721e-07,
      "logits/chosen": -1.4330791234970093,
      "logits/rejected": -1.5031362771987915,
      "logps/chosen": -190.80686950683594,
      "logps/rejected": -189.16189575195312,
      "loss": 0.692,
      "rewards/accuracies": 0.5220312476158142,
      "rewards/chosen": -0.7390533685684204,
      "rewards/margins": 0.03137853741645813,
      "rewards/rejected": -0.7704318165779114,
      "step": 1000
    },
    {
      "epoch": 0.43781094527363185,
      "grad_norm": 1.1531291007995605,
      "learning_rate": 2.9846589158269034e-07,
      "logits/chosen": -1.4307806491851807,
      "logits/rejected": -1.5092757940292358,
      "logps/chosen": -193.00648498535156,
      "logps/rejected": -189.84764099121094,
      "loss": 0.6892,
      "rewards/accuracies": 0.5384374856948853,
      "rewards/chosen": -0.7566314935684204,
      "rewards/margins": 0.03836291283369064,
      "rewards/rejected": -0.7949943542480469,
      "step": 1100
    },
    {
      "epoch": 0.47761194029850745,
      "grad_norm": 1.5831681489944458,
      "learning_rate": 2.674945663394993e-07,
      "logits/chosen": -1.430738925933838,
      "logits/rejected": -1.5082927942276,
      "logps/chosen": -189.89476013183594,
      "logps/rejected": -186.1005096435547,
      "loss": 0.6903,
      "rewards/accuracies": 0.5414062738418579,
      "rewards/chosen": -0.754692018032074,
      "rewards/margins": 0.03536057472229004,
      "rewards/rejected": -0.790052592754364,
      "step": 1200
    },
    {
      "epoch": 0.5174129353233831,
      "grad_norm": 1.3418159484863281,
      "learning_rate": 2.3624996759476285e-07,
      "logits/chosen": -1.4191877841949463,
      "logits/rejected": -1.501223087310791,
      "logps/chosen": -190.763671875,
      "logps/rejected": -188.54310607910156,
      "loss": 0.6848,
      "rewards/accuracies": 0.5510937571525574,
      "rewards/chosen": -0.7421095967292786,
      "rewards/margins": 0.05018935725092888,
      "rewards/rejected": -0.7922989726066589,
      "step": 1300
    },
    {
      "epoch": 0.5572139303482587,
      "grad_norm": 1.0886973142623901,
      "learning_rate": 2.0522015093886614e-07,
      "logits/chosen": -1.4274046421051025,
      "logits/rejected": -1.511489748954773,
      "logps/chosen": -190.62181091308594,
      "logps/rejected": -189.4832000732422,
      "loss": 0.6884,
      "rewards/accuracies": 0.5412499904632568,
      "rewards/chosen": -0.742114782333374,
      "rewards/margins": 0.042963698506355286,
      "rewards/rejected": -0.7850785255432129,
      "step": 1400
    },
    {
      "epoch": 0.5970149253731343,
      "grad_norm": 1.2839570045471191,
      "learning_rate": 1.7488981696314154e-07,
      "logits/chosen": -1.4310470819473267,
      "logits/rejected": -1.504390001296997,
      "logps/chosen": -190.2706298828125,
      "logps/rejected": -188.0977020263672,
      "loss": 0.6868,
      "rewards/accuracies": 0.547656238079071,
      "rewards/chosen": -0.7403038144111633,
      "rewards/margins": 0.04620310664176941,
      "rewards/rejected": -0.7865069508552551,
      "step": 1500
    },
    {
      "epoch": 0.6368159203980099,
      "grad_norm": 1.2762988805770874,
      "learning_rate": 1.4573274000458839e-07,
      "logits/chosen": -1.4215110540390015,
      "logits/rejected": -1.5031652450561523,
      "logps/chosen": -189.7293701171875,
      "logps/rejected": -189.09996032714844,
      "loss": 0.6835,
      "rewards/accuracies": 0.5501562356948853,
      "rewards/chosen": -0.7511222958564758,
      "rewards/margins": 0.05315619334578514,
      "rewards/rejected": -0.8042784929275513,
      "step": 1600
    },
    {
      "epoch": 0.6766169154228856,
      "grad_norm": 1.517782211303711,
      "learning_rate": 1.1820436756391414e-07,
      "logits/chosen": -1.385495901107788,
      "logits/rejected": -1.4789422750473022,
      "logps/chosen": -193.28277587890625,
      "logps/rejected": -189.7715606689453,
      "loss": 0.6848,
      "rewards/accuracies": 0.5560937523841858,
      "rewards/chosen": -0.7423578500747681,
      "rewards/margins": 0.052127428352832794,
      "rewards/rejected": -0.794485330581665,
      "step": 1700
    },
    {
      "epoch": 0.7164179104477612,
      "grad_norm": 1.2326488494873047,
      "learning_rate": 9.273470599753375e-08,
      "logits/chosen": -1.4209693670272827,
      "logits/rejected": -1.491072654724121,
      "logps/chosen": -188.58055114746094,
      "logps/rejected": -188.7568359375,
      "loss": 0.6834,
      "rewards/accuracies": 0.5548437237739563,
      "rewards/chosen": -0.7456573247909546,
      "rewards/margins": 0.05509215220808983,
      "rewards/rejected": -0.8007495403289795,
      "step": 1800
    },
    {
      "epoch": 0.7562189054726368,
      "grad_norm": 1.1635903120040894,
      "learning_rate": 6.972160361242119e-08,
      "logits/chosen": -1.4076248407363892,
      "logits/rejected": -1.4701765775680542,
      "logps/chosen": -187.25048828125,
      "logps/rejected": -188.06027221679688,
      "loss": 0.6849,
      "rewards/accuracies": 0.5457812547683716,
      "rewards/chosen": -0.7482488751411438,
      "rewards/margins": 0.050881754606962204,
      "rewards/rejected": -0.7991305589675903,
      "step": 1900
    },
    {
      "epoch": 0.7960199004975125,
      "grad_norm": 1.463752031326294,
      "learning_rate": 4.952453608509e-08,
      "logits/chosen": -1.4001586437225342,
      "logits/rejected": -1.4962129592895508,
      "logps/chosen": -192.7788848876953,
      "logps/rejected": -191.05316162109375,
      "loss": 0.6877,
      "rewards/accuracies": 0.5378124713897705,
      "rewards/chosen": -0.7623865008354187,
      "rewards/margins": 0.047943364828825,
      "rewards/rejected": -0.8103299736976624,
      "step": 2000
    },
    {
      "epoch": 0.835820895522388,
      "grad_norm": 1.200461506843567,
      "learning_rate": 3.2458991279430717e-08,
      "logits/chosen": -1.4338061809539795,
      "logits/rejected": -1.492598295211792,
      "logps/chosen": -187.86001586914062,
      "logps/rejected": -190.3682403564453,
      "loss": 0.6804,
      "rewards/accuracies": 0.5598437786102295,
      "rewards/chosen": -0.7565799951553345,
      "rewards/margins": 0.06183221936225891,
      "rewards/rejected": -0.818412184715271,
      "step": 2100
    },
    {
      "epoch": 0.8756218905472637,
      "grad_norm": 1.30251145362854,
      "learning_rate": 1.8791541175240787e-08,
      "logits/chosen": -1.4070285558700562,
      "logits/rejected": -1.487685203552246,
      "logps/chosen": -189.3640594482422,
      "logps/rejected": -188.98089599609375,
      "loss": 0.6791,
      "rewards/accuracies": 0.5614062547683716,
      "rewards/chosen": -0.7591237425804138,
      "rewards/margins": 0.06378559023141861,
      "rewards/rejected": -0.822909414768219,
      "step": 2200
    },
    {
      "epoch": 0.9154228855721394,
      "grad_norm": 1.1758041381835938,
      "learning_rate": 8.735677886282183e-09,
      "logits/chosen": -1.4267709255218506,
      "logits/rejected": -1.4703466892242432,
      "logps/chosen": -188.0686492919922,
      "logps/rejected": -192.47093200683594,
      "loss": 0.6804,
      "rewards/accuracies": 0.553906261920929,
      "rewards/chosen": -0.7582595348358154,
      "rewards/margins": 0.06273117661476135,
      "rewards/rejected": -0.8209907412528992,
      "step": 2300
    },
    {
      "epoch": 0.9552238805970149,
      "grad_norm": 1.3671549558639526,
      "learning_rate": 2.4484788112571488e-09,
      "logits/chosen": -1.3968582153320312,
      "logits/rejected": -1.4773013591766357,
      "logps/chosen": -190.7488250732422,
      "logps/rejected": -189.8838348388672,
      "loss": 0.6805,
      "rewards/accuracies": 0.5579687356948853,
      "rewards/chosen": -0.756488025188446,
      "rewards/margins": 0.061892423778772354,
      "rewards/rejected": -0.8183805346488953,
      "step": 2400
    }
  ],
  "logging_steps": 100,
  "max_steps": 2512,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}