{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.6368159203980099,
  "eval_steps": 500,
  "global_step": 1600,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03980099502487562,
      "grad_norm": 1.5459058284759521,
      "learning_rate": 4.980474401576887e-07,
      "logits/chosen": -1.4838274717330933,
      "logits/rejected": -1.5693057775497437,
      "logps/chosen": -190.30685424804688,
      "logps/rejected": -190.12179565429688,
      "loss": 0.7107,
      "rewards/accuracies": 0.49312499165534973,
      "rewards/chosen": -0.8337415456771851,
      "rewards/margins": 0.0017802910879254341,
      "rewards/rejected": -0.8355217576026917,
      "step": 100
    },
    {
      "epoch": 0.07960199004975124,
      "grad_norm": 1.247958779335022,
      "learning_rate": 4.922202605502572e-07,
      "logits/chosen": -1.4577226638793945,
      "logits/rejected": -1.544972538948059,
      "logps/chosen": -191.1748504638672,
      "logps/rejected": -191.21063232421875,
      "loss": 0.7063,
      "rewards/accuracies": 0.5017187595367432,
      "rewards/chosen": -0.7915948629379272,
      "rewards/margins": 0.004952423740178347,
      "rewards/rejected": -0.7965472340583801,
      "step": 200
    },
    {
      "epoch": 0.11940298507462686,
      "grad_norm": 1.1431488990783691,
      "learning_rate": 4.82609484512869e-07,
      "logits/chosen": -1.4753738641738892,
      "logits/rejected": -1.5457732677459717,
      "logps/chosen": -189.42088317871094,
      "logps/rejected": -190.7891082763672,
      "loss": 0.7009,
      "rewards/accuracies": 0.5146874785423279,
      "rewards/chosen": -0.7856764197349548,
      "rewards/margins": 0.013686330057680607,
      "rewards/rejected": -0.7993627190589905,
      "step": 300
    },
    {
      "epoch": 0.15920398009950248,
      "grad_norm": 1.1773804426193237,
      "learning_rate": 4.6936523696827614e-07,
      "logits/chosen": -1.4836928844451904,
      "logits/rejected": -1.5391887426376343,
      "logps/chosen": -191.47879028320312,
      "logps/rejected": -192.72312927246094,
      "loss": 0.7031,
      "rewards/accuracies": 0.4935937523841858,
      "rewards/chosen": -0.7633911967277527,
      "rewards/margins": 0.00844556000083685,
      "rewards/rejected": -0.7718366384506226,
      "step": 400
    },
    {
      "epoch": 0.19900497512437812,
      "grad_norm": 1.1332217454910278,
      "learning_rate": 4.5269439940365644e-07,
      "logits/chosen": -1.5031734704971313,
      "logits/rejected": -1.5771981477737427,
      "logps/chosen": -188.72503662109375,
      "logps/rejected": -188.9233856201172,
      "loss": 0.6982,
      "rewards/accuracies": 0.5159375071525574,
      "rewards/chosen": -0.7517531514167786,
      "rewards/margins": 0.017128920182585716,
      "rewards/rejected": -0.7688820362091064,
      "step": 500
    },
    {
      "epoch": 0.23880597014925373,
      "grad_norm": 1.1783969402313232,
      "learning_rate": 4.328573782827409e-07,
      "logits/chosen": -1.4775351285934448,
      "logits/rejected": -1.5578263998031616,
      "logps/chosen": -187.82449340820312,
      "logps/rejected": -186.08935546875,
      "loss": 0.6956,
      "rewards/accuracies": 0.5301562547683716,
      "rewards/chosen": -0.7211830615997314,
      "rewards/margins": 0.02292461320757866,
      "rewards/rejected": -0.744107723236084,
      "step": 600
    },
    {
      "epoch": 0.27860696517412936,
      "grad_norm": 1.2275896072387695,
      "learning_rate": 4.1016403737218373e-07,
      "logits/chosen": -1.4353595972061157,
      "logits/rejected": -1.5325896739959717,
      "logps/chosen": -190.53240966796875,
      "logps/rejected": -186.2373504638672,
      "loss": 0.6918,
      "rewards/accuracies": 0.5306249856948853,
      "rewards/chosen": -0.7291114926338196,
      "rewards/margins": 0.02977893128991127,
      "rewards/rejected": -0.7588903903961182,
      "step": 700
    },
    {
      "epoch": 0.31840796019900497,
      "grad_norm": 1.2193249464035034,
      "learning_rate": 3.849688575211836e-07,
      "logits/chosen": -1.4531009197235107,
      "logits/rejected": -1.528568148612976,
      "logps/chosen": -193.5086669921875,
      "logps/rejected": -190.48219299316406,
      "loss": 0.6947,
      "rewards/accuracies": 0.5360937714576721,
      "rewards/chosen": -0.7271183729171753,
      "rewards/margins": 0.027006981894373894,
      "rewards/rejected": -0.7541253566741943,
      "step": 800
    },
    {
      "epoch": 0.3582089552238806,
      "grad_norm": 1.3636894226074219,
      "learning_rate": 3.576653995009154e-07,
      "logits/chosen": -1.4581520557403564,
      "logits/rejected": -1.5382471084594727,
      "logps/chosen": -186.9196319580078,
      "logps/rejected": -187.58917236328125,
      "loss": 0.6881,
      "rewards/accuracies": 0.538281261920929,
      "rewards/chosen": -0.7291906476020813,
      "rewards/margins": 0.039425306022167206,
      "rewards/rejected": -0.7686160802841187,
      "step": 900
    },
    {
      "epoch": 0.39800995024875624,
      "grad_norm": 1.2546725273132324,
      "learning_rate": 3.286801563968721e-07,
      "logits/chosen": -1.4330791234970093,
      "logits/rejected": -1.5031362771987915,
      "logps/chosen": -190.80686950683594,
      "logps/rejected": -189.16189575195312,
      "loss": 0.692,
      "rewards/accuracies": 0.5220312476158142,
      "rewards/chosen": -0.7390533685684204,
      "rewards/margins": 0.03137853741645813,
      "rewards/rejected": -0.7704318165779114,
      "step": 1000
    },
    {
      "epoch": 0.43781094527363185,
      "grad_norm": 1.1531291007995605,
      "learning_rate": 2.9846589158269034e-07,
      "logits/chosen": -1.4307806491851807,
      "logits/rejected": -1.5092757940292358,
      "logps/chosen": -193.00648498535156,
      "logps/rejected": -189.84764099121094,
      "loss": 0.6892,
      "rewards/accuracies": 0.5384374856948853,
      "rewards/chosen": -0.7566314935684204,
      "rewards/margins": 0.03836291283369064,
      "rewards/rejected": -0.7949943542480469,
      "step": 1100
    },
    {
      "epoch": 0.47761194029850745,
      "grad_norm": 1.5831681489944458,
      "learning_rate": 2.674945663394993e-07,
      "logits/chosen": -1.430738925933838,
      "logits/rejected": -1.5082927942276,
      "logps/chosen": -189.89476013183594,
      "logps/rejected": -186.1005096435547,
      "loss": 0.6903,
      "rewards/accuracies": 0.5414062738418579,
      "rewards/chosen": -0.754692018032074,
      "rewards/margins": 0.03536057472229004,
      "rewards/rejected": -0.790052592754364,
      "step": 1200
    },
    {
      "epoch": 0.5174129353233831,
      "grad_norm": 1.3418159484863281,
      "learning_rate": 2.3624996759476285e-07,
      "logits/chosen": -1.4191877841949463,
      "logits/rejected": -1.501223087310791,
      "logps/chosen": -190.763671875,
      "logps/rejected": -188.54310607910156,
      "loss": 0.6848,
      "rewards/accuracies": 0.5510937571525574,
      "rewards/chosen": -0.7421095967292786,
      "rewards/margins": 0.05018935725092888,
      "rewards/rejected": -0.7922989726066589,
      "step": 1300
    },
    {
      "epoch": 0.5572139303482587,
      "grad_norm": 1.0886973142623901,
      "learning_rate": 2.0522015093886614e-07,
      "logits/chosen": -1.4274046421051025,
      "logits/rejected": -1.511489748954773,
      "logps/chosen": -190.62181091308594,
      "logps/rejected": -189.4832000732422,
      "loss": 0.6884,
      "rewards/accuracies": 0.5412499904632568,
      "rewards/chosen": -0.742114782333374,
      "rewards/margins": 0.042963698506355286,
      "rewards/rejected": -0.7850785255432129,
      "step": 1400
    },
    {
      "epoch": 0.5970149253731343,
      "grad_norm": 1.2839570045471191,
      "learning_rate": 1.7488981696314154e-07,
      "logits/chosen": -1.4310470819473267,
      "logits/rejected": -1.504390001296997,
      "logps/chosen": -190.2706298828125,
      "logps/rejected": -188.0977020263672,
      "loss": 0.6868,
      "rewards/accuracies": 0.547656238079071,
      "rewards/chosen": -0.7403038144111633,
      "rewards/margins": 0.04620310664176941,
      "rewards/rejected": -0.7865069508552551,
      "step": 1500
    },
    {
      "epoch": 0.6368159203980099,
      "grad_norm": 1.2762988805770874,
      "learning_rate": 1.4573274000458839e-07,
      "logits/chosen": -1.4215110540390015,
      "logits/rejected": -1.5031652450561523,
      "logps/chosen": -189.7293701171875,
      "logps/rejected": -189.09996032714844,
      "loss": 0.6835,
      "rewards/accuracies": 0.5501562356948853,
      "rewards/chosen": -0.7511222958564758,
      "rewards/margins": 0.05315619334578514,
      "rewards/rejected": -0.8042784929275513,
      "step": 1600
    }
  ],
  "logging_steps": 100,
  "max_steps": 2512,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}