{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.7960199004975125,
  "eval_steps": 500,
  "global_step": 2000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03980099502487562,
      "grad_norm": 9.524229049682617,
      "learning_rate": 4.980474401576887e-07,
      "logits/chosen": -0.1925463080406189,
      "logits/rejected": -0.15873177349567413,
      "logps/chosen": -239.6322021484375,
      "logps/rejected": -242.57594299316406,
      "loss": 0.8819,
      "rewards/accuracies": 0.49281251430511475,
      "rewards/chosen": -1.3072433471679688,
      "rewards/margins": 0.008615786209702492,
      "rewards/rejected": -1.315859079360962,
      "step": 100
    },
    {
      "epoch": 0.07960199004975124,
      "grad_norm": 7.333236217498779,
      "learning_rate": 4.922202605502572e-07,
      "logits/chosen": 0.046361636370420456,
      "logits/rejected": 0.08236894011497498,
      "logps/chosen": -231.10765075683594,
      "logps/rejected": -233.24984741210938,
      "loss": 0.8361,
      "rewards/accuracies": 0.48875001072883606,
      "rewards/chosen": -0.9621695876121521,
      "rewards/margins": -0.021255964413285255,
      "rewards/rejected": -0.9409136772155762,
      "step": 200
    },
    {
      "epoch": 0.11940298507462686,
      "grad_norm": 5.164106369018555,
      "learning_rate": 4.82609484512869e-07,
      "logits/chosen": 0.0250965878367424,
      "logits/rejected": 0.054141998291015625,
      "logps/chosen": -229.79965209960938,
      "logps/rejected": -231.60562133789062,
      "loss": 0.8114,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": -0.8564431667327881,
      "rewards/margins": -0.01034404058009386,
      "rewards/rejected": -0.8460990786552429,
      "step": 300
    },
    {
      "epoch": 0.15920398009950248,
      "grad_norm": 5.328795909881592,
      "learning_rate": 4.6936523696827614e-07,
      "logits/chosen": 0.039505548775196075,
      "logits/rejected": 0.05482972040772438,
      "logps/chosen": -233.69537353515625,
      "logps/rejected": -236.25210571289062,
      "loss": 0.7907,
      "rewards/accuracies": 0.49406251311302185,
      "rewards/chosen": -0.8005841970443726,
      "rewards/margins": 0.00799934659153223,
      "rewards/rejected": -0.8085834980010986,
      "step": 400
    },
    {
      "epoch": 0.19900497512437812,
      "grad_norm": 8.521681785583496,
      "learning_rate": 4.5269439940365644e-07,
      "logits/chosen": 0.11151163280010223,
      "logits/rejected": 0.1460810899734497,
      "logps/chosen": -227.23020935058594,
      "logps/rejected": -230.19451904296875,
      "loss": 0.7849,
      "rewards/accuracies": 0.4946874976158142,
      "rewards/chosen": -0.7644023299217224,
      "rewards/margins": -0.004693666007369757,
      "rewards/rejected": -0.7597086429595947,
      "step": 500
    },
    {
      "epoch": 0.23880597014925373,
      "grad_norm": 7.143113136291504,
      "learning_rate": 4.328573782827409e-07,
      "logits/chosen": 0.09673156589269638,
      "logits/rejected": 0.14390873908996582,
      "logps/chosen": -225.45919799804688,
      "logps/rejected": -225.44052124023438,
      "loss": 0.77,
      "rewards/accuracies": 0.5045312643051147,
      "rewards/chosen": -0.7256423830986023,
      "rewards/margins": 0.004257932770997286,
      "rewards/rejected": -0.7299003005027771,
      "step": 600
    },
    {
      "epoch": 0.27860696517412936,
      "grad_norm": 6.525391101837158,
      "learning_rate": 4.1016403737218373e-07,
      "logits/chosen": 0.05486857891082764,
      "logits/rejected": 0.09620587527751923,
      "logps/chosen": -228.3573760986328,
      "logps/rejected": -223.32395935058594,
      "loss": 0.7696,
      "rewards/accuracies": 0.5034375190734863,
      "rewards/chosen": -0.7243590354919434,
      "rewards/margins": 0.0008349200943484902,
      "rewards/rejected": -0.7251940369606018,
      "step": 700
    },
    {
      "epoch": 0.31840796019900497,
      "grad_norm": 8.071100234985352,
      "learning_rate": 3.849688575211836e-07,
      "logits/chosen": 0.004134657327085733,
      "logits/rejected": 0.02180260606110096,
      "logps/chosen": -234.44007873535156,
      "logps/rejected": -232.92913818359375,
      "loss": 0.7682,
      "rewards/accuracies": 0.5073437690734863,
      "rewards/chosen": -0.7022367119789124,
      "rewards/margins": 0.0076367598958313465,
      "rewards/rejected": -0.7098734974861145,
      "step": 800
    },
    {
      "epoch": 0.3582089552238806,
      "grad_norm": 9.765380859375,
      "learning_rate": 3.576653995009154e-07,
      "logits/chosen": 0.018308693543076515,
      "logits/rejected": 0.04190211370587349,
      "logps/chosen": -224.4905242919922,
      "logps/rejected": -228.26222229003906,
      "loss": 0.7467,
      "rewards/accuracies": 0.5228124856948853,
      "rewards/chosen": -0.7309367656707764,
      "rewards/margins": 0.04116936773061752,
      "rewards/rejected": -0.7721061110496521,
      "step": 900
    },
    {
      "epoch": 0.39800995024875624,
      "grad_norm": 6.442622184753418,
      "learning_rate": 3.286801563968721e-07,
      "logits/chosen": -0.018274417147040367,
      "logits/rejected": -0.015018883161246777,
      "logps/chosen": -230.45724487304688,
      "logps/rejected": -229.37107849121094,
      "loss": 0.7517,
      "rewards/accuracies": 0.5123437643051147,
      "rewards/chosen": -0.738405704498291,
      "rewards/margins": 0.026958582922816277,
      "rewards/rejected": -0.765364408493042,
      "step": 1000
    },
    {
      "epoch": 0.43781094527363185,
      "grad_norm": 3.437434434890747,
      "learning_rate": 2.9846589158269034e-07,
      "logits/chosen": -0.03212662786245346,
      "logits/rejected": -0.016695672646164894,
      "logps/chosen": -233.62451171875,
      "logps/rejected": -230.5555419921875,
      "loss": 0.7495,
      "rewards/accuracies": 0.5182812213897705,
      "rewards/chosen": -0.6797711253166199,
      "rewards/margins": 0.029431035742163658,
      "rewards/rejected": -0.7092021107673645,
      "step": 1100
    },
    {
      "epoch": 0.47761194029850745,
      "grad_norm": 9.21783447265625,
      "learning_rate": 2.674945663394993e-07,
      "logits/chosen": -0.022503716871142387,
      "logits/rejected": -0.0002497506211511791,
      "logps/chosen": -224.26544189453125,
      "logps/rejected": -220.6869354248047,
      "loss": 0.7452,
      "rewards/accuracies": 0.5198437571525574,
      "rewards/chosen": -0.6790177822113037,
      "rewards/margins": 0.027901820838451385,
      "rewards/rejected": -0.7069195508956909,
      "step": 1200
    },
    {
      "epoch": 0.5174129353233831,
      "grad_norm": 5.184940814971924,
      "learning_rate": 2.3624996759476285e-07,
      "logits/chosen": -0.02832232229411602,
      "logits/rejected": 0.001360323396511376,
      "logps/chosen": -225.9873504638672,
      "logps/rejected": -224.61253356933594,
      "loss": 0.7448,
      "rewards/accuracies": 0.5210937261581421,
      "rewards/chosen": -0.6495236158370972,
      "rewards/margins": 0.03301383554935455,
      "rewards/rejected": -0.6825373768806458,
      "step": 1300
    },
    {
      "epoch": 0.5572139303482587,
      "grad_norm": 4.051429271697998,
      "learning_rate": 2.0522015093886614e-07,
      "logits/chosen": -0.07805901765823364,
      "logits/rejected": -0.054497309029102325,
      "logps/chosen": -227.91299438476562,
      "logps/rejected": -231.43540954589844,
      "loss": 0.7352,
      "rewards/accuracies": 0.5206249952316284,
      "rewards/chosen": -0.6832866072654724,
      "rewards/margins": 0.05871019884943962,
      "rewards/rejected": -0.7419967651367188,
      "step": 1400
    },
    {
      "epoch": 0.5970149253731343,
      "grad_norm": 4.9932379722595215,
      "learning_rate": 1.7488981696314154e-07,
      "logits/chosen": -0.07948292791843414,
      "logits/rejected": -0.04982204735279083,
      "logps/chosen": -226.94752502441406,
      "logps/rejected": -225.03204345703125,
      "loss": 0.7349,
      "rewards/accuracies": 0.5214062333106995,
      "rewards/chosen": -0.6902438998222351,
      "rewards/margins": 0.03936518728733063,
      "rewards/rejected": -0.7296090722084045,
      "step": 1500
    },
    {
      "epoch": 0.6368159203980099,
      "grad_norm": 8.183513641357422,
      "learning_rate": 1.4573274000458839e-07,
      "logits/chosen": -0.08282151073217392,
      "logits/rejected": -0.06661933660507202,
      "logps/chosen": -227.28172302246094,
      "logps/rejected": -227.94952392578125,
      "loss": 0.7374,
      "rewards/accuracies": 0.5310937762260437,
      "rewards/chosen": -0.6844578385353088,
      "rewards/margins": 0.03974698856472969,
      "rewards/rejected": -0.7242047190666199,
      "step": 1600
    },
    {
      "epoch": 0.6766169154228856,
      "grad_norm": 5.1135573387146,
      "learning_rate": 1.1820436756391414e-07,
      "logits/chosen": -0.1615159809589386,
      "logits/rejected": -0.13768981397151947,
      "logps/chosen": -230.94302368164062,
      "logps/rejected": -229.08021545410156,
      "loss": 0.7359,
      "rewards/accuracies": 0.5257812738418579,
      "rewards/chosen": -0.654849648475647,
      "rewards/margins": 0.04666764661669731,
      "rewards/rejected": -0.7015172839164734,
      "step": 1700
    },
    {
      "epoch": 0.7164179104477612,
      "grad_norm": 4.888497829437256,
      "learning_rate": 9.273470599753375e-08,
      "logits/chosen": -0.07336300611495972,
      "logits/rejected": -0.0592733770608902,
      "logps/chosen": -223.34539794921875,
      "logps/rejected": -226.36752319335938,
      "loss": 0.7341,
      "rewards/accuracies": 0.5315625071525574,
      "rewards/chosen": -0.6613838076591492,
      "rewards/margins": 0.04804745316505432,
      "rewards/rejected": -0.7094312310218811,
      "step": 1800
    },
    {
      "epoch": 0.7562189054726368,
      "grad_norm": 6.096445083618164,
      "learning_rate": 6.972160361242119e-08,
      "logits/chosen": -0.11273051053285599,
      "logits/rejected": -0.09009736031293869,
      "logps/chosen": -223.08956909179688,
      "logps/rejected": -226.54151916503906,
      "loss": 0.7362,
      "rewards/accuracies": 0.5253124833106995,
      "rewards/chosen": -0.6732974052429199,
      "rewards/margins": 0.03796848654747009,
      "rewards/rejected": -0.7112659215927124,
      "step": 1900
    },
    {
      "epoch": 0.7960199004975125,
      "grad_norm": 4.404871940612793,
      "learning_rate": 4.952453608509e-08,
      "logits/chosen": -0.12635847926139832,
      "logits/rejected": -0.09713621437549591,
      "logps/chosen": -230.83767700195312,
      "logps/rejected": -230.70140075683594,
      "loss": 0.7348,
      "rewards/accuracies": 0.5314062237739563,
      "rewards/chosen": -0.6768426299095154,
      "rewards/margins": 0.04782358556985855,
      "rewards/rejected": -0.7246662378311157,
      "step": 2000
    }
  ],
  "logging_steps": 100,
  "max_steps": 2512,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}