{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9971486761710794,
  "eval_steps": 100,
  "global_step": 153,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 3.125e-07,
      "logits/chosen": -2.8404829502105713,
      "logits/rejected": -2.7363457679748535,
      "logps/chosen": -137.61058044433594,
      "logps/rejected": -117.74949645996094,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.07,
      "learning_rate": 3.125e-06,
      "logits/chosen": -2.7154855728149414,
      "logits/rejected": -2.6928329467773438,
      "logps/chosen": -123.34764099121094,
      "logps/rejected": -126.27674865722656,
      "loss": 0.693,
      "rewards/accuracies": 0.4895833432674408,
      "rewards/chosen": 0.0009532907279208302,
      "rewards/margins": 0.00024687068071216345,
      "rewards/rejected": 0.0007064202218316495,
      "step": 10
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.989490450759331e-06,
      "logits/chosen": -2.775507688522339,
      "logits/rejected": -2.7605175971984863,
      "logps/chosen": -131.35130310058594,
      "logps/rejected": -129.89755249023438,
      "loss": 0.6927,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": 0.012469060719013214,
      "rewards/margins": 0.00148476823233068,
      "rewards/rejected": 0.010984293185174465,
      "step": 20
    },
    {
      "epoch": 0.2,
      "learning_rate": 4.872270441827174e-06,
      "logits/chosen": -2.7530579566955566,
      "logits/rejected": -2.703279733657837,
      "logps/chosen": -124.97566223144531,
      "logps/rejected": -123.0599136352539,
      "loss": 0.6914,
      "rewards/accuracies": 0.578125,
      "rewards/chosen": 0.04268375411629677,
      "rewards/margins": 0.004245065152645111,
      "rewards/rejected": 0.03843868523836136,
      "step": 30
    },
    {
      "epoch": 0.26,
      "learning_rate": 4.630851211353007e-06,
      "logits/chosen": -2.7293448448181152,
      "logits/rejected": -2.70413875579834,
      "logps/chosen": -121.0544204711914,
      "logps/rejected": -125.32257080078125,
      "loss": 0.6899,
      "rewards/accuracies": 0.606249988079071,
      "rewards/chosen": 0.06691654026508331,
      "rewards/margins": 0.008759822696447372,
      "rewards/rejected": 0.05815672129392624,
      "step": 40
    },
    {
      "epoch": 0.33,
      "learning_rate": 4.277872161641682e-06,
      "logits/chosen": -2.7005789279937744,
      "logits/rejected": -2.6852593421936035,
      "logps/chosen": -121.9286880493164,
      "logps/rejected": -122.9526596069336,
      "loss": 0.6889,
      "rewards/accuracies": 0.59375,
      "rewards/chosen": 0.08492810279130936,
      "rewards/margins": 0.008367636241018772,
      "rewards/rejected": 0.07656045258045197,
      "step": 50
    },
    {
      "epoch": 0.39,
      "learning_rate": 3.831813362428005e-06,
      "logits/chosen": -2.6874237060546875,
      "logits/rejected": -2.6661763191223145,
      "logps/chosen": -127.428955078125,
      "logps/rejected": -120.95658111572266,
      "loss": 0.6868,
      "rewards/accuracies": 0.5718749761581421,
      "rewards/chosen": 0.09333746135234833,
      "rewards/margins": 0.018141191452741623,
      "rewards/rejected": 0.075196273624897,
      "step": 60
    },
    {
      "epoch": 0.46,
      "learning_rate": 3.3160280345958614e-06,
      "logits/chosen": -2.631645679473877,
      "logits/rejected": -2.592644214630127,
      "logps/chosen": -116.2786636352539,
      "logps/rejected": -119.904541015625,
      "loss": 0.6833,
      "rewards/accuracies": 0.621874988079071,
      "rewards/chosen": 0.068717822432518,
      "rewards/margins": 0.020535219460725784,
      "rewards/rejected": 0.04818259924650192,
      "step": 70
    },
    {
      "epoch": 0.52,
      "learning_rate": 2.757519902117886e-06,
      "logits/chosen": -2.6794140338897705,
      "logits/rejected": -2.6262881755828857,
      "logps/chosen": -115.40583801269531,
      "logps/rejected": -118.37687683105469,
      "loss": 0.6826,
      "rewards/accuracies": 0.643750011920929,
      "rewards/chosen": 0.04097718372941017,
      "rewards/margins": 0.0270619448274374,
      "rewards/rejected": 0.01391523890197277,
      "step": 80
    },
    {
      "epoch": 0.59,
      "learning_rate": 2.185529423440807e-06,
      "logits/chosen": -2.654372453689575,
      "logits/rejected": -2.6041693687438965,
      "logps/chosen": -125.3586654663086,
      "logps/rejected": -126.3048095703125,
      "loss": 0.6802,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": 0.07956322282552719,
      "rewards/margins": 0.03575668856501579,
      "rewards/rejected": 0.0438065305352211,
      "step": 90
    },
    {
      "epoch": 0.65,
      "learning_rate": 1.6300029195778454e-06,
      "logits/chosen": -2.6299571990966797,
      "logits/rejected": -2.6014647483825684,
      "logps/chosen": -121.17118835449219,
      "logps/rejected": -126.56787109375,
      "loss": 0.6781,
      "rewards/accuracies": 0.590624988079071,
      "rewards/chosen": 0.06307043880224228,
      "rewards/margins": 0.035764340311288834,
      "rewards/rejected": 0.027306100353598595,
      "step": 100
    },
    {
      "epoch": 0.65,
      "eval_logits/chosen": -2.6276280879974365,
      "eval_logits/rejected": -2.5400655269622803,
      "eval_logps/chosen": -289.5703125,
      "eval_logps/rejected": -269.8819274902344,
      "eval_loss": 0.6668701171875,
      "eval_rewards/accuracies": 0.6439999938011169,
      "eval_rewards/chosen": -0.026262342929840088,
      "eval_rewards/margins": 0.059951718896627426,
      "eval_rewards/rejected": -0.08621405810117722,
      "eval_runtime": 381.9384,
      "eval_samples_per_second": 5.236,
      "eval_steps_per_second": 0.655,
      "step": 100
    },
    {
      "epoch": 0.72,
      "learning_rate": 1.1200247470632394e-06,
      "logits/chosen": -2.640122652053833,
      "logits/rejected": -2.605874538421631,
      "logps/chosen": -128.22695922851562,
      "logps/rejected": -126.72957611083984,
      "loss": 0.6778,
      "rewards/accuracies": 0.565625011920929,
      "rewards/chosen": 0.03247489407658577,
      "rewards/margins": 0.034549370408058167,
      "rewards/rejected": -0.0020744793582707644,
      "step": 110
    },
    {
      "epoch": 0.78,
      "learning_rate": 6.822945986946386e-07,
      "logits/chosen": -2.6430487632751465,
      "logits/rejected": -2.6212897300720215,
      "logps/chosen": -125.3406753540039,
      "logps/rejected": -127.47813415527344,
      "loss": 0.6773,
      "rewards/accuracies": 0.546875,
      "rewards/chosen": 0.04354492202401161,
      "rewards/margins": 0.01878989301621914,
      "rewards/rejected": 0.02475503273308277,
      "step": 120
    },
    {
      "epoch": 0.85,
      "learning_rate": 3.397296523427807e-07,
      "logits/chosen": -2.5850777626037598,
      "logits/rejected": -2.558905839920044,
      "logps/chosen": -126.96197509765625,
      "logps/rejected": -130.18173217773438,
      "loss": 0.676,
      "rewards/accuracies": 0.6343749761581421,
      "rewards/chosen": 0.026839906349778175,
      "rewards/margins": 0.039194256067276,
      "rewards/rejected": -0.012354351580142975,
      "step": 130
    },
    {
      "epoch": 0.91,
      "learning_rate": 1.102647517397798e-07,
      "logits/chosen": -2.6094307899475098,
      "logits/rejected": -2.559872627258301,
      "logps/chosen": -123.62800598144531,
      "logps/rejected": -125.2538833618164,
      "loss": 0.6781,
      "rewards/accuracies": 0.590624988079071,
      "rewards/chosen": 0.02713879942893982,
      "rewards/margins": 0.04252254590392113,
      "rewards/rejected": -0.015383748337626457,
      "step": 140
    },
    {
      "epoch": 0.98,
      "learning_rate": 5.9134352763748345e-09,
      "logits/chosen": -2.6256508827209473,
      "logits/rejected": -2.61336088180542,
      "logps/chosen": -125.59254455566406,
      "logps/rejected": -130.33981323242188,
      "loss": 0.6745,
      "rewards/accuracies": 0.6031249761581421,
      "rewards/chosen": 0.032428476959466934,
      "rewards/margins": 0.05681389570236206,
      "rewards/rejected": -0.024385426193475723,
      "step": 150
    },
    {
      "epoch": 1.0,
      "step": 153,
      "total_flos": 0.0,
      "train_loss": 0.6834432619069916,
      "train_runtime": 6282.2252,
      "train_samples_per_second": 3.125,
      "train_steps_per_second": 0.024
    }
  ],
  "logging_steps": 10,
  "max_steps": 153,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}