{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9968652037617555,
  "eval_steps": 500,
  "global_step": 159,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 3.125e-08,
      "logits/chosen": -2.2583727836608887,
      "logits/rejected": -2.194669246673584,
      "logps/chosen": -344.6805114746094,
      "logps/rejected": -430.3193359375,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.06,
      "learning_rate": 3.1249999999999997e-07,
      "logits/chosen": -2.2495357990264893,
      "logits/rejected": -2.220221996307373,
      "logps/chosen": -281.9364929199219,
      "logps/rejected": -305.81884765625,
      "loss": 0.689,
      "rewards/accuracies": 0.4652777910232544,
      "rewards/chosen": -0.01338761206716299,
      "rewards/margins": 0.006979748606681824,
      "rewards/rejected": -0.02036735974252224,
      "step": 10
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.990353313429303e-07,
      "logits/chosen": -2.1989874839782715,
      "logits/rejected": -2.167945146560669,
      "logps/chosen": -258.35382080078125,
      "logps/rejected": -364.7663269042969,
      "loss": 0.6641,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": -0.2977336645126343,
      "rewards/margins": 0.21266262233257294,
      "rewards/rejected": -0.5103963613510132,
      "step": 20
    },
    {
      "epoch": 0.19,
      "learning_rate": 4.882681251368548e-07,
      "logits/chosen": -2.0941319465637207,
      "logits/rejected": -2.063507080078125,
      "logps/chosen": -295.8477783203125,
      "logps/rejected": -372.314453125,
      "loss": 0.6929,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.3137749135494232,
      "rewards/margins": 0.2363557368516922,
      "rewards/rejected": -0.5501306056976318,
      "step": 30
    },
    {
      "epoch": 0.25,
      "learning_rate": 4.6604720940421207e-07,
      "logits/chosen": -2.178748607635498,
      "logits/rejected": -2.1334617137908936,
      "logps/chosen": -285.15130615234375,
      "logps/rejected": -334.0563659667969,
      "loss": 0.6656,
      "rewards/accuracies": 0.543749988079071,
      "rewards/chosen": -0.13720116019248962,
      "rewards/margins": 0.07690997421741486,
      "rewards/rejected": -0.21411113440990448,
      "step": 40
    },
    {
      "epoch": 0.31,
      "learning_rate": 4.3344075855595097e-07,
      "logits/chosen": -2.209764003753662,
      "logits/rejected": -2.192694664001465,
      "logps/chosen": -287.18182373046875,
      "logps/rejected": -383.46197509765625,
      "loss": 0.6532,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.21152925491333008,
      "rewards/margins": 0.1477278620004654,
      "rewards/rejected": -0.35925716161727905,
      "step": 50
    },
    {
      "epoch": 0.38,
      "learning_rate": 3.920161866827889e-07,
      "logits/chosen": -2.0722219944000244,
      "logits/rejected": -2.036907196044922,
      "logps/chosen": -295.5084228515625,
      "logps/rejected": -374.8094787597656,
      "loss": 0.6568,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.31626835465431213,
      "rewards/margins": 0.2158782035112381,
      "rewards/rejected": -0.5321465730667114,
      "step": 60
    },
    {
      "epoch": 0.44,
      "learning_rate": 3.4376480090239047e-07,
      "logits/chosen": -2.0726826190948486,
      "logits/rejected": -2.032034397125244,
      "logps/chosen": -259.9215393066406,
      "logps/rejected": -369.0412292480469,
      "loss": 0.6325,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.19777385890483856,
      "rewards/margins": 0.27027979493141174,
      "rewards/rejected": -0.4680536389350891,
      "step": 70
    },
    {
      "epoch": 0.5,
      "learning_rate": 2.910060778827554e-07,
      "logits/chosen": -2.107374906539917,
      "logits/rejected": -2.0704822540283203,
      "logps/chosen": -334.5880432128906,
      "logps/rejected": -371.534912109375,
      "loss": 0.6459,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.4631880223751068,
      "rewards/margins": 0.126481831073761,
      "rewards/rejected": -0.5896698236465454,
      "step": 80
    },
    {
      "epoch": 0.56,
      "learning_rate": 2.3627616503391812e-07,
      "logits/chosen": -2.1149628162384033,
      "logits/rejected": -2.0265822410583496,
      "logps/chosen": -295.9393005371094,
      "logps/rejected": -402.9593200683594,
      "loss": 0.6123,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -0.31046614050865173,
      "rewards/margins": 0.31097108125686646,
      "rewards/rejected": -0.6214371919631958,
      "step": 90
    },
    {
      "epoch": 0.63,
      "learning_rate": 1.8220596619089573e-07,
      "logits/chosen": -2.065992593765259,
      "logits/rejected": -2.0325350761413574,
      "logps/chosen": -307.4365234375,
      "logps/rejected": -424.5880432128906,
      "loss": 0.6372,
      "rewards/accuracies": 0.668749988079071,
      "rewards/chosen": -0.38495463132858276,
      "rewards/margins": 0.3768821358680725,
      "rewards/rejected": -0.7618367671966553,
      "step": 100
    },
    {
      "epoch": 0.69,
      "learning_rate": 1.3139467229135998e-07,
      "logits/chosen": -2.0376338958740234,
      "logits/rejected": -2.006533145904541,
      "logps/chosen": -308.51556396484375,
      "logps/rejected": -427.515625,
      "loss": 0.645,
      "rewards/accuracies": 0.668749988079071,
      "rewards/chosen": -0.45776548981666565,
      "rewards/margins": 0.3285262882709503,
      "rewards/rejected": -0.7862917184829712,
      "step": 110
    },
    {
      "epoch": 0.75,
      "learning_rate": 8.628481651367875e-08,
      "logits/chosen": -2.0662786960601807,
      "logits/rejected": -2.0550084114074707,
      "logps/chosen": -303.14727783203125,
      "logps/rejected": -370.09716796875,
      "loss": 0.6298,
      "rewards/accuracies": 0.5687500238418579,
      "rewards/chosen": -0.4310707151889801,
      "rewards/margins": 0.14983342587947845,
      "rewards/rejected": -0.5809041261672974,
      "step": 120
    },
    {
      "epoch": 0.82,
      "learning_rate": 4.904486005914027e-08,
      "logits/chosen": -2.055657148361206,
      "logits/rejected": -2.022127389907837,
      "logps/chosen": -311.9975891113281,
      "logps/rejected": -384.63360595703125,
      "loss": 0.6525,
      "rewards/accuracies": 0.581250011920929,
      "rewards/chosen": -0.4104999005794525,
      "rewards/margins": 0.20988547801971436,
      "rewards/rejected": -0.620385468006134,
      "step": 130
    },
    {
      "epoch": 0.88,
      "learning_rate": 2.1464952759020856e-08,
      "logits/chosen": -2.042844533920288,
      "logits/rejected": -2.010148525238037,
      "logps/chosen": -300.5053405761719,
      "logps/rejected": -373.68572998046875,
      "loss": 0.6315,
      "rewards/accuracies": 0.59375,
      "rewards/chosen": -0.37878933548927307,
      "rewards/margins": 0.2560186982154846,
      "rewards/rejected": -0.6348080039024353,
      "step": 140
    },
    {
      "epoch": 0.94,
      "learning_rate": 4.8708793644441086e-09,
      "logits/chosen": -2.0472328662872314,
      "logits/rejected": -2.0771114826202393,
      "logps/chosen": -301.08148193359375,
      "logps/rejected": -429.6272888183594,
      "loss": 0.6246,
      "rewards/accuracies": 0.6312500238418579,
      "rewards/chosen": -0.3863406181335449,
      "rewards/margins": 0.33363914489746094,
      "rewards/rejected": -0.7199797630310059,
      "step": 150
    },
    {
      "epoch": 1.0,
      "step": 159,
      "total_flos": 0.0,
      "train_loss": 0.6471116527821283,
      "train_runtime": 2657.8372,
      "train_samples_per_second": 7.667,
      "train_steps_per_second": 0.06
    }
  ],
  "logging_steps": 10,
  "max_steps": 159,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}