{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 6.772151898734177,
  "eval_steps": 500,
  "global_step": 200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.33755274261603374,
      "grad_norm": 0.4396141469478607,
      "learning_rate": 8.620689655172415e-07,
      "logits/chosen": 1.6453087329864502,
      "logits/rejected": 1.694819450378418,
      "logps/chosen": -74.5937728881836,
      "logps/rejected": -83.19783782958984,
      "loss": 0.6928,
      "rewards/accuracies": 0.42500001192092896,
      "rewards/chosen": 0.003385844174772501,
      "rewards/margins": 0.004794469103217125,
      "rewards/rejected": -0.0014086246956139803,
      "step": 10
    },
    {
      "epoch": 0.6751054852320675,
      "grad_norm": 3.18404483795166,
      "learning_rate": 1.724137931034483e-06,
      "logits/chosen": 1.7945035696029663,
      "logits/rejected": 1.8347476720809937,
      "logps/chosen": -95.46636199951172,
      "logps/rejected": -101.22709655761719,
      "loss": 0.6933,
      "rewards/accuracies": 0.4625000059604645,
      "rewards/chosen": -0.0005593777750618756,
      "rewards/margins": -0.002013001125305891,
      "rewards/rejected": 0.0014536241069436073,
      "step": 20
    },
    {
      "epoch": 1.0126582278481013,
      "grad_norm": 0.48596081137657166,
      "learning_rate": 2.5862068965517246e-06,
      "logits/chosen": 1.774155616760254,
      "logits/rejected": 1.8374344110488892,
      "logps/chosen": -82.01265716552734,
      "logps/rejected": -84.23635864257812,
      "loss": 0.6948,
      "rewards/accuracies": 0.44999998807907104,
      "rewards/chosen": -0.0031326995231211185,
      "rewards/margins": -0.00463916826993227,
      "rewards/rejected": 0.0015064675826579332,
      "step": 30
    },
    {
      "epoch": 1.350210970464135,
      "grad_norm": 0.5144609808921814,
      "learning_rate": 3.448275862068966e-06,
      "logits/chosen": 1.838865876197815,
      "logits/rejected": 1.9505430459976196,
      "logps/chosen": -73.50071716308594,
      "logps/rejected": -88.8648452758789,
      "loss": 0.6928,
      "rewards/accuracies": 0.42500001192092896,
      "rewards/chosen": -0.0035957477521151304,
      "rewards/margins": -0.008830643258988857,
      "rewards/rejected": 0.005234894808381796,
      "step": 40
    },
    {
      "epoch": 1.6877637130801688,
      "grad_norm": 0.48295095562934875,
      "learning_rate": 4.310344827586207e-06,
      "logits/chosen": 1.8197529315948486,
      "logits/rejected": 1.8459796905517578,
      "logps/chosen": -83.36590576171875,
      "logps/rejected": -70.26930236816406,
      "loss": 0.6929,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": -0.0030026868917047977,
      "rewards/margins": -0.006082554347813129,
      "rewards/rejected": 0.003079867223277688,
      "step": 50
    },
    {
      "epoch": 2.0464135021097047,
      "grad_norm": 0.5616092085838318,
      "learning_rate": 4.999818897894192e-06,
      "logits/chosen": 1.7525272369384766,
      "logits/rejected": 1.8092533349990845,
      "logps/chosen": -82.01802062988281,
      "logps/rejected": -78.39723205566406,
      "loss": 0.6949,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -0.0010920714121311903,
      "rewards/margins": -0.0016235255170613527,
      "rewards/rejected": 0.0005314538720995188,
      "step": 60
    },
    {
      "epoch": 2.3839662447257384,
      "grad_norm": 0.5237122774124146,
      "learning_rate": 4.9934830787948756e-06,
      "logits/chosen": 1.754880666732788,
      "logits/rejected": 1.8100305795669556,
      "logps/chosen": -89.62864685058594,
      "logps/rejected": -88.30036926269531,
      "loss": 0.694,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.002314353361725807,
      "rewards/margins": -0.001038494287058711,
      "rewards/rejected": -0.0012758590746670961,
      "step": 70
    },
    {
      "epoch": 2.721518987341772,
      "grad_norm": 0.507128119468689,
      "learning_rate": 4.978118375700895e-06,
      "logits/chosen": 1.7501789331436157,
      "logits/rejected": 1.6666221618652344,
      "logps/chosen": -94.74919128417969,
      "logps/rejected": -74.9794692993164,
      "loss": 0.6922,
      "rewards/accuracies": 0.4625000059604645,
      "rewards/chosen": 0.00475720502436161,
      "rewards/margins": 0.01054429542273283,
      "rewards/rejected": -0.005787090864032507,
      "step": 80
    },
    {
      "epoch": 3.059071729957806,
      "grad_norm": 0.8151038885116577,
      "learning_rate": 4.953780424089803e-06,
      "logits/chosen": 1.7203710079193115,
      "logits/rejected": 1.7376699447631836,
      "logps/chosen": -76.8824691772461,
      "logps/rejected": -84.74555206298828,
      "loss": 0.6925,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": 0.01064519863575697,
      "rewards/margins": 0.012716365046799183,
      "rewards/rejected": -0.0020711664110422134,
      "step": 90
    },
    {
      "epoch": 3.3966244725738397,
      "grad_norm": 0.5476706027984619,
      "learning_rate": 4.920557351506409e-06,
      "logits/chosen": 1.7857105731964111,
      "logits/rejected": 1.7341340780258179,
      "logps/chosen": -88.39775085449219,
      "logps/rejected": -80.00445556640625,
      "loss": 0.6893,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": 0.0052363635040819645,
      "rewards/margins": 0.016881367191672325,
      "rewards/rejected": -0.011645001359283924,
      "step": 100
    },
    {
      "epoch": 3.7341772151898733,
      "grad_norm": 0.6562405824661255,
      "learning_rate": 4.878569458453592e-06,
      "logits/chosen": 1.737647294998169,
      "logits/rejected": 1.7781591415405273,
      "logps/chosen": -92.72380065917969,
      "logps/rejected": -85.51634216308594,
      "loss": 0.6899,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": -0.010819707065820694,
      "rewards/margins": -0.0005886269500479102,
      "rewards/rejected": -0.010231079533696175,
      "step": 110
    },
    {
      "epoch": 4.071729957805907,
      "grad_norm": 0.6393210291862488,
      "learning_rate": 4.827968782785062e-06,
      "logits/chosen": 1.7890704870224,
      "logits/rejected": 1.8340994119644165,
      "logps/chosen": -87.615478515625,
      "logps/rejected": -108.13908386230469,
      "loss": 0.6872,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": 0.002250433200970292,
      "rewards/margins": 0.012117428705096245,
      "rewards/rejected": -0.009866995736956596,
      "step": 120
    },
    {
      "epoch": 4.409282700421941,
      "grad_norm": 0.7390326261520386,
      "learning_rate": 4.7689385491773934e-06,
      "logits/chosen": 1.7916072607040405,
      "logits/rejected": 1.787153959274292,
      "logps/chosen": -84.11034393310547,
      "logps/rejected": -76.81166076660156,
      "loss": 0.6844,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.003190569579601288,
      "rewards/margins": 0.015628555789589882,
      "rewards/rejected": -0.01881912164390087,
      "step": 130
    },
    {
      "epoch": 4.746835443037975,
      "grad_norm": 0.7888330817222595,
      "learning_rate": 4.70169250567482e-06,
      "logits/chosen": 1.8455331325531006,
      "logits/rejected": 1.8137515783309937,
      "logps/chosen": -78.94912719726562,
      "logps/rejected": -76.1368637084961,
      "loss": 0.6819,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.012986091896891594,
      "rewards/margins": 0.015167826786637306,
      "rewards/rejected": -0.02815392054617405,
      "step": 140
    },
    {
      "epoch": 5.084388185654008,
      "grad_norm": 0.7248090505599976,
      "learning_rate": 4.626474149709127e-06,
      "logits/chosen": 1.6686553955078125,
      "logits/rejected": 1.6654260158538818,
      "logps/chosen": -78.25769805908203,
      "logps/rejected": -71.3498306274414,
      "loss": 0.677,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.0182911679148674,
      "rewards/margins": 0.02633112668991089,
      "rewards/rejected": -0.04462229460477829,
      "step": 150
    },
    {
      "epoch": 5.421940928270042,
      "grad_norm": 0.7906918525695801,
      "learning_rate": 4.54355584639723e-06,
      "logits/chosen": 1.8054498434066772,
      "logits/rejected": 1.751422643661499,
      "logps/chosen": -91.65776824951172,
      "logps/rejected": -68.41027069091797,
      "loss": 0.6741,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.04313550516963005,
      "rewards/margins": 0.017568334937095642,
      "rewards/rejected": -0.060703836381435394,
      "step": 160
    },
    {
      "epoch": 5.759493670886076,
      "grad_norm": 0.8152453303337097,
      "learning_rate": 4.45323784230908e-06,
      "logits/chosen": 1.742419958114624,
      "logits/rejected": 1.9456249475479126,
      "logps/chosen": -80.52605438232422,
      "logps/rejected": -117.02473449707031,
      "loss": 0.67,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.030041133984923363,
      "rewards/margins": 0.03393586724996567,
      "rewards/rejected": -0.06397700309753418,
      "step": 170
    },
    {
      "epoch": 6.09704641350211,
      "grad_norm": 0.7848192453384399,
      "learning_rate": 4.355847178277025e-06,
      "logits/chosen": 1.836090087890625,
      "logits/rejected": 2.0285377502441406,
      "logps/chosen": -72.87449645996094,
      "logps/rejected": -101.46552276611328,
      "loss": 0.6659,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.04543738812208176,
      "rewards/margins": 0.0431094653904438,
      "rewards/rejected": -0.08854684978723526,
      "step": 180
    },
    {
      "epoch": 6.434599156118144,
      "grad_norm": 0.8349910378456116,
      "learning_rate": 4.2517365051833564e-06,
      "logits/chosen": 1.7459189891815186,
      "logits/rejected": 1.8324615955352783,
      "logps/chosen": -72.0348892211914,
      "logps/rejected": -86.13719940185547,
      "loss": 0.6555,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.04952271655201912,
      "rewards/margins": 0.07018387317657471,
      "rewards/rejected": -0.11970658600330353,
      "step": 190
    },
    {
      "epoch": 6.772151898734177,
      "grad_norm": 0.9766960144042969,
      "learning_rate": 4.141282807014034e-06,
      "logits/chosen": 1.7785131931304932,
      "logits/rejected": 1.8624244928359985,
      "logps/chosen": -91.82891845703125,
      "logps/rejected": -102.28694915771484,
      "loss": 0.6465,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.04159725084900856,
      "rewards/margins": 0.09511855244636536,
      "rewards/rejected": -0.1367158144712448,
      "step": 200
    }
  ],
  "logging_steps": 10,
  "max_steps": 580,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 20,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 6.868334382038385e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}