{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 10.147679324894515,
  "eval_steps": 500,
  "global_step": 300,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.33755274261603374,
      "grad_norm": 0.4396141469478607,
      "learning_rate": 8.620689655172415e-07,
      "logits/chosen": 1.6453087329864502,
      "logits/rejected": 1.694819450378418,
      "logps/chosen": -74.5937728881836,
      "logps/rejected": -83.19783782958984,
      "loss": 0.6928,
      "rewards/accuracies": 0.42500001192092896,
      "rewards/chosen": 0.003385844174772501,
      "rewards/margins": 0.004794469103217125,
      "rewards/rejected": -0.0014086246956139803,
      "step": 10
    },
    {
      "epoch": 0.6751054852320675,
      "grad_norm": 3.18404483795166,
      "learning_rate": 1.724137931034483e-06,
      "logits/chosen": 1.7945035696029663,
      "logits/rejected": 1.8347476720809937,
      "logps/chosen": -95.46636199951172,
      "logps/rejected": -101.22709655761719,
      "loss": 0.6933,
      "rewards/accuracies": 0.4625000059604645,
      "rewards/chosen": -0.0005593777750618756,
      "rewards/margins": -0.002013001125305891,
      "rewards/rejected": 0.0014536241069436073,
      "step": 20
    },
    {
      "epoch": 1.0126582278481013,
      "grad_norm": 0.48596081137657166,
      "learning_rate": 2.5862068965517246e-06,
      "logits/chosen": 1.774155616760254,
      "logits/rejected": 1.8374344110488892,
      "logps/chosen": -82.01265716552734,
      "logps/rejected": -84.23635864257812,
      "loss": 0.6948,
      "rewards/accuracies": 0.44999998807907104,
      "rewards/chosen": -0.0031326995231211185,
      "rewards/margins": -0.00463916826993227,
      "rewards/rejected": 0.0015064675826579332,
      "step": 30
    },
    {
      "epoch": 1.350210970464135,
      "grad_norm": 0.5144609808921814,
      "learning_rate": 3.448275862068966e-06,
      "logits/chosen": 1.838865876197815,
      "logits/rejected": 1.9505430459976196,
      "logps/chosen": -73.50071716308594,
      "logps/rejected": -88.8648452758789,
      "loss": 0.6928,
      "rewards/accuracies": 0.42500001192092896,
      "rewards/chosen": -0.0035957477521151304,
      "rewards/margins": -0.008830643258988857,
      "rewards/rejected": 0.005234894808381796,
      "step": 40
    },
    {
      "epoch": 1.6877637130801688,
      "grad_norm": 0.48295095562934875,
      "learning_rate": 4.310344827586207e-06,
      "logits/chosen": 1.8197529315948486,
      "logits/rejected": 1.8459796905517578,
      "logps/chosen": -83.36590576171875,
      "logps/rejected": -70.26930236816406,
      "loss": 0.6929,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": -0.0030026868917047977,
      "rewards/margins": -0.006082554347813129,
      "rewards/rejected": 0.003079867223277688,
      "step": 50
    },
    {
      "epoch": 2.0464135021097047,
      "grad_norm": 0.5616092085838318,
      "learning_rate": 4.999818897894192e-06,
      "logits/chosen": 1.7525272369384766,
      "logits/rejected": 1.8092533349990845,
      "logps/chosen": -82.01802062988281,
      "logps/rejected": -78.39723205566406,
      "loss": 0.6949,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -0.0010920714121311903,
      "rewards/margins": -0.0016235255170613527,
      "rewards/rejected": 0.0005314538720995188,
      "step": 60
    },
    {
      "epoch": 2.3839662447257384,
      "grad_norm": 0.5237122774124146,
      "learning_rate": 4.9934830787948756e-06,
      "logits/chosen": 1.754880666732788,
      "logits/rejected": 1.8100305795669556,
      "logps/chosen": -89.62864685058594,
      "logps/rejected": -88.30036926269531,
      "loss": 0.694,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.002314353361725807,
      "rewards/margins": -0.001038494287058711,
      "rewards/rejected": -0.0012758590746670961,
      "step": 70
    },
    {
      "epoch": 2.721518987341772,
      "grad_norm": 0.507128119468689,
      "learning_rate": 4.978118375700895e-06,
      "logits/chosen": 1.7501789331436157,
      "logits/rejected": 1.6666221618652344,
      "logps/chosen": -94.74919128417969,
      "logps/rejected": -74.9794692993164,
      "loss": 0.6922,
      "rewards/accuracies": 0.4625000059604645,
      "rewards/chosen": 0.00475720502436161,
      "rewards/margins": 0.01054429542273283,
      "rewards/rejected": -0.005787090864032507,
      "step": 80
    },
    {
      "epoch": 3.059071729957806,
      "grad_norm": 0.8151038885116577,
      "learning_rate": 4.953780424089803e-06,
      "logits/chosen": 1.7203710079193115,
      "logits/rejected": 1.7376699447631836,
      "logps/chosen": -76.8824691772461,
      "logps/rejected": -84.74555206298828,
      "loss": 0.6925,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": 0.01064519863575697,
      "rewards/margins": 0.012716365046799183,
      "rewards/rejected": -0.0020711664110422134,
      "step": 90
    },
    {
      "epoch": 3.3966244725738397,
      "grad_norm": 0.5476706027984619,
      "learning_rate": 4.920557351506409e-06,
      "logits/chosen": 1.7857105731964111,
      "logits/rejected": 1.7341340780258179,
      "logps/chosen": -88.39775085449219,
      "logps/rejected": -80.00445556640625,
      "loss": 0.6893,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": 0.0052363635040819645,
      "rewards/margins": 0.016881367191672325,
      "rewards/rejected": -0.011645001359283924,
      "step": 100
    },
    {
      "epoch": 3.7341772151898733,
      "grad_norm": 0.6562405824661255,
      "learning_rate": 4.878569458453592e-06,
      "logits/chosen": 1.737647294998169,
      "logits/rejected": 1.7781591415405273,
      "logps/chosen": -92.72380065917969,
      "logps/rejected": -85.51634216308594,
      "loss": 0.6899,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": -0.010819707065820694,
      "rewards/margins": -0.0005886269500479102,
      "rewards/rejected": -0.010231079533696175,
      "step": 110
    },
    {
      "epoch": 4.071729957805907,
      "grad_norm": 0.6393210291862488,
      "learning_rate": 4.827968782785062e-06,
      "logits/chosen": 1.7890704870224,
      "logits/rejected": 1.8340994119644165,
      "logps/chosen": -87.615478515625,
      "logps/rejected": -108.13908386230469,
      "loss": 0.6872,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": 0.002250433200970292,
      "rewards/margins": 0.012117428705096245,
      "rewards/rejected": -0.009866995736956596,
      "step": 120
    },
    {
      "epoch": 4.409282700421941,
      "grad_norm": 0.7390326261520386,
      "learning_rate": 4.7689385491773934e-06,
      "logits/chosen": 1.7916072607040405,
      "logits/rejected": 1.787153959274292,
      "logps/chosen": -84.11034393310547,
      "logps/rejected": -76.81166076660156,
      "loss": 0.6844,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.003190569579601288,
      "rewards/margins": 0.015628555789589882,
      "rewards/rejected": -0.01881912164390087,
      "step": 130
    },
    {
      "epoch": 4.746835443037975,
      "grad_norm": 0.7888330817222595,
      "learning_rate": 4.70169250567482e-06,
      "logits/chosen": 1.8455331325531006,
      "logits/rejected": 1.8137515783309937,
      "logps/chosen": -78.94912719726562,
      "logps/rejected": -76.1368637084961,
      "loss": 0.6819,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.012986091896891594,
      "rewards/margins": 0.015167826786637306,
      "rewards/rejected": -0.02815392054617405,
      "step": 140
    },
    {
      "epoch": 5.084388185654008,
      "grad_norm": 0.7248090505599976,
      "learning_rate": 4.626474149709127e-06,
      "logits/chosen": 1.6686553955078125,
      "logits/rejected": 1.6654260158538818,
      "logps/chosen": -78.25769805908203,
      "logps/rejected": -71.3498306274414,
      "loss": 0.677,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.0182911679148674,
      "rewards/margins": 0.02633112668991089,
      "rewards/rejected": -0.04462229460477829,
      "step": 150
    },
    {
      "epoch": 5.421940928270042,
      "grad_norm": 0.7906918525695801,
      "learning_rate": 4.54355584639723e-06,
      "logits/chosen": 1.8054498434066772,
      "logits/rejected": 1.751422643661499,
      "logps/chosen": -91.65776824951172,
      "logps/rejected": -68.41027069091797,
      "loss": 0.6741,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.04313550516963005,
      "rewards/margins": 0.017568334937095642,
      "rewards/rejected": -0.060703836381435394,
      "step": 160
    },
    {
      "epoch": 5.759493670886076,
      "grad_norm": 0.8152453303337097,
      "learning_rate": 4.45323784230908e-06,
      "logits/chosen": 1.742419958114624,
      "logits/rejected": 1.9456249475479126,
      "logps/chosen": -80.52605438232422,
      "logps/rejected": -117.02473449707031,
      "loss": 0.67,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.030041133984923363,
      "rewards/margins": 0.03393586724996567,
      "rewards/rejected": -0.06397700309753418,
      "step": 170
    },
    {
      "epoch": 6.09704641350211,
      "grad_norm": 0.7848192453384399,
      "learning_rate": 4.355847178277025e-06,
      "logits/chosen": 1.836090087890625,
      "logits/rejected": 2.0285377502441406,
      "logps/chosen": -72.87449645996094,
      "logps/rejected": -101.46552276611328,
      "loss": 0.6659,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.04543738812208176,
      "rewards/margins": 0.0431094653904438,
      "rewards/rejected": -0.08854684978723526,
      "step": 180
    },
    {
      "epoch": 6.434599156118144,
      "grad_norm": 0.8349910378456116,
      "learning_rate": 4.2517365051833564e-06,
      "logits/chosen": 1.7459189891815186,
      "logits/rejected": 1.8324615955352783,
      "logps/chosen": -72.0348892211914,
      "logps/rejected": -86.13719940185547,
      "loss": 0.6555,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.04952271655201912,
      "rewards/margins": 0.07018387317657471,
      "rewards/rejected": -0.11970658600330353,
      "step": 190
    },
    {
      "epoch": 6.772151898734177,
      "grad_norm": 0.9766960144042969,
      "learning_rate": 4.141282807014034e-06,
      "logits/chosen": 1.7785131931304932,
      "logits/rejected": 1.8624244928359985,
      "logps/chosen": -91.82891845703125,
      "logps/rejected": -102.28694915771484,
      "loss": 0.6465,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.04159725084900856,
      "rewards/margins": 0.09511855244636536,
      "rewards/rejected": -0.1367158144712448,
      "step": 200
    },
    {
      "epoch": 7.109704641350211,
      "grad_norm": 0.9714512228965759,
      "learning_rate": 4.024886035802432e-06,
      "logits/chosen": 1.756864309310913,
      "logits/rejected": 1.7497355937957764,
      "logps/chosen": -82.28770446777344,
      "logps/rejected": -72.7796859741211,
      "loss": 0.6498,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -0.0668434351682663,
      "rewards/margins": 0.1403656005859375,
      "rewards/rejected": -0.2072090357542038,
      "step": 210
    },
    {
      "epoch": 7.447257383966245,
      "grad_norm": 0.9135355949401855,
      "learning_rate": 3.9029676634059565e-06,
      "logits/chosen": 1.677150011062622,
      "logits/rejected": 1.7840830087661743,
      "logps/chosen": -78.84297180175781,
      "logps/rejected": -102.04107666015625,
      "loss": 0.6438,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.10343215614557266,
      "rewards/margins": 0.15733790397644043,
      "rewards/rejected": -0.2607700526714325,
      "step": 220
    },
    {
      "epoch": 7.784810126582278,
      "grad_norm": 0.8835084438323975,
      "learning_rate": 3.7759691553595214e-06,
      "logits/chosen": 1.8346344232559204,
      "logits/rejected": 1.838543176651001,
      "logps/chosen": -109.93656921386719,
      "logps/rejected": -73.76078033447266,
      "loss": 0.6279,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.08149126917123795,
      "rewards/margins": 0.17928050458431244,
      "rewards/rejected": -0.26077181100845337,
      "step": 230
    },
    {
      "epoch": 8.122362869198312,
      "grad_norm": 0.9854581952095032,
      "learning_rate": 3.6443503723320837e-06,
      "logits/chosen": 1.7367273569107056,
      "logits/rejected": 1.723976492881775,
      "logps/chosen": -80.76436614990234,
      "logps/rejected": -75.7645034790039,
      "loss": 0.6309,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.12716726958751678,
      "rewards/margins": 0.15734827518463135,
      "rewards/rejected": -0.2845155596733093,
      "step": 240
    },
    {
      "epoch": 8.459915611814345,
      "grad_norm": 0.9569210410118103,
      "learning_rate": 3.508587904974522e-06,
      "logits/chosen": 1.711611032485962,
      "logits/rejected": 1.820723295211792,
      "logps/chosen": -81.62940979003906,
      "logps/rejected": -104.07059478759766,
      "loss": 0.6206,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -0.12964895367622375,
      "rewards/margins": 0.2236514836549759,
      "rewards/rejected": -0.35330042243003845,
      "step": 250
    },
    {
      "epoch": 8.79746835443038,
      "grad_norm": 0.9199231863021851,
      "learning_rate": 3.3691733481883693e-06,
      "logits/chosen": 1.860053300857544,
      "logits/rejected": 1.8826453685760498,
      "logps/chosen": -89.85298919677734,
      "logps/rejected": -90.7202377319336,
      "loss": 0.6129,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.1632840931415558,
      "rewards/margins": 0.22734332084655762,
      "rewards/rejected": -0.3906274437904358,
      "step": 260
    },
    {
      "epoch": 9.135021097046414,
      "grad_norm": 0.996032178401947,
      "learning_rate": 3.226611521064278e-06,
      "logits/chosen": 1.8121602535247803,
      "logits/rejected": 1.7851508855819702,
      "logps/chosen": -76.85626220703125,
      "logps/rejected": -77.3901138305664,
      "loss": 0.6155,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.16423651576042175,
      "rewards/margins": 0.2163097858428955,
      "rewards/rejected": -0.38054633140563965,
      "step": 270
    },
    {
      "epoch": 9.472573839662447,
      "grad_norm": 0.96324622631073,
      "learning_rate": 3.0814186389357765e-06,
      "logits/chosen": 1.7609102725982666,
      "logits/rejected": 1.782065749168396,
      "logps/chosen": -78.853271484375,
      "logps/rejected": -81.43799591064453,
      "loss": 0.5991,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.19906465709209442,
      "rewards/margins": 0.2417418658733368,
      "rewards/rejected": -0.4408065378665924,
      "step": 280
    },
    {
      "epoch": 9.810126582278482,
      "grad_norm": 0.9327431917190552,
      "learning_rate": 2.9341204441673267e-06,
      "logits/chosen": 1.7485430240631104,
      "logits/rejected": 1.7885955572128296,
      "logps/chosen": -81.84668731689453,
      "logps/rejected": -84.5110855102539,
      "loss": 0.6035,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.24916405975818634,
      "rewards/margins": 0.22875118255615234,
      "rewards/rejected": -0.47791528701782227,
      "step": 290
    },
    {
      "epoch": 10.147679324894515,
      "grad_norm": 1.1683701276779175,
      "learning_rate": 2.785250302445062e-06,
      "logits/chosen": 1.8126602172851562,
      "logits/rejected": 1.8814666271209717,
      "logps/chosen": -105.7123031616211,
      "logps/rejected": -92.78335571289062,
      "loss": 0.5916,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -0.257813036441803,
      "rewards/margins": 0.28732481598854065,
      "rewards/rejected": -0.5451378226280212,
      "step": 300
    }
  ],
  "logging_steps": 10,
  "max_steps": 580,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 20,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.0293313016661279e+18,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}