{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 19.59915611814346,
  "eval_steps": 500,
  "global_step": 580,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.33755274261603374,
      "grad_norm": 0.4396141469478607,
      "learning_rate": 8.620689655172415e-07,
      "logits/chosen": 1.6453087329864502,
      "logits/rejected": 1.694819450378418,
      "logps/chosen": -74.5937728881836,
      "logps/rejected": -83.19783782958984,
      "loss": 0.6928,
      "rewards/accuracies": 0.42500001192092896,
      "rewards/chosen": 0.003385844174772501,
      "rewards/margins": 0.004794469103217125,
      "rewards/rejected": -0.0014086246956139803,
      "step": 10
    },
    {
      "epoch": 0.6751054852320675,
      "grad_norm": 3.18404483795166,
      "learning_rate": 1.724137931034483e-06,
      "logits/chosen": 1.7945035696029663,
      "logits/rejected": 1.8347476720809937,
      "logps/chosen": -95.46636199951172,
      "logps/rejected": -101.22709655761719,
      "loss": 0.6933,
      "rewards/accuracies": 0.4625000059604645,
      "rewards/chosen": -0.0005593777750618756,
      "rewards/margins": -0.002013001125305891,
      "rewards/rejected": 0.0014536241069436073,
      "step": 20
    },
    {
      "epoch": 1.0126582278481013,
      "grad_norm": 0.48596081137657166,
      "learning_rate": 2.5862068965517246e-06,
      "logits/chosen": 1.774155616760254,
      "logits/rejected": 1.8374344110488892,
      "logps/chosen": -82.01265716552734,
      "logps/rejected": -84.23635864257812,
      "loss": 0.6948,
      "rewards/accuracies": 0.44999998807907104,
      "rewards/chosen": -0.0031326995231211185,
      "rewards/margins": -0.00463916826993227,
      "rewards/rejected": 0.0015064675826579332,
      "step": 30
    },
    {
      "epoch": 1.350210970464135,
      "grad_norm": 0.5144609808921814,
      "learning_rate": 3.448275862068966e-06,
      "logits/chosen": 1.838865876197815,
      "logits/rejected": 1.9505430459976196,
      "logps/chosen": -73.50071716308594,
      "logps/rejected": -88.8648452758789,
      "loss": 0.6928,
      "rewards/accuracies": 0.42500001192092896,
      "rewards/chosen": -0.0035957477521151304,
      "rewards/margins": -0.008830643258988857,
      "rewards/rejected": 0.005234894808381796,
      "step": 40
    },
    {
      "epoch": 1.6877637130801688,
      "grad_norm": 0.48295095562934875,
      "learning_rate": 4.310344827586207e-06,
      "logits/chosen": 1.8197529315948486,
      "logits/rejected": 1.8459796905517578,
      "logps/chosen": -83.36590576171875,
      "logps/rejected": -70.26930236816406,
      "loss": 0.6929,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": -0.0030026868917047977,
      "rewards/margins": -0.006082554347813129,
      "rewards/rejected": 0.003079867223277688,
      "step": 50
    },
    {
      "epoch": 2.0464135021097047,
      "grad_norm": 0.5616092085838318,
      "learning_rate": 4.999818897894192e-06,
      "logits/chosen": 1.7525272369384766,
      "logits/rejected": 1.8092533349990845,
      "logps/chosen": -82.01802062988281,
      "logps/rejected": -78.39723205566406,
      "loss": 0.6949,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -0.0010920714121311903,
      "rewards/margins": -0.0016235255170613527,
      "rewards/rejected": 0.0005314538720995188,
      "step": 60
    },
    {
      "epoch": 2.3839662447257384,
      "grad_norm": 0.5237122774124146,
      "learning_rate": 4.9934830787948756e-06,
      "logits/chosen": 1.754880666732788,
      "logits/rejected": 1.8100305795669556,
      "logps/chosen": -89.62864685058594,
      "logps/rejected": -88.30036926269531,
      "loss": 0.694,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.002314353361725807,
      "rewards/margins": -0.001038494287058711,
      "rewards/rejected": -0.0012758590746670961,
      "step": 70
    },
    {
      "epoch": 2.721518987341772,
      "grad_norm": 0.507128119468689,
      "learning_rate": 4.978118375700895e-06,
      "logits/chosen": 1.7501789331436157,
      "logits/rejected": 1.6666221618652344,
      "logps/chosen": -94.74919128417969,
      "logps/rejected": -74.9794692993164,
      "loss": 0.6922,
      "rewards/accuracies": 0.4625000059604645,
      "rewards/chosen": 0.00475720502436161,
      "rewards/margins": 0.01054429542273283,
      "rewards/rejected": -0.005787090864032507,
      "step": 80
    },
    {
      "epoch": 3.059071729957806,
      "grad_norm": 0.8151038885116577,
      "learning_rate": 4.953780424089803e-06,
      "logits/chosen": 1.7203710079193115,
      "logits/rejected": 1.7376699447631836,
      "logps/chosen": -76.8824691772461,
      "logps/rejected": -84.74555206298828,
      "loss": 0.6925,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": 0.01064519863575697,
      "rewards/margins": 0.012716365046799183,
      "rewards/rejected": -0.0020711664110422134,
      "step": 90
    },
    {
      "epoch": 3.3966244725738397,
      "grad_norm": 0.5476706027984619,
      "learning_rate": 4.920557351506409e-06,
      "logits/chosen": 1.7857105731964111,
      "logits/rejected": 1.7341340780258179,
      "logps/chosen": -88.39775085449219,
      "logps/rejected": -80.00445556640625,
      "loss": 0.6893,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": 0.0052363635040819645,
      "rewards/margins": 0.016881367191672325,
      "rewards/rejected": -0.011645001359283924,
      "step": 100
    },
    {
      "epoch": 3.7341772151898733,
      "grad_norm": 0.6562405824661255,
      "learning_rate": 4.878569458453592e-06,
      "logits/chosen": 1.737647294998169,
      "logits/rejected": 1.7781591415405273,
      "logps/chosen": -92.72380065917969,
      "logps/rejected": -85.51634216308594,
      "loss": 0.6899,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": -0.010819707065820694,
      "rewards/margins": -0.0005886269500479102,
      "rewards/rejected": -0.010231079533696175,
      "step": 110
    },
    {
      "epoch": 4.071729957805907,
      "grad_norm": 0.6393210291862488,
      "learning_rate": 4.827968782785062e-06,
      "logits/chosen": 1.7890704870224,
      "logits/rejected": 1.8340994119644165,
      "logps/chosen": -87.615478515625,
      "logps/rejected": -108.13908386230469,
      "loss": 0.6872,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": 0.002250433200970292,
      "rewards/margins": 0.012117428705096245,
      "rewards/rejected": -0.009866995736956596,
      "step": 120
    },
    {
      "epoch": 4.409282700421941,
      "grad_norm": 0.7390326261520386,
      "learning_rate": 4.7689385491773934e-06,
      "logits/chosen": 1.7916072607040405,
      "logits/rejected": 1.787153959274292,
      "logps/chosen": -84.11034393310547,
      "logps/rejected": -76.81166076660156,
      "loss": 0.6844,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.003190569579601288,
      "rewards/margins": 0.015628555789589882,
      "rewards/rejected": -0.01881912164390087,
      "step": 130
    },
    {
      "epoch": 4.746835443037975,
      "grad_norm": 0.7888330817222595,
      "learning_rate": 4.70169250567482e-06,
      "logits/chosen": 1.8455331325531006,
      "logits/rejected": 1.8137515783309937,
      "logps/chosen": -78.94912719726562,
      "logps/rejected": -76.1368637084961,
      "loss": 0.6819,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.012986091896891594,
      "rewards/margins": 0.015167826786637306,
      "rewards/rejected": -0.02815392054617405,
      "step": 140
    },
    {
      "epoch": 5.084388185654008,
      "grad_norm": 0.7248090505599976,
      "learning_rate": 4.626474149709127e-06,
      "logits/chosen": 1.6686553955078125,
      "logits/rejected": 1.6654260158538818,
      "logps/chosen": -78.25769805908203,
      "logps/rejected": -71.3498306274414,
      "loss": 0.677,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.0182911679148674,
      "rewards/margins": 0.02633112668991089,
      "rewards/rejected": -0.04462229460477829,
      "step": 150
    },
    {
      "epoch": 5.421940928270042,
      "grad_norm": 0.7906918525695801,
      "learning_rate": 4.54355584639723e-06,
      "logits/chosen": 1.8054498434066772,
      "logits/rejected": 1.751422643661499,
      "logps/chosen": -91.65776824951172,
      "logps/rejected": -68.41027069091797,
      "loss": 0.6741,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.04313550516963005,
      "rewards/margins": 0.017568334937095642,
      "rewards/rejected": -0.060703836381435394,
      "step": 160
    },
    {
      "epoch": 5.759493670886076,
      "grad_norm": 0.8152453303337097,
      "learning_rate": 4.45323784230908e-06,
      "logits/chosen": 1.742419958114624,
      "logits/rejected": 1.9456249475479126,
      "logps/chosen": -80.52605438232422,
      "logps/rejected": -117.02473449707031,
      "loss": 0.67,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.030041133984923363,
      "rewards/margins": 0.03393586724996567,
      "rewards/rejected": -0.06397700309753418,
      "step": 170
    },
    {
      "epoch": 6.09704641350211,
      "grad_norm": 0.7848192453384399,
      "learning_rate": 4.355847178277025e-06,
      "logits/chosen": 1.836090087890625,
      "logits/rejected": 2.0285377502441406,
      "logps/chosen": -72.87449645996094,
      "logps/rejected": -101.46552276611328,
      "loss": 0.6659,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.04543738812208176,
      "rewards/margins": 0.0431094653904438,
      "rewards/rejected": -0.08854684978723526,
      "step": 180
    },
    {
      "epoch": 6.434599156118144,
      "grad_norm": 0.8349910378456116,
      "learning_rate": 4.2517365051833564e-06,
      "logits/chosen": 1.7459189891815186,
      "logits/rejected": 1.8324615955352783,
      "logps/chosen": -72.0348892211914,
      "logps/rejected": -86.13719940185547,
      "loss": 0.6555,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.04952271655201912,
      "rewards/margins": 0.07018387317657471,
      "rewards/rejected": -0.11970658600330353,
      "step": 190
    },
    {
      "epoch": 6.772151898734177,
      "grad_norm": 0.9766960144042969,
      "learning_rate": 4.141282807014034e-06,
      "logits/chosen": 1.7785131931304932,
      "logits/rejected": 1.8624244928359985,
      "logps/chosen": -91.82891845703125,
      "logps/rejected": -102.28694915771484,
      "loss": 0.6465,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.04159725084900856,
      "rewards/margins": 0.09511855244636536,
      "rewards/rejected": -0.1367158144712448,
      "step": 200
    },
    {
      "epoch": 7.109704641350211,
      "grad_norm": 0.9714512228965759,
      "learning_rate": 4.024886035802432e-06,
      "logits/chosen": 1.756864309310913,
      "logits/rejected": 1.7497355937957764,
      "logps/chosen": -82.28770446777344,
      "logps/rejected": -72.7796859741211,
      "loss": 0.6498,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -0.0668434351682663,
      "rewards/margins": 0.1403656005859375,
      "rewards/rejected": -0.2072090357542038,
      "step": 210
    },
    {
      "epoch": 7.447257383966245,
      "grad_norm": 0.9135355949401855,
      "learning_rate": 3.9029676634059565e-06,
      "logits/chosen": 1.677150011062622,
      "logits/rejected": 1.7840830087661743,
      "logps/chosen": -78.84297180175781,
      "logps/rejected": -102.04107666015625,
      "loss": 0.6438,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.10343215614557266,
      "rewards/margins": 0.15733790397644043,
      "rewards/rejected": -0.2607700526714325,
      "step": 220
    },
    {
      "epoch": 7.784810126582278,
      "grad_norm": 0.8835084438323975,
      "learning_rate": 3.7759691553595214e-06,
      "logits/chosen": 1.8346344232559204,
      "logits/rejected": 1.838543176651001,
      "logps/chosen": -109.93656921386719,
      "logps/rejected": -73.76078033447266,
      "loss": 0.6279,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.08149126917123795,
      "rewards/margins": 0.17928050458431244,
      "rewards/rejected": -0.26077181100845337,
      "step": 230
    },
    {
      "epoch": 8.122362869198312,
      "grad_norm": 0.9854581952095032,
      "learning_rate": 3.6443503723320837e-06,
      "logits/chosen": 1.7367273569107056,
      "logits/rejected": 1.723976492881775,
      "logps/chosen": -80.76436614990234,
      "logps/rejected": -75.7645034790039,
      "loss": 0.6309,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.12716726958751678,
      "rewards/margins": 0.15734827518463135,
      "rewards/rejected": -0.2845155596733093,
      "step": 240
    },
    {
      "epoch": 8.459915611814345,
      "grad_norm": 0.9569210410118103,
      "learning_rate": 3.508587904974522e-06,
      "logits/chosen": 1.711611032485962,
      "logits/rejected": 1.820723295211792,
      "logps/chosen": -81.62940979003906,
      "logps/rejected": -104.07059478759766,
      "loss": 0.6206,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -0.12964895367622375,
      "rewards/margins": 0.2236514836549759,
      "rewards/rejected": -0.35330042243003845,
      "step": 250
    },
    {
      "epoch": 8.79746835443038,
      "grad_norm": 0.9199231863021851,
      "learning_rate": 3.3691733481883693e-06,
      "logits/chosen": 1.860053300857544,
      "logits/rejected": 1.8826453685760498,
      "logps/chosen": -89.85298919677734,
      "logps/rejected": -90.7202377319336,
      "loss": 0.6129,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.1632840931415558,
      "rewards/margins": 0.22734332084655762,
      "rewards/rejected": -0.3906274437904358,
      "step": 260
    },
    {
      "epoch": 9.135021097046414,
      "grad_norm": 0.996032178401947,
      "learning_rate": 3.226611521064278e-06,
      "logits/chosen": 1.8121602535247803,
      "logits/rejected": 1.7851508855819702,
      "logps/chosen": -76.85626220703125,
      "logps/rejected": -77.3901138305664,
      "loss": 0.6155,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.16423651576042175,
      "rewards/margins": 0.2163097858428955,
      "rewards/rejected": -0.38054633140563965,
      "step": 270
    },
    {
      "epoch": 9.472573839662447,
      "grad_norm": 0.96324622631073,
      "learning_rate": 3.0814186389357765e-06,
      "logits/chosen": 1.7609102725982666,
      "logits/rejected": 1.782065749168396,
      "logps/chosen": -78.853271484375,
      "logps/rejected": -81.43799591064453,
      "loss": 0.5991,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.19906465709209442,
      "rewards/margins": 0.2417418658733368,
      "rewards/rejected": -0.4408065378665924,
      "step": 280
    },
    {
      "epoch": 9.810126582278482,
      "grad_norm": 0.9327431917190552,
      "learning_rate": 2.9341204441673267e-06,
      "logits/chosen": 1.7485430240631104,
      "logits/rejected": 1.7885955572128296,
      "logps/chosen": -81.84668731689453,
      "logps/rejected": -84.5110855102539,
      "loss": 0.6035,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.24916405975818634,
      "rewards/margins": 0.22875118255615234,
      "rewards/rejected": -0.47791528701782227,
      "step": 290
    },
    {
      "epoch": 10.147679324894515,
      "grad_norm": 1.1683701276779175,
      "learning_rate": 2.785250302445062e-06,
      "logits/chosen": 1.8126602172851562,
      "logits/rejected": 1.8814666271209717,
      "logps/chosen": -105.7123031616211,
      "logps/rejected": -92.78335571289062,
      "loss": 0.5916,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -0.257813036441803,
      "rewards/margins": 0.28732481598854065,
      "rewards/rejected": -0.5451378226280212,
      "step": 300
    },
    {
      "epoch": 10.485232067510548,
      "grad_norm": 1.078698754310608,
      "learning_rate": 2.6353472714635443e-06,
      "logits/chosen": 1.7991764545440674,
      "logits/rejected": 1.8787552118301392,
      "logps/chosen": -70.400634765625,
      "logps/rejected": -72.78517150878906,
      "loss": 0.5893,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.23745933175086975,
      "rewards/margins": 0.22456249594688416,
      "rewards/rejected": -0.4620218276977539,
      "step": 310
    },
    {
      "epoch": 10.822784810126583,
      "grad_norm": 1.0667306184768677,
      "learning_rate": 2.4849541490017868e-06,
      "logits/chosen": 1.7493784427642822,
      "logits/rejected": 1.7776050567626953,
      "logps/chosen": -79.56948852539062,
      "logps/rejected": -95.15191650390625,
      "loss": 0.5943,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -0.2948698401451111,
      "rewards/margins": 0.27440011501312256,
      "rewards/rejected": -0.5692699551582336,
      "step": 320
    },
    {
      "epoch": 11.160337552742616,
      "grad_norm": 1.1734286546707153,
      "learning_rate": 2.3346155074564712e-06,
      "logits/chosen": 1.7588024139404297,
      "logits/rejected": 1.6906486749649048,
      "logps/chosen": -92.88114929199219,
      "logps/rejected": -87.77473449707031,
      "loss": 0.5748,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -0.26193878054618835,
      "rewards/margins": 0.31644436717033386,
      "rewards/rejected": -0.5783831477165222,
      "step": 330
    },
    {
      "epoch": 11.49789029535865,
      "grad_norm": 1.0506142377853394,
      "learning_rate": 2.184875721949277e-06,
      "logits/chosen": 1.6390129327774048,
      "logits/rejected": 1.6817207336425781,
      "logps/chosen": -85.6020278930664,
      "logps/rejected": -81.51480865478516,
      "loss": 0.5755,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": -0.2864437997341156,
      "rewards/margins": 0.3067106604576111,
      "rewards/rejected": -0.5931544899940491,
      "step": 340
    },
    {
      "epoch": 11.835443037974684,
      "grad_norm": 1.0367387533187866,
      "learning_rate": 2.0362769991485514e-06,
      "logits/chosen": 1.811912178993225,
      "logits/rejected": 1.8571999073028564,
      "logps/chosen": -79.2660903930664,
      "logps/rejected": -105.51014709472656,
      "loss": 0.5743,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": -0.2905730605125427,
      "rewards/margins": 0.37750759720802307,
      "rewards/rejected": -0.6680806279182434,
      "step": 350
    },
    {
      "epoch": 12.172995780590718,
      "grad_norm": 1.249039888381958,
      "learning_rate": 1.8893574139429226e-06,
      "logits/chosen": 1.8357423543930054,
      "logits/rejected": 1.8285624980926514,
      "logps/chosen": -85.5277099609375,
      "logps/rejected": -90.13473510742188,
      "loss": 0.575,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.36660733819007874,
      "rewards/margins": 0.3461737632751465,
      "rewards/rejected": -0.7127811312675476,
      "step": 360
    },
    {
      "epoch": 12.51054852320675,
      "grad_norm": 1.1541879177093506,
      "learning_rate": 1.744648961076068e-06,
      "logits/chosen": 1.790567398071289,
      "logits/rejected": 1.7228246927261353,
      "logps/chosen": -86.40596008300781,
      "logps/rejected": -75.5702896118164,
      "loss": 0.5693,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.34050875902175903,
      "rewards/margins": 0.2642201781272888,
      "rewards/rejected": -0.6047288775444031,
      "step": 370
    },
    {
      "epoch": 12.848101265822784,
      "grad_norm": 1.0363268852233887,
      "learning_rate": 1.602675628797636e-06,
      "logits/chosen": 1.6812736988067627,
      "logits/rejected": 1.7188876867294312,
      "logps/chosen": -81.14212799072266,
      "logps/rejected": -82.90364074707031,
      "loss": 0.5594,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.3105277419090271,
      "rewards/margins": 0.3372951149940491,
      "rewards/rejected": -0.6478228569030762,
      "step": 380
    },
    {
      "epoch": 13.185654008438819,
      "grad_norm": 1.047418475151062,
      "learning_rate": 1.4639515015056205e-06,
      "logits/chosen": 1.6939361095428467,
      "logits/rejected": 1.7271579504013062,
      "logps/chosen": -86.88213348388672,
      "logps/rejected": -99.46037292480469,
      "loss": 0.558,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.2660979926586151,
      "rewards/margins": 0.4496893882751465,
      "rewards/rejected": -0.7157873511314392,
      "step": 390
    },
    {
      "epoch": 13.523206751054852,
      "grad_norm": 1.1949480772018433,
      "learning_rate": 1.328978898250525e-06,
      "logits/chosen": 1.7057956457138062,
      "logits/rejected": 1.726438283920288,
      "logps/chosen": -85.27348327636719,
      "logps/rejected": -96.06895446777344,
      "loss": 0.5553,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.3689552843570709,
      "rewards/margins": 0.3453688621520996,
      "rewards/rejected": -0.7143241763114929,
      "step": 400
    },
    {
      "epoch": 13.860759493670885,
      "grad_norm": 1.5321677923202515,
      "learning_rate": 1.198246553841744e-06,
      "logits/chosen": 1.8362537622451782,
      "logits/rejected": 1.889491081237793,
      "logps/chosen": -78.84573364257812,
      "logps/rejected": -75.66096496582031,
      "loss": 0.5618,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.34546419978141785,
      "rewards/margins": 0.3866986334323883,
      "rewards/rejected": -0.7321628332138062,
      "step": 410
    },
    {
      "epoch": 14.19831223628692,
      "grad_norm": 1.1697853803634644,
      "learning_rate": 1.0722278491423998e-06,
      "logits/chosen": 1.7950217723846436,
      "logits/rejected": 1.8145115375518799,
      "logps/chosen": -82.2846908569336,
      "logps/rejected": -91.40074157714844,
      "loss": 0.5516,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -0.37175649404525757,
      "rewards/margins": 0.404013067483902,
      "rewards/rejected": -0.7757695913314819,
      "step": 420
    },
    {
      "epoch": 14.535864978902953,
      "grad_norm": 1.163073182106018,
      "learning_rate": 9.513790969606926e-07,
      "logits/chosen": 1.686415433883667,
      "logits/rejected": 1.7637500762939453,
      "logps/chosen": -100.37837219238281,
      "logps/rejected": -100.43409729003906,
      "loss": 0.5583,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.35831838846206665,
      "rewards/margins": 0.3624442219734192,
      "rewards/rejected": -0.7207626104354858,
      "step": 430
    },
    {
      "epoch": 14.873417721518987,
      "grad_norm": 1.148380160331726,
      "learning_rate": 8.361378897445643e-07,
      "logits/chosen": 1.6370818614959717,
      "logits/rejected": 1.752598524093628,
      "logps/chosen": -84.63670349121094,
      "logps/rejected": -89.8626708984375,
      "loss": 0.5584,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": -0.44350141286849976,
      "rewards/margins": 0.4355877935886383,
      "rewards/rejected": -0.8790891766548157,
      "step": 440
    },
    {
      "epoch": 15.210970464135022,
      "grad_norm": 1.1012156009674072,
      "learning_rate": 7.269215150626391e-07,
      "logits/chosen": 1.8232038021087646,
      "logits/rejected": 1.8113384246826172,
      "logps/chosen": -95.2444839477539,
      "logps/rejected": -85.72362518310547,
      "loss": 0.5539,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -0.3705037236213684,
      "rewards/margins": 0.5386990308761597,
      "rewards/rejected": -0.9092028737068176,
      "step": 450
    },
    {
      "epoch": 15.548523206751055,
      "grad_norm": 1.0890499353408813,
      "learning_rate": 6.241254446089942e-07,
      "logits/chosen": 1.6102081537246704,
      "logits/rejected": 1.6902793645858765,
      "logps/chosen": -77.29241943359375,
      "logps/rejected": -87.38616943359375,
      "loss": 0.554,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.36334335803985596,
      "rewards/margins": 0.41814422607421875,
      "rewards/rejected": -0.7814875841140747,
      "step": 460
    },
    {
      "epoch": 15.886075949367088,
      "grad_norm": 1.1587886810302734,
      "learning_rate": 5.281219022030423e-07,
      "logits/chosen": 1.7049728631973267,
      "logits/rejected": 1.7815492153167725,
      "logps/chosen": -79.66454315185547,
      "logps/rejected": -91.70343017578125,
      "loss": 0.5472,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -0.3898239731788635,
      "rewards/margins": 0.40187686681747437,
      "rewards/rejected": -0.7917007803916931,
      "step": 470
    },
    {
      "epoch": 16.223628691983123,
      "grad_norm": 1.5429887771606445,
      "learning_rate": 4.392585159698087e-07,
      "logits/chosen": 1.6508013010025024,
      "logits/rejected": 1.7940822839736938,
      "logps/chosen": -77.51395416259766,
      "logps/rejected": -90.20869445800781,
      "loss": 0.5512,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.35601454973220825,
      "rewards/margins": 0.3707844018936157,
      "rewards/rejected": -0.7267988920211792,
      "step": 480
    },
    {
      "epoch": 16.561181434599156,
      "grad_norm": 1.1558414697647095,
      "learning_rate": 3.578570595810274e-07,
      "logits/chosen": 1.6670711040496826,
      "logits/rejected": 1.6244487762451172,
      "logps/chosen": -101.91804504394531,
      "logps/rejected": -91.96507263183594,
      "loss": 0.5452,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": -0.40192681550979614,
      "rewards/margins": 0.44803065061569214,
      "rewards/rejected": -0.8499574661254883,
      "step": 490
    },
    {
      "epoch": 16.89873417721519,
      "grad_norm": 1.1151753664016724,
      "learning_rate": 2.8421228711503127e-07,
      "logits/chosen": 1.7409915924072266,
      "logits/rejected": 1.8301231861114502,
      "logps/chosen": -71.89210510253906,
      "logps/rejected": -90.07672119140625,
      "loss": 0.5423,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.4172780513763428,
      "rewards/margins": 0.394390344619751,
      "rewards/rejected": -0.8116683959960938,
      "step": 500
    },
    {
      "epoch": 17.236286919831223,
      "grad_norm": 1.1430857181549072,
      "learning_rate": 2.1859086575439225e-07,
      "logits/chosen": 1.668222188949585,
      "logits/rejected": 1.7431071996688843,
      "logps/chosen": -80.20066833496094,
      "logps/rejected": -88.21453094482422,
      "loss": 0.5312,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -0.3836023807525635,
      "rewards/margins": 0.41132551431655884,
      "rewards/rejected": -0.7949277758598328,
      "step": 510
    },
    {
      "epoch": 17.573839662447256,
      "grad_norm": 1.3264650106430054,
      "learning_rate": 1.6123041018599766e-07,
      "logits/chosen": 1.6724201440811157,
      "logits/rejected": 1.782766580581665,
      "logps/chosen": -87.28868103027344,
      "logps/rejected": -109.66194152832031,
      "loss": 0.5529,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -0.4659019112586975,
      "rewards/margins": 0.3525821268558502,
      "rewards/rejected": -0.8184840083122253,
      "step": 520
    },
    {
      "epoch": 17.911392405063292,
      "grad_norm": 1.2674906253814697,
      "learning_rate": 1.1233862220001168e-07,
      "logits/chosen": 1.6698293685913086,
      "logits/rejected": 1.696459412574768,
      "logps/chosen": -95.00108337402344,
      "logps/rejected": -84.9312973022461,
      "loss": 0.5565,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -0.3677830696105957,
      "rewards/margins": 0.4486306607723236,
      "rewards/rejected": -0.8164137005805969,
      "step": 530
    },
    {
      "epoch": 18.248945147679326,
      "grad_norm": 1.1495198011398315,
      "learning_rate": 7.209253860320897e-08,
      "logits/chosen": 1.7104814052581787,
      "logits/rejected": 1.7387645244598389,
      "logps/chosen": -84.37834167480469,
      "logps/rejected": -82.36712646484375,
      "loss": 0.5475,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.4061828553676605,
      "rewards/margins": 0.3533913493156433,
      "rewards/rejected": -0.7595741748809814,
      "step": 540
    },
    {
      "epoch": 18.58649789029536,
      "grad_norm": 1.0989700555801392,
      "learning_rate": 4.063789016999331e-08,
      "logits/chosen": 1.6560907363891602,
      "logits/rejected": 1.7722400426864624,
      "logps/chosen": -80.30384826660156,
      "logps/rejected": -84.69402313232422,
      "loss": 0.5462,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -0.3671244978904724,
      "rewards/margins": 0.41629084944725037,
      "rewards/rejected": -0.7834154367446899,
      "step": 550
    },
    {
      "epoch": 18.924050632911392,
      "grad_norm": 1.159568428993225,
      "learning_rate": 1.808857395232788e-08,
      "logits/chosen": 1.738085389137268,
      "logits/rejected": 1.6417734622955322,
      "logps/chosen": -107.0008316040039,
      "logps/rejected": -72.6747817993164,
      "loss": 0.5327,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": -0.34443432092666626,
      "rewards/margins": 0.4274899363517761,
      "rewards/rejected": -0.7719243168830872,
      "step": 560
    },
    {
      "epoch": 19.261603375527425,
      "grad_norm": 1.208256483078003,
      "learning_rate": 4.526240859345499e-09,
      "logits/chosen": 1.7233607769012451,
      "logits/rejected": 1.7593921422958374,
      "logps/chosen": -88.6917953491211,
      "logps/rejected": -82.6318359375,
      "loss": 0.5552,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": -0.31639525294303894,
      "rewards/margins": 0.3895270824432373,
      "rewards/rejected": -0.7059223055839539,
      "step": 570
    },
    {
      "epoch": 19.59915611814346,
      "grad_norm": 1.0889534950256348,
      "learning_rate": 0.0,
      "logits/chosen": 1.706377625465393,
      "logits/rejected": 1.853161096572876,
      "logps/chosen": -77.8305892944336,
      "logps/rejected": -102.3224868774414,
      "loss": 0.5454,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -0.357083261013031,
      "rewards/margins": 0.48699983954429626,
      "rewards/rejected": -0.8440830111503601,
      "step": 580
    },
    {
      "epoch": 19.59915611814346,
      "step": 580,
      "total_flos": 1.9914349255044628e+18,
      "train_loss": 0.5518483819632695,
      "train_runtime": 3952.6533,
      "train_samples_per_second": 9.594,
      "train_steps_per_second": 0.147
    }
  ],
  "logging_steps": 10,
  "max_steps": 580,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 20,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.9914349255044628e+18,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}