{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 13.168724279835391,
  "eval_steps": 500,
  "global_step": 400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.3292181069958848,
      "grad_norm": 4.28085994720459,
      "learning_rate": 8.333333333333333e-07,
      "logits/chosen": -2.348794937133789,
      "logits/rejected": -2.3731939792633057,
      "logps/chosen": -65.27528381347656,
      "logps/rejected": -66.73173522949219,
      "loss": 0.6933,
      "rewards/accuracies": 0.38749998807907104,
      "rewards/chosen": -0.000993417459540069,
      "rewards/margins": -0.0003861843142658472,
      "rewards/rejected": -0.0006072330288589001,
      "step": 10
    },
    {
      "epoch": 0.6584362139917695,
      "grad_norm": 3.9006142616271973,
      "learning_rate": 1.6666666666666667e-06,
      "logits/chosen": -2.381702184677124,
      "logits/rejected": -2.387089252471924,
      "logps/chosen": -76.84241485595703,
      "logps/rejected": -76.77680969238281,
      "loss": 0.6941,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": 0.002538852859288454,
      "rewards/margins": 0.0011897915974259377,
      "rewards/rejected": 0.0013490616111084819,
      "step": 20
    },
    {
      "epoch": 0.9876543209876543,
      "grad_norm": 4.658288478851318,
      "learning_rate": 2.5e-06,
      "logits/chosen": -2.3804943561553955,
      "logits/rejected": -2.3802895545959473,
      "logps/chosen": -65.05754089355469,
      "logps/rejected": -60.8338737487793,
      "loss": 0.6938,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": 0.004748785402625799,
      "rewards/margins": 0.0050577023066580296,
      "rewards/rejected": -0.00030891643837094307,
      "step": 30
    },
    {
      "epoch": 1.316872427983539,
      "grad_norm": 10.031479835510254,
      "learning_rate": 3.3333333333333333e-06,
      "logits/chosen": -2.343177556991577,
      "logits/rejected": -2.356895923614502,
      "logps/chosen": -72.90717315673828,
      "logps/rejected": -64.87867736816406,
      "loss": 0.6884,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": 0.02475106716156006,
      "rewards/margins": 0.029533248394727707,
      "rewards/rejected": -0.0047821830958127975,
      "step": 40
    },
    {
      "epoch": 1.646090534979424,
      "grad_norm": 5.148515224456787,
      "learning_rate": 4.166666666666667e-06,
      "logits/chosen": -2.314805507659912,
      "logits/rejected": -2.332075357437134,
      "logps/chosen": -74.19972229003906,
      "logps/rejected": -84.31201934814453,
      "loss": 0.6888,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": 0.008543441072106361,
      "rewards/margins": 0.011772889643907547,
      "rewards/rejected": -0.0032294485718011856,
      "step": 50
    },
    {
      "epoch": 1.9753086419753085,
      "grad_norm": 4.609860420227051,
      "learning_rate": 5e-06,
      "logits/chosen": -2.350900173187256,
      "logits/rejected": -2.330029010772705,
      "logps/chosen": -72.87406921386719,
      "logps/rejected": -69.63726043701172,
      "loss": 0.6869,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": 0.009138436987996101,
      "rewards/margins": 0.013146847486495972,
      "rewards/rejected": -0.0040084123611450195,
      "step": 60
    },
    {
      "epoch": 2.3045267489711936,
      "grad_norm": 4.261270523071289,
      "learning_rate": 4.995770395678171e-06,
      "logits/chosen": -2.3685832023620605,
      "logits/rejected": -2.3904385566711426,
      "logps/chosen": -75.97269439697266,
      "logps/rejected": -78.17133331298828,
      "loss": 0.6707,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": 0.03298325464129448,
      "rewards/margins": 0.05551639944314957,
      "rewards/rejected": -0.02253313548862934,
      "step": 70
    },
    {
      "epoch": 2.633744855967078,
      "grad_norm": 4.327195644378662,
      "learning_rate": 4.983095894354858e-06,
      "logits/chosen": -2.3301124572753906,
      "logits/rejected": -2.331705093383789,
      "logps/chosen": -72.1042251586914,
      "logps/rejected": -62.44911575317383,
      "loss": 0.6575,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": 0.02669849991798401,
      "rewards/margins": 0.051876507699489594,
      "rewards/rejected": -0.025178011506795883,
      "step": 80
    },
    {
      "epoch": 2.962962962962963,
      "grad_norm": 4.614945888519287,
      "learning_rate": 4.962019382530521e-06,
      "logits/chosen": -2.3660030364990234,
      "logits/rejected": -2.379166603088379,
      "logps/chosen": -67.3130874633789,
      "logps/rejected": -65.10738372802734,
      "loss": 0.6454,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": 0.019906962290406227,
      "rewards/margins": 0.07426988333463669,
      "rewards/rejected": -0.05436293035745621,
      "step": 90
    },
    {
      "epoch": 3.292181069958848,
      "grad_norm": 4.318708896636963,
      "learning_rate": 4.93261217644956e-06,
      "logits/chosen": -2.3698017597198486,
      "logits/rejected": -2.378725051879883,
      "logps/chosen": -75.62451171875,
      "logps/rejected": -80.79871368408203,
      "loss": 0.6156,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": 0.09396305680274963,
      "rewards/margins": 0.2190607488155365,
      "rewards/rejected": -0.12509770691394806,
      "step": 100
    },
    {
      "epoch": 3.6213991769547325,
      "grad_norm": 4.499190807342529,
      "learning_rate": 4.894973780788722e-06,
      "logits/chosen": -2.4184162616729736,
      "logits/rejected": -2.400146007537842,
      "logps/chosen": -62.17826461791992,
      "logps/rejected": -65.74874877929688,
      "loss": 0.5929,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": 0.051440201699733734,
      "rewards/margins": 0.19785621762275696,
      "rewards/rejected": -0.14641599357128143,
      "step": 110
    },
    {
      "epoch": 3.950617283950617,
      "grad_norm": 4.767049312591553,
      "learning_rate": 4.849231551964771e-06,
      "logits/chosen": -2.389042615890503,
      "logits/rejected": -2.3723196983337402,
      "logps/chosen": -81.3266830444336,
      "logps/rejected": -79.3870849609375,
      "loss": 0.576,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": 0.006978911813348532,
      "rewards/margins": 0.3074144721031189,
      "rewards/rejected": -0.30043551325798035,
      "step": 120
    },
    {
      "epoch": 4.279835390946502,
      "grad_norm": 4.824108123779297,
      "learning_rate": 4.7955402672006855e-06,
      "logits/chosen": -2.393279552459717,
      "logits/rejected": -2.4019925594329834,
      "logps/chosen": -76.16815185546875,
      "logps/rejected": -76.16459655761719,
      "loss": 0.5395,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": 0.004183758515864611,
      "rewards/margins": 0.3808066248893738,
      "rewards/rejected": -0.37662285566329956,
      "step": 130
    },
    {
      "epoch": 4.609053497942387,
      "grad_norm": 5.023725986480713,
      "learning_rate": 4.734081600808531e-06,
      "logits/chosen": -2.3473973274230957,
      "logits/rejected": -2.361241579055786,
      "logps/chosen": -81.27131652832031,
      "logps/rejected": -94.87051391601562,
      "loss": 0.5009,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -0.09364889562129974,
      "rewards/margins": 0.5326429605484009,
      "rewards/rejected": -0.6262918710708618,
      "step": 140
    },
    {
      "epoch": 4.938271604938271,
      "grad_norm": 5.259047031402588,
      "learning_rate": 4.665063509461098e-06,
      "logits/chosen": -2.4455697536468506,
      "logits/rejected": -2.4381449222564697,
      "logps/chosen": -69.75814819335938,
      "logps/rejected": -77.3443832397461,
      "loss": 0.5111,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -0.15613806247711182,
      "rewards/margins": 0.5841559767723083,
      "rewards/rejected": -0.7402940392494202,
      "step": 150
    },
    {
      "epoch": 5.267489711934156,
      "grad_norm": 4.720038890838623,
      "learning_rate": 4.588719528532342e-06,
      "logits/chosen": -2.4044508934020996,
      "logits/rejected": -2.386610269546509,
      "logps/chosen": -89.14405059814453,
      "logps/rejected": -86.31761932373047,
      "loss": 0.4646,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -0.24362032115459442,
      "rewards/margins": 0.6143159866333008,
      "rewards/rejected": -0.8579362630844116,
      "step": 160
    },
    {
      "epoch": 5.596707818930041,
      "grad_norm": 4.4304094314575195,
      "learning_rate": 4.50530798188761e-06,
      "logits/chosen": -2.4376425743103027,
      "logits/rejected": -2.416869878768921,
      "logps/chosen": -83.4544906616211,
      "logps/rejected": -87.96864318847656,
      "loss": 0.4327,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -0.25005388259887695,
      "rewards/margins": 0.7776281833648682,
      "rewards/rejected": -1.0276820659637451,
      "step": 170
    },
    {
      "epoch": 5.925925925925926,
      "grad_norm": 5.450507164001465,
      "learning_rate": 4.415111107797445e-06,
      "logits/chosen": -2.429447650909424,
      "logits/rejected": -2.422396421432495,
      "logps/chosen": -84.02339172363281,
      "logps/rejected": -81.29154968261719,
      "loss": 0.4313,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -0.4232204556465149,
      "rewards/margins": 0.8838070034980774,
      "rewards/rejected": -1.3070275783538818,
      "step": 180
    },
    {
      "epoch": 6.255144032921811,
      "grad_norm": 5.132652282714844,
      "learning_rate": 4.318434103932622e-06,
      "logits/chosen": -2.433706760406494,
      "logits/rejected": -2.460986614227295,
      "logps/chosen": -82.15243530273438,
      "logps/rejected": -95.60574340820312,
      "loss": 0.4021,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": -0.47676190733909607,
      "rewards/margins": 1.0315628051757812,
      "rewards/rejected": -1.5083246231079102,
      "step": 190
    },
    {
      "epoch": 6.584362139917696,
      "grad_norm": 5.9813690185546875,
      "learning_rate": 4.215604094671835e-06,
      "logits/chosen": -2.4577317237854004,
      "logits/rejected": -2.474640130996704,
      "logps/chosen": -70.86317443847656,
      "logps/rejected": -90.49053192138672,
      "loss": 0.3643,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": -0.5595805048942566,
      "rewards/margins": 1.0751607418060303,
      "rewards/rejected": -1.634741187095642,
      "step": 200
    },
    {
      "epoch": 6.91358024691358,
      "grad_norm": 5.65604305267334,
      "learning_rate": 4.106969024216348e-06,
      "logits/chosen": -2.435795307159424,
      "logits/rejected": -2.454598903656006,
      "logps/chosen": -78.73880767822266,
      "logps/rejected": -94.9955062866211,
      "loss": 0.3608,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -0.676998496055603,
      "rewards/margins": 1.2362091541290283,
      "rewards/rejected": -1.9132076501846313,
      "step": 210
    },
    {
      "epoch": 7.242798353909465,
      "grad_norm": 5.157329082489014,
      "learning_rate": 3.992896479256966e-06,
      "logits/chosen": -2.39492130279541,
      "logits/rejected": -2.3961689472198486,
      "logps/chosen": -74.92576599121094,
      "logps/rejected": -91.86097717285156,
      "loss": 0.319,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": -0.6371862292289734,
      "rewards/margins": 1.316188097000122,
      "rewards/rejected": -1.9533745050430298,
      "step": 220
    },
    {
      "epoch": 7.57201646090535,
      "grad_norm": 5.136756896972656,
      "learning_rate": 3.8737724451770155e-06,
      "logits/chosen": -2.4012722969055176,
      "logits/rejected": -2.3788814544677734,
      "logps/chosen": -129.93008422851562,
      "logps/rejected": -92.78892517089844,
      "loss": 0.29,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": 0.2803955078125,
      "rewards/margins": 2.429713726043701,
      "rewards/rejected": -2.149318218231201,
      "step": 230
    },
    {
      "epoch": 7.901234567901234,
      "grad_norm": 5.834601879119873,
      "learning_rate": 3.7500000000000005e-06,
      "logits/chosen": -2.4388298988342285,
      "logits/rejected": -2.4415581226348877,
      "logps/chosen": -71.8863296508789,
      "logps/rejected": -88.8488998413086,
      "loss": 0.2979,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": -0.9963721036911011,
      "rewards/margins": 1.5685430765151978,
      "rewards/rejected": -2.5649149417877197,
      "step": 240
    },
    {
      "epoch": 8.23045267489712,
      "grad_norm": 4.80113410949707,
      "learning_rate": 3.621997950501156e-06,
      "logits/chosen": -2.432385206222534,
      "logits/rejected": -2.426995038986206,
      "logps/chosen": -83.89598846435547,
      "logps/rejected": -109.856689453125,
      "loss": 0.2709,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -1.05374014377594,
      "rewards/margins": 1.6686651706695557,
      "rewards/rejected": -2.722405433654785,
      "step": 250
    },
    {
      "epoch": 8.559670781893004,
      "grad_norm": 4.61885404586792,
      "learning_rate": 3.4901994150978926e-06,
      "logits/chosen": -2.3861355781555176,
      "logits/rejected": -2.386688232421875,
      "logps/chosen": -83.57012176513672,
      "logps/rejected": -103.34098052978516,
      "loss": 0.2383,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": -1.036254644393921,
      "rewards/margins": 1.8610769510269165,
      "rewards/rejected": -2.897331714630127,
      "step": 260
    },
    {
      "epoch": 8.88888888888889,
      "grad_norm": 6.8387603759765625,
      "learning_rate": 3.3550503583141726e-06,
      "logits/chosen": -2.393098831176758,
      "logits/rejected": -2.406322956085205,
      "logps/chosen": -88.91485595703125,
      "logps/rejected": -118.64851379394531,
      "loss": 0.2461,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -1.3334013223648071,
      "rewards/margins": 2.0046401023864746,
      "rewards/rejected": -3.338041305541992,
      "step": 270
    },
    {
      "epoch": 9.218106995884774,
      "grad_norm": 4.790656089782715,
      "learning_rate": 3.217008081777726e-06,
      "logits/chosen": -2.40057373046875,
      "logits/rejected": -2.3985626697540283,
      "logps/chosen": -99.63141632080078,
      "logps/rejected": -106.76456451416016,
      "loss": 0.2095,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": -1.3174140453338623,
      "rewards/margins": 2.1455185413360596,
      "rewards/rejected": -3.462932586669922,
      "step": 280
    },
    {
      "epoch": 9.547325102880658,
      "grad_norm": 7.7193708419799805,
      "learning_rate": 3.0765396768561005e-06,
      "logits/chosen": -2.3912322521209717,
      "logits/rejected": -2.3881642818450928,
      "logps/chosen": -90.94058990478516,
      "logps/rejected": -121.91908264160156,
      "loss": 0.1962,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -1.536046028137207,
      "rewards/margins": 2.3822174072265625,
      "rewards/rejected": -3.9182631969451904,
      "step": 290
    },
    {
      "epoch": 9.876543209876543,
      "grad_norm": 5.5355000495910645,
      "learning_rate": 2.9341204441673267e-06,
      "logits/chosen": -2.3750805854797363,
      "logits/rejected": -2.403031826019287,
      "logps/chosen": -92.7899169921875,
      "logps/rejected": -107.88253021240234,
      "loss": 0.1769,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": -1.6319608688354492,
      "rewards/margins": 2.070601463317871,
      "rewards/rejected": -3.7025623321533203,
      "step": 300
    },
    {
      "epoch": 10.205761316872428,
      "grad_norm": 4.570405006408691,
      "learning_rate": 2.7902322853130758e-06,
      "logits/chosen": -2.413585662841797,
      "logits/rejected": -2.4128100872039795,
      "logps/chosen": -82.2129898071289,
      "logps/rejected": -111.6125259399414,
      "loss": 0.1725,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -1.4095767736434937,
      "rewards/margins": 2.5350427627563477,
      "rewards/rejected": -3.944619655609131,
      "step": 310
    },
    {
      "epoch": 10.534979423868313,
      "grad_norm": 6.312554359436035,
      "learning_rate": 2.6453620722761897e-06,
      "logits/chosen": -2.3654825687408447,
      "logits/rejected": -2.360241651535034,
      "logps/chosen": -94.05240631103516,
      "logps/rejected": -126.00224304199219,
      "loss": 0.1511,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": -1.9204778671264648,
      "rewards/margins": 2.536738157272339,
      "rewards/rejected": -4.457216262817383,
      "step": 320
    },
    {
      "epoch": 10.864197530864198,
      "grad_norm": 5.389893054962158,
      "learning_rate": 2.5e-06,
      "logits/chosen": -2.3491291999816895,
      "logits/rejected": -2.354940414428711,
      "logps/chosen": -104.17342376708984,
      "logps/rejected": -137.72128295898438,
      "loss": 0.1361,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -2.220851182937622,
      "rewards/margins": 2.9987587928771973,
      "rewards/rejected": -5.219610214233398,
      "step": 330
    },
    {
      "epoch": 11.193415637860083,
      "grad_norm": 4.085507392883301,
      "learning_rate": 2.3546379277238107e-06,
      "logits/chosen": -2.318918228149414,
      "logits/rejected": -2.351606607437134,
      "logps/chosen": -92.32350158691406,
      "logps/rejected": -129.15365600585938,
      "loss": 0.1294,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": -2.087580442428589,
      "rewards/margins": 2.9299442768096924,
      "rewards/rejected": -5.017524719238281,
      "step": 340
    },
    {
      "epoch": 11.522633744855966,
      "grad_norm": 5.135997772216797,
      "learning_rate": 2.2097677146869242e-06,
      "logits/chosen": -2.3357138633728027,
      "logits/rejected": -2.3629403114318848,
      "logps/chosen": -94.5295639038086,
      "logps/rejected": -119.21665954589844,
      "loss": 0.1216,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": -2.43931245803833,
      "rewards/margins": 2.933500289916992,
      "rewards/rejected": -5.372812271118164,
      "step": 350
    },
    {
      "epoch": 11.851851851851851,
      "grad_norm": 6.594943046569824,
      "learning_rate": 2.0658795558326745e-06,
      "logits/chosen": -2.2753522396087646,
      "logits/rejected": -2.27793550491333,
      "logps/chosen": -113.2972412109375,
      "logps/rejected": -141.83450317382812,
      "loss": 0.1079,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -2.8333306312561035,
      "rewards/margins": 3.120042324066162,
      "rewards/rejected": -5.953372478485107,
      "step": 360
    },
    {
      "epoch": 12.181069958847736,
      "grad_norm": 3.8602702617645264,
      "learning_rate": 1.9234603231439e-06,
      "logits/chosen": -2.279592990875244,
      "logits/rejected": -2.3021957874298096,
      "logps/chosen": -157.71868896484375,
      "logps/rejected": -130.61915588378906,
      "loss": 0.0979,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -2.1277592182159424,
      "rewards/margins": 3.6744728088378906,
      "rewards/rejected": -5.802231788635254,
      "step": 370
    },
    {
      "epoch": 12.510288065843621,
      "grad_norm": 4.203768253326416,
      "learning_rate": 1.7829919182222752e-06,
      "logits/chosen": -2.332866668701172,
      "logits/rejected": -2.342665910720825,
      "logps/chosen": -95.84976959228516,
      "logps/rejected": -142.677734375,
      "loss": 0.081,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -2.5826311111450195,
      "rewards/margins": 3.6997482776641846,
      "rewards/rejected": -6.282379150390625,
      "step": 380
    },
    {
      "epoch": 12.839506172839506,
      "grad_norm": 7.400186061859131,
      "learning_rate": 1.6449496416858285e-06,
      "logits/chosen": -2.2747161388397217,
      "logits/rejected": -2.2847588062286377,
      "logps/chosen": -97.12799835205078,
      "logps/rejected": -146.51226806640625,
      "loss": 0.0908,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -2.9791295528411865,
      "rewards/margins": 3.8435187339782715,
      "rewards/rejected": -6.822648048400879,
      "step": 390
    },
    {
      "epoch": 13.168724279835391,
      "grad_norm": 4.099456310272217,
      "learning_rate": 1.509800584902108e-06,
      "logits/chosen": -2.269260883331299,
      "logits/rejected": -2.2895314693450928,
      "logps/chosen": -108.60566711425781,
      "logps/rejected": -143.68768310546875,
      "loss": 0.0719,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -3.2180545330047607,
      "rewards/margins": 4.0625810623168945,
      "rewards/rejected": -7.280634880065918,
      "step": 400
    }
  ],
  "logging_steps": 10,
  "max_steps": 600,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 20,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.4653950405566792e+18,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}