{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 19.753086419753085,
  "eval_steps": 500,
  "global_step": 600,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.3292181069958848,
      "grad_norm": 4.28085994720459,
      "learning_rate": 8.333333333333333e-07,
      "logits/chosen": -2.348794937133789,
      "logits/rejected": -2.3731939792633057,
      "logps/chosen": -65.27528381347656,
      "logps/rejected": -66.73173522949219,
      "loss": 0.6933,
      "rewards/accuracies": 0.38749998807907104,
      "rewards/chosen": -0.000993417459540069,
      "rewards/margins": -0.0003861843142658472,
      "rewards/rejected": -0.0006072330288589001,
      "step": 10
    },
    {
      "epoch": 0.6584362139917695,
      "grad_norm": 3.9006142616271973,
      "learning_rate": 1.6666666666666667e-06,
      "logits/chosen": -2.381702184677124,
      "logits/rejected": -2.387089252471924,
      "logps/chosen": -76.84241485595703,
      "logps/rejected": -76.77680969238281,
      "loss": 0.6941,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": 0.002538852859288454,
      "rewards/margins": 0.0011897915974259377,
      "rewards/rejected": 0.0013490616111084819,
      "step": 20
    },
    {
      "epoch": 0.9876543209876543,
      "grad_norm": 4.658288478851318,
      "learning_rate": 2.5e-06,
      "logits/chosen": -2.3804943561553955,
      "logits/rejected": -2.3802895545959473,
      "logps/chosen": -65.05754089355469,
      "logps/rejected": -60.8338737487793,
      "loss": 0.6938,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": 0.004748785402625799,
      "rewards/margins": 0.0050577023066580296,
      "rewards/rejected": -0.00030891643837094307,
      "step": 30
    },
    {
      "epoch": 1.316872427983539,
      "grad_norm": 10.031479835510254,
      "learning_rate": 3.3333333333333333e-06,
      "logits/chosen": -2.343177556991577,
      "logits/rejected": -2.356895923614502,
      "logps/chosen": -72.90717315673828,
      "logps/rejected": -64.87867736816406,
      "loss": 0.6884,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": 0.02475106716156006,
      "rewards/margins": 0.029533248394727707,
      "rewards/rejected": -0.0047821830958127975,
      "step": 40
    },
    {
      "epoch": 1.646090534979424,
      "grad_norm": 5.148515224456787,
      "learning_rate": 4.166666666666667e-06,
      "logits/chosen": -2.314805507659912,
      "logits/rejected": -2.332075357437134,
      "logps/chosen": -74.19972229003906,
      "logps/rejected": -84.31201934814453,
      "loss": 0.6888,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": 0.008543441072106361,
      "rewards/margins": 0.011772889643907547,
      "rewards/rejected": -0.0032294485718011856,
      "step": 50
    },
    {
      "epoch": 1.9753086419753085,
      "grad_norm": 4.609860420227051,
      "learning_rate": 5e-06,
      "logits/chosen": -2.350900173187256,
      "logits/rejected": -2.330029010772705,
      "logps/chosen": -72.87406921386719,
      "logps/rejected": -69.63726043701172,
      "loss": 0.6869,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": 0.009138436987996101,
      "rewards/margins": 0.013146847486495972,
      "rewards/rejected": -0.0040084123611450195,
      "step": 60
    },
    {
      "epoch": 2.3045267489711936,
      "grad_norm": 4.261270523071289,
      "learning_rate": 4.995770395678171e-06,
      "logits/chosen": -2.3685832023620605,
      "logits/rejected": -2.3904385566711426,
      "logps/chosen": -75.97269439697266,
      "logps/rejected": -78.17133331298828,
      "loss": 0.6707,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": 0.03298325464129448,
      "rewards/margins": 0.05551639944314957,
      "rewards/rejected": -0.02253313548862934,
      "step": 70
    },
    {
      "epoch": 2.633744855967078,
      "grad_norm": 4.327195644378662,
      "learning_rate": 4.983095894354858e-06,
      "logits/chosen": -2.3301124572753906,
      "logits/rejected": -2.331705093383789,
      "logps/chosen": -72.1042251586914,
      "logps/rejected": -62.44911575317383,
      "loss": 0.6575,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": 0.02669849991798401,
      "rewards/margins": 0.051876507699489594,
      "rewards/rejected": -0.025178011506795883,
      "step": 80
    },
    {
      "epoch": 2.962962962962963,
      "grad_norm": 4.614945888519287,
      "learning_rate": 4.962019382530521e-06,
      "logits/chosen": -2.3660030364990234,
      "logits/rejected": -2.379166603088379,
      "logps/chosen": -67.3130874633789,
      "logps/rejected": -65.10738372802734,
      "loss": 0.6454,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": 0.019906962290406227,
      "rewards/margins": 0.07426988333463669,
      "rewards/rejected": -0.05436293035745621,
      "step": 90
    },
    {
      "epoch": 3.292181069958848,
      "grad_norm": 4.318708896636963,
      "learning_rate": 4.93261217644956e-06,
      "logits/chosen": -2.3698017597198486,
      "logits/rejected": -2.378725051879883,
      "logps/chosen": -75.62451171875,
      "logps/rejected": -80.79871368408203,
      "loss": 0.6156,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": 0.09396305680274963,
      "rewards/margins": 0.2190607488155365,
      "rewards/rejected": -0.12509770691394806,
      "step": 100
    },
    {
      "epoch": 3.6213991769547325,
      "grad_norm": 4.499190807342529,
      "learning_rate": 4.894973780788722e-06,
      "logits/chosen": -2.4184162616729736,
      "logits/rejected": -2.400146007537842,
      "logps/chosen": -62.17826461791992,
      "logps/rejected": -65.74874877929688,
      "loss": 0.5929,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": 0.051440201699733734,
      "rewards/margins": 0.19785621762275696,
      "rewards/rejected": -0.14641599357128143,
      "step": 110
    },
    {
      "epoch": 3.950617283950617,
      "grad_norm": 4.767049312591553,
      "learning_rate": 4.849231551964771e-06,
      "logits/chosen": -2.389042615890503,
      "logits/rejected": -2.3723196983337402,
      "logps/chosen": -81.3266830444336,
      "logps/rejected": -79.3870849609375,
      "loss": 0.576,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": 0.006978911813348532,
      "rewards/margins": 0.3074144721031189,
      "rewards/rejected": -0.30043551325798035,
      "step": 120
    },
    {
      "epoch": 4.279835390946502,
      "grad_norm": 4.824108123779297,
      "learning_rate": 4.7955402672006855e-06,
      "logits/chosen": -2.393279552459717,
      "logits/rejected": -2.4019925594329834,
      "logps/chosen": -76.16815185546875,
      "logps/rejected": -76.16459655761719,
      "loss": 0.5395,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": 0.004183758515864611,
      "rewards/margins": 0.3808066248893738,
      "rewards/rejected": -0.37662285566329956,
      "step": 130
    },
    {
      "epoch": 4.609053497942387,
      "grad_norm": 5.023725986480713,
      "learning_rate": 4.734081600808531e-06,
      "logits/chosen": -2.3473973274230957,
      "logits/rejected": -2.361241579055786,
      "logps/chosen": -81.27131652832031,
      "logps/rejected": -94.87051391601562,
      "loss": 0.5009,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -0.09364889562129974,
      "rewards/margins": 0.5326429605484009,
      "rewards/rejected": -0.6262918710708618,
      "step": 140
    },
    {
      "epoch": 4.938271604938271,
      "grad_norm": 5.259047031402588,
      "learning_rate": 4.665063509461098e-06,
      "logits/chosen": -2.4455697536468506,
      "logits/rejected": -2.4381449222564697,
      "logps/chosen": -69.75814819335938,
      "logps/rejected": -77.3443832397461,
      "loss": 0.5111,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -0.15613806247711182,
      "rewards/margins": 0.5841559767723083,
      "rewards/rejected": -0.7402940392494202,
      "step": 150
    },
    {
      "epoch": 5.267489711934156,
      "grad_norm": 4.720038890838623,
      "learning_rate": 4.588719528532342e-06,
      "logits/chosen": -2.4044508934020996,
      "logits/rejected": -2.386610269546509,
      "logps/chosen": -89.14405059814453,
      "logps/rejected": -86.31761932373047,
      "loss": 0.4646,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -0.24362032115459442,
      "rewards/margins": 0.6143159866333008,
      "rewards/rejected": -0.8579362630844116,
      "step": 160
    },
    {
      "epoch": 5.596707818930041,
      "grad_norm": 4.4304094314575195,
      "learning_rate": 4.50530798188761e-06,
      "logits/chosen": -2.4376425743103027,
      "logits/rejected": -2.416869878768921,
      "logps/chosen": -83.4544906616211,
      "logps/rejected": -87.96864318847656,
      "loss": 0.4327,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -0.25005388259887695,
      "rewards/margins": 0.7776281833648682,
      "rewards/rejected": -1.0276820659637451,
      "step": 170
    },
    {
      "epoch": 5.925925925925926,
      "grad_norm": 5.450507164001465,
      "learning_rate": 4.415111107797445e-06,
      "logits/chosen": -2.429447650909424,
      "logits/rejected": -2.422396421432495,
      "logps/chosen": -84.02339172363281,
      "logps/rejected": -81.29154968261719,
      "loss": 0.4313,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -0.4232204556465149,
      "rewards/margins": 0.8838070034980774,
      "rewards/rejected": -1.3070275783538818,
      "step": 180
    },
    {
      "epoch": 6.255144032921811,
      "grad_norm": 5.132652282714844,
      "learning_rate": 4.318434103932622e-06,
      "logits/chosen": -2.433706760406494,
      "logits/rejected": -2.460986614227295,
      "logps/chosen": -82.15243530273438,
      "logps/rejected": -95.60574340820312,
      "loss": 0.4021,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": -0.47676190733909607,
      "rewards/margins": 1.0315628051757812,
      "rewards/rejected": -1.5083246231079102,
      "step": 190
    },
    {
      "epoch": 6.584362139917696,
      "grad_norm": 5.9813690185546875,
      "learning_rate": 4.215604094671835e-06,
      "logits/chosen": -2.4577317237854004,
      "logits/rejected": -2.474640130996704,
      "logps/chosen": -70.86317443847656,
      "logps/rejected": -90.49053192138672,
      "loss": 0.3643,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": -0.5595805048942566,
      "rewards/margins": 1.0751607418060303,
      "rewards/rejected": -1.634741187095642,
      "step": 200
    },
    {
      "epoch": 6.91358024691358,
      "grad_norm": 5.65604305267334,
      "learning_rate": 4.106969024216348e-06,
      "logits/chosen": -2.435795307159424,
      "logits/rejected": -2.454598903656006,
      "logps/chosen": -78.73880767822266,
      "logps/rejected": -94.9955062866211,
      "loss": 0.3608,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -0.676998496055603,
      "rewards/margins": 1.2362091541290283,
      "rewards/rejected": -1.9132076501846313,
      "step": 210
    },
    {
      "epoch": 7.242798353909465,
      "grad_norm": 5.157329082489014,
      "learning_rate": 3.992896479256966e-06,
      "logits/chosen": -2.39492130279541,
      "logits/rejected": -2.3961689472198486,
      "logps/chosen": -74.92576599121094,
      "logps/rejected": -91.86097717285156,
      "loss": 0.319,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": -0.6371862292289734,
      "rewards/margins": 1.316188097000122,
      "rewards/rejected": -1.9533745050430298,
      "step": 220
    },
    {
      "epoch": 7.57201646090535,
      "grad_norm": 5.136756896972656,
      "learning_rate": 3.8737724451770155e-06,
      "logits/chosen": -2.4012722969055176,
      "logits/rejected": -2.3788814544677734,
      "logps/chosen": -129.93008422851562,
      "logps/rejected": -92.78892517089844,
      "loss": 0.29,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": 0.2803955078125,
      "rewards/margins": 2.429713726043701,
      "rewards/rejected": -2.149318218231201,
      "step": 230
    },
    {
      "epoch": 7.901234567901234,
      "grad_norm": 5.834601879119873,
      "learning_rate": 3.7500000000000005e-06,
      "logits/chosen": -2.4388298988342285,
      "logits/rejected": -2.4415581226348877,
      "logps/chosen": -71.8863296508789,
      "logps/rejected": -88.8488998413086,
      "loss": 0.2979,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": -0.9963721036911011,
      "rewards/margins": 1.5685430765151978,
      "rewards/rejected": -2.5649149417877197,
      "step": 240
    },
    {
      "epoch": 8.23045267489712,
      "grad_norm": 4.80113410949707,
      "learning_rate": 3.621997950501156e-06,
      "logits/chosen": -2.432385206222534,
      "logits/rejected": -2.426995038986206,
      "logps/chosen": -83.89598846435547,
      "logps/rejected": -109.856689453125,
      "loss": 0.2709,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -1.05374014377594,
      "rewards/margins": 1.6686651706695557,
      "rewards/rejected": -2.722405433654785,
      "step": 250
    },
    {
      "epoch": 8.559670781893004,
      "grad_norm": 4.61885404586792,
      "learning_rate": 3.4901994150978926e-06,
      "logits/chosen": -2.3861355781555176,
      "logits/rejected": -2.386688232421875,
      "logps/chosen": -83.57012176513672,
      "logps/rejected": -103.34098052978516,
      "loss": 0.2383,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": -1.036254644393921,
      "rewards/margins": 1.8610769510269165,
      "rewards/rejected": -2.897331714630127,
      "step": 260
    },
    {
      "epoch": 8.88888888888889,
      "grad_norm": 6.8387603759765625,
      "learning_rate": 3.3550503583141726e-06,
      "logits/chosen": -2.393098831176758,
      "logits/rejected": -2.406322956085205,
      "logps/chosen": -88.91485595703125,
      "logps/rejected": -118.64851379394531,
      "loss": 0.2461,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -1.3334013223648071,
      "rewards/margins": 2.0046401023864746,
      "rewards/rejected": -3.338041305541992,
      "step": 270
    },
    {
      "epoch": 9.218106995884774,
      "grad_norm": 4.790656089782715,
      "learning_rate": 3.217008081777726e-06,
      "logits/chosen": -2.40057373046875,
      "logits/rejected": -2.3985626697540283,
      "logps/chosen": -99.63141632080078,
      "logps/rejected": -106.76456451416016,
      "loss": 0.2095,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": -1.3174140453338623,
      "rewards/margins": 2.1455185413360596,
      "rewards/rejected": -3.462932586669922,
      "step": 280
    },
    {
      "epoch": 9.547325102880658,
      "grad_norm": 7.7193708419799805,
      "learning_rate": 3.0765396768561005e-06,
      "logits/chosen": -2.3912322521209717,
      "logits/rejected": -2.3881642818450928,
      "logps/chosen": -90.94058990478516,
      "logps/rejected": -121.91908264160156,
      "loss": 0.1962,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -1.536046028137207,
      "rewards/margins": 2.3822174072265625,
      "rewards/rejected": -3.9182631969451904,
      "step": 290
    },
    {
      "epoch": 9.876543209876543,
      "grad_norm": 5.5355000495910645,
      "learning_rate": 2.9341204441673267e-06,
      "logits/chosen": -2.3750805854797363,
      "logits/rejected": -2.403031826019287,
      "logps/chosen": -92.7899169921875,
      "logps/rejected": -107.88253021240234,
      "loss": 0.1769,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": -1.6319608688354492,
      "rewards/margins": 2.070601463317871,
      "rewards/rejected": -3.7025623321533203,
      "step": 300
    },
    {
      "epoch": 10.205761316872428,
      "grad_norm": 4.570405006408691,
      "learning_rate": 2.7902322853130758e-06,
      "logits/chosen": -2.413585662841797,
      "logits/rejected": -2.4128100872039795,
      "logps/chosen": -82.2129898071289,
      "logps/rejected": -111.6125259399414,
      "loss": 0.1725,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -1.4095767736434937,
      "rewards/margins": 2.5350427627563477,
      "rewards/rejected": -3.944619655609131,
      "step": 310
    },
    {
      "epoch": 10.534979423868313,
      "grad_norm": 6.312554359436035,
      "learning_rate": 2.6453620722761897e-06,
      "logits/chosen": -2.3654825687408447,
      "logits/rejected": -2.360241651535034,
      "logps/chosen": -94.05240631103516,
      "logps/rejected": -126.00224304199219,
      "loss": 0.1511,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": -1.9204778671264648,
      "rewards/margins": 2.536738157272339,
      "rewards/rejected": -4.457216262817383,
      "step": 320
    },
    {
      "epoch": 10.864197530864198,
      "grad_norm": 5.389893054962158,
      "learning_rate": 2.5e-06,
      "logits/chosen": -2.3491291999816895,
      "logits/rejected": -2.354940414428711,
      "logps/chosen": -104.17342376708984,
      "logps/rejected": -137.72128295898438,
      "loss": 0.1361,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -2.220851182937622,
      "rewards/margins": 2.9987587928771973,
      "rewards/rejected": -5.219610214233398,
      "step": 330
    },
    {
      "epoch": 11.193415637860083,
      "grad_norm": 4.085507392883301,
      "learning_rate": 2.3546379277238107e-06,
      "logits/chosen": -2.318918228149414,
      "logits/rejected": -2.351606607437134,
      "logps/chosen": -92.32350158691406,
      "logps/rejected": -129.15365600585938,
      "loss": 0.1294,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": -2.087580442428589,
      "rewards/margins": 2.9299442768096924,
      "rewards/rejected": -5.017524719238281,
      "step": 340
    },
    {
      "epoch": 11.522633744855966,
      "grad_norm": 5.135997772216797,
      "learning_rate": 2.2097677146869242e-06,
      "logits/chosen": -2.3357138633728027,
      "logits/rejected": -2.3629403114318848,
      "logps/chosen": -94.5295639038086,
      "logps/rejected": -119.21665954589844,
      "loss": 0.1216,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": -2.43931245803833,
      "rewards/margins": 2.933500289916992,
      "rewards/rejected": -5.372812271118164,
      "step": 350
    },
    {
      "epoch": 11.851851851851851,
      "grad_norm": 6.594943046569824,
      "learning_rate": 2.0658795558326745e-06,
      "logits/chosen": -2.2753522396087646,
      "logits/rejected": -2.27793550491333,
      "logps/chosen": -113.2972412109375,
      "logps/rejected": -141.83450317382812,
      "loss": 0.1079,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -2.8333306312561035,
      "rewards/margins": 3.120042324066162,
      "rewards/rejected": -5.953372478485107,
      "step": 360
    },
    {
      "epoch": 12.181069958847736,
      "grad_norm": 3.8602702617645264,
      "learning_rate": 1.9234603231439e-06,
      "logits/chosen": -2.279592990875244,
      "logits/rejected": -2.3021957874298096,
      "logps/chosen": -157.71868896484375,
      "logps/rejected": -130.61915588378906,
      "loss": 0.0979,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -2.1277592182159424,
      "rewards/margins": 3.6744728088378906,
      "rewards/rejected": -5.802231788635254,
      "step": 370
    },
    {
      "epoch": 12.510288065843621,
      "grad_norm": 4.203768253326416,
      "learning_rate": 1.7829919182222752e-06,
      "logits/chosen": -2.332866668701172,
      "logits/rejected": -2.342665910720825,
      "logps/chosen": -95.84976959228516,
      "logps/rejected": -142.677734375,
      "loss": 0.081,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -2.5826311111450195,
      "rewards/margins": 3.6997482776641846,
      "rewards/rejected": -6.282379150390625,
      "step": 380
    },
    {
      "epoch": 12.839506172839506,
      "grad_norm": 7.400186061859131,
      "learning_rate": 1.6449496416858285e-06,
      "logits/chosen": -2.2747161388397217,
      "logits/rejected": -2.2847588062286377,
      "logps/chosen": -97.12799835205078,
      "logps/rejected": -146.51226806640625,
      "loss": 0.0908,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -2.9791295528411865,
      "rewards/margins": 3.8435187339782715,
      "rewards/rejected": -6.822648048400879,
      "step": 390
    },
    {
      "epoch": 13.168724279835391,
      "grad_norm": 4.099456310272217,
      "learning_rate": 1.509800584902108e-06,
      "logits/chosen": -2.269260883331299,
      "logits/rejected": -2.2895314693450928,
      "logps/chosen": -108.60566711425781,
      "logps/rejected": -143.68768310546875,
      "loss": 0.0719,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -3.2180545330047607,
      "rewards/margins": 4.0625810623168945,
      "rewards/rejected": -7.280634880065918,
      "step": 400
    },
    {
      "epoch": 13.497942386831276,
      "grad_norm": 5.907413959503174,
      "learning_rate": 1.3780020494988447e-06,
      "logits/chosen": -2.224945068359375,
      "logits/rejected": -2.2712655067443848,
      "logps/chosen": -113.31339263916016,
      "logps/rejected": -178.72280883789062,
      "loss": 0.0705,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -3.5527851581573486,
      "rewards/margins": 4.106083869934082,
      "rewards/rejected": -7.658868312835693,
      "step": 410
    },
    {
      "epoch": 13.82716049382716,
      "grad_norm": 5.777594566345215,
      "learning_rate": 1.2500000000000007e-06,
      "logits/chosen": -2.277588367462158,
      "logits/rejected": -2.2817983627319336,
      "logps/chosen": -98.06736755371094,
      "logps/rejected": -142.85302734375,
      "loss": 0.0653,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -2.8046793937683105,
      "rewards/margins": 4.122653007507324,
      "rewards/rejected": -6.927332878112793,
      "step": 420
    },
    {
      "epoch": 14.156378600823045,
      "grad_norm": 3.4384090900421143,
      "learning_rate": 1.1262275548229852e-06,
      "logits/chosen": -2.2674877643585205,
      "logits/rejected": -2.26188588142395,
      "logps/chosen": -109.643310546875,
      "logps/rejected": -150.3360595703125,
      "loss": 0.0643,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -3.527834415435791,
      "rewards/margins": 3.976726531982422,
      "rewards/rejected": -7.504560947418213,
      "step": 430
    },
    {
      "epoch": 14.48559670781893,
      "grad_norm": 3.2865304946899414,
      "learning_rate": 1.0071035207430352e-06,
      "logits/chosen": -2.2867743968963623,
      "logits/rejected": -2.3037807941436768,
      "logps/chosen": -95.85628509521484,
      "logps/rejected": -135.21749877929688,
      "loss": 0.0576,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -3.122006893157959,
      "rewards/margins": 3.721601963043213,
      "rewards/rejected": -6.843608856201172,
      "step": 440
    },
    {
      "epoch": 14.814814814814815,
      "grad_norm": 4.039007663726807,
      "learning_rate": 8.930309757836517e-07,
      "logits/chosen": -2.250168561935425,
      "logits/rejected": -2.2552523612976074,
      "logps/chosen": -116.44384765625,
      "logps/rejected": -155.5840301513672,
      "loss": 0.0496,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -3.6558947563171387,
      "rewards/margins": 4.322918891906738,
      "rewards/rejected": -7.978812217712402,
      "step": 450
    },
    {
      "epoch": 15.1440329218107,
      "grad_norm": 3.032468795776367,
      "learning_rate": 7.843959053281663e-07,
      "logits/chosen": -2.2516555786132812,
      "logits/rejected": -2.266954183578491,
      "logps/chosen": -110.81983947753906,
      "logps/rejected": -155.51846313476562,
      "loss": 0.0536,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -3.9787039756774902,
      "rewards/margins": 4.6672492027282715,
      "rewards/rejected": -8.645953178405762,
      "step": 460
    },
    {
      "epoch": 15.473251028806585,
      "grad_norm": 2.9452898502349854,
      "learning_rate": 6.815658960673782e-07,
      "logits/chosen": -2.272416591644287,
      "logits/rejected": -2.2924296855926514,
      "logps/chosen": -105.24934387207031,
      "logps/rejected": -142.57855224609375,
      "loss": 0.0501,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": -3.5546278953552246,
      "rewards/margins": 3.986192226409912,
      "rewards/rejected": -7.540820121765137,
      "step": 470
    },
    {
      "epoch": 15.802469135802468,
      "grad_norm": 3.4811203479766846,
      "learning_rate": 5.848888922025553e-07,
      "logits/chosen": -2.2133867740631104,
      "logits/rejected": -2.2130513191223145,
      "logps/chosen": -121.40483093261719,
      "logps/rejected": -157.2665557861328,
      "loss": 0.0431,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -3.4991672039031982,
      "rewards/margins": 4.753915309906006,
      "rewards/rejected": -8.253081321716309,
      "step": 480
    },
    {
      "epoch": 16.131687242798353,
      "grad_norm": 3.7063302993774414,
      "learning_rate": 4.946920181123904e-07,
      "logits/chosen": -2.2596447467803955,
      "logits/rejected": -2.2761495113372803,
      "logps/chosen": -129.48851013183594,
      "logps/rejected": -164.61209106445312,
      "loss": 0.0408,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -4.200577259063721,
      "rewards/margins": 4.300900459289551,
      "rewards/rejected": -8.501477241516113,
      "step": 490
    },
    {
      "epoch": 16.46090534979424,
      "grad_norm": 3.2988643646240234,
      "learning_rate": 4.1128047146765936e-07,
      "logits/chosen": -2.238184928894043,
      "logits/rejected": -2.2563719749450684,
      "logps/chosen": -119.21360778808594,
      "logps/rejected": -168.99972534179688,
      "loss": 0.0415,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -4.102926731109619,
      "rewards/margins": 4.592761993408203,
      "rewards/rejected": -8.69568920135498,
      "step": 500
    },
    {
      "epoch": 16.790123456790123,
      "grad_norm": 3.3806164264678955,
      "learning_rate": 3.3493649053890325e-07,
      "logits/chosen": -2.193830966949463,
      "logits/rejected": -2.2341809272766113,
      "logps/chosen": -124.14864349365234,
      "logps/rejected": -149.8924560546875,
      "loss": 0.0362,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -3.885023832321167,
      "rewards/margins": 4.158351898193359,
      "rewards/rejected": -8.043375968933105,
      "step": 510
    },
    {
      "epoch": 17.11934156378601,
      "grad_norm": 2.4073729515075684,
      "learning_rate": 2.6591839919146963e-07,
      "logits/chosen": -2.220184803009033,
      "logits/rejected": -2.2121176719665527,
      "logps/chosen": -120.33931732177734,
      "logps/rejected": -165.0544891357422,
      "loss": 0.0396,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -4.194688320159912,
      "rewards/margins": 4.460559368133545,
      "rewards/rejected": -8.655247688293457,
      "step": 520
    },
    {
      "epoch": 17.448559670781894,
      "grad_norm": 3.3336453437805176,
      "learning_rate": 2.044597327993153e-07,
      "logits/chosen": -2.2254652976989746,
      "logits/rejected": -2.2366299629211426,
      "logps/chosen": -113.01094818115234,
      "logps/rejected": -161.00198364257812,
      "loss": 0.0395,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -4.11731481552124,
      "rewards/margins": 4.832752227783203,
      "rewards/rejected": -8.950067520141602,
      "step": 530
    },
    {
      "epoch": 17.77777777777778,
      "grad_norm": 3.376014232635498,
      "learning_rate": 1.507684480352292e-07,
      "logits/chosen": -2.2119059562683105,
      "logits/rejected": -2.2531039714813232,
      "logps/chosen": -110.52852630615234,
      "logps/rejected": -176.49847412109375,
      "loss": 0.0392,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -4.28528356552124,
      "rewards/margins": 4.865164756774902,
      "rewards/rejected": -9.150447845458984,
      "step": 540
    },
    {
      "epoch": 18.106995884773664,
      "grad_norm": 3.3391194343566895,
      "learning_rate": 1.0502621921127776e-07,
      "logits/chosen": -2.2244715690612793,
      "logits/rejected": -2.2445571422576904,
      "logps/chosen": -118.01458740234375,
      "logps/rejected": -165.5779266357422,
      "loss": 0.0362,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -4.345705032348633,
      "rewards/margins": 4.486096382141113,
      "rewards/rejected": -8.831801414489746,
      "step": 550
    },
    {
      "epoch": 18.43621399176955,
      "grad_norm": 3.404609203338623,
      "learning_rate": 6.738782355044048e-08,
      "logits/chosen": -2.227168321609497,
      "logits/rejected": -2.207815170288086,
      "logps/chosen": -99.65079498291016,
      "logps/rejected": -148.56776428222656,
      "loss": 0.037,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -3.825402021408081,
      "rewards/margins": 4.1935296058654785,
      "rewards/rejected": -8.01893138885498,
      "step": 560
    },
    {
      "epoch": 18.765432098765434,
      "grad_norm": 3.2749338150024414,
      "learning_rate": 3.798061746947995e-08,
      "logits/chosen": -2.2365221977233887,
      "logits/rejected": -2.2139172554016113,
      "logps/chosen": -119.14200592041016,
      "logps/rejected": -156.78273010253906,
      "loss": 0.0394,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -4.163195610046387,
      "rewards/margins": 4.5820207595825195,
      "rewards/rejected": -8.745216369628906,
      "step": 570
    },
    {
      "epoch": 19.094650205761315,
      "grad_norm": 3.9220383167266846,
      "learning_rate": 1.6904105645142443e-08,
      "logits/chosen": -2.1797518730163574,
      "logits/rejected": -2.198817014694214,
      "logps/chosen": -117.19510650634766,
      "logps/rejected": -173.05508422851562,
      "loss": 0.0306,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -4.046970367431641,
      "rewards/margins": 4.983619213104248,
      "rewards/rejected": -9.03058910369873,
      "step": 580
    },
    {
      "epoch": 19.4238683127572,
      "grad_norm": 2.5847129821777344,
      "learning_rate": 4.229604321829561e-09,
      "logits/chosen": -2.257030963897705,
      "logits/rejected": -2.2700631618499756,
      "logps/chosen": -129.76861572265625,
      "logps/rejected": -153.6666717529297,
      "loss": 0.0378,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -4.047970771789551,
      "rewards/margins": 4.603674411773682,
      "rewards/rejected": -8.651644706726074,
      "step": 590
    },
    {
      "epoch": 19.753086419753085,
      "grad_norm": 2.3237764835357666,
      "learning_rate": 0.0,
      "logits/chosen": -2.237996816635132,
      "logits/rejected": -2.2293925285339355,
      "logps/chosen": -127.35640716552734,
      "logps/rejected": -163.79196166992188,
      "loss": 0.0322,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -4.333734035491943,
      "rewards/margins": 4.672335624694824,
      "rewards/rejected": -9.006070137023926,
      "step": 600
    },
    {
      "epoch": 19.753086419753085,
      "step": 600,
      "total_flos": 2.1987713728999588e+18,
      "train_loss": 0.2703349814315637,
      "train_runtime": 4673.1198,
      "train_samples_per_second": 8.294,
      "train_steps_per_second": 0.128
    }
  ],
  "logging_steps": 10,
  "max_steps": 600,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 20,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.1987713728999588e+18,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}