| { | |
| "best_global_step": null, | |
| "best_metric": null, | |
| "best_model_checkpoint": null, | |
| "epoch": 1.981818181818182, | |
| "eval_steps": 500, | |
| "global_step": 164, | |
| "is_hyper_param_search": false, | |
| "is_local_process_zero": true, | |
| "is_world_process_zero": true, | |
| "log_history": [ | |
| { | |
| "epoch": 0.012121212121212121, | |
| "grad_norm": 2.573503255844116, | |
| "learning_rate": 2.9411764705882356e-07, | |
| "logits/chosen": 0.5989066958427429, | |
| "logits/rejected": 0.3433399796485901, | |
| "logps/chosen": -80.92005920410156, | |
| "logps/rejected": -97.3714599609375, | |
| "loss": 0.6931, | |
| "rewards/accuracies": 0.0, | |
| "rewards/chosen": 0.0, | |
| "rewards/margins": 0.0, | |
| "rewards/rejected": 0.0, | |
| "step": 1 | |
| }, | |
| { | |
| "epoch": 0.024242424242424242, | |
| "grad_norm": 2.060006856918335, | |
| "learning_rate": 5.882352941176471e-07, | |
| "logits/chosen": 0.5360614061355591, | |
| "logits/rejected": 0.42208123207092285, | |
| "logps/chosen": -78.2723388671875, | |
| "logps/rejected": -92.45564270019531, | |
| "loss": 0.6931, | |
| "rewards/accuracies": 0.0, | |
| "rewards/chosen": 0.0, | |
| "rewards/margins": 0.0, | |
| "rewards/rejected": 0.0, | |
| "step": 2 | |
| }, | |
| { | |
| "epoch": 0.03636363636363636, | |
| "grad_norm": 2.1472530364990234, | |
| "learning_rate": 8.823529411764707e-07, | |
| "logits/chosen": 0.46590447425842285, | |
| "logits/rejected": 0.5246831178665161, | |
| "logps/chosen": -82.09193420410156, | |
| "logps/rejected": -88.78755187988281, | |
| "loss": 0.6928, | |
| "rewards/accuracies": 0.625, | |
| "rewards/chosen": 0.0004730225191451609, | |
| "rewards/margins": 0.0007421494228765368, | |
| "rewards/rejected": -0.00026912690373137593, | |
| "step": 3 | |
| }, | |
| { | |
| "epoch": 0.048484848484848485, | |
| "grad_norm": 2.800305128097534, | |
| "learning_rate": 1.1764705882352942e-06, | |
| "logits/chosen": 0.65618497133255, | |
| "logits/rejected": 0.49275052547454834, | |
| "logps/chosen": -82.10594940185547, | |
| "logps/rejected": -95.1839370727539, | |
| "loss": 0.6947, | |
| "rewards/accuracies": 0.25, | |
| "rewards/chosen": -0.0007750511867925525, | |
| "rewards/margins": -0.003024006262421608, | |
| "rewards/rejected": 0.00224895472638309, | |
| "step": 4 | |
| }, | |
| { | |
| "epoch": 0.06060606060606061, | |
| "grad_norm": 2.5324952602386475, | |
| "learning_rate": 1.4705882352941177e-06, | |
| "logits/chosen": 0.5559582114219666, | |
| "logits/rejected": 0.3742131292819977, | |
| "logps/chosen": -87.20143127441406, | |
| "logps/rejected": -105.94652557373047, | |
| "loss": 0.693, | |
| "rewards/accuracies": 0.5, | |
| "rewards/chosen": -4.3773557990789413e-05, | |
| "rewards/margins": 0.00021791458129882812, | |
| "rewards/rejected": -0.00026168813928961754, | |
| "step": 5 | |
| }, | |
| { | |
| "epoch": 0.07272727272727272, | |
| "grad_norm": 2.026254177093506, | |
| "learning_rate": 1.7647058823529414e-06, | |
| "logits/chosen": 0.6321487426757812, | |
| "logits/rejected": 0.6650270223617554, | |
| "logps/chosen": -83.1277847290039, | |
| "logps/rejected": -85.23698425292969, | |
| "loss": 0.6928, | |
| "rewards/accuracies": 0.625, | |
| "rewards/chosen": -0.0016042710049077868, | |
| "rewards/margins": 0.000760841416195035, | |
| "rewards/rejected": -0.0023651123046875, | |
| "step": 6 | |
| }, | |
| { | |
| "epoch": 0.08484848484848485, | |
| "grad_norm": 2.4757308959960938, | |
| "learning_rate": 2.058823529411765e-06, | |
| "logits/chosen": 0.2787664830684662, | |
| "logits/rejected": 0.3368262052536011, | |
| "logps/chosen": -79.63035583496094, | |
| "logps/rejected": -90.24547576904297, | |
| "loss": 0.691, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": 0.0005692481645382941, | |
| "rewards/margins": 0.004281234461814165, | |
| "rewards/rejected": -0.003711986355483532, | |
| "step": 7 | |
| }, | |
| { | |
| "epoch": 0.09696969696969697, | |
| "grad_norm": 2.419964551925659, | |
| "learning_rate": 2.3529411764705885e-06, | |
| "logits/chosen": 0.5775762796401978, | |
| "logits/rejected": 0.47042325139045715, | |
| "logps/chosen": -80.90266418457031, | |
| "logps/rejected": -86.38180541992188, | |
| "loss": 0.6898, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": -0.00029315962456166744, | |
| "rewards/margins": 0.006642055697739124, | |
| "rewards/rejected": -0.006935215089470148, | |
| "step": 8 | |
| }, | |
| { | |
| "epoch": 0.10909090909090909, | |
| "grad_norm": 2.376508951187134, | |
| "learning_rate": 2.647058823529412e-06, | |
| "logits/chosen": 0.2030642330646515, | |
| "logits/rejected": 0.4143352508544922, | |
| "logps/chosen": -78.35124969482422, | |
| "logps/rejected": -89.64823913574219, | |
| "loss": 0.6877, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": -0.010747241787612438, | |
| "rewards/margins": 0.010888958349823952, | |
| "rewards/rejected": -0.021636201068758965, | |
| "step": 9 | |
| }, | |
| { | |
| "epoch": 0.12121212121212122, | |
| "grad_norm": 2.344287157058716, | |
| "learning_rate": 2.9411764705882355e-06, | |
| "logits/chosen": 0.5994730591773987, | |
| "logits/rejected": 0.5396036505699158, | |
| "logps/chosen": -79.0880355834961, | |
| "logps/rejected": -88.75253295898438, | |
| "loss": 0.6849, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": -0.010178089141845703, | |
| "rewards/margins": 0.016606714576482773, | |
| "rewards/rejected": -0.026784801855683327, | |
| "step": 10 | |
| }, | |
| { | |
| "epoch": 0.13333333333333333, | |
| "grad_norm": 2.1255571842193604, | |
| "learning_rate": 3.2352941176470594e-06, | |
| "logits/chosen": 0.3612106442451477, | |
| "logits/rejected": 0.6012680530548096, | |
| "logps/chosen": -81.41439819335938, | |
| "logps/rejected": -89.69815063476562, | |
| "loss": 0.6846, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -0.01244363747537136, | |
| "rewards/margins": 0.01724729686975479, | |
| "rewards/rejected": -0.029690932482481003, | |
| "step": 11 | |
| }, | |
| { | |
| "epoch": 0.14545454545454545, | |
| "grad_norm": 1.613459825515747, | |
| "learning_rate": 3.529411764705883e-06, | |
| "logits/chosen": 0.45081156492233276, | |
| "logits/rejected": 0.519508957862854, | |
| "logps/chosen": -82.76570129394531, | |
| "logps/rejected": -86.98448944091797, | |
| "loss": 0.6856, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": -0.004742145538330078, | |
| "rewards/margins": 0.01519632339477539, | |
| "rewards/rejected": -0.01993846893310547, | |
| "step": 12 | |
| }, | |
| { | |
| "epoch": 0.15757575757575756, | |
| "grad_norm": 2.1008141040802, | |
| "learning_rate": 3.8235294117647055e-06, | |
| "logits/chosen": 0.5754950046539307, | |
| "logits/rejected": 0.32084736227989197, | |
| "logps/chosen": -77.88233947753906, | |
| "logps/rejected": -90.71653747558594, | |
| "loss": 0.6702, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": -0.023272134363651276, | |
| "rewards/margins": 0.04726962745189667, | |
| "rewards/rejected": -0.07054176181554794, | |
| "step": 13 | |
| }, | |
| { | |
| "epoch": 0.1696969696969697, | |
| "grad_norm": 1.8546743392944336, | |
| "learning_rate": 4.11764705882353e-06, | |
| "logits/chosen": 0.26316455006599426, | |
| "logits/rejected": 0.2455248236656189, | |
| "logps/chosen": -85.9008560180664, | |
| "logps/rejected": -90.04121398925781, | |
| "loss": 0.6757, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -0.023431111127138138, | |
| "rewards/margins": 0.035515978932380676, | |
| "rewards/rejected": -0.058947086334228516, | |
| "step": 14 | |
| }, | |
| { | |
| "epoch": 0.18181818181818182, | |
| "grad_norm": 2.233098268508911, | |
| "learning_rate": 4.411764705882353e-06, | |
| "logits/chosen": 0.39422497153282166, | |
| "logits/rejected": 0.2617032527923584, | |
| "logps/chosen": -83.47624206542969, | |
| "logps/rejected": -92.22642517089844, | |
| "loss": 0.6549, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": -0.009575080126523972, | |
| "rewards/margins": 0.07998533546924591, | |
| "rewards/rejected": -0.08956041187047958, | |
| "step": 15 | |
| }, | |
| { | |
| "epoch": 0.19393939393939394, | |
| "grad_norm": 2.212238073348999, | |
| "learning_rate": 4.705882352941177e-06, | |
| "logits/chosen": 0.6788235306739807, | |
| "logits/rejected": 0.37267640233039856, | |
| "logps/chosen": -79.50228118896484, | |
| "logps/rejected": -85.6736068725586, | |
| "loss": 0.6502, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": -0.007221986539661884, | |
| "rewards/margins": 0.090502068400383, | |
| "rewards/rejected": -0.0977240651845932, | |
| "step": 16 | |
| }, | |
| { | |
| "epoch": 0.20606060606060606, | |
| "grad_norm": 2.0433385372161865, | |
| "learning_rate": 5e-06, | |
| "logits/chosen": 0.4306856095790863, | |
| "logits/rejected": 0.4344051778316498, | |
| "logps/chosen": -80.9012680053711, | |
| "logps/rejected": -86.1850357055664, | |
| "loss": 0.6826, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": -0.10336600244045258, | |
| "rewards/margins": 0.022557254880666733, | |
| "rewards/rejected": -0.1259232461452484, | |
| "step": 17 | |
| }, | |
| { | |
| "epoch": 0.21818181818181817, | |
| "grad_norm": 2.405224323272705, | |
| "learning_rate": 4.965986394557824e-06, | |
| "logits/chosen": 0.6965266466140747, | |
| "logits/rejected": 0.4544612169265747, | |
| "logps/chosen": -79.70795440673828, | |
| "logps/rejected": -100.73465728759766, | |
| "loss": 0.6404, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": -0.06364002823829651, | |
| "rewards/margins": 0.11611214280128479, | |
| "rewards/rejected": -0.1797521561384201, | |
| "step": 18 | |
| }, | |
| { | |
| "epoch": 0.23030303030303031, | |
| "grad_norm": 2.102670192718506, | |
| "learning_rate": 4.931972789115647e-06, | |
| "logits/chosen": 0.07950831949710846, | |
| "logits/rejected": 0.24685823917388916, | |
| "logps/chosen": -78.33619689941406, | |
| "logps/rejected": -84.86385345458984, | |
| "loss": 0.6501, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": -0.06548452377319336, | |
| "rewards/margins": 0.09256687760353088, | |
| "rewards/rejected": -0.15805140137672424, | |
| "step": 19 | |
| }, | |
| { | |
| "epoch": 0.24242424242424243, | |
| "grad_norm": 2.0822105407714844, | |
| "learning_rate": 4.897959183673469e-06, | |
| "logits/chosen": 0.32838794589042664, | |
| "logits/rejected": 0.3617649972438812, | |
| "logps/chosen": -82.71783447265625, | |
| "logps/rejected": -90.81163787841797, | |
| "loss": 0.6378, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -0.06530027091503143, | |
| "rewards/margins": 0.11812734603881836, | |
| "rewards/rejected": -0.183427631855011, | |
| "step": 20 | |
| }, | |
| { | |
| "epoch": 0.2545454545454545, | |
| "grad_norm": 2.3999321460723877, | |
| "learning_rate": 4.863945578231293e-06, | |
| "logits/chosen": 0.6818695068359375, | |
| "logits/rejected": 0.24048802256584167, | |
| "logps/chosen": -80.49371337890625, | |
| "logps/rejected": -96.09458923339844, | |
| "loss": 0.6311, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": -0.07628040015697479, | |
| "rewards/margins": 0.1365867704153061, | |
| "rewards/rejected": -0.21286717057228088, | |
| "step": 21 | |
| }, | |
| { | |
| "epoch": 0.26666666666666666, | |
| "grad_norm": 2.2768161296844482, | |
| "learning_rate": 4.829931972789116e-06, | |
| "logits/chosen": 0.18232905864715576, | |
| "logits/rejected": 0.39266088604927063, | |
| "logps/chosen": -80.37593078613281, | |
| "logps/rejected": -89.72500610351562, | |
| "loss": 0.6031, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": 0.06079893559217453, | |
| "rewards/margins": 0.20091038942337036, | |
| "rewards/rejected": -0.14011144638061523, | |
| "step": 22 | |
| }, | |
| { | |
| "epoch": 0.2787878787878788, | |
| "grad_norm": 1.9448617696762085, | |
| "learning_rate": 4.795918367346939e-06, | |
| "logits/chosen": 0.6265424489974976, | |
| "logits/rejected": 0.510066032409668, | |
| "logps/chosen": -79.49093627929688, | |
| "logps/rejected": -94.0775146484375, | |
| "loss": 0.583, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": -0.029067613184452057, | |
| "rewards/margins": 0.2460225224494934, | |
| "rewards/rejected": -0.27509012818336487, | |
| "step": 23 | |
| }, | |
| { | |
| "epoch": 0.2909090909090909, | |
| "grad_norm": 2.10017728805542, | |
| "learning_rate": 4.761904761904762e-06, | |
| "logits/chosen": 0.4965704679489136, | |
| "logits/rejected": 0.425863116979599, | |
| "logps/chosen": -82.37673950195312, | |
| "logps/rejected": -92.39176940917969, | |
| "loss": 0.5989, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -0.1384635865688324, | |
| "rewards/margins": 0.202981099486351, | |
| "rewards/rejected": -0.3414447009563446, | |
| "step": 24 | |
| }, | |
| { | |
| "epoch": 0.30303030303030304, | |
| "grad_norm": 1.8628658056259155, | |
| "learning_rate": 4.727891156462586e-06, | |
| "logits/chosen": 0.36799079179763794, | |
| "logits/rejected": 0.37271881103515625, | |
| "logps/chosen": -80.91612243652344, | |
| "logps/rejected": -94.26983642578125, | |
| "loss": 0.5859, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": -0.02675161324441433, | |
| "rewards/margins": 0.24192678928375244, | |
| "rewards/rejected": -0.26867836713790894, | |
| "step": 25 | |
| }, | |
| { | |
| "epoch": 0.3151515151515151, | |
| "grad_norm": 1.5512022972106934, | |
| "learning_rate": 4.693877551020409e-06, | |
| "logits/chosen": 0.45694056153297424, | |
| "logits/rejected": 0.36823397874832153, | |
| "logps/chosen": -80.49776458740234, | |
| "logps/rejected": -91.63525390625, | |
| "loss": 0.6118, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": -0.13241948187351227, | |
| "rewards/margins": 0.19294482469558716, | |
| "rewards/rejected": -0.32536429166793823, | |
| "step": 26 | |
| }, | |
| { | |
| "epoch": 0.32727272727272727, | |
| "grad_norm": 2.0366673469543457, | |
| "learning_rate": 4.659863945578232e-06, | |
| "logits/chosen": 0.44295981526374817, | |
| "logits/rejected": 0.3648220896720886, | |
| "logps/chosen": -81.94073486328125, | |
| "logps/rejected": -94.45509338378906, | |
| "loss": 0.5649, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 0.006222818046808243, | |
| "rewards/margins": 0.2877269685268402, | |
| "rewards/rejected": -0.28150415420532227, | |
| "step": 27 | |
| }, | |
| { | |
| "epoch": 0.3393939393939394, | |
| "grad_norm": 2.866046667098999, | |
| "learning_rate": 4.6258503401360546e-06, | |
| "logits/chosen": 0.37108322978019714, | |
| "logits/rejected": 0.5524705052375793, | |
| "logps/chosen": -81.45274353027344, | |
| "logps/rejected": -93.38094329833984, | |
| "loss": 0.6103, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": -0.1701315939426422, | |
| "rewards/margins": 0.20057372748851776, | |
| "rewards/rejected": -0.3707053065299988, | |
| "step": 28 | |
| }, | |
| { | |
| "epoch": 0.3515151515151515, | |
| "grad_norm": 1.883554220199585, | |
| "learning_rate": 4.591836734693878e-06, | |
| "logits/chosen": 0.635645866394043, | |
| "logits/rejected": 0.4668920636177063, | |
| "logps/chosen": -81.63595581054688, | |
| "logps/rejected": -90.26708984375, | |
| "loss": 0.6138, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": -0.28862398862838745, | |
| "rewards/margins": 0.17766514420509338, | |
| "rewards/rejected": -0.4662891626358032, | |
| "step": 29 | |
| }, | |
| { | |
| "epoch": 0.36363636363636365, | |
| "grad_norm": 2.250345230102539, | |
| "learning_rate": 4.557823129251701e-06, | |
| "logits/chosen": 0.49621516466140747, | |
| "logits/rejected": 0.5221789479255676, | |
| "logps/chosen": -81.66897583007812, | |
| "logps/rejected": -90.41416931152344, | |
| "loss": 0.5329, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -0.04887208715081215, | |
| "rewards/margins": 0.39742839336395264, | |
| "rewards/rejected": -0.4463005065917969, | |
| "step": 30 | |
| }, | |
| { | |
| "epoch": 0.37575757575757573, | |
| "grad_norm": 1.950496792793274, | |
| "learning_rate": 4.523809523809524e-06, | |
| "logits/chosen": 0.5327723622322083, | |
| "logits/rejected": 0.46856898069381714, | |
| "logps/chosen": -82.16653442382812, | |
| "logps/rejected": -87.493408203125, | |
| "loss": 0.6341, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": -0.19954414665699005, | |
| "rewards/margins": 0.14190863072872162, | |
| "rewards/rejected": -0.34145280718803406, | |
| "step": 31 | |
| }, | |
| { | |
| "epoch": 0.3878787878787879, | |
| "grad_norm": 2.1350300312042236, | |
| "learning_rate": 4.489795918367348e-06, | |
| "logits/chosen": 0.5370631814002991, | |
| "logits/rejected": 0.5145618915557861, | |
| "logps/chosen": -86.60906982421875, | |
| "logps/rejected": -93.3792495727539, | |
| "loss": 0.5004, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -0.13417404890060425, | |
| "rewards/margins": 0.4657081365585327, | |
| "rewards/rejected": -0.5998822450637817, | |
| "step": 32 | |
| }, | |
| { | |
| "epoch": 0.4, | |
| "grad_norm": 1.427173376083374, | |
| "learning_rate": 4.45578231292517e-06, | |
| "logits/chosen": 0.6590886116027832, | |
| "logits/rejected": 0.5974253416061401, | |
| "logps/chosen": -87.329345703125, | |
| "logps/rejected": -94.36528778076172, | |
| "loss": 0.6501, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": -0.4314802885055542, | |
| "rewards/margins": 0.09496374428272247, | |
| "rewards/rejected": -0.5264440774917603, | |
| "step": 33 | |
| }, | |
| { | |
| "epoch": 0.4121212121212121, | |
| "grad_norm": 2.568857431411743, | |
| "learning_rate": 4.421768707482993e-06, | |
| "logits/chosen": 0.30280208587646484, | |
| "logits/rejected": 0.24079981446266174, | |
| "logps/chosen": -84.2959213256836, | |
| "logps/rejected": -93.14737701416016, | |
| "loss": 0.5052, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": 0.05571327358484268, | |
| "rewards/margins": 0.4796813726425171, | |
| "rewards/rejected": -0.4239681363105774, | |
| "step": 34 | |
| }, | |
| { | |
| "epoch": 0.42424242424242425, | |
| "grad_norm": 2.2260830402374268, | |
| "learning_rate": 4.3877551020408165e-06, | |
| "logits/chosen": 0.6506168842315674, | |
| "logits/rejected": 0.4573900103569031, | |
| "logps/chosen": -75.35042572021484, | |
| "logps/rejected": -94.74085998535156, | |
| "loss": 0.4573, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": 0.18374451994895935, | |
| "rewards/margins": 0.6279397010803223, | |
| "rewards/rejected": -0.4441951811313629, | |
| "step": 35 | |
| }, | |
| { | |
| "epoch": 0.43636363636363634, | |
| "grad_norm": 2.2206430435180664, | |
| "learning_rate": 4.35374149659864e-06, | |
| "logits/chosen": 0.46530038118362427, | |
| "logits/rejected": 0.15938237309455872, | |
| "logps/chosen": -84.88845825195312, | |
| "logps/rejected": -93.98237609863281, | |
| "loss": 0.5184, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": -0.13509225845336914, | |
| "rewards/margins": 0.4553631842136383, | |
| "rewards/rejected": -0.5904554128646851, | |
| "step": 36 | |
| }, | |
| { | |
| "epoch": 0.4484848484848485, | |
| "grad_norm": 2.2873029708862305, | |
| "learning_rate": 4.319727891156463e-06, | |
| "logits/chosen": 0.5647754669189453, | |
| "logits/rejected": 0.32446718215942383, | |
| "logps/chosen": -83.01586151123047, | |
| "logps/rejected": -105.61540222167969, | |
| "loss": 0.4647, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -0.2565247416496277, | |
| "rewards/margins": 0.611173152923584, | |
| "rewards/rejected": -0.8676979541778564, | |
| "step": 37 | |
| }, | |
| { | |
| "epoch": 0.46060606060606063, | |
| "grad_norm": 2.183913230895996, | |
| "learning_rate": 4.2857142857142855e-06, | |
| "logits/chosen": 0.10570168495178223, | |
| "logits/rejected": -0.027524828910827637, | |
| "logps/chosen": -80.10659790039062, | |
| "logps/rejected": -94.95332336425781, | |
| "loss": 0.4946, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": 0.09906873852014542, | |
| "rewards/margins": 0.5269591212272644, | |
| "rewards/rejected": -0.427890419960022, | |
| "step": 38 | |
| }, | |
| { | |
| "epoch": 0.4727272727272727, | |
| "grad_norm": 2.2503180503845215, | |
| "learning_rate": 4.251700680272109e-06, | |
| "logits/chosen": 0.14078444242477417, | |
| "logits/rejected": 0.30571305751800537, | |
| "logps/chosen": -83.16555786132812, | |
| "logps/rejected": -92.3938980102539, | |
| "loss": 0.4761, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": 0.07579489052295685, | |
| "rewards/margins": 0.5589980483055115, | |
| "rewards/rejected": -0.48320311307907104, | |
| "step": 39 | |
| }, | |
| { | |
| "epoch": 0.48484848484848486, | |
| "grad_norm": 2.3518598079681396, | |
| "learning_rate": 4.217687074829933e-06, | |
| "logits/chosen": 0.3999345302581787, | |
| "logits/rejected": 0.020384781062602997, | |
| "logps/chosen": -86.66480255126953, | |
| "logps/rejected": -100.3143539428711, | |
| "loss": 0.5238, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -0.27970850467681885, | |
| "rewards/margins": 0.416584312915802, | |
| "rewards/rejected": -0.6962927579879761, | |
| "step": 40 | |
| }, | |
| { | |
| "epoch": 0.49696969696969695, | |
| "grad_norm": 2.3239002227783203, | |
| "learning_rate": 4.183673469387755e-06, | |
| "logits/chosen": 0.32163870334625244, | |
| "logits/rejected": 0.1246703565120697, | |
| "logps/chosen": -82.92484283447266, | |
| "logps/rejected": -99.36587524414062, | |
| "loss": 0.5421, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": -0.5212937593460083, | |
| "rewards/margins": 0.40337201952934265, | |
| "rewards/rejected": -0.9246657490730286, | |
| "step": 41 | |
| }, | |
| { | |
| "epoch": 0.509090909090909, | |
| "grad_norm": 1.7185702323913574, | |
| "learning_rate": 4.1496598639455785e-06, | |
| "logits/chosen": 0.1402607560157776, | |
| "logits/rejected": 0.189698725938797, | |
| "logps/chosen": -81.78995513916016, | |
| "logps/rejected": -86.25436401367188, | |
| "loss": 0.5575, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": -0.14212313294410706, | |
| "rewards/margins": 0.31536924839019775, | |
| "rewards/rejected": -0.4574923515319824, | |
| "step": 42 | |
| }, | |
| { | |
| "epoch": 0.5212121212121212, | |
| "grad_norm": 2.684324026107788, | |
| "learning_rate": 4.115646258503402e-06, | |
| "logits/chosen": 0.1822155863046646, | |
| "logits/rejected": 0.09680613875389099, | |
| "logps/chosen": -84.12541198730469, | |
| "logps/rejected": -95.77808380126953, | |
| "loss": 0.5116, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": -0.055333998054265976, | |
| "rewards/margins": 0.5083685517311096, | |
| "rewards/rejected": -0.5637024641036987, | |
| "step": 43 | |
| }, | |
| { | |
| "epoch": 0.5333333333333333, | |
| "grad_norm": 2.2760534286499023, | |
| "learning_rate": 4.081632653061225e-06, | |
| "logits/chosen": 0.2796591818332672, | |
| "logits/rejected": 0.08514690399169922, | |
| "logps/chosen": -79.44110107421875, | |
| "logps/rejected": -105.64237976074219, | |
| "loss": 0.3924, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": -0.08903113752603531, | |
| "rewards/margins": 0.9599316120147705, | |
| "rewards/rejected": -1.0489627122879028, | |
| "step": 44 | |
| }, | |
| { | |
| "epoch": 0.5454545454545454, | |
| "grad_norm": 2.321897506713867, | |
| "learning_rate": 4.047619047619048e-06, | |
| "logits/chosen": 0.3646478056907654, | |
| "logits/rejected": 0.08748706430196762, | |
| "logps/chosen": -81.45243072509766, | |
| "logps/rejected": -101.58081817626953, | |
| "loss": 0.4698, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": -0.11360569298267365, | |
| "rewards/margins": 0.6234503984451294, | |
| "rewards/rejected": -0.7370560765266418, | |
| "step": 45 | |
| }, | |
| { | |
| "epoch": 0.5575757575757576, | |
| "grad_norm": 1.6828770637512207, | |
| "learning_rate": 4.013605442176871e-06, | |
| "logits/chosen": -0.027117550373077393, | |
| "logits/rejected": -0.1517198383808136, | |
| "logps/chosen": -83.84929656982422, | |
| "logps/rejected": -92.15609741210938, | |
| "loss": 0.4976, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": -0.05189989507198334, | |
| "rewards/margins": 0.542182445526123, | |
| "rewards/rejected": -0.5940823554992676, | |
| "step": 46 | |
| }, | |
| { | |
| "epoch": 0.5696969696969697, | |
| "grad_norm": 2.0993480682373047, | |
| "learning_rate": 3.979591836734694e-06, | |
| "logits/chosen": 0.17406637966632843, | |
| "logits/rejected": -0.06627433747053146, | |
| "logps/chosen": -79.99982452392578, | |
| "logps/rejected": -97.92196655273438, | |
| "loss": 0.3913, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -0.10610990226268768, | |
| "rewards/margins": 0.9161709547042847, | |
| "rewards/rejected": -1.0222809314727783, | |
| "step": 47 | |
| }, | |
| { | |
| "epoch": 0.5818181818181818, | |
| "grad_norm": 2.8575243949890137, | |
| "learning_rate": 3.945578231292517e-06, | |
| "logits/chosen": 0.19870872795581818, | |
| "logits/rejected": 0.2058108150959015, | |
| "logps/chosen": -82.90513610839844, | |
| "logps/rejected": -104.39181518554688, | |
| "loss": 0.4777, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": -0.52581387758255, | |
| "rewards/margins": 0.6539483666419983, | |
| "rewards/rejected": -1.179762363433838, | |
| "step": 48 | |
| }, | |
| { | |
| "epoch": 0.593939393939394, | |
| "grad_norm": 2.676713705062866, | |
| "learning_rate": 3.9115646258503405e-06, | |
| "logits/chosen": 0.018811197951436043, | |
| "logits/rejected": 0.04601475968956947, | |
| "logps/chosen": -87.0106430053711, | |
| "logps/rejected": -95.17454528808594, | |
| "loss": 0.5488, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": -0.2954186499118805, | |
| "rewards/margins": 0.5057048797607422, | |
| "rewards/rejected": -0.8011234998703003, | |
| "step": 49 | |
| }, | |
| { | |
| "epoch": 0.6060606060606061, | |
| "grad_norm": 2.592785120010376, | |
| "learning_rate": 3.877551020408164e-06, | |
| "logits/chosen": -0.20389169454574585, | |
| "logits/rejected": 0.15457789599895477, | |
| "logps/chosen": -84.461181640625, | |
| "logps/rejected": -94.97840881347656, | |
| "loss": 0.5292, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": -0.47971272468566895, | |
| "rewards/margins": 0.5050724148750305, | |
| "rewards/rejected": -0.9847851991653442, | |
| "step": 50 | |
| }, | |
| { | |
| "epoch": 0.6181818181818182, | |
| "grad_norm": 1.5183725357055664, | |
| "learning_rate": 3.843537414965986e-06, | |
| "logits/chosen": 0.014184877276420593, | |
| "logits/rejected": -0.06476259976625443, | |
| "logps/chosen": -84.92556762695312, | |
| "logps/rejected": -98.85646057128906, | |
| "loss": 0.4731, | |
| "rewards/accuracies": 0.625, | |
| "rewards/chosen": -0.3806217312812805, | |
| "rewards/margins": 0.7172415256500244, | |
| "rewards/rejected": -1.0978631973266602, | |
| "step": 51 | |
| }, | |
| { | |
| "epoch": 0.6303030303030303, | |
| "grad_norm": 2.276607036590576, | |
| "learning_rate": 3.80952380952381e-06, | |
| "logits/chosen": -0.08753400295972824, | |
| "logits/rejected": -0.06234829127788544, | |
| "logps/chosen": -88.94854736328125, | |
| "logps/rejected": -100.52336120605469, | |
| "loss": 0.3788, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -0.18584024906158447, | |
| "rewards/margins": 0.8677559494972229, | |
| "rewards/rejected": -1.0535962581634521, | |
| "step": 52 | |
| }, | |
| { | |
| "epoch": 0.6424242424242425, | |
| "grad_norm": 1.763636589050293, | |
| "learning_rate": 3.7755102040816327e-06, | |
| "logits/chosen": -0.09077857434749603, | |
| "logits/rejected": -0.15150956809520721, | |
| "logps/chosen": -87.24674224853516, | |
| "logps/rejected": -104.22818756103516, | |
| "loss": 0.377, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": -0.0016011148691177368, | |
| "rewards/margins": 0.9522408843040466, | |
| "rewards/rejected": -0.953842043876648, | |
| "step": 53 | |
| }, | |
| { | |
| "epoch": 0.6545454545454545, | |
| "grad_norm": 2.4671847820281982, | |
| "learning_rate": 3.7414965986394563e-06, | |
| "logits/chosen": -0.016229011118412018, | |
| "logits/rejected": -0.06034049391746521, | |
| "logps/chosen": -84.28144073486328, | |
| "logps/rejected": -105.29624938964844, | |
| "loss": 0.3391, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -0.5438804626464844, | |
| "rewards/margins": 1.0644160509109497, | |
| "rewards/rejected": -1.6082963943481445, | |
| "step": 54 | |
| }, | |
| { | |
| "epoch": 0.6666666666666666, | |
| "grad_norm": 1.968894124031067, | |
| "learning_rate": 3.7074829931972796e-06, | |
| "logits/chosen": -0.1544817090034485, | |
| "logits/rejected": -0.1875695288181305, | |
| "logps/chosen": -89.4288101196289, | |
| "logps/rejected": -106.10208892822266, | |
| "loss": 0.4345, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -0.3140699565410614, | |
| "rewards/margins": 0.6972665190696716, | |
| "rewards/rejected": -1.0113364458084106, | |
| "step": 55 | |
| }, | |
| { | |
| "epoch": 0.6787878787878788, | |
| "grad_norm": 2.3655052185058594, | |
| "learning_rate": 3.6734693877551024e-06, | |
| "logits/chosen": -0.2433435171842575, | |
| "logits/rejected": -0.15462861955165863, | |
| "logps/chosen": -89.85462188720703, | |
| "logps/rejected": -101.96684265136719, | |
| "loss": 0.4306, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": -0.44760817289352417, | |
| "rewards/margins": 0.8661949634552002, | |
| "rewards/rejected": -1.3138031959533691, | |
| "step": 56 | |
| }, | |
| { | |
| "epoch": 0.6909090909090909, | |
| "grad_norm": 2.2802484035491943, | |
| "learning_rate": 3.6394557823129257e-06, | |
| "logits/chosen": -0.005502855405211449, | |
| "logits/rejected": -0.03438833728432655, | |
| "logps/chosen": -86.78486633300781, | |
| "logps/rejected": -108.69119262695312, | |
| "loss": 0.3527, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -0.5136454701423645, | |
| "rewards/margins": 1.0334694385528564, | |
| "rewards/rejected": -1.5471149682998657, | |
| "step": 57 | |
| }, | |
| { | |
| "epoch": 0.703030303030303, | |
| "grad_norm": 2.532391309738159, | |
| "learning_rate": 3.6054421768707485e-06, | |
| "logits/chosen": -0.05163462832570076, | |
| "logits/rejected": -0.15335048735141754, | |
| "logps/chosen": -87.60224914550781, | |
| "logps/rejected": -105.06623840332031, | |
| "loss": 0.437, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": -0.7067617774009705, | |
| "rewards/margins": 0.7635065913200378, | |
| "rewards/rejected": -1.4702682495117188, | |
| "step": 58 | |
| }, | |
| { | |
| "epoch": 0.7151515151515152, | |
| "grad_norm": 3.486032485961914, | |
| "learning_rate": 3.5714285714285718e-06, | |
| "logits/chosen": -0.49854233860969543, | |
| "logits/rejected": -0.42069506645202637, | |
| "logps/chosen": -84.32408142089844, | |
| "logps/rejected": -98.28240966796875, | |
| "loss": 0.6605, | |
| "rewards/accuracies": 0.625, | |
| "rewards/chosen": -0.8036391735076904, | |
| "rewards/margins": 0.3057122826576233, | |
| "rewards/rejected": -1.109351396560669, | |
| "step": 59 | |
| }, | |
| { | |
| "epoch": 0.7272727272727273, | |
| "grad_norm": 3.032853364944458, | |
| "learning_rate": 3.537414965986395e-06, | |
| "logits/chosen": -0.31167668104171753, | |
| "logits/rejected": -0.21693065762519836, | |
| "logps/chosen": -82.40815734863281, | |
| "logps/rejected": -104.02395629882812, | |
| "loss": 0.4043, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": -0.5048532485961914, | |
| "rewards/margins": 0.9285488724708557, | |
| "rewards/rejected": -1.433402180671692, | |
| "step": 60 | |
| }, | |
| { | |
| "epoch": 0.7393939393939394, | |
| "grad_norm": 2.9223711490631104, | |
| "learning_rate": 3.503401360544218e-06, | |
| "logits/chosen": -0.28187453746795654, | |
| "logits/rejected": -0.477741003036499, | |
| "logps/chosen": -90.93452453613281, | |
| "logps/rejected": -99.23526000976562, | |
| "loss": 0.5541, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": -0.9466086626052856, | |
| "rewards/margins": 0.38151228427886963, | |
| "rewards/rejected": -1.3281209468841553, | |
| "step": 61 | |
| }, | |
| { | |
| "epoch": 0.7515151515151515, | |
| "grad_norm": 2.4500951766967773, | |
| "learning_rate": 3.469387755102041e-06, | |
| "logits/chosen": -0.8493286371231079, | |
| "logits/rejected": -0.5042667388916016, | |
| "logps/chosen": -85.09526824951172, | |
| "logps/rejected": -102.77671813964844, | |
| "loss": 0.3626, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": -0.7808952331542969, | |
| "rewards/margins": 1.1093262434005737, | |
| "rewards/rejected": -1.8902214765548706, | |
| "step": 62 | |
| }, | |
| { | |
| "epoch": 0.7636363636363637, | |
| "grad_norm": 2.2984867095947266, | |
| "learning_rate": 3.435374149659864e-06, | |
| "logits/chosen": -0.6557090282440186, | |
| "logits/rejected": -0.68489009141922, | |
| "logps/chosen": -86.66493225097656, | |
| "logps/rejected": -99.13359069824219, | |
| "loss": 0.4207, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": -0.8295649290084839, | |
| "rewards/margins": 0.7867836952209473, | |
| "rewards/rejected": -1.6163487434387207, | |
| "step": 63 | |
| }, | |
| { | |
| "epoch": 0.7757575757575758, | |
| "grad_norm": 2.1829628944396973, | |
| "learning_rate": 3.4013605442176872e-06, | |
| "logits/chosen": 0.057308848947286606, | |
| "logits/rejected": -0.05564488470554352, | |
| "logps/chosen": -96.36569213867188, | |
| "logps/rejected": -111.1490478515625, | |
| "loss": 0.4997, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": -0.7329980134963989, | |
| "rewards/margins": 0.6487807631492615, | |
| "rewards/rejected": -1.3817787170410156, | |
| "step": 64 | |
| }, | |
| { | |
| "epoch": 0.7878787878787878, | |
| "grad_norm": 2.3714816570281982, | |
| "learning_rate": 3.3673469387755105e-06, | |
| "logits/chosen": -0.6570330262184143, | |
| "logits/rejected": -0.6089777946472168, | |
| "logps/chosen": -87.11174774169922, | |
| "logps/rejected": -109.03756713867188, | |
| "loss": 0.325, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -0.430930495262146, | |
| "rewards/margins": 1.1537929773330688, | |
| "rewards/rejected": -1.5847234725952148, | |
| "step": 65 | |
| }, | |
| { | |
| "epoch": 0.8, | |
| "grad_norm": 2.326923370361328, | |
| "learning_rate": 3.3333333333333333e-06, | |
| "logits/chosen": -0.4283097982406616, | |
| "logits/rejected": -0.5568526387214661, | |
| "logps/chosen": -82.62942504882812, | |
| "logps/rejected": -104.06118774414062, | |
| "loss": 0.3385, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": -0.24116915464401245, | |
| "rewards/margins": 1.3031015396118164, | |
| "rewards/rejected": -1.5442705154418945, | |
| "step": 66 | |
| }, | |
| { | |
| "epoch": 0.8121212121212121, | |
| "grad_norm": 2.2417383193969727, | |
| "learning_rate": 3.2993197278911566e-06, | |
| "logits/chosen": -0.2863921821117401, | |
| "logits/rejected": -0.34131675958633423, | |
| "logps/chosen": -82.39275360107422, | |
| "logps/rejected": -103.30265808105469, | |
| "loss": 0.3566, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": -0.5457499623298645, | |
| "rewards/margins": 1.191537618637085, | |
| "rewards/rejected": -1.7372875213623047, | |
| "step": 67 | |
| }, | |
| { | |
| "epoch": 0.8242424242424242, | |
| "grad_norm": 1.7097468376159668, | |
| "learning_rate": 3.2653061224489794e-06, | |
| "logits/chosen": -0.8362722992897034, | |
| "logits/rejected": -1.0511568784713745, | |
| "logps/chosen": -83.73776245117188, | |
| "logps/rejected": -99.80606842041016, | |
| "loss": 0.3407, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": -0.3446452021598816, | |
| "rewards/margins": 1.332738995552063, | |
| "rewards/rejected": -1.6773841381072998, | |
| "step": 68 | |
| }, | |
| { | |
| "epoch": 0.8363636363636363, | |
| "grad_norm": 2.3758466243743896, | |
| "learning_rate": 3.231292517006803e-06, | |
| "logits/chosen": -0.5980571508407593, | |
| "logits/rejected": -0.612303614616394, | |
| "logps/chosen": -90.42843627929688, | |
| "logps/rejected": -111.27342987060547, | |
| "loss": 0.3426, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": -0.7758303880691528, | |
| "rewards/margins": 1.0578529834747314, | |
| "rewards/rejected": -1.8336834907531738, | |
| "step": 69 | |
| }, | |
| { | |
| "epoch": 0.8484848484848485, | |
| "grad_norm": 2.9294631481170654, | |
| "learning_rate": 3.1972789115646264e-06, | |
| "logits/chosen": -0.5315291285514832, | |
| "logits/rejected": -0.5597429275512695, | |
| "logps/chosen": -90.48263549804688, | |
| "logps/rejected": -105.86819458007812, | |
| "loss": 0.327, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": -0.6664019823074341, | |
| "rewards/margins": 1.3921841382980347, | |
| "rewards/rejected": -2.0585861206054688, | |
| "step": 70 | |
| }, | |
| { | |
| "epoch": 0.8606060606060606, | |
| "grad_norm": 2.242730140686035, | |
| "learning_rate": 3.1632653061224496e-06, | |
| "logits/chosen": -0.6034741997718811, | |
| "logits/rejected": -0.6490014791488647, | |
| "logps/chosen": -86.84711456298828, | |
| "logps/rejected": -98.25587463378906, | |
| "loss": 0.3063, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": -0.6566754579544067, | |
| "rewards/margins": 1.320617914199829, | |
| "rewards/rejected": -1.9772932529449463, | |
| "step": 71 | |
| }, | |
| { | |
| "epoch": 0.8727272727272727, | |
| "grad_norm": 2.2084603309631348, | |
| "learning_rate": 3.1292517006802725e-06, | |
| "logits/chosen": -0.6398088932037354, | |
| "logits/rejected": -0.6443818211555481, | |
| "logps/chosen": -87.65351104736328, | |
| "logps/rejected": -121.56532287597656, | |
| "loss": 0.2704, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -0.693427324295044, | |
| "rewards/margins": 1.869689702987671, | |
| "rewards/rejected": -2.563117027282715, | |
| "step": 72 | |
| }, | |
| { | |
| "epoch": 0.8848484848484849, | |
| "grad_norm": 2.9377784729003906, | |
| "learning_rate": 3.0952380952380957e-06, | |
| "logits/chosen": -0.9345231652259827, | |
| "logits/rejected": -0.8481395244598389, | |
| "logps/chosen": -90.87952423095703, | |
| "logps/rejected": -111.86048889160156, | |
| "loss": 0.4141, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": -0.99037766456604, | |
| "rewards/margins": 1.1532552242279053, | |
| "rewards/rejected": -2.1436328887939453, | |
| "step": 73 | |
| }, | |
| { | |
| "epoch": 0.896969696969697, | |
|             "grad_norm": null, | |
| "learning_rate": 3.0952380952380957e-06, | |
| "logits/chosen": -0.7441086769104004, | |
| "logits/rejected": -0.5624651312828064, | |
| "logps/chosen": -91.13578796386719, | |
| "logps/rejected": -105.57829284667969, | |
| "loss": 0.5834, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": -1.249877691268921, | |
| "rewards/margins": 0.8648021221160889, | |
| "rewards/rejected": -2.1146798133850098, | |
| "step": 74 | |
| }, | |
| { | |
| "epoch": 0.9090909090909091, | |
| "grad_norm": 2.4748213291168213, | |
| "learning_rate": 3.0612244897959185e-06, | |
| "logits/chosen": -0.8197535276412964, | |
| "logits/rejected": -0.777995228767395, | |
| "logps/chosen": -91.41878509521484, | |
| "logps/rejected": -118.0909423828125, | |
| "loss": 0.2613, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -1.1850990056991577, | |
| "rewards/margins": 1.63706636428833, | |
| "rewards/rejected": -2.8221654891967773, | |
| "step": 75 | |
| }, | |
| { | |
| "epoch": 0.9212121212121213, | |
| "grad_norm": 3.374300003051758, | |
| "learning_rate": 3.027210884353742e-06, | |
| "logits/chosen": -0.9236270785331726, | |
| "logits/rejected": -0.8337404727935791, | |
| "logps/chosen": -96.32296752929688, | |
| "logps/rejected": -125.73896026611328, | |
| "loss": 0.3092, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": -1.3610727787017822, | |
| "rewards/margins": 1.4623136520385742, | |
| "rewards/rejected": -2.8233861923217773, | |
| "step": 76 | |
| }, | |
| { | |
| "epoch": 0.9333333333333333, | |
| "grad_norm": 2.5666401386260986, | |
| "learning_rate": 2.993197278911565e-06, | |
| "logits/chosen": -1.0324219465255737, | |
| "logits/rejected": -0.9907780885696411, | |
| "logps/chosen": -88.73812103271484, | |
| "logps/rejected": -126.26348876953125, | |
| "loss": 0.2357, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": -1.0420153141021729, | |
| "rewards/margins": 1.8817570209503174, | |
| "rewards/rejected": -2.9237725734710693, | |
| "step": 77 | |
| }, | |
| { | |
| "epoch": 0.9454545454545454, | |
| "grad_norm": 4.485978126525879, | |
| "learning_rate": 2.959183673469388e-06, | |
| "logits/chosen": -1.0606157779693604, | |
| "logits/rejected": -0.9346469044685364, | |
| "logps/chosen": -90.07966613769531, | |
| "logps/rejected": -98.59324645996094, | |
| "loss": 0.5322, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": -1.1819792985916138, | |
| "rewards/margins": 0.40667369961738586, | |
| "rewards/rejected": -1.5886529684066772, | |
| "step": 78 | |
| }, | |
| { | |
| "epoch": 0.9575757575757575, | |
| "grad_norm": 2.4757275581359863, | |
| "learning_rate": 2.925170068027211e-06, | |
| "logits/chosen": -0.8977766633033752, | |
| "logits/rejected": -0.924136221408844, | |
| "logps/chosen": -91.82334899902344, | |
| "logps/rejected": -112.15080261230469, | |
| "loss": 0.2769, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -0.6803606152534485, | |
| "rewards/margins": 1.7069063186645508, | |
| "rewards/rejected": -2.3872668743133545, | |
| "step": 79 | |
| }, | |
| { | |
| "epoch": 0.9696969696969697, | |
| "grad_norm": 3.1851117610931396, | |
| "learning_rate": 2.891156462585034e-06, | |
| "logits/chosen": -0.98236083984375, | |
| "logits/rejected": -0.9971530437469482, | |
| "logps/chosen": -86.82650756835938, | |
| "logps/rejected": -117.18161010742188, | |
| "loss": 0.2453, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -0.8689044713973999, | |
| "rewards/margins": 2.010478973388672, | |
| "rewards/rejected": -2.8793835639953613, | |
| "step": 80 | |
| }, | |
| { | |
| "epoch": 0.9818181818181818, | |
| "grad_norm": 3.0963189601898193, | |
| "learning_rate": 2.8571428571428573e-06, | |
| "logits/chosen": -1.145169973373413, | |
| "logits/rejected": -1.1914927959442139, | |
| "logps/chosen": -96.40525817871094, | |
| "logps/rejected": -118.5788345336914, | |
| "loss": 0.3285, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": -1.5041391849517822, | |
| "rewards/margins": 1.3836085796356201, | |
| "rewards/rejected": -2.8877477645874023, | |
| "step": 81 | |
| }, | |
| { | |
| "epoch": 0.9939393939393939, | |
| "grad_norm": 2.4315578937530518, | |
| "learning_rate": 2.8231292517006805e-06, | |
| "logits/chosen": -1.113650918006897, | |
| "logits/rejected": -1.160982370376587, | |
| "logps/chosen": -95.52598571777344, | |
| "logps/rejected": -122.83209228515625, | |
| "loss": 0.2272, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -1.3614020347595215, | |
| "rewards/margins": 1.9207268953323364, | |
| "rewards/rejected": -3.2821288108825684, | |
| "step": 82 | |
| }, | |
| { | |
| "epoch": 1.0, | |
| "grad_norm": 1.3302698135375977, | |
| "learning_rate": 2.7891156462585034e-06, | |
| "logits/chosen": -1.3629558086395264, | |
| "logits/rejected": -1.263519525527954, | |
| "logps/chosen": -89.43887329101562, | |
| "logps/rejected": -117.10709381103516, | |
| "loss": 0.0766, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -0.4000343680381775, | |
| "rewards/margins": 2.2837576866149902, | |
| "rewards/rejected": -2.6837921142578125, | |
| "step": 83 | |
| }, | |
| { | |
| "epoch": 1.0121212121212122, | |
| "grad_norm": 2.363071918487549, | |
| "learning_rate": 2.7551020408163266e-06, | |
| "logits/chosen": -1.3711086511611938, | |
| "logits/rejected": -1.2426812648773193, | |
| "logps/chosen": -95.48851013183594, | |
| "logps/rejected": -117.56849670410156, | |
| "loss": 0.2692, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": -1.3288049697875977, | |
| "rewards/margins": 1.447179913520813, | |
| "rewards/rejected": -2.775984764099121, | |
| "step": 84 | |
| }, | |
| { | |
| "epoch": 1.0242424242424242, | |
| "grad_norm": 2.7445614337921143, | |
| "learning_rate": 2.7210884353741503e-06, | |
| "logits/chosen": -0.9873888492584229, | |
| "logits/rejected": -1.0433766841888428, | |
| "logps/chosen": -93.86320495605469, | |
| "logps/rejected": -116.3147201538086, | |
| "loss": 0.3151, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -1.4767199754714966, | |
| "rewards/margins": 1.3222770690917969, | |
| "rewards/rejected": -2.798996925354004, | |
| "step": 85 | |
| }, | |
| { | |
| "epoch": 1.0363636363636364, | |
| "grad_norm": 4.278541088104248, | |
| "learning_rate": 2.687074829931973e-06, | |
| "logits/chosen": -1.0000330209732056, | |
| "logits/rejected": -1.0293034315109253, | |
| "logps/chosen": -96.55633544921875, | |
| "logps/rejected": -115.9503173828125, | |
| "loss": 0.354, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": -1.2256011962890625, | |
| "rewards/margins": 1.4643216133117676, | |
| "rewards/rejected": -2.68992280960083, | |
| "step": 86 | |
| }, | |
| { | |
| "epoch": 1.0484848484848486, | |
| "grad_norm": 4.383340835571289, | |
| "learning_rate": 2.6530612244897964e-06, | |
| "logits/chosen": -1.4603015184402466, | |
| "logits/rejected": -1.3601713180541992, | |
| "logps/chosen": -93.01313781738281, | |
| "logps/rejected": -115.10786437988281, | |
| "loss": 0.291, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": -1.329301118850708, | |
| "rewards/margins": 1.7043461799621582, | |
| "rewards/rejected": -3.0336475372314453, | |
| "step": 87 | |
| }, | |
| { | |
| "epoch": 1.0606060606060606, | |
| "grad_norm": 3.752023696899414, | |
| "learning_rate": 2.6190476190476192e-06, | |
| "logits/chosen": -1.0595191717147827, | |
| "logits/rejected": -1.129692554473877, | |
| "logps/chosen": -98.60609436035156, | |
| "logps/rejected": -129.41033935546875, | |
| "loss": 0.2992, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": -1.1922227144241333, | |
| "rewards/margins": 2.3776955604553223, | |
| "rewards/rejected": -3.569918155670166, | |
| "step": 88 | |
| }, | |
| { | |
| "epoch": 1.0727272727272728, | |
| "grad_norm": 1.9620897769927979, | |
| "learning_rate": 2.5850340136054425e-06, | |
| "logits/chosen": -1.139256238937378, | |
| "logits/rejected": -1.2131175994873047, | |
| "logps/chosen": -86.63563537597656, | |
| "logps/rejected": -117.04485321044922, | |
| "loss": 0.217, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -1.0059623718261719, | |
| "rewards/margins": 2.2501814365386963, | |
| "rewards/rejected": -3.256143569946289, | |
| "step": 89 | |
| }, | |
| { | |
| "epoch": 1.084848484848485, | |
| "grad_norm": 5.18034029006958, | |
| "learning_rate": 2.5510204081632657e-06, | |
| "logits/chosen": -1.088368535041809, | |
| "logits/rejected": -1.310705304145813, | |
| "logps/chosen": -88.38562774658203, | |
| "logps/rejected": -115.90390014648438, | |
| "loss": 0.3847, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": -0.9935359358787537, | |
| "rewards/margins": 1.7633311748504639, | |
| "rewards/rejected": -2.7568671703338623, | |
| "step": 90 | |
| }, | |
| { | |
| "epoch": 1.096969696969697, | |
| "grad_norm": 3.5002970695495605, | |
| "learning_rate": 2.5170068027210886e-06, | |
| "logits/chosen": -1.2597205638885498, | |
| "logits/rejected": -1.279348611831665, | |
| "logps/chosen": -96.96222686767578, | |
| "logps/rejected": -126.3550796508789, | |
| "loss": 0.2583, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -1.492140531539917, | |
| "rewards/margins": 1.8623967170715332, | |
| "rewards/rejected": -3.35453724861145, | |
| "step": 91 | |
| }, | |
| { | |
| "epoch": 1.1090909090909091, | |
| "grad_norm": 4.057419300079346, | |
| "learning_rate": 2.482993197278912e-06, | |
| "logits/chosen": -1.357700228691101, | |
| "logits/rejected": -1.3373640775680542, | |
| "logps/chosen": -98.337890625, | |
| "logps/rejected": -108.06039428710938, | |
| "loss": 0.4725, | |
| "rewards/accuracies": 0.625, | |
| "rewards/chosen": -1.7533509731292725, | |
| "rewards/margins": 0.8783911466598511, | |
| "rewards/rejected": -2.631742000579834, | |
| "step": 92 | |
| }, | |
| { | |
| "epoch": 1.121212121212121, | |
| "grad_norm": 1.610083818435669, | |
| "learning_rate": 2.4489795918367347e-06, | |
| "logits/chosen": -1.2951648235321045, | |
| "logits/rejected": -1.405087947845459, | |
| "logps/chosen": -88.3757553100586, | |
| "logps/rejected": -124.96995544433594, | |
| "loss": 0.0841, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -0.8665078282356262, | |
| "rewards/margins": 2.740098476409912, | |
| "rewards/rejected": -3.6066062450408936, | |
| "step": 93 | |
| }, | |
| { | |
| "epoch": 1.1333333333333333, | |
| "grad_norm": 3.589198112487793, | |
| "learning_rate": 2.414965986394558e-06, | |
| "logits/chosen": -1.2180225849151611, | |
| "logits/rejected": -1.3185333013534546, | |
| "logps/chosen": -103.8935317993164, | |
| "logps/rejected": -126.54220581054688, | |
| "loss": 0.3401, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -1.6970596313476562, | |
| "rewards/margins": 1.3950029611587524, | |
| "rewards/rejected": -3.0920627117156982, | |
| "step": 94 | |
| }, | |
| { | |
| "epoch": 1.1454545454545455, | |
| "grad_norm": 1.59670090675354, | |
| "learning_rate": 2.380952380952381e-06, | |
| "logits/chosen": -1.5853278636932373, | |
| "logits/rejected": -1.414257287979126, | |
| "logps/chosen": -89.67636108398438, | |
| "logps/rejected": -134.74940490722656, | |
| "loss": 0.1001, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -0.6401633024215698, | |
| "rewards/margins": 2.851132392883301, | |
| "rewards/rejected": -3.49129581451416, | |
| "step": 95 | |
| }, | |
| { | |
| "epoch": 1.1575757575757575, | |
| "grad_norm": 4.376671314239502, | |
| "learning_rate": 2.3469387755102044e-06, | |
| "logits/chosen": -1.38065767288208, | |
| "logits/rejected": -1.435377836227417, | |
| "logps/chosen": -96.53055572509766, | |
| "logps/rejected": -119.9242935180664, | |
| "loss": 0.4199, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": -1.8974272012710571, | |
| "rewards/margins": 1.4000160694122314, | |
| "rewards/rejected": -3.297443151473999, | |
| "step": 96 | |
| }, | |
| { | |
| "epoch": 1.1696969696969697, | |
| "grad_norm": 2.5332932472229004, | |
| "learning_rate": 2.3129251700680273e-06, | |
| "logits/chosen": -1.302667260169983, | |
| "logits/rejected": -1.3088089227676392, | |
| "logps/chosen": -93.29157257080078, | |
| "logps/rejected": -123.02129364013672, | |
| "loss": 0.234, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -0.987712562084198, | |
| "rewards/margins": 2.1374382972717285, | |
| "rewards/rejected": -3.1251509189605713, | |
| "step": 97 | |
| }, | |
| { | |
| "epoch": 1.1818181818181819, | |
| "grad_norm": 2.5916943550109863, | |
| "learning_rate": 2.2789115646258505e-06, | |
| "logits/chosen": -1.6041796207427979, | |
| "logits/rejected": -1.516028642654419, | |
| "logps/chosen": -93.6436767578125, | |
| "logps/rejected": -125.10226440429688, | |
| "loss": 0.1684, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -1.393079400062561, | |
| "rewards/margins": 2.149301290512085, | |
| "rewards/rejected": -3.5423808097839355, | |
| "step": 98 | |
| }, | |
| { | |
| "epoch": 1.1939393939393939, | |
| "grad_norm": 4.822098731994629, | |
| "learning_rate": 2.244897959183674e-06, | |
| "logits/chosen": -1.5234119892120361, | |
| "logits/rejected": -1.4276056289672852, | |
| "logps/chosen": -98.25959014892578, | |
| "logps/rejected": -122.98600769042969, | |
| "loss": 0.3363, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": -1.4268699884414673, | |
| "rewards/margins": 1.5049833059310913, | |
| "rewards/rejected": -2.9318532943725586, | |
| "step": 99 | |
| }, | |
| { | |
| "epoch": 1.206060606060606, | |
| "grad_norm": 3.437187671661377, | |
| "learning_rate": 2.2108843537414966e-06, | |
| "logits/chosen": -1.6394751071929932, | |
| "logits/rejected": -1.606034517288208, | |
| "logps/chosen": -97.52091217041016, | |
| "logps/rejected": -119.31391906738281, | |
| "loss": 0.2759, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": -1.413924217224121, | |
| "rewards/margins": 1.6457183361053467, | |
| "rewards/rejected": -3.0596425533294678, | |
| "step": 100 | |
| }, | |
| { | |
| "epoch": 1.2181818181818183, | |
| "grad_norm": 3.6992416381835938, | |
| "learning_rate": 2.17687074829932e-06, | |
| "logits/chosen": -1.4257758855819702, | |
| "logits/rejected": -1.4925386905670166, | |
| "logps/chosen": -89.6732177734375, | |
| "logps/rejected": -111.02645111083984, | |
| "loss": 0.3846, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": -1.1798646450042725, | |
| "rewards/margins": 1.4852337837219238, | |
| "rewards/rejected": -2.6650984287261963, | |
| "step": 101 | |
| }, | |
| { | |
| "epoch": 1.2303030303030302, | |
| "grad_norm": 2.463331937789917, | |
| "learning_rate": 2.1428571428571427e-06, | |
| "logits/chosen": -1.5652334690093994, | |
| "logits/rejected": -1.4604835510253906, | |
| "logps/chosen": -97.5364761352539, | |
| "logps/rejected": -120.79109954833984, | |
| "loss": 0.1744, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -1.2595030069351196, | |
| "rewards/margins": 1.9318180084228516, | |
| "rewards/rejected": -3.1913208961486816, | |
| "step": 102 | |
| }, | |
| { | |
| "epoch": 1.2424242424242424, | |
| "grad_norm": 2.5878117084503174, | |
| "learning_rate": 2.1088435374149664e-06, | |
| "logits/chosen": -1.325555682182312, | |
| "logits/rejected": -1.428547978401184, | |
| "logps/chosen": -87.47943878173828, | |
| "logps/rejected": -122.22314453125, | |
| "loss": 0.2459, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -0.502652108669281, | |
| "rewards/margins": 2.3220510482788086, | |
| "rewards/rejected": -2.8247032165527344, | |
| "step": 103 | |
| }, | |
| { | |
| "epoch": 1.2545454545454544, | |
| "grad_norm": 2.1508100032806396, | |
| "learning_rate": 2.0748299319727892e-06, | |
| "logits/chosen": -1.6749191284179688, | |
| "logits/rejected": -1.5175896883010864, | |
| "logps/chosen": -96.2158432006836, | |
| "logps/rejected": -134.41329956054688, | |
| "loss": 0.1496, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -1.7228033542633057, | |
| "rewards/margins": 2.3363349437713623, | |
| "rewards/rejected": -4.059138298034668, | |
| "step": 104 | |
| }, | |
| { | |
| "epoch": 1.2666666666666666, | |
| "grad_norm": 4.549570560455322, | |
| "learning_rate": 2.0408163265306125e-06, | |
| "logits/chosen": -1.583759069442749, | |
| "logits/rejected": -1.595736026763916, | |
| "logps/chosen": -91.94076538085938, | |
| "logps/rejected": -116.45210266113281, | |
| "loss": 0.3024, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -1.4231905937194824, | |
| "rewards/margins": 1.6255862712860107, | |
| "rewards/rejected": -3.048776865005493, | |
| "step": 105 | |
| }, | |
| { | |
| "epoch": 1.2787878787878788, | |
| "grad_norm": 3.135284185409546, | |
| "learning_rate": 2.0068027210884353e-06, | |
| "logits/chosen": -1.4998533725738525, | |
| "logits/rejected": -1.4802157878875732, | |
| "logps/chosen": -97.09971618652344, | |
| "logps/rejected": -123.02347564697266, | |
| "loss": 0.3073, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -1.9578745365142822, | |
| "rewards/margins": 1.374443769454956, | |
| "rewards/rejected": -3.3323183059692383, | |
| "step": 106 | |
| }, | |
| { | |
| "epoch": 1.290909090909091, | |
| "grad_norm": 2.2556681632995605, | |
| "learning_rate": 1.9727891156462586e-06, | |
| "logits/chosen": -1.5938842296600342, | |
| "logits/rejected": -1.5801693201065063, | |
| "logps/chosen": -96.47604370117188, | |
| "logps/rejected": -124.70124053955078, | |
| "loss": 0.1763, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -1.65388822555542, | |
| "rewards/margins": 1.8723074197769165, | |
| "rewards/rejected": -3.526195526123047, | |
| "step": 107 | |
| }, | |
| { | |
| "epoch": 1.303030303030303, | |
| "grad_norm": 2.146841287612915, | |
| "learning_rate": 1.938775510204082e-06, | |
| "logits/chosen": -1.673741102218628, | |
| "logits/rejected": -1.6758308410644531, | |
| "logps/chosen": -95.465087890625, | |
| "logps/rejected": -127.52227020263672, | |
| "loss": 0.123, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -1.3014137744903564, | |
| "rewards/margins": 2.439565658569336, | |
| "rewards/rejected": -3.7409796714782715, | |
| "step": 108 | |
| }, | |
| { | |
| "epoch": 1.3151515151515152, | |
| "grad_norm": 3.1009531021118164, | |
| "learning_rate": 1.904761904761905e-06, | |
| "logits/chosen": -1.7371501922607422, | |
| "logits/rejected": -1.6622573137283325, | |
| "logps/chosen": -95.71591186523438, | |
| "logps/rejected": -119.20672607421875, | |
| "loss": 0.2911, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -1.3894450664520264, | |
| "rewards/margins": 1.329725742340088, | |
| "rewards/rejected": -2.7191710472106934, | |
| "step": 109 | |
| }, | |
| { | |
| "epoch": 1.3272727272727272, | |
| "grad_norm": 2.3910417556762695, | |
| "learning_rate": 1.8707482993197282e-06, | |
| "logits/chosen": -1.6049320697784424, | |
| "logits/rejected": -1.6519778966903687, | |
| "logps/chosen": -97.65145111083984, | |
| "logps/rejected": -135.92715454101562, | |
| "loss": 0.2255, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": -1.2323026657104492, | |
| "rewards/margins": 2.811933755874634, | |
| "rewards/rejected": -4.044236183166504, | |
| "step": 110 | |
| }, | |
| { | |
| "epoch": 1.3393939393939394, | |
| "grad_norm": 3.1409387588500977, | |
| "learning_rate": 1.8367346938775512e-06, | |
| "logits/chosen": -1.7363131046295166, | |
| "logits/rejected": -1.609194278717041, | |
| "logps/chosen": -98.52182006835938, | |
| "logps/rejected": -124.78396606445312, | |
| "loss": 0.322, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": -1.5491598844528198, | |
| "rewards/margins": 1.6308727264404297, | |
| "rewards/rejected": -3.180032730102539, | |
| "step": 111 | |
| }, | |
| { | |
| "epoch": 1.3515151515151516, | |
| "grad_norm": 3.7841951847076416, | |
| "learning_rate": 1.8027210884353743e-06, | |
| "logits/chosen": -1.6172230243682861, | |
| "logits/rejected": -1.550255298614502, | |
| "logps/chosen": -92.85408020019531, | |
| "logps/rejected": -112.62004089355469, | |
| "loss": 0.2447, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -0.998050332069397, | |
| "rewards/margins": 1.8484382629394531, | |
| "rewards/rejected": -2.8464884757995605, | |
| "step": 112 | |
| }, | |
| { | |
| "epoch": 1.3636363636363638, | |
| "grad_norm": 1.7370332479476929, | |
| "learning_rate": 1.7687074829931975e-06, | |
| "logits/chosen": -1.7049037218093872, | |
| "logits/rejected": -1.7044804096221924, | |
| "logps/chosen": -86.98583984375, | |
| "logps/rejected": -121.7904052734375, | |
| "loss": 0.1767, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": -0.86982661485672, | |
| "rewards/margins": 2.5970449447631836, | |
| "rewards/rejected": -3.466871500015259, | |
| "step": 113 | |
| }, | |
| { | |
| "epoch": 1.3757575757575757, | |
| "grad_norm": 4.671180248260498, | |
| "learning_rate": 1.7346938775510206e-06, | |
| "logits/chosen": -1.6064000129699707, | |
| "logits/rejected": -1.7420430183410645, | |
| "logps/chosen": -103.08202362060547, | |
| "logps/rejected": -126.36719512939453, | |
| "loss": 0.4506, | |
| "rewards/accuracies": 0.625, | |
| "rewards/chosen": -2.119563579559326, | |
| "rewards/margins": 1.3957490921020508, | |
| "rewards/rejected": -3.515312671661377, | |
| "step": 114 | |
| }, | |
| { | |
| "epoch": 1.387878787878788, | |
| "grad_norm": 2.891258478164673, | |
| "learning_rate": 1.7006802721088436e-06, | |
| "logits/chosen": -1.7240793704986572, | |
| "logits/rejected": -1.7777044773101807, | |
| "logps/chosen": -88.76716613769531, | |
| "logps/rejected": -127.64596557617188, | |
| "loss": 0.1338, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -0.8680980801582336, | |
| "rewards/margins": 2.9895691871643066, | |
| "rewards/rejected": -3.8576672077178955, | |
| "step": 115 | |
| }, | |
| { | |
| "epoch": 1.4, | |
| "grad_norm": 3.575765371322632, | |
| "learning_rate": 1.6666666666666667e-06, | |
| "logits/chosen": -1.6597156524658203, | |
| "logits/rejected": -1.6605198383331299, | |
| "logps/chosen": -88.46395874023438, | |
| "logps/rejected": -116.02509307861328, | |
| "loss": 0.2241, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -0.8981451988220215, | |
| "rewards/margins": 2.229868173599243, | |
| "rewards/rejected": -3.1280133724212646, | |
| "step": 116 | |
| }, | |
| { | |
| "epoch": 1.412121212121212, | |
| "grad_norm": 1.8933932781219482, | |
| "learning_rate": 1.6326530612244897e-06, | |
| "logits/chosen": -1.6874178647994995, | |
| "logits/rejected": -1.6887404918670654, | |
| "logps/chosen": -98.41668701171875, | |
| "logps/rejected": -132.19210815429688, | |
| "loss": 0.1343, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -1.6015653610229492, | |
| "rewards/margins": 2.4682466983795166, | |
| "rewards/rejected": -4.069811820983887, | |
| "step": 117 | |
| }, | |
| { | |
| "epoch": 1.4242424242424243, | |
| "grad_norm": 1.5824294090270996, | |
| "learning_rate": 1.5986394557823132e-06, | |
| "logits/chosen": -1.7633352279663086, | |
| "logits/rejected": -1.6844078302383423, | |
| "logps/chosen": -91.18302917480469, | |
| "logps/rejected": -133.2034912109375, | |
| "loss": 0.1243, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -1.005885124206543, | |
| "rewards/margins": 3.0340609550476074, | |
| "rewards/rejected": -4.03994607925415, | |
| "step": 118 | |
| }, | |
| { | |
| "epoch": 1.4363636363636363, | |
| "grad_norm": 2.5562682151794434, | |
| "learning_rate": 1.5646258503401362e-06, | |
| "logits/chosen": -1.7930512428283691, | |
| "logits/rejected": -1.770517110824585, | |
| "logps/chosen": -101.37489318847656, | |
| "logps/rejected": -118.3425064086914, | |
| "loss": 0.3446, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": -2.229304313659668, | |
| "rewards/margins": 1.3117181062698364, | |
| "rewards/rejected": -3.541022539138794, | |
| "step": 119 | |
| }, | |
| { | |
| "epoch": 1.4484848484848485, | |
| "grad_norm": 3.2759103775024414, | |
| "learning_rate": 1.5306122448979593e-06, | |
| "logits/chosen": -1.6340594291687012, | |
| "logits/rejected": -1.6577837467193604, | |
| "logps/chosen": -85.42779541015625, | |
| "logps/rejected": -113.1285171508789, | |
| "loss": 0.2232, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -0.880210280418396, | |
| "rewards/margins": 2.2023112773895264, | |
| "rewards/rejected": -3.082521677017212, | |
| "step": 120 | |
| }, | |
| { | |
| "epoch": 1.4606060606060607, | |
| "grad_norm": 1.992604374885559, | |
| "learning_rate": 1.4965986394557825e-06, | |
| "logits/chosen": -1.3893667459487915, | |
| "logits/rejected": -1.6112309694290161, | |
| "logps/chosen": -89.03352355957031, | |
| "logps/rejected": -123.7257308959961, | |
| "loss": 0.215, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": -0.9930751919746399, | |
| "rewards/margins": 2.7060134410858154, | |
| "rewards/rejected": -3.6990885734558105, | |
| "step": 121 | |
| }, | |
| { | |
| "epoch": 1.4727272727272727, | |
| "grad_norm": 1.8438624143600464, | |
| "learning_rate": 1.4625850340136056e-06, | |
| "logits/chosen": -1.597573161125183, | |
| "logits/rejected": -1.6522390842437744, | |
| "logps/chosen": -95.18002319335938, | |
| "logps/rejected": -149.4505615234375, | |
| "loss": 0.1243, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -1.2069129943847656, | |
| "rewards/margins": 3.2835397720336914, | |
| "rewards/rejected": -4.490452766418457, | |
| "step": 122 | |
| }, | |
| { | |
| "epoch": 1.4848484848484849, | |
| "grad_norm": 2.364157199859619, | |
| "learning_rate": 1.4285714285714286e-06, | |
| "logits/chosen": -1.5696862936019897, | |
| "logits/rejected": -1.589264154434204, | |
| "logps/chosen": -98.0048828125, | |
| "logps/rejected": -129.3928680419922, | |
| "loss": 0.1642, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -1.5141558647155762, | |
| "rewards/margins": 2.3787457942962646, | |
| "rewards/rejected": -3.892901659011841, | |
| "step": 123 | |
| }, | |
| { | |
| "epoch": 1.496969696969697, | |
| "grad_norm": 1.409197211265564, | |
| "learning_rate": 1.3945578231292517e-06, | |
| "logits/chosen": -1.530297040939331, | |
| "logits/rejected": -1.5341331958770752, | |
| "logps/chosen": -97.44312286376953, | |
| "logps/rejected": -143.15103149414062, | |
| "loss": 0.1037, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -1.2367117404937744, | |
| "rewards/margins": 3.409477472305298, | |
| "rewards/rejected": -4.646188735961914, | |
| "step": 124 | |
| }, | |
| { | |
| "epoch": 1.509090909090909, | |
| "grad_norm": 1.676865577697754, | |
| "learning_rate": 1.3605442176870751e-06, | |
| "logits/chosen": -1.8658342361450195, | |
| "logits/rejected": -1.7735984325408936, | |
| "logps/chosen": -95.08421325683594, | |
| "logps/rejected": -126.56539154052734, | |
| "loss": 0.1224, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -1.3045662641525269, | |
| "rewards/margins": 2.744955539703369, | |
| "rewards/rejected": -4.0495219230651855, | |
| "step": 125 | |
| }, | |
| { | |
| "epoch": 1.5212121212121212, | |
| "grad_norm": 1.2884773015975952, | |
| "learning_rate": 1.3265306122448982e-06, | |
| "logits/chosen": -1.664272427558899, | |
| "logits/rejected": -1.6544904708862305, | |
| "logps/chosen": -91.6380615234375, | |
| "logps/rejected": -132.34695434570312, | |
| "loss": 0.1712, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": -1.1254020929336548, | |
| "rewards/margins": 2.629539966583252, | |
| "rewards/rejected": -3.754941701889038, | |
| "step": 126 | |
| }, | |
| { | |
| "epoch": 1.5333333333333332, | |
| "grad_norm": 1.5515812635421753, | |
| "learning_rate": 1.2925170068027212e-06, | |
| "logits/chosen": -1.6638004779815674, | |
| "logits/rejected": -1.7308552265167236, | |
| "logps/chosen": -90.78031921386719, | |
| "logps/rejected": -127.66264343261719, | |
| "loss": 0.1479, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -1.2343765497207642, | |
| "rewards/margins": 2.645479202270508, | |
| "rewards/rejected": -3.8798556327819824, | |
| "step": 127 | |
| }, | |
| { | |
| "epoch": 1.5454545454545454, | |
| "grad_norm": 3.9256937503814697, | |
| "learning_rate": 1.2585034013605443e-06, | |
| "logits/chosen": -1.8101837635040283, | |
| "logits/rejected": -1.7593592405319214, | |
| "logps/chosen": -96.83201599121094, | |
| "logps/rejected": -118.23445129394531, | |
| "loss": 0.2606, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -2.00610613822937, | |
| "rewards/margins": 1.5677779912948608, | |
| "rewards/rejected": -3.5738840103149414, | |
| "step": 128 | |
| }, | |
| { | |
| "epoch": 1.5575757575757576, | |
| "grad_norm": 3.4023656845092773, | |
| "learning_rate": 1.2244897959183673e-06, | |
| "logits/chosen": -1.6174266338348389, | |
| "logits/rejected": -1.5729625225067139, | |
| "logps/chosen": -101.54139709472656, | |
| "logps/rejected": -124.65895080566406, | |
| "loss": 0.2789, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -2.163264036178589, | |
| "rewards/margins": 1.4744569063186646, | |
| "rewards/rejected": -3.637721061706543, | |
| "step": 129 | |
| }, | |
| { | |
| "epoch": 1.5696969696969698, | |
| "grad_norm": 8.019857406616211, | |
| "learning_rate": 1.1904761904761906e-06, | |
| "logits/chosen": -1.9090921878814697, | |
| "logits/rejected": -1.7606130838394165, | |
| "logps/chosen": -98.2342300415039, | |
| "logps/rejected": -117.97206115722656, | |
| "loss": 0.434, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": -2.0541114807128906, | |
| "rewards/margins": 1.012056589126587, | |
| "rewards/rejected": -3.0661683082580566, | |
| "step": 130 | |
| }, | |
| { | |
| "epoch": 1.5818181818181818, | |
| "grad_norm": 2.7587668895721436, | |
| "learning_rate": 1.1564625850340136e-06, | |
| "logits/chosen": -1.561621904373169, | |
| "logits/rejected": -1.6865174770355225, | |
| "logps/chosen": -96.38162994384766, | |
| "logps/rejected": -122.79660034179688, | |
| "loss": 0.2854, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": -1.4686121940612793, | |
| "rewards/margins": 2.149606466293335, | |
| "rewards/rejected": -3.618218421936035, | |
| "step": 131 | |
| }, | |
| { | |
| "epoch": 1.593939393939394, | |
| "grad_norm": 2.6992170810699463, | |
| "learning_rate": 1.122448979591837e-06, | |
| "logits/chosen": -1.7638099193572998, | |
| "logits/rejected": -1.6800596714019775, | |
| "logps/chosen": -100.1748275756836, | |
| "logps/rejected": -128.0324249267578, | |
| "loss": 0.1708, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -2.1165099143981934, | |
| "rewards/margins": 2.189871311187744, | |
| "rewards/rejected": -4.3063812255859375, | |
| "step": 132 | |
| }, | |
| { | |
| "epoch": 1.606060606060606, | |
| "grad_norm": 4.169434070587158, | |
| "learning_rate": 1.08843537414966e-06, | |
| "logits/chosen": -1.5525397062301636, | |
| "logits/rejected": -1.6112422943115234, | |
| "logps/chosen": -100.83575439453125, | |
| "logps/rejected": -128.28770446777344, | |
| "loss": 0.2272, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -1.8389804363250732, | |
| "rewards/margins": 2.285094738006592, | |
| "rewards/rejected": -4.124075412750244, | |
| "step": 133 | |
| }, | |
| { | |
| "epoch": 1.6181818181818182, | |
| "grad_norm": 4.582830905914307, | |
| "learning_rate": 1.0544217687074832e-06, | |
| "logits/chosen": -1.811753511428833, | |
| "logits/rejected": -1.7734105587005615, | |
| "logps/chosen": -99.59288024902344, | |
| "logps/rejected": -122.28144836425781, | |
| "loss": 0.2968, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -2.2128849029541016, | |
| "rewards/margins": 1.3973000049591064, | |
| "rewards/rejected": -3.610184907913208, | |
| "step": 134 | |
| }, | |
| { | |
| "epoch": 1.6303030303030304, | |
| "grad_norm": 3.154338836669922, | |
| "learning_rate": 1.0204081632653063e-06, | |
| "logits/chosen": -1.5578339099884033, | |
| "logits/rejected": -1.6350841522216797, | |
| "logps/chosen": -108.85205078125, | |
| "logps/rejected": -149.6986083984375, | |
| "loss": 0.1819, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -2.148804187774658, | |
| "rewards/margins": 2.981112480163574, | |
| "rewards/rejected": -5.129916191101074, | |
| "step": 135 | |
| }, | |
| { | |
| "epoch": 1.6424242424242426, | |
| "grad_norm": 3.0729594230651855, | |
| "learning_rate": 9.863945578231293e-07, | |
| "logits/chosen": -1.7794138193130493, | |
| "logits/rejected": -1.7279512882232666, | |
| "logps/chosen": -99.13277435302734, | |
| "logps/rejected": -116.2926025390625, | |
| "loss": 0.219, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -1.7788490056991577, | |
| "rewards/margins": 1.6601810455322266, | |
| "rewards/rejected": -3.439030170440674, | |
| "step": 136 | |
| }, | |
| { | |
| "epoch": 1.6545454545454545, | |
| "grad_norm": 4.573628902435303, | |
| "learning_rate": 9.523809523809525e-07, | |
| "logits/chosen": -1.7974026203155518, | |
| "logits/rejected": -1.820143461227417, | |
| "logps/chosen": -94.99871826171875, | |
| "logps/rejected": -131.11312866210938, | |
| "loss": 0.1895, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -1.565184473991394, | |
| "rewards/margins": 2.551455020904541, | |
| "rewards/rejected": -4.116639614105225, | |
| "step": 137 | |
| }, | |
| { | |
| "epoch": 1.6666666666666665, | |
| "grad_norm": 5.590139865875244, | |
| "learning_rate": 9.183673469387756e-07, | |
| "logits/chosen": -1.8338170051574707, | |
| "logits/rejected": -1.789681077003479, | |
| "logps/chosen": -101.36553955078125, | |
| "logps/rejected": -121.39546203613281, | |
| "loss": 0.2882, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": -1.8100769519805908, | |
| "rewards/margins": 1.7394788265228271, | |
| "rewards/rejected": -3.549555778503418, | |
| "step": 138 | |
| }, | |
| { | |
| "epoch": 1.6787878787878787, | |
| "grad_norm": 3.2985379695892334, | |
| "learning_rate": 8.843537414965988e-07, | |
| "logits/chosen": -1.7100659608840942, | |
| "logits/rejected": -1.621692180633545, | |
| "logps/chosen": -100.19327545166016, | |
| "logps/rejected": -127.35150146484375, | |
| "loss": 0.194, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -2.047415256500244, | |
| "rewards/margins": 2.123518466949463, | |
| "rewards/rejected": -4.170933246612549, | |
| "step": 139 | |
| }, | |
| { | |
| "epoch": 1.690909090909091, | |
| "grad_norm": 2.877264976501465, | |
| "learning_rate": 8.503401360544218e-07, | |
| "logits/chosen": -1.6704832315444946, | |
| "logits/rejected": -1.6993167400360107, | |
| "logps/chosen": -112.37958526611328, | |
| "logps/rejected": -143.90924072265625, | |
| "loss": 0.2125, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": -2.7939703464508057, | |
| "rewards/margins": 2.070646286010742, | |
| "rewards/rejected": -4.864616870880127, | |
| "step": 140 | |
| }, | |
| { | |
| "epoch": 1.7030303030303031, | |
| "grad_norm": 1.3344841003417969, | |
| "learning_rate": 8.163265306122449e-07, | |
| "logits/chosen": -1.6487529277801514, | |
| "logits/rejected": -1.616715669631958, | |
| "logps/chosen": -89.38975524902344, | |
| "logps/rejected": -136.5968780517578, | |
| "loss": 0.0842, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -0.927839994430542, | |
| "rewards/margins": 3.7124204635620117, | |
| "rewards/rejected": -4.640260219573975, | |
| "step": 141 | |
| }, | |
| { | |
| "epoch": 1.7151515151515153, | |
| "grad_norm": 2.1570088863372803, | |
| "learning_rate": 7.823129251700681e-07, | |
| "logits/chosen": -1.778488039970398, | |
| "logits/rejected": -1.7679212093353271, | |
| "logps/chosen": -93.74765014648438, | |
| "logps/rejected": -121.31202697753906, | |
| "loss": 0.1657, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -1.2899467945098877, | |
| "rewards/margins": 2.4230360984802246, | |
| "rewards/rejected": -3.712982654571533, | |
| "step": 142 | |
| }, | |
| { | |
| "epoch": 1.7272727272727273, | |
| "grad_norm": 2.719651460647583, | |
| "learning_rate": 7.482993197278913e-07, | |
| "logits/chosen": -1.889997124671936, | |
| "logits/rejected": -1.7798057794570923, | |
| "logps/chosen": -101.00578308105469, | |
| "logps/rejected": -145.29190063476562, | |
| "loss": 0.1061, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -1.5723130702972412, | |
| "rewards/margins": 3.213724136352539, | |
| "rewards/rejected": -4.786037445068359, | |
| "step": 143 | |
| }, | |
| { | |
| "epoch": 1.7393939393939393, | |
| "grad_norm": 2.279040813446045, | |
| "learning_rate": 7.142857142857143e-07, | |
| "logits/chosen": -1.5846905708312988, | |
| "logits/rejected": -1.7222152948379517, | |
| "logps/chosen": -92.99658203125, | |
| "logps/rejected": -149.00729370117188, | |
| "loss": 0.0996, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -1.1021888256072998, | |
| "rewards/margins": 3.8253209590911865, | |
| "rewards/rejected": -4.927509784698486, | |
| "step": 144 | |
| }, | |
| { | |
| "epoch": 1.7515151515151515, | |
| "grad_norm": 3.1770360469818115, | |
| "learning_rate": 6.802721088435376e-07, | |
| "logits/chosen": -1.4982367753982544, | |
| "logits/rejected": -1.6536548137664795, | |
| "logps/chosen": -92.8003921508789, | |
| "logps/rejected": -129.96063232421875, | |
| "loss": 0.2177, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": -1.4197006225585938, | |
| "rewards/margins": 2.827155590057373, | |
| "rewards/rejected": -4.246856212615967, | |
| "step": 145 | |
| }, | |
| { | |
| "epoch": 1.7636363636363637, | |
| "grad_norm": 2.86728572845459, | |
| "learning_rate": 6.462585034013606e-07, | |
| "logits/chosen": -1.6697306632995605, | |
| "logits/rejected": -1.7117178440093994, | |
| "logps/chosen": -105.58975219726562, | |
| "logps/rejected": -140.0699005126953, | |
| "loss": 0.196, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -2.083402156829834, | |
| "rewards/margins": 2.635946750640869, | |
| "rewards/rejected": -4.719348907470703, | |
| "step": 146 | |
| }, | |
| { | |
| "epoch": 1.7757575757575759, | |
| "grad_norm": 3.762331008911133, | |
| "learning_rate": 6.122448979591837e-07, | |
| "logits/chosen": -1.9138250350952148, | |
| "logits/rejected": -1.9125970602035522, | |
| "logps/chosen": -94.5777587890625, | |
| "logps/rejected": -125.44267272949219, | |
| "loss": 0.1805, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -1.2487887144088745, | |
| "rewards/margins": 2.5635106563568115, | |
| "rewards/rejected": -3.8122997283935547, | |
| "step": 147 | |
| }, | |
| { | |
| "epoch": 1.7878787878787878, | |
| "grad_norm": 2.9627037048339844, | |
| "learning_rate": 5.782312925170068e-07, | |
| "logits/chosen": -1.6655495166778564, | |
| "logits/rejected": -1.669325351715088, | |
| "logps/chosen": -110.57474517822266, | |
| "logps/rejected": -125.62745666503906, | |
| "loss": 0.3147, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": -2.6180191040039062, | |
| "rewards/margins": 1.3960981369018555, | |
| "rewards/rejected": -4.014117240905762, | |
| "step": 148 | |
| }, | |
| { | |
| "epoch": 1.8, | |
| "grad_norm": 2.934206247329712, | |
| "learning_rate": 5.4421768707483e-07, | |
| "logits/chosen": -1.6896376609802246, | |
| "logits/rejected": -1.7967206239700317, | |
| "logps/chosen": -103.97714233398438, | |
| "logps/rejected": -150.354248046875, | |
| "loss": 0.1075, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -2.2765142917633057, | |
| "rewards/margins": 3.3366103172302246, | |
| "rewards/rejected": -5.613123893737793, | |
| "step": 149 | |
| }, | |
| { | |
| "epoch": 1.812121212121212, | |
| "grad_norm": 2.11407208442688, | |
| "learning_rate": 5.102040816326531e-07, | |
| "logits/chosen": -1.774599552154541, | |
| "logits/rejected": -1.7382519245147705, | |
| "logps/chosen": -94.33448791503906, | |
| "logps/rejected": -130.95513916015625, | |
| "loss": 0.1393, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -1.2320460081100464, | |
| "rewards/margins": 2.8650190830230713, | |
| "rewards/rejected": -4.097064971923828, | |
| "step": 150 | |
| }, | |
| { | |
| "epoch": 1.8242424242424242, | |
| "grad_norm": 1.7938166856765747, | |
| "learning_rate": 4.7619047619047623e-07, | |
| "logits/chosen": -1.6386427879333496, | |
| "logits/rejected": -1.734755277633667, | |
| "logps/chosen": -99.16139221191406, | |
| "logps/rejected": -138.26210021972656, | |
| "loss": 0.0658, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -1.9007340669631958, | |
| "rewards/margins": 2.9712603092193604, | |
| "rewards/rejected": -4.871994972229004, | |
| "step": 151 | |
| }, | |
| { | |
| "epoch": 1.8363636363636364, | |
| "grad_norm": 2.793008804321289, | |
| "learning_rate": 4.421768707482994e-07, | |
| "logits/chosen": -1.838673710823059, | |
| "logits/rejected": -1.7541086673736572, | |
| "logps/chosen": -95.26689910888672, | |
| "logps/rejected": -133.28399658203125, | |
| "loss": 0.1334, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -1.4139292240142822, | |
| "rewards/margins": 2.946502208709717, | |
| "rewards/rejected": -4.360431671142578, | |
| "step": 152 | |
| }, | |
| { | |
| "epoch": 1.8484848484848486, | |
| "grad_norm": 2.561596393585205, | |
| "learning_rate": 4.0816326530612243e-07, | |
| "logits/chosen": -1.7521519660949707, | |
| "logits/rejected": -1.7829943895339966, | |
| "logps/chosen": -95.3616714477539, | |
| "logps/rejected": -145.51622009277344, | |
| "loss": 0.0941, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -1.6735217571258545, | |
| "rewards/margins": 3.370741605758667, | |
| "rewards/rejected": -5.04426383972168, | |
| "step": 153 | |
| }, | |
| { | |
| "epoch": 1.8606060606060606, | |
| "grad_norm": 5.654742240905762, | |
| "learning_rate": 3.7414965986394563e-07, | |
| "logits/chosen": -1.8284450769424438, | |
| "logits/rejected": -1.8849406242370605, | |
| "logps/chosen": -93.88221740722656, | |
| "logps/rejected": -123.63079833984375, | |
| "loss": 0.2585, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": -1.3463689088821411, | |
| "rewards/margins": 2.314800977706909, | |
| "rewards/rejected": -3.66117000579834, | |
| "step": 154 | |
| }, | |
| { | |
| "epoch": 1.8727272727272726, | |
| "grad_norm": 3.4941039085388184, | |
| "learning_rate": 3.401360544217688e-07, | |
| "logits/chosen": -1.8094770908355713, | |
| "logits/rejected": -1.8442440032958984, | |
| "logps/chosen": -102.26832580566406, | |
| "logps/rejected": -135.0662841796875, | |
| "loss": 0.2172, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": -1.8880977630615234, | |
| "rewards/margins": 2.399691343307495, | |
| "rewards/rejected": -4.287789344787598, | |
| "step": 155 | |
| }, | |
| { | |
| "epoch": 1.8848484848484848, | |
| "grad_norm": 7.033143043518066, | |
| "learning_rate": 3.0612244897959183e-07, | |
| "logits/chosen": -1.8748410940170288, | |
| "logits/rejected": -1.8568613529205322, | |
| "logps/chosen": -91.27564239501953, | |
| "logps/rejected": -110.9326171875, | |
| "loss": 0.5425, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": -1.5733274221420288, | |
| "rewards/margins": 1.3791325092315674, | |
| "rewards/rejected": -2.9524598121643066, | |
| "step": 156 | |
| }, | |
| { | |
| "epoch": 1.896969696969697, | |
| "grad_norm": 4.322800159454346, | |
| "learning_rate": 2.72108843537415e-07, | |
| "logits/chosen": -1.6863696575164795, | |
| "logits/rejected": -1.7218561172485352, | |
| "logps/chosen": -92.07785034179688, | |
| "logps/rejected": -128.896240234375, | |
| "loss": 0.197, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": -1.2870597839355469, | |
| "rewards/margins": 2.7534267902374268, | |
| "rewards/rejected": -4.040486812591553, | |
| "step": 157 | |
| }, | |
| { | |
| "epoch": 1.9090909090909092, | |
| "grad_norm": 5.76566219329834, | |
| "learning_rate": 2.3809523809523811e-07, | |
| "logits/chosen": -1.7858214378356934, | |
| "logits/rejected": -1.7693607807159424, | |
| "logps/chosen": -99.64199829101562, | |
| "logps/rejected": -123.77627563476562, | |
| "loss": 0.3982, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": -2.0218448638916016, | |
| "rewards/margins": 1.6399569511413574, | |
| "rewards/rejected": -3.66180157661438, | |
| "step": 158 | |
| }, | |
| { | |
| "epoch": 1.9212121212121214, | |
| "grad_norm": 5.00474739074707, | |
| "learning_rate": 2.0408163265306121e-07, | |
| "logits/chosen": -1.7164349555969238, | |
| "logits/rejected": -1.7813513278961182, | |
| "logps/chosen": -98.73097229003906, | |
| "logps/rejected": -117.31221008300781, | |
| "loss": 0.3695, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": -1.9404692649841309, | |
| "rewards/margins": 1.20289146900177, | |
| "rewards/rejected": -3.1433606147766113, | |
| "step": 159 | |
| }, | |
| { | |
| "epoch": 1.9333333333333333, | |
| "grad_norm": 3.1765851974487305, | |
| "learning_rate": 1.700680272108844e-07, | |
| "logits/chosen": -1.8153291940689087, | |
| "logits/rejected": -1.8343583345413208, | |
| "logps/chosen": -96.27183532714844, | |
| "logps/rejected": -123.16444396972656, | |
| "loss": 0.2216, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": -1.5373660326004028, | |
| "rewards/margins": 2.2644829750061035, | |
| "rewards/rejected": -3.801848888397217, | |
| "step": 160 | |
| }, | |
| { | |
| "epoch": 1.9454545454545453, | |
| "grad_norm": 2.6219356060028076, | |
| "learning_rate": 1.360544217687075e-07, | |
| "logits/chosen": -1.6747124195098877, | |
| "logits/rejected": -1.690348744392395, | |
| "logps/chosen": -104.59769439697266, | |
| "logps/rejected": -125.69312286376953, | |
| "loss": 0.2246, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -1.8719542026519775, | |
| "rewards/margins": 1.6488584280014038, | |
| "rewards/rejected": -3.520812511444092, | |
| "step": 161 | |
| }, | |
| { | |
| "epoch": 1.9575757575757575, | |
| "grad_norm": 3.1105170249938965, | |
| "learning_rate": 1.0204081632653061e-07, | |
| "logits/chosen": -1.9918447732925415, | |
| "logits/rejected": -1.8305941820144653, | |
| "logps/chosen": -93.89464569091797, | |
| "logps/rejected": -129.2601318359375, | |
| "loss": 0.1517, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -1.2167264223098755, | |
| "rewards/margins": 2.7442147731781006, | |
| "rewards/rejected": -3.9609413146972656, | |
| "step": 162 | |
| }, | |
| { | |
| "epoch": 1.9696969696969697, | |
| "grad_norm": 3.005218505859375, | |
| "learning_rate": 6.802721088435375e-08, | |
| "logits/chosen": -1.897176742553711, | |
| "logits/rejected": -1.8900220394134521, | |
| "logps/chosen": -94.99845123291016, | |
| "logps/rejected": -142.2918243408203, | |
| "loss": 0.2873, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": -1.604986310005188, | |
| "rewards/margins": 3.0710902214050293, | |
| "rewards/rejected": -4.676076412200928, | |
| "step": 163 | |
| }, | |
| { | |
| "epoch": 1.981818181818182, | |
| "grad_norm": 2.6735050678253174, | |
| "learning_rate": 3.4013605442176873e-08, | |
| "logits/chosen": -1.938112497329712, | |
| "logits/rejected": -1.9410051107406616, | |
| "logps/chosen": -95.70248413085938, | |
| "logps/rejected": -139.81271362304688, | |
| "loss": 0.1507, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -1.3574395179748535, | |
| "rewards/margins": 3.3296563625335693, | |
| "rewards/rejected": -4.687096118927002, | |
| "step": 164 | |
| } | |
| ], | |
| "logging_steps": 1, | |
| "max_steps": 164, | |
| "num_input_tokens_seen": 0, | |
| "num_train_epochs": 2, | |
| "save_steps": 1650, | |
| "stateful_callbacks": { | |
| "TrainerControl": { | |
| "args": { | |
| "should_epoch_stop": false, | |
| "should_evaluate": false, | |
| "should_log": false, | |
| "should_save": true, | |
| "should_training_stop": true | |
| }, | |
| "attributes": {} | |
| } | |
| }, | |
| "total_flos": 0.0, | |
| "train_batch_size": 2, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |