{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 20.0,
  "eval_steps": 500,
  "global_step": 580,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.3448275862068966,
      "grad_norm": 0.5030671954154968,
      "learning_rate": 8.620689655172415e-07,
      "logits/chosen": 1.8564815521240234,
      "logits/rejected": 1.8255866765975952,
      "logps/chosen": -95.54290008544922,
      "logps/rejected": -79.79582214355469,
      "loss": 0.693,
      "rewards/accuracies": 0.4000000059604645,
      "rewards/chosen": -0.0028573228046298027,
      "rewards/margins": 0.0005112116923555732,
      "rewards/rejected": -0.0033685355447232723,
      "step": 10
    },
    {
      "epoch": 0.6896551724137931,
      "grad_norm": 0.45081979036331177,
      "learning_rate": 1.724137931034483e-06,
      "logits/chosen": 1.781947374343872,
      "logits/rejected": 1.6911979913711548,
      "logps/chosen": -104.20463562011719,
      "logps/rejected": -78.91150665283203,
      "loss": 0.6929,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": 0.00018558502779342234,
      "rewards/margins": -0.002152198925614357,
      "rewards/rejected": 0.0023377849720418453,
      "step": 20
    },
    {
      "epoch": 1.0344827586206897,
      "grad_norm": 0.41456305980682373,
      "learning_rate": 2.5862068965517246e-06,
      "logits/chosen": 1.7937275171279907,
      "logits/rejected": 1.729128122329712,
      "logps/chosen": -90.21867370605469,
      "logps/rejected": -71.69265747070312,
      "loss": 0.6935,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": 0.003012509550899267,
      "rewards/margins": 0.009945740923285484,
      "rewards/rejected": -0.006933231838047504,
      "step": 30
    },
    {
      "epoch": 1.3793103448275863,
      "grad_norm": 0.45128729939460754,
      "learning_rate": 3.448275862068966e-06,
      "logits/chosen": 1.864363670349121,
      "logits/rejected": 1.9002879858016968,
      "logps/chosen": -87.1126480102539,
      "logps/rejected": -77.18392181396484,
      "loss": 0.6923,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": 0.0013037443859502673,
      "rewards/margins": 0.006342612206935883,
      "rewards/rejected": -0.005038867238909006,
      "step": 40
    },
    {
      "epoch": 1.7241379310344827,
      "grad_norm": 0.4960842728614807,
      "learning_rate": 4.310344827586207e-06,
      "logits/chosen": 1.7825076580047607,
      "logits/rejected": 1.829602837562561,
      "logps/chosen": -78.63069915771484,
      "logps/rejected": -91.36685180664062,
      "loss": 0.6954,
      "rewards/accuracies": 0.625,
      "rewards/chosen": 0.009792634285986423,
      "rewards/margins": 0.014146638102829456,
      "rewards/rejected": -0.0043540047481656075,
      "step": 50
    },
    {
      "epoch": 2.0689655172413794,
      "grad_norm": 0.5404626727104187,
      "learning_rate": 4.999818897894192e-06,
      "logits/chosen": 1.8061244487762451,
      "logits/rejected": 1.7855304479599,
      "logps/chosen": -90.69072723388672,
      "logps/rejected": -72.0459213256836,
      "loss": 0.6938,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": 0.002143201883882284,
      "rewards/margins": 0.005239076912403107,
      "rewards/rejected": -0.0030958750285208225,
      "step": 60
    },
    {
      "epoch": 2.413793103448276,
      "grad_norm": 0.5091063380241394,
      "learning_rate": 4.9934830787948756e-06,
      "logits/chosen": 1.6158870458602905,
      "logits/rejected": 1.696754813194275,
      "logps/chosen": -76.28319549560547,
      "logps/rejected": -77.81846618652344,
      "loss": 0.6905,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": 1.5049334251671098e-05,
      "rewards/margins": 0.007967600598931313,
      "rewards/rejected": -0.00795255322009325,
      "step": 70
    },
    {
      "epoch": 2.7586206896551726,
      "grad_norm": 0.6066665053367615,
      "learning_rate": 4.978118375700895e-06,
      "logits/chosen": 1.6109062433242798,
      "logits/rejected": 1.7185981273651123,
      "logps/chosen": -84.615966796875,
      "logps/rejected": -96.0793228149414,
      "loss": 0.6942,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": -0.000627221364993602,
      "rewards/margins": 0.0009560534963384271,
      "rewards/rejected": -0.0015832759672775865,
      "step": 80
    },
    {
      "epoch": 3.103448275862069,
      "grad_norm": 0.5341666340827942,
      "learning_rate": 4.953780424089803e-06,
      "logits/chosen": 1.8657306432724,
      "logits/rejected": 1.8894052505493164,
      "logps/chosen": -87.67496490478516,
      "logps/rejected": -77.41777038574219,
      "loss": 0.6919,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -0.004135184455662966,
      "rewards/margins": 0.011894735507667065,
      "rewards/rejected": -0.016029920428991318,
      "step": 90
    },
    {
      "epoch": 3.4482758620689653,
      "grad_norm": 0.5891664028167725,
      "learning_rate": 4.920557351506409e-06,
      "logits/chosen": 1.8143476247787476,
      "logits/rejected": 1.8618648052215576,
      "logps/chosen": -82.7096176147461,
      "logps/rejected": -80.64532470703125,
      "loss": 0.6905,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": -0.014110831543803215,
      "rewards/margins": -0.007867741398513317,
      "rewards/rejected": -0.0062430910766124725,
      "step": 100
    },
    {
      "epoch": 3.793103448275862,
      "grad_norm": 0.5773605704307556,
      "learning_rate": 4.878569458453592e-06,
      "logits/chosen": 1.7850589752197266,
      "logits/rejected": 1.76922607421875,
      "logps/chosen": -87.4909896850586,
      "logps/rejected": -90.67459869384766,
      "loss": 0.6899,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": 0.0026252653915435076,
      "rewards/margins": 0.014214910566806793,
      "rewards/rejected": -0.011589646339416504,
      "step": 110
    },
    {
      "epoch": 4.137931034482759,
      "grad_norm": 0.6582341194152832,
      "learning_rate": 4.827968782785062e-06,
      "logits/chosen": 1.7467533349990845,
      "logits/rejected": 1.8858740329742432,
      "logps/chosen": -69.12274169921875,
      "logps/rejected": -94.86824035644531,
      "loss": 0.6878,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": -0.009279675781726837,
      "rewards/margins": 0.0035783485509455204,
      "rewards/rejected": -0.012858022935688496,
      "step": 120
    },
    {
      "epoch": 4.482758620689655,
      "grad_norm": 0.7790700793266296,
      "learning_rate": 4.7689385491773934e-06,
      "logits/chosen": 1.8058643341064453,
      "logits/rejected": 1.866713523864746,
      "logps/chosen": -79.86953735351562,
      "logps/rejected": -76.4103012084961,
      "loss": 0.6857,
      "rewards/accuracies": 0.625,
      "rewards/chosen": 0.00016659722314216197,
      "rewards/margins": 0.023889193311333656,
      "rewards/rejected": -0.02372259460389614,
      "step": 130
    },
    {
      "epoch": 4.827586206896552,
      "grad_norm": 0.7933465242385864,
      "learning_rate": 4.70169250567482e-06,
      "logits/chosen": 1.7705074548721313,
      "logits/rejected": 1.7728191614151,
      "logps/chosen": -86.46583557128906,
      "logps/rejected": -75.83828735351562,
      "loss": 0.6804,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -0.0001745700865285471,
      "rewards/margins": 0.029326915740966797,
      "rewards/rejected": -0.02950148656964302,
      "step": 140
    },
    {
      "epoch": 5.172413793103448,
      "grad_norm": 0.7435988187789917,
      "learning_rate": 4.626474149709127e-06,
      "logits/chosen": 1.740312933921814,
      "logits/rejected": 1.7526963949203491,
      "logps/chosen": -100.93019104003906,
      "logps/rejected": -90.39695739746094,
      "loss": 0.6803,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.013870477676391602,
      "rewards/margins": 0.03614342585206032,
      "rewards/rejected": -0.05001390725374222,
      "step": 150
    },
    {
      "epoch": 5.517241379310345,
      "grad_norm": 0.8416279554367065,
      "learning_rate": 4.54355584639723e-06,
      "logits/chosen": 1.7618926763534546,
      "logits/rejected": 1.8320789337158203,
      "logps/chosen": -71.05604553222656,
      "logps/rejected": -82.77362060546875,
      "loss": 0.6748,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -0.028952527791261673,
      "rewards/margins": 0.03283882141113281,
      "rewards/rejected": -0.061791349202394485,
      "step": 160
    },
    {
      "epoch": 5.862068965517241,
      "grad_norm": 0.9731557965278625,
      "learning_rate": 4.45323784230908e-06,
      "logits/chosen": 1.7541310787200928,
      "logits/rejected": 1.845503807067871,
      "logps/chosen": -79.18022155761719,
      "logps/rejected": -89.36749267578125,
      "loss": 0.6706,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.02990702912211418,
      "rewards/margins": 0.05189533159136772,
      "rewards/rejected": -0.0818023532629013,
      "step": 170
    },
    {
      "epoch": 6.206896551724138,
      "grad_norm": 0.8338156938552856,
      "learning_rate": 4.355847178277025e-06,
      "logits/chosen": 1.7501287460327148,
      "logits/rejected": 1.7370710372924805,
      "logps/chosen": -91.2562484741211,
      "logps/rejected": -80.88179779052734,
      "loss": 0.6661,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.03045990690588951,
      "rewards/margins": 0.08234542608261108,
      "rewards/rejected": -0.1128053292632103,
      "step": 180
    },
    {
      "epoch": 6.551724137931035,
      "grad_norm": 0.8057475686073303,
      "learning_rate": 4.2517365051833564e-06,
      "logits/chosen": 1.8594491481781006,
      "logits/rejected": 1.9256786108016968,
      "logps/chosen": -74.18313598632812,
      "logps/rejected": -84.37249755859375,
      "loss": 0.6573,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -0.05836126208305359,
      "rewards/margins": 0.06953532248735428,
      "rewards/rejected": -0.12789657711982727,
      "step": 190
    },
    {
      "epoch": 6.896551724137931,
      "grad_norm": 0.957699716091156,
      "learning_rate": 4.141282807014034e-06,
      "logits/chosen": 1.696286916732788,
      "logits/rejected": 1.782080888748169,
      "logps/chosen": -83.79094696044922,
      "logps/rejected": -79.17488861083984,
      "loss": 0.6595,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.08588370680809021,
      "rewards/margins": 0.062126852571964264,
      "rewards/rejected": -0.14801056683063507,
      "step": 200
    },
    {
      "epoch": 7.241379310344827,
      "grad_norm": 0.8981906771659851,
      "learning_rate": 4.024886035802432e-06,
      "logits/chosen": 1.8213344812393188,
      "logits/rejected": 1.9113010168075562,
      "logps/chosen": -67.83535766601562,
      "logps/rejected": -80.57597351074219,
      "loss": 0.6498,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -0.05136314034461975,
      "rewards/margins": 0.10948891937732697,
      "rewards/rejected": -0.16085204482078552,
      "step": 210
    },
    {
      "epoch": 7.586206896551724,
      "grad_norm": 1.0450000762939453,
      "learning_rate": 3.9029676634059565e-06,
      "logits/chosen": 1.8091751337051392,
      "logits/rejected": 1.705392599105835,
      "logps/chosen": -98.79942321777344,
      "logps/rejected": -79.21489715576172,
      "loss": 0.6457,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.09070365875959396,
      "rewards/margins": 0.12246594578027725,
      "rewards/rejected": -0.21316960453987122,
      "step": 220
    },
    {
      "epoch": 7.931034482758621,
      "grad_norm": 0.9624446034431458,
      "learning_rate": 3.7759691553595214e-06,
      "logits/chosen": 1.8079789876937866,
      "logits/rejected": 1.8931957483291626,
      "logps/chosen": -77.8460922241211,
      "logps/rejected": -76.8448715209961,
      "loss": 0.6391,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.09181363880634308,
      "rewards/margins": 0.09949400275945663,
      "rewards/rejected": -0.19130763411521912,
      "step": 230
    },
    {
      "epoch": 8.275862068965518,
      "grad_norm": 0.9608933925628662,
      "learning_rate": 3.6443503723320837e-06,
      "logits/chosen": 1.7896497249603271,
      "logits/rejected": 1.8757597208023071,
      "logps/chosen": -79.89161682128906,
      "logps/rejected": -88.79243469238281,
      "loss": 0.6322,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -0.10905975103378296,
      "rewards/margins": 0.12515850365161896,
      "rewards/rejected": -0.23421823978424072,
      "step": 240
    },
    {
      "epoch": 8.620689655172415,
      "grad_norm": 0.9355646967887878,
      "learning_rate": 3.508587904974522e-06,
      "logits/chosen": 1.860887885093689,
      "logits/rejected": 1.9076162576675415,
      "logps/chosen": -85.80632781982422,
      "logps/rejected": -104.0634765625,
      "loss": 0.6217,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.11484359204769135,
      "rewards/margins": 0.1558837592601776,
      "rewards/rejected": -0.27072733640670776,
      "step": 250
    },
    {
      "epoch": 8.96551724137931,
      "grad_norm": 1.0788410902023315,
      "learning_rate": 3.3691733481883693e-06,
      "logits/chosen": 1.9172537326812744,
      "logits/rejected": 1.7995975017547607,
      "logps/chosen": -92.4389419555664,
      "logps/rejected": -69.5486068725586,
      "loss": 0.6273,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.10718520730733871,
      "rewards/margins": 0.15187612175941467,
      "rewards/rejected": -0.259061336517334,
      "step": 260
    },
    {
      "epoch": 9.310344827586206,
      "grad_norm": 0.9002386331558228,
      "learning_rate": 3.226611521064278e-06,
      "logits/chosen": 1.676670789718628,
      "logits/rejected": 1.7091697454452515,
      "logps/chosen": -78.51396942138672,
      "logps/rejected": -85.13417053222656,
      "loss": 0.6077,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.10331835597753525,
      "rewards/margins": 0.18381647765636444,
      "rewards/rejected": -0.2871348559856415,
      "step": 270
    },
    {
      "epoch": 9.655172413793103,
      "grad_norm": 0.9108310341835022,
      "learning_rate": 3.0814186389357765e-06,
      "logits/chosen": 1.778263807296753,
      "logits/rejected": 1.8473546504974365,
      "logps/chosen": -74.77851867675781,
      "logps/rejected": -72.4294662475586,
      "loss": 0.6161,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.15308822691440582,
      "rewards/margins": 0.1778026521205902,
      "rewards/rejected": -0.33089086413383484,
      "step": 280
    },
    {
      "epoch": 10.0,
      "grad_norm": 0.946640133857727,
      "learning_rate": 2.9341204441673267e-06,
      "logits/chosen": 1.7126500606536865,
      "logits/rejected": 1.7476106882095337,
      "logps/chosen": -86.2017593383789,
      "logps/rejected": -80.07001495361328,
      "loss": 0.605,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.19395658373832703,
      "rewards/margins": 0.19845889508724213,
      "rewards/rejected": -0.39241549372673035,
      "step": 290
    },
    {
      "epoch": 10.344827586206897,
      "grad_norm": 1.0770831108093262,
      "learning_rate": 2.785250302445062e-06,
      "logits/chosen": 1.7564175128936768,
      "logits/rejected": 1.7827428579330444,
      "logps/chosen": -83.96778869628906,
      "logps/rejected": -90.85274505615234,
      "loss": 0.6044,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.1555892676115036,
      "rewards/margins": 0.2721293568611145,
      "rewards/rejected": -0.4277185797691345,
      "step": 300
    },
    {
      "epoch": 10.689655172413794,
      "grad_norm": 1.034155011177063,
      "learning_rate": 2.6353472714635443e-06,
      "logits/chosen": 1.8422276973724365,
      "logits/rejected": 1.9225587844848633,
      "logps/chosen": -81.3154525756836,
      "logps/rejected": -82.27003479003906,
      "loss": 0.589,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.1461830735206604,
      "rewards/margins": 0.2492922842502594,
      "rewards/rejected": -0.3954753279685974,
      "step": 310
    },
    {
      "epoch": 11.03448275862069,
      "grad_norm": 0.9909716844558716,
      "learning_rate": 2.4849541490017868e-06,
      "logits/chosen": 1.6462303400039673,
      "logits/rejected": 1.676540732383728,
      "logps/chosen": -83.37794494628906,
      "logps/rejected": -100.16087341308594,
      "loss": 0.5912,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -0.20598438382148743,
      "rewards/margins": 0.2601252496242523,
      "rewards/rejected": -0.46610966324806213,
      "step": 320
    },
    {
      "epoch": 11.379310344827585,
      "grad_norm": 1.0035589933395386,
      "learning_rate": 2.3346155074564712e-06,
      "logits/chosen": 1.8267767429351807,
      "logits/rejected": 1.8420498371124268,
      "logps/chosen": -83.43693542480469,
      "logps/rejected": -81.88127899169922,
      "loss": 0.5903,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.16100475192070007,
      "rewards/margins": 0.2701917588710785,
      "rewards/rejected": -0.43119654059410095,
      "step": 330
    },
    {
      "epoch": 11.724137931034482,
      "grad_norm": 1.1042752265930176,
      "learning_rate": 2.184875721949277e-06,
      "logits/chosen": 1.5917112827301025,
      "logits/rejected": 1.747766137123108,
      "logps/chosen": -84.04930114746094,
      "logps/rejected": -99.69436645507812,
      "loss": 0.5773,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": -0.20484447479248047,
      "rewards/margins": 0.34736505150794983,
      "rewards/rejected": -0.5522094964981079,
      "step": 340
    },
    {
      "epoch": 12.068965517241379,
      "grad_norm": 1.109389066696167,
      "learning_rate": 2.0362769991485514e-06,
      "logits/chosen": 1.7192186117172241,
      "logits/rejected": 1.7819591760635376,
      "logps/chosen": -81.86787414550781,
      "logps/rejected": -106.79791259765625,
      "loss": 0.5738,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": -0.2179536074399948,
      "rewards/margins": 0.31939607858657837,
      "rewards/rejected": -0.5373496413230896,
      "step": 350
    },
    {
      "epoch": 12.413793103448276,
      "grad_norm": 1.0638686418533325,
      "learning_rate": 1.8893574139429226e-06,
      "logits/chosen": 1.7060273885726929,
      "logits/rejected": 1.695678949356079,
      "logps/chosen": -96.91914367675781,
      "logps/rejected": -81.13513946533203,
      "loss": 0.5678,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -0.2419094741344452,
      "rewards/margins": 0.30147960782051086,
      "rewards/rejected": -0.543389081954956,
      "step": 360
    },
    {
      "epoch": 12.758620689655173,
      "grad_norm": 1.2344255447387695,
      "learning_rate": 1.744648961076068e-06,
      "logits/chosen": 1.8007898330688477,
      "logits/rejected": 1.8490253686904907,
      "logps/chosen": -75.9570083618164,
      "logps/rejected": -80.76083374023438,
      "loss": 0.5739,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.22758683562278748,
      "rewards/margins": 0.28146892786026,
      "rewards/rejected": -0.5090557336807251,
      "step": 370
    },
    {
      "epoch": 13.10344827586207,
      "grad_norm": 1.401179313659668,
      "learning_rate": 1.602675628797636e-06,
      "logits/chosen": 1.7176204919815063,
      "logits/rejected": 1.7408435344696045,
      "logps/chosen": -78.79522705078125,
      "logps/rejected": -92.59135437011719,
      "loss": 0.5856,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.26380348205566406,
      "rewards/margins": 0.24713349342346191,
      "rewards/rejected": -0.510936975479126,
      "step": 380
    },
    {
      "epoch": 13.448275862068966,
      "grad_norm": 1.135343313217163,
      "learning_rate": 1.4639515015056205e-06,
      "logits/chosen": 1.7480716705322266,
      "logits/rejected": 1.7659183740615845,
      "logps/chosen": -97.42142486572266,
      "logps/rejected": -84.87925720214844,
      "loss": 0.5641,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": -0.2054813802242279,
      "rewards/margins": 0.34871339797973633,
      "rewards/rejected": -0.5541948080062866,
      "step": 390
    },
    {
      "epoch": 13.793103448275861,
      "grad_norm": 1.2716423273086548,
      "learning_rate": 1.328978898250525e-06,
      "logits/chosen": 1.7928969860076904,
      "logits/rejected": 1.7819544076919556,
      "logps/chosen": -83.4045181274414,
      "logps/rejected": -75.65983581542969,
      "loss": 0.5611,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": -0.21872875094413757,
      "rewards/margins": 0.3643662631511688,
      "rewards/rejected": -0.5830950140953064,
      "step": 400
    },
    {
      "epoch": 14.137931034482758,
      "grad_norm": 1.0569292306900024,
      "learning_rate": 1.198246553841744e-06,
      "logits/chosen": 1.7486135959625244,
      "logits/rejected": 1.7119531631469727,
      "logps/chosen": -81.8648910522461,
      "logps/rejected": -75.25749969482422,
      "loss": 0.558,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.2697509229183197,
      "rewards/margins": 0.27841055393218994,
      "rewards/rejected": -0.548161506652832,
      "step": 410
    },
    {
      "epoch": 14.482758620689655,
      "grad_norm": 1.095815896987915,
      "learning_rate": 1.0722278491423998e-06,
      "logits/chosen": 1.7447073459625244,
      "logits/rejected": 1.7643568515777588,
      "logps/chosen": -89.39067077636719,
      "logps/rejected": -92.55440521240234,
      "loss": 0.551,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": -0.25158265233039856,
      "rewards/margins": 0.37486472725868225,
      "rewards/rejected": -0.6264473795890808,
      "step": 420
    },
    {
      "epoch": 14.827586206896552,
      "grad_norm": 1.248704195022583,
      "learning_rate": 9.513790969606926e-07,
      "logits/chosen": 1.7191492319107056,
      "logits/rejected": 1.716238021850586,
      "logps/chosen": -88.57774353027344,
      "logps/rejected": -81.03433990478516,
      "loss": 0.5586,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.3197632431983948,
      "rewards/margins": 0.27085188031196594,
      "rewards/rejected": -0.5906150937080383,
      "step": 430
    },
    {
      "epoch": 15.172413793103448,
      "grad_norm": 1.1247127056121826,
      "learning_rate": 8.361378897445643e-07,
      "logits/chosen": 1.7265936136245728,
      "logits/rejected": 1.7360607385635376,
      "logps/chosen": -82.77619934082031,
      "logps/rejected": -94.1690444946289,
      "loss": 0.5532,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -0.2630786895751953,
      "rewards/margins": 0.4049588143825531,
      "rewards/rejected": -0.668037474155426,
      "step": 440
    },
    {
      "epoch": 15.517241379310345,
      "grad_norm": 1.278336763381958,
      "learning_rate": 7.269215150626391e-07,
      "logits/chosen": 1.6616032123565674,
      "logits/rejected": 1.6782630681991577,
      "logps/chosen": -77.37220764160156,
      "logps/rejected": -72.65897369384766,
      "loss": 0.5493,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -0.28209930658340454,
      "rewards/margins": 0.3699084520339966,
      "rewards/rejected": -0.6520077586174011,
      "step": 450
    },
    {
      "epoch": 15.862068965517242,
      "grad_norm": 1.2569310665130615,
      "learning_rate": 6.241254446089942e-07,
      "logits/chosen": 1.7343212366104126,
      "logits/rejected": 1.7447402477264404,
      "logps/chosen": -96.77474212646484,
      "logps/rejected": -85.16373443603516,
      "loss": 0.5519,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.24759416282176971,
      "rewards/margins": 0.3629246950149536,
      "rewards/rejected": -0.6105188131332397,
      "step": 460
    },
    {
      "epoch": 16.20689655172414,
      "grad_norm": 1.336328148841858,
      "learning_rate": 5.281219022030423e-07,
      "logits/chosen": 1.6723476648330688,
      "logits/rejected": 1.700272798538208,
      "logps/chosen": -90.57843017578125,
      "logps/rejected": -95.53768920898438,
      "loss": 0.5514,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": -0.24270884692668915,
      "rewards/margins": 0.4548424184322357,
      "rewards/rejected": -0.6975512504577637,
      "step": 470
    },
    {
      "epoch": 16.551724137931036,
      "grad_norm": 1.1883608102798462,
      "learning_rate": 4.392585159698087e-07,
      "logits/chosen": 1.7246710062026978,
      "logits/rejected": 1.7370675802230835,
      "logps/chosen": -82.00711822509766,
      "logps/rejected": -80.11751556396484,
      "loss": 0.5453,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -0.29246488213539124,
      "rewards/margins": 0.38618355989456177,
      "rewards/rejected": -0.6786484122276306,
      "step": 480
    },
    {
      "epoch": 16.896551724137932,
      "grad_norm": 1.1097893714904785,
      "learning_rate": 3.578570595810274e-07,
      "logits/chosen": 1.6674753427505493,
      "logits/rejected": 1.8064743280410767,
      "logps/chosen": -71.32947540283203,
      "logps/rejected": -93.24207305908203,
      "loss": 0.5494,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -0.24032393097877502,
      "rewards/margins": 0.45011502504348755,
      "rewards/rejected": -0.6904388666152954,
      "step": 490
    },
    {
      "epoch": 17.24137931034483,
      "grad_norm": 1.182057499885559,
      "learning_rate": 2.8421228711503127e-07,
      "logits/chosen": 1.6695787906646729,
      "logits/rejected": 1.6788876056671143,
      "logps/chosen": -85.4040298461914,
      "logps/rejected": -91.11408996582031,
      "loss": 0.5475,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": -0.2333899289369583,
      "rewards/margins": 0.4074273705482483,
      "rewards/rejected": -0.6408172845840454,
      "step": 500
    },
    {
      "epoch": 17.586206896551722,
      "grad_norm": 1.0390254259109497,
      "learning_rate": 2.1859086575439225e-07,
      "logits/chosen": 1.706221342086792,
      "logits/rejected": 1.7529186010360718,
      "logps/chosen": -85.7811279296875,
      "logps/rejected": -94.31623077392578,
      "loss": 0.5594,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": -0.30455097556114197,
      "rewards/margins": 0.322262704372406,
      "rewards/rejected": -0.6268137097358704,
      "step": 510
    },
    {
      "epoch": 17.93103448275862,
      "grad_norm": 1.1281626224517822,
      "learning_rate": 1.6123041018599766e-07,
      "logits/chosen": 1.727237343788147,
      "logits/rejected": 1.7324402332305908,
      "logps/chosen": -94.46121978759766,
      "logps/rejected": -83.49845123291016,
      "loss": 0.5394,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -0.19388970732688904,
      "rewards/margins": 0.4317905902862549,
      "rewards/rejected": -0.6256802678108215,
      "step": 520
    },
    {
      "epoch": 18.275862068965516,
      "grad_norm": 1.33527672290802,
      "learning_rate": 1.1233862220001168e-07,
      "logits/chosen": 1.752081274986267,
      "logits/rejected": 1.7581005096435547,
      "logps/chosen": -85.71052551269531,
      "logps/rejected": -79.36320495605469,
      "loss": 0.5484,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -0.26601725816726685,
      "rewards/margins": 0.45839911699295044,
      "rewards/rejected": -0.7244163751602173,
      "step": 530
    },
    {
      "epoch": 18.620689655172413,
      "grad_norm": 1.049845576286316,
      "learning_rate": 7.209253860320897e-08,
      "logits/chosen": 1.7148698568344116,
      "logits/rejected": 1.709167718887329,
      "logps/chosen": -85.56983947753906,
      "logps/rejected": -86.58708953857422,
      "loss": 0.5485,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": -0.2879505753517151,
      "rewards/margins": 0.4327491223812103,
      "rewards/rejected": -0.7206997871398926,
      "step": 540
    },
    {
      "epoch": 18.96551724137931,
      "grad_norm": 1.1356227397918701,
      "learning_rate": 4.063789016999331e-08,
      "logits/chosen": 1.7863643169403076,
      "logits/rejected": 1.7491180896759033,
      "logps/chosen": -105.41233825683594,
      "logps/rejected": -75.59117126464844,
      "loss": 0.5447,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.302162230014801,
      "rewards/margins": 0.35185351967811584,
      "rewards/rejected": -0.6540156602859497,
      "step": 550
    },
    {
      "epoch": 19.310344827586206,
      "grad_norm": 1.0577690601348877,
      "learning_rate": 1.808857395232788e-08,
      "logits/chosen": 1.6059486865997314,
      "logits/rejected": 1.6719220876693726,
      "logps/chosen": -76.90476989746094,
      "logps/rejected": -84.28822326660156,
      "loss": 0.5437,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -0.28083422780036926,
      "rewards/margins": 0.3541816174983978,
      "rewards/rejected": -0.6350158452987671,
      "step": 560
    },
    {
      "epoch": 19.655172413793103,
      "grad_norm": 1.048354983329773,
      "learning_rate": 4.526240859345499e-09,
      "logits/chosen": 1.7016254663467407,
      "logits/rejected": 1.7128419876098633,
      "logps/chosen": -89.4581069946289,
      "logps/rejected": -89.28730773925781,
      "loss": 0.5485,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -0.3324401378631592,
      "rewards/margins": 0.3611425757408142,
      "rewards/rejected": -0.6935827136039734,
      "step": 570
    },
    {
      "epoch": 20.0,
      "grad_norm": 1.5148407220840454,
      "learning_rate": 0.0,
      "logits/chosen": 1.6660770177841187,
      "logits/rejected": 1.7669824361801147,
      "logps/chosen": -82.24041748046875,
      "logps/rejected": -87.77484893798828,
      "loss": 0.5444,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.30835968255996704,
      "rewards/margins": 0.35150665044784546,
      "rewards/rejected": -0.6598662734031677,
      "step": 580
    },
    {
      "epoch": 20.0,
      "step": 580,
      "total_flos": 1.9591809237023457e+18,
      "train_loss": 0.613731259313123,
      "train_runtime": 4229.7108,
      "train_samples_per_second": 8.752,
      "train_steps_per_second": 0.137
    }
  ],
  "logging_steps": 10,
  "max_steps": 580,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 20,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.9591809237023457e+18,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}