{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9950248756218906,
  "eval_steps": 500,
  "global_step": 2500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03980099502487562,
      "grad_norm": 2.466599941253662,
      "learning_rate": 4.980474401576887e-07,
      "logits/chosen": -1.0382839441299438,
      "logits/rejected": -1.054585576057434,
      "logps/chosen": -180.16372680664062,
      "logps/rejected": -179.63958740234375,
      "loss": 0.7169,
      "rewards/accuracies": 0.508593738079071,
      "rewards/chosen": -0.278283029794693,
      "rewards/margins": 0.017729664221405983,
      "rewards/rejected": -0.2960126996040344,
      "step": 100
    },
    {
      "epoch": 0.07960199004975124,
      "grad_norm": 1.6285502910614014,
      "learning_rate": 4.922202605502572e-07,
      "logits/chosen": -1.0206574201583862,
      "logits/rejected": -1.0379180908203125,
      "logps/chosen": -180.13119506835938,
      "logps/rejected": -180.29315185546875,
      "loss": 0.7103,
      "rewards/accuracies": 0.5193750262260437,
      "rewards/chosen": -0.2392772138118744,
      "rewards/margins": 0.0195913203060627,
      "rewards/rejected": -0.2588685154914856,
      "step": 200
    },
    {
      "epoch": 0.11940298507462686,
      "grad_norm": 2.7344954013824463,
      "learning_rate": 4.82609484512869e-07,
      "logits/chosen": -1.0241189002990723,
      "logits/rejected": -1.0348433256149292,
      "logps/chosen": -178.86837768554688,
      "logps/rejected": -179.53598022460938,
      "loss": 0.7077,
      "rewards/accuracies": 0.5171874761581421,
      "rewards/chosen": -0.21391521394252777,
      "rewards/margins": 0.015260601416230202,
      "rewards/rejected": -0.22917583584785461,
      "step": 300
    },
    {
      "epoch": 0.15920398009950248,
      "grad_norm": 1.7553608417510986,
      "learning_rate": 4.6936523696827614e-07,
      "logits/chosen": -1.0034430027008057,
      "logits/rejected": -1.0131057500839233,
      "logps/chosen": -180.70912170410156,
      "logps/rejected": -181.44435119628906,
      "loss": 0.7064,
      "rewards/accuracies": 0.5106250047683716,
      "rewards/chosen": -0.18936260044574738,
      "rewards/margins": 0.013838082551956177,
      "rewards/rejected": -0.20320068299770355,
      "step": 400
    },
    {
      "epoch": 0.19900497512437812,
      "grad_norm": 1.4984805583953857,
      "learning_rate": 4.5269439940365644e-07,
      "logits/chosen": -1.0015250444412231,
      "logits/rejected": -1.0229138135910034,
      "logps/chosen": -178.01638793945312,
      "logps/rejected": -177.6285400390625,
      "loss": 0.7093,
      "rewards/accuracies": 0.4985937476158142,
      "rewards/chosen": -0.1724223494529724,
      "rewards/margins": 0.005522388964891434,
      "rewards/rejected": -0.17794474959373474,
      "step": 500
    },
    {
      "epoch": 0.23880597014925373,
      "grad_norm": 1.539502501487732,
      "learning_rate": 4.328573782827409e-07,
      "logits/chosen": -0.9833173155784607,
      "logits/rejected": -1.0065593719482422,
      "logps/chosen": -176.92718505859375,
      "logps/rejected": -174.7305145263672,
      "loss": 0.7017,
      "rewards/accuracies": 0.507031261920929,
      "rewards/chosen": -0.14655204117298126,
      "rewards/margins": 0.01735287345945835,
      "rewards/rejected": -0.16390492022037506,
      "step": 600
    },
    {
      "epoch": 0.27860696517412936,
      "grad_norm": 1.7529277801513672,
      "learning_rate": 4.1016403737218373e-07,
      "logits/chosen": -0.9706727862358093,
      "logits/rejected": -0.997480034828186,
      "logps/chosen": -179.5781707763672,
      "logps/rejected": -174.51856994628906,
      "loss": 0.7024,
      "rewards/accuracies": 0.5096874833106995,
      "rewards/chosen": -0.1383100152015686,
      "rewards/margins": 0.014245204627513885,
      "rewards/rejected": -0.1525552123785019,
      "step": 700
    },
    {
      "epoch": 0.31840796019900497,
      "grad_norm": 1.58861243724823,
      "learning_rate": 3.849688575211836e-07,
      "logits/chosen": -0.9722832441329956,
      "logits/rejected": -0.9961209893226624,
      "logps/chosen": -182.63351440429688,
      "logps/rejected": -178.73550415039062,
      "loss": 0.6995,
      "rewards/accuracies": 0.5214062333106995,
      "rewards/chosen": -0.12984366714954376,
      "rewards/margins": 0.020543336868286133,
      "rewards/rejected": -0.1503870040178299,
      "step": 800
    },
    {
      "epoch": 0.3582089552238806,
      "grad_norm": 1.5765061378479004,
      "learning_rate": 3.576653995009154e-07,
      "logits/chosen": -0.9867467284202576,
      "logits/rejected": -1.0024300813674927,
      "logps/chosen": -175.5293426513672,
      "logps/rejected": -175.31863403320312,
      "loss": 0.6951,
      "rewards/accuracies": 0.5267187356948853,
      "rewards/chosen": -0.11608893424272537,
      "rewards/margins": 0.024803265929222107,
      "rewards/rejected": -0.14089219272136688,
      "step": 900
    },
    {
      "epoch": 0.39800995024875624,
      "grad_norm": 1.4879308938980103,
      "learning_rate": 3.286801563968721e-07,
      "logits/chosen": -0.9625688195228577,
      "logits/rejected": -0.9898955821990967,
      "logps/chosen": -178.4788818359375,
      "logps/rejected": -175.9660186767578,
      "loss": 0.6984,
      "rewards/accuracies": 0.5181249976158142,
      "rewards/chosen": -0.11111991107463837,
      "rewards/margins": 0.020023101940751076,
      "rewards/rejected": -0.1311430186033249,
      "step": 1000
    },
    {
      "epoch": 0.43781094527363185,
      "grad_norm": 1.4386117458343506,
      "learning_rate": 2.9846589158269034e-07,
      "logits/chosen": -0.9723007082939148,
      "logits/rejected": -0.9996936321258545,
      "logps/chosen": -181.09291076660156,
      "logps/rejected": -176.69898986816406,
      "loss": 0.7013,
      "rewards/accuracies": 0.5106250047683716,
      "rewards/chosen": -0.11242911964654922,
      "rewards/margins": 0.0125286765396595,
      "rewards/rejected": -0.12495779991149902,
      "step": 1100
    },
    {
      "epoch": 0.47761194029850745,
      "grad_norm": 2.057227373123169,
      "learning_rate": 2.674945663394993e-07,
      "logits/chosen": -0.9660943746566772,
      "logits/rejected": -0.9956172108650208,
      "logps/chosen": -177.69273376464844,
      "logps/rejected": -172.55929565429688,
      "loss": 0.7032,
      "rewards/accuracies": 0.5062500238418579,
      "rewards/chosen": -0.10844483971595764,
      "rewards/margins": 0.0067189522087574005,
      "rewards/rejected": -0.11516381055116653,
      "step": 1200
    },
    {
      "epoch": 0.5174129353233831,
      "grad_norm": 1.5468394756317139,
      "learning_rate": 2.3624996759476285e-07,
      "logits/chosen": -0.9644728899002075,
      "logits/rejected": -0.9913946390151978,
      "logps/chosen": -178.84991455078125,
      "logps/rejected": -175.5871124267578,
      "loss": 0.6992,
      "rewards/accuracies": 0.5198437571525574,
      "rewards/chosen": -0.10432270169258118,
      "rewards/margins": 0.016916964203119278,
      "rewards/rejected": -0.12123966217041016,
      "step": 1300
    },
    {
      "epoch": 0.5572139303482587,
      "grad_norm": 1.2536767721176147,
      "learning_rate": 2.0522015093886614e-07,
      "logits/chosen": -0.9504435658454895,
      "logits/rejected": -0.9719184637069702,
      "logps/chosen": -178.3258056640625,
      "logps/rejected": -176.43231201171875,
      "loss": 0.6973,
      "rewards/accuracies": 0.514843761920929,
      "rewards/chosen": -0.09463655203580856,
      "rewards/margins": 0.02013176493346691,
      "rewards/rejected": -0.11476832628250122,
      "step": 1400
    },
    {
      "epoch": 0.5970149253731343,
      "grad_norm": 1.377833366394043,
      "learning_rate": 1.7488981696314154e-07,
      "logits/chosen": -0.9491727948188782,
      "logits/rejected": -0.97397780418396,
      "logps/chosen": -177.8011932373047,
      "logps/rejected": -174.43284606933594,
      "loss": 0.6986,
      "rewards/accuracies": 0.5153124928474426,
      "rewards/chosen": -0.08802775293588638,
      "rewards/margins": 0.014450294896960258,
      "rewards/rejected": -0.1024780347943306,
      "step": 1500
    },
    {
      "epoch": 0.6368159203980099,
      "grad_norm": 1.4358242750167847,
      "learning_rate": 1.4573274000458839e-07,
      "logits/chosen": -0.9687415957450867,
      "logits/rejected": -0.9858207106590271,
      "logps/chosen": -177.40037536621094,
      "logps/rejected": -175.6855010986328,
      "loss": 0.6988,
      "rewards/accuracies": 0.5137500166893005,
      "rewards/chosen": -0.08970288932323456,
      "rewards/margins": 0.016197971999645233,
      "rewards/rejected": -0.10590087622404099,
      "step": 1600
    },
    {
      "epoch": 0.6766169154228856,
      "grad_norm": 1.7245056629180908,
      "learning_rate": 1.1820436756391414e-07,
      "logits/chosen": -0.9455291032791138,
      "logits/rejected": -0.9768875241279602,
      "logps/chosen": -180.8955841064453,
      "logps/rejected": -176.17271423339844,
      "loss": 0.6959,
      "rewards/accuracies": 0.5231249928474426,
      "rewards/chosen": -0.08720911294221878,
      "rewards/margins": 0.021857038140296936,
      "rewards/rejected": -0.10906614363193512,
      "step": 1700
    },
    {
      "epoch": 0.7164179104477612,
      "grad_norm": 1.4436590671539307,
      "learning_rate": 9.273470599753375e-08,
      "logits/chosen": -0.9535448551177979,
      "logits/rejected": -0.968150794506073,
      "logps/chosen": -176.52774047851562,
      "logps/rejected": -175.44468688964844,
      "loss": 0.6951,
      "rewards/accuracies": 0.5285937786102295,
      "rewards/chosen": -0.08284671604633331,
      "rewards/margins": 0.022235672920942307,
      "rewards/rejected": -0.10508238524198532,
      "step": 1800
    },
    {
      "epoch": 0.7562189054726368,
      "grad_norm": 1.4405585527420044,
      "learning_rate": 6.972160361242119e-08,
      "logits/chosen": -0.9640732407569885,
      "logits/rejected": -0.9774580597877502,
      "logps/chosen": -174.50440979003906,
      "logps/rejected": -174.0882568359375,
      "loss": 0.6982,
      "rewards/accuracies": 0.5145312547683716,
      "rewards/chosen": -0.0837017223238945,
      "rewards/margins": 0.015292072668671608,
      "rewards/rejected": -0.09899380803108215,
      "step": 1900
    },
    {
      "epoch": 0.7960199004975125,
      "grad_norm": 1.5042272806167603,
      "learning_rate": 4.952453608509e-08,
      "logits/chosen": -0.9581471085548401,
      "logits/rejected": -0.9812409281730652,
      "logps/chosen": -180.36788940429688,
      "logps/rejected": -177.5521240234375,
      "loss": 0.6955,
      "rewards/accuracies": 0.5196874737739563,
      "rewards/chosen": -0.08257725834846497,
      "rewards/margins": 0.020721009001135826,
      "rewards/rejected": -0.10329828411340714,
      "step": 2000
    },
    {
      "epoch": 0.835820895522388,
      "grad_norm": 1.356775164604187,
      "learning_rate": 3.2458991279430717e-08,
      "logits/chosen": -0.9707114696502686,
      "logits/rejected": -0.9862151145935059,
      "logps/chosen": -174.95864868164062,
      "logps/rejected": -176.15863037109375,
      "loss": 0.6934,
      "rewards/accuracies": 0.5290625095367432,
      "rewards/chosen": -0.07742391526699066,
      "rewards/margins": 0.026406314224004745,
      "rewards/rejected": -0.10383022576570511,
      "step": 2100
    },
    {
      "epoch": 0.8756218905472637,
      "grad_norm": 1.412015438079834,
      "learning_rate": 1.8791541175240787e-08,
      "logits/chosen": -0.9716974496841431,
      "logits/rejected": -0.989736795425415,
      "logps/chosen": -176.68089294433594,
      "logps/rejected": -174.81150817871094,
      "loss": 0.7002,
      "rewards/accuracies": 0.5140625238418579,
      "rewards/chosen": -0.08455894142389297,
      "rewards/margins": 0.013038328848779202,
      "rewards/rejected": -0.09759727120399475,
      "step": 2200
    },
    {
      "epoch": 0.9154228855721394,
      "grad_norm": 1.2886689901351929,
      "learning_rate": 8.735677886282183e-09,
      "logits/chosen": -0.971416175365448,
      "logits/rejected": -0.9738467335700989,
      "logps/chosen": -175.48406982421875,
      "logps/rejected": -178.49595642089844,
      "loss": 0.6938,
      "rewards/accuracies": 0.5239062309265137,
      "rewards/chosen": -0.08240438252687454,
      "rewards/margins": 0.025585364550352097,
      "rewards/rejected": -0.10798975080251694,
      "step": 2300
    },
    {
      "epoch": 0.9552238805970149,
      "grad_norm": 1.5586168766021729,
      "learning_rate": 2.4484788112571488e-09,
      "logits/chosen": -0.9471347332000732,
      "logits/rejected": -0.9709975719451904,
      "logps/chosen": -178.15431213378906,
      "logps/rejected": -175.84417724609375,
      "loss": 0.696,
      "rewards/accuracies": 0.5165625214576721,
      "rewards/chosen": -0.08339080959558487,
      "rewards/margins": 0.020824383944272995,
      "rewards/rejected": -0.10421518236398697,
      "step": 2400
    },
    {
      "epoch": 0.9950248756218906,
      "grad_norm": 1.4165425300598145,
      "learning_rate": 2.8153009641818103e-11,
      "logits/chosen": -0.9615390300750732,
      "logits/rejected": -0.9777545928955078,
      "logps/chosen": -175.46063232421875,
      "logps/rejected": -173.99378967285156,
      "loss": 0.6932,
      "rewards/accuracies": 0.5181249976158142,
      "rewards/chosen": -0.07821417599916458,
      "rewards/margins": 0.023193707689642906,
      "rewards/rejected": -0.10140787065029144,
      "step": 2500
    }
  ],
  "logging_steps": 100,
  "max_steps": 2512,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}