{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.995910949568378,
  "eval_steps": 400,
  "global_step": 137,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03634711494775102,
      "grad_norm": 42.18056900627306,
      "learning_rate": 2.857142857142857e-07,
      "logits/chosen": -10.936590194702148,
      "logits/rejected": -11.232436180114746,
      "logps/chosen": -0.6059076189994812,
      "logps/rejected": -0.5706321001052856,
      "loss": 5.4752,
      "rewards/accuracies": 0.3125,
      "rewards/chosen": -6.05907678604126,
      "rewards/margins": -0.352755606174469,
      "rewards/rejected": -5.7063212394714355,
      "step": 5
    },
    {
      "epoch": 0.07269422989550205,
      "grad_norm": 33.128716826319426,
      "learning_rate": 5.714285714285714e-07,
      "logits/chosen": -10.911026954650879,
      "logits/rejected": -11.181764602661133,
      "logps/chosen": -0.6222352385520935,
      "logps/rejected": -0.5984331965446472,
      "loss": 5.5068,
      "rewards/accuracies": 0.3187499940395355,
      "rewards/chosen": -6.222352504730225,
      "rewards/margins": -0.23802027106285095,
      "rewards/rejected": -5.984332084655762,
      "step": 10
    },
    {
      "epoch": 0.10904134484325306,
      "grad_norm": 50.848111590558474,
      "learning_rate": 7.998695344323425e-07,
      "logits/chosen": -10.209379196166992,
      "logits/rejected": -10.468820571899414,
      "logps/chosen": -0.6080077290534973,
      "logps/rejected": -0.5699423551559448,
      "loss": 5.3874,
      "rewards/accuracies": 0.2874999940395355,
      "rewards/chosen": -6.080076694488525,
      "rewards/margins": -0.3806529641151428,
      "rewards/rejected": -5.699423789978027,
      "step": 15
    },
    {
      "epoch": 0.1453884597910041,
      "grad_norm": 35.145738479410355,
      "learning_rate": 7.953121695121394e-07,
      "logits/chosen": -9.792947769165039,
      "logits/rejected": -10.061820030212402,
      "logps/chosen": -0.5358205437660217,
      "logps/rejected": -0.5112504363059998,
      "loss": 5.3654,
      "rewards/accuracies": 0.3499999940395355,
      "rewards/chosen": -5.358205318450928,
      "rewards/margins": -0.24570095539093018,
      "rewards/rejected": -5.112504482269287,
      "step": 20
    },
    {
      "epoch": 0.18173557473875512,
      "grad_norm": 35.43905001965967,
      "learning_rate": 7.843163833184991e-07,
      "logits/chosen": -9.215612411499023,
      "logits/rejected": -9.5372314453125,
      "logps/chosen": -0.47783246636390686,
      "logps/rejected": -0.4458232522010803,
      "loss": 5.2677,
      "rewards/accuracies": 0.33125001192092896,
      "rewards/chosen": -4.778324604034424,
      "rewards/margins": -0.32009226083755493,
      "rewards/rejected": -4.4582319259643555,
      "step": 25
    },
    {
      "epoch": 0.21808268968650613,
      "grad_norm": 34.872469426364724,
      "learning_rate": 7.670612634414511e-07,
      "logits/chosen": -8.819177627563477,
      "logits/rejected": -8.965360641479492,
      "logps/chosen": -0.49288082122802734,
      "logps/rejected": -0.4752427041530609,
      "loss": 5.1863,
      "rewards/accuracies": 0.34375,
      "rewards/chosen": -4.928807735443115,
      "rewards/margins": -0.17638036608695984,
      "rewards/rejected": -4.752427577972412,
      "step": 30
    },
    {
      "epoch": 0.25442980463425713,
      "grad_norm": 41.683131083762184,
      "learning_rate": 7.438278427948805e-07,
      "logits/chosen": -8.983338356018066,
      "logits/rejected": -9.358332633972168,
      "logps/chosen": -0.45715832710266113,
      "logps/rejected": -0.4428199231624603,
      "loss": 5.1479,
      "rewards/accuracies": 0.3187499940395355,
      "rewards/chosen": -4.571582794189453,
      "rewards/margins": -0.14338405430316925,
      "rewards/rejected": -4.428199291229248,
      "step": 35
    },
    {
      "epoch": 0.2907769195820082,
      "grad_norm": 35.49350724475253,
      "learning_rate": 7.149945224533862e-07,
      "logits/chosen": -8.473846435546875,
      "logits/rejected": -8.634288787841797,
      "logps/chosen": -0.49668779969215393,
      "logps/rejected": -0.4822346568107605,
      "loss": 5.1059,
      "rewards/accuracies": 0.4124999940395355,
      "rewards/chosen": -4.9668779373168945,
      "rewards/margins": -0.14453084766864777,
      "rewards/rejected": -4.8223466873168945,
      "step": 40
    },
    {
      "epoch": 0.3271240345297592,
      "grad_norm": 35.201815027229166,
      "learning_rate": 6.810309086608129e-07,
      "logits/chosen": -9.485041618347168,
      "logits/rejected": -9.641453742980957,
      "logps/chosen": -0.4795761704444885,
      "logps/rejected": -0.4812702536582947,
      "loss": 5.0687,
      "rewards/accuracies": 0.3812499940395355,
      "rewards/chosen": -4.795762062072754,
      "rewards/margins": 0.016940664499998093,
      "rewards/rejected": -4.812702655792236,
      "step": 45
    },
    {
      "epoch": 0.36347114947751025,
      "grad_norm": 41.62812201333068,
      "learning_rate": 6.424901643866552e-07,
      "logits/chosen": -9.312422752380371,
      "logits/rejected": -9.527371406555176,
      "logps/chosen": -0.5089246034622192,
      "logps/rejected": -0.5194448232650757,
      "loss": 5.0107,
      "rewards/accuracies": 0.4124999940395355,
      "rewards/chosen": -5.0892462730407715,
      "rewards/margins": 0.10520193725824356,
      "rewards/rejected": -5.194448471069336,
      "step": 50
    },
    {
      "epoch": 0.39981826442526125,
      "grad_norm": 34.69469413711179,
      "learning_rate": 6e-07,
      "logits/chosen": -9.82011604309082,
      "logits/rejected": -9.918914794921875,
      "logps/chosen": -0.5091612935066223,
      "logps/rejected": -0.5071789026260376,
      "loss": 4.9611,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": -5.091612815856934,
      "rewards/margins": -0.019824281334877014,
      "rewards/rejected": -5.071788311004639,
      "step": 55
    },
    {
      "epoch": 0.43616537937301225,
      "grad_norm": 39.20092819974629,
      "learning_rate": 5.542524497952543e-07,
      "logits/chosen": -10.392576217651367,
      "logits/rejected": -10.515856742858887,
      "logps/chosen": -0.49750223755836487,
      "logps/rejected": -0.5498560070991516,
      "loss": 4.7725,
      "rewards/accuracies": 0.4625000059604645,
      "rewards/chosen": -4.975022315979004,
      "rewards/margins": 0.5235382318496704,
      "rewards/rejected": -5.498560428619385,
      "step": 60
    },
    {
      "epoch": 0.4725124943207633,
      "grad_norm": 41.76932417294435,
      "learning_rate": 5.059926008786647e-07,
      "logits/chosen": -9.944269180297852,
      "logits/rejected": -10.23642635345459,
      "logps/chosen": -0.5527420043945312,
      "logps/rejected": -0.6039702296257019,
      "loss": 4.9196,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -5.5274200439453125,
      "rewards/margins": 0.5122829675674438,
      "rewards/rejected": -6.039702415466309,
      "step": 65
    },
    {
      "epoch": 0.5088596092685143,
      "grad_norm": 42.78642756929462,
      "learning_rate": 4.5600645798745166e-07,
      "logits/chosen": -10.92486572265625,
      "logits/rejected": -11.130925178527832,
      "logps/chosen": -0.5507891774177551,
      "logps/rejected": -0.5964936017990112,
      "loss": 4.7685,
      "rewards/accuracies": 0.5062500238418579,
      "rewards/chosen": -5.50789213180542,
      "rewards/margins": 0.4570443034172058,
      "rewards/rejected": -5.964935779571533,
      "step": 70
    },
    {
      "epoch": 0.5452067242162654,
      "grad_norm": 43.736655032116566,
      "learning_rate": 4.051081418863895e-07,
      "logits/chosen": -10.675444602966309,
      "logits/rejected": -10.829668045043945,
      "logps/chosen": -0.6147274374961853,
      "logps/rejected": -0.6446117758750916,
      "loss": 4.8607,
      "rewards/accuracies": 0.581250011920929,
      "rewards/chosen": -6.147274494171143,
      "rewards/margins": 0.29884377121925354,
      "rewards/rejected": -6.446118354797363,
      "step": 75
    },
    {
      "epoch": 0.5815538391640164,
      "grad_norm": 50.47780593376421,
      "learning_rate": 3.541266298406398e-07,
      "logits/chosen": -11.719624519348145,
      "logits/rejected": -11.85960578918457,
      "logps/chosen": -0.6017196774482727,
      "logps/rejected": -0.6400548815727234,
      "loss": 4.7166,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -6.017197608947754,
      "rewards/margins": 0.38335245847702026,
      "rewards/rejected": -6.400549411773682,
      "step": 80
    },
    {
      "epoch": 0.6179009541117674,
      "grad_norm": 49.58320001105474,
      "learning_rate": 3.0389225412181565e-07,
      "logits/chosen": -11.57774543762207,
      "logits/rejected": -11.787096977233887,
      "logps/chosen": -0.639508843421936,
      "logps/rejected": -0.7087122797966003,
      "loss": 4.6635,
      "rewards/accuracies": 0.59375,
      "rewards/chosen": -6.395088195800781,
      "rewards/margins": 0.6920341849327087,
      "rewards/rejected": -7.087122917175293,
      "step": 85
    },
    {
      "epoch": 0.6542480690595184,
      "grad_norm": 60.72521586951035,
      "learning_rate": 2.5522317844515273e-07,
      "logits/chosen": -11.539664268493652,
      "logits/rejected": -11.73334789276123,
      "logps/chosen": -0.6763519644737244,
      "logps/rejected": -0.719671905040741,
      "loss": 4.6877,
      "rewards/accuracies": 0.543749988079071,
      "rewards/chosen": -6.763520240783691,
      "rewards/margins": 0.4331996440887451,
      "rewards/rejected": -7.196720123291016,
      "step": 90
    },
    {
      "epoch": 0.6905951840072694,
      "grad_norm": 73.18876407608641,
      "learning_rate": 2.0891207259509476e-07,
      "logits/chosen": -11.891935348510742,
      "logits/rejected": -11.978799819946289,
      "logps/chosen": -0.6321767568588257,
      "logps/rejected": -0.7210510969161987,
      "loss": 4.6661,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -6.321768283843994,
      "rewards/margins": 0.8887429237365723,
      "rewards/rejected": -7.210511207580566,
      "step": 95
    },
    {
      "epoch": 0.7269422989550205,
      "grad_norm": 58.29758563166122,
      "learning_rate": 1.6571320226872206e-07,
      "logits/chosen": -12.378867149353027,
      "logits/rejected": -12.450654983520508,
      "logps/chosen": -0.7091072797775269,
      "logps/rejected": -0.7949134111404419,
      "loss": 4.5318,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -7.091072082519531,
      "rewards/margins": 0.8580614924430847,
      "rewards/rejected": -7.949133396148682,
      "step": 100
    },
    {
      "epoch": 0.7632894139027715,
      "grad_norm": 65.14186021974389,
      "learning_rate": 1.2633014440382787e-07,
      "logits/chosen": -12.120596885681152,
      "logits/rejected": -12.198526382446289,
      "logps/chosen": -0.7462261319160461,
      "logps/rejected": -0.8084705471992493,
      "loss": 4.5075,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -7.462261199951172,
      "rewards/margins": 0.622443437576294,
      "rewards/rejected": -8.084705352783203,
      "step": 105
    },
    {
      "epoch": 0.7996365288505225,
      "grad_norm": 66.21038830050585,
      "learning_rate": 9.14043280712228e-08,
      "logits/chosen": -12.642437934875488,
      "logits/rejected": -12.874890327453613,
      "logps/chosen": -0.7560291290283203,
      "logps/rejected": -0.8245932459831238,
      "loss": 4.6235,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -7.560290336608887,
      "rewards/margins": 0.6856421828269958,
      "rewards/rejected": -8.245932579040527,
      "step": 110
    },
    {
      "epoch": 0.8359836437982735,
      "grad_norm": 58.78661492817544,
      "learning_rate": 6.150458756494239e-08,
      "logits/chosen": -12.5349760055542,
      "logits/rejected": -12.633232116699219,
      "logps/chosen": -0.7407893538475037,
      "logps/rejected": -0.8003439903259277,
      "loss": 4.537,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -7.407894134521484,
      "rewards/margins": 0.5955458879470825,
      "rewards/rejected": -8.003438949584961,
      "step": 115
    },
    {
      "epoch": 0.8723307587460245,
      "grad_norm": 72.85901824273444,
      "learning_rate": 3.711789783843522e-08,
      "logits/chosen": -12.179682731628418,
      "logits/rejected": -12.421493530273438,
      "logps/chosen": -0.7077151536941528,
      "logps/rejected": -0.8436015248298645,
      "loss": 4.41,
      "rewards/accuracies": 0.6187499761581421,
      "rewards/chosen": -7.077151298522949,
      "rewards/margins": 1.358863115310669,
      "rewards/rejected": -8.436014175415039,
      "step": 120
    },
    {
      "epoch": 0.9086778736937755,
      "grad_norm": 81.44112780212329,
      "learning_rate": 1.8641443178027784e-08,
      "logits/chosen": -12.844386100769043,
      "logits/rejected": -13.204521179199219,
      "logps/chosen": -0.7584520578384399,
      "logps/rejected": -0.8550151586532593,
      "loss": 4.4737,
      "rewards/accuracies": 0.6312500238418579,
      "rewards/chosen": -7.584519863128662,
      "rewards/margins": 0.965630829334259,
      "rewards/rejected": -8.550151824951172,
      "step": 125
    },
    {
      "epoch": 0.9450249886415266,
      "grad_norm": 63.15208705033616,
      "learning_rate": 6.376148290617145e-09,
      "logits/chosen": -13.006174087524414,
      "logits/rejected": -13.145883560180664,
      "logps/chosen": -0.7715423703193665,
      "logps/rejected": -0.8375405073165894,
      "loss": 4.6746,
      "rewards/accuracies": 0.6187499761581421,
      "rewards/chosen": -7.715423583984375,
      "rewards/margins": 0.6599816679954529,
      "rewards/rejected": -8.375406265258789,
      "step": 130
    },
    {
      "epoch": 0.9813721035892776,
      "grad_norm": 69.30502594164692,
      "learning_rate": 5.217771643080127e-10,
      "logits/chosen": -12.759674072265625,
      "logits/rejected": -12.864163398742676,
      "logps/chosen": -0.7980765700340271,
      "logps/rejected": -0.8657040596008301,
      "loss": 4.3661,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -7.980767250061035,
      "rewards/margins": 0.6762741804122925,
      "rewards/rejected": -8.6570405960083,
      "step": 135
    },
    {
      "epoch": 0.995910949568378,
      "step": 137,
      "total_flos": 0.0,
      "train_loss": 4.865393144370866,
      "train_runtime": 3024.5765,
      "train_samples_per_second": 5.821,
      "train_steps_per_second": 0.045
    }
  ],
  "logging_steps": 5,
  "max_steps": 137,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 1000000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}