{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.995910949568378,
  "eval_steps": 400,
  "global_step": 137,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03634711494775102,
      "grad_norm": 73.50294131392823,
      "learning_rate": 2.857142857142857e-07,
      "logits/chosen": -10.441385269165039,
      "logits/rejected": -9.520084381103516,
      "logps/chosen": -0.9301475286483765,
      "logps/rejected": -1.348732590675354,
      "loss": 3.7321,
      "rewards/accuracies": 0.893750011920929,
      "rewards/chosen": -9.301475524902344,
      "rewards/margins": 4.185849189758301,
      "rewards/rejected": -13.487324714660645,
      "step": 5
    },
    {
      "epoch": 0.07269422989550205,
      "grad_norm": 49.41339223972053,
      "learning_rate": 5.714285714285714e-07,
      "logits/chosen": -10.112415313720703,
      "logits/rejected": -9.321776390075684,
      "logps/chosen": -1.0208427906036377,
      "logps/rejected": -1.3525886535644531,
      "loss": 3.5537,
      "rewards/accuracies": 0.893750011920929,
      "rewards/chosen": -10.208429336547852,
      "rewards/margins": 3.317457914352417,
      "rewards/rejected": -13.525888442993164,
      "step": 10
    },
    {
      "epoch": 0.10904134484325306,
      "grad_norm": 56.21490730930501,
      "learning_rate": 7.998695344323425e-07,
      "logits/chosen": -10.125561714172363,
      "logits/rejected": -9.403921127319336,
      "logps/chosen": -1.0114651918411255,
      "logps/rejected": -1.4378917217254639,
      "loss": 3.688,
      "rewards/accuracies": 0.8187500238418579,
      "rewards/chosen": -10.114651679992676,
      "rewards/margins": 4.264266014099121,
      "rewards/rejected": -14.37891674041748,
      "step": 15
    },
    {
      "epoch": 0.1453884597910041,
      "grad_norm": 49.2098026266453,
      "learning_rate": 7.953121695121394e-07,
      "logits/chosen": -9.852151870727539,
      "logits/rejected": -8.917947769165039,
      "logps/chosen": -0.917424201965332,
      "logps/rejected": -1.2778595685958862,
      "loss": 3.3675,
      "rewards/accuracies": 0.84375,
      "rewards/chosen": -9.17424201965332,
      "rewards/margins": 3.6043541431427,
      "rewards/rejected": -12.778596878051758,
      "step": 20
    },
    {
      "epoch": 0.18173557473875512,
      "grad_norm": 65.87909071485771,
      "learning_rate": 7.843163833184991e-07,
      "logits/chosen": -10.04487419128418,
      "logits/rejected": -9.234810829162598,
      "logps/chosen": -1.029579520225525,
      "logps/rejected": -1.2623463869094849,
      "loss": 3.3763,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -10.295795440673828,
      "rewards/margins": 2.3276684284210205,
      "rewards/rejected": -12.623464584350586,
      "step": 25
    },
    {
      "epoch": 0.21808268968650613,
      "grad_norm": 53.32133564579517,
      "learning_rate": 7.670612634414511e-07,
      "logits/chosen": -9.879186630249023,
      "logits/rejected": -8.854208946228027,
      "logps/chosen": -0.9565294981002808,
      "logps/rejected": -1.3326746225357056,
      "loss": 3.2331,
      "rewards/accuracies": 0.893750011920929,
      "rewards/chosen": -9.565296173095703,
      "rewards/margins": 3.7614502906799316,
      "rewards/rejected": -13.326745986938477,
      "step": 30
    },
    {
      "epoch": 0.25442980463425713,
      "grad_norm": 47.904474253960814,
      "learning_rate": 7.438278427948805e-07,
      "logits/chosen": -10.390070915222168,
      "logits/rejected": -9.588186264038086,
      "logps/chosen": -0.997881293296814,
      "logps/rejected": -1.4048808813095093,
      "loss": 3.1431,
      "rewards/accuracies": 0.84375,
      "rewards/chosen": -9.978813171386719,
      "rewards/margins": 4.069996356964111,
      "rewards/rejected": -14.048810005187988,
      "step": 35
    },
    {
      "epoch": 0.2907769195820082,
      "grad_norm": 52.83351276305546,
      "learning_rate": 7.149945224533862e-07,
      "logits/chosen": -10.330121040344238,
      "logits/rejected": -9.436741828918457,
      "logps/chosen": -1.2074191570281982,
      "logps/rejected": -1.6147959232330322,
      "loss": 3.079,
      "rewards/accuracies": 0.8062499761581421,
      "rewards/chosen": -12.074190139770508,
      "rewards/margins": 4.07376766204834,
      "rewards/rejected": -16.147958755493164,
      "step": 40
    },
    {
      "epoch": 0.3271240345297592,
      "grad_norm": 50.87463012185036,
      "learning_rate": 6.810309086608129e-07,
      "logits/chosen": -10.859004974365234,
      "logits/rejected": -9.854225158691406,
      "logps/chosen": -1.2316290140151978,
      "logps/rejected": -1.8107597827911377,
      "loss": 2.9332,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -12.316289901733398,
      "rewards/margins": 5.79130744934082,
      "rewards/rejected": -18.10759735107422,
      "step": 45
    },
    {
      "epoch": 0.36347114947751025,
      "grad_norm": 96.65968234360786,
      "learning_rate": 6.424901643866552e-07,
      "logits/chosen": -11.214125633239746,
      "logits/rejected": -10.290956497192383,
      "logps/chosen": -1.4425190687179565,
      "logps/rejected": -1.994137167930603,
      "loss": 2.9568,
      "rewards/accuracies": 0.831250011920929,
      "rewards/chosen": -14.425191879272461,
      "rewards/margins": 5.516180515289307,
      "rewards/rejected": -19.941370010375977,
      "step": 50
    },
    {
      "epoch": 0.39981826442526125,
      "grad_norm": 76.49645663435781,
      "learning_rate": 6e-07,
      "logits/chosen": -11.809680938720703,
      "logits/rejected": -10.86604118347168,
      "logps/chosen": -1.511315107345581,
      "logps/rejected": -2.095630168914795,
      "loss": 2.7726,
      "rewards/accuracies": 0.831250011920929,
      "rewards/chosen": -15.113149642944336,
      "rewards/margins": 5.843151092529297,
      "rewards/rejected": -20.956302642822266,
      "step": 55
    },
    {
      "epoch": 0.43616537937301225,
      "grad_norm": 52.07543926650269,
      "learning_rate": 5.542524497952543e-07,
      "logits/chosen": -12.52156925201416,
      "logits/rejected": -11.367462158203125,
      "logps/chosen": -1.417328119277954,
      "logps/rejected": -1.9500234127044678,
      "loss": 2.7633,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": -14.1732816696167,
      "rewards/margins": 5.3269548416137695,
      "rewards/rejected": -19.500234603881836,
      "step": 60
    },
    {
      "epoch": 0.4725124943207633,
      "grad_norm": 86.00453174970828,
      "learning_rate": 5.059926008786647e-07,
      "logits/chosen": -13.00245189666748,
      "logits/rejected": -11.82313346862793,
      "logps/chosen": -1.3697974681854248,
      "logps/rejected": -1.979061484336853,
      "loss": 2.6871,
      "rewards/accuracies": 0.893750011920929,
      "rewards/chosen": -13.697975158691406,
      "rewards/margins": 6.092639923095703,
      "rewards/rejected": -19.79061508178711,
      "step": 65
    },
    {
      "epoch": 0.5088596092685143,
      "grad_norm": 74.14149953928305,
      "learning_rate": 4.5600645798745166e-07,
      "logits/chosen": -13.302050590515137,
      "logits/rejected": -12.503473281860352,
      "logps/chosen": -1.3789888620376587,
      "logps/rejected": -1.9646472930908203,
      "loss": 2.6182,
      "rewards/accuracies": 0.8687499761581421,
      "rewards/chosen": -13.789888381958008,
      "rewards/margins": 5.856583595275879,
      "rewards/rejected": -19.646472930908203,
      "step": 70
    },
    {
      "epoch": 0.5452067242162654,
      "grad_norm": 82.09661278070892,
      "learning_rate": 4.051081418863895e-07,
      "logits/chosen": -14.177574157714844,
      "logits/rejected": -12.784692764282227,
      "logps/chosen": -1.523525595664978,
      "logps/rejected": -2.243549346923828,
      "loss": 2.4206,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": -15.235257148742676,
      "rewards/margins": 7.200235843658447,
      "rewards/rejected": -22.43549156188965,
      "step": 75
    },
    {
      "epoch": 0.5815538391640164,
      "grad_norm": 78.17477380695215,
      "learning_rate": 3.541266298406398e-07,
      "logits/chosen": -13.6174955368042,
      "logits/rejected": -12.198015213012695,
      "logps/chosen": -1.413405418395996,
      "logps/rejected": -2.035590648651123,
      "loss": 2.3449,
      "rewards/accuracies": 0.8687499761581421,
      "rewards/chosen": -14.134053230285645,
      "rewards/margins": 6.221851825714111,
      "rewards/rejected": -20.355907440185547,
      "step": 80
    },
    {
      "epoch": 0.6179009541117674,
      "grad_norm": 161.18580193098623,
      "learning_rate": 3.0389225412181565e-07,
      "logits/chosen": -14.64953899383545,
      "logits/rejected": -13.436065673828125,
      "logps/chosen": -1.577237844467163,
      "logps/rejected": -2.26001238822937,
      "loss": 2.3647,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": -15.772378921508789,
      "rewards/margins": 6.827744483947754,
      "rewards/rejected": -22.60012435913086,
      "step": 85
    },
    {
      "epoch": 0.6542480690595184,
      "grad_norm": 110.64212883100511,
      "learning_rate": 2.5522317844515273e-07,
      "logits/chosen": -15.540824890136719,
      "logits/rejected": -14.182299613952637,
      "logps/chosen": -1.7375078201293945,
      "logps/rejected": -2.5354347229003906,
      "loss": 2.2847,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -17.375080108642578,
      "rewards/margins": 7.979270935058594,
      "rewards/rejected": -25.35434913635254,
      "step": 90
    },
    {
      "epoch": 0.6905951840072694,
      "grad_norm": 94.27522984793,
      "learning_rate": 2.0891207259509476e-07,
      "logits/chosen": -16.05794906616211,
      "logits/rejected": -14.873092651367188,
      "logps/chosen": -1.8003565073013306,
      "logps/rejected": -2.508544683456421,
      "loss": 2.1888,
      "rewards/accuracies": 0.8687499761581421,
      "rewards/chosen": -18.003564834594727,
      "rewards/margins": 7.081883907318115,
      "rewards/rejected": -25.085451126098633,
      "step": 95
    },
    {
      "epoch": 0.7269422989550205,
      "grad_norm": 108.23339503673007,
      "learning_rate": 1.6571320226872206e-07,
      "logits/chosen": -15.491415023803711,
      "logits/rejected": -14.448579788208008,
      "logps/chosen": -1.7869226932525635,
      "logps/rejected": -2.6193735599517822,
      "loss": 2.235,
      "rewards/accuracies": 0.918749988079071,
      "rewards/chosen": -17.869226455688477,
      "rewards/margins": 8.324507713317871,
      "rewards/rejected": -26.193735122680664,
      "step": 100
    },
    {
      "epoch": 0.7632894139027715,
      "grad_norm": 98.94867860284775,
      "learning_rate": 1.2633014440382787e-07,
      "logits/chosen": -16.772117614746094,
      "logits/rejected": -15.896406173706055,
      "logps/chosen": -1.7917287349700928,
      "logps/rejected": -2.5536069869995117,
      "loss": 1.8876,
      "rewards/accuracies": 0.918749988079071,
      "rewards/chosen": -17.917287826538086,
      "rewards/margins": 7.61877965927124,
      "rewards/rejected": -25.536067962646484,
      "step": 105
    },
    {
      "epoch": 0.7996365288505225,
      "grad_norm": 116.37014336849657,
      "learning_rate": 9.14043280712228e-08,
      "logits/chosen": -16.703330993652344,
      "logits/rejected": -15.419268608093262,
      "logps/chosen": -1.8466812372207642,
      "logps/rejected": -2.60697340965271,
      "loss": 2.0518,
      "rewards/accuracies": 0.8812500238418579,
      "rewards/chosen": -18.466812133789062,
      "rewards/margins": 7.602923393249512,
      "rewards/rejected": -26.06973648071289,
      "step": 110
    },
    {
      "epoch": 0.8359836437982735,
      "grad_norm": 88.56805919880858,
      "learning_rate": 6.150458756494239e-08,
      "logits/chosen": -17.041057586669922,
      "logits/rejected": -15.86296558380127,
      "logps/chosen": -1.9040367603302002,
      "logps/rejected": -2.855522632598877,
      "loss": 1.8644,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": -19.040367126464844,
      "rewards/margins": 9.514856338500977,
      "rewards/rejected": -28.555225372314453,
      "step": 115
    },
    {
      "epoch": 0.8723307587460245,
      "grad_norm": 119.49778162125348,
      "learning_rate": 3.711789783843522e-08,
      "logits/chosen": -16.915624618530273,
      "logits/rejected": -15.977205276489258,
      "logps/chosen": -1.9863815307617188,
      "logps/rejected": -2.819209575653076,
      "loss": 2.0238,
      "rewards/accuracies": 0.8687499761581421,
      "rewards/chosen": -19.863813400268555,
      "rewards/margins": 8.328282356262207,
      "rewards/rejected": -28.192096710205078,
      "step": 120
    },
    {
      "epoch": 0.9086778736937755,
      "grad_norm": 100.49399716119115,
      "learning_rate": 1.8641443178027784e-08,
      "logits/chosen": -17.482728958129883,
      "logits/rejected": -16.11745262145996,
      "logps/chosen": -1.8876680135726929,
      "logps/rejected": -2.652832269668579,
      "loss": 2.0136,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -18.876678466796875,
      "rewards/margins": 7.6516432762146,
      "rewards/rejected": -26.528324127197266,
      "step": 125
    },
    {
      "epoch": 0.9450249886415266,
      "grad_norm": 78.59767084378562,
      "learning_rate": 6.376148290617145e-09,
      "logits/chosen": -16.445621490478516,
      "logits/rejected": -15.228750228881836,
      "logps/chosen": -1.947279691696167,
      "logps/rejected": -2.799992084503174,
      "loss": 1.9014,
      "rewards/accuracies": 0.8812500238418579,
      "rewards/chosen": -19.472797393798828,
      "rewards/margins": 8.527124404907227,
      "rewards/rejected": -27.999919891357422,
      "step": 130
    },
    {
      "epoch": 0.9813721035892776,
      "grad_norm": 106.19866039415072,
      "learning_rate": 5.217771643080127e-10,
      "logits/chosen": -17.454696655273438,
      "logits/rejected": -16.37453842163086,
      "logps/chosen": -1.9225728511810303,
      "logps/rejected": -2.7656478881835938,
      "loss": 1.9868,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -19.225727081298828,
      "rewards/margins": 8.430750846862793,
      "rewards/rejected": -27.656478881835938,
      "step": 135
    },
    {
      "epoch": 0.995910949568378,
      "step": 137,
      "total_flos": 0.0,
      "train_loss": 2.6376401560149922,
      "train_runtime": 2565.593,
      "train_samples_per_second": 6.862,
      "train_steps_per_second": 0.053
    }
  ],
  "logging_steps": 5,
  "max_steps": 137,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 1000000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}