{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 44,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.022727272727272728,
      "grad_norm": 16.625,
      "learning_rate": 0.0,
      "logits/chosen": 0.4483538866043091,
      "logits/rejected": 0.7762327194213867,
      "logps/chosen": -34.067535400390625,
      "logps/rejected": -26.805654525756836,
      "loss": 0.6781,
      "rewards/accuracies": 0.6666666865348816,
      "rewards/chosen": 0.017476718872785568,
      "rewards/margins": 0.03066336363554001,
      "rewards/rejected": -0.01318664662539959,
      "step": 1
    },
    {
      "epoch": 0.045454545454545456,
      "grad_norm": 11.875,
      "learning_rate": 1.2e-05,
      "logits/chosen": 0.5961519479751587,
      "logits/rejected": 0.6054960489273071,
      "logps/chosen": -20.344480514526367,
      "logps/rejected": -23.291126251220703,
      "loss": 0.696,
      "rewards/accuracies": 0.3333333432674408,
      "rewards/chosen": 0.002745183650404215,
      "rewards/margins": -0.00570220872759819,
      "rewards/rejected": 0.008447392843663692,
      "step": 2
    },
    {
      "epoch": 0.06818181818181818,
      "grad_norm": 12.4375,
      "learning_rate": 2.4e-05,
      "logits/chosen": -0.2268427461385727,
      "logits/rejected": -0.08561667054891586,
      "logps/chosen": -26.2761287689209,
      "logps/rejected": -23.514366149902344,
      "loss": 0.6965,
      "rewards/accuracies": 0.6666666865348816,
      "rewards/chosen": -0.0071416860446333885,
      "rewards/margins": -0.0062666586600244045,
      "rewards/rejected": -0.0008750278502702713,
      "step": 3
    },
    {
      "epoch": 0.09090909090909091,
      "grad_norm": 13.0,
      "learning_rate": 3.6e-05,
      "logits/chosen": 0.5151932239532471,
      "logits/rejected": 0.5493708848953247,
      "logps/chosen": -29.416748046875,
      "logps/rejected": -21.80704116821289,
      "loss": 0.6664,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.04196351394057274,
      "rewards/margins": 0.05446663126349449,
      "rewards/rejected": -0.012503115460276604,
      "step": 4
    },
    {
      "epoch": 0.11363636363636363,
      "grad_norm": 12.1875,
      "learning_rate": 4.8e-05,
      "logits/chosen": 0.3497011661529541,
      "logits/rejected": 0.31218263506889343,
      "logps/chosen": -31.662017822265625,
      "logps/rejected": -24.33977508544922,
      "loss": 0.6927,
      "rewards/accuracies": 0.6666666865348816,
      "rewards/chosen": 0.007583046332001686,
      "rewards/margins": 0.0009033204987645149,
      "rewards/rejected": 0.0066797262988984585,
      "step": 5
    },
    {
      "epoch": 0.13636363636363635,
      "grad_norm": 12.3125,
      "learning_rate": 6e-05,
      "logits/chosen": 0.5960320234298706,
      "logits/rejected": 1.0259536504745483,
      "logps/chosen": -30.316076278686523,
      "logps/rejected": -21.38100242614746,
      "loss": 0.6842,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.012160556390881538,
      "rewards/margins": 0.01807880401611328,
      "rewards/rejected": -0.0059182485565543175,
      "step": 6
    },
    {
      "epoch": 0.1590909090909091,
      "grad_norm": 12.6875,
      "learning_rate": 5.846153846153846e-05,
      "logits/chosen": 0.3294275999069214,
      "logits/rejected": 0.2885156273841858,
      "logps/chosen": -30.754199981689453,
      "logps/rejected": -21.946483612060547,
      "loss": 0.6911,
      "rewards/accuracies": 0.6666666865348816,
      "rewards/chosen": 0.020008979365229607,
      "rewards/margins": 0.004138438496738672,
      "rewards/rejected": 0.015870539471507072,
      "step": 7
    },
    {
      "epoch": 0.18181818181818182,
      "grad_norm": 11.75,
      "learning_rate": 5.692307692307692e-05,
      "logits/chosen": 0.4179474115371704,
      "logits/rejected": 0.6303722262382507,
      "logps/chosen": -25.757431030273438,
      "logps/rejected": -22.029760360717773,
      "loss": 0.6991,
      "rewards/accuracies": 0.3333333432674408,
      "rewards/chosen": -0.005146535113453865,
      "rewards/margins": -0.011694017797708511,
      "rewards/rejected": 0.006547483615577221,
      "step": 8
    },
    {
      "epoch": 0.20454545454545456,
      "grad_norm": 11.125,
      "learning_rate": 5.538461538461539e-05,
      "logits/chosen": 0.6332809329032898,
      "logits/rejected": 0.7613773345947266,
      "logps/chosen": -28.643695831298828,
      "logps/rejected": -22.429039001464844,
      "loss": 0.686,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.019583575427532196,
      "rewards/margins": 0.014355978928506374,
      "rewards/rejected": 0.005227597430348396,
      "step": 9
    },
    {
      "epoch": 0.22727272727272727,
      "grad_norm": 8.875,
      "learning_rate": 5.384615384615385e-05,
      "logits/chosen": 0.4406185746192932,
      "logits/rejected": 0.44449326395988464,
      "logps/chosen": -23.943906784057617,
      "logps/rejected": -20.787511825561523,
      "loss": 0.6815,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.06105436012148857,
      "rewards/margins": 0.023596446961164474,
      "rewards/rejected": 0.0374579131603241,
      "step": 10
    },
    {
      "epoch": 0.25,
      "grad_norm": 18.375,
      "learning_rate": 5.230769230769231e-05,
      "logits/chosen": 1.2527503967285156,
      "logits/rejected": 1.176261067390442,
      "logps/chosen": -42.38333511352539,
      "logps/rejected": -26.054731369018555,
      "loss": 0.6936,
      "rewards/accuracies": 0.3333333432674408,
      "rewards/chosen": 0.026292739436030388,
      "rewards/margins": -0.0008817669004201889,
      "rewards/rejected": 0.027174506336450577,
      "step": 11
    },
    {
      "epoch": 0.2727272727272727,
      "grad_norm": 9.625,
      "learning_rate": 5.076923076923077e-05,
      "logits/chosen": 0.37028029561042786,
      "logits/rejected": 0.5766373872756958,
      "logps/chosen": -22.245540618896484,
      "logps/rejected": -21.667922973632812,
      "loss": 0.663,
      "rewards/accuracies": 0.6666666865348816,
      "rewards/chosen": 0.05539780110120773,
      "rewards/margins": 0.06313318014144897,
      "rewards/rejected": -0.007735380437225103,
      "step": 12
    },
    {
      "epoch": 0.29545454545454547,
      "grad_norm": 9.8125,
      "learning_rate": 4.923076923076923e-05,
      "logits/chosen": -0.09108446538448334,
      "logits/rejected": 0.24406792223453522,
      "logps/chosen": -28.064685821533203,
      "logps/rejected": -21.54052734375,
      "loss": 0.6802,
      "rewards/accuracies": 0.6666666865348816,
      "rewards/chosen": 0.05286191403865814,
      "rewards/margins": 0.026534145697951317,
      "rewards/rejected": 0.026327770203351974,
      "step": 13
    },
    {
      "epoch": 0.3181818181818182,
      "grad_norm": 11.8125,
      "learning_rate": 4.769230769230769e-05,
      "logits/chosen": 0.48153504729270935,
      "logits/rejected": 0.533014714717865,
      "logps/chosen": -25.028493881225586,
      "logps/rejected": -24.618581771850586,
      "loss": 0.7134,
      "rewards/accuracies": 0.3333333432674408,
      "rewards/chosen": -0.03459123149514198,
      "rewards/margins": -0.038723185658454895,
      "rewards/rejected": 0.004131953231990337,
      "step": 14
    },
    {
      "epoch": 0.3409090909090909,
      "grad_norm": 12.4375,
      "learning_rate": 4.615384615384616e-05,
      "logits/chosen": 0.32169950008392334,
      "logits/rejected": 0.5244829058647156,
      "logps/chosen": -26.596710205078125,
      "logps/rejected": -23.023845672607422,
      "loss": 0.6797,
      "rewards/accuracies": 0.6666666865348816,
      "rewards/chosen": 0.047678254544734955,
      "rewards/margins": 0.02770627662539482,
      "rewards/rejected": 0.019971976056694984,
      "step": 15
    },
    {
      "epoch": 0.36363636363636365,
      "grad_norm": 11.8125,
      "learning_rate": 4.461538461538462e-05,
      "logits/chosen": -1.4551410675048828,
      "logits/rejected": -1.4342135190963745,
      "logps/chosen": -29.413467407226562,
      "logps/rejected": -25.22298240661621,
      "loss": 0.6804,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.03757172077894211,
      "rewards/margins": 0.02564137801527977,
      "rewards/rejected": 0.01193033903837204,
      "step": 16
    },
    {
      "epoch": 0.38636363636363635,
      "grad_norm": 12.6875,
      "learning_rate": 4.307692307692308e-05,
      "logits/chosen": 1.1149061918258667,
      "logits/rejected": 1.3534132242202759,
      "logps/chosen": -28.74739646911621,
      "logps/rejected": -23.868566513061523,
      "loss": 0.6526,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.031014060601592064,
      "rewards/margins": 0.08419768512248993,
      "rewards/rejected": -0.05318361520767212,
      "step": 17
    },
    {
      "epoch": 0.4090909090909091,
      "grad_norm": 13.6875,
      "learning_rate": 4.153846153846154e-05,
      "logits/chosen": 0.43228524923324585,
      "logits/rejected": 0.44759899377822876,
      "logps/chosen": -29.343727111816406,
      "logps/rejected": -20.927490234375,
      "loss": 0.6779,
      "rewards/accuracies": 0.6666666865348816,
      "rewards/chosen": 0.0534362867474556,
      "rewards/margins": 0.03123900294303894,
      "rewards/rejected": 0.022197280079126358,
      "step": 18
    },
    {
      "epoch": 0.4318181818181818,
      "grad_norm": 11.0,
      "learning_rate": 3.9999999999999996e-05,
      "logits/chosen": 0.9617244601249695,
      "logits/rejected": 1.036171317100525,
      "logps/chosen": -22.17565155029297,
      "logps/rejected": -23.60016632080078,
      "loss": 0.6727,
      "rewards/accuracies": 0.6666666865348816,
      "rewards/chosen": 0.015546607784926891,
      "rewards/margins": 0.0422033928334713,
      "rewards/rejected": -0.02665678784251213,
      "step": 19
    },
    {
      "epoch": 0.45454545454545453,
      "grad_norm": 15.6875,
      "learning_rate": 3.846153846153846e-05,
      "logits/chosen": 0.6454523205757141,
      "logits/rejected": 1.2390544414520264,
      "logps/chosen": -26.925029754638672,
      "logps/rejected": -24.303573608398438,
      "loss": 0.6238,
      "rewards/accuracies": 0.6666666865348816,
      "rewards/chosen": 0.10690931975841522,
      "rewards/margins": 0.15301425755023956,
      "rewards/rejected": -0.04610493779182434,
      "step": 20
    },
    {
      "epoch": 0.4772727272727273,
      "grad_norm": 14.6875,
      "learning_rate": 3.692307692307693e-05,
      "logits/chosen": 0.8735722303390503,
      "logits/rejected": 1.108435034751892,
      "logps/chosen": -33.20803451538086,
      "logps/rejected": -23.885366439819336,
      "loss": 0.7071,
      "rewards/accuracies": 0.3333333432674408,
      "rewards/chosen": -0.025695737451314926,
      "rewards/margins": -0.026847710832953453,
      "rewards/rejected": 0.0011519752442836761,
      "step": 21
    },
    {
      "epoch": 0.5,
      "grad_norm": 11.125,
      "learning_rate": 3.538461538461539e-05,
      "logits/chosen": 0.3912545144557953,
      "logits/rejected": 0.8124356269836426,
      "logps/chosen": -28.345073699951172,
      "logps/rejected": -29.91925048828125,
      "loss": 0.6494,
      "rewards/accuracies": 0.6666666865348816,
      "rewards/chosen": 0.0346221923828125,
      "rewards/margins": 0.09574788063764572,
      "rewards/rejected": -0.06112569198012352,
      "step": 22
    },
    {
      "epoch": 0.5227272727272727,
      "grad_norm": 12.25,
      "learning_rate": 3.384615384615385e-05,
      "logits/chosen": 0.8518146872520447,
      "logits/rejected": 0.7960999608039856,
      "logps/chosen": -30.20541000366211,
      "logps/rejected": -25.19312858581543,
      "loss": 0.673,
      "rewards/accuracies": 0.3333333432674408,
      "rewards/chosen": -0.005412420257925987,
      "rewards/margins": 0.04422512277960777,
      "rewards/rejected": -0.04963754117488861,
      "step": 23
    },
    {
      "epoch": 0.5454545454545454,
      "grad_norm": 11.1875,
      "learning_rate": 3.230769230769231e-05,
      "logits/chosen": 0.5866941213607788,
      "logits/rejected": 0.9357667565345764,
      "logps/chosen": -26.213180541992188,
      "logps/rejected": -22.639881134033203,
      "loss": 0.6633,
      "rewards/accuracies": 0.6666666865348816,
      "rewards/chosen": 0.04306405782699585,
      "rewards/margins": 0.0637446790933609,
      "rewards/rejected": -0.02068062126636505,
      "step": 24
    },
    {
      "epoch": 0.5681818181818182,
      "grad_norm": 12.625,
      "learning_rate": 3.076923076923077e-05,
      "logits/chosen": 0.4800880551338196,
      "logits/rejected": 0.47039034962654114,
      "logps/chosen": -34.4006233215332,
      "logps/rejected": -22.132925033569336,
      "loss": 0.6946,
      "rewards/accuracies": 0.3333333432674408,
      "rewards/chosen": 0.020339712500572205,
      "rewards/margins": -0.002547518815845251,
      "rewards/rejected": 0.022887229919433594,
      "step": 25
    },
    {
      "epoch": 0.5909090909090909,
      "grad_norm": 9.375,
      "learning_rate": 2.923076923076923e-05,
      "logits/chosen": 0.09336434304714203,
      "logits/rejected": 0.05750793218612671,
      "logps/chosen": -23.79607391357422,
      "logps/rejected": -21.185327529907227,
      "loss": 0.6685,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.053456246852874756,
      "rewards/margins": 0.050235815346241,
      "rewards/rejected": 0.0032204315066337585,
      "step": 26
    },
    {
      "epoch": 0.6136363636363636,
      "grad_norm": 12.125,
      "learning_rate": 2.7692307692307694e-05,
      "logits/chosen": 0.4014180600643158,
      "logits/rejected": 1.0060770511627197,
      "logps/chosen": -24.400341033935547,
      "logps/rejected": -26.51728630065918,
      "loss": 0.6725,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.04896443709731102,
      "rewards/margins": 0.04188614338636398,
      "rewards/rejected": 0.007078297436237335,
      "step": 27
    },
    {
      "epoch": 0.6363636363636364,
      "grad_norm": 16.875,
      "learning_rate": 2.6153846153846157e-05,
      "logits/chosen": 0.24801835417747498,
      "logits/rejected": 0.2863054573535919,
      "logps/chosen": -26.580617904663086,
      "logps/rejected": -22.799705505371094,
      "loss": 0.7299,
      "rewards/accuracies": 0.3333333432674408,
      "rewards/chosen": 0.022428005933761597,
      "rewards/margins": -0.06923790276050568,
      "rewards/rejected": 0.09166590869426727,
      "step": 28
    },
    {
      "epoch": 0.6590909090909091,
      "grad_norm": 11.0625,
      "learning_rate": 2.4615384615384616e-05,
      "logits/chosen": 0.31185609102249146,
      "logits/rejected": 0.16100364923477173,
      "logps/chosen": -27.79303741455078,
      "logps/rejected": -22.794635772705078,
      "loss": 0.6317,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.08723494410514832,
      "rewards/margins": 0.12933623790740967,
      "rewards/rejected": -0.042101286351680756,
      "step": 29
    },
    {
      "epoch": 0.6818181818181818,
      "grad_norm": 14.9375,
      "learning_rate": 2.307692307692308e-05,
      "logits/chosen": 0.3959487974643707,
      "logits/rejected": 0.688706636428833,
      "logps/chosen": -30.851974487304688,
      "logps/rejected": -22.691082000732422,
      "loss": 0.644,
      "rewards/accuracies": 0.6666666865348816,
      "rewards/chosen": 0.1131121963262558,
      "rewards/margins": 0.10812320560216904,
      "rewards/rejected": 0.004988989792764187,
      "step": 30
    },
    {
      "epoch": 0.7045454545454546,
      "grad_norm": 8.625,
      "learning_rate": 2.153846153846154e-05,
      "logits/chosen": 0.37772199511528015,
      "logits/rejected": 0.6661175489425659,
      "logps/chosen": -24.510616302490234,
      "logps/rejected": -20.977294921875,
      "loss": 0.6215,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.1579880565404892,
      "rewards/margins": 0.15024060010910034,
      "rewards/rejected": 0.0077474601566791534,
      "step": 31
    },
    {
      "epoch": 0.7272727272727273,
      "grad_norm": 11.625,
      "learning_rate": 1.9999999999999998e-05,
      "logits/chosen": 0.8124392032623291,
      "logits/rejected": 0.7364901900291443,
      "logps/chosen": -24.003555297851562,
      "logps/rejected": -22.78453826904297,
      "loss": 0.5977,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.13395456969738007,
      "rewards/margins": 0.21094201505184174,
      "rewards/rejected": -0.07698746025562286,
      "step": 32
    },
    {
      "epoch": 0.75,
      "grad_norm": 11.5,
      "learning_rate": 1.8461538461538465e-05,
      "logits/chosen": 0.4202606678009033,
      "logits/rejected": 0.8325386643409729,
      "logps/chosen": -20.655643463134766,
      "logps/rejected": -22.911235809326172,
      "loss": 0.6293,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.09568717330694199,
      "rewards/margins": 0.1334756314754486,
      "rewards/rejected": -0.03778845816850662,
      "step": 33
    },
    {
      "epoch": 0.7727272727272727,
      "grad_norm": 9.25,
      "learning_rate": 1.6923076923076924e-05,
      "logits/chosen": 0.7410422563552856,
      "logits/rejected": 0.9519510269165039,
      "logps/chosen": -20.877716064453125,
      "logps/rejected": -21.208219528198242,
      "loss": 0.6509,
      "rewards/accuracies": 0.6666666865348816,
      "rewards/chosen": 0.10315850377082825,
      "rewards/margins": 0.09082140028476715,
      "rewards/rejected": 0.012337110936641693,
      "step": 34
    },
    {
      "epoch": 0.7954545454545454,
      "grad_norm": 10.6875,
      "learning_rate": 1.5384615384615384e-05,
      "logits/chosen": 0.2906338572502136,
      "logits/rejected": 0.2896530032157898,
      "logps/chosen": -27.066699981689453,
      "logps/rejected": -20.529428482055664,
      "loss": 0.7098,
      "rewards/accuracies": 0.6666666865348816,
      "rewards/chosen": 0.016274897381663322,
      "rewards/margins": -0.03218403086066246,
      "rewards/rejected": 0.04845893010497093,
      "step": 35
    },
    {
      "epoch": 0.8181818181818182,
      "grad_norm": 12.0625,
      "learning_rate": 1.3846153846153847e-05,
      "logits/chosen": 0.3423420190811157,
      "logits/rejected": 0.4290481507778168,
      "logps/chosen": -27.17491912841797,
      "logps/rejected": -21.299049377441406,
      "loss": 0.7005,
      "rewards/accuracies": 0.3333333432674408,
      "rewards/chosen": 0.0788511335849762,
      "rewards/margins": -0.012380982749164104,
      "rewards/rejected": 0.09123211354017258,
      "step": 36
    },
    {
      "epoch": 0.8409090909090909,
      "grad_norm": 12.125,
      "learning_rate": 1.2307692307692308e-05,
      "logits/chosen": 0.43100249767303467,
      "logits/rejected": 0.45868536829948425,
      "logps/chosen": -26.59796142578125,
      "logps/rejected": -23.339920043945312,
      "loss": 0.6465,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.13345324993133545,
      "rewards/margins": 0.09718997776508331,
      "rewards/rejected": 0.036263275891542435,
      "step": 37
    },
    {
      "epoch": 0.8636363636363636,
      "grad_norm": 18.25,
      "learning_rate": 1.076923076923077e-05,
      "logits/chosen": 1.6746655702590942,
      "logits/rejected": 1.4878392219543457,
      "logps/chosen": -34.438411712646484,
      "logps/rejected": -21.078689575195312,
      "loss": 0.6676,
      "rewards/accuracies": 0.6666666865348816,
      "rewards/chosen": 0.02436860464513302,
      "rewards/margins": 0.056867726147174835,
      "rewards/rejected": -0.032499127089977264,
      "step": 38
    },
    {
      "epoch": 0.8863636363636364,
      "grad_norm": 10.375,
      "learning_rate": 9.230769230769232e-06,
      "logits/chosen": 0.42151492834091187,
      "logits/rejected": 0.5604749917984009,
      "logps/chosen": -25.84881591796875,
      "logps/rejected": -23.173572540283203,
      "loss": 0.6422,
      "rewards/accuracies": 0.6666666865348816,
      "rewards/chosen": 0.09398200362920761,
      "rewards/margins": 0.11083710193634033,
      "rewards/rejected": -0.016855113208293915,
      "step": 39
    },
    {
      "epoch": 0.9090909090909091,
      "grad_norm": 10.625,
      "learning_rate": 7.692307692307692e-06,
      "logits/chosen": 0.7431029677391052,
      "logits/rejected": 0.671082079410553,
      "logps/chosen": -20.48221206665039,
      "logps/rejected": -21.603567123413086,
      "loss": 0.6616,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.08623453974723816,
      "rewards/margins": 0.06446807086467743,
      "rewards/rejected": 0.02176647260785103,
      "step": 40
    },
    {
      "epoch": 0.9318181818181818,
      "grad_norm": 9.5625,
      "learning_rate": 6.153846153846154e-06,
      "logits/chosen": 0.4396713674068451,
      "logits/rejected": 0.5854610800743103,
      "logps/chosen": -22.81239891052246,
      "logps/rejected": -22.814224243164062,
      "loss": 0.6006,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.12625077366828918,
      "rewards/margins": 0.2127111405134201,
      "rewards/rejected": -0.08646037429571152,
      "step": 41
    },
    {
      "epoch": 0.9545454545454546,
      "grad_norm": 15.625,
      "learning_rate": 4.615384615384616e-06,
      "logits/chosen": 0.7838262319564819,
      "logits/rejected": 0.7330495715141296,
      "logps/chosen": -32.488609313964844,
      "logps/rejected": -21.370939254760742,
      "loss": 0.6928,
      "rewards/accuracies": 0.3333333432674408,
      "rewards/chosen": 0.08423754572868347,
      "rewards/margins": 0.009891888126730919,
      "rewards/rejected": 0.0743456482887268,
      "step": 42
    },
    {
      "epoch": 0.9772727272727273,
      "grad_norm": 10.0,
      "learning_rate": 3.076923076923077e-06,
      "logits/chosen": 0.8008474111557007,
      "logits/rejected": 0.8280413150787354,
      "logps/chosen": -24.55078125,
      "logps/rejected": -23.09003257751465,
      "loss": 0.6579,
      "rewards/accuracies": 0.6666666865348816,
      "rewards/chosen": 0.06935082376003265,
      "rewards/margins": 0.07333603501319885,
      "rewards/rejected": -0.003985214047133923,
      "step": 43
    },
    {
      "epoch": 1.0,
      "grad_norm": 27.5,
      "learning_rate": 1.5384615384615385e-06,
      "logits/chosen": 0.68901127576828,
      "logits/rejected": 0.41714319586753845,
      "logps/chosen": -38.19911193847656,
      "logps/rejected": -22.654001235961914,
      "loss": 0.7624,
      "rewards/accuracies": 0.0,
      "rewards/chosen": -0.0440673828125,
      "rewards/margins": -0.13395901024341583,
      "rewards/rejected": 0.08989162743091583,
      "step": 44
    }
  ],
  "logging_steps": 1,
  "max_steps": 44,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 3,
  "trial_name": null,
  "trial_params": null
}