{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 356,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.056338028169014086,
      "grad_norm": 574.27978515625,
      "learning_rate": 6.25e-07,
      "logits/chosen": 2.945640802383423,
      "logits/rejected": 3.0515265464782715,
      "logps/chosen": -246.39541625976562,
      "logps/rejected": -195.46109008789062,
      "loss": 0.9304,
      "rewards/accuracies": 0.5218750238418579,
      "rewards/chosen": 0.03325582295656204,
      "rewards/margins": 0.014068150892853737,
      "rewards/rejected": 0.019187677651643753,
      "step": 10
    },
    {
      "epoch": 0.11267605633802817,
      "grad_norm": 267.4449462890625,
      "learning_rate": 1.3194444444444446e-06,
      "logits/chosen": 3.0365357398986816,
      "logits/rejected": 3.0756676197052,
      "logps/chosen": -231.8340606689453,
      "logps/rejected": -196.01499938964844,
      "loss": 0.5293,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": 1.023498773574829,
      "rewards/margins": 1.6332218647003174,
      "rewards/rejected": -0.6097229719161987,
      "step": 20
    },
    {
      "epoch": 0.16901408450704225,
      "grad_norm": 168.65689086914062,
      "learning_rate": 2.0138888888888893e-06,
      "logits/chosen": 3.038414478302002,
      "logits/rejected": 3.0314807891845703,
      "logps/chosen": -265.27825927734375,
      "logps/rejected": -199.1730499267578,
      "loss": 0.1929,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": 4.1361799240112305,
      "rewards/margins": 6.971987724304199,
      "rewards/rejected": -2.835808277130127,
      "step": 30
    },
    {
      "epoch": 0.22535211267605634,
      "grad_norm": 37.93267822265625,
      "learning_rate": 2.7083333333333334e-06,
      "logits/chosen": 3.0692856311798096,
      "logits/rejected": 3.1041903495788574,
      "logps/chosen": -245.3774871826172,
      "logps/rejected": -207.87789916992188,
      "loss": 0.1002,
      "rewards/accuracies": 0.9671875238418579,
      "rewards/chosen": 6.395417213439941,
      "rewards/margins": 13.0038480758667,
      "rewards/rejected": -6.608429908752441,
      "step": 40
    },
    {
      "epoch": 0.28169014084507044,
      "grad_norm": 129.71878051757812,
      "learning_rate": 3.4027777777777783e-06,
      "logits/chosen": 2.943250894546509,
      "logits/rejected": 2.9869542121887207,
      "logps/chosen": -245.9439239501953,
      "logps/rejected": -202.8123321533203,
      "loss": 0.1375,
      "rewards/accuracies": 0.973437488079071,
      "rewards/chosen": 7.993535041809082,
      "rewards/margins": 19.0964412689209,
      "rewards/rejected": -11.102909088134766,
      "step": 50
    },
    {
      "epoch": 0.3380281690140845,
      "grad_norm": 57.64502716064453,
      "learning_rate": 4.097222222222222e-06,
      "logits/chosen": 3.0049643516540527,
      "logits/rejected": 3.0770554542541504,
      "logps/chosen": -236.73410034179688,
      "logps/rejected": -210.151611328125,
      "loss": 0.1722,
      "rewards/accuracies": 0.965624988079071,
      "rewards/chosen": 9.21152114868164,
      "rewards/margins": 22.748985290527344,
      "rewards/rejected": -13.53746509552002,
      "step": 60
    },
    {
      "epoch": 0.39436619718309857,
      "grad_norm": 105.868408203125,
      "learning_rate": 4.791666666666668e-06,
      "logits/chosen": 3.0406033992767334,
      "logits/rejected": 3.0739083290100098,
      "logps/chosen": -256.1812744140625,
      "logps/rejected": -204.35446166992188,
      "loss": 0.1129,
      "rewards/accuracies": 0.979687511920929,
      "rewards/chosen": 8.951081275939941,
      "rewards/margins": 21.80655860900879,
      "rewards/rejected": -12.85547924041748,
      "step": 70
    },
    {
      "epoch": 0.4507042253521127,
      "grad_norm": 168.75357055664062,
      "learning_rate": 4.998524282731094e-06,
      "logits/chosen": 3.0504136085510254,
      "logits/rejected": 3.0491514205932617,
      "logps/chosen": -255.90316772460938,
      "logps/rejected": -206.5497283935547,
      "loss": 0.3368,
      "rewards/accuracies": 0.96875,
      "rewards/chosen": 8.498771667480469,
      "rewards/margins": 22.880569458007812,
      "rewards/rejected": -14.381797790527344,
      "step": 80
    },
    {
      "epoch": 0.5070422535211268,
      "grad_norm": 109.43980407714844,
      "learning_rate": 4.991300473502437e-06,
      "logits/chosen": 3.0595602989196777,
      "logits/rejected": 3.1263351440429688,
      "logps/chosen": -228.88070678710938,
      "logps/rejected": -218.99783325195312,
      "loss": 0.3032,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": 6.379992485046387,
      "rewards/margins": 28.722070693969727,
      "rewards/rejected": -22.34208106994629,
      "step": 90
    },
    {
      "epoch": 0.5633802816901409,
      "grad_norm": 128.98760986328125,
      "learning_rate": 4.978074903220964e-06,
      "logits/chosen": 3.1774604320526123,
      "logits/rejected": 3.181511402130127,
      "logps/chosen": -228.3056640625,
      "logps/rejected": -222.18038940429688,
      "loss": 0.2029,
      "rewards/accuracies": 0.9781249761581421,
      "rewards/chosen": 5.856657028198242,
      "rewards/margins": 34.18266677856445,
      "rewards/rejected": -28.326007843017578,
      "step": 100
    },
    {
      "epoch": 0.6197183098591549,
      "grad_norm": 144.1073455810547,
      "learning_rate": 4.958879433443904e-06,
      "logits/chosen": 3.218785047531128,
      "logits/rejected": 3.1809394359588623,
      "logps/chosen": -243.4155731201172,
      "logps/rejected": -236.49868774414062,
      "loss": 0.2134,
      "rewards/accuracies": 0.9765625,
      "rewards/chosen": 6.719731330871582,
      "rewards/margins": 35.10533905029297,
      "rewards/rejected": -28.385604858398438,
      "step": 110
    },
    {
      "epoch": 0.676056338028169,
      "grad_norm": 149.68939208984375,
      "learning_rate": 4.933760307739277e-06,
      "logits/chosen": 3.1437926292419434,
      "logits/rejected": 3.1433277130126953,
      "logps/chosen": -255.87796020507812,
      "logps/rejected": -231.52182006835938,
      "loss": 0.2745,
      "rewards/accuracies": 0.981249988079071,
      "rewards/chosen": 6.636951446533203,
      "rewards/margins": 37.604583740234375,
      "rewards/rejected": -30.967632293701172,
      "step": 120
    },
    {
      "epoch": 0.7323943661971831,
      "grad_norm": 171.0680389404297,
      "learning_rate": 4.90277804028108e-06,
      "logits/chosen": 3.206712245941162,
      "logits/rejected": 3.2256839275360107,
      "logps/chosen": -261.3406982421875,
      "logps/rejected": -228.3511505126953,
      "loss": 0.5352,
      "rewards/accuracies": 0.964062511920929,
      "rewards/chosen": 4.778416633605957,
      "rewards/margins": 33.36115646362305,
      "rewards/rejected": -28.58274269104004,
      "step": 130
    },
    {
      "epoch": 0.7887323943661971,
      "grad_norm": 118.76382446289062,
      "learning_rate": 4.866007270065345e-06,
      "logits/chosen": 3.2129406929016113,
      "logits/rejected": 3.227287769317627,
      "logps/chosen": -246.7953643798828,
      "logps/rejected": -230.45651245117188,
      "loss": 0.2311,
      "rewards/accuracies": 0.9765625,
      "rewards/chosen": 0.6871539354324341,
      "rewards/margins": 36.90045166015625,
      "rewards/rejected": -36.213294982910156,
      "step": 140
    },
    {
      "epoch": 0.8450704225352113,
      "grad_norm": 198.268310546875,
      "learning_rate": 4.823536581098262e-06,
      "logits/chosen": 3.1713156700134277,
      "logits/rejected": 3.152228355407715,
      "logps/chosen": -238.29159545898438,
      "logps/rejected": -238.2996826171875,
      "loss": 0.2612,
      "rewards/accuracies": 0.96875,
      "rewards/chosen": 1.5464673042297363,
      "rewards/margins": 41.4525146484375,
      "rewards/rejected": -39.90605163574219,
      "step": 150
    },
    {
      "epoch": 0.9014084507042254,
      "grad_norm": 191.44915771484375,
      "learning_rate": 4.775468288989545e-06,
      "logits/chosen": 3.2888169288635254,
      "logits/rejected": 3.2170681953430176,
      "logps/chosen": -251.0401153564453,
      "logps/rejected": -236.00479125976562,
      "loss": 0.51,
      "rewards/accuracies": 0.971875011920929,
      "rewards/chosen": 2.7297756671905518,
      "rewards/margins": 42.41936492919922,
      "rewards/rejected": -39.68959426879883,
      "step": 160
    },
    {
      "epoch": 0.9577464788732394,
      "grad_norm": 73.10994720458984,
      "learning_rate": 4.7219181944651695e-06,
      "logits/chosen": 3.2599377632141113,
      "logits/rejected": 3.2560737133026123,
      "logps/chosen": -245.1952667236328,
      "logps/rejected": -241.4735565185547,
      "loss": 0.313,
      "rewards/accuracies": 0.973437488079071,
      "rewards/chosen": -0.8346878290176392,
      "rewards/margins": 44.581295013427734,
      "rewards/rejected": -45.415985107421875,
      "step": 170
    },
    {
      "epoch": 1.0112676056338028,
      "grad_norm": 71.17866516113281,
      "learning_rate": 4.663015304393279e-06,
      "logits/chosen": 3.3155758380889893,
      "logits/rejected": 3.3333241939544678,
      "logps/chosen": -270.6006774902344,
      "logps/rejected": -241.6360321044922,
      "loss": 0.338,
      "rewards/accuracies": 0.9736841917037964,
      "rewards/chosen": 0.171695277094841,
      "rewards/margins": 45.20180130004883,
      "rewards/rejected": -45.03010177612305,
      "step": 180
    },
    {
      "epoch": 1.0676056338028168,
      "grad_norm": 113.73174285888672,
      "learning_rate": 4.59890152099534e-06,
      "logits/chosen": 3.508065700531006,
      "logits/rejected": 3.5083301067352295,
      "logps/chosen": -266.78387451171875,
      "logps/rejected": -252.5698699951172,
      "loss": 0.8076,
      "rewards/accuracies": 0.964062511920929,
      "rewards/chosen": 10.96754264831543,
      "rewards/margins": 63.106483459472656,
      "rewards/rejected": -52.138938903808594,
      "step": 190
    },
    {
      "epoch": 1.123943661971831,
      "grad_norm": 0.890201985836029,
      "learning_rate": 4.5297312999912625e-06,
      "logits/chosen": 3.5750420093536377,
      "logits/rejected": 3.5598015785217285,
      "logps/chosen": -244.08615112304688,
      "logps/rejected": -249.61984252929688,
      "loss": 0.4625,
      "rewards/accuracies": 0.9765625,
      "rewards/chosen": 8.597976684570312,
      "rewards/margins": 65.22010803222656,
      "rewards/rejected": -56.62213897705078,
      "step": 200
    },
    {
      "epoch": 1.180281690140845,
      "grad_norm": 204.70803833007812,
      "learning_rate": 4.455671278502042e-06,
      "logits/chosen": 3.6519992351531982,
      "logits/rejected": 3.6381404399871826,
      "logps/chosen": -240.0857696533203,
      "logps/rejected": -238.0441131591797,
      "loss": 0.653,
      "rewards/accuracies": 0.9546874761581421,
      "rewards/chosen": 6.305617332458496,
      "rewards/margins": 53.15886688232422,
      "rewards/rejected": -46.8532600402832,
      "step": 210
    },
    {
      "epoch": 1.236619718309859,
      "grad_norm": 139.86141967773438,
      "learning_rate": 4.376899873606336e-06,
      "logits/chosen": 3.622997283935547,
      "logits/rejected": 3.6359076499938965,
      "logps/chosen": -259.12725830078125,
      "logps/rejected": -250.89501953125,
      "loss": 0.9057,
      "rewards/accuracies": 0.9453125,
      "rewards/chosen": 4.471914291381836,
      "rewards/margins": 51.15581130981445,
      "rewards/rejected": -46.683895111083984,
      "step": 220
    },
    {
      "epoch": 1.2929577464788733,
      "grad_norm": 94.85035705566406,
      "learning_rate": 4.293606852518101e-06,
      "logits/chosen": 3.6598830223083496,
      "logits/rejected": 3.594897508621216,
      "logps/chosen": -263.00079345703125,
      "logps/rejected": -248.59524536132812,
      "loss": 0.3918,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": 2.2544591426849365,
      "rewards/margins": 56.7413444519043,
      "rewards/rejected": -54.48688507080078,
      "step": 230
    },
    {
      "epoch": 1.3492957746478873,
      "grad_norm": 37.788360595703125,
      "learning_rate": 4.205992875420742e-06,
      "logits/chosen": 3.57995343208313,
      "logits/rejected": 3.5664329528808594,
      "logps/chosen": -243.8499755859375,
      "logps/rejected": -252.65353393554688,
      "loss": 0.4873,
      "rewards/accuracies": 0.9781249761581421,
      "rewards/chosen": 7.1952104568481445,
      "rewards/margins": 64.06478118896484,
      "rewards/rejected": -56.86956787109375,
      "step": 240
    },
    {
      "epoch": 1.4056338028169013,
      "grad_norm": 159.25875854492188,
      "learning_rate": 4.114269012059169e-06,
      "logits/chosen": 3.5491080284118652,
      "logits/rejected": 3.510200023651123,
      "logps/chosen": -253.5936279296875,
      "logps/rejected": -254.4651336669922,
      "loss": 0.3683,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": 4.560586452484131,
      "rewards/margins": 59.49503707885742,
      "rewards/rejected": -54.9344482421875,
      "step": 250
    },
    {
      "epoch": 1.4619718309859155,
      "grad_norm": 113.43865966796875,
      "learning_rate": 4.018656233254278e-06,
      "logits/chosen": 3.4692349433898926,
      "logits/rejected": 3.3723576068878174,
      "logps/chosen": -253.46249389648438,
      "logps/rejected": -268.2179870605469,
      "loss": 0.3905,
      "rewards/accuracies": 0.984375,
      "rewards/chosen": 3.9269375801086426,
      "rewards/margins": 74.34013366699219,
      "rewards/rejected": -70.41320037841797,
      "step": 260
    },
    {
      "epoch": 1.5183098591549296,
      "grad_norm": 36.17473602294922,
      "learning_rate": 3.919384878564902e-06,
      "logits/chosen": 3.488018035888672,
      "logits/rejected": 3.4679126739501953,
      "logps/chosen": -248.9741668701172,
      "logps/rejected": -282.92840576171875,
      "loss": 0.5658,
      "rewards/accuracies": 0.9781249761581421,
      "rewards/chosen": 4.083906650543213,
      "rewards/margins": 77.73112487792969,
      "rewards/rejected": -73.647216796875,
      "step": 270
    },
    {
      "epoch": 1.5746478873239438,
      "grad_norm": 177.54251098632812,
      "learning_rate": 3.816694101379631e-06,
      "logits/chosen": 3.6568634510040283,
      "logits/rejected": 3.6167445182800293,
      "logps/chosen": -240.26895141601562,
      "logps/rejected": -263.4219665527344,
      "loss": 0.3476,
      "rewards/accuracies": 0.979687511920929,
      "rewards/chosen": 4.189328193664551,
      "rewards/margins": 66.97871398925781,
      "rewards/rejected": -62.78938674926758,
      "step": 280
    },
    {
      "epoch": 1.6309859154929578,
      "grad_norm": 76.29850006103516,
      "learning_rate": 3.7108312927753533e-06,
      "logits/chosen": 3.5761330127716064,
      "logits/rejected": 3.5349583625793457,
      "logps/chosen": -256.83697509765625,
      "logps/rejected": -257.34063720703125,
      "loss": 0.2007,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": 3.5510315895080566,
      "rewards/margins": 67.4579849243164,
      "rewards/rejected": -63.906944274902344,
      "step": 290
    },
    {
      "epoch": 1.6873239436619718,
      "grad_norm": 88.87206268310547,
      "learning_rate": 3.6020514855304856e-06,
      "logits/chosen": 3.4625442028045654,
      "logits/rejected": 3.4353599548339844,
      "logps/chosen": -246.8657684326172,
      "logps/rejected": -262.365966796875,
      "loss": 0.3359,
      "rewards/accuracies": 0.9765625,
      "rewards/chosen": -1.413503885269165,
      "rewards/margins": 65.82698059082031,
      "rewards/rejected": -67.240478515625,
      "step": 300
    },
    {
      "epoch": 1.7436619718309858,
      "grad_norm": 109.3917007446289,
      "learning_rate": 3.4906167397286643e-06,
      "logits/chosen": 3.4818034172058105,
      "logits/rejected": 3.438699722290039,
      "logps/chosen": -248.93112182617188,
      "logps/rejected": -257.42694091796875,
      "loss": 0.4265,
      "rewards/accuracies": 0.9828125238418579,
      "rewards/chosen": 0.5807734131813049,
      "rewards/margins": 62.88755416870117,
      "rewards/rejected": -62.306793212890625,
      "step": 310
    },
    {
      "epoch": 1.8,
      "grad_norm": 88.23355102539062,
      "learning_rate": 3.3767955114330586e-06,
      "logits/chosen": 3.3789494037628174,
      "logits/rejected": 3.329315662384033,
      "logps/chosen": -258.67291259765625,
      "logps/rejected": -259.5810546875,
      "loss": 0.4489,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": -2.7831504344940186,
      "rewards/margins": 59.0428466796875,
      "rewards/rejected": -61.82598876953125,
      "step": 320
    },
    {
      "epoch": 1.856338028169014,
      "grad_norm": 1.0168672800064087,
      "learning_rate": 3.2608620059521935e-06,
      "logits/chosen": 3.38688325881958,
      "logits/rejected": 3.349194288253784,
      "logps/chosen": -233.11666870117188,
      "logps/rejected": -270.48590087890625,
      "loss": 0.5576,
      "rewards/accuracies": 0.979687511920929,
      "rewards/chosen": -4.500494480133057,
      "rewards/margins": 67.32182312011719,
      "rewards/rejected": -71.82231140136719,
      "step": 330
    },
    {
      "epoch": 1.9126760563380283,
      "grad_norm": 106.62237548828125,
      "learning_rate": 3.14309551725535e-06,
      "logits/chosen": 3.401437759399414,
      "logits/rejected": 3.3626091480255127,
      "logps/chosen": -247.9010009765625,
      "logps/rejected": -256.19097900390625,
      "loss": 0.4024,
      "rewards/accuracies": 0.979687511920929,
      "rewards/chosen": 0.5352304577827454,
      "rewards/margins": 63.979896545410156,
      "rewards/rejected": -63.444664001464844,
      "step": 340
    },
    {
      "epoch": 1.9690140845070423,
      "grad_norm": 159.5343475341797,
      "learning_rate": 3.0237797551289228e-06,
      "logits/chosen": 3.4421494007110596,
      "logits/rejected": 3.4059250354766846,
      "logps/chosen": -229.91134643554688,
      "logps/rejected": -252.29019165039062,
      "loss": 0.6959,
      "rewards/accuracies": 0.971875011920929,
      "rewards/chosen": 0.6608916521072388,
      "rewards/margins": 60.5278205871582,
      "rewards/rejected": -59.86692428588867,
      "step": 350
    }
  ],
  "logging_steps": 10,
  "max_steps": 712,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}