| { | |
| "best_metric": null, | |
| "best_model_checkpoint": null, | |
| "epoch": 1.9934426229508198, | |
| "eval_steps": 500, | |
| "global_step": 152, | |
| "is_hyper_param_search": false, | |
| "is_local_process_zero": true, | |
| "is_world_process_zero": true, | |
| "log_history": [ | |
| { | |
| "epoch": 0.013114754098360656, | |
| "grad_norm": null, | |
| "learning_rate": 0.0, | |
| "logits/chosen": -0.7201688289642334, | |
| "logits/rejected": -0.25958356261253357, | |
| "logps/chosen": -235.22982788085938, | |
| "logps/rejected": -110.77284240722656, | |
| "loss": 0.6931, | |
| "rewards/accuracies": 0.0, | |
| "rewards/chosen": 0.0, | |
| "rewards/margins": 0.0, | |
| "rewards/rejected": 0.0, | |
| "step": 1 | |
| }, | |
| { | |
| "epoch": 0.02622950819672131, | |
| "grad_norm": null, | |
| "learning_rate": 0.0, | |
| "logits/chosen": -0.6440946459770203, | |
| "logits/rejected": -0.8803070783615112, | |
| "logps/chosen": -233.35064697265625, | |
| "logps/rejected": -270.211669921875, | |
| "loss": 0.6931, | |
| "rewards/accuracies": 0.0, | |
| "rewards/chosen": 0.0, | |
| "rewards/margins": 0.0, | |
| "rewards/rejected": 0.0, | |
| "step": 2 | |
| }, | |
| { | |
| "epoch": 0.03934426229508197, | |
| "grad_norm": null, | |
| "learning_rate": 0.0, | |
| "logits/chosen": -0.3529421091079712, | |
| "logits/rejected": -0.46579116582870483, | |
| "logps/chosen": -283.93499755859375, | |
| "logps/rejected": -236.17298889160156, | |
| "loss": 0.6931, | |
| "rewards/accuracies": 0.0, | |
| "rewards/chosen": 0.0, | |
| "rewards/margins": 0.0, | |
| "rewards/rejected": 0.0, | |
| "step": 3 | |
| }, | |
| { | |
| "epoch": 0.05245901639344262, | |
| "grad_norm": 116.6382827758789, | |
| "learning_rate": 3.125e-07, | |
| "logits/chosen": -0.6149371862411499, | |
| "logits/rejected": -0.6765552163124084, | |
| "logps/chosen": -330.7442321777344, | |
| "logps/rejected": -333.74853515625, | |
| "loss": 0.6931, | |
| "rewards/accuracies": 0.0, | |
| "rewards/chosen": 0.0, | |
| "rewards/margins": 0.0, | |
| "rewards/rejected": 0.0, | |
| "step": 4 | |
| }, | |
| { | |
| "epoch": 0.06557377049180328, | |
| "grad_norm": 96.14250946044922, | |
| "learning_rate": 6.25e-07, | |
| "logits/chosen": -0.48342880606651306, | |
| "logits/rejected": -0.552554190158844, | |
| "logps/chosen": -224.43968200683594, | |
| "logps/rejected": -243.84121704101562, | |
| "loss": 0.6931, | |
| "rewards/accuracies": 0.0, | |
| "rewards/chosen": 0.0, | |
| "rewards/margins": 0.0, | |
| "rewards/rejected": 0.0, | |
| "step": 5 | |
| }, | |
| { | |
| "epoch": 0.07868852459016394, | |
| "grad_norm": 119.71466064453125, | |
| "learning_rate": 9.375000000000001e-07, | |
| "logits/chosen": -0.4438214600086212, | |
| "logits/rejected": -0.41411423683166504, | |
| "logps/chosen": -509.1705322265625, | |
| "logps/rejected": -331.69927978515625, | |
| "loss": 0.6942, | |
| "rewards/accuracies": 0.5, | |
| "rewards/chosen": -0.005420684814453125, | |
| "rewards/margins": -0.0020191192161291838, | |
| "rewards/rejected": -0.0034015655983239412, | |
| "step": 6 | |
| }, | |
| { | |
| "epoch": 0.09180327868852459, | |
| "grad_norm": 89.92263793945312, | |
| "learning_rate": 1.25e-06, | |
| "logits/chosen": -0.5990754961967468, | |
| "logits/rejected": -0.5052899718284607, | |
| "logps/chosen": -353.8023376464844, | |
| "logps/rejected": -293.9719543457031, | |
| "loss": 0.6924, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": 0.0021602632477879524, | |
| "rewards/margins": 0.001445007394067943, | |
| "rewards/rejected": 0.0007152557955123484, | |
| "step": 7 | |
| }, | |
| { | |
| "epoch": 0.10491803278688525, | |
| "grad_norm": 91.0330810546875, | |
| "learning_rate": 1.5625e-06, | |
| "logits/chosen": -0.7285553216934204, | |
| "logits/rejected": -0.8203944563865662, | |
| "logps/chosen": -201.29092407226562, | |
| "logps/rejected": -310.0835876464844, | |
| "loss": 0.6923, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": -0.0027733801398426294, | |
| "rewards/margins": 0.00168361677788198, | |
| "rewards/rejected": -0.004456996917724609, | |
| "step": 8 | |
| }, | |
| { | |
| "epoch": 0.1180327868852459, | |
| "grad_norm": 115.72259521484375, | |
| "learning_rate": 1.8750000000000003e-06, | |
| "logits/chosen": -0.6387780904769897, | |
| "logits/rejected": -0.4966055750846863, | |
| "logps/chosen": -286.18316650390625, | |
| "logps/rejected": -174.62039184570312, | |
| "loss": 0.6924, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": -0.008039474487304688, | |
| "rewards/margins": 0.0014192580711096525, | |
| "rewards/rejected": -0.009458731859922409, | |
| "step": 9 | |
| }, | |
| { | |
| "epoch": 0.13114754098360656, | |
| "grad_norm": 81.71347045898438, | |
| "learning_rate": 2.1875000000000002e-06, | |
| "logits/chosen": -0.43023377656936646, | |
| "logits/rejected": -0.4917900860309601, | |
| "logps/chosen": -123.80950164794922, | |
| "logps/rejected": -231.78781127929688, | |
| "loss": 0.6922, | |
| "rewards/accuracies": 0.25, | |
| "rewards/chosen": -0.013120556250214577, | |
| "rewards/margins": 0.001862620934844017, | |
| "rewards/rejected": -0.014983177185058594, | |
| "step": 10 | |
| }, | |
| { | |
| "epoch": 0.14426229508196722, | |
| "grad_norm": 126.95443725585938, | |
| "learning_rate": 2.5e-06, | |
| "logits/chosen": -0.5431928634643555, | |
| "logits/rejected": -0.6928932666778564, | |
| "logps/chosen": -466.6590881347656, | |
| "logps/rejected": -450.839111328125, | |
| "loss": 0.6835, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": 0.003947257064282894, | |
| "rewards/margins": 0.01957416534423828, | |
| "rewards/rejected": -0.015626907348632812, | |
| "step": 11 | |
| }, | |
| { | |
| "epoch": 0.15737704918032788, | |
| "grad_norm": 106.9858169555664, | |
| "learning_rate": 2.8125e-06, | |
| "logits/chosen": -0.8515354990959167, | |
| "logits/rejected": -0.8400233387947083, | |
| "logps/chosen": -496.6812438964844, | |
| "logps/rejected": -367.9149169921875, | |
| "loss": 0.6932, | |
| "rewards/accuracies": 0.5, | |
| "rewards/chosen": -0.013410568237304688, | |
| "rewards/margins": 0.0008895890787243843, | |
| "rewards/rejected": -0.014300156384706497, | |
| "step": 12 | |
| }, | |
| { | |
| "epoch": 0.17049180327868851, | |
| "grad_norm": 94.6556167602539, | |
| "learning_rate": 3.125e-06, | |
| "logits/chosen": -0.8845179080963135, | |
| "logits/rejected": -0.8273138403892517, | |
| "logps/chosen": -332.0273742675781, | |
| "logps/rejected": -263.0736389160156, | |
| "loss": 0.6739, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": -0.0007216455414891243, | |
| "rewards/margins": 0.03937549516558647, | |
| "rewards/rejected": -0.04009714350104332, | |
| "step": 13 | |
| }, | |
| { | |
| "epoch": 0.18360655737704917, | |
| "grad_norm": 110.63664245605469, | |
| "learning_rate": 3.4375e-06, | |
| "logits/chosen": -0.49672091007232666, | |
| "logits/rejected": -0.7252901792526245, | |
| "logps/chosen": -182.361572265625, | |
| "logps/rejected": -378.4778747558594, | |
| "loss": 0.6764, | |
| "rewards/accuracies": 0.5, | |
| "rewards/chosen": -0.03787527605891228, | |
| "rewards/margins": 0.0358370766043663, | |
| "rewards/rejected": -0.07371234893798828, | |
| "step": 14 | |
| }, | |
| { | |
| "epoch": 0.19672131147540983, | |
| "grad_norm": 76.27680969238281, | |
| "learning_rate": 3.7500000000000005e-06, | |
| "logits/chosen": -0.5162211656570435, | |
| "logits/rejected": -0.4975786507129669, | |
| "logps/chosen": -179.30902099609375, | |
| "logps/rejected": -132.12823486328125, | |
| "loss": 0.6905, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": -0.037923429161310196, | |
| "rewards/margins": 0.005405997391790152, | |
| "rewards/rejected": -0.04332943260669708, | |
| "step": 15 | |
| }, | |
| { | |
| "epoch": 0.2098360655737705, | |
| "grad_norm": 91.94597625732422, | |
| "learning_rate": 4.0625000000000005e-06, | |
| "logits/chosen": -0.3636321425437927, | |
| "logits/rejected": -0.6324741840362549, | |
| "logps/chosen": -153.548828125, | |
| "logps/rejected": -371.84765625, | |
| "loss": 0.713, | |
| "rewards/accuracies": 0.25, | |
| "rewards/chosen": -0.06001920625567436, | |
| "rewards/margins": -0.038457777351140976, | |
| "rewards/rejected": -0.021561432629823685, | |
| "step": 16 | |
| }, | |
| { | |
| "epoch": 0.22295081967213115, | |
| "grad_norm": 75.85398864746094, | |
| "learning_rate": 4.3750000000000005e-06, | |
| "logits/chosen": -0.5712193250656128, | |
| "logits/rejected": -0.3860137164592743, | |
| "logps/chosen": -267.605224609375, | |
| "logps/rejected": -104.8357162475586, | |
| "loss": 0.6921, | |
| "rewards/accuracies": 0.5, | |
| "rewards/chosen": -0.022034931927919388, | |
| "rewards/margins": 0.0022784238681197166, | |
| "rewards/rejected": -0.02431335672736168, | |
| "step": 17 | |
| }, | |
| { | |
| "epoch": 0.2360655737704918, | |
| "grad_norm": 117.28124237060547, | |
| "learning_rate": 4.6875000000000004e-06, | |
| "logits/chosen": -0.5285807251930237, | |
| "logits/rejected": -0.3493229150772095, | |
| "logps/chosen": -380.4576416015625, | |
| "logps/rejected": -311.0014343261719, | |
| "loss": 0.7359, | |
| "rewards/accuracies": 0.5, | |
| "rewards/chosen": -0.16734714806079865, | |
| "rewards/margins": -0.07893352955579758, | |
| "rewards/rejected": -0.08841361850500107, | |
| "step": 18 | |
| }, | |
| { | |
| "epoch": 0.24918032786885247, | |
| "grad_norm": 90.01153564453125, | |
| "learning_rate": 5e-06, | |
| "logits/chosen": -0.6711679697036743, | |
| "logits/rejected": -0.2690754532814026, | |
| "logps/chosen": -462.41973876953125, | |
| "logps/rejected": -297.78363037109375, | |
| "loss": 0.6435, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": -0.0683719664812088, | |
| "rewards/margins": 0.10821684449911118, | |
| "rewards/rejected": -0.17658883333206177, | |
| "step": 19 | |
| }, | |
| { | |
| "epoch": 0.26229508196721313, | |
| "grad_norm": 73.38824462890625, | |
| "learning_rate": 4.9632352941176475e-06, | |
| "logits/chosen": -0.6787087917327881, | |
| "logits/rejected": -0.6652618050575256, | |
| "logps/chosen": -361.91131591796875, | |
| "logps/rejected": -243.35589599609375, | |
| "loss": 0.6319, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 0.04816999286413193, | |
| "rewards/margins": 0.12730780243873596, | |
| "rewards/rejected": -0.07913780957460403, | |
| "step": 20 | |
| }, | |
| { | |
| "epoch": 0.2754098360655738, | |
| "grad_norm": 115.89125061035156, | |
| "learning_rate": 4.9264705882352945e-06, | |
| "logits/chosen": -0.3588840365409851, | |
| "logits/rejected": -0.2116285115480423, | |
| "logps/chosen": -400.4305419921875, | |
| "logps/rejected": -362.8456726074219, | |
| "loss": 0.6087, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 0.10708275437355042, | |
| "rewards/margins": 0.1842678040266037, | |
| "rewards/rejected": -0.07718505710363388, | |
| "step": 21 | |
| }, | |
| { | |
| "epoch": 0.28852459016393445, | |
| "grad_norm": 78.77503204345703, | |
| "learning_rate": 4.889705882352942e-06, | |
| "logits/chosen": -0.8203290700912476, | |
| "logits/rejected": -0.8240777254104614, | |
| "logps/chosen": -166.8377685546875, | |
| "logps/rejected": -170.83688354492188, | |
| "loss": 0.7257, | |
| "rewards/accuracies": 0.5, | |
| "rewards/chosen": -0.06841621547937393, | |
| "rewards/margins": -0.061698053032159805, | |
| "rewards/rejected": -0.0067181577906012535, | |
| "step": 22 | |
| }, | |
| { | |
| "epoch": 0.3016393442622951, | |
| "grad_norm": 121.3331527709961, | |
| "learning_rate": 4.852941176470589e-06, | |
| "logits/chosen": -0.2023768126964569, | |
| "logits/rejected": 0.14346128702163696, | |
| "logps/chosen": -404.2669677734375, | |
| "logps/rejected": -286.7450256347656, | |
| "loss": 0.6238, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": 0.12858295440673828, | |
| "rewards/margins": 0.14947663247585297, | |
| "rewards/rejected": -0.020893670618534088, | |
| "step": 23 | |
| }, | |
| { | |
| "epoch": 0.31475409836065577, | |
| "grad_norm": 103.55362701416016, | |
| "learning_rate": 4.816176470588236e-06, | |
| "logits/chosen": -0.5769512057304382, | |
| "logits/rejected": -0.7388426661491394, | |
| "logps/chosen": -483.7005310058594, | |
| "logps/rejected": -367.4910583496094, | |
| "loss": 0.5843, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 0.05827799811959267, | |
| "rewards/margins": 0.27695170044898987, | |
| "rewards/rejected": -0.2186737060546875, | |
| "step": 24 | |
| }, | |
| { | |
| "epoch": 0.32786885245901637, | |
| "grad_norm": 72.6626205444336, | |
| "learning_rate": 4.779411764705883e-06, | |
| "logits/chosen": -0.7255613803863525, | |
| "logits/rejected": -0.884934663772583, | |
| "logps/chosen": -183.0727081298828, | |
| "logps/rejected": -202.55877685546875, | |
| "loss": 0.6588, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": 0.0200653113424778, | |
| "rewards/margins": 0.09606742858886719, | |
| "rewards/rejected": -0.07600211352109909, | |
| "step": 25 | |
| }, | |
| { | |
| "epoch": 0.34098360655737703, | |
| "grad_norm": 88.45654296875, | |
| "learning_rate": 4.74264705882353e-06, | |
| "logits/chosen": -0.5392031073570251, | |
| "logits/rejected": -0.7600653171539307, | |
| "logps/chosen": -405.0447998046875, | |
| "logps/rejected": -448.3751220703125, | |
| "loss": 0.5453, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 0.170909121632576, | |
| "rewards/margins": 0.33048784732818604, | |
| "rewards/rejected": -0.15957871079444885, | |
| "step": 26 | |
| }, | |
| { | |
| "epoch": 0.3540983606557377, | |
| "grad_norm": 72.38986206054688, | |
| "learning_rate": 4.705882352941177e-06, | |
| "logits/chosen": -0.8080704212188721, | |
| "logits/rejected": -0.4376976788043976, | |
| "logps/chosen": -242.55665588378906, | |
| "logps/rejected": -131.02154541015625, | |
| "loss": 0.7378, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": -0.07056399434804916, | |
| "rewards/margins": -0.06801767647266388, | |
| "rewards/rejected": -0.0025463104248046875, | |
| "step": 27 | |
| }, | |
| { | |
| "epoch": 0.36721311475409835, | |
| "grad_norm": 89.0915298461914, | |
| "learning_rate": 4.669117647058824e-06, | |
| "logits/chosen": -0.7039719820022583, | |
| "logits/rejected": -0.45313090085983276, | |
| "logps/chosen": -357.1167297363281, | |
| "logps/rejected": -254.34747314453125, | |
| "loss": 0.6511, | |
| "rewards/accuracies": 0.5, | |
| "rewards/chosen": 0.1653575897216797, | |
| "rewards/margins": 0.11446285247802734, | |
| "rewards/rejected": 0.050894737243652344, | |
| "step": 28 | |
| }, | |
| { | |
| "epoch": 0.380327868852459, | |
| "grad_norm": 72.88048553466797, | |
| "learning_rate": 4.632352941176471e-06, | |
| "logits/chosen": -0.4176654815673828, | |
| "logits/rejected": -0.8021068572998047, | |
| "logps/chosen": -118.46458435058594, | |
| "logps/rejected": -266.1793518066406, | |
| "loss": 0.6207, | |
| "rewards/accuracies": 0.5, | |
| "rewards/chosen": -0.03454189747571945, | |
| "rewards/margins": 0.17219945788383484, | |
| "rewards/rejected": -0.2067413479089737, | |
| "step": 29 | |
| }, | |
| { | |
| "epoch": 0.39344262295081966, | |
| "grad_norm": 112.41997528076172, | |
| "learning_rate": 4.595588235294118e-06, | |
| "logits/chosen": -0.5515627264976501, | |
| "logits/rejected": -0.3022332489490509, | |
| "logps/chosen": -383.1882629394531, | |
| "logps/rejected": -355.9986877441406, | |
| "loss": 0.7112, | |
| "rewards/accuracies": 0.25, | |
| "rewards/chosen": 0.12149354070425034, | |
| "rewards/margins": 0.018218979239463806, | |
| "rewards/rejected": 0.10327454656362534, | |
| "step": 30 | |
| }, | |
| { | |
| "epoch": 0.4065573770491803, | |
| "grad_norm": 99.6341781616211, | |
| "learning_rate": 4.558823529411765e-06, | |
| "logits/chosen": -0.32010310888290405, | |
| "logits/rejected": -0.34287354350090027, | |
| "logps/chosen": -433.3697509765625, | |
| "logps/rejected": -427.46868896484375, | |
| "loss": 0.5341, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 0.1381477415561676, | |
| "rewards/margins": 0.3722812533378601, | |
| "rewards/rejected": -0.2341335415840149, | |
| "step": 31 | |
| }, | |
| { | |
| "epoch": 0.419672131147541, | |
| "grad_norm": 141.9149932861328, | |
| "learning_rate": 4.522058823529412e-06, | |
| "logits/chosen": -0.3783895969390869, | |
| "logits/rejected": -0.22028671205043793, | |
| "logps/chosen": -438.865478515625, | |
| "logps/rejected": -378.45306396484375, | |
| "loss": 0.7389, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": -0.20297011733055115, | |
| "rewards/margins": -0.03645037114620209, | |
| "rewards/rejected": -0.16651973128318787, | |
| "step": 32 | |
| }, | |
| { | |
| "epoch": 0.43278688524590164, | |
| "grad_norm": 83.45268249511719, | |
| "learning_rate": 4.485294117647059e-06, | |
| "logits/chosen": -0.4643877446651459, | |
| "logits/rejected": -0.23612239956855774, | |
| "logps/chosen": -323.2715148925781, | |
| "logps/rejected": -257.071533203125, | |
| "loss": 0.6477, | |
| "rewards/accuracies": 0.5, | |
| "rewards/chosen": 0.06585502624511719, | |
| "rewards/margins": 0.09959708154201508, | |
| "rewards/rejected": -0.03374204784631729, | |
| "step": 33 | |
| }, | |
| { | |
| "epoch": 0.4459016393442623, | |
| "grad_norm": 123.2249526977539, | |
| "learning_rate": 4.448529411764706e-06, | |
| "logits/chosen": -0.7680925130844116, | |
| "logits/rejected": -0.6171376705169678, | |
| "logps/chosen": -480.37481689453125, | |
| "logps/rejected": -430.70379638671875, | |
| "loss": 0.7551, | |
| "rewards/accuracies": 0.5, | |
| "rewards/chosen": -0.217463880777359, | |
| "rewards/margins": -0.08437519520521164, | |
| "rewards/rejected": -0.13308867812156677, | |
| "step": 34 | |
| }, | |
| { | |
| "epoch": 0.45901639344262296, | |
| "grad_norm": null, | |
| "learning_rate": 4.448529411764706e-06, | |
| "logits/chosen": -0.9053277969360352, | |
| "logits/rejected": -0.691852331161499, | |
| "logps/chosen": -409.5699462890625, | |
| "logps/rejected": -475.8656005859375, | |
| "loss": 0.8483, | |
| "rewards/accuracies": 0.25, | |
| "rewards/chosen": -0.18217086791992188, | |
| "rewards/margins": -0.2824680209159851, | |
| "rewards/rejected": 0.10029716044664383, | |
| "step": 35 | |
| }, | |
| { | |
| "epoch": 0.4721311475409836, | |
| "grad_norm": 105.83935546875, | |
| "learning_rate": 4.411764705882353e-06, | |
| "logits/chosen": -0.28655514121055603, | |
| "logits/rejected": -0.05234713852405548, | |
| "logps/chosen": -563.8226318359375, | |
| "logps/rejected": -375.774658203125, | |
| "loss": 0.6024, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": 0.048460379242897034, | |
| "rewards/margins": 0.28112107515335083, | |
| "rewards/rejected": -0.2326606810092926, | |
| "step": 36 | |
| }, | |
| { | |
| "epoch": 0.4852459016393443, | |
| "grad_norm": 68.51673126220703, | |
| "learning_rate": 4.3750000000000005e-06, | |
| "logits/chosen": -0.7692010402679443, | |
| "logits/rejected": -0.9052639007568359, | |
| "logps/chosen": -262.10467529296875, | |
| "logps/rejected": -209.62413024902344, | |
| "loss": 0.5991, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 0.09292706847190857, | |
| "rewards/margins": 0.20516462624073029, | |
| "rewards/rejected": -0.11223754286766052, | |
| "step": 37 | |
| }, | |
| { | |
| "epoch": 0.49836065573770494, | |
| "grad_norm": 152.35870361328125, | |
| "learning_rate": 4.3382352941176475e-06, | |
| "logits/chosen": -0.4434085190296173, | |
| "logits/rejected": -0.5462446212768555, | |
| "logps/chosen": -325.6352844238281, | |
| "logps/rejected": -386.6681823730469, | |
| "loss": 1.048, | |
| "rewards/accuracies": 0.5, | |
| "rewards/chosen": -0.2829402983188629, | |
| "rewards/margins": -0.5018165707588196, | |
| "rewards/rejected": 0.21887625753879547, | |
| "step": 38 | |
| }, | |
| { | |
| "epoch": 0.5114754098360655, | |
| "grad_norm": 128.87644958496094, | |
| "learning_rate": 4.301470588235295e-06, | |
| "logits/chosen": -0.32321175932884216, | |
| "logits/rejected": -0.6219479441642761, | |
| "logps/chosen": -220.80865478515625, | |
| "logps/rejected": -679.1721801757812, | |
| "loss": 0.6471, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": -0.08989439904689789, | |
| "rewards/margins": 0.1783585399389267, | |
| "rewards/rejected": -0.268252968788147, | |
| "step": 39 | |
| }, | |
| { | |
| "epoch": 0.5245901639344263, | |
| "grad_norm": 164.9374542236328, | |
| "learning_rate": 4.264705882352942e-06, | |
| "logits/chosen": -0.41436851024627686, | |
| "logits/rejected": -0.3371754288673401, | |
| "logps/chosen": -453.7709045410156, | |
| "logps/rejected": -186.964599609375, | |
| "loss": 0.7023, | |
| "rewards/accuracies": 0.5, | |
| "rewards/chosen": -0.012208174914121628, | |
| "rewards/margins": 0.0052278414368629456, | |
| "rewards/rejected": -0.01743602193892002, | |
| "step": 40 | |
| }, | |
| { | |
| "epoch": 0.5377049180327869, | |
| "grad_norm": 85.42108917236328, | |
| "learning_rate": 4.227941176470589e-06, | |
| "logits/chosen": -0.3417915403842926, | |
| "logits/rejected": -0.6106492280960083, | |
| "logps/chosen": -410.3317565917969, | |
| "logps/rejected": -356.129638671875, | |
| "loss": 0.5101, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": 0.12232876569032669, | |
| "rewards/margins": 0.4886293411254883, | |
| "rewards/rejected": -0.3663005828857422, | |
| "step": 41 | |
| }, | |
| { | |
| "epoch": 0.5508196721311476, | |
| "grad_norm": 92.59017181396484, | |
| "learning_rate": 4.191176470588236e-06, | |
| "logits/chosen": -0.3282807171344757, | |
| "logits/rejected": -0.6609268188476562, | |
| "logps/chosen": -273.1469421386719, | |
| "logps/rejected": -394.88787841796875, | |
| "loss": 0.5399, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 0.09656943380832672, | |
| "rewards/margins": 0.3409425616264343, | |
| "rewards/rejected": -0.2443731427192688, | |
| "step": 42 | |
| }, | |
| { | |
| "epoch": 0.5639344262295082, | |
| "grad_norm": 113.27176666259766, | |
| "learning_rate": 4.154411764705883e-06, | |
| "logits/chosen": -0.6696023941040039, | |
| "logits/rejected": -0.4758712649345398, | |
| "logps/chosen": -259.9328918457031, | |
| "logps/rejected": -257.8404846191406, | |
| "loss": 0.7035, | |
| "rewards/accuracies": 0.5, | |
| "rewards/chosen": -0.07427978515625, | |
| "rewards/margins": 0.009069837629795074, | |
| "rewards/rejected": -0.08334961533546448, | |
| "step": 43 | |
| }, | |
| { | |
| "epoch": 0.5770491803278689, | |
| "grad_norm": 115.66248321533203, | |
| "learning_rate": 4.11764705882353e-06, | |
| "logits/chosen": -0.5908689498901367, | |
| "logits/rejected": -0.6039953231811523, | |
| "logps/chosen": -264.3017578125, | |
| "logps/rejected": -345.41534423828125, | |
| "loss": 0.758, | |
| "rewards/accuracies": 0.5, | |
| "rewards/chosen": -0.16783800721168518, | |
| "rewards/margins": -0.07998895645141602, | |
| "rewards/rejected": -0.08784904330968857, | |
| "step": 44 | |
| }, | |
| { | |
| "epoch": 0.5901639344262295, | |
| "grad_norm": 114.06060028076172, | |
| "learning_rate": 4.080882352941177e-06, | |
| "logits/chosen": -0.4014369249343872, | |
| "logits/rejected": -0.31871065497398376, | |
| "logps/chosen": -365.35687255859375, | |
| "logps/rejected": -380.46661376953125, | |
| "loss": 0.7162, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": 0.09307613968849182, | |
| "rewards/margins": -0.03436946123838425, | |
| "rewards/rejected": 0.12744559347629547, | |
| "step": 45 | |
| }, | |
| { | |
| "epoch": 0.6032786885245902, | |
| "grad_norm": 138.62567138671875, | |
| "learning_rate": 4.044117647058824e-06, | |
| "logits/chosen": -0.638996958732605, | |
| "logits/rejected": -0.39604443311691284, | |
| "logps/chosen": -286.7137451171875, | |
| "logps/rejected": -220.05154418945312, | |
| "loss": 0.8332, | |
| "rewards/accuracies": 0.5, | |
| "rewards/chosen": -0.1774437129497528, | |
| "rewards/margins": -0.23038703203201294, | |
| "rewards/rejected": 0.05294332653284073, | |
| "step": 46 | |
| }, | |
| { | |
| "epoch": 0.6163934426229508, | |
| "grad_norm": 57.410499572753906, | |
| "learning_rate": 4.007352941176471e-06, | |
| "logits/chosen": -0.6090636253356934, | |
| "logits/rejected": -0.5290719866752625, | |
| "logps/chosen": -137.49166870117188, | |
| "logps/rejected": -139.84095764160156, | |
| "loss": 0.5675, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 0.006125451996922493, | |
| "rewards/margins": 0.29940930008888245, | |
| "rewards/rejected": -0.2932838499546051, | |
| "step": 47 | |
| }, | |
| { | |
| "epoch": 0.6295081967213115, | |
| "grad_norm": 74.56676483154297, | |
| "learning_rate": 3.970588235294118e-06, | |
| "logits/chosen": -0.7181419134140015, | |
| "logits/rejected": -0.4697718024253845, | |
| "logps/chosen": -189.2108154296875, | |
| "logps/rejected": -129.29373168945312, | |
| "loss": 0.6282, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": -0.16447561979293823, | |
| "rewards/margins": 0.13910141587257385, | |
| "rewards/rejected": -0.3035770356655121, | |
| "step": 48 | |
| }, | |
| { | |
| "epoch": 0.6426229508196721, | |
| "grad_norm": 71.27189636230469, | |
| "learning_rate": 3.933823529411765e-06, | |
| "logits/chosen": -0.5085806846618652, | |
| "logits/rejected": -0.44473588466644287, | |
| "logps/chosen": -442.37554931640625, | |
| "logps/rejected": -340.0184326171875, | |
| "loss": 0.4663, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 0.27928200364112854, | |
| "rewards/margins": 0.581198513507843, | |
| "rewards/rejected": -0.3019165098667145, | |
| "step": 49 | |
| }, | |
| { | |
| "epoch": 0.6557377049180327, | |
| "grad_norm": 94.27779388427734, | |
| "learning_rate": 3.897058823529412e-06, | |
| "logits/chosen": -0.43181851506233215, | |
| "logits/rejected": -0.2016599327325821, | |
| "logps/chosen": -412.7520446777344, | |
| "logps/rejected": -255.32366943359375, | |
| "loss": 0.5602, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": 0.15803948044776917, | |
| "rewards/margins": 0.30720824003219604, | |
| "rewards/rejected": -0.14916877448558807, | |
| "step": 50 | |
| }, | |
| { | |
| "epoch": 0.6688524590163935, | |
| "grad_norm": 105.73281860351562, | |
| "learning_rate": 3.860294117647059e-06, | |
| "logits/chosen": -0.8714503645896912, | |
| "logits/rejected": -0.8866394758224487, | |
| "logps/chosen": -234.35894775390625, | |
| "logps/rejected": -381.7604064941406, | |
| "loss": 0.679, | |
| "rewards/accuracies": 0.5, | |
| "rewards/chosen": 0.20015183091163635, | |
| "rewards/margins": 0.0683051347732544, | |
| "rewards/rejected": 0.13184669613838196, | |
| "step": 51 | |
| }, | |
| { | |
| "epoch": 0.6819672131147541, | |
| "grad_norm": 114.59404754638672, | |
| "learning_rate": 3.8235294117647055e-06, | |
| "logits/chosen": -0.3797226846218109, | |
| "logits/rejected": -0.28768518567085266, | |
| "logps/chosen": -527.8758544921875, | |
| "logps/rejected": -398.22833251953125, | |
| "loss": 0.5557, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": 0.1556282937526703, | |
| "rewards/margins": 0.3929787576198578, | |
| "rewards/rejected": -0.2373504638671875, | |
| "step": 52 | |
| }, | |
| { | |
| "epoch": 0.6950819672131148, | |
| "grad_norm": 79.82359313964844, | |
| "learning_rate": 3.786764705882353e-06, | |
| "logits/chosen": -0.36424490809440613, | |
| "logits/rejected": -0.4016554355621338, | |
| "logps/chosen": -264.1070556640625, | |
| "logps/rejected": -228.17445373535156, | |
| "loss": 0.5864, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": 0.08102244883775711, | |
| "rewards/margins": 0.2558790147304535, | |
| "rewards/rejected": -0.17485657334327698, | |
| "step": 53 | |
| }, | |
| { | |
| "epoch": 0.7081967213114754, | |
| "grad_norm": 96.55302429199219, | |
| "learning_rate": 3.7500000000000005e-06, | |
| "logits/chosen": -0.2944083511829376, | |
| "logits/rejected": -0.1153193861246109, | |
| "logps/chosen": -548.8867797851562, | |
| "logps/rejected": -476.7703857421875, | |
| "loss": 0.4772, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 0.578123152256012, | |
| "rewards/margins": 0.5144607424736023, | |
| "rewards/rejected": 0.0636623352766037, | |
| "step": 54 | |
| }, | |
| { | |
| "epoch": 0.7213114754098361, | |
| "grad_norm": 99.41586303710938, | |
| "learning_rate": 3.7132352941176476e-06, | |
| "logits/chosen": -0.28071513772010803, | |
| "logits/rejected": -0.3517005443572998, | |
| "logps/chosen": -269.35107421875, | |
| "logps/rejected": -245.49374389648438, | |
| "loss": 0.6535, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": -0.32255783677101135, | |
| "rewards/margins": 0.18369215726852417, | |
| "rewards/rejected": -0.5062500238418579, | |
| "step": 55 | |
| }, | |
| { | |
| "epoch": 0.7344262295081967, | |
| "grad_norm": 102.873046875, | |
| "learning_rate": 3.6764705882352946e-06, | |
| "logits/chosen": -0.5045206546783447, | |
| "logits/rejected": -0.33617326617240906, | |
| "logps/chosen": -446.717529296875, | |
| "logps/rejected": -354.23199462890625, | |
| "loss": 0.5308, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 0.11581029742956161, | |
| "rewards/margins": 0.36529916524887085, | |
| "rewards/rejected": -0.24948886036872864, | |
| "step": 56 | |
| }, | |
| { | |
| "epoch": 0.7475409836065574, | |
| "grad_norm": 107.32563018798828, | |
| "learning_rate": 3.6397058823529413e-06, | |
| "logits/chosen": -0.7487730979919434, | |
| "logits/rejected": -0.5132504105567932, | |
| "logps/chosen": -297.9276428222656, | |
| "logps/rejected": -272.4541015625, | |
| "loss": 0.6717, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": 0.00452117994427681, | |
| "rewards/margins": 0.049825482070446014, | |
| "rewards/rejected": -0.045304298400878906, | |
| "step": 57 | |
| }, | |
| { | |
| "epoch": 0.760655737704918, | |
| "grad_norm": 197.63619995117188, | |
| "learning_rate": 3.6029411764705883e-06, | |
| "logits/chosen": -0.3516054153442383, | |
| "logits/rejected": -0.4525684714317322, | |
| "logps/chosen": -527.5667724609375, | |
| "logps/rejected": -721.8489990234375, | |
| "loss": 0.9658, | |
| "rewards/accuracies": 0.5, | |
| "rewards/chosen": -0.026310719549655914, | |
| "rewards/margins": -0.28759005665779114, | |
| "rewards/rejected": 0.26127931475639343, | |
| "step": 58 | |
| }, | |
| { | |
| "epoch": 0.7737704918032787, | |
| "grad_norm": 58.864341735839844, | |
| "learning_rate": 3.566176470588236e-06, | |
| "logits/chosen": -0.8907312750816345, | |
| "logits/rejected": -0.5560203194618225, | |
| "logps/chosen": -356.2855224609375, | |
| "logps/rejected": -259.6497802734375, | |
| "loss": 0.5038, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": 0.31428688764572144, | |
| "rewards/margins": 0.4714818000793457, | |
| "rewards/rejected": -0.15719491243362427, | |
| "step": 59 | |
| }, | |
| { | |
| "epoch": 0.7868852459016393, | |
| "grad_norm": 77.59774017333984, | |
| "learning_rate": 3.529411764705883e-06, | |
| "logits/chosen": -0.5632966160774231, | |
| "logits/rejected": -0.6254044771194458, | |
| "logps/chosen": -523.7625732421875, | |
| "logps/rejected": -410.9280700683594, | |
| "loss": 0.3917, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": 0.16903266310691833, | |
| "rewards/margins": 0.9428247213363647, | |
| "rewards/rejected": -0.7737920880317688, | |
| "step": 60 | |
| }, | |
| { | |
| "epoch": 0.8, | |
| "grad_norm": 125.0260009765625, | |
| "learning_rate": 3.4926470588235295e-06, | |
| "logits/chosen": -0.7440542578697205, | |
| "logits/rejected": -0.703271210193634, | |
| "logps/chosen": -259.8118591308594, | |
| "logps/rejected": -273.839599609375, | |
| "loss": 0.8524, | |
| "rewards/accuracies": 0.25, | |
| "rewards/chosen": -0.13262319564819336, | |
| "rewards/margins": -0.27512502670288086, | |
| "rewards/rejected": 0.1425018012523651, | |
| "step": 61 | |
| }, | |
| { | |
| "epoch": 0.8131147540983606, | |
| "grad_norm": 76.26204681396484, | |
| "learning_rate": 3.4558823529411766e-06, | |
| "logits/chosen": -0.7221469283103943, | |
| "logits/rejected": -0.7659784555435181, | |
| "logps/chosen": -184.25143432617188, | |
| "logps/rejected": -179.50430297851562, | |
| "loss": 0.6865, | |
| "rewards/accuracies": 0.5, | |
| "rewards/chosen": 0.0025919005274772644, | |
| "rewards/margins": 0.019242577254772186, | |
| "rewards/rejected": -0.016650676727294922, | |
| "step": 62 | |
| }, | |
| { | |
| "epoch": 0.8262295081967214, | |
| "grad_norm": 98.49539184570312, | |
| "learning_rate": 3.419117647058824e-06, | |
| "logits/chosen": -0.39552026987075806, | |
| "logits/rejected": -0.5626529455184937, | |
| "logps/chosen": -299.3195495605469, | |
| "logps/rejected": -409.58978271484375, | |
| "loss": 0.6728, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": -0.10388422012329102, | |
| "rewards/margins": 0.0691218301653862, | |
| "rewards/rejected": -0.1730060577392578, | |
| "step": 63 | |
| }, | |
| { | |
| "epoch": 0.839344262295082, | |
| "grad_norm": 121.93528747558594, | |
| "learning_rate": 3.382352941176471e-06, | |
| "logits/chosen": -0.3543356955051422, | |
| "logits/rejected": -0.23809625208377838, | |
| "logps/chosen": -189.68069458007812, | |
| "logps/rejected": -349.9007263183594, | |
| "loss": 0.8636, | |
| "rewards/accuracies": 0.25, | |
| "rewards/chosen": -0.3187013864517212, | |
| "rewards/margins": -0.2557255029678345, | |
| "rewards/rejected": -0.06297589093446732, | |
| "step": 64 | |
| }, | |
| { | |
| "epoch": 0.8524590163934426, | |
| "grad_norm": 35.53516387939453, | |
| "learning_rate": 3.3455882352941178e-06, | |
| "logits/chosen": -0.9258323311805725, | |
| "logits/rejected": -0.8565582036972046, | |
| "logps/chosen": -272.64453125, | |
| "logps/rejected": -173.0827178955078, | |
| "loss": 0.5008, | |
| "rewards/accuracies": 0.5, | |
| "rewards/chosen": 0.3930124044418335, | |
| "rewards/margins": 0.5905007123947144, | |
| "rewards/rejected": -0.19748830795288086, | |
| "step": 65 | |
| }, | |
| { | |
| "epoch": 0.8655737704918033, | |
| "grad_norm": 162.02955627441406, | |
| "learning_rate": 3.308823529411765e-06, | |
| "logits/chosen": -0.7951459288597107, | |
| "logits/rejected": -0.9009961485862732, | |
| "logps/chosen": -322.9057312011719, | |
| "logps/rejected": -461.6522216796875, | |
| "loss": 0.9941, | |
| "rewards/accuracies": 0.25, | |
| "rewards/chosen": -0.22350434958934784, | |
| "rewards/margins": -0.43997135758399963, | |
| "rewards/rejected": 0.2164669930934906, | |
| "step": 66 | |
| }, | |
| { | |
| "epoch": 0.8786885245901639, | |
| "grad_norm": 49.46587371826172, | |
| "learning_rate": 3.272058823529412e-06, | |
| "logits/chosen": -0.8077062368392944, | |
| "logits/rejected": -0.7711391448974609, | |
| "logps/chosen": -334.55621337890625, | |
| "logps/rejected": -304.2231750488281, | |
| "loss": 0.4265, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 0.5165804028511047, | |
| "rewards/margins": 0.7691186666488647, | |
| "rewards/rejected": -0.2525383234024048, | |
| "step": 67 | |
| }, | |
| { | |
| "epoch": 0.8918032786885246, | |
| "grad_norm": 113.88951110839844, | |
| "learning_rate": 3.2352941176470594e-06, | |
| "logits/chosen": -0.48406150937080383, | |
| "logits/rejected": -0.550665020942688, | |
| "logps/chosen": -252.42161560058594, | |
| "logps/rejected": -426.91375732421875, | |
| "loss": 0.7502, | |
| "rewards/accuracies": 0.5, | |
| "rewards/chosen": -0.13350410759449005, | |
| "rewards/margins": -0.05722009390592575, | |
| "rewards/rejected": -0.07628403604030609, | |
| "step": 68 | |
| }, | |
| { | |
| "epoch": 0.9049180327868852, | |
| "grad_norm": 82.540771484375, | |
| "learning_rate": 3.198529411764706e-06, | |
| "logits/chosen": -0.6168251633644104, | |
| "logits/rejected": -0.5766561031341553, | |
| "logps/chosen": -355.02587890625, | |
| "logps/rejected": -185.5700225830078, | |
| "loss": 0.5665, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": -0.07997532188892365, | |
| "rewards/margins": 0.31432589888572693, | |
| "rewards/rejected": -0.3943012058734894, | |
| "step": 69 | |
| }, | |
| { | |
| "epoch": 0.9180327868852459, | |
| "grad_norm": 80.47476196289062, | |
| "learning_rate": 3.161764705882353e-06, | |
| "logits/chosen": -0.44328969717025757, | |
| "logits/rejected": -0.23468244075775146, | |
| "logps/chosen": -298.8565673828125, | |
| "logps/rejected": -271.02874755859375, | |
| "loss": 0.5233, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 0.3650921881198883, | |
| "rewards/margins": 0.4301413893699646, | |
| "rewards/rejected": -0.0650491714477539, | |
| "step": 70 | |
| }, | |
| { | |
| "epoch": 0.9311475409836065, | |
| "grad_norm": 183.71018981933594, | |
| "learning_rate": 3.125e-06, | |
| "logits/chosen": -0.7024189233779907, | |
| "logits/rejected": -0.3720092177391052, | |
| "logps/chosen": -663.25341796875, | |
| "logps/rejected": -509.05072021484375, | |
| "loss": 0.6182, | |
| "rewards/accuracies": 0.5, | |
| "rewards/chosen": 0.02078094333410263, | |
| "rewards/margins": 0.3649251461029053, | |
| "rewards/rejected": -0.3441442549228668, | |
| "step": 71 | |
| }, | |
| { | |
| "epoch": 0.9442622950819672, | |
| "grad_norm": 73.77120971679688, | |
| "learning_rate": 3.0882352941176476e-06, | |
| "logits/chosen": -0.7303851842880249, | |
| "logits/rejected": -0.5275728702545166, | |
| "logps/chosen": -273.287353515625, | |
| "logps/rejected": -248.97366333007812, | |
| "loss": 0.5902, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 0.15096759796142578, | |
| "rewards/margins": 0.22571565210819244, | |
| "rewards/rejected": -0.07474804669618607, | |
| "step": 72 | |
| }, | |
| { | |
| "epoch": 0.9573770491803278, | |
| "grad_norm": 119.30152893066406, | |
| "learning_rate": 3.0514705882352947e-06, | |
| "logits/chosen": -0.7145899534225464, | |
| "logits/rejected": -0.6755335330963135, | |
| "logps/chosen": -358.4460144042969, | |
| "logps/rejected": -413.93402099609375, | |
| "loss": 0.715, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": -0.5442755818367004, | |
| "rewards/margins": -0.015435710549354553, | |
| "rewards/rejected": -0.5288398861885071, | |
| "step": 73 | |
| }, | |
| { | |
| "epoch": 0.9704918032786886, | |
| "grad_norm": Infinity, | |
| "learning_rate": 3.0514705882352947e-06, | |
| "logits/chosen": -0.37683844566345215, | |
| "logits/rejected": -0.19479577243328094, | |
| "logps/chosen": -191.80364990234375, | |
| "logps/rejected": -239.4271240234375, | |
| "loss": 0.6901, | |
| "rewards/accuracies": 0.5, | |
| "rewards/chosen": -0.12151336669921875, | |
| "rewards/margins": 0.011674299836158752, | |
| "rewards/rejected": -0.1331876814365387, | |
| "step": 74 | |
| }, | |
| { | |
| "epoch": 0.9836065573770492, | |
| "grad_norm": 37.73300552368164, | |
| "learning_rate": 3.0147058823529413e-06, | |
| "logits/chosen": -0.7108013033866882, | |
| "logits/rejected": -0.8427099585533142, | |
| "logps/chosen": -237.83132934570312, | |
| "logps/rejected": -235.86509704589844, | |
| "loss": 0.5368, | |
| "rewards/accuracies": 0.5, | |
| "rewards/chosen": 0.3780634105205536, | |
| "rewards/margins": 0.5749028921127319, | |
| "rewards/rejected": -0.19683943688869476, | |
| "step": 75 | |
| }, | |
| { | |
| "epoch": 0.9967213114754099, | |
| "grad_norm": 50.56627655029297, | |
| "learning_rate": 2.9779411764705884e-06, | |
| "logits/chosen": -0.3920336663722992, | |
| "logits/rejected": -0.5925789475440979, | |
| "logps/chosen": -352.024658203125, | |
| "logps/rejected": -319.0198669433594, | |
| "loss": 0.3911, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": 0.4176803529262543, | |
| "rewards/margins": 0.8605508804321289, | |
| "rewards/rejected": -0.44287049770355225, | |
| "step": 76 | |
| }, | |
| { | |
| "epoch": 1.0098360655737706, | |
| "grad_norm": 31.756732940673828, | |
| "learning_rate": 2.9411764705882355e-06, | |
| "logits/chosen": -0.9694571495056152, | |
| "logits/rejected": -0.7683681845664978, | |
| "logps/chosen": -397.28277587890625, | |
| "logps/rejected": -506.4493103027344, | |
| "loss": 0.2859, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 3.0198912620544434, | |
| "rewards/margins": 6.584094524383545, | |
| "rewards/rejected": -3.5642035007476807, | |
| "step": 77 | |
| }, | |
| { | |
| "epoch": 1.022950819672131, | |
| "grad_norm": 15.996756553649902, | |
| "learning_rate": 2.904411764705883e-06, | |
| "logits/chosen": -0.4139990210533142, | |
| "logits/rejected": -0.5289357900619507, | |
| "logps/chosen": -157.25994873046875, | |
| "logps/rejected": -292.02252197265625, | |
| "loss": 0.0868, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 0.7793710827827454, | |
| "rewards/margins": 3.51633358001709, | |
| "rewards/rejected": -2.73696231842041, | |
| "step": 78 | |
| }, | |
| { | |
| "epoch": 1.0360655737704918, | |
| "grad_norm": 4.163623809814453, | |
| "learning_rate": 2.8676470588235296e-06, | |
| "logits/chosen": -0.8043258190155029, | |
| "logits/rejected": -0.7802602052688599, | |
| "logps/chosen": -419.2426452636719, | |
| "logps/rejected": -306.1612548828125, | |
| "loss": 0.0156, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 2.816192626953125, | |
| "rewards/margins": 5.203756332397461, | |
| "rewards/rejected": -2.3875632286071777, | |
| "step": 79 | |
| }, | |
| { | |
| "epoch": 1.0491803278688525, | |
| "grad_norm": 13.230965614318848, | |
| "learning_rate": 2.8308823529411766e-06, | |
| "logits/chosen": -0.6542176008224487, | |
| "logits/rejected": -0.3940190374851227, | |
| "logps/chosen": -421.56170654296875, | |
| "logps/rejected": -406.03619384765625, | |
| "loss": 0.1049, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 3.0519039630889893, | |
| "rewards/margins": 5.3741607666015625, | |
| "rewards/rejected": -2.322256565093994, | |
| "step": 80 | |
| }, | |
| { | |
| "epoch": 1.0622950819672132, | |
| "grad_norm": 31.09598731994629, | |
| "learning_rate": 2.7941176470588237e-06, | |
| "logits/chosen": -0.7882400751113892, | |
| "logits/rejected": -0.755602240562439, | |
| "logps/chosen": -143.84951782226562, | |
| "logps/rejected": -207.64418029785156, | |
| "loss": 0.3054, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 0.5518492460250854, | |
| "rewards/margins": 1.3898382186889648, | |
| "rewards/rejected": -0.8379888534545898, | |
| "step": 81 | |
| }, | |
| { | |
| "epoch": 1.0754098360655737, | |
| "grad_norm": 7.787254810333252, | |
| "learning_rate": 2.757352941176471e-06, | |
| "logits/chosen": -0.347468763589859, | |
| "logits/rejected": -0.34622734785079956, | |
| "logps/chosen": -263.9562072753906, | |
| "logps/rejected": -390.29901123046875, | |
| "loss": 0.0387, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 2.743457555770874, | |
| "rewards/margins": 6.276088714599609, | |
| "rewards/rejected": -3.5326313972473145, | |
| "step": 82 | |
| }, | |
| { | |
| "epoch": 1.0885245901639344, | |
| "grad_norm": 12.739912986755371, | |
| "learning_rate": 2.720588235294118e-06, | |
| "logits/chosen": -0.5520642399787903, | |
| "logits/rejected": -0.632639467716217, | |
| "logps/chosen": -542.7330322265625, | |
| "logps/rejected": -477.4020080566406, | |
| "loss": 0.0857, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 3.9007229804992676, | |
| "rewards/margins": 8.109764099121094, | |
| "rewards/rejected": -4.209041595458984, | |
| "step": 83 | |
| }, | |
| { | |
| "epoch": 1.1016393442622952, | |
| "grad_norm": 39.59391784667969, | |
| "learning_rate": 2.683823529411765e-06, | |
| "logits/chosen": -0.5291910767555237, | |
| "logits/rejected": -0.47945737838745117, | |
| "logps/chosen": -385.80242919921875, | |
| "logps/rejected": -362.5047912597656, | |
| "loss": 0.1411, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 1.6711763143539429, | |
| "rewards/margins": 3.675436496734619, | |
| "rewards/rejected": -2.004260301589966, | |
| "step": 84 | |
| }, | |
| { | |
| "epoch": 1.1147540983606556, | |
| "grad_norm": 34.75082015991211, | |
| "learning_rate": 2.647058823529412e-06, | |
| "logits/chosen": -0.8582125902175903, | |
| "logits/rejected": -0.961124837398529, | |
| "logps/chosen": -127.2431640625, | |
| "logps/rejected": -181.58657836914062, | |
| "loss": 0.3015, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 1.1156275272369385, | |
| "rewards/margins": 2.283085346221924, | |
| "rewards/rejected": -1.1674578189849854, | |
| "step": 85 | |
| }, | |
| { | |
| "epoch": 1.1278688524590164, | |
| "grad_norm": 18.05373191833496, | |
| "learning_rate": 2.610294117647059e-06, | |
| "logits/chosen": -0.8542808890342712, | |
| "logits/rejected": -0.6588492393493652, | |
| "logps/chosen": -249.22372436523438, | |
| "logps/rejected": -176.06527709960938, | |
| "loss": 0.1498, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 1.803849697113037, | |
| "rewards/margins": 2.9191579818725586, | |
| "rewards/rejected": -1.115308165550232, | |
| "step": 86 | |
| }, | |
| { | |
| "epoch": 1.140983606557377, | |
| "grad_norm": 5.507705211639404, | |
| "learning_rate": 2.5735294117647057e-06, | |
| "logits/chosen": -0.4963078498840332, | |
| "logits/rejected": -0.8181336522102356, | |
| "logps/chosen": -108.07630920410156, | |
| "logps/rejected": -320.96392822265625, | |
| "loss": 0.0256, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 1.0501946210861206, | |
| "rewards/margins": 4.599462032318115, | |
| "rewards/rejected": -3.549267292022705, | |
| "step": 87 | |
| }, | |
| { | |
| "epoch": 1.1540983606557378, | |
| "grad_norm": 12.978127479553223, | |
| "learning_rate": 2.536764705882353e-06, | |
| "logits/chosen": -0.8022123575210571, | |
| "logits/rejected": -0.8916893005371094, | |
| "logps/chosen": -98.68746948242188, | |
| "logps/rejected": -129.98577880859375, | |
| "loss": 0.2735, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 0.5756351947784424, | |
| "rewards/margins": 1.4945859909057617, | |
| "rewards/rejected": -0.9189507961273193, | |
| "step": 88 | |
| }, | |
| { | |
| "epoch": 1.1672131147540983, | |
| "grad_norm": 8.445465087890625, | |
| "learning_rate": 2.5e-06, | |
| "logits/chosen": -0.238316610455513, | |
| "logits/rejected": -0.21277102828025818, | |
| "logps/chosen": -355.0726318359375, | |
| "logps/rejected": -468.03424072265625, | |
| "loss": 0.0439, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 3.785343885421753, | |
| "rewards/margins": 8.177167892456055, | |
| "rewards/rejected": -4.391823768615723, | |
| "step": 89 | |
| }, | |
| { | |
| "epoch": 1.180327868852459, | |
| "grad_norm": 119.01290130615234, | |
| "learning_rate": 2.4632352941176473e-06, | |
| "logits/chosen": -0.38539862632751465, | |
| "logits/rejected": -0.2583141326904297, | |
| "logps/chosen": -479.6025695800781, | |
| "logps/rejected": -433.03045654296875, | |
| "loss": 0.2972, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": 2.800222873687744, | |
| "rewards/margins": 6.087710380554199, | |
| "rewards/rejected": -3.287487506866455, | |
| "step": 90 | |
| }, | |
| { | |
| "epoch": 1.1934426229508197, | |
| "grad_norm": 31.620267868041992, | |
| "learning_rate": 2.4264705882352943e-06, | |
| "logits/chosen": -0.5990991592407227, | |
| "logits/rejected": -0.4338776469230652, | |
| "logps/chosen": -287.31878662109375, | |
| "logps/rejected": -425.55963134765625, | |
| "loss": 0.2764, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 1.9531813859939575, | |
| "rewards/margins": 4.420433044433594, | |
| "rewards/rejected": -2.4672515392303467, | |
| "step": 91 | |
| }, | |
| { | |
| "epoch": 1.2065573770491804, | |
| "grad_norm": 5.715668201446533, | |
| "learning_rate": 2.3897058823529414e-06, | |
| "logits/chosen": -0.5918264389038086, | |
| "logits/rejected": -0.34481585025787354, | |
| "logps/chosen": -462.36181640625, | |
| "logps/rejected": -291.8490295410156, | |
| "loss": 0.0246, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 4.236080169677734, | |
| "rewards/margins": 6.656676292419434, | |
| "rewards/rejected": -2.420595645904541, | |
| "step": 92 | |
| }, | |
| { | |
| "epoch": 1.219672131147541, | |
| "grad_norm": 10.8475923538208, | |
| "learning_rate": 2.3529411764705885e-06, | |
| "logits/chosen": -0.48867395520210266, | |
| "logits/rejected": -0.2037501335144043, | |
| "logps/chosen": -248.76011657714844, | |
| "logps/rejected": -278.86505126953125, | |
| "loss": 0.0703, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 2.4005353450775146, | |
| "rewards/margins": 3.532731056213379, | |
| "rewards/rejected": -1.1321957111358643, | |
| "step": 93 | |
| }, | |
| { | |
| "epoch": 1.2327868852459016, | |
| "grad_norm": 17.5373477935791, | |
| "learning_rate": 2.3161764705882355e-06, | |
| "logits/chosen": -0.800743043422699, | |
| "logits/rejected": -0.3216054141521454, | |
| "logps/chosen": -332.1054382324219, | |
| "logps/rejected": -262.9385070800781, | |
| "loss": 0.0781, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 3.3670170307159424, | |
| "rewards/margins": 5.165108680725098, | |
| "rewards/rejected": -1.7980915307998657, | |
| "step": 94 | |
| }, | |
| { | |
| "epoch": 1.2459016393442623, | |
| "grad_norm": 17.166118621826172, | |
| "learning_rate": 2.2794117647058826e-06, | |
| "logits/chosen": -0.5876657366752625, | |
| "logits/rejected": -0.30134695768356323, | |
| "logps/chosen": -438.53070068359375, | |
| "logps/rejected": -283.11053466796875, | |
| "loss": 0.0878, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 5.845110893249512, | |
| "rewards/margins": 7.538168430328369, | |
| "rewards/rejected": -1.6930572986602783, | |
| "step": 95 | |
| }, | |
| { | |
| "epoch": 1.2590163934426228, | |
| "grad_norm": 23.068361282348633, | |
| "learning_rate": 2.2426470588235296e-06, | |
| "logits/chosen": -0.6558918356895447, | |
| "logits/rejected": -0.3275706171989441, | |
| "logps/chosen": -370.03094482421875, | |
| "logps/rejected": -453.90203857421875, | |
| "loss": 0.1287, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 2.0850746631622314, | |
| "rewards/margins": 4.879607677459717, | |
| "rewards/rejected": -2.7945332527160645, | |
| "step": 96 | |
| }, | |
| { | |
| "epoch": 1.2721311475409836, | |
| "grad_norm": 5.994075298309326, | |
| "learning_rate": 2.2058823529411767e-06, | |
| "logits/chosen": -0.6101892590522766, | |
| "logits/rejected": -0.5373432040214539, | |
| "logps/chosen": -347.5174560546875, | |
| "logps/rejected": -322.55023193359375, | |
| "loss": 0.0318, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 2.3236427307128906, | |
| "rewards/margins": 4.85604190826416, | |
| "rewards/rejected": -2.5323991775512695, | |
| "step": 97 | |
| }, | |
| { | |
| "epoch": 1.2852459016393443, | |
| "grad_norm": 14.375985145568848, | |
| "learning_rate": 2.1691176470588238e-06, | |
| "logits/chosen": -0.6097806692123413, | |
| "logits/rejected": -0.5758112072944641, | |
| "logps/chosen": -341.9951477050781, | |
| "logps/rejected": -290.09136962890625, | |
| "loss": 0.1422, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 2.372694253921509, | |
| "rewards/margins": 5.112575531005859, | |
| "rewards/rejected": -2.7398810386657715, | |
| "step": 98 | |
| }, | |
| { | |
| "epoch": 1.298360655737705, | |
| "grad_norm": 10.561140060424805, | |
| "learning_rate": 2.132352941176471e-06, | |
| "logits/chosen": -0.7798110842704773, | |
| "logits/rejected": -0.5334391593933105, | |
| "logps/chosen": -122.71707916259766, | |
| "logps/rejected": -138.75685119628906, | |
| "loss": 0.224, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": 0.45279568433761597, | |
| "rewards/margins": 2.1160295009613037, | |
| "rewards/rejected": -1.6632338762283325, | |
| "step": 99 | |
| }, | |
| { | |
| "epoch": 1.3114754098360657, | |
| "grad_norm": 8.718037605285645, | |
| "learning_rate": 2.095588235294118e-06, | |
| "logits/chosen": -0.5125188827514648, | |
| "logits/rejected": -0.35307127237319946, | |
| "logps/chosen": -180.09408569335938, | |
| "logps/rejected": -273.54046630859375, | |
| "loss": 0.0465, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 1.8067989349365234, | |
| "rewards/margins": 5.022237777709961, | |
| "rewards/rejected": -3.2154390811920166, | |
| "step": 100 | |
| }, | |
| { | |
| "epoch": 1.3245901639344262, | |
| "grad_norm": 11.257354736328125, | |
| "learning_rate": 2.058823529411765e-06, | |
| "logits/chosen": -0.48480626940727234, | |
| "logits/rejected": -0.4184003174304962, | |
| "logps/chosen": -349.5723876953125, | |
| "logps/rejected": -458.3537902832031, | |
| "loss": 0.0341, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 2.9435834884643555, | |
| "rewards/margins": 6.52362060546875, | |
| "rewards/rejected": -3.5800368785858154, | |
| "step": 101 | |
| }, | |
| { | |
| "epoch": 1.337704918032787, | |
| "grad_norm": 8.118950843811035, | |
| "learning_rate": 2.022058823529412e-06, | |
| "logits/chosen": -0.6667636632919312, | |
| "logits/rejected": -0.3804593086242676, | |
| "logps/chosen": -331.16015625, | |
| "logps/rejected": -266.6229248046875, | |
| "loss": 0.1422, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 4.118168830871582, | |
| "rewards/margins": 5.915567398071289, | |
| "rewards/rejected": -1.797398328781128, | |
| "step": 102 | |
| }, | |
| { | |
| "epoch": 1.3508196721311476, | |
| "grad_norm": 14.03177547454834, | |
| "learning_rate": 1.985294117647059e-06, | |
| "logits/chosen": -0.34312984347343445, | |
| "logits/rejected": -0.5378394722938538, | |
| "logps/chosen": -205.9506378173828, | |
| "logps/rejected": -219.46360778808594, | |
| "loss": 0.113, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 2.023257255554199, | |
| "rewards/margins": 3.4092535972595215, | |
| "rewards/rejected": -1.3859964609146118, | |
| "step": 103 | |
| }, | |
| { | |
| "epoch": 1.3639344262295081, | |
| "grad_norm": 2.7447023391723633, | |
| "learning_rate": 1.948529411764706e-06, | |
| "logits/chosen": -0.3522460162639618, | |
| "logits/rejected": -0.4929681122303009, | |
| "logps/chosen": -295.0723571777344, | |
| "logps/rejected": -344.48834228515625, | |
| "loss": 0.0114, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 2.613781690597534, | |
| "rewards/margins": 6.21614408493042, | |
| "rewards/rejected": -3.602362871170044, | |
| "step": 104 | |
| }, | |
| { | |
| "epoch": 1.3770491803278688, | |
| "grad_norm": 3.9517393112182617, | |
| "learning_rate": 1.9117647058823528e-06, | |
| "logits/chosen": -0.5514786243438721, | |
| "logits/rejected": -0.515537440776825, | |
| "logps/chosen": -283.73504638671875, | |
| "logps/rejected": -354.9101257324219, | |
| "loss": 0.0147, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 3.183412551879883, | |
| "rewards/margins": 6.139578819274902, | |
| "rewards/rejected": -2.9561657905578613, | |
| "step": 105 | |
| }, | |
| { | |
| "epoch": 1.3901639344262295, | |
| "grad_norm": 32.080848693847656, | |
| "learning_rate": 1.8750000000000003e-06, | |
| "logits/chosen": -0.34524667263031006, | |
| "logits/rejected": -0.2804412245750427, | |
| "logps/chosen": -487.1918029785156, | |
| "logps/rejected": -459.1595764160156, | |
| "loss": 0.1497, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 3.4442732334136963, | |
| "rewards/margins": 6.211602210998535, | |
| "rewards/rejected": -2.767328977584839, | |
| "step": 106 | |
| }, | |
| { | |
| "epoch": 1.40327868852459, | |
| "grad_norm": 6.278583526611328, | |
| "learning_rate": 1.8382352941176473e-06, | |
| "logits/chosen": -1.0492626428604126, | |
| "logits/rejected": -0.637208878993988, | |
| "logps/chosen": -508.1822509765625, | |
| "logps/rejected": -264.3661804199219, | |
| "loss": 0.0256, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 3.434685230255127, | |
| "rewards/margins": 5.93399715423584, | |
| "rewards/rejected": -2.4993114471435547, | |
| "step": 107 | |
| }, | |
| { | |
| "epoch": 1.4163934426229507, | |
| "grad_norm": 3.8192379474639893, | |
| "learning_rate": 1.8014705882352942e-06, | |
| "logits/chosen": -0.49397462606430054, | |
| "logits/rejected": -0.39174699783325195, | |
| "logps/chosen": -250.099853515625, | |
| "logps/rejected": -342.03033447265625, | |
| "loss": 0.0194, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 1.3859384059906006, | |
| "rewards/margins": 4.495996475219727, | |
| "rewards/rejected": -3.110058307647705, | |
| "step": 108 | |
| }, | |
| { | |
| "epoch": 1.4295081967213115, | |
| "grad_norm": 5.807373523712158, | |
| "learning_rate": 1.7647058823529414e-06, | |
| "logits/chosen": -0.584580659866333, | |
| "logits/rejected": -0.15329986810684204, | |
| "logps/chosen": -442.9632873535156, | |
| "logps/rejected": -318.378173828125, | |
| "loss": 0.0165, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 2.1344716548919678, | |
| "rewards/margins": 5.192463397979736, | |
| "rewards/rejected": -3.0579919815063477, | |
| "step": 109 | |
| }, | |
| { | |
| "epoch": 1.4426229508196722, | |
| "grad_norm": 3.5901246070861816, | |
| "learning_rate": 1.7279411764705883e-06, | |
| "logits/chosen": -0.5349779725074768, | |
| "logits/rejected": -0.2637510299682617, | |
| "logps/chosen": -414.6310729980469, | |
| "logps/rejected": -428.139404296875, | |
| "loss": 0.0136, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 2.6161444187164307, | |
| "rewards/margins": 7.400443077087402, | |
| "rewards/rejected": -4.784298896789551, | |
| "step": 110 | |
| }, | |
| { | |
| "epoch": 1.455737704918033, | |
| "grad_norm": 17.46812629699707, | |
| "learning_rate": 1.6911764705882356e-06, | |
| "logits/chosen": -0.6205960512161255, | |
| "logits/rejected": -0.6515633463859558, | |
| "logps/chosen": -233.56886291503906, | |
| "logps/rejected": -327.51611328125, | |
| "loss": 0.1822, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 1.1055248975753784, | |
| "rewards/margins": 4.226891994476318, | |
| "rewards/rejected": -3.1213672161102295, | |
| "step": 111 | |
| }, | |
| { | |
| "epoch": 1.4688524590163934, | |
| "grad_norm": 11.27214241027832, | |
| "learning_rate": 1.6544117647058824e-06, | |
| "logits/chosen": -0.3313244581222534, | |
| "logits/rejected": -0.22155167162418365, | |
| "logps/chosen": -391.53466796875, | |
| "logps/rejected": -230.6190643310547, | |
| "loss": 0.1142, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 2.988477945327759, | |
| "rewards/margins": 5.188130855560303, | |
| "rewards/rejected": -2.199653148651123, | |
| "step": 112 | |
| }, | |
| { | |
| "epoch": 1.481967213114754, | |
| "grad_norm": 9.786858558654785, | |
| "learning_rate": 1.6176470588235297e-06, | |
| "logits/chosen": -0.5634331703186035, | |
| "logits/rejected": -0.4348165690898895, | |
| "logps/chosen": -252.67152404785156, | |
| "logps/rejected": -352.4822998046875, | |
| "loss": 0.0467, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 1.8047726154327393, | |
| "rewards/margins": 6.142679691314697, | |
| "rewards/rejected": -4.337907314300537, | |
| "step": 113 | |
| }, | |
| { | |
| "epoch": 1.4950819672131148, | |
| "grad_norm": 4.376170635223389, | |
| "learning_rate": 1.5808823529411765e-06, | |
| "logits/chosen": -0.15515179932117462, | |
| "logits/rejected": -0.1870616376399994, | |
| "logps/chosen": -289.219970703125, | |
| "logps/rejected": -400.1667175292969, | |
| "loss": 0.0207, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 2.1680092811584473, | |
| "rewards/margins": 4.854602336883545, | |
| "rewards/rejected": -2.6865930557250977, | |
| "step": 114 | |
| }, | |
| { | |
| "epoch": 1.5081967213114753, | |
| "grad_norm": 3.8763139247894287, | |
| "learning_rate": 1.5441176470588238e-06, | |
| "logits/chosen": -0.2717803716659546, | |
| "logits/rejected": -0.17628008127212524, | |
| "logps/chosen": -546.1766357421875, | |
| "logps/rejected": -451.3502502441406, | |
| "loss": 0.0166, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 2.681325912475586, | |
| "rewards/margins": 5.725113868713379, | |
| "rewards/rejected": -3.0437874794006348, | |
| "step": 115 | |
| }, | |
| { | |
| "epoch": 1.521311475409836, | |
| "grad_norm": 30.37358283996582, | |
| "learning_rate": 1.5073529411764707e-06, | |
| "logits/chosen": -0.9199023246765137, | |
| "logits/rejected": -0.8192650079727173, | |
| "logps/chosen": -247.5410919189453, | |
| "logps/rejected": -390.4881896972656, | |
| "loss": 0.1679, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 1.817814588546753, | |
| "rewards/margins": 4.526091575622559, | |
| "rewards/rejected": -2.7082767486572266, | |
| "step": 116 | |
| }, | |
| { | |
| "epoch": 1.5344262295081967, | |
| "grad_norm": 17.32929229736328, | |
| "learning_rate": 1.4705882352941177e-06, | |
| "logits/chosen": -0.4218984842300415, | |
| "logits/rejected": -0.411405086517334, | |
| "logps/chosen": -202.14004516601562, | |
| "logps/rejected": -220.29254150390625, | |
| "loss": 0.1733, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 1.8182326555252075, | |
| "rewards/margins": 3.9761695861816406, | |
| "rewards/rejected": -2.1579370498657227, | |
| "step": 117 | |
| }, | |
| { | |
| "epoch": 1.5475409836065572, | |
| "grad_norm": 15.47387981414795, | |
| "learning_rate": 1.4338235294117648e-06, | |
| "logits/chosen": -0.6655436754226685, | |
| "logits/rejected": -0.9528688192367554, | |
| "logps/chosen": -110.75199890136719, | |
| "logps/rejected": -216.50701904296875, | |
| "loss": 0.0995, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 0.448855996131897, | |
| "rewards/margins": 2.9766037464141846, | |
| "rewards/rejected": -2.5277481079101562, | |
| "step": 118 | |
| }, | |
| { | |
| "epoch": 1.5606557377049182, | |
| "grad_norm": 56.997802734375, | |
| "learning_rate": 1.3970588235294119e-06, | |
| "logits/chosen": -0.3543809652328491, | |
| "logits/rejected": -0.5634450912475586, | |
| "logps/chosen": -307.3863525390625, | |
| "logps/rejected": -422.4675598144531, | |
| "loss": 0.19, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": 1.821000337600708, | |
| "rewards/margins": 5.365086555480957, | |
| "rewards/rejected": -3.5440866947174072, | |
| "step": 119 | |
| }, | |
| { | |
| "epoch": 1.5737704918032787, | |
| "grad_norm": 5.717536926269531, | |
| "learning_rate": 1.360294117647059e-06, | |
| "logits/chosen": -0.394744873046875, | |
| "logits/rejected": -0.46669620275497437, | |
| "logps/chosen": -162.78341674804688, | |
| "logps/rejected": -354.51654052734375, | |
| "loss": 0.0251, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 1.3121498823165894, | |
| "rewards/margins": 5.8546037673950195, | |
| "rewards/rejected": -4.542453765869141, | |
| "step": 120 | |
| }, | |
| { | |
| "epoch": 1.5868852459016394, | |
| "grad_norm": 102.92301177978516, | |
| "learning_rate": 1.323529411764706e-06, | |
| "logits/chosen": -0.8797488212585449, | |
| "logits/rejected": -0.5256719589233398, | |
| "logps/chosen": -400.2969055175781, | |
| "logps/rejected": -228.6015625, | |
| "loss": 0.3735, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": 1.3958312273025513, | |
| "rewards/margins": 2.500825881958008, | |
| "rewards/rejected": -1.1049946546554565, | |
| "step": 121 | |
| }, | |
| { | |
| "epoch": 1.6, | |
| "grad_norm": 12.96183967590332, | |
| "learning_rate": 1.2867647058823528e-06, | |
| "logits/chosen": -0.1941753625869751, | |
| "logits/rejected": -0.2740371525287628, | |
| "logps/chosen": -146.65438842773438, | |
| "logps/rejected": -286.4040832519531, | |
| "loss": 0.0833, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 1.6577894687652588, | |
| "rewards/margins": 5.093594074249268, | |
| "rewards/rejected": -3.4358043670654297, | |
| "step": 122 | |
| }, | |
| { | |
| "epoch": 1.6131147540983606, | |
| "grad_norm": 1.308056354522705, | |
| "learning_rate": 1.25e-06, | |
| "logits/chosen": -0.2991219460964203, | |
| "logits/rejected": -0.608604371547699, | |
| "logps/chosen": -332.8055419921875, | |
| "logps/rejected": -501.82794189453125, | |
| "loss": 0.0056, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 1.9228206872940063, | |
| "rewards/margins": 6.52579927444458, | |
| "rewards/rejected": -4.602978706359863, | |
| "step": 123 | |
| }, | |
| { | |
| "epoch": 1.6262295081967213, | |
| "grad_norm": 6.536846160888672, | |
| "learning_rate": 1.2132352941176472e-06, | |
| "logits/chosen": -0.5168918371200562, | |
| "logits/rejected": -0.8077197670936584, | |
| "logps/chosen": -200.44891357421875, | |
| "logps/rejected": -283.05535888671875, | |
| "loss": 0.0305, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 1.2009633779525757, | |
| "rewards/margins": 4.113708019256592, | |
| "rewards/rejected": -2.9127449989318848, | |
| "step": 124 | |
| }, | |
| { | |
| "epoch": 1.639344262295082, | |
| "grad_norm": 14.031917572021484, | |
| "learning_rate": 1.1764705882352942e-06, | |
| "logits/chosen": -0.2985928952693939, | |
| "logits/rejected": -0.5871812701225281, | |
| "logps/chosen": -203.19163513183594, | |
| "logps/rejected": -404.0279846191406, | |
| "loss": 0.1122, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 1.369646668434143, | |
| "rewards/margins": 5.168557643890381, | |
| "rewards/rejected": -3.7989113330841064, | |
| "step": 125 | |
| }, | |
| { | |
| "epoch": 1.6524590163934425, | |
| "grad_norm": 1.0998114347457886, | |
| "learning_rate": 1.1397058823529413e-06, | |
| "logits/chosen": -0.9684606790542603, | |
| "logits/rejected": -0.8049026727676392, | |
| "logps/chosen": -247.9980010986328, | |
| "logps/rejected": -212.36807250976562, | |
| "loss": 0.178, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": 2.4171135425567627, | |
| "rewards/margins": 5.950634002685547, | |
| "rewards/rejected": -3.533520460128784, | |
| "step": 126 | |
| }, | |
| { | |
| "epoch": 1.6655737704918034, | |
| "grad_norm": 1.8453718423843384, | |
| "learning_rate": 1.1029411764705884e-06, | |
| "logits/chosen": -0.4717595875263214, | |
| "logits/rejected": -0.13389728963375092, | |
| "logps/chosen": -400.8570251464844, | |
| "logps/rejected": -322.6037902832031, | |
| "loss": 0.0069, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 3.168325901031494, | |
| "rewards/margins": 6.738244533538818, | |
| "rewards/rejected": -3.569918155670166, | |
| "step": 127 | |
| }, | |
| { | |
| "epoch": 1.678688524590164, | |
| "grad_norm": 9.327398300170898, | |
| "learning_rate": 1.0661764705882354e-06, | |
| "logits/chosen": -0.45057347416877747, | |
| "logits/rejected": -0.22152967751026154, | |
| "logps/chosen": -261.6713562011719, | |
| "logps/rejected": -318.07391357421875, | |
| "loss": 0.0558, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 2.15228271484375, | |
| "rewards/margins": 5.979676723480225, | |
| "rewards/rejected": -3.8273940086364746, | |
| "step": 128 | |
| }, | |
| { | |
| "epoch": 1.6918032786885244, | |
| "grad_norm": 21.2039737701416, | |
| "learning_rate": 1.0294117647058825e-06, | |
| "logits/chosen": -0.5791776180267334, | |
| "logits/rejected": -0.8412386178970337, | |
| "logps/chosen": -132.2311553955078, | |
| "logps/rejected": -418.17657470703125, | |
| "loss": 0.1523, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 0.6422145962715149, | |
| "rewards/margins": 6.080798625946045, | |
| "rewards/rejected": -5.438583850860596, | |
| "step": 129 | |
| }, | |
| { | |
| "epoch": 1.7049180327868854, | |
| "grad_norm": 0.3313053250312805, | |
| "learning_rate": 9.926470588235295e-07, | |
| "logits/chosen": -0.7310227155685425, | |
| "logits/rejected": -0.6085512638092041, | |
| "logps/chosen": -517.57373046875, | |
| "logps/rejected": -645.620849609375, | |
| "loss": 0.001, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 3.5617308616638184, | |
| "rewards/margins": 8.085519790649414, | |
| "rewards/rejected": -4.5237884521484375, | |
| "step": 130 | |
| }, | |
| { | |
| "epoch": 1.7180327868852459, | |
| "grad_norm": 71.060546875, | |
| "learning_rate": 9.558823529411764e-07, | |
| "logits/chosen": -0.6338833570480347, | |
| "logits/rejected": -0.897128701210022, | |
| "logps/chosen": -193.55189514160156, | |
| "logps/rejected": -340.1580810546875, | |
| "loss": 0.3464, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": 0.23160991072654724, | |
| "rewards/margins": 1.533915400505066, | |
| "rewards/rejected": -1.3023054599761963, | |
| "step": 131 | |
| }, | |
| { | |
| "epoch": 1.7311475409836066, | |
| "grad_norm": 11.899687767028809, | |
| "learning_rate": 9.191176470588237e-07, | |
| "logits/chosen": -0.8149244785308838, | |
| "logits/rejected": -0.5785297155380249, | |
| "logps/chosen": -213.12173461914062, | |
| "logps/rejected": -237.8140411376953, | |
| "loss": 0.0893, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 1.6387001276016235, | |
| "rewards/margins": 3.045469284057617, | |
| "rewards/rejected": -1.4067691564559937, | |
| "step": 132 | |
| }, | |
| { | |
| "epoch": 1.7442622950819673, | |
| "grad_norm": 1.8859055042266846, | |
| "learning_rate": 8.823529411764707e-07, | |
| "logits/chosen": -0.40556299686431885, | |
| "logits/rejected": -0.577975332736969, | |
| "logps/chosen": -514.1160888671875, | |
| "logps/rejected": -692.885009765625, | |
| "loss": 0.0032, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 2.4865522384643555, | |
| "rewards/margins": 9.726764678955078, | |
| "rewards/rejected": -7.240212440490723, | |
| "step": 133 | |
| }, | |
| { | |
| "epoch": 1.7573770491803278, | |
| "grad_norm": 15.04454517364502, | |
| "learning_rate": 8.455882352941178e-07, | |
| "logits/chosen": -0.7795289754867554, | |
| "logits/rejected": -0.8595843315124512, | |
| "logps/chosen": -117.66368103027344, | |
| "logps/rejected": -155.0380859375, | |
| "loss": 0.1567, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 1.0589184761047363, | |
| "rewards/margins": 2.530170440673828, | |
| "rewards/rejected": -1.4712518453598022, | |
| "step": 134 | |
| }, | |
| { | |
| "epoch": 1.7704918032786885, | |
| "grad_norm": 79.06763458251953, | |
| "learning_rate": 8.088235294117648e-07, | |
| "logits/chosen": -0.7521681189537048, | |
| "logits/rejected": -0.5849143862724304, | |
| "logps/chosen": -370.17303466796875, | |
| "logps/rejected": -297.7393493652344, | |
| "loss": 0.3304, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": 1.5731346607208252, | |
| "rewards/margins": 4.559065341949463, | |
| "rewards/rejected": -2.9859304428100586, | |
| "step": 135 | |
| }, | |
| { | |
| "epoch": 1.7836065573770492, | |
| "grad_norm": 75.73023223876953, | |
| "learning_rate": 7.720588235294119e-07, | |
| "logits/chosen": -0.24577771127223969, | |
| "logits/rejected": -0.2834129333496094, | |
| "logps/chosen": -169.0641632080078, | |
| "logps/rejected": -227.2678985595703, | |
| "loss": 0.1953, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 0.9874727725982666, | |
| "rewards/margins": 2.466601848602295, | |
| "rewards/rejected": -1.4791290760040283, | |
| "step": 136 | |
| }, | |
| { | |
| "epoch": 1.7967213114754097, | |
| "grad_norm": 13.235738754272461, | |
| "learning_rate": 7.352941176470589e-07, | |
| "logits/chosen": -0.8364167213439941, | |
| "logits/rejected": -0.7191005349159241, | |
| "logps/chosen": -281.6129150390625, | |
| "logps/rejected": -327.17169189453125, | |
| "loss": 0.0731, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 2.5926880836486816, | |
| "rewards/margins": 5.672669410705566, | |
| "rewards/rejected": -3.0799813270568848, | |
| "step": 137 | |
| }, | |
| { | |
| "epoch": 1.8098360655737706, | |
| "grad_norm": 5.092410087585449, | |
| "learning_rate": 6.985294117647059e-07, | |
| "logits/chosen": -0.6490945219993591, | |
| "logits/rejected": -0.9161443710327148, | |
| "logps/chosen": -366.2134704589844, | |
| "logps/rejected": -429.5161437988281, | |
| "loss": 0.0178, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 1.8624721765518188, | |
| "rewards/margins": 6.365274906158447, | |
| "rewards/rejected": -4.50280237197876, | |
| "step": 138 | |
| }, | |
| { | |
| "epoch": 1.8229508196721311, | |
| "grad_norm": 3.215587615966797, | |
| "learning_rate": 6.61764705882353e-07, | |
| "logits/chosen": -0.7531751990318298, | |
| "logits/rejected": -0.30548983812332153, | |
| "logps/chosen": -386.60504150390625, | |
| "logps/rejected": -275.23779296875, | |
| "loss": 0.0127, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 3.7106332778930664, | |
| "rewards/margins": 6.189945220947266, | |
| "rewards/rejected": -2.4793121814727783, | |
| "step": 139 | |
| }, | |
| { | |
| "epoch": 1.8360655737704918, | |
| "grad_norm": 0.22684346139431, | |
| "learning_rate": 6.25e-07, | |
| "logits/chosen": -0.38512834906578064, | |
| "logits/rejected": -0.39937111735343933, | |
| "logps/chosen": -506.0502624511719, | |
| "logps/rejected": -478.14990234375, | |
| "loss": 0.0006, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 4.004444599151611, | |
| "rewards/margins": 9.403310775756836, | |
| "rewards/rejected": -5.398866653442383, | |
| "step": 140 | |
| }, | |
| { | |
| "epoch": 1.8491803278688526, | |
| "grad_norm": 8.121055603027344, | |
| "learning_rate": 5.882352941176471e-07, | |
| "logits/chosen": -0.5020557045936584, | |
| "logits/rejected": -0.6416282653808594, | |
| "logps/chosen": -369.1741027832031, | |
| "logps/rejected": -440.558837890625, | |
| "loss": 0.0314, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 3.697706699371338, | |
| "rewards/margins": 7.363080024719238, | |
| "rewards/rejected": -3.6653730869293213, | |
| "step": 141 | |
| }, | |
| { | |
| "epoch": 1.862295081967213, | |
| "grad_norm": 25.55944061279297, | |
| "learning_rate": 5.514705882352942e-07, | |
| "logits/chosen": -0.6144732236862183, | |
| "logits/rejected": -0.5314571857452393, | |
| "logps/chosen": -412.43310546875, | |
| "logps/rejected": -425.11572265625, | |
| "loss": 0.0594, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 2.2833995819091797, | |
| "rewards/margins": 5.651876926422119, | |
| "rewards/rejected": -3.3684768676757812, | |
| "step": 142 | |
| }, | |
| { | |
| "epoch": 1.8754098360655738, | |
| "grad_norm": 2.3017585277557373, | |
| "learning_rate": 5.147058823529412e-07, | |
| "logits/chosen": -0.7556811571121216, | |
| "logits/rejected": -0.8270020484924316, | |
| "logps/chosen": -401.3725280761719, | |
| "logps/rejected": -348.38067626953125, | |
| "loss": 0.181, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": 2.7437126636505127, | |
| "rewards/margins": 7.136537551879883, | |
| "rewards/rejected": -4.392825126647949, | |
| "step": 143 | |
| }, | |
| { | |
| "epoch": 1.8885245901639345, | |
| "grad_norm": 1.4070271253585815, | |
| "learning_rate": 4.779411764705882e-07, | |
| "logits/chosen": -0.24664568901062012, | |
| "logits/rejected": -0.640694797039032, | |
| "logps/chosen": -327.0250549316406, | |
| "logps/rejected": -863.6223754882812, | |
| "loss": 0.0034, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 1.61453115940094, | |
| "rewards/margins": 6.990232467651367, | |
| "rewards/rejected": -5.375700950622559, | |
| "step": 144 | |
| }, | |
| { | |
| "epoch": 1.901639344262295, | |
| "grad_norm": 37.87571334838867, | |
| "learning_rate": 4.4117647058823536e-07, | |
| "logits/chosen": -0.7307272553443909, | |
| "logits/rejected": -0.5720311403274536, | |
| "logps/chosen": -393.85491943359375, | |
| "logps/rejected": -344.6254577636719, | |
| "loss": 0.352, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": 1.1627728939056396, | |
| "rewards/margins": 3.641770124435425, | |
| "rewards/rejected": -2.478997230529785, | |
| "step": 145 | |
| }, | |
| { | |
| "epoch": 1.9147540983606557, | |
| "grad_norm": 15.04370403289795, | |
| "learning_rate": 4.044117647058824e-07, | |
| "logits/chosen": -0.842681884765625, | |
| "logits/rejected": -0.4292832314968109, | |
| "logps/chosen": -297.94207763671875, | |
| "logps/rejected": -240.36907958984375, | |
| "loss": 0.0534, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 3.2723472118377686, | |
| "rewards/margins": 5.443933963775635, | |
| "rewards/rejected": -2.171586751937866, | |
| "step": 146 | |
| }, | |
| { | |
| "epoch": 1.9278688524590164, | |
| "grad_norm": 15.202438354492188, | |
| "learning_rate": 3.6764705882352943e-07, | |
| "logits/chosen": -0.6476624011993408, | |
| "logits/rejected": -0.49806222319602966, | |
| "logps/chosen": -224.61077880859375, | |
| "logps/rejected": -169.60565185546875, | |
| "loss": 0.1546, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 0.8692402839660645, | |
| "rewards/margins": 2.598930597305298, | |
| "rewards/rejected": -1.7296903133392334, | |
| "step": 147 | |
| }, | |
| { | |
| "epoch": 1.940983606557377, | |
| "grad_norm": 7.096276760101318, | |
| "learning_rate": 3.308823529411765e-07, | |
| "logits/chosen": -0.7730454802513123, | |
| "logits/rejected": -0.4797229766845703, | |
| "logps/chosen": -321.5426025390625, | |
| "logps/rejected": -219.32769775390625, | |
| "loss": 0.0219, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 2.728835344314575, | |
| "rewards/margins": 5.427863597869873, | |
| "rewards/rejected": -2.699028253555298, | |
| "step": 148 | |
| }, | |
| { | |
| "epoch": 1.9540983606557378, | |
| "grad_norm": 66.5175552368164, | |
| "learning_rate": 2.9411764705882356e-07, | |
| "logits/chosen": -0.31179729104042053, | |
| "logits/rejected": -0.28874772787094116, | |
| "logps/chosen": -279.7017822265625, | |
| "logps/rejected": -289.318115234375, | |
| "loss": 0.2663, | |
| "rewards/accuracies": 0.75, | |
| "rewards/chosen": 1.061594009399414, | |
| "rewards/margins": 3.8362441062927246, | |
| "rewards/rejected": -2.7746498584747314, | |
| "step": 149 | |
| }, | |
| { | |
| "epoch": 1.9672131147540983, | |
| "grad_norm": 32.35987091064453, | |
| "learning_rate": 2.573529411764706e-07, | |
| "logits/chosen": -0.6153096556663513, | |
| "logits/rejected": -0.725881814956665, | |
| "logps/chosen": -301.1914367675781, | |
| "logps/rejected": -272.52825927734375, | |
| "loss": 0.233, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 1.4337717294692993, | |
| "rewards/margins": 3.663452625274658, | |
| "rewards/rejected": -2.2296810150146484, | |
| "step": 150 | |
| }, | |
| { | |
| "epoch": 1.980327868852459, | |
| "grad_norm": 0.4960515797138214, | |
| "learning_rate": 2.2058823529411768e-07, | |
| "logits/chosen": -0.30176612734794617, | |
| "logits/rejected": 0.019010625779628754, | |
| "logps/chosen": -270.0059814453125, | |
| "logps/rejected": -376.6888732910156, | |
| "loss": 0.0017, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 1.2527967691421509, | |
| "rewards/margins": 7.02215576171875, | |
| "rewards/rejected": -5.7693586349487305, | |
| "step": 151 | |
| }, | |
| { | |
| "epoch": 1.9934426229508198, | |
| "grad_norm": 29.579833984375, | |
| "learning_rate": 1.8382352941176472e-07, | |
| "logits/chosen": -0.753585696220398, | |
| "logits/rejected": -0.5951805114746094, | |
| "logps/chosen": -178.24880981445312, | |
| "logps/rejected": -202.90585327148438, | |
| "loss": 0.1476, | |
| "rewards/accuracies": 1.0, | |
| "rewards/chosen": 1.0991493463516235, | |
| "rewards/margins": 2.3130767345428467, | |
| "rewards/rejected": -1.2139275074005127, | |
| "step": 152 | |
| } | |
| ], | |
| "logging_steps": 1, | |
| "max_steps": 152, | |
| "num_input_tokens_seen": 0, | |
| "num_train_epochs": 2, | |
| "save_steps": 500, | |
| "stateful_callbacks": { | |
| "TrainerControl": { | |
| "args": { | |
| "should_epoch_stop": false, | |
| "should_evaluate": false, | |
| "should_log": false, | |
| "should_save": true, | |
| "should_training_stop": true | |
| }, | |
| "attributes": {} | |
| } | |
| }, | |
| "total_flos": 0.0, | |
| "train_batch_size": 1, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |