{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9935483870967742,
  "eval_steps": 500,
  "global_step": 77,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.012903225806451613,
      "grad_norm": 7.575236641343013,
      "learning_rate": 6.25e-08,
      "logits/chosen": -3.0188064575195312,
      "logits/rejected": -3.0444469451904297,
      "logps/chosen": -24.05819320678711,
      "logps/rejected": -34.35090637207031,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.025806451612903226,
      "grad_norm": 5.98360652154207,
      "learning_rate": 1.25e-07,
      "logits/chosen": -2.8478431701660156,
      "logits/rejected": -2.816833972930908,
      "logps/chosen": -20.381498336791992,
      "logps/rejected": -17.743824005126953,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 2
    },
    {
      "epoch": 0.03870967741935484,
      "grad_norm": 8.209690481232373,
      "learning_rate": 1.875e-07,
      "logits/chosen": -3.1529746055603027,
      "logits/rejected": -3.1297154426574707,
      "logps/chosen": -29.355775833129883,
      "logps/rejected": -27.056119918823242,
      "loss": 0.6931,
      "rewards/accuracies": 0.25,
      "rewards/chosen": 0.0009147524833679199,
      "rewards/margins": 0.0013640093384310603,
      "rewards/rejected": -0.00044925688416697085,
      "step": 3
    },
    {
      "epoch": 0.05161290322580645,
      "grad_norm": 6.674375366491978,
      "learning_rate": 2.5e-07,
      "logits/chosen": -3.0608866214752197,
      "logits/rejected": -3.0937676429748535,
      "logps/chosen": -33.94017791748047,
      "logps/rejected": -42.64073181152344,
      "loss": 0.6931,
      "rewards/accuracies": 0.3125,
      "rewards/chosen": 0.00032928824657574296,
      "rewards/margins": 0.00031276524532586336,
      "rewards/rejected": 1.65230012498796e-05,
      "step": 4
    },
    {
      "epoch": 0.06451612903225806,
      "grad_norm": 7.666243296505927,
      "learning_rate": 3.1249999999999997e-07,
      "logits/chosen": -2.932521104812622,
      "logits/rejected": -2.905606746673584,
      "logps/chosen": -30.47471809387207,
      "logps/rejected": -31.02899932861328,
      "loss": 0.6934,
      "rewards/accuracies": 0.25,
      "rewards/chosen": -0.00036995764821767807,
      "rewards/margins": 0.0002594107063487172,
      "rewards/rejected": -0.0006293683545663953,
      "step": 5
    },
    {
      "epoch": 0.07741935483870968,
      "grad_norm": 8.49764667909534,
      "learning_rate": 3.75e-07,
      "logits/chosen": -3.0920748710632324,
      "logits/rejected": -3.1023545265197754,
      "logps/chosen": -57.721893310546875,
      "logps/rejected": -62.54054260253906,
      "loss": 0.6932,
      "rewards/accuracies": 0.1875,
      "rewards/chosen": -0.001343829557299614,
      "rewards/margins": -0.0011225318303331733,
      "rewards/rejected": -0.00022129775607027113,
      "step": 6
    },
    {
      "epoch": 0.09032258064516129,
      "grad_norm": 7.33711327379137,
      "learning_rate": 4.375e-07,
      "logits/chosen": -2.827237129211426,
      "logits/rejected": -2.816389560699463,
      "logps/chosen": -48.21922302246094,
      "logps/rejected": -40.31886291503906,
      "loss": 0.6931,
      "rewards/accuracies": 0.3125,
      "rewards/chosen": -0.008381588384509087,
      "rewards/margins": -0.002356385812163353,
      "rewards/rejected": -0.006025202106684446,
      "step": 7
    },
    {
      "epoch": 0.1032258064516129,
      "grad_norm": 4.927805558730517,
      "learning_rate": 5e-07,
      "logits/chosen": -3.0250465869903564,
      "logits/rejected": -2.9766347408294678,
      "logps/chosen": -35.06133270263672,
      "logps/rejected": -39.101741790771484,
      "loss": 0.6927,
      "rewards/accuracies": 0.5,
      "rewards/chosen": 0.0022047751117497683,
      "rewards/margins": 0.004272125195711851,
      "rewards/rejected": -0.0020673503167927265,
      "step": 8
    },
    {
      "epoch": 0.11612903225806452,
      "grad_norm": 10.205695541523633,
      "learning_rate": 4.997409184116819e-07,
      "logits/chosen": -3.0799849033355713,
      "logits/rejected": -3.0568881034851074,
      "logps/chosen": -52.149391174316406,
      "logps/rejected": -42.9133186340332,
      "loss": 0.6923,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": 0.001999429427087307,
      "rewards/margins": 0.0028107434045523405,
      "rewards/rejected": -0.0008113139774650335,
      "step": 9
    },
    {
      "epoch": 0.12903225806451613,
      "grad_norm": 7.6501883418829415,
      "learning_rate": 4.989642106328828e-07,
      "logits/chosen": -3.0393779277801514,
      "logits/rejected": -3.0457043647766113,
      "logps/chosen": -38.716957092285156,
      "logps/rejected": -37.4216194152832,
      "loss": 0.6909,
      "rewards/accuracies": 0.25,
      "rewards/chosen": 0.0031322999857366085,
      "rewards/margins": 0.004622948355972767,
      "rewards/rejected": -0.0014906482538208365,
      "step": 10
    },
    {
      "epoch": 0.14193548387096774,
      "grad_norm": 5.299300171900462,
      "learning_rate": 4.976714865090826e-07,
      "logits/chosen": -3.161602258682251,
      "logits/rejected": -3.1717371940612793,
      "logps/chosen": -63.28357696533203,
      "logps/rejected": -70.27680969238281,
      "loss": 0.6893,
      "rewards/accuracies": 0.3125,
      "rewards/chosen": 0.003206870285794139,
      "rewards/margins": 0.004664942622184753,
      "rewards/rejected": -0.0014580729184672236,
      "step": 11
    },
    {
      "epoch": 0.15483870967741936,
      "grad_norm": 6.014913040201032,
      "learning_rate": 4.958654254084355e-07,
      "logits/chosen": -3.0326099395751953,
      "logits/rejected": -3.014777898788452,
      "logps/chosen": -59.73595428466797,
      "logps/rejected": -54.794410705566406,
      "loss": 0.6915,
      "rewards/accuracies": 0.25,
      "rewards/chosen": 0.0073243663646280766,
      "rewards/margins": 0.003999748267233372,
      "rewards/rejected": 0.003324618097394705,
      "step": 12
    },
    {
      "epoch": 0.16774193548387098,
      "grad_norm": 5.582237671195135,
      "learning_rate": 4.935497706683698e-07,
      "logits/chosen": -3.088393211364746,
      "logits/rejected": -3.1142313480377197,
      "logps/chosen": -52.511295318603516,
      "logps/rejected": -61.057098388671875,
      "loss": 0.6872,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": -0.0028830207884311676,
      "rewards/margins": 0.038106679916381836,
      "rewards/rejected": -0.040989700704813004,
      "step": 13
    },
    {
      "epoch": 0.18064516129032257,
      "grad_norm": 5.712829645203721,
      "learning_rate": 4.907293218369498e-07,
      "logits/chosen": -2.8892948627471924,
      "logits/rejected": -2.9255869388580322,
      "logps/chosen": -47.063507080078125,
      "logps/rejected": -59.78974533081055,
      "loss": 0.6862,
      "rewards/accuracies": 0.5,
      "rewards/chosen": 0.0006488842191174626,
      "rewards/margins": 0.03463263809680939,
      "rewards/rejected": -0.033983759582042694,
      "step": 14
    },
    {
      "epoch": 0.1935483870967742,
      "grad_norm": 6.024414620349713,
      "learning_rate": 4.874099247250798e-07,
      "logits/chosen": -2.9845387935638428,
      "logits/rejected": -3.003157138824463,
      "logps/chosen": -61.63920974731445,
      "logps/rejected": -77.2118148803711,
      "loss": 0.6873,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.015748100355267525,
      "rewards/margins": 0.006991543807089329,
      "rewards/rejected": -0.022739645093679428,
      "step": 15
    },
    {
      "epoch": 0.2064516129032258,
      "grad_norm": 6.792340069349894,
      "learning_rate": 4.835984592901677e-07,
      "logits/chosen": -3.0437138080596924,
      "logits/rejected": -3.0553507804870605,
      "logps/chosen": -47.824859619140625,
      "logps/rejected": -57.92134475708008,
      "loss": 0.6799,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.006595752667635679,
      "rewards/margins": 0.035280268639326096,
      "rewards/rejected": -0.041876018047332764,
      "step": 16
    },
    {
      "epoch": 0.21935483870967742,
      "grad_norm": 11.168844097664572,
      "learning_rate": 4.793028253763632e-07,
      "logits/chosen": -3.0245566368103027,
      "logits/rejected": -2.989858865737915,
      "logps/chosen": -42.81298065185547,
      "logps/rejected": -52.179176330566406,
      "loss": 0.6768,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.03772101551294327,
      "rewards/margins": 0.07718317210674286,
      "rewards/rejected": -0.11490418016910553,
      "step": 17
    },
    {
      "epoch": 0.23225806451612904,
      "grad_norm": 7.414924383918636,
      "learning_rate": 4.74531926340924e-07,
      "logits/chosen": -2.971475839614868,
      "logits/rejected": -2.945223569869995,
      "logps/chosen": -43.70143127441406,
      "logps/rejected": -33.612754821777344,
      "loss": 0.6875,
      "rewards/accuracies": 0.125,
      "rewards/chosen": -0.03854432329535484,
      "rewards/margins": -0.005393250845372677,
      "rewards/rejected": -0.03315107151865959,
      "step": 18
    },
    {
      "epoch": 0.24516129032258063,
      "grad_norm": 10.414586267170323,
      "learning_rate": 4.692956506006486e-07,
      "logits/chosen": -2.9716796875,
      "logits/rejected": -2.9322829246520996,
      "logps/chosen": -62.92927551269531,
      "logps/rejected": -59.554969787597656,
      "loss": 0.6734,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.0010191131150349975,
      "rewards/margins": 0.037698645144701004,
      "rewards/rejected": -0.038717761635780334,
      "step": 19
    },
    {
      "epoch": 0.25806451612903225,
      "grad_norm": 8.133295798312702,
      "learning_rate": 4.6360485113662214e-07,
      "logits/chosen": -2.8298916816711426,
      "logits/rejected": -2.906747579574585,
      "logps/chosen": -26.855010986328125,
      "logps/rejected": -53.25407028198242,
      "loss": 0.6727,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": -0.004282762296497822,
      "rewards/margins": 0.04214238375425339,
      "rewards/rejected": -0.04642514884471893,
      "step": 20
    },
    {
      "epoch": 0.2709677419354839,
      "grad_norm": 7.911827600658339,
      "learning_rate": 4.574713229997563e-07,
      "logits/chosen": -3.1790623664855957,
      "logits/rejected": -3.183187246322632,
      "logps/chosen": -62.52846908569336,
      "logps/rejected": -64.2408218383789,
      "loss": 0.6712,
      "rewards/accuracies": 0.25,
      "rewards/chosen": -0.019579507410526276,
      "rewards/margins": 6.686896085739136e-06,
      "rewards/rejected": -0.019586196169257164,
      "step": 21
    },
    {
      "epoch": 0.2838709677419355,
      "grad_norm": 6.233493817899055,
      "learning_rate": 4.5090777886374453e-07,
      "logits/chosen": -3.0413153171539307,
      "logits/rejected": -3.0401761531829834,
      "logps/chosen": -32.636592864990234,
      "logps/rejected": -33.258331298828125,
      "loss": 0.6712,
      "rewards/accuracies": 0.1875,
      "rewards/chosen": -0.006448335479944944,
      "rewards/margins": 0.016460755839943886,
      "rewards/rejected": -0.022909093648195267,
      "step": 22
    },
    {
      "epoch": 0.2967741935483871,
      "grad_norm": 12.20764827602206,
      "learning_rate": 4.4392782267610495e-07,
      "logits/chosen": -2.8663036823272705,
      "logits/rejected": -2.8864498138427734,
      "logps/chosen": -53.24276351928711,
      "logps/rejected": -58.46373748779297,
      "loss": 0.6649,
      "rewards/accuracies": 0.1875,
      "rewards/chosen": -0.09484248608350754,
      "rewards/margins": 0.05482190474867821,
      "rewards/rejected": -0.14966437220573425,
      "step": 23
    },
    {
      "epoch": 0.3096774193548387,
      "grad_norm": 9.11035362020615,
      "learning_rate": 4.3654592146192137e-07,
      "logits/chosen": -3.043889284133911,
      "logits/rejected": -3.04427433013916,
      "logps/chosen": -50.45281219482422,
      "logps/rejected": -62.33794403076172,
      "loss": 0.6614,
      "rewards/accuracies": 0.3125,
      "rewards/chosen": -0.06691670417785645,
      "rewards/margins": 0.06718842685222626,
      "rewards/rejected": -0.1341051459312439,
      "step": 24
    },
    {
      "epoch": 0.3225806451612903,
      "grad_norm": 7.38691150020557,
      "learning_rate": 4.2877737533872484e-07,
      "logits/chosen": -3.020115375518799,
      "logits/rejected": -2.949554204940796,
      "logps/chosen": -90.56169128417969,
      "logps/rejected": -88.24325561523438,
      "loss": 0.6629,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.10975000262260437,
      "rewards/margins": 0.09208998084068298,
      "rewards/rejected": -0.20183998346328735,
      "step": 25
    },
    {
      "epoch": 0.33548387096774196,
      "grad_norm": 5.575537840637267,
      "learning_rate": 4.206382858046635e-07,
      "logits/chosen": -3.0138120651245117,
      "logits/rejected": -3.0246829986572266,
      "logps/chosen": -63.48017120361328,
      "logps/rejected": -64.13935089111328,
      "loss": 0.6774,
      "rewards/accuracies": 0.375,
      "rewards/chosen": -0.08101020753383636,
      "rewards/margins": 0.03786313533782959,
      "rewards/rejected": -0.11887334287166595,
      "step": 26
    },
    {
      "epoch": 0.34838709677419355,
      "grad_norm": 6.166923188160189,
      "learning_rate": 4.12145522365689e-07,
      "logits/chosen": -2.909912347793579,
      "logits/rejected": -2.9610202312469482,
      "logps/chosen": -33.47990417480469,
      "logps/rejected": -65.81147003173828,
      "loss": 0.6581,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.14108583331108093,
      "rewards/margins": 0.14831525087356567,
      "rewards/rejected": -0.2894010841846466,
      "step": 27
    },
    {
      "epoch": 0.36129032258064514,
      "grad_norm": 6.548237975271229,
      "learning_rate": 4.0331668757092905e-07,
      "logits/chosen": -2.959547519683838,
      "logits/rejected": -2.954468250274658,
      "logps/chosen": -48.109375,
      "logps/rejected": -64.71925354003906,
      "loss": 0.662,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": -0.20003138482570648,
      "rewards/margins": 0.12624813616275787,
      "rewards/rejected": -0.32627955079078674,
      "step": 28
    },
    {
      "epoch": 0.3741935483870968,
      "grad_norm": 8.441769285768048,
      "learning_rate": 3.941700805287168e-07,
      "logits/chosen": -2.763150215148926,
      "logits/rejected": -2.7778449058532715,
      "logps/chosen": -47.057498931884766,
      "logps/rejected": -56.587127685546875,
      "loss": 0.6655,
      "rewards/accuracies": 0.3125,
      "rewards/chosen": -0.12557458877563477,
      "rewards/margins": 0.02159186080098152,
      "rewards/rejected": -0.147166445851326,
      "step": 29
    },
    {
      "epoch": 0.3870967741935484,
      "grad_norm": 6.820259306929618,
      "learning_rate": 3.847246589788939e-07,
      "logits/chosen": -2.9482781887054443,
      "logits/rejected": -2.943040370941162,
      "logps/chosen": -51.7378044128418,
      "logps/rejected": -68.4146728515625,
      "loss": 0.6487,
      "rewards/accuracies": 0.375,
      "rewards/chosen": -0.16159993410110474,
      "rewards/margins": 0.12931199371814728,
      "rewards/rejected": -0.2909119427204132,
      "step": 30
    },
    {
      "epoch": 0.4,
      "grad_norm": 7.812680952947978,
      "learning_rate": 3.75e-07,
      "logits/chosen": -2.7655723094940186,
      "logits/rejected": -2.7568373680114746,
      "logps/chosen": -71.60513305664062,
      "logps/rejected": -76.70703125,
      "loss": 0.649,
      "rewards/accuracies": 0.25,
      "rewards/chosen": -0.2342558652162552,
      "rewards/margins": 0.0854680985212326,
      "rewards/rejected": -0.3197239935398102,
      "step": 31
    },
    {
      "epoch": 0.4129032258064516,
      "grad_norm": 7.397143004044,
      "learning_rate": 3.65016259432788e-07,
      "logits/chosen": -2.9698352813720703,
      "logits/rejected": -3.0043089389801025,
      "logps/chosen": -41.41299057006836,
      "logps/rejected": -50.932334899902344,
      "loss": 0.6388,
      "rewards/accuracies": 0.375,
      "rewards/chosen": -0.23619022965431213,
      "rewards/margins": 0.04527804255485535,
      "rewards/rejected": -0.2814682722091675,
      "step": 32
    },
    {
      "epoch": 0.4258064516129032,
      "grad_norm": 7.152484571426207,
      "learning_rate": 3.54794130104166e-07,
      "logits/chosen": -2.823517084121704,
      "logits/rejected": -2.8529646396636963,
      "logps/chosen": -16.425308227539062,
      "logps/rejected": -24.822494506835938,
      "loss": 0.6421,
      "rewards/accuracies": 0.125,
      "rewards/chosen": -0.0828515961766243,
      "rewards/margins": 0.04831594228744507,
      "rewards/rejected": -0.13116753101348877,
      "step": 33
    },
    {
      "epoch": 0.43870967741935485,
      "grad_norm": 9.358494780867913,
      "learning_rate": 3.4435479893815355e-07,
      "logits/chosen": -2.8296639919281006,
      "logits/rejected": -2.8361520767211914,
      "logps/chosen": -86.12203979492188,
      "logps/rejected": -95.99158477783203,
      "loss": 0.6386,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.26781708002090454,
      "rewards/margins": 0.14883765578269958,
      "rewards/rejected": -0.41665470600128174,
      "step": 34
    },
    {
      "epoch": 0.45161290322580644,
      "grad_norm": 8.786574832386057,
      "learning_rate": 3.337199030427465e-07,
      "logits/chosen": -2.9809043407440186,
      "logits/rejected": -2.9782485961914062,
      "logps/chosen": -70.9310302734375,
      "logps/rejected": -77.85838317871094,
      "loss": 0.6339,
      "rewards/accuracies": 0.25,
      "rewards/chosen": -0.2812136113643646,
      "rewards/margins": 0.06157858297228813,
      "rewards/rejected": -0.3427921533584595,
      "step": 35
    },
    {
      "epoch": 0.4645161290322581,
      "grad_norm": 6.3872463929573495,
      "learning_rate": 3.229114848637062e-07,
      "logits/chosen": -2.9001638889312744,
      "logits/rejected": -2.8978068828582764,
      "logps/chosen": -53.35871124267578,
      "logps/rejected": -61.33634567260742,
      "loss": 0.6308,
      "rewards/accuracies": 0.3125,
      "rewards/chosen": -0.24349886178970337,
      "rewards/margins": 0.06033254787325859,
      "rewards/rejected": -0.30383142828941345,
      "step": 36
    },
    {
      "epoch": 0.4774193548387097,
      "grad_norm": 9.892537592398023,
      "learning_rate": 3.11951946498225e-07,
      "logits/chosen": -2.742429256439209,
      "logits/rejected": -2.762636184692383,
      "logps/chosen": -51.067054748535156,
      "logps/rejected": -58.35332489013672,
      "loss": 0.6331,
      "rewards/accuracies": 0.3125,
      "rewards/chosen": -0.12977036833763123,
      "rewards/margins": 0.07739689201116562,
      "rewards/rejected": -0.20716726779937744,
      "step": 37
    },
    {
      "epoch": 0.49032258064516127,
      "grad_norm": 8.185078211550065,
      "learning_rate": 3.008640032631585e-07,
      "logits/chosen": -2.911105155944824,
      "logits/rejected": -2.9227683544158936,
      "logps/chosen": -37.1446647644043,
      "logps/rejected": -68.1653823852539,
      "loss": 0.6473,
      "rewards/accuracies": 0.25,
      "rewards/chosen": -0.11132928729057312,
      "rewards/margins": 0.14660148322582245,
      "rewards/rejected": -0.2579307556152344,
      "step": 38
    },
    {
      "epoch": 0.5032258064516129,
      "grad_norm": 9.483445735835293,
      "learning_rate": 2.8967063661406284e-07,
      "logits/chosen": -3.0798282623291016,
      "logits/rejected": -3.039661407470703,
      "logps/chosen": -75.90811157226562,
      "logps/rejected": -76.0742416381836,
      "loss": 0.6333,
      "rewards/accuracies": 0.3125,
      "rewards/chosen": -0.044046178460121155,
      "rewards/margins": 0.11025506258010864,
      "rewards/rejected": -0.154301255941391,
      "step": 39
    },
    {
      "epoch": 0.5161290322580645,
      "grad_norm": 10.173499498197904,
      "learning_rate": 2.783950465126187e-07,
      "logits/chosen": -2.8641061782836914,
      "logits/rejected": -2.880579710006714,
      "logps/chosen": -59.221946716308594,
      "logps/rejected": -90.15663146972656,
      "loss": 0.6433,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": -0.16509050130844116,
      "rewards/margins": 0.26559263467788696,
      "rewards/rejected": -0.4306831359863281,
      "step": 40
    },
    {
      "epoch": 0.5290322580645161,
      "grad_norm": 8.808202753666528,
      "learning_rate": 2.6706060334116775e-07,
      "logits/chosen": -3.0445690155029297,
      "logits/rejected": -3.074784755706787,
      "logps/chosen": -68.35140991210938,
      "logps/rejected": -104.00436401367188,
      "loss": 0.6333,
      "rewards/accuracies": 0.375,
      "rewards/chosen": -0.3045073449611664,
      "rewards/margins": 0.229690819978714,
      "rewards/rejected": -0.5341981649398804,
      "step": 41
    },
    {
      "epoch": 0.5419354838709678,
      "grad_norm": 8.13273017045566,
      "learning_rate": 2.556907994640264e-07,
      "logits/chosen": -2.9060964584350586,
      "logits/rejected": -2.90242338180542,
      "logps/chosen": -91.3266830444336,
      "logps/rejected": -118.18492126464844,
      "loss": 0.6499,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.23070470988750458,
      "rewards/margins": 0.24628427624702454,
      "rewards/rejected": -0.4769890308380127,
      "step": 42
    },
    {
      "epoch": 0.5548387096774193,
      "grad_norm": 8.743594644815907,
      "learning_rate": 2.4430920053597355e-07,
      "logits/chosen": -2.8366618156433105,
      "logits/rejected": -2.7787930965423584,
      "logps/chosen": -81.77251434326172,
      "logps/rejected": -92.00411987304688,
      "loss": 0.6421,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.2051384150981903,
      "rewards/margins": 0.1954495906829834,
      "rewards/rejected": -0.4005880057811737,
      "step": 43
    },
    {
      "epoch": 0.567741935483871,
      "grad_norm": 7.640503263166919,
      "learning_rate": 2.3293939665883228e-07,
      "logits/chosen": -2.8904852867126465,
      "logits/rejected": -2.877133369445801,
      "logps/chosen": -81.6563949584961,
      "logps/rejected": -94.8319320678711,
      "loss": 0.6258,
      "rewards/accuracies": 0.375,
      "rewards/chosen": -0.30271658301353455,
      "rewards/margins": 0.1767008900642395,
      "rewards/rejected": -0.47941750288009644,
      "step": 44
    },
    {
      "epoch": 0.5806451612903226,
      "grad_norm": 7.729498181809134,
      "learning_rate": 2.2160495348738124e-07,
      "logits/chosen": -3.036078929901123,
      "logits/rejected": -3.0348596572875977,
      "logps/chosen": -64.10650634765625,
      "logps/rejected": -89.60687255859375,
      "loss": 0.6304,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": -0.21719691157341003,
      "rewards/margins": 0.24271026253700256,
      "rewards/rejected": -0.4599071741104126,
      "step": 45
    },
    {
      "epoch": 0.5935483870967742,
      "grad_norm": 8.226196925104558,
      "learning_rate": 2.1032936338593717e-07,
      "logits/chosen": -2.9982056617736816,
      "logits/rejected": -3.010178327560425,
      "logps/chosen": -75.5604476928711,
      "logps/rejected": -85.93553161621094,
      "loss": 0.6261,
      "rewards/accuracies": 0.1875,
      "rewards/chosen": -0.426933228969574,
      "rewards/margins": -0.0191495418548584,
      "rewards/rejected": -0.4077836871147156,
      "step": 46
    },
    {
      "epoch": 0.6064516129032258,
      "grad_norm": 10.912368208375094,
      "learning_rate": 1.9913599673684159e-07,
      "logits/chosen": -3.070939540863037,
      "logits/rejected": -3.0626533031463623,
      "logps/chosen": -75.2443618774414,
      "logps/rejected": -106.09898376464844,
      "loss": 0.6116,
      "rewards/accuracies": 0.375,
      "rewards/chosen": -0.18821322917938232,
      "rewards/margins": 0.24638797342777252,
      "rewards/rejected": -0.43460121750831604,
      "step": 47
    },
    {
      "epoch": 0.6193548387096774,
      "grad_norm": 10.218819822781606,
      "learning_rate": 1.8804805350177506e-07,
      "logits/chosen": -3.0395772457122803,
      "logits/rejected": -3.069377899169922,
      "logps/chosen": -74.85322570800781,
      "logps/rejected": -102.59559631347656,
      "loss": 0.5944,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": -0.2960664629936218,
      "rewards/margins": 0.21642781794071198,
      "rewards/rejected": -0.5124942660331726,
      "step": 48
    },
    {
      "epoch": 0.632258064516129,
      "grad_norm": 11.388294826021683,
      "learning_rate": 1.7708851513629373e-07,
      "logits/chosen": -2.810640811920166,
      "logits/rejected": -2.8224596977233887,
      "logps/chosen": -66.05618286132812,
      "logps/rejected": -98.07613372802734,
      "loss": 0.6453,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": -0.34373006224632263,
      "rewards/margins": 0.2323249876499176,
      "rewards/rejected": -0.5760550498962402,
      "step": 49
    },
    {
      "epoch": 0.6451612903225806,
      "grad_norm": 13.864844054736514,
      "learning_rate": 1.6628009695725346e-07,
      "logits/chosen": -2.8773510456085205,
      "logits/rejected": -2.8751275539398193,
      "logps/chosen": -66.28228759765625,
      "logps/rejected": -67.61710357666016,
      "loss": 0.6199,
      "rewards/accuracies": 0.1875,
      "rewards/chosen": -0.16454900801181793,
      "rewards/margins": 0.060019105672836304,
      "rewards/rejected": -0.22456809878349304,
      "step": 50
    },
    {
      "epoch": 0.6580645161290323,
      "grad_norm": 11.09099423463829,
      "learning_rate": 1.5564520106184643e-07,
      "logits/chosen": -2.8890538215637207,
      "logits/rejected": -2.8882861137390137,
      "logps/chosen": -97.7024917602539,
      "logps/rejected": -117.82125854492188,
      "loss": 0.5991,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": -0.3737015128135681,
      "rewards/margins": 0.14311935007572174,
      "rewards/rejected": -0.5168208479881287,
      "step": 51
    },
    {
      "epoch": 0.6709677419354839,
      "grad_norm": 14.02314336885856,
      "learning_rate": 1.4520586989583405e-07,
      "logits/chosen": -3.058253049850464,
      "logits/rejected": -3.0794386863708496,
      "logps/chosen": -86.7257308959961,
      "logps/rejected": -100.26595306396484,
      "loss": 0.6199,
      "rewards/accuracies": 0.1875,
      "rewards/chosen": -0.21201437711715698,
      "rewards/margins": -0.08268441259860992,
      "rewards/rejected": -0.12932996451854706,
      "step": 52
    },
    {
      "epoch": 0.6838709677419355,
      "grad_norm": 15.2231921450545,
      "learning_rate": 1.3498374056721196e-07,
      "logits/chosen": -3.1192705631256104,
      "logits/rejected": -3.145230770111084,
      "logps/chosen": -45.069618225097656,
      "logps/rejected": -71.2268295288086,
      "loss": 0.6339,
      "rewards/accuracies": 0.25,
      "rewards/chosen": -0.13725833594799042,
      "rewards/margins": 0.12775808572769165,
      "rewards/rejected": -0.26501643657684326,
      "step": 53
    },
    {
      "epoch": 0.6967741935483871,
      "grad_norm": 10.43878298749505,
      "learning_rate": 1.2500000000000005e-07,
      "logits/chosen": -2.923105239868164,
      "logits/rejected": -2.929281234741211,
      "logps/chosen": -44.89529800415039,
      "logps/rejected": -39.17671203613281,
      "loss": 0.612,
      "rewards/accuracies": 0.125,
      "rewards/chosen": -0.10784972459077835,
      "rewards/margins": -0.016192324459552765,
      "rewards/rejected": -0.09165740013122559,
      "step": 54
    },
    {
      "epoch": 0.7096774193548387,
      "grad_norm": 12.476212432569776,
      "learning_rate": 1.1527534102110611e-07,
      "logits/chosen": -2.9852142333984375,
      "logits/rejected": -3.0071215629577637,
      "logps/chosen": -69.94696807861328,
      "logps/rejected": -109.17233276367188,
      "loss": 0.6082,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": -0.21303395926952362,
      "rewards/margins": 0.3135417103767395,
      "rewards/rejected": -0.5265756845474243,
      "step": 55
    },
    {
      "epoch": 0.7225806451612903,
      "grad_norm": 7.382374495620578,
      "learning_rate": 1.0582991947128323e-07,
      "logits/chosen": -2.8991634845733643,
      "logits/rejected": -2.997525453567505,
      "logps/chosen": -60.843292236328125,
      "logps/rejected": -86.19700622558594,
      "loss": 0.6219,
      "rewards/accuracies": 0.375,
      "rewards/chosen": -0.23299351334571838,
      "rewards/margins": 0.20443448424339294,
      "rewards/rejected": -0.43742799758911133,
      "step": 56
    },
    {
      "epoch": 0.7354838709677419,
      "grad_norm": 7.692419552247762,
      "learning_rate": 9.668331242907088e-08,
      "logits/chosen": -3.0765204429626465,
      "logits/rejected": -3.0679996013641357,
      "logps/chosen": -65.09187316894531,
      "logps/rejected": -85.44721984863281,
      "loss": 0.6402,
      "rewards/accuracies": 0.3125,
      "rewards/chosen": -0.20165708661079407,
      "rewards/margins": 0.18024533987045288,
      "rewards/rejected": -0.38190245628356934,
      "step": 57
    },
    {
      "epoch": 0.7483870967741936,
      "grad_norm": 11.554834969612196,
      "learning_rate": 8.785447763431101e-08,
      "logits/chosen": -3.120288848876953,
      "logits/rejected": -3.10617995262146,
      "logps/chosen": -78.844970703125,
      "logps/rejected": -106.19613647460938,
      "loss": 0.612,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": -0.2569999694824219,
      "rewards/margins": 0.2700415253639221,
      "rewards/rejected": -0.527041494846344,
      "step": 58
    },
    {
      "epoch": 0.7612903225806451,
      "grad_norm": 7.28020470137263,
      "learning_rate": 7.936171419533652e-08,
      "logits/chosen": -3.1455440521240234,
      "logits/rejected": -3.1471190452575684,
      "logps/chosen": -51.242835998535156,
      "logps/rejected": -78.44454956054688,
      "loss": 0.6152,
      "rewards/accuracies": 0.25,
      "rewards/chosen": -0.046268824487924576,
      "rewards/margins": 0.16216275095939636,
      "rewards/rejected": -0.20843157172203064,
      "step": 59
    },
    {
      "epoch": 0.7741935483870968,
      "grad_norm": 7.0851502720539346,
      "learning_rate": 7.122262466127513e-08,
      "logits/chosen": -2.983321189880371,
      "logits/rejected": -2.9774532318115234,
      "logps/chosen": -85.1767807006836,
      "logps/rejected": -111.3412857055664,
      "loss": 0.6049,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.4200911223888397,
      "rewards/margins": 0.23350517451763153,
      "rewards/rejected": -0.6535962820053101,
      "step": 60
    },
    {
      "epoch": 0.7870967741935484,
      "grad_norm": 10.713993216138782,
      "learning_rate": 6.345407853807863e-08,
      "logits/chosen": -2.8526759147644043,
      "logits/rejected": -2.85900616645813,
      "logps/chosen": -63.035057067871094,
      "logps/rejected": -72.75942993164062,
      "loss": 0.6374,
      "rewards/accuracies": 0.25,
      "rewards/chosen": -0.2579822540283203,
      "rewards/margins": 0.08359092473983765,
      "rewards/rejected": -0.34157317876815796,
      "step": 61
    },
    {
      "epoch": 0.8,
      "grad_norm": 10.03605388575717,
      "learning_rate": 5.607217732389502e-08,
      "logits/chosen": -3.0021588802337646,
      "logits/rejected": -3.0087029933929443,
      "logps/chosen": -30.55845832824707,
      "logps/rejected": -53.643280029296875,
      "loss": 0.587,
      "rewards/accuracies": 0.3125,
      "rewards/chosen": -0.10973939299583435,
      "rewards/margins": 0.20178647339344025,
      "rewards/rejected": -0.3115258812904358,
      "step": 62
    },
    {
      "epoch": 0.8129032258064516,
      "grad_norm": 6.2735761784405675,
      "learning_rate": 4.909222113625544e-08,
      "logits/chosen": -3.1388683319091797,
      "logits/rejected": -3.0751585960388184,
      "logps/chosen": -81.34771728515625,
      "logps/rejected": -89.97032165527344,
      "loss": 0.5877,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.19216133654117584,
      "rewards/margins": 0.20118926465511322,
      "rewards/rejected": -0.3933505713939667,
      "step": 63
    },
    {
      "epoch": 0.8258064516129032,
      "grad_norm": 11.716263562699654,
      "learning_rate": 4.2528677000243737e-08,
      "logits/chosen": -2.9592177867889404,
      "logits/rejected": -2.9626688957214355,
      "logps/chosen": -117.65725708007812,
      "logps/rejected": -126.92681884765625,
      "loss": 0.6193,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.5141925811767578,
      "rewards/margins": 0.1016259714961052,
      "rewards/rejected": -0.6158185005187988,
      "step": 64
    },
    {
      "epoch": 0.8387096774193549,
      "grad_norm": 9.893509990517233,
      "learning_rate": 3.6395148863377854e-08,
      "logits/chosen": -2.8350701332092285,
      "logits/rejected": -2.797614812850952,
      "logps/chosen": -38.36604309082031,
      "logps/rejected": -57.85443115234375,
      "loss": 0.6155,
| "rewards/accuracies": 0.3125, | |
| "rewards/chosen": -0.16758887469768524, | |
| "rewards/margins": 0.24942214787006378, | |
| "rewards/rejected": -0.417011022567749, | |
| "step": 65 | |
| }, | |
| { | |
| "epoch": 0.8516129032258064, | |
| "grad_norm": 11.654525901199872, | |
| "learning_rate": 3.0704349399351435e-08, | |
| "logits/chosen": -2.892805814743042, | |
| "logits/rejected": -2.926443099975586, | |
| "logps/chosen": -63.805030822753906, | |
| "logps/rejected": -109.76573181152344, | |
| "loss": 0.6189, | |
| "rewards/accuracies": 0.375, | |
| "rewards/chosen": -0.33767932653427124, | |
| "rewards/margins": 0.3409982919692993, | |
| "rewards/rejected": -0.6786776781082153, | |
| "step": 66 | |
| }, | |
| { | |
| "epoch": 0.864516129032258, | |
| "grad_norm": 9.135989557153602, | |
| "learning_rate": 2.5468073659075996e-08, | |
| "logits/chosen": -3.0026183128356934, | |
| "logits/rejected": -2.9746150970458984, | |
| "logps/chosen": -101.80465698242188, | |
| "logps/rejected": -122.83572387695312, | |
| "loss": 0.5887, | |
| "rewards/accuracies": 0.4375, | |
| "rewards/chosen": -0.3310484290122986, | |
| "rewards/margins": 0.2203977406024933, | |
| "rewards/rejected": -0.5514461994171143, | |
| "step": 67 | |
| }, | |
| { | |
| "epoch": 0.8774193548387097, | |
| "grad_norm": 9.564598187916838, | |
| "learning_rate": 2.069717462363679e-08, | |
| "logits/chosen": -3.0436861515045166, | |
| "logits/rejected": -3.017408847808838, | |
| "logps/chosen": -63.420555114746094, | |
| "logps/rejected": -79.03422546386719, | |
| "loss": 0.6229, | |
| "rewards/accuracies": 0.25, | |
| "rewards/chosen": -0.33887577056884766, | |
| "rewards/margins": 0.19294096529483795, | |
| "rewards/rejected": -0.5318167209625244, | |
| "step": 68 | |
| }, | |
| { | |
| "epoch": 0.8903225806451613, | |
| "grad_norm": 8.446220058864208, | |
| "learning_rate": 1.640154070983224e-08, | |
| "logits/chosen": -2.9233558177948, | |
| "logits/rejected": -2.9490303993225098, | |
| "logps/chosen": -106.49418640136719, | |
| "logps/rejected": -155.04872131347656, | |
| "loss": 0.5833, | |
| "rewards/accuracies": 0.5, | |
| "rewards/chosen": -0.4763030707836151, | |
| "rewards/margins": 0.36830809712409973, | |
| "rewards/rejected": -0.8446111679077148, | |
| "step": 69 | |
| }, | |
| { | |
| "epoch": 0.9032258064516129, | |
| "grad_norm": 11.736911118541979, | |
| "learning_rate": 1.2590075274920203e-08, | |
| "logits/chosen": -3.033332109451294, | |
| "logits/rejected": -2.9890236854553223, | |
| "logps/chosen": -72.19229125976562, | |
| "logps/rejected": -91.49864959716797, | |
| "loss": 0.643, | |
| "rewards/accuracies": 0.3125, | |
| "rewards/chosen": -0.18326060473918915, | |
| "rewards/margins": 0.19467103481292725, | |
| "rewards/rejected": -0.3779316544532776, | |
| "step": 70 | |
| }, | |
| { | |
| "epoch": 0.9161290322580645, | |
| "grad_norm": 8.22057742391122, | |
| "learning_rate": 9.270678163050217e-09, | |
| "logits/chosen": -2.92671537399292, | |
| "logits/rejected": -2.944516658782959, | |
| "logps/chosen": -81.09933471679688, | |
| "logps/rejected": -119.98873138427734, | |
| "loss": 0.5992, | |
| "rewards/accuracies": 0.5, | |
| "rewards/chosen": -0.39163094758987427, | |
| "rewards/margins": 0.3819514513015747, | |
| "rewards/rejected": -0.7735823392868042, | |
| "step": 71 | |
| }, | |
| { | |
| "epoch": 0.9290322580645162, | |
| "grad_norm": 8.437281968122917, | |
| "learning_rate": 6.450229331630253e-09, | |
| "logits/chosen": -2.8446879386901855, | |
| "logits/rejected": -2.8200840950012207, | |
| "logps/chosen": -79.77139282226562, | |
| "logps/rejected": -117.7776107788086, | |
| "loss": 0.6254, | |
| "rewards/accuracies": 0.4375, | |
| "rewards/chosen": -0.25027692317962646, | |
| "rewards/margins": 0.39766794443130493, | |
| "rewards/rejected": -0.6479449272155762, | |
| "step": 72 | |
| }, | |
| { | |
| "epoch": 0.9419354838709677, | |
| "grad_norm": 10.325301780650673, | |
| "learning_rate": 4.1345745915644935e-09, | |
| "logits/chosen": -3.0622751712799072, | |
| "logits/rejected": -3.030594825744629, | |
| "logps/chosen": -88.93402862548828, | |
| "logps/rejected": -100.32481384277344, | |
| "loss": 0.6015, | |
| "rewards/accuracies": 0.375, | |
| "rewards/chosen": -0.345296174287796, | |
| "rewards/margins": 0.1704256534576416, | |
| "rewards/rejected": -0.51572185754776, | |
| "step": 73 | |
| }, | |
| { | |
| "epoch": 0.9548387096774194, | |
| "grad_norm": 15.320595517735114, | |
| "learning_rate": 2.328513490917311e-09, | |
| "logits/chosen": -2.8475613594055176, | |
| "logits/rejected": -2.8002405166625977, | |
| "logps/chosen": -102.33820343017578, | |
| "logps/rejected": -135.93002319335938, | |
| "loss": 0.5982, | |
| "rewards/accuracies": 0.4375, | |
| "rewards/chosen": -0.46599456667900085, | |
| "rewards/margins": 0.20195163786411285, | |
| "rewards/rejected": -0.6679461598396301, | |
| "step": 74 | |
| }, | |
| { | |
| "epoch": 0.967741935483871, | |
| "grad_norm": 12.878552628840982, | |
| "learning_rate": 1.035789367117179e-09, | |
| "logits/chosen": -2.999194622039795, | |
| "logits/rejected": -2.9982104301452637, | |
| "logps/chosen": -38.21251678466797, | |
| "logps/rejected": -50.88334274291992, | |
| "loss": 0.6391, | |
| "rewards/accuracies": 0.1875, | |
| "rewards/chosen": -0.12685494124889374, | |
| "rewards/margins": 0.12716560065746307, | |
| "rewards/rejected": -0.2540205419063568, | |
| "step": 75 | |
| }, | |
| { | |
| "epoch": 0.9806451612903225, | |
| "grad_norm": 8.7924927116735, | |
| "learning_rate": 2.5908158831811077e-10, | |
| "logits/chosen": -2.9358251094818115, | |
| "logits/rejected": -2.925405979156494, | |
| "logps/chosen": -51.06131362915039, | |
| "logps/rejected": -58.798614501953125, | |
| "loss": 0.6059, | |
| "rewards/accuracies": 0.25, | |
| "rewards/chosen": -0.2657967209815979, | |
| "rewards/margins": 0.02999306283891201, | |
| "rewards/rejected": -0.29578977823257446, | |
| "step": 76 | |
| }, | |
| { | |
| "epoch": 0.9935483870967742, | |
| "grad_norm": 11.703381693919436, | |
| "learning_rate": 0.0, | |
| "logits/chosen": -2.8389170169830322, | |
| "logits/rejected": -2.8045477867126465, | |
| "logps/chosen": -82.48759460449219, | |
| "logps/rejected": -92.22787475585938, | |
| "loss": 0.6105, | |
| "rewards/accuracies": 0.375, | |
| "rewards/chosen": -0.343311071395874, | |
| "rewards/margins": 0.20035406947135925, | |
| "rewards/rejected": -0.5436651706695557, | |
| "step": 77 | |
| }, | |
| { | |
| "epoch": 0.9935483870967742, | |
| "step": 77, | |
| "total_flos": 0.0, | |
| "train_loss": 0.6444497665801605, | |
| "train_runtime": 1967.4135, | |
| "train_samples_per_second": 5.042, | |
| "train_steps_per_second": 0.039 | |
| } | |
| ], | |
| "logging_steps": 1, | |
| "max_steps": 77, | |
| "num_input_tokens_seen": 0, | |
| "num_train_epochs": 1, | |
| "save_steps": 100, | |
| "total_flos": 0.0, | |
| "train_batch_size": 1, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |