{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.999247554552295,
  "eval_steps": 30,
  "global_step": 166,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.012039127163280662,
      "grad_norm": 11.953282356262207,
      "learning_rate": 5.555555555555555e-08,
      "logits/chosen": -0.48816660046577454,
      "logits/rejected": -0.42142170667648315,
      "logps/chosen": -117.26611328125,
      "logps/rejected": -125.41987609863281,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.06019563581640331,
      "grad_norm": 16.68506622314453,
      "learning_rate": 2.7777777777777776e-07,
      "logits/chosen": -0.46595269441604614,
      "logits/rejected": -0.356529176235199,
      "logps/chosen": -190.95057678222656,
      "logps/rejected": -211.25076293945312,
      "loss": 0.6926,
      "rewards/accuracies": 0.453125,
      "rewards/chosen": 0.0007588082225993276,
      "rewards/margins": 0.0022044419310986996,
      "rewards/rejected": -0.0014456338249146938,
      "step": 5
    },
    {
      "epoch": 0.12039127163280662,
      "grad_norm": 13.722668647766113,
      "learning_rate": 4.999499509357132e-07,
      "logits/chosen": -0.4793759286403656,
      "logits/rejected": -0.37052756547927856,
      "logps/chosen": -155.6678009033203,
      "logps/rejected": -199.44947814941406,
      "loss": 0.6889,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": 0.005186144262552261,
      "rewards/margins": 0.009383995085954666,
      "rewards/rejected": -0.004197851754724979,
      "step": 10
    },
    {
      "epoch": 0.18058690744920994,
      "grad_norm": 12.493337631225586,
      "learning_rate": 4.982003369106287e-07,
      "logits/chosen": -0.49185729026794434,
      "logits/rejected": -0.37670475244522095,
      "logps/chosen": -76.30625915527344,
      "logps/rejected": -177.40817260742188,
      "loss": 0.6691,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.028129320591688156,
      "rewards/margins": 0.048517487943172455,
      "rewards/rejected": -0.020388163626194,
      "step": 15
    },
    {
      "epoch": 0.24078254326561324,
      "grad_norm": 11.79749870300293,
      "learning_rate": 4.939682729058838e-07,
      "logits/chosen": -0.45126277208328247,
      "logits/rejected": -0.3729521930217743,
      "logps/chosen": -166.20733642578125,
      "logps/rejected": -207.5035400390625,
      "loss": 0.6249,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.08626963198184967,
      "rewards/margins": 0.14315639436244965,
      "rewards/rejected": -0.05688678100705147,
      "step": 20
    },
    {
      "epoch": 0.3009781790820166,
      "grad_norm": 10.574383735656738,
      "learning_rate": 4.872960871766826e-07,
      "logits/chosen": -0.4710594713687897,
      "logits/rejected": -0.3694532513618469,
      "logps/chosen": -86.93299865722656,
      "logps/rejected": -186.15248107910156,
      "loss": 0.587,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.12460210174322128,
      "rewards/margins": 0.23137669265270233,
      "rewards/rejected": -0.10677458345890045,
      "step": 25
    },
    {
      "epoch": 0.3611738148984199,
      "grad_norm": 11.093204498291016,
      "learning_rate": 4.782505135862175e-07,
      "logits/chosen": -0.45945605635643005,
      "logits/rejected": -0.33354875445365906,
      "logps/chosen": -71.20188903808594,
      "logps/rejected": -212.95767211914062,
      "loss": 0.5229,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.16831260919570923,
      "rewards/margins": 0.38181251287460327,
      "rewards/rejected": -0.21349990367889404,
      "step": 30
    },
    {
      "epoch": 0.3611738148984199,
      "eval_logits/chosen": -0.4618959426879883,
      "eval_logits/rejected": -0.3433874249458313,
      "eval_logps/chosen": -98.28858947753906,
      "eval_logps/rejected": -212.0100555419922,
      "eval_loss": 0.5059286952018738,
      "eval_rewards/accuracies": 1.0,
      "eval_rewards/chosen": 0.18195854127407074,
      "eval_rewards/margins": 0.4181654751300812,
      "eval_rewards/rejected": -0.23620688915252686,
      "eval_runtime": 179.4182,
      "eval_samples_per_second": 3.043,
      "eval_steps_per_second": 1.522,
      "step": 30
    },
    {
      "epoch": 0.4213694507148232,
      "grad_norm": 9.376220703125,
      "learning_rate": 4.6692202414695724e-07,
      "logits/chosen": -0.4632042944431305,
      "logits/rejected": -0.35029542446136475,
      "logps/chosen": -84.06396484375,
      "logps/rejected": -213.848388671875,
      "loss": 0.4976,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.2034817487001419,
      "rewards/margins": 0.4632874131202698,
      "rewards/rejected": -0.25980567932128906,
      "step": 35
    },
    {
      "epoch": 0.4815650865312265,
      "grad_norm": 8.679346084594727,
      "learning_rate": 4.534239241377266e-07,
      "logits/chosen": -0.44362330436706543,
      "logits/rejected": -0.2992916703224182,
      "logps/chosen": -105.2283706665039,
      "logps/rejected": -244.84890747070312,
      "loss": 0.4197,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.22778573632240295,
      "rewards/margins": 0.6910415291786194,
      "rewards/rejected": -0.46325573325157166,
      "step": 40
    },
    {
      "epoch": 0.5417607223476298,
      "grad_norm": 7.219143867492676,
      "learning_rate": 4.3789121884703727e-07,
      "logits/chosen": -0.41270333528518677,
      "logits/rejected": -0.27924439311027527,
      "logps/chosen": -70.08865356445312,
      "logps/rejected": -261.56170654296875,
      "loss": 0.3621,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.23598209023475647,
      "rewards/margins": 0.9187321662902832,
      "rewards/rejected": -0.6827500462532043,
      "step": 45
    },
    {
      "epoch": 0.6019563581640331,
      "grad_norm": 6.640863418579102,
      "learning_rate": 4.204792632772754e-07,
      "logits/chosen": -0.4174782633781433,
      "logits/rejected": -0.2659801244735718,
      "logps/chosen": -109.1211166381836,
      "logps/rejected": -280.77813720703125,
      "loss": 0.3123,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.2913265824317932,
      "rewards/margins": 1.1760694980621338,
      "rewards/rejected": -0.8847430348396301,
      "step": 50
    },
    {
      "epoch": 0.6621519939804364,
      "grad_norm": 5.293730735778809,
      "learning_rate": 4.01362208315132e-07,
      "logits/chosen": -0.4078051447868347,
      "logits/rejected": -0.25378990173339844,
      "logps/chosen": -116.1395492553711,
      "logps/rejected": -301.702392578125,
      "loss": 0.2619,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.3083065152168274,
      "rewards/margins": 1.4346027374267578,
      "rewards/rejected": -1.1262962818145752,
      "step": 55
    },
    {
      "epoch": 0.7223476297968398,
      "grad_norm": 4.923187255859375,
      "learning_rate": 3.807312589093701e-07,
      "logits/chosen": -0.4022981524467468,
      "logits/rejected": -0.2537968158721924,
      "logps/chosen": -103.5102310180664,
      "logps/rejected": -326.17486572265625,
      "loss": 0.2411,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.2954918146133423,
      "rewards/margins": 1.6640812158584595,
      "rewards/rejected": -1.3685895204544067,
      "step": 60
    },
    {
      "epoch": 0.7223476297968398,
      "eval_logits/chosen": -0.406698077917099,
      "eval_logits/rejected": -0.23272451758384705,
      "eval_logps/chosen": -88.900146484375,
      "eval_logps/rejected": -330.7860107421875,
      "eval_loss": 0.2134791761636734,
      "eval_rewards/accuracies": 1.0,
      "eval_rewards/chosen": 0.27584296464920044,
      "eval_rewards/margins": 1.699809193611145,
      "eval_rewards/rejected": -1.4239662885665894,
      "eval_runtime": 183.6706,
      "eval_samples_per_second": 2.973,
      "eval_steps_per_second": 1.486,
      "step": 60
    },
    {
      "epoch": 0.782543265613243,
      "grad_norm": 4.418694496154785,
      "learning_rate": 3.5879276167728337e-07,
      "logits/chosen": -0.4011690616607666,
      "logits/rejected": -0.22693100571632385,
      "logps/chosen": -56.017845153808594,
      "logps/rejected": -332.90380859375,
      "loss": 0.1992,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.2577177882194519,
      "rewards/margins": 1.7556695938110352,
      "rewards/rejected": -1.497951865196228,
      "step": 65
    },
    {
      "epoch": 0.8427389014296464,
      "grad_norm": 3.794067859649658,
      "learning_rate": 3.357661410672247e-07,
      "logits/chosen": -0.33221831917762756,
      "logits/rejected": -0.1342475712299347,
      "logps/chosen": -74.8525619506836,
      "logps/rejected": -393.6372985839844,
      "loss": 0.1573,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.25318774580955505,
      "rewards/margins": 2.2917141914367676,
      "rewards/rejected": -2.0385265350341797,
      "step": 70
    },
    {
      "epoch": 0.9029345372460497,
      "grad_norm": 3.2060582637786865,
      "learning_rate": 3.1188170471929064e-07,
      "logits/chosen": -0.2731170058250427,
      "logits/rejected": -0.10557065159082413,
      "logps/chosen": -161.33474731445312,
      "logps/rejected": -437.1578063964844,
      "loss": 0.1191,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.2284388542175293,
      "rewards/margins": 2.7684743404388428,
      "rewards/rejected": -2.5400352478027344,
      "step": 75
    },
    {
      "epoch": 0.963130173062453,
      "grad_norm": 1.8243048191070557,
      "learning_rate": 2.8737833997450657e-07,
      "logits/chosen": -0.2729615569114685,
      "logits/rejected": -0.0838087797164917,
      "logps/chosen": -80.7784423828125,
      "logps/rejected": -492.26080322265625,
      "loss": 0.0926,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.21951308846473694,
      "rewards/margins": 3.2957847118377686,
      "rewards/rejected": -3.0762715339660645,
      "step": 80
    },
    {
      "epoch": 1.0233258088788564,
      "grad_norm": 1.6663548946380615,
      "learning_rate": 2.6250112457156293e-07,
      "logits/chosen": -0.2614014744758606,
      "logits/rejected": -0.06510574370622635,
      "logps/chosen": -87.82209777832031,
      "logps/rejected": -556.6070556640625,
      "loss": 0.0775,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.1841917783021927,
      "rewards/margins": 3.8404979705810547,
      "rewards/rejected": -3.656306028366089,
      "step": 85
    },
    {
      "epoch": 1.0835214446952597,
      "grad_norm": 1.4261465072631836,
      "learning_rate": 2.3749887542843707e-07,
      "logits/chosen": -0.26909708976745605,
      "logits/rejected": -0.0703195109963417,
      "logps/chosen": -100.4935531616211,
      "logps/rejected": -598.0264892578125,
      "loss": 0.0634,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.16738824546337128,
      "rewards/margins": 4.255741119384766,
      "rewards/rejected": -4.088352680206299,
      "step": 90
    },
    {
      "epoch": 1.0835214446952597,
      "eval_logits/chosen": -0.25795042514801025,
      "eval_logits/rejected": -0.035701148211956024,
      "eval_logps/chosen": -99.51206970214844,
      "eval_logps/rejected": -607.3591918945312,
      "eval_loss": 0.07514728605747223,
      "eval_rewards/accuracies": 1.0,
      "eval_rewards/chosen": 0.16972379386425018,
      "eval_rewards/margins": 4.359421730041504,
      "eval_rewards/rejected": -4.189698219299316,
      "eval_runtime": 184.8502,
      "eval_samples_per_second": 2.954,
      "eval_steps_per_second": 1.477,
      "step": 90
    },
    {
      "epoch": 1.143717080511663,
      "grad_norm": 1.3145774602890015,
      "learning_rate": 2.126216600254934e-07,
      "logits/chosen": -0.2394520789384842,
      "logits/rejected": -0.023534994572401047,
      "logps/chosen": -150.5535888671875,
      "logps/rejected": -699.5059814453125,
      "loss": 0.0605,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.09343000501394272,
      "rewards/margins": 5.061221122741699,
      "rewards/rejected": -4.967791557312012,
      "step": 95
    },
    {
      "epoch": 1.2039127163280663,
      "grad_norm": 0.628934919834137,
      "learning_rate": 1.8811829528070931e-07,
      "logits/chosen": -0.2859761714935303,
      "logits/rejected": -0.027079975232481956,
      "logps/chosen": -72.18778991699219,
      "logps/rejected": -737.0675048828125,
      "loss": 0.0489,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.1680155098438263,
      "rewards/margins": 5.535449028015137,
      "rewards/rejected": -5.367433547973633,
      "step": 100
    },
    {
      "epoch": 1.2648607975921746,
      "grad_norm": 0.8070765733718872,
      "learning_rate": 1.6423385893277537e-07,
      "logits/chosen": -0.24801869690418243,
      "logits/rejected": -0.014291681349277496,
      "logps/chosen": -109.48841857910156,
      "logps/rejected": -681.1419677734375,
      "loss": 0.0476,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.09798868745565414,
      "rewards/margins": 5.150273323059082,
      "rewards/rejected": -5.052285194396973,
      "step": 105
    },
    {
      "epoch": 1.325056433408578,
      "grad_norm": 0.6888783574104309,
      "learning_rate": 1.4120723832271663e-07,
      "logits/chosen": -0.23577141761779785,
      "logits/rejected": -0.011897795833647251,
      "logps/chosen": -120.05766296386719,
      "logps/rejected": -731.9443969726562,
      "loss": 0.054,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.07691726088523865,
      "rewards/margins": 5.525856018066406,
      "rewards/rejected": -5.448939323425293,
      "step": 110
    },
    {
      "epoch": 1.3852520692249812,
      "grad_norm": 0.6200137734413147,
      "learning_rate": 1.1926874109062998e-07,
      "logits/chosen": -0.2343941181898117,
      "logits/rejected": 0.012432652525603771,
      "logps/chosen": -131.77175903320312,
      "logps/rejected": -745.412841796875,
      "loss": 0.0496,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.02012884058058262,
      "rewards/margins": 5.680893898010254,
      "rewards/rejected": -5.660765171051025,
      "step": 115
    },
    {
      "epoch": 1.4454477050413845,
      "grad_norm": 0.5668838620185852,
      "learning_rate": 9.863779168486797e-08,
      "logits/chosen": -0.22034311294555664,
      "logits/rejected": 0.03210270777344704,
      "logps/chosen": -115.6126708984375,
      "logps/rejected": -790.9016723632812,
      "loss": 0.0452,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.053596943616867065,
      "rewards/margins": 6.07260799407959,
      "rewards/rejected": -6.0190110206604,
      "step": 120
    },
    {
      "epoch": 1.4454477050413845,
      "eval_logits/chosen": -0.2345404177904129,
      "eval_logits/rejected": 0.038044609129428864,
      "eval_logps/chosen": -108.91590881347656,
      "eval_logps/rejected": -782.349365234375,
      "eval_loss": 0.05323062837123871,
      "eval_rewards/accuracies": 1.0,
      "eval_rewards/chosen": 0.07568521797657013,
      "eval_rewards/margins": 6.015285491943359,
      "eval_rewards/rejected": -5.939600467681885,
      "eval_runtime": 191.5149,
      "eval_samples_per_second": 2.851,
      "eval_steps_per_second": 1.425,
      "step": 120
    },
    {
      "epoch": 1.5056433408577878,
      "grad_norm": 0.7317198514938354,
      "learning_rate": 7.952073672272464e-08,
      "logits/chosen": -0.21637864410877228,
      "logits/rejected": 0.0313236340880394,
      "logps/chosen": -126.0725326538086,
      "logps/rejected": -756.0071411132812,
      "loss": 0.0492,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -0.008594411425292492,
      "rewards/margins": 5.820641994476318,
      "rewards/rejected": -5.82923698425293,
      "step": 125
    },
    {
      "epoch": 1.5658389766741911,
      "grad_norm": 0.5411990284919739,
      "learning_rate": 6.210878115296267e-08,
      "logits/chosen": -0.2258405238389969,
      "logits/rejected": 0.026202013716101646,
      "logps/chosen": -144.01856994628906,
      "logps/rejected": -760.4769287109375,
      "loss": 0.029,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -0.01425357349216938,
      "rewards/margins": 5.899747371673584,
      "rewards/rejected": -5.91400146484375,
      "step": 130
    },
    {
      "epoch": 1.6260346124905944,
      "grad_norm": 0.30838528275489807,
      "learning_rate": 4.657607586227344e-08,
      "logits/chosen": -0.23345847427845,
      "logits/rejected": 0.027966421097517014,
      "logps/chosen": -124.6040267944336,
      "logps/rejected": -917.08349609375,
      "loss": 0.036,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.0217633955180645,
      "rewards/margins": 7.151785850524902,
      "rewards/rejected": -7.1300225257873535,
      "step": 135
    },
    {
      "epoch": 1.6862302483069977,
      "grad_norm": 1.47968590259552,
      "learning_rate": 3.30779758530427e-08,
      "logits/chosen": -0.2245132029056549,
      "logits/rejected": 0.047028228640556335,
      "logps/chosen": -118.25953674316406,
      "logps/rejected": -851.4835205078125,
      "loss": 0.0516,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -0.0352528840303421,
      "rewards/margins": 6.62514591217041,
      "rewards/rejected": -6.660399436950684,
      "step": 140
    },
    {
      "epoch": 1.746425884123401,
      "grad_norm": 0.5548922419548035,
      "learning_rate": 2.1749486413782435e-08,
      "logits/chosen": -0.21600095927715302,
      "logits/rejected": 0.06509985029697418,
      "logps/chosen": -140.62574768066406,
      "logps/rejected": -936.2386474609375,
      "loss": 0.0312,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -0.07837997376918793,
      "rewards/margins": 7.2990617752075195,
      "rewards/rejected": -7.37744140625,
      "step": 145
    },
    {
      "epoch": 1.8066215199398044,
      "grad_norm": 0.5003569722175598,
      "learning_rate": 1.2703912823317397e-08,
      "logits/chosen": -0.20677892863750458,
      "logits/rejected": 0.06903555244207382,
      "logps/chosen": -150.6492919921875,
      "logps/rejected": -826.0875244140625,
      "loss": 0.0307,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -0.09083503484725952,
      "rewards/margins": 6.384757995605469,
      "rewards/rejected": -6.475593566894531,
      "step": 150
    },
    {
      "epoch": 1.8066215199398044,
      "eval_logits/chosen": -0.230697363615036,
      "eval_logits/rejected": 0.079569511115551,
      "eval_logps/chosen": -114.66878509521484,
      "eval_logps/rejected": -867.22607421875,
      "eval_loss": 0.045937325805425644,
      "eval_rewards/accuracies": 1.0,
      "eval_rewards/chosen": 0.018156491219997406,
      "eval_rewards/margins": 6.806523323059082,
      "eval_rewards/rejected": -6.78836727142334,
      "eval_runtime": 193.7203,
      "eval_samples_per_second": 2.818,
      "eval_steps_per_second": 1.409,
      "step": 150
    },
    {
      "epoch": 1.8668171557562077,
      "grad_norm": 0.303206205368042,
      "learning_rate": 6.031727094116174e-09,
      "logits/chosen": -0.22335031628608704,
      "logits/rejected": 0.0749906450510025,
      "logps/chosen": -178.22097778320312,
      "logps/rejected": -1008.4031372070312,
      "loss": 0.0311,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -0.15972693264484406,
      "rewards/margins": 7.850977420806885,
      "rewards/rejected": -8.01070499420166,
      "step": 155
    },
    {
      "epoch": 1.927012791572611,
      "grad_norm": 0.5809817910194397,
      "learning_rate": 1.7996630893712671e-09,
      "logits/chosen": -0.20071235299110413,
      "logits/rejected": 0.08598125725984573,
      "logps/chosen": -156.4674530029297,
      "logps/rejected": -796.1658325195312,
      "loss": 0.0451,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -0.11812801659107208,
      "rewards/margins": 6.20440673828125,
      "rewards/rejected": -6.322534084320068,
      "step": 160
    },
    {
      "epoch": 1.9872084273890143,
      "grad_norm": 0.5935037732124329,
      "learning_rate": 5.0049064286850074e-11,
      "logits/chosen": -0.20801086723804474,
      "logits/rejected": 0.09084095805883408,
      "logps/chosen": -133.05039978027344,
      "logps/rejected": -858.1156005859375,
      "loss": 0.0449,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -0.057532183825969696,
      "rewards/margins": 6.773335933685303,
      "rewards/rejected": -6.830867767333984,
      "step": 165
    },
    {
      "epoch": 1.999247554552295,
      "step": 166,
      "total_flos": 0.0,
      "train_loss": 0.01645123219139964,
      "train_runtime": 7014.6347,
      "train_samples_per_second": 0.758,
      "train_steps_per_second": 0.024
    }
  ],
  "logging_steps": 5,
  "max_steps": 166,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}