{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 10.126582278481013,
  "eval_steps": 500,
  "global_step": 300,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.33755274261603374,
      "grad_norm": 4.021343231201172,
      "learning_rate": 8.620689655172415e-07,
      "logits/chosen": -2.3571667671203613,
      "logits/rejected": -2.3413684368133545,
      "logps/chosen": -66.12004852294922,
      "logps/rejected": -75.67423248291016,
      "loss": 0.6916,
      "rewards/accuracies": 0.42500001192092896,
      "rewards/chosen": 0.002384235616773367,
      "rewards/margins": 0.0006812589708715677,
      "rewards/rejected": 0.00170297606382519,
      "step": 10
    },
    {
      "epoch": 0.6751054852320675,
      "grad_norm": 4.169963359832764,
      "learning_rate": 1.724137931034483e-06,
      "logits/chosen": -2.2970731258392334,
      "logits/rejected": -2.3126332759857178,
      "logps/chosen": -75.44676208496094,
      "logps/rejected": -91.96070861816406,
      "loss": 0.695,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": -0.004610966891050339,
      "rewards/margins": -0.0035776779986917973,
      "rewards/rejected": -0.0010332881938666105,
      "step": 20
    },
    {
      "epoch": 1.0126582278481013,
      "grad_norm": 3.956296682357788,
      "learning_rate": 2.5862068965517246e-06,
      "logits/chosen": -2.323883056640625,
      "logits/rejected": -2.360252857208252,
      "logps/chosen": -82.34379577636719,
      "logps/rejected": -76.72650146484375,
      "loss": 0.6932,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": -0.0038711726665496826,
      "rewards/margins": -0.002081885002553463,
      "rewards/rejected": -0.001789287431165576,
      "step": 30
    },
    {
      "epoch": 1.350210970464135,
      "grad_norm": 3.930093765258789,
      "learning_rate": 3.448275862068966e-06,
      "logits/chosen": -2.3154587745666504,
      "logits/rejected": -2.3474326133728027,
      "logps/chosen": -65.43012237548828,
      "logps/rejected": -73.15928649902344,
      "loss": 0.691,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": 0.005101156421005726,
      "rewards/margins": 0.008008326403796673,
      "rewards/rejected": -0.0029071702156215906,
      "step": 40
    },
    {
      "epoch": 1.6877637130801688,
      "grad_norm": 4.285252094268799,
      "learning_rate": 4.310344827586207e-06,
      "logits/chosen": -2.3710691928863525,
      "logits/rejected": -2.371490478515625,
      "logps/chosen": -71.6757583618164,
      "logps/rejected": -70.73356628417969,
      "loss": 0.6889,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": 0.0005036738002672791,
      "rewards/margins": 0.01000114344060421,
      "rewards/rejected": -0.009497471153736115,
      "step": 50
    },
    {
      "epoch": 2.0253164556962027,
      "grad_norm": 4.272525787353516,
      "learning_rate": 4.999818897894192e-06,
      "logits/chosen": -2.3313004970550537,
      "logits/rejected": -2.3659963607788086,
      "logps/chosen": -76.61670684814453,
      "logps/rejected": -83.35383605957031,
      "loss": 0.6826,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": 0.0012120052706450224,
      "rewards/margins": 0.0215681791305542,
      "rewards/rejected": -0.020356174558401108,
      "step": 60
    },
    {
      "epoch": 2.3628691983122363,
      "grad_norm": 3.9699583053588867,
      "learning_rate": 4.9934830787948756e-06,
      "logits/chosen": -2.335836172103882,
      "logits/rejected": -2.346723794937134,
      "logps/chosen": -77.20027923583984,
      "logps/rejected": -76.7348403930664,
      "loss": 0.668,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": 0.01467165071517229,
      "rewards/margins": 0.05109705403447151,
      "rewards/rejected": -0.036425404250621796,
      "step": 70
    },
    {
      "epoch": 2.70042194092827,
      "grad_norm": 4.201504230499268,
      "learning_rate": 4.978118375700895e-06,
      "logits/chosen": -2.3483119010925293,
      "logits/rejected": -2.314948320388794,
      "logps/chosen": -86.64659881591797,
      "logps/rejected": -73.74983215332031,
      "loss": 0.6543,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": 0.015483448281884193,
      "rewards/margins": 0.07252755016088486,
      "rewards/rejected": -0.057044100016355515,
      "step": 80
    },
    {
      "epoch": 3.037974683544304,
      "grad_norm": 4.158086776733398,
      "learning_rate": 4.953780424089803e-06,
      "logits/chosen": -2.353440523147583,
      "logits/rejected": -2.3548269271850586,
      "logps/chosen": -66.6883773803711,
      "logps/rejected": -70.85858917236328,
      "loss": 0.6434,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": 0.027489716187119484,
      "rewards/margins": 0.11177561432123184,
      "rewards/rejected": -0.08428589999675751,
      "step": 90
    },
    {
      "epoch": 3.3755274261603376,
      "grad_norm": 4.082221031188965,
      "learning_rate": 4.920557351506409e-06,
      "logits/chosen": -2.341866970062256,
      "logits/rejected": -2.364499092102051,
      "logps/chosen": -129.14761352539062,
      "logps/rejected": -73.15742492675781,
      "loss": 0.6058,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": 0.4784974157810211,
      "rewards/margins": 0.6413174867630005,
      "rewards/rejected": -0.16281995177268982,
      "step": 100
    },
    {
      "epoch": 3.7130801687763713,
      "grad_norm": 4.832098007202148,
      "learning_rate": 4.878569458453592e-06,
      "logits/chosen": -2.345391035079956,
      "logits/rejected": -2.348513603210449,
      "logps/chosen": -77.12025451660156,
      "logps/rejected": -87.58562469482422,
      "loss": 0.583,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.005122403614223003,
      "rewards/margins": 0.2564489245414734,
      "rewards/rejected": -0.26157131791114807,
      "step": 110
    },
    {
      "epoch": 4.050632911392405,
      "grad_norm": 4.12258243560791,
      "learning_rate": 4.827968782785062e-06,
      "logits/chosen": -2.373936176300049,
      "logits/rejected": -2.4137730598449707,
      "logps/chosen": -75.44515228271484,
      "logps/rejected": -93.63223266601562,
      "loss": 0.5739,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.009799259714782238,
      "rewards/margins": 0.2614334523677826,
      "rewards/rejected": -0.2712326943874359,
      "step": 120
    },
    {
      "epoch": 4.3881856540084385,
      "grad_norm": 4.568253993988037,
      "learning_rate": 4.7689385491773934e-06,
      "logits/chosen": -2.3857738971710205,
      "logits/rejected": -2.3939366340637207,
      "logps/chosen": -75.08316802978516,
      "logps/rejected": -85.54856872558594,
      "loss": 0.5264,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -0.057138361036777496,
      "rewards/margins": 0.4834250807762146,
      "rewards/rejected": -0.5405634641647339,
      "step": 130
    },
    {
      "epoch": 4.725738396624473,
      "grad_norm": 5.059521675109863,
      "learning_rate": 4.70169250567482e-06,
      "logits/chosen": -2.403332471847534,
      "logits/rejected": -2.4016706943511963,
      "logps/chosen": -77.38340759277344,
      "logps/rejected": -77.5801010131836,
      "loss": 0.5097,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": -0.15228016674518585,
      "rewards/margins": 0.39201101660728455,
      "rewards/rejected": -0.5442911982536316,
      "step": 140
    },
    {
      "epoch": 5.063291139240507,
      "grad_norm": 4.922734260559082,
      "learning_rate": 4.626474149709127e-06,
      "logits/chosen": -2.400696039199829,
      "logits/rejected": -2.3924992084503174,
      "logps/chosen": -65.39146423339844,
      "logps/rejected": -72.46720886230469,
      "loss": 0.5004,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": -0.16622374951839447,
      "rewards/margins": 0.5306353569030762,
      "rewards/rejected": -0.6968590617179871,
      "step": 150
    },
    {
      "epoch": 5.40084388185654,
      "grad_norm": 4.739672660827637,
      "learning_rate": 4.54355584639723e-06,
      "logits/chosen": -2.426323413848877,
      "logits/rejected": -2.4180703163146973,
      "logps/chosen": -89.33406066894531,
      "logps/rejected": -86.6529541015625,
      "loss": 0.4472,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": -0.2703530192375183,
      "rewards/margins": 0.723961353302002,
      "rewards/rejected": -0.9943143725395203,
      "step": 160
    },
    {
      "epoch": 5.738396624472574,
      "grad_norm": 5.088452339172363,
      "learning_rate": 4.45323784230908e-06,
      "logits/chosen": -2.371295690536499,
      "logits/rejected": -2.4007980823516846,
      "logps/chosen": -74.7086410522461,
      "logps/rejected": -110.96791076660156,
      "loss": 0.4337,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": -0.19489821791648865,
      "rewards/margins": 0.838965117931366,
      "rewards/rejected": -1.0338633060455322,
      "step": 170
    },
    {
      "epoch": 6.075949367088608,
      "grad_norm": 4.700001239776611,
      "learning_rate": 4.355847178277025e-06,
      "logits/chosen": -2.408874750137329,
      "logits/rejected": -2.433443546295166,
      "logps/chosen": -73.986083984375,
      "logps/rejected": -92.54002380371094,
      "loss": 0.4201,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -0.3620097041130066,
      "rewards/margins": 0.8090314865112305,
      "rewards/rejected": -1.1710412502288818,
      "step": 180
    },
    {
      "epoch": 6.413502109704641,
      "grad_norm": 5.745143890380859,
      "learning_rate": 4.2517365051833564e-06,
      "logits/chosen": -2.413661479949951,
      "logits/rejected": -2.4319045543670654,
      "logps/chosen": -61.93586349487305,
      "logps/rejected": -87.93816375732422,
      "loss": 0.3822,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -0.3493840992450714,
      "rewards/margins": 1.0286495685577393,
      "rewards/rejected": -1.3780337572097778,
      "step": 190
    },
    {
      "epoch": 6.751054852320675,
      "grad_norm": 14.167346000671387,
      "learning_rate": 4.141282807014034e-06,
      "logits/chosen": -2.376776695251465,
      "logits/rejected": -2.386939287185669,
      "logps/chosen": -78.03498840332031,
      "logps/rejected": -103.80584716796875,
      "loss": 0.3533,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -0.5522706508636475,
      "rewards/margins": 1.1880146265029907,
      "rewards/rejected": -1.7402851581573486,
      "step": 200
    },
    {
      "epoch": 7.0886075949367084,
      "grad_norm": 5.238346576690674,
      "learning_rate": 4.024886035802432e-06,
      "logits/chosen": -2.399014711380005,
      "logits/rejected": -2.4000418186187744,
      "logps/chosen": -91.64801025390625,
      "logps/rejected": -88.77143096923828,
      "loss": 0.3512,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": -0.6410180330276489,
      "rewards/margins": 1.2458090782165527,
      "rewards/rejected": -1.8868271112442017,
      "step": 210
    },
    {
      "epoch": 7.4261603375527425,
      "grad_norm": 4.895415306091309,
      "learning_rate": 3.9029676634059565e-06,
      "logits/chosen": -2.38824725151062,
      "logits/rejected": -2.390937328338623,
      "logps/chosen": -76.24591064453125,
      "logps/rejected": -109.47352600097656,
      "loss": 0.3182,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": -0.6207951307296753,
      "rewards/margins": 1.5345582962036133,
      "rewards/rejected": -2.155353546142578,
      "step": 220
    },
    {
      "epoch": 7.763713080168777,
      "grad_norm": 7.413693904876709,
      "learning_rate": 3.7759691553595214e-06,
      "logits/chosen": -2.399376153945923,
      "logits/rejected": -2.3986117839813232,
      "logps/chosen": -90.97004699707031,
      "logps/rejected": -98.67308807373047,
      "loss": 0.2989,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": -0.7282214760780334,
      "rewards/margins": 1.5992071628570557,
      "rewards/rejected": -2.3274283409118652,
      "step": 230
    },
    {
      "epoch": 8.10126582278481,
      "grad_norm": 4.8047990798950195,
      "learning_rate": 3.6443503723320837e-06,
      "logits/chosen": -2.3746752738952637,
      "logits/rejected": -2.3904380798339844,
      "logps/chosen": -81.03557586669922,
      "logps/rejected": -98.62934112548828,
      "loss": 0.2891,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": -1.0007418394088745,
      "rewards/margins": 1.6062211990356445,
      "rewards/rejected": -2.6069629192352295,
      "step": 240
    },
    {
      "epoch": 8.438818565400844,
      "grad_norm": 5.075074672698975,
      "learning_rate": 3.508587904974522e-06,
      "logits/chosen": -2.3420817852020264,
      "logits/rejected": -2.3538601398468018,
      "logps/chosen": -86.0930404663086,
      "logps/rejected": -113.33358001708984,
      "loss": 0.2451,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": -0.8170707821846008,
      "rewards/margins": 1.765887975692749,
      "rewards/rejected": -2.582958698272705,
      "step": 250
    },
    {
      "epoch": 8.776371308016877,
      "grad_norm": 5.683614730834961,
      "learning_rate": 3.3691733481883693e-06,
      "logits/chosen": -2.3793487548828125,
      "logits/rejected": -2.376232862472534,
      "logps/chosen": -82.44889831542969,
      "logps/rejected": -100.43474578857422,
      "loss": 0.2561,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": -1.052356481552124,
      "rewards/margins": 1.7894868850708008,
      "rewards/rejected": -2.841843605041504,
      "step": 260
    },
    {
      "epoch": 9.113924050632912,
      "grad_norm": 5.043191909790039,
      "learning_rate": 3.226611521064278e-06,
      "logits/chosen": -2.3715972900390625,
      "logits/rejected": -2.3901712894439697,
      "logps/chosen": -78.08219909667969,
      "logps/rejected": -101.30404663085938,
      "loss": 0.2135,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -1.124599575996399,
      "rewards/margins": 1.955810546875,
      "rewards/rejected": -3.0804100036621094,
      "step": 270
    },
    {
      "epoch": 9.451476793248945,
      "grad_norm": 4.911313533782959,
      "learning_rate": 3.0814186389357765e-06,
      "logits/chosen": -2.365330696105957,
      "logits/rejected": -2.3815040588378906,
      "logps/chosen": -74.75053405761719,
      "logps/rejected": -96.59932708740234,
      "loss": 0.2016,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": -1.2124760150909424,
      "rewards/margins": 2.1574602127075195,
      "rewards/rejected": -3.369936466217041,
      "step": 280
    },
    {
      "epoch": 9.789029535864978,
      "grad_norm": 5.809771537780762,
      "learning_rate": 2.9341204441673267e-06,
      "logits/chosen": -2.349513292312622,
      "logits/rejected": -2.362490653991699,
      "logps/chosen": -82.00188446044922,
      "logps/rejected": -115.06053161621094,
      "loss": 0.1873,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": -1.5614944696426392,
      "rewards/margins": 2.464742660522461,
      "rewards/rejected": -4.0262370109558105,
      "step": 290
    },
    {
      "epoch": 10.126582278481013,
      "grad_norm": 5.956116676330566,
      "learning_rate": 2.785250302445062e-06,
      "logits/chosen": -2.3490607738494873,
      "logits/rejected": -2.3738341331481934,
      "logps/chosen": -96.7169418334961,
      "logps/rejected": -125.81837463378906,
      "loss": 0.1794,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": -1.4949434995651245,
      "rewards/margins": 2.48805832862854,
      "rewards/rejected": -3.983001708984375,
      "step": 300
    }
  ],
  "logging_steps": 10,
  "max_steps": 580,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 20,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.1106058590140498e+18,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}
|
|