{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.64,
  "eval_steps": 500,
  "global_step": 80,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "completion_length": 192.625,
      "epoch": 0.008,
      "grad_norm": 10.768404960632324,
      "kl": 0.0009242900705430657,
      "learning_rate": 5.000000000000001e-07,
      "loss": 0.0,
      "reward": 0.2329830303788185,
      "reward_std": 0.14905179431661963,
      "rewards/<lambda>": 0.2329830303788185,
      "step": 1
    },
    {
      "completion_length": 226.25,
      "epoch": 0.016,
      "grad_norm": 0.5803981423377991,
      "kl": 0.00016602075265836902,
      "learning_rate": 1.0000000000000002e-06,
      "loss": 0.0,
      "reward": 0.20506704226136208,
      "reward_std": 0.07600272889249027,
      "rewards/<lambda>": 0.20506704226136208,
      "step": 2
    },
    {
      "completion_length": 195.5,
      "epoch": 0.024,
      "grad_norm": 1.928178071975708,
      "kl": 0.0007406917939078994,
      "learning_rate": 1.5e-06,
      "loss": 0.0,
      "reward": 0.24738124385476112,
      "reward_std": 0.12009324785321951,
      "rewards/<lambda>": 0.24738124385476112,
      "step": 3
    },
    {
      "completion_length": 165.25,
      "epoch": 0.032,
      "grad_norm": 4.732048034667969,
      "kl": 0.0003298191677458817,
      "learning_rate": 2.0000000000000003e-06,
      "loss": 0.0,
      "reward": 0.20593957975506783,
      "reward_std": 0.13085306528955698,
      "rewards/<lambda>": 0.20593957975506783,
      "step": 4
    },
    {
      "completion_length": 229.25,
      "epoch": 0.04,
      "grad_norm": 0.6906648278236389,
      "kl": 0.00029802451899740845,
      "learning_rate": 2.5e-06,
      "loss": 0.0,
      "reward": 0.2962687499821186,
      "reward_std": 0.11282773199491203,
      "rewards/<lambda>": 0.2962687499821186,
      "step": 5
    },
    {
      "completion_length": 235.75,
      "epoch": 0.048,
      "grad_norm": 1.7818254232406616,
      "kl": 0.0009266494062103448,
      "learning_rate": 3e-06,
      "loss": 0.0,
      "reward": 0.17379014939069748,
      "reward_std": 0.03908907831646502,
      "rewards/<lambda>": 0.17379014939069748,
      "step": 6
    },
    {
      "completion_length": 141.25,
      "epoch": 0.056,
      "grad_norm": 5.83347225189209,
      "kl": 0.0010952446609735489,
      "learning_rate": 3.5e-06,
      "loss": 0.0,
      "reward": 0.18913621827960014,
      "reward_std": 0.13922751136124134,
      "rewards/<lambda>": 0.18913621827960014,
      "step": 7
    },
    {
      "completion_length": 193.125,
      "epoch": 0.064,
      "grad_norm": 4.481875896453857,
      "kl": 0.002368737303186208,
      "learning_rate": 4.000000000000001e-06,
      "loss": 0.0001,
      "reward": 0.15933124721050262,
      "reward_std": 0.033755510827177204,
      "rewards/<lambda>": 0.15933124721050262,
      "step": 8
    },
    {
      "completion_length": 256.0,
      "epoch": 0.072,
      "grad_norm": 0.335154265165329,
      "kl": 0.00025119713973253965,
      "learning_rate": 4.5e-06,
      "loss": 0.0,
      "reward": 0.36442025750875473,
      "reward_std": 0.21805795654654503,
      "rewards/<lambda>": 0.36442025750875473,
      "step": 9
    },
    {
      "completion_length": 256.0,
      "epoch": 0.08,
      "grad_norm": 0.5085432529449463,
      "kl": 0.00022752030054107308,
      "learning_rate": 5e-06,
      "loss": 0.0,
      "reward": 0.26649028062820435,
      "reward_std": 0.05319603928364813,
      "rewards/<lambda>": 0.26649028062820435,
      "step": 10
    },
    {
      "completion_length": 194.875,
      "epoch": 0.088,
      "grad_norm": 5.396770000457764,
      "kl": 0.0003068408841500059,
      "learning_rate": 4.944444444444445e-06,
      "loss": 0.0,
      "reward": 0.1397937536239624,
      "reward_std": 0.049082053592428565,
      "rewards/<lambda>": 0.1397937536239624,
      "step": 11
    },
    {
      "completion_length": 228.25,
      "epoch": 0.096,
      "grad_norm": 0.9890621900558472,
      "kl": 0.0005189948133192956,
      "learning_rate": 4.888888888888889e-06,
      "loss": 0.0,
      "reward": 0.3081398792564869,
      "reward_std": 0.21328446036204696,
      "rewards/<lambda>": 0.3081398792564869,
      "step": 12
    },
    {
      "completion_length": 78.125,
      "epoch": 0.104,
      "grad_norm": 6.991876602172852,
      "kl": 0.0028572251394507475,
      "learning_rate": 4.833333333333333e-06,
      "loss": 0.0001,
      "reward": 0.16984067671000957,
      "reward_std": 0.09841607791895512,
      "rewards/<lambda>": 0.16984067671000957,
      "step": 13
    },
    {
      "completion_length": 224.5,
      "epoch": 0.112,
      "grad_norm": 2.380413770675659,
      "kl": 0.00024483678862452507,
      "learning_rate": 4.777777777777778e-06,
      "loss": 0.0,
      "reward": 0.3446955271065235,
      "reward_std": 0.07521216722670943,
      "rewards/<lambda>": 0.3446955271065235,
      "step": 14
    },
    {
      "completion_length": 198.25,
      "epoch": 0.12,
      "grad_norm": 0.9101364612579346,
      "kl": 0.000599912746110931,
      "learning_rate": 4.722222222222222e-06,
      "loss": 0.0,
      "reward": 0.3019299991428852,
      "reward_std": 0.11653121118433774,
      "rewards/<lambda>": 0.3019299991428852,
      "step": 15
    },
    {
      "completion_length": 256.0,
      "epoch": 0.128,
      "grad_norm": 0.3337076008319855,
      "kl": 0.00042263357318006456,
      "learning_rate": 4.666666666666667e-06,
      "loss": 0.0,
      "reward": 0.22962501272559166,
      "reward_std": 0.12568823376204818,
      "rewards/<lambda>": 0.22962501272559166,
      "step": 16
    },
    {
      "completion_length": 194.5,
      "epoch": 0.136,
      "grad_norm": 2.9452309608459473,
      "kl": 0.0003321969779790379,
      "learning_rate": 4.611111111111112e-06,
      "loss": 0.0,
      "reward": 0.2720954604446888,
      "reward_std": 0.11709045059978962,
      "rewards/<lambda>": 0.2720954604446888,
      "step": 17
    },
    {
      "completion_length": 138.0,
      "epoch": 0.144,
      "grad_norm": 6.265693664550781,
      "kl": 0.008480061980662867,
      "learning_rate": 4.555555555555556e-06,
      "loss": 0.0003,
      "reward": 0.21834762021899223,
      "reward_std": 0.15078714862465858,
      "rewards/<lambda>": 0.21834762021899223,
      "step": 18
    },
    {
      "completion_length": 207.25,
      "epoch": 0.152,
      "grad_norm": 1.309558391571045,
      "kl": 0.0008433158218394965,
      "learning_rate": 4.5e-06,
      "loss": 0.0,
      "reward": 0.2984776981174946,
      "reward_std": 0.10264036408625543,
      "rewards/<lambda>": 0.2984776981174946,
      "step": 19
    },
    {
      "completion_length": 198.0,
      "epoch": 0.16,
      "grad_norm": 3.6814424991607666,
      "kl": 0.0018292521999683231,
      "learning_rate": 4.444444444444444e-06,
      "loss": 0.0001,
      "reward": 0.21348833292722702,
      "reward_std": 0.032345420273486525,
      "rewards/<lambda>": 0.21348833292722702,
      "step": 20
    },
    {
      "completion_length": 253.0,
      "epoch": 0.168,
      "grad_norm": 0.3527783751487732,
      "kl": 0.000525618303072406,
      "learning_rate": 4.388888888888889e-06,
      "loss": 0.0,
      "reward": 0.32108124345541,
      "reward_std": 0.13784162979573011,
      "rewards/<lambda>": 0.32108124345541,
      "step": 21
    },
    {
      "completion_length": 256.0,
      "epoch": 0.176,
      "grad_norm": 0.34396272897720337,
      "kl": 0.00019161385716870427,
      "learning_rate": 4.333333333333334e-06,
      "loss": 0.0,
      "reward": 0.2716740742325783,
      "reward_std": 0.0921019627712667,
      "rewards/<lambda>": 0.2716740742325783,
      "step": 22
    },
    {
      "completion_length": 256.0,
      "epoch": 0.184,
      "grad_norm": 0.38449183106422424,
      "kl": 0.0004789537051692605,
      "learning_rate": 4.277777777777778e-06,
      "loss": 0.0,
      "reward": 0.3738388866186142,
      "reward_std": 0.2010854547843337,
      "rewards/<lambda>": 0.3738388866186142,
      "step": 23
    },
    {
      "completion_length": 256.0,
      "epoch": 0.192,
      "grad_norm": 0.35797202587127686,
      "kl": 0.0003909100778400898,
      "learning_rate": 4.222222222222223e-06,
      "loss": 0.0,
      "reward": 0.3206794075667858,
      "reward_std": 0.20672890054993331,
      "rewards/<lambda>": 0.3206794075667858,
      "step": 24
    },
    {
      "completion_length": 256.0,
      "epoch": 0.2,
      "grad_norm": 0.22144320607185364,
      "kl": 0.0001992348989006132,
      "learning_rate": 4.166666666666667e-06,
      "loss": 0.0,
      "reward": 0.1902062501758337,
      "reward_std": 0.07741935132071376,
      "rewards/<lambda>": 0.1902062501758337,
      "step": 25
    },
    {
      "completion_length": 194.0,
      "epoch": 0.208,
      "grad_norm": 7.687119007110596,
      "kl": 0.0075769436080008745,
      "learning_rate": 4.111111111111111e-06,
      "loss": 0.0003,
      "reward": 0.27252499759197235,
      "reward_std": 0.11423310358077288,
      "rewards/<lambda>": 0.27252499759197235,
      "step": 26
    },
    {
      "completion_length": 226.75,
      "epoch": 0.216,
      "grad_norm": 4.88610315322876,
      "kl": 0.005719653330743313,
      "learning_rate": 4.055555555555556e-06,
      "loss": 0.0002,
      "reward": 0.28929195925593376,
      "reward_std": 0.2355259140022099,
      "rewards/<lambda>": 0.28929195925593376,
      "step": 27
    },
    {
      "completion_length": 163.875,
      "epoch": 0.224,
      "grad_norm": 0.8618195056915283,
      "kl": 0.001144724345067516,
      "learning_rate": 4.000000000000001e-06,
      "loss": 0.0,
      "reward": 0.2521233316510916,
      "reward_std": 0.18312063068151474,
      "rewards/<lambda>": 0.2521233316510916,
      "step": 28
    },
    {
      "completion_length": 193.375,
      "epoch": 0.232,
      "grad_norm": 5.040801048278809,
      "kl": 0.003916982212103903,
      "learning_rate": 3.944444444444445e-06,
      "loss": 0.0002,
      "reward": 0.2782749943435192,
      "reward_std": 0.09372699866071343,
      "rewards/<lambda>": 0.2782749943435192,
      "step": 29
    },
    {
      "completion_length": 203.5,
      "epoch": 0.24,
      "grad_norm": 1.5515941381454468,
      "kl": 0.0015425277961185202,
      "learning_rate": 3.88888888888889e-06,
      "loss": 0.0001,
      "reward": 0.327726773917675,
      "reward_std": 0.13597915810532868,
      "rewards/<lambda>": 0.327726773917675,
      "step": 30
    },
    {
      "completion_length": 224.375,
      "epoch": 0.248,
      "grad_norm": 6.099854946136475,
      "kl": 0.0065212192421313375,
      "learning_rate": 3.833333333333334e-06,
      "loss": 0.0003,
      "reward": 0.26245963387191296,
      "reward_std": 0.23274014610797167,
      "rewards/<lambda>": 0.26245963387191296,
      "step": 31
    },
    {
      "completion_length": 225.5,
      "epoch": 0.256,
      "grad_norm": 0.9755590558052063,
      "kl": 0.00134121990413405,
      "learning_rate": 3.777777777777778e-06,
      "loss": 0.0001,
      "reward": 0.2514333389699459,
      "reward_std": 0.07464690878987312,
      "rewards/<lambda>": 0.2514333389699459,
      "step": 32
    },
    {
      "completion_length": 228.875,
      "epoch": 0.264,
      "grad_norm": 0.5367397665977478,
      "kl": 0.0005917141388636082,
      "learning_rate": 3.7222222222222225e-06,
      "loss": 0.0,
      "reward": 0.3105039447546005,
      "reward_std": 0.029746492160484195,
      "rewards/<lambda>": 0.3105039447546005,
      "step": 33
    },
    {
      "completion_length": 194.5,
      "epoch": 0.272,
      "grad_norm": 3.941100597381592,
      "kl": 0.010093522083479911,
      "learning_rate": 3.6666666666666666e-06,
      "loss": 0.0004,
      "reward": 0.36005016416311264,
      "reward_std": 0.20752966683357954,
      "rewards/<lambda>": 0.36005016416311264,
      "step": 34
    },
    {
      "completion_length": 162.125,
      "epoch": 0.28,
      "grad_norm": 2.5163803100585938,
      "kl": 0.015705711644841358,
      "learning_rate": 3.6111111111111115e-06,
      "loss": 0.0006,
      "reward": 0.2547324914485216,
      "reward_std": 0.13099860399961472,
      "rewards/<lambda>": 0.2547324914485216,
      "step": 35
    },
    {
      "completion_length": 256.0,
      "epoch": 0.288,
      "grad_norm": 0.40727710723876953,
      "kl": 0.00078230255167,
      "learning_rate": 3.555555555555556e-06,
      "loss": 0.0,
      "reward": 0.32325625233352184,
      "reward_std": 0.1363072171807289,
      "rewards/<lambda>": 0.32325625233352184,
      "step": 36
    },
    {
      "completion_length": 196.75,
      "epoch": 0.296,
      "grad_norm": 3.323727607727051,
      "kl": 0.003316763584734872,
      "learning_rate": 3.5e-06,
      "loss": 0.0001,
      "reward": 0.21428124606609344,
      "reward_std": 0.10128421382978559,
      "rewards/<lambda>": 0.21428124606609344,
      "step": 37
    },
    {
      "completion_length": 224.75,
      "epoch": 0.304,
      "grad_norm": 4.83845853805542,
      "kl": 0.005240194499492645,
      "learning_rate": 3.444444444444445e-06,
      "loss": 0.0002,
      "reward": 0.21497500129044056,
      "reward_std": 0.15779088297858834,
      "rewards/<lambda>": 0.21497500129044056,
      "step": 38
    },
    {
      "completion_length": 230.125,
      "epoch": 0.312,
      "grad_norm": 0.7527925372123718,
      "kl": 0.0013605846324935555,
      "learning_rate": 3.3888888888888893e-06,
      "loss": 0.0001,
      "reward": 0.35972169786691666,
      "reward_std": 0.2259513009339571,
      "rewards/<lambda>": 0.35972169786691666,
      "step": 39
    },
    {
      "completion_length": 256.0,
      "epoch": 0.32,
      "grad_norm": 0.7770674824714661,
      "kl": 0.0018776137148961425,
      "learning_rate": 3.3333333333333333e-06,
      "loss": 0.0001,
      "reward": 0.3781879246234894,
      "reward_std": 0.17891038954257965,
      "rewards/<lambda>": 0.3781879246234894,
      "step": 40
    },
    {
      "completion_length": 198.875,
      "epoch": 0.328,
      "grad_norm": 3.5453429222106934,
      "kl": 0.018746574758552015,
      "learning_rate": 3.277777777777778e-06,
      "loss": 0.0007,
      "reward": 0.2417041640728712,
      "reward_std": 0.12431526277214289,
      "rewards/<lambda>": 0.2417041640728712,
      "step": 41
    },
    {
      "completion_length": 193.0,
      "epoch": 0.336,
      "grad_norm": 0.19420652091503143,
      "kl": 0.0013477305474225432,
      "learning_rate": 3.2222222222222227e-06,
      "loss": 0.0001,
      "reward": 0.2721815500408411,
      "reward_std": 0.06904055853374302,
      "rewards/<lambda>": 0.2721815500408411,
      "step": 42
    },
    {
      "completion_length": 256.0,
      "epoch": 0.344,
      "grad_norm": 0.4106781780719757,
      "kl": 0.0015028241614345461,
      "learning_rate": 3.1666666666666667e-06,
      "loss": 0.0001,
      "reward": 0.35378505289554596,
      "reward_std": 0.11193534079939127,
      "rewards/<lambda>": 0.35378505289554596,
      "step": 43
    },
    {
      "completion_length": 224.75,
      "epoch": 0.352,
      "grad_norm": 2.137603521347046,
      "kl": 0.006690101174172014,
      "learning_rate": 3.1111111111111116e-06,
      "loss": 0.0003,
      "reward": 0.2571968212723732,
      "reward_std": 0.152589141856879,
      "rewards/<lambda>": 0.2571968212723732,
      "step": 44
    },
    {
      "completion_length": 224.375,
      "epoch": 0.36,
      "grad_norm": 6.381210803985596,
      "kl": 0.02422999477130361,
      "learning_rate": 3.055555555555556e-06,
      "loss": 0.001,
      "reward": 0.21905267424881458,
      "reward_std": 0.04801633581519127,
      "rewards/<lambda>": 0.21905267424881458,
      "step": 45
    },
    {
      "completion_length": 210.5,
      "epoch": 0.368,
      "grad_norm": 0.7354749441146851,
      "kl": 0.002457081514876336,
      "learning_rate": 3e-06,
      "loss": 0.0001,
      "reward": 0.2856549359858036,
      "reward_std": 0.19331230921670794,
      "rewards/<lambda>": 0.2856549359858036,
      "step": 46
    },
    {
      "completion_length": 212.0,
      "epoch": 0.376,
      "grad_norm": 1.6879031658172607,
      "kl": 0.01583882374688983,
      "learning_rate": 2.944444444444445e-06,
      "loss": 0.0006,
      "reward": 0.19962321408092976,
      "reward_std": 0.06973587442189455,
      "rewards/<lambda>": 0.19962321408092976,
      "step": 47
    },
    {
      "completion_length": 256.0,
      "epoch": 0.384,
      "grad_norm": 0.3729376792907715,
      "kl": 0.0011020679667126387,
      "learning_rate": 2.888888888888889e-06,
      "loss": 0.0,
      "reward": 0.2931182198226452,
      "reward_std": 0.11643674317747355,
      "rewards/<lambda>": 0.2931182198226452,
      "step": 48
    },
    {
      "completion_length": 224.75,
      "epoch": 0.392,
      "grad_norm": 2.862750768661499,
      "kl": 0.0038104150735307485,
      "learning_rate": 2.8333333333333335e-06,
      "loss": 0.0002,
      "reward": 0.3128395900130272,
      "reward_std": 0.03162912919651717,
      "rewards/<lambda>": 0.3128395900130272,
      "step": 49
    },
    {
      "completion_length": 193.0,
      "epoch": 0.4,
      "grad_norm": 6.374263286590576,
      "kl": 0.016765353531809524,
      "learning_rate": 2.7777777777777783e-06,
      "loss": 0.0007,
      "reward": 0.1548837460577488,
      "reward_std": 0.040469489293172956,
      "rewards/<lambda>": 0.1548837460577488,
      "step": 50
    },
    {
      "completion_length": 256.0,
      "epoch": 0.408,
      "grad_norm": 0.4507330358028412,
      "kl": 0.003867686173180118,
      "learning_rate": 2.7222222222222224e-06,
      "loss": 0.0002,
      "reward": 0.3406732901930809,
      "reward_std": 0.2136322446167469,
      "rewards/<lambda>": 0.3406732901930809,
      "step": 51
    },
    {
      "completion_length": 256.0,
      "epoch": 0.416,
      "grad_norm": 0.393788605928421,
      "kl": 0.0023176257964223623,
      "learning_rate": 2.666666666666667e-06,
      "loss": 0.0001,
      "reward": 0.3112866096198559,
      "reward_std": 0.1441778119187802,
      "rewards/<lambda>": 0.3112866096198559,
      "step": 52
    },
    {
      "completion_length": 256.0,
      "epoch": 0.424,
      "grad_norm": 0.31628501415252686,
      "kl": 0.0009135059954132885,
      "learning_rate": 2.6111111111111113e-06,
      "loss": 0.0,
      "reward": 0.29374171793460846,
      "reward_std": 0.08922838000580668,
      "rewards/<lambda>": 0.29374171793460846,
      "step": 53
    },
    {
      "completion_length": 224.625,
      "epoch": 0.432,
      "grad_norm": 3.522197723388672,
      "kl": 0.01970086299115792,
      "learning_rate": 2.5555555555555557e-06,
      "loss": 0.0008,
      "reward": 0.3511999882757664,
      "reward_std": 0.11985459551215172,
      "rewards/<lambda>": 0.3511999882757664,
      "step": 54
    },
    {
      "completion_length": 256.0,
      "epoch": 0.44,
      "grad_norm": 0.3412955105304718,
      "kl": 0.0014321491762530059,
      "learning_rate": 2.5e-06,
      "loss": 0.0001,
      "reward": 0.20944999530911446,
      "reward_std": 0.07488261186517775,
      "rewards/<lambda>": 0.20944999530911446,
      "step": 55
    },
    {
      "completion_length": 256.0,
      "epoch": 0.448,
      "grad_norm": 0.5333781242370605,
      "kl": 0.002151996799511835,
      "learning_rate": 2.4444444444444447e-06,
      "loss": 0.0001,
      "reward": 0.2980365641415119,
      "reward_std": 0.06948962598107755,
      "rewards/<lambda>": 0.2980365641415119,
      "step": 56
    },
    {
      "completion_length": 228.875,
      "epoch": 0.456,
      "grad_norm": 0.7872463464736938,
      "kl": 0.0018841840501409024,
      "learning_rate": 2.388888888888889e-06,
      "loss": 0.0001,
      "reward": 0.32610999420285225,
      "reward_std": 0.11902374215424061,
      "rewards/<lambda>": 0.32610999420285225,
      "step": 57
    },
    {
      "completion_length": 256.0,
      "epoch": 0.464,
      "grad_norm": 0.3681983947753906,
      "kl": 0.0016814242699183524,
      "learning_rate": 2.3333333333333336e-06,
      "loss": 0.0001,
      "reward": 0.4004254937171936,
      "reward_std": 0.13640513457357883,
      "rewards/<lambda>": 0.4004254937171936,
      "step": 58
    },
    {
      "completion_length": 256.0,
      "epoch": 0.472,
      "grad_norm": 0.4487823247909546,
      "kl": 0.0020352726278360933,
      "learning_rate": 2.277777777777778e-06,
      "loss": 0.0001,
      "reward": 0.3335774838924408,
      "reward_std": 0.14998088730499148,
      "rewards/<lambda>": 0.3335774838924408,
      "step": 59
    },
    {
      "completion_length": 238.0,
      "epoch": 0.48,
      "grad_norm": 1.0211271047592163,
      "kl": 0.0027659484185278416,
      "learning_rate": 2.222222222222222e-06,
      "loss": 0.0001,
      "reward": 0.32939722016453743,
      "reward_std": 0.16310204262845218,
      "rewards/<lambda>": 0.32939722016453743,
      "step": 60
    },
    {
      "completion_length": 225.25,
      "epoch": 0.488,
      "grad_norm": 2.5553455352783203,
      "kl": 0.02496814646292478,
      "learning_rate": 2.166666666666667e-06,
      "loss": 0.001,
      "reward": 0.3713379241526127,
      "reward_std": 0.0991770289838314,
      "rewards/<lambda>": 0.3713379241526127,
      "step": 61
    },
    {
      "completion_length": 224.875,
      "epoch": 0.496,
      "grad_norm": 2.7722630500793457,
      "kl": 0.015765567746711895,
      "learning_rate": 2.1111111111111114e-06,
      "loss": 0.0006,
      "reward": 0.31797249242663383,
      "reward_std": 0.12324517406523228,
      "rewards/<lambda>": 0.31797249242663383,
      "step": 62
    },
    {
      "completion_length": 256.0,
      "epoch": 0.504,
      "grad_norm": 2.1103341579437256,
      "kl": 0.02794836735120043,
      "learning_rate": 2.0555555555555555e-06,
      "loss": 0.0011,
      "reward": 0.360204316675663,
      "reward_std": 0.14990051230415702,
      "rewards/<lambda>": 0.360204316675663,
      "step": 63
    },
    {
      "completion_length": 241.125,
      "epoch": 0.512,
      "grad_norm": 0.31790691614151,
      "kl": 0.0019303768058307469,
      "learning_rate": 2.0000000000000003e-06,
      "loss": 0.0001,
      "reward": 0.3009055554866791,
      "reward_std": 0.060819050297141075,
      "rewards/<lambda>": 0.3009055554866791,
      "step": 64
    },
    {
      "completion_length": 256.0,
      "epoch": 0.52,
      "grad_norm": 1.23534095287323,
      "kl": 0.0015001413121353835,
      "learning_rate": 1.944444444444445e-06,
      "loss": 0.0001,
      "reward": 0.382674403488636,
      "reward_std": 0.02178561605978757,
      "rewards/<lambda>": 0.382674403488636,
      "step": 65
    },
    {
      "completion_length": 194.25,
      "epoch": 0.528,
      "grad_norm": 4.171910762786865,
      "kl": 0.1054877576243598,
      "learning_rate": 1.888888888888889e-06,
      "loss": 0.0042,
      "reward": 0.2380066979676485,
      "reward_std": 0.13841149117797613,
      "rewards/<lambda>": 0.2380066979676485,
      "step": 66
    },
    {
      "completion_length": 226.625,
      "epoch": 0.536,
      "grad_norm": 4.038355350494385,
      "kl": 0.015106877748621628,
      "learning_rate": 1.8333333333333333e-06,
      "loss": 0.0006,
      "reward": 0.20904473587870598,
      "reward_std": 0.12156189302913845,
      "rewards/<lambda>": 0.20904473587870598,
      "step": 67
    },
    {
      "completion_length": 256.0,
      "epoch": 0.544,
      "grad_norm": 0.48601511120796204,
      "kl": 0.0016828322550281882,
      "learning_rate": 1.777777777777778e-06,
      "loss": 0.0001,
      "reward": 0.2340985517948866,
      "reward_std": 0.1624027146026492,
      "rewards/<lambda>": 0.2340985517948866,
      "step": 68
    },
    {
      "completion_length": 256.0,
      "epoch": 0.552,
      "grad_norm": 0.3185882270336151,
      "kl": 0.0010384579363744706,
      "learning_rate": 1.7222222222222224e-06,
      "loss": 0.0,
      "reward": 0.33658571913838387,
      "reward_std": 0.13875456689856946,
      "rewards/<lambda>": 0.33658571913838387,
      "step": 69
    },
    {
      "completion_length": 234.375,
      "epoch": 0.56,
      "grad_norm": 0.30494388937950134,
      "kl": 0.003805482352618128,
      "learning_rate": 1.6666666666666667e-06,
      "loss": 0.0002,
      "reward": 0.2663565091788769,
      "reward_std": 0.05944432225078344,
      "rewards/<lambda>": 0.2663565091788769,
      "step": 70
    },
    {
      "completion_length": 256.0,
      "epoch": 0.568,
      "grad_norm": 0.3982398211956024,
      "kl": 0.0018333147745579481,
      "learning_rate": 1.6111111111111113e-06,
      "loss": 0.0001,
      "reward": 0.3072025068104267,
      "reward_std": 0.12600289471447468,
      "rewards/<lambda>": 0.3072025068104267,
      "step": 71
    },
    {
      "completion_length": 256.0,
      "epoch": 0.576,
      "grad_norm": 0.5156778693199158,
      "kl": 0.004463265300728381,
      "learning_rate": 1.5555555555555558e-06,
      "loss": 0.0002,
      "reward": 0.37232405692338943,
      "reward_std": 0.1498066179046873,
      "rewards/<lambda>": 0.37232405692338943,
      "step": 72
    },
    {
      "completion_length": 256.0,
      "epoch": 0.584,
      "grad_norm": 0.48612210154533386,
      "kl": 0.0026613644731696695,
      "learning_rate": 1.5e-06,
      "loss": 0.0001,
      "reward": 0.3674117997288704,
      "reward_std": 0.18603881541639566,
      "rewards/<lambda>": 0.3674117997288704,
      "step": 73
    },
    {
      "completion_length": 224.375,
      "epoch": 0.592,
      "grad_norm": 18.72035789489746,
      "kl": 0.017221870803041384,
      "learning_rate": 1.4444444444444445e-06,
      "loss": 0.0007,
      "reward": 0.24141032621264458,
      "reward_std": 0.029294715961441398,
      "rewards/<lambda>": 0.24141032621264458,
      "step": 74
    },
    {
      "completion_length": 224.375,
      "epoch": 0.6,
      "grad_norm": 5.269930362701416,
      "kl": 0.10056470631388947,
      "learning_rate": 1.3888888888888892e-06,
      "loss": 0.004,
      "reward": 0.24822638183832169,
      "reward_std": 0.11068728659301996,
      "rewards/<lambda>": 0.24822638183832169,
      "step": 75
    },
    {
      "completion_length": 256.0,
      "epoch": 0.608,
      "grad_norm": 0.8006964921951294,
      "kl": 0.001940697489771992,
      "learning_rate": 1.3333333333333334e-06,
      "loss": 0.0001,
      "reward": 0.319825004786253,
      "reward_std": 0.023652719799429178,
      "rewards/<lambda>": 0.319825004786253,
      "step": 76
    },
    {
      "completion_length": 256.0,
      "epoch": 0.616,
      "grad_norm": 0.3913479745388031,
      "kl": 0.0020156825485173613,
      "learning_rate": 1.2777777777777779e-06,
      "loss": 0.0001,
      "reward": 0.40122249722480774,
      "reward_std": 0.15432959236204624,
      "rewards/<lambda>": 0.40122249722480774,
      "step": 77
    },
    {
      "completion_length": 226.125,
      "epoch": 0.624,
      "grad_norm": 1.298126220703125,
      "kl": 0.007474064303096384,
      "learning_rate": 1.2222222222222223e-06,
      "loss": 0.0003,
      "reward": 0.32424041628837585,
      "reward_std": 0.1945292092859745,
      "rewards/<lambda>": 0.32424041628837585,
      "step": 78
    },
    {
      "completion_length": 256.0,
      "epoch": 0.632,
      "grad_norm": 0.1872844249010086,
      "kl": 0.0026928953011520207,
      "learning_rate": 1.1666666666666668e-06,
      "loss": 0.0001,
      "reward": 0.1980762518942356,
      "reward_std": 0.06945732794702053,
      "rewards/<lambda>": 0.1980762518942356,
      "step": 79
    },
    {
      "completion_length": 256.0,
      "epoch": 0.64,
      "grad_norm": 0.3780773878097534,
      "kl": 0.0014608338242396712,
      "learning_rate": 1.111111111111111e-06,
      "loss": 0.0001,
      "reward": 0.36322637647390366,
      "reward_std": 0.1668244955362752,
      "rewards/<lambda>": 0.36322637647390366,
      "step": 80
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 20,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}