{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.32,
  "eval_steps": 500,
  "global_step": 40,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "completion_length": 192.625,
      "epoch": 0.008,
      "grad_norm": 10.768404960632324,
      "kl": 0.0009242900705430657,
      "learning_rate": 5.000000000000001e-07,
      "loss": 0.0,
      "reward": 0.2329830303788185,
      "reward_std": 0.14905179431661963,
      "rewards/<lambda>": 0.2329830303788185,
      "step": 1
    },
    {
      "completion_length": 226.25,
      "epoch": 0.016,
      "grad_norm": 0.5803981423377991,
      "kl": 0.00016602075265836902,
      "learning_rate": 1.0000000000000002e-06,
      "loss": 0.0,
      "reward": 0.20506704226136208,
      "reward_std": 0.07600272889249027,
      "rewards/<lambda>": 0.20506704226136208,
      "step": 2
    },
    {
      "completion_length": 195.5,
      "epoch": 0.024,
      "grad_norm": 1.928178071975708,
      "kl": 0.0007406917939078994,
      "learning_rate": 1.5e-06,
      "loss": 0.0,
      "reward": 0.24738124385476112,
      "reward_std": 0.12009324785321951,
      "rewards/<lambda>": 0.24738124385476112,
      "step": 3
    },
    {
      "completion_length": 165.25,
      "epoch": 0.032,
      "grad_norm": 4.732048034667969,
      "kl": 0.0003298191677458817,
      "learning_rate": 2.0000000000000003e-06,
      "loss": 0.0,
      "reward": 0.20593957975506783,
      "reward_std": 0.13085306528955698,
      "rewards/<lambda>": 0.20593957975506783,
      "step": 4
    },
    {
      "completion_length": 229.25,
      "epoch": 0.04,
      "grad_norm": 0.6906648278236389,
      "kl": 0.00029802451899740845,
      "learning_rate": 2.5e-06,
      "loss": 0.0,
      "reward": 0.2962687499821186,
      "reward_std": 0.11282773199491203,
      "rewards/<lambda>": 0.2962687499821186,
      "step": 5
    },
    {
      "completion_length": 235.75,
      "epoch": 0.048,
      "grad_norm": 1.7818254232406616,
      "kl": 0.0009266494062103448,
      "learning_rate": 3e-06,
      "loss": 0.0,
      "reward": 0.17379014939069748,
      "reward_std": 0.03908907831646502,
      "rewards/<lambda>": 0.17379014939069748,
      "step": 6
    },
    {
      "completion_length": 141.25,
      "epoch": 0.056,
      "grad_norm": 5.83347225189209,
      "kl": 0.0010952446609735489,
      "learning_rate": 3.5e-06,
      "loss": 0.0,
      "reward": 0.18913621827960014,
      "reward_std": 0.13922751136124134,
      "rewards/<lambda>": 0.18913621827960014,
      "step": 7
    },
    {
      "completion_length": 193.125,
      "epoch": 0.064,
      "grad_norm": 4.481875896453857,
      "kl": 0.002368737303186208,
      "learning_rate": 4.000000000000001e-06,
      "loss": 0.0001,
      "reward": 0.15933124721050262,
      "reward_std": 0.033755510827177204,
      "rewards/<lambda>": 0.15933124721050262,
      "step": 8
    },
    {
      "completion_length": 256.0,
      "epoch": 0.072,
      "grad_norm": 0.335154265165329,
      "kl": 0.00025119713973253965,
      "learning_rate": 4.5e-06,
      "loss": 0.0,
      "reward": 0.36442025750875473,
      "reward_std": 0.21805795654654503,
      "rewards/<lambda>": 0.36442025750875473,
      "step": 9
    },
    {
      "completion_length": 256.0,
      "epoch": 0.08,
      "grad_norm": 0.5085432529449463,
      "kl": 0.00022752030054107308,
      "learning_rate": 5e-06,
      "loss": 0.0,
      "reward": 0.26649028062820435,
      "reward_std": 0.05319603928364813,
      "rewards/<lambda>": 0.26649028062820435,
      "step": 10
    },
    {
      "completion_length": 194.875,
      "epoch": 0.088,
      "grad_norm": 5.396770000457764,
      "kl": 0.0003068408841500059,
      "learning_rate": 4.944444444444445e-06,
      "loss": 0.0,
      "reward": 0.1397937536239624,
      "reward_std": 0.049082053592428565,
      "rewards/<lambda>": 0.1397937536239624,
      "step": 11
    },
    {
      "completion_length": 228.25,
      "epoch": 0.096,
      "grad_norm": 0.9890621900558472,
      "kl": 0.0005189948133192956,
      "learning_rate": 4.888888888888889e-06,
      "loss": 0.0,
      "reward": 0.3081398792564869,
      "reward_std": 0.21328446036204696,
      "rewards/<lambda>": 0.3081398792564869,
      "step": 12
    },
    {
      "completion_length": 78.125,
      "epoch": 0.104,
      "grad_norm": 6.991876602172852,
      "kl": 0.0028572251394507475,
      "learning_rate": 4.833333333333333e-06,
      "loss": 0.0001,
      "reward": 0.16984067671000957,
      "reward_std": 0.09841607791895512,
      "rewards/<lambda>": 0.16984067671000957,
      "step": 13
    },
    {
      "completion_length": 224.5,
      "epoch": 0.112,
      "grad_norm": 2.380413770675659,
      "kl": 0.00024483678862452507,
      "learning_rate": 4.777777777777778e-06,
      "loss": 0.0,
      "reward": 0.3446955271065235,
      "reward_std": 0.07521216722670943,
      "rewards/<lambda>": 0.3446955271065235,
      "step": 14
    },
    {
      "completion_length": 198.25,
      "epoch": 0.12,
      "grad_norm": 0.9101364612579346,
      "kl": 0.000599912746110931,
      "learning_rate": 4.722222222222222e-06,
      "loss": 0.0,
      "reward": 0.3019299991428852,
      "reward_std": 0.11653121118433774,
      "rewards/<lambda>": 0.3019299991428852,
      "step": 15
    },
    {
      "completion_length": 256.0,
      "epoch": 0.128,
      "grad_norm": 0.3337076008319855,
      "kl": 0.00042263357318006456,
      "learning_rate": 4.666666666666667e-06,
      "loss": 0.0,
      "reward": 0.22962501272559166,
      "reward_std": 0.12568823376204818,
      "rewards/<lambda>": 0.22962501272559166,
      "step": 16
    },
    {
      "completion_length": 194.5,
      "epoch": 0.136,
      "grad_norm": 2.9452309608459473,
      "kl": 0.0003321969779790379,
      "learning_rate": 4.611111111111112e-06,
      "loss": 0.0,
      "reward": 0.2720954604446888,
      "reward_std": 0.11709045059978962,
      "rewards/<lambda>": 0.2720954604446888,
      "step": 17
    },
    {
      "completion_length": 138.0,
      "epoch": 0.144,
      "grad_norm": 6.265693664550781,
      "kl": 0.008480061980662867,
      "learning_rate": 4.555555555555556e-06,
      "loss": 0.0003,
      "reward": 0.21834762021899223,
      "reward_std": 0.15078714862465858,
      "rewards/<lambda>": 0.21834762021899223,
      "step": 18
    },
    {
      "completion_length": 207.25,
      "epoch": 0.152,
      "grad_norm": 1.309558391571045,
      "kl": 0.0008433158218394965,
      "learning_rate": 4.5e-06,
      "loss": 0.0,
      "reward": 0.2984776981174946,
      "reward_std": 0.10264036408625543,
      "rewards/<lambda>": 0.2984776981174946,
      "step": 19
    },
    {
      "completion_length": 198.0,
      "epoch": 0.16,
      "grad_norm": 3.6814424991607666,
      "kl": 0.0018292521999683231,
      "learning_rate": 4.444444444444444e-06,
      "loss": 0.0001,
      "reward": 0.21348833292722702,
      "reward_std": 0.032345420273486525,
      "rewards/<lambda>": 0.21348833292722702,
      "step": 20
    },
    {
      "completion_length": 253.0,
      "epoch": 0.168,
      "grad_norm": 0.3527783751487732,
      "kl": 0.000525618303072406,
      "learning_rate": 4.388888888888889e-06,
      "loss": 0.0,
      "reward": 0.32108124345541,
      "reward_std": 0.13784162979573011,
      "rewards/<lambda>": 0.32108124345541,
      "step": 21
    },
    {
      "completion_length": 256.0,
      "epoch": 0.176,
      "grad_norm": 0.34396272897720337,
      "kl": 0.00019161385716870427,
      "learning_rate": 4.333333333333334e-06,
      "loss": 0.0,
      "reward": 0.2716740742325783,
      "reward_std": 0.0921019627712667,
      "rewards/<lambda>": 0.2716740742325783,
      "step": 22
    },
    {
      "completion_length": 256.0,
      "epoch": 0.184,
      "grad_norm": 0.38449183106422424,
      "kl": 0.0004789537051692605,
      "learning_rate": 4.277777777777778e-06,
      "loss": 0.0,
      "reward": 0.3738388866186142,
      "reward_std": 0.2010854547843337,
      "rewards/<lambda>": 0.3738388866186142,
      "step": 23
    },
    {
      "completion_length": 256.0,
      "epoch": 0.192,
      "grad_norm": 0.35797202587127686,
      "kl": 0.0003909100778400898,
      "learning_rate": 4.222222222222223e-06,
      "loss": 0.0,
      "reward": 0.3206794075667858,
      "reward_std": 0.20672890054993331,
      "rewards/<lambda>": 0.3206794075667858,
      "step": 24
    },
    {
      "completion_length": 256.0,
      "epoch": 0.2,
      "grad_norm": 0.22144320607185364,
      "kl": 0.0001992348989006132,
      "learning_rate": 4.166666666666667e-06,
      "loss": 0.0,
      "reward": 0.1902062501758337,
      "reward_std": 0.07741935132071376,
      "rewards/<lambda>": 0.1902062501758337,
      "step": 25
    },
    {
      "completion_length": 194.0,
      "epoch": 0.208,
      "grad_norm": 7.687119007110596,
      "kl": 0.0075769436080008745,
      "learning_rate": 4.111111111111111e-06,
      "loss": 0.0003,
      "reward": 0.27252499759197235,
      "reward_std": 0.11423310358077288,
      "rewards/<lambda>": 0.27252499759197235,
      "step": 26
    },
    {
      "completion_length": 226.75,
      "epoch": 0.216,
      "grad_norm": 4.88610315322876,
      "kl": 0.005719653330743313,
      "learning_rate": 4.055555555555556e-06,
      "loss": 0.0002,
      "reward": 0.28929195925593376,
      "reward_std": 0.2355259140022099,
      "rewards/<lambda>": 0.28929195925593376,
      "step": 27
    },
    {
      "completion_length": 163.875,
      "epoch": 0.224,
      "grad_norm": 0.8618195056915283,
      "kl": 0.001144724345067516,
      "learning_rate": 4.000000000000001e-06,
      "loss": 0.0,
      "reward": 0.2521233316510916,
      "reward_std": 0.18312063068151474,
      "rewards/<lambda>": 0.2521233316510916,
      "step": 28
    },
    {
      "completion_length": 193.375,
      "epoch": 0.232,
      "grad_norm": 5.040801048278809,
      "kl": 0.003916982212103903,
      "learning_rate": 3.944444444444445e-06,
      "loss": 0.0002,
      "reward": 0.2782749943435192,
      "reward_std": 0.09372699866071343,
      "rewards/<lambda>": 0.2782749943435192,
      "step": 29
    },
    {
      "completion_length": 203.5,
      "epoch": 0.24,
      "grad_norm": 1.5515941381454468,
      "kl": 0.0015425277961185202,
      "learning_rate": 3.88888888888889e-06,
      "loss": 0.0001,
      "reward": 0.327726773917675,
      "reward_std": 0.13597915810532868,
      "rewards/<lambda>": 0.327726773917675,
      "step": 30
    },
    {
      "completion_length": 224.375,
      "epoch": 0.248,
      "grad_norm": 6.099854946136475,
      "kl": 0.0065212192421313375,
      "learning_rate": 3.833333333333334e-06,
      "loss": 0.0003,
      "reward": 0.26245963387191296,
      "reward_std": 0.23274014610797167,
      "rewards/<lambda>": 0.26245963387191296,
      "step": 31
    },
    {
      "completion_length": 225.5,
      "epoch": 0.256,
      "grad_norm": 0.9755590558052063,
      "kl": 0.00134121990413405,
      "learning_rate": 3.777777777777778e-06,
      "loss": 0.0001,
      "reward": 0.2514333389699459,
      "reward_std": 0.07464690878987312,
      "rewards/<lambda>": 0.2514333389699459,
      "step": 32
    },
    {
      "completion_length": 228.875,
      "epoch": 0.264,
      "grad_norm": 0.5367397665977478,
      "kl": 0.0005917141388636082,
      "learning_rate": 3.7222222222222225e-06,
      "loss": 0.0,
      "reward": 0.3105039447546005,
      "reward_std": 0.029746492160484195,
      "rewards/<lambda>": 0.3105039447546005,
      "step": 33
    },
    {
      "completion_length": 194.5,
      "epoch": 0.272,
      "grad_norm": 3.941100597381592,
      "kl": 0.010093522083479911,
      "learning_rate": 3.6666666666666666e-06,
      "loss": 0.0004,
      "reward": 0.36005016416311264,
      "reward_std": 0.20752966683357954,
      "rewards/<lambda>": 0.36005016416311264,
      "step": 34
    },
    {
      "completion_length": 162.125,
      "epoch": 0.28,
      "grad_norm": 2.5163803100585938,
      "kl": 0.015705711644841358,
      "learning_rate": 3.6111111111111115e-06,
      "loss": 0.0006,
      "reward": 0.2547324914485216,
      "reward_std": 0.13099860399961472,
      "rewards/<lambda>": 0.2547324914485216,
      "step": 35
    },
    {
      "completion_length": 256.0,
      "epoch": 0.288,
      "grad_norm": 0.40727710723876953,
      "kl": 0.00078230255167,
      "learning_rate": 3.555555555555556e-06,
      "loss": 0.0,
      "reward": 0.32325625233352184,
      "reward_std": 0.1363072171807289,
      "rewards/<lambda>": 0.32325625233352184,
      "step": 36
    },
    {
      "completion_length": 196.75,
      "epoch": 0.296,
      "grad_norm": 3.323727607727051,
      "kl": 0.003316763584734872,
      "learning_rate": 3.5e-06,
      "loss": 0.0001,
      "reward": 0.21428124606609344,
      "reward_std": 0.10128421382978559,
      "rewards/<lambda>": 0.21428124606609344,
      "step": 37
    },
    {
      "completion_length": 224.75,
      "epoch": 0.304,
      "grad_norm": 4.83845853805542,
      "kl": 0.005240194499492645,
      "learning_rate": 3.444444444444445e-06,
      "loss": 0.0002,
      "reward": 0.21497500129044056,
      "reward_std": 0.15779088297858834,
      "rewards/<lambda>": 0.21497500129044056,
      "step": 38
    },
    {
      "completion_length": 230.125,
      "epoch": 0.312,
      "grad_norm": 0.7527925372123718,
      "kl": 0.0013605846324935555,
      "learning_rate": 3.3888888888888893e-06,
      "loss": 0.0001,
      "reward": 0.35972169786691666,
      "reward_std": 0.2259513009339571,
      "rewards/<lambda>": 0.35972169786691666,
      "step": 39
    },
    {
      "completion_length": 256.0,
      "epoch": 0.32,
      "grad_norm": 0.7770674824714661,
      "kl": 0.0018776137148961425,
      "learning_rate": 3.3333333333333333e-06,
      "loss": 0.0001,
      "reward": 0.3781879246234894,
      "reward_std": 0.17891038954257965,
      "rewards/<lambda>": 0.3781879246234894,
      "step": 40
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 20,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}