{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.16,
  "eval_steps": 500,
  "global_step": 20,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "completion_length": 192.625,
      "epoch": 0.008,
      "grad_norm": 10.768404960632324,
      "kl": 0.0009242900705430657,
      "learning_rate": 5.000000000000001e-07,
      "loss": 0.0,
      "reward": 0.2329830303788185,
      "reward_std": 0.14905179431661963,
      "rewards/<lambda>": 0.2329830303788185,
      "step": 1
    },
    {
      "completion_length": 226.25,
      "epoch": 0.016,
      "grad_norm": 0.5803981423377991,
      "kl": 0.00016602075265836902,
      "learning_rate": 1.0000000000000002e-06,
      "loss": 0.0,
      "reward": 0.20506704226136208,
      "reward_std": 0.07600272889249027,
      "rewards/<lambda>": 0.20506704226136208,
      "step": 2
    },
    {
      "completion_length": 195.5,
      "epoch": 0.024,
      "grad_norm": 1.928178071975708,
      "kl": 0.0007406917939078994,
      "learning_rate": 1.5e-06,
      "loss": 0.0,
      "reward": 0.24738124385476112,
      "reward_std": 0.12009324785321951,
      "rewards/<lambda>": 0.24738124385476112,
      "step": 3
    },
    {
      "completion_length": 165.25,
      "epoch": 0.032,
      "grad_norm": 4.732048034667969,
      "kl": 0.0003298191677458817,
      "learning_rate": 2.0000000000000003e-06,
      "loss": 0.0,
      "reward": 0.20593957975506783,
      "reward_std": 0.13085306528955698,
      "rewards/<lambda>": 0.20593957975506783,
      "step": 4
    },
    {
      "completion_length": 229.25,
      "epoch": 0.04,
      "grad_norm": 0.6906648278236389,
      "kl": 0.00029802451899740845,
      "learning_rate": 2.5e-06,
      "loss": 0.0,
      "reward": 0.2962687499821186,
      "reward_std": 0.11282773199491203,
      "rewards/<lambda>": 0.2962687499821186,
      "step": 5
    },
    {
      "completion_length": 235.75,
      "epoch": 0.048,
      "grad_norm": 1.7818254232406616,
      "kl": 0.0009266494062103448,
      "learning_rate": 3e-06,
      "loss": 0.0,
      "reward": 0.17379014939069748,
      "reward_std": 0.03908907831646502,
      "rewards/<lambda>": 0.17379014939069748,
      "step": 6
    },
    {
      "completion_length": 141.25,
      "epoch": 0.056,
      "grad_norm": 5.83347225189209,
      "kl": 0.0010952446609735489,
      "learning_rate": 3.5e-06,
      "loss": 0.0,
      "reward": 0.18913621827960014,
      "reward_std": 0.13922751136124134,
      "rewards/<lambda>": 0.18913621827960014,
      "step": 7
    },
    {
      "completion_length": 193.125,
      "epoch": 0.064,
      "grad_norm": 4.481875896453857,
      "kl": 0.002368737303186208,
      "learning_rate": 4.000000000000001e-06,
      "loss": 0.0001,
      "reward": 0.15933124721050262,
      "reward_std": 0.033755510827177204,
      "rewards/<lambda>": 0.15933124721050262,
      "step": 8
    },
    {
      "completion_length": 256.0,
      "epoch": 0.072,
      "grad_norm": 0.335154265165329,
      "kl": 0.00025119713973253965,
      "learning_rate": 4.5e-06,
      "loss": 0.0,
      "reward": 0.36442025750875473,
      "reward_std": 0.21805795654654503,
      "rewards/<lambda>": 0.36442025750875473,
      "step": 9
    },
    {
      "completion_length": 256.0,
      "epoch": 0.08,
      "grad_norm": 0.5085432529449463,
      "kl": 0.00022752030054107308,
      "learning_rate": 5e-06,
      "loss": 0.0,
      "reward": 0.26649028062820435,
      "reward_std": 0.05319603928364813,
      "rewards/<lambda>": 0.26649028062820435,
      "step": 10
    },
    {
      "completion_length": 194.875,
      "epoch": 0.088,
      "grad_norm": 5.396770000457764,
      "kl": 0.0003068408841500059,
      "learning_rate": 4.944444444444445e-06,
      "loss": 0.0,
      "reward": 0.1397937536239624,
      "reward_std": 0.049082053592428565,
      "rewards/<lambda>": 0.1397937536239624,
      "step": 11
    },
    {
      "completion_length": 228.25,
      "epoch": 0.096,
      "grad_norm": 0.9890621900558472,
      "kl": 0.0005189948133192956,
      "learning_rate": 4.888888888888889e-06,
      "loss": 0.0,
      "reward": 0.3081398792564869,
      "reward_std": 0.21328446036204696,
      "rewards/<lambda>": 0.3081398792564869,
      "step": 12
    },
    {
      "completion_length": 78.125,
      "epoch": 0.104,
      "grad_norm": 6.991876602172852,
      "kl": 0.0028572251394507475,
      "learning_rate": 4.833333333333333e-06,
      "loss": 0.0001,
      "reward": 0.16984067671000957,
      "reward_std": 0.09841607791895512,
      "rewards/<lambda>": 0.16984067671000957,
      "step": 13
    },
    {
      "completion_length": 224.5,
      "epoch": 0.112,
      "grad_norm": 2.380413770675659,
      "kl": 0.00024483678862452507,
      "learning_rate": 4.777777777777778e-06,
      "loss": 0.0,
      "reward": 0.3446955271065235,
      "reward_std": 0.07521216722670943,
      "rewards/<lambda>": 0.3446955271065235,
      "step": 14
    },
    {
      "completion_length": 198.25,
      "epoch": 0.12,
      "grad_norm": 0.9101364612579346,
      "kl": 0.000599912746110931,
      "learning_rate": 4.722222222222222e-06,
      "loss": 0.0,
      "reward": 0.3019299991428852,
      "reward_std": 0.11653121118433774,
      "rewards/<lambda>": 0.3019299991428852,
      "step": 15
    },
    {
      "completion_length": 256.0,
      "epoch": 0.128,
      "grad_norm": 0.3337076008319855,
      "kl": 0.00042263357318006456,
      "learning_rate": 4.666666666666667e-06,
      "loss": 0.0,
      "reward": 0.22962501272559166,
      "reward_std": 0.12568823376204818,
      "rewards/<lambda>": 0.22962501272559166,
      "step": 16
    },
    {
      "completion_length": 194.5,
      "epoch": 0.136,
      "grad_norm": 2.9452309608459473,
      "kl": 0.0003321969779790379,
      "learning_rate": 4.611111111111112e-06,
      "loss": 0.0,
      "reward": 0.2720954604446888,
      "reward_std": 0.11709045059978962,
      "rewards/<lambda>": 0.2720954604446888,
      "step": 17
    },
    {
      "completion_length": 138.0,
      "epoch": 0.144,
      "grad_norm": 6.265693664550781,
      "kl": 0.008480061980662867,
      "learning_rate": 4.555555555555556e-06,
      "loss": 0.0003,
      "reward": 0.21834762021899223,
      "reward_std": 0.15078714862465858,
      "rewards/<lambda>": 0.21834762021899223,
      "step": 18
    },
    {
      "completion_length": 207.25,
      "epoch": 0.152,
      "grad_norm": 1.309558391571045,
      "kl": 0.0008433158218394965,
      "learning_rate": 4.5e-06,
      "loss": 0.0,
      "reward": 0.2984776981174946,
      "reward_std": 0.10264036408625543,
      "rewards/<lambda>": 0.2984776981174946,
      "step": 19
    },
    {
      "completion_length": 198.0,
      "epoch": 0.16,
      "grad_norm": 3.6814424991607666,
      "kl": 0.0018292521999683231,
      "learning_rate": 4.444444444444444e-06,
      "loss": 0.0001,
      "reward": 0.21348833292722702,
      "reward_std": 0.032345420273486525,
      "rewards/<lambda>": 0.21348833292722702,
      "step": 20
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 20,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}