{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.14705882352941177,
  "eval_steps": 500,
  "global_step": 5000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.014705882352941176,
      "grad_norm": 0.35731062293052673,
      "learning_rate": 4.975490196078432e-05,
      "loss": 0.5354,
      "step": 500
    },
    {
      "epoch": 0.029411764705882353,
      "grad_norm": 2.1595866680145264,
      "learning_rate": 4.9509803921568634e-05,
      "loss": 0.1627,
      "step": 1000
    },
    {
      "epoch": 0.04411764705882353,
      "grad_norm": 0.14825384318828583,
      "learning_rate": 4.9264705882352944e-05,
      "loss": 0.101,
      "step": 1500
    },
    {
      "epoch": 0.058823529411764705,
      "grad_norm": 0.07121703773736954,
      "learning_rate": 4.901960784313725e-05,
      "loss": 0.0952,
      "step": 2000
    },
    {
      "epoch": 0.07352941176470588,
      "grad_norm": 0.06089532747864723,
      "learning_rate": 4.877450980392157e-05,
      "loss": 0.0881,
      "step": 2500
    },
    {
      "epoch": 0.08823529411764706,
      "grad_norm": 0.037034619599580765,
      "learning_rate": 4.8529411764705885e-05,
      "loss": 0.0717,
      "step": 3000
    },
    {
      "epoch": 0.10294117647058823,
      "grad_norm": 0.01999847963452339,
      "learning_rate": 4.82843137254902e-05,
      "loss": 0.0848,
      "step": 3500
    },
    {
      "epoch": 0.11764705882352941,
      "grad_norm": 0.018650399520993233,
      "learning_rate": 4.803921568627452e-05,
      "loss": 0.068,
      "step": 4000
    },
    {
      "epoch": 0.1323529411764706,
      "grad_norm": 0.023023229092359543,
      "learning_rate": 4.7794117647058826e-05,
      "loss": 0.0626,
      "step": 4500
    },
    {
      "epoch": 0.14705882352941177,
      "grad_norm": 0.018715515732765198,
      "learning_rate": 4.7549019607843135e-05,
      "loss": 0.0555,
      "step": 5000
    }
  ],
  "logging_steps": 500,
  "max_steps": 102000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "total_flos": 3.09998518050816e+18,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}