{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 5.970149253731344,
  "eval_steps": 500,
  "global_step": 800,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.746268656716418,
      "grad_norm": 0.2015659064054489,
      "learning_rate": 3.73134328358209e-06,
      "loss": 0.1198,
      "step": 100
    },
    {
      "epoch": 1.4925373134328357,
      "grad_norm": 0.16025546193122864,
      "learning_rate": 7.46268656716418e-06,
      "loss": 0.0693,
      "step": 200
    },
    {
      "epoch": 2.2388059701492535,
      "grad_norm": 0.1018073558807373,
      "learning_rate": 9.867330016583748e-06,
      "loss": 0.0496,
      "step": 300
    },
    {
      "epoch": 2.9850746268656714,
      "grad_norm": 0.100601427257061,
      "learning_rate": 9.45273631840796e-06,
      "loss": 0.0341,
      "step": 400
    },
    {
      "epoch": 3.7313432835820897,
      "grad_norm": 0.0992753654718399,
      "learning_rate": 9.038142620232173e-06,
      "loss": 0.0216,
      "step": 500
    },
    {
      "epoch": 4.477611940298507,
      "grad_norm": 0.08199736475944519,
      "learning_rate": 8.623548922056384e-06,
      "loss": 0.0187,
      "step": 600
    },
    {
      "epoch": 5.223880597014926,
      "grad_norm": 0.08397415280342102,
      "learning_rate": 8.208955223880599e-06,
      "loss": 0.0166,
      "step": 700
    },
    {
      "epoch": 5.970149253731344,
      "grad_norm": 0.13193649053573608,
      "learning_rate": 7.79436152570481e-06,
      "loss": 0.013,
      "step": 800
    }
  ],
  "logging_steps": 100,
  "max_steps": 2680,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 20,
  "save_steps": 800,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}