{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.832,
  "eval_steps": 500,
  "global_step": 90,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.16,
      "grad_norm": 0.4173983037471771,
      "learning_rate": 2.6666666666666667e-05,
      "loss": 0.7785,
      "step": 5
    },
    {
      "epoch": 0.32,
      "grad_norm": 0.3714005947113037,
      "learning_rate": 6e-05,
      "loss": 0.7355,
      "step": 10
    },
    {
      "epoch": 0.48,
      "grad_norm": 0.40615686774253845,
      "learning_rate": 9.333333333333334e-05,
      "loss": 0.6202,
      "step": 15
    },
    {
      "epoch": 0.64,
      "grad_norm": 0.3105890154838562,
      "learning_rate": 9.703703703703704e-05,
      "loss": 0.5831,
      "step": 20
    },
    {
      "epoch": 0.8,
      "grad_norm": 0.19839249551296234,
      "learning_rate": 9.333333333333334e-05,
      "loss": 0.5024,
      "step": 25
    },
    {
      "epoch": 0.96,
      "grad_norm": 0.20537137985229492,
      "learning_rate": 8.962962962962963e-05,
      "loss": 0.5202,
      "step": 30
    },
    {
      "epoch": 1.096,
      "grad_norm": 0.1985170543193817,
      "learning_rate": 8.592592592592593e-05,
      "loss": 0.4727,
      "step": 35
    },
    {
      "epoch": 1.256,
      "grad_norm": 0.16953736543655396,
      "learning_rate": 8.222222222222222e-05,
      "loss": 0.4582,
      "step": 40
    },
    {
      "epoch": 1.416,
      "grad_norm": 0.18532288074493408,
      "learning_rate": 7.851851851851852e-05,
      "loss": 0.4438,
      "step": 45
    },
    {
      "epoch": 1.576,
      "grad_norm": 0.22516000270843506,
      "learning_rate": 7.481481481481481e-05,
      "loss": 0.4722,
      "step": 50
    },
    {
      "epoch": 1.736,
      "grad_norm": 0.22280442714691162,
      "learning_rate": 7.111111111111112e-05,
      "loss": 0.4645,
      "step": 55
    },
    {
      "epoch": 1.896,
      "grad_norm": 0.1927877962589264,
      "learning_rate": 6.740740740740741e-05,
      "loss": 0.4277,
      "step": 60
    },
    {
      "epoch": 2.032,
      "grad_norm": 0.18494941294193268,
      "learning_rate": 6.37037037037037e-05,
      "loss": 0.4093,
      "step": 65
    },
    {
      "epoch": 2.192,
      "grad_norm": 0.22715841233730316,
      "learning_rate": 6e-05,
      "loss": 0.4129,
      "step": 70
    },
    {
      "epoch": 2.352,
      "grad_norm": 0.22747837007045746,
      "learning_rate": 5.62962962962963e-05,
      "loss": 0.4063,
      "step": 75
    },
    {
      "epoch": 2.512,
      "grad_norm": 0.20530784130096436,
      "learning_rate": 5.259259259259259e-05,
      "loss": 0.3846,
      "step": 80
    },
    {
      "epoch": 2.672,
      "grad_norm": 0.21205097436904907,
      "learning_rate": 4.888888888888889e-05,
      "loss": 0.3987,
      "step": 85
    },
    {
      "epoch": 2.832,
      "grad_norm": 0.22707021236419678,
      "learning_rate": 4.518518518518519e-05,
      "loss": 0.4245,
      "step": 90
    }
  ],
  "logging_steps": 5,
  "max_steps": 150,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 30,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.0310871209213952e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}