{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 15.0,
  "eval_steps": 500,
  "global_step": 66674,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.9998875014062323,
      "grad_norm": 181.65682983398438,
      "learning_rate": 1e-05,
      "loss": 45.7848,
      "step": 8889
    },
    {
      "epoch": 3.0,
      "grad_norm": 422.18414306640625,
      "learning_rate": 1e-05,
      "loss": 43.869,
      "step": 13334
    },
    {
      "epoch": 4.0,
      "grad_norm": 199.45924377441406,
      "learning_rate": 1e-05,
      "loss": 42.2688,
      "step": 17779
    },
    {
      "epoch": 5.0,
      "grad_norm": 249.6620635986328,
      "learning_rate": 1e-05,
      "loss": 41.0351,
      "step": 22224
    },
    {
      "epoch": 6.0,
      "grad_norm": 328.8633728027344,
      "learning_rate": 1e-05,
      "loss": 40.109,
      "step": 26669
    },
    {
      "epoch": 7.0,
      "grad_norm": 358.3860778808594,
      "learning_rate": 1e-05,
      "loss": 39.3797,
      "step": 31114
    },
    {
      "epoch": 8.0,
      "grad_norm": 239.27536010742188,
      "learning_rate": 1e-05,
      "loss": 38.5914,
      "step": 35559
    },
    {
      "epoch": 9.0,
      "grad_norm": 316.275634765625,
      "learning_rate": 1e-05,
      "loss": 37.9495,
      "step": 40004
    },
    {
      "epoch": 10.0,
      "grad_norm": 177.41749572753906,
      "learning_rate": 1e-05,
      "loss": 37.4032,
      "step": 44449
    },
    {
      "epoch": 11.0,
      "grad_norm": 193.14991760253906,
      "learning_rate": 1e-05,
      "loss": 36.8619,
      "step": 48894
    },
    {
      "epoch": 12.0,
      "grad_norm": 173.55499267578125,
      "learning_rate": 1e-05,
      "loss": 36.3437,
      "step": 53339
    },
    {
      "epoch": 13.0,
      "grad_norm": 377.0917663574219,
      "learning_rate": 1e-05,
      "loss": 35.8008,
      "step": 57784
    },
    {
      "epoch": 14.0,
      "grad_norm": 326.80535888671875,
      "learning_rate": 1e-05,
      "loss": 35.321,
      "step": 62229
    },
    {
      "epoch": 15.0,
      "grad_norm": 370.6023254394531,
      "learning_rate": 1e-05,
      "loss": 34.8802,
      "step": 66674
    },
    {
      "epoch": 15.0,
      "step": 66674,
      "total_flos": 4.498576409525775e+21,
      "train_loss": 34.2752071881468,
      "train_runtime": 162790.0677,
      "train_samples_per_second": 32.76,
      "train_steps_per_second": 0.41
    }
  ],
  "logging_steps": 500,
  "max_steps": 66675,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 15,
  "save_steps": 2500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 4.498576409525775e+21,
  "train_batch_size": 5,
  "trial_name": null,
  "trial_params": null
}