{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 66,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.23391812865497075,
      "grad_norm": 19.255443572998047,
      "learning_rate": 2.857142857142857e-05,
      "loss": 0.8241,
      "step": 5
    },
    {
      "epoch": 0.4678362573099415,
      "grad_norm": 15.90062141418457,
      "learning_rate": 4.985837000525343e-05,
      "loss": 0.1671,
      "step": 10
    },
    {
      "epoch": 0.7017543859649122,
      "grad_norm": 5.5942206382751465,
      "learning_rate": 4.8283404436308464e-05,
      "loss": 0.085,
      "step": 15
    },
    {
      "epoch": 0.935672514619883,
      "grad_norm": 10.160480499267578,
      "learning_rate": 4.5067790948274094e-05,
      "loss": 0.0862,
      "step": 20
    },
    {
      "epoch": 1.1403508771929824,
      "grad_norm": 0.6265833973884583,
      "learning_rate": 4.04381153736548e-05,
      "loss": 0.0485,
      "step": 25
    },
    {
      "epoch": 1.3742690058479532,
      "grad_norm": 0.9624802470207214,
      "learning_rate": 3.472060438683302e-05,
      "loss": 0.0484,
      "step": 30
    },
    {
      "epoch": 1.608187134502924,
      "grad_norm": 0.569491982460022,
      "learning_rate": 2.8318138182093052e-05,
      "loss": 0.0468,
      "step": 35
    },
    {
      "epoch": 1.8421052631578947,
      "grad_norm": 0.3710443377494812,
      "learning_rate": 2.1681861817906954e-05,
      "loss": 0.0458,
      "step": 40
    },
    {
      "epoch": 2.046783625730994,
      "grad_norm": 0.1184922382235527,
      "learning_rate": 1.5279395613166986e-05,
      "loss": 0.0455,
      "step": 45
    },
    {
      "epoch": 2.280701754385965,
      "grad_norm": 0.21576550602912903,
      "learning_rate": 9.561884626345205e-06,
      "loss": 0.0439,
      "step": 50
    },
    {
      "epoch": 2.5146198830409356,
      "grad_norm": 0.17528507113456726,
      "learning_rate": 4.932209051725914e-06,
      "loss": 0.0442,
      "step": 55
    },
    {
      "epoch": 2.7485380116959064,
      "grad_norm": 0.1674930304288864,
      "learning_rate": 1.7165955636915392e-06,
      "loss": 0.0438,
      "step": 60
    },
    {
      "epoch": 2.982456140350877,
      "grad_norm": 0.1652153730392456,
      "learning_rate": 1.4162999474657268e-07,
      "loss": 0.0451,
      "step": 65
    },
    {
      "epoch": 3.0,
      "step": 66,
      "total_flos": 411612762796032.0,
      "train_loss": 0.12000229429792274,
      "train_runtime": 714.9914,
      "train_samples_per_second": 1.435,
      "train_steps_per_second": 0.092
    }
  ],
  "logging_steps": 5,
  "max_steps": 66,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 411612762796032.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}