{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 159,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.1902497027348395,
      "grad_norm": 1.049843430519104,
      "learning_rate": 1.8e-05,
      "loss": 3.6094,
      "step": 10
    },
    {
      "epoch": 0.380499405469679,
      "grad_norm": 1.4530141353607178,
      "learning_rate": 3.8e-05,
      "loss": 3.3156,
      "step": 20
    },
    {
      "epoch": 0.5707491082045184,
      "grad_norm": 1.1667392253875732,
      "learning_rate": 5.8e-05,
      "loss": 2.6267,
      "step": 30
    },
    {
      "epoch": 0.760998810939358,
      "grad_norm": 1.4776651859283447,
      "learning_rate": 7.800000000000001e-05,
      "loss": 2.1165,
      "step": 40
    },
    {
      "epoch": 0.9512485136741974,
      "grad_norm": 1.4660998582839966,
      "learning_rate": 9.8e-05,
      "loss": 1.804,
      "step": 50
    },
    {
      "epoch": 1.1331747919143877,
      "grad_norm": 1.163648247718811,
      "learning_rate": 0.000118,
      "loss": 1.5531,
      "step": 60
    },
    {
      "epoch": 1.323424494649227,
      "grad_norm": 1.323768973350525,
      "learning_rate": 0.000138,
      "loss": 1.4815,
      "step": 70
    },
    {
      "epoch": 1.5136741973840666,
      "grad_norm": 1.6716187000274658,
      "learning_rate": 0.00015800000000000002,
      "loss": 1.3993,
      "step": 80
    },
    {
      "epoch": 1.7039239001189062,
      "grad_norm": 1.709359049797058,
      "learning_rate": 0.00017800000000000002,
      "loss": 1.3135,
      "step": 90
    },
    {
      "epoch": 1.8941736028537455,
      "grad_norm": 1.735700249671936,
      "learning_rate": 0.00019800000000000002,
      "loss": 1.2533,
      "step": 100
    },
    {
      "epoch": 2.0760998810939357,
      "grad_norm": 1.7600226402282715,
      "learning_rate": 0.00018873520750565718,
      "loss": 1.1572,
      "step": 110
    },
    {
      "epoch": 2.2663495838287755,
      "grad_norm": 2.0459885597229004,
      "learning_rate": 0.00015304209081197425,
      "loss": 0.9394,
      "step": 120
    },
    {
      "epoch": 2.456599286563615,
      "grad_norm": 1.884280800819397,
      "learning_rate": 0.00010266205214377748,
      "loss": 0.9113,
      "step": 130
    },
    {
      "epoch": 2.646848989298454,
      "grad_norm": 2.249265432357788,
      "learning_rate": 5.1544912966734994e-05,
      "loss": 0.8832,
      "step": 140
    },
    {
      "epoch": 2.837098692033294,
      "grad_norm": 2.0327601432800293,
      "learning_rate": 1.3844591860619383e-05,
      "loss": 0.9004,
      "step": 150
    }
  ],
  "logging_steps": 10,
  "max_steps": 159,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 6.123192092794552e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}