{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 138,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.14492753623188406,
      "grad_norm": 12.398261352422882,
      "learning_rate": 6.4285714285714295e-06,
      "loss": 2.9235,
      "step": 10
    },
    {
      "epoch": 0.2898550724637681,
      "grad_norm": 8.017807501158464,
      "learning_rate": 9.959935885253715e-06,
      "loss": 1.4926,
      "step": 20
    },
    {
      "epoch": 0.43478260869565216,
      "grad_norm": 7.6729398768152,
      "learning_rate": 9.643264997861312e-06,
      "loss": 1.099,
      "step": 30
    },
    {
      "epoch": 0.5797101449275363,
      "grad_norm": 7.937653371039124,
      "learning_rate": 9.030141317270026e-06,
      "loss": 1.0419,
      "step": 40
    },
    {
      "epoch": 0.7246376811594203,
      "grad_norm": 6.935420300985396,
      "learning_rate": 8.15971019223152e-06,
      "loss": 0.9559,
      "step": 50
    },
    {
      "epoch": 0.8695652173913043,
      "grad_norm": 6.521364461671462,
      "learning_rate": 7.087544961425317e-06,
      "loss": 0.8724,
      "step": 60
    },
    {
      "epoch": 1.0144927536231885,
      "grad_norm": 4.940342489815436,
      "learning_rate": 5.882098831289044e-06,
      "loss": 0.8656,
      "step": 70
    },
    {
      "epoch": 1.1594202898550725,
      "grad_norm": 6.488774654663568,
      "learning_rate": 4.62033442887377e-06,
      "loss": 0.5261,
      "step": 80
    },
    {
      "epoch": 1.3043478260869565,
      "grad_norm": 5.384402568372393,
      "learning_rate": 3.3828100642538097e-06,
      "loss": 0.5177,
      "step": 90
    },
    {
      "epoch": 1.4492753623188406,
      "grad_norm": 5.522607349461902,
      "learning_rate": 2.2485364238130435e-06,
      "loss": 0.4972,
      "step": 100
    },
    {
      "epoch": 1.5942028985507246,
      "grad_norm": 5.956590886271073,
      "learning_rate": 1.2899320727454472e-06,
      "loss": 0.4665,
      "step": 110
    },
    {
      "epoch": 1.7391304347826086,
      "grad_norm": 5.162066297238004,
      "learning_rate": 5.681998365579594e-07,
      "loss": 0.4959,
      "step": 120
    },
    {
      "epoch": 1.8840579710144927,
      "grad_norm": 5.884961224410103,
      "learning_rate": 1.2941926002306536e-07,
      "loss": 0.4616,
      "step": 130
    },
    {
      "epoch": 2.0,
      "step": 138,
      "total_flos": 3235348660224.0,
      "train_loss": 0.9176704987235691,
      "train_runtime": 291.5306,
      "train_samples_per_second": 3.766,
      "train_steps_per_second": 0.473
    }
  ],
  "logging_steps": 10,
  "max_steps": 138,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 5000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3235348660224.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}