{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 11.0,
  "eval_steps": 500,
  "global_step": 1485,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.7407407407407407,
      "grad_norm": 9.024576187133789,
      "learning_rate": 4.758024691358025e-05,
      "loss": 5.6879,
      "step": 100
    },
    {
      "epoch": 1.0,
      "eval_loss": 3.6306915283203125,
      "eval_runtime": 51.3095,
      "eval_samples_per_second": 4.366,
      "eval_steps_per_second": 1.091,
      "step": 135
    },
    {
      "epoch": 1.4814814814814814,
      "grad_norm": 5.388796329498291,
      "learning_rate": 4.511111111111112e-05,
      "loss": 3.394,
      "step": 200
    },
    {
      "epoch": 2.0,
      "eval_loss": 2.354670286178589,
      "eval_runtime": 55.5839,
      "eval_samples_per_second": 4.03,
      "eval_steps_per_second": 1.007,
      "step": 270
    },
    {
      "epoch": 2.2222222222222223,
      "grad_norm": 2.522367477416992,
      "learning_rate": 4.264197530864198e-05,
      "loss": 2.2705,
      "step": 300
    },
    {
      "epoch": 2.962962962962963,
      "grad_norm": 2.4746665954589844,
      "learning_rate": 4.0172839506172845e-05,
      "loss": 1.9791,
      "step": 400
    },
    {
      "epoch": 3.0,
      "eval_loss": 2.1306421756744385,
      "eval_runtime": 52.1249,
      "eval_samples_per_second": 4.297,
      "eval_steps_per_second": 1.074,
      "step": 405
    },
    {
      "epoch": 3.7037037037037037,
      "grad_norm": 2.1820168495178223,
      "learning_rate": 3.770370370370371e-05,
      "loss": 1.769,
      "step": 500
    },
    {
      "epoch": 4.0,
      "eval_loss": 2.044142961502075,
      "eval_runtime": 53.0307,
      "eval_samples_per_second": 4.224,
      "eval_steps_per_second": 1.056,
      "step": 540
    },
    {
      "epoch": 4.444444444444445,
      "grad_norm": 2.130756139755249,
      "learning_rate": 3.523456790123457e-05,
      "loss": 1.6324,
      "step": 600
    },
    {
      "epoch": 5.0,
      "eval_loss": 1.9857765436172485,
      "eval_runtime": 9.8322,
      "eval_samples_per_second": 22.782,
      "eval_steps_per_second": 5.696,
      "step": 675
    },
    {
      "epoch": 5.185185185185185,
      "grad_norm": 2.337979555130005,
      "learning_rate": 3.2765432098765435e-05,
      "loss": 1.57,
      "step": 700
    },
    {
      "epoch": 5.925925925925926,
      "grad_norm": 2.2854785919189453,
      "learning_rate": 3.02962962962963e-05,
      "loss": 1.5001,
      "step": 800
    },
    {
      "epoch": 6.0,
      "eval_loss": 1.9689041376113892,
      "eval_runtime": 22.8388,
      "eval_samples_per_second": 9.808,
      "eval_steps_per_second": 2.452,
      "step": 810
    },
    {
      "epoch": 6.666666666666667,
      "grad_norm": 2.746314525604248,
      "learning_rate": 2.7827160493827158e-05,
      "loss": 1.3906,
      "step": 900
    },
    {
      "epoch": 7.0,
      "eval_loss": 1.9692579507827759,
      "eval_runtime": 28.0368,
      "eval_samples_per_second": 7.989,
      "eval_steps_per_second": 1.997,
      "step": 945
    },
    {
      "epoch": 7.407407407407407,
      "grad_norm": 2.559098720550537,
      "learning_rate": 2.5358024691358025e-05,
      "loss": 1.299,
      "step": 1000
    },
    {
      "epoch": 8.0,
      "eval_loss": 1.9595974683761597,
      "eval_runtime": 11.2298,
      "eval_samples_per_second": 19.947,
      "eval_steps_per_second": 4.987,
      "step": 1080
    },
    {
      "epoch": 8.148148148148149,
      "grad_norm": 2.383742094039917,
      "learning_rate": 2.288888888888889e-05,
      "loss": 1.2395,
      "step": 1100
    },
    {
      "epoch": 8.88888888888889,
      "grad_norm": 2.732578754425049,
      "learning_rate": 2.0419753086419755e-05,
      "loss": 1.1885,
      "step": 1200
    },
    {
      "epoch": 9.0,
      "eval_loss": 1.972147822380066,
      "eval_runtime": 35.0043,
      "eval_samples_per_second": 6.399,
      "eval_steps_per_second": 1.6,
      "step": 1215
    },
    {
      "epoch": 9.62962962962963,
      "grad_norm": 2.3015191555023193,
      "learning_rate": 1.7950617283950618e-05,
      "loss": 1.0952,
      "step": 1300
    },
    {
      "epoch": 10.0,
      "eval_loss": 1.9826620817184448,
      "eval_runtime": 28.3051,
      "eval_samples_per_second": 7.914,
      "eval_steps_per_second": 1.978,
      "step": 1350
    },
    {
      "epoch": 10.37037037037037,
      "grad_norm": 2.642057180404663,
      "learning_rate": 1.548148148148148e-05,
      "loss": 1.0285,
      "step": 1400
    },
    {
      "epoch": 11.0,
      "eval_loss": 1.9937878847122192,
      "eval_runtime": 11.3644,
      "eval_samples_per_second": 19.711,
      "eval_steps_per_second": 4.928,
      "step": 1485
    }
  ],
  "logging_steps": 100,
  "max_steps": 2025,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 15,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.1628187800547328e+18,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}