{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.9983086680761097,
  "eval_steps": 1000,
  "global_step": 7089,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00042319085907744394,
      "grad_norm": 7.6988654136657715,
      "learning_rate": 0.0,
      "loss": 3.579,
      "step": 1
    },
    {
      "epoch": 0.4231908590774439,
      "grad_norm": 4.416678428649902,
      "learning_rate": 2.8636363636363637e-05,
      "loss": 2.4885,
      "step": 1000
    },
    {
      "epoch": 0.4231908590774439,
      "eval_cosine_accuracy": 0.9691870808601379,
      "eval_loss": 1.3169608116149902,
      "eval_runtime": 22.2979,
      "eval_samples_per_second": 426.453,
      "eval_steps_per_second": 1.704,
      "step": 1000
    },
    {
      "epoch": 0.8463817181548878,
      "grad_norm": 4.086009979248047,
      "learning_rate": 2.393887147335423e-05,
      "loss": 1.7805,
      "step": 2000
    },
    {
      "epoch": 0.8463817181548878,
      "eval_cosine_accuracy": 0.9746555685997009,
      "eval_loss": 1.2482444047927856,
      "eval_runtime": 22.4445,
      "eval_samples_per_second": 423.667,
      "eval_steps_per_second": 1.693,
      "step": 2000
    },
    {
      "epoch": 1.2693446088794926,
      "grad_norm": 4.127110004425049,
      "learning_rate": 1.923667711598746e-05,
      "loss": 1.5251,
      "step": 3000
    },
    {
      "epoch": 1.2693446088794926,
      "eval_cosine_accuracy": 0.9765485525131226,
      "eval_loss": 1.2090805768966675,
      "eval_runtime": 22.6787,
      "eval_samples_per_second": 419.292,
      "eval_steps_per_second": 1.676,
      "step": 3000
    },
    {
      "epoch": 1.6921775898520086,
      "grad_norm": 4.0318922996521,
      "learning_rate": 1.453448275862069e-05,
      "loss": 1.4389,
      "step": 4000
    },
    {
      "epoch": 1.6921775898520086,
      "eval_cosine_accuracy": 0.976443350315094,
      "eval_loss": 1.1945761442184448,
      "eval_runtime": 22.7171,
      "eval_samples_per_second": 418.584,
      "eval_steps_per_second": 1.673,
      "step": 4000
    },
    {
      "epoch": 2.1150105708245244,
      "grad_norm": 3.835808277130127,
      "learning_rate": 9.832288401253918e-06,
      "loss": 1.3679,
      "step": 5000
    },
    {
      "epoch": 2.1150105708245244,
      "eval_cosine_accuracy": 0.9773898124694824,
      "eval_loss": 1.1790019273757935,
      "eval_runtime": 22.6173,
      "eval_samples_per_second": 420.43,
      "eval_steps_per_second": 1.68,
      "step": 5000
    },
    {
      "epoch": 2.53784355179704,
      "grad_norm": 4.13162088394165,
      "learning_rate": 5.1300940438871475e-06,
      "loss": 1.3124,
      "step": 6000
    },
    {
      "epoch": 2.53784355179704,
      "eval_cosine_accuracy": 0.9784414768218994,
      "eval_loss": 1.17289137840271,
      "eval_runtime": 23.7837,
      "eval_samples_per_second": 399.811,
      "eval_steps_per_second": 1.598,
      "step": 6000
    },
    {
      "epoch": 2.960676532769556,
      "grad_norm": 4.070672512054443,
      "learning_rate": 4.2789968652037616e-07,
      "loss": 1.293,
      "step": 7000
    },
    {
      "epoch": 2.960676532769556,
      "eval_cosine_accuracy": 0.9784414768218994,
      "eval_loss": 1.1676408052444458,
      "eval_runtime": 22.5006,
      "eval_samples_per_second": 422.611,
      "eval_steps_per_second": 1.689,
      "step": 7000
    }
  ],
  "logging_steps": 1000,
  "max_steps": 7089,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 256,
  "trial_name": null,
  "trial_params": null
}