{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 150.0,
  "eval_steps": 0,
  "global_step": 150,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 7.0,
      "grad_norm": 0.340549038019777,
      "learning_rate": 9.59731543624161e-06,
      "loss": -0.0115,
      "step": 7
    },
    {
      "epoch": 14.0,
      "grad_norm": 0.4806914054580233,
      "learning_rate": 9.12751677852349e-06,
      "loss": -0.0201,
      "step": 14
    },
    {
      "epoch": 21.0,
      "grad_norm": 0.6865384664374159,
      "learning_rate": 8.65771812080537e-06,
      "loss": -0.0527,
      "step": 21
    },
    {
      "epoch": 28.0,
      "grad_norm": 0.8848410770619826,
      "learning_rate": 8.18791946308725e-06,
      "loss": -0.109,
      "step": 28
    },
    {
      "epoch": 35.0,
      "grad_norm": 1.0317597290997735,
      "learning_rate": 7.718120805369127e-06,
      "loss": -0.1998,
      "step": 35
    },
    {
      "epoch": 42.0,
      "grad_norm": 1.1724218860139075,
      "learning_rate": 7.248322147651007e-06,
      "loss": -0.3007,
      "step": 42
    },
    {
      "epoch": 49.0,
      "grad_norm": 1.404017467068633,
      "learning_rate": 6.778523489932887e-06,
      "loss": -0.4292,
      "step": 49
    },
    {
      "epoch": 56.0,
      "grad_norm": 1.6480035103147999,
      "learning_rate": 6.308724832214766e-06,
      "loss": -0.5447,
      "step": 56
    },
    {
      "epoch": 63.0,
      "grad_norm": 1.8054218748623612,
      "learning_rate": 5.8389261744966455e-06,
      "loss": -0.6846,
      "step": 63
    },
    {
      "epoch": 70.0,
      "grad_norm": 2.091450290177055,
      "learning_rate": 5.369127516778524e-06,
      "loss": -0.8246,
      "step": 70
    },
    {
      "epoch": 77.0,
      "grad_norm": 2.438691123914411,
      "learning_rate": 4.899328859060403e-06,
      "loss": -0.9913,
      "step": 77
    },
    {
      "epoch": 84.0,
      "grad_norm": 2.932245648926953,
      "learning_rate": 4.429530201342283e-06,
      "loss": -1.1595,
      "step": 84
    },
    {
      "epoch": 91.0,
      "grad_norm": 2.9288808809135602,
      "learning_rate": 3.959731543624161e-06,
      "loss": -1.3425,
      "step": 91
    },
    {
      "epoch": 98.0,
      "grad_norm": 3.1440641608396938,
      "learning_rate": 3.4899328859060407e-06,
      "loss": -1.5079,
      "step": 98
    },
    {
      "epoch": 105.0,
      "grad_norm": 3.300982134141446,
      "learning_rate": 3.02013422818792e-06,
      "loss": -1.7054,
      "step": 105
    },
    {
      "epoch": 112.0,
      "grad_norm": 3.3107987058552655,
      "learning_rate": 2.5503355704697992e-06,
      "loss": -1.8844,
      "step": 112
    },
    {
      "epoch": 119.0,
      "grad_norm": 3.352363390224322,
      "learning_rate": 2.080536912751678e-06,
      "loss": -2.0353,
      "step": 119
    },
    {
      "epoch": 126.0,
      "grad_norm": 3.275866172606612,
      "learning_rate": 1.6107382550335572e-06,
      "loss": -2.1582,
      "step": 126
    },
    {
      "epoch": 133.0,
      "grad_norm": 3.4578599951853017,
      "learning_rate": 1.1409395973154363e-06,
      "loss": -2.2781,
      "step": 133
    },
    {
      "epoch": 140.0,
      "grad_norm": 3.4881306139410415,
      "learning_rate": 6.711409395973155e-07,
      "loss": -2.3618,
      "step": 140
    },
    {
      "epoch": 147.0,
      "grad_norm": 3.496334664070946,
      "learning_rate": 2.0134228187919465e-07,
      "loss": -2.4035,
      "step": 147
    }
  ],
  "logging_steps": 7,
  "max_steps": 150,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 150,
  "save_steps": 15,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}