{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.5037783375314862,
  "eval_steps": 500,
  "global_step": 200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02518891687657431,
      "grad_norm": 3.1443984508514404,
      "learning_rate": 0.0001,
      "loss": 0.6349,
      "step": 10
    },
    {
      "epoch": 0.05037783375314862,
      "grad_norm": 1.0879027843475342,
      "learning_rate": 9.931806517013612e-05,
      "loss": 0.1773,
      "step": 20
    },
    {
      "epoch": 0.07556675062972293,
      "grad_norm": 0.6599847674369812,
      "learning_rate": 9.729086208503174e-05,
      "loss": 0.0946,
      "step": 30
    },
    {
      "epoch": 0.10075566750629723,
      "grad_norm": 0.3650006353855133,
      "learning_rate": 9.397368756032445e-05,
      "loss": 0.0665,
      "step": 40
    },
    {
      "epoch": 0.12594458438287154,
      "grad_norm": 0.45757368206977844,
      "learning_rate": 8.945702546981969e-05,
      "loss": 0.0585,
      "step": 50
    },
    {
      "epoch": 0.15113350125944586,
      "grad_norm": 0.5691215991973877,
      "learning_rate": 8.386407858128706e-05,
      "loss": 0.0515,
      "step": 60
    },
    {
      "epoch": 0.17632241813602015,
      "grad_norm": 0.5051501989364624,
      "learning_rate": 7.734740790612136e-05,
      "loss": 0.0498,
      "step": 70
    },
    {
      "epoch": 0.20151133501259447,
      "grad_norm": 0.6604690551757812,
      "learning_rate": 7.008477123264848e-05,
      "loss": 0.0449,
      "step": 80
    },
    {
      "epoch": 0.22670025188916876,
      "grad_norm": 0.18909719586372375,
      "learning_rate": 6.227427435703997e-05,
      "loss": 0.0373,
      "step": 90
    },
    {
      "epoch": 0.2518891687657431,
      "grad_norm": 0.3792904019355774,
      "learning_rate": 5.4128967273616625e-05,
      "loss": 0.0411,
      "step": 100
    },
    {
      "epoch": 0.2770780856423174,
      "grad_norm": 0.4075814187526703,
      "learning_rate": 4.5871032726383386e-05,
      "loss": 0.0338,
      "step": 110
    },
    {
      "epoch": 0.3022670025188917,
      "grad_norm": 0.35018792748451233,
      "learning_rate": 3.772572564296005e-05,
      "loss": 0.0319,
      "step": 120
    },
    {
      "epoch": 0.327455919395466,
      "grad_norm": 0.43255582451820374,
      "learning_rate": 2.991522876735154e-05,
      "loss": 0.0331,
      "step": 130
    },
    {
      "epoch": 0.3526448362720403,
      "grad_norm": 0.4935697615146637,
      "learning_rate": 2.2652592093878666e-05,
      "loss": 0.0336,
      "step": 140
    },
    {
      "epoch": 0.3778337531486146,
      "grad_norm": 0.40300703048706055,
      "learning_rate": 1.6135921418712956e-05,
      "loss": 0.0295,
      "step": 150
    },
    {
      "epoch": 0.40302267002518893,
      "grad_norm": 0.201125368475914,
      "learning_rate": 1.0542974530180327e-05,
      "loss": 0.0281,
      "step": 160
    },
    {
      "epoch": 0.4282115869017632,
      "grad_norm": 0.28014999628067017,
      "learning_rate": 6.026312439675552e-06,
      "loss": 0.0277,
      "step": 170
    },
    {
      "epoch": 0.4534005037783375,
      "grad_norm": 0.2704128324985504,
      "learning_rate": 2.7091379149682685e-06,
      "loss": 0.0266,
      "step": 180
    },
    {
      "epoch": 0.47858942065491183,
      "grad_norm": 0.3430611491203308,
      "learning_rate": 6.819348298638839e-07,
      "loss": 0.0293,
      "step": 190
    },
    {
      "epoch": 0.5037783375314862,
      "grad_norm": 0.2024984359741211,
      "learning_rate": 0.0,
      "loss": 0.0267,
      "step": 200
    }
  ],
  "logging_steps": 10,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3.19231251712512e+16,
  "train_batch_size": 24,
  "trial_name": null,
  "trial_params": null
}