{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9904761904761905,
  "eval_steps": 500,
  "global_step": 39,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.12698412698412698,
      "grad_norm": 0.29794585704803467,
      "learning_rate": 2.0833333333333336e-05,
      "loss": 3.8299,
      "step": 5
    },
    {
      "epoch": 0.25396825396825395,
      "grad_norm": 0.4180847704410553,
      "learning_rate": 4.166666666666667e-05,
      "loss": 3.8377,
      "step": 10
    },
    {
      "epoch": 0.38095238095238093,
      "grad_norm": 0.602063775062561,
      "learning_rate": 4.989935734988098e-05,
      "loss": 3.7329,
      "step": 15
    },
    {
      "epoch": 0.5079365079365079,
      "grad_norm": 0.7148785591125488,
      "learning_rate": 4.928725095732169e-05,
      "loss": 3.2317,
      "step": 20
    },
    {
      "epoch": 0.6349206349206349,
      "grad_norm": 0.695216953754425,
      "learning_rate": 4.813260751184992e-05,
      "loss": 2.8175,
      "step": 25
    },
    {
      "epoch": 0.7619047619047619,
      "grad_norm": 0.690677285194397,
      "learning_rate": 4.6461219840046654e-05,
      "loss": 2.4144,
      "step": 30
    },
    {
      "epoch": 0.8888888888888888,
      "grad_norm": 0.6858775615692139,
      "learning_rate": 4.431042398061499e-05,
      "loss": 2.0554,
      "step": 35
    }
  ],
  "logging_steps": 5,
  "max_steps": 117,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 39,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3369067842895872.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}
|
|