{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.40404040404040403,
  "eval_steps": 500,
  "global_step": 5,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.08080808080808081,
      "grad_norm": 2.130047123856854,
      "learning_rate": 0.0,
      "loss": 0.5133,
      "num_tokens": 6257944.0,
      "step": 1
    },
    {
      "epoch": 0.16161616161616163,
      "grad_norm": 2.17073479809882,
      "learning_rate": 4e-05,
      "loss": 0.5305,
      "num_tokens": 12505814.0,
      "step": 2
    },
    {
      "epoch": 0.24242424242424243,
      "grad_norm": 4.710405368676624,
      "learning_rate": 3.472792206135786e-05,
      "loss": 0.5265,
      "num_tokens": 18780610.0,
      "step": 3
    },
    {
      "epoch": 0.32323232323232326,
      "grad_norm": 2.605199562517092,
      "learning_rate": 2.2000000000000003e-05,
      "loss": 0.5326,
      "num_tokens": 25023466.0,
      "step": 4
    },
    {
      "epoch": 0.40404040404040403,
      "grad_norm": 1.5058401495536415,
      "learning_rate": 9.272077938642147e-06,
      "loss": 0.5206,
      "num_tokens": 31265374.0,
      "step": 5
    },
    {
      "epoch": 0.40404040404040403,
      "step": 5,
      "total_flos": 1.2788518956407194e+17,
      "train_loss": 0.5247070074081421,
      "train_runtime": 1231.7554,
      "train_samples_per_second": 0.779,
      "train_steps_per_second": 0.004
    }
  ],
  "logging_steps": 1,
  "max_steps": 5,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.2788518956407194e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}