{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.504424778761062,
  "eval_steps": 500,
  "global_step": 170,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.08849557522123894,
      "grad_norm": 1.3407914638519287,
      "learning_rate": 9.932445757212674e-05,
      "loss": 1.4061,
      "step": 10
    },
    {
      "epoch": 0.17699115044247787,
      "grad_norm": 3.8564281463623047,
      "learning_rate": 9.728612009803784e-05,
      "loss": 1.1197,
      "step": 20
    },
    {
      "epoch": 0.26548672566371684,
      "grad_norm": 0.808464765548706,
      "learning_rate": 9.349259596987303e-05,
      "loss": 0.9867,
      "step": 30
    },
    {
      "epoch": 0.35398230088495575,
      "grad_norm": 2.220690965652466,
      "learning_rate": 8.821624462612464e-05,
      "loss": 0.8487,
      "step": 40
    },
    {
      "epoch": 0.4424778761061947,
      "grad_norm": 1.97934889793396,
      "learning_rate": 8.163695685502958e-05,
      "loss": 0.9099,
      "step": 50
    },
    {
      "epoch": 0.5309734513274337,
      "grad_norm": 2.3614091873168945,
      "learning_rate": 7.397904547923231e-05,
      "loss": 0.8382,
      "step": 60
    },
    {
      "epoch": 0.6194690265486725,
      "grad_norm": 8.190383911132812,
      "learning_rate": 6.550359768263936e-05,
      "loss": 0.821,
      "step": 70
    },
    {
      "epoch": 0.7079646017699115,
      "grad_norm": 0.7417541146278381,
      "learning_rate": 5.6499573560108075e-05,
      "loss": 0.8105,
      "step": 80
    },
    {
      "epoch": 0.7964601769911505,
      "grad_norm": 1.3598263263702393,
      "learning_rate": 4.727395437410226e-05,
      "loss": 0.8244,
      "step": 90
    },
    {
      "epoch": 0.8849557522123894,
      "grad_norm": 1.6104731559753418,
      "learning_rate": 3.8141276401534344e-05,
      "loss": 0.7157,
      "step": 100
    },
    {
      "epoch": 0.9734513274336283,
      "grad_norm": 0.7015905976295471,
      "learning_rate": 2.9412907201569206e-05,
      "loss": 0.7185,
      "step": 110
    },
    {
      "epoch": 1.0619469026548674,
      "grad_norm": 0.4986812174320221,
      "learning_rate": 2.1386429917009705e-05,
      "loss": 0.7282,
      "step": 120
    },
    {
      "epoch": 1.1504424778761062,
      "grad_norm": 0.6439129710197449,
      "learning_rate": 1.43354975386098e-05,
      "loss": 0.7505,
      "step": 130
    },
    {
      "epoch": 1.238938053097345,
      "grad_norm": 0.6853252053260803,
      "learning_rate": 8.500503038846292e-06,
      "loss": 0.7052,
      "step": 140
    },
    {
      "epoch": 1.3274336283185841,
      "grad_norm": 0.5834405422210693,
      "learning_rate": 4.080383465602533e-06,
      "loss": 0.7423,
      "step": 150
    },
    {
      "epoch": 1.415929203539823,
      "grad_norm": 0.48211467266082764,
      "learning_rate": 1.2258374252316407e-06,
      "loss": 0.7514,
      "step": 160
    },
    {
      "epoch": 1.504424778761062,
      "grad_norm": 3.9493844509124756,
      "learning_rate": 3.418719667347969e-08,
      "loss": 0.604,
      "step": 170
    },
    {
      "epoch": 1.504424778761062,
      "step": 170,
      "total_flos": 8891638258335744.0,
      "train_loss": 0.8400665844188017,
      "train_runtime": 143.8414,
      "train_samples_per_second": 9.385,
      "train_steps_per_second": 1.182
    }
  ],
  "logging_steps": 10,
  "max_steps": 170,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 500,
  "total_flos": 8891638258335744.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}