{
  "best_metric": 0.7734330296516418,
  "best_model_checkpoint": "/content/drive/MyDrive/Colab Notebooks/output_model_v2/checkpoint-1500",
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 1844,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.10845986984815618,
      "grad_norm": 6.970224857330322,
      "learning_rate": 1.8915401301518438e-05,
      "loss": 1.137,
      "step": 100
    },
    {
      "epoch": 0.21691973969631237,
      "grad_norm": 8.724026679992676,
      "learning_rate": 1.7830802603036878e-05,
      "loss": 0.9432,
      "step": 200
    },
    {
      "epoch": 0.32537960954446854,
      "grad_norm": 6.079078674316406,
      "learning_rate": 1.6746203904555314e-05,
      "loss": 0.9348,
      "step": 300
    },
    {
      "epoch": 0.43383947939262474,
      "grad_norm": 8.428298950195312,
      "learning_rate": 1.5661605206073754e-05,
      "loss": 0.9072,
      "step": 400
    },
    {
      "epoch": 0.5422993492407809,
      "grad_norm": 7.575742721557617,
      "learning_rate": 1.4577006507592192e-05,
      "loss": 0.8489,
      "step": 500
    },
    {
      "epoch": 0.5422993492407809,
      "eval_loss": 0.8244659900665283,
      "eval_runtime": 5.151,
      "eval_samples_per_second": 39.798,
      "eval_steps_per_second": 19.996,
      "step": 500
    },
    {
      "epoch": 0.6507592190889371,
      "grad_norm": 5.566123962402344,
      "learning_rate": 1.349240780911063e-05,
      "loss": 0.8225,
      "step": 600
    },
    {
      "epoch": 0.7592190889370932,
      "grad_norm": 7.571514129638672,
      "learning_rate": 1.2407809110629067e-05,
      "loss": 0.8286,
      "step": 700
    },
    {
      "epoch": 0.8676789587852495,
      "grad_norm": 8.96743106842041,
      "learning_rate": 1.1323210412147507e-05,
      "loss": 0.8177,
      "step": 800
    },
    {
      "epoch": 0.9761388286334056,
      "grad_norm": 8.147588729858398,
      "learning_rate": 1.0238611713665945e-05,
      "loss": 0.8136,
      "step": 900
    },
    {
      "epoch": 1.0845986984815619,
      "grad_norm": 6.813974380493164,
      "learning_rate": 9.154013015184382e-06,
      "loss": 0.5992,
      "step": 1000
    },
    {
      "epoch": 1.0845986984815619,
      "eval_loss": 0.7819855213165283,
      "eval_runtime": 5.1024,
      "eval_samples_per_second": 40.178,
      "eval_steps_per_second": 20.187,
      "step": 1000
    },
    {
      "epoch": 1.1930585683297181,
      "grad_norm": 10.001449584960938,
      "learning_rate": 8.06941431670282e-06,
      "loss": 0.5184,
      "step": 1100
    },
    {
      "epoch": 1.3015184381778742,
      "grad_norm": 8.078487396240234,
      "learning_rate": 6.984815618221259e-06,
      "loss": 0.5503,
      "step": 1200
    },
    {
      "epoch": 1.4099783080260304,
      "grad_norm": 8.292068481445312,
      "learning_rate": 5.900216919739696e-06,
      "loss": 0.5137,
      "step": 1300
    },
    {
      "epoch": 1.5184381778741867,
      "grad_norm": 8.094161033630371,
      "learning_rate": 4.815618221258135e-06,
      "loss": 0.5263,
      "step": 1400
    },
    {
      "epoch": 1.6268980477223427,
      "grad_norm": 7.674520492553711,
      "learning_rate": 3.7310195227765728e-06,
      "loss": 0.5207,
      "step": 1500
    },
    {
      "epoch": 1.6268980477223427,
      "eval_loss": 0.7734330296516418,
      "eval_runtime": 5.104,
      "eval_samples_per_second": 40.165,
      "eval_steps_per_second": 20.18,
      "step": 1500
    },
    {
      "epoch": 1.735357917570499,
      "grad_norm": 6.029451847076416,
      "learning_rate": 2.6464208242950113e-06,
      "loss": 0.4959,
      "step": 1600
    },
    {
      "epoch": 1.8438177874186552,
      "grad_norm": 6.463768482208252,
      "learning_rate": 1.561822125813449e-06,
      "loss": 0.494,
      "step": 1700
    },
    {
      "epoch": 1.9522776572668112,
      "grad_norm": 6.956821918487549,
      "learning_rate": 4.772234273318872e-07,
      "loss": 0.5281,
      "step": 1800
    }
  ],
  "logging_steps": 100,
  "max_steps": 1844,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2262435467624448.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}