{
  "best_metric": 0.05933728814125061,
  "best_model_checkpoint": "/content/drive/MyDrive/Colab Notebooks/models/t5-small-codesearchnet-python/checkpoint-2625",
  "epoch": 13.0,
  "global_step": 4875,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "eval_avg_length": 17.1074,
      "eval_bleu": 0.0358,
      "eval_loss": 0.0800933688879013,
      "eval_rouge1": 0.6174,
      "eval_rouge2": 0.6,
      "eval_runtime": 148.3283,
      "eval_samples_per_second": 33.709,
      "eval_steps_per_second": 4.214,
      "step": 375
    },
    {
      "epoch": 1.33,
      "learning_rate": 0.011601821519434452,
      "loss": 1.6066,
      "step": 500
    },
    {
      "epoch": 2.0,
      "eval_avg_length": 17.0262,
      "eval_bleu": 0.036,
      "eval_loss": 0.06744839996099472,
      "eval_rouge1": 0.6249,
      "eval_rouge2": 0.6068,
      "eval_runtime": 143.9281,
      "eval_samples_per_second": 34.74,
      "eval_steps_per_second": 4.342,
      "step": 750
    },
    {
      "epoch": 2.67,
      "learning_rate": 0.02322324924170971,
      "loss": 0.0584,
      "step": 1000
    },
    {
      "epoch": 3.0,
      "eval_avg_length": 16.9962,
      "eval_bleu": 0.0351,
      "eval_loss": 0.06323764473199844,
      "eval_rouge1": 0.6255,
      "eval_rouge2": 0.6075,
      "eval_runtime": 142.3288,
      "eval_samples_per_second": 35.13,
      "eval_steps_per_second": 4.391,
      "step": 1125
    },
    {
      "epoch": 4.0,
      "learning_rate": 0.03487098217010498,
      "loss": 0.0484,
      "step": 1500
    },
    {
      "epoch": 4.0,
      "eval_avg_length": 17.003,
      "eval_bleu": 0.0351,
      "eval_loss": 0.06048833578824997,
      "eval_rouge1": 0.6251,
      "eval_rouge2": 0.6071,
      "eval_runtime": 142.2689,
      "eval_samples_per_second": 35.145,
      "eval_steps_per_second": 4.393,
      "step": 1500
    },
    {
      "epoch": 5.0,
      "eval_avg_length": 17.0012,
      "eval_bleu": 0.035,
      "eval_loss": 0.05961951985955238,
      "eval_rouge1": 0.6255,
      "eval_rouge2": 0.6075,
      "eval_runtime": 147.0583,
      "eval_samples_per_second": 34.0,
      "eval_steps_per_second": 4.25,
      "step": 1875
    },
    {
      "epoch": 5.33,
      "learning_rate": 0.04655396565794945,
      "loss": 0.0418,
      "step": 2000
    },
    {
      "epoch": 6.0,
      "eval_avg_length": 16.9958,
      "eval_bleu": 0.035,
      "eval_loss": 0.0601988285779953,
      "eval_rouge1": 0.6258,
      "eval_rouge2": 0.608,
      "eval_runtime": 140.3104,
      "eval_samples_per_second": 35.635,
      "eval_steps_per_second": 4.454,
      "step": 2250
    },
    {
      "epoch": 6.67,
      "learning_rate": 0.0582905150949955,
      "loss": 0.0377,
      "step": 2500
    },
    {
      "epoch": 7.0,
      "eval_avg_length": 17.0004,
      "eval_bleu": 0.0351,
      "eval_loss": 0.05933728814125061,
      "eval_rouge1": 0.6259,
      "eval_rouge2": 0.6079,
      "eval_runtime": 143.9238,
      "eval_samples_per_second": 34.741,
      "eval_steps_per_second": 4.343,
      "step": 2625
    },
    {
      "epoch": 8.0,
      "learning_rate": 0.07008866220712662,
      "loss": 0.033,
      "step": 3000
    },
    {
      "epoch": 8.0,
      "eval_avg_length": 17.0032,
      "eval_bleu": 0.035,
      "eval_loss": 0.06179109960794449,
      "eval_rouge1": 0.6257,
      "eval_rouge2": 0.6078,
      "eval_runtime": 140.711,
      "eval_samples_per_second": 35.534,
      "eval_steps_per_second": 4.442,
      "step": 3000
    },
    {
      "epoch": 9.0,
      "eval_avg_length": 16.998,
      "eval_bleu": 0.035,
      "eval_loss": 0.06367889791727066,
      "eval_rouge1": 0.6257,
      "eval_rouge2": 0.6078,
      "eval_runtime": 140.8974,
      "eval_samples_per_second": 35.487,
      "eval_steps_per_second": 4.436,
      "step": 3375
    },
    {
      "epoch": 9.33,
      "learning_rate": 0.08194742351770401,
      "loss": 0.028,
      "step": 3500
    },
    {
      "epoch": 10.0,
      "eval_avg_length": 16.9984,
      "eval_bleu": 0.035,
      "eval_loss": 0.06447886675596237,
      "eval_rouge1": 0.6257,
      "eval_rouge2": 0.6079,
      "eval_runtime": 145.6234,
      "eval_samples_per_second": 34.335,
      "eval_steps_per_second": 4.292,
      "step": 3750
    },
    {
      "epoch": 10.67,
      "learning_rate": 0.0939134731888771,
      "loss": 0.0255,
      "step": 4000
    },
    {
      "epoch": 11.0,
      "eval_avg_length": 17.0008,
      "eval_bleu": 0.035,
      "eval_loss": 0.06500716507434845,
      "eval_rouge1": 0.6255,
      "eval_rouge2": 0.6078,
      "eval_runtime": 142.8222,
      "eval_samples_per_second": 35.009,
      "eval_steps_per_second": 4.376,
      "step": 4125
    },
    {
      "epoch": 12.0,
      "learning_rate": 0.10600020736455917,
      "loss": 0.0226,
      "step": 4500
    },
    {
      "epoch": 12.0,
      "eval_avg_length": 16.9976,
      "eval_bleu": 0.035,
      "eval_loss": 0.07480558753013611,
      "eval_rouge1": 0.6254,
      "eval_rouge2": 0.6076,
      "eval_runtime": 140.6294,
      "eval_samples_per_second": 35.554,
      "eval_steps_per_second": 4.444,
      "step": 4500
    },
    {
      "epoch": 13.0,
      "eval_avg_length": 16.9954,
      "eval_bleu": 0.035,
      "eval_loss": 0.07140845060348511,
      "eval_rouge1": 0.6256,
      "eval_rouge2": 0.6079,
      "eval_runtime": 141.0049,
      "eval_samples_per_second": 35.46,
      "eval_steps_per_second": 4.432,
      "step": 4875
    }
  ],
  "max_steps": 5625,
  "num_train_epochs": 15,
  "total_flos": 2.639165128704e+16,
  "trial_name": null,
  "trial_params": null
}