{
  "best_metric": 0.6800228357315063,
  "best_model_checkpoint": "/content/drive/MyDrive/Colab Notebooks/models/t5-small-codesearchnet-multilang-python-java/checkpoint-4125",
  "epoch": 12.0,
  "global_step": 4500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "eval_avg_length": 16.3976,
      "eval_bleu": 0.0013,
      "eval_loss": 0.9005016684532166,
      "eval_rouge1": 0.1397,
      "eval_rouge2": 0.0334,
      "eval_runtime": 159.8289,
      "eval_samples_per_second": 31.283,
      "eval_steps_per_second": 3.91,
      "step": 375
    },
    {
      "epoch": 1.33,
      "learning_rate": 0.011611906811594963,
      "loss": 2.3568,
      "step": 500
    },
    {
      "epoch": 2.0,
      "eval_avg_length": 15.8896,
      "eval_bleu": 0.0023,
      "eval_loss": 0.8036138415336609,
      "eval_rouge1": 0.1737,
      "eval_rouge2": 0.0526,
      "eval_runtime": 164.0069,
      "eval_samples_per_second": 30.487,
      "eval_steps_per_second": 3.811,
      "step": 750
    },
    {
      "epoch": 2.67,
      "learning_rate": 0.023264221847057343,
      "loss": 0.7576,
      "step": 1000
    },
    {
      "epoch": 3.0,
      "eval_avg_length": 15.3102,
      "eval_bleu": 0.0021,
      "eval_loss": 0.7583827972412109,
      "eval_rouge1": 0.1856,
      "eval_rouge2": 0.0558,
      "eval_runtime": 167.7038,
      "eval_samples_per_second": 29.814,
      "eval_steps_per_second": 3.727,
      "step": 1125
    },
    {
      "epoch": 4.0,
      "learning_rate": 0.03498469293117523,
      "loss": 0.6778,
      "step": 1500
    },
    {
      "epoch": 4.0,
      "eval_avg_length": 15.3544,
      "eval_bleu": 0.0024,
      "eval_loss": 0.7298200130462646,
      "eval_rouge1": 0.1922,
      "eval_rouge2": 0.0597,
      "eval_runtime": 168.897,
      "eval_samples_per_second": 29.604,
      "eval_steps_per_second": 3.7,
      "step": 1500
    },
    {
      "epoch": 5.0,
      "eval_avg_length": 15.7588,
      "eval_bleu": 0.0037,
      "eval_loss": 0.7113876938819885,
      "eval_rouge1": 0.2114,
      "eval_rouge2": 0.0704,
      "eval_runtime": 172.1098,
      "eval_samples_per_second": 29.051,
      "eval_steps_per_second": 3.631,
      "step": 1875
    },
    {
      "epoch": 5.33,
      "learning_rate": 0.046804994344711304,
      "loss": 0.6206,
      "step": 2000
    },
    {
      "epoch": 6.0,
      "eval_avg_length": 15.8088,
      "eval_bleu": 0.0039,
      "eval_loss": 0.6949090957641602,
      "eval_rouge1": 0.2093,
      "eval_rouge2": 0.0729,
      "eval_runtime": 165.5542,
      "eval_samples_per_second": 30.202,
      "eval_steps_per_second": 3.775,
      "step": 2250
    },
    {
      "epoch": 6.67,
      "learning_rate": 0.05876593291759491,
      "loss": 0.5856,
      "step": 2500
    },
    {
      "epoch": 7.0,
      "eval_avg_length": 16.5838,
      "eval_bleu": 0.0042,
      "eval_loss": 0.6927398443222046,
      "eval_rouge1": 0.2143,
      "eval_rouge2": 0.0711,
      "eval_runtime": 164.6989,
      "eval_samples_per_second": 30.358,
      "eval_steps_per_second": 3.795,
      "step": 2625
    },
    {
      "epoch": 8.0,
      "learning_rate": 0.07091041654348373,
      "loss": 0.5447,
      "step": 3000
    },
    {
      "epoch": 8.0,
      "eval_avg_length": 17.2174,
      "eval_bleu": 0.005,
      "eval_loss": 0.6866663694381714,
      "eval_rouge1": 0.2151,
      "eval_rouge2": 0.0717,
      "eval_runtime": 163.5142,
      "eval_samples_per_second": 30.578,
      "eval_steps_per_second": 3.822,
      "step": 3000
    },
    {
      "epoch": 9.0,
      "eval_avg_length": 16.1068,
      "eval_bleu": 0.0043,
      "eval_loss": 0.689474880695343,
      "eval_rouge1": 0.2179,
      "eval_rouge2": 0.0736,
      "eval_runtime": 161.9004,
      "eval_samples_per_second": 30.883,
      "eval_steps_per_second": 3.86,
      "step": 3375
    },
    {
      "epoch": 9.33,
      "learning_rate": 0.08324738591909409,
      "loss": 0.5117,
      "step": 3500
    },
    {
      "epoch": 10.0,
      "eval_avg_length": 15.5094,
      "eval_bleu": 0.0038,
      "eval_loss": 0.6876339912414551,
      "eval_rouge1": 0.2229,
      "eval_rouge2": 0.0777,
      "eval_runtime": 168.6323,
      "eval_samples_per_second": 29.65,
      "eval_steps_per_second": 3.706,
      "step": 3750
    },
    {
      "epoch": 10.67,
      "learning_rate": 0.09585673362016678,
      "loss": 0.4892,
      "step": 4000
    },
    {
      "epoch": 11.0,
      "eval_avg_length": 16.6902,
      "eval_bleu": 0.0047,
      "eval_loss": 0.6800228357315063,
      "eval_rouge1": 0.2201,
      "eval_rouge2": 0.0783,
      "eval_runtime": 164.6191,
      "eval_samples_per_second": 30.373,
      "eval_steps_per_second": 3.797,
      "step": 4125
    },
    {
      "epoch": 12.0,
      "learning_rate": 0.10878779739141464,
      "loss": 0.4629,
      "step": 4500
    },
    {
      "epoch": 12.0,
      "eval_avg_length": 16.7658,
      "eval_bleu": 0.0047,
      "eval_loss": 0.6902568340301514,
      "eval_rouge1": 0.2203,
      "eval_rouge2": 0.0771,
      "eval_runtime": 167.779,
      "eval_samples_per_second": 29.801,
      "eval_steps_per_second": 3.725,
      "step": 4500
    }
  ],
  "max_steps": 5625,
  "num_train_epochs": 15,
  "total_flos": 2.436152426496e+16,
  "trial_name": null,
  "trial_params": null
}