{
  "best_metric": 0.9298920007321985,
  "best_model_checkpoint": "./fp32/models/qnli-roberta-base/checkpoint-9000",
  "epoch": 6.108735491753207,
  "global_step": 20000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.31,
      "learning_rate": 5.0890585241730285e-06,
      "loss": 0.5591,
      "step": 1000
    },
    {
      "epoch": 0.31,
      "eval_accuracy": 0.8694856306058942,
      "eval_loss": 0.31558236479759216,
      "eval_runtime": 21.239,
      "eval_samples_per_second": 257.216,
      "eval_steps_per_second": 8.051,
      "step": 1000
    },
    {
      "epoch": 0.61,
      "learning_rate": 9.988627132412675e-06,
      "loss": 0.3512,
      "step": 2000
    },
    {
      "epoch": 0.61,
      "eval_accuracy": 0.9007871133077063,
      "eval_loss": 0.2616254389286041,
      "eval_runtime": 15.8012,
      "eval_samples_per_second": 345.733,
      "eval_steps_per_second": 10.822,
      "step": 2000
    },
    {
      "epoch": 0.92,
      "learning_rate": 9.663688058489034e-06,
      "loss": 0.3075,
      "step": 3000
    },
    {
      "epoch": 0.92,
      "eval_accuracy": 0.9126853377265239,
      "eval_loss": 0.23381730914115906,
      "eval_runtime": 15.8636,
      "eval_samples_per_second": 344.372,
      "eval_steps_per_second": 10.779,
      "step": 3000
    },
    {
      "epoch": 1.22,
      "learning_rate": 9.338748984565394e-06,
      "loss": 0.2678,
      "step": 4000
    },
    {
      "epoch": 1.22,
      "eval_accuracy": 0.920190371590701,
      "eval_loss": 0.22113902866840363,
      "eval_runtime": 18.4732,
      "eval_samples_per_second": 295.725,
      "eval_steps_per_second": 9.257,
      "step": 4000
    },
    {
      "epoch": 1.53,
      "learning_rate": 9.013809910641755e-06,
      "loss": 0.2453,
      "step": 5000
    },
    {
      "epoch": 1.53,
      "eval_accuracy": 0.9103056928427604,
      "eval_loss": 0.221624493598938,
      "eval_runtime": 15.1776,
      "eval_samples_per_second": 359.939,
      "eval_steps_per_second": 11.267,
      "step": 5000
    },
    {
      "epoch": 1.83,
      "learning_rate": 8.688870836718116e-06,
      "loss": 0.2364,
      "step": 6000
    },
    {
      "epoch": 1.83,
      "eval_accuracy": 0.923302214900238,
      "eval_loss": 0.20321357250213623,
      "eval_runtime": 14.6843,
      "eval_samples_per_second": 372.03,
      "eval_steps_per_second": 11.645,
      "step": 6000
    },
    {
      "epoch": 2.14,
      "learning_rate": 8.363931762794477e-06,
      "loss": 0.2109,
      "step": 7000
    },
    {
      "epoch": 2.14,
      "eval_accuracy": 0.9240344133260113,
      "eval_loss": 0.20744949579238892,
      "eval_runtime": 15.2273,
      "eval_samples_per_second": 358.765,
      "eval_steps_per_second": 11.23,
      "step": 7000
    },
    {
      "epoch": 2.44,
      "learning_rate": 8.038992688870837e-06,
      "loss": 0.1861,
      "step": 8000
    },
    {
      "epoch": 2.44,
      "eval_accuracy": 0.9253157605711148,
      "eval_loss": 0.22822654247283936,
      "eval_runtime": 14.5847,
      "eval_samples_per_second": 374.572,
      "eval_steps_per_second": 11.725,
      "step": 8000
    },
    {
      "epoch": 2.75,
      "learning_rate": 7.714053614947198e-06,
      "loss": 0.1857,
      "step": 9000
    },
    {
      "epoch": 2.75,
      "eval_accuracy": 0.9298920007321985,
      "eval_loss": 0.20272189378738403,
      "eval_runtime": 14.8122,
      "eval_samples_per_second": 368.818,
      "eval_steps_per_second": 11.545,
      "step": 9000
    },
    {
      "epoch": 3.05,
      "learning_rate": 7.389114541023559e-06,
      "loss": 0.1741,
      "step": 10000
    },
    {
      "epoch": 3.05,
      "eval_accuracy": 0.9174446274940509,
      "eval_loss": 0.2495354562997818,
      "eval_runtime": 14.8558,
      "eval_samples_per_second": 367.736,
      "eval_steps_per_second": 11.511,
      "step": 10000
    },
    {
      "epoch": 3.36,
      "learning_rate": 7.0641754670999195e-06,
      "loss": 0.1442,
      "step": 11000
    },
    {
      "epoch": 3.36,
      "eval_accuracy": 0.9242174629324547,
      "eval_loss": 0.2571660578250885,
      "eval_runtime": 18.4719,
      "eval_samples_per_second": 295.746,
      "eval_steps_per_second": 9.257,
      "step": 11000
    },
    {
      "epoch": 3.67,
      "learning_rate": 6.73923639317628e-06,
      "loss": 0.1453,
      "step": 12000
    },
    {
      "epoch": 3.67,
      "eval_accuracy": 0.9256818597840014,
      "eval_loss": 0.23131424188613892,
      "eval_runtime": 14.7976,
      "eval_samples_per_second": 369.181,
      "eval_steps_per_second": 11.556,
      "step": 12000
    },
    {
      "epoch": 3.97,
      "learning_rate": 6.41429731925264e-06,
      "loss": 0.1458,
      "step": 13000
    },
    {
      "epoch": 3.97,
      "eval_accuracy": 0.923851363719568,
      "eval_loss": 0.25046634674072266,
      "eval_runtime": 14.7742,
      "eval_samples_per_second": 369.767,
      "eval_steps_per_second": 11.574,
      "step": 13000
    },
    {
      "epoch": 4.28,
      "learning_rate": 6.089358245329001e-06,
      "loss": 0.1195,
      "step": 14000
    },
    {
      "epoch": 4.28,
      "eval_accuracy": 0.923851363719568,
      "eval_loss": 0.28182199597358704,
      "eval_runtime": 14.6725,
      "eval_samples_per_second": 372.328,
      "eval_steps_per_second": 11.654,
      "step": 14000
    },
    {
      "epoch": 4.58,
      "learning_rate": 5.7644191714053624e-06,
      "loss": 0.1209,
      "step": 15000
    },
    {
      "epoch": 4.58,
      "eval_accuracy": 0.9223869668680212,
      "eval_loss": 0.293856680393219,
      "eval_runtime": 15.1476,
      "eval_samples_per_second": 360.65,
      "eval_steps_per_second": 11.289,
      "step": 15000
    },
    {
      "epoch": 4.89,
      "learning_rate": 5.439480097481723e-06,
      "loss": 0.12,
      "step": 16000
    },
    {
      "epoch": 4.89,
      "eval_accuracy": 0.9245835621453414,
      "eval_loss": 0.28331321477890015,
      "eval_runtime": 17.008,
      "eval_samples_per_second": 321.202,
      "eval_steps_per_second": 10.054,
      "step": 16000
    },
    {
      "epoch": 5.19,
      "learning_rate": 5.114541023558083e-06,
      "loss": 0.1086,
      "step": 17000
    },
    {
      "epoch": 5.19,
      "eval_accuracy": 0.9249496613582281,
      "eval_loss": 0.3361791968345642,
      "eval_runtime": 16.1915,
      "eval_samples_per_second": 337.399,
      "eval_steps_per_second": 10.561,
      "step": 17000
    },
    {
      "epoch": 5.5,
      "learning_rate": 4.789601949634444e-06,
      "loss": 0.0991,
      "step": 18000
    },
    {
      "epoch": 5.5,
      "eval_accuracy": 0.923302214900238,
      "eval_loss": 0.3357701301574707,
      "eval_runtime": 15.4017,
      "eval_samples_per_second": 354.702,
      "eval_steps_per_second": 11.103,
      "step": 18000
    },
    {
      "epoch": 5.8,
      "learning_rate": 4.4646628757108045e-06,
      "loss": 0.1033,
      "step": 19000
    },
    {
      "epoch": 5.8,
      "eval_accuracy": 0.9264140582097748,
      "eval_loss": 0.36776551604270935,
      "eval_runtime": 18.05,
      "eval_samples_per_second": 302.659,
      "eval_steps_per_second": 9.474,
      "step": 19000
    },
    {
      "epoch": 6.11,
      "learning_rate": 4.139723801787165e-06,
      "loss": 0.0952,
      "step": 20000
    },
    {
      "epoch": 6.11,
      "eval_accuracy": 0.9267801574226615,
      "eval_loss": 0.36395037174224854,
      "eval_runtime": 20.1267,
      "eval_samples_per_second": 271.431,
      "eval_steps_per_second": 8.496,
      "step": 20000
    }
  ],
  "max_steps": 32740,
  "num_train_epochs": 10,
  "total_flos": 4.2087902193024e+16,
  "trial_name": null,
  "trial_params": null
}