{
  "best_metric": 0.2977927625179291,
  "best_model_checkpoint": "./vit-base-beans/checkpoint-1280",
  "epoch": 3.963963963963964,
  "global_step": 1320,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03,
      "learning_rate": 0.0001984984984984985,
      "loss": 3.6277,
      "step": 10
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.00019699699699699701,
      "loss": 3.5588,
      "step": 20
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.0001954954954954955,
      "loss": 3.4115,
      "step": 30
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.00019399399399399402,
      "loss": 3.2498,
      "step": 40
    },
    {
      "epoch": 0.12,
      "eval_accuracy": 0.22024983563445102,
      "eval_loss": 3.1079554557800293,
      "eval_runtime": 25.4224,
      "eval_samples_per_second": 59.829,
      "eval_steps_per_second": 7.513,
      "step": 40
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.0001924924924924925,
      "loss": 2.967,
      "step": 50
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.000190990990990991,
      "loss": 2.8969,
      "step": 60
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.0001894894894894895,
      "loss": 2.7153,
      "step": 70
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.000187987987987988,
      "loss": 2.5076,
      "step": 80
    },
    {
      "epoch": 0.24,
      "eval_accuracy": 0.4339250493096647,
      "eval_loss": 2.4336094856262207,
      "eval_runtime": 25.9183,
      "eval_samples_per_second": 58.684,
      "eval_steps_per_second": 7.369,
      "step": 80
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.0001864864864864865,
      "loss": 2.3504,
      "step": 90
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.000184984984984985,
      "loss": 2.2198,
      "step": 100
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.0001834834834834835,
      "loss": 2.1227,
      "step": 110
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.000181981981981982,
      "loss": 1.9345,
      "step": 120
    },
    {
      "epoch": 0.36,
      "eval_accuracy": 0.6226166995397765,
      "eval_loss": 1.8898930549621582,
      "eval_runtime": 25.7004,
      "eval_samples_per_second": 59.182,
      "eval_steps_per_second": 7.432,
      "step": 120
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.0001804804804804805,
      "loss": 1.8725,
      "step": 130
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.00017897897897897898,
      "loss": 1.6834,
      "step": 140
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.0001774774774774775,
      "loss": 1.6183,
      "step": 150
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.000175975975975976,
      "loss": 1.4224,
      "step": 160
    },
    {
      "epoch": 0.48,
      "eval_accuracy": 0.673241288625904,
      "eval_loss": 1.5380345582962036,
      "eval_runtime": 26.3631,
      "eval_samples_per_second": 57.694,
      "eval_steps_per_second": 7.245,
      "step": 160
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.0001744744744744745,
      "loss": 1.5553,
      "step": 170
    },
    {
      "epoch": 0.54,
      "learning_rate": 0.000172972972972973,
      "loss": 1.3173,
      "step": 180
    },
    {
      "epoch": 0.57,
      "learning_rate": 0.00017147147147147148,
      "loss": 1.2917,
      "step": 190
    },
    {
      "epoch": 0.6,
      "learning_rate": 0.00016996996996997,
      "loss": 1.2626,
      "step": 200
    },
    {
      "epoch": 0.6,
      "eval_accuracy": 0.7370151216305062,
      "eval_loss": 1.2467007637023926,
      "eval_runtime": 25.735,
      "eval_samples_per_second": 59.102,
      "eval_steps_per_second": 7.422,
      "step": 200
    },
    {
      "epoch": 0.63,
      "learning_rate": 0.00016846846846846846,
      "loss": 1.0941,
      "step": 210
    },
    {
      "epoch": 0.66,
      "learning_rate": 0.00016696696696696697,
      "loss": 1.1499,
      "step": 220
    },
    {
      "epoch": 0.69,
      "learning_rate": 0.00016546546546546546,
      "loss": 1.085,
      "step": 230
    },
    {
      "epoch": 0.72,
      "learning_rate": 0.00016396396396396395,
      "loss": 1.0447,
      "step": 240
    },
    {
      "epoch": 0.72,
      "eval_accuracy": 0.7633136094674556,
      "eval_loss": 1.0867702960968018,
      "eval_runtime": 25.9624,
      "eval_samples_per_second": 58.585,
      "eval_steps_per_second": 7.357,
      "step": 240
    },
    {
      "epoch": 0.75,
      "learning_rate": 0.00016246246246246247,
      "loss": 0.9169,
      "step": 250
    },
    {
      "epoch": 0.78,
      "learning_rate": 0.00016096096096096096,
      "loss": 1.0707,
      "step": 260
    },
    {
      "epoch": 0.81,
      "learning_rate": 0.00015945945945945947,
      "loss": 0.8938,
      "step": 270
    },
    {
      "epoch": 0.84,
      "learning_rate": 0.00015795795795795796,
      "loss": 0.9403,
      "step": 280
    },
    {
      "epoch": 0.84,
      "eval_accuracy": 0.8007889546351085,
      "eval_loss": 0.8639808297157288,
      "eval_runtime": 25.817,
      "eval_samples_per_second": 58.915,
      "eval_steps_per_second": 7.398,
      "step": 280
    },
    {
      "epoch": 0.87,
      "learning_rate": 0.00015645645645645645,
      "loss": 0.8592,
      "step": 290
    },
    {
      "epoch": 0.9,
      "learning_rate": 0.00015495495495495496,
      "loss": 0.9383,
      "step": 300
    },
    {
      "epoch": 0.93,
      "learning_rate": 0.00015345345345345345,
      "loss": 0.8804,
      "step": 310
    },
    {
      "epoch": 0.96,
      "learning_rate": 0.00015195195195195194,
      "loss": 0.7259,
      "step": 320
    },
    {
      "epoch": 0.96,
      "eval_accuracy": 0.8198553583168968,
      "eval_loss": 0.7540761828422546,
      "eval_runtime": 25.7,
      "eval_samples_per_second": 59.183,
      "eval_steps_per_second": 7.432,
      "step": 320
    },
    {
      "epoch": 0.99,
      "learning_rate": 0.00015045045045045046,
      "loss": 0.7308,
      "step": 330
    },
    {
      "epoch": 1.02,
      "learning_rate": 0.00014894894894894895,
      "loss": 0.6628,
      "step": 340
    },
    {
      "epoch": 1.05,
      "learning_rate": 0.00014744744744744746,
      "loss": 0.6611,
      "step": 350
    },
    {
      "epoch": 1.08,
      "learning_rate": 0.00014594594594594595,
      "loss": 0.7276,
      "step": 360
    },
    {
      "epoch": 1.08,
      "eval_accuracy": 0.8500986193293886,
      "eval_loss": 0.6682031154632568,
      "eval_runtime": 26.3636,
      "eval_samples_per_second": 57.693,
      "eval_steps_per_second": 7.245,
      "step": 360
    },
    {
      "epoch": 1.11,
      "learning_rate": 0.00014444444444444444,
      "loss": 0.6113,
      "step": 370
    },
    {
      "epoch": 1.14,
      "learning_rate": 0.00014294294294294295,
      "loss": 0.545,
      "step": 380
    },
    {
      "epoch": 1.17,
      "learning_rate": 0.00014144144144144144,
      "loss": 0.682,
      "step": 390
    },
    {
      "epoch": 1.2,
      "learning_rate": 0.00013993993993993996,
      "loss": 0.5643,
      "step": 400
    },
    {
      "epoch": 1.2,
      "eval_accuracy": 0.8303747534516766,
      "eval_loss": 0.6452277302742004,
      "eval_runtime": 25.857,
      "eval_samples_per_second": 58.824,
      "eval_steps_per_second": 7.387,
      "step": 400
    },
    {
      "epoch": 1.23,
      "learning_rate": 0.00013843843843843845,
      "loss": 0.5002,
      "step": 410
    },
    {
      "epoch": 1.26,
      "learning_rate": 0.00013693693693693693,
      "loss": 0.5067,
      "step": 420
    },
    {
      "epoch": 1.29,
      "learning_rate": 0.00013543543543543545,
      "loss": 0.5445,
      "step": 430
    },
    {
      "epoch": 1.32,
      "learning_rate": 0.00013393393393393394,
      "loss": 0.6703,
      "step": 440
    },
    {
      "epoch": 1.32,
      "eval_accuracy": 0.7988165680473372,
      "eval_loss": 0.6957046985626221,
      "eval_runtime": 25.2844,
      "eval_samples_per_second": 60.156,
      "eval_steps_per_second": 7.554,
      "step": 440
    },
    {
      "epoch": 1.35,
      "learning_rate": 0.00013243243243243243,
      "loss": 0.8127,
      "step": 450
    },
    {
      "epoch": 1.38,
      "learning_rate": 0.00013093093093093094,
      "loss": 0.567,
      "step": 460
    },
    {
      "epoch": 1.41,
      "learning_rate": 0.00012942942942942943,
      "loss": 0.5575,
      "step": 470
    },
    {
      "epoch": 1.44,
      "learning_rate": 0.00012792792792792795,
      "loss": 0.396,
      "step": 480
    },
    {
      "epoch": 1.44,
      "eval_accuracy": 0.8428665351742275,
      "eval_loss": 0.5446497797966003,
      "eval_runtime": 25.6496,
      "eval_samples_per_second": 59.299,
      "eval_steps_per_second": 7.447,
      "step": 480
    },
    {
      "epoch": 1.47,
      "learning_rate": 0.00012642642642642644,
      "loss": 0.4975,
      "step": 490
    },
    {
      "epoch": 1.5,
      "learning_rate": 0.00012492492492492492,
      "loss": 0.3971,
      "step": 500
    },
    {
      "epoch": 1.53,
      "learning_rate": 0.00012342342342342344,
      "loss": 0.469,
      "step": 510
    },
    {
      "epoch": 1.56,
      "learning_rate": 0.00012192192192192193,
      "loss": 0.4277,
      "step": 520
    },
    {
      "epoch": 1.56,
      "eval_accuracy": 0.8408941485864563,
      "eval_loss": 0.5659409165382385,
      "eval_runtime": 25.2836,
      "eval_samples_per_second": 60.157,
      "eval_steps_per_second": 7.554,
      "step": 520
    },
    {
      "epoch": 1.59,
      "learning_rate": 0.00012042042042042043,
      "loss": 0.4984,
      "step": 530
    },
    {
      "epoch": 1.62,
      "learning_rate": 0.00011891891891891893,
      "loss": 0.476,
      "step": 540
    },
    {
      "epoch": 1.65,
      "learning_rate": 0.00011741741741741743,
      "loss": 0.5402,
      "step": 550
    },
    {
      "epoch": 1.68,
      "learning_rate": 0.00011591591591591592,
      "loss": 0.457,
      "step": 560
    },
    {
      "epoch": 1.68,
      "eval_accuracy": 0.8527284681130834,
      "eval_loss": 0.5437894463539124,
      "eval_runtime": 25.9766,
      "eval_samples_per_second": 58.553,
      "eval_steps_per_second": 7.353,
      "step": 560
    },
    {
      "epoch": 1.71,
      "learning_rate": 0.00011441441441441443,
      "loss": 0.3039,
      "step": 570
    },
    {
      "epoch": 1.74,
      "learning_rate": 0.00011291291291291293,
      "loss": 0.4621,
      "step": 580
    },
    {
      "epoch": 1.77,
      "learning_rate": 0.00011141141141141143,
      "loss": 0.4584,
      "step": 590
    },
    {
      "epoch": 1.8,
      "learning_rate": 0.00010990990990990993,
      "loss": 0.5632,
      "step": 600
    },
    {
      "epoch": 1.8,
      "eval_accuracy": 0.8547008547008547,
      "eval_loss": 0.49320539832115173,
      "eval_runtime": 25.6823,
      "eval_samples_per_second": 59.224,
      "eval_steps_per_second": 7.437,
      "step": 600
    },
    {
      "epoch": 1.83,
      "learning_rate": 0.00010840840840840842,
      "loss": 0.4798,
      "step": 610
    },
    {
      "epoch": 1.86,
      "learning_rate": 0.00010690690690690692,
      "loss": 0.3991,
      "step": 620
    },
    {
      "epoch": 1.89,
      "learning_rate": 0.0001054054054054054,
      "loss": 0.4815,
      "step": 630
    },
    {
      "epoch": 1.92,
      "learning_rate": 0.0001039039039039039,
      "loss": 0.4066,
      "step": 640
    },
    {
      "epoch": 1.92,
      "eval_accuracy": 0.8658777120315582,
      "eval_loss": 0.4579479694366455,
      "eval_runtime": 25.9931,
      "eval_samples_per_second": 58.516,
      "eval_steps_per_second": 7.348,
      "step": 640
    },
    {
      "epoch": 1.95,
      "learning_rate": 0.0001024024024024024,
      "loss": 0.5159,
      "step": 650
    },
    {
      "epoch": 1.98,
      "learning_rate": 0.00010090090090090089,
      "loss": 0.4886,
      "step": 660
    },
    {
      "epoch": 2.01,
      "learning_rate": 9.93993993993994e-05,
      "loss": 0.2916,
      "step": 670
    },
    {
      "epoch": 2.04,
      "learning_rate": 9.789789789789791e-05,
      "loss": 0.2505,
      "step": 680
    },
    {
      "epoch": 2.04,
      "eval_accuracy": 0.8665351742274819,
      "eval_loss": 0.4546054005622864,
      "eval_runtime": 26.0755,
      "eval_samples_per_second": 58.331,
      "eval_steps_per_second": 7.325,
      "step": 680
    },
    {
      "epoch": 2.07,
      "learning_rate": 9.639639639639641e-05,
      "loss": 0.3156,
      "step": 690
    },
    {
      "epoch": 2.1,
      "learning_rate": 9.48948948948949e-05,
      "loss": 0.375,
      "step": 700
    },
    {
      "epoch": 2.13,
      "learning_rate": 9.33933933933934e-05,
      "loss": 0.2663,
      "step": 710
    },
    {
      "epoch": 2.16,
      "learning_rate": 9.18918918918919e-05,
      "loss": 0.3181,
      "step": 720
    },
    {
      "epoch": 2.16,
      "eval_accuracy": 0.8586456278763971,
      "eval_loss": 0.4709266424179077,
      "eval_runtime": 26.2379,
      "eval_samples_per_second": 57.97,
      "eval_steps_per_second": 7.28,
      "step": 720
    },
    {
      "epoch": 2.19,
      "learning_rate": 9.039039039039039e-05,
      "loss": 0.2628,
      "step": 730
    },
    {
      "epoch": 2.22,
      "learning_rate": 8.888888888888889e-05,
      "loss": 0.1784,
      "step": 740
    },
    {
      "epoch": 2.25,
      "learning_rate": 8.738738738738738e-05,
      "loss": 0.357,
      "step": 750
    },
    {
      "epoch": 2.28,
      "learning_rate": 8.588588588588588e-05,
      "loss": 0.1931,
      "step": 760
    },
    {
      "epoch": 2.28,
      "eval_accuracy": 0.8593030900723209,
      "eval_loss": 0.45762282609939575,
      "eval_runtime": 25.7501,
      "eval_samples_per_second": 59.068,
      "eval_steps_per_second": 7.417,
      "step": 760
    },
    {
      "epoch": 2.31,
      "learning_rate": 8.438438438438439e-05,
      "loss": 0.2865,
      "step": 770
    },
    {
      "epoch": 2.34,
      "learning_rate": 8.288288288288289e-05,
      "loss": 0.3125,
      "step": 780
    },
    {
      "epoch": 2.37,
      "learning_rate": 8.138138138138138e-05,
      "loss": 0.417,
      "step": 790
    },
    {
      "epoch": 2.4,
      "learning_rate": 7.987987987987988e-05,
      "loss": 0.288,
      "step": 800
    },
    {
      "epoch": 2.4,
      "eval_accuracy": 0.8763971071663379,
      "eval_loss": 0.41621989011764526,
      "eval_runtime": 25.9323,
      "eval_samples_per_second": 58.653,
      "eval_steps_per_second": 7.365,
      "step": 800
    },
    {
      "epoch": 2.43,
      "learning_rate": 7.837837837837838e-05,
      "loss": 0.3603,
      "step": 810
    },
    {
      "epoch": 2.46,
      "learning_rate": 7.687687687687688e-05,
      "loss": 0.2386,
      "step": 820
    },
    {
      "epoch": 2.49,
      "learning_rate": 7.537537537537538e-05,
      "loss": 0.2035,
      "step": 830
    },
    {
      "epoch": 2.52,
      "learning_rate": 7.387387387387387e-05,
      "loss": 0.2315,
      "step": 840
    },
    {
      "epoch": 2.52,
      "eval_accuracy": 0.8895463510848126,
      "eval_loss": 0.3602614402770996,
      "eval_runtime": 26.3037,
      "eval_samples_per_second": 57.825,
      "eval_steps_per_second": 7.261,
      "step": 840
    },
    {
      "epoch": 2.55,
      "learning_rate": 7.237237237237238e-05,
      "loss": 0.239,
      "step": 850
    },
    {
      "epoch": 2.58,
      "learning_rate": 7.087087087087088e-05,
      "loss": 0.2514,
      "step": 860
    },
    {
      "epoch": 2.61,
      "learning_rate": 6.936936936936938e-05,
      "loss": 0.3601,
      "step": 870
    },
    {
      "epoch": 2.64,
      "learning_rate": 6.786786786786787e-05,
      "loss": 0.1812,
      "step": 880
    },
    {
      "epoch": 2.64,
      "eval_accuracy": 0.8823142669296515,
      "eval_loss": 0.40812087059020996,
      "eval_runtime": 25.5461,
      "eval_samples_per_second": 59.539,
      "eval_steps_per_second": 7.477,
      "step": 880
    },
    {
      "epoch": 2.67,
      "learning_rate": 6.636636636636637e-05,
      "loss": 0.2621,
      "step": 890
    },
    {
      "epoch": 2.7,
      "learning_rate": 6.486486486486487e-05,
      "loss": 0.2284,
      "step": 900
    },
    {
      "epoch": 2.73,
      "learning_rate": 6.336336336336337e-05,
      "loss": 0.1624,
      "step": 910
    },
    {
      "epoch": 2.76,
      "learning_rate": 6.186186186186186e-05,
      "loss": 0.2661,
      "step": 920
    },
    {
      "epoch": 2.76,
      "eval_accuracy": 0.8777120315581854,
      "eval_loss": 0.3957211375236511,
      "eval_runtime": 26.3972,
      "eval_samples_per_second": 57.62,
      "eval_steps_per_second": 7.236,
      "step": 920
    },
    {
      "epoch": 2.79,
      "learning_rate": 6.0360360360360365e-05,
      "loss": 0.2434,
      "step": 930
    },
    {
      "epoch": 2.82,
      "learning_rate": 5.8858858858858854e-05,
      "loss": 0.2894,
      "step": 940
    },
    {
      "epoch": 2.85,
      "learning_rate": 5.7357357357357356e-05,
      "loss": 0.1421,
      "step": 950
    },
    {
      "epoch": 2.88,
      "learning_rate": 5.585585585585585e-05,
      "loss": 0.2632,
      "step": 960
    },
    {
      "epoch": 2.88,
      "eval_accuracy": 0.8915187376725838,
      "eval_loss": 0.34952232241630554,
      "eval_runtime": 25.2354,
      "eval_samples_per_second": 60.272,
      "eval_steps_per_second": 7.569,
      "step": 960
    },
    {
      "epoch": 2.91,
      "learning_rate": 5.435435435435435e-05,
      "loss": 0.2474,
      "step": 970
    },
    {
      "epoch": 2.94,
      "learning_rate": 5.2852852852852855e-05,
      "loss": 0.2458,
      "step": 980
    },
    {
      "epoch": 2.97,
      "learning_rate": 5.135135135135135e-05,
      "loss": 0.2121,
      "step": 990
    },
    {
      "epoch": 3.0,
      "learning_rate": 4.984984984984985e-05,
      "loss": 0.215,
      "step": 1000
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.8915187376725838,
      "eval_loss": 0.3721879720687866,
      "eval_runtime": 26.0936,
      "eval_samples_per_second": 58.29,
      "eval_steps_per_second": 7.32,
      "step": 1000
    },
    {
      "epoch": 3.03,
      "learning_rate": 4.834834834834835e-05,
      "loss": 0.1788,
      "step": 1010
    },
    {
      "epoch": 3.06,
      "learning_rate": 4.684684684684685e-05,
      "loss": 0.1671,
      "step": 1020
    },
    {
      "epoch": 3.09,
      "learning_rate": 4.5345345345345345e-05,
      "loss": 0.1663,
      "step": 1030
    },
    {
      "epoch": 3.12,
      "learning_rate": 4.384384384384385e-05,
      "loss": 0.1661,
      "step": 1040
    },
    {
      "epoch": 3.12,
      "eval_accuracy": 0.8888888888888888,
      "eval_loss": 0.3638674318790436,
      "eval_runtime": 25.7736,
      "eval_samples_per_second": 59.014,
      "eval_steps_per_second": 7.411,
      "step": 1040
    },
    {
      "epoch": 3.15,
      "learning_rate": 4.234234234234234e-05,
      "loss": 0.1734,
      "step": 1050
    },
    {
      "epoch": 3.18,
      "learning_rate": 4.0840840840840845e-05,
      "loss": 0.1525,
      "step": 1060
    },
    {
      "epoch": 3.21,
      "learning_rate": 3.933933933933934e-05,
      "loss": 0.1397,
      "step": 1070
    },
    {
      "epoch": 3.24,
      "learning_rate": 3.783783783783784e-05,
      "loss": 0.1164,
      "step": 1080
    },
    {
      "epoch": 3.24,
      "eval_accuracy": 0.9013806706114399,
      "eval_loss": 0.32475772500038147,
      "eval_runtime": 25.7546,
      "eval_samples_per_second": 59.057,
      "eval_steps_per_second": 7.416,
      "step": 1080
    },
    {
      "epoch": 3.27,
      "learning_rate": 3.633633633633634e-05,
      "loss": 0.176,
      "step": 1090
    },
    {
      "epoch": 3.3,
      "learning_rate": 3.483483483483483e-05,
      "loss": 0.1248,
      "step": 1100
    },
    {
      "epoch": 3.33,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.1025,
      "step": 1110
    },
    {
      "epoch": 3.36,
      "learning_rate": 3.183183183183183e-05,
      "loss": 0.1745,
      "step": 1120
    },
    {
      "epoch": 3.36,
      "eval_accuracy": 0.8948060486522025,
      "eval_loss": 0.33917945623397827,
      "eval_runtime": 26.0362,
      "eval_samples_per_second": 58.419,
      "eval_steps_per_second": 7.336,
      "step": 1120
    },
    {
      "epoch": 3.39,
      "learning_rate": 3.0330330330330332e-05,
      "loss": 0.2151,
      "step": 1130
    },
    {
      "epoch": 3.42,
      "learning_rate": 2.882882882882883e-05,
      "loss": 0.1135,
      "step": 1140
    },
    {
      "epoch": 3.45,
      "learning_rate": 2.732732732732733e-05,
      "loss": 0.1307,
      "step": 1150
    },
    {
      "epoch": 3.48,
      "learning_rate": 2.582582582582583e-05,
      "loss": 0.1347,
      "step": 1160
    },
    {
      "epoch": 3.48,
      "eval_accuracy": 0.8980933596318211,
      "eval_loss": 0.33266809582710266,
      "eval_runtime": 25.5437,
      "eval_samples_per_second": 59.545,
      "eval_steps_per_second": 7.477,
      "step": 1160
    },
    {
      "epoch": 3.51,
      "learning_rate": 2.4324324324324327e-05,
      "loss": 0.1395,
      "step": 1170
    },
    {
      "epoch": 3.54,
      "learning_rate": 2.2822822822822822e-05,
      "loss": 0.1506,
      "step": 1180
    },
    {
      "epoch": 3.57,
      "learning_rate": 2.132132132132132e-05,
      "loss": 0.1833,
      "step": 1190
    },
    {
      "epoch": 3.6,
      "learning_rate": 1.981981981981982e-05,
      "loss": 0.1362,
      "step": 1200
    },
    {
      "epoch": 3.6,
      "eval_accuracy": 0.9105851413543721,
      "eval_loss": 0.3018592596054077,
      "eval_runtime": 26.0978,
      "eval_samples_per_second": 58.281,
      "eval_steps_per_second": 7.319,
      "step": 1200
    },
    {
      "epoch": 3.63,
      "learning_rate": 1.831831831831832e-05,
      "loss": 0.1402,
      "step": 1210
    },
    {
      "epoch": 3.66,
      "learning_rate": 1.6816816816816817e-05,
      "loss": 0.142,
      "step": 1220
    },
    {
      "epoch": 3.69,
      "learning_rate": 1.5315315315315316e-05,
      "loss": 0.1422,
      "step": 1230
    },
    {
      "epoch": 3.72,
      "learning_rate": 1.3813813813813815e-05,
      "loss": 0.127,
      "step": 1240
    },
    {
      "epoch": 3.72,
      "eval_accuracy": 0.9072978303747534,
      "eval_loss": 0.3087291419506073,
      "eval_runtime": 25.0476,
      "eval_samples_per_second": 60.724,
      "eval_steps_per_second": 7.625,
      "step": 1240
    },
    {
      "epoch": 3.75,
      "learning_rate": 1.2312312312312313e-05,
      "loss": 0.1435,
      "step": 1250
    },
    {
      "epoch": 3.78,
      "learning_rate": 1.0810810810810812e-05,
      "loss": 0.1236,
      "step": 1260
    },
    {
      "epoch": 3.81,
      "learning_rate": 9.309309309309309e-06,
      "loss": 0.1167,
      "step": 1270
    },
    {
      "epoch": 3.84,
      "learning_rate": 7.807807807807808e-06,
      "loss": 0.1041,
      "step": 1280
    },
    {
      "epoch": 3.84,
      "eval_accuracy": 0.9099276791584484,
      "eval_loss": 0.2977927625179291,
      "eval_runtime": 26.1338,
      "eval_samples_per_second": 58.2,
      "eval_steps_per_second": 7.309,
      "step": 1280
    },
    {
      "epoch": 3.87,
      "learning_rate": 6.306306306306306e-06,
      "loss": 0.1486,
      "step": 1290
    },
    {
      "epoch": 3.9,
      "learning_rate": 4.804804804804805e-06,
      "loss": 0.1316,
      "step": 1300
    },
    {
      "epoch": 3.93,
      "learning_rate": 3.3033033033033035e-06,
      "loss": 0.1035,
      "step": 1310
    },
    {
      "epoch": 3.96,
      "learning_rate": 1.801801801801802e-06,
      "loss": 0.106,
      "step": 1320
    },
    {
      "epoch": 3.96,
      "eval_accuracy": 0.9112426035502958,
      "eval_loss": 0.2991219162940979,
      "eval_runtime": 25.2949,
      "eval_samples_per_second": 60.131,
      "eval_steps_per_second": 7.551,
      "step": 1320
    }
  ],
  "max_steps": 1332,
  "num_train_epochs": 4,
  "total_flos": 1.6343682786726543e+18,
  "trial_name": null,
  "trial_params": null
}