{
  "best_metric": 0.8333333333333334,
  "best_model_checkpoint": "vit-base-patch16-224-RU3-40\\checkpoint-134",
  "epoch": 39.48051948051948,
  "eval_steps": 500,
  "global_step": 760,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.52,
      "learning_rate": 1.4473684210526315e-05,
      "loss": 1.3821,
      "step": 10
    },
    {
      "epoch": 0.99,
      "eval_accuracy": 0.48333333333333334,
      "eval_loss": 1.3118996620178223,
      "eval_runtime": 1.0501,
      "eval_samples_per_second": 57.139,
      "eval_steps_per_second": 1.905,
      "step": 19
    },
    {
      "epoch": 1.04,
      "learning_rate": 2.894736842105263e-05,
      "loss": 1.34,
      "step": 20
    },
    {
      "epoch": 1.56,
      "learning_rate": 4.342105263157895e-05,
      "loss": 1.2698,
      "step": 30
    },
    {
      "epoch": 1.97,
      "eval_accuracy": 0.6166666666666667,
      "eval_loss": 1.085192084312439,
      "eval_runtime": 1.0848,
      "eval_samples_per_second": 55.308,
      "eval_steps_per_second": 1.844,
      "step": 38
    },
    {
      "epoch": 2.08,
      "learning_rate": 5.484764542936288e-05,
      "loss": 1.1206,
      "step": 40
    },
    {
      "epoch": 2.6,
      "learning_rate": 5.408587257617729e-05,
      "loss": 0.9819,
      "step": 50
    },
    {
      "epoch": 2.96,
      "eval_accuracy": 0.7,
      "eval_loss": 0.875718355178833,
      "eval_runtime": 1.0604,
      "eval_samples_per_second": 56.582,
      "eval_steps_per_second": 1.886,
      "step": 57
    },
    {
      "epoch": 3.12,
      "learning_rate": 5.332409972299169e-05,
      "loss": 0.8387,
      "step": 60
    },
    {
      "epoch": 3.64,
      "learning_rate": 5.256232686980609e-05,
      "loss": 0.6671,
      "step": 70
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.7333333333333333,
      "eval_loss": 0.768868088722229,
      "eval_runtime": 1.1869,
      "eval_samples_per_second": 50.551,
      "eval_steps_per_second": 1.685,
      "step": 77
    },
    {
      "epoch": 4.16,
      "learning_rate": 5.18005540166205e-05,
      "loss": 0.5484,
      "step": 80
    },
    {
      "epoch": 4.68,
      "learning_rate": 5.1038781163434903e-05,
      "loss": 0.4248,
      "step": 90
    },
    {
      "epoch": 4.99,
      "eval_accuracy": 0.7166666666666667,
      "eval_loss": 0.7293522357940674,
      "eval_runtime": 1.0714,
      "eval_samples_per_second": 56.0,
      "eval_steps_per_second": 1.867,
      "step": 96
    },
    {
      "epoch": 5.19,
      "learning_rate": 5.027700831024931e-05,
      "loss": 0.3571,
      "step": 100
    },
    {
      "epoch": 5.71,
      "learning_rate": 4.9515235457063714e-05,
      "loss": 0.3005,
      "step": 110
    },
    {
      "epoch": 5.97,
      "eval_accuracy": 0.7833333333333333,
      "eval_loss": 0.6517642140388489,
      "eval_runtime": 1.0607,
      "eval_samples_per_second": 56.569,
      "eval_steps_per_second": 1.886,
      "step": 115
    },
    {
      "epoch": 6.23,
      "learning_rate": 4.8753462603878116e-05,
      "loss": 0.2452,
      "step": 120
    },
    {
      "epoch": 6.75,
      "learning_rate": 4.7991689750692524e-05,
      "loss": 0.2035,
      "step": 130
    },
    {
      "epoch": 6.96,
      "eval_accuracy": 0.8333333333333334,
      "eval_loss": 0.5666719079017639,
      "eval_runtime": 1.0912,
      "eval_samples_per_second": 54.986,
      "eval_steps_per_second": 1.833,
      "step": 134
    },
    {
      "epoch": 7.27,
      "learning_rate": 4.7229916897506926e-05,
      "loss": 0.2138,
      "step": 140
    },
    {
      "epoch": 7.79,
      "learning_rate": 4.6468144044321335e-05,
      "loss": 0.2195,
      "step": 150
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.8333333333333334,
      "eval_loss": 0.6646459102630615,
      "eval_runtime": 1.0657,
      "eval_samples_per_second": 56.3,
      "eval_steps_per_second": 1.877,
      "step": 154
    },
    {
      "epoch": 8.31,
      "learning_rate": 4.570637119113573e-05,
      "loss": 0.1806,
      "step": 160
    },
    {
      "epoch": 8.83,
      "learning_rate": 4.494459833795014e-05,
      "loss": 0.1654,
      "step": 170
    },
    {
      "epoch": 8.99,
      "eval_accuracy": 0.8166666666666667,
      "eval_loss": 0.6294049024581909,
      "eval_runtime": 1.1008,
      "eval_samples_per_second": 54.503,
      "eval_steps_per_second": 1.817,
      "step": 173
    },
    {
      "epoch": 9.35,
      "learning_rate": 4.418282548476455e-05,
      "loss": 0.1641,
      "step": 180
    },
    {
      "epoch": 9.87,
      "learning_rate": 4.342105263157895e-05,
      "loss": 0.1581,
      "step": 190
    },
    {
      "epoch": 9.97,
      "eval_accuracy": 0.7833333333333333,
      "eval_loss": 0.7211242318153381,
      "eval_runtime": 1.0634,
      "eval_samples_per_second": 56.425,
      "eval_steps_per_second": 1.881,
      "step": 192
    },
    {
      "epoch": 10.39,
      "learning_rate": 4.265927977839336e-05,
      "loss": 0.1147,
      "step": 200
    },
    {
      "epoch": 10.91,
      "learning_rate": 4.189750692520776e-05,
      "loss": 0.1338,
      "step": 210
    },
    {
      "epoch": 10.96,
      "eval_accuracy": 0.7833333333333333,
      "eval_loss": 0.8129342198371887,
      "eval_runtime": 1.1857,
      "eval_samples_per_second": 50.603,
      "eval_steps_per_second": 1.687,
      "step": 211
    },
    {
      "epoch": 11.43,
      "learning_rate": 4.113573407202216e-05,
      "loss": 0.1313,
      "step": 220
    },
    {
      "epoch": 11.95,
      "learning_rate": 4.037396121883656e-05,
      "loss": 0.1188,
      "step": 230
    },
    {
      "epoch": 12.0,
      "eval_accuracy": 0.8166666666666667,
      "eval_loss": 0.7924687266349792,
      "eval_runtime": 1.0887,
      "eval_samples_per_second": 55.111,
      "eval_steps_per_second": 1.837,
      "step": 231
    },
    {
      "epoch": 12.47,
      "learning_rate": 3.961218836565097e-05,
      "loss": 0.0969,
      "step": 240
    },
    {
      "epoch": 12.99,
      "learning_rate": 3.885041551246538e-05,
      "loss": 0.1179,
      "step": 250
    },
    {
      "epoch": 12.99,
      "eval_accuracy": 0.7666666666666667,
      "eval_loss": 0.958832323551178,
      "eval_runtime": 1.165,
      "eval_samples_per_second": 51.501,
      "eval_steps_per_second": 1.717,
      "step": 250
    },
    {
      "epoch": 13.51,
      "learning_rate": 3.808864265927978e-05,
      "loss": 0.1017,
      "step": 260
    },
    {
      "epoch": 13.97,
      "eval_accuracy": 0.7166666666666667,
      "eval_loss": 1.0874536037445068,
      "eval_runtime": 1.0498,
      "eval_samples_per_second": 57.151,
      "eval_steps_per_second": 1.905,
      "step": 269
    },
    {
      "epoch": 14.03,
      "learning_rate": 3.732686980609418e-05,
      "loss": 0.1004,
      "step": 270
    },
    {
      "epoch": 14.55,
      "learning_rate": 3.6565096952908585e-05,
      "loss": 0.0845,
      "step": 280
    },
    {
      "epoch": 14.96,
      "eval_accuracy": 0.7,
      "eval_loss": 0.935455322265625,
      "eval_runtime": 1.1044,
      "eval_samples_per_second": 54.328,
      "eval_steps_per_second": 1.811,
      "step": 288
    },
    {
      "epoch": 15.06,
      "learning_rate": 3.5803324099722994e-05,
      "loss": 0.0898,
      "step": 290
    },
    {
      "epoch": 15.58,
      "learning_rate": 3.5041551246537395e-05,
      "loss": 0.1109,
      "step": 300
    },
    {
      "epoch": 16.0,
      "eval_accuracy": 0.8166666666666667,
      "eval_loss": 0.9386860728263855,
      "eval_runtime": 1.0479,
      "eval_samples_per_second": 57.256,
      "eval_steps_per_second": 1.909,
      "step": 308
    },
    {
      "epoch": 16.1,
      "learning_rate": 3.4279778393351804e-05,
      "loss": 0.0868,
      "step": 310
    },
    {
      "epoch": 16.62,
      "learning_rate": 3.3518005540166206e-05,
      "loss": 0.0711,
      "step": 320
    },
    {
      "epoch": 16.99,
      "eval_accuracy": 0.7333333333333333,
      "eval_loss": 1.121420979499817,
      "eval_runtime": 1.0767,
      "eval_samples_per_second": 55.726,
      "eval_steps_per_second": 1.858,
      "step": 327
    },
    {
      "epoch": 17.14,
      "learning_rate": 3.275623268698061e-05,
      "loss": 0.079,
      "step": 330
    },
    {
      "epoch": 17.66,
      "learning_rate": 3.1994459833795016e-05,
      "loss": 0.0884,
      "step": 340
    },
    {
      "epoch": 17.97,
      "eval_accuracy": 0.7666666666666667,
      "eval_loss": 0.9687939286231995,
      "eval_runtime": 1.0985,
      "eval_samples_per_second": 54.619,
      "eval_steps_per_second": 1.821,
      "step": 346
    },
    {
      "epoch": 18.18,
      "learning_rate": 3.123268698060942e-05,
      "loss": 0.0661,
      "step": 350
    },
    {
      "epoch": 18.7,
      "learning_rate": 3.0470914127423827e-05,
      "loss": 0.0668,
      "step": 360
    },
    {
      "epoch": 18.96,
      "eval_accuracy": 0.8,
      "eval_loss": 1.0306044816970825,
      "eval_runtime": 1.1148,
      "eval_samples_per_second": 53.823,
      "eval_steps_per_second": 1.794,
      "step": 365
    },
    {
      "epoch": 19.22,
      "learning_rate": 2.9709141274238225e-05,
      "loss": 0.0655,
      "step": 370
    },
    {
      "epoch": 19.74,
      "learning_rate": 2.894736842105263e-05,
      "loss": 0.0716,
      "step": 380
    },
    {
      "epoch": 20.0,
      "eval_accuracy": 0.7166666666666667,
      "eval_loss": 1.2652896642684937,
      "eval_runtime": 1.097,
      "eval_samples_per_second": 54.696,
      "eval_steps_per_second": 1.823,
      "step": 385
    },
    {
      "epoch": 20.26,
      "learning_rate": 2.8185595567867035e-05,
      "loss": 0.0725,
      "step": 390
    },
    {
      "epoch": 20.78,
      "learning_rate": 2.742382271468144e-05,
      "loss": 0.0643,
      "step": 400
    },
    {
      "epoch": 20.99,
      "eval_accuracy": 0.7833333333333333,
      "eval_loss": 0.9893661737442017,
      "eval_runtime": 1.1231,
      "eval_samples_per_second": 53.424,
      "eval_steps_per_second": 1.781,
      "step": 404
    },
    {
      "epoch": 21.3,
      "learning_rate": 2.6662049861495846e-05,
      "loss": 0.0509,
      "step": 410
    },
    {
      "epoch": 21.82,
      "learning_rate": 2.590027700831025e-05,
      "loss": 0.0517,
      "step": 420
    },
    {
      "epoch": 21.97,
      "eval_accuracy": 0.7666666666666667,
      "eval_loss": 1.0438777208328247,
      "eval_runtime": 1.1007,
      "eval_samples_per_second": 54.511,
      "eval_steps_per_second": 1.817,
      "step": 423
    },
    {
      "epoch": 22.34,
      "learning_rate": 2.5138504155124656e-05,
      "loss": 0.0637,
      "step": 430
    },
    {
      "epoch": 22.86,
      "learning_rate": 2.4376731301939058e-05,
      "loss": 0.0597,
      "step": 440
    },
    {
      "epoch": 22.96,
      "eval_accuracy": 0.7666666666666667,
      "eval_loss": 1.1469690799713135,
      "eval_runtime": 1.0658,
      "eval_samples_per_second": 56.294,
      "eval_steps_per_second": 1.876,
      "step": 442
    },
    {
      "epoch": 23.38,
      "learning_rate": 2.3614958448753463e-05,
      "loss": 0.0377,
      "step": 450
    },
    {
      "epoch": 23.9,
      "learning_rate": 2.2853185595567865e-05,
      "loss": 0.0533,
      "step": 460
    },
    {
      "epoch": 24.0,
      "eval_accuracy": 0.7833333333333333,
      "eval_loss": 1.0848194360733032,
      "eval_runtime": 1.148,
      "eval_samples_per_second": 52.264,
      "eval_steps_per_second": 1.742,
      "step": 462
    },
    {
      "epoch": 24.42,
      "learning_rate": 2.2091412742382273e-05,
      "loss": 0.0599,
      "step": 470
    },
    {
      "epoch": 24.94,
      "learning_rate": 2.132963988919668e-05,
      "loss": 0.0529,
      "step": 480
    },
    {
      "epoch": 24.99,
      "eval_accuracy": 0.75,
      "eval_loss": 1.1481404304504395,
      "eval_runtime": 1.0909,
      "eval_samples_per_second": 55.002,
      "eval_steps_per_second": 1.833,
      "step": 481
    },
    {
      "epoch": 25.45,
      "learning_rate": 2.056786703601108e-05,
      "loss": 0.0373,
      "step": 490
    },
    {
      "epoch": 25.97,
      "learning_rate": 1.9806094182825486e-05,
      "loss": 0.0524,
      "step": 500
    },
    {
      "epoch": 25.97,
      "eval_accuracy": 0.7333333333333333,
      "eval_loss": 1.1321938037872314,
      "eval_runtime": 1.1085,
      "eval_samples_per_second": 54.127,
      "eval_steps_per_second": 1.804,
      "step": 500
    },
    {
      "epoch": 26.49,
      "learning_rate": 1.904432132963989e-05,
      "loss": 0.0525,
      "step": 510
    },
    {
      "epoch": 26.96,
      "eval_accuracy": 0.7333333333333333,
      "eval_loss": 1.1867502927780151,
      "eval_runtime": 1.0522,
      "eval_samples_per_second": 57.023,
      "eval_steps_per_second": 1.901,
      "step": 519
    },
    {
      "epoch": 27.01,
      "learning_rate": 1.8282548476454293e-05,
      "loss": 0.0574,
      "step": 520
    },
    {
      "epoch": 27.53,
      "learning_rate": 1.7520775623268698e-05,
      "loss": 0.0517,
      "step": 530
    },
    {
      "epoch": 28.0,
      "eval_accuracy": 0.7166666666666667,
      "eval_loss": 1.1561001539230347,
      "eval_runtime": 1.1047,
      "eval_samples_per_second": 54.311,
      "eval_steps_per_second": 1.81,
      "step": 539
    },
    {
      "epoch": 28.05,
      "learning_rate": 1.6759002770083103e-05,
      "loss": 0.0505,
      "step": 540
    },
    {
      "epoch": 28.57,
      "learning_rate": 1.5997229916897508e-05,
      "loss": 0.0309,
      "step": 550
    },
    {
      "epoch": 28.99,
      "eval_accuracy": 0.7833333333333333,
      "eval_loss": 1.0561987161636353,
      "eval_runtime": 1.0565,
      "eval_samples_per_second": 56.79,
      "eval_steps_per_second": 1.893,
      "step": 558
    },
    {
      "epoch": 29.09,
      "learning_rate": 1.5235457063711913e-05,
      "loss": 0.0429,
      "step": 560
    },
    {
      "epoch": 29.61,
      "learning_rate": 1.4473684210526315e-05,
      "loss": 0.0403,
      "step": 570
    },
    {
      "epoch": 29.97,
      "eval_accuracy": 0.7333333333333333,
      "eval_loss": 1.2900880575180054,
      "eval_runtime": 1.099,
      "eval_samples_per_second": 54.594,
      "eval_steps_per_second": 1.82,
      "step": 577
    },
    {
      "epoch": 30.13,
      "learning_rate": 1.371191135734072e-05,
      "loss": 0.0442,
      "step": 580
    },
    {
      "epoch": 30.65,
      "learning_rate": 1.2950138504155125e-05,
      "loss": 0.0392,
      "step": 590
    },
    {
      "epoch": 30.96,
      "eval_accuracy": 0.7666666666666667,
      "eval_loss": 1.1295415163040161,
      "eval_runtime": 1.0734,
      "eval_samples_per_second": 55.898,
      "eval_steps_per_second": 1.863,
      "step": 596
    },
    {
      "epoch": 31.17,
      "learning_rate": 1.2188365650969529e-05,
      "loss": 0.0274,
      "step": 600
    },
    {
      "epoch": 31.69,
      "learning_rate": 1.1426592797783932e-05,
      "loss": 0.0404,
      "step": 610
    },
    {
      "epoch": 32.0,
      "eval_accuracy": 0.7666666666666667,
      "eval_loss": 1.119759440422058,
      "eval_runtime": 1.2454,
      "eval_samples_per_second": 48.178,
      "eval_steps_per_second": 1.606,
      "step": 616
    },
    {
      "epoch": 32.21,
      "learning_rate": 1.066481994459834e-05,
      "loss": 0.0212,
      "step": 620
    },
    {
      "epoch": 32.73,
      "learning_rate": 9.903047091412743e-06,
      "loss": 0.0381,
      "step": 630
    },
    {
      "epoch": 32.99,
      "eval_accuracy": 0.7166666666666667,
      "eval_loss": 1.298552393913269,
      "eval_runtime": 1.1082,
      "eval_samples_per_second": 54.143,
      "eval_steps_per_second": 1.805,
      "step": 635
    },
    {
      "epoch": 33.25,
      "learning_rate": 9.141274238227146e-06,
      "loss": 0.0259,
      "step": 640
    },
    {
      "epoch": 33.77,
      "learning_rate": 8.379501385041551e-06,
      "loss": 0.0262,
      "step": 650
    },
    {
      "epoch": 33.97,
      "eval_accuracy": 0.75,
      "eval_loss": 1.1655079126358032,
      "eval_runtime": 1.1294,
      "eval_samples_per_second": 53.126,
      "eval_steps_per_second": 1.771,
      "step": 654
    },
    {
      "epoch": 34.29,
      "learning_rate": 7.617728531855957e-06,
      "loss": 0.0301,
      "step": 660
    },
    {
      "epoch": 34.81,
      "learning_rate": 6.85595567867036e-06,
      "loss": 0.0354,
      "step": 670
    },
    {
      "epoch": 34.96,
      "eval_accuracy": 0.7833333333333333,
      "eval_loss": 1.1222782135009766,
      "eval_runtime": 1.0831,
      "eval_samples_per_second": 55.396,
      "eval_steps_per_second": 1.847,
      "step": 673
    },
    {
      "epoch": 35.32,
      "learning_rate": 6.0941828254847645e-06,
      "loss": 0.0291,
      "step": 680
    },
    {
      "epoch": 35.84,
      "learning_rate": 5.33240997229917e-06,
      "loss": 0.0224,
      "step": 690
    },
    {
      "epoch": 36.0,
      "eval_accuracy": 0.7833333333333333,
      "eval_loss": 1.1678553819656372,
      "eval_runtime": 1.0793,
      "eval_samples_per_second": 55.591,
      "eval_steps_per_second": 1.853,
      "step": 693
    },
    {
      "epoch": 36.36,
      "learning_rate": 4.570637119113573e-06,
      "loss": 0.0349,
      "step": 700
    },
    {
      "epoch": 36.88,
      "learning_rate": 3.8088642659279783e-06,
      "loss": 0.0244,
      "step": 710
    },
    {
      "epoch": 36.99,
      "eval_accuracy": 0.8166666666666667,
      "eval_loss": 1.0998696088790894,
      "eval_runtime": 1.077,
      "eval_samples_per_second": 55.712,
      "eval_steps_per_second": 1.857,
      "step": 712
    },
    {
      "epoch": 37.4,
      "learning_rate": 3.0470914127423822e-06,
      "loss": 0.0412,
      "step": 720
    },
    {
      "epoch": 37.92,
      "learning_rate": 2.2853185595567866e-06,
      "loss": 0.0368,
      "step": 730
    },
    {
      "epoch": 37.97,
      "eval_accuracy": 0.7833333333333333,
      "eval_loss": 1.121293306350708,
      "eval_runtime": 1.0596,
      "eval_samples_per_second": 56.628,
      "eval_steps_per_second": 1.888,
      "step": 731
    },
    {
      "epoch": 38.44,
      "learning_rate": 1.5235457063711911e-06,
      "loss": 0.0273,
      "step": 740
    },
    {
      "epoch": 38.96,
      "learning_rate": 7.617728531855956e-07,
      "loss": 0.0199,
      "step": 750
    },
    {
      "epoch": 38.96,
      "eval_accuracy": 0.8,
      "eval_loss": 1.100300669670105,
      "eval_runtime": 1.0701,
      "eval_samples_per_second": 56.068,
      "eval_steps_per_second": 1.869,
      "step": 750
    },
    {
      "epoch": 39.48,
      "learning_rate": 0.0,
      "loss": 0.028,
      "step": 760
    },
    {
      "epoch": 39.48,
      "eval_accuracy": 0.8,
      "eval_loss": 1.0988926887512207,
      "eval_runtime": 1.0717,
      "eval_samples_per_second": 55.986,
      "eval_steps_per_second": 1.866,
      "step": 760
    },
    {
      "epoch": 39.48,
      "step": 760,
      "total_flos": 7.496244493905936e+18,
      "train_loss": 0.18620210740911333,
      "train_runtime": 1570.0451,
      "train_samples_per_second": 62.419,
      "train_steps_per_second": 0.484
    }
  ],
  "logging_steps": 10,
  "max_steps": 760,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 40,
  "save_steps": 500,
  "total_flos": 7.496244493905936e+18,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}