| { | |
| "best_metric": 0.2815741002559662, | |
| "best_model_checkpoint": "finetuned-food/checkpoint-5000", | |
| "epoch": 4.0, | |
| "eval_steps": 500, | |
| "global_step": 5096, | |
| "is_hyper_param_search": false, | |
| "is_local_process_zero": true, | |
| "is_world_process_zero": true, | |
| "log_history": [ | |
| { | |
| "epoch": 0.01, | |
| "learning_rate": 0.00019960753532182103, | |
| "loss": 0.4744, | |
| "step": 10 | |
| }, | |
| { | |
| "epoch": 0.02, | |
| "learning_rate": 0.0001992150706436421, | |
| "loss": 0.4761, | |
| "step": 20 | |
| }, | |
| { | |
| "epoch": 0.02, | |
| "learning_rate": 0.0001988226059654631, | |
| "loss": 0.5317, | |
| "step": 30 | |
| }, | |
| { | |
| "epoch": 0.03, | |
| "learning_rate": 0.00019843014128728415, | |
| "loss": 0.477, | |
| "step": 40 | |
| }, | |
| { | |
| "epoch": 0.04, | |
| "learning_rate": 0.0001980376766091052, | |
| "loss": 0.3772, | |
| "step": 50 | |
| }, | |
| { | |
| "epoch": 0.05, | |
| "learning_rate": 0.00019764521193092621, | |
| "loss": 0.3498, | |
| "step": 60 | |
| }, | |
| { | |
| "epoch": 0.05, | |
| "learning_rate": 0.00019725274725274728, | |
| "loss": 0.7838, | |
| "step": 70 | |
| }, | |
| { | |
| "epoch": 0.06, | |
| "learning_rate": 0.0001968602825745683, | |
| "loss": 0.6395, | |
| "step": 80 | |
| }, | |
| { | |
| "epoch": 0.07, | |
| "learning_rate": 0.00019646781789638932, | |
| "loss": 0.5704, | |
| "step": 90 | |
| }, | |
| { | |
| "epoch": 0.08, | |
| "learning_rate": 0.00019607535321821039, | |
| "loss": 0.556, | |
| "step": 100 | |
| }, | |
| { | |
| "epoch": 0.09, | |
| "learning_rate": 0.0001956828885400314, | |
| "loss": 0.6918, | |
| "step": 110 | |
| }, | |
| { | |
| "epoch": 0.09, | |
| "learning_rate": 0.00019529042386185245, | |
| "loss": 0.9462, | |
| "step": 120 | |
| }, | |
| { | |
| "epoch": 0.1, | |
| "learning_rate": 0.0001948979591836735, | |
| "loss": 0.9653, | |
| "step": 130 | |
| }, | |
| { | |
| "epoch": 0.11, | |
| "learning_rate": 0.0001945054945054945, | |
| "loss": 0.4661, | |
| "step": 140 | |
| }, | |
| { | |
| "epoch": 0.12, | |
| "learning_rate": 0.00019411302982731555, | |
| "loss": 0.7594, | |
| "step": 150 | |
| }, | |
| { | |
| "epoch": 0.13, | |
| "learning_rate": 0.0001937205651491366, | |
| "loss": 0.5468, | |
| "step": 160 | |
| }, | |
| { | |
| "epoch": 0.13, | |
| "learning_rate": 0.0001933281004709576, | |
| "loss": 0.5591, | |
| "step": 170 | |
| }, | |
| { | |
| "epoch": 0.14, | |
| "learning_rate": 0.00019293563579277868, | |
| "loss": 0.7155, | |
| "step": 180 | |
| }, | |
| { | |
| "epoch": 0.15, | |
| "learning_rate": 0.0001925431711145997, | |
| "loss": 0.5256, | |
| "step": 190 | |
| }, | |
| { | |
| "epoch": 0.16, | |
| "learning_rate": 0.00019215070643642074, | |
| "loss": 0.7468, | |
| "step": 200 | |
| }, | |
| { | |
| "epoch": 0.16, | |
| "learning_rate": 0.00019175824175824178, | |
| "loss": 0.915, | |
| "step": 210 | |
| }, | |
| { | |
| "epoch": 0.17, | |
| "learning_rate": 0.0001913657770800628, | |
| "loss": 0.9691, | |
| "step": 220 | |
| }, | |
| { | |
| "epoch": 0.18, | |
| "learning_rate": 0.00019097331240188384, | |
| "loss": 1.1112, | |
| "step": 230 | |
| }, | |
| { | |
| "epoch": 0.19, | |
| "learning_rate": 0.00019058084772370488, | |
| "loss": 1.0109, | |
| "step": 240 | |
| }, | |
| { | |
| "epoch": 0.2, | |
| "learning_rate": 0.0001901883830455259, | |
| "loss": 0.923, | |
| "step": 250 | |
| }, | |
| { | |
| "epoch": 0.2, | |
| "learning_rate": 0.00018979591836734697, | |
| "loss": 0.8972, | |
| "step": 260 | |
| }, | |
| { | |
| "epoch": 0.21, | |
| "learning_rate": 0.00018940345368916798, | |
| "loss": 0.9926, | |
| "step": 270 | |
| }, | |
| { | |
| "epoch": 0.22, | |
| "learning_rate": 0.00018901098901098903, | |
| "loss": 1.0361, | |
| "step": 280 | |
| }, | |
| { | |
| "epoch": 0.23, | |
| "learning_rate": 0.00018861852433281007, | |
| "loss": 0.9765, | |
| "step": 290 | |
| }, | |
| { | |
| "epoch": 0.24, | |
| "learning_rate": 0.00018822605965463109, | |
| "loss": 0.8471, | |
| "step": 300 | |
| }, | |
| { | |
| "epoch": 0.24, | |
| "learning_rate": 0.00018783359497645213, | |
| "loss": 0.9841, | |
| "step": 310 | |
| }, | |
| { | |
| "epoch": 0.25, | |
| "learning_rate": 0.00018744113029827317, | |
| "loss": 1.1251, | |
| "step": 320 | |
| }, | |
| { | |
| "epoch": 0.26, | |
| "learning_rate": 0.0001870486656200942, | |
| "loss": 0.9555, | |
| "step": 330 | |
| }, | |
| { | |
| "epoch": 0.27, | |
| "learning_rate": 0.00018665620094191523, | |
| "loss": 1.0878, | |
| "step": 340 | |
| }, | |
| { | |
| "epoch": 0.27, | |
| "learning_rate": 0.00018626373626373627, | |
| "loss": 1.0991, | |
| "step": 350 | |
| }, | |
| { | |
| "epoch": 0.28, | |
| "learning_rate": 0.00018587127158555732, | |
| "loss": 0.8107, | |
| "step": 360 | |
| }, | |
| { | |
| "epoch": 0.29, | |
| "learning_rate": 0.00018547880690737836, | |
| "loss": 0.7627, | |
| "step": 370 | |
| }, | |
| { | |
| "epoch": 0.3, | |
| "learning_rate": 0.00018508634222919938, | |
| "loss": 0.8323, | |
| "step": 380 | |
| }, | |
| { | |
| "epoch": 0.31, | |
| "learning_rate": 0.00018469387755102042, | |
| "loss": 0.858, | |
| "step": 390 | |
| }, | |
| { | |
| "epoch": 0.31, | |
| "learning_rate": 0.00018430141287284146, | |
| "loss": 1.0723, | |
| "step": 400 | |
| }, | |
| { | |
| "epoch": 0.32, | |
| "learning_rate": 0.00018390894819466248, | |
| "loss": 0.9763, | |
| "step": 410 | |
| }, | |
| { | |
| "epoch": 0.33, | |
| "learning_rate": 0.00018351648351648352, | |
| "loss": 0.8474, | |
| "step": 420 | |
| }, | |
| { | |
| "epoch": 0.34, | |
| "learning_rate": 0.00018312401883830456, | |
| "loss": 1.0441, | |
| "step": 430 | |
| }, | |
| { | |
| "epoch": 0.35, | |
| "learning_rate": 0.0001827315541601256, | |
| "loss": 0.8014, | |
| "step": 440 | |
| }, | |
| { | |
| "epoch": 0.35, | |
| "learning_rate": 0.00018233908948194662, | |
| "loss": 0.8023, | |
| "step": 450 | |
| }, | |
| { | |
| "epoch": 0.36, | |
| "learning_rate": 0.00018194662480376767, | |
| "loss": 0.8577, | |
| "step": 460 | |
| }, | |
| { | |
| "epoch": 0.37, | |
| "learning_rate": 0.0001815541601255887, | |
| "loss": 0.7513, | |
| "step": 470 | |
| }, | |
| { | |
| "epoch": 0.38, | |
| "learning_rate": 0.00018116169544740975, | |
| "loss": 0.9424, | |
| "step": 480 | |
| }, | |
| { | |
| "epoch": 0.38, | |
| "learning_rate": 0.00018076923076923077, | |
| "loss": 0.7058, | |
| "step": 490 | |
| }, | |
| { | |
| "epoch": 0.39, | |
| "learning_rate": 0.0001803767660910518, | |
| "loss": 0.8456, | |
| "step": 500 | |
| }, | |
| { | |
| "epoch": 0.39, | |
| "eval_accuracy": 0.7633507853403141, | |
| "eval_loss": 0.8592807054519653, | |
| "eval_runtime": 104.955, | |
| "eval_samples_per_second": 45.496, | |
| "eval_steps_per_second": 5.688, | |
| "step": 500 | |
| }, | |
| { | |
| "epoch": 0.4, | |
| "learning_rate": 0.00017998430141287285, | |
| "loss": 0.9776, | |
| "step": 510 | |
| }, | |
| { | |
| "epoch": 0.41, | |
| "learning_rate": 0.0001795918367346939, | |
| "loss": 0.8952, | |
| "step": 520 | |
| }, | |
| { | |
| "epoch": 0.42, | |
| "learning_rate": 0.0001791993720565149, | |
| "loss": 1.1562, | |
| "step": 530 | |
| }, | |
| { | |
| "epoch": 0.42, | |
| "learning_rate": 0.00017880690737833596, | |
| "loss": 0.8117, | |
| "step": 540 | |
| }, | |
| { | |
| "epoch": 0.43, | |
| "learning_rate": 0.000178414442700157, | |
| "loss": 0.7151, | |
| "step": 550 | |
| }, | |
| { | |
| "epoch": 0.44, | |
| "learning_rate": 0.00017802197802197802, | |
| "loss": 0.811, | |
| "step": 560 | |
| }, | |
| { | |
| "epoch": 0.45, | |
| "learning_rate": 0.00017762951334379906, | |
| "loss": 1.0324, | |
| "step": 570 | |
| }, | |
| { | |
| "epoch": 0.46, | |
| "learning_rate": 0.0001772370486656201, | |
| "loss": 0.9562, | |
| "step": 580 | |
| }, | |
| { | |
| "epoch": 0.46, | |
| "learning_rate": 0.00017684458398744114, | |
| "loss": 0.8997, | |
| "step": 590 | |
| }, | |
| { | |
| "epoch": 0.47, | |
| "learning_rate": 0.0001764521193092622, | |
| "loss": 1.0177, | |
| "step": 600 | |
| }, | |
| { | |
| "epoch": 0.48, | |
| "learning_rate": 0.0001760596546310832, | |
| "loss": 0.7006, | |
| "step": 610 | |
| }, | |
| { | |
| "epoch": 0.49, | |
| "learning_rate": 0.00017566718995290425, | |
| "loss": 0.777, | |
| "step": 620 | |
| }, | |
| { | |
| "epoch": 0.49, | |
| "learning_rate": 0.0001752747252747253, | |
| "loss": 0.7591, | |
| "step": 630 | |
| }, | |
| { | |
| "epoch": 0.5, | |
| "learning_rate": 0.0001748822605965463, | |
| "loss": 0.7587, | |
| "step": 640 | |
| }, | |
| { | |
| "epoch": 0.51, | |
| "learning_rate": 0.00017448979591836735, | |
| "loss": 0.7518, | |
| "step": 650 | |
| }, | |
| { | |
| "epoch": 0.52, | |
| "learning_rate": 0.0001740973312401884, | |
| "loss": 0.6905, | |
| "step": 660 | |
| }, | |
| { | |
| "epoch": 0.53, | |
| "learning_rate": 0.00017370486656200943, | |
| "loss": 0.6435, | |
| "step": 670 | |
| }, | |
| { | |
| "epoch": 0.53, | |
| "learning_rate": 0.00017331240188383048, | |
| "loss": 0.8539, | |
| "step": 680 | |
| }, | |
| { | |
| "epoch": 0.54, | |
| "learning_rate": 0.0001729199372056515, | |
| "loss": 0.9524, | |
| "step": 690 | |
| }, | |
| { | |
| "epoch": 0.55, | |
| "learning_rate": 0.00017252747252747254, | |
| "loss": 0.8727, | |
| "step": 700 | |
| }, | |
| { | |
| "epoch": 0.56, | |
| "learning_rate": 0.00017213500784929358, | |
| "loss": 0.7349, | |
| "step": 710 | |
| }, | |
| { | |
| "epoch": 0.57, | |
| "learning_rate": 0.0001717425431711146, | |
| "loss": 0.6538, | |
| "step": 720 | |
| }, | |
| { | |
| "epoch": 0.57, | |
| "learning_rate": 0.00017135007849293564, | |
| "loss": 0.9937, | |
| "step": 730 | |
| }, | |
| { | |
| "epoch": 0.58, | |
| "learning_rate": 0.00017095761381475668, | |
| "loss": 0.7501, | |
| "step": 740 | |
| }, | |
| { | |
| "epoch": 0.59, | |
| "learning_rate": 0.0001705651491365777, | |
| "loss": 0.8535, | |
| "step": 750 | |
| }, | |
| { | |
| "epoch": 0.6, | |
| "learning_rate": 0.00017017268445839877, | |
| "loss": 0.9108, | |
| "step": 760 | |
| }, | |
| { | |
| "epoch": 0.6, | |
| "learning_rate": 0.00016978021978021978, | |
| "loss": 0.7519, | |
| "step": 770 | |
| }, | |
| { | |
| "epoch": 0.61, | |
| "learning_rate": 0.00016938775510204083, | |
| "loss": 0.661, | |
| "step": 780 | |
| }, | |
| { | |
| "epoch": 0.62, | |
| "learning_rate": 0.00016899529042386187, | |
| "loss": 0.7516, | |
| "step": 790 | |
| }, | |
| { | |
| "epoch": 0.63, | |
| "learning_rate": 0.0001686028257456829, | |
| "loss": 0.6468, | |
| "step": 800 | |
| }, | |
| { | |
| "epoch": 0.64, | |
| "learning_rate": 0.00016821036106750393, | |
| "loss": 0.7219, | |
| "step": 810 | |
| }, | |
| { | |
| "epoch": 0.64, | |
| "learning_rate": 0.00016781789638932497, | |
| "loss": 0.9463, | |
| "step": 820 | |
| }, | |
| { | |
| "epoch": 0.65, | |
| "learning_rate": 0.000167425431711146, | |
| "loss": 0.9053, | |
| "step": 830 | |
| }, | |
| { | |
| "epoch": 0.66, | |
| "learning_rate": 0.00016703296703296706, | |
| "loss": 0.7573, | |
| "step": 840 | |
| }, | |
| { | |
| "epoch": 0.67, | |
| "learning_rate": 0.00016664050235478807, | |
| "loss": 0.7801, | |
| "step": 850 | |
| }, | |
| { | |
| "epoch": 0.68, | |
| "learning_rate": 0.0001662480376766091, | |
| "loss": 0.8736, | |
| "step": 860 | |
| }, | |
| { | |
| "epoch": 0.68, | |
| "learning_rate": 0.00016585557299843016, | |
| "loss": 0.4921, | |
| "step": 870 | |
| }, | |
| { | |
| "epoch": 0.69, | |
| "learning_rate": 0.00016546310832025118, | |
| "loss": 0.7076, | |
| "step": 880 | |
| }, | |
| { | |
| "epoch": 0.7, | |
| "learning_rate": 0.00016507064364207222, | |
| "loss": 0.7124, | |
| "step": 890 | |
| }, | |
| { | |
| "epoch": 0.71, | |
| "learning_rate": 0.00016467817896389326, | |
| "loss": 0.8552, | |
| "step": 900 | |
| }, | |
| { | |
| "epoch": 0.71, | |
| "learning_rate": 0.00016428571428571428, | |
| "loss": 0.9654, | |
| "step": 910 | |
| }, | |
| { | |
| "epoch": 0.72, | |
| "learning_rate": 0.00016389324960753535, | |
| "loss": 0.6044, | |
| "step": 920 | |
| }, | |
| { | |
| "epoch": 0.73, | |
| "learning_rate": 0.00016350078492935637, | |
| "loss": 0.7184, | |
| "step": 930 | |
| }, | |
| { | |
| "epoch": 0.74, | |
| "learning_rate": 0.00016310832025117738, | |
| "loss": 0.649, | |
| "step": 940 | |
| }, | |
| { | |
| "epoch": 0.75, | |
| "learning_rate": 0.00016271585557299845, | |
| "loss": 0.6896, | |
| "step": 950 | |
| }, | |
| { | |
| "epoch": 0.75, | |
| "learning_rate": 0.00016232339089481947, | |
| "loss": 0.4548, | |
| "step": 960 | |
| }, | |
| { | |
| "epoch": 0.76, | |
| "learning_rate": 0.0001619309262166405, | |
| "loss": 0.9116, | |
| "step": 970 | |
| }, | |
| { | |
| "epoch": 0.77, | |
| "learning_rate": 0.00016153846153846155, | |
| "loss": 0.6909, | |
| "step": 980 | |
| }, | |
| { | |
| "epoch": 0.78, | |
| "learning_rate": 0.00016114599686028257, | |
| "loss": 0.7693, | |
| "step": 990 | |
| }, | |
| { | |
| "epoch": 0.78, | |
| "learning_rate": 0.00016075353218210364, | |
| "loss": 0.7824, | |
| "step": 1000 | |
| }, | |
| { | |
| "epoch": 0.78, | |
| "eval_accuracy": 0.81717277486911, | |
| "eval_loss": 0.6625257730484009, | |
| "eval_runtime": 104.6909, | |
| "eval_samples_per_second": 45.61, | |
| "eval_steps_per_second": 5.703, | |
| "step": 1000 | |
| }, | |
| { | |
| "epoch": 0.79, | |
| "learning_rate": 0.00016036106750392466, | |
| "loss": 0.8368, | |
| "step": 1010 | |
| }, | |
| { | |
| "epoch": 0.8, | |
| "learning_rate": 0.00015996860282574567, | |
| "loss": 0.6853, | |
| "step": 1020 | |
| }, | |
| { | |
| "epoch": 0.81, | |
| "learning_rate": 0.00015957613814756674, | |
| "loss": 0.6957, | |
| "step": 1030 | |
| }, | |
| { | |
| "epoch": 0.82, | |
| "learning_rate": 0.00015918367346938776, | |
| "loss": 0.7415, | |
| "step": 1040 | |
| }, | |
| { | |
| "epoch": 0.82, | |
| "learning_rate": 0.0001587912087912088, | |
| "loss": 0.719, | |
| "step": 1050 | |
| }, | |
| { | |
| "epoch": 0.83, | |
| "learning_rate": 0.00015839874411302984, | |
| "loss": 0.6999, | |
| "step": 1060 | |
| }, | |
| { | |
| "epoch": 0.84, | |
| "learning_rate": 0.00015800627943485086, | |
| "loss": 0.6508, | |
| "step": 1070 | |
| }, | |
| { | |
| "epoch": 0.85, | |
| "learning_rate": 0.00015761381475667193, | |
| "loss": 0.7951, | |
| "step": 1080 | |
| }, | |
| { | |
| "epoch": 0.86, | |
| "learning_rate": 0.00015722135007849295, | |
| "loss": 0.646, | |
| "step": 1090 | |
| }, | |
| { | |
| "epoch": 0.86, | |
| "learning_rate": 0.00015682888540031396, | |
| "loss": 0.568, | |
| "step": 1100 | |
| }, | |
| { | |
| "epoch": 0.87, | |
| "learning_rate": 0.00015643642072213503, | |
| "loss": 0.7428, | |
| "step": 1110 | |
| }, | |
| { | |
| "epoch": 0.88, | |
| "learning_rate": 0.00015604395604395605, | |
| "loss": 0.6068, | |
| "step": 1120 | |
| }, | |
| { | |
| "epoch": 0.89, | |
| "learning_rate": 0.0001556514913657771, | |
| "loss": 0.5023, | |
| "step": 1130 | |
| }, | |
| { | |
| "epoch": 0.89, | |
| "learning_rate": 0.00015525902668759813, | |
| "loss": 0.7015, | |
| "step": 1140 | |
| }, | |
| { | |
| "epoch": 0.9, | |
| "learning_rate": 0.00015486656200941915, | |
| "loss": 0.4978, | |
| "step": 1150 | |
| }, | |
| { | |
| "epoch": 0.91, | |
| "learning_rate": 0.0001544740973312402, | |
| "loss": 0.572, | |
| "step": 1160 | |
| }, | |
| { | |
| "epoch": 0.92, | |
| "learning_rate": 0.00015408163265306124, | |
| "loss": 0.6404, | |
| "step": 1170 | |
| }, | |
| { | |
| "epoch": 0.93, | |
| "learning_rate": 0.00015368916797488225, | |
| "loss": 0.7149, | |
| "step": 1180 | |
| }, | |
| { | |
| "epoch": 0.93, | |
| "learning_rate": 0.00015329670329670332, | |
| "loss": 0.6737, | |
| "step": 1190 | |
| }, | |
| { | |
| "epoch": 0.94, | |
| "learning_rate": 0.00015290423861852434, | |
| "loss": 0.5757, | |
| "step": 1200 | |
| }, | |
| { | |
| "epoch": 0.95, | |
| "learning_rate": 0.00015251177394034538, | |
| "loss": 0.5646, | |
| "step": 1210 | |
| }, | |
| { | |
| "epoch": 0.96, | |
| "learning_rate": 0.00015211930926216642, | |
| "loss": 0.8011, | |
| "step": 1220 | |
| }, | |
| { | |
| "epoch": 0.97, | |
| "learning_rate": 0.00015172684458398744, | |
| "loss": 0.7641, | |
| "step": 1230 | |
| }, | |
| { | |
| "epoch": 0.97, | |
| "learning_rate": 0.00015133437990580848, | |
| "loss": 0.7895, | |
| "step": 1240 | |
| }, | |
| { | |
| "epoch": 0.98, | |
| "learning_rate": 0.00015094191522762953, | |
| "loss": 0.7088, | |
| "step": 1250 | |
| }, | |
| { | |
| "epoch": 0.99, | |
| "learning_rate": 0.00015054945054945054, | |
| "loss": 0.6559, | |
| "step": 1260 | |
| }, | |
| { | |
| "epoch": 1.0, | |
| "learning_rate": 0.00015015698587127159, | |
| "loss": 0.7573, | |
| "step": 1270 | |
| }, | |
| { | |
| "epoch": 1.0, | |
| "learning_rate": 0.00014976452119309263, | |
| "loss": 0.8222, | |
| "step": 1280 | |
| }, | |
| { | |
| "epoch": 1.01, | |
| "learning_rate": 0.00014937205651491367, | |
| "loss": 0.5375, | |
| "step": 1290 | |
| }, | |
| { | |
| "epoch": 1.02, | |
| "learning_rate": 0.00014897959183673472, | |
| "loss": 0.5014, | |
| "step": 1300 | |
| }, | |
| { | |
| "epoch": 1.03, | |
| "learning_rate": 0.00014858712715855573, | |
| "loss": 0.4494, | |
| "step": 1310 | |
| }, | |
| { | |
| "epoch": 1.04, | |
| "learning_rate": 0.00014819466248037677, | |
| "loss": 0.6057, | |
| "step": 1320 | |
| }, | |
| { | |
| "epoch": 1.04, | |
| "learning_rate": 0.00014780219780219782, | |
| "loss": 0.5502, | |
| "step": 1330 | |
| }, | |
| { | |
| "epoch": 1.05, | |
| "learning_rate": 0.00014740973312401883, | |
| "loss": 0.5677, | |
| "step": 1340 | |
| }, | |
| { | |
| "epoch": 1.06, | |
| "learning_rate": 0.00014701726844583988, | |
| "loss": 0.5548, | |
| "step": 1350 | |
| }, | |
| { | |
| "epoch": 1.07, | |
| "learning_rate": 0.00014662480376766092, | |
| "loss": 0.5428, | |
| "step": 1360 | |
| }, | |
| { | |
| "epoch": 1.08, | |
| "learning_rate": 0.00014623233908948196, | |
| "loss": 0.6924, | |
| "step": 1370 | |
| }, | |
| { | |
| "epoch": 1.08, | |
| "learning_rate": 0.00014583987441130298, | |
| "loss": 0.4367, | |
| "step": 1380 | |
| }, | |
| { | |
| "epoch": 1.09, | |
| "learning_rate": 0.00014544740973312402, | |
| "loss": 0.4269, | |
| "step": 1390 | |
| }, | |
| { | |
| "epoch": 1.1, | |
| "learning_rate": 0.00014505494505494506, | |
| "loss": 0.4049, | |
| "step": 1400 | |
| }, | |
| { | |
| "epoch": 1.11, | |
| "learning_rate": 0.0001446624803767661, | |
| "loss": 0.4435, | |
| "step": 1410 | |
| }, | |
| { | |
| "epoch": 1.11, | |
| "learning_rate": 0.00014427001569858712, | |
| "loss": 0.5317, | |
| "step": 1420 | |
| }, | |
| { | |
| "epoch": 1.12, | |
| "learning_rate": 0.00014387755102040817, | |
| "loss": 0.7213, | |
| "step": 1430 | |
| }, | |
| { | |
| "epoch": 1.13, | |
| "learning_rate": 0.0001434850863422292, | |
| "loss": 0.8834, | |
| "step": 1440 | |
| }, | |
| { | |
| "epoch": 1.14, | |
| "learning_rate": 0.00014309262166405025, | |
| "loss": 0.5434, | |
| "step": 1450 | |
| }, | |
| { | |
| "epoch": 1.15, | |
| "learning_rate": 0.00014270015698587127, | |
| "loss": 0.5588, | |
| "step": 1460 | |
| }, | |
| { | |
| "epoch": 1.15, | |
| "learning_rate": 0.0001423076923076923, | |
| "loss": 0.6011, | |
| "step": 1470 | |
| }, | |
| { | |
| "epoch": 1.16, | |
| "learning_rate": 0.00014191522762951335, | |
| "loss": 0.4124, | |
| "step": 1480 | |
| }, | |
| { | |
| "epoch": 1.17, | |
| "learning_rate": 0.0001415227629513344, | |
| "loss": 0.5477, | |
| "step": 1490 | |
| }, | |
| { | |
| "epoch": 1.18, | |
| "learning_rate": 0.00014113029827315541, | |
| "loss": 0.4806, | |
| "step": 1500 | |
| }, | |
| { | |
| "epoch": 1.18, | |
| "eval_accuracy": 0.8617801047120419, | |
| "eval_loss": 0.4951171875, | |
| "eval_runtime": 104.5532, | |
| "eval_samples_per_second": 45.671, | |
| "eval_steps_per_second": 5.71, | |
| "step": 1500 | |
| }, | |
| { | |
| "epoch": 1.19, | |
| "learning_rate": 0.00014073783359497646, | |
| "loss": 0.5714, | |
| "step": 1510 | |
| }, | |
| { | |
| "epoch": 1.19, | |
| "learning_rate": 0.0001403453689167975, | |
| "loss": 0.4919, | |
| "step": 1520 | |
| }, | |
| { | |
| "epoch": 1.2, | |
| "learning_rate": 0.00013995290423861854, | |
| "loss": 0.6668, | |
| "step": 1530 | |
| }, | |
| { | |
| "epoch": 1.21, | |
| "learning_rate": 0.00013956043956043956, | |
| "loss": 0.6993, | |
| "step": 1540 | |
| }, | |
| { | |
| "epoch": 1.22, | |
| "learning_rate": 0.0001391679748822606, | |
| "loss": 0.7433, | |
| "step": 1550 | |
| }, | |
| { | |
| "epoch": 1.22, | |
| "learning_rate": 0.00013877551020408165, | |
| "loss": 0.6475, | |
| "step": 1560 | |
| }, | |
| { | |
| "epoch": 1.23, | |
| "learning_rate": 0.00013838304552590266, | |
| "loss": 0.5059, | |
| "step": 1570 | |
| }, | |
| { | |
| "epoch": 1.24, | |
| "learning_rate": 0.0001379905808477237, | |
| "loss": 0.6121, | |
| "step": 1580 | |
| }, | |
| { | |
| "epoch": 1.25, | |
| "learning_rate": 0.00013759811616954475, | |
| "loss": 0.7062, | |
| "step": 1590 | |
| }, | |
| { | |
| "epoch": 1.26, | |
| "learning_rate": 0.0001372056514913658, | |
| "loss": 0.4882, | |
| "step": 1600 | |
| }, | |
| { | |
| "epoch": 1.26, | |
| "learning_rate": 0.00013681318681318683, | |
| "loss": 0.5246, | |
| "step": 1610 | |
| }, | |
| { | |
| "epoch": 1.27, | |
| "learning_rate": 0.00013642072213500785, | |
| "loss": 0.5211, | |
| "step": 1620 | |
| }, | |
| { | |
| "epoch": 1.28, | |
| "learning_rate": 0.0001360282574568289, | |
| "loss": 0.5506, | |
| "step": 1630 | |
| }, | |
| { | |
| "epoch": 1.29, | |
| "learning_rate": 0.00013563579277864994, | |
| "loss": 0.3996, | |
| "step": 1640 | |
| }, | |
| { | |
| "epoch": 1.3, | |
| "learning_rate": 0.00013524332810047095, | |
| "loss": 0.5601, | |
| "step": 1650 | |
| }, | |
| { | |
| "epoch": 1.3, | |
| "learning_rate": 0.000134850863422292, | |
| "loss": 0.6195, | |
| "step": 1660 | |
| }, | |
| { | |
| "epoch": 1.31, | |
| "learning_rate": 0.00013445839874411304, | |
| "loss": 0.5741, | |
| "step": 1670 | |
| }, | |
| { | |
| "epoch": 1.32, | |
| "learning_rate": 0.00013406593406593405, | |
| "loss": 0.5964, | |
| "step": 1680 | |
| }, | |
| { | |
| "epoch": 1.33, | |
| "learning_rate": 0.00013367346938775512, | |
| "loss": 0.4962, | |
| "step": 1690 | |
| }, | |
| { | |
| "epoch": 1.33, | |
| "learning_rate": 0.00013328100470957614, | |
| "loss": 0.4467, | |
| "step": 1700 | |
| }, | |
| { | |
| "epoch": 1.34, | |
| "learning_rate": 0.00013288854003139718, | |
| "loss": 0.5517, | |
| "step": 1710 | |
| }, | |
| { | |
| "epoch": 1.35, | |
| "learning_rate": 0.00013249607535321823, | |
| "loss": 0.4578, | |
| "step": 1720 | |
| }, | |
| { | |
| "epoch": 1.36, | |
| "learning_rate": 0.00013210361067503924, | |
| "loss": 0.6583, | |
| "step": 1730 | |
| }, | |
| { | |
| "epoch": 1.37, | |
| "learning_rate": 0.00013171114599686029, | |
| "loss": 0.5374, | |
| "step": 1740 | |
| }, | |
| { | |
| "epoch": 1.37, | |
| "learning_rate": 0.00013131868131868133, | |
| "loss": 0.7157, | |
| "step": 1750 | |
| }, | |
| { | |
| "epoch": 1.38, | |
| "learning_rate": 0.00013092621664050234, | |
| "loss": 0.3906, | |
| "step": 1760 | |
| }, | |
| { | |
| "epoch": 1.39, | |
| "learning_rate": 0.00013053375196232341, | |
| "loss": 0.6177, | |
| "step": 1770 | |
| }, | |
| { | |
| "epoch": 1.4, | |
| "learning_rate": 0.00013014128728414443, | |
| "loss": 0.4234, | |
| "step": 1780 | |
| }, | |
| { | |
| "epoch": 1.41, | |
| "learning_rate": 0.00012974882260596545, | |
| "loss": 0.4714, | |
| "step": 1790 | |
| }, | |
| { | |
| "epoch": 1.41, | |
| "learning_rate": 0.00012935635792778652, | |
| "loss": 0.5318, | |
| "step": 1800 | |
| }, | |
| { | |
| "epoch": 1.42, | |
| "learning_rate": 0.00012896389324960753, | |
| "loss": 0.6005, | |
| "step": 1810 | |
| }, | |
| { | |
| "epoch": 1.43, | |
| "learning_rate": 0.00012857142857142858, | |
| "loss": 0.514, | |
| "step": 1820 | |
| }, | |
| { | |
| "epoch": 1.44, | |
| "learning_rate": 0.00012817896389324962, | |
| "loss": 0.4319, | |
| "step": 1830 | |
| }, | |
| { | |
| "epoch": 1.44, | |
| "learning_rate": 0.00012778649921507063, | |
| "loss": 0.3819, | |
| "step": 1840 | |
| }, | |
| { | |
| "epoch": 1.45, | |
| "learning_rate": 0.0001273940345368917, | |
| "loss": 0.7177, | |
| "step": 1850 | |
| }, | |
| { | |
| "epoch": 1.46, | |
| "learning_rate": 0.00012700156985871272, | |
| "loss": 0.5326, | |
| "step": 1860 | |
| }, | |
| { | |
| "epoch": 1.47, | |
| "learning_rate": 0.00012660910518053374, | |
| "loss": 0.5763, | |
| "step": 1870 | |
| }, | |
| { | |
| "epoch": 1.48, | |
| "learning_rate": 0.0001262166405023548, | |
| "loss": 0.6364, | |
| "step": 1880 | |
| }, | |
| { | |
| "epoch": 1.48, | |
| "learning_rate": 0.00012582417582417582, | |
| "loss": 0.6008, | |
| "step": 1890 | |
| }, | |
| { | |
| "epoch": 1.49, | |
| "learning_rate": 0.00012543171114599687, | |
| "loss": 0.6064, | |
| "step": 1900 | |
| }, | |
| { | |
| "epoch": 1.5, | |
| "learning_rate": 0.0001250392464678179, | |
| "loss": 0.4222, | |
| "step": 1910 | |
| }, | |
| { | |
| "epoch": 1.51, | |
| "learning_rate": 0.00012464678178963893, | |
| "loss": 0.4328, | |
| "step": 1920 | |
| }, | |
| { | |
| "epoch": 1.51, | |
| "learning_rate": 0.00012425431711146, | |
| "loss": 0.4382, | |
| "step": 1930 | |
| }, | |
| { | |
| "epoch": 1.52, | |
| "learning_rate": 0.000123861852433281, | |
| "loss": 0.4715, | |
| "step": 1940 | |
| }, | |
| { | |
| "epoch": 1.53, | |
| "learning_rate": 0.00012346938775510203, | |
| "loss": 0.5137, | |
| "step": 1950 | |
| }, | |
| { | |
| "epoch": 1.54, | |
| "learning_rate": 0.0001230769230769231, | |
| "loss": 0.5555, | |
| "step": 1960 | |
| }, | |
| { | |
| "epoch": 1.55, | |
| "learning_rate": 0.0001226844583987441, | |
| "loss": 0.6333, | |
| "step": 1970 | |
| }, | |
| { | |
| "epoch": 1.55, | |
| "learning_rate": 0.00012229199372056516, | |
| "loss": 0.5118, | |
| "step": 1980 | |
| }, | |
| { | |
| "epoch": 1.56, | |
| "learning_rate": 0.0001218995290423862, | |
| "loss": 0.3767, | |
| "step": 1990 | |
| }, | |
| { | |
| "epoch": 1.57, | |
| "learning_rate": 0.00012150706436420723, | |
| "loss": 0.6206, | |
| "step": 2000 | |
| }, | |
| { | |
| "epoch": 1.57, | |
| "eval_accuracy": 0.88, | |
| "eval_loss": 0.4433819651603699, | |
| "eval_runtime": 104.7073, | |
| "eval_samples_per_second": 45.603, | |
| "eval_steps_per_second": 5.702, | |
| "step": 2000 | |
| }, | |
| { | |
| "epoch": 1.58, | |
| "learning_rate": 0.00012111459968602827, | |
| "loss": 0.4409, | |
| "step": 2010 | |
| }, | |
| { | |
| "epoch": 1.59, | |
| "learning_rate": 0.0001207221350078493, | |
| "loss": 0.5458, | |
| "step": 2020 | |
| }, | |
| { | |
| "epoch": 1.59, | |
| "learning_rate": 0.00012032967032967033, | |
| "loss": 0.369, | |
| "step": 2030 | |
| }, | |
| { | |
| "epoch": 1.6, | |
| "learning_rate": 0.00011993720565149137, | |
| "loss": 0.5078, | |
| "step": 2040 | |
| }, | |
| { | |
| "epoch": 1.61, | |
| "learning_rate": 0.0001195447409733124, | |
| "loss": 0.5815, | |
| "step": 2050 | |
| }, | |
| { | |
| "epoch": 1.62, | |
| "learning_rate": 0.00011915227629513343, | |
| "loss": 0.4357, | |
| "step": 2060 | |
| }, | |
| { | |
| "epoch": 1.62, | |
| "learning_rate": 0.00011875981161695449, | |
| "loss": 0.5955, | |
| "step": 2070 | |
| }, | |
| { | |
| "epoch": 1.63, | |
| "learning_rate": 0.00011836734693877552, | |
| "loss": 0.4177, | |
| "step": 2080 | |
| }, | |
| { | |
| "epoch": 1.64, | |
| "learning_rate": 0.00011797488226059654, | |
| "loss": 0.5155, | |
| "step": 2090 | |
| }, | |
| { | |
| "epoch": 1.65, | |
| "learning_rate": 0.00011758241758241759, | |
| "loss": 0.5691, | |
| "step": 2100 | |
| }, | |
| { | |
| "epoch": 1.66, | |
| "learning_rate": 0.00011718995290423862, | |
| "loss": 0.5891, | |
| "step": 2110 | |
| }, | |
| { | |
| "epoch": 1.66, | |
| "learning_rate": 0.00011679748822605966, | |
| "loss": 0.5013, | |
| "step": 2120 | |
| }, | |
| { | |
| "epoch": 1.67, | |
| "learning_rate": 0.0001164050235478807, | |
| "loss": 0.4859, | |
| "step": 2130 | |
| }, | |
| { | |
| "epoch": 1.68, | |
| "learning_rate": 0.00011601255886970172, | |
| "loss": 0.4267, | |
| "step": 2140 | |
| }, | |
| { | |
| "epoch": 1.69, | |
| "learning_rate": 0.00011562009419152278, | |
| "loss": 0.3648, | |
| "step": 2150 | |
| }, | |
| { | |
| "epoch": 1.7, | |
| "learning_rate": 0.00011522762951334381, | |
| "loss": 0.5093, | |
| "step": 2160 | |
| }, | |
| { | |
| "epoch": 1.7, | |
| "learning_rate": 0.00011483516483516483, | |
| "loss": 0.6334, | |
| "step": 2170 | |
| }, | |
| { | |
| "epoch": 1.71, | |
| "learning_rate": 0.00011444270015698588, | |
| "loss": 0.4285, | |
| "step": 2180 | |
| }, | |
| { | |
| "epoch": 1.72, | |
| "learning_rate": 0.00011405023547880691, | |
| "loss": 0.4107, | |
| "step": 2190 | |
| }, | |
| { | |
| "epoch": 1.73, | |
| "learning_rate": 0.00011365777080062794, | |
| "loss": 0.4215, | |
| "step": 2200 | |
| }, | |
| { | |
| "epoch": 1.73, | |
| "learning_rate": 0.00011326530612244898, | |
| "loss": 0.574, | |
| "step": 2210 | |
| }, | |
| { | |
| "epoch": 1.74, | |
| "learning_rate": 0.00011287284144427001, | |
| "loss": 0.5277, | |
| "step": 2220 | |
| }, | |
| { | |
| "epoch": 1.75, | |
| "learning_rate": 0.00011248037676609107, | |
| "loss": 0.4283, | |
| "step": 2230 | |
| }, | |
| { | |
| "epoch": 1.76, | |
| "learning_rate": 0.0001120879120879121, | |
| "loss": 0.5907, | |
| "step": 2240 | |
| }, | |
| { | |
| "epoch": 1.77, | |
| "learning_rate": 0.00011169544740973312, | |
| "loss": 0.4093, | |
| "step": 2250 | |
| }, | |
| { | |
| "epoch": 1.77, | |
| "learning_rate": 0.00011130298273155417, | |
| "loss": 0.5197, | |
| "step": 2260 | |
| }, | |
| { | |
| "epoch": 1.78, | |
| "learning_rate": 0.0001109105180533752, | |
| "loss": 0.6211, | |
| "step": 2270 | |
| }, | |
| { | |
| "epoch": 1.79, | |
| "learning_rate": 0.00011051805337519623, | |
| "loss": 0.6174, | |
| "step": 2280 | |
| }, | |
| { | |
| "epoch": 1.8, | |
| "learning_rate": 0.00011012558869701728, | |
| "loss": 0.6469, | |
| "step": 2290 | |
| }, | |
| { | |
| "epoch": 1.81, | |
| "learning_rate": 0.0001097331240188383, | |
| "loss": 0.5121, | |
| "step": 2300 | |
| }, | |
| { | |
| "epoch": 1.81, | |
| "learning_rate": 0.00010934065934065933, | |
| "loss": 0.5431, | |
| "step": 2310 | |
| }, | |
| { | |
| "epoch": 1.82, | |
| "learning_rate": 0.00010894819466248039, | |
| "loss": 0.4473, | |
| "step": 2320 | |
| }, | |
| { | |
| "epoch": 1.83, | |
| "learning_rate": 0.0001085557299843014, | |
| "loss": 0.4877, | |
| "step": 2330 | |
| }, | |
| { | |
| "epoch": 1.84, | |
| "learning_rate": 0.00010816326530612246, | |
| "loss": 0.5579, | |
| "step": 2340 | |
| }, | |
| { | |
| "epoch": 1.84, | |
| "learning_rate": 0.00010777080062794349, | |
| "loss": 0.4271, | |
| "step": 2350 | |
| }, | |
| { | |
| "epoch": 1.85, | |
| "learning_rate": 0.00010737833594976452, | |
| "loss": 0.5546, | |
| "step": 2360 | |
| }, | |
| { | |
| "epoch": 1.86, | |
| "learning_rate": 0.00010698587127158557, | |
| "loss": 0.3829, | |
| "step": 2370 | |
| }, | |
| { | |
| "epoch": 1.87, | |
| "learning_rate": 0.0001065934065934066, | |
| "loss": 0.5093, | |
| "step": 2380 | |
| }, | |
| { | |
| "epoch": 1.88, | |
| "learning_rate": 0.00010620094191522762, | |
| "loss": 0.3551, | |
| "step": 2390 | |
| }, | |
| { | |
| "epoch": 1.88, | |
| "learning_rate": 0.00010580847723704868, | |
| "loss": 0.4126, | |
| "step": 2400 | |
| }, | |
| { | |
| "epoch": 1.89, | |
| "learning_rate": 0.0001054160125588697, | |
| "loss": 0.4307, | |
| "step": 2410 | |
| }, | |
| { | |
| "epoch": 1.9, | |
| "learning_rate": 0.00010502354788069075, | |
| "loss": 0.4657, | |
| "step": 2420 | |
| }, | |
| { | |
| "epoch": 1.91, | |
| "learning_rate": 0.00010463108320251178, | |
| "loss": 0.39, | |
| "step": 2430 | |
| }, | |
| { | |
| "epoch": 1.92, | |
| "learning_rate": 0.00010423861852433281, | |
| "loss": 0.4949, | |
| "step": 2440 | |
| }, | |
| { | |
| "epoch": 1.92, | |
| "learning_rate": 0.00010384615384615386, | |
| "loss": 0.5374, | |
| "step": 2450 | |
| }, | |
| { | |
| "epoch": 1.93, | |
| "learning_rate": 0.00010345368916797489, | |
| "loss": 0.3725, | |
| "step": 2460 | |
| }, | |
| { | |
| "epoch": 1.94, | |
| "learning_rate": 0.00010306122448979591, | |
| "loss": 0.6204, | |
| "step": 2470 | |
| }, | |
| { | |
| "epoch": 1.95, | |
| "learning_rate": 0.00010266875981161697, | |
| "loss": 0.6382, | |
| "step": 2480 | |
| }, | |
| { | |
| "epoch": 1.95, | |
| "learning_rate": 0.00010227629513343799, | |
| "loss": 0.4172, | |
| "step": 2490 | |
| }, | |
| { | |
| "epoch": 1.96, | |
| "learning_rate": 0.00010188383045525902, | |
| "loss": 0.5096, | |
| "step": 2500 | |
| }, | |
| { | |
| "epoch": 1.96, | |
| "eval_accuracy": 0.8682722513089005, | |
| "eval_loss": 0.4937422275543213, | |
| "eval_runtime": 104.7964, | |
| "eval_samples_per_second": 45.565, | |
| "eval_steps_per_second": 5.697, | |
| "step": 2500 | |
| }, | |
| { | |
| "epoch": 1.97, | |
| "learning_rate": 0.00010149136577708007, | |
| "loss": 0.7216, | |
| "step": 2510 | |
| }, | |
| { | |
| "epoch": 1.98, | |
| "learning_rate": 0.0001010989010989011, | |
| "loss": 0.5306, | |
| "step": 2520 | |
| }, | |
| { | |
| "epoch": 1.99, | |
| "learning_rate": 0.00010070643642072215, | |
| "loss": 0.3693, | |
| "step": 2530 | |
| }, | |
| { | |
| "epoch": 1.99, | |
| "learning_rate": 0.00010031397174254318, | |
| "loss": 0.2895, | |
| "step": 2540 | |
| }, | |
| { | |
| "epoch": 2.0, | |
| "learning_rate": 9.992150706436422e-05, | |
| "loss": 0.3191, | |
| "step": 2550 | |
| }, | |
| { | |
| "epoch": 2.01, | |
| "learning_rate": 9.952904238618525e-05, | |
| "loss": 0.3354, | |
| "step": 2560 | |
| }, | |
| { | |
| "epoch": 2.02, | |
| "learning_rate": 9.913657770800628e-05, | |
| "loss": 0.5719, | |
| "step": 2570 | |
| }, | |
| { | |
| "epoch": 2.03, | |
| "learning_rate": 9.874411302982732e-05, | |
| "loss": 0.3117, | |
| "step": 2580 | |
| }, | |
| { | |
| "epoch": 2.03, | |
| "learning_rate": 9.835164835164835e-05, | |
| "loss": 0.4133, | |
| "step": 2590 | |
| }, | |
| { | |
| "epoch": 2.04, | |
| "learning_rate": 9.79591836734694e-05, | |
| "loss": 0.3336, | |
| "step": 2600 | |
| }, | |
| { | |
| "epoch": 2.05, | |
| "learning_rate": 9.756671899529042e-05, | |
| "loss": 0.1835, | |
| "step": 2610 | |
| }, | |
| { | |
| "epoch": 2.06, | |
| "learning_rate": 9.717425431711147e-05, | |
| "loss": 0.3862, | |
| "step": 2620 | |
| }, | |
| { | |
| "epoch": 2.06, | |
| "learning_rate": 9.67817896389325e-05, | |
| "loss": 0.4571, | |
| "step": 2630 | |
| }, | |
| { | |
| "epoch": 2.07, | |
| "learning_rate": 9.638932496075354e-05, | |
| "loss": 0.5059, | |
| "step": 2640 | |
| }, | |
| { | |
| "epoch": 2.08, | |
| "learning_rate": 9.599686028257457e-05, | |
| "loss": 0.3877, | |
| "step": 2650 | |
| }, | |
| { | |
| "epoch": 2.09, | |
| "learning_rate": 9.560439560439561e-05, | |
| "loss": 0.4755, | |
| "step": 2660 | |
| }, | |
| { | |
| "epoch": 2.1, | |
| "learning_rate": 9.521193092621664e-05, | |
| "loss": 0.33, | |
| "step": 2670 | |
| }, | |
| { | |
| "epoch": 2.1, | |
| "learning_rate": 9.481946624803768e-05, | |
| "loss": 0.3211, | |
| "step": 2680 | |
| }, | |
| { | |
| "epoch": 2.11, | |
| "learning_rate": 9.442700156985871e-05, | |
| "loss": 0.5344, | |
| "step": 2690 | |
| }, | |
| { | |
| "epoch": 2.12, | |
| "learning_rate": 9.403453689167976e-05, | |
| "loss": 0.503, | |
| "step": 2700 | |
| }, | |
| { | |
| "epoch": 2.13, | |
| "learning_rate": 9.364207221350079e-05, | |
| "loss": 0.2102, | |
| "step": 2710 | |
| }, | |
| { | |
| "epoch": 2.14, | |
| "learning_rate": 9.324960753532183e-05, | |
| "loss": 0.5119, | |
| "step": 2720 | |
| }, | |
| { | |
| "epoch": 2.14, | |
| "learning_rate": 9.285714285714286e-05, | |
| "loss": 0.2937, | |
| "step": 2730 | |
| }, | |
| { | |
| "epoch": 2.15, | |
| "learning_rate": 9.246467817896389e-05, | |
| "loss": 0.2749, | |
| "step": 2740 | |
| }, | |
| { | |
| "epoch": 2.16, | |
| "learning_rate": 9.207221350078493e-05, | |
| "loss": 0.5112, | |
| "step": 2750 | |
| }, | |
| { | |
| "epoch": 2.17, | |
| "learning_rate": 9.167974882260597e-05, | |
| "loss": 0.4431, | |
| "step": 2760 | |
| }, | |
| { | |
| "epoch": 2.17, | |
| "learning_rate": 9.1287284144427e-05, | |
| "loss": 0.3892, | |
| "step": 2770 | |
| }, | |
| { | |
| "epoch": 2.18, | |
| "learning_rate": 9.089481946624803e-05, | |
| "loss": 0.4078, | |
| "step": 2780 | |
| }, | |
| { | |
| "epoch": 2.19, | |
| "learning_rate": 9.050235478806908e-05, | |
| "loss": 0.4673, | |
| "step": 2790 | |
| }, | |
| { | |
| "epoch": 2.2, | |
| "learning_rate": 9.010989010989012e-05, | |
| "loss": 0.3158, | |
| "step": 2800 | |
| }, | |
| { | |
| "epoch": 2.21, | |
| "learning_rate": 8.971742543171115e-05, | |
| "loss": 0.2539, | |
| "step": 2810 | |
| }, | |
| { | |
| "epoch": 2.21, | |
| "learning_rate": 8.932496075353218e-05, | |
| "loss": 0.4073, | |
| "step": 2820 | |
| }, | |
| { | |
| "epoch": 2.22, | |
| "learning_rate": 8.893249607535322e-05, | |
| "loss": 0.4748, | |
| "step": 2830 | |
| }, | |
| { | |
| "epoch": 2.23, | |
| "learning_rate": 8.854003139717426e-05, | |
| "loss": 0.4274, | |
| "step": 2840 | |
| }, | |
| { | |
| "epoch": 2.24, | |
| "learning_rate": 8.81475667189953e-05, | |
| "loss": 0.2248, | |
| "step": 2850 | |
| }, | |
| { | |
| "epoch": 2.24, | |
| "learning_rate": 8.775510204081632e-05, | |
| "loss": 0.2974, | |
| "step": 2860 | |
| }, | |
| { | |
| "epoch": 2.25, | |
| "learning_rate": 8.736263736263737e-05, | |
| "loss": 0.3354, | |
| "step": 2870 | |
| }, | |
| { | |
| "epoch": 2.26, | |
| "learning_rate": 8.697017268445841e-05, | |
| "loss": 0.2687, | |
| "step": 2880 | |
| }, | |
| { | |
| "epoch": 2.27, | |
| "learning_rate": 8.657770800627944e-05, | |
| "loss": 0.3089, | |
| "step": 2890 | |
| }, | |
| { | |
| "epoch": 2.28, | |
| "learning_rate": 8.618524332810047e-05, | |
| "loss": 0.326, | |
| "step": 2900 | |
| }, | |
| { | |
| "epoch": 2.28, | |
| "learning_rate": 8.579277864992151e-05, | |
| "loss": 0.437, | |
| "step": 2910 | |
| }, | |
| { | |
| "epoch": 2.29, | |
| "learning_rate": 8.540031397174256e-05, | |
| "loss": 0.3589, | |
| "step": 2920 | |
| }, | |
| { | |
| "epoch": 2.3, | |
| "learning_rate": 8.500784929356358e-05, | |
| "loss": 0.3766, | |
| "step": 2930 | |
| }, | |
| { | |
| "epoch": 2.31, | |
| "learning_rate": 8.461538461538461e-05, | |
| "loss": 0.2418, | |
| "step": 2940 | |
| }, | |
| { | |
| "epoch": 2.32, | |
| "learning_rate": 8.422291993720566e-05, | |
| "loss": 0.3981, | |
| "step": 2950 | |
| }, | |
| { | |
| "epoch": 2.32, | |
| "learning_rate": 8.38304552590267e-05, | |
| "loss": 0.3544, | |
| "step": 2960 | |
| }, | |
| { | |
| "epoch": 2.33, | |
| "learning_rate": 8.343799058084773e-05, | |
| "loss": 0.2367, | |
| "step": 2970 | |
| }, | |
| { | |
| "epoch": 2.34, | |
| "learning_rate": 8.304552590266876e-05, | |
| "loss": 0.3049, | |
| "step": 2980 | |
| }, | |
| { | |
| "epoch": 2.35, | |
| "learning_rate": 8.26530612244898e-05, | |
| "loss": 0.4299, | |
| "step": 2990 | |
| }, | |
| { | |
| "epoch": 2.35, | |
| "learning_rate": 8.226059654631083e-05, | |
| "loss": 0.4576, | |
| "step": 3000 | |
| }, | |
| { | |
| "epoch": 2.35, | |
| "eval_accuracy": 0.8906806282722514, | |
| "eval_loss": 0.4059741497039795, | |
| "eval_runtime": 104.8886, | |
| "eval_samples_per_second": 45.524, | |
| "eval_steps_per_second": 5.692, | |
| "step": 3000 | |
| }, | |
| { | |
| "epoch": 2.36, | |
| "learning_rate": 8.186813186813188e-05, | |
| "loss": 0.2715, | |
| "step": 3010 | |
| }, | |
| { | |
| "epoch": 2.37, | |
| "learning_rate": 8.14756671899529e-05, | |
| "loss": 0.4298, | |
| "step": 3020 | |
| }, | |
| { | |
| "epoch": 2.38, | |
| "learning_rate": 8.108320251177395e-05, | |
| "loss": 0.2927, | |
| "step": 3030 | |
| }, | |
| { | |
| "epoch": 2.39, | |
| "learning_rate": 8.069073783359498e-05, | |
| "loss": 0.2767, | |
| "step": 3040 | |
| }, | |
| { | |
| "epoch": 2.39, | |
| "learning_rate": 8.029827315541602e-05, | |
| "loss": 0.2875, | |
| "step": 3050 | |
| }, | |
| { | |
| "epoch": 2.4, | |
| "learning_rate": 7.990580847723705e-05, | |
| "loss": 0.3713, | |
| "step": 3060 | |
| }, | |
| { | |
| "epoch": 2.41, | |
| "learning_rate": 7.951334379905809e-05, | |
| "loss": 0.3841, | |
| "step": 3070 | |
| }, | |
| { | |
| "epoch": 2.42, | |
| "learning_rate": 7.912087912087912e-05, | |
| "loss": 0.3523, | |
| "step": 3080 | |
| }, | |
| { | |
| "epoch": 2.43, | |
| "learning_rate": 7.872841444270017e-05, | |
| "loss": 0.3103, | |
| "step": 3090 | |
| }, | |
| { | |
| "epoch": 2.43, | |
| "learning_rate": 7.83359497645212e-05, | |
| "loss": 0.4337, | |
| "step": 3100 | |
| }, | |
| { | |
| "epoch": 2.44, | |
| "learning_rate": 7.794348508634224e-05, | |
| "loss": 0.3, | |
| "step": 3110 | |
| }, | |
| { | |
| "epoch": 2.45, | |
| "learning_rate": 7.755102040816327e-05, | |
| "loss": 0.3451, | |
| "step": 3120 | |
| }, | |
| { | |
| "epoch": 2.46, | |
| "learning_rate": 7.715855572998431e-05, | |
| "loss": 0.4831, | |
| "step": 3130 | |
| }, | |
| { | |
| "epoch": 2.46, | |
| "learning_rate": 7.676609105180534e-05, | |
| "loss": 0.4635, | |
| "step": 3140 | |
| }, | |
| { | |
| "epoch": 2.47, | |
| "learning_rate": 7.637362637362637e-05, | |
| "loss": 0.3224, | |
| "step": 3150 | |
| }, | |
| { | |
| "epoch": 2.48, | |
| "learning_rate": 7.598116169544741e-05, | |
| "loss": 0.4548, | |
| "step": 3160 | |
| }, | |
| { | |
| "epoch": 2.49, | |
| "learning_rate": 7.558869701726846e-05, | |
| "loss": 0.3212, | |
| "step": 3170 | |
| }, | |
| { | |
| "epoch": 2.5, | |
| "learning_rate": 7.519623233908949e-05, | |
| "loss": 0.3922, | |
| "step": 3180 | |
| }, | |
| { | |
| "epoch": 2.5, | |
| "learning_rate": 7.480376766091051e-05, | |
| "loss": 0.4831, | |
| "step": 3190 | |
| }, | |
| { | |
| "epoch": 2.51, | |
| "learning_rate": 7.441130298273156e-05, | |
| "loss": 0.1519, | |
| "step": 3200 | |
| }, | |
| { | |
| "epoch": 2.52, | |
| "learning_rate": 7.40188383045526e-05, | |
| "loss": 0.301, | |
| "step": 3210 | |
| }, | |
| { | |
| "epoch": 2.53, | |
| "learning_rate": 7.362637362637363e-05, | |
| "loss": 0.3742, | |
| "step": 3220 | |
| }, | |
| { | |
| "epoch": 2.54, | |
| "learning_rate": 7.323390894819466e-05, | |
| "loss": 0.2249, | |
| "step": 3230 | |
| }, | |
| { | |
| "epoch": 2.54, | |
| "learning_rate": 7.28414442700157e-05, | |
| "loss": 0.5049, | |
| "step": 3240 | |
| }, | |
| { | |
| "epoch": 2.55, | |
| "learning_rate": 7.244897959183675e-05, | |
| "loss": 0.2744, | |
| "step": 3250 | |
| }, | |
| { | |
| "epoch": 2.56, | |
| "learning_rate": 7.205651491365776e-05, | |
| "loss": 0.4095, | |
| "step": 3260 | |
| }, | |
| { | |
| "epoch": 2.57, | |
| "learning_rate": 7.16640502354788e-05, | |
| "loss": 0.5031, | |
| "step": 3270 | |
| }, | |
| { | |
| "epoch": 2.57, | |
| "learning_rate": 7.127158555729985e-05, | |
| "loss": 0.3179, | |
| "step": 3280 | |
| }, | |
| { | |
| "epoch": 2.58, | |
| "learning_rate": 7.087912087912089e-05, | |
| "loss": 0.2982, | |
| "step": 3290 | |
| }, | |
| { | |
| "epoch": 2.59, | |
| "learning_rate": 7.048665620094191e-05, | |
| "loss": 0.2934, | |
| "step": 3300 | |
| }, | |
| { | |
| "epoch": 2.6, | |
| "learning_rate": 7.009419152276295e-05, | |
| "loss": 0.395, | |
| "step": 3310 | |
| }, | |
| { | |
| "epoch": 2.61, | |
| "learning_rate": 6.9701726844584e-05, | |
| "loss": 0.2843, | |
| "step": 3320 | |
| }, | |
| { | |
| "epoch": 2.61, | |
| "learning_rate": 6.930926216640504e-05, | |
| "loss": 0.2671, | |
| "step": 3330 | |
| }, | |
| { | |
| "epoch": 2.62, | |
| "learning_rate": 6.891679748822605e-05, | |
| "loss": 0.2192, | |
| "step": 3340 | |
| }, | |
| { | |
| "epoch": 2.63, | |
| "learning_rate": 6.85243328100471e-05, | |
| "loss": 0.298, | |
| "step": 3350 | |
| }, | |
| { | |
| "epoch": 2.64, | |
| "learning_rate": 6.813186813186814e-05, | |
| "loss": 0.3189, | |
| "step": 3360 | |
| }, | |
| { | |
| "epoch": 2.65, | |
| "learning_rate": 6.773940345368918e-05, | |
| "loss": 0.3521, | |
| "step": 3370 | |
| }, | |
| { | |
| "epoch": 2.65, | |
| "learning_rate": 6.73469387755102e-05, | |
| "loss": 0.293, | |
| "step": 3380 | |
| }, | |
| { | |
| "epoch": 2.66, | |
| "learning_rate": 6.695447409733124e-05, | |
| "loss": 0.3281, | |
| "step": 3390 | |
| }, | |
| { | |
| "epoch": 2.67, | |
| "learning_rate": 6.656200941915228e-05, | |
| "loss": 0.2706, | |
| "step": 3400 | |
| }, | |
| { | |
| "epoch": 2.68, | |
| "learning_rate": 6.616954474097331e-05, | |
| "loss": 0.2927, | |
| "step": 3410 | |
| }, | |
| { | |
| "epoch": 2.68, | |
| "learning_rate": 6.577708006279434e-05, | |
| "loss": 0.2787, | |
| "step": 3420 | |
| }, | |
| { | |
| "epoch": 2.69, | |
| "learning_rate": 6.538461538461539e-05, | |
| "loss": 0.3786, | |
| "step": 3430 | |
| }, | |
| { | |
| "epoch": 2.7, | |
| "learning_rate": 6.499215070643643e-05, | |
| "loss": 0.2341, | |
| "step": 3440 | |
| }, | |
| { | |
| "epoch": 2.71, | |
| "learning_rate": 6.459968602825746e-05, | |
| "loss": 0.4556, | |
| "step": 3450 | |
| }, | |
| { | |
| "epoch": 2.72, | |
| "learning_rate": 6.420722135007849e-05, | |
| "loss": 0.1937, | |
| "step": 3460 | |
| }, | |
| { | |
| "epoch": 2.72, | |
| "learning_rate": 6.381475667189953e-05, | |
| "loss": 0.4824, | |
| "step": 3470 | |
| }, | |
| { | |
| "epoch": 2.73, | |
| "learning_rate": 6.342229199372057e-05, | |
| "loss": 0.4207, | |
| "step": 3480 | |
| }, | |
| { | |
| "epoch": 2.74, | |
| "learning_rate": 6.30298273155416e-05, | |
| "loss": 0.4383, | |
| "step": 3490 | |
| }, | |
| { | |
| "epoch": 2.75, | |
| "learning_rate": 6.263736263736263e-05, | |
| "loss": 0.3284, | |
| "step": 3500 | |
| }, | |
| { | |
| "epoch": 2.75, | |
| "eval_accuracy": 0.9080628272251309, | |
| "eval_loss": 0.34141793847084045, | |
| "eval_runtime": 105.2957, | |
| "eval_samples_per_second": 45.348, | |
| "eval_steps_per_second": 5.67, | |
| "step": 3500 | |
| }, | |
| { | |
| "epoch": 2.76, | |
| "learning_rate": 6.224489795918368e-05, | |
| "loss": 0.2761, | |
| "step": 3510 | |
| }, | |
| { | |
| "epoch": 2.76, | |
| "learning_rate": 6.185243328100472e-05, | |
| "loss": 0.2809, | |
| "step": 3520 | |
| }, | |
| { | |
| "epoch": 2.77, | |
| "learning_rate": 6.145996860282575e-05, | |
| "loss": 0.369, | |
| "step": 3530 | |
| }, | |
| { | |
| "epoch": 2.78, | |
| "learning_rate": 6.106750392464678e-05, | |
| "loss": 0.4629, | |
| "step": 3540 | |
| }, | |
| { | |
| "epoch": 2.79, | |
| "learning_rate": 6.067503924646782e-05, | |
| "loss": 0.4081, | |
| "step": 3550 | |
| }, | |
| { | |
| "epoch": 2.79, | |
| "learning_rate": 6.028257456828885e-05, | |
| "loss": 0.216, | |
| "step": 3560 | |
| }, | |
| { | |
| "epoch": 2.8, | |
| "learning_rate": 5.9890109890109894e-05, | |
| "loss": 0.4566, | |
| "step": 3570 | |
| }, | |
| { | |
| "epoch": 2.81, | |
| "learning_rate": 5.949764521193093e-05, | |
| "loss": 0.2259, | |
| "step": 3580 | |
| }, | |
| { | |
| "epoch": 2.82, | |
| "learning_rate": 5.910518053375197e-05, | |
| "loss": 0.2951, | |
| "step": 3590 | |
| }, | |
| { | |
| "epoch": 2.83, | |
| "learning_rate": 5.8712715855572997e-05, | |
| "loss": 0.346, | |
| "step": 3600 | |
| }, | |
| { | |
| "epoch": 2.83, | |
| "learning_rate": 5.832025117739404e-05, | |
| "loss": 0.3088, | |
| "step": 3610 | |
| }, | |
| { | |
| "epoch": 2.84, | |
| "learning_rate": 5.7927786499215076e-05, | |
| "loss": 0.3857, | |
| "step": 3620 | |
| }, | |
| { | |
| "epoch": 2.85, | |
| "learning_rate": 5.753532182103611e-05, | |
| "loss": 0.32, | |
| "step": 3630 | |
| }, | |
| { | |
| "epoch": 2.86, | |
| "learning_rate": 5.714285714285714e-05, | |
| "loss": 0.3178, | |
| "step": 3640 | |
| }, | |
| { | |
| "epoch": 2.86, | |
| "learning_rate": 5.6750392464678185e-05, | |
| "loss": 0.3984, | |
| "step": 3650 | |
| }, | |
| { | |
| "epoch": 2.87, | |
| "learning_rate": 5.635792778649922e-05, | |
| "loss": 0.4079, | |
| "step": 3660 | |
| }, | |
| { | |
| "epoch": 2.88, | |
| "learning_rate": 5.596546310832025e-05, | |
| "loss": 0.2981, | |
| "step": 3670 | |
| }, | |
| { | |
| "epoch": 2.89, | |
| "learning_rate": 5.557299843014129e-05, | |
| "loss": 0.179, | |
| "step": 3680 | |
| }, | |
| { | |
| "epoch": 2.9, | |
| "learning_rate": 5.518053375196233e-05, | |
| "loss": 0.3578, | |
| "step": 3690 | |
| }, | |
| { | |
| "epoch": 2.9, | |
| "learning_rate": 5.4788069073783366e-05, | |
| "loss": 0.235, | |
| "step": 3700 | |
| }, | |
| { | |
| "epoch": 2.91, | |
| "learning_rate": 5.4395604395604396e-05, | |
| "loss": 0.2339, | |
| "step": 3710 | |
| }, | |
| { | |
| "epoch": 2.92, | |
| "learning_rate": 5.400313971742543e-05, | |
| "loss": 0.1819, | |
| "step": 3720 | |
| }, | |
| { | |
| "epoch": 2.93, | |
| "learning_rate": 5.3610675039246475e-05, | |
| "loss": 0.2206, | |
| "step": 3730 | |
| }, | |
| { | |
| "epoch": 2.94, | |
| "learning_rate": 5.321821036106751e-05, | |
| "loss": 0.1959, | |
| "step": 3740 | |
| }, | |
| { | |
| "epoch": 2.94, | |
| "learning_rate": 5.282574568288854e-05, | |
| "loss": 0.3743, | |
| "step": 3750 | |
| }, | |
| { | |
| "epoch": 2.95, | |
| "learning_rate": 5.243328100470958e-05, | |
| "loss": 0.3278, | |
| "step": 3760 | |
| }, | |
| { | |
| "epoch": 2.96, | |
| "learning_rate": 5.2040816326530614e-05, | |
| "loss": 0.3743, | |
| "step": 3770 | |
| }, | |
| { | |
| "epoch": 2.97, | |
| "learning_rate": 5.164835164835166e-05, | |
| "loss": 0.2863, | |
| "step": 3780 | |
| }, | |
| { | |
| "epoch": 2.97, | |
| "learning_rate": 5.1255886970172686e-05, | |
| "loss": 0.347, | |
| "step": 3790 | |
| }, | |
| { | |
| "epoch": 2.98, | |
| "learning_rate": 5.086342229199372e-05, | |
| "loss": 0.2294, | |
| "step": 3800 | |
| }, | |
| { | |
| "epoch": 2.99, | |
| "learning_rate": 5.047095761381476e-05, | |
| "loss": 0.4746, | |
| "step": 3810 | |
| }, | |
| { | |
| "epoch": 3.0, | |
| "learning_rate": 5.007849293563579e-05, | |
| "loss": 0.3479, | |
| "step": 3820 | |
| }, | |
| { | |
| "epoch": 3.01, | |
| "learning_rate": 4.968602825745683e-05, | |
| "loss": 0.1066, | |
| "step": 3830 | |
| }, | |
| { | |
| "epoch": 3.01, | |
| "learning_rate": 4.929356357927787e-05, | |
| "loss": 0.2885, | |
| "step": 3840 | |
| }, | |
| { | |
| "epoch": 3.02, | |
| "learning_rate": 4.8901098901098904e-05, | |
| "loss": 0.1881, | |
| "step": 3850 | |
| }, | |
| { | |
| "epoch": 3.03, | |
| "learning_rate": 4.850863422291994e-05, | |
| "loss": 0.3133, | |
| "step": 3860 | |
| }, | |
| { | |
| "epoch": 3.04, | |
| "learning_rate": 4.811616954474098e-05, | |
| "loss": 0.2348, | |
| "step": 3870 | |
| }, | |
| { | |
| "epoch": 3.05, | |
| "learning_rate": 4.772370486656201e-05, | |
| "loss": 0.2296, | |
| "step": 3880 | |
| }, | |
| { | |
| "epoch": 3.05, | |
| "learning_rate": 4.733124018838305e-05, | |
| "loss": 0.2402, | |
| "step": 3890 | |
| }, | |
| { | |
| "epoch": 3.06, | |
| "learning_rate": 4.6938775510204086e-05, | |
| "loss": 0.1956, | |
| "step": 3900 | |
| }, | |
| { | |
| "epoch": 3.07, | |
| "learning_rate": 4.654631083202512e-05, | |
| "loss": 0.1585, | |
| "step": 3910 | |
| }, | |
| { | |
| "epoch": 3.08, | |
| "learning_rate": 4.615384615384616e-05, | |
| "loss": 0.2759, | |
| "step": 3920 | |
| }, | |
| { | |
| "epoch": 3.08, | |
| "learning_rate": 4.5761381475667194e-05, | |
| "loss": 0.2603, | |
| "step": 3930 | |
| }, | |
| { | |
| "epoch": 3.09, | |
| "learning_rate": 4.5368916797488224e-05, | |
| "loss": 0.2199, | |
| "step": 3940 | |
| }, | |
| { | |
| "epoch": 3.1, | |
| "learning_rate": 4.497645211930927e-05, | |
| "loss": 0.2321, | |
| "step": 3950 | |
| }, | |
| { | |
| "epoch": 3.11, | |
| "learning_rate": 4.4583987441130297e-05, | |
| "loss": 0.2188, | |
| "step": 3960 | |
| }, | |
| { | |
| "epoch": 3.12, | |
| "learning_rate": 4.419152276295134e-05, | |
| "loss": 0.2422, | |
| "step": 3970 | |
| }, | |
| { | |
| "epoch": 3.12, | |
| "learning_rate": 4.379905808477237e-05, | |
| "loss": 0.1887, | |
| "step": 3980 | |
| }, | |
| { | |
| "epoch": 3.13, | |
| "learning_rate": 4.340659340659341e-05, | |
| "loss": 0.2668, | |
| "step": 3990 | |
| }, | |
| { | |
| "epoch": 3.14, | |
| "learning_rate": 4.301412872841444e-05, | |
| "loss": 0.2022, | |
| "step": 4000 | |
| }, | |
| { | |
| "epoch": 3.14, | |
| "eval_accuracy": 0.9118324607329843, | |
| "eval_loss": 0.3330060839653015, | |
| "eval_runtime": 105.6575, | |
| "eval_samples_per_second": 45.193, | |
| "eval_steps_per_second": 5.65, | |
| "step": 4000 | |
| }, | |
| { | |
| "epoch": 3.15, | |
| "learning_rate": 4.2621664050235485e-05, | |
| "loss": 0.4154, | |
| "step": 4010 | |
| }, | |
| { | |
| "epoch": 3.16, | |
| "learning_rate": 4.2229199372056514e-05, | |
| "loss": 0.2389, | |
| "step": 4020 | |
| }, | |
| { | |
| "epoch": 3.16, | |
| "learning_rate": 4.183673469387756e-05, | |
| "loss": 0.2303, | |
| "step": 4030 | |
| }, | |
| { | |
| "epoch": 3.17, | |
| "learning_rate": 4.144427001569859e-05, | |
| "loss": 0.136, | |
| "step": 4040 | |
| }, | |
| { | |
| "epoch": 3.18, | |
| "learning_rate": 4.105180533751963e-05, | |
| "loss": 0.2214, | |
| "step": 4050 | |
| }, | |
| { | |
| "epoch": 3.19, | |
| "learning_rate": 4.065934065934066e-05, | |
| "loss": 0.1767, | |
| "step": 4060 | |
| }, | |
| { | |
| "epoch": 3.19, | |
| "learning_rate": 4.0266875981161696e-05, | |
| "loss": 0.183, | |
| "step": 4070 | |
| }, | |
| { | |
| "epoch": 3.2, | |
| "learning_rate": 3.987441130298273e-05, | |
| "loss": 0.2733, | |
| "step": 4080 | |
| }, | |
| { | |
| "epoch": 3.21, | |
| "learning_rate": 3.948194662480377e-05, | |
| "loss": 0.1869, | |
| "step": 4090 | |
| }, | |
| { | |
| "epoch": 3.22, | |
| "learning_rate": 3.9089481946624805e-05, | |
| "loss": 0.2429, | |
| "step": 4100 | |
| }, | |
| { | |
| "epoch": 3.23, | |
| "learning_rate": 3.869701726844584e-05, | |
| "loss": 0.166, | |
| "step": 4110 | |
| }, | |
| { | |
| "epoch": 3.23, | |
| "learning_rate": 3.830455259026688e-05, | |
| "loss": 0.2535, | |
| "step": 4120 | |
| }, | |
| { | |
| "epoch": 3.24, | |
| "learning_rate": 3.7912087912087914e-05, | |
| "loss": 0.1876, | |
| "step": 4130 | |
| }, | |
| { | |
| "epoch": 3.25, | |
| "learning_rate": 3.751962323390895e-05, | |
| "loss": 0.2684, | |
| "step": 4140 | |
| }, | |
| { | |
| "epoch": 3.26, | |
| "learning_rate": 3.7127158555729986e-05, | |
| "loss": 0.3673, | |
| "step": 4150 | |
| }, | |
| { | |
| "epoch": 3.27, | |
| "learning_rate": 3.673469387755102e-05, | |
| "loss": 0.2569, | |
| "step": 4160 | |
| }, | |
| { | |
| "epoch": 3.27, | |
| "learning_rate": 3.634222919937206e-05, | |
| "loss": 0.1942, | |
| "step": 4170 | |
| }, | |
| { | |
| "epoch": 3.28, | |
| "learning_rate": 3.5949764521193095e-05, | |
| "loss": 0.2461, | |
| "step": 4180 | |
| }, | |
| { | |
| "epoch": 3.29, | |
| "learning_rate": 3.555729984301413e-05, | |
| "loss": 0.2283, | |
| "step": 4190 | |
| }, | |
| { | |
| "epoch": 3.3, | |
| "learning_rate": 3.516483516483517e-05, | |
| "loss": 0.3326, | |
| "step": 4200 | |
| }, | |
| { | |
| "epoch": 3.3, | |
| "learning_rate": 3.4772370486656204e-05, | |
| "loss": 0.1666, | |
| "step": 4210 | |
| }, | |
| { | |
| "epoch": 3.31, | |
| "learning_rate": 3.4379905808477234e-05, | |
| "loss": 0.1527, | |
| "step": 4220 | |
| }, | |
| { | |
| "epoch": 3.32, | |
| "learning_rate": 3.398744113029828e-05, | |
| "loss": 0.1704, | |
| "step": 4230 | |
| }, | |
| { | |
| "epoch": 3.33, | |
| "learning_rate": 3.3594976452119306e-05, | |
| "loss": 0.1885, | |
| "step": 4240 | |
| }, | |
| { | |
| "epoch": 3.34, | |
| "learning_rate": 3.320251177394035e-05, | |
| "loss": 0.3355, | |
| "step": 4250 | |
| }, | |
| { | |
| "epoch": 3.34, | |
| "learning_rate": 3.281004709576138e-05, | |
| "loss": 0.2072, | |
| "step": 4260 | |
| }, | |
| { | |
| "epoch": 3.35, | |
| "learning_rate": 3.241758241758242e-05, | |
| "loss": 0.1902, | |
| "step": 4270 | |
| }, | |
| { | |
| "epoch": 3.36, | |
| "learning_rate": 3.202511773940345e-05, | |
| "loss": 0.2432, | |
| "step": 4280 | |
| }, | |
| { | |
| "epoch": 3.37, | |
| "learning_rate": 3.1632653061224494e-05, | |
| "loss": 0.151, | |
| "step": 4290 | |
| }, | |
| { | |
| "epoch": 3.38, | |
| "learning_rate": 3.1240188383045524e-05, | |
| "loss": 0.3722, | |
| "step": 4300 | |
| }, | |
| { | |
| "epoch": 3.38, | |
| "learning_rate": 3.084772370486657e-05, | |
| "loss": 0.1488, | |
| "step": 4310 | |
| }, | |
| { | |
| "epoch": 3.39, | |
| "learning_rate": 3.04552590266876e-05, | |
| "loss": 0.1683, | |
| "step": 4320 | |
| }, | |
| { | |
| "epoch": 3.4, | |
| "learning_rate": 3.006279434850864e-05, | |
| "loss": 0.1746, | |
| "step": 4330 | |
| }, | |
| { | |
| "epoch": 3.41, | |
| "learning_rate": 2.9670329670329673e-05, | |
| "loss": 0.2985, | |
| "step": 4340 | |
| }, | |
| { | |
| "epoch": 3.41, | |
| "learning_rate": 2.9277864992150706e-05, | |
| "loss": 0.1778, | |
| "step": 4350 | |
| }, | |
| { | |
| "epoch": 3.42, | |
| "learning_rate": 2.8885400313971745e-05, | |
| "loss": 0.0781, | |
| "step": 4360 | |
| }, | |
| { | |
| "epoch": 3.43, | |
| "learning_rate": 2.8492935635792778e-05, | |
| "loss": 0.2062, | |
| "step": 4370 | |
| }, | |
| { | |
| "epoch": 3.44, | |
| "learning_rate": 2.8100470957613818e-05, | |
| "loss": 0.1906, | |
| "step": 4380 | |
| }, | |
| { | |
| "epoch": 3.45, | |
| "learning_rate": 2.770800627943485e-05, | |
| "loss": 0.2594, | |
| "step": 4390 | |
| }, | |
| { | |
| "epoch": 3.45, | |
| "learning_rate": 2.731554160125589e-05, | |
| "loss": 0.2087, | |
| "step": 4400 | |
| }, | |
| { | |
| "epoch": 3.46, | |
| "learning_rate": 2.6923076923076923e-05, | |
| "loss": 0.3252, | |
| "step": 4410 | |
| }, | |
| { | |
| "epoch": 3.47, | |
| "learning_rate": 2.6530612244897963e-05, | |
| "loss": 0.1681, | |
| "step": 4420 | |
| }, | |
| { | |
| "epoch": 3.48, | |
| "learning_rate": 2.6138147566718996e-05, | |
| "loss": 0.267, | |
| "step": 4430 | |
| }, | |
| { | |
| "epoch": 3.49, | |
| "learning_rate": 2.5745682888540036e-05, | |
| "loss": 0.2589, | |
| "step": 4440 | |
| }, | |
| { | |
| "epoch": 3.49, | |
| "learning_rate": 2.535321821036107e-05, | |
| "loss": 0.2525, | |
| "step": 4450 | |
| }, | |
| { | |
| "epoch": 3.5, | |
| "learning_rate": 2.4960753532182105e-05, | |
| "loss": 0.3417, | |
| "step": 4460 | |
| }, | |
| { | |
| "epoch": 3.51, | |
| "learning_rate": 2.456828885400314e-05, | |
| "loss": 0.2417, | |
| "step": 4470 | |
| }, | |
| { | |
| "epoch": 3.52, | |
| "learning_rate": 2.4175824175824177e-05, | |
| "loss": 0.1734, | |
| "step": 4480 | |
| }, | |
| { | |
| "epoch": 3.52, | |
| "learning_rate": 2.3783359497645214e-05, | |
| "loss": 0.212, | |
| "step": 4490 | |
| }, | |
| { | |
| "epoch": 3.53, | |
| "learning_rate": 2.339089481946625e-05, | |
| "loss": 0.1332, | |
| "step": 4500 | |
| }, | |
| { | |
| "epoch": 3.53, | |
| "eval_accuracy": 0.9208376963350785, | |
| "eval_loss": 0.3042859733104706, | |
| "eval_runtime": 105.8953, | |
| "eval_samples_per_second": 45.092, | |
| "eval_steps_per_second": 5.638, | |
| "step": 4500 | |
| }, | |
| { | |
| "epoch": 3.54, | |
| "learning_rate": 2.2998430141287286e-05, | |
| "loss": 0.2482, | |
| "step": 4510 | |
| }, | |
| { | |
| "epoch": 3.55, | |
| "learning_rate": 2.2605965463108323e-05, | |
| "loss": 0.151, | |
| "step": 4520 | |
| }, | |
| { | |
| "epoch": 3.56, | |
| "learning_rate": 2.221350078492936e-05, | |
| "loss": 0.1627, | |
| "step": 4530 | |
| }, | |
| { | |
| "epoch": 3.56, | |
| "learning_rate": 2.1821036106750395e-05, | |
| "loss": 0.3471, | |
| "step": 4540 | |
| }, | |
| { | |
| "epoch": 3.57, | |
| "learning_rate": 2.1428571428571428e-05, | |
| "loss": 0.162, | |
| "step": 4550 | |
| }, | |
| { | |
| "epoch": 3.58, | |
| "learning_rate": 2.1036106750392464e-05, | |
| "loss": 0.2045, | |
| "step": 4560 | |
| }, | |
| { | |
| "epoch": 3.59, | |
| "learning_rate": 2.06436420722135e-05, | |
| "loss": 0.0903, | |
| "step": 4570 | |
| }, | |
| { | |
| "epoch": 3.59, | |
| "learning_rate": 2.0251177394034537e-05, | |
| "loss": 0.1779, | |
| "step": 4580 | |
| }, | |
| { | |
| "epoch": 3.6, | |
| "learning_rate": 1.9858712715855573e-05, | |
| "loss": 0.1739, | |
| "step": 4590 | |
| }, | |
| { | |
| "epoch": 3.61, | |
| "learning_rate": 1.946624803767661e-05, | |
| "loss": 0.1945, | |
| "step": 4600 | |
| }, | |
| { | |
| "epoch": 3.62, | |
| "learning_rate": 1.9073783359497646e-05, | |
| "loss": 0.37, | |
| "step": 4610 | |
| }, | |
| { | |
| "epoch": 3.63, | |
| "learning_rate": 1.8681318681318682e-05, | |
| "loss": 0.2932, | |
| "step": 4620 | |
| }, | |
| { | |
| "epoch": 3.63, | |
| "learning_rate": 1.828885400313972e-05, | |
| "loss": 0.1827, | |
| "step": 4630 | |
| }, | |
| { | |
| "epoch": 3.64, | |
| "learning_rate": 1.7896389324960755e-05, | |
| "loss": 0.1647, | |
| "step": 4640 | |
| }, | |
| { | |
| "epoch": 3.65, | |
| "learning_rate": 1.750392464678179e-05, | |
| "loss": 0.1071, | |
| "step": 4650 | |
| }, | |
| { | |
| "epoch": 3.66, | |
| "learning_rate": 1.7111459968602827e-05, | |
| "loss": 0.1995, | |
| "step": 4660 | |
| }, | |
| { | |
| "epoch": 3.67, | |
| "learning_rate": 1.6718995290423864e-05, | |
| "loss": 0.2714, | |
| "step": 4670 | |
| }, | |
| { | |
| "epoch": 3.67, | |
| "learning_rate": 1.6326530612244897e-05, | |
| "loss": 0.1401, | |
| "step": 4680 | |
| }, | |
| { | |
| "epoch": 3.68, | |
| "learning_rate": 1.5934065934065933e-05, | |
| "loss": 0.1516, | |
| "step": 4690 | |
| }, | |
| { | |
| "epoch": 3.69, | |
| "learning_rate": 1.554160125588697e-05, | |
| "loss": 0.262, | |
| "step": 4700 | |
| }, | |
| { | |
| "epoch": 3.7, | |
| "learning_rate": 1.5149136577708006e-05, | |
| "loss": 0.1809, | |
| "step": 4710 | |
| }, | |
| { | |
| "epoch": 3.7, | |
| "learning_rate": 1.4756671899529042e-05, | |
| "loss": 0.2473, | |
| "step": 4720 | |
| }, | |
| { | |
| "epoch": 3.71, | |
| "learning_rate": 1.4364207221350078e-05, | |
| "loss": 0.074, | |
| "step": 4730 | |
| }, | |
| { | |
| "epoch": 3.72, | |
| "learning_rate": 1.3971742543171114e-05, | |
| "loss": 0.1139, | |
| "step": 4740 | |
| }, | |
| { | |
| "epoch": 3.73, | |
| "learning_rate": 1.357927786499215e-05, | |
| "loss": 0.2363, | |
| "step": 4750 | |
| }, | |
| { | |
| "epoch": 3.74, | |
| "learning_rate": 1.3186813186813187e-05, | |
| "loss": 0.1619, | |
| "step": 4760 | |
| }, | |
| { | |
| "epoch": 3.74, | |
| "learning_rate": 1.2794348508634223e-05, | |
| "loss": 0.0841, | |
| "step": 4770 | |
| }, | |
| { | |
| "epoch": 3.75, | |
| "learning_rate": 1.240188383045526e-05, | |
| "loss": 0.2056, | |
| "step": 4780 | |
| }, | |
| { | |
| "epoch": 3.76, | |
| "learning_rate": 1.2009419152276296e-05, | |
| "loss": 0.2504, | |
| "step": 4790 | |
| }, | |
| { | |
| "epoch": 3.77, | |
| "learning_rate": 1.1616954474097332e-05, | |
| "loss": 0.2733, | |
| "step": 4800 | |
| }, | |
| { | |
| "epoch": 3.78, | |
| "learning_rate": 1.1224489795918369e-05, | |
| "loss": 0.2208, | |
| "step": 4810 | |
| }, | |
| { | |
| "epoch": 3.78, | |
| "learning_rate": 1.0832025117739405e-05, | |
| "loss": 0.1542, | |
| "step": 4820 | |
| }, | |
| { | |
| "epoch": 3.79, | |
| "learning_rate": 1.0439560439560441e-05, | |
| "loss": 0.3311, | |
| "step": 4830 | |
| }, | |
| { | |
| "epoch": 3.8, | |
| "learning_rate": 1.0047095761381477e-05, | |
| "loss": 0.1597, | |
| "step": 4840 | |
| }, | |
| { | |
| "epoch": 3.81, | |
| "learning_rate": 9.654631083202512e-06, | |
| "loss": 0.1415, | |
| "step": 4850 | |
| }, | |
| { | |
| "epoch": 3.81, | |
| "learning_rate": 9.262166405023548e-06, | |
| "loss": 0.229, | |
| "step": 4860 | |
| }, | |
| { | |
| "epoch": 3.82, | |
| "learning_rate": 8.869701726844585e-06, | |
| "loss": 0.1686, | |
| "step": 4870 | |
| }, | |
| { | |
| "epoch": 3.83, | |
| "learning_rate": 8.477237048665621e-06, | |
| "loss": 0.2163, | |
| "step": 4880 | |
| }, | |
| { | |
| "epoch": 3.84, | |
| "learning_rate": 8.084772370486657e-06, | |
| "loss": 0.1421, | |
| "step": 4890 | |
| }, | |
| { | |
| "epoch": 3.85, | |
| "learning_rate": 7.692307692307694e-06, | |
| "loss": 0.2704, | |
| "step": 4900 | |
| }, | |
| { | |
| "epoch": 3.85, | |
| "learning_rate": 7.299843014128729e-06, | |
| "loss": 0.0956, | |
| "step": 4910 | |
| }, | |
| { | |
| "epoch": 3.86, | |
| "learning_rate": 6.9073783359497645e-06, | |
| "loss": 0.1536, | |
| "step": 4920 | |
| }, | |
| { | |
| "epoch": 3.87, | |
| "learning_rate": 6.514913657770801e-06, | |
| "loss": 0.2418, | |
| "step": 4930 | |
| }, | |
| { | |
| "epoch": 3.88, | |
| "learning_rate": 6.122448979591837e-06, | |
| "loss": 0.3416, | |
| "step": 4940 | |
| }, | |
| { | |
| "epoch": 3.89, | |
| "learning_rate": 5.729984301412873e-06, | |
| "loss": 0.2246, | |
| "step": 4950 | |
| }, | |
| { | |
| "epoch": 3.89, | |
| "learning_rate": 5.33751962323391e-06, | |
| "loss": 0.2371, | |
| "step": 4960 | |
| }, | |
| { | |
| "epoch": 3.9, | |
| "learning_rate": 4.945054945054945e-06, | |
| "loss": 0.1669, | |
| "step": 4970 | |
| }, | |
| { | |
| "epoch": 3.91, | |
| "learning_rate": 4.5525902668759815e-06, | |
| "loss": 0.2007, | |
| "step": 4980 | |
| }, | |
| { | |
| "epoch": 3.92, | |
| "learning_rate": 4.160125588697018e-06, | |
| "loss": 0.1772, | |
| "step": 4990 | |
| }, | |
| { | |
| "epoch": 3.92, | |
| "learning_rate": 3.767660910518053e-06, | |
| "loss": 0.1821, | |
| "step": 5000 | |
| }, | |
| { | |
| "epoch": 3.92, | |
| "eval_accuracy": 0.9281675392670157, | |
| "eval_loss": 0.2815741002559662, | |
| "eval_runtime": 105.8792, | |
| "eval_samples_per_second": 45.099, | |
| "eval_steps_per_second": 5.638, | |
| "step": 5000 | |
| }, | |
| { | |
| "epoch": 3.93, | |
| "learning_rate": 3.3751962323390895e-06, | |
| "loss": 0.2243, | |
| "step": 5010 | |
| }, | |
| { | |
| "epoch": 3.94, | |
| "learning_rate": 2.982731554160126e-06, | |
| "loss": 0.1931, | |
| "step": 5020 | |
| }, | |
| { | |
| "epoch": 3.95, | |
| "learning_rate": 2.5902668759811617e-06, | |
| "loss": 0.136, | |
| "step": 5030 | |
| }, | |
| { | |
| "epoch": 3.96, | |
| "learning_rate": 2.197802197802198e-06, | |
| "loss": 0.3308, | |
| "step": 5040 | |
| }, | |
| { | |
| "epoch": 3.96, | |
| "learning_rate": 1.805337519623234e-06, | |
| "loss": 0.1563, | |
| "step": 5050 | |
| }, | |
| { | |
| "epoch": 3.97, | |
| "learning_rate": 1.4128728414442702e-06, | |
| "loss": 0.1906, | |
| "step": 5060 | |
| }, | |
| { | |
| "epoch": 3.98, | |
| "learning_rate": 1.020408163265306e-06, | |
| "loss": 0.2072, | |
| "step": 5070 | |
| }, | |
| { | |
| "epoch": 3.99, | |
| "learning_rate": 6.279434850863423e-07, | |
| "loss": 0.2218, | |
| "step": 5080 | |
| }, | |
| { | |
| "epoch": 4.0, | |
| "learning_rate": 2.3547880690737833e-07, | |
| "loss": 0.2175, | |
| "step": 5090 | |
| }, | |
| { | |
| "epoch": 4.0, | |
| "step": 5096, | |
| "total_flos": 5.921465894918849e+18, | |
| "train_loss": 0.46419070048178757, | |
| "train_runtime": 5529.79, | |
| "train_samples_per_second": 13.815, | |
| "train_steps_per_second": 0.922 | |
| } | |
| ], | |
| "logging_steps": 10, | |
| "max_steps": 5096, | |
| "num_train_epochs": 4, | |
| "save_steps": 500, | |
| "total_flos": 5.921465894918849e+18, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |
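
A minimal sketch of how this log could be consumed, assuming it is the standard `trainer_state.json` written by `transformers.Trainer` next to a checkpoint (the file name and on-disk location are assumptions, not stated in the log itself). The snippet uses only the standard library, splits the `log_history` list into training-loss points and evaluation points, and reports the best evaluation step; it is illustrative, not part of the original training run.

```python
import json

# Load the trainer state (hypothetical path; adjust to where the checkpoint was saved).
with open("trainer_state.json", "r", encoding="utf-8") as f:
    state = json.load(f)

history = state["log_history"]

# Training entries carry a "loss" key; evaluation entries carry "eval_loss" / "eval_accuracy".
train_points = [(e["step"], e["loss"]) for e in history if "loss" in e]
eval_points = [(e["step"], e["eval_loss"], e.get("eval_accuracy")) for e in history if "eval_loss" in e]

# Pick the evaluation with the lowest eval_loss.
best_step, best_loss, best_acc = min(eval_points, key=lambda p: p[1])

print(f"{len(train_points)} training-loss points over {state['num_train_epochs']} epochs "
      f"({state['max_steps']} steps)")
print(f"best eval_loss {best_loss:.4f} (eval_accuracy {best_acc:.4f}) at step {best_step}")
```

Running this against the log above would surface the step-5000 evaluation as the best one, which matches the checkpoint the trainer retained.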