{
  "best_metric": 0.26857295632362366,
  "best_model_checkpoint": "./vit-mae-flysheet/checkpoint-2716",
  "epoch": 100.0,
  "global_step": 2800,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.36,
      "learning_rate": 2.3437499999999998e-07,
      "loss": 2.2918,
      "step": 10
    },
    {
      "epoch": 0.71,
      "learning_rate": 5.691964285714285e-07,
      "loss": 2.284,
      "step": 20
    },
    {
      "epoch": 1.0,
      "eval_loss": 2.281158924102783,
      "eval_runtime": 2.6825,
      "eval_samples_per_second": 115.564,
      "eval_steps_per_second": 1.864,
      "step": 28
    },
    {
      "epoch": 1.07,
      "learning_rate": 9.040178571428571e-07,
      "loss": 2.29,
      "step": 30
    },
    {
      "epoch": 1.43,
      "learning_rate": 1.2388392857142857e-06,
      "loss": 2.1474,
      "step": 40
    },
    {
      "epoch": 1.79,
      "learning_rate": 1.573660714285714e-06,
      "loss": 2.137,
      "step": 50
    },
    {
      "epoch": 2.0,
      "eval_loss": 2.0287907123565674,
      "eval_runtime": 2.9845,
      "eval_samples_per_second": 103.869,
      "eval_steps_per_second": 1.675,
      "step": 56
    },
    {
      "epoch": 2.14,
      "learning_rate": 1.9084821428571425e-06,
      "loss": 2.0038,
      "step": 60
    },
    {
      "epoch": 2.5,
      "learning_rate": 2.2433035714285713e-06,
      "loss": 1.8154,
      "step": 70
    },
    {
      "epoch": 2.86,
      "learning_rate": 2.578125e-06,
      "loss": 1.6016,
      "step": 80
    },
    {
      "epoch": 3.0,
      "eval_loss": 1.243688702583313,
      "eval_runtime": 3.1026,
      "eval_samples_per_second": 99.916,
      "eval_steps_per_second": 1.612,
      "step": 84
    },
    {
      "epoch": 3.21,
      "learning_rate": 2.9129464285714283e-06,
      "loss": 1.2624,
      "step": 90
    },
    {
      "epoch": 3.57,
      "learning_rate": 3.2477678571428566e-06,
      "loss": 0.9693,
      "step": 100
    },
    {
      "epoch": 3.93,
      "learning_rate": 3.5825892857142853e-06,
      "loss": 0.8055,
      "step": 110
    },
    {
      "epoch": 4.0,
      "eval_loss": 0.7418646812438965,
      "eval_runtime": 3.0666,
      "eval_samples_per_second": 101.089,
      "eval_steps_per_second": 1.63,
      "step": 112
    },
    {
      "epoch": 4.29,
      "learning_rate": 3.917410714285714e-06,
      "loss": 0.7145,
      "step": 120
    },
    {
      "epoch": 4.64,
      "learning_rate": 4.252232142857143e-06,
      "loss": 0.6144,
      "step": 130
    },
    {
      "epoch": 5.0,
      "learning_rate": 4.587053571428571e-06,
      "loss": 0.5304,
      "step": 140
    },
    {
      "epoch": 5.0,
      "eval_loss": 0.5150813460350037,
      "eval_runtime": 2.9639,
      "eval_samples_per_second": 104.591,
      "eval_steps_per_second": 1.687,
      "step": 140
    },
    {
      "epoch": 5.36,
      "learning_rate": 4.921874999999999e-06,
      "loss": 0.4939,
      "step": 150
    },
    {
      "epoch": 5.71,
      "learning_rate": 5.256696428571428e-06,
      "loss": 0.4873,
      "step": 160
    },
    {
      "epoch": 6.0,
      "eval_loss": 0.4884074628353119,
      "eval_runtime": 3.1297,
      "eval_samples_per_second": 99.052,
      "eval_steps_per_second": 1.598,
      "step": 168
    },
    {
      "epoch": 6.07,
      "learning_rate": 5.591517857142857e-06,
      "loss": 0.465,
      "step": 170
    },
    {
      "epoch": 6.43,
      "learning_rate": 5.926339285714285e-06,
      "loss": 0.4548,
      "step": 180
    },
    {
      "epoch": 6.79,
      "learning_rate": 6.261160714285713e-06,
      "loss": 0.442,
      "step": 190
    },
    {
      "epoch": 7.0,
      "eval_loss": 0.44406256079673767,
      "eval_runtime": 3.1182,
      "eval_samples_per_second": 99.418,
      "eval_steps_per_second": 1.604,
      "step": 196
    },
    {
      "epoch": 7.14,
      "learning_rate": 6.595982142857143e-06,
      "loss": 0.4262,
      "step": 200
    },
    {
      "epoch": 7.5,
      "learning_rate": 6.930803571428571e-06,
      "loss": 0.4275,
      "step": 210
    },
    {
      "epoch": 7.86,
      "learning_rate": 7.2656249999999996e-06,
      "loss": 0.4039,
      "step": 220
    },
    {
      "epoch": 8.0,
      "eval_loss": 0.41594529151916504,
      "eval_runtime": 3.0271,
      "eval_samples_per_second": 102.41,
      "eval_steps_per_second": 1.652,
      "step": 224
    },
    {
      "epoch": 8.21,
      "learning_rate": 7.600446428571428e-06,
      "loss": 0.3925,
      "step": 230
    },
    {
      "epoch": 8.57,
      "learning_rate": 7.935267857142856e-06,
      "loss": 0.4092,
      "step": 240
    },
    {
      "epoch": 8.93,
      "learning_rate": 8.270089285714285e-06,
      "loss": 0.3866,
      "step": 250
    },
    {
      "epoch": 9.0,
      "eval_loss": 0.39747074246406555,
      "eval_runtime": 3.1219,
      "eval_samples_per_second": 99.3,
      "eval_steps_per_second": 1.602,
      "step": 252
    },
    {
      "epoch": 9.29,
      "learning_rate": 8.604910714285714e-06,
      "loss": 0.3675,
      "step": 260
    },
    {
      "epoch": 9.64,
      "learning_rate": 8.939732142857142e-06,
      "loss": 0.3827,
      "step": 270
    },
    {
      "epoch": 10.0,
      "learning_rate": 9.274553571428571e-06,
      "loss": 0.391,
      "step": 280
    },
    {
      "epoch": 10.0,
      "eval_loss": 0.38686203956604004,
      "eval_runtime": 2.6794,
      "eval_samples_per_second": 115.697,
      "eval_steps_per_second": 1.866,
      "step": 280
    },
    {
      "epoch": 10.36,
      "learning_rate": 9.609374999999998e-06,
      "loss": 0.3701,
      "step": 290
    },
    {
      "epoch": 10.71,
      "learning_rate": 9.944196428571429e-06,
      "loss": 0.3549,
      "step": 300
    },
    {
      "epoch": 11.0,
      "eval_loss": 0.38007286190986633,
      "eval_runtime": 2.6889,
      "eval_samples_per_second": 115.289,
      "eval_steps_per_second": 1.859,
      "step": 308
    },
    {
      "epoch": 11.07,
      "learning_rate": 1.0279017857142857e-05,
      "loss": 0.3618,
      "step": 310
    },
    {
      "epoch": 11.43,
      "learning_rate": 1.0613839285714284e-05,
      "loss": 0.357,
      "step": 320
    },
    {
      "epoch": 11.79,
      "learning_rate": 1.0948660714285715e-05,
      "loss": 0.3462,
      "step": 330
    },
    {
      "epoch": 12.0,
      "eval_loss": 0.35770875215530396,
      "eval_runtime": 2.6881,
      "eval_samples_per_second": 115.324,
      "eval_steps_per_second": 1.86,
      "step": 336
    },
    {
      "epoch": 12.14,
      "learning_rate": 1.1283482142857142e-05,
      "loss": 0.3413,
      "step": 340
    },
    {
      "epoch": 12.5,
      "learning_rate": 1.161830357142857e-05,
      "loss": 0.341,
      "step": 350
    },
    {
      "epoch": 12.86,
      "learning_rate": 1.1953124999999997e-05,
      "loss": 0.3402,
      "step": 360
    },
    {
      "epoch": 13.0,
      "eval_loss": 0.3519320487976074,
      "eval_runtime": 2.6575,
      "eval_samples_per_second": 116.65,
      "eval_steps_per_second": 1.881,
      "step": 364
    },
    {
      "epoch": 13.21,
      "learning_rate": 1.2287946428571428e-05,
      "loss": 0.3281,
      "step": 370
    },
    {
      "epoch": 13.57,
      "learning_rate": 1.2622767857142857e-05,
      "loss": 0.3423,
      "step": 380
    },
    {
      "epoch": 13.93,
      "learning_rate": 1.2957589285714284e-05,
      "loss": 0.3357,
      "step": 390
    },
    {
      "epoch": 14.0,
      "eval_loss": 0.344655305147171,
      "eval_runtime": 2.6732,
      "eval_samples_per_second": 115.968,
      "eval_steps_per_second": 1.87,
      "step": 392
    },
    {
      "epoch": 14.29,
      "learning_rate": 1.3292410714285714e-05,
      "loss": 0.3254,
      "step": 400
    },
    {
      "epoch": 14.64,
      "learning_rate": 1.3627232142857141e-05,
      "loss": 0.3231,
      "step": 410
    },
    {
      "epoch": 15.0,
      "learning_rate": 1.396205357142857e-05,
      "loss": 0.3474,
      "step": 420
    },
    {
      "epoch": 15.0,
      "eval_loss": 0.33689987659454346,
      "eval_runtime": 2.6785,
      "eval_samples_per_second": 115.736,
      "eval_steps_per_second": 1.867,
      "step": 420
    },
    {
      "epoch": 15.36,
      "learning_rate": 1.4296874999999999e-05,
      "loss": 0.3117,
      "step": 430
    },
    {
      "epoch": 15.71,
      "learning_rate": 1.4631696428571427e-05,
      "loss": 0.3254,
      "step": 440
    },
    {
      "epoch": 16.0,
      "eval_loss": 0.33861759305000305,
      "eval_runtime": 2.6231,
      "eval_samples_per_second": 118.183,
      "eval_steps_per_second": 1.906,
      "step": 448
    },
    {
      "epoch": 16.07,
      "learning_rate": 1.4966517857142856e-05,
      "loss": 0.3221,
      "step": 450
    },
    {
      "epoch": 16.43,
      "learning_rate": 1.5301339285714285e-05,
      "loss": 0.3347,
      "step": 460
    },
    {
      "epoch": 16.79,
      "learning_rate": 1.5636160714285715e-05,
      "loss": 0.3033,
      "step": 470
    },
    {
      "epoch": 17.0,
      "eval_loss": 0.3294062316417694,
      "eval_runtime": 2.6872,
      "eval_samples_per_second": 115.362,
      "eval_steps_per_second": 1.861,
      "step": 476
    },
    {
      "epoch": 17.14,
      "learning_rate": 1.5970982142857142e-05,
      "loss": 0.3135,
      "step": 480
    },
    {
      "epoch": 17.5,
      "learning_rate": 1.630580357142857e-05,
      "loss": 0.3104,
      "step": 490
    },
    {
      "epoch": 17.86,
      "learning_rate": 1.6640624999999996e-05,
      "loss": 0.3047,
      "step": 500
    },
    {
      "epoch": 18.0,
      "eval_loss": 0.32739999890327454,
      "eval_runtime": 2.6527,
      "eval_samples_per_second": 116.864,
      "eval_steps_per_second": 1.885,
      "step": 504
    },
    {
      "epoch": 18.21,
      "learning_rate": 1.6975446428571427e-05,
      "loss": 0.3088,
      "step": 510
    },
    {
      "epoch": 18.57,
      "learning_rate": 1.7310267857142857e-05,
      "loss": 0.3079,
      "step": 520
    },
    {
      "epoch": 18.93,
      "learning_rate": 1.7645089285714284e-05,
      "loss": 0.3103,
      "step": 530
    },
    {
      "epoch": 19.0,
      "eval_loss": 0.3209254741668701,
      "eval_runtime": 2.6351,
      "eval_samples_per_second": 117.643,
      "eval_steps_per_second": 1.897,
      "step": 532
    },
    {
      "epoch": 19.29,
      "learning_rate": 1.7979910714285714e-05,
      "loss": 0.3004,
      "step": 540
    },
    {
      "epoch": 19.64,
      "learning_rate": 1.831473214285714e-05,
      "loss": 0.3037,
      "step": 550
    },
    {
      "epoch": 20.0,
      "learning_rate": 1.864955357142857e-05,
      "loss": 0.3067,
      "step": 560
    },
    {
      "epoch": 20.0,
      "eval_loss": 0.3185921013355255,
      "eval_runtime": 2.6798,
      "eval_samples_per_second": 115.68,
      "eval_steps_per_second": 1.866,
      "step": 560
    },
    {
      "epoch": 20.36,
      "learning_rate": 1.8984375e-05,
      "loss": 0.3007,
      "step": 570
    },
    {
      "epoch": 20.71,
      "learning_rate": 1.9319196428571426e-05,
      "loss": 0.2959,
      "step": 580
    },
    {
      "epoch": 21.0,
      "eval_loss": 0.31901559233665466,
      "eval_runtime": 2.6847,
      "eval_samples_per_second": 115.468,
      "eval_steps_per_second": 1.862,
      "step": 588
    },
    {
      "epoch": 21.07,
      "learning_rate": 1.9654017857142856e-05,
      "loss": 0.3031,
      "step": 590
    },
    {
      "epoch": 21.43,
      "learning_rate": 1.9988839285714283e-05,
      "loss": 0.3107,
      "step": 600
    },
    {
      "epoch": 21.79,
      "learning_rate": 2.032366071428571e-05,
      "loss": 0.2899,
      "step": 610
    },
    {
      "epoch": 22.0,
      "eval_loss": 0.3146977126598358,
      "eval_runtime": 2.6772,
      "eval_samples_per_second": 115.792,
      "eval_steps_per_second": 1.868,
      "step": 616
    },
    {
      "epoch": 22.14,
      "learning_rate": 2.0658482142857144e-05,
      "loss": 0.2868,
      "step": 620
    },
    {
      "epoch": 22.5,
      "learning_rate": 2.099330357142857e-05,
      "loss": 0.3018,
      "step": 630
    },
    {
      "epoch": 22.86,
      "learning_rate": 2.1328125e-05,
      "loss": 0.2872,
      "step": 640
    },
    {
      "epoch": 23.0,
      "eval_loss": 0.30817940831184387,
      "eval_runtime": 2.6553,
      "eval_samples_per_second": 116.748,
      "eval_steps_per_second": 1.883,
      "step": 644
    },
    {
      "epoch": 23.21,
      "learning_rate": 2.1662946428571425e-05,
      "loss": 0.3033,
      "step": 650
    },
    {
      "epoch": 23.57,
      "learning_rate": 2.1997767857142856e-05,
      "loss": 0.2946,
      "step": 660
    },
    {
      "epoch": 23.93,
      "learning_rate": 2.2332589285714283e-05,
      "loss": 0.2956,
      "step": 670
    },
    {
      "epoch": 24.0,
      "eval_loss": 0.3069721758365631,
      "eval_runtime": 2.6278,
      "eval_samples_per_second": 117.971,
      "eval_steps_per_second": 1.903,
      "step": 672
    },
    {
      "epoch": 24.29,
      "learning_rate": 2.266741071428571e-05,
      "loss": 0.2754,
      "step": 680
    },
    {
      "epoch": 24.64,
      "learning_rate": 2.3002232142857144e-05,
      "loss": 0.3039,
      "step": 690
    },
    {
      "epoch": 25.0,
      "learning_rate": 2.333705357142857e-05,
      "loss": 0.2865,
      "step": 700
    },
    {
      "epoch": 25.0,
      "eval_loss": 0.30717733502388,
      "eval_runtime": 2.6953,
      "eval_samples_per_second": 115.015,
      "eval_steps_per_second": 1.855,
      "step": 700
    },
    {
      "epoch": 25.36,
      "learning_rate": 2.3671874999999998e-05,
      "loss": 0.2865,
      "step": 710
    },
    {
      "epoch": 25.71,
      "learning_rate": 2.4006696428571425e-05,
      "loss": 0.2947,
      "step": 720
    },
    {
      "epoch": 26.0,
      "eval_loss": 0.3071895241737366,
      "eval_runtime": 2.6411,
      "eval_samples_per_second": 117.376,
      "eval_steps_per_second": 1.893,
      "step": 728
    },
    {
      "epoch": 26.07,
      "learning_rate": 2.4341517857142855e-05,
      "loss": 0.2831,
      "step": 730
    },
    {
      "epoch": 26.43,
      "learning_rate": 2.4676339285714282e-05,
      "loss": 0.3008,
      "step": 740
    },
    {
      "epoch": 26.79,
      "learning_rate": 2.501116071428571e-05,
      "loss": 0.2811,
      "step": 750
    },
    {
      "epoch": 27.0,
      "eval_loss": 0.3130524158477783,
      "eval_runtime": 2.683,
      "eval_samples_per_second": 115.544,
      "eval_steps_per_second": 1.864,
      "step": 756
    },
    {
      "epoch": 27.14,
      "learning_rate": 2.5345982142857143e-05,
      "loss": 0.2814,
      "step": 760
    },
    {
      "epoch": 27.5,
      "learning_rate": 2.568080357142857e-05,
      "loss": 0.2872,
      "step": 770
    },
    {
      "epoch": 27.86,
      "learning_rate": 2.6015624999999997e-05,
      "loss": 0.2935,
      "step": 780
    },
    {
      "epoch": 28.0,
      "eval_loss": 0.3069266080856323,
      "eval_runtime": 2.6568,
      "eval_samples_per_second": 116.681,
      "eval_steps_per_second": 1.882,
      "step": 784
    },
    {
      "epoch": 28.21,
      "learning_rate": 2.6350446428571424e-05,
      "loss": 0.2968,
      "step": 790
    },
    {
      "epoch": 28.57,
      "learning_rate": 2.6685267857142855e-05,
      "loss": 0.284,
      "step": 800
    },
    {
      "epoch": 28.93,
      "learning_rate": 2.702008928571428e-05,
      "loss": 0.2814,
      "step": 810
    },
    {
      "epoch": 29.0,
      "eval_loss": 0.30426502227783203,
      "eval_runtime": 2.6601,
      "eval_samples_per_second": 116.537,
      "eval_steps_per_second": 1.88,
      "step": 812
    },
    {
      "epoch": 29.29,
      "learning_rate": 2.7354910714285712e-05,
      "loss": 0.2963,
      "step": 820
    },
    {
      "epoch": 29.64,
      "learning_rate": 2.7689732142857142e-05,
      "loss": 0.2891,
      "step": 830
    },
    {
      "epoch": 30.0,
      "learning_rate": 2.802455357142857e-05,
      "loss": 0.2753,
      "step": 840
    },
    {
      "epoch": 30.0,
      "eval_loss": 0.2983907461166382,
      "eval_runtime": 2.7173,
      "eval_samples_per_second": 114.083,
      "eval_steps_per_second": 1.84,
      "step": 840
    },
    {
      "epoch": 30.36,
      "learning_rate": 2.8359374999999996e-05,
      "loss": 0.2894,
      "step": 850
    },
    {
      "epoch": 30.71,
      "learning_rate": 2.8694196428571423e-05,
      "loss": 0.2823,
      "step": 860
    },
    {
      "epoch": 31.0,
      "eval_loss": 0.2994972765445709,
      "eval_runtime": 2.6498,
      "eval_samples_per_second": 116.99,
      "eval_steps_per_second": 1.887,
      "step": 868
    },
    {
      "epoch": 31.07,
      "learning_rate": 2.9029017857142854e-05,
      "loss": 0.2792,
      "step": 870
    },
    {
      "epoch": 31.43,
      "learning_rate": 2.9363839285714284e-05,
      "loss": 0.2755,
      "step": 880
    },
    {
      "epoch": 31.79,
      "learning_rate": 2.969866071428571e-05,
      "loss": 0.2962,
      "step": 890
    },
    {
      "epoch": 32.0,
      "eval_loss": 0.30121392011642456,
      "eval_runtime": 2.6677,
      "eval_samples_per_second": 116.206,
      "eval_steps_per_second": 1.874,
      "step": 896
    },
    {
      "epoch": 32.14,
      "learning_rate": 3.0033482142857142e-05,
      "loss": 0.2835,
      "step": 900
    },
    {
      "epoch": 32.5,
      "learning_rate": 3.036830357142857e-05,
      "loss": 0.2724,
      "step": 910
    },
    {
      "epoch": 32.86,
      "learning_rate": 3.0703124999999996e-05,
      "loss": 0.2869,
      "step": 920
    },
    {
      "epoch": 33.0,
      "eval_loss": 0.30497127771377563,
      "eval_runtime": 2.6483,
      "eval_samples_per_second": 117.058,
      "eval_steps_per_second": 1.888,
      "step": 924
    },
    {
      "epoch": 33.21,
      "learning_rate": 3.1037946428571426e-05,
      "loss": 0.2812,
      "step": 930
    },
    {
      "epoch": 33.57,
      "learning_rate": 3.137276785714286e-05,
      "loss": 0.2855,
      "step": 940
    },
    {
      "epoch": 33.93,
      "learning_rate": 3.170758928571428e-05,
      "loss": 0.2833,
      "step": 950
    },
    {
      "epoch": 34.0,
      "eval_loss": 0.29598191380500793,
      "eval_runtime": 2.6636,
      "eval_samples_per_second": 116.384,
      "eval_steps_per_second": 1.877,
      "step": 952
    },
    {
      "epoch": 34.29,
      "learning_rate": 3.204241071428571e-05,
      "loss": 0.2771,
      "step": 960
    },
    {
      "epoch": 34.64,
      "learning_rate": 3.237723214285714e-05,
      "loss": 0.2753,
      "step": 970
    },
    {
      "epoch": 35.0,
      "learning_rate": 3.271205357142857e-05,
      "loss": 0.2892,
      "step": 980
    },
    {
      "epoch": 35.0,
      "eval_loss": 0.3039288818836212,
      "eval_runtime": 2.6706,
      "eval_samples_per_second": 116.078,
      "eval_steps_per_second": 1.872,
      "step": 980
    },
    {
      "epoch": 35.36,
      "learning_rate": 3.3046874999999995e-05,
      "loss": 0.2915,
      "step": 990
    },
    {
      "epoch": 35.71,
      "learning_rate": 3.3381696428571426e-05,
      "loss": 0.2764,
      "step": 1000
    },
    {
      "epoch": 36.0,
      "eval_loss": 0.301042377948761,
      "eval_runtime": 2.6356,
      "eval_samples_per_second": 117.622,
      "eval_steps_per_second": 1.897,
      "step": 1008
    },
    {
      "epoch": 36.07,
      "learning_rate": 3.3716517857142856e-05,
      "loss": 0.2854,
      "step": 1010
    },
    {
      "epoch": 36.43,
      "learning_rate": 3.405133928571428e-05,
      "loss": 0.2874,
      "step": 1020
    },
    {
      "epoch": 36.79,
      "learning_rate": 3.438616071428571e-05,
      "loss": 0.2807,
      "step": 1030
    },
    {
      "epoch": 37.0,
      "eval_loss": 0.2998359799385071,
      "eval_runtime": 2.6115,
      "eval_samples_per_second": 118.706,
      "eval_steps_per_second": 1.915,
      "step": 1036
    },
    {
      "epoch": 37.14,
      "learning_rate": 3.472098214285714e-05,
      "loss": 0.2629,
      "step": 1040
    },
    {
      "epoch": 37.5,
      "learning_rate": 3.505580357142857e-05,
      "loss": 0.2759,
      "step": 1050
    },
    {
      "epoch": 37.86,
      "learning_rate": 3.5390624999999995e-05,
      "loss": 0.2843,
      "step": 1060
    },
    {
      "epoch": 38.0,
      "eval_loss": 0.2989421784877777,
      "eval_runtime": 2.6315,
      "eval_samples_per_second": 117.805,
      "eval_steps_per_second": 1.9,
      "step": 1064
    },
    {
      "epoch": 38.21,
      "learning_rate": 3.5725446428571425e-05,
      "loss": 0.284,
      "step": 1070
    },
    {
      "epoch": 38.57,
      "learning_rate": 3.6060267857142855e-05,
      "loss": 0.2786,
      "step": 1080
    },
    {
      "epoch": 38.93,
      "learning_rate": 3.639508928571428e-05,
      "loss": 0.2808,
      "step": 1090
    },
    {
      "epoch": 39.0,
      "eval_loss": 0.2969764173030853,
      "eval_runtime": 2.6345,
      "eval_samples_per_second": 117.671,
      "eval_steps_per_second": 1.898,
      "step": 1092
    },
    {
      "epoch": 39.29,
      "learning_rate": 3.672991071428571e-05,
      "loss": 0.2748,
      "step": 1100
    },
    {
      "epoch": 39.64,
      "learning_rate": 3.706473214285714e-05,
      "loss": 0.2751,
      "step": 1110
    },
    {
      "epoch": 40.0,
      "learning_rate": 3.739955357142857e-05,
      "loss": 0.2862,
      "step": 1120
    },
    {
      "epoch": 40.0,
      "eval_loss": 0.29402607679367065,
      "eval_runtime": 2.6346,
      "eval_samples_per_second": 117.666,
      "eval_steps_per_second": 1.898,
      "step": 1120
    },
    {
      "epoch": 40.36,
      "learning_rate": 3.7499989987931416e-05,
      "loss": 0.2951,
      "step": 1130
    },
    {
      "epoch": 40.71,
      "learning_rate": 3.7499940949253886e-05,
      "loss": 0.2601,
      "step": 1140
    },
    {
      "epoch": 41.0,
      "eval_loss": 0.2952007055282593,
      "eval_runtime": 2.6361,
      "eval_samples_per_second": 117.597,
      "eval_steps_per_second": 1.897,
      "step": 1148
    },
    {
      "epoch": 41.07,
      "learning_rate": 3.749985104512278e-05,
      "loss": 0.2796,
      "step": 1150
    },
    {
      "epoch": 41.43,
      "learning_rate": 3.7499720275734064e-05,
      "loss": 0.2759,
      "step": 1160
    },
    {
      "epoch": 41.79,
      "learning_rate": 3.7499548641372725e-05,
      "loss": 0.2742,
      "step": 1170
    },
    {
      "epoch": 42.0,
      "eval_loss": 0.2939775884151459,
      "eval_runtime": 2.6378,
      "eval_samples_per_second": 117.522,
      "eval_steps_per_second": 1.896,
      "step": 1176
    },
    {
      "epoch": 42.14,
      "learning_rate": 3.7499336142412856e-05,
      "loss": 0.2757,
      "step": 1180
    },
    {
      "epoch": 42.5,
      "learning_rate": 3.749908277931758e-05,
      "loss": 0.2801,
      "step": 1190
    },
    {
      "epoch": 42.86,
      "learning_rate": 3.749878855263912e-05,
      "loss": 0.2791,
      "step": 1200
    },
    {
      "epoch": 43.0,
      "eval_loss": 0.29970163106918335,
      "eval_runtime": 2.6144,
      "eval_samples_per_second": 118.572,
      "eval_steps_per_second": 1.912,
      "step": 1204
    },
    {
      "epoch": 43.21,
      "learning_rate": 3.7498453463018734e-05,
      "loss": 0.2744,
      "step": 1210
    },
    {
      "epoch": 43.57,
      "learning_rate": 3.749807751118675e-05,
      "loss": 0.2793,
      "step": 1220
    },
    {
      "epoch": 43.93,
      "learning_rate": 3.749766069796256e-05,
      "loss": 0.2759,
      "step": 1230
    },
    {
      "epoch": 44.0,
      "eval_loss": 0.2951151728630066,
      "eval_runtime": 2.6705,
      "eval_samples_per_second": 116.083,
      "eval_steps_per_second": 1.872,
      "step": 1232
    },
    {
      "epoch": 44.29,
      "learning_rate": 3.749720302425459e-05,
      "loss": 0.2802,
      "step": 1240
    },
    {
      "epoch": 44.64,
      "learning_rate": 3.749670449106036e-05,
      "loss": 0.268,
      "step": 1250
    },
    {
      "epoch": 45.0,
      "learning_rate": 3.749616509946641e-05,
      "loss": 0.2819,
      "step": 1260
    },
    {
      "epoch": 45.0,
      "eval_loss": 0.2895747125148773,
      "eval_runtime": 2.6558,
      "eval_samples_per_second": 116.726,
      "eval_steps_per_second": 1.883,
      "step": 1260
    },
    {
      "epoch": 45.36,
      "learning_rate": 3.7495584850648345e-05,
      "loss": 0.2697,
      "step": 1270
    },
    {
      "epoch": 45.71,
      "learning_rate": 3.749496374587082e-05,
      "loss": 0.287,
      "step": 1280
    },
    {
      "epoch": 46.0,
      "eval_loss": 0.2938202917575836,
      "eval_runtime": 2.6182,
      "eval_samples_per_second": 118.403,
      "eval_steps_per_second": 1.91,
      "step": 1288
    },
    {
      "epoch": 46.07,
      "learning_rate": 3.749430178648752e-05,
      "loss": 0.2736,
      "step": 1290
    },
    {
      "epoch": 46.43,
      "learning_rate": 3.7493598973941194e-05,
      "loss": 0.2817,
      "step": 1300
    },
    {
      "epoch": 46.79,
      "learning_rate": 3.749285530976362e-05,
      "loss": 0.2711,
      "step": 1310
    },
    {
      "epoch": 47.0,
      "eval_loss": 0.2973059117794037,
      "eval_runtime": 2.6436,
      "eval_samples_per_second": 117.264,
      "eval_steps_per_second": 1.891,
      "step": 1316
    },
    {
      "epoch": 47.14,
      "learning_rate": 3.749207079557561e-05,
      "loss": 0.2761,
      "step": 1320
    },
    {
      "epoch": 47.5,
      "learning_rate": 3.749124543308701e-05,
      "loss": 0.2732,
      "step": 1330
    },
    {
      "epoch": 47.86,
      "learning_rate": 3.74903792240967e-05,
      "loss": 0.2782,
      "step": 1340
    },
    {
      "epoch": 48.0,
      "eval_loss": 0.2946121096611023,
      "eval_runtime": 2.6239,
      "eval_samples_per_second": 118.144,
      "eval_steps_per_second": 1.906,
      "step": 1344
    },
    {
      "epoch": 48.21,
      "learning_rate": 3.748947217049258e-05,
      "loss": 0.2682,
      "step": 1350
    },
    {
      "epoch": 48.57,
      "learning_rate": 3.7488524274251566e-05,
      "loss": 0.2765,
      "step": 1360
    },
    {
      "epoch": 48.93,
      "learning_rate": 3.748753553743959e-05,
      "loss": 0.2674,
      "step": 1370
    },
    {
      "epoch": 49.0,
      "eval_loss": 0.29130813479423523,
      "eval_runtime": 2.6539,
      "eval_samples_per_second": 116.81,
      "eval_steps_per_second": 1.884,
      "step": 1372
    },
    {
      "epoch": 49.29,
      "learning_rate": 3.748650596221162e-05,
      "loss": 0.2833,
      "step": 1380
    },
    {
      "epoch": 49.64,
      "learning_rate": 3.74854355508116e-05,
      "loss": 0.2701,
      "step": 1390
    },
    {
      "epoch": 50.0,
      "learning_rate": 3.748432430557249e-05,
      "loss": 0.268,
      "step": 1400
    },
    {
      "epoch": 50.0,
      "eval_loss": 0.29444822669029236,
      "eval_runtime": 2.6587,
      "eval_samples_per_second": 116.597,
      "eval_steps_per_second": 1.881,
      "step": 1400
    },
    {
      "epoch": 50.36,
      "learning_rate": 3.748317222891625e-05,
      "loss": 0.2743,
      "step": 1410
    },
    {
      "epoch": 50.71,
      "learning_rate": 3.748197932335383e-05,
      "loss": 0.2624,
      "step": 1420
    },
    {
      "epoch": 51.0,
      "eval_loss": 0.29401645064353943,
      "eval_runtime": 2.6105,
      "eval_samples_per_second": 118.751,
      "eval_steps_per_second": 1.915,
      "step": 1428
    },
    {
      "epoch": 51.07,
      "learning_rate": 3.748074559148516e-05,
      "loss": 0.2865,
      "step": 1430
    },
    {
      "epoch": 51.43,
      "learning_rate": 3.747947103599916e-05,
      "loss": 0.2717,
      "step": 1440
    },
    {
      "epoch": 51.79,
      "learning_rate": 3.747815565967371e-05,
      "loss": 0.2842,
      "step": 1450
    },
    {
      "epoch": 52.0,
      "eval_loss": 0.29776862263679504,
      "eval_runtime": 2.6164,
      "eval_samples_per_second": 118.483,
      "eval_steps_per_second": 1.911,
      "step": 1456
    },
    {
      "epoch": 52.14,
      "learning_rate": 3.747679946537569e-05,
      "loss": 0.2716,
      "step": 1460
    },
    {
      "epoch": 52.5,
      "learning_rate": 3.7475402456060905e-05,
      "loss": 0.2776,
      "step": 1470
    },
    {
      "epoch": 52.86,
      "learning_rate": 3.7473964634774137e-05,
      "loss": 0.2753,
      "step": 1480
    },
    {
      "epoch": 53.0,
      "eval_loss": 0.29514825344085693,
      "eval_runtime": 2.6208,
      "eval_samples_per_second": 118.283,
      "eval_steps_per_second": 1.908,
      "step": 1484
    },
    {
      "epoch": 53.21,
      "learning_rate": 3.747248600464912e-05,
      "loss": 0.268,
      "step": 1490
    },
    {
      "epoch": 53.57,
      "learning_rate": 3.747096656890852e-05,
      "loss": 0.2734,
      "step": 1500
    },
    {
      "epoch": 53.93,
      "learning_rate": 3.746940633086394e-05,
      "loss": 0.2733,
      "step": 1510
    },
    {
      "epoch": 54.0,
      "eval_loss": 0.28796395659446716,
      "eval_runtime": 2.6647,
      "eval_samples_per_second": 116.337,
      "eval_steps_per_second": 1.876,
      "step": 1512
    },
    {
      "epoch": 54.29,
      "learning_rate": 3.746780529391593e-05,
      "loss": 0.2638,
      "step": 1520
    },
    {
      "epoch": 54.64,
      "learning_rate": 3.7466163461553934e-05,
      "loss": 0.274,
      "step": 1530
    },
    {
      "epoch": 55.0,
      "learning_rate": 3.746448083735632e-05,
      "loss": 0.2782,
      "step": 1540
    },
    {
      "epoch": 55.0,
      "eval_loss": 0.29691949486732483,
      "eval_runtime": 2.6296,
      "eval_samples_per_second": 117.889,
      "eval_steps_per_second": 1.901,
      "step": 1540
    },
    {
      "epoch": 55.36,
      "learning_rate": 3.746275742499037e-05,
      "loss": 0.2749,
      "step": 1550
    },
    {
      "epoch": 55.71,
      "learning_rate": 3.746099322821226e-05,
      "loss": 0.2789,
      "step": 1560
    },
    {
      "epoch": 56.0,
      "eval_loss": 0.29188984632492065,
      "eval_runtime": 2.6147,
      "eval_samples_per_second": 118.559,
      "eval_steps_per_second": 1.912,
      "step": 1568
    },
    {
      "epoch": 56.07,
      "learning_rate": 3.7459188250867045e-05,
      "loss": 0.2537,
      "step": 1570
    },
    {
      "epoch": 56.43,
      "learning_rate": 3.7457342496888676e-05,
      "loss": 0.2718,
      "step": 1580
    },
    {
      "epoch": 56.79,
      "learning_rate": 3.745545597029996e-05,
      "loss": 0.2815,
      "step": 1590
    },
    {
      "epoch": 57.0,
      "eval_loss": 0.291569322347641,
      "eval_runtime": 2.6148,
      "eval_samples_per_second": 118.555,
      "eval_steps_per_second": 1.912,
      "step": 1596
    },
    {
      "epoch": 57.14,
      "learning_rate": 3.7453528675212585e-05,
      "loss": 0.2663,
      "step": 1600
    },
    {
      "epoch": 57.5,
      "learning_rate": 3.745156061582709e-05,
      "loss": 0.2785,
      "step": 1610
    },
    {
      "epoch": 57.86,
      "learning_rate": 3.744955179643285e-05,
      "loss": 0.2629,
      "step": 1620
    },
    {
      "epoch": 58.0,
      "eval_loss": 0.29474544525146484,
      "eval_runtime": 2.6396,
      "eval_samples_per_second": 117.441,
      "eval_steps_per_second": 1.894,
      "step": 1624
    },
    {
      "epoch": 58.21,
      "learning_rate": 3.744750222140808e-05,
      "loss": 0.2682,
      "step": 1630
    },
    {
      "epoch": 58.57,
      "learning_rate": 3.744541189521983e-05,
      "loss": 0.2666,
      "step": 1640
    },
    {
      "epoch": 58.93,
      "learning_rate": 3.7443280822423956e-05,
      "loss": 0.2716,
      "step": 1650
    },
    {
      "epoch": 59.0,
      "eval_loss": 0.28275418281555176,
      "eval_runtime": 2.6524,
      "eval_samples_per_second": 116.876,
      "eval_steps_per_second": 1.885,
      "step": 1652
    },
    {
      "epoch": 59.29,
      "learning_rate": 3.744110900766512e-05,
      "loss": 0.2775,
      "step": 1660
    },
    {
      "epoch": 59.64,
      "learning_rate": 3.7438896455676804e-05,
      "loss": 0.2767,
      "step": 1670
    },
    {
      "epoch": 60.0,
      "learning_rate": 3.7436643171281254e-05,
      "loss": 0.2623,
      "step": 1680
    },
    {
      "epoch": 60.0,
      "eval_loss": 0.29241943359375,
      "eval_runtime": 2.6403,
      "eval_samples_per_second": 117.412,
      "eval_steps_per_second": 1.894,
      "step": 1680
    },
    {
      "epoch": 60.36,
      "learning_rate": 1.3997036682248361e-05,
      "loss": 0.2725,
      "step": 1690
    },
    {
      "epoch": 60.71,
      "learning_rate": 1.3783159273580512e-05,
      "loss": 0.2773,
      "step": 1700
    },
    {
      "epoch": 61.0,
      "eval_loss": 0.27645179629325867,
      "eval_runtime": 2.6338,
| "eval_samples_per_second": 117.701, | |
| "eval_steps_per_second": 1.898, | |
| "step": 1708 | |
| }, | |
| { | |
| "epoch": 61.07, | |
| "learning_rate": 1.3569974670940111e-05, | |
| "loss": 0.2674, | |
| "step": 1710 | |
| }, | |
| { | |
| "epoch": 61.43, | |
| "learning_rate": 1.3357512610649675e-05, | |
| "loss": 0.2761, | |
| "step": 1720 | |
| }, | |
| { | |
| "epoch": 61.79, | |
| "learning_rate": 1.3145802728246982e-05, | |
| "loss": 0.268, | |
| "step": 1730 | |
| }, | |
| { | |
| "epoch": 62.0, | |
| "eval_loss": 0.2753865420818329, | |
| "eval_runtime": 3.0543, | |
| "eval_samples_per_second": 101.496, | |
| "eval_steps_per_second": 1.637, | |
| "step": 1736 | |
| }, | |
| { | |
| "epoch": 62.14, | |
| "learning_rate": 1.2934874554351332e-05, | |
| "loss": 0.2724, | |
| "step": 1740 | |
| }, | |
| { | |
| "epoch": 62.5, | |
| "learning_rate": 1.2724757510544413e-05, | |
| "loss": 0.2694, | |
| "step": 1750 | |
| }, | |
| { | |
| "epoch": 62.86, | |
| "learning_rate": 1.2515480905266435e-05, | |
| "loss": 0.2839, | |
| "step": 1760 | |
| }, | |
| { | |
| "epoch": 63.0, | |
| "eval_loss": 0.2744416296482086, | |
| "eval_runtime": 2.9673, | |
| "eval_samples_per_second": 104.471, | |
| "eval_steps_per_second": 1.685, | |
| "step": 1764 | |
| }, | |
| { | |
| "epoch": 63.21, | |
| "learning_rate": 1.2307073929727975e-05, | |
| "loss": 0.2731, | |
| "step": 1770 | |
| }, | |
| { | |
| "epoch": 63.57, | |
| "learning_rate": 1.2099565653838216e-05, | |
| "loss": 0.277, | |
| "step": 1780 | |
| }, | |
| { | |
| "epoch": 63.93, | |
| "learning_rate": 1.1892985022150073e-05, | |
| "loss": 0.2684, | |
| "step": 1790 | |
| }, | |
| { | |
| "epoch": 64.0, | |
| "eval_loss": 0.2744133472442627, | |
| "eval_runtime": 3.1325, | |
| "eval_samples_per_second": 98.963, | |
| "eval_steps_per_second": 1.596, | |
| "step": 1792 | |
| }, | |
| { | |
| "epoch": 64.29, | |
| "learning_rate": 1.1687360849822864e-05, | |
| "loss": 0.2694, | |
| "step": 1800 | |
| }, | |
| { | |
| "epoch": 64.64, | |
| "learning_rate": 1.1482721818602949e-05, | |
| "loss": 0.2639, | |
| "step": 1810 | |
| }, | |
| { | |
| "epoch": 65.0, | |
| "learning_rate": 1.1279096472823067e-05, | |
| "loss": 0.2865, | |
| "step": 1820 | |
| }, | |
| { | |
| "epoch": 65.0, | |
| "eval_loss": 0.2716241776943207, | |
| "eval_runtime": 2.9389, | |
| "eval_samples_per_second": 105.482, | |
| "eval_steps_per_second": 1.701, | |
| "step": 1820 | |
| }, | |
| { | |
| "epoch": 65.36, | |
| "learning_rate": 1.1076513215420738e-05, | |
| "loss": 0.2651, | |
| "step": 1830 | |
| }, | |
| { | |
| "epoch": 65.71, | |
| "learning_rate": 1.0875000303976493e-05, | |
| "loss": 0.2845, | |
| "step": 1840 | |
| }, | |
| { | |
| "epoch": 66.0, | |
| "eval_loss": 0.2768835127353668, | |
| "eval_runtime": 2.9046, | |
| "eval_samples_per_second": 106.726, | |
| "eval_steps_per_second": 1.721, | |
| "step": 1848 | |
| }, | |
| { | |
| "epoch": 66.07, | |
| "learning_rate": 1.0674585846772314e-05, | |
| "loss": 0.2768, | |
| "step": 1850 | |
| }, | |
| { | |
| "epoch": 66.43, | |
| "learning_rate": 1.04752977988709e-05, | |
| "loss": 0.2785, | |
| "step": 1860 | |
| }, | |
| { | |
| "epoch": 66.79, | |
| "learning_rate": 1.0277163958216333e-05, | |
| "loss": 0.2663, | |
| "step": 1870 | |
| }, | |
| { | |
| "epoch": 67.0, | |
| "eval_loss": 0.2754151523113251, | |
| "eval_runtime": 3.1034, | |
| "eval_samples_per_second": 99.891, | |
| "eval_steps_per_second": 1.611, | |
| "step": 1876 | |
| }, | |
| { | |
| "epoch": 67.14, | |
| "learning_rate": 1.0080211961756649e-05, | |
| "loss": 0.2632, | |
| "step": 1880 | |
| }, | |
| { | |
| "epoch": 67.5, | |
| "learning_rate": 9.884469281588848e-06, | |
| "loss": 0.2832, | |
| "step": 1890 | |
| }, | |
| { | |
| "epoch": 67.86, | |
| "learning_rate": 9.68996322112695e-06, | |
| "loss": 0.269, | |
| "step": 1900 | |
| }, | |
| { | |
| "epoch": 68.0, | |
| "eval_loss": 0.2737092077732086, | |
| "eval_runtime": 3.0377, | |
| "eval_samples_per_second": 102.051, | |
| "eval_steps_per_second": 1.646, | |
| "step": 1904 | |
| }, | |
| { | |
| "epoch": 68.21, | |
| "learning_rate": 9.496720911293496e-06, | |
| "loss": 0.2753, | |
| "step": 1910 | |
| }, | |
| { | |
| "epoch": 68.57, | |
| "learning_rate": 9.304769306735192e-06, | |
| "loss": 0.2793, | |
| "step": 1920 | |
| }, | |
| { | |
| "epoch": 68.93, | |
| "learning_rate": 9.114135182063088e-06, | |
| "loss": 0.2681, | |
| "step": 1930 | |
| }, | |
| { | |
| "epoch": 69.0, | |
| "eval_loss": 0.269718736410141, | |
| "eval_runtime": 3.08, | |
| "eval_samples_per_second": 100.648, | |
| "eval_steps_per_second": 1.623, | |
| "step": 1932 | |
| }, | |
| { | |
| "epoch": 69.29, | |
| "learning_rate": 8.924845128117907e-06, | |
| "loss": 0.2763, | |
| "step": 1940 | |
| }, | |
| { | |
| "epoch": 69.64, | |
| "learning_rate": 8.736925548260963e-06, | |
| "loss": 0.2679, | |
| "step": 1950 | |
| }, | |
| { | |
| "epoch": 70.0, | |
| "learning_rate": 8.550402654691271e-06, | |
| "loss": 0.2748, | |
| "step": 1960 | |
| }, | |
| { | |
| "epoch": 70.0, | |
| "eval_loss": 0.2778591513633728, | |
| "eval_runtime": 2.6486, | |
| "eval_samples_per_second": 117.044, | |
| "eval_steps_per_second": 1.888, | |
| "step": 1960 | |
| }, | |
| { | |
| "epoch": 70.36, | |
| "learning_rate": 8.365302464789306e-06, | |
| "loss": 0.2758, | |
| "step": 1970 | |
| }, | |
| { | |
| "epoch": 70.71, | |
| "learning_rate": 8.181650797487918e-06, | |
| "loss": 0.2769, | |
| "step": 1980 | |
| }, | |
| { | |
| "epoch": 71.0, | |
| "eval_loss": 0.272790789604187, | |
| "eval_runtime": 2.6447, | |
| "eval_samples_per_second": 117.214, | |
| "eval_steps_per_second": 1.891, | |
| "step": 1988 | |
| }, | |
| { | |
| "epoch": 71.07, | |
| "learning_rate": 7.999473269670997e-06, | |
| "loss": 0.2662, | |
| "step": 1990 | |
| }, | |
| { | |
| "epoch": 71.43, | |
| "learning_rate": 7.818795292600176e-06, | |
| "loss": 0.2549, | |
| "step": 2000 | |
| }, | |
| { | |
| "epoch": 71.79, | |
| "learning_rate": 7.639642068370416e-06, | |
| "loss": 0.2805, | |
| "step": 2010 | |
| }, | |
| { | |
| "epoch": 72.0, | |
| "eval_loss": 0.27293434739112854, | |
| "eval_runtime": 2.6471, | |
| "eval_samples_per_second": 117.111, | |
| "eval_steps_per_second": 1.889, | |
| "step": 2016 | |
| }, | |
| { | |
| "epoch": 72.14, | |
| "learning_rate": 7.462038586394576e-06, | |
| "loss": 0.2852, | |
| "step": 2020 | |
| }, | |
| { | |
| "epoch": 72.5, | |
| "learning_rate": 7.286009619917803e-06, | |
| "loss": 0.2597, | |
| "step": 2030 | |
| }, | |
| { | |
| "epoch": 72.86, | |
| "learning_rate": 7.1115797225619155e-06, | |
| "loss": 0.2771, | |
| "step": 2040 | |
| }, | |
| { | |
| "epoch": 73.0, | |
| "eval_loss": 0.272775262594223, | |
| "eval_runtime": 2.6711, | |
| "eval_samples_per_second": 116.058, | |
| "eval_steps_per_second": 1.872, | |
| "step": 2044 | |
| }, | |
| { | |
| "epoch": 73.21, | |
| "learning_rate": 6.938773224900619e-06, | |
| "loss": 0.2752, | |
| "step": 2050 | |
| }, | |
| { | |
| "epoch": 73.57, | |
| "learning_rate": 6.7676142310656305e-06, | |
| "loss": 0.2646, | |
| "step": 2060 | |
| }, | |
| { | |
| "epoch": 73.93, | |
| "learning_rate": 6.598126615384559e-06, | |
| "loss": 0.2717, | |
| "step": 2070 | |
| }, | |
| { | |
| "epoch": 74.0, | |
| "eval_loss": 0.27492260932922363, | |
| "eval_runtime": 2.6449, | |
| "eval_samples_per_second": 117.205, | |
| "eval_steps_per_second": 1.89, | |
| "step": 2072 | |
| }, | |
| { | |
| "epoch": 74.29, | |
| "learning_rate": 6.430334019050682e-06, | |
| "loss": 0.2793, | |
| "step": 2080 | |
| }, | |
| { | |
| "epoch": 74.64, | |
| "learning_rate": 6.264259846825417e-06, | |
| "loss": 0.2744, | |
| "step": 2090 | |
| }, | |
| { | |
| "epoch": 75.0, | |
| "learning_rate": 6.099927263773606e-06, | |
| "loss": 0.267, | |
| "step": 2100 | |
| }, | |
| { | |
| "epoch": 75.0, | |
| "eval_loss": 0.273212730884552, | |
| "eval_runtime": 2.6667, | |
| "eval_samples_per_second": 116.249, | |
| "eval_steps_per_second": 1.875, | |
| "step": 2100 | |
| }, | |
| { | |
| "epoch": 75.36, | |
| "learning_rate": 5.9373591920323426e-06, | |
| "loss": 0.2673, | |
| "step": 2110 | |
| }, | |
| { | |
| "epoch": 75.71, | |
| "learning_rate": 5.776578307613639e-06, | |
| "loss": 0.2812, | |
| "step": 2120 | |
| }, | |
| { | |
| "epoch": 76.0, | |
| "eval_loss": 0.27429237961769104, | |
| "eval_runtime": 2.66, | |
| "eval_samples_per_second": 116.541, | |
| "eval_steps_per_second": 1.88, | |
| "step": 2128 | |
| }, | |
| { | |
| "epoch": 76.07, | |
| "learning_rate": 5.6176070372414275e-06, | |
| "loss": 0.2674, | |
| "step": 2130 | |
| }, | |
| { | |
| "epoch": 76.43, | |
| "learning_rate": 5.460467555223369e-06, | |
| "loss": 0.2696, | |
| "step": 2140 | |
| }, | |
| { | |
| "epoch": 76.79, | |
| "learning_rate": 5.305181780357802e-06, | |
| "loss": 0.2749, | |
| "step": 2150 | |
| }, | |
| { | |
| "epoch": 77.0, | |
| "eval_loss": 0.2738768756389618, | |
| "eval_runtime": 2.6486, | |
| "eval_samples_per_second": 117.043, | |
| "eval_steps_per_second": 1.888, | |
| "step": 2156 | |
| }, | |
| { | |
| "epoch": 77.14, | |
| "learning_rate": 5.1517713728764e-06, | |
| "loss": 0.2837, | |
| "step": 2160 | |
| }, | |
| { | |
| "epoch": 77.5, | |
| "learning_rate": 5.000257731422843e-06, | |
| "loss": 0.2606, | |
| "step": 2170 | |
| }, | |
| { | |
| "epoch": 77.86, | |
| "learning_rate": 4.850661990068047e-06, | |
| "loss": 0.2746, | |
| "step": 2180 | |
| }, | |
| { | |
| "epoch": 78.0, | |
| "eval_loss": 0.27298134565353394, | |
| "eval_runtime": 2.6537, | |
| "eval_samples_per_second": 116.817, | |
| "eval_steps_per_second": 1.884, | |
| "step": 2184 | |
| }, | |
| { | |
| "epoch": 78.21, | |
| "learning_rate": 4.703005015362185e-06, | |
| "loss": 0.2828, | |
| "step": 2190 | |
| }, | |
| { | |
| "epoch": 78.57, | |
| "learning_rate": 4.557307403424147e-06, | |
| "loss": 0.2687, | |
| "step": 2200 | |
| }, | |
| { | |
| "epoch": 78.93, | |
| "learning_rate": 4.41358947706863e-06, | |
| "loss": 0.2707, | |
| "step": 2210 | |
| }, | |
| { | |
| "epoch": 79.0, | |
| "eval_loss": 0.274308443069458, | |
| "eval_runtime": 2.6512, | |
| "eval_samples_per_second": 116.929, | |
| "eval_steps_per_second": 1.886, | |
| "step": 2212 | |
| }, | |
| { | |
| "epoch": 79.29, | |
| "learning_rate": 4.271871282971398e-06, | |
| "loss": 0.2788, | |
| "step": 2220 | |
| }, | |
| { | |
| "epoch": 79.64, | |
| "learning_rate": 4.1321725888730675e-06, | |
| "loss": 0.2692, | |
| "step": 2230 | |
| }, | |
| { | |
| "epoch": 80.0, | |
| "learning_rate": 3.994512880821713e-06, | |
| "loss": 0.2644, | |
| "step": 2240 | |
| }, | |
| { | |
| "epoch": 80.0, | |
| "eval_loss": 0.2740114629268646, | |
| "eval_runtime": 2.6658, | |
| "eval_samples_per_second": 116.29, | |
| "eval_steps_per_second": 1.876, | |
| "step": 2240 | |
| }, | |
| { | |
| "epoch": 80.36, | |
| "learning_rate": 3.858911360454918e-06, | |
| "loss": 0.2685, | |
| "step": 2250 | |
| }, | |
| { | |
| "epoch": 80.71, | |
| "learning_rate": 3.72538694232134e-06, | |
| "loss": 0.2691, | |
| "step": 2260 | |
| }, | |
| { | |
| "epoch": 81.0, | |
| "eval_loss": 0.2726958394050598, | |
| "eval_runtime": 2.6341, | |
| "eval_samples_per_second": 117.686, | |
| "eval_steps_per_second": 1.898, | |
| "step": 2268 | |
| }, | |
| { | |
| "epoch": 81.07, | |
| "learning_rate": 3.593958251242444e-06, | |
| "loss": 0.2748, | |
| "step": 2270 | |
| }, | |
| { | |
| "epoch": 81.43, | |
| "learning_rate": 3.4646436197145335e-06, | |
| "loss": 0.2689, | |
| "step": 2280 | |
| }, | |
| { | |
| "epoch": 81.79, | |
| "learning_rate": 3.3374610853516906e-06, | |
| "loss": 0.2679, | |
| "step": 2290 | |
| }, | |
| { | |
| "epoch": 82.0, | |
| "eval_loss": 0.27709755301475525, | |
| "eval_runtime": 2.6545, | |
| "eval_samples_per_second": 116.784, | |
| "eval_steps_per_second": 1.884, | |
| "step": 2296 | |
| }, | |
| { | |
| "epoch": 82.14, | |
| "learning_rate": 3.212428388369718e-06, | |
| "loss": 0.2699, | |
| "step": 2300 | |
| }, | |
| { | |
| "epoch": 82.5, | |
| "learning_rate": 3.0895629691116723e-06, | |
| "loss": 0.2768, | |
| "step": 2310 | |
| }, | |
| { | |
| "epoch": 82.86, | |
| "learning_rate": 2.9688819656151164e-06, | |
| "loss": 0.2748, | |
| "step": 2320 | |
| }, | |
| { | |
| "epoch": 83.0, | |
| "eval_loss": 0.2743559777736664, | |
| "eval_runtime": 2.6236, | |
| "eval_samples_per_second": 118.158, | |
| "eval_steps_per_second": 1.906, | |
| "step": 2324 | |
| }, | |
| { | |
| "epoch": 83.21, | |
| "learning_rate": 2.8504022112216642e-06, | |
| "loss": 0.2635, | |
| "step": 2330 | |
| }, | |
| { | |
| "epoch": 83.57, | |
| "learning_rate": 2.734140232228908e-06, | |
| "loss": 0.2667, | |
| "step": 2340 | |
| }, | |
| { | |
| "epoch": 83.93, | |
| "learning_rate": 2.6201122455852582e-06, | |
| "loss": 0.2744, | |
| "step": 2350 | |
| }, | |
| { | |
| "epoch": 84.0, | |
| "eval_loss": 0.2703389823436737, | |
| "eval_runtime": 2.6131, | |
| "eval_samples_per_second": 118.631, | |
| "eval_steps_per_second": 1.913, | |
| "step": 2352 | |
| }, | |
| { | |
| "epoch": 84.29, | |
| "learning_rate": 2.5083341566278907e-06, | |
| "loss": 0.2761, | |
| "step": 2360 | |
| }, | |
| { | |
| "epoch": 84.64, | |
| "learning_rate": 2.398821556864163e-06, | |
| "loss": 0.2699, | |
| "step": 2370 | |
| }, | |
| { | |
| "epoch": 85.0, | |
| "learning_rate": 2.291589721796841e-06, | |
| "loss": 0.2715, | |
| "step": 2380 | |
| }, | |
| { | |
| "epoch": 85.0, | |
| "eval_loss": 0.27332332730293274, | |
| "eval_runtime": 2.6685, | |
| "eval_samples_per_second": 116.168, | |
| "eval_steps_per_second": 1.874, | |
| "step": 2380 | |
| }, | |
| { | |
| "epoch": 85.36, | |
| "learning_rate": 2.1866536087933395e-06, | |
| "loss": 0.263, | |
| "step": 2390 | |
| }, | |
| { | |
| "epoch": 85.71, | |
| "learning_rate": 2.084027854999404e-06, | |
| "loss": 0.2682, | |
| "step": 2400 | |
| }, | |
| { | |
| "epoch": 86.0, | |
| "eval_loss": 0.271467000246048, | |
| "eval_runtime": 2.6604, | |
| "eval_samples_per_second": 116.522, | |
| "eval_steps_per_second": 1.879, | |
| "step": 2408 | |
| }, | |
| { | |
| "epoch": 86.07, | |
| "learning_rate": 1.9837267752974077e-06, | |
| "loss": 0.2783, | |
| "step": 2410 | |
| }, | |
| { | |
| "epoch": 86.43, | |
| "learning_rate": 1.8857643603096489e-06, | |
| "loss": 0.2776, | |
| "step": 2420 | |
| }, | |
| { | |
| "epoch": 86.79, | |
| "learning_rate": 1.7901542744468242e-06, | |
| "loss": 0.2641, | |
| "step": 2430 | |
| }, | |
| { | |
| "epoch": 87.0, | |
| "eval_loss": 0.27218252420425415, | |
| "eval_runtime": 2.6297, | |
| "eval_samples_per_second": 117.884, | |
| "eval_steps_per_second": 1.901, | |
| "step": 2436 | |
| }, | |
| { | |
| "epoch": 87.14, | |
| "learning_rate": 1.6969098540020453e-06, | |
| "loss": 0.2704, | |
| "step": 2440 | |
| }, | |
| { | |
| "epoch": 87.5, | |
| "learning_rate": 1.6060441052906117e-06, | |
| "loss": 0.2769, | |
| "step": 2450 | |
| }, | |
| { | |
| "epoch": 87.86, | |
| "learning_rate": 1.517569702835792e-06, | |
| "loss": 0.274, | |
| "step": 2460 | |
| }, | |
| { | |
| "epoch": 88.0, | |
| "eval_loss": 0.27482396364212036, | |
| "eval_runtime": 2.6311, | |
| "eval_samples_per_second": 117.823, | |
| "eval_steps_per_second": 1.9, | |
| "step": 2464 | |
| }, | |
| { | |
| "epoch": 88.21, | |
| "learning_rate": 1.4314989876009292e-06, | |
| "loss": 0.2561, | |
| "step": 2470 | |
| }, | |
| { | |
| "epoch": 88.57, | |
| "learning_rate": 1.3478439652680215e-06, | |
| "loss": 0.281, | |
| "step": 2480 | |
| }, | |
| { | |
| "epoch": 88.93, | |
| "learning_rate": 1.2666163045631007e-06, | |
| "loss": 0.2669, | |
| "step": 2490 | |
| }, | |
| { | |
| "epoch": 89.0, | |
| "eval_loss": 0.27534210681915283, | |
| "eval_runtime": 2.6393, | |
| "eval_samples_per_second": 117.455, | |
| "eval_steps_per_second": 1.894, | |
| "step": 2492 | |
| }, | |
| { | |
| "epoch": 89.29, | |
| "learning_rate": 1.1878273356286133e-06, | |
| "loss": 0.2765, | |
| "step": 2500 | |
| }, | |
| { | |
| "epoch": 89.64, | |
| "learning_rate": 1.1114880484430204e-06, | |
| "loss": 0.2635, | |
| "step": 2510 | |
| }, | |
| { | |
| "epoch": 90.0, | |
| "learning_rate": 1.0376090912878263e-06, | |
| "loss": 0.2707, | |
| "step": 2520 | |
| }, | |
| { | |
| "epoch": 90.0, | |
| "eval_loss": 0.2724357843399048, | |
| "eval_runtime": 2.6596, | |
| "eval_samples_per_second": 116.558, | |
| "eval_steps_per_second": 1.88, | |
| "step": 2520 | |
| }, | |
| { | |
| "epoch": 90.36, | |
| "learning_rate": 9.662007692623237e-07, | |
| "loss": 0.2667, | |
| "step": 2530 | |
| }, | |
| { | |
| "epoch": 90.71, | |
| "learning_rate": 8.972730428461452e-07, | |
| "loss": 0.2755, | |
| "step": 2540 | |
| }, | |
| { | |
| "epoch": 91.0, | |
| "eval_loss": 0.27034902572631836, | |
| "eval_runtime": 2.6286, | |
| "eval_samples_per_second": 117.934, | |
| "eval_steps_per_second": 1.902, | |
| "step": 2548 | |
| }, | |
| { | |
| "epoch": 91.07, | |
| "learning_rate": 8.308355265099334e-07, | |
| "loss": 0.2692, | |
| "step": 2550 | |
| }, | |
| { | |
| "epoch": 91.43, | |
| "learning_rate": 7.668974873742282e-07, | |
| "loss": 0.263, | |
| "step": 2560 | |
| }, | |
| { | |
| "epoch": 91.79, | |
| "learning_rate": 7.054678439168689e-07, | |
| "loss": 0.2769, | |
| "step": 2570 | |
| }, | |
| { | |
| "epoch": 92.0, | |
| "eval_loss": 0.27368903160095215, | |
| "eval_runtime": 2.6502, | |
| "eval_samples_per_second": 116.973, | |
| "eval_steps_per_second": 1.887, | |
| "step": 2576 | |
| }, | |
| { | |
| "epoch": 92.14, | |
| "learning_rate": 6.465551647289506e-07, | |
| "loss": 0.2722, | |
| "step": 2580 | |
| }, | |
| { | |
| "epoch": 92.5, | |
| "learning_rate": 5.901676673196507e-07, | |
| "loss": 0.2828, | |
| "step": 2590 | |
| }, | |
| { | |
| "epoch": 92.86, | |
| "learning_rate": 5.36313216969983e-07, | |
| "loss": 0.2659, | |
| "step": 2600 | |
| }, | |
| { | |
| "epoch": 93.0, | |
| "eval_loss": 0.2720969319343567, | |
| "eval_runtime": 2.6064, | |
| "eval_samples_per_second": 118.938, | |
| "eval_steps_per_second": 1.918, | |
| "step": 2604 | |
| }, | |
| { | |
| "epoch": 93.21, | |
| "learning_rate": 4.849993256357071e-07, | |
| "loss": 0.2608, | |
| "step": 2610 | |
| }, | |
| { | |
| "epoch": 93.57, | |
| "learning_rate": 4.362331508995204e-07, | |
| "loss": 0.2731, | |
| "step": 2620 | |
| }, | |
| { | |
| "epoch": 93.93, | |
| "learning_rate": 3.9002149497265255e-07, | |
| "loss": 0.2674, | |
| "step": 2630 | |
| }, | |
| { | |
| "epoch": 94.0, | |
| "eval_loss": 0.2763327658176422, | |
| "eval_runtime": 2.6289, | |
| "eval_samples_per_second": 117.922, | |
| "eval_steps_per_second": 1.902, | |
| "step": 2632 | |
| }, | |
| { | |
| "epoch": 94.29, | |
| "learning_rate": 3.463708037460683e-07, | |
| "loss": 0.277, | |
| "step": 2640 | |
| }, | |
| { | |
| "epoch": 94.64, | |
| "learning_rate": 3.052871658913533e-07, | |
| "loss": 0.2746, | |
| "step": 2650 | |
| }, | |
| { | |
| "epoch": 95.0, | |
| "learning_rate": 2.6677631201141856e-07, | |
| "loss": 0.2723, | |
| "step": 2660 | |
| }, | |
| { | |
| "epoch": 95.0, | |
| "eval_loss": 0.27226099371910095, | |
| "eval_runtime": 2.6569, | |
| "eval_samples_per_second": 116.678, | |
| "eval_steps_per_second": 1.882, | |
| "step": 2660 | |
| }, | |
| { | |
| "epoch": 95.36, | |
| "learning_rate": 2.3084361384116886e-07, | |
| "loss": 0.261, | |
| "step": 2670 | |
| }, | |
| { | |
| "epoch": 95.71, | |
| "learning_rate": 1.9749408349821265e-07, | |
| "loss": 0.2723, | |
| "step": 2680 | |
| }, | |
| { | |
| "epoch": 96.0, | |
| "eval_loss": 0.2743915319442749, | |
| "eval_runtime": 2.6421, | |
| "eval_samples_per_second": 117.329, | |
| "eval_steps_per_second": 1.892, | |
| "step": 2688 | |
| }, | |
| { | |
| "epoch": 96.07, | |
| "learning_rate": 1.6673237278375036e-07, | |
| "loss": 0.2775, | |
| "step": 2690 | |
| }, | |
| { | |
| "epoch": 96.43, | |
| "learning_rate": 1.3856277253369782e-07, | |
| "loss": 0.2813, | |
| "step": 2700 | |
| }, | |
| { | |
| "epoch": 96.79, | |
| "learning_rate": 1.1298921202018765e-07, | |
| "loss": 0.272, | |
| "step": 2710 | |
| }, | |
| { | |
| "epoch": 97.0, | |
| "eval_loss": 0.26857295632362366, | |
| "eval_runtime": 2.6133, | |
| "eval_samples_per_second": 118.624, | |
| "eval_steps_per_second": 1.913, | |
| "step": 2716 | |
| }, | |
| { | |
| "epoch": 97.14, | |
| "learning_rate": 9.001525840348495e-08, | |
| "loss": 0.2601, | |
| "step": 2720 | |
| }, | |
| { | |
| "epoch": 97.5, | |
| "learning_rate": 6.964411623440816e-08, | |
| "loss": 0.2756, | |
| "step": 2730 | |
| }, | |
| { | |
| "epoch": 97.86, | |
| "learning_rate": 5.187862700735332e-08, | |
| "loss": 0.27, | |
| "step": 2740 | |
| }, | |
| { | |
| "epoch": 98.0, | |
| "eval_loss": 0.27275460958480835, | |
| "eval_runtime": 2.6408, | |
| "eval_samples_per_second": 117.389, | |
| "eval_steps_per_second": 1.893, | |
| "step": 2744 | |
| }, | |
| { | |
| "epoch": 98.21, | |
| "learning_rate": 3.672126876393816e-08, | |
| "loss": 0.2757, | |
| "step": 2750 | |
| }, | |
| { | |
| "epoch": 98.57, | |
| "learning_rate": 2.4174155747345297e-08, | |
| "loss": 0.2662, | |
| "step": 2760 | |
| }, | |
| { | |
| "epoch": 98.93, | |
| "learning_rate": 1.4239038107416445e-08, | |
| "loss": 0.2721, | |
| "step": 2770 | |
| }, | |
| { | |
| "epoch": 99.0, | |
| "eval_loss": 0.27428731322288513, | |
| "eval_runtime": 2.6271, | |
| "eval_samples_per_second": 118.001, | |
| "eval_steps_per_second": 1.903, | |
| "step": 2772 | |
| }, | |
| { | |
| "epoch": 99.29, | |
| "learning_rate": 6.917301656537283e-09, | |
| "loss": 0.2792, | |
| "step": 2780 | |
| }, | |
| { | |
| "epoch": 99.64, | |
| "learning_rate": 2.2099676763295825e-09, | |
| "loss": 0.2751, | |
| "step": 2790 | |
| }, | |
| { | |
| "epoch": 100.0, | |
| "learning_rate": 1.176927751922463e-10, | |
| "loss": 0.2692, | |
| "step": 2800 | |
| }, | |
| { | |
| "epoch": 100.0, | |
| "eval_loss": 0.2747902274131775, | |
| "eval_runtime": 2.6706, | |
| "eval_samples_per_second": 116.08, | |
| "eval_steps_per_second": 1.872, | |
| "step": 2800 | |
| }, | |
| { | |
| "epoch": 100.0, | |
| "step": 2800, | |
| "total_flos": 1.7697633664499712e+19, | |
| "train_loss": 0.1087875749383654, | |
| "train_runtime": 1716.2917, | |
| "train_samples_per_second": 102.022, | |
| "train_steps_per_second": 1.631 | |
| } | |
| ], | |
| "max_steps": 2800, | |
| "num_train_epochs": 100, | |
| "total_flos": 1.7697633664499712e+19, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |