{
  "best_metric": 1.153731107711792,
  "best_model_checkpoint": "outputs/checkpoint-2000",
  "epoch": 0.64,
  "global_step": 2000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 1e-05,
      "loss": 1.8184,
      "step": 5
    },
    {
      "epoch": 0.0,
      "learning_rate": 2e-05,
      "loss": 1.7858,
      "step": 10
    },
    {
      "epoch": 0.0,
      "learning_rate": 3e-05,
      "loss": 1.7218,
      "step": 15
    },
    {
      "epoch": 0.01,
      "learning_rate": 4e-05,
      "loss": 1.7218,
      "step": 20
    },
    {
      "epoch": 0.01,
      "learning_rate": 5e-05,
      "loss": 1.7009,
      "step": 25
    },
    {
      "epoch": 0.01,
      "learning_rate": 6e-05,
      "loss": 1.6714,
      "step": 30
    },
    {
      "epoch": 0.01,
      "learning_rate": 7e-05,
      "loss": 1.6582,
      "step": 35
    },
    {
      "epoch": 0.01,
      "learning_rate": 8e-05,
      "loss": 1.5659,
      "step": 40
    },
    {
      "epoch": 0.01,
      "learning_rate": 9e-05,
      "loss": 1.5343,
      "step": 45
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.0001,
      "loss": 1.5252,
      "step": 50
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.00011000000000000002,
      "loss": 1.414,
      "step": 55
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.00012,
      "loss": 1.3919,
      "step": 60
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.00013000000000000002,
      "loss": 1.352,
      "step": 65
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.00014,
      "loss": 1.3762,
      "step": 70
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.00015000000000000001,
      "loss": 1.3238,
      "step": 75
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.00016,
      "loss": 1.3306,
      "step": 80
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.00017,
      "loss": 1.3423,
      "step": 85
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.00018,
      "loss": 1.3199,
      "step": 90
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.00019,
      "loss": 1.3506,
      "step": 95
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.0002,
      "loss": 1.3315,
      "step": 100
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.0001998921832884097,
      "loss": 1.3142,
      "step": 105
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.00019978436657681943,
      "loss": 1.2994,
      "step": 110
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.00019967654986522912,
      "loss": 1.2972,
      "step": 115
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.00019956873315363883,
      "loss": 1.3135,
      "step": 120
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.00019946091644204851,
      "loss": 1.2903,
      "step": 125
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.00019935309973045823,
      "loss": 1.2783,
      "step": 130
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.00019924528301886794,
      "loss": 1.3044,
      "step": 135
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.00019913746630727762,
      "loss": 1.2947,
      "step": 140
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.00019902964959568736,
      "loss": 1.2759,
      "step": 145
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.00019892183288409705,
      "loss": 1.2746,
      "step": 150
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.00019881401617250676,
      "loss": 1.2869,
      "step": 155
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.00019870619946091644,
      "loss": 1.26,
      "step": 160
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.00019859838274932616,
      "loss": 1.258,
      "step": 165
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.00019849056603773587,
      "loss": 1.2649,
      "step": 170
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.00019838274932614555,
      "loss": 1.2549,
      "step": 175
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.00019827493261455526,
      "loss": 1.2859,
      "step": 180
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.00019816711590296498,
      "loss": 1.2934,
      "step": 185
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.00019805929919137466,
      "loss": 1.3011,
      "step": 190
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.00019795148247978437,
      "loss": 1.2304,
      "step": 195
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.00019784366576819408,
      "loss": 1.2849,
      "step": 200
    },
    {
      "epoch": 0.06,
      "eval_loss": 1.2345943450927734,
      "eval_runtime": 710.2534,
      "eval_samples_per_second": 7.04,
      "eval_steps_per_second": 0.88,
      "step": 200
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.0001977358490566038,
      "loss": 1.2726,
      "step": 205
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.00019762803234501348,
      "loss": 1.2996,
      "step": 210
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.0001975202156334232,
      "loss": 1.2456,
      "step": 215
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.0001974123989218329,
      "loss": 1.2794,
      "step": 220
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.0001973045822102426,
      "loss": 1.2637,
      "step": 225
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.0001971967654986523,
      "loss": 1.252,
      "step": 230
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.000197088948787062,
      "loss": 1.2492,
      "step": 235
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.0001969811320754717,
      "loss": 1.2972,
      "step": 240
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.0001968733153638814,
      "loss": 1.3023,
      "step": 245
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.00019676549865229112,
      "loss": 1.2682,
      "step": 250
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.00019665768194070083,
      "loss": 1.2981,
      "step": 255
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.00019654986522911052,
      "loss": 1.3084,
      "step": 260
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.00019644204851752023,
      "loss": 1.2666,
      "step": 265
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00019633423180592994,
      "loss": 1.2623,
      "step": 270
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00019622641509433963,
      "loss": 1.2406,
      "step": 275
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00019611859838274934,
      "loss": 1.2623,
      "step": 280
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00019601078167115905,
      "loss": 1.2562,
      "step": 285
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00019590296495956873,
      "loss": 1.2166,
      "step": 290
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00019579514824797845,
      "loss": 1.2931,
      "step": 295
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.00019568733153638813,
      "loss": 1.2354,
      "step": 300
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.00019557951482479787,
      "loss": 1.2246,
      "step": 305
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.00019547169811320755,
      "loss": 1.2236,
      "step": 310
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.00019536388140161727,
      "loss": 1.2442,
      "step": 315
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.00019525606469002698,
      "loss": 1.2779,
      "step": 320
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.00019514824797843666,
      "loss": 1.2285,
      "step": 325
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.00019504043126684637,
      "loss": 1.2157,
      "step": 330
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.00019493261455525606,
      "loss": 1.2405,
      "step": 335
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.0001948247978436658,
      "loss": 1.2335,
      "step": 340
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.00019471698113207548,
      "loss": 1.2401,
      "step": 345
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.00019460916442048517,
      "loss": 1.2521,
      "step": 350
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.0001945013477088949,
      "loss": 1.2386,
      "step": 355
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.0001943935309973046,
      "loss": 1.2196,
      "step": 360
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.0001942857142857143,
      "loss": 1.192,
      "step": 365
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.000194177897574124,
      "loss": 1.1964,
      "step": 370
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.0001940700808625337,
      "loss": 1.2876,
      "step": 375
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.0001939622641509434,
      "loss": 1.2622,
      "step": 380
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.0001938544474393531,
      "loss": 1.2183,
      "step": 385
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.00019374663072776284,
      "loss": 1.206,
      "step": 390
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.00019363881401617252,
      "loss": 1.2697,
      "step": 395
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.0001935309973045822,
      "loss": 1.2515,
      "step": 400
    },
    {
      "epoch": 0.13,
      "eval_loss": 1.207701563835144,
      "eval_runtime": 709.7794,
      "eval_samples_per_second": 7.044,
      "eval_steps_per_second": 0.881,
      "step": 400
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.00019342318059299192,
      "loss": 1.2596,
      "step": 405
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.00019331536388140163,
      "loss": 1.2537,
      "step": 410
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.00019320754716981134,
      "loss": 1.2545,
      "step": 415
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.00019309973045822103,
      "loss": 1.2473,
      "step": 420
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.00019299191374663074,
      "loss": 1.2308,
      "step": 425
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.00019288409703504045,
      "loss": 1.2482,
      "step": 430
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.00019277628032345013,
      "loss": 1.2342,
      "step": 435
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.00019266846361185985,
      "loss": 1.2101,
      "step": 440
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.00019256064690026956,
      "loss": 1.233,
      "step": 445
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.00019245283018867927,
      "loss": 1.2401,
      "step": 450
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00019234501347708895,
      "loss": 1.2088,
      "step": 455
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00019223719676549864,
      "loss": 1.2205,
      "step": 460
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00019212938005390838,
      "loss": 1.218,
      "step": 465
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00019202156334231806,
      "loss": 1.2208,
      "step": 470
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00019191374663072777,
      "loss": 1.2783,
      "step": 475
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00019180592991913749,
      "loss": 1.1886,
      "step": 480
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00019169811320754717,
      "loss": 1.1939,
      "step": 485
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00019159029649595688,
      "loss": 1.2079,
      "step": 490
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00019148247978436657,
      "loss": 1.2473,
      "step": 495
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.0001913746630727763,
      "loss": 1.2295,
      "step": 500
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.000191266846361186,
      "loss": 1.2064,
      "step": 505
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00019115902964959568,
      "loss": 1.2138,
      "step": 510
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00019105121293800541,
      "loss": 1.2618,
      "step": 515
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.0001909433962264151,
      "loss": 1.2495,
      "step": 520
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.0001908355795148248,
      "loss": 1.2553,
      "step": 525
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.0001907277628032345,
      "loss": 1.2334,
      "step": 530
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.0001906199460916442,
      "loss": 1.2442,
      "step": 535
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.00019051212938005392,
      "loss": 1.2301,
      "step": 540
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.0001904043126684636,
      "loss": 1.2678,
      "step": 545
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00019029649595687334,
      "loss": 1.2116,
      "step": 550
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00019018867924528303,
      "loss": 1.2556,
      "step": 555
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00019008086253369274,
      "loss": 1.2427,
      "step": 560
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00018997304582210242,
      "loss": 1.2138,
      "step": 565
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00018986522911051214,
      "loss": 1.248,
      "step": 570
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00018975741239892185,
      "loss": 1.2415,
      "step": 575
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.00018964959568733153,
      "loss": 1.2303,
      "step": 580
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.00018954177897574125,
      "loss": 1.1939,
      "step": 585
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.00018943396226415096,
      "loss": 1.1927,
      "step": 590
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.00018932614555256064,
      "loss": 1.1982,
      "step": 595
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.00018921832884097035,
      "loss": 1.1859,
      "step": 600
    },
    {
      "epoch": 0.19,
      "eval_loss": 1.1948214769363403,
      "eval_runtime": 709.3061,
      "eval_samples_per_second": 7.049,
      "eval_steps_per_second": 0.881,
      "step": 600
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.00018911051212938007,
      "loss": 1.2265,
      "step": 605
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.00018900269541778978,
      "loss": 1.2402,
      "step": 610
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.00018889487870619946,
      "loss": 1.2261,
      "step": 615
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.00018878706199460917,
      "loss": 1.2264,
      "step": 620
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.00018867924528301889,
      "loss": 1.2044,
      "step": 625
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.00018857142857142857,
      "loss": 1.2203,
      "step": 630
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.00018846361185983828,
      "loss": 1.2258,
      "step": 635
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.000188355795148248,
      "loss": 1.2475,
      "step": 640
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.00018824797843665768,
      "loss": 1.239,
      "step": 645
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.0001881401617250674,
      "loss": 1.2374,
      "step": 650
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.0001880323450134771,
      "loss": 1.2472,
      "step": 655
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.00018792452830188681,
      "loss": 1.2411,
      "step": 660
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.0001878167115902965,
      "loss": 1.1916,
      "step": 665
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.0001877088948787062,
      "loss": 1.2043,
      "step": 670
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.00018760107816711592,
      "loss": 1.2087,
      "step": 675
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.0001874932614555256,
      "loss": 1.2147,
      "step": 680
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.00018738544474393532,
      "loss": 1.1999,
      "step": 685
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.00018727762803234503,
      "loss": 1.2031,
      "step": 690
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.00018716981132075472,
      "loss": 1.268,
      "step": 695
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.00018706199460916443,
      "loss": 1.2181,
      "step": 700
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.0001869541778975741,
      "loss": 1.2187,
      "step": 705
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.00018684636118598385,
      "loss": 1.2475,
      "step": 710
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.00018673854447439354,
      "loss": 1.2043,
      "step": 715
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.00018663072776280325,
      "loss": 1.1833,
      "step": 720
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.00018652291105121296,
      "loss": 1.1899,
      "step": 725
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.00018641509433962264,
      "loss": 1.232,
      "step": 730
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.00018630727762803236,
      "loss": 1.1885,
      "step": 735
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.00018619946091644204,
      "loss": 1.2087,
      "step": 740
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.00018609164420485178,
      "loss": 1.2433,
      "step": 745
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.00018598382749326146,
      "loss": 1.2233,
      "step": 750
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.00018587601078167115,
      "loss": 1.2348,
      "step": 755
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.0001857681940700809,
      "loss": 1.1667,
      "step": 760
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.00018566037735849057,
      "loss": 1.2952,
      "step": 765
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.00018555256064690029,
      "loss": 1.1939,
      "step": 770
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.00018544474393530997,
      "loss": 1.1646,
      "step": 775
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.00018533692722371968,
      "loss": 1.2055,
      "step": 780
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.0001852291105121294,
      "loss": 1.183,
      "step": 785
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.00018512129380053908,
      "loss": 1.2367,
      "step": 790
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.00018501347708894882,
      "loss": 1.2265,
      "step": 795
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.0001849056603773585,
      "loss": 1.2007,
      "step": 800
    },
    {
      "epoch": 0.26,
      "eval_loss": 1.184471845626831,
      "eval_runtime": 708.4336,
      "eval_samples_per_second": 7.058,
      "eval_steps_per_second": 0.882,
      "step": 800
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.0001847978436657682,
      "loss": 1.204,
      "step": 805
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.0001846900269541779,
      "loss": 1.2379,
      "step": 810
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.0001845822102425876,
      "loss": 1.157,
      "step": 815
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.00018447439353099732,
      "loss": 1.2072,
      "step": 820
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.000184366576819407,
      "loss": 1.1764,
      "step": 825
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.00018425876010781672,
      "loss": 1.21,
      "step": 830
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.00018415094339622643,
      "loss": 1.19,
      "step": 835
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.00018404312668463612,
      "loss": 1.1923,
      "step": 840
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.00018393530997304583,
      "loss": 1.2367,
      "step": 845
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.00018382749326145554,
      "loss": 1.2394,
      "step": 850
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.00018371967654986525,
      "loss": 1.2275,
      "step": 855
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.00018361185983827494,
      "loss": 1.2206,
      "step": 860
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.00018350404312668465,
      "loss": 1.2017,
      "step": 865
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.00018339622641509436,
      "loss": 1.2277,
      "step": 870
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.00018328840970350404,
      "loss": 1.1958,
      "step": 875
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.00018318059299191376,
      "loss": 1.2317,
      "step": 880
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.00018307277628032347,
      "loss": 1.2339,
      "step": 885
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.00018296495956873315,
      "loss": 1.2408,
      "step": 890
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.00018285714285714286,
      "loss": 1.1818,
      "step": 895
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.00018274932614555258,
      "loss": 1.2051,
      "step": 900
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.0001826415094339623,
      "loss": 1.2304,
      "step": 905
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.00018253369272237197,
      "loss": 1.1717,
      "step": 910
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.00018242587601078168,
      "loss": 1.1811,
      "step": 915
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.0001823180592991914,
      "loss": 1.2127,
      "step": 920
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.00018221024258760108,
      "loss": 1.2278,
      "step": 925
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.0001821024258760108,
      "loss": 1.223,
      "step": 930
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.0001819946091644205,
      "loss": 1.233,
      "step": 935
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.0001818867924528302,
      "loss": 1.2321,
      "step": 940
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.0001817789757412399,
      "loss": 1.1894,
      "step": 945
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.00018167115902964959,
      "loss": 1.2213,
      "step": 950
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.00018156334231805933,
      "loss": 1.2444,
      "step": 955
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.000181455525606469,
      "loss": 1.1988,
      "step": 960
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.00018134770889487872,
      "loss": 1.2039,
      "step": 965
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.00018123989218328843,
      "loss": 1.1972,
      "step": 970
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.00018113207547169812,
      "loss": 1.2146,
      "step": 975
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.00018102425876010783,
      "loss": 1.2346,
      "step": 980
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.00018091644204851751,
      "loss": 1.2283,
      "step": 985
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.00018080862533692723,
      "loss": 1.201,
      "step": 990
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.00018070080862533694,
      "loss": 1.2149,
      "step": 995
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.00018059299191374662,
      "loss": 1.2085,
      "step": 1000
    },
    {
      "epoch": 0.32,
      "eval_loss": 1.176331877708435,
      "eval_runtime": 709.1565,
      "eval_samples_per_second": 7.051,
      "eval_steps_per_second": 0.881,
      "step": 1000
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.00018048517520215636,
      "loss": 1.2484,
      "step": 1005
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.00018037735849056605,
      "loss": 1.2193,
      "step": 1010
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.00018026954177897576,
      "loss": 1.1733,
      "step": 1015
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.00018016172506738544,
      "loss": 1.2152,
      "step": 1020
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.00018005390835579516,
      "loss": 1.1688,
      "step": 1025
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.00017994609164420487,
      "loss": 1.1893,
      "step": 1030
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.00017983827493261455,
      "loss": 1.1483,
      "step": 1035
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.0001797304582210243,
      "loss": 1.189,
      "step": 1040
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.00017962264150943398,
      "loss": 1.1873,
      "step": 1045
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.00017951482479784366,
      "loss": 1.1883,
      "step": 1050
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.00017940700808625337,
      "loss": 1.1741,
      "step": 1055
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.00017929919137466308,
      "loss": 1.2546,
      "step": 1060
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.0001791913746630728,
      "loss": 1.2194,
      "step": 1065
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.00017908355795148248,
      "loss": 1.1945,
      "step": 1070
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.0001789757412398922,
      "loss": 1.2011,
      "step": 1075
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.0001788679245283019,
      "loss": 1.1836,
      "step": 1080
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.0001787601078167116,
      "loss": 1.2116,
      "step": 1085
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.0001786522911051213,
      "loss": 1.1983,
      "step": 1090
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.000178544474393531,
      "loss": 1.2095,
      "step": 1095
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.0001784366576819407,
      "loss": 1.2007,
      "step": 1100
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.0001783288409703504,
      "loss": 1.1936,
      "step": 1105
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.0001782210242587601,
      "loss": 1.2207,
      "step": 1110
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.00017811320754716983,
      "loss": 1.2016,
      "step": 1115
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.00017800539083557952,
      "loss": 1.2013,
      "step": 1120
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.00017789757412398923,
      "loss": 1.1794,
      "step": 1125
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.00017778975741239894,
      "loss": 1.235,
      "step": 1130
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.00017768194070080863,
      "loss": 1.1865,
      "step": 1135
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.00017757412398921834,
      "loss": 1.2159,
      "step": 1140
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.00017746630727762802,
      "loss": 1.1887,
      "step": 1145
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.00017735849056603776,
      "loss": 1.1923,
      "step": 1150
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.00017725067385444745,
      "loss": 1.1894,
      "step": 1155
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.00017714285714285713,
      "loss": 1.1863,
      "step": 1160
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.00017703504043126687,
      "loss": 1.2128,
      "step": 1165
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.00017692722371967655,
      "loss": 1.214,
      "step": 1170
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.00017681940700808627,
      "loss": 1.2083,
      "step": 1175
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.00017671159029649595,
      "loss": 1.2252,
      "step": 1180
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.00017660377358490566,
      "loss": 1.1818,
      "step": 1185
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.00017649595687331538,
      "loss": 1.2334,
      "step": 1190
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.00017638814016172506,
      "loss": 1.1752,
      "step": 1195
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.0001762803234501348,
      "loss": 1.2036,
      "step": 1200
    },
    {
      "epoch": 0.38,
      "eval_loss": 1.1710153818130493,
      "eval_runtime": 708.8537,
      "eval_samples_per_second": 7.054,
      "eval_steps_per_second": 0.882,
      "step": 1200
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.00017617250673854448,
      "loss": 1.2283,
      "step": 1205
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.0001760646900269542,
      "loss": 1.2464,
      "step": 1210
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.00017595687331536388,
      "loss": 1.2126,
      "step": 1215
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.0001758490566037736,
      "loss": 1.2217,
      "step": 1220
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.0001757412398921833,
      "loss": 1.2245,
      "step": 1225
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.000175633423180593,
      "loss": 1.1941,
      "step": 1230
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.0001755256064690027,
      "loss": 1.2041,
      "step": 1235
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.0001754177897574124,
      "loss": 1.2076,
      "step": 1240
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.0001753099730458221,
      "loss": 1.2154,
      "step": 1245
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.0001752021563342318,
      "loss": 1.2247,
      "step": 1250
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.00017509433962264152,
      "loss": 1.1822,
      "step": 1255
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.00017498652291105123,
      "loss": 1.2015,
      "step": 1260
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.00017487870619946092,
      "loss": 1.2111,
      "step": 1265
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.00017477088948787063,
      "loss": 1.1946,
      "step": 1270
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.00017466307277628034,
      "loss": 1.1365,
      "step": 1275
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.00017455525606469003,
      "loss": 1.1742,
      "step": 1280
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.00017444743935309974,
      "loss": 1.2442,
      "step": 1285
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.00017433962264150945,
      "loss": 1.2095,
      "step": 1290
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.00017423180592991913,
      "loss": 1.1727,
      "step": 1295
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.00017412398921832885,
      "loss": 1.1948,
      "step": 1300
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.00017401617250673856,
      "loss": 1.1737,
      "step": 1305
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.00017390835579514827,
      "loss": 1.198,
      "step": 1310
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.00017380053908355795,
      "loss": 1.2152,
      "step": 1315
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.00017369272237196767,
      "loss": 1.2061,
      "step": 1320
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.00017358490566037738,
      "loss": 1.1925,
      "step": 1325
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.00017347708894878706,
      "loss": 1.2067,
      "step": 1330
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.00017336927223719677,
      "loss": 1.1964,
      "step": 1335
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.0001732614555256065,
      "loss": 1.2062,
      "step": 1340
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.00017315363881401617,
      "loss": 1.2208,
      "step": 1345
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.00017304582210242588,
      "loss": 1.167,
      "step": 1350
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.00017293800539083557,
      "loss": 1.1802,
      "step": 1355
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.0001728301886792453,
      "loss": 1.2193,
      "step": 1360
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.000172722371967655,
      "loss": 1.2015,
      "step": 1365
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.0001726145552560647,
      "loss": 1.2183,
      "step": 1370
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.00017250673854447442,
      "loss": 1.1986,
      "step": 1375
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.0001723989218328841,
      "loss": 1.1575,
      "step": 1380
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.0001722911051212938,
      "loss": 1.1949,
      "step": 1385
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.0001721832884097035,
      "loss": 1.2103,
      "step": 1390
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.00017207547169811324,
      "loss": 1.2141,
      "step": 1395
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.00017196765498652292,
      "loss": 1.2134,
      "step": 1400
    },
    {
      "epoch": 0.45,
      "eval_loss": 1.1655155420303345,
      "eval_runtime": 709.3424,
      "eval_samples_per_second": 7.049,
      "eval_steps_per_second": 0.881,
      "step": 1400
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.0001718598382749326,
      "loss": 1.1573,
      "step": 1405
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.00017175202156334234,
      "loss": 1.1694,
      "step": 1410
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.00017164420485175203,
      "loss": 1.226,
      "step": 1415
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.00017153638814016174,
      "loss": 1.1758,
      "step": 1420
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.00017142857142857143,
      "loss": 1.185,
      "step": 1425
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.00017132075471698114,
      "loss": 1.1878,
      "step": 1430
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.00017121293800539085,
      "loss": 1.2108,
      "step": 1435
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.00017110512129380053,
      "loss": 1.254,
      "step": 1440
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.00017099730458221027,
      "loss": 1.2071,
      "step": 1445
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.00017088948787061996,
      "loss": 1.1914,
      "step": 1450
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.00017078167115902964,
      "loss": 1.1936,
      "step": 1455
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.00017067385444743935,
      "loss": 1.2369,
      "step": 1460
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.00017056603773584907,
      "loss": 1.1627,
      "step": 1465
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.00017045822102425878,
      "loss": 1.216,
      "step": 1470
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.00017035040431266846,
      "loss": 1.1931,
      "step": 1475
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.00017024258760107817,
      "loss": 1.1859,
      "step": 1480
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.00017013477088948789,
      "loss": 1.1769,
      "step": 1485
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.00017002695417789757,
      "loss": 1.1376,
      "step": 1490
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.00016991913746630728,
      "loss": 1.1692,
      "step": 1495
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.000169811320754717,
      "loss": 1.1948,
      "step": 1500
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.0001697035040431267,
      "loss": 1.183,
      "step": 1505
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.0001695956873315364,
      "loss": 1.1687,
      "step": 1510
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.0001694878706199461,
      "loss": 1.1754,
      "step": 1515
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.00016938005390835581,
      "loss": 1.2051,
      "step": 1520
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.0001692722371967655,
      "loss": 1.1862,
      "step": 1525
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.0001691644204851752,
      "loss": 1.2014,
      "step": 1530
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.00016905660377358492,
      "loss": 1.1992,
      "step": 1535
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.0001689487870619946,
      "loss": 1.2253,
      "step": 1540
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.00016884097035040432,
      "loss": 1.2125,
      "step": 1545
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.00016873315363881403,
      "loss": 1.1838,
      "step": 1550
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.00016862533692722374,
      "loss": 1.2048,
      "step": 1555
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.00016851752021563343,
      "loss": 1.1855,
      "step": 1560
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.0001684097035040431,
      "loss": 1.1646,
      "step": 1565
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.00016830188679245285,
      "loss": 1.2024,
      "step": 1570
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.00016819407008086254,
      "loss": 1.2162,
      "step": 1575
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.00016808625336927225,
      "loss": 1.1909,
      "step": 1580
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.00016797843665768196,
      "loss": 1.1807,
      "step": 1585
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.00016787061994609164,
      "loss": 1.1803,
      "step": 1590
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.00016776280323450136,
      "loss": 1.1894,
      "step": 1595
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.00016765498652291104,
      "loss": 1.1869,
      "step": 1600
    },
    {
      "epoch": 0.51,
      "eval_loss": 1.1610257625579834,
      "eval_runtime": 709.4343,
      "eval_samples_per_second": 7.048,
      "eval_steps_per_second": 0.881,
      "step": 1600
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.00016754716981132078,
      "loss": 1.1832,
      "step": 1605
    },
    {
      "epoch": 0.52,
      "learning_rate": 0.00016743935309973047,
      "loss": 1.1553,
      "step": 1610
    },
    {
      "epoch": 0.52,
      "learning_rate": 0.00016733153638814018,
      "loss": 1.1788,
      "step": 1615
    },
    {
      "epoch": 0.52,
      "learning_rate": 0.0001672237196765499,
      "loss": 1.1896,
      "step": 1620
    },
    {
      "epoch": 0.52,
      "learning_rate": 0.00016711590296495957,
      "loss": 1.1839,
      "step": 1625
    },
    {
      "epoch": 0.52,
      "learning_rate": 0.00016700808625336929,
      "loss": 1.2063,
      "step": 1630
    },
    {
      "epoch": 0.52,
      "learning_rate": 0.00016690026954177897,
      "loss": 1.1756,
      "step": 1635
    },
    {
      "epoch": 0.52,
      "learning_rate": 0.00016679245283018868,
      "loss": 1.1903,
      "step": 1640
    },
    {
      "epoch": 0.53,
      "learning_rate": 0.0001666846361185984,
      "loss": 1.1901,
      "step": 1645
    },
    {
      "epoch": 0.53,
      "learning_rate": 0.00016657681940700808,
      "loss": 1.1852,
      "step": 1650
    },
    {
      "epoch": 0.53,
      "learning_rate": 0.00016646900269541782,
      "loss": 1.1824,
      "step": 1655
    },
    {
      "epoch": 0.53,
      "learning_rate": 0.0001663611859838275,
      "loss": 1.1514,
      "step": 1660
    },
    {
      "epoch": 0.53,
      "learning_rate": 0.00016625336927223721,
      "loss": 1.1835,
      "step": 1665
    },
    {
      "epoch": 0.53,
      "learning_rate": 0.0001661455525606469,
      "loss": 1.1564,
      "step": 1670
    },
    {
      "epoch": 0.54,
      "learning_rate": 0.0001660377358490566,
      "loss": 1.2315,
      "step": 1675
    },
    {
      "epoch": 0.54,
      "learning_rate": 0.00016592991913746632,
      "loss": 1.2016,
      "step": 1680
    },
    {
      "epoch": 0.54,
      "learning_rate": 0.000165822102425876,
      "loss": 1.2058,
      "step": 1685
    },
    {
      "epoch": 0.54,
      "learning_rate": 0.00016571428571428575,
      "loss": 1.2109,
      "step": 1690
    },
    {
      "epoch": 0.54,
      "learning_rate": 0.00016560646900269543,
      "loss": 1.2114,
      "step": 1695
    },
    {
      "epoch": 0.54,
      "learning_rate": 0.00016549865229110512,
      "loss": 1.1754,
      "step": 1700
    },
    {
      "epoch": 0.55,
      "learning_rate": 0.00016539083557951483,
      "loss": 1.1659,
      "step": 1705
    },
    {
      "epoch": 0.55,
      "learning_rate": 0.00016528301886792454,
      "loss": 1.2003,
      "step": 1710
    },
    {
      "epoch": 0.55,
      "learning_rate": 0.00016517520215633425,
      "loss": 1.2209,
      "step": 1715
    },
    {
      "epoch": 0.55,
      "learning_rate": 0.00016506738544474394,
      "loss": 1.173,
      "step": 1720
    },
    {
      "epoch": 0.55,
      "learning_rate": 0.00016495956873315365,
      "loss": 1.2318,
      "step": 1725
    },
    {
      "epoch": 0.55,
      "learning_rate": 0.00016485175202156336,
      "loss": 1.2312,
      "step": 1730
    },
    {
      "epoch": 0.56,
      "learning_rate": 0.00016474393530997304,
      "loss": 1.1828,
      "step": 1735
    },
    {
      "epoch": 0.56,
      "learning_rate": 0.00016463611859838276,
      "loss": 1.1994,
      "step": 1740
    },
    {
      "epoch": 0.56,
      "learning_rate": 0.00016452830188679247,
      "loss": 1.1953,
      "step": 1745
    },
    {
      "epoch": 0.56,
      "learning_rate": 0.00016442048517520215,
      "loss": 1.1821,
      "step": 1750
    },
    {
      "epoch": 0.56,
      "learning_rate": 0.00016431266846361186,
      "loss": 1.2484,
      "step": 1755
    },
    {
      "epoch": 0.56,
      "learning_rate": 0.00016420485175202155,
      "loss": 1.1838,
      "step": 1760
    },
    {
      "epoch": 0.56,
      "learning_rate": 0.0001640970350404313,
      "loss": 1.1851,
      "step": 1765
    },
    {
      "epoch": 0.57,
      "learning_rate": 0.00016398921832884097,
      "loss": 1.2036,
      "step": 1770
    },
    {
      "epoch": 0.57,
      "learning_rate": 0.00016388140161725068,
      "loss": 1.2104,
      "step": 1775
    },
    {
      "epoch": 0.57,
      "learning_rate": 0.0001637735849056604,
      "loss": 1.1789,
      "step": 1780
    },
    {
      "epoch": 0.57,
      "learning_rate": 0.00016366576819407008,
      "loss": 1.2007,
      "step": 1785
    },
    {
      "epoch": 0.57,
      "learning_rate": 0.0001635579514824798,
      "loss": 1.1963,
      "step": 1790
    },
    {
      "epoch": 0.57,
      "learning_rate": 0.00016345013477088948,
      "loss": 1.1817,
      "step": 1795
    },
    {
      "epoch": 0.58,
      "learning_rate": 0.00016334231805929922,
      "loss": 1.2017,
      "step": 1800
    },
    {
      "epoch": 0.58,
      "eval_loss": 1.157345175743103,
      "eval_runtime": 708.5342,
      "eval_samples_per_second": 7.057,
      "eval_steps_per_second": 0.882,
      "step": 1800
    },
    {
      "epoch": 0.58,
      "learning_rate": 0.0001632345013477089,
      "loss": 1.1706,
      "step": 1805
    },
    {
      "epoch": 0.58,
      "learning_rate": 0.0001631266846361186,
      "loss": 1.2104,
      "step": 1810
    },
    {
      "epoch": 0.58,
      "learning_rate": 0.00016301886792452833,
      "loss": 1.1855,
      "step": 1815
    },
    {
      "epoch": 0.58,
      "learning_rate": 0.000162911051212938,
      "loss": 1.1485,
      "step": 1820
    },
    {
      "epoch": 0.58,
      "learning_rate": 0.00016280323450134772,
      "loss": 1.1951,
      "step": 1825
    },
    {
      "epoch": 0.59,
      "learning_rate": 0.0001626954177897574,
      "loss": 1.1655,
      "step": 1830
    },
    {
      "epoch": 0.59,
      "learning_rate": 0.00016258760107816712,
      "loss": 1.153,
      "step": 1835
    },
    {
      "epoch": 0.59,
      "learning_rate": 0.00016247978436657683,
      "loss": 1.1635,
      "step": 1840
    },
    {
      "epoch": 0.59,
      "learning_rate": 0.00016237196765498652,
      "loss": 1.2176,
      "step": 1845
    },
    {
      "epoch": 0.59,
      "learning_rate": 0.00016226415094339625,
      "loss": 1.1589,
      "step": 1850
    },
    {
      "epoch": 0.59,
      "learning_rate": 0.00016215633423180594,
      "loss": 1.2518,
      "step": 1855
    },
    {
      "epoch": 0.6,
      "learning_rate": 0.00016204851752021562,
      "loss": 1.1931,
      "step": 1860
    },
    {
      "epoch": 0.6,
      "learning_rate": 0.00016194070080862534,
      "loss": 1.1886,
      "step": 1865
    },
    {
      "epoch": 0.6,
      "learning_rate": 0.00016183288409703505,
      "loss": 1.1653,
      "step": 1870
    },
    {
      "epoch": 0.6,
      "learning_rate": 0.00016172506738544476,
      "loss": 1.1975,
      "step": 1875
    },
    {
      "epoch": 0.6,
      "learning_rate": 0.00016161725067385444,
      "loss": 1.2009,
      "step": 1880
    },
    {
      "epoch": 0.6,
      "learning_rate": 0.00016150943396226416,
      "loss": 1.2339,
      "step": 1885
    },
    {
      "epoch": 0.6,
      "learning_rate": 0.00016140161725067387,
      "loss": 1.1958,
      "step": 1890
    },
    {
      "epoch": 0.61,
      "learning_rate": 0.00016129380053908355,
      "loss": 1.1811,
      "step": 1895
    },
    {
      "epoch": 0.61,
      "learning_rate": 0.00016118598382749326,
      "loss": 1.1835,
      "step": 1900
    },
    {
      "epoch": 0.61,
      "learning_rate": 0.00016107816711590298,
      "loss": 1.1767,
      "step": 1905
    },
    {
      "epoch": 0.61,
      "learning_rate": 0.0001609703504043127,
      "loss": 1.1796,
      "step": 1910
    },
    {
      "epoch": 0.61,
      "learning_rate": 0.00016086253369272237,
      "loss": 1.2022,
      "step": 1915
    },
    {
      "epoch": 0.61,
      "learning_rate": 0.00016075471698113208,
      "loss": 1.1926,
      "step": 1920
    },
    {
      "epoch": 0.62,
      "learning_rate": 0.0001606469002695418,
      "loss": 1.1882,
      "step": 1925
    },
    {
      "epoch": 0.62,
      "learning_rate": 0.00016053908355795148,
      "loss": 1.1504,
      "step": 1930
    },
    {
      "epoch": 0.62,
      "learning_rate": 0.0001604312668463612,
      "loss": 1.1927,
      "step": 1935
    },
    {
      "epoch": 0.62,
      "learning_rate": 0.0001603234501347709,
      "loss": 1.1601,
      "step": 1940
    },
    {
      "epoch": 0.62,
      "learning_rate": 0.0001602156334231806,
      "loss": 1.1726,
      "step": 1945
    },
    {
      "epoch": 0.62,
      "learning_rate": 0.0001601078167115903,
      "loss": 1.2053,
      "step": 1950
    },
    {
      "epoch": 0.63,
      "learning_rate": 0.00016,
      "loss": 1.1919,
      "step": 1955
    },
    {
      "epoch": 0.63,
      "learning_rate": 0.00015989218328840972,
      "loss": 1.1814,
      "step": 1960
    },
    {
      "epoch": 0.63,
      "learning_rate": 0.0001597843665768194,
      "loss": 1.1838,
      "step": 1965
    },
    {
      "epoch": 0.63,
      "learning_rate": 0.00015967654986522912,
      "loss": 1.1954,
      "step": 1970
    },
    {
      "epoch": 0.63,
      "learning_rate": 0.00015956873315363883,
      "loss": 1.196,
      "step": 1975
    },
    {
      "epoch": 0.63,
      "learning_rate": 0.00015946091644204852,
      "loss": 1.1877,
      "step": 1980
    },
    {
      "epoch": 0.64,
      "learning_rate": 0.00015935309973045823,
      "loss": 1.1853,
      "step": 1985
    },
    {
      "epoch": 0.64,
      "learning_rate": 0.00015924528301886794,
      "loss": 1.197,
      "step": 1990
    },
    {
      "epoch": 0.64,
      "learning_rate": 0.00015913746630727763,
      "loss": 1.1484,
      "step": 1995
    },
    {
      "epoch": 0.64,
      "learning_rate": 0.00015902964959568734,
      "loss": 1.2139,
      "step": 2000
    },
    {
      "epoch": 0.64,
      "eval_loss": 1.153731107711792,
      "eval_runtime": 709.6014,
      "eval_samples_per_second": 7.046,
      "eval_steps_per_second": 0.881,
      "step": 2000
    }
  ],
  "max_steps": 9375,
  "num_train_epochs": 3,
  "total_flos": 1.19119709995008e+17,
  "trial_name": null,
  "trial_params": null
}