{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.208313194780087,
  "eval_steps": 500,
  "global_step": 5000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "grad_norm": NaN,
      "learning_rate": 2.0000000000000002e-07,
      "loss": 2.0414,
      "step": 10
    },
    {
      "epoch": 0.0,
      "grad_norm": 53.515953063964844,
      "learning_rate": 6.5e-07,
      "loss": 1.8355,
      "step": 20
    },
    {
      "epoch": 0.01,
      "grad_norm": 27.920907974243164,
      "learning_rate": 1.15e-06,
      "loss": 1.5172,
      "step": 30
    },
    {
      "epoch": 0.01,
      "grad_norm": 20.47172737121582,
      "learning_rate": 1.65e-06,
      "loss": 1.2654,
      "step": 40
    },
    {
      "epoch": 0.01,
      "grad_norm": 8.903529167175293,
      "learning_rate": 2.1499999999999997e-06,
      "loss": 1.1095,
      "step": 50
    },
    {
      "epoch": 0.01,
      "grad_norm": 5.902841567993164,
      "learning_rate": 2.65e-06,
      "loss": 1.0017,
      "step": 60
    },
    {
      "epoch": 0.02,
      "grad_norm": 2.9340908527374268,
      "learning_rate": 3.1500000000000003e-06,
      "loss": 0.8846,
      "step": 70
    },
    {
      "epoch": 0.02,
      "grad_norm": 8.701367378234863,
      "learning_rate": 3.6499999999999998e-06,
      "loss": 0.8572,
      "step": 80
    },
    {
      "epoch": 0.02,
      "grad_norm": 1.3205362558364868,
      "learning_rate": 4.15e-06,
      "loss": 0.839,
      "step": 90
    },
    {
      "epoch": 0.02,
      "grad_norm": 3.0334558486938477,
      "learning_rate": 4.65e-06,
      "loss": 0.7838,
      "step": 100
    },
    {
      "epoch": 0.03,
      "grad_norm": 1.8086520433425903,
      "learning_rate": 5.15e-06,
      "loss": 0.7782,
      "step": 110
    },
    {
      "epoch": 0.03,
      "grad_norm": 29.50135040283203,
      "learning_rate": 5.65e-06,
      "loss": 0.7883,
      "step": 120
    },
    {
      "epoch": 0.03,
      "grad_norm": 3.2708683013916016,
      "learning_rate": 6.15e-06,
      "loss": 0.7961,
      "step": 130
    },
    {
      "epoch": 0.03,
      "grad_norm": 33.43790817260742,
      "learning_rate": 6.650000000000001e-06,
      "loss": 0.7626,
      "step": 140
    },
    {
      "epoch": 0.04,
      "grad_norm": 1.2334959506988525,
      "learning_rate": 7.15e-06,
      "loss": 0.7732,
      "step": 150
    },
    {
      "epoch": 0.04,
      "grad_norm": 1.7489686012268066,
      "learning_rate": 7.65e-06,
      "loss": 0.7715,
      "step": 160
    },
    {
      "epoch": 0.04,
      "grad_norm": 3.358823776245117,
      "learning_rate": 8.15e-06,
      "loss": 0.7778,
      "step": 170
    },
    {
      "epoch": 0.04,
      "grad_norm": 4.348245620727539,
      "learning_rate": 8.65e-06,
      "loss": 0.7619,
      "step": 180
    },
    {
      "epoch": 0.05,
      "grad_norm": 0.9023125767707825,
      "learning_rate": 9.15e-06,
      "loss": 0.7746,
      "step": 190
    },
    {
      "epoch": 0.05,
      "grad_norm": 0.9158114790916443,
      "learning_rate": 9.65e-06,
      "loss": 0.7592,
      "step": 200
    },
    {
      "epoch": 0.05,
      "grad_norm": 0.6256226301193237,
      "learning_rate": 1.0150000000000001e-05,
      "loss": 0.7787,
      "step": 210
    },
    {
      "epoch": 0.05,
      "grad_norm": 0.3805778920650482,
      "learning_rate": 1.065e-05,
      "loss": 0.7811,
      "step": 220
    },
    {
      "epoch": 0.06,
      "grad_norm": 0.6968041062355042,
      "learning_rate": 1.115e-05,
      "loss": 0.73,
      "step": 230
    },
    {
      "epoch": 0.06,
      "grad_norm": 0.9035410284996033,
      "learning_rate": 1.1650000000000002e-05,
      "loss": 0.7518,
      "step": 240
    },
    {
      "epoch": 0.06,
      "grad_norm": 1.6727488040924072,
      "learning_rate": 1.215e-05,
      "loss": 0.7805,
      "step": 250
    },
    {
      "epoch": 0.06,
      "grad_norm": 0.4153461158275604,
      "learning_rate": 1.2650000000000001e-05,
      "loss": 0.7706,
      "step": 260
    },
    {
      "epoch": 0.07,
      "grad_norm": 1.4846651554107666,
      "learning_rate": 1.3150000000000001e-05,
      "loss": 0.7492,
      "step": 270
    },
    {
      "epoch": 0.07,
      "grad_norm": 2.6631388664245605,
      "learning_rate": 1.3650000000000001e-05,
      "loss": 0.7681,
      "step": 280
    },
    {
      "epoch": 0.07,
      "grad_norm": 0.6325013041496277,
      "learning_rate": 1.415e-05,
      "loss": 0.7753,
      "step": 290
    },
    {
      "epoch": 0.07,
      "grad_norm": 0.877907395362854,
      "learning_rate": 1.465e-05,
      "loss": 0.7188,
      "step": 300
    },
    {
      "epoch": 0.07,
      "grad_norm": 0.37142279744148254,
      "learning_rate": 1.515e-05,
      "loss": 0.7204,
      "step": 310
    },
    {
      "epoch": 0.08,
      "grad_norm": 0.7337246537208557,
      "learning_rate": 1.565e-05,
      "loss": 0.778,
      "step": 320
    },
    {
      "epoch": 0.08,
      "grad_norm": 0.5847220420837402,
      "learning_rate": 1.6150000000000003e-05,
      "loss": 0.7288,
      "step": 330
    },
    {
      "epoch": 0.08,
      "grad_norm": 3.745180606842041,
      "learning_rate": 1.665e-05,
      "loss": 0.7531,
      "step": 340
    },
    {
      "epoch": 0.08,
      "grad_norm": 0.357301265001297,
      "learning_rate": 1.7150000000000004e-05,
      "loss": 0.7448,
      "step": 350
    },
    {
      "epoch": 0.09,
      "grad_norm": 0.9032486081123352,
      "learning_rate": 1.765e-05,
      "loss": 0.7651,
      "step": 360
    },
    {
      "epoch": 0.09,
      "grad_norm": 0.2864232361316681,
      "learning_rate": 1.815e-05,
      "loss": 0.7193,
      "step": 370
    },
    {
      "epoch": 0.09,
      "grad_norm": 1.8560261726379395,
      "learning_rate": 1.865e-05,
      "loss": 0.7421,
      "step": 380
    },
    {
      "epoch": 0.09,
      "grad_norm": 0.2971792221069336,
      "learning_rate": 1.915e-05,
      "loss": 0.7171,
      "step": 390
    },
    {
      "epoch": 0.1,
      "grad_norm": 0.40850627422332764,
      "learning_rate": 1.9650000000000003e-05,
      "loss": 0.7459,
      "step": 400
    },
    {
      "epoch": 0.1,
      "grad_norm": 0.3934139311313629,
      "learning_rate": 2.0150000000000002e-05,
      "loss": 0.7205,
      "step": 410
    },
    {
      "epoch": 0.1,
      "grad_norm": 1.8674131631851196,
      "learning_rate": 2.065e-05,
      "loss": 0.752,
      "step": 420
    },
    {
      "epoch": 0.1,
      "grad_norm": 0.36707818508148193,
      "learning_rate": 2.115e-05,
      "loss": 0.7494,
      "step": 430
    },
    {
      "epoch": 0.11,
      "grad_norm": 0.5044310092926025,
      "learning_rate": 2.165e-05,
      "loss": 0.7595,
      "step": 440
    },
    {
      "epoch": 0.11,
      "grad_norm": 1.9025150537490845,
      "learning_rate": 2.215e-05,
      "loss": 0.7379,
      "step": 450
    },
    {
      "epoch": 0.11,
      "grad_norm": 0.5004140138626099,
      "learning_rate": 2.265e-05,
      "loss": 0.7886,
      "step": 460
    },
    {
      "epoch": 0.11,
      "grad_norm": 3.544482707977295,
      "learning_rate": 2.3150000000000004e-05,
      "loss": 0.7259,
      "step": 470
    },
    {
      "epoch": 0.12,
      "grad_norm": 0.33023321628570557,
      "learning_rate": 2.365e-05,
      "loss": 0.7333,
      "step": 480
    },
    {
      "epoch": 0.12,
      "grad_norm": 0.3548080325126648,
      "learning_rate": 2.415e-05,
      "loss": 0.7527,
      "step": 490
    },
    {
      "epoch": 0.12,
      "grad_norm": 0.42450150847435,
      "learning_rate": 2.465e-05,
      "loss": 0.7443,
      "step": 500
    },
    {
      "epoch": 0.12,
      "eval_loss": 0.7428915500640869,
      "eval_runtime": 68.3143,
      "eval_samples_per_second": 29.276,
      "eval_steps_per_second": 0.922,
      "step": 500
    },
    {
      "epoch": 0.12,
      "grad_norm": 0.3299656808376312,
      "learning_rate": 2.515e-05,
      "loss": 0.7447,
      "step": 510
    },
    {
      "epoch": 0.13,
      "grad_norm": 0.27258285880088806,
      "learning_rate": 2.5650000000000003e-05,
      "loss": 0.7278,
      "step": 520
    },
    {
      "epoch": 0.13,
      "grad_norm": 0.23753009736537933,
      "learning_rate": 2.6150000000000002e-05,
      "loss": 0.7721,
      "step": 530
    },
    {
      "epoch": 0.13,
      "grad_norm": 1.1074901819229126,
      "learning_rate": 2.6650000000000004e-05,
      "loss": 0.7383,
      "step": 540
    },
    {
      "epoch": 0.13,
      "grad_norm": 0.428067147731781,
      "learning_rate": 2.7150000000000003e-05,
      "loss": 0.7508,
      "step": 550
    },
    {
      "epoch": 0.14,
      "grad_norm": 0.35956478118896484,
      "learning_rate": 2.7650000000000005e-05,
      "loss": 0.7461,
      "step": 560
    },
    {
      "epoch": 0.14,
      "grad_norm": 0.23017314076423645,
      "learning_rate": 2.815e-05,
      "loss": 0.7463,
      "step": 570
    },
    {
      "epoch": 0.14,
      "grad_norm": 0.20370900630950928,
      "learning_rate": 2.865e-05,
      "loss": 0.7405,
      "step": 580
    },
    {
      "epoch": 0.14,
      "grad_norm": 0.24586541950702667,
      "learning_rate": 2.915e-05,
      "loss": 0.6964,
      "step": 590
    },
    {
      "epoch": 0.14,
      "grad_norm": 0.27857884764671326,
      "learning_rate": 2.965e-05,
      "loss": 0.7376,
      "step": 600
    },
    {
      "epoch": 0.15,
      "grad_norm": 0.2655833065509796,
      "learning_rate": 3.015e-05,
      "loss": 0.7496,
      "step": 610
    },
    {
      "epoch": 0.15,
      "grad_norm": 0.17174670100212097,
      "learning_rate": 3.065e-05,
      "loss": 0.7219,
      "step": 620
    },
    {
      "epoch": 0.15,
      "grad_norm": 0.22892743349075317,
      "learning_rate": 3.115e-05,
      "loss": 0.7111,
      "step": 630
    },
    {
      "epoch": 0.15,
      "grad_norm": 0.5913789868354797,
      "learning_rate": 3.1650000000000004e-05,
      "loss": 0.7097,
      "step": 640
    },
    {
      "epoch": 0.16,
      "grad_norm": 0.25620004534721375,
      "learning_rate": 3.215e-05,
      "loss": 0.7306,
      "step": 650
    },
    {
      "epoch": 0.16,
      "grad_norm": 0.3311476409435272,
      "learning_rate": 3.265e-05,
      "loss": 0.7406,
      "step": 660
    },
    {
      "epoch": 0.16,
      "grad_norm": 0.4708893597126007,
      "learning_rate": 3.3150000000000006e-05,
      "loss": 0.7071,
      "step": 670
    },
    {
      "epoch": 0.16,
      "grad_norm": 0.22469288110733032,
      "learning_rate": 3.3650000000000005e-05,
      "loss": 0.7116,
      "step": 680
    },
    {
      "epoch": 0.17,
      "grad_norm": 0.2909330725669861,
      "learning_rate": 3.415e-05,
      "loss": 0.7592,
      "step": 690
    },
    {
      "epoch": 0.17,
      "grad_norm": 0.21274766325950623,
      "learning_rate": 3.465e-05,
      "loss": 0.7144,
      "step": 700
    },
    {
      "epoch": 0.17,
      "grad_norm": 0.23929230868816376,
      "learning_rate": 3.515e-05,
      "loss": 0.7382,
      "step": 710
    },
    {
      "epoch": 0.17,
      "grad_norm": 0.3021218180656433,
      "learning_rate": 3.565e-05,
      "loss": 0.7287,
      "step": 720
    },
    {
      "epoch": 0.18,
      "grad_norm": 0.23557321727275848,
      "learning_rate": 3.615e-05,
      "loss": 0.7475,
      "step": 730
    },
    {
      "epoch": 0.18,
      "grad_norm": 0.22648084163665771,
      "learning_rate": 3.665e-05,
      "loss": 0.7457,
      "step": 740
    },
    {
      "epoch": 0.18,
      "grad_norm": 0.2707761824131012,
      "learning_rate": 3.715e-05,
      "loss": 0.7322,
      "step": 750
    },
    {
      "epoch": 0.18,
      "grad_norm": 0.2799661457538605,
      "learning_rate": 3.765e-05,
      "loss": 0.7682,
      "step": 760
    },
    {
      "epoch": 0.19,
      "grad_norm": 0.326861172914505,
      "learning_rate": 3.8150000000000006e-05,
      "loss": 0.73,
      "step": 770
    },
    {
      "epoch": 0.19,
      "grad_norm": 0.31985723972320557,
      "learning_rate": 3.8650000000000004e-05,
      "loss": 0.744,
      "step": 780
    },
    {
      "epoch": 0.19,
      "grad_norm": 0.21637533605098724,
      "learning_rate": 3.915e-05,
      "loss": 0.7161,
      "step": 790
    },
    {
      "epoch": 0.19,
      "grad_norm": 0.26102888584136963,
      "learning_rate": 3.965e-05,
      "loss": 0.7553,
      "step": 800
    },
    {
      "epoch": 0.2,
      "grad_norm": 0.2469174712896347,
      "learning_rate": 4.015000000000001e-05,
      "loss": 0.7444,
      "step": 810
    },
    {
      "epoch": 0.2,
      "grad_norm": 0.2022310495376587,
      "learning_rate": 4.065e-05,
      "loss": 0.7089,
      "step": 820
    },
    {
      "epoch": 0.2,
      "grad_norm": 0.1729898750782013,
      "learning_rate": 4.115e-05,
      "loss": 0.7122,
      "step": 830
    },
    {
      "epoch": 0.2,
      "grad_norm": 0.22450707852840424,
      "learning_rate": 4.165e-05,
      "loss": 0.7099,
      "step": 840
    },
    {
      "epoch": 0.21,
      "grad_norm": 0.22389698028564453,
      "learning_rate": 4.215e-05,
      "loss": 0.7445,
      "step": 850
    },
    {
      "epoch": 0.21,
      "grad_norm": 0.24962696433067322,
      "learning_rate": 4.265e-05,
      "loss": 0.7052,
      "step": 860
    },
    {
      "epoch": 0.21,
      "grad_norm": 0.23780404031276703,
      "learning_rate": 4.315e-05,
      "loss": 0.7423,
      "step": 870
    },
    {
      "epoch": 0.21,
      "grad_norm": 0.2434270679950714,
      "learning_rate": 4.3650000000000004e-05,
      "loss": 0.7291,
      "step": 880
    },
    {
      "epoch": 0.22,
      "grad_norm": 0.3332350552082062,
      "learning_rate": 4.415e-05,
      "loss": 0.7306,
      "step": 890
    },
    {
      "epoch": 0.22,
      "grad_norm": 0.5084495544433594,
      "learning_rate": 4.465e-05,
      "loss": 0.735,
      "step": 900
    },
    {
      "epoch": 0.22,
      "grad_norm": 0.27552103996276855,
      "learning_rate": 4.5150000000000006e-05,
      "loss": 0.7082,
      "step": 910
    },
    {
      "epoch": 0.22,
      "grad_norm": 0.2184416800737381,
      "learning_rate": 4.5650000000000005e-05,
      "loss": 0.7066,
      "step": 920
    },
    {
      "epoch": 0.22,
      "grad_norm": 0.21738290786743164,
      "learning_rate": 4.6150000000000004e-05,
      "loss": 0.6977,
      "step": 930
    },
    {
      "epoch": 0.23,
      "grad_norm": 0.21888123452663422,
      "learning_rate": 4.665e-05,
      "loss": 0.7083,
      "step": 940
    },
    {
      "epoch": 0.23,
      "grad_norm": 0.2516074776649475,
      "learning_rate": 4.715e-05,
      "loss": 0.7451,
      "step": 950
    },
    {
      "epoch": 0.23,
      "grad_norm": 0.39361557364463806,
      "learning_rate": 4.765e-05,
      "loss": 0.7013,
      "step": 960
    },
    {
      "epoch": 0.23,
      "grad_norm": 0.2461744099855423,
      "learning_rate": 4.815e-05,
      "loss": 0.7325,
      "step": 970
    },
    {
      "epoch": 0.24,
      "grad_norm": 0.2554089426994324,
      "learning_rate": 4.8650000000000003e-05,
      "loss": 0.7384,
      "step": 980
    },
    {
      "epoch": 0.24,
      "grad_norm": 0.2717374861240387,
      "learning_rate": 4.915e-05,
      "loss": 0.7172,
      "step": 990
    },
    {
      "epoch": 0.24,
      "grad_norm": 0.35919347405433655,
      "learning_rate": 4.965e-05,
      "loss": 0.6851,
      "step": 1000
    },
    {
      "epoch": 0.24,
      "eval_loss": 0.7169972658157349,
      "eval_runtime": 67.4497,
      "eval_samples_per_second": 29.652,
      "eval_steps_per_second": 0.934,
      "step": 1000
    },
    {
      "epoch": 0.24,
      "grad_norm": 0.19895273447036743,
      "learning_rate": 4.999999713608037e-05,
      "loss": 0.7104,
      "step": 1010
    },
    {
      "epoch": 0.25,
      "grad_norm": 0.21741873025894165,
      "learning_rate": 4.999994622197174e-05,
      "loss": 0.7217,
      "step": 1020
    },
    {
      "epoch": 0.25,
      "grad_norm": 0.3415576219558716,
      "learning_rate": 4.999983166535371e-05,
      "loss": 0.716,
      "step": 1030
    },
    {
      "epoch": 0.25,
      "grad_norm": 0.22503221035003662,
      "learning_rate": 4.99996534665179e-05,
      "loss": 0.7166,
      "step": 1040
    },
    {
      "epoch": 0.25,
      "grad_norm": 0.22469323873519897,
      "learning_rate": 4.999941162591795e-05,
      "loss": 0.7173,
      "step": 1050
    },
    {
      "epoch": 0.26,
      "grad_norm": 0.24848666787147522,
      "learning_rate": 4.999910614416952e-05,
      "loss": 0.7442,
      "step": 1060
    },
    {
      "epoch": 0.26,
      "grad_norm": 0.23361551761627197,
      "learning_rate": 4.999873702205027e-05,
      "loss": 0.7142,
      "step": 1070
    },
    {
      "epoch": 0.26,
      "grad_norm": 0.2513304054737091,
      "learning_rate": 4.999830426049987e-05,
      "loss": 0.6998,
      "step": 1080
    },
    {
      "epoch": 0.26,
      "grad_norm": 0.2846020758152008,
      "learning_rate": 4.999780786062003e-05,
      "loss": 0.6967,
      "step": 1090
    },
    {
      "epoch": 0.27,
      "grad_norm": 0.2192545235157013,
      "learning_rate": 4.999724782367441e-05,
      "loss": 0.7259,
      "step": 1100
    },
    {
      "epoch": 0.27,
      "grad_norm": 0.37123605608940125,
      "learning_rate": 4.999662415108872e-05,
      "loss": 0.7228,
      "step": 1110
    },
    {
      "epoch": 0.27,
      "grad_norm": 0.3963713049888611,
      "learning_rate": 4.999593684445063e-05,
      "loss": 0.6848,
      "step": 1120
    },
    {
      "epoch": 0.27,
      "grad_norm": 0.24653227627277374,
      "learning_rate": 4.9995185905509836e-05,
      "loss": 0.7181,
      "step": 1130
    },
    {
      "epoch": 0.28,
      "grad_norm": 0.25218069553375244,
      "learning_rate": 4.999437133617799e-05,
      "loss": 0.7263,
      "step": 1140
    },
    {
      "epoch": 0.28,
      "grad_norm": 0.20412948727607727,
      "learning_rate": 4.9993493138528765e-05,
      "loss": 0.6965,
      "step": 1150
    },
    {
      "epoch": 0.28,
      "grad_norm": 0.21635311841964722,
      "learning_rate": 4.9992551314797775e-05,
      "loss": 0.7042,
      "step": 1160
    },
    {
      "epoch": 0.28,
      "grad_norm": 0.21901027858257294,
      "learning_rate": 4.999154586738264e-05,
      "loss": 0.6967,
      "step": 1170
    },
    {
      "epoch": 0.29,
      "grad_norm": 0.24104245007038116,
      "learning_rate": 4.9990476798842935e-05,
      "loss": 0.7009,
      "step": 1180
    },
    {
      "epoch": 0.29,
      "grad_norm": 0.2433982491493225,
      "learning_rate": 4.998934411190018e-05,
      "loss": 0.7052,
      "step": 1190
    },
    {
      "epoch": 0.29,
      "grad_norm": 0.5261262655258179,
      "learning_rate": 4.9988147809437876e-05,
      "loss": 0.7103,
      "step": 1200
    },
    {
      "epoch": 0.29,
      "grad_norm": 0.36266064643859863,
      "learning_rate": 4.998688789450146e-05,
      "loss": 0.7028,
      "step": 1210
    },
    {
      "epoch": 0.29,
      "grad_norm": 0.2509474754333496,
      "learning_rate": 4.9985564370298274e-05,
      "loss": 0.7047,
      "step": 1220
    },
    {
      "epoch": 0.3,
      "grad_norm": 0.1925598382949829,
      "learning_rate": 4.9984177240197665e-05,
      "loss": 0.6943,
      "step": 1230
    },
    {
      "epoch": 0.3,
      "grad_norm": 0.22494031488895416,
      "learning_rate": 4.998272650773083e-05,
      "loss": 0.6953,
      "step": 1240
    },
    {
      "epoch": 0.3,
      "grad_norm": 0.24152888357639313,
      "learning_rate": 4.998121217659092e-05,
      "loss": 0.7181,
      "step": 1250
    },
    {
      "epoch": 0.3,
      "grad_norm": 0.24296504259109497,
      "learning_rate": 4.997963425063297e-05,
      "loss": 0.6934,
      "step": 1260
    },
    {
      "epoch": 0.31,
      "grad_norm": 0.2438274323940277,
      "learning_rate": 4.9977992733873906e-05,
      "loss": 0.7088,
      "step": 1270
    },
    {
      "epoch": 0.31,
      "grad_norm": 0.21081580221652985,
      "learning_rate": 4.997628763049257e-05,
      "loss": 0.6833,
      "step": 1280
    },
    {
      "epoch": 0.31,
      "grad_norm": 0.2111068218946457,
      "learning_rate": 4.9974518944829626e-05,
      "loss": 0.6922,
      "step": 1290
    },
    {
      "epoch": 0.31,
      "grad_norm": 0.309772253036499,
      "learning_rate": 4.997268668138766e-05,
      "loss": 0.7195,
      "step": 1300
    },
    {
      "epoch": 0.32,
      "grad_norm": 0.2199670672416687,
      "learning_rate": 4.997079084483105e-05,
      "loss": 0.6924,
      "step": 1310
    },
    {
      "epoch": 0.32,
      "grad_norm": 0.21781674027442932,
      "learning_rate": 4.996883143998605e-05,
      "loss": 0.6873,
      "step": 1320
    },
    {
      "epoch": 0.32,
      "grad_norm": 0.2969052791595459,
      "learning_rate": 4.996680847184072e-05,
      "loss": 0.7135,
      "step": 1330
    },
    {
      "epoch": 0.32,
      "grad_norm": 0.23092682659626007,
      "learning_rate": 4.996472194554495e-05,
      "loss": 0.6827,
      "step": 1340
    },
    {
      "epoch": 0.33,
      "grad_norm": 0.29615628719329834,
      "learning_rate": 4.996257186641042e-05,
      "loss": 0.6738,
      "step": 1350
    },
    {
      "epoch": 0.33,
      "grad_norm": 0.23491479456424713,
      "learning_rate": 4.99603582399106e-05,
      "loss": 0.7396,
      "step": 1360
    },
    {
      "epoch": 0.33,
      "grad_norm": 0.1986425668001175,
      "learning_rate": 4.9958081071680726e-05,
      "loss": 0.6787,
      "step": 1370
    },
    {
      "epoch": 0.33,
      "grad_norm": 0.19658410549163818,
      "learning_rate": 4.99557403675178e-05,
      "loss": 0.66,
      "step": 1380
    },
    {
      "epoch": 0.34,
      "grad_norm": 0.29404616355895996,
      "learning_rate": 4.995333613338057e-05,
      "loss": 0.6824,
      "step": 1390
    },
    {
      "epoch": 0.34,
      "grad_norm": 0.23689547181129456,
      "learning_rate": 4.9950868375389514e-05,
      "loss": 0.7008,
      "step": 1400
    },
    {
      "epoch": 0.34,
      "grad_norm": 0.22758638858795166,
      "learning_rate": 4.99483370998268e-05,
      "loss": 0.6946,
      "step": 1410
    },
    {
      "epoch": 0.34,
      "grad_norm": 0.20259805023670197,
      "learning_rate": 4.994574231313634e-05,
      "loss": 0.6734,
      "step": 1420
    },
    {
      "epoch": 0.35,
      "grad_norm": 0.21935436129570007,
      "learning_rate": 4.994308402192366e-05,
      "loss": 0.7094,
      "step": 1430
    },
    {
      "epoch": 0.35,
      "grad_norm": 0.23414015769958496,
      "learning_rate": 4.9940362232956026e-05,
      "loss": 0.7019,
      "step": 1440
    },
    {
      "epoch": 0.35,
      "grad_norm": 0.2623121440410614,
      "learning_rate": 4.993757695316228e-05,
      "loss": 0.6793,
      "step": 1450
    },
    {
      "epoch": 0.35,
      "grad_norm": 0.31137487292289734,
      "learning_rate": 4.993472818963295e-05,
      "loss": 0.6902,
      "step": 1460
    },
    {
      "epoch": 0.36,
      "grad_norm": 0.22092680633068085,
      "learning_rate": 4.993181594962013e-05,
      "loss": 0.6912,
      "step": 1470
    },
    {
      "epoch": 0.36,
      "grad_norm": 0.23723623156547546,
      "learning_rate": 4.992884024053754e-05,
      "loss": 0.652,
      "step": 1480
    },
    {
      "epoch": 0.36,
      "grad_norm": 0.26028233766555786,
      "learning_rate": 4.9925801069960454e-05,
      "loss": 0.6969,
      "step": 1490
    },
    {
      "epoch": 0.36,
      "grad_norm": 0.23856200277805328,
      "learning_rate": 4.992269844562572e-05,
      "loss": 0.6723,
      "step": 1500
    },
    {
      "epoch": 0.36,
      "eval_loss": 0.6912096738815308,
      "eval_runtime": 67.244,
      "eval_samples_per_second": 29.742,
      "eval_steps_per_second": 0.937,
      "step": 1500
    },
    {
      "epoch": 0.36,
      "grad_norm": 0.167326420545578,
      "learning_rate": 4.9919532375431677e-05,
      "loss": 0.6897,
      "step": 1510
    },
    {
      "epoch": 0.37,
      "grad_norm": 0.26183101534843445,
      "learning_rate": 4.991630286743823e-05,
      "loss": 0.6844,
      "step": 1520
    },
    {
      "epoch": 0.37,
      "grad_norm": 0.25918036699295044,
      "learning_rate": 4.991300992986676e-05,
      "loss": 0.6645,
      "step": 1530
    },
    {
      "epoch": 0.37,
      "grad_norm": 0.2354690134525299,
      "learning_rate": 4.99096535711001e-05,
      "loss": 0.6672,
      "step": 1540
    },
    {
      "epoch": 0.37,
      "grad_norm": 0.24444998800754547,
      "learning_rate": 4.990623379968257e-05,
      "loss": 0.6749,
      "step": 1550
    },
    {
      "epoch": 0.38,
      "grad_norm": 0.2562585771083832,
      "learning_rate": 4.990275062431989e-05,
      "loss": 0.6652,
      "step": 1560
    },
    {
      "epoch": 0.38,
      "grad_norm": 0.22618699073791504,
      "learning_rate": 4.98992040538792e-05,
      "loss": 0.7163,
      "step": 1570
    },
    {
      "epoch": 0.38,
      "grad_norm": 0.20957091450691223,
      "learning_rate": 4.9895594097389044e-05,
      "loss": 0.7053,
      "step": 1580
    },
    {
      "epoch": 0.38,
      "grad_norm": 0.26973310112953186,
      "learning_rate": 4.989192076403928e-05,
      "loss": 0.6716,
      "step": 1590
    },
    {
      "epoch": 0.39,
      "grad_norm": 0.24841250479221344,
      "learning_rate": 4.9888184063181154e-05,
      "loss": 0.6884,
      "step": 1600
    },
    {
      "epoch": 0.39,
      "grad_norm": 0.25283706188201904,
      "learning_rate": 4.98843840043272e-05,
      "loss": 0.6632,
      "step": 1610
    },
    {
      "epoch": 0.39,
      "grad_norm": 0.196313738822937,
      "learning_rate": 4.988052059715126e-05,
      "loss": 0.712,
      "step": 1620
    },
    {
      "epoch": 0.39,
      "grad_norm": 0.21621187031269073,
      "learning_rate": 4.987659385148842e-05,
      "loss": 0.6732,
      "step": 1630
    },
    {
      "epoch": 0.4,
      "grad_norm": 0.2180820256471634,
      "learning_rate": 4.987260377733502e-05,
      "loss": 0.7045,
      "step": 1640
    },
    {
      "epoch": 0.4,
      "grad_norm": 0.20110243558883667,
      "learning_rate": 4.986855038484862e-05,
      "loss": 0.6604,
      "step": 1650
    },
    {
      "epoch": 0.4,
      "grad_norm": 0.20813095569610596,
      "learning_rate": 4.9864433684347964e-05,
      "loss": 0.7051,
      "step": 1660
    },
    {
      "epoch": 0.4,
      "grad_norm": 0.22026021778583527,
      "learning_rate": 4.9860253686312964e-05,
      "loss": 0.6752,
      "step": 1670
    },
    {
      "epoch": 0.41,
      "grad_norm": 0.23409809172153473,
      "learning_rate": 4.9856010401384654e-05,
      "loss": 0.6805,
      "step": 1680
    },
    {
      "epoch": 0.41,
      "grad_norm": 0.1982721984386444,
      "learning_rate": 4.985170384036521e-05,
      "loss": 0.6888,
      "step": 1690
    },
    {
      "epoch": 0.41,
      "grad_norm": 0.24984286725521088,
      "learning_rate": 4.984733401421785e-05,
      "loss": 0.6741,
      "step": 1700
    },
    {
      "epoch": 0.41,
      "grad_norm": 0.24465042352676392,
      "learning_rate": 4.9842900934066874e-05,
      "loss": 0.6781,
      "step": 1710
    },
    {
      "epoch": 0.42,
      "grad_norm": 0.26656293869018555,
      "learning_rate": 4.98384046111976e-05,
      "loss": 0.7086,
      "step": 1720
    },
    {
      "epoch": 0.42,
      "grad_norm": 0.23073184490203857,
      "learning_rate": 4.9833845057056336e-05,
      "loss": 0.6966,
      "step": 1730
    },
    {
      "epoch": 0.42,
      "grad_norm": 0.23296333849430084,
      "learning_rate": 4.982922228325037e-05,
      "loss": 0.6769,
      "step": 1740
    },
    {
      "epoch": 0.42,
      "grad_norm": 0.22282017767429352,
      "learning_rate": 4.982453630154794e-05,
      "loss": 0.6581,
      "step": 1750
    },
    {
      "epoch": 0.43,
      "grad_norm": 0.22943562269210815,
      "learning_rate": 4.981978712387815e-05,
      "loss": 0.6803,
      "step": 1760
    },
    {
      "epoch": 0.43,
      "grad_norm": 0.29295608401298523,
      "learning_rate": 4.9814974762331034e-05,
      "loss": 0.6514,
      "step": 1770
    },
    {
      "epoch": 0.43,
      "grad_norm": 0.21845972537994385,
      "learning_rate": 4.981009922915743e-05,
      "loss": 0.691,
      "step": 1780
    },
    {
      "epoch": 0.43,
      "grad_norm": 0.24616730213165283,
      "learning_rate": 4.980516053676903e-05,
      "loss": 0.6948,
      "step": 1790
    },
    {
      "epoch": 0.43,
      "grad_norm": 0.2354935109615326,
      "learning_rate": 4.9800158697738264e-05,
      "loss": 0.6778,
      "step": 1800
    },
    {
      "epoch": 0.44,
      "grad_norm": 0.3475668430328369,
      "learning_rate": 4.979509372479837e-05,
      "loss": 0.6626,
      "step": 1810
    },
    {
      "epoch": 0.44,
      "grad_norm": 0.2817700207233429,
      "learning_rate": 4.9789965630843265e-05,
      "loss": 0.6724,
      "step": 1820
    },
    {
      "epoch": 0.44,
      "grad_norm": 0.28306934237480164,
      "learning_rate": 4.978477442892758e-05,
      "loss": 0.6673,
      "step": 1830
    },
    {
      "epoch": 0.44,
      "grad_norm": 0.2593139111995697,
      "learning_rate": 4.9779520132266575e-05,
      "loss": 0.6866,
      "step": 1840
    },
    {
      "epoch": 0.45,
      "grad_norm": 0.2618165612220764,
      "learning_rate": 4.9774202754236145e-05,
      "loss": 0.6671,
      "step": 1850
    },
    {
      "epoch": 0.45,
      "grad_norm": 0.22875259816646576,
      "learning_rate": 4.9768822308372784e-05,
      "loss": 0.6743,
      "step": 1860
    },
    {
      "epoch": 0.45,
      "grad_norm": 0.20143887400627136,
      "learning_rate": 4.976337880837351e-05,
      "loss": 0.6866,
      "step": 1870
    },
    {
      "epoch": 0.45,
      "grad_norm": 0.23163306713104248,
      "learning_rate": 4.975787226809587e-05,
      "loss": 0.6772,
      "step": 1880
    },
    {
      "epoch": 0.46,
      "grad_norm": 0.21628862619400024,
      "learning_rate": 4.975230270155791e-05,
      "loss": 0.6706,
      "step": 1890
    },
    {
      "epoch": 0.46,
      "grad_norm": 0.21055322885513306,
      "learning_rate": 4.9746670122938105e-05,
      "loss": 0.6536,
      "step": 1900
    },
    {
      "epoch": 0.46,
      "grad_norm": 0.20045976340770721,
      "learning_rate": 4.974097454657534e-05,
      "loss": 0.6477,
      "step": 1910
    },
    {
      "epoch": 0.46,
      "grad_norm": 0.20996223390102386,
      "learning_rate": 4.9735215986968874e-05,
      "loss": 0.6526,
      "step": 1920
    },
    {
      "epoch": 0.47,
      "grad_norm": 0.1778746396303177,
      "learning_rate": 4.972939445877831e-05,
      "loss": 0.6951,
      "step": 1930
    },
    {
      "epoch": 0.47,
      "grad_norm": 0.2416403591632843,
      "learning_rate": 4.972350997682354e-05,
      "loss": 0.6703,
      "step": 1940
    },
    {
      "epoch": 0.47,
      "grad_norm": 0.23652783036231995,
      "learning_rate": 4.9717562556084735e-05,
      "loss": 0.6401,
      "step": 1950
    },
    {
      "epoch": 0.47,
      "grad_norm": 0.23598232865333557,
      "learning_rate": 4.9711552211702274e-05,
      "loss": 0.6668,
      "step": 1960
    },
    {
      "epoch": 0.48,
      "grad_norm": 0.22741574048995972,
      "learning_rate": 4.970547895897672e-05,
      "loss": 0.6765,
      "step": 1970
    },
    {
      "epoch": 0.48,
      "grad_norm": 0.20418991148471832,
      "learning_rate": 4.96993428133688e-05,
      "loss": 0.6771,
      "step": 1980
    },
    {
      "epoch": 0.48,
      "grad_norm": 0.24983160197734833,
      "learning_rate": 4.969314379049932e-05,
      "loss": 0.6656,
      "step": 1990
    },
    {
      "epoch": 0.48,
      "grad_norm": 0.2511340379714966,
      "learning_rate": 4.968688190614919e-05,
      "loss": 0.6605,
      "step": 2000
    },
    {
      "epoch": 0.48,
      "eval_loss": 0.6730201244354248,
      "eval_runtime": 67.2807,
      "eval_samples_per_second": 29.726,
      "eval_steps_per_second": 0.936,
      "step": 2000
    },
    {
      "epoch": 0.49,
      "grad_norm": 0.22646427154541016,
      "learning_rate": 4.96805571762593e-05,
      "loss": 0.6729,
      "step": 2010
    },
    {
      "epoch": 0.49,
      "grad_norm": 0.2343759387731552,
      "learning_rate": 4.9674169616930574e-05,
      "loss": 0.6635,
      "step": 2020
    },
    {
      "epoch": 0.49,
      "grad_norm": 0.23573775589466095,
      "learning_rate": 4.966771924442385e-05,
      "loss": 0.654,
      "step": 2030
    },
    {
      "epoch": 0.49,
      "grad_norm": 0.2696162760257721,
      "learning_rate": 4.966120607515987e-05,
      "loss": 0.6728,
      "step": 2040
    },
    {
      "epoch": 0.5,
      "grad_norm": 0.25032511353492737,
      "learning_rate": 4.965463012571927e-05,
      "loss": 0.6743,
      "step": 2050
    },
    {
      "epoch": 0.5,
      "grad_norm": 0.24621817469596863,
      "learning_rate": 4.964799141284247e-05,
      "loss": 0.6596,
      "step": 2060
    },
    {
      "epoch": 0.5,
      "grad_norm": 0.23296469449996948,
      "learning_rate": 4.964128995342966e-05,
      "loss": 0.6471,
      "step": 2070
    },
    {
      "epoch": 0.5,
      "grad_norm": 0.22769325971603394,
      "learning_rate": 4.963452576454082e-05,
      "loss": 0.6499,
      "step": 2080
    },
    {
      "epoch": 0.51,
      "grad_norm": 0.2572191655635834,
      "learning_rate": 4.9627698863395564e-05,
      "loss": 0.6499,
      "step": 2090
    },
    {
      "epoch": 0.51,
      "grad_norm": 0.21782545745372772,
      "learning_rate": 4.962080926737319e-05,
      "loss": 0.6641,
      "step": 2100
    },
    {
      "epoch": 0.51,
      "grad_norm": 0.22925855219364166,
      "learning_rate": 4.9613856994012567e-05,
      "loss": 0.6618,
      "step": 2110
    },
    {
      "epoch": 0.51,
      "grad_norm": 0.2127605825662613,
      "learning_rate": 4.960684206101214e-05,
      "loss": 0.6821,
      "step": 2120
    },
    {
      "epoch": 0.51,
      "grad_norm": 0.2520267069339752,
      "learning_rate": 4.9599764486229865e-05,
      "loss": 0.6748,
      "step": 2130
    },
    {
      "epoch": 0.52,
      "grad_norm": 0.20282019674777985,
      "learning_rate": 4.9592624287683176e-05,
      "loss": 0.6674,
      "step": 2140
    },
    {
      "epoch": 0.52,
      "grad_norm": 0.46226009726524353,
      "learning_rate": 4.958542148354891e-05,
      "loss": 0.6422,
      "step": 2150
    },
    {
      "epoch": 0.52,
      "grad_norm": 0.2241724133491516,
      "learning_rate": 4.957815609216329e-05,
      "loss": 0.6612,
      "step": 2160
    },
    {
      "epoch": 0.52,
      "grad_norm": 0.23772084712982178,
      "learning_rate": 4.957082813202186e-05,
      "loss": 0.6614,
      "step": 2170
    },
    {
      "epoch": 0.53,
      "grad_norm": 0.2049807757139206,
      "learning_rate": 4.9563437621779465e-05,
      "loss": 0.6827,
      "step": 2180
    },
    {
      "epoch": 0.53,
      "grad_norm": 0.22666208446025848,
      "learning_rate": 4.955598458025015e-05,
      "loss": 0.6522,
      "step": 2190
    },
    {
      "epoch": 0.53,
      "grad_norm": 0.26957106590270996,
      "learning_rate": 4.954846902640718e-05,
      "loss": 0.6817,
      "step": 2200
    },
    {
      "epoch": 0.53,
      "grad_norm": 0.22548554837703705,
      "learning_rate": 4.954089097938294e-05,
      "loss": 0.6444,
      "step": 2210
    },
    {
      "epoch": 0.54,
      "grad_norm": 0.22197945415973663,
      "learning_rate": 4.9533250458468914e-05,
      "loss": 0.6779,
      "step": 2220
    },
    {
      "epoch": 0.54,
      "grad_norm": 0.22711919248104095,
      "learning_rate": 4.9525547483115617e-05,
      "loss": 0.6416,
      "step": 2230
    },
    {
      "epoch": 0.54,
      "grad_norm": 0.22576060891151428,
      "learning_rate": 4.951778207293255e-05,
      "loss": 0.6598,
      "step": 2240
    },
    {
      "epoch": 0.54,
      "grad_norm": 0.22785042226314545,
      "learning_rate": 4.950995424768818e-05,
      "loss": 0.6499,
      "step": 2250
    },
    {
      "epoch": 0.55,
      "grad_norm": 0.20107567310333252,
      "learning_rate": 4.9502064027309836e-05,
      "loss": 0.6509,
      "step": 2260
    },
    {
      "epoch": 0.55,
      "grad_norm": 0.23379752039909363,
      "learning_rate": 4.94941114318837e-05,
      "loss": 0.6551,
      "step": 2270
    },
    {
      "epoch": 0.55,
      "grad_norm": 0.22154928743839264,
      "learning_rate": 4.948609648165475e-05,
      "loss": 0.6428,
      "step": 2280
    },
    {
      "epoch": 0.55,
      "grad_norm": 0.23805582523345947,
      "learning_rate": 4.947801919702667e-05,
      "loss": 0.6633,
      "step": 2290
    },
    {
      "epoch": 0.56,
      "grad_norm": 0.24756357073783875,
      "learning_rate": 4.946987959856188e-05,
      "loss": 0.6561,
      "step": 2300
    },
    {
      "epoch": 0.56,
      "grad_norm": 0.23767632246017456,
      "learning_rate": 4.9461677706981374e-05,
      "loss": 0.6552,
      "step": 2310
    },
    {
      "epoch": 0.56,
      "grad_norm": 0.32177987694740295,
      "learning_rate": 4.9453413543164775e-05,
      "loss": 0.6705,
      "step": 2320
    },
    {
      "epoch": 0.56,
      "grad_norm": 0.29961881041526794,
      "learning_rate": 4.94450871281502e-05,
      "loss": 0.6551,
      "step": 2330
    },
    {
      "epoch": 0.57,
      "grad_norm": 0.2768830955028534,
      "learning_rate": 4.943669848313427e-05,
      "loss": 0.6468,
      "step": 2340
    },
    {
      "epoch": 0.57,
      "grad_norm": 0.24728989601135254,
      "learning_rate": 4.9428247629472e-05,
      "loss": 0.6506,
      "step": 2350
    },
    {
      "epoch": 0.57,
      "grad_norm": 0.2641989290714264,
      "learning_rate": 4.941973458867677e-05,
      "loss": 0.6517,
      "step": 2360
    },
    {
      "epoch": 0.57,
      "grad_norm": 0.18787774443626404,
      "learning_rate": 4.941115938242028e-05,
      "loss": 0.6355,
      "step": 2370
    },
    {
      "epoch": 0.58,
      "grad_norm": 0.26642099022865295,
      "learning_rate": 4.940252203253248e-05,
      "loss": 0.6811,
      "step": 2380
    },
    {
      "epoch": 0.58,
      "grad_norm": 0.24736198782920837,
      "learning_rate": 4.939382256100154e-05,
      "loss": 0.6493,
      "step": 2390
    },
    {
      "epoch": 0.58,
      "grad_norm": 0.3060832619667053,
      "learning_rate": 4.938506098997374e-05,
      "loss": 0.6707,
      "step": 2400
    },
    {
      "epoch": 0.58,
      "grad_norm": 0.24164025485515594,
      "learning_rate": 4.937623734175346e-05,
      "loss": 0.6508,
      "step": 2410
    },
    {
      "epoch": 0.58,
      "grad_norm": 0.26685982942581177,
      "learning_rate": 4.936735163880313e-05,
      "loss": 0.6441,
      "step": 2420
    },
    {
      "epoch": 0.59,
      "grad_norm": 0.17739726603031158,
      "learning_rate": 4.9358403903743124e-05,
      "loss": 0.6499,
      "step": 2430
    },
    {
      "epoch": 0.59,
      "grad_norm": 0.23560577630996704,
      "learning_rate": 4.9349394159351735e-05,
      "loss": 0.645,
      "step": 2440
    },
    {
      "epoch": 0.59,
      "grad_norm": 0.23137669265270233,
      "learning_rate": 4.9340322428565135e-05,
      "loss": 0.6615,
      "step": 2450
    },
    {
      "epoch": 0.59,
      "grad_norm": 0.3485463857650757,
      "learning_rate": 4.933118873447728e-05,
      "loss": 0.6283,
      "step": 2460
    },
    {
      "epoch": 0.6,
      "grad_norm": 0.20602945983409882,
      "learning_rate": 4.932199310033987e-05,
      "loss": 0.6831,
      "step": 2470
    },
    {
      "epoch": 0.6,
      "grad_norm": 0.21541930735111237,
      "learning_rate": 4.931273554956227e-05,
      "loss": 0.6722,
      "step": 2480
    },
    {
      "epoch": 0.6,
      "grad_norm": 0.2092689573764801,
      "learning_rate": 4.930341610571151e-05,
      "loss": 0.6609,
      "step": 2490
    },
    {
      "epoch": 0.6,
      "grad_norm": 0.23792122304439545,
      "learning_rate": 4.9294034792512126e-05,
      "loss": 0.6475,
      "step": 2500
    },
    {
      "epoch": 0.6,
      "eval_loss": 0.6643247008323669,
      "eval_runtime": 67.0911,
      "eval_samples_per_second": 29.81,
      "eval_steps_per_second": 0.939,
      "step": 2500
    },
    {
      "epoch": 0.61,
      "grad_norm": 0.2101823091506958,
      "learning_rate": 4.928459163384619e-05,
      "loss": 0.6604,
      "step": 2510
    },
    {
      "epoch": 0.61,
      "grad_norm": 0.1859540194272995,
      "learning_rate": 4.927508665375321e-05,
      "loss": 0.6252,
      "step": 2520
    },
    {
      "epoch": 0.61,
      "grad_norm": 0.2371070235967636,
      "learning_rate": 4.926551987643007e-05,
      "loss": 0.6413,
      "step": 2530
    },
    {
      "epoch": 0.61,
      "grad_norm": 0.2149161994457245,
      "learning_rate": 4.9255891326230964e-05,
      "loss": 0.6558,
      "step": 2540
    },
    {
      "epoch": 0.62,
      "grad_norm": 0.21805475652217865,
      "learning_rate": 4.9246201027667354e-05,
      "loss": 0.631,
      "step": 2550
    },
    {
      "epoch": 0.62,
      "grad_norm": 0.22705689072608948,
      "learning_rate": 4.9236449005407895e-05,
      "loss": 0.6785,
      "step": 2560
    },
    {
      "epoch": 0.62,
      "grad_norm": 0.21902668476104736,
      "learning_rate": 4.9226635284278355e-05,
      "loss": 0.657,
      "step": 2570
    },
    {
      "epoch": 0.62,
      "grad_norm": 0.21482336521148682,
      "learning_rate": 4.9216759889261586e-05,
      "loss": 0.6514,
      "step": 2580
    },
    {
      "epoch": 0.63,
      "grad_norm": 0.20008622109889984,
      "learning_rate": 4.9206822845497444e-05,
      "loss": 0.6516,
      "step": 2590
    },
    {
      "epoch": 0.63,
      "grad_norm": 0.205527663230896,
      "learning_rate": 4.919682417828271e-05,
      "loss": 0.6336,
      "step": 2600
    },
    {
      "epoch": 0.63,
      "grad_norm": 0.263959139585495,
      "learning_rate": 4.9186763913071065e-05,
      "loss": 0.6449,
      "step": 2610
    },
    {
      "epoch": 0.63,
      "grad_norm": 0.21950668096542358,
      "learning_rate": 4.917664207547297e-05,
      "loss": 0.6916,
      "step": 2620
    },
    {
      "epoch": 0.64,
      "grad_norm": 0.215659499168396,
      "learning_rate": 4.916645869125564e-05,
      "loss": 0.6441,
      "step": 2630
    },
    {
      "epoch": 0.64,
      "grad_norm": 0.22789780795574188,
      "learning_rate": 4.915621378634301e-05,
      "loss": 0.6576,
      "step": 2640
    },
    {
      "epoch": 0.64,
      "grad_norm": 0.27431604266166687,
      "learning_rate": 4.914590738681555e-05,
      "loss": 0.6487,
      "step": 2650
    },
    {
      "epoch": 0.64,
      "grad_norm": 0.21077977120876312,
      "learning_rate": 4.913553951891036e-05,
      "loss": 0.6688,
      "step": 2660
    },
    {
      "epoch": 0.65,
      "grad_norm": 0.2798519730567932,
      "learning_rate": 4.9125110209020954e-05,
      "loss": 0.6618,
      "step": 2670
    },
    {
      "epoch": 0.65,
      "grad_norm": 0.24790987372398376,
      "learning_rate": 4.911461948369731e-05,
      "loss": 0.6598,
      "step": 2680
    },
    {
      "epoch": 0.65,
      "grad_norm": 0.2008027732372284,
      "learning_rate": 4.91040673696457e-05,
      "loss": 0.6388,
      "step": 2690
    },
    {
      "epoch": 0.65,
      "grad_norm": 0.23889446258544922,
      "learning_rate": 4.9093453893728733e-05,
      "loss": 0.6526,
      "step": 2700
    },
    {
      "epoch": 0.65,
      "grad_norm": 0.1997964233160019,
      "learning_rate": 4.908277908296518e-05,
      "loss": 0.6521,
      "step": 2710
    },
    {
      "epoch": 0.66,
      "grad_norm": 0.24335375428199768,
      "learning_rate": 4.907204296452997e-05,
      "loss": 0.6636,
      "step": 2720
    },
    {
      "epoch": 0.66,
      "grad_norm": 0.23431982100009918,
      "learning_rate": 4.906124556575411e-05,
      "loss": 0.6557,
      "step": 2730
    },
    {
      "epoch": 0.66,
      "grad_norm": 0.21532365679740906,
      "learning_rate": 4.90503869141246e-05,
      "loss": 0.628,
      "step": 2740
    },
    {
      "epoch": 0.66,
      "grad_norm": 0.22962962090969086,
      "learning_rate": 4.903946703728436e-05,
      "loss": 0.6422,
      "step": 2750
    },
    {
      "epoch": 0.67,
      "grad_norm": 0.22807002067565918,
      "learning_rate": 4.90284859630322e-05,
      "loss": 0.6631,
      "step": 2760
    },
    {
      "epoch": 0.67,
      "grad_norm": 0.27668192982673645,
      "learning_rate": 4.90174437193227e-05,
      "loss": 0.6778,
      "step": 2770
    },
    {
      "epoch": 0.67,
      "grad_norm": 0.2503020763397217,
      "learning_rate": 4.900634033426616e-05,
      "loss": 0.6483,
      "step": 2780
    },
    {
      "epoch": 0.67,
      "grad_norm": 0.3033277094364166,
      "learning_rate": 4.8995175836128536e-05,
      "loss": 0.6572,
      "step": 2790
    },
    {
      "epoch": 0.68,
      "grad_norm": 0.2776722311973572,
      "learning_rate": 4.898395025333136e-05,
      "loss": 0.662,
      "step": 2800
    },
    {
      "epoch": 0.68,
      "grad_norm": 0.31251537799835205,
      "learning_rate": 4.897266361445165e-05,
      "loss": 0.6617,
      "step": 2810
    },
    {
      "epoch": 0.68,
      "grad_norm": 0.38570669293403625,
      "learning_rate": 4.8961315948221884e-05,
      "loss": 0.6419,
      "step": 2820
    },
    {
      "epoch": 0.68,
      "grad_norm": 0.23277431726455688,
      "learning_rate": 4.894990728352988e-05,
      "loss": 0.6373,
      "step": 2830
    },
    {
      "epoch": 0.69,
      "grad_norm": 0.2519891858100891,
      "learning_rate": 4.893843764941874e-05,
      "loss": 0.6759,
      "step": 2840
    },
    {
      "epoch": 0.69,
      "grad_norm": 0.2358052134513855,
      "learning_rate": 4.892690707508677e-05,
      "loss": 0.6826,
      "step": 2850
    },
    {
      "epoch": 0.69,
      "grad_norm": 0.23103272914886475,
      "learning_rate": 4.8915315589887436e-05,
      "loss": 0.624,
      "step": 2860
    },
    {
      "epoch": 0.69,
      "grad_norm": 0.2128864973783493,
      "learning_rate": 4.8903663223329245e-05,
      "loss": 0.654,
      "step": 2870
    },
    {
      "epoch": 0.7,
      "grad_norm": 0.22060807049274445,
      "learning_rate": 4.889195000507568e-05,
      "loss": 0.6376,
      "step": 2880
    },
    {
      "epoch": 0.7,
      "grad_norm": 0.24493062496185303,
      "learning_rate": 4.888017596494517e-05,
      "loss": 0.6648,
      "step": 2890
    },
    {
      "epoch": 0.7,
      "grad_norm": 0.2580796480178833,
      "learning_rate": 4.886834113291094e-05,
      "loss": 0.6607,
      "step": 2900
    },
    {
      "epoch": 0.7,
      "grad_norm": 0.26906147599220276,
      "learning_rate": 4.8856445539101016e-05,
      "loss": 0.6441,
      "step": 2910
    },
    {
      "epoch": 0.71,
      "grad_norm": 0.2145451009273529,
      "learning_rate": 4.884448921379805e-05,
      "loss": 0.6543,
      "step": 2920
    },
    {
      "epoch": 0.71,
      "grad_norm": 0.2156788855791092,
      "learning_rate": 4.8832472187439345e-05,
      "loss": 0.6185,
      "step": 2930
    },
    {
      "epoch": 0.71,
      "grad_norm": 0.25632360577583313,
      "learning_rate": 4.882039449061673e-05,
      "loss": 0.639,
      "step": 2940
    },
    {
      "epoch": 0.71,
      "grad_norm": 0.2496800720691681,
      "learning_rate": 4.8808256154076436e-05,
      "loss": 0.6377,
      "step": 2950
    },
    {
      "epoch": 0.72,
      "grad_norm": 0.39942485094070435,
      "learning_rate": 4.8796057208719124e-05,
      "loss": 0.6083,
      "step": 2960
    },
    {
      "epoch": 0.72,
      "grad_norm": 0.27986374497413635,
      "learning_rate": 4.8783797685599706e-05,
      "loss": 0.6528,
      "step": 2970
    },
    {
      "epoch": 0.72,
      "grad_norm": 0.22164517641067505,
      "learning_rate": 4.877147761592733e-05,
      "loss": 0.6456,
      "step": 2980
    },
    {
      "epoch": 0.72,
      "grad_norm": 0.29858505725860596,
      "learning_rate": 4.875909703106527e-05,
      "loss": 0.643,
      "step": 2990
    },
    {
      "epoch": 0.72,
      "grad_norm": 0.22595979273319244,
      "learning_rate": 4.874665596253084e-05,
      "loss": 0.6419,
      "step": 3000
    },
    {
      "epoch": 0.72,
      "eval_loss": 0.6583871841430664,
      "eval_runtime": 67.1961,
      "eval_samples_per_second": 29.764,
      "eval_steps_per_second": 0.938,
      "step": 3000
    },
    {
      "epoch": 0.73,
      "grad_norm": 0.19871610403060913,
      "learning_rate": 4.8734154441995364e-05,
      "loss": 0.6556,
      "step": 3010
    },
    {
      "epoch": 0.73,
      "grad_norm": 0.2072724997997284,
      "learning_rate": 4.872159250128401e-05,
      "loss": 0.6764,
      "step": 3020
    },
    {
      "epoch": 0.73,
      "grad_norm": 0.22515198588371277,
      "learning_rate": 4.87089701723758e-05,
      "loss": 0.6703,
      "step": 3030
    },
    {
      "epoch": 0.73,
      "grad_norm": 0.2292303442955017,
      "learning_rate": 4.869628748740347e-05,
      "loss": 0.6288,
      "step": 3040
    },
    {
      "epoch": 0.74,
      "grad_norm": 0.25133493542671204,
      "learning_rate": 4.8683544478653395e-05,
      "loss": 0.6652,
      "step": 3050
    },
    {
      "epoch": 0.74,
      "grad_norm": 0.29863354563713074,
      "learning_rate": 4.867074117856555e-05,
      "loss": 0.6352,
      "step": 3060
    },
    {
      "epoch": 0.74,
      "grad_norm": 0.2168973684310913,
      "learning_rate": 4.865787761973334e-05,
      "loss": 0.6562,
      "step": 3070
    },
    {
      "epoch": 0.74,
      "grad_norm": 0.2312694936990738,
      "learning_rate": 4.864495383490363e-05,
      "loss": 0.6463,
      "step": 3080
    },
    {
      "epoch": 0.75,
      "grad_norm": 0.2333671748638153,
      "learning_rate": 4.863196985697655e-05,
      "loss": 0.6267,
      "step": 3090
    },
    {
      "epoch": 0.75,
      "grad_norm": 0.23559433221817017,
      "learning_rate": 4.86189257190055e-05,
      "loss": 0.623,
      "step": 3100
    },
    {
      "epoch": 0.75,
      "grad_norm": 0.22314316034317017,
      "learning_rate": 4.860582145419703e-05,
      "loss": 0.6298,
      "step": 3110
    },
    {
      "epoch": 0.75,
      "grad_norm": 0.22701682150363922,
      "learning_rate": 4.859265709591073e-05,
      "loss": 0.6359,
      "step": 3120
    },
    {
      "epoch": 0.76,
      "grad_norm": 0.21683742105960846,
      "learning_rate": 4.857943267765919e-05,
      "loss": 0.6666,
      "step": 3130
    },
    {
      "epoch": 0.76,
      "grad_norm": 0.2519407272338867,
      "learning_rate": 4.856614823310788e-05,
      "loss": 0.6731,
      "step": 3140
    },
    {
      "epoch": 0.76,
      "grad_norm": 0.23536355793476105,
      "learning_rate": 4.855280379607509e-05,
      "loss": 0.629,
      "step": 3150
    },
    {
      "epoch": 0.76,
      "grad_norm": 0.19676193594932556,
      "learning_rate": 4.853939940053184e-05,
      "loss": 0.6329,
      "step": 3160
    },
    {
      "epoch": 0.77,
      "grad_norm": 0.23497731983661652,
      "learning_rate": 4.852593508060177e-05,
      "loss": 0.6584,
      "step": 3170
    },
    {
      "epoch": 0.77,
      "grad_norm": 0.2515724003314972,
      "learning_rate": 4.8512410870561084e-05,
      "loss": 0.6177,
      "step": 3180
    },
    {
      "epoch": 0.77,
      "grad_norm": 0.20130635797977448,
      "learning_rate": 4.8498826804838436e-05,
      "loss": 0.6645,
      "step": 3190
    },
    {
      "epoch": 0.77,
      "grad_norm": 0.23828361928462982,
      "learning_rate": 4.8485182918014876e-05,
      "loss": 0.6401,
      "step": 3200
    },
    {
      "epoch": 0.78,
      "grad_norm": 0.2598424255847931,
      "learning_rate": 4.847147924482371e-05,
      "loss": 0.6193,
      "step": 3210
    },
    {
      "epoch": 0.78,
      "grad_norm": 0.20697768032550812,
      "learning_rate": 4.845771582015046e-05,
      "loss": 0.626,
      "step": 3220
    },
    {
      "epoch": 0.78,
      "grad_norm": 0.19661648571491241,
      "learning_rate": 4.8443892679032775e-05,
      "loss": 0.6283,
      "step": 3230
    },
    {
      "epoch": 0.78,
      "grad_norm": 0.22154510021209717,
      "learning_rate": 4.843000985666028e-05,
      "loss": 0.6571,
      "step": 3240
    },
    {
      "epoch": 0.79,
      "grad_norm": 0.3334987759590149,
      "learning_rate": 4.841606738837458e-05,
      "loss": 0.6553,
      "step": 3250
    },
    {
      "epoch": 0.79,
      "grad_norm": 0.6604078412055969,
      "learning_rate": 4.8402065309669085e-05,
      "loss": 0.6239,
      "step": 3260
    },
    {
      "epoch": 0.79,
      "grad_norm": 0.22238047420978546,
      "learning_rate": 4.838800365618898e-05,
      "loss": 0.6725,
      "step": 3270
    },
    {
      "epoch": 0.79,
      "grad_norm": 0.21566419303417206,
      "learning_rate": 4.837388246373108e-05,
      "loss": 0.6597,
      "step": 3280
    },
    {
      "epoch": 0.8,
      "grad_norm": 0.9337890148162842,
      "learning_rate": 4.83597017682438e-05,
      "loss": 0.6454,
      "step": 3290
    },
    {
      "epoch": 0.8,
      "grad_norm": 0.2716673016548157,
      "learning_rate": 4.8345461605827014e-05,
      "loss": 0.6526,
      "step": 3300
    },
    {
      "epoch": 0.8,
      "grad_norm": 0.23388756811618805,
      "learning_rate": 4.8331162012732e-05,
      "loss": 0.6642,
      "step": 3310
    },
    {
      "epoch": 0.8,
      "grad_norm": 0.21243353188037872,
      "learning_rate": 4.8316803025361304e-05,
      "loss": 0.6233,
      "step": 3320
    },
    {
      "epoch": 0.8,
      "grad_norm": 0.28600814938545227,
      "learning_rate": 4.8302384680268684e-05,
      "loss": 0.6559,
      "step": 3330
    },
    {
      "epoch": 0.81,
      "grad_norm": 0.19718654453754425,
      "learning_rate": 4.8287907014159004e-05,
      "loss": 0.6357,
      "step": 3340
    },
    {
      "epoch": 0.81,
      "grad_norm": 0.2330627292394638,
      "learning_rate": 4.827337006388816e-05,
      "loss": 0.6692,
      "step": 3350
    },
    {
      "epoch": 0.81,
      "grad_norm": 0.18776822090148926,
      "learning_rate": 4.8258773866462927e-05,
      "loss": 0.6804,
      "step": 3360
    },
    {
      "epoch": 0.81,
      "grad_norm": 0.24929189682006836,
      "learning_rate": 4.8244118459040944e-05,
      "loss": 0.6455,
      "step": 3370
    },
    {
      "epoch": 0.82,
      "grad_norm": 0.23884710669517517,
      "learning_rate": 4.8229403878930566e-05,
      "loss": 0.6345,
      "step": 3380
    },
    {
      "epoch": 0.82,
      "grad_norm": 0.2579725682735443,
      "learning_rate": 4.821463016359078e-05,
      "loss": 0.6584,
      "step": 3390
    },
    {
      "epoch": 0.82,
      "grad_norm": 0.2741701900959015,
      "learning_rate": 4.8199797350631136e-05,
      "loss": 0.6463,
      "step": 3400
    },
    {
      "epoch": 0.82,
      "grad_norm": 0.2254386991262436,
      "learning_rate": 4.818490547781159e-05,
      "loss": 0.6399,
      "step": 3410
    },
    {
      "epoch": 0.83,
      "grad_norm": 0.2172977328300476,
      "learning_rate": 4.816995458304249e-05,
      "loss": 0.661,
      "step": 3420
    },
    {
      "epoch": 0.83,
      "grad_norm": 0.23464016616344452,
      "learning_rate": 4.815494470438441e-05,
      "loss": 0.6516,
      "step": 3430
    },
    {
      "epoch": 0.83,
      "grad_norm": 0.27163979411125183,
      "learning_rate": 4.813987588004807e-05,
      "loss": 0.6451,
      "step": 3440
    },
    {
      "epoch": 0.83,
      "grad_norm": 0.20852161943912506,
      "learning_rate": 4.8124748148394285e-05,
      "loss": 0.6394,
      "step": 3450
    },
    {
      "epoch": 0.84,
      "grad_norm": 0.2224467694759369,
      "learning_rate": 4.81095615479338e-05,
      "loss": 0.6427,
      "step": 3460
    },
    {
      "epoch": 0.84,
      "grad_norm": 0.2205616980791092,
      "learning_rate": 4.8094316117327245e-05,
      "loss": 0.6491,
      "step": 3470
    },
    {
      "epoch": 0.84,
      "grad_norm": 0.21984492242336273,
      "learning_rate": 4.8079011895384985e-05,
      "loss": 0.6271,
      "step": 3480
    },
    {
      "epoch": 0.84,
      "grad_norm": 0.29770565032958984,
      "learning_rate": 4.806364892106707e-05,
      "loss": 0.6048,
      "step": 3490
    },
    {
      "epoch": 0.85,
      "grad_norm": 0.2440354973077774,
      "learning_rate": 4.8048227233483127e-05,
      "loss": 0.6307,
      "step": 3500
    },
    {
      "epoch": 0.85,
      "eval_loss": 0.6532164216041565,
      "eval_runtime": 67.3224,
      "eval_samples_per_second": 29.708,
      "eval_steps_per_second": 0.936,
      "step": 3500
    },
    {
      "epoch": 0.85,
      "grad_norm": 0.25720447301864624,
      "learning_rate": 4.803274687189222e-05,
      "loss": 0.6337,
      "step": 3510
    },
    {
      "epoch": 0.85,
      "grad_norm": 0.21051880717277527,
      "learning_rate": 4.8017207875702814e-05,
      "loss": 0.6398,
      "step": 3520
    },
    {
      "epoch": 0.85,
      "grad_norm": 0.2977493107318878,
      "learning_rate": 4.800161028447261e-05,
      "loss": 0.6346,
      "step": 3530
    },
    {
      "epoch": 0.86,
      "grad_norm": 0.19749422371387482,
      "learning_rate": 4.798595413790848e-05,
      "loss": 0.6325,
      "step": 3540
    },
    {
      "epoch": 0.86,
      "grad_norm": 0.21522672474384308,
      "learning_rate": 4.7970239475866386e-05,
      "loss": 0.6244,
      "step": 3550
    },
    {
      "epoch": 0.86,
      "grad_norm": 0.24835069477558136,
      "learning_rate": 4.7954466338351224e-05,
      "loss": 0.612,
      "step": 3560
    },
    {
      "epoch": 0.86,
      "grad_norm": 0.25459495186805725,
      "learning_rate": 4.793863476551677e-05,
      "loss": 0.6317,
      "step": 3570
    },
    {
      "epoch": 0.87,
      "grad_norm": 0.28858670592308044,
      "learning_rate": 4.7922744797665544e-05,
      "loss": 0.6485,
      "step": 3580
    },
    {
      "epoch": 0.87,
      "grad_norm": 0.30466488003730774,
      "learning_rate": 4.790679647524873e-05,
      "loss": 0.6307,
      "step": 3590
    },
    {
      "epoch": 0.87,
      "grad_norm": 0.36087316274642944,
      "learning_rate": 4.789078983886607e-05,
      "loss": 0.6749,
      "step": 3600
    },
    {
      "epoch": 0.87,
      "grad_norm": 0.2436164766550064,
      "learning_rate": 4.787472492926575e-05,
      "loss": 0.6239,
      "step": 3610
    },
    {
      "epoch": 0.87,
      "grad_norm": 0.2527044415473938,
      "learning_rate": 4.78586017873443e-05,
      "loss": 0.6584,
      "step": 3620
    },
    {
      "epoch": 0.88,
| "grad_norm": 0.25492456555366516, | |
| "learning_rate": 4.784242045414651e-05, | |
| "loss": 0.6487, | |
| "step": 3630 | |
| }, | |
| { | |
| "epoch": 0.88, | |
| "grad_norm": 0.2293190211057663, | |
| "learning_rate": 4.782618097086528e-05, | |
| "loss": 0.646, | |
| "step": 3640 | |
| }, | |
| { | |
| "epoch": 0.88, | |
| "grad_norm": 0.7955949902534485, | |
| "learning_rate": 4.780988337884157e-05, | |
| "loss": 0.6391, | |
| "step": 3650 | |
| }, | |
| { | |
| "epoch": 0.88, | |
| "grad_norm": 0.2331811785697937, | |
| "learning_rate": 4.779352771956425e-05, | |
| "loss": 0.6267, | |
| "step": 3660 | |
| }, | |
| { | |
| "epoch": 0.89, | |
| "grad_norm": 0.23316140472888947, | |
| "learning_rate": 4.7777114034670006e-05, | |
| "loss": 0.6405, | |
| "step": 3670 | |
| }, | |
| { | |
| "epoch": 0.89, | |
| "grad_norm": 0.2253628969192505, | |
| "learning_rate": 4.776064236594327e-05, | |
| "loss": 0.6415, | |
| "step": 3680 | |
| }, | |
| { | |
| "epoch": 0.89, | |
| "grad_norm": 0.2378271222114563, | |
| "learning_rate": 4.774411275531606e-05, | |
| "loss": 0.6362, | |
| "step": 3690 | |
| }, | |
| { | |
| "epoch": 0.89, | |
| "grad_norm": 0.22514688968658447, | |
| "learning_rate": 4.7727525244867896e-05, | |
| "loss": 0.6375, | |
| "step": 3700 | |
| }, | |
| { | |
| "epoch": 0.9, | |
| "grad_norm": 0.20843249559402466, | |
| "learning_rate": 4.771087987682571e-05, | |
| "loss": 0.6166, | |
| "step": 3710 | |
| }, | |
| { | |
| "epoch": 0.9, | |
| "grad_norm": 0.2776215076446533, | |
| "learning_rate": 4.7694176693563705e-05, | |
| "loss": 0.6441, | |
| "step": 3720 | |
| }, | |
| { | |
| "epoch": 0.9, | |
| "grad_norm": 0.21339628100395203, | |
| "learning_rate": 4.767741573760327e-05, | |
| "loss": 0.6388, | |
| "step": 3730 | |
| }, | |
| { | |
| "epoch": 0.9, | |
| "grad_norm": 0.24429263174533844, | |
| "learning_rate": 4.766059705161288e-05, | |
| "loss": 0.6263, | |
| "step": 3740 | |
| }, | |
| { | |
| "epoch": 0.91, | |
| "grad_norm": 0.2099929004907608, | |
| "learning_rate": 4.764372067840795e-05, | |
| "loss": 0.6383, | |
| "step": 3750 | |
| }, | |
| { | |
| "epoch": 0.91, | |
| "grad_norm": 0.22066868841648102, | |
| "learning_rate": 4.7626786660950784e-05, | |
| "loss": 0.6313, | |
| "step": 3760 | |
| }, | |
| { | |
| "epoch": 0.91, | |
| "grad_norm": 0.22953028976917267, | |
| "learning_rate": 4.760979504235038e-05, | |
| "loss": 0.6264, | |
| "step": 3770 | |
| }, | |
| { | |
| "epoch": 0.91, | |
| "grad_norm": 0.3549204170703888, | |
| "learning_rate": 4.759274586586242e-05, | |
| "loss": 0.6368, | |
| "step": 3780 | |
| }, | |
| { | |
| "epoch": 0.92, | |
| "grad_norm": 0.24681401252746582, | |
| "learning_rate": 4.757563917488909e-05, | |
| "loss": 0.6422, | |
| "step": 3790 | |
| }, | |
| { | |
| "epoch": 0.92, | |
| "grad_norm": 0.2417946755886078, | |
| "learning_rate": 4.755847501297898e-05, | |
| "loss": 0.6438, | |
| "step": 3800 | |
| }, | |
| { | |
| "epoch": 0.92, | |
| "grad_norm": 0.2778252959251404, | |
| "learning_rate": 4.7541253423827006e-05, | |
| "loss": 0.6312, | |
| "step": 3810 | |
| }, | |
| { | |
| "epoch": 0.92, | |
| "grad_norm": 0.20280030369758606, | |
| "learning_rate": 4.7523974451274275e-05, | |
| "loss": 0.6349, | |
| "step": 3820 | |
| }, | |
| { | |
| "epoch": 0.93, | |
| "grad_norm": 0.24433667957782745, | |
| "learning_rate": 4.7506638139307966e-05, | |
| "loss": 0.6648, | |
| "step": 3830 | |
| }, | |
| { | |
| "epoch": 0.93, | |
| "grad_norm": 0.3273336887359619, | |
| "learning_rate": 4.7489244532061225e-05, | |
| "loss": 0.6498, | |
| "step": 3840 | |
| }, | |
| { | |
| "epoch": 0.93, | |
| "grad_norm": 0.23943009972572327, | |
| "learning_rate": 4.747179367381307e-05, | |
| "loss": 0.6358, | |
| "step": 3850 | |
| }, | |
| { | |
| "epoch": 0.93, | |
| "grad_norm": 0.26670822501182556, | |
| "learning_rate": 4.745428560898824e-05, | |
| "loss": 0.6563, | |
| "step": 3860 | |
| }, | |
| { | |
| "epoch": 0.94, | |
| "grad_norm": 0.18882083892822266, | |
| "learning_rate": 4.7436720382157116e-05, | |
| "loss": 0.6358, | |
| "step": 3870 | |
| }, | |
| { | |
| "epoch": 0.94, | |
| "grad_norm": 0.23588411509990692, | |
| "learning_rate": 4.741909803803562e-05, | |
| "loss": 0.6396, | |
| "step": 3880 | |
| }, | |
| { | |
| "epoch": 0.94, | |
| "grad_norm": 0.24063749611377716, | |
| "learning_rate": 4.740141862148503e-05, | |
| "loss": 0.6412, | |
| "step": 3890 | |
| }, | |
| { | |
| "epoch": 0.94, | |
| "grad_norm": 0.19155845046043396, | |
| "learning_rate": 4.738368217751196e-05, | |
| "loss": 0.62, | |
| "step": 3900 | |
| }, | |
| { | |
| "epoch": 0.94, | |
| "grad_norm": 0.20259159803390503, | |
| "learning_rate": 4.736588875126816e-05, | |
| "loss": 0.6172, | |
| "step": 3910 | |
| }, | |
| { | |
| "epoch": 0.95, | |
| "grad_norm": 0.22824090719223022, | |
| "learning_rate": 4.734803838805048e-05, | |
| "loss": 0.6286, | |
| "step": 3920 | |
| }, | |
| { | |
| "epoch": 0.95, | |
| "grad_norm": 0.2356242537498474, | |
| "learning_rate": 4.7330131133300686e-05, | |
| "loss": 0.6121, | |
| "step": 3930 | |
| }, | |
| { | |
| "epoch": 0.95, | |
| "grad_norm": 0.22052697837352753, | |
| "learning_rate": 4.731216703260538e-05, | |
| "loss": 0.6402, | |
| "step": 3940 | |
| }, | |
| { | |
| "epoch": 0.95, | |
| "grad_norm": 0.2817922532558441, | |
| "learning_rate": 4.7294146131695874e-05, | |
| "loss": 0.6256, | |
| "step": 3950 | |
| }, | |
| { | |
| "epoch": 0.96, | |
| "grad_norm": 0.23476284742355347, | |
| "learning_rate": 4.7276068476448097e-05, | |
| "loss": 0.644, | |
| "step": 3960 | |
| }, | |
| { | |
| "epoch": 0.96, | |
| "grad_norm": 0.25303345918655396, | |
| "learning_rate": 4.725793411288242e-05, | |
| "loss": 0.649, | |
| "step": 3970 | |
| }, | |
| { | |
| "epoch": 0.96, | |
| "grad_norm": 0.2615244686603546, | |
| "learning_rate": 4.723974308716361e-05, | |
| "loss": 0.6158, | |
| "step": 3980 | |
| }, | |
| { | |
| "epoch": 0.96, | |
| "grad_norm": 0.2798680365085602, | |
| "learning_rate": 4.722149544560067e-05, | |
| "loss": 0.6149, | |
| "step": 3990 | |
| }, | |
| { | |
| "epoch": 0.97, | |
| "grad_norm": 0.19780561327934265, | |
| "learning_rate": 4.720319123464672e-05, | |
| "loss": 0.6167, | |
| "step": 4000 | |
| }, | |
| { | |
| "epoch": 0.97, | |
| "eval_loss": 0.6494756937026978, | |
| "eval_runtime": 67.2363, | |
| "eval_samples_per_second": 29.746, | |
| "eval_steps_per_second": 0.937, | |
| "step": 4000 | |
| }, | |
| { | |
| "epoch": 0.97, | |
| "grad_norm": 0.22284342348575592, | |
| "learning_rate": 4.718483050089891e-05, | |
| "loss": 0.6282, | |
| "step": 4010 | |
| }, | |
| { | |
| "epoch": 0.97, | |
| "grad_norm": 0.242294043302536, | |
| "learning_rate": 4.7166413291098246e-05, | |
| "loss": 0.6418, | |
| "step": 4020 | |
| }, | |
| { | |
| "epoch": 0.97, | |
| "grad_norm": 0.19954660534858704, | |
| "learning_rate": 4.714793965212955e-05, | |
| "loss": 0.6293, | |
| "step": 4030 | |
| }, | |
| { | |
| "epoch": 0.98, | |
| "grad_norm": 0.2239541858434677, | |
| "learning_rate": 4.712940963102126e-05, | |
| "loss": 0.6441, | |
| "step": 4040 | |
| }, | |
| { | |
| "epoch": 0.98, | |
| "grad_norm": 0.2324070930480957, | |
| "learning_rate": 4.711082327494536e-05, | |
| "loss": 0.6355, | |
| "step": 4050 | |
| }, | |
| { | |
| "epoch": 0.98, | |
| "grad_norm": 0.27540045976638794, | |
| "learning_rate": 4.709218063121725e-05, | |
| "loss": 0.6476, | |
| "step": 4060 | |
| }, | |
| { | |
| "epoch": 0.98, | |
| "grad_norm": 0.37015092372894287, | |
| "learning_rate": 4.7073481747295614e-05, | |
| "loss": 0.6161, | |
| "step": 4070 | |
| }, | |
| { | |
| "epoch": 0.99, | |
| "grad_norm": 0.2678948938846588, | |
| "learning_rate": 4.7054726670782304e-05, | |
| "loss": 0.6404, | |
| "step": 4080 | |
| }, | |
| { | |
| "epoch": 0.99, | |
| "grad_norm": 0.2190527468919754, | |
| "learning_rate": 4.703591544942224e-05, | |
| "loss": 0.6619, | |
| "step": 4090 | |
| }, | |
| { | |
| "epoch": 0.99, | |
| "grad_norm": 0.23498305678367615, | |
| "learning_rate": 4.701704813110325e-05, | |
| "loss": 0.6297, | |
| "step": 4100 | |
| }, | |
| { | |
| "epoch": 0.99, | |
| "grad_norm": 0.2132258415222168, | |
| "learning_rate": 4.6998124763855984e-05, | |
| "loss": 0.6462, | |
| "step": 4110 | |
| }, | |
| { | |
| "epoch": 1.0, | |
| "grad_norm": 0.2967916429042816, | |
| "learning_rate": 4.697914539585376e-05, | |
| "loss": 0.6347, | |
| "step": 4120 | |
| }, | |
| { | |
| "epoch": 1.0, | |
| "grad_norm": 0.2503927946090698, | |
| "learning_rate": 4.6960110075412473e-05, | |
| "loss": 0.6264, | |
| "step": 4130 | |
| }, | |
| { | |
| "epoch": 1.0, | |
| "grad_norm": 0.2261209934949875, | |
| "learning_rate": 4.694101885099045e-05, | |
| "loss": 0.6351, | |
| "step": 4140 | |
| }, | |
| { | |
| "epoch": 1.0, | |
| "grad_norm": 0.2164410799741745, | |
| "learning_rate": 4.692187177118832e-05, | |
| "loss": 0.626, | |
| "step": 4150 | |
| }, | |
| { | |
| "epoch": 1.01, | |
| "grad_norm": 0.2934332489967346, | |
| "learning_rate": 4.690266888474893e-05, | |
| "loss": 0.6243, | |
| "step": 4160 | |
| }, | |
| { | |
| "epoch": 1.01, | |
| "grad_norm": 0.23883183300495148, | |
| "learning_rate": 4.688341024055718e-05, | |
| "loss": 0.624, | |
| "step": 4170 | |
| }, | |
| { | |
| "epoch": 1.01, | |
| "grad_norm": 0.1730484664440155, | |
| "learning_rate": 4.686409588763991e-05, | |
| "loss": 0.6473, | |
| "step": 4180 | |
| }, | |
| { | |
| "epoch": 1.01, | |
| "grad_norm": 0.27762770652770996, | |
| "learning_rate": 4.6844725875165775e-05, | |
| "loss": 0.6508, | |
| "step": 4190 | |
| }, | |
| { | |
| "epoch": 1.01, | |
| "grad_norm": 0.243989497423172, | |
| "learning_rate": 4.682530025244514e-05, | |
| "loss": 0.6305, | |
| "step": 4200 | |
| }, | |
| { | |
| "epoch": 1.02, | |
| "grad_norm": 0.21361881494522095, | |
| "learning_rate": 4.6805819068929925e-05, | |
| "loss": 0.5969, | |
| "step": 4210 | |
| }, | |
| { | |
| "epoch": 1.02, | |
| "grad_norm": 0.22126366198062897, | |
| "learning_rate": 4.678628237421348e-05, | |
| "loss": 0.6192, | |
| "step": 4220 | |
| }, | |
| { | |
| "epoch": 1.02, | |
| "grad_norm": 0.2569611370563507, | |
| "learning_rate": 4.6766690218030495e-05, | |
| "loss": 0.6235, | |
| "step": 4230 | |
| }, | |
| { | |
| "epoch": 1.02, | |
| "grad_norm": 0.26861709356307983, | |
| "learning_rate": 4.674704265025683e-05, | |
| "loss": 0.6413, | |
| "step": 4240 | |
| }, | |
| { | |
| "epoch": 1.03, | |
| "grad_norm": 0.19330435991287231, | |
| "learning_rate": 4.672733972090943e-05, | |
| "loss": 0.6413, | |
| "step": 4250 | |
| }, | |
| { | |
| "epoch": 1.03, | |
| "grad_norm": 0.25647568702697754, | |
| "learning_rate": 4.6707581480146136e-05, | |
| "loss": 0.6411, | |
| "step": 4260 | |
| }, | |
| { | |
| "epoch": 1.03, | |
| "grad_norm": 0.23879855871200562, | |
| "learning_rate": 4.6687767978265625e-05, | |
| "loss": 0.6188, | |
| "step": 4270 | |
| }, | |
| { | |
| "epoch": 1.03, | |
| "grad_norm": 0.25180870294570923, | |
| "learning_rate": 4.666789926570725e-05, | |
| "loss": 0.6388, | |
| "step": 4280 | |
| }, | |
| { | |
| "epoch": 1.04, | |
| "grad_norm": 0.26070743799209595, | |
| "learning_rate": 4.6647975393050904e-05, | |
| "loss": 0.6254, | |
| "step": 4290 | |
| }, | |
| { | |
| "epoch": 1.04, | |
| "grad_norm": 0.2934954762458801, | |
| "learning_rate": 4.662799641101691e-05, | |
| "loss": 0.6089, | |
| "step": 4300 | |
| }, | |
| { | |
| "epoch": 1.04, | |
| "grad_norm": 0.24827881157398224, | |
| "learning_rate": 4.6607962370465866e-05, | |
| "loss": 0.6233, | |
| "step": 4310 | |
| }, | |
| { | |
| "epoch": 1.04, | |
| "grad_norm": 0.7880712151527405, | |
| "learning_rate": 4.658787332239856e-05, | |
| "loss": 0.6305, | |
| "step": 4320 | |
| }, | |
| { | |
| "epoch": 1.05, | |
| "grad_norm": 0.23941326141357422, | |
| "learning_rate": 4.6567729317955796e-05, | |
| "loss": 0.6449, | |
| "step": 4330 | |
| }, | |
| { | |
| "epoch": 1.05, | |
| "grad_norm": 0.21050909161567688, | |
| "learning_rate": 4.654753040841829e-05, | |
| "loss": 0.6324, | |
| "step": 4340 | |
| }, | |
| { | |
| "epoch": 1.05, | |
| "grad_norm": 0.2372525930404663, | |
| "learning_rate": 4.6527276645206516e-05, | |
| "loss": 0.6098, | |
| "step": 4350 | |
| }, | |
| { | |
| "epoch": 1.05, | |
| "grad_norm": 0.23655132949352264, | |
| "learning_rate": 4.65069680798806e-05, | |
| "loss": 0.6457, | |
| "step": 4360 | |
| }, | |
| { | |
| "epoch": 1.06, | |
| "grad_norm": 0.26810187101364136, | |
| "learning_rate": 4.648660476414017e-05, | |
| "loss": 0.6327, | |
| "step": 4370 | |
| }, | |
| { | |
| "epoch": 1.06, | |
| "grad_norm": 0.2635047137737274, | |
| "learning_rate": 4.6466186749824235e-05, | |
| "loss": 0.6359, | |
| "step": 4380 | |
| }, | |
| { | |
| "epoch": 1.06, | |
| "grad_norm": 0.2955858111381531, | |
| "learning_rate": 4.6445714088911076e-05, | |
| "loss": 0.6412, | |
| "step": 4390 | |
| }, | |
| { | |
| "epoch": 1.06, | |
| "grad_norm": 0.2971380949020386, | |
| "learning_rate": 4.6425186833518054e-05, | |
| "loss": 0.61, | |
| "step": 4400 | |
| }, | |
| { | |
| "epoch": 1.07, | |
| "grad_norm": 0.22041000425815582, | |
| "learning_rate": 4.6404605035901505e-05, | |
| "loss": 0.6494, | |
| "step": 4410 | |
| }, | |
| { | |
| "epoch": 1.07, | |
| "grad_norm": 0.2463519424200058, | |
| "learning_rate": 4.638396874845666e-05, | |
| "loss": 0.6223, | |
| "step": 4420 | |
| }, | |
| { | |
| "epoch": 1.07, | |
| "grad_norm": 0.2519693374633789, | |
| "learning_rate": 4.636327802371742e-05, | |
| "loss": 0.6399, | |
| "step": 4430 | |
| }, | |
| { | |
| "epoch": 1.07, | |
| "grad_norm": 0.23022256791591644, | |
| "learning_rate": 4.6342532914356284e-05, | |
| "loss": 0.6106, | |
| "step": 4440 | |
| }, | |
| { | |
| "epoch": 1.08, | |
| "grad_norm": 0.21000300347805023, | |
| "learning_rate": 4.632173347318421e-05, | |
| "loss": 0.6463, | |
| "step": 4450 | |
| }, | |
| { | |
| "epoch": 1.08, | |
| "grad_norm": 0.2383776307106018, | |
| "learning_rate": 4.630087975315045e-05, | |
| "loss": 0.6202, | |
| "step": 4460 | |
| }, | |
| { | |
| "epoch": 1.08, | |
| "grad_norm": 0.2528972327709198, | |
| "learning_rate": 4.627997180734244e-05, | |
| "loss": 0.6401, | |
| "step": 4470 | |
| }, | |
| { | |
| "epoch": 1.08, | |
| "grad_norm": 0.20749187469482422, | |
| "learning_rate": 4.625900968898565e-05, | |
| "loss": 0.6541, | |
| "step": 4480 | |
| }, | |
| { | |
| "epoch": 1.09, | |
| "grad_norm": 0.2283320277929306, | |
| "learning_rate": 4.623799345144348e-05, | |
| "loss": 0.6311, | |
| "step": 4490 | |
| }, | |
| { | |
| "epoch": 1.09, | |
| "grad_norm": 0.21833132207393646, | |
| "learning_rate": 4.6216923148217096e-05, | |
| "loss": 0.6272, | |
| "step": 4500 | |
| }, | |
| { | |
| "epoch": 1.09, | |
| "eval_loss": 0.6476519107818604, | |
| "eval_runtime": 67.279, | |
| "eval_samples_per_second": 29.727, | |
| "eval_steps_per_second": 0.936, | |
| "step": 4500 | |
| }, | |
| { | |
| "epoch": 1.09, | |
| "grad_norm": 0.2190130203962326, | |
| "learning_rate": 4.619579883294528e-05, | |
| "loss": 0.6405, | |
| "step": 4510 | |
| }, | |
| { | |
| "epoch": 1.09, | |
| "grad_norm": 0.32068029046058655, | |
| "learning_rate": 4.617462055940433e-05, | |
| "loss": 0.6101, | |
| "step": 4520 | |
| }, | |
| { | |
| "epoch": 1.09, | |
| "grad_norm": 0.2526598572731018, | |
| "learning_rate": 4.6153388381507886e-05, | |
| "loss": 0.6252, | |
| "step": 4530 | |
| }, | |
| { | |
| "epoch": 1.1, | |
| "grad_norm": 0.19768927991390228, | |
| "learning_rate": 4.613210235330686e-05, | |
| "loss": 0.6138, | |
| "step": 4540 | |
| }, | |
| { | |
| "epoch": 1.1, | |
| "grad_norm": 0.24341870844364166, | |
| "learning_rate": 4.611076252898919e-05, | |
| "loss": 0.6072, | |
| "step": 4550 | |
| }, | |
| { | |
| "epoch": 1.1, | |
| "grad_norm": 0.2283903807401657, | |
| "learning_rate": 4.60893689628798e-05, | |
| "loss": 0.6396, | |
| "step": 4560 | |
| }, | |
| { | |
| "epoch": 1.1, | |
| "grad_norm": 0.21138328313827515, | |
| "learning_rate": 4.606792170944041e-05, | |
| "loss": 0.6306, | |
| "step": 4570 | |
| }, | |
| { | |
| "epoch": 1.11, | |
| "grad_norm": 0.4939797818660736, | |
| "learning_rate": 4.604642082326944e-05, | |
| "loss": 0.6259, | |
| "step": 4580 | |
| }, | |
| { | |
| "epoch": 1.11, | |
| "grad_norm": 0.22861245274543762, | |
| "learning_rate": 4.60248663591018e-05, | |
| "loss": 0.6274, | |
| "step": 4590 | |
| }, | |
| { | |
| "epoch": 1.11, | |
| "grad_norm": 0.35914289951324463, | |
| "learning_rate": 4.6003258371808825e-05, | |
| "loss": 0.6357, | |
| "step": 4600 | |
| }, | |
| { | |
| "epoch": 1.11, | |
| "grad_norm": 0.2684130072593689, | |
| "learning_rate": 4.598159691639809e-05, | |
| "loss": 0.6299, | |
| "step": 4610 | |
| }, | |
| { | |
| "epoch": 1.12, | |
| "grad_norm": 0.2696840763092041, | |
| "learning_rate": 4.5959882048013294e-05, | |
| "loss": 0.6224, | |
| "step": 4620 | |
| }, | |
| { | |
| "epoch": 1.12, | |
| "grad_norm": 0.21357202529907227, | |
| "learning_rate": 4.59381138219341e-05, | |
| "loss": 0.6119, | |
| "step": 4630 | |
| }, | |
| { | |
| "epoch": 1.12, | |
| "grad_norm": 0.20233683288097382, | |
| "learning_rate": 4.591629229357601e-05, | |
| "loss": 0.5956, | |
| "step": 4640 | |
| }, | |
| { | |
| "epoch": 1.12, | |
| "grad_norm": 0.2905120849609375, | |
| "learning_rate": 4.5894417518490225e-05, | |
| "loss": 0.6106, | |
| "step": 4650 | |
| }, | |
| { | |
| "epoch": 1.13, | |
| "grad_norm": 0.2116033285856247, | |
| "learning_rate": 4.5872489552363475e-05, | |
| "loss": 0.6218, | |
| "step": 4660 | |
| }, | |
| { | |
| "epoch": 1.13, | |
| "grad_norm": 0.22969362139701843, | |
| "learning_rate": 4.585050845101791e-05, | |
| "loss": 0.6419, | |
| "step": 4670 | |
| }, | |
| { | |
| "epoch": 1.13, | |
| "grad_norm": 0.23150619864463806, | |
| "learning_rate": 4.582847427041097e-05, | |
| "loss": 0.6128, | |
| "step": 4680 | |
| }, | |
| { | |
| "epoch": 1.13, | |
| "grad_norm": 0.23322267830371857, | |
| "learning_rate": 4.580638706663517e-05, | |
| "loss": 0.6114, | |
| "step": 4690 | |
| }, | |
| { | |
| "epoch": 1.14, | |
| "grad_norm": 0.23113666474819183, | |
| "learning_rate": 4.578424689591805e-05, | |
| "loss": 0.6173, | |
| "step": 4700 | |
| }, | |
| { | |
| "epoch": 1.14, | |
| "grad_norm": 0.2632519602775574, | |
| "learning_rate": 4.5762053814621975e-05, | |
| "loss": 0.643, | |
| "step": 4710 | |
| }, | |
| { | |
| "epoch": 1.14, | |
| "grad_norm": 0.19099248945713043, | |
| "learning_rate": 4.573980787924399e-05, | |
| "loss": 0.6201, | |
| "step": 4720 | |
| }, | |
| { | |
| "epoch": 1.14, | |
| "grad_norm": 0.23567742109298706, | |
| "learning_rate": 4.5717509146415705e-05, | |
| "loss": 0.6163, | |
| "step": 4730 | |
| }, | |
| { | |
| "epoch": 1.15, | |
| "grad_norm": 0.21497173607349396, | |
| "learning_rate": 4.5695157672903144e-05, | |
| "loss": 0.6226, | |
| "step": 4740 | |
| }, | |
| { | |
| "epoch": 1.15, | |
| "grad_norm": 0.22423885762691498, | |
| "learning_rate": 4.567275351560658e-05, | |
| "loss": 0.6052, | |
| "step": 4750 | |
| }, | |
| { | |
| "epoch": 1.15, | |
| "grad_norm": 0.2486392855644226, | |
| "learning_rate": 4.5650296731560396e-05, | |
| "loss": 0.6102, | |
| "step": 4760 | |
| }, | |
| { | |
| "epoch": 1.15, | |
| "grad_norm": 0.19872063398361206, | |
| "learning_rate": 4.562778737793298e-05, | |
| "loss": 0.633, | |
| "step": 4770 | |
| }, | |
| { | |
| "epoch": 1.16, | |
| "grad_norm": 0.19403629004955292, | |
| "learning_rate": 4.560522551202651e-05, | |
| "loss": 0.6074, | |
| "step": 4780 | |
| }, | |
| { | |
| "epoch": 1.16, | |
| "grad_norm": 0.2101258486509323, | |
| "learning_rate": 4.558261119127686e-05, | |
| "loss": 0.6019, | |
| "step": 4790 | |
| }, | |
| { | |
| "epoch": 1.16, | |
| "grad_norm": 0.2554053068161011, | |
| "learning_rate": 4.555994447325344e-05, | |
| "loss": 0.6095, | |
| "step": 4800 | |
| }, | |
| { | |
| "epoch": 1.16, | |
| "grad_norm": 0.3209736943244934, | |
| "learning_rate": 4.5537225415659054e-05, | |
| "loss": 0.6095, | |
| "step": 4810 | |
| }, | |
| { | |
| "epoch": 1.16, | |
| "grad_norm": 0.1882716864347458, | |
| "learning_rate": 4.551445407632973e-05, | |
| "loss": 0.624, | |
| "step": 4820 | |
| }, | |
| { | |
| "epoch": 1.17, | |
| "grad_norm": 0.219781294465065, | |
| "learning_rate": 4.549163051323461e-05, | |
| "loss": 0.6212, | |
| "step": 4830 | |
| }, | |
| { | |
| "epoch": 1.17, | |
| "grad_norm": 0.1968742161989212, | |
| "learning_rate": 4.5468754784475764e-05, | |
| "loss": 0.6088, | |
| "step": 4840 | |
| }, | |
| { | |
| "epoch": 1.17, | |
| "grad_norm": 0.23859383165836334, | |
| "learning_rate": 4.5445826948288074e-05, | |
| "loss": 0.6429, | |
| "step": 4850 | |
| }, | |
| { | |
| "epoch": 1.17, | |
| "grad_norm": 0.21909688413143158, | |
| "learning_rate": 4.542284706303906e-05, | |
| "loss": 0.6163, | |
| "step": 4860 | |
| }, | |
| { | |
| "epoch": 1.18, | |
| "grad_norm": 0.23673193156719208, | |
| "learning_rate": 4.539981518722876e-05, | |
| "loss": 0.6261, | |
| "step": 4870 | |
| }, | |
| { | |
| "epoch": 1.18, | |
| "grad_norm": 0.23555058240890503, | |
| "learning_rate": 4.537673137948954e-05, | |
| "loss": 0.598, | |
| "step": 4880 | |
| }, | |
| { | |
| "epoch": 1.18, | |
| "grad_norm": 0.3052713871002197, | |
| "learning_rate": 4.5353595698586e-05, | |
| "loss": 0.6485, | |
| "step": 4890 | |
| }, | |
| { | |
| "epoch": 1.18, | |
| "grad_norm": 0.2587834298610687, | |
| "learning_rate": 4.533040820341477e-05, | |
| "loss": 0.6104, | |
| "step": 4900 | |
| }, | |
| { | |
| "epoch": 1.19, | |
| "grad_norm": 0.2710438370704651, | |
| "learning_rate": 4.53071689530044e-05, | |
| "loss": 0.6286, | |
| "step": 4910 | |
| }, | |
| { | |
| "epoch": 1.19, | |
| "grad_norm": 0.2709255516529083, | |
| "learning_rate": 4.528387800651517e-05, | |
| "loss": 0.6199, | |
| "step": 4920 | |
| }, | |
| { | |
| "epoch": 1.19, | |
| "grad_norm": 0.3711196780204773, | |
| "learning_rate": 4.5260535423239e-05, | |
| "loss": 0.6303, | |
| "step": 4930 | |
| }, | |
| { | |
| "epoch": 1.19, | |
| "grad_norm": 0.2359853833913803, | |
| "learning_rate": 4.523714126259923e-05, | |
| "loss": 0.6232, | |
| "step": 4940 | |
| }, | |
| { | |
| "epoch": 1.2, | |
| "grad_norm": 0.2610563039779663, | |
| "learning_rate": 4.5213695584150495e-05, | |
| "loss": 0.6266, | |
| "step": 4950 | |
| }, | |
| { | |
| "epoch": 1.2, | |
| "grad_norm": 0.23091551661491394, | |
| "learning_rate": 4.519019844757863e-05, | |
| "loss": 0.6264, | |
| "step": 4960 | |
| }, | |
| { | |
| "epoch": 1.2, | |
| "grad_norm": 0.20323573052883148, | |
| "learning_rate": 4.516664991270041e-05, | |
| "loss": 0.6137, | |
| "step": 4970 | |
| }, | |
| { | |
| "epoch": 1.2, | |
| "grad_norm": 0.2040981501340866, | |
| "learning_rate": 4.5143050039463476e-05, | |
| "loss": 0.6433, | |
| "step": 4980 | |
| }, | |
| { | |
| "epoch": 1.21, | |
| "grad_norm": 0.24421969056129456, | |
| "learning_rate": 4.511939888794617e-05, | |
| "loss": 0.6221, | |
| "step": 4990 | |
| }, | |
| { | |
| "epoch": 1.21, | |
| "grad_norm": 0.22230570018291473, | |
| "learning_rate": 4.5095696518357375e-05, | |
| "loss": 0.6002, | |
| "step": 5000 | |
| }, | |
| { | |
| "epoch": 1.21, | |
| "eval_loss": 0.6445114016532898, | |
| "eval_runtime": 67.1925, | |
| "eval_samples_per_second": 29.765, | |
| "eval_steps_per_second": 0.938, | |
| "step": 5000 | |
| } | |
| ], | |
| "logging_steps": 10, | |
| "max_steps": 20690, | |
| "num_input_tokens_seen": 0, | |
| "num_train_epochs": 5, | |
| "save_steps": 500, | |
| "total_flos": 1.0812897507781116e+19, | |
| "train_batch_size": 4, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |