{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.4833252779120348,
  "eval_steps": 500,
  "global_step": 2000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "grad_norm": NaN,
      "learning_rate": 2.0000000000000002e-07,
      "loss": 2.0414,
      "step": 10
    },
    {
      "epoch": 0.0,
      "grad_norm": 53.515953063964844,
      "learning_rate": 6.5e-07,
      "loss": 1.8355,
      "step": 20
    },
    {
      "epoch": 0.01,
      "grad_norm": 27.920907974243164,
      "learning_rate": 1.15e-06,
      "loss": 1.5172,
      "step": 30
    },
    {
      "epoch": 0.01,
      "grad_norm": 20.47172737121582,
      "learning_rate": 1.65e-06,
      "loss": 1.2654,
      "step": 40
    },
    {
      "epoch": 0.01,
      "grad_norm": 8.903529167175293,
      "learning_rate": 2.1499999999999997e-06,
      "loss": 1.1095,
      "step": 50
    },
    {
      "epoch": 0.01,
      "grad_norm": 5.902841567993164,
      "learning_rate": 2.65e-06,
      "loss": 1.0017,
      "step": 60
    },
    {
      "epoch": 0.02,
      "grad_norm": 2.9340908527374268,
      "learning_rate": 3.1500000000000003e-06,
      "loss": 0.8846,
      "step": 70
    },
    {
      "epoch": 0.02,
      "grad_norm": 8.701367378234863,
      "learning_rate": 3.6499999999999998e-06,
      "loss": 0.8572,
      "step": 80
    },
    {
      "epoch": 0.02,
      "grad_norm": 1.3205362558364868,
      "learning_rate": 4.15e-06,
      "loss": 0.839,
      "step": 90
    },
    {
      "epoch": 0.02,
      "grad_norm": 3.0334558486938477,
      "learning_rate": 4.65e-06,
      "loss": 0.7838,
      "step": 100
    },
    {
      "epoch": 0.03,
      "grad_norm": 1.8086520433425903,
      "learning_rate": 5.15e-06,
      "loss": 0.7782,
      "step": 110
    },
    {
      "epoch": 0.03,
      "grad_norm": 29.50135040283203,
      "learning_rate": 5.65e-06,
      "loss": 0.7883,
      "step": 120
    },
    {
      "epoch": 0.03,
      "grad_norm": 3.2708683013916016,
      "learning_rate": 6.15e-06,
      "loss": 0.7961,
      "step": 130
    },
    {
      "epoch": 0.03,
      "grad_norm": 33.43790817260742,
      "learning_rate": 6.650000000000001e-06,
      "loss": 0.7626,
      "step": 140
    },
    {
      "epoch": 0.04,
      "grad_norm": 1.2334959506988525,
      "learning_rate": 7.15e-06,
      "loss": 0.7732,
      "step": 150
    },
    {
      "epoch": 0.04,
      "grad_norm": 1.7489686012268066,
      "learning_rate": 7.65e-06,
      "loss": 0.7715,
      "step": 160
    },
    {
      "epoch": 0.04,
      "grad_norm": 3.358823776245117,
      "learning_rate": 8.15e-06,
      "loss": 0.7778,
      "step": 170
    },
    {
      "epoch": 0.04,
      "grad_norm": 4.348245620727539,
      "learning_rate": 8.65e-06,
      "loss": 0.7619,
      "step": 180
    },
    {
      "epoch": 0.05,
      "grad_norm": 0.9023125767707825,
      "learning_rate": 9.15e-06,
      "loss": 0.7746,
      "step": 190
    },
    {
      "epoch": 0.05,
      "grad_norm": 0.9158114790916443,
      "learning_rate": 9.65e-06,
      "loss": 0.7592,
      "step": 200
    },
    {
      "epoch": 0.05,
      "grad_norm": 0.6256226301193237,
      "learning_rate": 1.0150000000000001e-05,
      "loss": 0.7787,
      "step": 210
    },
    {
      "epoch": 0.05,
      "grad_norm": 0.3805778920650482,
      "learning_rate": 1.065e-05,
      "loss": 0.7811,
      "step": 220
    },
    {
      "epoch": 0.06,
      "grad_norm": 0.6968041062355042,
      "learning_rate": 1.115e-05,
      "loss": 0.73,
      "step": 230
    },
    {
      "epoch": 0.06,
      "grad_norm": 0.9035410284996033,
      "learning_rate": 1.1650000000000002e-05,
      "loss": 0.7518,
      "step": 240
    },
    {
      "epoch": 0.06,
      "grad_norm": 1.6727488040924072,
      "learning_rate": 1.215e-05,
      "loss": 0.7805,
      "step": 250
    },
    {
      "epoch": 0.06,
      "grad_norm": 0.4153461158275604,
      "learning_rate": 1.2650000000000001e-05,
      "loss": 0.7706,
      "step": 260
    },
    {
      "epoch": 0.07,
      "grad_norm": 1.4846651554107666,
      "learning_rate": 1.3150000000000001e-05,
      "loss": 0.7492,
      "step": 270
    },
    {
      "epoch": 0.07,
      "grad_norm": 2.6631388664245605,
      "learning_rate": 1.3650000000000001e-05,
      "loss": 0.7681,
      "step": 280
    },
    {
      "epoch": 0.07,
      "grad_norm": 0.6325013041496277,
      "learning_rate": 1.415e-05,
      "loss": 0.7753,
      "step": 290
    },
    {
      "epoch": 0.07,
      "grad_norm": 0.877907395362854,
      "learning_rate": 1.465e-05,
      "loss": 0.7188,
      "step": 300
    },
    {
      "epoch": 0.07,
      "grad_norm": 0.37142279744148254,
      "learning_rate": 1.515e-05,
      "loss": 0.7204,
      "step": 310
    },
    {
      "epoch": 0.08,
      "grad_norm": 0.7337246537208557,
      "learning_rate": 1.565e-05,
      "loss": 0.778,
      "step": 320
    },
    {
      "epoch": 0.08,
      "grad_norm": 0.5847220420837402,
      "learning_rate": 1.6150000000000003e-05,
      "loss": 0.7288,
      "step": 330
    },
    {
      "epoch": 0.08,
      "grad_norm": 3.745180606842041,
      "learning_rate": 1.665e-05,
      "loss": 0.7531,
      "step": 340
    },
    {
      "epoch": 0.08,
      "grad_norm": 0.357301265001297,
      "learning_rate": 1.7150000000000004e-05,
      "loss": 0.7448,
      "step": 350
    },
    {
      "epoch": 0.09,
      "grad_norm": 0.9032486081123352,
      "learning_rate": 1.765e-05,
      "loss": 0.7651,
      "step": 360
    },
    {
      "epoch": 0.09,
      "grad_norm": 0.2864232361316681,
      "learning_rate": 1.815e-05,
      "loss": 0.7193,
      "step": 370
    },
    {
      "epoch": 0.09,
      "grad_norm": 1.8560261726379395,
      "learning_rate": 1.865e-05,
      "loss": 0.7421,
      "step": 380
    },
    {
      "epoch": 0.09,
      "grad_norm": 0.2971792221069336,
      "learning_rate": 1.915e-05,
      "loss": 0.7171,
      "step": 390
    },
    {
      "epoch": 0.1,
      "grad_norm": 0.40850627422332764,
      "learning_rate": 1.9650000000000003e-05,
      "loss": 0.7459,
      "step": 400
    },
    {
      "epoch": 0.1,
      "grad_norm": 0.3934139311313629,
      "learning_rate": 2.0150000000000002e-05,
      "loss": 0.7205,
      "step": 410
    },
    {
      "epoch": 0.1,
      "grad_norm": 1.8674131631851196,
      "learning_rate": 2.065e-05,
      "loss": 0.752,
      "step": 420
    },
    {
      "epoch": 0.1,
      "grad_norm": 0.36707818508148193,
      "learning_rate": 2.115e-05,
      "loss": 0.7494,
      "step": 430
    },
    {
      "epoch": 0.11,
      "grad_norm": 0.5044310092926025,
      "learning_rate": 2.165e-05,
      "loss": 0.7595,
      "step": 440
    },
    {
      "epoch": 0.11,
      "grad_norm": 1.9025150537490845,
      "learning_rate": 2.215e-05,
      "loss": 0.7379,
      "step": 450
    },
    {
      "epoch": 0.11,
      "grad_norm": 0.5004140138626099,
      "learning_rate": 2.265e-05,
      "loss": 0.7886,
      "step": 460
    },
    {
      "epoch": 0.11,
      "grad_norm": 3.544482707977295,
      "learning_rate": 2.3150000000000004e-05,
      "loss": 0.7259,
      "step": 470
    },
    {
      "epoch": 0.12,
      "grad_norm": 0.33023321628570557,
      "learning_rate": 2.365e-05,
      "loss": 0.7333,
      "step": 480
    },
    {
      "epoch": 0.12,
      "grad_norm": 0.3548080325126648,
      "learning_rate": 2.415e-05,
      "loss": 0.7527,
      "step": 490
    },
    {
      "epoch": 0.12,
      "grad_norm": 0.42450150847435,
      "learning_rate": 2.465e-05,
      "loss": 0.7443,
      "step": 500
    },
    {
      "epoch": 0.12,
      "eval_loss": 0.7428915500640869,
      "eval_runtime": 68.3143,
      "eval_samples_per_second": 29.276,
      "eval_steps_per_second": 0.922,
      "step": 500
    },
    {
      "epoch": 0.12,
      "grad_norm": 0.3299656808376312,
      "learning_rate": 2.515e-05,
      "loss": 0.7447,
      "step": 510
    },
    {
      "epoch": 0.13,
      "grad_norm": 0.27258285880088806,
      "learning_rate": 2.5650000000000003e-05,
      "loss": 0.7278,
      "step": 520
    },
    {
      "epoch": 0.13,
      "grad_norm": 0.23753009736537933,
      "learning_rate": 2.6150000000000002e-05,
      "loss": 0.7721,
      "step": 530
    },
    {
      "epoch": 0.13,
      "grad_norm": 1.1074901819229126,
      "learning_rate": 2.6650000000000004e-05,
      "loss": 0.7383,
      "step": 540
    },
    {
      "epoch": 0.13,
      "grad_norm": 0.428067147731781,
      "learning_rate": 2.7150000000000003e-05,
      "loss": 0.7508,
      "step": 550
    },
    {
      "epoch": 0.14,
      "grad_norm": 0.35956478118896484,
      "learning_rate": 2.7650000000000005e-05,
      "loss": 0.7461,
      "step": 560
    },
    {
      "epoch": 0.14,
      "grad_norm": 0.23017314076423645,
      "learning_rate": 2.815e-05,
      "loss": 0.7463,
      "step": 570
    },
    {
      "epoch": 0.14,
      "grad_norm": 0.20370900630950928,
      "learning_rate": 2.865e-05,
      "loss": 0.7405,
      "step": 580
    },
    {
      "epoch": 0.14,
      "grad_norm": 0.24586541950702667,
      "learning_rate": 2.915e-05,
      "loss": 0.6964,
      "step": 590
    },
    {
      "epoch": 0.14,
      "grad_norm": 0.27857884764671326,
      "learning_rate": 2.965e-05,
      "loss": 0.7376,
      "step": 600
    },
    {
      "epoch": 0.15,
      "grad_norm": 0.2655833065509796,
      "learning_rate": 3.015e-05,
      "loss": 0.7496,
      "step": 610
    },
    {
      "epoch": 0.15,
      "grad_norm": 0.17174670100212097,
      "learning_rate": 3.065e-05,
      "loss": 0.7219,
      "step": 620
    },
    {
      "epoch": 0.15,
      "grad_norm": 0.22892743349075317,
      "learning_rate": 3.115e-05,
      "loss": 0.7111,
      "step": 630
    },
    {
      "epoch": 0.15,
      "grad_norm": 0.5913789868354797,
      "learning_rate": 3.1650000000000004e-05,
      "loss": 0.7097,
      "step": 640
    },
    {
      "epoch": 0.16,
      "grad_norm": 0.25620004534721375,
      "learning_rate": 3.215e-05,
      "loss": 0.7306,
      "step": 650
    },
    {
      "epoch": 0.16,
      "grad_norm": 0.3311476409435272,
      "learning_rate": 3.265e-05,
      "loss": 0.7406,
      "step": 660
    },
    {
      "epoch": 0.16,
      "grad_norm": 0.4708893597126007,
      "learning_rate": 3.3150000000000006e-05,
      "loss": 0.7071,
      "step": 670
    },
    {
      "epoch": 0.16,
      "grad_norm": 0.22469288110733032,
      "learning_rate": 3.3650000000000005e-05,
      "loss": 0.7116,
      "step": 680
    },
    {
      "epoch": 0.17,
      "grad_norm": 0.2909330725669861,
      "learning_rate": 3.415e-05,
      "loss": 0.7592,
      "step": 690
    },
    {
      "epoch": 0.17,
      "grad_norm": 0.21274766325950623,
      "learning_rate": 3.465e-05,
      "loss": 0.7144,
      "step": 700
    },
    {
      "epoch": 0.17,
      "grad_norm": 0.23929230868816376,
      "learning_rate": 3.515e-05,
      "loss": 0.7382,
      "step": 710
    },
    {
      "epoch": 0.17,
      "grad_norm": 0.3021218180656433,
      "learning_rate": 3.565e-05,
      "loss": 0.7287,
      "step": 720
    },
    {
      "epoch": 0.18,
      "grad_norm": 0.23557321727275848,
      "learning_rate": 3.615e-05,
      "loss": 0.7475,
      "step": 730
    },
    {
      "epoch": 0.18,
      "grad_norm": 0.22648084163665771,
      "learning_rate": 3.665e-05,
      "loss": 0.7457,
      "step": 740
    },
    {
      "epoch": 0.18,
      "grad_norm": 0.2707761824131012,
      "learning_rate": 3.715e-05,
      "loss": 0.7322,
      "step": 750
    },
    {
      "epoch": 0.18,
      "grad_norm": 0.2799661457538605,
      "learning_rate": 3.765e-05,
      "loss": 0.7682,
      "step": 760
    },
    {
      "epoch": 0.19,
      "grad_norm": 0.326861172914505,
      "learning_rate": 3.8150000000000006e-05,
      "loss": 0.73,
      "step": 770
    },
    {
      "epoch": 0.19,
      "grad_norm": 0.31985723972320557,
      "learning_rate": 3.8650000000000004e-05,
      "loss": 0.744,
      "step": 780
    },
    {
      "epoch": 0.19,
      "grad_norm": 0.21637533605098724,
      "learning_rate": 3.915e-05,
      "loss": 0.7161,
      "step": 790
    },
    {
      "epoch": 0.19,
      "grad_norm": 0.26102888584136963,
      "learning_rate": 3.965e-05,
      "loss": 0.7553,
      "step": 800
    },
    {
      "epoch": 0.2,
      "grad_norm": 0.2469174712896347,
      "learning_rate": 4.015000000000001e-05,
      "loss": 0.7444,
      "step": 810
    },
    {
      "epoch": 0.2,
      "grad_norm": 0.2022310495376587,
      "learning_rate": 4.065e-05,
      "loss": 0.7089,
      "step": 820
    },
    {
      "epoch": 0.2,
      "grad_norm": 0.1729898750782013,
      "learning_rate": 4.115e-05,
      "loss": 0.7122,
      "step": 830
    },
    {
      "epoch": 0.2,
      "grad_norm": 0.22450707852840424,
      "learning_rate": 4.165e-05,
      "loss": 0.7099,
      "step": 840
    },
    {
      "epoch": 0.21,
      "grad_norm": 0.22389698028564453,
      "learning_rate": 4.215e-05,
      "loss": 0.7445,
      "step": 850
    },
    {
      "epoch": 0.21,
      "grad_norm": 0.24962696433067322,
      "learning_rate": 4.265e-05,
      "loss": 0.7052,
      "step": 860
    },
    {
      "epoch": 0.21,
      "grad_norm": 0.23780404031276703,
      "learning_rate": 4.315e-05,
      "loss": 0.7423,
      "step": 870
    },
    {
      "epoch": 0.21,
      "grad_norm": 0.2434270679950714,
      "learning_rate": 4.3650000000000004e-05,
      "loss": 0.7291,
      "step": 880
    },
    {
      "epoch": 0.22,
      "grad_norm": 0.3332350552082062,
      "learning_rate": 4.415e-05,
      "loss": 0.7306,
      "step": 890
    },
    {
      "epoch": 0.22,
      "grad_norm": 0.5084495544433594,
      "learning_rate": 4.465e-05,
      "loss": 0.735,
      "step": 900
    },
    {
      "epoch": 0.22,
      "grad_norm": 0.27552103996276855,
      "learning_rate": 4.5150000000000006e-05,
      "loss": 0.7082,
      "step": 910
    },
    {
      "epoch": 0.22,
      "grad_norm": 0.2184416800737381,
      "learning_rate": 4.5650000000000005e-05,
      "loss": 0.7066,
      "step": 920
    },
    {
      "epoch": 0.22,
      "grad_norm": 0.21738290786743164,
      "learning_rate": 4.6150000000000004e-05,
      "loss": 0.6977,
      "step": 930
    },
    {
      "epoch": 0.23,
      "grad_norm": 0.21888123452663422,
      "learning_rate": 4.665e-05,
      "loss": 0.7083,
      "step": 940
    },
    {
      "epoch": 0.23,
      "grad_norm": 0.2516074776649475,
      "learning_rate": 4.715e-05,
      "loss": 0.7451,
      "step": 950
    },
    {
      "epoch": 0.23,
      "grad_norm": 0.39361557364463806,
      "learning_rate": 4.765e-05,
      "loss": 0.7013,
      "step": 960
    },
    {
      "epoch": 0.23,
      "grad_norm": 0.2461744099855423,
      "learning_rate": 4.815e-05,
      "loss": 0.7325,
      "step": 970
    },
    {
      "epoch": 0.24,
      "grad_norm": 0.2554089426994324,
      "learning_rate": 4.8650000000000003e-05,
      "loss": 0.7384,
      "step": 980
    },
    {
      "epoch": 0.24,
      "grad_norm": 0.2717374861240387,
      "learning_rate": 4.915e-05,
      "loss": 0.7172,
      "step": 990
    },
    {
      "epoch": 0.24,
      "grad_norm": 0.35919347405433655,
      "learning_rate": 4.965e-05,
      "loss": 0.6851,
      "step": 1000
    },
    {
      "epoch": 0.24,
      "eval_loss": 0.7169972658157349,
      "eval_runtime": 67.4497,
      "eval_samples_per_second": 29.652,
      "eval_steps_per_second": 0.934,
      "step": 1000
    },
    {
      "epoch": 0.24,
      "grad_norm": 0.19895273447036743,
      "learning_rate": 4.999999713608037e-05,
      "loss": 0.7104,
      "step": 1010
    },
    {
      "epoch": 0.25,
      "grad_norm": 0.21741873025894165,
      "learning_rate": 4.999994622197174e-05,
      "loss": 0.7217,
      "step": 1020
    },
    {
      "epoch": 0.25,
      "grad_norm": 0.3415576219558716,
      "learning_rate": 4.999983166535371e-05,
      "loss": 0.716,
      "step": 1030
    },
    {
      "epoch": 0.25,
      "grad_norm": 0.22503221035003662,
      "learning_rate": 4.99996534665179e-05,
      "loss": 0.7166,
      "step": 1040
    },
    {
      "epoch": 0.25,
      "grad_norm": 0.22469323873519897,
      "learning_rate": 4.999941162591795e-05,
      "loss": 0.7173,
      "step": 1050
    },
    {
      "epoch": 0.26,
      "grad_norm": 0.24848666787147522,
      "learning_rate": 4.999910614416952e-05,
      "loss": 0.7442,
      "step": 1060
    },
    {
      "epoch": 0.26,
      "grad_norm": 0.23361551761627197,
      "learning_rate": 4.999873702205027e-05,
      "loss": 0.7142,
      "step": 1070
    },
    {
      "epoch": 0.26,
      "grad_norm": 0.2513304054737091,
      "learning_rate": 4.999830426049987e-05,
      "loss": 0.6998,
      "step": 1080
    },
    {
      "epoch": 0.26,
      "grad_norm": 0.2846020758152008,
      "learning_rate": 4.999780786062003e-05,
      "loss": 0.6967,
      "step": 1090
    },
    {
      "epoch": 0.27,
      "grad_norm": 0.2192545235157013,
      "learning_rate": 4.999724782367441e-05,
      "loss": 0.7259,
      "step": 1100
    },
    {
      "epoch": 0.27,
      "grad_norm": 0.37123605608940125,
      "learning_rate": 4.999662415108872e-05,
      "loss": 0.7228,
      "step": 1110
    },
    {
      "epoch": 0.27,
      "grad_norm": 0.3963713049888611,
      "learning_rate": 4.999593684445063e-05,
      "loss": 0.6848,
      "step": 1120
    },
    {
      "epoch": 0.27,
      "grad_norm": 0.24653227627277374,
      "learning_rate": 4.9995185905509836e-05,
      "loss": 0.7181,
      "step": 1130
    },
    {
      "epoch": 0.28,
      "grad_norm": 0.25218069553375244,
      "learning_rate": 4.999437133617799e-05,
      "loss": 0.7263,
      "step": 1140
    },
    {
      "epoch": 0.28,
      "grad_norm": 0.20412948727607727,
      "learning_rate": 4.9993493138528765e-05,
      "loss": 0.6965,
      "step": 1150
    },
    {
      "epoch": 0.28,
      "grad_norm": 0.21635311841964722,
      "learning_rate": 4.9992551314797775e-05,
      "loss": 0.7042,
      "step": 1160
    },
    {
      "epoch": 0.28,
      "grad_norm": 0.21901027858257294,
      "learning_rate": 4.999154586738264e-05,
      "loss": 0.6967,
      "step": 1170
    },
    {
      "epoch": 0.29,
      "grad_norm": 0.24104245007038116,
      "learning_rate": 4.9990476798842935e-05,
      "loss": 0.7009,
      "step": 1180
    },
    {
      "epoch": 0.29,
      "grad_norm": 0.2433982491493225,
      "learning_rate": 4.998934411190018e-05,
      "loss": 0.7052,
      "step": 1190
    },
    {
      "epoch": 0.29,
      "grad_norm": 0.5261262655258179,
      "learning_rate": 4.9988147809437876e-05,
      "loss": 0.7103,
      "step": 1200
    },
    {
      "epoch": 0.29,
      "grad_norm": 0.36266064643859863,
      "learning_rate": 4.998688789450146e-05,
      "loss": 0.7028,
      "step": 1210
    },
    {
      "epoch": 0.29,
      "grad_norm": 0.2509474754333496,
      "learning_rate": 4.9985564370298274e-05,
      "loss": 0.7047,
      "step": 1220
    },
    {
      "epoch": 0.3,
      "grad_norm": 0.1925598382949829,
      "learning_rate": 4.9984177240197665e-05,
      "loss": 0.6943,
      "step": 1230
    },
    {
      "epoch": 0.3,
      "grad_norm": 0.22494031488895416,
      "learning_rate": 4.998272650773083e-05,
      "loss": 0.6953,
      "step": 1240
    },
    {
      "epoch": 0.3,
      "grad_norm": 0.24152888357639313,
      "learning_rate": 4.998121217659092e-05,
      "loss": 0.7181,
      "step": 1250
    },
    {
      "epoch": 0.3,
      "grad_norm": 0.24296504259109497,
      "learning_rate": 4.997963425063297e-05,
      "loss": 0.6934,
      "step": 1260
    },
    {
      "epoch": 0.31,
      "grad_norm": 0.2438274323940277,
      "learning_rate": 4.9977992733873906e-05,
      "loss": 0.7088,
      "step": 1270
    },
    {
      "epoch": 0.31,
      "grad_norm": 0.21081580221652985,
      "learning_rate": 4.997628763049257e-05,
      "loss": 0.6833,
      "step": 1280
    },
    {
      "epoch": 0.31,
      "grad_norm": 0.2111068218946457,
      "learning_rate": 4.9974518944829626e-05,
      "loss": 0.6922,
      "step": 1290
    },
    {
      "epoch": 0.31,
      "grad_norm": 0.309772253036499,
      "learning_rate": 4.997268668138766e-05,
      "loss": 0.7195,
      "step": 1300
    },
    {
      "epoch": 0.32,
      "grad_norm": 0.2199670672416687,
      "learning_rate": 4.997079084483105e-05,
      "loss": 0.6924,
      "step": 1310
    },
    {
      "epoch": 0.32,
      "grad_norm": 0.21781674027442932,
      "learning_rate": 4.996883143998605e-05,
      "loss": 0.6873,
      "step": 1320
    },
    {
      "epoch": 0.32,
      "grad_norm": 0.2969052791595459,
      "learning_rate": 4.996680847184072e-05,
      "loss": 0.7135,
      "step": 1330
    },
    {
      "epoch": 0.32,
      "grad_norm": 0.23092682659626007,
      "learning_rate": 4.996472194554495e-05,
      "loss": 0.6827,
      "step": 1340
    },
    {
      "epoch": 0.33,
      "grad_norm": 0.29615628719329834,
      "learning_rate": 4.996257186641042e-05,
      "loss": 0.6738,
      "step": 1350
    },
    {
      "epoch": 0.33,
      "grad_norm": 0.23491479456424713,
      "learning_rate": 4.99603582399106e-05,
      "loss": 0.7396,
      "step": 1360
    },
    {
      "epoch": 0.33,
      "grad_norm": 0.1986425668001175,
      "learning_rate": 4.9958081071680726e-05,
      "loss": 0.6787,
      "step": 1370
    },
    {
      "epoch": 0.33,
      "grad_norm": 0.19658410549163818,
      "learning_rate": 4.99557403675178e-05,
      "loss": 0.66,
      "step": 1380
    },
    {
      "epoch": 0.34,
      "grad_norm": 0.29404616355895996,
      "learning_rate": 4.995333613338057e-05,
      "loss": 0.6824,
      "step": 1390
    },
    {
      "epoch": 0.34,
      "grad_norm": 0.23689547181129456,
      "learning_rate": 4.9950868375389514e-05,
      "loss": 0.7008,
      "step": 1400
    },
    {
      "epoch": 0.34,
      "grad_norm": 0.22758638858795166,
      "learning_rate": 4.99483370998268e-05,
      "loss": 0.6946,
      "step": 1410
    },
    {
      "epoch": 0.34,
      "grad_norm": 0.20259805023670197,
      "learning_rate": 4.994574231313634e-05,
      "loss": 0.6734,
      "step": 1420
    },
    {
      "epoch": 0.35,
      "grad_norm": 0.21935436129570007,
      "learning_rate": 4.994308402192366e-05,
      "loss": 0.7094,
      "step": 1430
    },
    {
      "epoch": 0.35,
      "grad_norm": 0.23414015769958496,
      "learning_rate": 4.9940362232956026e-05,
      "loss": 0.7019,
      "step": 1440
    },
    {
      "epoch": 0.35,
      "grad_norm": 0.2623121440410614,
      "learning_rate": 4.993757695316228e-05,
      "loss": 0.6793,
      "step": 1450
    },
    {
      "epoch": 0.35,
      "grad_norm": 0.31137487292289734,
      "learning_rate": 4.993472818963295e-05,
      "loss": 0.6902,
      "step": 1460
    },
    {
      "epoch": 0.36,
      "grad_norm": 0.22092680633068085,
      "learning_rate": 4.993181594962013e-05,
      "loss": 0.6912,
      "step": 1470
    },
    {
      "epoch": 0.36,
      "grad_norm": 0.23723623156547546,
      "learning_rate": 4.992884024053754e-05,
      "loss": 0.652,
      "step": 1480
    },
    {
      "epoch": 0.36,
      "grad_norm": 0.26028233766555786,
      "learning_rate": 4.9925801069960454e-05,
      "loss": 0.6969,
      "step": 1490
    },
    {
      "epoch": 0.36,
      "grad_norm": 0.23856200277805328,
      "learning_rate": 4.992269844562572e-05,
      "loss": 0.6723,
      "step": 1500
    },
    {
      "epoch": 0.36,
      "eval_loss": 0.6912096738815308,
      "eval_runtime": 67.244,
      "eval_samples_per_second": 29.742,
      "eval_steps_per_second": 0.937,
      "step": 1500
    },
    {
      "epoch": 0.36,
      "grad_norm": 0.167326420545578,
      "learning_rate": 4.9919532375431677e-05,
      "loss": 0.6897,
      "step": 1510
    },
    {
      "epoch": 0.37,
      "grad_norm": 0.26183101534843445,
      "learning_rate": 4.991630286743823e-05,
      "loss": 0.6844,
      "step": 1520
    },
    {
      "epoch": 0.37,
      "grad_norm": 0.25918036699295044,
      "learning_rate": 4.991300992986676e-05,
      "loss": 0.6645,
      "step": 1530
    },
    {
      "epoch": 0.37,
      "grad_norm": 0.2354690134525299,
      "learning_rate": 4.99096535711001e-05,
      "loss": 0.6672,
      "step": 1540
    },
    {
      "epoch": 0.37,
      "grad_norm": 0.24444998800754547,
      "learning_rate": 4.990623379968257e-05,
      "loss": 0.6749,
      "step": 1550
    },
    {
      "epoch": 0.38,
      "grad_norm": 0.2562585771083832,
      "learning_rate": 4.990275062431989e-05,
      "loss": 0.6652,
      "step": 1560
    },
    {
      "epoch": 0.38,
      "grad_norm": 0.22618699073791504,
      "learning_rate": 4.98992040538792e-05,
      "loss": 0.7163,
      "step": 1570
    },
    {
      "epoch": 0.38,
      "grad_norm": 0.20957091450691223,
      "learning_rate": 4.9895594097389044e-05,
      "loss": 0.7053,
      "step": 1580
    },
    {
      "epoch": 0.38,
      "grad_norm": 0.26973310112953186,
      "learning_rate": 4.989192076403928e-05,
      "loss": 0.6716,
      "step": 1590
    },
    {
      "epoch": 0.39,
      "grad_norm": 0.24841250479221344,
      "learning_rate": 4.9888184063181154e-05,
      "loss": 0.6884,
      "step": 1600
    },
    {
      "epoch": 0.39,
      "grad_norm": 0.25283706188201904,
      "learning_rate": 4.98843840043272e-05,
      "loss": 0.6632,
      "step": 1610
    },
    {
      "epoch": 0.39,
      "grad_norm": 0.196313738822937,
      "learning_rate": 4.988052059715126e-05,
      "loss": 0.712,
      "step": 1620
    },
    {
      "epoch": 0.39,
      "grad_norm": 0.21621187031269073,
      "learning_rate": 4.987659385148842e-05,
      "loss": 0.6732,
      "step": 1630
    },
    {
      "epoch": 0.4,
      "grad_norm": 0.2180820256471634,
      "learning_rate": 4.987260377733502e-05,
      "loss": 0.7045,
      "step": 1640
    },
    {
      "epoch": 0.4,
      "grad_norm": 0.20110243558883667,
      "learning_rate": 4.986855038484862e-05,
      "loss": 0.6604,
      "step": 1650
    },
    {
      "epoch": 0.4,
      "grad_norm": 0.20813095569610596,
      "learning_rate": 4.9864433684347964e-05,
      "loss": 0.7051,
      "step": 1660
    },
    {
      "epoch": 0.4,
      "grad_norm": 0.22026021778583527,
      "learning_rate": 4.9860253686312964e-05,
      "loss": 0.6752,
      "step": 1670
    },
    {
      "epoch": 0.41,
      "grad_norm": 0.23409809172153473,
      "learning_rate": 4.9856010401384654e-05,
      "loss": 0.6805,
      "step": 1680
    },
    {
      "epoch": 0.41,
      "grad_norm": 0.1982721984386444,
      "learning_rate": 4.985170384036521e-05,
      "loss": 0.6888,
      "step": 1690
    },
    {
      "epoch": 0.41,
      "grad_norm": 0.24984286725521088,
      "learning_rate": 4.984733401421785e-05,
      "loss": 0.6741,
      "step": 1700
    },
    {
      "epoch": 0.41,
      "grad_norm": 0.24465042352676392,
      "learning_rate": 4.9842900934066874e-05,
      "loss": 0.6781,
      "step": 1710
    },
    {
      "epoch": 0.42,
      "grad_norm": 0.26656293869018555,
      "learning_rate": 4.98384046111976e-05,
      "loss": 0.7086,
      "step": 1720
    },
    {
      "epoch": 0.42,
      "grad_norm": 0.23073184490203857,
      "learning_rate": 4.9833845057056336e-05,
      "loss": 0.6966,
      "step": 1730
    },
    {
      "epoch": 0.42,
      "grad_norm": 0.23296333849430084,
      "learning_rate": 4.982922228325037e-05,
      "loss": 0.6769,
      "step": 1740
    },
    {
      "epoch": 0.42,
      "grad_norm": 0.22282017767429352,
      "learning_rate": 4.982453630154794e-05,
      "loss": 0.6581,
      "step": 1750
    },
    {
      "epoch": 0.43,
      "grad_norm": 0.22943562269210815,
      "learning_rate": 4.981978712387815e-05,
      "loss": 0.6803,
      "step": 1760
    },
    {
      "epoch": 0.43,
      "grad_norm": 0.29295608401298523,
      "learning_rate": 4.9814974762331034e-05,
      "loss": 0.6514,
      "step": 1770
    },
    {
      "epoch": 0.43,
      "grad_norm": 0.21845972537994385,
      "learning_rate": 4.981009922915743e-05,
      "loss": 0.691,
      "step": 1780
    },
    {
      "epoch": 0.43,
      "grad_norm": 0.24616730213165283,
      "learning_rate": 4.980516053676903e-05,
      "loss": 0.6948,
      "step": 1790
    },
    {
      "epoch": 0.43,
      "grad_norm": 0.2354935109615326,
      "learning_rate": 4.9800158697738264e-05,
      "loss": 0.6778,
      "step": 1800
    },
    {
      "epoch": 0.44,
      "grad_norm": 0.3475668430328369,
      "learning_rate": 4.979509372479837e-05,
      "loss": 0.6626,
      "step": 1810
    },
    {
      "epoch": 0.44,
      "grad_norm": 0.2817700207233429,
      "learning_rate": 4.9789965630843265e-05,
      "loss": 0.6724,
      "step": 1820
    },
    {
      "epoch": 0.44,
      "grad_norm": 0.28306934237480164,
      "learning_rate": 4.978477442892758e-05,
      "loss": 0.6673,
      "step": 1830
    },
    {
      "epoch": 0.44,
      "grad_norm": 0.2593139111995697,
      "learning_rate": 4.9779520132266575e-05,
      "loss": 0.6866,
      "step": 1840
    },
    {
      "epoch": 0.45,
      "grad_norm": 0.2618165612220764,
      "learning_rate": 4.9774202754236145e-05,
      "loss": 0.6671,
      "step": 1850
    },
    {
      "epoch": 0.45,
      "grad_norm": 0.22875259816646576,
      "learning_rate": 4.9768822308372784e-05,
      "loss": 0.6743,
      "step": 1860
    },
    {
      "epoch": 0.45,
      "grad_norm": 0.20143887400627136,
      "learning_rate": 4.976337880837351e-05,
      "loss": 0.6866,
      "step": 1870
    },
    {
      "epoch": 0.45,
      "grad_norm": 0.23163306713104248,
      "learning_rate": 4.975787226809587e-05,
      "loss": 0.6772,
      "step": 1880
    },
    {
      "epoch": 0.46,
      "grad_norm": 0.21628862619400024,
      "learning_rate": 4.975230270155791e-05,
      "loss": 0.6706,
      "step": 1890
    },
    {
      "epoch": 0.46,
      "grad_norm": 0.21055322885513306,
      "learning_rate": 4.9746670122938105e-05,
      "loss": 0.6536,
      "step": 1900
    },
    {
      "epoch": 0.46,
      "grad_norm": 0.20045976340770721,
      "learning_rate": 4.974097454657534e-05,
      "loss": 0.6477,
      "step": 1910
    },
    {
      "epoch": 0.46,
      "grad_norm": 0.20996223390102386,
      "learning_rate": 4.9735215986968874e-05,
      "loss": 0.6526,
      "step": 1920
    },
    {
      "epoch": 0.47,
      "grad_norm": 0.1778746396303177,
      "learning_rate": 4.972939445877831e-05,
      "loss": 0.6951,
      "step": 1930
    },
    {
      "epoch": 0.47,
      "grad_norm": 0.2416403591632843,
      "learning_rate": 4.972350997682354e-05,
      "loss": 0.6703,
      "step": 1940
    },
    {
      "epoch": 0.47,
      "grad_norm": 0.23652783036231995,
      "learning_rate": 4.9717562556084735e-05,
      "loss": 0.6401,
      "step": 1950
    },
    {
      "epoch": 0.47,
      "grad_norm": 0.23598232865333557,
      "learning_rate": 4.9711552211702274e-05,
      "loss": 0.6668,
      "step": 1960
    },
    {
      "epoch": 0.48,
      "grad_norm": 0.22741574048995972,
      "learning_rate": 4.970547895897672e-05,
      "loss": 0.6765,
      "step": 1970
    },
    {
      "epoch": 0.48,
      "grad_norm": 0.20418991148471832,
      "learning_rate": 4.96993428133688e-05,
      "loss": 0.6771,
      "step": 1980
    },
    {
      "epoch": 0.48,
      "grad_norm": 0.24983160197734833,
      "learning_rate": 4.969314379049932e-05,
      "loss": 0.6656,
      "step": 1990
    },
    {
      "epoch": 0.48,
      "grad_norm": 0.2511340379714966,
      "learning_rate": 4.968688190614919e-05,
      "loss": 0.6605,
      "step": 2000
    },
    {
      "epoch": 0.48,
      "eval_loss": 0.6730201244354248,
      "eval_runtime": 67.2807,
      "eval_samples_per_second": 29.726,
      "eval_steps_per_second": 0.936,
      "step": 2000
    }
  ],
  "logging_steps": 10,
  "max_steps": 20690,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "total_flos": 4.3244811194046874e+18,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}