| { | |
| "best_metric": null, | |
| "best_model_checkpoint": null, | |
| "epoch": 5.0, | |
| "eval_steps": 500, | |
| "global_step": 5740, | |
| "is_hyper_param_search": false, | |
| "is_local_process_zero": true, | |
| "is_world_process_zero": true, | |
| "log_history": [ | |
| { | |
| "epoch": 0.008710801393728223, | |
| "grad_norm": null, | |
| "learning_rate": 3.71949339942388e-07, | |
| "loss": 23.9832, | |
| "step": 10 | |
| }, | |
| { | |
| "epoch": 0.017421602787456445, | |
| "grad_norm": 759.6871337890625, | |
| "learning_rate": 1.2088353548127608e-06, | |
| "loss": 68.5664, | |
| "step": 20 | |
| }, | |
| { | |
| "epoch": 0.02613240418118467, | |
| "grad_norm": 1186.6964111328125, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 113.461, | |
| "step": 30 | |
| }, | |
| { | |
| "epoch": 0.03484320557491289, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 40 | |
| }, | |
| { | |
| "epoch": 0.04355400696864112, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 50 | |
| }, | |
| { | |
| "epoch": 0.05226480836236934, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 60 | |
| }, | |
| { | |
| "epoch": 0.06097560975609756, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 70 | |
| }, | |
| { | |
| "epoch": 0.06968641114982578, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 80 | |
| }, | |
| { | |
| "epoch": 0.078397212543554, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 90 | |
| }, | |
| { | |
| "epoch": 0.08710801393728224, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 100 | |
| }, | |
| { | |
| "epoch": 0.09581881533101046, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 110 | |
| }, | |
| { | |
| "epoch": 0.10452961672473868, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 120 | |
| }, | |
| { | |
| "epoch": 0.1132404181184669, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 130 | |
| }, | |
| { | |
| "epoch": 0.12195121951219512, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 140 | |
| }, | |
| { | |
| "epoch": 0.13066202090592335, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 150 | |
| }, | |
| { | |
| "epoch": 0.13937282229965156, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 160 | |
| }, | |
| { | |
| "epoch": 0.1480836236933798, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 170 | |
| }, | |
| { | |
| "epoch": 0.156794425087108, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 180 | |
| }, | |
| { | |
| "epoch": 0.16550522648083624, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 190 | |
| }, | |
| { | |
| "epoch": 0.17421602787456447, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 200 | |
| }, | |
| { | |
| "epoch": 0.18292682926829268, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 210 | |
| }, | |
| { | |
| "epoch": 0.1916376306620209, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 220 | |
| }, | |
| { | |
| "epoch": 0.20034843205574912, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 230 | |
| }, | |
| { | |
| "epoch": 0.20905923344947736, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 240 | |
| }, | |
| { | |
| "epoch": 0.21777003484320556, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 250 | |
| }, | |
| { | |
| "epoch": 0.2264808362369338, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 260 | |
| }, | |
| { | |
| "epoch": 0.23519163763066203, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 270 | |
| }, | |
| { | |
| "epoch": 0.24390243902439024, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 280 | |
| }, | |
| { | |
| "epoch": 0.25261324041811845, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 290 | |
| }, | |
| { | |
| "epoch": 0.2613240418118467, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 300 | |
| }, | |
| { | |
| "epoch": 0.2700348432055749, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 310 | |
| }, | |
| { | |
| "epoch": 0.2787456445993031, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 320 | |
| }, | |
| { | |
| "epoch": 0.2874564459930314, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 330 | |
| }, | |
| { | |
| "epoch": 0.2961672473867596, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 340 | |
| }, | |
| { | |
| "epoch": 0.3048780487804878, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 350 | |
| }, | |
| { | |
| "epoch": 0.313588850174216, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 360 | |
| }, | |
| { | |
| "epoch": 0.32229965156794427, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 370 | |
| }, | |
| { | |
| "epoch": 0.3310104529616725, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 380 | |
| }, | |
| { | |
| "epoch": 0.3397212543554007, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 390 | |
| }, | |
| { | |
| "epoch": 0.34843205574912894, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 400 | |
| }, | |
| { | |
| "epoch": 0.35714285714285715, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 410 | |
| }, | |
| { | |
| "epoch": 0.36585365853658536, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 420 | |
| }, | |
| { | |
| "epoch": 0.37456445993031356, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 430 | |
| }, | |
| { | |
| "epoch": 0.3832752613240418, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 440 | |
| }, | |
| { | |
| "epoch": 0.39198606271777003, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 450 | |
| }, | |
| { | |
| "epoch": 0.40069686411149824, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 460 | |
| }, | |
| { | |
| "epoch": 0.4094076655052265, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 470 | |
| }, | |
| { | |
| "epoch": 0.4181184668989547, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 480 | |
| }, | |
| { | |
| "epoch": 0.4268292682926829, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 490 | |
| }, | |
| { | |
| "epoch": 0.4355400696864111, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 500 | |
| }, | |
| { | |
| "epoch": 0.4442508710801394, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 510 | |
| }, | |
| { | |
| "epoch": 0.4529616724738676, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 520 | |
| }, | |
| { | |
| "epoch": 0.4616724738675958, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 530 | |
| }, | |
| { | |
| "epoch": 0.47038327526132406, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 540 | |
| }, | |
| { | |
| "epoch": 0.47909407665505227, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 550 | |
| }, | |
| { | |
| "epoch": 0.4878048780487805, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 560 | |
| }, | |
| { | |
| "epoch": 0.4965156794425087, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 570 | |
| }, | |
| { | |
| "epoch": 0.5052264808362369, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 580 | |
| }, | |
| { | |
| "epoch": 0.5139372822299652, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 590 | |
| }, | |
| { | |
| "epoch": 0.5226480836236934, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 600 | |
| }, | |
| { | |
| "epoch": 0.5313588850174216, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 610 | |
| }, | |
| { | |
| "epoch": 0.5400696864111498, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 620 | |
| }, | |
| { | |
| "epoch": 0.5487804878048781, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 630 | |
| }, | |
| { | |
| "epoch": 0.5574912891986062, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 640 | |
| }, | |
| { | |
| "epoch": 0.5662020905923345, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 650 | |
| }, | |
| { | |
| "epoch": 0.5749128919860628, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 660 | |
| }, | |
| { | |
| "epoch": 0.5836236933797909, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 670 | |
| }, | |
| { | |
| "epoch": 0.5923344947735192, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 680 | |
| }, | |
| { | |
| "epoch": 0.6010452961672473, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 690 | |
| }, | |
| { | |
| "epoch": 0.6097560975609756, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 700 | |
| }, | |
| { | |
| "epoch": 0.6184668989547039, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 710 | |
| }, | |
| { | |
| "epoch": 0.627177700348432, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 720 | |
| }, | |
| { | |
| "epoch": 0.6358885017421603, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 730 | |
| }, | |
| { | |
| "epoch": 0.6445993031358885, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 740 | |
| }, | |
| { | |
| "epoch": 0.6533101045296167, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 750 | |
| }, | |
| { | |
| "epoch": 0.662020905923345, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 760 | |
| }, | |
| { | |
| "epoch": 0.6707317073170732, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 770 | |
| }, | |
| { | |
| "epoch": 0.6794425087108014, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 780 | |
| }, | |
| { | |
| "epoch": 0.6881533101045296, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 790 | |
| }, | |
| { | |
| "epoch": 0.6968641114982579, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 800 | |
| }, | |
| { | |
| "epoch": 0.705574912891986, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 810 | |
| }, | |
| { | |
| "epoch": 0.7142857142857143, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 820 | |
| }, | |
| { | |
| "epoch": 0.7229965156794426, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 830 | |
| }, | |
| { | |
| "epoch": 0.7317073170731707, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 840 | |
| }, | |
| { | |
| "epoch": 0.740418118466899, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 850 | |
| }, | |
| { | |
| "epoch": 0.7491289198606271, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 860 | |
| }, | |
| { | |
| "epoch": 0.7578397212543554, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 870 | |
| }, | |
| { | |
| "epoch": 0.7665505226480837, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 880 | |
| }, | |
| { | |
| "epoch": 0.7752613240418118, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 890 | |
| }, | |
| { | |
| "epoch": 0.7839721254355401, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 900 | |
| }, | |
| { | |
| "epoch": 0.7926829268292683, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 910 | |
| }, | |
| { | |
| "epoch": 0.8013937282229965, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 920 | |
| }, | |
| { | |
| "epoch": 0.8101045296167247, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 930 | |
| }, | |
| { | |
| "epoch": 0.818815331010453, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 940 | |
| }, | |
| { | |
| "epoch": 0.8275261324041812, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 950 | |
| }, | |
| { | |
| "epoch": 0.8362369337979094, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 960 | |
| }, | |
| { | |
| "epoch": 0.8449477351916377, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 970 | |
| }, | |
| { | |
| "epoch": 0.8536585365853658, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 980 | |
| }, | |
| { | |
| "epoch": 0.8623693379790941, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 990 | |
| }, | |
| { | |
| "epoch": 0.8710801393728222, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1000 | |
| }, | |
| { | |
| "epoch": 0.8797909407665505, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1010 | |
| }, | |
| { | |
| "epoch": 0.8885017421602788, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1020 | |
| }, | |
| { | |
| "epoch": 0.8972125435540069, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1030 | |
| }, | |
| { | |
| "epoch": 0.9059233449477352, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1040 | |
| }, | |
| { | |
| "epoch": 0.9146341463414634, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1050 | |
| }, | |
| { | |
| "epoch": 0.9233449477351916, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1060 | |
| }, | |
| { | |
| "epoch": 0.9320557491289199, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1070 | |
| }, | |
| { | |
| "epoch": 0.9407665505226481, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1080 | |
| }, | |
| { | |
| "epoch": 0.9494773519163763, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1090 | |
| }, | |
| { | |
| "epoch": 0.9581881533101045, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1100 | |
| }, | |
| { | |
| "epoch": 0.9668989547038328, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1110 | |
| }, | |
| { | |
| "epoch": 0.975609756097561, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1120 | |
| }, | |
| { | |
| "epoch": 0.9843205574912892, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1130 | |
| }, | |
| { | |
| "epoch": 0.9930313588850174, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1140 | |
| }, | |
| { | |
| "epoch": 1.0017421602787457, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1150 | |
| }, | |
| { | |
| "epoch": 1.0104529616724738, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1160 | |
| }, | |
| { | |
| "epoch": 1.019163763066202, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1170 | |
| }, | |
| { | |
| "epoch": 1.0278745644599303, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1180 | |
| }, | |
| { | |
| "epoch": 1.0365853658536586, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1190 | |
| }, | |
| { | |
| "epoch": 1.0452961672473868, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1200 | |
| }, | |
| { | |
| "epoch": 1.054006968641115, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1210 | |
| }, | |
| { | |
| "epoch": 1.0627177700348431, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1220 | |
| }, | |
| { | |
| "epoch": 1.0714285714285714, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1230 | |
| }, | |
| { | |
| "epoch": 1.0801393728222997, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1240 | |
| }, | |
| { | |
| "epoch": 1.088850174216028, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1250 | |
| }, | |
| { | |
| "epoch": 1.0975609756097562, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1260 | |
| }, | |
| { | |
| "epoch": 1.1062717770034842, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1270 | |
| }, | |
| { | |
| "epoch": 1.1149825783972125, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1280 | |
| }, | |
| { | |
| "epoch": 1.1236933797909407, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1290 | |
| }, | |
| { | |
| "epoch": 1.132404181184669, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1300 | |
| }, | |
| { | |
| "epoch": 1.1411149825783973, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1310 | |
| }, | |
| { | |
| "epoch": 1.1498257839721253, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1320 | |
| }, | |
| { | |
| "epoch": 1.1585365853658536, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1330 | |
| }, | |
| { | |
| "epoch": 1.1672473867595818, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1340 | |
| }, | |
| { | |
| "epoch": 1.17595818815331, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1350 | |
| }, | |
| { | |
| "epoch": 1.1846689895470384, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1360 | |
| }, | |
| { | |
| "epoch": 1.1933797909407666, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1370 | |
| }, | |
| { | |
| "epoch": 1.202090592334495, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1380 | |
| }, | |
| { | |
| "epoch": 1.210801393728223, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1390 | |
| }, | |
| { | |
| "epoch": 1.2195121951219512, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1400 | |
| }, | |
| { | |
| "epoch": 1.2282229965156795, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1410 | |
| }, | |
| { | |
| "epoch": 1.2369337979094077, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1420 | |
| }, | |
| { | |
| "epoch": 1.245644599303136, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1430 | |
| }, | |
| { | |
| "epoch": 1.254355400696864, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1440 | |
| }, | |
| { | |
| "epoch": 1.2630662020905923, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1450 | |
| }, | |
| { | |
| "epoch": 1.2717770034843205, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1460 | |
| }, | |
| { | |
| "epoch": 1.2804878048780488, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1470 | |
| }, | |
| { | |
| "epoch": 1.289198606271777, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1480 | |
| }, | |
| { | |
| "epoch": 1.297909407665505, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1490 | |
| }, | |
| { | |
| "epoch": 1.3066202090592334, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1500 | |
| }, | |
| { | |
| "epoch": 1.3153310104529616, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1510 | |
| }, | |
| { | |
| "epoch": 1.32404181184669, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1520 | |
| }, | |
| { | |
| "epoch": 1.3327526132404182, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1530 | |
| }, | |
| { | |
| "epoch": 1.3414634146341464, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1540 | |
| }, | |
| { | |
| "epoch": 1.3501742160278747, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1550 | |
| }, | |
| { | |
| "epoch": 1.3588850174216027, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1560 | |
| }, | |
| { | |
| "epoch": 1.367595818815331, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1570 | |
| }, | |
| { | |
| "epoch": 1.3763066202090593, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1580 | |
| }, | |
| { | |
| "epoch": 1.3850174216027875, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1590 | |
| }, | |
| { | |
| "epoch": 1.3937282229965158, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1600 | |
| }, | |
| { | |
| "epoch": 1.4024390243902438, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1610 | |
| }, | |
| { | |
| "epoch": 1.411149825783972, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1620 | |
| }, | |
| { | |
| "epoch": 1.4198606271777003, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1630 | |
| }, | |
| { | |
| "epoch": 1.4285714285714286, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1640 | |
| }, | |
| { | |
| "epoch": 1.4372822299651569, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1650 | |
| }, | |
| { | |
| "epoch": 1.445993031358885, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1660 | |
| }, | |
| { | |
| "epoch": 1.4547038327526132, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1670 | |
| }, | |
| { | |
| "epoch": 1.4634146341463414, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1680 | |
| }, | |
| { | |
| "epoch": 1.4721254355400697, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1690 | |
| }, | |
| { | |
| "epoch": 1.480836236933798, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1700 | |
| }, | |
| { | |
| "epoch": 1.489547038327526, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1710 | |
| }, | |
| { | |
| "epoch": 1.4982578397212545, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1720 | |
| }, | |
| { | |
| "epoch": 1.5069686411149825, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1730 | |
| }, | |
| { | |
| "epoch": 1.5156794425087108, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1740 | |
| }, | |
| { | |
| "epoch": 1.524390243902439, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1750 | |
| }, | |
| { | |
| "epoch": 1.533101045296167, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1760 | |
| }, | |
| { | |
| "epoch": 1.5418118466898956, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1770 | |
| }, | |
| { | |
| "epoch": 1.5505226480836236, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1780 | |
| }, | |
| { | |
| "epoch": 1.5592334494773519, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1790 | |
| }, | |
| { | |
| "epoch": 1.5679442508710801, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1800 | |
| }, | |
| { | |
| "epoch": 1.5766550522648084, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1810 | |
| }, | |
| { | |
| "epoch": 1.5853658536585367, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1820 | |
| }, | |
| { | |
| "epoch": 1.5940766550522647, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1830 | |
| }, | |
| { | |
| "epoch": 1.6027874564459932, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1840 | |
| }, | |
| { | |
| "epoch": 1.6114982578397212, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1850 | |
| }, | |
| { | |
| "epoch": 1.6202090592334495, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1860 | |
| }, | |
| { | |
| "epoch": 1.6289198606271778, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1870 | |
| }, | |
| { | |
| "epoch": 1.6376306620209058, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1880 | |
| }, | |
| { | |
| "epoch": 1.6463414634146343, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1890 | |
| }, | |
| { | |
| "epoch": 1.6550522648083623, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1900 | |
| }, | |
| { | |
| "epoch": 1.6637630662020906, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1910 | |
| }, | |
| { | |
| "epoch": 1.6724738675958188, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1920 | |
| }, | |
| { | |
| "epoch": 1.6811846689895469, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1930 | |
| }, | |
| { | |
| "epoch": 1.6898954703832754, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1940 | |
| }, | |
| { | |
| "epoch": 1.6986062717770034, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1950 | |
| }, | |
| { | |
| "epoch": 1.7073170731707317, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1960 | |
| }, | |
| { | |
| "epoch": 1.71602787456446, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1970 | |
| }, | |
| { | |
| "epoch": 1.7247386759581882, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1980 | |
| }, | |
| { | |
| "epoch": 1.7334494773519165, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 1990 | |
| }, | |
| { | |
| "epoch": 1.7421602787456445, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2000 | |
| }, | |
| { | |
| "epoch": 1.750871080139373, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2010 | |
| }, | |
| { | |
| "epoch": 1.759581881533101, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2020 | |
| }, | |
| { | |
| "epoch": 1.7682926829268293, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2030 | |
| }, | |
| { | |
| "epoch": 1.7770034843205575, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2040 | |
| }, | |
| { | |
| "epoch": 1.7857142857142856, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2050 | |
| }, | |
| { | |
| "epoch": 1.794425087108014, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2060 | |
| }, | |
| { | |
| "epoch": 1.8031358885017421, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2070 | |
| }, | |
| { | |
| "epoch": 1.8118466898954704, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2080 | |
| }, | |
| { | |
| "epoch": 1.8205574912891986, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2090 | |
| }, | |
| { | |
| "epoch": 1.8292682926829267, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2100 | |
| }, | |
| { | |
| "epoch": 1.8379790940766552, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2110 | |
| }, | |
| { | |
| "epoch": 1.8466898954703832, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2120 | |
| }, | |
| { | |
| "epoch": 1.8554006968641115, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2130 | |
| }, | |
| { | |
| "epoch": 1.8641114982578397, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2140 | |
| }, | |
| { | |
| "epoch": 1.872822299651568, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2150 | |
| }, | |
| { | |
| "epoch": 1.8815331010452963, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2160 | |
| }, | |
| { | |
| "epoch": 1.8902439024390243, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2170 | |
| }, | |
| { | |
| "epoch": 1.8989547038327528, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2180 | |
| }, | |
| { | |
| "epoch": 1.9076655052264808, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2190 | |
| }, | |
| { | |
| "epoch": 1.916376306620209, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2200 | |
| }, | |
| { | |
| "epoch": 1.9250871080139373, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2210 | |
| }, | |
| { | |
| "epoch": 1.9337979094076654, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2220 | |
| }, | |
| { | |
| "epoch": 1.9425087108013939, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2230 | |
| }, | |
| { | |
| "epoch": 1.951219512195122, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2240 | |
| }, | |
| { | |
| "epoch": 1.9599303135888502, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2250 | |
| }, | |
| { | |
| "epoch": 1.9686411149825784, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2260 | |
| }, | |
| { | |
| "epoch": 1.9773519163763065, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2270 | |
| }, | |
| { | |
| "epoch": 1.986062717770035, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2280 | |
| }, | |
| { | |
| "epoch": 1.994773519163763, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2290 | |
| }, | |
| { | |
| "epoch": 2.0034843205574915, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2300 | |
| }, | |
| { | |
| "epoch": 2.0121951219512195, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2310 | |
| }, | |
| { | |
| "epoch": 2.0209059233449476, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2320 | |
| }, | |
| { | |
| "epoch": 2.029616724738676, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2330 | |
| }, | |
| { | |
| "epoch": 2.038327526132404, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2340 | |
| }, | |
| { | |
| "epoch": 2.0470383275261326, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2350 | |
| }, | |
| { | |
| "epoch": 2.0557491289198606, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2360 | |
| }, | |
| { | |
| "epoch": 2.0644599303135887, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2370 | |
| }, | |
| { | |
| "epoch": 2.073170731707317, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2380 | |
| }, | |
| { | |
| "epoch": 2.081881533101045, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2390 | |
| }, | |
| { | |
| "epoch": 2.0905923344947737, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2400 | |
| }, | |
| { | |
| "epoch": 2.0993031358885017, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2410 | |
| }, | |
| { | |
| "epoch": 2.10801393728223, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2420 | |
| }, | |
| { | |
| "epoch": 2.1167247386759582, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2430 | |
| }, | |
| { | |
| "epoch": 2.1254355400696863, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2440 | |
| }, | |
| { | |
| "epoch": 2.1341463414634148, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2450 | |
| }, | |
| { | |
| "epoch": 2.142857142857143, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2460 | |
| }, | |
| { | |
| "epoch": 2.1515679442508713, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2470 | |
| }, | |
| { | |
| "epoch": 2.1602787456445993, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2480 | |
| }, | |
| { | |
| "epoch": 2.1689895470383274, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2490 | |
| }, | |
| { | |
| "epoch": 2.177700348432056, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2500 | |
| }, | |
| { | |
| "epoch": 2.186411149825784, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2510 | |
| }, | |
| { | |
| "epoch": 2.1951219512195124, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2520 | |
| }, | |
| { | |
| "epoch": 2.2038327526132404, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2530 | |
| }, | |
| { | |
| "epoch": 2.2125435540069684, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2540 | |
| }, | |
| { | |
| "epoch": 2.221254355400697, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2550 | |
| }, | |
| { | |
| "epoch": 2.229965156794425, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2560 | |
| }, | |
| { | |
| "epoch": 2.2386759581881535, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2570 | |
| }, | |
| { | |
| "epoch": 2.2473867595818815, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2580 | |
| }, | |
| { | |
| "epoch": 2.2560975609756095, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2590 | |
| }, | |
| { | |
| "epoch": 2.264808362369338, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2600 | |
| }, | |
| { | |
| "epoch": 2.273519163763066, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2610 | |
| }, | |
| { | |
| "epoch": 2.2822299651567945, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2620 | |
| }, | |
| { | |
| "epoch": 2.2909407665505226, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2630 | |
| }, | |
| { | |
| "epoch": 2.2996515679442506, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2640 | |
| }, | |
| { | |
| "epoch": 2.308362369337979, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2650 | |
| }, | |
| { | |
| "epoch": 2.317073170731707, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2660 | |
| }, | |
| { | |
| "epoch": 2.3257839721254356, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2670 | |
| }, | |
| { | |
| "epoch": 2.3344947735191637, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2680 | |
| }, | |
| { | |
| "epoch": 2.343205574912892, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2690 | |
| }, | |
| { | |
| "epoch": 2.35191637630662, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2700 | |
| }, | |
| { | |
| "epoch": 2.3606271777003482, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2710 | |
| }, | |
| { | |
| "epoch": 2.3693379790940767, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2720 | |
| }, | |
| { | |
| "epoch": 2.3780487804878048, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2730 | |
| }, | |
| { | |
| "epoch": 2.3867595818815333, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2740 | |
| }, | |
| { | |
| "epoch": 2.3954703832752613, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2750 | |
| }, | |
| { | |
| "epoch": 2.40418118466899, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2760 | |
| }, | |
| { | |
| "epoch": 2.412891986062718, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2770 | |
| }, | |
| { | |
| "epoch": 2.421602787456446, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2780 | |
| }, | |
| { | |
| "epoch": 2.4303135888501743, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2790 | |
| }, | |
| { | |
| "epoch": 2.4390243902439024, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2800 | |
| }, | |
| { | |
| "epoch": 2.447735191637631, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2810 | |
| }, | |
| { | |
| "epoch": 2.456445993031359, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2820 | |
| }, | |
| { | |
| "epoch": 2.465156794425087, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2830 | |
| }, | |
| { | |
| "epoch": 2.4738675958188154, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2840 | |
| }, | |
| { | |
| "epoch": 2.4825783972125435, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2850 | |
| }, | |
| { | |
| "epoch": 2.491289198606272, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2860 | |
| }, | |
| { | |
| "epoch": 2.5, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2870 | |
| }, | |
| { | |
| "epoch": 2.508710801393728, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2880 | |
| }, | |
| { | |
| "epoch": 2.5174216027874565, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2890 | |
| }, | |
| { | |
| "epoch": 2.5261324041811846, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2900 | |
| }, | |
| { | |
| "epoch": 2.534843205574913, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2910 | |
| }, | |
| { | |
| "epoch": 2.543554006968641, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2920 | |
| }, | |
| { | |
| "epoch": 2.552264808362369, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2930 | |
| }, | |
| { | |
| "epoch": 2.5609756097560976, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2940 | |
| }, | |
| { | |
| "epoch": 2.5696864111498257, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2950 | |
| }, | |
| { | |
| "epoch": 2.578397212543554, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2960 | |
| }, | |
| { | |
| "epoch": 2.587108013937282, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2970 | |
| }, | |
| { | |
| "epoch": 2.59581881533101, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2980 | |
| }, | |
| { | |
| "epoch": 2.6045296167247387, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 2990 | |
| }, | |
| { | |
| "epoch": 2.6132404181184667, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3000 | |
| }, | |
| { | |
| "epoch": 2.6219512195121952, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3010 | |
| }, | |
| { | |
| "epoch": 2.6306620209059233, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3020 | |
| }, | |
| { | |
| "epoch": 2.6393728222996513, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3030 | |
| }, | |
| { | |
| "epoch": 2.64808362369338, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3040 | |
| }, | |
| { | |
| "epoch": 2.6567944250871083, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3050 | |
| }, | |
| { | |
| "epoch": 2.6655052264808363, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3060 | |
| }, | |
| { | |
| "epoch": 2.6742160278745644, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3070 | |
| }, | |
| { | |
| "epoch": 2.682926829268293, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3080 | |
| }, | |
| { | |
| "epoch": 2.691637630662021, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3090 | |
| }, | |
| { | |
| "epoch": 2.7003484320557494, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3100 | |
| }, | |
| { | |
| "epoch": 2.7090592334494774, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3110 | |
| }, | |
| { | |
| "epoch": 2.7177700348432055, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3120 | |
| }, | |
| { | |
| "epoch": 2.726480836236934, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3130 | |
| }, | |
| { | |
| "epoch": 2.735191637630662, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3140 | |
| }, | |
| { | |
| "epoch": 2.7439024390243905, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3150 | |
| }, | |
| { | |
| "epoch": 2.7526132404181185, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3160 | |
| }, | |
| { | |
| "epoch": 2.7613240418118465, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3170 | |
| }, | |
| { | |
| "epoch": 2.770034843205575, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3180 | |
| }, | |
| { | |
| "epoch": 2.778745644599303, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3190 | |
| }, | |
| { | |
| "epoch": 2.7874564459930316, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3200 | |
| }, | |
| { | |
| "epoch": 2.7961672473867596, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3210 | |
| }, | |
| { | |
| "epoch": 2.8048780487804876, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3220 | |
| }, | |
| { | |
| "epoch": 2.813588850174216, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3230 | |
| }, | |
| { | |
| "epoch": 2.822299651567944, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3240 | |
| }, | |
| { | |
| "epoch": 2.8310104529616726, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3250 | |
| }, | |
| { | |
| "epoch": 2.8397212543554007, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3260 | |
| }, | |
| { | |
| "epoch": 2.8484320557491287, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3270 | |
| }, | |
| { | |
| "epoch": 2.857142857142857, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3280 | |
| }, | |
| { | |
| "epoch": 2.8658536585365852, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3290 | |
| }, | |
| { | |
| "epoch": 2.8745644599303137, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3300 | |
| }, | |
| { | |
| "epoch": 2.8832752613240418, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3310 | |
| }, | |
| { | |
| "epoch": 2.89198606271777, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3320 | |
| }, | |
| { | |
| "epoch": 2.9006968641114983, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3330 | |
| }, | |
| { | |
| "epoch": 2.9094076655052263, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3340 | |
| }, | |
| { | |
| "epoch": 2.918118466898955, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3350 | |
| }, | |
| { | |
| "epoch": 2.926829268292683, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3360 | |
| }, | |
| { | |
| "epoch": 2.935540069686411, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3370 | |
| }, | |
| { | |
| "epoch": 2.9442508710801394, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3380 | |
| }, | |
| { | |
| "epoch": 2.952961672473868, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3390 | |
| }, | |
| { | |
| "epoch": 2.961672473867596, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3400 | |
| }, | |
| { | |
| "epoch": 2.970383275261324, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3410 | |
| }, | |
| { | |
| "epoch": 2.979094076655052, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3420 | |
| }, | |
| { | |
| "epoch": 2.9878048780487805, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3430 | |
| }, | |
| { | |
| "epoch": 2.996515679442509, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3440 | |
| }, | |
| { | |
| "epoch": 3.005226480836237, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3450 | |
| }, | |
| { | |
| "epoch": 3.013937282229965, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3460 | |
| }, | |
| { | |
| "epoch": 3.0226480836236935, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3470 | |
| }, | |
| { | |
| "epoch": 3.0313588850174216, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3480 | |
| }, | |
| { | |
| "epoch": 3.0400696864111496, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3490 | |
| }, | |
| { | |
| "epoch": 3.048780487804878, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3500 | |
| }, | |
| { | |
| "epoch": 3.057491289198606, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3510 | |
| }, | |
| { | |
| "epoch": 3.0662020905923346, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3520 | |
| }, | |
| { | |
| "epoch": 3.0749128919860627, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3530 | |
| }, | |
| { | |
| "epoch": 3.083623693379791, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3540 | |
| }, | |
| { | |
| "epoch": 3.092334494773519, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3550 | |
| }, | |
| { | |
| "epoch": 3.1010452961672472, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3560 | |
| }, | |
| { | |
| "epoch": 3.1097560975609757, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3570 | |
| }, | |
| { | |
| "epoch": 3.1184668989547037, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3580 | |
| }, | |
| { | |
| "epoch": 3.1271777003484322, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3590 | |
| }, | |
| { | |
| "epoch": 3.1358885017421603, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3600 | |
| }, | |
| { | |
| "epoch": 3.1445993031358883, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3610 | |
| }, | |
| { | |
| "epoch": 3.153310104529617, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3620 | |
| }, | |
| { | |
| "epoch": 3.162020905923345, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3630 | |
| }, | |
| { | |
| "epoch": 3.1707317073170733, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3640 | |
| }, | |
| { | |
| "epoch": 3.1794425087108014, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3650 | |
| }, | |
| { | |
| "epoch": 3.1881533101045294, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3660 | |
| }, | |
| { | |
| "epoch": 3.196864111498258, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3670 | |
| }, | |
| { | |
| "epoch": 3.205574912891986, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3680 | |
| }, | |
| { | |
| "epoch": 3.2142857142857144, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3690 | |
| }, | |
| { | |
| "epoch": 3.2229965156794425, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3700 | |
| }, | |
| { | |
| "epoch": 3.231707317073171, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3710 | |
| }, | |
| { | |
| "epoch": 3.240418118466899, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3720 | |
| }, | |
| { | |
| "epoch": 3.249128919860627, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3730 | |
| }, | |
| { | |
| "epoch": 3.2578397212543555, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3740 | |
| }, | |
| { | |
| "epoch": 3.2665505226480835, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3750 | |
| }, | |
| { | |
| "epoch": 3.275261324041812, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3760 | |
| }, | |
| { | |
| "epoch": 3.28397212543554, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3770 | |
| }, | |
| { | |
| "epoch": 3.292682926829268, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3780 | |
| }, | |
| { | |
| "epoch": 3.3013937282229966, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3790 | |
| }, | |
| { | |
| "epoch": 3.3101045296167246, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3800 | |
| }, | |
| { | |
| "epoch": 3.318815331010453, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3810 | |
| }, | |
| { | |
| "epoch": 3.327526132404181, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3820 | |
| }, | |
| { | |
| "epoch": 3.3362369337979096, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3830 | |
| }, | |
| { | |
| "epoch": 3.3449477351916377, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3840 | |
| }, | |
| { | |
| "epoch": 3.3536585365853657, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3850 | |
| }, | |
| { | |
| "epoch": 3.362369337979094, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3860 | |
| }, | |
| { | |
| "epoch": 3.3710801393728222, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3870 | |
| }, | |
| { | |
| "epoch": 3.3797909407665507, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3880 | |
| }, | |
| { | |
| "epoch": 3.3885017421602788, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3890 | |
| }, | |
| { | |
| "epoch": 3.397212543554007, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3900 | |
| }, | |
| { | |
| "epoch": 3.4059233449477353, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3910 | |
| }, | |
| { | |
| "epoch": 3.4146341463414633, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3920 | |
| }, | |
| { | |
| "epoch": 3.423344947735192, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3930 | |
| }, | |
| { | |
| "epoch": 3.43205574912892, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3940 | |
| }, | |
| { | |
| "epoch": 3.440766550522648, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3950 | |
| }, | |
| { | |
| "epoch": 3.4494773519163764, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3960 | |
| }, | |
| { | |
| "epoch": 3.4581881533101044, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3970 | |
| }, | |
| { | |
| "epoch": 3.466898954703833, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3980 | |
| }, | |
| { | |
| "epoch": 3.475609756097561, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 3990 | |
| }, | |
| { | |
| "epoch": 3.484320557491289, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4000 | |
| }, | |
| { | |
| "epoch": 3.4930313588850175, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4010 | |
| }, | |
| { | |
| "epoch": 3.5017421602787455, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4020 | |
| }, | |
| { | |
| "epoch": 3.510452961672474, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4030 | |
| }, | |
| { | |
| "epoch": 3.519163763066202, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4040 | |
| }, | |
| { | |
| "epoch": 3.52787456445993, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4050 | |
| }, | |
| { | |
| "epoch": 3.5365853658536586, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4060 | |
| }, | |
| { | |
| "epoch": 3.5452961672473866, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4070 | |
| }, | |
| { | |
| "epoch": 3.554006968641115, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4080 | |
| }, | |
| { | |
| "epoch": 3.562717770034843, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4090 | |
| }, | |
| { | |
| "epoch": 3.571428571428571, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4100 | |
| }, | |
| { | |
| "epoch": 3.5801393728222997, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4110 | |
| }, | |
| { | |
| "epoch": 3.588850174216028, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4120 | |
| }, | |
| { | |
| "epoch": 3.597560975609756, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4130 | |
| }, | |
| { | |
| "epoch": 3.6062717770034842, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4140 | |
| }, | |
| { | |
| "epoch": 3.6149825783972127, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4150 | |
| }, | |
| { | |
| "epoch": 3.6236933797909407, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4160 | |
| }, | |
| { | |
| "epoch": 3.6324041811846692, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4170 | |
| }, | |
| { | |
| "epoch": 3.6411149825783973, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4180 | |
| }, | |
| { | |
| "epoch": 3.6498257839721253, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4190 | |
| }, | |
| { | |
| "epoch": 3.658536585365854, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4200 | |
| }, | |
| { | |
| "epoch": 3.667247386759582, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4210 | |
| }, | |
| { | |
| "epoch": 3.6759581881533103, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4220 | |
| }, | |
| { | |
| "epoch": 3.6846689895470384, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4230 | |
| }, | |
| { | |
| "epoch": 3.6933797909407664, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4240 | |
| }, | |
| { | |
| "epoch": 3.702090592334495, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4250 | |
| }, | |
| { | |
| "epoch": 3.710801393728223, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4260 | |
| }, | |
| { | |
| "epoch": 3.7195121951219514, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4270 | |
| }, | |
| { | |
| "epoch": 3.7282229965156795, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4280 | |
| }, | |
| { | |
| "epoch": 3.7369337979094075, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4290 | |
| }, | |
| { | |
| "epoch": 3.745644599303136, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4300 | |
| }, | |
| { | |
| "epoch": 3.754355400696864, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4310 | |
| }, | |
| { | |
| "epoch": 3.7630662020905925, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4320 | |
| }, | |
| { | |
| "epoch": 3.7717770034843205, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4330 | |
| }, | |
| { | |
| "epoch": 3.7804878048780486, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4340 | |
| }, | |
| { | |
| "epoch": 3.789198606271777, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4350 | |
| }, | |
| { | |
| "epoch": 3.797909407665505, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4360 | |
| }, | |
| { | |
| "epoch": 3.8066202090592336, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4370 | |
| }, | |
| { | |
| "epoch": 3.8153310104529616, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4380 | |
| }, | |
| { | |
| "epoch": 3.8240418118466897, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4390 | |
| }, | |
| { | |
| "epoch": 3.832752613240418, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4400 | |
| }, | |
| { | |
| "epoch": 3.841463414634146, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4410 | |
| }, | |
| { | |
| "epoch": 3.8501742160278747, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4420 | |
| }, | |
| { | |
| "epoch": 3.8588850174216027, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4430 | |
| }, | |
| { | |
| "epoch": 3.8675958188153308, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4440 | |
| }, | |
| { | |
| "epoch": 3.8763066202090593, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4450 | |
| }, | |
| { | |
| "epoch": 3.8850174216027873, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4460 | |
| }, | |
| { | |
| "epoch": 3.8937282229965158, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4470 | |
| }, | |
| { | |
| "epoch": 3.902439024390244, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4480 | |
| }, | |
| { | |
| "epoch": 3.911149825783972, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4490 | |
| }, | |
| { | |
| "epoch": 3.9198606271777003, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4500 | |
| }, | |
| { | |
| "epoch": 3.928571428571429, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4510 | |
| }, | |
| { | |
| "epoch": 3.937282229965157, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4520 | |
| }, | |
| { | |
| "epoch": 3.945993031358885, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4530 | |
| }, | |
| { | |
| "epoch": 3.9547038327526134, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4540 | |
| }, | |
| { | |
| "epoch": 3.9634146341463414, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4550 | |
| }, | |
| { | |
| "epoch": 3.97212543554007, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4560 | |
| }, | |
| { | |
| "epoch": 3.980836236933798, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4570 | |
| }, | |
| { | |
| "epoch": 3.989547038327526, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4580 | |
| }, | |
| { | |
| "epoch": 3.9982578397212545, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4590 | |
| }, | |
| { | |
| "epoch": 4.006968641114983, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4600 | |
| }, | |
| { | |
| "epoch": 4.015679442508711, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4610 | |
| }, | |
| { | |
| "epoch": 4.024390243902439, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4620 | |
| }, | |
| { | |
| "epoch": 4.033101045296167, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4630 | |
| }, | |
| { | |
| "epoch": 4.041811846689895, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4640 | |
| }, | |
| { | |
| "epoch": 4.050522648083624, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4650 | |
| }, | |
| { | |
| "epoch": 4.059233449477352, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4660 | |
| }, | |
| { | |
| "epoch": 4.06794425087108, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4670 | |
| }, | |
| { | |
| "epoch": 4.076655052264808, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4680 | |
| }, | |
| { | |
| "epoch": 4.085365853658536, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4690 | |
| }, | |
| { | |
| "epoch": 4.094076655052265, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4700 | |
| }, | |
| { | |
| "epoch": 4.102787456445993, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4710 | |
| }, | |
| { | |
| "epoch": 4.111498257839721, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4720 | |
| }, | |
| { | |
| "epoch": 4.120209059233449, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4730 | |
| }, | |
| { | |
| "epoch": 4.128919860627177, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4740 | |
| }, | |
| { | |
| "epoch": 4.137630662020906, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4750 | |
| }, | |
| { | |
| "epoch": 4.146341463414634, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4760 | |
| }, | |
| { | |
| "epoch": 4.155052264808362, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4770 | |
| }, | |
| { | |
| "epoch": 4.16376306620209, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4780 | |
| }, | |
| { | |
| "epoch": 4.172473867595818, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4790 | |
| }, | |
| { | |
| "epoch": 4.181184668989547, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4800 | |
| }, | |
| { | |
| "epoch": 4.189895470383275, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4810 | |
| }, | |
| { | |
| "epoch": 4.198606271777003, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4820 | |
| }, | |
| { | |
| "epoch": 4.2073170731707314, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4830 | |
| }, | |
| { | |
| "epoch": 4.21602787456446, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4840 | |
| }, | |
| { | |
| "epoch": 4.224738675958188, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4850 | |
| }, | |
| { | |
| "epoch": 4.2334494773519165, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4860 | |
| }, | |
| { | |
| "epoch": 4.2421602787456445, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4870 | |
| }, | |
| { | |
| "epoch": 4.2508710801393725, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4880 | |
| }, | |
| { | |
| "epoch": 4.2595818815331015, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4890 | |
| }, | |
| { | |
| "epoch": 4.2682926829268295, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4900 | |
| }, | |
| { | |
| "epoch": 4.2770034843205575, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4910 | |
| }, | |
| { | |
| "epoch": 4.285714285714286, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4920 | |
| }, | |
| { | |
| "epoch": 4.294425087108014, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4930 | |
| }, | |
| { | |
| "epoch": 4.303135888501743, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4940 | |
| }, | |
| { | |
| "epoch": 4.311846689895471, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4950 | |
| }, | |
| { | |
| "epoch": 4.320557491289199, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4960 | |
| }, | |
| { | |
| "epoch": 4.329268292682927, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4970 | |
| }, | |
| { | |
| "epoch": 4.337979094076655, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4980 | |
| }, | |
| { | |
| "epoch": 4.346689895470384, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 4990 | |
| }, | |
| { | |
| "epoch": 4.355400696864112, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5000 | |
| }, | |
| { | |
| "epoch": 4.36411149825784, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5010 | |
| }, | |
| { | |
| "epoch": 4.372822299651568, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5020 | |
| }, | |
| { | |
| "epoch": 4.381533101045296, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5030 | |
| }, | |
| { | |
| "epoch": 4.390243902439025, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5040 | |
| }, | |
| { | |
| "epoch": 4.398954703832753, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5050 | |
| }, | |
| { | |
| "epoch": 4.407665505226481, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5060 | |
| }, | |
| { | |
| "epoch": 4.416376306620209, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5070 | |
| }, | |
| { | |
| "epoch": 4.425087108013937, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5080 | |
| }, | |
| { | |
| "epoch": 4.433797909407666, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5090 | |
| }, | |
| { | |
| "epoch": 4.442508710801394, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5100 | |
| }, | |
| { | |
| "epoch": 4.451219512195122, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5110 | |
| }, | |
| { | |
| "epoch": 4.45993031358885, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5120 | |
| }, | |
| { | |
| "epoch": 4.468641114982578, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5130 | |
| }, | |
| { | |
| "epoch": 4.477351916376307, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5140 | |
| }, | |
| { | |
| "epoch": 4.486062717770035, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5150 | |
| }, | |
| { | |
| "epoch": 4.494773519163763, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5160 | |
| }, | |
| { | |
| "epoch": 4.503484320557491, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5170 | |
| }, | |
| { | |
| "epoch": 4.512195121951219, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5180 | |
| }, | |
| { | |
| "epoch": 4.520905923344948, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5190 | |
| }, | |
| { | |
| "epoch": 4.529616724738676, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5200 | |
| }, | |
| { | |
| "epoch": 4.538327526132404, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5210 | |
| }, | |
| { | |
| "epoch": 4.547038327526132, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5220 | |
| }, | |
| { | |
| "epoch": 4.55574912891986, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5230 | |
| }, | |
| { | |
| "epoch": 4.564459930313589, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5240 | |
| }, | |
| { | |
| "epoch": 4.573170731707317, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5250 | |
| }, | |
| { | |
| "epoch": 4.581881533101045, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5260 | |
| }, | |
| { | |
| "epoch": 4.590592334494773, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5270 | |
| }, | |
| { | |
| "epoch": 4.599303135888501, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5280 | |
| }, | |
| { | |
| "epoch": 4.60801393728223, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5290 | |
| }, | |
| { | |
| "epoch": 4.616724738675958, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5300 | |
| }, | |
| { | |
| "epoch": 4.625435540069686, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5310 | |
| }, | |
| { | |
| "epoch": 4.634146341463414, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5320 | |
| }, | |
| { | |
| "epoch": 4.642857142857143, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5330 | |
| }, | |
| { | |
| "epoch": 4.651567944250871, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5340 | |
| }, | |
| { | |
| "epoch": 4.660278745644599, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5350 | |
| }, | |
| { | |
| "epoch": 4.668989547038327, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5360 | |
| }, | |
| { | |
| "epoch": 4.677700348432055, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5370 | |
| }, | |
| { | |
| "epoch": 4.686411149825784, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5380 | |
| }, | |
| { | |
| "epoch": 4.695121951219512, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5390 | |
| }, | |
| { | |
| "epoch": 4.70383275261324, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5400 | |
| }, | |
| { | |
| "epoch": 4.7125435540069684, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5410 | |
| }, | |
| { | |
| "epoch": 4.7212543554006965, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5420 | |
| }, | |
| { | |
| "epoch": 4.729965156794425, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5430 | |
| }, | |
| { | |
| "epoch": 4.7386759581881535, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5440 | |
| }, | |
| { | |
| "epoch": 4.7473867595818815, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5450 | |
| }, | |
| { | |
| "epoch": 4.7560975609756095, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5460 | |
| }, | |
| { | |
| "epoch": 4.7648083623693385, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5470 | |
| }, | |
| { | |
| "epoch": 4.7735191637630665, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5480 | |
| }, | |
| { | |
| "epoch": 4.7822299651567945, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5490 | |
| }, | |
| { | |
| "epoch": 4.790940766550523, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5500 | |
| }, | |
| { | |
| "epoch": 4.799651567944251, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5510 | |
| }, | |
| { | |
| "epoch": 4.80836236933798, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5520 | |
| }, | |
| { | |
| "epoch": 4.817073170731708, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5530 | |
| }, | |
| { | |
| "epoch": 4.825783972125436, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5540 | |
| }, | |
| { | |
| "epoch": 4.834494773519164, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5550 | |
| }, | |
| { | |
| "epoch": 4.843205574912892, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5560 | |
| }, | |
| { | |
| "epoch": 4.851916376306621, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5570 | |
| }, | |
| { | |
| "epoch": 4.860627177700349, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5580 | |
| }, | |
| { | |
| "epoch": 4.869337979094077, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5590 | |
| }, | |
| { | |
| "epoch": 4.878048780487805, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5600 | |
| }, | |
| { | |
| "epoch": 4.886759581881533, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5610 | |
| }, | |
| { | |
| "epoch": 4.895470383275262, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5620 | |
| }, | |
| { | |
| "epoch": 4.90418118466899, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5630 | |
| }, | |
| { | |
| "epoch": 4.912891986062718, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5640 | |
| }, | |
| { | |
| "epoch": 4.921602787456446, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5650 | |
| }, | |
| { | |
| "epoch": 4.930313588850174, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5660 | |
| }, | |
| { | |
| "epoch": 4.939024390243903, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5670 | |
| }, | |
| { | |
| "epoch": 4.947735191637631, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5680 | |
| }, | |
| { | |
| "epoch": 4.956445993031359, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5690 | |
| }, | |
| { | |
| "epoch": 4.965156794425087, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5700 | |
| }, | |
| { | |
| "epoch": 4.973867595818815, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5710 | |
| }, | |
| { | |
| "epoch": 4.982578397212544, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5720 | |
| }, | |
| { | |
| "epoch": 4.991289198606272, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5730 | |
| }, | |
| { | |
| "epoch": 5.0, | |
| "grad_norm": NaN, | |
| "learning_rate": 2.045721369683134e-06, | |
| "loss": 0.0, | |
| "step": 5740 | |
| } | |
| ], | |
| "logging_steps": 10, | |
| "max_steps": 5740, | |
| "num_input_tokens_seen": 0, | |
| "num_train_epochs": 5, | |
| "save_steps": 500, | |
| "stateful_callbacks": { | |
| "TrainerControl": { | |
| "args": { | |
| "should_epoch_stop": false, | |
| "should_evaluate": false, | |
| "should_log": false, | |
| "should_save": true, | |
| "should_training_stop": true | |
| }, | |
| "attributes": {} | |
| } | |
| }, | |
| "total_flos": 2828953243944960.0, | |
| "train_batch_size": 32, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |