| { | |
| "best_metric": 0.036797065287828445, | |
| "best_model_checkpoint": "saves/psy-course/Llama-3.1-8B-Instruct/train/fold9/checkpoint-1300", | |
| "epoch": 4.997121473805412, | |
| "eval_steps": 50, | |
| "global_step": 3255, | |
| "is_hyper_param_search": false, | |
| "is_local_process_zero": true, | |
| "is_world_process_zero": true, | |
| "log_history": [ | |
| { | |
| "epoch": 0.01535213970447131, | |
| "grad_norm": 4.642497539520264, | |
| "learning_rate": 3.067484662576687e-06, | |
| "loss": 1.6312, | |
| "step": 10 | |
| }, | |
| { | |
| "epoch": 0.03070427940894262, | |
| "grad_norm": 5.409369945526123, | |
| "learning_rate": 6.134969325153374e-06, | |
| "loss": 1.4429, | |
| "step": 20 | |
| }, | |
| { | |
| "epoch": 0.04605641911341393, | |
| "grad_norm": 5.949030876159668, | |
| "learning_rate": 9.202453987730062e-06, | |
| "loss": 1.4142, | |
| "step": 30 | |
| }, | |
| { | |
| "epoch": 0.06140855881788524, | |
| "grad_norm": 2.751120090484619, | |
| "learning_rate": 1.2269938650306748e-05, | |
| "loss": 0.9213, | |
| "step": 40 | |
| }, | |
| { | |
| "epoch": 0.07676069852235655, | |
| "grad_norm": 1.7110086679458618, | |
| "learning_rate": 1.5337423312883436e-05, | |
| "loss": 0.6165, | |
| "step": 50 | |
| }, | |
| { | |
| "epoch": 0.07676069852235655, | |
| "eval_loss": 0.3982980251312256, | |
| "eval_runtime": 157.3623, | |
| "eval_samples_per_second": 7.365, | |
| "eval_steps_per_second": 7.365, | |
| "step": 50 | |
| }, | |
| { | |
| "epoch": 0.09211283822682786, | |
| "grad_norm": 1.144059181213379, | |
| "learning_rate": 1.8404907975460123e-05, | |
| "loss": 0.4066, | |
| "step": 60 | |
| }, | |
| { | |
| "epoch": 0.10746497793129918, | |
| "grad_norm": 1.527937412261963, | |
| "learning_rate": 2.1472392638036813e-05, | |
| "loss": 0.276, | |
| "step": 70 | |
| }, | |
| { | |
| "epoch": 0.12281711763577048, | |
| "grad_norm": 1.0224796533584595, | |
| "learning_rate": 2.4539877300613496e-05, | |
| "loss": 0.1687, | |
| "step": 80 | |
| }, | |
| { | |
| "epoch": 0.1381692573402418, | |
| "grad_norm": 0.9973428249359131, | |
| "learning_rate": 2.7607361963190186e-05, | |
| "loss": 0.1319, | |
| "step": 90 | |
| }, | |
| { | |
| "epoch": 0.1535213970447131, | |
| "grad_norm": 0.7085102200508118, | |
| "learning_rate": 3.067484662576687e-05, | |
| "loss": 0.1035, | |
| "step": 100 | |
| }, | |
| { | |
| "epoch": 0.1535213970447131, | |
| "eval_loss": 0.08296720683574677, | |
| "eval_runtime": 157.3193, | |
| "eval_samples_per_second": 7.367, | |
| "eval_steps_per_second": 7.367, | |
| "step": 100 | |
| }, | |
| { | |
| "epoch": 0.16887353674918443, | |
| "grad_norm": 0.830489993095398, | |
| "learning_rate": 3.3742331288343556e-05, | |
| "loss": 0.0736, | |
| "step": 110 | |
| }, | |
| { | |
| "epoch": 0.18422567645365573, | |
| "grad_norm": 0.8759797811508179, | |
| "learning_rate": 3.6809815950920246e-05, | |
| "loss": 0.0691, | |
| "step": 120 | |
| }, | |
| { | |
| "epoch": 0.19957781615812703, | |
| "grad_norm": 1.1546653509140015, | |
| "learning_rate": 3.987730061349693e-05, | |
| "loss": 0.0809, | |
| "step": 130 | |
| }, | |
| { | |
| "epoch": 0.21492995586259836, | |
| "grad_norm": 0.631569504737854, | |
| "learning_rate": 4.2944785276073626e-05, | |
| "loss": 0.0557, | |
| "step": 140 | |
| }, | |
| { | |
| "epoch": 0.23028209556706966, | |
| "grad_norm": 1.1803288459777832, | |
| "learning_rate": 4.601226993865031e-05, | |
| "loss": 0.0746, | |
| "step": 150 | |
| }, | |
| { | |
| "epoch": 0.23028209556706966, | |
| "eval_loss": 0.06817680597305298, | |
| "eval_runtime": 157.2581, | |
| "eval_samples_per_second": 7.37, | |
| "eval_steps_per_second": 7.37, | |
| "step": 150 | |
| }, | |
| { | |
| "epoch": 0.24563423527154096, | |
| "grad_norm": 0.8749077916145325, | |
| "learning_rate": 4.907975460122699e-05, | |
| "loss": 0.0854, | |
| "step": 160 | |
| }, | |
| { | |
| "epoch": 0.2609863749760123, | |
| "grad_norm": 0.7158986926078796, | |
| "learning_rate": 5.214723926380368e-05, | |
| "loss": 0.0763, | |
| "step": 170 | |
| }, | |
| { | |
| "epoch": 0.2763385146804836, | |
| "grad_norm": 0.7949298024177551, | |
| "learning_rate": 5.521472392638037e-05, | |
| "loss": 0.0714, | |
| "step": 180 | |
| }, | |
| { | |
| "epoch": 0.2916906543849549, | |
| "grad_norm": 2.1728086471557617, | |
| "learning_rate": 5.8282208588957056e-05, | |
| "loss": 0.0591, | |
| "step": 190 | |
| }, | |
| { | |
| "epoch": 0.3070427940894262, | |
| "grad_norm": 0.8605216145515442, | |
| "learning_rate": 6.134969325153375e-05, | |
| "loss": 0.0591, | |
| "step": 200 | |
| }, | |
| { | |
| "epoch": 0.3070427940894262, | |
| "eval_loss": 0.059281475841999054, | |
| "eval_runtime": 157.2933, | |
| "eval_samples_per_second": 7.368, | |
| "eval_steps_per_second": 7.368, | |
| "step": 200 | |
| }, | |
| { | |
| "epoch": 0.3223949337938975, | |
| "grad_norm": 0.9615307450294495, | |
| "learning_rate": 6.441717791411042e-05, | |
| "loss": 0.0519, | |
| "step": 210 | |
| }, | |
| { | |
| "epoch": 0.33774707349836885, | |
| "grad_norm": 0.60662442445755, | |
| "learning_rate": 6.748466257668711e-05, | |
| "loss": 0.0436, | |
| "step": 220 | |
| }, | |
| { | |
| "epoch": 0.35309921320284016, | |
| "grad_norm": 0.3369896411895752, | |
| "learning_rate": 7.055214723926382e-05, | |
| "loss": 0.082, | |
| "step": 230 | |
| }, | |
| { | |
| "epoch": 0.36845135290731146, | |
| "grad_norm": 0.5379114747047424, | |
| "learning_rate": 7.361963190184049e-05, | |
| "loss": 0.0583, | |
| "step": 240 | |
| }, | |
| { | |
| "epoch": 0.38380349261178276, | |
| "grad_norm": 1.2060985565185547, | |
| "learning_rate": 7.668711656441718e-05, | |
| "loss": 0.0716, | |
| "step": 250 | |
| }, | |
| { | |
| "epoch": 0.38380349261178276, | |
| "eval_loss": 0.052977994084358215, | |
| "eval_runtime": 157.3133, | |
| "eval_samples_per_second": 7.367, | |
| "eval_steps_per_second": 7.367, | |
| "step": 250 | |
| }, | |
| { | |
| "epoch": 0.39915563231625406, | |
| "grad_norm": 0.752123236656189, | |
| "learning_rate": 7.975460122699386e-05, | |
| "loss": 0.0559, | |
| "step": 260 | |
| }, | |
| { | |
| "epoch": 0.41450777202072536, | |
| "grad_norm": 1.1011897325515747, | |
| "learning_rate": 8.282208588957055e-05, | |
| "loss": 0.0611, | |
| "step": 270 | |
| }, | |
| { | |
| "epoch": 0.4298599117251967, | |
| "grad_norm": 0.8125905394554138, | |
| "learning_rate": 8.588957055214725e-05, | |
| "loss": 0.0614, | |
| "step": 280 | |
| }, | |
| { | |
| "epoch": 0.445212051429668, | |
| "grad_norm": 0.48761671781539917, | |
| "learning_rate": 8.895705521472393e-05, | |
| "loss": 0.0472, | |
| "step": 290 | |
| }, | |
| { | |
| "epoch": 0.4605641911341393, | |
| "grad_norm": 0.3559521436691284, | |
| "learning_rate": 9.202453987730062e-05, | |
| "loss": 0.0571, | |
| "step": 300 | |
| }, | |
| { | |
| "epoch": 0.4605641911341393, | |
| "eval_loss": 0.04749785736203194, | |
| "eval_runtime": 157.325, | |
| "eval_samples_per_second": 7.367, | |
| "eval_steps_per_second": 7.367, | |
| "step": 300 | |
| }, | |
| { | |
| "epoch": 0.4759163308386106, | |
| "grad_norm": 0.600727915763855, | |
| "learning_rate": 9.50920245398773e-05, | |
| "loss": 0.0457, | |
| "step": 310 | |
| }, | |
| { | |
| "epoch": 0.4912684705430819, | |
| "grad_norm": 0.4633491635322571, | |
| "learning_rate": 9.815950920245399e-05, | |
| "loss": 0.0464, | |
| "step": 320 | |
| }, | |
| { | |
| "epoch": 0.5066206102475532, | |
| "grad_norm": 0.6039092540740967, | |
| "learning_rate": 9.999953982785432e-05, | |
| "loss": 0.0532, | |
| "step": 330 | |
| }, | |
| { | |
| "epoch": 0.5219727499520246, | |
| "grad_norm": 0.35319024324417114, | |
| "learning_rate": 9.999436298849151e-05, | |
| "loss": 0.0476, | |
| "step": 340 | |
| }, | |
| { | |
| "epoch": 0.5373248896564958, | |
| "grad_norm": 0.2034178227186203, | |
| "learning_rate": 9.998343469212352e-05, | |
| "loss": 0.0499, | |
| "step": 350 | |
| }, | |
| { | |
| "epoch": 0.5373248896564958, | |
| "eval_loss": 0.0483204647898674, | |
| "eval_runtime": 157.3109, | |
| "eval_samples_per_second": 7.368, | |
| "eval_steps_per_second": 7.368, | |
| "step": 350 | |
| }, | |
| { | |
| "epoch": 0.5526770293609672, | |
| "grad_norm": 0.7975025773048401, | |
| "learning_rate": 9.996675619596465e-05, | |
| "loss": 0.0486, | |
| "step": 360 | |
| }, | |
| { | |
| "epoch": 0.5680291690654385, | |
| "grad_norm": 0.7012478709220886, | |
| "learning_rate": 9.99443294187443e-05, | |
| "loss": 0.0545, | |
| "step": 370 | |
| }, | |
| { | |
| "epoch": 0.5833813087699098, | |
| "grad_norm": 0.17911651730537415, | |
| "learning_rate": 9.991615694048621e-05, | |
| "loss": 0.0449, | |
| "step": 380 | |
| }, | |
| { | |
| "epoch": 0.5987334484743811, | |
| "grad_norm": 0.5984811782836914, | |
| "learning_rate": 9.988224200221172e-05, | |
| "loss": 0.0606, | |
| "step": 390 | |
| }, | |
| { | |
| "epoch": 0.6140855881788524, | |
| "grad_norm": 0.37629756331443787, | |
| "learning_rate": 9.984258850556693e-05, | |
| "loss": 0.0421, | |
| "step": 400 | |
| }, | |
| { | |
| "epoch": 0.6140855881788524, | |
| "eval_loss": 0.047019653022289276, | |
| "eval_runtime": 157.366, | |
| "eval_samples_per_second": 7.365, | |
| "eval_steps_per_second": 7.365, | |
| "step": 400 | |
| }, | |
| { | |
| "epoch": 0.6294377278833237, | |
| "grad_norm": 0.7881872653961182, | |
| "learning_rate": 9.979720101237375e-05, | |
| "loss": 0.0445, | |
| "step": 410 | |
| }, | |
| { | |
| "epoch": 0.644789867587795, | |
| "grad_norm": 0.32935699820518494, | |
| "learning_rate": 9.974608474410512e-05, | |
| "loss": 0.0492, | |
| "step": 420 | |
| }, | |
| { | |
| "epoch": 0.6601420072922664, | |
| "grad_norm": 0.6060999631881714, | |
| "learning_rate": 9.968924558128445e-05, | |
| "loss": 0.0482, | |
| "step": 430 | |
| }, | |
| { | |
| "epoch": 0.6754941469967377, | |
| "grad_norm": 0.3332858979701996, | |
| "learning_rate": 9.962669006280894e-05, | |
| "loss": 0.0408, | |
| "step": 440 | |
| }, | |
| { | |
| "epoch": 0.690846286701209, | |
| "grad_norm": 0.4833207428455353, | |
| "learning_rate": 9.95584253851974e-05, | |
| "loss": 0.0514, | |
| "step": 450 | |
| }, | |
| { | |
| "epoch": 0.690846286701209, | |
| "eval_loss": 0.047262296080589294, | |
| "eval_runtime": 157.2723, | |
| "eval_samples_per_second": 7.369, | |
| "eval_steps_per_second": 7.369, | |
| "step": 450 | |
| }, | |
| { | |
| "epoch": 0.7061984264056803, | |
| "grad_norm": 0.20673757791519165, | |
| "learning_rate": 9.948445940176243e-05, | |
| "loss": 0.0515, | |
| "step": 460 | |
| }, | |
| { | |
| "epoch": 0.7215505661101516, | |
| "grad_norm": 0.23464085161685944, | |
| "learning_rate": 9.940480062170679e-05, | |
| "loss": 0.0435, | |
| "step": 470 | |
| }, | |
| { | |
| "epoch": 0.7369027058146229, | |
| "grad_norm": 0.3727859854698181, | |
| "learning_rate": 9.931945820914462e-05, | |
| "loss": 0.0544, | |
| "step": 480 | |
| }, | |
| { | |
| "epoch": 0.7522548455190943, | |
| "grad_norm": 0.4665268361568451, | |
| "learning_rate": 9.922844198204715e-05, | |
| "loss": 0.0407, | |
| "step": 490 | |
| }, | |
| { | |
| "epoch": 0.7676069852235655, | |
| "grad_norm": 0.4792843461036682, | |
| "learning_rate": 9.913176241111319e-05, | |
| "loss": 0.042, | |
| "step": 500 | |
| }, | |
| { | |
| "epoch": 0.7676069852235655, | |
| "eval_loss": 0.04632759094238281, | |
| "eval_runtime": 157.3047, | |
| "eval_samples_per_second": 7.368, | |
| "eval_steps_per_second": 7.368, | |
| "step": 500 | |
| }, | |
| { | |
| "epoch": 0.7829591249280369, | |
| "grad_norm": 0.6618539094924927, | |
| "learning_rate": 9.902943061856456e-05, | |
| "loss": 0.0374, | |
| "step": 510 | |
| }, | |
| { | |
| "epoch": 0.7983112646325081, | |
| "grad_norm": 0.3555477559566498, | |
| "learning_rate": 9.892145837686657e-05, | |
| "loss": 0.0651, | |
| "step": 520 | |
| }, | |
| { | |
| "epoch": 0.8136634043369795, | |
| "grad_norm": 0.46410855650901794, | |
| "learning_rate": 9.880785810737378e-05, | |
| "loss": 0.0585, | |
| "step": 530 | |
| }, | |
| { | |
| "epoch": 0.8290155440414507, | |
| "grad_norm": 0.20220302045345306, | |
| "learning_rate": 9.868864287890083e-05, | |
| "loss": 0.0438, | |
| "step": 540 | |
| }, | |
| { | |
| "epoch": 0.8443676837459221, | |
| "grad_norm": 0.275446355342865, | |
| "learning_rate": 9.856382640621917e-05, | |
| "loss": 0.0672, | |
| "step": 550 | |
| }, | |
| { | |
| "epoch": 0.8443676837459221, | |
| "eval_loss": 0.04763461649417877, | |
| "eval_runtime": 157.2892, | |
| "eval_samples_per_second": 7.369, | |
| "eval_steps_per_second": 7.369, | |
| "step": 550 | |
| }, | |
| { | |
| "epoch": 0.8597198234503934, | |
| "grad_norm": 0.31557002663612366, | |
| "learning_rate": 9.84334230484792e-05, | |
| "loss": 0.0402, | |
| "step": 560 | |
| }, | |
| { | |
| "epoch": 0.8750719631548647, | |
| "grad_norm": 0.22945792973041534, | |
| "learning_rate": 9.82974478075583e-05, | |
| "loss": 0.0381, | |
| "step": 570 | |
| }, | |
| { | |
| "epoch": 0.890424102859336, | |
| "grad_norm": 0.28530406951904297, | |
| "learning_rate": 9.815591632633509e-05, | |
| "loss": 0.0354, | |
| "step": 580 | |
| }, | |
| { | |
| "epoch": 0.9057762425638073, | |
| "grad_norm": 0.3434598743915558, | |
| "learning_rate": 9.800884488688985e-05, | |
| "loss": 0.0443, | |
| "step": 590 | |
| }, | |
| { | |
| "epoch": 0.9211283822682786, | |
| "grad_norm": 0.33944979310035706, | |
| "learning_rate": 9.785625040863124e-05, | |
| "loss": 0.0388, | |
| "step": 600 | |
| }, | |
| { | |
| "epoch": 0.9211283822682786, | |
| "eval_loss": 0.04456296190619469, | |
| "eval_runtime": 157.234, | |
| "eval_samples_per_second": 7.371, | |
| "eval_steps_per_second": 7.371, | |
| "step": 600 | |
| }, | |
| { | |
| "epoch": 0.93648052197275, | |
| "grad_norm": 0.3150840401649475, | |
| "learning_rate": 9.769815044635005e-05, | |
| "loss": 0.0352, | |
| "step": 610 | |
| }, | |
| { | |
| "epoch": 0.9518326616772212, | |
| "grad_norm": 0.35916006565093994, | |
| "learning_rate": 9.753456318819946e-05, | |
| "loss": 0.0485, | |
| "step": 620 | |
| }, | |
| { | |
| "epoch": 0.9671848013816926, | |
| "grad_norm": 0.5761755704879761, | |
| "learning_rate": 9.736550745360292e-05, | |
| "loss": 0.0413, | |
| "step": 630 | |
| }, | |
| { | |
| "epoch": 0.9825369410861639, | |
| "grad_norm": 0.2672175467014313, | |
| "learning_rate": 9.719100269108872e-05, | |
| "loss": 0.0399, | |
| "step": 640 | |
| }, | |
| { | |
| "epoch": 0.9978890807906352, | |
| "grad_norm": 0.2594519853591919, | |
| "learning_rate": 9.701106897605304e-05, | |
| "loss": 0.0391, | |
| "step": 650 | |
| }, | |
| { | |
| "epoch": 0.9978890807906352, | |
| "eval_loss": 0.04029757156968117, | |
| "eval_runtime": 157.2496, | |
| "eval_samples_per_second": 7.37, | |
| "eval_steps_per_second": 7.37, | |
| "step": 650 | |
| }, | |
| { | |
| "epoch": 1.0132412204951065, | |
| "grad_norm": 0.3294517397880554, | |
| "learning_rate": 9.682572700845006e-05, | |
| "loss": 0.032, | |
| "step": 660 | |
| }, | |
| { | |
| "epoch": 1.0285933601995778, | |
| "grad_norm": 0.3584151566028595, | |
| "learning_rate": 9.663499811041082e-05, | |
| "loss": 0.0361, | |
| "step": 670 | |
| }, | |
| { | |
| "epoch": 1.0439454999040492, | |
| "grad_norm": 0.24276219308376312, | |
| "learning_rate": 9.643890422379018e-05, | |
| "loss": 0.0341, | |
| "step": 680 | |
| }, | |
| { | |
| "epoch": 1.0592976396085205, | |
| "grad_norm": 0.25595176219940186, | |
| "learning_rate": 9.623746790764261e-05, | |
| "loss": 0.0323, | |
| "step": 690 | |
| }, | |
| { | |
| "epoch": 1.0746497793129917, | |
| "grad_norm": 0.33263134956359863, | |
| "learning_rate": 9.603071233562695e-05, | |
| "loss": 0.0304, | |
| "step": 700 | |
| }, | |
| { | |
| "epoch": 1.0746497793129917, | |
| "eval_loss": 0.042355190962553024, | |
| "eval_runtime": 157.2545, | |
| "eval_samples_per_second": 7.37, | |
| "eval_steps_per_second": 7.37, | |
| "step": 700 | |
| }, | |
| { | |
| "epoch": 1.090001919017463, | |
| "grad_norm": 0.11574736982584, | |
| "learning_rate": 9.581866129334044e-05, | |
| "loss": 0.0416, | |
| "step": 710 | |
| }, | |
| { | |
| "epoch": 1.1053540587219344, | |
| "grad_norm": 0.304192453622818, | |
| "learning_rate": 9.560133917558242e-05, | |
| "loss": 0.048, | |
| "step": 720 | |
| }, | |
| { | |
| "epoch": 1.1207061984264057, | |
| "grad_norm": 0.21460862457752228, | |
| "learning_rate": 9.537877098354786e-05, | |
| "loss": 0.0355, | |
| "step": 730 | |
| }, | |
| { | |
| "epoch": 1.136058338130877, | |
| "grad_norm": 0.3124927878379822, | |
| "learning_rate": 9.51509823219512e-05, | |
| "loss": 0.0301, | |
| "step": 740 | |
| }, | |
| { | |
| "epoch": 1.1514104778353482, | |
| "grad_norm": 0.3640657067298889, | |
| "learning_rate": 9.491799939608065e-05, | |
| "loss": 0.0396, | |
| "step": 750 | |
| }, | |
| { | |
| "epoch": 1.1514104778353482, | |
| "eval_loss": 0.042958181351423264, | |
| "eval_runtime": 157.2635, | |
| "eval_samples_per_second": 7.37, | |
| "eval_steps_per_second": 7.37, | |
| "step": 750 | |
| }, | |
| { | |
| "epoch": 1.1667626175398196, | |
| "grad_norm": 0.33257851004600525, | |
| "learning_rate": 9.467984900878364e-05, | |
| "loss": 0.0336, | |
| "step": 760 | |
| }, | |
| { | |
| "epoch": 1.182114757244291, | |
| "grad_norm": 0.21673721075057983, | |
| "learning_rate": 9.443655855738321e-05, | |
| "loss": 0.029, | |
| "step": 770 | |
| }, | |
| { | |
| "epoch": 1.1974668969487623, | |
| "grad_norm": 0.2433970421552658, | |
| "learning_rate": 9.41881560305262e-05, | |
| "loss": 0.0335, | |
| "step": 780 | |
| }, | |
| { | |
| "epoch": 1.2128190366532334, | |
| "grad_norm": 0.3440137207508087, | |
| "learning_rate": 9.393467000496344e-05, | |
| "loss": 0.0409, | |
| "step": 790 | |
| }, | |
| { | |
| "epoch": 1.2281711763577048, | |
| "grad_norm": 0.2682274580001831, | |
| "learning_rate": 9.367612964226218e-05, | |
| "loss": 0.0347, | |
| "step": 800 | |
| }, | |
| { | |
| "epoch": 1.2281711763577048, | |
| "eval_loss": 0.039533697068691254, | |
| "eval_runtime": 157.2692, | |
| "eval_samples_per_second": 7.37, | |
| "eval_steps_per_second": 7.37, | |
| "step": 800 | |
| }, | |
| { | |
| "epoch": 1.2435233160621761, | |
| "grad_norm": 0.12573248147964478, | |
| "learning_rate": 9.341256468545122e-05, | |
| "loss": 0.0246, | |
| "step": 810 | |
| }, | |
| { | |
| "epoch": 1.2588754557666475, | |
| "grad_norm": 0.5845211744308472, | |
| "learning_rate": 9.314400545559934e-05, | |
| "loss": 0.0287, | |
| "step": 820 | |
| }, | |
| { | |
| "epoch": 1.2742275954711189, | |
| "grad_norm": 0.43409937620162964, | |
| "learning_rate": 9.287048284832698e-05, | |
| "loss": 0.0358, | |
| "step": 830 | |
| }, | |
| { | |
| "epoch": 1.28957973517559, | |
| "grad_norm": 0.47061261534690857, | |
| "learning_rate": 9.2592028330252e-05, | |
| "loss": 0.0339, | |
| "step": 840 | |
| }, | |
| { | |
| "epoch": 1.3049318748800613, | |
| "grad_norm": 0.21692904829978943, | |
| "learning_rate": 9.230867393536972e-05, | |
| "loss": 0.0356, | |
| "step": 850 | |
| }, | |
| { | |
| "epoch": 1.3049318748800613, | |
| "eval_loss": 0.04097624123096466, | |
| "eval_runtime": 157.2932, | |
| "eval_samples_per_second": 7.368, | |
| "eval_steps_per_second": 7.368, | |
| "step": 850 | |
| }, | |
| { | |
| "epoch": 1.3202840145845327, | |
| "grad_norm": 0.4102625846862793, | |
| "learning_rate": 9.202045226136757e-05, | |
| "loss": 0.0315, | |
| "step": 860 | |
| }, | |
| { | |
| "epoch": 1.335636154289004, | |
| "grad_norm": 0.10215727984905243, | |
| "learning_rate": 9.172739646587509e-05, | |
| "loss": 0.0242, | |
| "step": 870 | |
| }, | |
| { | |
| "epoch": 1.3509882939934754, | |
| "grad_norm": 0.14424508810043335, | |
| "learning_rate": 9.142954026264931e-05, | |
| "loss": 0.0313, | |
| "step": 880 | |
| }, | |
| { | |
| "epoch": 1.3663404336979466, | |
| "grad_norm": 0.21271869540214539, | |
| "learning_rate": 9.112691791769634e-05, | |
| "loss": 0.0296, | |
| "step": 890 | |
| }, | |
| { | |
| "epoch": 1.381692573402418, | |
| "grad_norm": 0.39837774634361267, | |
| "learning_rate": 9.081956424532926e-05, | |
| "loss": 0.0333, | |
| "step": 900 | |
| }, | |
| { | |
| "epoch": 1.381692573402418, | |
| "eval_loss": 0.03842702880501747, | |
| "eval_runtime": 157.3229, | |
| "eval_samples_per_second": 7.367, | |
| "eval_steps_per_second": 7.367, | |
| "step": 900 | |
| }, | |
| { | |
| "epoch": 1.3970447131068893, | |
| "grad_norm": 0.22184543311595917, | |
| "learning_rate": 9.050751460416305e-05, | |
| "loss": 0.0334, | |
| "step": 910 | |
| }, | |
| { | |
| "epoch": 1.4123968528113606, | |
| "grad_norm": 0.191727876663208, | |
| "learning_rate": 9.019080489304685e-05, | |
| "loss": 0.0367, | |
| "step": 920 | |
| }, | |
| { | |
| "epoch": 1.427748992515832, | |
| "grad_norm": 0.39098262786865234, | |
| "learning_rate": 8.986947154693408e-05, | |
| "loss": 0.0487, | |
| "step": 930 | |
| }, | |
| { | |
| "epoch": 1.4431011322203031, | |
| "grad_norm": 0.3450808823108673, | |
| "learning_rate": 8.954355153269088e-05, | |
| "loss": 0.0427, | |
| "step": 940 | |
| }, | |
| { | |
| "epoch": 1.4584532719247745, | |
| "grad_norm": 0.1870778352022171, | |
| "learning_rate": 8.921308234484336e-05, | |
| "loss": 0.0318, | |
| "step": 950 | |
| }, | |
| { | |
| "epoch": 1.4584532719247745, | |
| "eval_loss": 0.037589143961668015, | |
| "eval_runtime": 157.2893, | |
| "eval_samples_per_second": 7.369, | |
| "eval_steps_per_second": 7.369, | |
| "step": 950 | |
| }, | |
| { | |
| "epoch": 1.4738054116292458, | |
| "grad_norm": 0.18667995929718018, | |
| "learning_rate": 8.887810200126419e-05, | |
| "loss": 0.0352, | |
| "step": 960 | |
| }, | |
| { | |
| "epoch": 1.4891575513337172, | |
| "grad_norm": 0.40204545855522156, | |
| "learning_rate": 8.853864903879889e-05, | |
| "loss": 0.0383, | |
| "step": 970 | |
| }, | |
| { | |
| "epoch": 1.5045096910381885, | |
| "grad_norm": 0.15286822617053986, | |
| "learning_rate": 8.81947625088325e-05, | |
| "loss": 0.0345, | |
| "step": 980 | |
| }, | |
| { | |
| "epoch": 1.5198618307426597, | |
| "grad_norm": 0.17312301695346832, | |
| "learning_rate": 8.784648197279701e-05, | |
| "loss": 0.032, | |
| "step": 990 | |
| }, | |
| { | |
| "epoch": 1.535213970447131, | |
| "grad_norm": 0.31317198276519775, | |
| "learning_rate": 8.749384749762015e-05, | |
| "loss": 0.0309, | |
| "step": 1000 | |
| }, | |
| { | |
| "epoch": 1.535213970447131, | |
| "eval_loss": 0.03832419961690903, | |
| "eval_runtime": 157.292, | |
| "eval_samples_per_second": 7.368, | |
| "eval_steps_per_second": 7.368, | |
| "step": 1000 | |
| }, | |
| { | |
| "epoch": 1.5505661101516024, | |
| "grad_norm": 0.2550368010997772, | |
| "learning_rate": 8.713689965111602e-05, | |
| "loss": 0.0246, | |
| "step": 1010 | |
| }, | |
| { | |
| "epoch": 1.5659182498560735, | |
| "grad_norm": 0.2712564468383789, | |
| "learning_rate": 8.677567949731801e-05, | |
| "loss": 0.0372, | |
| "step": 1020 | |
| }, | |
| { | |
| "epoch": 1.581270389560545, | |
| "grad_norm": 0.3656397759914398, | |
| "learning_rate": 8.64102285917548e-05, | |
| "loss": 0.0502, | |
| "step": 1030 | |
| }, | |
| { | |
| "epoch": 1.5966225292650162, | |
| "grad_norm": 0.1517850011587143, | |
| "learning_rate": 8.604058897666962e-05, | |
| "loss": 0.0333, | |
| "step": 1040 | |
| }, | |
| { | |
| "epoch": 1.6119746689694876, | |
| "grad_norm": 0.24108999967575073, | |
| "learning_rate": 8.566680317618377e-05, | |
| "loss": 0.0216, | |
| "step": 1050 | |
| }, | |
| { | |
| "epoch": 1.6119746689694876, | |
| "eval_loss": 0.037316225469112396, | |
| "eval_runtime": 157.2218, | |
| "eval_samples_per_second": 7.372, | |
| "eval_steps_per_second": 7.372, | |
| "step": 1050 | |
| }, | |
| { | |
| "epoch": 1.627326808673959, | |
| "grad_norm": 0.21492698788642883, | |
| "learning_rate": 8.528891419140438e-05, | |
| "loss": 0.0387, | |
| "step": 1060 | |
| }, | |
| { | |
| "epoch": 1.64267894837843, | |
| "grad_norm": 0.2957633137702942, | |
| "learning_rate": 8.490696549547761e-05, | |
| "loss": 0.0272, | |
| "step": 1070 | |
| }, | |
| { | |
| "epoch": 1.6580310880829017, | |
| "grad_norm": 0.3371316194534302, | |
| "learning_rate": 8.452100102858734e-05, | |
| "loss": 0.0401, | |
| "step": 1080 | |
| }, | |
| { | |
| "epoch": 1.6733832277873728, | |
| "grad_norm": 0.28173157572746277, | |
| "learning_rate": 8.413106519290023e-05, | |
| "loss": 0.0322, | |
| "step": 1090 | |
| }, | |
| { | |
| "epoch": 1.6887353674918442, | |
| "grad_norm": 0.2843840718269348, | |
| "learning_rate": 8.373720284745757e-05, | |
| "loss": 0.0315, | |
| "step": 1100 | |
| }, | |
| { | |
| "epoch": 1.6887353674918442, | |
| "eval_loss": 0.03704118728637695, | |
| "eval_runtime": 157.1462, | |
| "eval_samples_per_second": 7.375, | |
| "eval_steps_per_second": 7.375, | |
| "step": 1100 | |
| }, | |
| { | |
| "epoch": 1.7040875071963155, | |
| "grad_norm": 0.18807817995548248, | |
| "learning_rate": 8.333945930301459e-05, | |
| "loss": 0.0312, | |
| "step": 1110 | |
| }, | |
| { | |
| "epoch": 1.7194396469007867, | |
| "grad_norm": 0.199060320854187, | |
| "learning_rate": 8.293788031682789e-05, | |
| "loss": 0.0305, | |
| "step": 1120 | |
| }, | |
| { | |
| "epoch": 1.7347917866052582, | |
| "grad_norm": 0.22268791496753693, | |
| "learning_rate": 8.253251208739137e-05, | |
| "loss": 0.0303, | |
| "step": 1130 | |
| }, | |
| { | |
| "epoch": 1.7501439263097294, | |
| "grad_norm": 0.3476749658584595, | |
| "learning_rate": 8.21234012491215e-05, | |
| "loss": 0.0292, | |
| "step": 1140 | |
| }, | |
| { | |
| "epoch": 1.7654960660142007, | |
| "grad_norm": 0.32021111249923706, | |
| "learning_rate": 8.171059486699224e-05, | |
| "loss": 0.0273, | |
| "step": 1150 | |
| }, | |
| { | |
| "epoch": 1.7654960660142007, | |
| "eval_loss": 0.03893882781267166, | |
| "eval_runtime": 157.1622, | |
| "eval_samples_per_second": 7.375, | |
| "eval_steps_per_second": 7.375, | |
| "step": 1150 | |
| }, | |
| { | |
| "epoch": 1.780848205718672, | |
| "grad_norm": 0.20445989072322845, | |
| "learning_rate": 8.129414043112087e-05, | |
| "loss": 0.035, | |
| "step": 1160 | |
| }, | |
| { | |
| "epoch": 1.7962003454231432, | |
| "grad_norm": 0.2090442031621933, | |
| "learning_rate": 8.087408585130438e-05, | |
| "loss": 0.0283, | |
| "step": 1170 | |
| }, | |
| { | |
| "epoch": 1.8115524851276148, | |
| "grad_norm": 0.16279439628124237, | |
| "learning_rate": 8.045047945150796e-05, | |
| "loss": 0.0348, | |
| "step": 1180 | |
| }, | |
| { | |
| "epoch": 1.826904624832086, | |
| "grad_norm": 0.277883380651474, | |
| "learning_rate": 8.002336996430561e-05, | |
| "loss": 0.0309, | |
| "step": 1190 | |
| }, | |
| { | |
| "epoch": 1.8422567645365573, | |
| "grad_norm": 0.10432474315166473, | |
| "learning_rate": 7.959280652527394e-05, | |
| "loss": 0.0188, | |
| "step": 1200 | |
| }, | |
| { | |
| "epoch": 1.8422567645365573, | |
| "eval_loss": 0.03792406991124153, | |
| "eval_runtime": 157.189, | |
| "eval_samples_per_second": 7.373, | |
| "eval_steps_per_second": 7.373, | |
| "step": 1200 | |
| }, | |
| { | |
| "epoch": 1.8576089042410286, | |
| "grad_norm": 0.1943598836660385, | |
| "learning_rate": 7.915883866733946e-05, | |
| "loss": 0.0348, | |
| "step": 1210 | |
| }, | |
| { | |
| "epoch": 1.8729610439454998, | |
| "grad_norm": 0.36124810576438904, | |
| "learning_rate": 7.872151631508022e-05, | |
| "loss": 0.0311, | |
| "step": 1220 | |
| }, | |
| { | |
| "epoch": 1.8883131836499714, | |
| "grad_norm": 0.24344752728939056, | |
| "learning_rate": 7.828088977898234e-05, | |
| "loss": 0.0361, | |
| "step": 1230 | |
| }, | |
| { | |
| "epoch": 1.9036653233544425, | |
| "grad_norm": 0.2729787826538086, | |
| "learning_rate": 7.783700974965225e-05, | |
| "loss": 0.043, | |
| "step": 1240 | |
| }, | |
| { | |
| "epoch": 1.9190174630589139, | |
| "grad_norm": 0.20694006979465485, | |
| "learning_rate": 7.738992729198511e-05, | |
| "loss": 0.0362, | |
| "step": 1250 | |
| }, | |
| { | |
| "epoch": 1.9190174630589139, | |
| "eval_loss": 0.037059471011161804, | |
| "eval_runtime": 157.1426, | |
| "eval_samples_per_second": 7.375, | |
| "eval_steps_per_second": 7.375, | |
| "step": 1250 | |
| }, | |
| { | |
| "epoch": 1.9343696027633852, | |
| "grad_norm": 0.13363595306873322, | |
| "learning_rate": 7.693969383929017e-05, | |
| "loss": 0.0247, | |
| "step": 1260 | |
| }, | |
| { | |
| "epoch": 1.9497217424678563, | |
| "grad_norm": 0.4264666438102722, | |
| "learning_rate": 7.648636118737385e-05, | |
| "loss": 0.0375, | |
| "step": 1270 | |
| }, | |
| { | |
| "epoch": 1.965073882172328, | |
| "grad_norm": 0.2813757359981537, | |
| "learning_rate": 7.602998148858089e-05, | |
| "loss": 0.0302, | |
| "step": 1280 | |
| }, | |
| { | |
| "epoch": 1.980426021876799, | |
| "grad_norm": 0.3236190974712372, | |
| "learning_rate": 7.557060724579484e-05, | |
| "loss": 0.0325, | |
| "step": 1290 | |
| }, | |
| { | |
| "epoch": 1.9957781615812704, | |
| "grad_norm": 0.4948330223560333, | |
| "learning_rate": 7.51082913063978e-05, | |
| "loss": 0.0435, | |
| "step": 1300 | |
| }, | |
| { | |
| "epoch": 1.9957781615812704, | |
| "eval_loss": 0.036797065287828445, | |
| "eval_runtime": 157.0855, | |
| "eval_samples_per_second": 7.378, | |
| "eval_steps_per_second": 7.378, | |
| "step": 1300 | |
| }, | |
| { | |
| "epoch": 2.0111303012857418, | |
| "grad_norm": 0.22444044053554535, | |
| "learning_rate": 7.464308685619099e-05, | |
| "loss": 0.0272, | |
| "step": 1310 | |
| }, | |
| { | |
| "epoch": 2.026482440990213, | |
| "grad_norm": 0.3434648811817169, | |
| "learning_rate": 7.417504741327587e-05, | |
| "loss": 0.0168, | |
| "step": 1320 | |
| }, | |
| { | |
| "epoch": 2.0418345806946845, | |
| "grad_norm": 0.4413999617099762, | |
| "learning_rate": 7.370422682189755e-05, | |
| "loss": 0.0209, | |
| "step": 1330 | |
| }, | |
| { | |
| "epoch": 2.0571867203991556, | |
| "grad_norm": 0.16911853849887848, | |
| "learning_rate": 7.323067924625024e-05, | |
| "loss": 0.0172, | |
| "step": 1340 | |
| }, | |
| { | |
| "epoch": 2.0725388601036268, | |
| "grad_norm": 0.21051056683063507, | |
| "learning_rate": 7.275445916424627e-05, | |
| "loss": 0.0212, | |
| "step": 1350 | |
| }, | |
| { | |
| "epoch": 2.0725388601036268, | |
| "eval_loss": 0.03962073475122452, | |
| "eval_runtime": 157.106, | |
| "eval_samples_per_second": 7.377, | |
| "eval_steps_per_second": 7.377, | |
| "step": 1350 | |
| }, | |
| { | |
| "epoch": 2.0878909998080983, | |
| "grad_norm": 0.4296096861362457, | |
| "learning_rate": 7.227562136124864e-05, | |
| "loss": 0.0179, | |
| "step": 1360 | |
| }, | |
| { | |
| "epoch": 2.1032431395125695, | |
| "grad_norm": 0.23325330018997192, | |
| "learning_rate": 7.179422092376856e-05, | |
| "loss": 0.02, | |
| "step": 1370 | |
| }, | |
| { | |
| "epoch": 2.118595279217041, | |
| "grad_norm": 0.4717545211315155, | |
| "learning_rate": 7.13103132331281e-05, | |
| "loss": 0.0223, | |
| "step": 1380 | |
| }, | |
| { | |
| "epoch": 2.133947418921512, | |
| "grad_norm": 0.42749759554862976, | |
| "learning_rate": 7.082395395908903e-05, | |
| "loss": 0.0249, | |
| "step": 1390 | |
| }, | |
| { | |
| "epoch": 2.1492995586259833, | |
| "grad_norm": 0.20575480163097382, | |
| "learning_rate": 7.033519905344846e-05, | |
| "loss": 0.0184, | |
| "step": 1400 | |
| }, | |
| { | |
| "epoch": 2.1492995586259833, | |
| "eval_loss": 0.03931669518351555, | |
| "eval_runtime": 157.1133, | |
| "eval_samples_per_second": 7.377, | |
| "eval_steps_per_second": 7.377, | |
| "step": 1400 | |
| }, | |
| { | |
| "epoch": 2.164651698330455, | |
| "grad_norm": 0.22173267602920532, | |
| "learning_rate": 6.984410474360195e-05, | |
| "loss": 0.0243, | |
| "step": 1410 | |
| }, | |
| { | |
| "epoch": 2.180003838034926, | |
| "grad_norm": 0.2206946164369583, | |
| "learning_rate": 6.935072752607511e-05, | |
| "loss": 0.0271, | |
| "step": 1420 | |
| }, | |
| { | |
| "epoch": 2.1953559777393976, | |
| "grad_norm": 0.21722865104675293, | |
| "learning_rate": 6.885512416002412e-05, | |
| "loss": 0.0308, | |
| "step": 1430 | |
| }, | |
| { | |
| "epoch": 2.2107081174438687, | |
| "grad_norm": 0.264813631772995, | |
| "learning_rate": 6.835735166070587e-05, | |
| "loss": 0.0205, | |
| "step": 1440 | |
| }, | |
| { | |
| "epoch": 2.22606025714834, | |
| "grad_norm": 0.23824357986450195, | |
| "learning_rate": 6.785746729291897e-05, | |
| "loss": 0.0205, | |
| "step": 1450 | |
| }, | |
| { | |
| "epoch": 2.22606025714834, | |
| "eval_loss": 0.03809254616498947, | |
| "eval_runtime": 157.1629, | |
| "eval_samples_per_second": 7.375, | |
| "eval_steps_per_second": 7.375, | |
| "step": 1450 | |
| }, | |
| { | |
| "epoch": 2.2414123968528115, | |
| "grad_norm": 0.23905223608016968, | |
| "learning_rate": 6.735552856441585e-05, | |
| "loss": 0.0232, | |
| "step": 1460 | |
| }, | |
| { | |
| "epoch": 2.2567645365572826, | |
| "grad_norm": 0.32694312930107117, | |
| "learning_rate": 6.685159321928691e-05, | |
| "loss": 0.0216, | |
| "step": 1470 | |
| }, | |
| { | |
| "epoch": 2.272116676261754, | |
| "grad_norm": 0.2926463186740875, | |
| "learning_rate": 6.634571923131756e-05, | |
| "loss": 0.0239, | |
| "step": 1480 | |
| }, | |
| { | |
| "epoch": 2.2874688159662253, | |
| "grad_norm": 0.154023677110672, | |
| "learning_rate": 6.583796479731872e-05, | |
| "loss": 0.0175, | |
| "step": 1490 | |
| }, | |
| { | |
| "epoch": 2.3028209556706964, | |
| "grad_norm": 0.10085402429103851, | |
| "learning_rate": 6.532838833043189e-05, | |
| "loss": 0.0316, | |
| "step": 1500 | |
| }, | |
| { | |
| "epoch": 2.3028209556706964, | |
| "eval_loss": 0.03903723508119583, | |
| "eval_runtime": 157.1873, | |
| "eval_samples_per_second": 7.373, | |
| "eval_steps_per_second": 7.373, | |
| "step": 1500 | |
| }, | |
| { | |
| "epoch": 2.318173095375168, | |
| "grad_norm": 0.36862310767173767, | |
| "learning_rate": 6.481704845340894e-05, | |
| "loss": 0.025, | |
| "step": 1510 | |
| }, | |
| { | |
| "epoch": 2.333525235079639, | |
| "grad_norm": 0.3234424591064453, | |
| "learning_rate": 6.43040039918683e-05, | |
| "loss": 0.0219, | |
| "step": 1520 | |
| }, | |
| { | |
| "epoch": 2.3488773747841103, | |
| "grad_norm": 0.5369458198547363, | |
| "learning_rate": 6.37893139675273e-05, | |
| "loss": 0.019, | |
| "step": 1530 | |
| }, | |
| { | |
| "epoch": 2.364229514488582, | |
| "grad_norm": 0.31320443749427795, | |
| "learning_rate": 6.327303759141235e-05, | |
| "loss": 0.0188, | |
| "step": 1540 | |
| }, | |
| { | |
| "epoch": 2.379581654193053, | |
| "grad_norm": 0.35849952697753906, | |
| "learning_rate": 6.275523425704707e-05, | |
| "loss": 0.0234, | |
| "step": 1550 | |
| }, | |
| { | |
| "epoch": 2.379581654193053, | |
| "eval_loss": 0.03996282070875168, | |
| "eval_runtime": 157.1594, | |
| "eval_samples_per_second": 7.375, | |
| "eval_steps_per_second": 7.375, | |
| "step": 1550 | |
| }, | |
| { | |
| "epoch": 2.3949337938975246, | |
| "grad_norm": 0.32893258333206177, | |
| "learning_rate": 6.223596353361961e-05, | |
| "loss": 0.0226, | |
| "step": 1560 | |
| }, | |
| { | |
| "epoch": 2.4102859336019957, | |
| "grad_norm": 0.03046131506562233, | |
| "learning_rate": 6.171528515912965e-05, | |
| "loss": 0.0162, | |
| "step": 1570 | |
| }, | |
| { | |
| "epoch": 2.425638073306467, | |
| "grad_norm": 0.3565883934497833, | |
| "learning_rate": 6.119325903351599e-05, | |
| "loss": 0.0253, | |
| "step": 1580 | |
| }, | |
| { | |
| "epoch": 2.4409902130109384, | |
| "grad_norm": 0.3316241204738617, | |
| "learning_rate": 6.0669945211765585e-05, | |
| "loss": 0.0312, | |
| "step": 1590 | |
| }, | |
| { | |
| "epoch": 2.4563423527154096, | |
| "grad_norm": 0.338368684053421, | |
| "learning_rate": 6.0145403897004696e-05, | |
| "loss": 0.0258, | |
| "step": 1600 | |
| }, | |
| { | |
| "epoch": 2.4563423527154096, | |
| "eval_loss": 0.038748133927583694, | |
| "eval_runtime": 157.2369, | |
| "eval_samples_per_second": 7.371, | |
| "eval_steps_per_second": 7.371, | |
| "step": 1600 | |
| }, | |
| { | |
| "epoch": 2.471694492419881, | |
| "grad_norm": 0.45592939853668213, | |
| "learning_rate": 5.961969543357292e-05, | |
| "loss": 0.0247, | |
| "step": 1610 | |
| }, | |
| { | |
| "epoch": 2.4870466321243523, | |
| "grad_norm": 0.17753665149211884, | |
| "learning_rate": 5.9092880300081123e-05, | |
| "loss": 0.0277, | |
| "step": 1620 | |
| }, | |
| { | |
| "epoch": 2.5023987718288234, | |
| "grad_norm": 0.4086242914199829, | |
| "learning_rate": 5.8565019102453844e-05, | |
| "loss": 0.0237, | |
| "step": 1630 | |
| }, | |
| { | |
| "epoch": 2.517750911533295, | |
| "grad_norm": 0.24414750933647156, | |
| "learning_rate": 5.8036172566957006e-05, | |
| "loss": 0.0231, | |
| "step": 1640 | |
| }, | |
| { | |
| "epoch": 2.533103051237766, | |
| "grad_norm": 0.44709134101867676, | |
| "learning_rate": 5.750640153321194e-05, | |
| "loss": 0.0273, | |
| "step": 1650 | |
| }, | |
| { | |
| "epoch": 2.533103051237766, | |
| "eval_loss": 0.0392879992723465, | |
| "eval_runtime": 157.2456, | |
| "eval_samples_per_second": 7.371, | |
| "eval_steps_per_second": 7.371, | |
| "step": 1650 | |
| }, | |
| { | |
| "epoch": 2.5484551909422377, | |
| "grad_norm": 0.3048468232154846, | |
| "learning_rate": 5.697576694719616e-05, | |
| "loss": 0.0215, | |
| "step": 1660 | |
| }, | |
| { | |
| "epoch": 2.563807330646709, | |
| "grad_norm": 0.22297337651252747, | |
| "learning_rate": 5.644432985423206e-05, | |
| "loss": 0.0229, | |
| "step": 1670 | |
| }, | |
| { | |
| "epoch": 2.57915947035118, | |
| "grad_norm": 0.3140508830547333, | |
| "learning_rate": 5.591215139196414e-05, | |
| "loss": 0.0242, | |
| "step": 1680 | |
| }, | |
| { | |
| "epoch": 2.5945116100556516, | |
| "grad_norm": 0.22138510644435883, | |
| "learning_rate": 5.5379292783325585e-05, | |
| "loss": 0.0286, | |
| "step": 1690 | |
| }, | |
| { | |
| "epoch": 2.6098637497601227, | |
| "grad_norm": 0.3354733884334564, | |
| "learning_rate": 5.4845815329495054e-05, | |
| "loss": 0.0199, | |
| "step": 1700 | |
| }, | |
| { | |
| "epoch": 2.6098637497601227, | |
| "eval_loss": 0.03853138908743858, | |
| "eval_runtime": 157.254, | |
| "eval_samples_per_second": 7.37, | |
| "eval_steps_per_second": 7.37, | |
| "step": 1700 | |
| }, | |
| { | |
| "epoch": 2.6252158894645943, | |
| "grad_norm": 0.283342182636261, | |
| "learning_rate": 5.431178040284446e-05, | |
| "loss": 0.0223, | |
| "step": 1710 | |
| }, | |
| { | |
| "epoch": 2.6405680291690654, | |
| "grad_norm": 1.3571743965148926, | |
| "learning_rate": 5.377724943987855e-05, | |
| "loss": 0.0181, | |
| "step": 1720 | |
| }, | |
| { | |
| "epoch": 2.6559201688735365, | |
| "grad_norm": 0.10672520101070404, | |
| "learning_rate": 5.324228393416718e-05, | |
| "loss": 0.0181, | |
| "step": 1730 | |
| }, | |
| { | |
| "epoch": 2.671272308578008, | |
| "grad_norm": 0.5193530321121216, | |
| "learning_rate": 5.270694542927088e-05, | |
| "loss": 0.0257, | |
| "step": 1740 | |
| }, | |
| { | |
| "epoch": 2.6866244482824793, | |
| "grad_norm": 0.22108089923858643, | |
| "learning_rate": 5.21712955116608e-05, | |
| "loss": 0.0167, | |
| "step": 1750 | |
| }, | |
| { | |
| "epoch": 2.6866244482824793, | |
| "eval_loss": 0.03936908766627312, | |
| "eval_runtime": 157.2427, | |
| "eval_samples_per_second": 7.371, | |
| "eval_steps_per_second": 7.371, | |
| "step": 1750 | |
| }, | |
| { | |
| "epoch": 2.701976587986951, | |
| "grad_norm": 0.09635429084300995, | |
| "learning_rate": 5.1635395803633666e-05, | |
| "loss": 0.0197, | |
| "step": 1760 | |
| }, | |
| { | |
| "epoch": 2.717328727691422, | |
| "grad_norm": 0.14154723286628723, | |
| "learning_rate": 5.109930795622265e-05, | |
| "loss": 0.0153, | |
| "step": 1770 | |
| }, | |
| { | |
| "epoch": 2.732680867395893, | |
| "grad_norm": 0.19964846968650818, | |
| "learning_rate": 5.056309364210483e-05, | |
| "loss": 0.0191, | |
| "step": 1780 | |
| }, | |
| { | |
| "epoch": 2.7480330071003647, | |
| "grad_norm": 0.32814568281173706, | |
| "learning_rate": 5.002681454850632e-05, | |
| "loss": 0.024, | |
| "step": 1790 | |
| }, | |
| { | |
| "epoch": 2.763385146804836, | |
| "grad_norm": 0.3342536985874176, | |
| "learning_rate": 4.949053237010554e-05, | |
| "loss": 0.0288, | |
| "step": 1800 | |
| }, | |
| { | |
| "epoch": 2.763385146804836, | |
| "eval_loss": 0.042679354548454285, | |
| "eval_runtime": 157.2517, | |
| "eval_samples_per_second": 7.37, | |
| "eval_steps_per_second": 7.37, | |
| "step": 1800 | |
| }, | |
| { | |
| "epoch": 2.7787372865093074, | |
| "grad_norm": 0.3160741329193115, | |
| "learning_rate": 4.89543088019359e-05, | |
| "loss": 0.0172, | |
| "step": 1810 | |
| }, | |
| { | |
| "epoch": 2.7940894262137785, | |
| "grad_norm": 0.3922727108001709, | |
| "learning_rate": 4.841820553228805e-05, | |
| "loss": 0.0284, | |
| "step": 1820 | |
| }, | |
| { | |
| "epoch": 2.8094415659182497, | |
| "grad_norm": 0.3402051031589508, | |
| "learning_rate": 4.7882284235613324e-05, | |
| "loss": 0.0281, | |
| "step": 1830 | |
| }, | |
| { | |
| "epoch": 2.8247937056227213, | |
| "grad_norm": 0.25749653577804565, | |
| "learning_rate": 4.734660656542846e-05, | |
| "loss": 0.0231, | |
| "step": 1840 | |
| }, | |
| { | |
| "epoch": 2.8401458453271924, | |
| "grad_norm": 0.2327001392841339, | |
| "learning_rate": 4.681123414722291e-05, | |
| "loss": 0.022, | |
| "step": 1850 | |
| }, | |
| { | |
| "epoch": 2.8401458453271924, | |
| "eval_loss": 0.03755514323711395, | |
| "eval_runtime": 157.276, | |
| "eval_samples_per_second": 7.369, | |
| "eval_steps_per_second": 7.369, | |
| "step": 1850 | |
| }, | |
| { | |
| "epoch": 2.855497985031664, | |
| "grad_norm": 0.2501806616783142, | |
| "learning_rate": 4.627622857136929e-05, | |
| "loss": 0.0193, | |
| "step": 1860 | |
| }, | |
| { | |
| "epoch": 2.870850124736135, | |
| "grad_norm": 0.7793471217155457, | |
| "learning_rate": 4.5741651386037883e-05, | |
| "loss": 0.0298, | |
| "step": 1870 | |
| }, | |
| { | |
| "epoch": 2.8862022644406062, | |
| "grad_norm": 0.41012799739837646, | |
| "learning_rate": 4.5207564090116064e-05, | |
| "loss": 0.0183, | |
| "step": 1880 | |
| }, | |
| { | |
| "epoch": 2.901554404145078, | |
| "grad_norm": 0.3157999813556671, | |
| "learning_rate": 4.467402812613323e-05, | |
| "loss": 0.0182, | |
| "step": 1890 | |
| }, | |
| { | |
| "epoch": 2.916906543849549, | |
| "grad_norm": 0.3505517542362213, | |
| "learning_rate": 4.414110487319244e-05, | |
| "loss": 0.0237, | |
| "step": 1900 | |
| }, | |
| { | |
| "epoch": 2.916906543849549, | |
| "eval_loss": 0.03842131420969963, | |
| "eval_runtime": 157.2876, | |
| "eval_samples_per_second": 7.369, | |
| "eval_steps_per_second": 7.369, | |
| "step": 1900 | |
| }, | |
| { | |
| "epoch": 2.9322586835540205, | |
| "grad_norm": 0.3128899931907654, | |
| "learning_rate": 4.360885563990919e-05, | |
| "loss": 0.0226, | |
| "step": 1910 | |
| }, | |
| { | |
| "epoch": 2.9476108232584917, | |
| "grad_norm": 0.2808375656604767, | |
| "learning_rate": 4.307734165735829e-05, | |
| "loss": 0.0165, | |
| "step": 1920 | |
| }, | |
| { | |
| "epoch": 2.962962962962963, | |
| "grad_norm": 0.22949250042438507, | |
| "learning_rate": 4.254662407202976e-05, | |
| "loss": 0.022, | |
| "step": 1930 | |
| }, | |
| { | |
| "epoch": 2.9783151026674344, | |
| "grad_norm": 0.3448737859725952, | |
| "learning_rate": 4.201676393879446e-05, | |
| "loss": 0.021, | |
| "step": 1940 | |
| }, | |
| { | |
| "epoch": 2.9936672423719055, | |
| "grad_norm": 0.13228139281272888, | |
| "learning_rate": 4.148782221388007e-05, | |
| "loss": 0.0176, | |
| "step": 1950 | |
| }, | |
| { | |
| "epoch": 2.9936672423719055, | |
| "eval_loss": 0.03724433481693268, | |
| "eval_runtime": 157.321, | |
| "eval_samples_per_second": 7.367, | |
| "eval_steps_per_second": 7.367, | |
| "step": 1950 | |
| }, | |
| { | |
| "epoch": 3.009019382076377, | |
| "grad_norm": 0.19390782713890076, | |
| "learning_rate": 4.0959859747858706e-05, | |
| "loss": 0.0113, | |
| "step": 1960 | |
| }, | |
| { | |
| "epoch": 3.0243715217808482, | |
| "grad_norm": 0.4046991467475891, | |
| "learning_rate": 4.043293727864644e-05, | |
| "loss": 0.0108, | |
| "step": 1970 | |
| }, | |
| { | |
| "epoch": 3.0397236614853194, | |
| "grad_norm": 0.3703497052192688, | |
| "learning_rate": 3.990711542451591e-05, | |
| "loss": 0.0097, | |
| "step": 1980 | |
| }, | |
| { | |
| "epoch": 3.055075801189791, | |
| "grad_norm": 0.2644294202327728, | |
| "learning_rate": 3.9382454677122704e-05, | |
| "loss": 0.0127, | |
| "step": 1990 | |
| }, | |
| { | |
| "epoch": 3.070427940894262, | |
| "grad_norm": 0.15409328043460846, | |
| "learning_rate": 3.885901539454623e-05, | |
| "loss": 0.0057, | |
| "step": 2000 | |
| }, | |
| { | |
| "epoch": 3.070427940894262, | |
| "eval_loss": 0.04431070759892464, | |
| "eval_runtime": 157.3002, | |
| "eval_samples_per_second": 7.368, | |
| "eval_steps_per_second": 7.368, | |
| "step": 2000 | |
| }, | |
| { | |
| "epoch": 3.0857800805987337, | |
| "grad_norm": 0.4345476031303406, | |
| "learning_rate": 3.833685779434597e-05, | |
| "loss": 0.0172, | |
| "step": 2010 | |
| }, | |
| { | |
| "epoch": 3.101132220303205, | |
| "grad_norm": 0.23631352186203003, | |
| "learning_rate": 3.7816041946634024e-05, | |
| "loss": 0.0111, | |
| "step": 2020 | |
| }, | |
| { | |
| "epoch": 3.116484360007676, | |
| "grad_norm": 0.4313735365867615, | |
| "learning_rate": 3.729662776716439e-05, | |
| "loss": 0.0126, | |
| "step": 2030 | |
| }, | |
| { | |
| "epoch": 3.1318364997121475, | |
| "grad_norm": 0.13873319327831268, | |
| "learning_rate": 3.677867501044029e-05, | |
| "loss": 0.0059, | |
| "step": 2040 | |
| }, | |
| { | |
| "epoch": 3.1471886394166186, | |
| "grad_norm": 0.2149595320224762, | |
| "learning_rate": 3.6262243262839654e-05, | |
| "loss": 0.0148, | |
| "step": 2050 | |
| }, | |
| { | |
| "epoch": 3.1471886394166186, | |
| "eval_loss": 0.04357267543673515, | |
| "eval_runtime": 157.232, | |
| "eval_samples_per_second": 7.371, | |
| "eval_steps_per_second": 7.371, | |
| "step": 2050 | |
| }, | |
| { | |
| "epoch": 3.16254077912109, | |
| "grad_norm": 0.07435044646263123, | |
| "learning_rate": 3.574739193576042e-05, | |
| "loss": 0.007, | |
| "step": 2060 | |
| }, | |
| { | |
| "epoch": 3.1778929188255614, | |
| "grad_norm": 0.41588830947875977, | |
| "learning_rate": 3.5234180258785554e-05, | |
| "loss": 0.0091, | |
| "step": 2070 | |
| }, | |
| { | |
| "epoch": 3.1932450585300325, | |
| "grad_norm": 0.038579829037189484, | |
| "learning_rate": 3.472266727286928e-05, | |
| "loss": 0.0073, | |
| "step": 2080 | |
| }, | |
| { | |
| "epoch": 3.208597198234504, | |
| "grad_norm": 0.15435343980789185, | |
| "learning_rate": 3.4212911823544746e-05, | |
| "loss": 0.0123, | |
| "step": 2090 | |
| }, | |
| { | |
| "epoch": 3.223949337938975, | |
| "grad_norm": 3.7131805419921875, | |
| "learning_rate": 3.370497255415443e-05, | |
| "loss": 0.0153, | |
| "step": 2100 | |
| }, | |
| { | |
| "epoch": 3.223949337938975, | |
| "eval_loss": 0.04993755370378494, | |
| "eval_runtime": 157.2762, | |
| "eval_samples_per_second": 7.369, | |
| "eval_steps_per_second": 7.369, | |
| "step": 2100 | |
| }, | |
| { | |
| "epoch": 3.239301477643447, | |
| "grad_norm": 0.7524594664573669, | |
| "learning_rate": 3.319890789910364e-05, | |
| "loss": 0.0171, | |
| "step": 2110 | |
| }, | |
| { | |
| "epoch": 3.254653617347918, | |
| "grad_norm": 0.08906435966491699, | |
| "learning_rate": 3.269477607713802e-05, | |
| "loss": 0.0155, | |
| "step": 2120 | |
| }, | |
| { | |
| "epoch": 3.270005757052389, | |
| "grad_norm": 0.5883983373641968, | |
| "learning_rate": 3.219263508464614e-05, | |
| "loss": 0.0125, | |
| "step": 2130 | |
| }, | |
| { | |
| "epoch": 3.2853578967568606, | |
| "grad_norm": 0.8002910017967224, | |
| "learning_rate": 3.169254268898725e-05, | |
| "loss": 0.0113, | |
| "step": 2140 | |
| }, | |
| { | |
| "epoch": 3.3007100364613318, | |
| "grad_norm": 0.023618843406438828, | |
| "learning_rate": 3.119455642184572e-05, | |
| "loss": 0.0116, | |
| "step": 2150 | |
| }, | |
| { | |
| "epoch": 3.3007100364613318, | |
| "eval_loss": 0.044418033212423325, | |
| "eval_runtime": 157.2945, | |
| "eval_samples_per_second": 7.368, | |
| "eval_steps_per_second": 7.368, | |
| "step": 2150 | |
| }, | |
| { | |
| "epoch": 3.3160621761658033, | |
| "grad_norm": 0.24659119546413422, | |
| "learning_rate": 3.069873357261249e-05, | |
| "loss": 0.0135, | |
| "step": 2160 | |
| }, | |
| { | |
| "epoch": 3.3314143158702745, | |
| "grad_norm": 0.2751818597316742, | |
| "learning_rate": 3.020513118179428e-05, | |
| "loss": 0.0156, | |
| "step": 2170 | |
| }, | |
| { | |
| "epoch": 3.3467664555747456, | |
| "grad_norm": 0.17336240410804749, | |
| "learning_rate": 2.9713806034451652e-05, | |
| "loss": 0.0107, | |
| "step": 2180 | |
| }, | |
| { | |
| "epoch": 3.362118595279217, | |
| "grad_norm": 0.03696625307202339, | |
| "learning_rate": 2.9224814653666242e-05, | |
| "loss": 0.0107, | |
| "step": 2190 | |
| }, | |
| { | |
| "epoch": 3.3774707349836883, | |
| "grad_norm": 0.14558713138103485, | |
| "learning_rate": 2.8738213294038212e-05, | |
| "loss": 0.0116, | |
| "step": 2200 | |
| }, | |
| { | |
| "epoch": 3.3774707349836883, | |
| "eval_loss": 0.04727363586425781, | |
| "eval_runtime": 157.2356, | |
| "eval_samples_per_second": 7.371, | |
| "eval_steps_per_second": 7.371, | |
| "step": 2200 | |
| }, | |
| { | |
| "epoch": 3.39282287468816, | |
| "grad_norm": 0.4054368734359741, | |
| "learning_rate": 2.8254057935214735e-05, | |
| "loss": 0.0118, | |
| "step": 2210 | |
| }, | |
| { | |
| "epoch": 3.408175014392631, | |
| "grad_norm": 0.3659217059612274, | |
| "learning_rate": 2.7772404275449825e-05, | |
| "loss": 0.0126, | |
| "step": 2220 | |
| }, | |
| { | |
| "epoch": 3.423527154097102, | |
| "grad_norm": 0.2420692890882492, | |
| "learning_rate": 2.7293307725196793e-05, | |
| "loss": 0.0084, | |
| "step": 2230 | |
| }, | |
| { | |
| "epoch": 3.4388792938015738, | |
| "grad_norm": 0.11345157027244568, | |
| "learning_rate": 2.6816823400733625e-05, | |
| "loss": 0.008, | |
| "step": 2240 | |
| }, | |
| { | |
| "epoch": 3.454231433506045, | |
| "grad_norm": 0.6473270654678345, | |
| "learning_rate": 2.6343006117822437e-05, | |
| "loss": 0.0133, | |
| "step": 2250 | |
| }, | |
| { | |
| "epoch": 3.454231433506045, | |
| "eval_loss": 0.05019335821270943, | |
| "eval_runtime": 157.2449, | |
| "eval_samples_per_second": 7.371, | |
| "eval_steps_per_second": 7.371, | |
| "step": 2250 | |
| }, | |
| { | |
| "epoch": 3.469583573210516, | |
| "grad_norm": 0.18109719455242157, | |
| "learning_rate": 2.587191038540317e-05, | |
| "loss": 0.0106, | |
| "step": 2260 | |
| }, | |
| { | |
| "epoch": 3.4849357129149876, | |
| "grad_norm": 0.05215310677886009, | |
| "learning_rate": 2.5403590399322886e-05, | |
| "loss": 0.0048, | |
| "step": 2270 | |
| }, | |
| { | |
| "epoch": 3.5002878526194587, | |
| "grad_norm": 0.39558789134025574, | |
| "learning_rate": 2.493810003610092e-05, | |
| "loss": 0.0134, | |
| "step": 2280 | |
| }, | |
| { | |
| "epoch": 3.51563999232393, | |
| "grad_norm": 0.2711808979511261, | |
| "learning_rate": 2.4475492846730835e-05, | |
| "loss": 0.0158, | |
| "step": 2290 | |
| }, | |
| { | |
| "epoch": 3.5309921320284015, | |
| "grad_norm": 0.31566399335861206, | |
| "learning_rate": 2.4015822050519794e-05, | |
| "loss": 0.0095, | |
| "step": 2300 | |
| }, | |
| { | |
| "epoch": 3.5309921320284015, | |
| "eval_loss": 0.05009521171450615, | |
| "eval_runtime": 157.2158, | |
| "eval_samples_per_second": 7.372, | |
| "eval_steps_per_second": 7.372, | |
| "step": 2300 | |
| }, | |
| { | |
| "epoch": 3.546344271732873, | |
| "grad_norm": 0.49842846393585205, | |
| "learning_rate": 2.3559140528966074e-05, | |
| "loss": 0.0067, | |
| "step": 2310 | |
| }, | |
| { | |
| "epoch": 3.561696411437344, | |
| "grad_norm": 0.45083844661712646, | |
| "learning_rate": 2.3105500819675495e-05, | |
| "loss": 0.0129, | |
| "step": 2320 | |
| }, | |
| { | |
| "epoch": 3.5770485511418153, | |
| "grad_norm": 0.521081268787384, | |
| "learning_rate": 2.265495511031742e-05, | |
| "loss": 0.0151, | |
| "step": 2330 | |
| }, | |
| { | |
| "epoch": 3.5924006908462864, | |
| "grad_norm": 0.4393942356109619, | |
| "learning_rate": 2.2207555232620893e-05, | |
| "loss": 0.0124, | |
| "step": 2340 | |
| }, | |
| { | |
| "epoch": 3.607752830550758, | |
| "grad_norm": 0.13999144732952118, | |
| "learning_rate": 2.1763352656411785e-05, | |
| "loss": 0.011, | |
| "step": 2350 | |
| }, | |
| { | |
| "epoch": 3.607752830550758, | |
| "eval_loss": 0.046690210700035095, | |
| "eval_runtime": 157.2409, | |
| "eval_samples_per_second": 7.371, | |
| "eval_steps_per_second": 7.371, | |
| "step": 2350 | |
| }, | |
| { | |
| "epoch": 3.6231049702552296, | |
| "grad_norm": 0.3251853585243225, | |
| "learning_rate": 2.1322398483691787e-05, | |
| "loss": 0.0069, | |
| "step": 2360 | |
| }, | |
| { | |
| "epoch": 3.6384571099597007, | |
| "grad_norm": 0.30056822299957275, | |
| "learning_rate": 2.08847434427593e-05, | |
| "loss": 0.0144, | |
| "step": 2370 | |
| }, | |
| { | |
| "epoch": 3.653809249664172, | |
| "grad_norm": 0.10957108438014984, | |
| "learning_rate": 2.0450437882373697e-05, | |
| "loss": 0.0115, | |
| "step": 2380 | |
| }, | |
| { | |
| "epoch": 3.669161389368643, | |
| "grad_norm": 0.2679850459098816, | |
| "learning_rate": 2.0019531765962995e-05, | |
| "loss": 0.0087, | |
| "step": 2390 | |
| }, | |
| { | |
| "epoch": 3.6845135290731146, | |
| "grad_norm": 0.5390302538871765, | |
| "learning_rate": 1.9592074665876026e-05, | |
| "loss": 0.0136, | |
| "step": 2400 | |
| }, | |
| { | |
| "epoch": 3.6845135290731146, | |
| "eval_loss": 0.05131568759679794, | |
| "eval_runtime": 157.2039, | |
| "eval_samples_per_second": 7.373, | |
| "eval_steps_per_second": 7.373, | |
| "step": 2400 | |
| }, | |
| { | |
| "epoch": 3.6998656687775857, | |
| "grad_norm": 0.13512513041496277, | |
| "learning_rate": 1.9168115757679535e-05, | |
| "loss": 0.0155, | |
| "step": 2410 | |
| }, | |
| { | |
| "epoch": 3.7152178084820573, | |
| "grad_norm": 0.1011972650885582, | |
| "learning_rate": 1.8747703814500866e-05, | |
| "loss": 0.0095, | |
| "step": 2420 | |
| }, | |
| { | |
| "epoch": 3.7305699481865284, | |
| "grad_norm": 0.4893917739391327, | |
| "learning_rate": 1.833088720141698e-05, | |
| "loss": 0.0099, | |
| "step": 2430 | |
| }, | |
| { | |
| "epoch": 3.7459220878909996, | |
| "grad_norm": 0.725928544998169, | |
| "learning_rate": 1.7917713869890557e-05, | |
| "loss": 0.0121, | |
| "step": 2440 | |
| }, | |
| { | |
| "epoch": 3.761274227595471, | |
| "grad_norm": 0.11228631436824799, | |
| "learning_rate": 1.7508231352253435e-05, | |
| "loss": 0.0074, | |
| "step": 2450 | |
| }, | |
| { | |
| "epoch": 3.761274227595471, | |
| "eval_loss": 0.04784177616238594, | |
| "eval_runtime": 157.3439, | |
| "eval_samples_per_second": 7.366, | |
| "eval_steps_per_second": 7.366, | |
| "step": 2450 | |
| }, | |
| { | |
| "epoch": 3.7766263672999423, | |
| "grad_norm": 0.11749975383281708, | |
| "learning_rate": 1.7102486756238435e-05, | |
| "loss": 0.0115, | |
| "step": 2460 | |
| }, | |
| { | |
| "epoch": 3.791978507004414, | |
| "grad_norm": 0.3067283630371094, | |
| "learning_rate": 1.6700526759560002e-05, | |
| "loss": 0.0094, | |
| "step": 2470 | |
| }, | |
| { | |
| "epoch": 3.807330646708885, | |
| "grad_norm": 0.4429052472114563, | |
| "learning_rate": 1.6302397604544257e-05, | |
| "loss": 0.0142, | |
| "step": 2480 | |
| }, | |
| { | |
| "epoch": 3.822682786413356, | |
| "grad_norm": 0.26236897706985474, | |
| "learning_rate": 1.5908145092809272e-05, | |
| "loss": 0.0074, | |
| "step": 2490 | |
| }, | |
| { | |
| "epoch": 3.8380349261178277, | |
| "grad_norm": 0.14888080954551697, | |
| "learning_rate": 1.551781457999586e-05, | |
| "loss": 0.0104, | |
| "step": 2500 | |
| }, | |
| { | |
| "epoch": 3.8380349261178277, | |
| "eval_loss": 0.04920686408877373, | |
| "eval_runtime": 157.3187, | |
| "eval_samples_per_second": 7.367, | |
| "eval_steps_per_second": 7.367, | |
| "step": 2500 | |
| }, | |
| { | |
| "epoch": 3.853387065822299, | |
| "grad_norm": 0.155548557639122, | |
| "learning_rate": 1.513145097054977e-05, | |
| "loss": 0.0083, | |
| "step": 2510 | |
| }, | |
| { | |
| "epoch": 3.8687392055267704, | |
| "grad_norm": 0.4206964075565338, | |
| "learning_rate": 1.4749098712555854e-05, | |
| "loss": 0.0116, | |
| "step": 2520 | |
| }, | |
| { | |
| "epoch": 3.8840913452312416, | |
| "grad_norm": 0.49253013730049133, | |
| "learning_rate": 1.4370801792624656e-05, | |
| "loss": 0.0063, | |
| "step": 2530 | |
| }, | |
| { | |
| "epoch": 3.8994434849357127, | |
| "grad_norm": 0.3531017601490021, | |
| "learning_rate": 1.399660373083203e-05, | |
| "loss": 0.0098, | |
| "step": 2540 | |
| }, | |
| { | |
| "epoch": 3.9147956246401843, | |
| "grad_norm": 0.17188264429569244, | |
| "learning_rate": 1.3626547575712545e-05, | |
| "loss": 0.0131, | |
| "step": 2550 | |
| }, | |
| { | |
| "epoch": 3.9147956246401843, | |
| "eval_loss": 0.051388129591941833, | |
| "eval_runtime": 157.2859, | |
| "eval_samples_per_second": 7.369, | |
| "eval_steps_per_second": 7.369, | |
| "step": 2550 | |
| }, | |
| { | |
| "epoch": 3.9301477643446554, | |
| "grad_norm": 0.5859571695327759, | |
| "learning_rate": 1.3260675899307096e-05, | |
| "loss": 0.0177, | |
| "step": 2560 | |
| }, | |
| { | |
| "epoch": 3.945499904049127, | |
| "grad_norm": 0.1914072185754776, | |
| "learning_rate": 1.2899030792265349e-05, | |
| "loss": 0.0107, | |
| "step": 2570 | |
| }, | |
| { | |
| "epoch": 3.960852043753598, | |
| "grad_norm": 0.21298187971115112, | |
| "learning_rate": 1.2541653859003437e-05, | |
| "loss": 0.0124, | |
| "step": 2580 | |
| }, | |
| { | |
| "epoch": 3.9762041834580693, | |
| "grad_norm": 0.3145734667778015, | |
| "learning_rate": 1.2188586212917846e-05, | |
| "loss": 0.0121, | |
| "step": 2590 | |
| }, | |
| { | |
| "epoch": 3.991556323162541, | |
| "grad_norm": 0.27779558300971985, | |
| "learning_rate": 1.1839868471655523e-05, | |
| "loss": 0.0113, | |
| "step": 2600 | |
| }, | |
| { | |
| "epoch": 3.991556323162541, | |
| "eval_loss": 0.04836370795965195, | |
| "eval_runtime": 157.3072, | |
| "eval_samples_per_second": 7.368, | |
| "eval_steps_per_second": 7.368, | |
| "step": 2600 | |
| }, | |
| { | |
| "epoch": 4.006908462867012, | |
| "grad_norm": 0.08331400901079178, | |
| "learning_rate": 1.1495540752441235e-05, | |
| "loss": 0.0093, | |
| "step": 2610 | |
| }, | |
| { | |
| "epoch": 4.0222606025714835, | |
| "grad_norm": 0.275341272354126, | |
| "learning_rate": 1.1155642667462318e-05, | |
| "loss": 0.0047, | |
| "step": 2620 | |
| }, | |
| { | |
| "epoch": 4.037612742275955, | |
| "grad_norm": 0.5107333064079285, | |
| "learning_rate": 1.082021331931164e-05, | |
| "loss": 0.0039, | |
| "step": 2630 | |
| }, | |
| { | |
| "epoch": 4.052964881980426, | |
| "grad_norm": 0.2175411880016327, | |
| "learning_rate": 1.0489291296489152e-05, | |
| "loss": 0.0046, | |
| "step": 2640 | |
| }, | |
| { | |
| "epoch": 4.068317021684897, | |
| "grad_norm": 0.031360745429992676, | |
| "learning_rate": 1.0162914668962631e-05, | |
| "loss": 0.0028, | |
| "step": 2650 | |
| }, | |
| { | |
| "epoch": 4.068317021684897, | |
| "eval_loss": 0.05359194427728653, | |
| "eval_runtime": 157.3201, | |
| "eval_samples_per_second": 7.367, | |
| "eval_steps_per_second": 7.367, | |
| "step": 2650 | |
| }, | |
| { | |
| "epoch": 4.083669161389369, | |
| "grad_norm": 0.12836819887161255, | |
| "learning_rate": 9.841120983787915e-06, | |
| "loss": 0.0034, | |
| "step": 2660 | |
| }, | |
| { | |
| "epoch": 4.09902130109384, | |
| "grad_norm": 0.2958119809627533, | |
| "learning_rate": 9.523947260789546e-06, | |
| "loss": 0.0043, | |
| "step": 2670 | |
| }, | |
| { | |
| "epoch": 4.114373440798311, | |
| "grad_norm": 0.2515159249305725, | |
| "learning_rate": 9.211429988301823e-06, | |
| "loss": 0.0035, | |
| "step": 2680 | |
| }, | |
| { | |
| "epoch": 4.129725580502782, | |
| "grad_norm": 0.7020996809005737, | |
| "learning_rate": 8.90360511897121e-06, | |
| "loss": 0.0065, | |
| "step": 2690 | |
| }, | |
| { | |
| "epoch": 4.1450777202072535, | |
| "grad_norm": 0.1990860551595688, | |
| "learning_rate": 8.600508065620161e-06, | |
| "loss": 0.0046, | |
| "step": 2700 | |
| }, | |
| { | |
| "epoch": 4.1450777202072535, | |
| "eval_loss": 0.057626817375421524, | |
| "eval_runtime": 157.3149, | |
| "eval_samples_per_second": 7.367, | |
| "eval_steps_per_second": 7.367, | |
| "step": 2700 | |
| }, | |
| { | |
| "epoch": 4.1604298599117255, | |
| "grad_norm": 0.21370138227939606, | |
| "learning_rate": 8.302173697173226e-06, | |
| "loss": 0.0036, | |
| "step": 2710 | |
| }, | |
| { | |
| "epoch": 4.175781999616197, | |
| "grad_norm": 0.1672574132680893, | |
| "learning_rate": 8.008636334645631e-06, | |
| "loss": 0.0033, | |
| "step": 2720 | |
| }, | |
| { | |
| "epoch": 4.191134139320668, | |
| "grad_norm": 0.128431499004364, | |
| "learning_rate": 7.71992974719491e-06, | |
| "loss": 0.0024, | |
| "step": 2730 | |
| }, | |
| { | |
| "epoch": 4.206486279025139, | |
| "grad_norm": 0.04721586033701897, | |
| "learning_rate": 7.436087148236054e-06, | |
| "loss": 0.0019, | |
| "step": 2740 | |
| }, | |
| { | |
| "epoch": 4.22183841872961, | |
| "grad_norm": 0.1882028728723526, | |
| "learning_rate": 7.157141191620548e-06, | |
| "loss": 0.0038, | |
| "step": 2750 | |
| }, | |
| { | |
| "epoch": 4.22183841872961, | |
| "eval_loss": 0.06159433349967003, | |
| "eval_runtime": 157.3463, | |
| "eval_samples_per_second": 7.366, | |
| "eval_steps_per_second": 7.366, | |
| "step": 2750 | |
| }, | |
| { | |
| "epoch": 4.237190558434082, | |
| "grad_norm": 0.08508176356554031, | |
| "learning_rate": 6.883123967879796e-06, | |
| "loss": 0.0052, | |
| "step": 2760 | |
| }, | |
| { | |
| "epoch": 4.252542698138553, | |
| "grad_norm": 0.07488391548395157, | |
| "learning_rate": 6.6140670005334136e-06, | |
| "loss": 0.0036, | |
| "step": 2770 | |
| }, | |
| { | |
| "epoch": 4.267894837843024, | |
| "grad_norm": 0.30203863978385925, | |
| "learning_rate": 6.350001242462617e-06, | |
| "loss": 0.0048, | |
| "step": 2780 | |
| }, | |
| { | |
| "epoch": 4.2832469775474955, | |
| "grad_norm": 0.3173889219760895, | |
| "learning_rate": 6.090957072349385e-06, | |
| "loss": 0.0045, | |
| "step": 2790 | |
| }, | |
| { | |
| "epoch": 4.298599117251967, | |
| "grad_norm": 0.03237370029091835, | |
| "learning_rate": 5.836964291181624e-06, | |
| "loss": 0.0029, | |
| "step": 2800 | |
| }, | |
| { | |
| "epoch": 4.298599117251967, | |
| "eval_loss": 0.062100403010845184, | |
| "eval_runtime": 157.3803, | |
| "eval_samples_per_second": 7.364, | |
| "eval_steps_per_second": 7.364, | |
| "step": 2800 | |
| }, | |
| { | |
| "epoch": 4.313951256956439, | |
| "grad_norm": 0.2693499028682709, | |
| "learning_rate": 5.588052118824804e-06, | |
| "loss": 0.0033, | |
| "step": 2810 | |
| }, | |
| { | |
| "epoch": 4.32930339666091, | |
| "grad_norm": 0.23804733157157898, | |
| "learning_rate": 5.344249190660428e-06, | |
| "loss": 0.0021, | |
| "step": 2820 | |
| }, | |
| { | |
| "epoch": 4.344655536365381, | |
| "grad_norm": 0.39224836230278015, | |
| "learning_rate": 5.105583554291765e-06, | |
| "loss": 0.0036, | |
| "step": 2830 | |
| }, | |
| { | |
| "epoch": 4.360007676069852, | |
| "grad_norm": 0.03341009467840195, | |
| "learning_rate": 4.872082666317207e-06, | |
| "loss": 0.0027, | |
| "step": 2840 | |
| }, | |
| { | |
| "epoch": 4.375359815774323, | |
| "grad_norm": 0.10064135491847992, | |
| "learning_rate": 4.6437733891715905e-06, | |
| "loss": 0.0025, | |
| "step": 2850 | |
| }, | |
| { | |
| "epoch": 4.375359815774323, | |
| "eval_loss": 0.06398806720972061, | |
| "eval_runtime": 157.3861, | |
| "eval_samples_per_second": 7.364, | |
| "eval_steps_per_second": 7.364, | |
| "step": 2850 | |
| }, | |
| { | |
| "epoch": 4.390711955478795, | |
| "grad_norm": 0.1537330597639084, | |
| "learning_rate": 4.420681988035891e-06, | |
| "loss": 0.0042, | |
| "step": 2860 | |
| }, | |
| { | |
| "epoch": 4.406064095183266, | |
| "grad_norm": 0.08775684237480164, | |
| "learning_rate": 4.2028341278156026e-06, | |
| "loss": 0.0055, | |
| "step": 2870 | |
| }, | |
| { | |
| "epoch": 4.4214162348877375, | |
| "grad_norm": 0.15035293996334076, | |
| "learning_rate": 3.990254870188221e-06, | |
| "loss": 0.0052, | |
| "step": 2880 | |
| }, | |
| { | |
| "epoch": 4.436768374592209, | |
| "grad_norm": 0.026623625308275223, | |
| "learning_rate": 3.7829686707200827e-06, | |
| "loss": 0.0046, | |
| "step": 2890 | |
| }, | |
| { | |
| "epoch": 4.45212051429668, | |
| "grad_norm": 0.39092618227005005, | |
| "learning_rate": 3.580999376052946e-06, | |
| "loss": 0.0037, | |
| "step": 2900 | |
| }, | |
| { | |
| "epoch": 4.45212051429668, | |
| "eval_loss": 0.06381876021623611, | |
| "eval_runtime": 157.3824, | |
| "eval_samples_per_second": 7.364, | |
| "eval_steps_per_second": 7.364, | |
| "step": 2900 | |
| }, | |
| { | |
| "epoch": 4.467472654001152, | |
| "grad_norm": 0.3707815110683441, | |
| "learning_rate": 3.3843702211606153e-06, | |
| "loss": 0.0031, | |
| "step": 2910 | |
| }, | |
| { | |
| "epoch": 4.482824793705623, | |
| "grad_norm": 0.05645956099033356, | |
| "learning_rate": 3.193103826675947e-06, | |
| "loss": 0.0058, | |
| "step": 2920 | |
| }, | |
| { | |
| "epoch": 4.498176933410094, | |
| "grad_norm": 0.10445626080036163, | |
| "learning_rate": 3.007222196288545e-06, | |
| "loss": 0.002, | |
| "step": 2930 | |
| }, | |
| { | |
| "epoch": 4.513529073114565, | |
| "grad_norm": 0.05766968056559563, | |
| "learning_rate": 2.8267467142133687e-06, | |
| "loss": 0.0025, | |
| "step": 2940 | |
| }, | |
| { | |
| "epoch": 4.528881212819036, | |
| "grad_norm": 3.2273781299591064, | |
| "learning_rate": 2.651698142730674e-06, | |
| "loss": 0.0071, | |
| "step": 2950 | |
| }, | |
| { | |
| "epoch": 4.528881212819036, | |
| "eval_loss": 0.06478563696146011, | |
| "eval_runtime": 157.419, | |
| "eval_samples_per_second": 7.363, | |
| "eval_steps_per_second": 7.363, | |
| "step": 2950 | |
| }, | |
| { | |
| "epoch": 4.544233352523508, | |
| "grad_norm": 0.0076272995211184025, | |
| "learning_rate": 2.4820966197974748e-06, | |
| "loss": 0.0053, | |
| "step": 2960 | |
| }, | |
| { | |
| "epoch": 4.5595854922279795, | |
| "grad_norm": 0.1376052349805832, | |
| "learning_rate": 2.3179616567308216e-06, | |
| "loss": 0.002, | |
| "step": 2970 | |
| }, | |
| { | |
| "epoch": 4.574937631932451, | |
| "grad_norm": 0.10553725808858871, | |
| "learning_rate": 2.1593121359631873e-06, | |
| "loss": 0.0026, | |
| "step": 2980 | |
| }, | |
| { | |
| "epoch": 4.590289771636922, | |
| "grad_norm": 0.043689433485269547, | |
| "learning_rate": 2.006166308870189e-06, | |
| "loss": 0.0045, | |
| "step": 2990 | |
| }, | |
| { | |
| "epoch": 4.605641911341393, | |
| "grad_norm": 0.04369067773222923, | |
| "learning_rate": 1.8585417936709038e-06, | |
| "loss": 0.0015, | |
| "step": 3000 | |
| }, | |
| { | |
| "epoch": 4.605641911341393, | |
| "eval_loss": 0.0653165951371193, | |
| "eval_runtime": 157.3862, | |
| "eval_samples_per_second": 7.364, | |
| "eval_steps_per_second": 7.364, | |
| "step": 3000 | |
| }, | |
| { | |
| "epoch": 4.620994051045864, | |
| "grad_norm": 0.09847624599933624, | |
| "learning_rate": 1.7164555734010545e-06, | |
| "loss": 0.0093, | |
| "step": 3010 | |
| }, | |
| { | |
| "epoch": 4.636346190750336, | |
| "grad_norm": 0.10092262178659439, | |
| "learning_rate": 1.5799239939592204e-06, | |
| "loss": 0.0031, | |
| "step": 3020 | |
| }, | |
| { | |
| "epoch": 4.651698330454807, | |
| "grad_norm": 0.11192231625318527, | |
| "learning_rate": 1.4489627622263747e-06, | |
| "loss": 0.0034, | |
| "step": 3030 | |
| }, | |
| { | |
| "epoch": 4.667050470159278, | |
| "grad_norm": 0.49182823300361633, | |
| "learning_rate": 1.3235869442589255e-06, | |
| "loss": 0.0083, | |
| "step": 3040 | |
| }, | |
| { | |
| "epoch": 4.6824026098637495, | |
| "grad_norm": 0.25427430868148804, | |
| "learning_rate": 1.2038109635555406e-06, | |
| "loss": 0.002, | |
| "step": 3050 | |
| }, | |
| { | |
| "epoch": 4.6824026098637495, | |
| "eval_loss": 0.06511291116476059, | |
| "eval_runtime": 157.4295, | |
| "eval_samples_per_second": 7.362, | |
| "eval_steps_per_second": 7.362, | |
| "step": 3050 | |
| }, | |
| { | |
| "epoch": 4.697754749568221, | |
| "grad_norm": 0.004702554550021887, | |
| "learning_rate": 1.0896485993977467e-06, | |
| "loss": 0.005, | |
| "step": 3060 | |
| }, | |
| { | |
| "epoch": 4.713106889272693, | |
| "grad_norm": 0.1602880358695984, | |
| "learning_rate": 9.811129852647982e-07, | |
| "loss": 0.0044, | |
| "step": 3070 | |
| }, | |
| { | |
| "epoch": 4.728459028977164, | |
| "grad_norm": 0.5000692009925842, | |
| "learning_rate": 8.782166073227515e-07, | |
| "loss": 0.0104, | |
| "step": 3080 | |
| }, | |
| { | |
| "epoch": 4.743811168681635, | |
| "grad_norm": 0.015439675189554691, | |
| "learning_rate": 7.809713029880428e-07, | |
| "loss": 0.0048, | |
| "step": 3090 | |
| }, | |
| { | |
| "epoch": 4.759163308386106, | |
| "grad_norm": 0.3154544532299042, | |
| "learning_rate": 6.893882595656598e-07, | |
| "loss": 0.0073, | |
| "step": 3100 | |
| }, | |
| { | |
| "epoch": 4.759163308386106, | |
| "eval_loss": 0.0645485520362854, | |
| "eval_runtime": 157.4614, | |
| "eval_samples_per_second": 7.361, | |
| "eval_steps_per_second": 7.361, | |
| "step": 3100 | |
| }, | |
| { | |
| "epoch": 4.774515448090577, | |
| "grad_norm": 0.36479201912879944, | |
| "learning_rate": 6.034780129621664e-07, | |
| "loss": 0.008, | |
| "step": 3110 | |
| }, | |
| { | |
| "epoch": 4.789867587795049, | |
| "grad_norm": 0.5649594068527222, | |
| "learning_rate": 5.232504464735833e-07, | |
| "loss": 0.0069, | |
| "step": 3120 | |
| }, | |
| { | |
| "epoch": 4.80521972749952, | |
| "grad_norm": 0.4218672513961792, | |
| "learning_rate": 4.487147896484523e-07, | |
| "loss": 0.005, | |
| "step": 3130 | |
| }, | |
| { | |
| "epoch": 4.8205718672039914, | |
| "grad_norm": 0.02274581603705883, | |
| "learning_rate": 3.7987961722599773e-07, | |
| "loss": 0.0077, | |
| "step": 3140 | |
| }, | |
| { | |
| "epoch": 4.835924006908463, | |
| "grad_norm": 0.11879457533359528, | |
| "learning_rate": 3.167528481496984e-07, | |
| "loss": 0.0061, | |
| "step": 3150 | |
| }, | |
| { | |
| "epoch": 4.835924006908463, | |
| "eval_loss": 0.06395623832941055, | |
| "eval_runtime": 157.4124, | |
| "eval_samples_per_second": 7.363, | |
| "eval_steps_per_second": 7.363, | |
| "step": 3150 | |
| }, | |
| { | |
| "epoch": 4.851276146612934, | |
| "grad_norm": 0.07862385362386703, | |
| "learning_rate": 2.593417446562607e-07, | |
| "loss": 0.0103, | |
| "step": 3160 | |
| }, | |
| { | |
| "epoch": 4.866628286317406, | |
| "grad_norm": 0.10271923989057541, | |
| "learning_rate": 2.0765291144016486e-07, | |
| "loss": 0.0017, | |
| "step": 3170 | |
| }, | |
| { | |
| "epoch": 4.881980426021877, | |
| "grad_norm": 0.023111067712306976, | |
| "learning_rate": 1.6169229489385595e-07, | |
| "loss": 0.0041, | |
| "step": 3180 | |
| }, | |
| { | |
| "epoch": 4.897332565726348, | |
| "grad_norm": 0.38597050309181213, | |
| "learning_rate": 1.2146518242363014e-07, | |
| "loss": 0.003, | |
| "step": 3190 | |
| }, | |
| { | |
| "epoch": 4.912684705430819, | |
| "grad_norm": 0.10105849802494049, | |
| "learning_rate": 8.697620184138222e-08, | |
| "loss": 0.0024, | |
| "step": 3200 | |
| }, | |
| { | |
| "epoch": 4.912684705430819, | |
| "eval_loss": 0.0639420673251152, | |
| "eval_runtime": 157.414, | |
| "eval_samples_per_second": 7.363, | |
| "eval_steps_per_second": 7.363, | |
| "step": 3200 | |
| }, | |
| { | |
| "epoch": 4.92803684513529, | |
| "grad_norm": 0.06087774783372879, | |
| "learning_rate": 5.822932083221488e-08, | |
| "loss": 0.0098, | |
| "step": 3210 | |
| }, | |
| { | |
| "epoch": 4.943388984839762, | |
| "grad_norm": 0.1292206346988678, | |
| "learning_rate": 3.5227846497970504e-08, | |
| "loss": 0.0037, | |
| "step": 3220 | |
| }, | |
| { | |
| "epoch": 4.958741124544233, | |
| "grad_norm": 0.042202211916446686, | |
| "learning_rate": 1.7974424976796577e-08, | |
| "loss": 0.0014, | |
| "step": 3230 | |
| }, | |
| { | |
| "epoch": 4.974093264248705, | |
| "grad_norm": 0.06265482306480408, | |
| "learning_rate": 6.47104113870034e-09, | |
| "loss": 0.0055, | |
| "step": 3240 | |
| }, | |
| { | |
| "epoch": 4.989445403953176, | |
| "grad_norm": 0.3988359868526459, | |
| "learning_rate": 7.190183572314269e-10, | |
| "loss": 0.0054, | |
| "step": 3250 | |
| }, | |
| { | |
| "epoch": 4.989445403953176, | |
| "eval_loss": 0.06391575932502747, | |
| "eval_runtime": 157.4119, | |
| "eval_samples_per_second": 7.363, | |
| "eval_steps_per_second": 7.363, | |
| "step": 3250 | |
| }, | |
| { | |
| "epoch": 4.997121473805412, | |
| "step": 3255, | |
| "total_flos": 8.175027015212728e+17, | |
| "train_loss": 0.04520614844061629, | |
| "train_runtime": 35604.7112, | |
| "train_samples_per_second": 1.464, | |
| "train_steps_per_second": 0.091 | |
| } | |
| ], | |
| "logging_steps": 10, | |
| "max_steps": 3255, | |
| "num_input_tokens_seen": 0, | |
| "num_train_epochs": 5, | |
| "save_steps": 50, | |
| "stateful_callbacks": { | |
| "TrainerControl": { | |
| "args": { | |
| "should_epoch_stop": false, | |
| "should_evaluate": false, | |
| "should_log": false, | |
| "should_save": true, | |
| "should_training_stop": true | |
| }, | |
| "attributes": {} | |
| } | |
| }, | |
| "total_flos": 8.175027015212728e+17, | |
| "train_batch_size": 1, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |
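
The JSON above is the kind of trainer state that Hugging Face `Trainer` serializes: `log_history` holds one entry per logging step (`logging_steps` = 10, with `loss`, `grad_norm`, `learning_rate`) plus an evaluation entry every 50 steps (with `eval_loss`, `eval_runtime`, throughput), followed by a final summary entry and run-level metadata (`max_steps`, `num_train_epochs`, `save_steps`, `total_flos`). A minimal sketch for inspecting this structure is below; it assumes the file has been saved locally as `trainer_state.json` (the path and the use of `matplotlib` are my assumptions, not part of the run itself).

```python
import json

import matplotlib.pyplot as plt  # assumed available; only used for the optional plot

# Assumed local path to the state file shown above.
STATE_PATH = "trainer_state.json"

with open(STATE_PATH) as f:
    state = json.load(f)

# Training-loss entries carry "loss" (every logging_steps=10 optimizer steps);
# evaluation entries carry "eval_loss" (every 50 steps in this run).
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

eval_steps = [e["step"] for e in eval_logs]
eval_loss = [e["eval_loss"] for e in eval_logs]

# Report the evaluation checkpoint with the lowest eval_loss.
best_step, best_loss = min(zip(eval_steps, eval_loss), key=lambda p: p[1])
print(f"best eval_loss {best_loss:.5f} at step {best_step} of {state['max_steps']}")

# Optional: plot train vs. eval loss over steps.
plt.plot([e["step"] for e in train_logs], [e["loss"] for e in train_logs], label="train loss")
plt.plot(eval_steps, eval_loss, label="eval loss")
plt.xlabel("step")
plt.ylabel("loss")
plt.legend()
plt.savefig("loss_curves.png")
```

Reading the log this way makes the trajectory in this section easy to see: training loss sits around 0.002–0.015 through epochs 4–5 while `eval_loss` drifts upward from roughly 0.048 (step 2450) to about 0.064 (step 3250), which is why the best checkpoint reported in the header precedes this section rather than coming from the final steps.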