{
  "best_metric": 0.02887474000453949,
  "best_model_checkpoint": "saves/psy-course/MentaLLaMA-chat-7B/train/fold2/checkpoint-1900",
  "epoch": 5.0,
  "eval_steps": 50,
  "global_step": 3225,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.015503875968992248,
      "grad_norm": 1.8097485303878784,
      "learning_rate": 3.0959752321981426e-06,
      "loss": 1.678,
      "step": 10
    },
    {
      "epoch": 0.031007751937984496,
      "grad_norm": 1.9774603843688965,
      "learning_rate": 6.191950464396285e-06,
      "loss": 1.7145,
      "step": 20
    },
    {
      "epoch": 0.046511627906976744,
      "grad_norm": 2.036001443862915,
      "learning_rate": 9.287925696594429e-06,
      "loss": 1.5924,
      "step": 30
    },
    {
      "epoch": 0.06201550387596899,
      "grad_norm": 3.265268087387085,
      "learning_rate": 1.238390092879257e-05,
      "loss": 1.2457,
      "step": 40
    },
    {
      "epoch": 0.07751937984496124,
      "grad_norm": 1.1102505922317505,
      "learning_rate": 1.5479876160990712e-05,
      "loss": 0.7519,
      "step": 50
    },
    {
      "epoch": 0.07751937984496124,
      "eval_loss": 0.638915479183197,
      "eval_runtime": 177.4395,
      "eval_samples_per_second": 6.464,
      "eval_steps_per_second": 6.464,
      "step": 50
    },
    {
      "epoch": 0.09302325581395349,
      "grad_norm": 0.6308065056800842,
      "learning_rate": 1.8575851393188857e-05,
      "loss": 0.6351,
      "step": 60
    },
    {
      "epoch": 0.10852713178294573,
      "grad_norm": 0.8012827634811401,
      "learning_rate": 2.1671826625387e-05,
      "loss": 0.4782,
      "step": 70
    },
    {
      "epoch": 0.12403100775193798,
      "grad_norm": 0.6199682354927063,
      "learning_rate": 2.476780185758514e-05,
      "loss": 0.2779,
      "step": 80
    },
    {
      "epoch": 0.13953488372093023,
      "grad_norm": 0.5094736218452454,
      "learning_rate": 2.7863777089783283e-05,
      "loss": 0.1896,
      "step": 90
    },
    {
      "epoch": 0.15503875968992248,
      "grad_norm": 0.5959056615829468,
      "learning_rate": 3.0959752321981425e-05,
      "loss": 0.1485,
      "step": 100
    },
    {
      "epoch": 0.15503875968992248,
      "eval_loss": 0.11244650185108185,
      "eval_runtime": 177.3837,
      "eval_samples_per_second": 6.466,
      "eval_steps_per_second": 6.466,
      "step": 100
    },
    {
      "epoch": 0.17054263565891473,
      "grad_norm": 0.919588029384613,
      "learning_rate": 3.4055727554179566e-05,
      "loss": 0.1271,
      "step": 110
    },
    {
      "epoch": 0.18604651162790697,
      "grad_norm": 0.9113105535507202,
      "learning_rate": 3.7151702786377715e-05,
      "loss": 0.101,
      "step": 120
    },
    {
      "epoch": 0.20155038759689922,
      "grad_norm": 0.74349045753479,
      "learning_rate": 4.024767801857585e-05,
      "loss": 0.1009,
      "step": 130
    },
    {
      "epoch": 0.21705426356589147,
      "grad_norm": 0.6830365061759949,
      "learning_rate": 4.3343653250774e-05,
      "loss": 0.096,
      "step": 140
    },
    {
      "epoch": 0.23255813953488372,
      "grad_norm": 0.5473061203956604,
      "learning_rate": 4.6439628482972134e-05,
      "loss": 0.074,
      "step": 150
    },
    {
      "epoch": 0.23255813953488372,
      "eval_loss": 0.06496970355510712,
      "eval_runtime": 177.3867,
      "eval_samples_per_second": 6.466,
      "eval_steps_per_second": 6.466,
      "step": 150
    },
    {
      "epoch": 0.24806201550387597,
      "grad_norm": 0.45949482917785645,
      "learning_rate": 4.953560371517028e-05,
      "loss": 0.076,
      "step": 160
    },
    {
      "epoch": 0.26356589147286824,
      "grad_norm": 0.6822295188903809,
      "learning_rate": 5.2631578947368424e-05,
      "loss": 0.0689,
      "step": 170
    },
    {
      "epoch": 0.27906976744186046,
      "grad_norm": 0.5395657420158386,
      "learning_rate": 5.5727554179566566e-05,
      "loss": 0.0634,
      "step": 180
    },
    {
      "epoch": 0.29457364341085274,
      "grad_norm": 1.2032052278518677,
      "learning_rate": 5.882352941176471e-05,
      "loss": 0.0687,
      "step": 190
    },
    {
      "epoch": 0.31007751937984496,
      "grad_norm": 0.8278073072433472,
      "learning_rate": 6.191950464396285e-05,
      "loss": 0.0655,
      "step": 200
    },
    {
      "epoch": 0.31007751937984496,
      "eval_loss": 0.06194576248526573,
      "eval_runtime": 177.5104,
      "eval_samples_per_second": 6.462,
      "eval_steps_per_second": 6.462,
      "step": 200
    },
    {
      "epoch": 0.32558139534883723,
      "grad_norm": 0.48666349053382874,
      "learning_rate": 6.501547987616098e-05,
      "loss": 0.0591,
      "step": 210
    },
    {
      "epoch": 0.34108527131782945,
      "grad_norm": 0.6362942457199097,
      "learning_rate": 6.811145510835913e-05,
      "loss": 0.0557,
      "step": 220
    },
    {
      "epoch": 0.35658914728682173,
      "grad_norm": 0.44466376304626465,
      "learning_rate": 7.120743034055728e-05,
      "loss": 0.0531,
      "step": 230
    },
    {
      "epoch": 0.37209302325581395,
      "grad_norm": 0.473111093044281,
      "learning_rate": 7.430340557275543e-05,
      "loss": 0.0704,
      "step": 240
    },
    {
      "epoch": 0.3875968992248062,
      "grad_norm": 0.6141083836555481,
      "learning_rate": 7.739938080495357e-05,
      "loss": 0.0598,
      "step": 250
    },
    {
      "epoch": 0.3875968992248062,
      "eval_loss": 0.05116499215364456,
      "eval_runtime": 177.5255,
      "eval_samples_per_second": 6.461,
      "eval_steps_per_second": 6.461,
      "step": 250
    },
    {
      "epoch": 0.40310077519379844,
      "grad_norm": 0.34960028529167175,
      "learning_rate": 8.04953560371517e-05,
      "loss": 0.0502,
      "step": 260
    },
    {
      "epoch": 0.4186046511627907,
      "grad_norm": 0.6346738338470459,
      "learning_rate": 8.359133126934985e-05,
      "loss": 0.0469,
      "step": 270
    },
    {
      "epoch": 0.43410852713178294,
      "grad_norm": 0.34611088037490845,
      "learning_rate": 8.6687306501548e-05,
      "loss": 0.0769,
      "step": 280
    },
    {
      "epoch": 0.4496124031007752,
      "grad_norm": 0.746246337890625,
      "learning_rate": 8.978328173374613e-05,
      "loss": 0.0547,
      "step": 290
    },
    {
      "epoch": 0.46511627906976744,
      "grad_norm": 0.3229033350944519,
      "learning_rate": 9.287925696594427e-05,
      "loss": 0.0414,
      "step": 300
    },
    {
      "epoch": 0.46511627906976744,
      "eval_loss": 0.04494742304086685,
      "eval_runtime": 177.4386,
      "eval_samples_per_second": 6.464,
      "eval_steps_per_second": 6.464,
      "step": 300
    },
    {
      "epoch": 0.4806201550387597,
      "grad_norm": 0.7018037438392639,
      "learning_rate": 9.597523219814242e-05,
      "loss": 0.0469,
      "step": 310
    },
    {
      "epoch": 0.49612403100775193,
      "grad_norm": 0.3599799871444702,
      "learning_rate": 9.907120743034056e-05,
      "loss": 0.0377,
      "step": 320
    },
    {
      "epoch": 0.5116279069767442,
      "grad_norm": 0.3153678774833679,
      "learning_rate": 9.999856438185238e-05,
      "loss": 0.0441,
      "step": 330
    },
    {
      "epoch": 0.5271317829457365,
      "grad_norm": 0.2962203621864319,
      "learning_rate": 9.999153298122152e-05,
      "loss": 0.0522,
      "step": 340
    },
    {
      "epoch": 0.5426356589147286,
      "grad_norm": 0.1656709760427475,
      "learning_rate": 9.997864293614054e-05,
      "loss": 0.0427,
      "step": 350
    },
    {
      "epoch": 0.5426356589147286,
      "eval_loss": 0.04140669107437134,
      "eval_runtime": 177.5437,
      "eval_samples_per_second": 6.46,
      "eval_steps_per_second": 6.46,
      "step": 350
    },
    {
      "epoch": 0.5581395348837209,
      "grad_norm": 0.2917194068431854,
      "learning_rate": 9.995989575722902e-05,
      "loss": 0.047,
      "step": 360
    },
    {
      "epoch": 0.5736434108527132,
      "grad_norm": 0.3477674424648285,
      "learning_rate": 9.993529364152018e-05,
      "loss": 0.0501,
      "step": 370
    },
    {
      "epoch": 0.5891472868217055,
      "grad_norm": 0.48425808548927307,
      "learning_rate": 9.990483947220319e-05,
      "loss": 0.0593,
      "step": 380
    },
    {
      "epoch": 0.6046511627906976,
      "grad_norm": 0.21694518625736237,
      "learning_rate": 9.986853681828546e-05,
      "loss": 0.0513,
      "step": 390
    },
    {
      "epoch": 0.6201550387596899,
      "grad_norm": 0.5227199792861938,
      "learning_rate": 9.982638993417425e-05,
      "loss": 0.0471,
      "step": 400
    },
    {
      "epoch": 0.6201550387596899,
      "eval_loss": 0.038663338869810104,
      "eval_runtime": 177.5346,
      "eval_samples_per_second": 6.461,
      "eval_steps_per_second": 6.461,
      "step": 400
    },
    {
      "epoch": 0.6356589147286822,
      "grad_norm": 0.25696733593940735,
      "learning_rate": 9.977840375917817e-05,
      "loss": 0.0475,
      "step": 410
    },
    {
      "epoch": 0.6511627906976745,
      "grad_norm": 0.22936882078647614,
      "learning_rate": 9.972458391692827e-05,
      "loss": 0.0405,
      "step": 420
    },
    {
      "epoch": 0.6666666666666666,
      "grad_norm": 0.31321045756340027,
      "learning_rate": 9.966493671471904e-05,
      "loss": 0.0449,
      "step": 430
    },
    {
      "epoch": 0.6821705426356589,
      "grad_norm": 0.3151184916496277,
      "learning_rate": 9.959946914276922e-05,
      "loss": 0.056,
      "step": 440
    },
    {
      "epoch": 0.6976744186046512,
      "grad_norm": 0.50681471824646,
      "learning_rate": 9.952818887340257e-05,
      "loss": 0.0433,
      "step": 450
    },
    {
      "epoch": 0.6976744186046512,
      "eval_loss": 0.03619127348065376,
      "eval_runtime": 177.5548,
      "eval_samples_per_second": 6.46,
      "eval_steps_per_second": 6.46,
      "step": 450
    },
    {
      "epoch": 0.7131782945736435,
      "grad_norm": 0.31854957342147827,
      "learning_rate": 9.945110426014878e-05,
      "loss": 0.0494,
      "step": 460
    },
    {
      "epoch": 0.7286821705426356,
      "grad_norm": 0.28269049525260925,
      "learning_rate": 9.936822433676444e-05,
      "loss": 0.0387,
      "step": 470
    },
    {
      "epoch": 0.7441860465116279,
      "grad_norm": 0.24601627886295319,
      "learning_rate": 9.927955881617444e-05,
      "loss": 0.0479,
      "step": 480
    },
    {
      "epoch": 0.7596899224806202,
      "grad_norm": 0.19982942938804626,
      "learning_rate": 9.918511808933358e-05,
      "loss": 0.0378,
      "step": 490
    },
    {
      "epoch": 0.7751937984496124,
      "grad_norm": 0.20261038839817047,
      "learning_rate": 9.908491322400885e-05,
      "loss": 0.0432,
      "step": 500
    },
    {
      "epoch": 0.7751937984496124,
      "eval_loss": 0.03534514829516411,
      "eval_runtime": 177.4652,
      "eval_samples_per_second": 6.463,
      "eval_steps_per_second": 6.463,
      "step": 500
    },
    {
      "epoch": 0.7906976744186046,
      "grad_norm": 0.1283186674118042,
      "learning_rate": 9.897895596348247e-05,
      "loss": 0.0395,
      "step": 510
    },
    {
      "epoch": 0.8062015503875969,
      "grad_norm": 0.2949117124080658,
      "learning_rate": 9.886725872517552e-05,
      "loss": 0.0326,
      "step": 520
    },
    {
      "epoch": 0.8217054263565892,
      "grad_norm": 0.42478370666503906,
      "learning_rate": 9.874983459919277e-05,
      "loss": 0.0442,
      "step": 530
    },
    {
      "epoch": 0.8372093023255814,
      "grad_norm": 0.270944207906723,
      "learning_rate": 9.862669734678867e-05,
      "loss": 0.038,
      "step": 540
    },
    {
      "epoch": 0.8527131782945736,
      "grad_norm": 0.19820506870746613,
      "learning_rate": 9.849786139875452e-05,
      "loss": 0.0445,
      "step": 550
    },
    {
      "epoch": 0.8527131782945736,
      "eval_loss": 0.03533301129937172,
      "eval_runtime": 177.5388,
      "eval_samples_per_second": 6.461,
      "eval_steps_per_second": 6.461,
      "step": 550
    },
    {
      "epoch": 0.8682170542635659,
      "grad_norm": 0.33879902958869934,
      "learning_rate": 9.836334185372738e-05,
      "loss": 0.0464,
      "step": 560
    },
    {
      "epoch": 0.8837209302325582,
      "grad_norm": 0.330963134765625,
      "learning_rate": 9.822315447642056e-05,
      "loss": 0.0368,
      "step": 570
    },
    {
      "epoch": 0.8992248062015504,
      "grad_norm": 0.22305184602737427,
      "learning_rate": 9.807731569577615e-05,
      "loss": 0.0296,
      "step": 580
    },
    {
      "epoch": 0.9147286821705426,
      "grad_norm": 0.28611594438552856,
      "learning_rate": 9.792584260303964e-05,
      "loss": 0.0364,
      "step": 590
    },
    {
      "epoch": 0.9302325581395349,
      "grad_norm": 0.24084076285362244,
      "learning_rate": 9.776875294975698e-05,
      "loss": 0.0529,
      "step": 600
    },
    {
      "epoch": 0.9302325581395349,
      "eval_loss": 0.03533976525068283,
      "eval_runtime": 177.6164,
      "eval_samples_per_second": 6.458,
      "eval_steps_per_second": 6.458,
      "step": 600
    },
    {
      "epoch": 0.9457364341085271,
      "grad_norm": 0.28942936658859253,
      "learning_rate": 9.760606514569416e-05,
      "loss": 0.0381,
      "step": 610
    },
    {
      "epoch": 0.9612403100775194,
      "grad_norm": 0.2056342214345932,
      "learning_rate": 9.743779825667984e-05,
      "loss": 0.0337,
      "step": 620
    },
    {
      "epoch": 0.9767441860465116,
      "grad_norm": 0.14191323518753052,
      "learning_rate": 9.726397200237085e-05,
      "loss": 0.0368,
      "step": 630
    },
    {
      "epoch": 0.9922480620155039,
      "grad_norm": 0.33825528621673584,
      "learning_rate": 9.708460675394126e-05,
      "loss": 0.0465,
      "step": 640
    },
    {
      "epoch": 1.0077519379844961,
      "grad_norm": 0.12557874619960785,
      "learning_rate": 9.689972353169499e-05,
      "loss": 0.0313,
      "step": 650
    },
    {
      "epoch": 1.0077519379844961,
      "eval_loss": 0.03178540617227554,
      "eval_runtime": 177.627,
      "eval_samples_per_second": 6.457,
      "eval_steps_per_second": 6.457,
      "step": 650
    },
    {
      "epoch": 1.0232558139534884,
      "grad_norm": 0.35606396198272705,
      "learning_rate": 9.67093440026024e-05,
      "loss": 0.0303,
      "step": 660
    },
    {
      "epoch": 1.0387596899224807,
      "grad_norm": 0.183203786611557,
      "learning_rate": 9.651349047776112e-05,
      "loss": 0.0386,
      "step": 670
    },
    {
      "epoch": 1.054263565891473,
      "grad_norm": 0.23599869012832642,
      "learning_rate": 9.631218590978126e-05,
      "loss": 0.0222,
      "step": 680
    },
    {
      "epoch": 1.069767441860465,
      "grad_norm": 0.13795703649520874,
      "learning_rate": 9.610545389009562e-05,
      "loss": 0.0307,
      "step": 690
    },
    {
      "epoch": 1.0852713178294573,
      "grad_norm": 0.2756401002407074,
      "learning_rate": 9.589331864619486e-05,
      "loss": 0.0301,
      "step": 700
    },
    {
      "epoch": 1.0852713178294573,
      "eval_loss": 0.03220707178115845,
      "eval_runtime": 177.6556,
      "eval_samples_per_second": 6.456,
      "eval_steps_per_second": 6.456,
      "step": 700
    },
    {
      "epoch": 1.1007751937984496,
      "grad_norm": 0.22982142865657806,
      "learning_rate": 9.567580503878833e-05,
      "loss": 0.025,
      "step": 710
    },
    {
      "epoch": 1.1162790697674418,
      "grad_norm": 0.21597911417484283,
      "learning_rate": 9.545293855889043e-05,
      "loss": 0.0342,
      "step": 720
    },
    {
      "epoch": 1.1317829457364341,
      "grad_norm": 0.16282838582992554,
      "learning_rate": 9.522474532483337e-05,
      "loss": 0.0275,
      "step": 730
    },
    {
      "epoch": 1.1472868217054264,
      "grad_norm": 0.32020196318626404,
      "learning_rate": 9.499125207920622e-05,
      "loss": 0.0402,
      "step": 740
    },
    {
      "epoch": 1.1627906976744187,
      "grad_norm": 0.12987101078033447,
      "learning_rate": 9.475248618572096e-05,
      "loss": 0.0289,
      "step": 750
    },
    {
      "epoch": 1.1627906976744187,
      "eval_loss": 0.03383904695510864,
      "eval_runtime": 177.667,
      "eval_samples_per_second": 6.456,
      "eval_steps_per_second": 6.456,
      "step": 750
    },
    {
      "epoch": 1.178294573643411,
      "grad_norm": 0.37133800983428955,
      "learning_rate": 9.45084756260055e-05,
      "loss": 0.035,
      "step": 760
    },
    {
      "epoch": 1.193798449612403,
      "grad_norm": 0.07524913549423218,
      "learning_rate": 9.425924899632458e-05,
      "loss": 0.0291,
      "step": 770
    },
    {
      "epoch": 1.2093023255813953,
      "grad_norm": 0.14250116050243378,
      "learning_rate": 9.400483550422845e-05,
      "loss": 0.0221,
      "step": 780
    },
    {
      "epoch": 1.2248062015503876,
      "grad_norm": 0.33380234241485596,
      "learning_rate": 9.374526496512992e-05,
      "loss": 0.0303,
      "step": 790
    },
    {
      "epoch": 1.2403100775193798,
      "grad_norm": 0.6035688519477844,
      "learning_rate": 9.348056779881025e-05,
      "loss": 0.0267,
      "step": 800
    },
    {
      "epoch": 1.2403100775193798,
      "eval_loss": 0.03135337680578232,
      "eval_runtime": 177.6172,
      "eval_samples_per_second": 6.458,
      "eval_steps_per_second": 6.458,
      "step": 800
    },
    {
      "epoch": 1.255813953488372,
      "grad_norm": 0.25495055317878723,
      "learning_rate": 9.321077502585417e-05,
      "loss": 0.0267,
      "step": 810
    },
    {
      "epoch": 1.2713178294573644,
      "grad_norm": 0.352885365486145,
      "learning_rate": 9.293591826401451e-05,
      "loss": 0.0351,
      "step": 820
    },
    {
      "epoch": 1.2868217054263567,
      "grad_norm": 0.25123533606529236,
      "learning_rate": 9.265602972450673e-05,
      "loss": 0.0328,
      "step": 830
    },
    {
      "epoch": 1.302325581395349,
      "grad_norm": 0.47959160804748535,
      "learning_rate": 9.237114220823413e-05,
      "loss": 0.0326,
      "step": 840
    },
    {
      "epoch": 1.3178294573643412,
      "grad_norm": 0.13225722312927246,
      "learning_rate": 9.208128910194377e-05,
      "loss": 0.0314,
      "step": 850
    },
    {
      "epoch": 1.3178294573643412,
      "eval_loss": 0.03171215206384659,
      "eval_runtime": 177.6409,
      "eval_samples_per_second": 6.457,
      "eval_steps_per_second": 6.457,
      "step": 850
    },
    {
      "epoch": 1.3333333333333333,
      "grad_norm": 0.27190810441970825,
      "learning_rate": 9.17865043743138e-05,
      "loss": 0.031,
      "step": 860
    },
    {
      "epoch": 1.3488372093023255,
      "grad_norm": 0.10395022481679916,
      "learning_rate": 9.148682257197245e-05,
      "loss": 0.0238,
      "step": 870
    },
    {
      "epoch": 1.3643410852713178,
      "grad_norm": 0.33697041869163513,
      "learning_rate": 9.118227881544967e-05,
      "loss": 0.0397,
      "step": 880
    },
    {
      "epoch": 1.37984496124031,
      "grad_norm": 0.2214643359184265,
      "learning_rate": 9.087290879506104e-05,
      "loss": 0.0281,
      "step": 890
    },
    {
      "epoch": 1.3953488372093024,
      "grad_norm": 0.11761048436164856,
      "learning_rate": 9.055874876672519e-05,
      "loss": 0.0382,
      "step": 900
    },
    {
      "epoch": 1.3953488372093024,
      "eval_loss": 0.03273608908057213,
      "eval_runtime": 177.7052,
      "eval_samples_per_second": 6.455,
      "eval_steps_per_second": 6.455,
      "step": 900
    },
    {
      "epoch": 1.4108527131782946,
      "grad_norm": 0.17550455033779144,
      "learning_rate": 9.023983554771492e-05,
      "loss": 0.0263,
      "step": 910
    },
    {
      "epoch": 1.4263565891472867,
      "grad_norm": 0.21578268706798553,
      "learning_rate": 8.991620651234242e-05,
      "loss": 0.0261,
      "step": 920
    },
    {
      "epoch": 1.441860465116279,
      "grad_norm": 0.21228443086147308,
      "learning_rate": 8.958789958757928e-05,
      "loss": 0.029,
      "step": 930
    },
    {
      "epoch": 1.4573643410852712,
      "grad_norm": 0.2734227180480957,
      "learning_rate": 8.925495324861177e-05,
      "loss": 0.0286,
      "step": 940
    },
    {
      "epoch": 1.4728682170542635,
      "grad_norm": 0.4174731373786926,
      "learning_rate": 8.89174065143318e-05,
      "loss": 0.0354,
      "step": 950
    },
    {
      "epoch": 1.4728682170542635,
      "eval_loss": 0.031981565058231354,
      "eval_runtime": 177.7226,
      "eval_samples_per_second": 6.454,
      "eval_steps_per_second": 6.454,
      "step": 950
    },
    {
      "epoch": 1.4883720930232558,
      "grad_norm": 0.3433147370815277,
      "learning_rate": 8.857529894276412e-05,
      "loss": 0.0352,
      "step": 960
    },
    {
      "epoch": 1.503875968992248,
      "grad_norm": 0.21089136600494385,
      "learning_rate": 8.822867062643059e-05,
      "loss": 0.0298,
      "step": 970
    },
    {
      "epoch": 1.5193798449612403,
      "grad_norm": 0.1780281960964203,
      "learning_rate": 8.78775621876514e-05,
      "loss": 0.0319,
      "step": 980
    },
    {
      "epoch": 1.5348837209302326,
      "grad_norm": 0.3001708686351776,
      "learning_rate": 8.752201477378459e-05,
      "loss": 0.0217,
      "step": 990
    },
    {
      "epoch": 1.550387596899225,
      "grad_norm": 0.15130403637886047,
      "learning_rate": 8.716207005240382e-05,
      "loss": 0.0265,
      "step": 1000
    },
    {
      "epoch": 1.550387596899225,
      "eval_loss": 0.03209313005208969,
      "eval_runtime": 177.6849,
      "eval_samples_per_second": 6.455,
      "eval_steps_per_second": 6.455,
      "step": 1000
    },
    {
      "epoch": 1.5658914728682172,
      "grad_norm": 0.28896772861480713,
      "learning_rate": 8.679777020641525e-05,
      "loss": 0.0345,
      "step": 1010
    },
    {
      "epoch": 1.5813953488372094,
      "grad_norm": 0.4542350471019745,
      "learning_rate": 8.6429157929114e-05,
      "loss": 0.033,
      "step": 1020
    },
    {
      "epoch": 1.5968992248062015,
      "grad_norm": 0.26365530490875244,
      "learning_rate": 8.60562764191808e-05,
      "loss": 0.0277,
      "step": 1030
    },
    {
      "epoch": 1.6124031007751938,
      "grad_norm": 0.17537464201450348,
      "learning_rate": 8.567916937561944e-05,
      "loss": 0.0307,
      "step": 1040
    },
    {
      "epoch": 1.627906976744186,
      "grad_norm": 0.39150458574295044,
      "learning_rate": 8.529788099263554e-05,
      "loss": 0.0301,
      "step": 1050
    },
    {
      "epoch": 1.627906976744186,
      "eval_loss": 0.03326583281159401,
      "eval_runtime": 177.6473,
      "eval_samples_per_second": 6.457,
      "eval_steps_per_second": 6.457,
      "step": 1050
    },
    {
      "epoch": 1.6434108527131783,
      "grad_norm": 0.2252993881702423,
      "learning_rate": 8.49124559544573e-05,
      "loss": 0.0279,
      "step": 1060
    },
    {
      "epoch": 1.6589147286821704,
      "grad_norm": 0.3254898488521576,
      "learning_rate": 8.452293943009889e-05,
      "loss": 0.0269,
      "step": 1070
    },
    {
      "epoch": 1.6744186046511627,
      "grad_norm": 0.2006569355726242,
      "learning_rate": 8.412937706806692e-05,
      "loss": 0.0361,
      "step": 1080
    },
    {
      "epoch": 1.689922480620155,
      "grad_norm": 0.2423427700996399,
      "learning_rate": 8.373181499101077e-05,
      "loss": 0.0301,
      "step": 1090
    },
    {
      "epoch": 1.7054263565891472,
      "grad_norm": 0.36214545369148254,
      "learning_rate": 8.333029979031737e-05,
      "loss": 0.0262,
      "step": 1100
    },
    {
      "epoch": 1.7054263565891472,
      "eval_loss": 0.031156621873378754,
      "eval_runtime": 177.8049,
      "eval_samples_per_second": 6.451,
      "eval_steps_per_second": 6.451,
      "step": 1100
    },
    {
      "epoch": 1.7209302325581395,
      "grad_norm": 0.17312179505825043,
      "learning_rate": 8.292487852065104e-05,
      "loss": 0.0392,
      "step": 1110
    },
    {
      "epoch": 1.7364341085271318,
      "grad_norm": 0.275713711977005,
      "learning_rate": 8.251559869443897e-05,
      "loss": 0.0322,
      "step": 1120
    },
    {
      "epoch": 1.751937984496124,
      "grad_norm": 0.33000582456588745,
      "learning_rate": 8.210250827630313e-05,
      "loss": 0.029,
      "step": 1130
    },
    {
      "epoch": 1.7674418604651163,
      "grad_norm": 0.29563483595848083,
      "learning_rate": 8.168565567743924e-05,
      "loss": 0.0263,
      "step": 1140
    },
    {
      "epoch": 1.7829457364341086,
      "grad_norm": 0.31916943192481995,
      "learning_rate": 8.126508974994321e-05,
      "loss": 0.0273,
      "step": 1150
    },
    {
      "epoch": 1.7829457364341086,
      "eval_loss": 0.030605774372816086,
      "eval_runtime": 177.725,
      "eval_samples_per_second": 6.454,
      "eval_steps_per_second": 6.454,
      "step": 1150
    },
    {
      "epoch": 1.7984496124031009,
      "grad_norm": 0.1885177344083786,
      "learning_rate": 8.084085978108611e-05,
      "loss": 0.0169,
      "step": 1160
    },
    {
      "epoch": 1.8139534883720931,
      "grad_norm": 0.15678612887859344,
      "learning_rate": 8.041301548753804e-05,
      "loss": 0.0284,
      "step": 1170
    },
    {
      "epoch": 1.8294573643410854,
      "grad_norm": 0.24891909956932068,
      "learning_rate": 7.998160700954165e-05,
      "loss": 0.0244,
      "step": 1180
    },
    {
      "epoch": 1.8449612403100775,
      "grad_norm": 0.27802011370658875,
      "learning_rate": 7.954668490503621e-05,
      "loss": 0.027,
      "step": 1190
    },
    {
      "epoch": 1.8604651162790697,
      "grad_norm": 0.2340599000453949,
      "learning_rate": 7.910830014373237e-05,
      "loss": 0.0283,
      "step": 1200
    },
    {
      "epoch": 1.8604651162790697,
      "eval_loss": 0.029726749286055565,
      "eval_runtime": 177.8483,
      "eval_samples_per_second": 6.449,
      "eval_steps_per_second": 6.449,
      "step": 1200
    },
    {
      "epoch": 1.875968992248062,
      "grad_norm": 0.13624481856822968,
      "learning_rate": 7.866650410113907e-05,
      "loss": 0.0243,
      "step": 1210
    },
    {
      "epoch": 1.8914728682170543,
      "grad_norm": 0.30038079619407654,
      "learning_rate": 7.822134855254263e-05,
      "loss": 0.0296,
      "step": 1220
    },
    {
      "epoch": 1.9069767441860463,
      "grad_norm": 0.23166057467460632,
      "learning_rate": 7.777288566693905e-05,
      "loss": 0.0273,
      "step": 1230
    },
    {
      "epoch": 1.9224806201550386,
      "grad_norm": 0.26034536957740784,
      "learning_rate": 7.732116800092018e-05,
      "loss": 0.029,
      "step": 1240
    },
    {
      "epoch": 1.937984496124031,
      "grad_norm": 0.2269134223461151,
      "learning_rate": 7.686624849251453e-05,
      "loss": 0.0381,
      "step": 1250
    },
    {
      "epoch": 1.937984496124031,
      "eval_loss": 0.029867732897400856,
      "eval_runtime": 177.8147,
      "eval_samples_per_second": 6.451,
      "eval_steps_per_second": 6.451,
      "step": 1250
    },
    {
      "epoch": 1.9534883720930232,
      "grad_norm": 0.30028459429740906,
      "learning_rate": 7.640818045498324e-05,
      "loss": 0.0378,
      "step": 1260
    },
    {
      "epoch": 1.9689922480620154,
      "grad_norm": 0.10794702172279358,
      "learning_rate": 7.59470175705722e-05,
      "loss": 0.0303,
      "step": 1270
    },
    {
      "epoch": 1.9844961240310077,
      "grad_norm": 0.27119511365890503,
      "learning_rate": 7.548281388422088e-05,
      "loss": 0.0361,
      "step": 1280
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.22147968411445618,
      "learning_rate": 7.50156237972286e-05,
      "loss": 0.0263,
      "step": 1290
    },
    {
      "epoch": 2.0155038759689923,
      "grad_norm": 0.12922464311122894,
      "learning_rate": 7.454550206087921e-05,
      "loss": 0.0207,
      "step": 1300
    },
    {
      "epoch": 2.0155038759689923,
      "eval_loss": 0.029372563585639,
      "eval_runtime": 177.7077,
      "eval_samples_per_second": 6.454,
      "eval_steps_per_second": 6.454,
      "step": 1300
    },
    {
      "epoch": 2.0310077519379846,
      "grad_norm": 0.20252908766269684,
      "learning_rate": 7.407250377002451e-05,
      "loss": 0.0184,
      "step": 1310
    },
    {
      "epoch": 2.046511627906977,
      "grad_norm": 0.20628713071346283,
      "learning_rate": 7.359668435662757e-05,
      "loss": 0.0216,
      "step": 1320
    },
    {
      "epoch": 2.062015503875969,
      "grad_norm": 0.16820959746837616,
      "learning_rate": 7.311809958326659e-05,
      "loss": 0.0153,
      "step": 1330
    },
    {
      "epoch": 2.0775193798449614,
      "grad_norm": 0.28085780143737793,
      "learning_rate": 7.263680553659983e-05,
      "loss": 0.0172,
      "step": 1340
    },
    {
      "epoch": 2.0930232558139537,
      "grad_norm": 0.4147201180458069,
      "learning_rate": 7.21528586207927e-05,
      "loss": 0.0163,
      "step": 1350
    },
    {
      "epoch": 2.0930232558139537,
      "eval_loss": 0.03293036296963692,
      "eval_runtime": 177.2709,
      "eval_samples_per_second": 6.47,
      "eval_steps_per_second": 6.47,
      "step": 1350
    },
    {
      "epoch": 2.108527131782946,
      "grad_norm": 0.2516096830368042,
      "learning_rate": 7.166631555090759e-05,
      "loss": 0.025,
      "step": 1360
    },
    {
      "epoch": 2.124031007751938,
      "grad_norm": 0.26919105648994446,
      "learning_rate": 7.117723334625731e-05,
      "loss": 0.024,
      "step": 1370
    },
    {
      "epoch": 2.13953488372093,
      "grad_norm": 0.228290855884552,
      "learning_rate": 7.068566932372278e-05,
      "loss": 0.0183,
      "step": 1380
    },
    {
      "epoch": 2.1550387596899223,
      "grad_norm": 0.28446343541145325,
      "learning_rate": 7.019168109103599e-05,
      "loss": 0.0162,
      "step": 1390
    },
    {
      "epoch": 2.1705426356589146,
      "grad_norm": 0.19133096933364868,
      "learning_rate": 6.96953265400287e-05,
      "loss": 0.0236,
      "step": 1400
    },
    {
      "epoch": 2.1705426356589146,
      "eval_loss": 0.031053410843014717,
      "eval_runtime": 176.3297,
      "eval_samples_per_second": 6.505,
      "eval_steps_per_second": 6.505,
      "step": 1400
    },
    {
      "epoch": 2.186046511627907,
      "grad_norm": 0.14185263216495514,
      "learning_rate": 6.9196663839848e-05,
      "loss": 0.0142,
      "step": 1410
    },
    {
      "epoch": 2.201550387596899,
      "grad_norm": 0.2277035117149353,
      "learning_rate": 6.869575143013934e-05,
      "loss": 0.0166,
      "step": 1420
    },
    {
      "epoch": 2.2170542635658914,
      "grad_norm": 0.24037382006645203,
      "learning_rate": 6.819264801419771e-05,
      "loss": 0.0274,
      "step": 1430
    },
    {
      "epoch": 2.2325581395348837,
      "grad_norm": 0.10074973106384277,
      "learning_rate": 6.768741255208813e-05,
      "loss": 0.0154,
      "step": 1440
    },
    {
      "epoch": 2.248062015503876,
      "grad_norm": 0.24921472370624542,
      "learning_rate": 6.718010425373599e-05,
      "loss": 0.0191,
      "step": 1450
    },
    {
      "epoch": 2.248062015503876,
      "eval_loss": 0.030957119539380074,
      "eval_runtime": 175.6681,
      "eval_samples_per_second": 6.529,
      "eval_steps_per_second": 6.529,
      "step": 1450
    },
    {
      "epoch": 2.2635658914728682,
      "grad_norm": 0.22070886194705963,
      "learning_rate": 6.667078257198796e-05,
      "loss": 0.0184,
      "step": 1460
    },
    {
      "epoch": 2.2790697674418605,
      "grad_norm": 0.39727696776390076,
      "learning_rate": 6.615950719564466e-05,
      "loss": 0.0142,
      "step": 1470
    },
    {
      "epoch": 2.294573643410853,
      "grad_norm": 0.31478437781333923,
      "learning_rate": 6.56463380424655e-05,
      "loss": 0.0252,
      "step": 1480
    },
    {
      "epoch": 2.310077519379845,
      "grad_norm": 0.1986107975244522,
      "learning_rate": 6.51313352521468e-05,
      "loss": 0.0151,
      "step": 1490
    },
    {
      "epoch": 2.3255813953488373,
      "grad_norm": 0.3045531213283539,
      "learning_rate": 6.46145591792738e-05,
      "loss": 0.0243,
      "step": 1500
    },
    {
      "epoch": 2.3255813953488373,
      "eval_loss": 0.03084620088338852,
      "eval_runtime": 175.2668,
      "eval_samples_per_second": 6.544,
      "eval_steps_per_second": 6.544,
      "step": 1500
    },
    {
      "epoch": 2.3410852713178296,
      "grad_norm": 0.08667182922363281,
      "learning_rate": 6.409607038624759e-05,
      "loss": 0.0142,
      "step": 1510
    },
    {
      "epoch": 2.356589147286822,
      "grad_norm": 0.17333078384399414,
      "learning_rate": 6.357592963618761e-05,
      "loss": 0.0241,
      "step": 1520
    },
    {
      "epoch": 2.3720930232558137,
      "grad_norm": 0.20276910066604614,
      "learning_rate": 6.305419788581067e-05,
      "loss": 0.0185,
      "step": 1530
    },
    {
      "epoch": 2.387596899224806,
      "grad_norm": 0.287349134683609,
      "learning_rate": 6.253093627828724e-05,
      "loss": 0.0176,
      "step": 1540
    },
    {
      "epoch": 2.4031007751937983,
      "grad_norm": 0.32019197940826416,
      "learning_rate": 6.200620613607597e-05,
      "loss": 0.0165,
      "step": 1550
    },
    {
      "epoch": 2.4031007751937983,
      "eval_loss": 0.03274994716048241,
      "eval_runtime": 174.5607,
      "eval_samples_per_second": 6.571,
      "eval_steps_per_second": 6.571,
      "step": 1550
    },
    {
      "epoch": 2.4186046511627906,
      "grad_norm": 0.34726911783218384,
      "learning_rate": 6.148006895373706e-05,
      "loss": 0.0196,
      "step": 1560
    },
    {
      "epoch": 2.434108527131783,
      "grad_norm": 0.15877018868923187,
      "learning_rate": 6.0952586390725644e-05,
      "loss": 0.0171,
      "step": 1570
    },
    {
      "epoch": 2.449612403100775,
      "grad_norm": 0.21150444447994232,
      "learning_rate": 6.042382026416563e-05,
      "loss": 0.0164,
      "step": 1580
    },
    {
      "epoch": 2.4651162790697674,
      "grad_norm": 0.19103744626045227,
      "learning_rate": 5.989383254160529e-05,
      "loss": 0.0171,
      "step": 1590
    },
    {
      "epoch": 2.4806201550387597,
      "grad_norm": 0.44117987155914307,
      "learning_rate": 5.936268533375506e-05,
      "loss": 0.0224,
      "step": 1600
    },
    {
      "epoch": 2.4806201550387597,
      "eval_loss": 0.03294115141034126,
      "eval_runtime": 174.4995,
      "eval_samples_per_second": 6.573,
      "eval_steps_per_second": 6.573,
      "step": 1600
    },
    {
      "epoch": 2.496124031007752,
      "grad_norm": 0.3606320321559906,
      "learning_rate": 5.883044088720865e-05,
      "loss": 0.0214,
      "step": 1610
    },
    {
      "epoch": 2.511627906976744,
      "grad_norm": 0.13814851641654968,
      "learning_rate": 5.829716157714814e-05,
      "loss": 0.0177,
      "step": 1620
    },
    {
      "epoch": 2.5271317829457365,
      "grad_norm": 0.23484356701374054,
      "learning_rate": 5.77629099000341e-05,
      "loss": 0.0263,
      "step": 1630
    },
    {
      "epoch": 2.5426356589147288,
      "grad_norm": 0.21404676139354706,
      "learning_rate": 5.7227748466281486e-05,
      "loss": 0.0187,
      "step": 1640
    },
    {
      "epoch": 2.558139534883721,
      "grad_norm": 0.3276318609714508,
      "learning_rate": 5.669173999292208e-05,
      "loss": 0.0289,
      "step": 1650
    },
    {
      "epoch": 2.558139534883721,
      "eval_loss": 0.03190125152468681,
      "eval_runtime": 174.4148,
      "eval_samples_per_second": 6.576,
      "eval_steps_per_second": 6.576,
      "step": 1650
    },
    {
      "epoch": 2.5736434108527133,
      "grad_norm": 0.41611889004707336,
      "learning_rate": 5.615494729625458e-05,
      "loss": 0.0237,
      "step": 1660
    },
    {
      "epoch": 2.5891472868217056,
      "grad_norm": 0.372302383184433,
      "learning_rate": 5.561743328448296e-05,
      "loss": 0.0246,
      "step": 1670
    },
    {
      "epoch": 2.604651162790698,
      "grad_norm": 0.1654350608587265,
      "learning_rate": 5.5079260950344035e-05,
      "loss": 0.019,
      "step": 1680
    },
    {
      "epoch": 2.62015503875969,
      "grad_norm": 0.09135215729475021,
      "learning_rate": 5.454049336372531e-05,
      "loss": 0.0165,
      "step": 1690
    },
    {
      "epoch": 2.6356589147286824,
      "grad_norm": 0.24403300881385803,
      "learning_rate": 5.4001193664273454e-05,
      "loss": 0.014,
      "step": 1700
    },
    {
      "epoch": 2.6356589147286824,
      "eval_loss": 0.03161098435521126,
      "eval_runtime": 174.2648,
      "eval_samples_per_second": 6.582,
      "eval_steps_per_second": 6.582,
      "step": 1700
    },
    {
      "epoch": 2.6511627906976747,
      "grad_norm": 0.19514554738998413,
      "learning_rate": 5.346142505399495e-05,
      "loss": 0.0198,
      "step": 1710
    },
    {
      "epoch": 2.6666666666666665,
      "grad_norm": 0.13203318417072296,
      "learning_rate": 5.292125078984925e-05,
      "loss": 0.0181,
      "step": 1720
    },
    {
      "epoch": 2.682170542635659,
      "grad_norm": 0.09618645161390305,
      "learning_rate": 5.2380734176335425e-05,
      "loss": 0.0171,
      "step": 1730
    },
    {
      "epoch": 2.697674418604651,
      "grad_norm": 0.17840640246868134,
      "learning_rate": 5.183993855807343e-05,
      "loss": 0.0186,
      "step": 1740
    },
    {
      "epoch": 2.7131782945736433,
      "grad_norm": 0.12565214931964874,
      "learning_rate": 5.1298927312380586e-05,
      "loss": 0.0182,
      "step": 1750
    },
    {
      "epoch": 2.7131782945736433,
      "eval_loss": 0.03337998315691948,
      "eval_runtime": 174.2061,
      "eval_samples_per_second": 6.584,
      "eval_steps_per_second": 6.584,
      "step": 1750
    },
    {
      "epoch": 2.7286821705426356,
      "grad_norm": 0.1932082623243332,
      "learning_rate": 5.075776384184411e-05,
      "loss": 0.0275,
      "step": 1760
    },
    {
      "epoch": 2.744186046511628,
      "grad_norm": 0.14624637365341187,
      "learning_rate": 5.021651156689094e-05,
      "loss": 0.0284,
      "step": 1770
    },
    {
      "epoch": 2.75968992248062,
      "grad_norm": 0.20881560444831848,
      "learning_rate": 4.967523391835521e-05,
      "loss": 0.0217,
      "step": 1780
    },
    {
      "epoch": 2.7751937984496124,
      "grad_norm": 0.2833938002586365,
      "learning_rate": 4.9133994330044644e-05,
      "loss": 0.0234,
      "step": 1790
    },
    {
      "epoch": 2.7906976744186047,
      "grad_norm": 0.23201899230480194,
      "learning_rate": 4.85928562313066e-05,
      "loss": 0.0175,
      "step": 1800
    },
    {
      "epoch": 2.7906976744186047,
      "eval_loss": 0.029777053743600845,
      "eval_runtime": 174.1005,
      "eval_samples_per_second": 6.588,
      "eval_steps_per_second": 6.588,
      "step": 1800
    },
    {
      "epoch": 2.806201550387597,
      "grad_norm": 0.2119215726852417,
      "learning_rate": 4.8051883039594616e-05,
      "loss": 0.0151,
      "step": 1810
    },
    {
      "epoch": 2.8217054263565893,
      "grad_norm": 0.30587702989578247,
      "learning_rate": 4.751113815303624e-05,
      "loss": 0.0187,
      "step": 1820
    },
    {
      "epoch": 2.8372093023255816,
      "grad_norm": 0.18018727004528046,
      "learning_rate": 4.697068494300343e-05,
      "loss": 0.0151,
      "step": 1830
    },
    {
      "epoch": 2.8527131782945734,
      "grad_norm": 0.21248644590377808,
      "learning_rate": 4.6430586746685724e-05,
      "loss": 0.0222,
      "step": 1840
    },
    {
      "epoch": 2.8682170542635657,
      "grad_norm": 0.18737761676311493,
      "learning_rate": 4.589090685966758e-05,
      "loss": 0.0218,
      "step": 1850
    },
    {
      "epoch": 2.8682170542635657,
      "eval_loss": 0.029682571068406105,
      "eval_runtime": 174.0789,
      "eval_samples_per_second": 6.589,
      "eval_steps_per_second": 6.589,
      "step": 1850
    },
    {
      "epoch": 2.883720930232558,
      "grad_norm": 0.3078942894935608,
      "learning_rate": 4.535170852851073e-05,
      "loss": 0.0265,
      "step": 1860
    },
    {
      "epoch": 2.89922480620155,
      "grad_norm": 0.2036602944135666,
      "learning_rate": 4.481305494334201e-05,
      "loss": 0.0167,
      "step": 1870
    },
    {
      "epoch": 2.9147286821705425,
      "grad_norm": 0.08146923035383224,
      "learning_rate": 4.427500923044801e-05,
      "loss": 0.0199,
      "step": 1880
    },
    {
      "epoch": 2.9302325581395348,
      "grad_norm": 0.12505419552326202,
      "learning_rate": 4.373763444487705e-05,
      "loss": 0.0179,
      "step": 1890
    },
    {
      "epoch": 2.945736434108527,
      "grad_norm": 0.18068896234035492,
      "learning_rate": 4.3200993563049725e-05,
      "loss": 0.018,
      "step": 1900
    },
    {
      "epoch": 2.945736434108527,
      "eval_loss": 0.02887474000453949,
      "eval_runtime": 173.8738,
      "eval_samples_per_second": 6.597,
      "eval_steps_per_second": 6.597,
      "step": 1900
    },
    {
      "epoch": 2.9612403100775193,
      "grad_norm": 0.19300121068954468,
      "learning_rate": 4.266514947537839e-05,
      "loss": 0.0157,
      "step": 1910
    },
    {
      "epoch": 2.9767441860465116,
      "grad_norm": 0.1891685128211975,
      "learning_rate": 4.2130164978896916e-05,
      "loss": 0.0188,
      "step": 1920
    },
    {
      "epoch": 2.992248062015504,
      "grad_norm": 0.027009131386876106,
      "learning_rate": 4.159610276990137e-05,
      "loss": 0.0118,
      "step": 1930
    },
    {
      "epoch": 3.007751937984496,
      "grad_norm": 0.11215299367904663,
      "learning_rate": 4.106302543660235e-05,
      "loss": 0.0164,
      "step": 1940
    },
    {
      "epoch": 3.0232558139534884,
      "grad_norm": 0.07163859158754349,
      "learning_rate": 4.053099545179028e-05,
      "loss": 0.01,
      "step": 1950
    },
    {
      "epoch": 3.0232558139534884,
      "eval_loss": 0.030872860923409462,
      "eval_runtime": 173.945,
      "eval_samples_per_second": 6.594,
      "eval_steps_per_second": 6.594,
      "step": 1950
    },
    {
      "epoch": 3.0387596899224807,
      "grad_norm": 0.12158221006393433,
      "learning_rate": 4.0000075165513845e-05,
      "loss": 0.014,
      "step": 1960
    },
    {
      "epoch": 3.054263565891473,
      "grad_norm": 0.17903171479701996,
      "learning_rate": 3.9470326797773216e-05,
      "loss": 0.0085,
      "step": 1970
    },
    {
      "epoch": 3.0697674418604652,
      "grad_norm": 0.23486894369125366,
      "learning_rate": 3.8941812431228166e-05,
      "loss": 0.012,
      "step": 1980
    },
    {
      "epoch": 3.0852713178294575,
      "grad_norm": 0.19141437113285065,
      "learning_rate": 3.8414594003922515e-05,
      "loss": 0.0141,
      "step": 1990
    },
    {
      "epoch": 3.10077519379845,
      "grad_norm": 0.047276828438043594,
      "learning_rate": 3.788873330202544e-05,
      "loss": 0.0109,
      "step": 2000
    },
    {
      "epoch": 3.10077519379845,
      "eval_loss": 0.033768460154533386,
      "eval_runtime": 173.8687,
      "eval_samples_per_second": 6.597,
      "eval_steps_per_second": 6.597,
      "step": 2000
    },
    {
      "epoch": 3.116279069767442,
      "grad_norm": 0.3079964518547058,
      "learning_rate": 3.736429195259051e-05,
      "loss": 0.0069,
      "step": 2010
    },
    {
      "epoch": 3.1317829457364343,
      "grad_norm": 0.207097128033638,
      "learning_rate": 3.684133141633358e-05,
      "loss": 0.009,
      "step": 2020
    },
    {
      "epoch": 3.147286821705426,
      "grad_norm": 0.16703131794929504,
      "learning_rate": 3.6319912980429846e-05,
      "loss": 0.0098,
      "step": 2030
    },
    {
      "epoch": 3.1627906976744184,
      "grad_norm": 0.1511267125606537,
      "learning_rate": 3.580009775133168e-05,
      "loss": 0.0076,
      "step": 2040
    },
    {
      "epoch": 3.1782945736434107,
      "grad_norm": 0.184535413980484,
      "learning_rate": 3.528194664760714e-05,
      "loss": 0.0076,
      "step": 2050
    },
    {
      "epoch": 3.1782945736434107,
      "eval_loss": 0.03470822051167488,
      "eval_runtime": 173.7828,
      "eval_samples_per_second": 6.6,
      "eval_steps_per_second": 6.6,
      "step": 2050
    },
    {
      "epoch": 3.193798449612403,
      "grad_norm": 0.19592364132404327,
      "learning_rate": 3.476552039280096e-05,
      "loss": 0.0089,
      "step": 2060
    },
    {
      "epoch": 3.2093023255813953,
      "grad_norm": 0.1496165543794632,
      "learning_rate": 3.42508795083181e-05,
      "loss": 0.0092,
      "step": 2070
    },
    {
      "epoch": 3.2248062015503876,
      "grad_norm": 0.19381049275398254,
      "learning_rate": 3.373808430633106e-05,
      "loss": 0.0086,
      "step": 2080
    },
    {
      "epoch": 3.24031007751938,
      "grad_norm": 0.30487295985221863,
      "learning_rate": 3.32271948827118e-05,
      "loss": 0.0099,
      "step": 2090
    },
    {
      "epoch": 3.255813953488372,
      "grad_norm": 0.21724191308021545,
      "learning_rate": 3.2718271109988863e-05,
      "loss": 0.0087,
      "step": 2100
    },
    {
      "epoch": 3.255813953488372,
      "eval_loss": 0.03577126935124397,
      "eval_runtime": 173.7017,
      "eval_samples_per_second": 6.603,
      "eval_steps_per_second": 6.603,
      "step": 2100
    },
    {
      "epoch": 3.2713178294573644,
      "grad_norm": 0.8536518812179565,
      "learning_rate": 3.2211372630330835e-05,
      "loss": 0.0094,
      "step": 2110
    },
    {
      "epoch": 3.2868217054263567,
      "grad_norm": 0.23373013734817505,
      "learning_rate": 3.170655884855661e-05,
      "loss": 0.0121,
      "step": 2120
    },
    {
      "epoch": 3.302325581395349,
      "grad_norm": 0.09981165081262589,
      "learning_rate": 3.120388892517368e-05,
      "loss": 0.0193,
      "step": 2130
    },
    {
      "epoch": 3.317829457364341,
      "grad_norm": 0.2072608470916748,
      "learning_rate": 3.070342176944494e-05,
      "loss": 0.0135,
      "step": 2140
    },
    {
      "epoch": 3.3333333333333335,
      "grad_norm": 0.07687430083751678,
      "learning_rate": 3.0205216032484805e-05,
      "loss": 0.0092,
      "step": 2150
    },
    {
      "epoch": 3.3333333333333335,
      "eval_loss": 0.032335054129362106,
      "eval_runtime": 173.7494,
      "eval_samples_per_second": 6.601,
      "eval_steps_per_second": 6.601,
      "step": 2150
    },
    {
      "epoch": 3.3488372093023258,
      "grad_norm": 0.2054612785577774,
      "learning_rate": 2.970933010038599e-05,
      "loss": 0.013,
      "step": 2160
    },
    {
      "epoch": 3.3643410852713176,
      "grad_norm": 0.17977559566497803,
      "learning_rate": 2.921582208737681e-05,
      "loss": 0.0088,
      "step": 2170
    },
    {
      "epoch": 3.37984496124031,
      "grad_norm": 0.23377564549446106,
      "learning_rate": 2.872474982901081e-05,
      "loss": 0.0136,
      "step": 2180
    },
    {
      "epoch": 3.395348837209302,
      "grad_norm": 0.16202200949192047,
      "learning_rate": 2.8236170875388744e-05,
      "loss": 0.01,
      "step": 2190
    },
    {
      "epoch": 3.4108527131782944,
      "grad_norm": 0.2739998698234558,
      "learning_rate": 2.775014248441422e-05,
      "loss": 0.0078,
      "step": 2200
    },
    {
      "epoch": 3.4108527131782944,
      "eval_loss": 0.03312241658568382,
      "eval_runtime": 173.7097,
      "eval_samples_per_second": 6.603,
      "eval_steps_per_second": 6.603,
      "step": 2200
    },
    {
      "epoch": 3.4263565891472867,
      "grad_norm": 0.10180757939815521,
      "learning_rate": 2.726672161508341e-05,
      "loss": 0.0098,
      "step": 2210
    },
    {
      "epoch": 3.441860465116279,
      "grad_norm": 0.27830150723457336,
      "learning_rate": 2.678596492080984e-05,
      "loss": 0.0077,
      "step": 2220
    },
    {
      "epoch": 3.4573643410852712,
      "grad_norm": 0.25859320163726807,
      "learning_rate": 2.630792874278516e-05,
      "loss": 0.0099,
      "step": 2230
    },
    {
      "epoch": 3.4728682170542635,
      "grad_norm": 0.4165274500846863,
      "learning_rate": 2.583266910337624e-05,
      "loss": 0.0095,
      "step": 2240
    },
    {
      "epoch": 3.488372093023256,
      "grad_norm": 0.5020641684532166,
      "learning_rate": 2.5360241699559816e-05,
      "loss": 0.0109,
      "step": 2250
    },
    {
      "epoch": 3.488372093023256,
      "eval_loss": 0.035617418587207794,
      "eval_runtime": 173.6138,
      "eval_samples_per_second": 6.607,
      "eval_steps_per_second": 6.607,
      "step": 2250
    },
    {
      "epoch": 3.503875968992248,
      "grad_norm": 0.19337324798107147,
      "learning_rate": 2.4890701896395146e-05,
      "loss": 0.0104,
      "step": 2260
    },
    {
      "epoch": 3.5193798449612403,
      "grad_norm": 0.1879683881998062,
      "learning_rate": 2.4424104720535735e-05,
      "loss": 0.0101,
      "step": 2270
    },
    {
      "epoch": 3.5348837209302326,
      "grad_norm": 0.063876673579216,
      "learning_rate": 2.3960504853780462e-05,
      "loss": 0.0115,
      "step": 2280
    },
    {
      "epoch": 3.550387596899225,
      "grad_norm": 0.11843185126781464,
      "learning_rate": 2.349995662666547e-05,
      "loss": 0.0085,
      "step": 2290
    },
    {
      "epoch": 3.565891472868217,
      "grad_norm": 0.21422308683395386,
      "learning_rate": 2.3042514012096843e-05,
      "loss": 0.0137,
      "step": 2300
    },
    {
      "epoch": 3.565891472868217,
      "eval_loss": 0.035960037261247635,
      "eval_runtime": 173.6653,
      "eval_samples_per_second": 6.605,
      "eval_steps_per_second": 6.605,
      "step": 2300
    },
    {
      "epoch": 3.5813953488372094,
      "grad_norm": 0.354839563369751,
      "learning_rate": 2.2588230619025407e-05,
      "loss": 0.0159,
      "step": 2310
    },
    {
      "epoch": 3.5968992248062017,
      "grad_norm": 0.2568114995956421,
      "learning_rate": 2.213715968616425e-05,
      "loss": 0.0109,
      "step": 2320
    },
    {
      "epoch": 3.612403100775194,
      "grad_norm": 0.06852281093597412,
      "learning_rate": 2.16893540757494e-05,
      "loss": 0.0091,
      "step": 2330
    },
    {
      "epoch": 3.6279069767441863,
      "grad_norm": 0.194693461060524,
      "learning_rate": 2.1244866267344866e-05,
      "loss": 0.0084,
      "step": 2340
    },
    {
      "epoch": 3.6434108527131785,
      "grad_norm": 0.3488699495792389,
      "learning_rate": 2.080374835169235e-05,
      "loss": 0.013,
      "step": 2350
    },
    {
      "epoch": 3.6434108527131785,
      "eval_loss": 0.03496725112199783,
      "eval_runtime": 173.6559,
      "eval_samples_per_second": 6.605,
      "eval_steps_per_second": 6.605,
      "step": 2350
    },
    {
      "epoch": 3.6589147286821704,
      "grad_norm": 0.16774657368659973,
      "learning_rate": 2.0366052024606612e-05,
      "loss": 0.0082,
      "step": 2360
    },
    {
      "epoch": 3.6744186046511627,
      "grad_norm": 0.2563646733760834,
      "learning_rate": 1.9931828580917107e-05,
      "loss": 0.0114,
      "step": 2370
    },
    {
      "epoch": 3.689922480620155,
      "grad_norm": 0.254089891910553,
      "learning_rate": 1.9501128908456523e-05,
      "loss": 0.0087,
      "step": 2380
    },
    {
      "epoch": 3.705426356589147,
      "grad_norm": 0.2673187255859375,
      "learning_rate": 1.9074003482097258e-05,
      "loss": 0.0093,
      "step": 2390
    },
    {
      "epoch": 3.7209302325581395,
      "grad_norm": 0.47327151894569397,
      "learning_rate": 1.8650502357835925e-05,
      "loss": 0.0133,
      "step": 2400
    },
    {
      "epoch": 3.7209302325581395,
      "eval_loss": 0.03533458709716797,
      "eval_runtime": 173.6028,
      "eval_samples_per_second": 6.607,
      "eval_steps_per_second": 6.607,
      "step": 2400
    },
    {
      "epoch": 3.7364341085271318,
      "grad_norm": 0.4366307556629181,
      "learning_rate": 1.82306751669274e-05,
      "loss": 0.0085,
      "step": 2410
    },
    {
      "epoch": 3.751937984496124,
      "grad_norm": 0.39723125100135803,
      "learning_rate": 1.7814571110068135e-05,
      "loss": 0.0123,
      "step": 2420
    },
    {
      "epoch": 3.7674418604651163,
      "grad_norm": 0.16450275480747223,
      "learning_rate": 1.740223895163039e-05,
      "loss": 0.0091,
      "step": 2430
    },
    {
      "epoch": 3.7829457364341086,
      "grad_norm": 0.06844169646501541,
      "learning_rate": 1.6993727013947336e-05,
      "loss": 0.0109,
      "step": 2440
    },
    {
      "epoch": 3.798449612403101,
      "grad_norm": 0.29116564989089966,
      "learning_rate": 1.6589083171649977e-05,
      "loss": 0.0068,
      "step": 2450
    },
    {
      "epoch": 3.798449612403101,
      "eval_loss": 0.035682931542396545,
      "eval_runtime": 173.5613,
      "eval_samples_per_second": 6.609,
      "eval_steps_per_second": 6.609,
      "step": 2450
    },
    {
      "epoch": 3.813953488372093,
      "grad_norm": 0.1012692078948021,
      "learning_rate": 1.6188354846056698e-05,
      "loss": 0.008,
      "step": 2460
    },
    {
      "epoch": 3.8294573643410854,
      "grad_norm": 0.1340951770544052,
      "learning_rate": 1.579158899961575e-05,
      "loss": 0.0054,
      "step": 2470
    },
    {
      "epoch": 3.8449612403100772,
      "grad_norm": 0.22337476909160614,
      "learning_rate": 1.5398832130401637e-05,
      "loss": 0.0147,
      "step": 2480
    },
    {
      "epoch": 3.8604651162790695,
      "grad_norm": 0.18346606194972992,
      "learning_rate": 1.5010130266665807e-05,
      "loss": 0.0108,
      "step": 2490
    },
    {
      "epoch": 3.875968992248062,
      "grad_norm": 0.2582853436470032,
      "learning_rate": 1.4625528961442591e-05,
      "loss": 0.012,
      "step": 2500
    },
    {
      "epoch": 3.875968992248062,
      "eval_loss": 0.034770578145980835,
      "eval_runtime": 173.5234,
      "eval_samples_per_second": 6.61,
      "eval_steps_per_second": 6.61,
      "step": 2500
    },
    {
      "epoch": 3.891472868217054,
      "grad_norm": 0.22837190330028534,
      "learning_rate": 1.4245073287210608e-05,
      "loss": 0.0098,
      "step": 2510
    },
    {
      "epoch": 3.9069767441860463,
      "grad_norm": 0.49539613723754883,
      "learning_rate": 1.3868807830610653e-05,
      "loss": 0.016,
      "step": 2520
    },
    {
      "epoch": 3.9224806201550386,
      "grad_norm": 0.17993128299713135,
      "learning_rate": 1.3496776687220514e-05,
      "loss": 0.0129,
      "step": 2530
    },
    {
      "epoch": 3.937984496124031,
      "grad_norm": 0.25489673018455505,
      "learning_rate": 1.3129023456387151e-05,
      "loss": 0.0068,
      "step": 2540
    },
    {
      "epoch": 3.953488372093023,
      "grad_norm": 0.06681143492460251,
      "learning_rate": 1.2765591236117324e-05,
      "loss": 0.0088,
      "step": 2550
    },
    {
      "epoch": 3.953488372093023,
      "eval_loss": 0.034369926899671555,
      "eval_runtime": 173.574,
      "eval_samples_per_second": 6.608,
      "eval_steps_per_second": 6.608,
      "step": 2550
    },
    {
      "epoch": 3.9689922480620154,
      "grad_norm": 0.14447127282619476,
      "learning_rate": 1.240652261802669e-05,
      "loss": 0.0105,
      "step": 2560
    },
    {
      "epoch": 3.9844961240310077,
      "grad_norm": 0.2608015239238739,
      "learning_rate": 1.205185968234847e-05,
      "loss": 0.0136,
      "step": 2570
    },
    {
      "epoch": 4.0,
      "grad_norm": 0.17687876522541046,
      "learning_rate": 1.1701643993001916e-05,
      "loss": 0.0093,
      "step": 2580
    },
    {
      "epoch": 4.015503875968992,
      "grad_norm": 0.1070009246468544,
      "learning_rate": 1.1355916592721316e-05,
      "loss": 0.0045,
      "step": 2590
    },
    {
      "epoch": 4.0310077519379846,
      "grad_norm": 0.11302992701530457,
      "learning_rate": 1.1014717998246099e-05,
      "loss": 0.0066,
      "step": 2600
    },
    {
      "epoch": 4.0310077519379846,
      "eval_loss": 0.03459889069199562,
      "eval_runtime": 173.5445,
      "eval_samples_per_second": 6.609,
      "eval_steps_per_second": 6.609,
      "step": 2600
    },
    {
      "epoch": 4.046511627906977,
      "grad_norm": 0.107364721596241,
      "learning_rate": 1.0678088195572517e-05,
      "loss": 0.0053,
      "step": 2610
    },
    {
      "epoch": 4.062015503875969,
      "grad_norm": 0.2821330428123474,
      "learning_rate": 1.0346066635267676e-05,
      "loss": 0.0069,
      "step": 2620
    },
    {
      "epoch": 4.077519379844961,
      "grad_norm": 0.17317578196525574,
      "learning_rate": 1.001869222784611e-05,
      "loss": 0.0064,
      "step": 2630
    },
    {
      "epoch": 4.093023255813954,
      "grad_norm": 0.08701343834400177,
      "learning_rate": 9.696003339209819e-06,
      "loss": 0.007,
      "step": 2640
    },
    {
      "epoch": 4.108527131782946,
      "grad_norm": 0.23731686174869537,
      "learning_rate": 9.378037786152055e-06,
      "loss": 0.0052,
      "step": 2650
    },
    {
      "epoch": 4.108527131782946,
      "eval_loss": 0.03613542765378952,
      "eval_runtime": 173.8232,
      "eval_samples_per_second": 6.599,
      "eval_steps_per_second": 6.599,
      "step": 2650
    },
    {
      "epoch": 4.124031007751938,
      "grad_norm": 0.13787880539894104,
      "learning_rate": 9.064832831925441e-06,
      "loss": 0.0054,
      "step": 2660
    },
    {
      "epoch": 4.1395348837209305,
      "grad_norm": 0.1381884068250656,
      "learning_rate": 8.756425181875028e-06,
      "loss": 0.0035,
      "step": 2670
    },
    {
      "epoch": 4.155038759689923,
      "grad_norm": 0.16911053657531738,
      "learning_rate": 8.452850979136617e-06,
      "loss": 0.0045,
      "step": 2680
    },
    {
      "epoch": 4.170542635658915,
      "grad_norm": 0.04475264623761177,
      "learning_rate": 8.15414580040117e-06,
      "loss": 0.0032,
      "step": 2690
    },
    {
      "epoch": 4.186046511627907,
      "grad_norm": 0.09773126989603043,
      "learning_rate": 7.860344651745366e-06,
      "loss": 0.008,
      "step": 2700
    },
    {
      "epoch": 4.186046511627907,
      "eval_loss": 0.037369657307863235,
      "eval_runtime": 173.9253,
      "eval_samples_per_second": 6.595,
      "eval_steps_per_second": 6.595,
      "step": 2700
    },
    {
      "epoch": 4.2015503875969,
      "grad_norm": 0.1751956194639206,
      "learning_rate": 7.571481964529226e-06,
      "loss": 0.0057,
      "step": 2710
    },
    {
      "epoch": 4.217054263565892,
| "grad_norm": 0.06625787168741226, | |
| "learning_rate": 7.287591591360926e-06, | |
| "loss": 0.0028, | |
| "step": 2720 | |
| }, | |
| { | |
| "epoch": 4.232558139534884, | |
| "grad_norm": 0.1029408872127533, | |
| "learning_rate": 7.008706802129606e-06, | |
| "loss": 0.0069, | |
| "step": 2730 | |
| }, | |
| { | |
| "epoch": 4.248062015503876, | |
| "grad_norm": 0.01715020090341568, | |
| "learning_rate": 6.734860280106292e-06, | |
| "loss": 0.0021, | |
| "step": 2740 | |
| }, | |
| { | |
| "epoch": 4.263565891472869, | |
| "grad_norm": 0.5773080587387085, | |
| "learning_rate": 6.466084118113735e-06, | |
| "loss": 0.0062, | |
| "step": 2750 | |
| }, | |
| { | |
| "epoch": 4.263565891472869, | |
| "eval_loss": 0.0383269228041172, | |
| "eval_runtime": 173.7893, | |
| "eval_samples_per_second": 6.6, | |
| "eval_steps_per_second": 6.6, | |
| "step": 2750 | |
| }, | |
| { | |
| "epoch": 4.27906976744186, | |
| "grad_norm": 0.09289313852787018, | |
| "learning_rate": 6.202409814765325e-06, | |
| "loss": 0.0043, | |
| "step": 2760 | |
| }, | |
| { | |
| "epoch": 4.294573643410852, | |
| "grad_norm": 0.16086533665657043, | |
| "learning_rate": 5.9438682707736725e-06, | |
| "loss": 0.0064, | |
| "step": 2770 | |
| }, | |
| { | |
| "epoch": 4.310077519379845, | |
| "grad_norm": 0.175432026386261, | |
| "learning_rate": 5.690489785329301e-06, | |
| "loss": 0.0052, | |
| "step": 2780 | |
| }, | |
| { | |
| "epoch": 4.325581395348837, | |
| "grad_norm": 0.03287180885672569, | |
| "learning_rate": 5.442304052549752e-06, | |
| "loss": 0.0037, | |
| "step": 2790 | |
| }, | |
| { | |
| "epoch": 4.341085271317829, | |
| "grad_norm": 0.09991113096475601, | |
| "learning_rate": 5.199340157999733e-06, | |
| "loss": 0.005, | |
| "step": 2800 | |
| }, | |
| { | |
| "epoch": 4.341085271317829, | |
| "eval_loss": 0.038646504282951355, | |
| "eval_runtime": 173.5908, | |
| "eval_samples_per_second": 6.607, | |
| "eval_steps_per_second": 6.607, | |
| "step": 2800 | |
| }, | |
| { | |
| "epoch": 4.3565891472868215, | |
| "grad_norm": 0.07446504384279251, | |
| "learning_rate": 4.961626575282396e-06, | |
| "loss": 0.0028, | |
| "step": 2810 | |
| }, | |
| { | |
| "epoch": 4.372093023255814, | |
| "grad_norm": 0.1563124656677246, | |
| "learning_rate": 4.7291911627025235e-06, | |
| "loss": 0.0045, | |
| "step": 2820 | |
| }, | |
| { | |
| "epoch": 4.387596899224806, | |
| "grad_norm": 0.09954715520143509, | |
| "learning_rate": 4.502061160001725e-06, | |
| "loss": 0.0038, | |
| "step": 2830 | |
| }, | |
| { | |
| "epoch": 4.403100775193798, | |
| "grad_norm": 0.03728143870830536, | |
| "learning_rate": 4.280263185166078e-06, | |
| "loss": 0.007, | |
| "step": 2840 | |
| }, | |
| { | |
| "epoch": 4.4186046511627906, | |
| "grad_norm": 0.14879867434501648, | |
| "learning_rate": 4.063823231306757e-06, | |
| "loss": 0.004, | |
| "step": 2850 | |
| }, | |
| { | |
| "epoch": 4.4186046511627906, | |
| "eval_loss": 0.03950056806206703, | |
| "eval_runtime": 173.7546, | |
| "eval_samples_per_second": 6.601, | |
| "eval_steps_per_second": 6.601, | |
| "step": 2850 | |
| }, | |
| { | |
| "epoch": 4.434108527131783, | |
| "grad_norm": 0.04011745750904083, | |
| "learning_rate": 3.8527666636137885e-06, | |
| "loss": 0.0039, | |
| "step": 2860 | |
| }, | |
| { | |
| "epoch": 4.449612403100775, | |
| "grad_norm": 0.16904973983764648, | |
| "learning_rate": 3.6471182163834825e-06, | |
| "loss": 0.0075, | |
| "step": 2870 | |
| }, | |
| { | |
| "epoch": 4.465116279069767, | |
| "grad_norm": 0.05882781371474266, | |
| "learning_rate": 3.4469019901197054e-06, | |
| "loss": 0.0027, | |
| "step": 2880 | |
| }, | |
| { | |
| "epoch": 4.48062015503876, | |
| "grad_norm": 0.36288192868232727, | |
| "learning_rate": 3.252141448709495e-06, | |
| "loss": 0.0052, | |
| "step": 2890 | |
| }, | |
| { | |
| "epoch": 4.496124031007752, | |
| "grad_norm": 0.25636863708496094, | |
| "learning_rate": 3.0628594166732693e-06, | |
| "loss": 0.0075, | |
| "step": 2900 | |
| }, | |
| { | |
| "epoch": 4.496124031007752, | |
| "eval_loss": 0.040012069046497345, | |
| "eval_runtime": 173.5684, | |
| "eval_samples_per_second": 6.608, | |
| "eval_steps_per_second": 6.608, | |
| "step": 2900 | |
| }, | |
| { | |
| "epoch": 4.511627906976744, | |
| "grad_norm": 0.07002587616443634, | |
| "learning_rate": 2.8790780764899384e-06, | |
| "loss": 0.0053, | |
| "step": 2910 | |
| }, | |
| { | |
| "epoch": 4.5271317829457365, | |
| "grad_norm": 0.18481026589870453, | |
| "learning_rate": 2.700818965997315e-06, | |
| "loss": 0.0064, | |
| "step": 2920 | |
| }, | |
| { | |
| "epoch": 4.542635658914729, | |
| "grad_norm": 0.1123792976140976, | |
| "learning_rate": 2.528102975867991e-06, | |
| "loss": 0.0033, | |
| "step": 2930 | |
| }, | |
| { | |
| "epoch": 4.558139534883721, | |
| "grad_norm": 0.039597250521183014, | |
| "learning_rate": 2.3609503471611284e-06, | |
| "loss": 0.0041, | |
| "step": 2940 | |
| }, | |
| { | |
| "epoch": 4.573643410852713, | |
| "grad_norm": 0.05195772275328636, | |
| "learning_rate": 2.1993806689503738e-06, | |
| "loss": 0.003, | |
| "step": 2950 | |
| }, | |
| { | |
| "epoch": 4.573643410852713, | |
| "eval_loss": 0.040207840502262115, | |
| "eval_runtime": 173.5139, | |
| "eval_samples_per_second": 6.61, | |
| "eval_steps_per_second": 6.61, | |
| "step": 2950 | |
| }, | |
| { | |
| "epoch": 4.589147286821706, | |
| "grad_norm": 0.06371759623289108, | |
| "learning_rate": 2.0434128760281167e-06, | |
| "loss": 0.0021, | |
| "step": 2960 | |
| }, | |
| { | |
| "epoch": 4.604651162790698, | |
| "grad_norm": 0.0519726499915123, | |
| "learning_rate": 1.893065246686504e-06, | |
| "loss": 0.0029, | |
| "step": 2970 | |
| }, | |
| { | |
| "epoch": 4.62015503875969, | |
| "grad_norm": 0.05901438370347023, | |
| "learning_rate": 1.74835540057533e-06, | |
| "loss": 0.0067, | |
| "step": 2980 | |
| }, | |
| { | |
| "epoch": 4.635658914728682, | |
| "grad_norm": 0.16322703659534454, | |
| "learning_rate": 1.6093002966371617e-06, | |
| "loss": 0.0044, | |
| "step": 2990 | |
| }, | |
| { | |
| "epoch": 4.651162790697675, | |
| "grad_norm": 0.25651177763938904, | |
| "learning_rate": 1.4759162311198783e-06, | |
| "loss": 0.0066, | |
| "step": 3000 | |
| }, | |
| { | |
| "epoch": 4.651162790697675, | |
| "eval_loss": 0.0405086986720562, | |
| "eval_runtime": 173.4642, | |
| "eval_samples_per_second": 6.612, | |
| "eval_steps_per_second": 6.612, | |
| "step": 3000 | |
| }, | |
| { | |
| "epoch": 4.666666666666667, | |
| "grad_norm": 0.009503856301307678, | |
| "learning_rate": 1.348218835666859e-06, | |
| "loss": 0.0032, | |
| "step": 3010 | |
| }, | |
| { | |
| "epoch": 4.682170542635659, | |
| "grad_norm": 0.12524276971817017, | |
| "learning_rate": 1.2262230754850445e-06, | |
| "loss": 0.0032, | |
| "step": 3020 | |
| }, | |
| { | |
| "epoch": 4.6976744186046515, | |
| "grad_norm": 0.06170937791466713, | |
| "learning_rate": 1.109943247591172e-06, | |
| "loss": 0.0045, | |
| "step": 3030 | |
| }, | |
| { | |
| "epoch": 4.713178294573644, | |
| "grad_norm": 0.12521466612815857, | |
| "learning_rate": 9.993929791362323e-07, | |
| "loss": 0.0052, | |
| "step": 3040 | |
| }, | |
| { | |
| "epoch": 4.728682170542635, | |
| "grad_norm": 0.17083865404129028, | |
| "learning_rate": 8.945852258084863e-07, | |
| "loss": 0.005, | |
| "step": 3050 | |
| }, | |
| { | |
| "epoch": 4.728682170542635, | |
| "eval_loss": 0.04061457887291908, | |
| "eval_runtime": 173.5187, | |
| "eval_samples_per_second": 6.61, | |
| "eval_steps_per_second": 6.61, | |
| "step": 3050 | |
| }, | |
| { | |
| "epoch": 4.7441860465116275, | |
| "grad_norm": 0.35159188508987427, | |
| "learning_rate": 7.955322703151358e-07, | |
| "loss": 0.0046, | |
| "step": 3060 | |
| }, | |
| { | |
| "epoch": 4.75968992248062, | |
| "grad_norm": 0.2449195384979248, | |
| "learning_rate": 7.022457209428901e-07, | |
| "loss": 0.0044, | |
| "step": 3070 | |
| }, | |
| { | |
| "epoch": 4.775193798449612, | |
| "grad_norm": 0.2619343101978302, | |
| "learning_rate": 6.147365101975666e-07, | |
| "loss": 0.0035, | |
| "step": 3080 | |
| }, | |
| { | |
| "epoch": 4.790697674418604, | |
| "grad_norm": 0.09077528119087219, | |
| "learning_rate": 5.33014893522854e-07, | |
| "loss": 0.0032, | |
| "step": 3090 | |
| }, | |
| { | |
| "epoch": 4.8062015503875966, | |
| "grad_norm": 0.14284414052963257, | |
| "learning_rate": 4.57090448098485e-07, | |
| "loss": 0.0067, | |
| "step": 3100 | |
| }, | |
| { | |
| "epoch": 4.8062015503875966, | |
| "eval_loss": 0.04071947932243347, | |
| "eval_runtime": 173.7232, | |
| "eval_samples_per_second": 6.602, | |
| "eval_steps_per_second": 6.602, | |
| "step": 3100 | |
| }, | |
| { | |
| "epoch": 4.821705426356589, | |
| "grad_norm": 0.10482044517993927, | |
| "learning_rate": 3.8697207171781714e-07, | |
| "loss": 0.0037, | |
| "step": 3110 | |
| }, | |
| { | |
| "epoch": 4.837209302325581, | |
| "grad_norm": 0.03253592178225517, | |
| "learning_rate": 3.2266798174512837e-07, | |
| "loss": 0.0045, | |
| "step": 3120 | |
| }, | |
| { | |
| "epoch": 4.852713178294573, | |
| "grad_norm": 0.03622818365693092, | |
| "learning_rate": 2.6418571415255387e-07, | |
| "loss": 0.0093, | |
| "step": 3130 | |
| }, | |
| { | |
| "epoch": 4.868217054263566, | |
| "grad_norm": 0.09220151603221893, | |
| "learning_rate": 2.1153212263695378e-07, | |
| "loss": 0.0046, | |
| "step": 3140 | |
| }, | |
| { | |
| "epoch": 4.883720930232558, | |
| "grad_norm": 0.1942395716905594, | |
| "learning_rate": 1.6471337781669982e-07, | |
| "loss": 0.0067, | |
| "step": 3150 | |
| }, | |
| { | |
| "epoch": 4.883720930232558, | |
| "eval_loss": 0.040707945823669434, | |
| "eval_runtime": 173.7396, | |
| "eval_samples_per_second": 6.602, | |
| "eval_steps_per_second": 6.602, | |
| "step": 3150 | |
| }, | |
| { | |
| "epoch": 4.89922480620155, | |
| "grad_norm": 0.09173876792192459, | |
| "learning_rate": 1.237349665085097e-07, | |
| "loss": 0.0049, | |
| "step": 3160 | |
| }, | |
| { | |
| "epoch": 4.9147286821705425, | |
| "grad_norm": 0.045584529638290405, | |
| "learning_rate": 8.86016910844667e-08, | |
| "loss": 0.003, | |
| "step": 3170 | |
| }, | |
| { | |
| "epoch": 4.930232558139535, | |
| "grad_norm": 0.46576347947120667, | |
| "learning_rate": 5.9317668909192323e-08, | |
| "loss": 0.0061, | |
| "step": 3180 | |
| }, | |
| { | |
| "epoch": 4.945736434108527, | |
| "grad_norm": 0.059987977147102356, | |
| "learning_rate": 3.588633185730994e-08, | |
| "loss": 0.0062, | |
| "step": 3190 | |
| }, | |
| { | |
| "epoch": 4.961240310077519, | |
| "grad_norm": 0.2531404197216034, | |
| "learning_rate": 1.8310425911294283e-08, | |
| "loss": 0.006, | |
| "step": 3200 | |
| }, | |
| { | |
| "epoch": 4.961240310077519, | |
| "eval_loss": 0.04073048010468483, | |
| "eval_runtime": 173.54, | |
| "eval_samples_per_second": 6.609, | |
| "eval_steps_per_second": 6.609, | |
| "step": 3200 | |
| }, | |
| { | |
| "epoch": 4.976744186046512, | |
| "grad_norm": 0.24996767938137054, | |
| "learning_rate": 6.592010839612251e-09, | |
| "loss": 0.0076, | |
| "step": 3210 | |
| }, | |
| { | |
| "epoch": 4.992248062015504, | |
| "grad_norm": 0.020084669813513756, | |
| "learning_rate": 7.324599553770739e-10, | |
| "loss": 0.0025, | |
| "step": 3220 | |
| }, | |
| { | |
| "epoch": 5.0, | |
| "step": 3225, | |
| "total_flos": 8.718478050646426e+17, | |
| "train_loss": 0.049027663960821866, | |
| "train_runtime": 36714.2196, | |
| "train_samples_per_second": 1.405, | |
| "train_steps_per_second": 0.088 | |
| } | |
| ], | |
| "logging_steps": 10, | |
| "max_steps": 3225, | |
| "num_input_tokens_seen": 0, | |
| "num_train_epochs": 5, | |
| "save_steps": 50, | |
| "stateful_callbacks": { | |
| "TrainerControl": { | |
| "args": { | |
| "should_epoch_stop": false, | |
| "should_evaluate": false, | |
| "should_log": false, | |
| "should_save": true, | |
| "should_training_stop": true | |
| }, | |
| "attributes": {} | |
| } | |
| }, | |
| "total_flos": 8.718478050646426e+17, | |
| "train_batch_size": 1, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |
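
The trainer state ends here. As a minimal sketch (not produced by the trainer itself), the snippet below shows one way the saved `trainer_state.json` could be loaded to recover the evaluation-loss curve and the final run summary logged at step 3225. The file path is an assumption; the field names used (`log_history`, `eval_loss`, `step`, `max_steps`, `num_train_epochs`) are those visible in the log above.

```python
# Minimal sketch, assuming the log above is saved as plain JSON at the
# hypothetical path "trainer_state.json". Not part of the trainer output.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# log_history mixes per-step training records ("loss", every 10 steps here)
# with evaluation records ("eval_loss", every 50 steps here).
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

# Lowest evaluation loss observed during the run and the step it occurred at.
best = min(eval_logs, key=lambda e: e["eval_loss"])
print(f"lowest eval_loss {best['eval_loss']:.4f} at step {best['step']}")

# The final log entry is the run summary (train_loss, train_runtime, ...).
summary = state["log_history"][-1]
print("overall train_loss:", summary.get("train_loss"))
print("steps:", state["max_steps"], "| epochs:", state["num_train_epochs"])
```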