| { | |
| "best_metric": 0.03400569409132004, | |
| "best_model_checkpoint": "saves/psy-course/Llama-3.1-8B-Instruct/train/fold2/checkpoint-1300", | |
| "epoch": 5.0, | |
| "eval_steps": 50, | |
| "global_step": 3225, | |
| "is_hyper_param_search": false, | |
| "is_local_process_zero": true, | |
| "is_world_process_zero": true, | |
| "log_history": [ | |
| { | |
| "epoch": 0.015503875968992248, | |
| "grad_norm": 4.586867332458496, | |
| "learning_rate": 3.0959752321981426e-06, | |
| "loss": 1.585, | |
| "step": 10 | |
| }, | |
| { | |
| "epoch": 0.031007751937984496, | |
| "grad_norm": 4.746698379516602, | |
| "learning_rate": 6.191950464396285e-06, | |
| "loss": 1.523, | |
| "step": 20 | |
| }, | |
| { | |
| "epoch": 0.046511627906976744, | |
| "grad_norm": 5.548886775970459, | |
| "learning_rate": 9.287925696594429e-06, | |
| "loss": 1.318, | |
| "step": 30 | |
| }, | |
| { | |
| "epoch": 0.06201550387596899, | |
| "grad_norm": 2.0926177501678467, | |
| "learning_rate": 1.238390092879257e-05, | |
| "loss": 0.8377, | |
| "step": 40 | |
| }, | |
| { | |
| "epoch": 0.07751937984496124, | |
| "grad_norm": 1.533339500427246, | |
| "learning_rate": 1.5479876160990712e-05, | |
| "loss": 0.5122, | |
| "step": 50 | |
| }, | |
| { | |
| "epoch": 0.07751937984496124, | |
| "eval_loss": 0.41176876425743103, | |
| "eval_runtime": 173.3032, | |
| "eval_samples_per_second": 6.618, | |
| "eval_steps_per_second": 6.618, | |
| "step": 50 | |
| }, | |
| { | |
| "epoch": 0.09302325581395349, | |
| "grad_norm": 0.9713454246520996, | |
| "learning_rate": 1.8575851393188857e-05, | |
| "loss": 0.3858, | |
| "step": 60 | |
| }, | |
| { | |
| "epoch": 0.10852713178294573, | |
| "grad_norm": 1.3564386367797852, | |
| "learning_rate": 2.1671826625387e-05, | |
| "loss": 0.2398, | |
| "step": 70 | |
| }, | |
| { | |
| "epoch": 0.12403100775193798, | |
| "grad_norm": 1.194110631942749, | |
| "learning_rate": 2.476780185758514e-05, | |
| "loss": 0.1607, | |
| "step": 80 | |
| }, | |
| { | |
| "epoch": 0.13953488372093023, | |
| "grad_norm": 1.0269737243652344, | |
| "learning_rate": 2.7863777089783283e-05, | |
| "loss": 0.1322, | |
| "step": 90 | |
| }, | |
| { | |
| "epoch": 0.15503875968992248, | |
| "grad_norm": 0.9308303594589233, | |
| "learning_rate": 3.0959752321981425e-05, | |
| "loss": 0.0913, | |
| "step": 100 | |
| }, | |
| { | |
| "epoch": 0.15503875968992248, | |
| "eval_loss": 0.08312761038541794, | |
| "eval_runtime": 172.5746, | |
| "eval_samples_per_second": 6.646, | |
| "eval_steps_per_second": 6.646, | |
| "step": 100 | |
| }, | |
| { | |
| "epoch": 0.17054263565891473, | |
| "grad_norm": 1.6514289379119873, | |
| "learning_rate": 3.4055727554179566e-05, | |
| "loss": 0.0996, | |
| "step": 110 | |
| }, | |
| { | |
| "epoch": 0.18604651162790697, | |
| "grad_norm": 1.6203035116195679, | |
| "learning_rate": 3.7151702786377715e-05, | |
| "loss": 0.0914, | |
| "step": 120 | |
| }, | |
| { | |
| "epoch": 0.20155038759689922, | |
| "grad_norm": 0.9771865606307983, | |
| "learning_rate": 4.024767801857585e-05, | |
| "loss": 0.0855, | |
| "step": 130 | |
| }, | |
| { | |
| "epoch": 0.21705426356589147, | |
| "grad_norm": 0.6767793297767639, | |
| "learning_rate": 4.3343653250774e-05, | |
| "loss": 0.0888, | |
| "step": 140 | |
| }, | |
| { | |
| "epoch": 0.23255813953488372, | |
| "grad_norm": 0.7697650790214539, | |
| "learning_rate": 4.6439628482972134e-05, | |
| "loss": 0.0638, | |
| "step": 150 | |
| }, | |
| { | |
| "epoch": 0.23255813953488372, | |
| "eval_loss": 0.06616330146789551, | |
| "eval_runtime": 172.3668, | |
| "eval_samples_per_second": 6.654, | |
| "eval_steps_per_second": 6.654, | |
| "step": 150 | |
| }, | |
| { | |
| "epoch": 0.24806201550387597, | |
| "grad_norm": 0.8017315864562988, | |
| "learning_rate": 4.953560371517028e-05, | |
| "loss": 0.0788, | |
| "step": 160 | |
| }, | |
| { | |
| "epoch": 0.26356589147286824, | |
| "grad_norm": 1.0115169286727905, | |
| "learning_rate": 5.2631578947368424e-05, | |
| "loss": 0.073, | |
| "step": 170 | |
| }, | |
| { | |
| "epoch": 0.27906976744186046, | |
| "grad_norm": 0.4792479872703552, | |
| "learning_rate": 5.5727554179566566e-05, | |
| "loss": 0.0644, | |
| "step": 180 | |
| }, | |
| { | |
| "epoch": 0.29457364341085274, | |
| "grad_norm": 2.296128273010254, | |
| "learning_rate": 5.882352941176471e-05, | |
| "loss": 0.0679, | |
| "step": 190 | |
| }, | |
| { | |
| "epoch": 0.31007751937984496, | |
| "grad_norm": 1.2886589765548706, | |
| "learning_rate": 6.191950464396285e-05, | |
| "loss": 0.0589, | |
| "step": 200 | |
| }, | |
| { | |
| "epoch": 0.31007751937984496, | |
| "eval_loss": 0.05583063140511513, | |
| "eval_runtime": 172.0552, | |
| "eval_samples_per_second": 6.666, | |
| "eval_steps_per_second": 6.666, | |
| "step": 200 | |
| }, | |
| { | |
| "epoch": 0.32558139534883723, | |
| "grad_norm": 0.5177517533302307, | |
| "learning_rate": 6.501547987616098e-05, | |
| "loss": 0.0585, | |
| "step": 210 | |
| }, | |
| { | |
| "epoch": 0.34108527131782945, | |
| "grad_norm": 0.7501789331436157, | |
| "learning_rate": 6.811145510835913e-05, | |
| "loss": 0.0556, | |
| "step": 220 | |
| }, | |
| { | |
| "epoch": 0.35658914728682173, | |
| "grad_norm": 0.5279349088668823, | |
| "learning_rate": 7.120743034055728e-05, | |
| "loss": 0.0586, | |
| "step": 230 | |
| }, | |
| { | |
| "epoch": 0.37209302325581395, | |
| "grad_norm": 0.6271268725395203, | |
| "learning_rate": 7.430340557275543e-05, | |
| "loss": 0.0776, | |
| "step": 240 | |
| }, | |
| { | |
| "epoch": 0.3875968992248062, | |
| "grad_norm": 0.8573686480522156, | |
| "learning_rate": 7.739938080495357e-05, | |
| "loss": 0.0628, | |
| "step": 250 | |
| }, | |
| { | |
| "epoch": 0.3875968992248062, | |
| "eval_loss": 0.0528356209397316, | |
| "eval_runtime": 171.8837, | |
| "eval_samples_per_second": 6.673, | |
| "eval_steps_per_second": 6.673, | |
| "step": 250 | |
| }, | |
| { | |
| "epoch": 0.40310077519379844, | |
| "grad_norm": 0.38616809248924255, | |
| "learning_rate": 8.04953560371517e-05, | |
| "loss": 0.0509, | |
| "step": 260 | |
| }, | |
| { | |
| "epoch": 0.4186046511627907, | |
| "grad_norm": 0.4749118983745575, | |
| "learning_rate": 8.359133126934985e-05, | |
| "loss": 0.0489, | |
| "step": 270 | |
| }, | |
| { | |
| "epoch": 0.43410852713178294, | |
| "grad_norm": 0.6327615976333618, | |
| "learning_rate": 8.6687306501548e-05, | |
| "loss": 0.0845, | |
| "step": 280 | |
| }, | |
| { | |
| "epoch": 0.4496124031007752, | |
| "grad_norm": 0.9490478038787842, | |
| "learning_rate": 8.978328173374613e-05, | |
| "loss": 0.0613, | |
| "step": 290 | |
| }, | |
| { | |
| "epoch": 0.46511627906976744, | |
| "grad_norm": 0.5700337886810303, | |
| "learning_rate": 9.287925696594427e-05, | |
| "loss": 0.0482, | |
| "step": 300 | |
| }, | |
| { | |
| "epoch": 0.46511627906976744, | |
| "eval_loss": 0.04843495413661003, | |
| "eval_runtime": 171.945, | |
| "eval_samples_per_second": 6.671, | |
| "eval_steps_per_second": 6.671, | |
| "step": 300 | |
| }, | |
| { | |
| "epoch": 0.4806201550387597, | |
| "grad_norm": 0.6147510409355164, | |
| "learning_rate": 9.597523219814242e-05, | |
| "loss": 0.0594, | |
| "step": 310 | |
| }, | |
| { | |
| "epoch": 0.49612403100775193, | |
| "grad_norm": 0.5589321851730347, | |
| "learning_rate": 9.907120743034056e-05, | |
| "loss": 0.044, | |
| "step": 320 | |
| }, | |
| { | |
| "epoch": 0.5116279069767442, | |
| "grad_norm": 0.3106387257575989, | |
| "learning_rate": 9.999856438185238e-05, | |
| "loss": 0.0505, | |
| "step": 330 | |
| }, | |
| { | |
| "epoch": 0.5271317829457365, | |
| "grad_norm": 0.4362027943134308, | |
| "learning_rate": 9.999153298122152e-05, | |
| "loss": 0.0536, | |
| "step": 340 | |
| }, | |
| { | |
| "epoch": 0.5426356589147286, | |
| "grad_norm": 0.2365163117647171, | |
| "learning_rate": 9.997864293614054e-05, | |
| "loss": 0.0429, | |
| "step": 350 | |
| }, | |
| { | |
| "epoch": 0.5426356589147286, | |
| "eval_loss": 0.04433372616767883, | |
| "eval_runtime": 171.9903, | |
| "eval_samples_per_second": 6.669, | |
| "eval_steps_per_second": 6.669, | |
| "step": 350 | |
| }, | |
| { | |
| "epoch": 0.5581395348837209, | |
| "grad_norm": 0.47546619176864624, | |
| "learning_rate": 9.995989575722902e-05, | |
| "loss": 0.0432, | |
| "step": 360 | |
| }, | |
| { | |
| "epoch": 0.5736434108527132, | |
| "grad_norm": 0.3624158203601837, | |
| "learning_rate": 9.993529364152018e-05, | |
| "loss": 0.0612, | |
| "step": 370 | |
| }, | |
| { | |
| "epoch": 0.5891472868217055, | |
| "grad_norm": 0.48345044255256653, | |
| "learning_rate": 9.990483947220319e-05, | |
| "loss": 0.0611, | |
| "step": 380 | |
| }, | |
| { | |
| "epoch": 0.6046511627906976, | |
| "grad_norm": 0.272274374961853, | |
| "learning_rate": 9.986853681828546e-05, | |
| "loss": 0.0574, | |
| "step": 390 | |
| }, | |
| { | |
| "epoch": 0.6201550387596899, | |
| "grad_norm": 0.7242560982704163, | |
| "learning_rate": 9.982638993417425e-05, | |
| "loss": 0.0526, | |
| "step": 400 | |
| }, | |
| { | |
| "epoch": 0.6201550387596899, | |
| "eval_loss": 0.04319106042385101, | |
| "eval_runtime": 171.8171, | |
| "eval_samples_per_second": 6.676, | |
| "eval_steps_per_second": 6.676, | |
| "step": 400 | |
| }, | |
| { | |
| "epoch": 0.6356589147286822, | |
| "grad_norm": 0.44166314601898193, | |
| "learning_rate": 9.977840375917817e-05, | |
| "loss": 0.0545, | |
| "step": 410 | |
| }, | |
| { | |
| "epoch": 0.6511627906976745, | |
| "grad_norm": 0.3073597252368927, | |
| "learning_rate": 9.972458391692827e-05, | |
| "loss": 0.0529, | |
| "step": 420 | |
| }, | |
| { | |
| "epoch": 0.6666666666666666, | |
| "grad_norm": 0.4054962396621704, | |
| "learning_rate": 9.966493671471904e-05, | |
| "loss": 0.0484, | |
| "step": 430 | |
| }, | |
| { | |
| "epoch": 0.6821705426356589, | |
| "grad_norm": 0.38832733035087585, | |
| "learning_rate": 9.959946914276922e-05, | |
| "loss": 0.0607, | |
| "step": 440 | |
| }, | |
| { | |
| "epoch": 0.6976744186046512, | |
| "grad_norm": 0.5320560932159424, | |
| "learning_rate": 9.952818887340257e-05, | |
| "loss": 0.0446, | |
| "step": 450 | |
| }, | |
| { | |
| "epoch": 0.6976744186046512, | |
| "eval_loss": 0.03905312716960907, | |
| "eval_runtime": 171.6179, | |
| "eval_samples_per_second": 6.683, | |
| "eval_steps_per_second": 6.683, | |
| "step": 450 | |
| }, | |
| { | |
| "epoch": 0.7131782945736435, | |
| "grad_norm": 0.2739388048648834, | |
| "learning_rate": 9.945110426014878e-05, | |
| "loss": 0.0507, | |
| "step": 460 | |
| }, | |
| { | |
| "epoch": 0.7286821705426356, | |
| "grad_norm": 0.5490703582763672, | |
| "learning_rate": 9.936822433676444e-05, | |
| "loss": 0.0424, | |
| "step": 470 | |
| }, | |
| { | |
| "epoch": 0.7441860465116279, | |
| "grad_norm": 0.2568321228027344, | |
| "learning_rate": 9.927955881617444e-05, | |
| "loss": 0.047, | |
| "step": 480 | |
| }, | |
| { | |
| "epoch": 0.7596899224806202, | |
| "grad_norm": 0.5469834804534912, | |
| "learning_rate": 9.918511808933358e-05, | |
| "loss": 0.0443, | |
| "step": 490 | |
| }, | |
| { | |
| "epoch": 0.7751937984496124, | |
| "grad_norm": 0.3519169092178345, | |
| "learning_rate": 9.908491322400885e-05, | |
| "loss": 0.0503, | |
| "step": 500 | |
| }, | |
| { | |
| "epoch": 0.7751937984496124, | |
| "eval_loss": 0.0377386212348938, | |
| "eval_runtime": 171.3737, | |
| "eval_samples_per_second": 6.693, | |
| "eval_steps_per_second": 6.693, | |
| "step": 500 | |
| }, | |
| { | |
| "epoch": 0.7906976744186046, | |
| "grad_norm": 0.18305093050003052, | |
| "learning_rate": 9.897895596348247e-05, | |
| "loss": 0.042, | |
| "step": 510 | |
| }, | |
| { | |
| "epoch": 0.8062015503875969, | |
| "grad_norm": 0.34450390934944153, | |
| "learning_rate": 9.886725872517552e-05, | |
| "loss": 0.037, | |
| "step": 520 | |
| }, | |
| { | |
| "epoch": 0.8217054263565892, | |
| "grad_norm": 0.3624585270881653, | |
| "learning_rate": 9.874983459919277e-05, | |
| "loss": 0.0471, | |
| "step": 530 | |
| }, | |
| { | |
| "epoch": 0.8372093023255814, | |
| "grad_norm": 0.3540893495082855, | |
| "learning_rate": 9.862669734678867e-05, | |
| "loss": 0.0391, | |
| "step": 540 | |
| }, | |
| { | |
| "epoch": 0.8527131782945736, | |
| "grad_norm": 0.26056310534477234, | |
| "learning_rate": 9.849786139875452e-05, | |
| "loss": 0.048, | |
| "step": 550 | |
| }, | |
| { | |
| "epoch": 0.8527131782945736, | |
| "eval_loss": 0.03856755420565605, | |
| "eval_runtime": 171.2806, | |
| "eval_samples_per_second": 6.697, | |
| "eval_steps_per_second": 6.697, | |
| "step": 550 | |
| }, | |
| { | |
| "epoch": 0.8682170542635659, | |
| "grad_norm": 0.4234136939048767, | |
| "learning_rate": 9.836334185372738e-05, | |
| "loss": 0.048, | |
| "step": 560 | |
| }, | |
| { | |
| "epoch": 0.8837209302325582, | |
| "grad_norm": 0.3422122299671173, | |
| "learning_rate": 9.822315447642056e-05, | |
| "loss": 0.0432, | |
| "step": 570 | |
| }, | |
| { | |
| "epoch": 0.8992248062015504, | |
| "grad_norm": 0.4335425794124603, | |
| "learning_rate": 9.807731569577615e-05, | |
| "loss": 0.0337, | |
| "step": 580 | |
| }, | |
| { | |
| "epoch": 0.9147286821705426, | |
| "grad_norm": 0.6463870406150818, | |
| "learning_rate": 9.792584260303964e-05, | |
| "loss": 0.0379, | |
| "step": 590 | |
| }, | |
| { | |
| "epoch": 0.9302325581395349, | |
| "grad_norm": 0.4832146167755127, | |
| "learning_rate": 9.776875294975698e-05, | |
| "loss": 0.0636, | |
| "step": 600 | |
| }, | |
| { | |
| "epoch": 0.9302325581395349, | |
| "eval_loss": 0.04392372816801071, | |
| "eval_runtime": 171.0631, | |
| "eval_samples_per_second": 6.705, | |
| "eval_steps_per_second": 6.705, | |
| "step": 600 | |
| }, | |
| { | |
| "epoch": 0.9457364341085271, | |
| "grad_norm": 0.42290669679641724, | |
| "learning_rate": 9.760606514569416e-05, | |
| "loss": 0.043, | |
| "step": 610 | |
| }, | |
| { | |
| "epoch": 0.9612403100775194, | |
| "grad_norm": 0.2062637358903885, | |
| "learning_rate": 9.743779825667984e-05, | |
| "loss": 0.039, | |
| "step": 620 | |
| }, | |
| { | |
| "epoch": 0.9767441860465116, | |
| "grad_norm": 0.2891997694969177, | |
| "learning_rate": 9.726397200237085e-05, | |
| "loss": 0.0424, | |
| "step": 630 | |
| }, | |
| { | |
| "epoch": 0.9922480620155039, | |
| "grad_norm": 0.4399895966053009, | |
| "learning_rate": 9.708460675394126e-05, | |
| "loss": 0.0506, | |
| "step": 640 | |
| }, | |
| { | |
| "epoch": 1.0077519379844961, | |
| "grad_norm": 0.20108899474143982, | |
| "learning_rate": 9.689972353169499e-05, | |
| "loss": 0.0333, | |
| "step": 650 | |
| }, | |
| { | |
| "epoch": 1.0077519379844961, | |
| "eval_loss": 0.03613699600100517, | |
| "eval_runtime": 171.0726, | |
| "eval_samples_per_second": 6.705, | |
| "eval_steps_per_second": 6.705, | |
| "step": 650 | |
| }, | |
| { | |
| "epoch": 1.0232558139534884, | |
| "grad_norm": 0.40628916025161743, | |
| "learning_rate": 9.67093440026024e-05, | |
| "loss": 0.0322, | |
| "step": 660 | |
| }, | |
| { | |
| "epoch": 1.0387596899224807, | |
| "grad_norm": 0.23710253834724426, | |
| "learning_rate": 9.651349047776112e-05, | |
| "loss": 0.0473, | |
| "step": 670 | |
| }, | |
| { | |
| "epoch": 1.054263565891473, | |
| "grad_norm": 0.4060989022254944, | |
| "learning_rate": 9.631218590978126e-05, | |
| "loss": 0.0257, | |
| "step": 680 | |
| }, | |
| { | |
| "epoch": 1.069767441860465, | |
| "grad_norm": 0.29535090923309326, | |
| "learning_rate": 9.610545389009562e-05, | |
| "loss": 0.0339, | |
| "step": 690 | |
| }, | |
| { | |
| "epoch": 1.0852713178294573, | |
| "grad_norm": 0.336176335811615, | |
| "learning_rate": 9.589331864619486e-05, | |
| "loss": 0.0319, | |
| "step": 700 | |
| }, | |
| { | |
| "epoch": 1.0852713178294573, | |
| "eval_loss": 0.038464464247226715, | |
| "eval_runtime": 170.8431, | |
| "eval_samples_per_second": 6.714, | |
| "eval_steps_per_second": 6.714, | |
| "step": 700 | |
| }, | |
| { | |
| "epoch": 1.1007751937984496, | |
| "grad_norm": 0.35942918062210083, | |
| "learning_rate": 9.567580503878833e-05, | |
| "loss": 0.0285, | |
| "step": 710 | |
| }, | |
| { | |
| "epoch": 1.1162790697674418, | |
| "grad_norm": 0.30677011609077454, | |
| "learning_rate": 9.545293855889043e-05, | |
| "loss": 0.0368, | |
| "step": 720 | |
| }, | |
| { | |
| "epoch": 1.1317829457364341, | |
| "grad_norm": 0.16257011890411377, | |
| "learning_rate": 9.522474532483337e-05, | |
| "loss": 0.0351, | |
| "step": 730 | |
| }, | |
| { | |
| "epoch": 1.1472868217054264, | |
| "grad_norm": 0.3570394515991211, | |
| "learning_rate": 9.499125207920622e-05, | |
| "loss": 0.0477, | |
| "step": 740 | |
| }, | |
| { | |
| "epoch": 1.1627906976744187, | |
| "grad_norm": 0.145313560962677, | |
| "learning_rate": 9.475248618572096e-05, | |
| "loss": 0.033, | |
| "step": 750 | |
| }, | |
| { | |
| "epoch": 1.1627906976744187, | |
| "eval_loss": 0.03572777658700943, | |
| "eval_runtime": 170.7702, | |
| "eval_samples_per_second": 6.717, | |
| "eval_steps_per_second": 6.717, | |
| "step": 750 | |
| }, | |
| { | |
| "epoch": 1.178294573643411, | |
| "grad_norm": 0.43808048963546753, | |
| "learning_rate": 9.45084756260055e-05, | |
| "loss": 0.0392, | |
| "step": 760 | |
| }, | |
| { | |
| "epoch": 1.193798449612403, | |
| "grad_norm": 0.1340464949607849, | |
| "learning_rate": 9.425924899632458e-05, | |
| "loss": 0.0344, | |
| "step": 770 | |
| }, | |
| { | |
| "epoch": 1.2093023255813953, | |
| "grad_norm": 0.14380404353141785, | |
| "learning_rate": 9.400483550422845e-05, | |
| "loss": 0.0295, | |
| "step": 780 | |
| }, | |
| { | |
| "epoch": 1.2248062015503876, | |
| "grad_norm": 0.28932222723960876, | |
| "learning_rate": 9.374526496512992e-05, | |
| "loss": 0.0363, | |
| "step": 790 | |
| }, | |
| { | |
| "epoch": 1.2403100775193798, | |
| "grad_norm": 0.353335440158844, | |
| "learning_rate": 9.348056779881025e-05, | |
| "loss": 0.0242, | |
| "step": 800 | |
| }, | |
| { | |
| "epoch": 1.2403100775193798, | |
| "eval_loss": 0.037058863788843155, | |
| "eval_runtime": 170.7322, | |
| "eval_samples_per_second": 6.718, | |
| "eval_steps_per_second": 6.718, | |
| "step": 800 | |
| }, | |
| { | |
| "epoch": 1.255813953488372, | |
| "grad_norm": 0.43459367752075195, | |
| "learning_rate": 9.321077502585417e-05, | |
| "loss": 0.0315, | |
| "step": 810 | |
| }, | |
| { | |
| "epoch": 1.2713178294573644, | |
| "grad_norm": 0.3110826313495636, | |
| "learning_rate": 9.293591826401451e-05, | |
| "loss": 0.0342, | |
| "step": 820 | |
| }, | |
| { | |
| "epoch": 1.2868217054263567, | |
| "grad_norm": 0.38052013516426086, | |
| "learning_rate": 9.265602972450673e-05, | |
| "loss": 0.0376, | |
| "step": 830 | |
| }, | |
| { | |
| "epoch": 1.302325581395349, | |
| "grad_norm": 0.25867798924446106, | |
| "learning_rate": 9.237114220823413e-05, | |
| "loss": 0.0366, | |
| "step": 840 | |
| }, | |
| { | |
| "epoch": 1.3178294573643412, | |
| "grad_norm": 0.1531626284122467, | |
| "learning_rate": 9.208128910194377e-05, | |
| "loss": 0.0363, | |
| "step": 850 | |
| }, | |
| { | |
| "epoch": 1.3178294573643412, | |
| "eval_loss": 0.03434592857956886, | |
| "eval_runtime": 170.8504, | |
| "eval_samples_per_second": 6.713, | |
| "eval_steps_per_second": 6.713, | |
| "step": 850 | |
| }, | |
| { | |
| "epoch": 1.3333333333333333, | |
| "grad_norm": 0.33476677536964417, | |
| "learning_rate": 9.17865043743138e-05, | |
| "loss": 0.0377, | |
| "step": 860 | |
| }, | |
| { | |
| "epoch": 1.3488372093023255, | |
| "grad_norm": 0.14141975343227386, | |
| "learning_rate": 9.148682257197245e-05, | |
| "loss": 0.0294, | |
| "step": 870 | |
| }, | |
| { | |
| "epoch": 1.3643410852713178, | |
| "grad_norm": 0.42156022787094116, | |
| "learning_rate": 9.118227881544967e-05, | |
| "loss": 0.0469, | |
| "step": 880 | |
| }, | |
| { | |
| "epoch": 1.37984496124031, | |
| "grad_norm": 0.3194252550601959, | |
| "learning_rate": 9.087290879506104e-05, | |
| "loss": 0.0285, | |
| "step": 890 | |
| }, | |
| { | |
| "epoch": 1.3953488372093024, | |
| "grad_norm": 0.14607588946819305, | |
| "learning_rate": 9.055874876672519e-05, | |
| "loss": 0.0419, | |
| "step": 900 | |
| }, | |
| { | |
| "epoch": 1.3953488372093024, | |
| "eval_loss": 0.036030739545822144, | |
| "eval_runtime": 171.2298, | |
| "eval_samples_per_second": 6.699, | |
| "eval_steps_per_second": 6.699, | |
| "step": 900 | |
| }, | |
| { | |
| "epoch": 1.4108527131782946, | |
| "grad_norm": 0.21363942325115204, | |
| "learning_rate": 9.023983554771492e-05, | |
| "loss": 0.0311, | |
| "step": 910 | |
| }, | |
| { | |
| "epoch": 1.4263565891472867, | |
| "grad_norm": 0.23408649861812592, | |
| "learning_rate": 8.991620651234242e-05, | |
| "loss": 0.0285, | |
| "step": 920 | |
| }, | |
| { | |
| "epoch": 1.441860465116279, | |
| "grad_norm": 0.3802158236503601, | |
| "learning_rate": 8.958789958757928e-05, | |
| "loss": 0.0331, | |
| "step": 930 | |
| }, | |
| { | |
| "epoch": 1.4573643410852712, | |
| "grad_norm": 0.20695924758911133, | |
| "learning_rate": 8.925495324861177e-05, | |
| "loss": 0.034, | |
| "step": 940 | |
| }, | |
| { | |
| "epoch": 1.4728682170542635, | |
| "grad_norm": 0.429266095161438, | |
| "learning_rate": 8.89174065143318e-05, | |
| "loss": 0.0444, | |
| "step": 950 | |
| }, | |
| { | |
| "epoch": 1.4728682170542635, | |
| "eval_loss": 0.03494586795568466, | |
| "eval_runtime": 171.5354, | |
| "eval_samples_per_second": 6.687, | |
| "eval_steps_per_second": 6.687, | |
| "step": 950 | |
| }, | |
| { | |
| "epoch": 1.4883720930232558, | |
| "grad_norm": 0.3035537898540497, | |
| "learning_rate": 8.857529894276412e-05, | |
| "loss": 0.036, | |
| "step": 960 | |
| }, | |
| { | |
| "epoch": 1.503875968992248, | |
| "grad_norm": 0.23642411828041077, | |
| "learning_rate": 8.822867062643059e-05, | |
| "loss": 0.0325, | |
| "step": 970 | |
| }, | |
| { | |
| "epoch": 1.5193798449612403, | |
| "grad_norm": 0.22683683037757874, | |
| "learning_rate": 8.78775621876514e-05, | |
| "loss": 0.0375, | |
| "step": 980 | |
| }, | |
| { | |
| "epoch": 1.5348837209302326, | |
| "grad_norm": 0.3267425298690796, | |
| "learning_rate": 8.752201477378459e-05, | |
| "loss": 0.0266, | |
| "step": 990 | |
| }, | |
| { | |
| "epoch": 1.550387596899225, | |
| "grad_norm": 0.1990777552127838, | |
| "learning_rate": 8.716207005240382e-05, | |
| "loss": 0.0297, | |
| "step": 1000 | |
| }, | |
| { | |
| "epoch": 1.550387596899225, | |
| "eval_loss": 0.03623834624886513, | |
| "eval_runtime": 171.9583, | |
| "eval_samples_per_second": 6.67, | |
| "eval_steps_per_second": 6.67, | |
| "step": 1000 | |
| }, | |
| { | |
| "epoch": 1.5658914728682172, | |
| "grad_norm": 0.45734983682632446, | |
| "learning_rate": 8.679777020641525e-05, | |
| "loss": 0.0423, | |
| "step": 1010 | |
| }, | |
| { | |
| "epoch": 1.5813953488372094, | |
| "grad_norm": 0.3885669410228729, | |
| "learning_rate": 8.6429157929114e-05, | |
| "loss": 0.04, | |
| "step": 1020 | |
| }, | |
| { | |
| "epoch": 1.5968992248062015, | |
| "grad_norm": 0.3537205755710602, | |
| "learning_rate": 8.60562764191808e-05, | |
| "loss": 0.0331, | |
| "step": 1030 | |
| }, | |
| { | |
| "epoch": 1.6124031007751938, | |
| "grad_norm": 0.2690904140472412, | |
| "learning_rate": 8.567916937561944e-05, | |
| "loss": 0.0342, | |
| "step": 1040 | |
| }, | |
| { | |
| "epoch": 1.627906976744186, | |
| "grad_norm": 0.12849485874176025, | |
| "learning_rate": 8.529788099263554e-05, | |
| "loss": 0.0348, | |
| "step": 1050 | |
| }, | |
| { | |
| "epoch": 1.627906976744186, | |
| "eval_loss": 0.03483985364437103, | |
| "eval_runtime": 172.4575, | |
| "eval_samples_per_second": 6.651, | |
| "eval_steps_per_second": 6.651, | |
| "step": 1050 | |
| }, | |
| { | |
| "epoch": 1.6434108527131783, | |
| "grad_norm": 0.16510334610939026, | |
| "learning_rate": 8.49124559544573e-05, | |
| "loss": 0.0289, | |
| "step": 1060 | |
| }, | |
| { | |
| "epoch": 1.6589147286821704, | |
| "grad_norm": 0.3688533902168274, | |
| "learning_rate": 8.452293943009889e-05, | |
| "loss": 0.0289, | |
| "step": 1070 | |
| }, | |
| { | |
| "epoch": 1.6744186046511627, | |
| "grad_norm": 0.3377443253993988, | |
| "learning_rate": 8.412937706806692e-05, | |
| "loss": 0.0368, | |
| "step": 1080 | |
| }, | |
| { | |
| "epoch": 1.689922480620155, | |
| "grad_norm": 0.24271418154239655, | |
| "learning_rate": 8.373181499101077e-05, | |
| "loss": 0.0328, | |
| "step": 1090 | |
| }, | |
| { | |
| "epoch": 1.7054263565891472, | |
| "grad_norm": 0.6041512489318848, | |
| "learning_rate": 8.333029979031737e-05, | |
| "loss": 0.0271, | |
| "step": 1100 | |
| }, | |
| { | |
| "epoch": 1.7054263565891472, | |
| "eval_loss": 0.03539975360035896, | |
| "eval_runtime": 172.8867, | |
| "eval_samples_per_second": 6.634, | |
| "eval_steps_per_second": 6.634, | |
| "step": 1100 | |
| }, | |
| { | |
| "epoch": 1.7209302325581395, | |
| "grad_norm": 0.1947883814573288, | |
| "learning_rate": 8.292487852065104e-05, | |
| "loss": 0.0488, | |
| "step": 1110 | |
| }, | |
| { | |
| "epoch": 1.7364341085271318, | |
| "grad_norm": 0.5195293426513672, | |
| "learning_rate": 8.251559869443897e-05, | |
| "loss": 0.0341, | |
| "step": 1120 | |
| }, | |
| { | |
| "epoch": 1.751937984496124, | |
| "grad_norm": 0.3926396369934082, | |
| "learning_rate": 8.210250827630313e-05, | |
| "loss": 0.0327, | |
| "step": 1130 | |
| }, | |
| { | |
| "epoch": 1.7674418604651163, | |
| "grad_norm": 0.31271877884864807, | |
| "learning_rate": 8.168565567743924e-05, | |
| "loss": 0.0261, | |
| "step": 1140 | |
| }, | |
| { | |
| "epoch": 1.7829457364341086, | |
| "grad_norm": 0.6073179841041565, | |
| "learning_rate": 8.126508974994321e-05, | |
| "loss": 0.0362, | |
| "step": 1150 | |
| }, | |
| { | |
| "epoch": 1.7829457364341086, | |
| "eval_loss": 0.036610543727874756, | |
| "eval_runtime": 173.3111, | |
| "eval_samples_per_second": 6.618, | |
| "eval_steps_per_second": 6.618, | |
| "step": 1150 | |
| }, | |
| { | |
| "epoch": 1.7984496124031009, | |
| "grad_norm": 0.29566118121147156, | |
| "learning_rate": 8.084085978108611e-05, | |
| "loss": 0.02, | |
| "step": 1160 | |
| }, | |
| { | |
| "epoch": 1.8139534883720931, | |
| "grad_norm": 0.2208748310804367, | |
| "learning_rate": 8.041301548753804e-05, | |
| "loss": 0.0274, | |
| "step": 1170 | |
| }, | |
| { | |
| "epoch": 1.8294573643410854, | |
| "grad_norm": 0.33662283420562744, | |
| "learning_rate": 7.998160700954165e-05, | |
| "loss": 0.026, | |
| "step": 1180 | |
| }, | |
| { | |
| "epoch": 1.8449612403100775, | |
| "grad_norm": 0.3825443387031555, | |
| "learning_rate": 7.954668490503621e-05, | |
| "loss": 0.0326, | |
| "step": 1190 | |
| }, | |
| { | |
| "epoch": 1.8604651162790697, | |
| "grad_norm": 0.4023016691207886, | |
| "learning_rate": 7.910830014373237e-05, | |
| "loss": 0.034, | |
| "step": 1200 | |
| }, | |
| { | |
| "epoch": 1.8604651162790697, | |
| "eval_loss": 0.03443226218223572, | |
| "eval_runtime": 173.7001, | |
| "eval_samples_per_second": 6.603, | |
| "eval_steps_per_second": 6.603, | |
| "step": 1200 | |
| }, | |
| { | |
| "epoch": 1.875968992248062, | |
| "grad_norm": 0.21011577546596527, | |
| "learning_rate": 7.866650410113907e-05, | |
| "loss": 0.0314, | |
| "step": 1210 | |
| }, | |
| { | |
| "epoch": 1.8914728682170543, | |
| "grad_norm": 0.4147494435310364, | |
| "learning_rate": 7.822134855254263e-05, | |
| "loss": 0.0386, | |
| "step": 1220 | |
| }, | |
| { | |
| "epoch": 1.9069767441860463, | |
| "grad_norm": 0.2679975926876068, | |
| "learning_rate": 7.777288566693905e-05, | |
| "loss": 0.0347, | |
| "step": 1230 | |
| }, | |
| { | |
| "epoch": 1.9224806201550386, | |
| "grad_norm": 0.30710622668266296, | |
| "learning_rate": 7.732116800092018e-05, | |
| "loss": 0.0309, | |
| "step": 1240 | |
| }, | |
| { | |
| "epoch": 1.937984496124031, | |
| "grad_norm": 0.2684403359889984, | |
| "learning_rate": 7.686624849251453e-05, | |
| "loss": 0.039, | |
| "step": 1250 | |
| }, | |
| { | |
| "epoch": 1.937984496124031, | |
| "eval_loss": 0.03436611220240593, | |
| "eval_runtime": 173.9789, | |
| "eval_samples_per_second": 6.593, | |
| "eval_steps_per_second": 6.593, | |
| "step": 1250 | |
| }, | |
| { | |
| "epoch": 1.9534883720930232, | |
| "grad_norm": 0.381550133228302, | |
| "learning_rate": 7.640818045498324e-05, | |
| "loss": 0.0471, | |
| "step": 1260 | |
| }, | |
| { | |
| "epoch": 1.9689922480620154, | |
| "grad_norm": 0.12695617973804474, | |
| "learning_rate": 7.59470175705722e-05, | |
| "loss": 0.0362, | |
| "step": 1270 | |
| }, | |
| { | |
| "epoch": 1.9844961240310077, | |
| "grad_norm": 0.2954389452934265, | |
| "learning_rate": 7.548281388422088e-05, | |
| "loss": 0.0395, | |
| "step": 1280 | |
| }, | |
| { | |
| "epoch": 2.0, | |
| "grad_norm": 0.21959000825881958, | |
| "learning_rate": 7.50156237972286e-05, | |
| "loss": 0.0334, | |
| "step": 1290 | |
| }, | |
| { | |
| "epoch": 2.0155038759689923, | |
| "grad_norm": 0.22099755704402924, | |
| "learning_rate": 7.454550206087921e-05, | |
| "loss": 0.0248, | |
| "step": 1300 | |
| }, | |
| { | |
| "epoch": 2.0155038759689923, | |
| "eval_loss": 0.03400569409132004, | |
| "eval_runtime": 174.5025, | |
| "eval_samples_per_second": 6.573, | |
| "eval_steps_per_second": 6.573, | |
| "step": 1300 | |
| }, | |
| { | |
| "epoch": 2.0310077519379846, | |
| "grad_norm": 0.25023770332336426, | |
| "learning_rate": 7.407250377002451e-05, | |
| "loss": 0.0241, | |
| "step": 1310 | |
| }, | |
| { | |
| "epoch": 2.046511627906977, | |
| "grad_norm": 0.21477766335010529, | |
| "learning_rate": 7.359668435662757e-05, | |
| "loss": 0.0223, | |
| "step": 1320 | |
| }, | |
| { | |
| "epoch": 2.062015503875969, | |
| "grad_norm": 0.2844666540622711, | |
| "learning_rate": 7.311809958326659e-05, | |
| "loss": 0.0168, | |
| "step": 1330 | |
| }, | |
| { | |
| "epoch": 2.0775193798449614, | |
| "grad_norm": 0.33521831035614014, | |
| "learning_rate": 7.263680553659983e-05, | |
| "loss": 0.0216, | |
| "step": 1340 | |
| }, | |
| { | |
| "epoch": 2.0930232558139537, | |
| "grad_norm": 0.3837973177433014, | |
| "learning_rate": 7.21528586207927e-05, | |
| "loss": 0.0209, | |
| "step": 1350 | |
| }, | |
| { | |
| "epoch": 2.0930232558139537, | |
| "eval_loss": 0.03692101314663887, | |
| "eval_runtime": 174.8765, | |
| "eval_samples_per_second": 6.559, | |
| "eval_steps_per_second": 6.559, | |
| "step": 1350 | |
| }, | |
| { | |
| "epoch": 2.108527131782946, | |
| "grad_norm": 0.6733439564704895, | |
| "learning_rate": 7.166631555090759e-05, | |
| "loss": 0.0269, | |
| "step": 1360 | |
| }, | |
| { | |
| "epoch": 2.124031007751938, | |
| "grad_norm": 0.2908807098865509, | |
| "learning_rate": 7.117723334625731e-05, | |
| "loss": 0.0219, | |
| "step": 1370 | |
| }, | |
| { | |
| "epoch": 2.13953488372093, | |
| "grad_norm": 0.3064257502555847, | |
| "learning_rate": 7.068566932372278e-05, | |
| "loss": 0.0239, | |
| "step": 1380 | |
| }, | |
| { | |
| "epoch": 2.1550387596899223, | |
| "grad_norm": 0.4648115336894989, | |
| "learning_rate": 7.019168109103599e-05, | |
| "loss": 0.0199, | |
| "step": 1390 | |
| }, | |
| { | |
| "epoch": 2.1705426356589146, | |
| "grad_norm": 0.24724262952804565, | |
| "learning_rate": 6.96953265400287e-05, | |
| "loss": 0.0211, | |
| "step": 1400 | |
| }, | |
| { | |
| "epoch": 2.1705426356589146, | |
| "eval_loss": 0.03518850728869438, | |
| "eval_runtime": 174.9572, | |
| "eval_samples_per_second": 6.556, | |
| "eval_steps_per_second": 6.556, | |
| "step": 1400 | |
| }, | |
| { | |
| "epoch": 2.186046511627907, | |
| "grad_norm": 0.25550487637519836, | |
| "learning_rate": 6.9196663839848e-05, | |
| "loss": 0.0195, | |
| "step": 1410 | |
| }, | |
| { | |
| "epoch": 2.201550387596899, | |
| "grad_norm": 0.37793949246406555, | |
| "learning_rate": 6.869575143013934e-05, | |
| "loss": 0.0201, | |
| "step": 1420 | |
| }, | |
| { | |
| "epoch": 2.2170542635658914, | |
| "grad_norm": 0.4046792685985565, | |
| "learning_rate": 6.819264801419771e-05, | |
| "loss": 0.0314, | |
| "step": 1430 | |
| }, | |
| { | |
| "epoch": 2.2325581395348837, | |
| "grad_norm": 0.19130779802799225, | |
| "learning_rate": 6.768741255208813e-05, | |
| "loss": 0.0203, | |
| "step": 1440 | |
| }, | |
| { | |
| "epoch": 2.248062015503876, | |
| "grad_norm": 0.3072698414325714, | |
| "learning_rate": 6.718010425373599e-05, | |
| "loss": 0.0178, | |
| "step": 1450 | |
| }, | |
| { | |
| "epoch": 2.248062015503876, | |
| "eval_loss": 0.0378645583987236, | |
| "eval_runtime": 175.3079, | |
| "eval_samples_per_second": 6.543, | |
| "eval_steps_per_second": 6.543, | |
| "step": 1450 | |
| }, | |
| { | |
| "epoch": 2.2635658914728682, | |
| "grad_norm": 0.518846333026886, | |
| "learning_rate": 6.667078257198796e-05, | |
| "loss": 0.0215, | |
| "step": 1460 | |
| }, | |
| { | |
| "epoch": 2.2790697674418605, | |
| "grad_norm": 0.31176698207855225, | |
| "learning_rate": 6.615950719564466e-05, | |
| "loss": 0.016, | |
| "step": 1470 | |
| }, | |
| { | |
| "epoch": 2.294573643410853, | |
| "grad_norm": 0.3617725372314453, | |
| "learning_rate": 6.56463380424655e-05, | |
| "loss": 0.0331, | |
| "step": 1480 | |
| }, | |
| { | |
| "epoch": 2.310077519379845, | |
| "grad_norm": 0.19033777713775635, | |
| "learning_rate": 6.51313352521468e-05, | |
| "loss": 0.0198, | |
| "step": 1490 | |
| }, | |
| { | |
| "epoch": 2.3255813953488373, | |
| "grad_norm": 0.3381364047527313, | |
| "learning_rate": 6.46145591792738e-05, | |
| "loss": 0.026, | |
| "step": 1500 | |
| }, | |
| { | |
| "epoch": 2.3255813953488373, | |
| "eval_loss": 0.03550190106034279, | |
| "eval_runtime": 175.3413, | |
| "eval_samples_per_second": 6.542, | |
| "eval_steps_per_second": 6.542, | |
| "step": 1500 | |
| }, | |
| { | |
| "epoch": 2.3410852713178296, | |
| "grad_norm": 0.11263620853424072, | |
| "learning_rate": 6.409607038624759e-05, | |
| "loss": 0.0168, | |
| "step": 1510 | |
| }, | |
| { | |
| "epoch": 2.356589147286822, | |
| "grad_norm": 0.24013790488243103, | |
| "learning_rate": 6.357592963618761e-05, | |
| "loss": 0.0303, | |
| "step": 1520 | |
| }, | |
| { | |
| "epoch": 2.3720930232558137, | |
| "grad_norm": 0.19617025554180145, | |
| "learning_rate": 6.305419788581067e-05, | |
| "loss": 0.0218, | |
| "step": 1530 | |
| }, | |
| { | |
| "epoch": 2.387596899224806, | |
| "grad_norm": 0.19198082387447357, | |
| "learning_rate": 6.253093627828724e-05, | |
| "loss": 0.0192, | |
| "step": 1540 | |
| }, | |
| { | |
| "epoch": 2.4031007751937983, | |
| "grad_norm": 0.32724863290786743, | |
| "learning_rate": 6.200620613607597e-05, | |
| "loss": 0.0166, | |
| "step": 1550 | |
| }, | |
| { | |
| "epoch": 2.4031007751937983, | |
| "eval_loss": 0.03754343092441559, | |
| "eval_runtime": 175.4702, | |
| "eval_samples_per_second": 6.537, | |
| "eval_steps_per_second": 6.537, | |
| "step": 1550 | |
| }, | |
| { | |
| "epoch": 2.4186046511627906, | |
| "grad_norm": 0.369870662689209, | |
| "learning_rate": 6.148006895373706e-05, | |
| "loss": 0.0226, | |
| "step": 1560 | |
| }, | |
| { | |
| "epoch": 2.434108527131783, | |
| "grad_norm": 0.2815375030040741, | |
| "learning_rate": 6.0952586390725644e-05, | |
| "loss": 0.0177, | |
| "step": 1570 | |
| }, | |
| { | |
| "epoch": 2.449612403100775, | |
| "grad_norm": 0.3072023391723633, | |
| "learning_rate": 6.042382026416563e-05, | |
| "loss": 0.0208, | |
| "step": 1580 | |
| }, | |
| { | |
| "epoch": 2.4651162790697674, | |
| "grad_norm": 0.2327968031167984, | |
| "learning_rate": 5.989383254160529e-05, | |
| "loss": 0.0233, | |
| "step": 1590 | |
| }, | |
| { | |
| "epoch": 2.4806201550387597, | |
| "grad_norm": 0.49106815457344055, | |
| "learning_rate": 5.936268533375506e-05, | |
| "loss": 0.0237, | |
| "step": 1600 | |
| }, | |
| { | |
| "epoch": 2.4806201550387597, | |
| "eval_loss": 0.03549407050013542, | |
| "eval_runtime": 175.559, | |
| "eval_samples_per_second": 6.533, | |
| "eval_steps_per_second": 6.533, | |
| "step": 1600 | |
| }, | |
| { | |
| "epoch": 2.496124031007752, | |
| "grad_norm": 0.662108838558197, | |
| "learning_rate": 5.883044088720865e-05, | |
| "loss": 0.027, | |
| "step": 1610 | |
| }, | |
| { | |
| "epoch": 2.511627906976744, | |
| "grad_norm": 0.1898617446422577, | |
| "learning_rate": 5.829716157714814e-05, | |
| "loss": 0.02, | |
| "step": 1620 | |
| }, | |
| { | |
| "epoch": 2.5271317829457365, | |
| "grad_norm": 0.3116423189640045, | |
| "learning_rate": 5.77629099000341e-05, | |
| "loss": 0.029, | |
| "step": 1630 | |
| }, | |
| { | |
| "epoch": 2.5426356589147288, | |
| "grad_norm": 0.32322463393211365, | |
| "learning_rate": 5.7227748466281486e-05, | |
| "loss": 0.0277, | |
| "step": 1640 | |
| }, | |
| { | |
| "epoch": 2.558139534883721, | |
| "grad_norm": 0.42573103308677673, | |
| "learning_rate": 5.669173999292208e-05, | |
| "loss": 0.037, | |
| "step": 1650 | |
| }, | |
| { | |
| "epoch": 2.558139534883721, | |
| "eval_loss": 0.03468121960759163, | |
| "eval_runtime": 175.6858, | |
| "eval_samples_per_second": 6.529, | |
| "eval_steps_per_second": 6.529, | |
| "step": 1650 | |
| }, | |
| { | |
| "epoch": 2.5736434108527133, | |
| "grad_norm": 0.5278474688529968, | |
| "learning_rate": 5.615494729625458e-05, | |
| "loss": 0.0225, | |
| "step": 1660 | |
| }, | |
| { | |
| "epoch": 2.5891472868217056, | |
| "grad_norm": 0.3459230661392212, | |
| "learning_rate": 5.561743328448296e-05, | |
| "loss": 0.0268, | |
| "step": 1670 | |
| }, | |
| { | |
| "epoch": 2.604651162790698, | |
| "grad_norm": 0.1962781399488449, | |
| "learning_rate": 5.5079260950344035e-05, | |
| "loss": 0.0197, | |
| "step": 1680 | |
| }, | |
| { | |
| "epoch": 2.62015503875969, | |
| "grad_norm": 0.19597306847572327, | |
| "learning_rate": 5.454049336372531e-05, | |
| "loss": 0.0215, | |
| "step": 1690 | |
| }, | |
| { | |
| "epoch": 2.6356589147286824, | |
| "grad_norm": 0.39598584175109863, | |
| "learning_rate": 5.4001193664273454e-05, | |
| "loss": 0.0161, | |
| "step": 1700 | |
| }, | |
| { | |
| "epoch": 2.6356589147286824, | |
| "eval_loss": 0.03870782628655434, | |
| "eval_runtime": 175.9342, | |
| "eval_samples_per_second": 6.519, | |
| "eval_steps_per_second": 6.519, | |
| "step": 1700 | |
| }, | |
| { | |
| "epoch": 2.6511627906976747, | |
| "grad_norm": 0.19037537276744843, | |
| "learning_rate": 5.346142505399495e-05, | |
| "loss": 0.0189, | |
| "step": 1710 | |
| }, | |
| { | |
| "epoch": 2.6666666666666665, | |
| "grad_norm": 0.3089650273323059, | |
| "learning_rate": 5.292125078984925e-05, | |
| "loss": 0.0191, | |
| "step": 1720 | |
| }, | |
| { | |
| "epoch": 2.682170542635659, | |
| "grad_norm": 0.12442419677972794, | |
| "learning_rate": 5.2380734176335425e-05, | |
| "loss": 0.0182, | |
| "step": 1730 | |
| }, | |
| { | |
| "epoch": 2.697674418604651, | |
| "grad_norm": 0.21611304581165314, | |
| "learning_rate": 5.183993855807343e-05, | |
| "loss": 0.0222, | |
| "step": 1740 | |
| }, | |
| { | |
| "epoch": 2.7131782945736433, | |
| "grad_norm": 0.0941254049539566, | |
| "learning_rate": 5.1298927312380586e-05, | |
| "loss": 0.0174, | |
| "step": 1750 | |
| }, | |
| { | |
| "epoch": 2.7131782945736433, | |
| "eval_loss": 0.03829415142536163, | |
| "eval_runtime": 176.3284, | |
| "eval_samples_per_second": 6.505, | |
| "eval_steps_per_second": 6.505, | |
| "step": 1750 | |
| }, | |
| { | |
| "epoch": 2.7286821705426356, | |
| "grad_norm": 0.2699877917766571, | |
| "learning_rate": 5.075776384184411e-05, | |
| "loss": 0.0298, | |
| "step": 1760 | |
| }, | |
| { | |
| "epoch": 2.744186046511628, | |
| "grad_norm": 0.2365487664937973, | |
| "learning_rate": 5.021651156689094e-05, | |
| "loss": 0.0294, | |
| "step": 1770 | |
| }, | |
| { | |
| "epoch": 2.75968992248062, | |
| "grad_norm": 0.26761698722839355, | |
| "learning_rate": 4.967523391835521e-05, | |
| "loss": 0.0264, | |
| "step": 1780 | |
| }, | |
| { | |
| "epoch": 2.7751937984496124, | |
| "grad_norm": 0.4799739122390747, | |
| "learning_rate": 4.9133994330044644e-05, | |
| "loss": 0.0245, | |
| "step": 1790 | |
| }, | |
| { | |
| "epoch": 2.7906976744186047, | |
| "grad_norm": 0.4386084973812103, | |
| "learning_rate": 4.85928562313066e-05, | |
| "loss": 0.0222, | |
| "step": 1800 | |
| }, | |
| { | |
| "epoch": 2.7906976744186047, | |
| "eval_loss": 0.0363197959959507, | |
| "eval_runtime": 176.3046, | |
| "eval_samples_per_second": 6.506, | |
| "eval_steps_per_second": 6.506, | |
| "step": 1800 | |
| }, | |
| { | |
| "epoch": 2.806201550387597, | |
| "grad_norm": 0.40001216530799866, | |
| "learning_rate": 4.8051883039594616e-05, | |
| "loss": 0.0167, | |
| "step": 1810 | |
| }, | |
| { | |
| "epoch": 2.8217054263565893, | |
| "grad_norm": 0.33203211426734924, | |
| "learning_rate": 4.751113815303624e-05, | |
| "loss": 0.0263, | |
| "step": 1820 | |
| }, | |
| { | |
| "epoch": 2.8372093023255816, | |
| "grad_norm": 0.6366134285926819, | |
| "learning_rate": 4.697068494300343e-05, | |
| "loss": 0.0197, | |
| "step": 1830 | |
| }, | |
| { | |
| "epoch": 2.8527131782945734, | |
| "grad_norm": 0.321431428194046, | |
| "learning_rate": 4.6430586746685724e-05, | |
| "loss": 0.024, | |
| "step": 1840 | |
| }, | |
| { | |
| "epoch": 2.8682170542635657, | |
| "grad_norm": 0.30680930614471436, | |
| "learning_rate": 4.589090685966758e-05, | |
| "loss": 0.0217, | |
| "step": 1850 | |
| }, | |
| { | |
| "epoch": 2.8682170542635657, | |
| "eval_loss": 0.03761348873376846, | |
| "eval_runtime": 176.3984, | |
| "eval_samples_per_second": 6.502, | |
| "eval_steps_per_second": 6.502, | |
| "step": 1850 | |
| }, | |
| { | |
| "epoch": 2.883720930232558, | |
| "grad_norm": 0.30222904682159424, | |
| "learning_rate": 4.535170852851073e-05, | |
| "loss": 0.0327, | |
| "step": 1860 | |
| }, | |
| { | |
| "epoch": 2.89922480620155, | |
| "grad_norm": 0.5061004757881165, | |
| "learning_rate": 4.481305494334201e-05, | |
| "loss": 0.0146, | |
| "step": 1870 | |
| }, | |
| { | |
| "epoch": 2.9147286821705425, | |
| "grad_norm": 0.1436406373977661, | |
| "learning_rate": 4.427500923044801e-05, | |
| "loss": 0.0211, | |
| "step": 1880 | |
| }, | |
| { | |
| "epoch": 2.9302325581395348, | |
| "grad_norm": 0.32733285427093506, | |
| "learning_rate": 4.373763444487705e-05, | |
| "loss": 0.0186, | |
| "step": 1890 | |
| }, | |
| { | |
| "epoch": 2.945736434108527, | |
| "grad_norm": 0.1981613039970398, | |
| "learning_rate": 4.3200993563049725e-05, | |
| "loss": 0.0198, | |
| "step": 1900 | |
| }, | |
| { | |
| "epoch": 2.945736434108527, | |
| "eval_loss": 0.0361945740878582, | |
| "eval_runtime": 176.4884, | |
| "eval_samples_per_second": 6.499, | |
| "eval_steps_per_second": 6.499, | |
| "step": 1900 | |
| }, | |
| { | |
| "epoch": 2.9612403100775193, | |
| "grad_norm": 0.22807447612285614, | |
| "learning_rate": 4.266514947537839e-05, | |
| "loss": 0.0195, | |
| "step": 1910 | |
| }, | |
| { | |
| "epoch": 2.9767441860465116, | |
| "grad_norm": 0.30690813064575195, | |
| "learning_rate": 4.2130164978896916e-05, | |
| "loss": 0.0236, | |
| "step": 1920 | |
| }, | |
| { | |
| "epoch": 2.992248062015504, | |
| "grad_norm": 0.02489878423511982, | |
| "learning_rate": 4.159610276990137e-05, | |
| "loss": 0.0148, | |
| "step": 1930 | |
| }, | |
| { | |
| "epoch": 3.007751937984496, | |
| "grad_norm": 0.15840907394886017, | |
| "learning_rate": 4.106302543660235e-05, | |
| "loss": 0.0154, | |
| "step": 1940 | |
| }, | |
| { | |
| "epoch": 3.0232558139534884, | |
| "grad_norm": 0.14962005615234375, | |
| "learning_rate": 4.053099545179028e-05, | |
| "loss": 0.0105, | |
| "step": 1950 | |
| }, | |
| { | |
| "epoch": 3.0232558139534884, | |
| "eval_loss": 0.03878239914774895, | |
| "eval_runtime": 176.4369, | |
| "eval_samples_per_second": 6.501, | |
| "eval_steps_per_second": 6.501, | |
| "step": 1950 | |
| }, | |
| { | |
| "epoch": 3.0387596899224807, | |
| "grad_norm": 0.21889622509479523, | |
| "learning_rate": 4.0000075165513845e-05, | |
| "loss": 0.0141, | |
| "step": 1960 | |
| }, | |
| { | |
| "epoch": 3.054263565891473, | |
| "grad_norm": 0.1822437047958374, | |
| "learning_rate": 3.9470326797773216e-05, | |
| "loss": 0.0084, | |
| "step": 1970 | |
| }, | |
| { | |
| "epoch": 3.0697674418604652, | |
| "grad_norm": 0.4426482319831848, | |
| "learning_rate": 3.8941812431228166e-05, | |
| "loss": 0.0113, | |
| "step": 1980 | |
| }, | |
| { | |
| "epoch": 3.0852713178294575, | |
| "grad_norm": 0.27718520164489746, | |
| "learning_rate": 3.8414594003922515e-05, | |
| "loss": 0.0156, | |
| "step": 1990 | |
| }, | |
| { | |
| "epoch": 3.10077519379845, | |
| "grad_norm": 0.08083080500364304, | |
| "learning_rate": 3.788873330202544e-05, | |
| "loss": 0.0097, | |
| "step": 2000 | |
| }, | |
| { | |
| "epoch": 3.10077519379845, | |
| "eval_loss": 0.043953340500593185, | |
| "eval_runtime": 176.3739, | |
| "eval_samples_per_second": 6.503, | |
| "eval_steps_per_second": 6.503, | |
| "step": 2000 | |
| }, | |
| { | |
| "epoch": 3.116279069767442, | |
| "grad_norm": 0.519051730632782, | |
| "learning_rate": 3.736429195259051e-05, | |
| "loss": 0.0057, | |
| "step": 2010 | |
| }, | |
| { | |
| "epoch": 3.1317829457364343, | |
| "grad_norm": 0.356781005859375, | |
| "learning_rate": 3.684133141633358e-05, | |
| "loss": 0.0105, | |
| "step": 2020 | |
| }, | |
| { | |
| "epoch": 3.147286821705426, | |
| "grad_norm": 0.13736553490161896, | |
| "learning_rate": 3.6319912980429846e-05, | |
| "loss": 0.0109, | |
| "step": 2030 | |
| }, | |
| { | |
| "epoch": 3.1627906976744184, | |
| "grad_norm": 0.1859646737575531, | |
| "learning_rate": 3.580009775133168e-05, | |
| "loss": 0.009, | |
| "step": 2040 | |
| }, | |
| { | |
| "epoch": 3.1782945736434107, | |
| "grad_norm": 0.12270783632993698, | |
| "learning_rate": 3.528194664760714e-05, | |
| "loss": 0.008, | |
| "step": 2050 | |
| }, | |
| { | |
| "epoch": 3.1782945736434107, | |
| "eval_loss": 0.04818466678261757, | |
| "eval_runtime": 176.2871, | |
| "eval_samples_per_second": 6.506, | |
| "eval_steps_per_second": 6.506, | |
| "step": 2050 | |
| }, | |
| { | |
| "epoch": 3.193798449612403, | |
| "grad_norm": 0.20273350179195404, | |
| "learning_rate": 3.476552039280096e-05, | |
| "loss": 0.0104, | |
| "step": 2060 | |
| }, | |
| { | |
| "epoch": 3.2093023255813953, | |
| "grad_norm": 0.1353200376033783, | |
| "learning_rate": 3.42508795083181e-05, | |
| "loss": 0.0106, | |
| "step": 2070 | |
| }, | |
| { | |
| "epoch": 3.2248062015503876, | |
| "grad_norm": 0.3596494197845459, | |
| "learning_rate": 3.373808430633106e-05, | |
| "loss": 0.01, | |
| "step": 2080 | |
| }, | |
| { | |
| "epoch": 3.24031007751938, | |
| "grad_norm": 0.6581428647041321, | |
| "learning_rate": 3.32271948827118e-05, | |
| "loss": 0.0105, | |
| "step": 2090 | |
| }, | |
| { | |
| "epoch": 3.255813953488372, | |
| "grad_norm": 0.2087380737066269, | |
| "learning_rate": 3.2718271109988863e-05, | |
| "loss": 0.0114, | |
| "step": 2100 | |
| }, | |
| { | |
| "epoch": 3.255813953488372, | |
| "eval_loss": 0.043491754680871964, | |
| "eval_runtime": 176.0628, | |
| "eval_samples_per_second": 6.515, | |
| "eval_steps_per_second": 6.515, | |
| "step": 2100 | |
| }, | |
| { | |
| "epoch": 3.2713178294573644, | |
| "grad_norm": 0.3167926073074341, | |
| "learning_rate": 3.2211372630330835e-05, | |
| "loss": 0.0093, | |
| "step": 2110 | |
| }, | |
| { | |
| "epoch": 3.2868217054263567, | |
| "grad_norm": 0.3298277258872986, | |
| "learning_rate": 3.170655884855661e-05, | |
| "loss": 0.0162, | |
| "step": 2120 | |
| }, | |
| { | |
| "epoch": 3.302325581395349, | |
| "grad_norm": 0.2147381752729416, | |
| "learning_rate": 3.120388892517368e-05, | |
| "loss": 0.0182, | |
| "step": 2130 | |
| }, | |
| { | |
| "epoch": 3.317829457364341, | |
| "grad_norm": 0.4693920314311981, | |
| "learning_rate": 3.070342176944494e-05, | |
| "loss": 0.0147, | |
| "step": 2140 | |
| }, | |
| { | |
| "epoch": 3.3333333333333335, | |
| "grad_norm": 0.10257305949926376, | |
| "learning_rate": 3.0205216032484805e-05, | |
| "loss": 0.0112, | |
| "step": 2150 | |
| }, | |
| { | |
| "epoch": 3.3333333333333335, | |
| "eval_loss": 0.040094662457704544, | |
| "eval_runtime": 176.3863, | |
| "eval_samples_per_second": 6.503, | |
| "eval_steps_per_second": 6.503, | |
| "step": 2150 | |
| }, | |
| { | |
| "epoch": 3.3488372093023258, | |
| "grad_norm": 0.17424806952476501, | |
| "learning_rate": 2.970933010038599e-05, | |
| "loss": 0.0153, | |
| "step": 2160 | |
| }, | |
| { | |
| "epoch": 3.3643410852713176, | |
| "grad_norm": 0.2841956317424774, | |
| "learning_rate": 2.921582208737681e-05, | |
| "loss": 0.0101, | |
| "step": 2170 | |
| }, | |
| { | |
| "epoch": 3.37984496124031, | |
| "grad_norm": 0.20709052681922913, | |
| "learning_rate": 2.872474982901081e-05, | |
| "loss": 0.0121, | |
| "step": 2180 | |
| }, | |
| { | |
| "epoch": 3.395348837209302, | |
| "grad_norm": 0.20538711547851562, | |
| "learning_rate": 2.8236170875388744e-05, | |
| "loss": 0.0139, | |
| "step": 2190 | |
| }, | |
| { | |
| "epoch": 3.4108527131782944, | |
| "grad_norm": 0.4432328939437866, | |
| "learning_rate": 2.775014248441422e-05, | |
| "loss": 0.0079, | |
| "step": 2200 | |
| }, | |
| { | |
| "epoch": 3.4108527131782944, | |
| "eval_loss": 0.04478052258491516, | |
| "eval_runtime": 176.5264, | |
| "eval_samples_per_second": 6.498, | |
| "eval_steps_per_second": 6.498, | |
| "step": 2200 | |
| }, | |
| { | |
| "epoch": 3.4263565891472867, | |
| "grad_norm": 0.1635671854019165, | |
| "learning_rate": 2.726672161508341e-05, | |
| "loss": 0.01, | |
| "step": 2210 | |
| }, | |
| { | |
| "epoch": 3.441860465116279, | |
| "grad_norm": 0.39635413885116577, | |
| "learning_rate": 2.678596492080984e-05, | |
| "loss": 0.0087, | |
| "step": 2220 | |
| }, | |
| { | |
| "epoch": 3.4573643410852712, | |
| "grad_norm": 0.4210352599620819, | |
| "learning_rate": 2.630792874278516e-05, | |
| "loss": 0.0081, | |
| "step": 2230 | |
| }, | |
| { | |
| "epoch": 3.4728682170542635, | |
| "grad_norm": 0.5623159408569336, | |
| "learning_rate": 2.583266910337624e-05, | |
| "loss": 0.0084, | |
| "step": 2240 | |
| }, | |
| { | |
| "epoch": 3.488372093023256, | |
| "grad_norm": 0.9574199318885803, | |
| "learning_rate": 2.5360241699559816e-05, | |
| "loss": 0.0133, | |
| "step": 2250 | |
| }, | |
| { | |
| "epoch": 3.488372093023256, | |
| "eval_loss": 0.04834475368261337, | |
| "eval_runtime": 176.6597, | |
| "eval_samples_per_second": 6.493, | |
| "eval_steps_per_second": 6.493, | |
| "step": 2250 | |
| }, | |
| { | |
| "epoch": 3.503875968992248, | |
| "grad_norm": 0.19124262034893036, | |
| "learning_rate": 2.4890701896395146e-05, | |
| "loss": 0.0129, | |
| "step": 2260 | |
| }, | |
| { | |
| "epoch": 3.5193798449612403, | |
| "grad_norm": 0.27771103382110596, | |
| "learning_rate": 2.4424104720535735e-05, | |
| "loss": 0.0132, | |
| "step": 2270 | |
| }, | |
| { | |
| "epoch": 3.5348837209302326, | |
| "grad_norm": 0.3095415532588959, | |
| "learning_rate": 2.3960504853780462e-05, | |
| "loss": 0.0107, | |
| "step": 2280 | |
| }, | |
| { | |
| "epoch": 3.550387596899225, | |
| "grad_norm": 0.11446672677993774, | |
| "learning_rate": 2.349995662666547e-05, | |
| "loss": 0.0061, | |
| "step": 2290 | |
| }, | |
| { | |
| "epoch": 3.565891472868217, | |
| "grad_norm": 0.244533509016037, | |
| "learning_rate": 2.3042514012096843e-05, | |
| "loss": 0.0128, | |
| "step": 2300 | |
| }, | |
| { | |
| "epoch": 3.565891472868217, | |
| "eval_loss": 0.04713751748204231, | |
| "eval_runtime": 176.5166, | |
| "eval_samples_per_second": 6.498, | |
| "eval_steps_per_second": 6.498, | |
| "step": 2300 | |
| }, | |
| { | |
| "epoch": 3.5813953488372094, | |
| "grad_norm": 0.5505859851837158, | |
| "learning_rate": 2.2588230619025407e-05, | |
| "loss": 0.0182, | |
| "step": 2310 | |
| }, | |
| { | |
| "epoch": 3.5968992248062017, | |
| "grad_norm": 0.2529643177986145, | |
| "learning_rate": 2.213715968616425e-05, | |
| "loss": 0.0122, | |
| "step": 2320 | |
| }, | |
| { | |
| "epoch": 3.612403100775194, | |
| "grad_norm": 0.16935287415981293, | |
| "learning_rate": 2.16893540757494e-05, | |
| "loss": 0.0094, | |
| "step": 2330 | |
| }, | |
| { | |
| "epoch": 3.6279069767441863, | |
| "grad_norm": 0.3461015820503235, | |
| "learning_rate": 2.1244866267344866e-05, | |
| "loss": 0.0094, | |
| "step": 2340 | |
| }, | |
| { | |
| "epoch": 3.6434108527131785, | |
| "grad_norm": 0.4279143214225769, | |
| "learning_rate": 2.080374835169235e-05, | |
| "loss": 0.012, | |
| "step": 2350 | |
| }, | |
| { | |
| "epoch": 3.6434108527131785, | |
| "eval_loss": 0.04670505225658417, | |
| "eval_runtime": 176.4379, | |
| "eval_samples_per_second": 6.501, | |
| "eval_steps_per_second": 6.501, | |
| "step": 2350 | |
| }, | |
| { | |
| "epoch": 3.6589147286821704, | |
| "grad_norm": 0.43128836154937744, | |
| "learning_rate": 2.0366052024606612e-05, | |
| "loss": 0.0116, | |
| "step": 2360 | |
| }, | |
| { | |
| "epoch": 3.6744186046511627, | |
| "grad_norm": 0.36545148491859436, | |
| "learning_rate": 1.9931828580917107e-05, | |
| "loss": 0.0097, | |
| "step": 2370 | |
| }, | |
| { | |
| "epoch": 3.689922480620155, | |
| "grad_norm": 0.4567084312438965, | |
| "learning_rate": 1.9501128908456523e-05, | |
| "loss": 0.0107, | |
| "step": 2380 | |
| }, | |
| { | |
| "epoch": 3.705426356589147, | |
| "grad_norm": 0.22181279957294464, | |
| "learning_rate": 1.9074003482097258e-05, | |
| "loss": 0.0083, | |
| "step": 2390 | |
| }, | |
| { | |
| "epoch": 3.7209302325581395, | |
| "grad_norm": 0.6979092359542847, | |
| "learning_rate": 1.8650502357835925e-05, | |
| "loss": 0.0143, | |
| "step": 2400 | |
| }, | |
| { | |
| "epoch": 3.7209302325581395, | |
| "eval_loss": 0.04637313261628151, | |
| "eval_runtime": 176.2066, | |
| "eval_samples_per_second": 6.509, | |
| "eval_steps_per_second": 6.509, | |
| "step": 2400 | |
| }, | |
| { | |
| "epoch": 3.7364341085271318, | |
| "grad_norm": 0.3935830891132355, | |
| "learning_rate": 1.82306751669274e-05, | |
| "loss": 0.0055, | |
| "step": 2410 | |
| }, | |
| { | |
| "epoch": 3.751937984496124, | |
| "grad_norm": 0.8239232301712036, | |
| "learning_rate": 1.7814571110068135e-05, | |
| "loss": 0.0141, | |
| "step": 2420 | |
| }, | |
| { | |
| "epoch": 3.7674418604651163, | |
| "grad_norm": 0.2028406262397766, | |
| "learning_rate": 1.740223895163039e-05, | |
| "loss": 0.0095, | |
| "step": 2430 | |
| }, | |
| { | |
| "epoch": 3.7829457364341086, | |
| "grad_norm": 0.11796276271343231, | |
| "learning_rate": 1.6993727013947336e-05, | |
| "loss": 0.0121, | |
| "step": 2440 | |
| }, | |
| { | |
| "epoch": 3.798449612403101, | |
| "grad_norm": 0.46211379766464233, | |
| "learning_rate": 1.6589083171649977e-05, | |
| "loss": 0.0063, | |
| "step": 2450 | |
| }, | |
| { | |
| "epoch": 3.798449612403101, | |
| "eval_loss": 0.048657044768333435, | |
| "eval_runtime": 176.2691, | |
| "eval_samples_per_second": 6.507, | |
| "eval_steps_per_second": 6.507, | |
| "step": 2450 | |
| }, | |
| { | |
| "epoch": 3.813953488372093, | |
| "grad_norm": 0.41259831190109253, | |
| "learning_rate": 1.6188354846056698e-05, | |
| "loss": 0.0102, | |
| "step": 2460 | |
| }, | |
| { | |
| "epoch": 3.8294573643410854, | |
| "grad_norm": 0.2538934648036957, | |
| "learning_rate": 1.579158899961575e-05, | |
| "loss": 0.0067, | |
| "step": 2470 | |
| }, | |
| { | |
| "epoch": 3.8449612403100772, | |
| "grad_norm": 0.7502545714378357, | |
| "learning_rate": 1.5398832130401637e-05, | |
| "loss": 0.0109, | |
| "step": 2480 | |
| }, | |
| { | |
| "epoch": 3.8604651162790695, | |
| "grad_norm": 0.22138559818267822, | |
| "learning_rate": 1.5010130266665807e-05, | |
| "loss": 0.012, | |
| "step": 2490 | |
| }, | |
| { | |
| "epoch": 3.875968992248062, | |
| "grad_norm": 0.4992906153202057, | |
| "learning_rate": 1.4625528961442591e-05, | |
| "loss": 0.0098, | |
| "step": 2500 | |
| }, | |
| { | |
| "epoch": 3.875968992248062, | |
| "eval_loss": 0.0469583123922348, | |
| "eval_runtime": 176.489, | |
| "eval_samples_per_second": 6.499, | |
| "eval_steps_per_second": 6.499, | |
| "step": 2500 | |
| }, | |
| { | |
| "epoch": 3.891472868217054, | |
| "grad_norm": 0.332923948764801, | |
| "learning_rate": 1.4245073287210608e-05, | |
| "loss": 0.0111, | |
| "step": 2510 | |
| }, | |
| { | |
| "epoch": 3.9069767441860463, | |
| "grad_norm": 0.5175971388816833, | |
| "learning_rate": 1.3868807830610653e-05, | |
| "loss": 0.0165, | |
| "step": 2520 | |
| }, | |
| { | |
| "epoch": 3.9224806201550386, | |
| "grad_norm": 0.21356546878814697, | |
| "learning_rate": 1.3496776687220514e-05, | |
| "loss": 0.0158, | |
| "step": 2530 | |
| }, | |
| { | |
| "epoch": 3.937984496124031, | |
| "grad_norm": 0.2587060034275055, | |
| "learning_rate": 1.3129023456387151e-05, | |
| "loss": 0.0049, | |
| "step": 2540 | |
| }, | |
| { | |
| "epoch": 3.953488372093023, | |
| "grad_norm": 0.22560270130634308, | |
| "learning_rate": 1.2765591236117324e-05, | |
| "loss": 0.0088, | |
| "step": 2550 | |
| }, | |
| { | |
| "epoch": 3.953488372093023, | |
| "eval_loss": 0.046130869537591934, | |
| "eval_runtime": 175.5058, | |
| "eval_samples_per_second": 6.535, | |
| "eval_steps_per_second": 6.535, | |
| "step": 2550 | |
| }, | |
| { | |
| "epoch": 3.9689922480620154, | |
| "grad_norm": 0.27367764711380005, | |
| "learning_rate": 1.240652261802669e-05, | |
| "loss": 0.0097, | |
| "step": 2560 | |
| }, | |
| { | |
| "epoch": 3.9844961240310077, | |
| "grad_norm": 0.47304585576057434, | |
| "learning_rate": 1.205185968234847e-05, | |
| "loss": 0.0143, | |
| "step": 2570 | |
| }, | |
| { | |
| "epoch": 4.0, | |
| "grad_norm": 0.7547179460525513, | |
| "learning_rate": 1.1701643993001916e-05, | |
| "loss": 0.0122, | |
| "step": 2580 | |
| }, | |
| { | |
| "epoch": 4.015503875968992, | |
| "grad_norm": 0.09287036210298538, | |
| "learning_rate": 1.1355916592721316e-05, | |
| "loss": 0.0043, | |
| "step": 2590 | |
| }, | |
| { | |
| "epoch": 4.0310077519379846, | |
| "grad_norm": 0.20291487872600555, | |
| "learning_rate": 1.1014717998246099e-05, | |
| "loss": 0.0057, | |
| "step": 2600 | |
| }, | |
| { | |
| "epoch": 4.0310077519379846, | |
| "eval_loss": 0.046690039336681366, | |
| "eval_runtime": 176.7343, | |
| "eval_samples_per_second": 6.49, | |
| "eval_steps_per_second": 6.49, | |
| "step": 2600 | |
| }, | |
| { | |
| "epoch": 4.046511627906977, | |
| "grad_norm": 0.11436055600643158, | |
| "learning_rate": 1.0678088195572517e-05, | |
| "loss": 0.0049, | |
| "step": 2610 | |
| }, | |
| { | |
| "epoch": 4.062015503875969, | |
| "grad_norm": 0.12677472829818726, | |
| "learning_rate": 1.0346066635267676e-05, | |
| "loss": 0.0038, | |
| "step": 2620 | |
| }, | |
| { | |
| "epoch": 4.077519379844961, | |
| "grad_norm": 0.0559503436088562, | |
| "learning_rate": 1.001869222784611e-05, | |
| "loss": 0.0064, | |
| "step": 2630 | |
| }, | |
| { | |
| "epoch": 4.093023255813954, | |
| "grad_norm": 0.20214763283729553, | |
| "learning_rate": 9.696003339209819e-06, | |
| "loss": 0.0081, | |
| "step": 2640 | |
| }, | |
| { | |
| "epoch": 4.108527131782946, | |
| "grad_norm": 0.6340795755386353, | |
| "learning_rate": 9.378037786152055e-06, | |
| "loss": 0.006, | |
| "step": 2650 | |
| }, | |
| { | |
| "epoch": 4.108527131782946, | |
| "eval_loss": 0.0505899041891098, | |
| "eval_runtime": 177.0154, | |
| "eval_samples_per_second": 6.48, | |
| "eval_steps_per_second": 6.48, | |
| "step": 2650 | |
| }, | |
| { | |
| "epoch": 4.124031007751938, | |
| "grad_norm": 0.2011650651693344, | |
| "learning_rate": 9.064832831925441e-06, | |
| "loss": 0.0036, | |
| "step": 2660 | |
| }, | |
| { | |
| "epoch": 4.1395348837209305, | |
| "grad_norm": 0.054464973509311676, | |
| "learning_rate": 8.756425181875028e-06, | |
| "loss": 0.0029, | |
| "step": 2670 | |
| }, | |
| { | |
| "epoch": 4.155038759689923, | |
| "grad_norm": 0.2652602195739746, | |
| "learning_rate": 8.452850979136617e-06, | |
| "loss": 0.004, | |
| "step": 2680 | |
| }, | |
| { | |
| "epoch": 4.170542635658915, | |
| "grad_norm": 0.04914356395602226, | |
| "learning_rate": 8.15414580040117e-06, | |
| "loss": 0.0018, | |
| "step": 2690 | |
| }, | |
| { | |
| "epoch": 4.186046511627907, | |
| "grad_norm": 0.16105438768863678, | |
| "learning_rate": 7.860344651745366e-06, | |
| "loss": 0.0058, | |
| "step": 2700 | |
| }, | |
| { | |
| "epoch": 4.186046511627907, | |
| "eval_loss": 0.0564822219312191, | |
| "eval_runtime": 177.0095, | |
| "eval_samples_per_second": 6.48, | |
| "eval_steps_per_second": 6.48, | |
| "step": 2700 | |
| }, | |
| { | |
| "epoch": 4.2015503875969, | |
| "grad_norm": 0.0615265853703022, | |
| "learning_rate": 7.571481964529226e-06, | |
| "loss": 0.004, | |
| "step": 2710 | |
| }, | |
| { | |
| "epoch": 4.217054263565892, | |
| "grad_norm": 0.23362399637699127, | |
| "learning_rate": 7.287591591360926e-06, | |
| "loss": 0.0021, | |
| "step": 2720 | |
| }, | |
| { | |
| "epoch": 4.232558139534884, | |
| "grad_norm": 0.14505736529827118, | |
| "learning_rate": 7.008706802129606e-06, | |
| "loss": 0.0066, | |
| "step": 2730 | |
| }, | |
| { | |
| "epoch": 4.248062015503876, | |
| "grad_norm": 0.0062782918103039265, | |
| "learning_rate": 6.734860280106292e-06, | |
| "loss": 0.0022, | |
| "step": 2740 | |
| }, | |
| { | |
| "epoch": 4.263565891472869, | |
| "grad_norm": 0.3620900809764862, | |
| "learning_rate": 6.466084118113735e-06, | |
| "loss": 0.0057, | |
| "step": 2750 | |
| }, | |
| { | |
| "epoch": 4.263565891472869, | |
| "eval_loss": 0.059196386486291885, | |
| "eval_runtime": 177.1851, | |
| "eval_samples_per_second": 6.473, | |
| "eval_steps_per_second": 6.473, | |
| "step": 2750 | |
| }, | |
| { | |
| "epoch": 4.27906976744186, | |
| "grad_norm": 0.2628999352455139, | |
| "learning_rate": 6.202409814765325e-06, | |
| "loss": 0.0048, | |
| "step": 2760 | |
| }, | |
| { | |
| "epoch": 4.294573643410852, | |
| "grad_norm": 0.21285632252693176, | |
| "learning_rate": 5.9438682707736725e-06, | |
| "loss": 0.0068, | |
| "step": 2770 | |
| }, | |
| { | |
| "epoch": 4.310077519379845, | |
| "grad_norm": 0.27160316705703735, | |
| "learning_rate": 5.690489785329301e-06, | |
| "loss": 0.0053, | |
| "step": 2780 | |
| }, | |
| { | |
| "epoch": 4.325581395348837, | |
| "grad_norm": 0.010958620347082615, | |
| "learning_rate": 5.442304052549752e-06, | |
| "loss": 0.0039, | |
| "step": 2790 | |
| }, | |
| { | |
| "epoch": 4.341085271317829, | |
| "grad_norm": 0.10000675171613693, | |
| "learning_rate": 5.199340157999733e-06, | |
| "loss": 0.0043, | |
| "step": 2800 | |
| }, | |
| { | |
| "epoch": 4.341085271317829, | |
| "eval_loss": 0.05947728455066681, | |
| "eval_runtime": 177.3633, | |
| "eval_samples_per_second": 6.467, | |
| "eval_steps_per_second": 6.467, | |
| "step": 2800 | |
| }, | |
| { | |
| "epoch": 4.3565891472868215, | |
| "grad_norm": 0.08185016363859177, | |
| "learning_rate": 4.961626575282396e-06, | |
| "loss": 0.0026, | |
| "step": 2810 | |
| }, | |
| { | |
| "epoch": 4.372093023255814, | |
| "grad_norm": 0.3243257403373718, | |
| "learning_rate": 4.7291911627025235e-06, | |
| "loss": 0.0051, | |
| "step": 2820 | |
| }, | |
| { | |
| "epoch": 4.387596899224806, | |
| "grad_norm": 0.22814767062664032, | |
| "learning_rate": 4.502061160001725e-06, | |
| "loss": 0.002, | |
| "step": 2830 | |
| }, | |
| { | |
| "epoch": 4.403100775193798, | |
| "grad_norm": 0.04655148833990097, | |
| "learning_rate": 4.280263185166078e-06, | |
| "loss": 0.0045, | |
| "step": 2840 | |
| }, | |
| { | |
| "epoch": 4.4186046511627906, | |
| "grad_norm": 0.3352436423301697, | |
| "learning_rate": 4.063823231306757e-06, | |
| "loss": 0.0043, | |
| "step": 2850 | |
| }, | |
| { | |
| "epoch": 4.4186046511627906, | |
| "eval_loss": 0.061167895793914795, | |
| "eval_runtime": 177.4729, | |
| "eval_samples_per_second": 6.463, | |
| "eval_steps_per_second": 6.463, | |
| "step": 2850 | |
| }, | |
| { | |
| "epoch": 4.434108527131783, | |
| "grad_norm": 0.03668573871254921, | |
| "learning_rate": 3.8527666636137885e-06, | |
| "loss": 0.0038, | |
| "step": 2860 | |
| }, | |
| { | |
| "epoch": 4.449612403100775, | |
| "grad_norm": 0.07714797556400299, | |
| "learning_rate": 3.6471182163834825e-06, | |
| "loss": 0.0047, | |
| "step": 2870 | |
| }, | |
| { | |
| "epoch": 4.465116279069767, | |
| "grad_norm": 0.25801295042037964, | |
| "learning_rate": 3.4469019901197054e-06, | |
| "loss": 0.0027, | |
| "step": 2880 | |
| }, | |
| { | |
| "epoch": 4.48062015503876, | |
| "grad_norm": 0.06319399923086166, | |
| "learning_rate": 3.252141448709495e-06, | |
| "loss": 0.0033, | |
| "step": 2890 | |
| }, | |
| { | |
| "epoch": 4.496124031007752, | |
| "grad_norm": 0.1602359414100647, | |
| "learning_rate": 3.0628594166732693e-06, | |
| "loss": 0.0055, | |
| "step": 2900 | |
| }, | |
| { | |
| "epoch": 4.496124031007752, | |
| "eval_loss": 0.061658285558223724, | |
| "eval_runtime": 177.7423, | |
| "eval_samples_per_second": 6.453, | |
| "eval_steps_per_second": 6.453, | |
| "step": 2900 | |
| }, | |
| { | |
| "epoch": 4.511627906976744, | |
| "grad_norm": 0.04129059985280037, | |
| "learning_rate": 2.8790780764899384e-06, | |
| "loss": 0.0056, | |
| "step": 2910 | |
| }, | |
| { | |
| "epoch": 4.5271317829457365, | |
| "grad_norm": 0.07882144302129745, | |
| "learning_rate": 2.700818965997315e-06, | |
| "loss": 0.0089, | |
| "step": 2920 | |
| }, | |
| { | |
| "epoch": 4.542635658914729, | |
| "grad_norm": 0.019726574420928955, | |
| "learning_rate": 2.528102975867991e-06, | |
| "loss": 0.002, | |
| "step": 2930 | |
| }, | |
| { | |
| "epoch": 4.558139534883721, | |
| "grad_norm": 0.036272820085287094, | |
| "learning_rate": 2.3609503471611284e-06, | |
| "loss": 0.0025, | |
| "step": 2940 | |
| }, | |
| { | |
| "epoch": 4.573643410852713, | |
| "grad_norm": 0.2767897844314575, | |
| "learning_rate": 2.1993806689503738e-06, | |
| "loss": 0.0029, | |
| "step": 2950 | |
| }, | |
| { | |
| "epoch": 4.573643410852713, | |
| "eval_loss": 0.06082376092672348, | |
| "eval_runtime": 177.687, | |
| "eval_samples_per_second": 6.455, | |
| "eval_steps_per_second": 6.455, | |
| "step": 2950 | |
| }, | |
| { | |
| "epoch": 4.589147286821706, | |
| "grad_norm": 0.02878037840127945, | |
| "learning_rate": 2.0434128760281167e-06, | |
| "loss": 0.0019, | |
| "step": 2960 | |
| }, | |
| { | |
| "epoch": 4.604651162790698, | |
| "grad_norm": 0.09522771835327148, | |
| "learning_rate": 1.893065246686504e-06, | |
| "loss": 0.002, | |
| "step": 2970 | |
| }, | |
| { | |
| "epoch": 4.62015503875969, | |
| "grad_norm": 0.007287740241736174, | |
| "learning_rate": 1.74835540057533e-06, | |
| "loss": 0.005, | |
| "step": 2980 | |
| }, | |
| { | |
| "epoch": 4.635658914728682, | |
| "grad_norm": 0.1458829939365387, | |
| "learning_rate": 1.6093002966371617e-06, | |
| "loss": 0.0017, | |
| "step": 2990 | |
| }, | |
| { | |
| "epoch": 4.651162790697675, | |
| "grad_norm": 0.18788699805736542, | |
| "learning_rate": 1.4759162311198783e-06, | |
| "loss": 0.0029, | |
| "step": 3000 | |
| }, | |
| { | |
| "epoch": 4.651162790697675, | |
| "eval_loss": 0.06145617738366127, | |
| "eval_runtime": 177.5386, | |
| "eval_samples_per_second": 6.461, | |
| "eval_steps_per_second": 6.461, | |
| "step": 3000 | |
| }, | |
| { | |
| "epoch": 4.666666666666667, | |
| "grad_norm": 0.06372984498739243, | |
| "learning_rate": 1.348218835666859e-06, | |
| "loss": 0.0037, | |
| "step": 3010 | |
| }, | |
| { | |
| "epoch": 4.682170542635659, | |
| "grad_norm": 0.02428070269525051, | |
| "learning_rate": 1.2262230754850445e-06, | |
| "loss": 0.0032, | |
| "step": 3020 | |
| }, | |
| { | |
| "epoch": 4.6976744186046515, | |
| "grad_norm": 0.06479329615831375, | |
| "learning_rate": 1.109943247591172e-06, | |
| "loss": 0.0059, | |
| "step": 3030 | |
| }, | |
| { | |
| "epoch": 4.713178294573644, | |
| "grad_norm": 0.18652325868606567, | |
| "learning_rate": 9.993929791362323e-07, | |
| "loss": 0.0041, | |
| "step": 3040 | |
| }, | |
| { | |
| "epoch": 4.728682170542635, | |
| "grad_norm": 0.1359170526266098, | |
| "learning_rate": 8.945852258084863e-07, | |
| "loss": 0.0039, | |
| "step": 3050 | |
| }, | |
| { | |
| "epoch": 4.728682170542635, | |
| "eval_loss": 0.061951201409101486, | |
| "eval_runtime": 177.3573, | |
| "eval_samples_per_second": 6.467, | |
| "eval_steps_per_second": 6.467, | |
| "step": 3050 | |
| }, | |
| { | |
| "epoch": 4.7441860465116275, | |
| "grad_norm": 0.5252036452293396, | |
| "learning_rate": 7.955322703151358e-07, | |
| "loss": 0.008, | |
| "step": 3060 | |
| }, | |
| { | |
| "epoch": 4.75968992248062, | |
| "grad_norm": 0.3898888826370239, | |
| "learning_rate": 7.022457209428901e-07, | |
| "loss": 0.0068, | |
| "step": 3070 | |
| }, | |
| { | |
| "epoch": 4.775193798449612, | |
| "grad_norm": 0.22847306728363037, | |
| "learning_rate": 6.147365101975666e-07, | |
| "loss": 0.003, | |
| "step": 3080 | |
| }, | |
| { | |
| "epoch": 4.790697674418604, | |
| "grad_norm": 0.12396875023841858, | |
| "learning_rate": 5.33014893522854e-07, | |
| "loss": 0.0046, | |
| "step": 3090 | |
| }, | |
| { | |
| "epoch": 4.8062015503875966, | |
| "grad_norm": 0.335860013961792, | |
| "learning_rate": 4.57090448098485e-07, | |
| "loss": 0.0034, | |
| "step": 3100 | |
| }, | |
| { | |
| "epoch": 4.8062015503875966, | |
| "eval_loss": 0.06197270005941391, | |
| "eval_runtime": 177.4972, | |
| "eval_samples_per_second": 6.462, | |
| "eval_steps_per_second": 6.462, | |
| "step": 3100 | |
| }, | |
| { | |
| "epoch": 4.821705426356589, | |
| "grad_norm": 0.04163197800517082, | |
| "learning_rate": 3.8697207171781714e-07, | |
| "loss": 0.0044, | |
| "step": 3110 | |
| }, | |
| { | |
| "epoch": 4.837209302325581, | |
| "grad_norm": 0.05860529839992523, | |
| "learning_rate": 3.2266798174512837e-07, | |
| "loss": 0.0052, | |
| "step": 3120 | |
| }, | |
| { | |
| "epoch": 4.852713178294573, | |
| "grad_norm": 0.08096914738416672, | |
| "learning_rate": 2.6418571415255387e-07, | |
| "loss": 0.0043, | |
| "step": 3130 | |
| }, | |
| { | |
| "epoch": 4.868217054263566, | |
| "grad_norm": 0.07322604209184647, | |
| "learning_rate": 2.1153212263695378e-07, | |
| "loss": 0.0051, | |
| "step": 3140 | |
| }, | |
| { | |
| "epoch": 4.883720930232558, | |
| "grad_norm": 0.3177713453769684, | |
| "learning_rate": 1.6471337781669982e-07, | |
| "loss": 0.0061, | |
| "step": 3150 | |
| }, | |
| { | |
| "epoch": 4.883720930232558, | |
| "eval_loss": 0.06184118613600731, | |
| "eval_runtime": 177.919, | |
| "eval_samples_per_second": 6.447, | |
| "eval_steps_per_second": 6.447, | |
| "step": 3150 | |
| }, | |
| { | |
| "epoch": 4.89922480620155, | |
| "grad_norm": 0.1601889431476593, | |
| "learning_rate": 1.237349665085097e-07, | |
| "loss": 0.0066, | |
| "step": 3160 | |
| }, | |
| { | |
| "epoch": 4.9147286821705425, | |
| "grad_norm": 0.1714223474264145, | |
| "learning_rate": 8.86016910844667e-08, | |
| "loss": 0.0065, | |
| "step": 3170 | |
| }, | |
| { | |
| "epoch": 4.930232558139535, | |
| "grad_norm": 0.9038301706314087, | |
| "learning_rate": 5.9317668909192323e-08, | |
| "loss": 0.006, | |
| "step": 3180 | |
| }, | |
| { | |
| "epoch": 4.945736434108527, | |
| "grad_norm": 0.08108743280172348, | |
| "learning_rate": 3.588633185730994e-08, | |
| "loss": 0.0082, | |
| "step": 3190 | |
| }, | |
| { | |
| "epoch": 4.961240310077519, | |
| "grad_norm": 0.058073218911886215, | |
| "learning_rate": 1.8310425911294283e-08, | |
| "loss": 0.0045, | |
| "step": 3200 | |
| }, | |
| { | |
| "epoch": 4.961240310077519, | |
| "eval_loss": 0.061884406954050064, | |
| "eval_runtime": 177.8676, | |
| "eval_samples_per_second": 6.449, | |
| "eval_steps_per_second": 6.449, | |
| "step": 3200 | |
| }, | |
| { | |
| "epoch": 4.976744186046512, | |
| "grad_norm": 0.48708316683769226, | |
| "learning_rate": 6.592010839612251e-09, | |
| "loss": 0.0074, | |
| "step": 3210 | |
| }, | |
| { | |
| "epoch": 4.992248062015504, | |
| "grad_norm": 0.044914498925209045, | |
| "learning_rate": 7.324599553770739e-10, | |
| "loss": 0.0016, | |
| "step": 3220 | |
| }, | |
| { | |
| "epoch": 5.0, | |
| "step": 3225, | |
| "total_flos": 8.096268812599296e+17, | |
| "train_loss": 0.04488553278003783, | |
| "train_runtime": 38904.1603, | |
| "train_samples_per_second": 1.326, | |
| "train_steps_per_second": 0.083 | |
| } | |
| ], | |
| "logging_steps": 10, | |
| "max_steps": 3225, | |
| "num_input_tokens_seen": 0, | |
| "num_train_epochs": 5, | |
| "save_steps": 50, | |
| "stateful_callbacks": { | |
| "TrainerControl": { | |
| "args": { | |
| "should_epoch_stop": false, | |
| "should_evaluate": false, | |
| "should_log": false, | |
| "should_save": true, | |
| "should_training_stop": true | |
| }, | |
| "attributes": {} | |
| } | |
| }, | |
| "total_flos": 8.096268812599296e+17, | |
| "train_batch_size": 1, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |
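
The state above follows the layout of a Hugging Face Trainer `trainer_state.json`: `log_history` interleaves training-loss entries with evaluation entries (those carrying `eval_loss`), and the top-level `best_metric` / `best_model_checkpoint` record the checkpoint selected during training. A minimal sketch, assuming the file is saved as `trainer_state.json` (the filename is an assumption, not stated in the source), of how one might load it and cross-check `best_metric` against the evaluation entries:

```python
import json

# Assumed filename; the source does not name the file on disk.
with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the evaluation records from log_history.
evals = [entry for entry in state["log_history"] if "eval_loss" in entry]

# The lowest eval_loss over the whole run should agree with best_metric.
best = min(evals, key=lambda entry: entry["eval_loss"])

print(f"best_metric (stored):   {state['best_metric']}")
print(f"best_model_checkpoint:  {state['best_model_checkpoint']}")
print(f"lowest eval_loss found: {best['eval_loss']:.5f} at step {best['step']}")
```

For this run the minimum should fall well before the final step: the eval losses late in training (roughly 0.046 to 0.062 between steps 2400 and 3200) sit above the stored `best_metric` of about 0.034 at checkpoint-1300, which is consistent with the later epochs overfitting while the best checkpoint was kept from earlier in training.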