| { | |
| "best_metric": 0.03119882568717003, | |
| "best_model_checkpoint": "saves/psy-course/Ministral-8B-Instruct-2410/train/fold5/checkpoint-1350", | |
| "epoch": 4.994228549442093, | |
| "eval_steps": 50, | |
| "global_step": 3245, | |
| "is_hyper_param_search": false, | |
| "is_local_process_zero": true, | |
| "is_world_process_zero": true, | |
| "log_history": [ | |
| { | |
| "epoch": 0.015390534821085032, | |
| "grad_norm": 2.7206928730010986, | |
| "learning_rate": 3.0769230769230774e-06, | |
| "loss": 1.336, | |
| "step": 10 | |
| }, | |
| { | |
| "epoch": 0.030781069642170065, | |
| "grad_norm": 2.8094022274017334, | |
| "learning_rate": 6.153846153846155e-06, | |
| "loss": 1.271, | |
| "step": 20 | |
| }, | |
| { | |
| "epoch": 0.0461716044632551, | |
| "grad_norm": 3.173107862472534, | |
| "learning_rate": 9.230769230769232e-06, | |
| "loss": 1.0347, | |
| "step": 30 | |
| }, | |
| { | |
| "epoch": 0.06156213928434013, | |
| "grad_norm": 2.2601401805877686, | |
| "learning_rate": 1.230769230769231e-05, | |
| "loss": 0.5543, | |
| "step": 40 | |
| }, | |
| { | |
| "epoch": 0.07695267410542517, | |
| "grad_norm": 1.1410682201385498, | |
| "learning_rate": 1.5384615384615387e-05, | |
| "loss": 0.2581, | |
| "step": 50 | |
| }, | |
| { | |
| "epoch": 0.07695267410542517, | |
| "eval_loss": 0.24166099727153778, | |
| "eval_runtime": 176.1734, | |
| "eval_samples_per_second": 6.562, | |
| "eval_steps_per_second": 6.562, | |
| "step": 50 | |
| }, | |
| { | |
| "epoch": 0.0923432089265102, | |
| "grad_norm": 1.4863005876541138, | |
| "learning_rate": 1.8461538461538465e-05, | |
| "loss": 0.1594, | |
| "step": 60 | |
| }, | |
| { | |
| "epoch": 0.10773374374759523, | |
| "grad_norm": 1.223218321800232, | |
| "learning_rate": 2.1538461538461542e-05, | |
| "loss": 0.1276, | |
| "step": 70 | |
| }, | |
| { | |
| "epoch": 0.12312427856868026, | |
| "grad_norm": 1.2243907451629639, | |
| "learning_rate": 2.461538461538462e-05, | |
| "loss": 0.1018, | |
| "step": 80 | |
| }, | |
| { | |
| "epoch": 0.1385148133897653, | |
| "grad_norm": 1.4652044773101807, | |
| "learning_rate": 2.7692307692307694e-05, | |
| "loss": 0.118, | |
| "step": 90 | |
| }, | |
| { | |
| "epoch": 0.15390534821085033, | |
| "grad_norm": 0.7412689924240112, | |
| "learning_rate": 3.0769230769230774e-05, | |
| "loss": 0.0853, | |
| "step": 100 | |
| }, | |
| { | |
| "epoch": 0.15390534821085033, | |
| "eval_loss": 0.06947828084230423, | |
| "eval_runtime": 176.0378, | |
| "eval_samples_per_second": 6.567, | |
| "eval_steps_per_second": 6.567, | |
| "step": 100 | |
| }, | |
| { | |
| "epoch": 0.16929588303193535, | |
| "grad_norm": 0.8059899806976318, | |
| "learning_rate": 3.384615384615385e-05, | |
| "loss": 0.0648, | |
| "step": 110 | |
| }, | |
| { | |
| "epoch": 0.1846864178530204, | |
| "grad_norm": 0.7964231371879578, | |
| "learning_rate": 3.692307692307693e-05, | |
| "loss": 0.0723, | |
| "step": 120 | |
| }, | |
| { | |
| "epoch": 0.2000769526741054, | |
| "grad_norm": 1.1007932424545288, | |
| "learning_rate": 4e-05, | |
| "loss": 0.0696, | |
| "step": 130 | |
| }, | |
| { | |
| "epoch": 0.21546748749519046, | |
| "grad_norm": 0.7742824554443359, | |
| "learning_rate": 4.3076923076923084e-05, | |
| "loss": 0.0786, | |
| "step": 140 | |
| }, | |
| { | |
| "epoch": 0.2308580223162755, | |
| "grad_norm": 0.9034144878387451, | |
| "learning_rate": 4.615384615384616e-05, | |
| "loss": 0.0606, | |
| "step": 150 | |
| }, | |
| { | |
| "epoch": 0.2308580223162755, | |
| "eval_loss": 0.05840924009680748, | |
| "eval_runtime": 176.2462, | |
| "eval_samples_per_second": 6.559, | |
| "eval_steps_per_second": 6.559, | |
| "step": 150 | |
| }, | |
| { | |
| "epoch": 0.24624855713736052, | |
| "grad_norm": 0.7853645086288452, | |
| "learning_rate": 4.923076923076924e-05, | |
| "loss": 0.0612, | |
| "step": 160 | |
| }, | |
| { | |
| "epoch": 0.26163909195844554, | |
| "grad_norm": 0.7825936079025269, | |
| "learning_rate": 5.230769230769231e-05, | |
| "loss": 0.0616, | |
| "step": 170 | |
| }, | |
| { | |
| "epoch": 0.2770296267795306, | |
| "grad_norm": 0.8186638951301575, | |
| "learning_rate": 5.538461538461539e-05, | |
| "loss": 0.053, | |
| "step": 180 | |
| }, | |
| { | |
| "epoch": 0.2924201616006156, | |
| "grad_norm": 0.368308961391449, | |
| "learning_rate": 5.846153846153847e-05, | |
| "loss": 0.0444, | |
| "step": 190 | |
| }, | |
| { | |
| "epoch": 0.30781069642170067, | |
| "grad_norm": 0.5306175351142883, | |
| "learning_rate": 6.153846153846155e-05, | |
| "loss": 0.0581, | |
| "step": 200 | |
| }, | |
| { | |
| "epoch": 0.30781069642170067, | |
| "eval_loss": 0.0543404258787632, | |
| "eval_runtime": 176.4225, | |
| "eval_samples_per_second": 6.552, | |
| "eval_steps_per_second": 6.552, | |
| "step": 200 | |
| }, | |
| { | |
| "epoch": 0.3232012312427857, | |
| "grad_norm": 0.7254672050476074, | |
| "learning_rate": 6.461538461538462e-05, | |
| "loss": 0.0555, | |
| "step": 210 | |
| }, | |
| { | |
| "epoch": 0.3385917660638707, | |
| "grad_norm": 0.8422511219978333, | |
| "learning_rate": 6.76923076923077e-05, | |
| "loss": 0.0495, | |
| "step": 220 | |
| }, | |
| { | |
| "epoch": 0.35398230088495575, | |
| "grad_norm": 0.3299127519130707, | |
| "learning_rate": 7.076923076923078e-05, | |
| "loss": 0.0378, | |
| "step": 230 | |
| }, | |
| { | |
| "epoch": 0.3693728357060408, | |
| "grad_norm": 0.45191943645477295, | |
| "learning_rate": 7.384615384615386e-05, | |
| "loss": 0.046, | |
| "step": 240 | |
| }, | |
| { | |
| "epoch": 0.38476337052712584, | |
| "grad_norm": 0.4278528392314911, | |
| "learning_rate": 7.692307692307693e-05, | |
| "loss": 0.0439, | |
| "step": 250 | |
| }, | |
| { | |
| "epoch": 0.38476337052712584, | |
| "eval_loss": 0.04281606152653694, | |
| "eval_runtime": 176.3946, | |
| "eval_samples_per_second": 6.553, | |
| "eval_steps_per_second": 6.553, | |
| "step": 250 | |
| }, | |
| { | |
| "epoch": 0.4001539053482108, | |
| "grad_norm": 0.5845353603363037, | |
| "learning_rate": 8e-05, | |
| "loss": 0.0398, | |
| "step": 260 | |
| }, | |
| { | |
| "epoch": 0.41554444016929587, | |
| "grad_norm": 0.42944037914276123, | |
| "learning_rate": 8.307692307692309e-05, | |
| "loss": 0.0415, | |
| "step": 270 | |
| }, | |
| { | |
| "epoch": 0.4309349749903809, | |
| "grad_norm": 0.5371568202972412, | |
| "learning_rate": 8.615384615384617e-05, | |
| "loss": 0.0534, | |
| "step": 280 | |
| }, | |
| { | |
| "epoch": 0.44632550981146596, | |
| "grad_norm": 0.4480336904525757, | |
| "learning_rate": 8.923076923076924e-05, | |
| "loss": 0.0521, | |
| "step": 290 | |
| }, | |
| { | |
| "epoch": 0.461716044632551, | |
| "grad_norm": 0.3966178894042969, | |
| "learning_rate": 9.230769230769232e-05, | |
| "loss": 0.0405, | |
| "step": 300 | |
| }, | |
| { | |
| "epoch": 0.461716044632551, | |
| "eval_loss": 0.046050868928432465, | |
| "eval_runtime": 176.5479, | |
| "eval_samples_per_second": 6.548, | |
| "eval_steps_per_second": 6.548, | |
| "step": 300 | |
| }, | |
| { | |
| "epoch": 0.477106579453636, | |
| "grad_norm": 0.4799180030822754, | |
| "learning_rate": 9.53846153846154e-05, | |
| "loss": 0.051, | |
| "step": 310 | |
| }, | |
| { | |
| "epoch": 0.49249711427472104, | |
| "grad_norm": 0.3221646249294281, | |
| "learning_rate": 9.846153846153848e-05, | |
| "loss": 0.0482, | |
| "step": 320 | |
| }, | |
| { | |
| "epoch": 0.5078876490958061, | |
| "grad_norm": 0.4233333468437195, | |
| "learning_rate": 9.999927654251793e-05, | |
| "loss": 0.0485, | |
| "step": 330 | |
| }, | |
| { | |
| "epoch": 0.5232781839168911, | |
| "grad_norm": 0.436909019947052, | |
| "learning_rate": 9.99934890082745e-05, | |
| "loss": 0.0456, | |
| "step": 340 | |
| }, | |
| { | |
| "epoch": 0.5386687187379762, | |
| "grad_norm": 0.4717266261577606, | |
| "learning_rate": 9.99819146097084e-05, | |
| "loss": 0.0428, | |
| "step": 350 | |
| }, | |
| { | |
| "epoch": 0.5386687187379762, | |
| "eval_loss": 0.04454828426241875, | |
| "eval_runtime": 176.4355, | |
| "eval_samples_per_second": 6.552, | |
| "eval_steps_per_second": 6.552, | |
| "step": 350 | |
| }, | |
| { | |
| "epoch": 0.5540592535590612, | |
| "grad_norm": 0.3741532564163208, | |
| "learning_rate": 9.996455468658355e-05, | |
| "loss": 0.0563, | |
| "step": 360 | |
| }, | |
| { | |
| "epoch": 0.5694497883801463, | |
| "grad_norm": 0.41537120938301086, | |
| "learning_rate": 9.994141124835203e-05, | |
| "loss": 0.037, | |
| "step": 370 | |
| }, | |
| { | |
| "epoch": 0.5848403232012312, | |
| "grad_norm": 0.6107032299041748, | |
| "learning_rate": 9.991248697392142e-05, | |
| "loss": 0.0391, | |
| "step": 380 | |
| }, | |
| { | |
| "epoch": 0.6002308580223162, | |
| "grad_norm": 0.4859623610973358, | |
| "learning_rate": 9.987778521134476e-05, | |
| "loss": 0.0346, | |
| "step": 390 | |
| }, | |
| { | |
| "epoch": 0.6156213928434013, | |
| "grad_norm": 0.29415926337242126, | |
| "learning_rate": 9.983730997743294e-05, | |
| "loss": 0.0485, | |
| "step": 400 | |
| }, | |
| { | |
| "epoch": 0.6156213928434013, | |
| "eval_loss": 0.043341998010873795, | |
| "eval_runtime": 176.3509, | |
| "eval_samples_per_second": 6.555, | |
| "eval_steps_per_second": 6.555, | |
| "step": 400 | |
| }, | |
| { | |
| "epoch": 0.6310119276644863, | |
| "grad_norm": 0.3123447299003601, | |
| "learning_rate": 9.979106595728978e-05, | |
| "loss": 0.0404, | |
| "step": 410 | |
| }, | |
| { | |
| "epoch": 0.6464024624855714, | |
| "grad_norm": 0.5193583369255066, | |
| "learning_rate": 9.973905850376977e-05, | |
| "loss": 0.0448, | |
| "step": 420 | |
| }, | |
| { | |
| "epoch": 0.6617929973066564, | |
| "grad_norm": 0.5041560530662537, | |
| "learning_rate": 9.968129363685833e-05, | |
| "loss": 0.0439, | |
| "step": 430 | |
| }, | |
| { | |
| "epoch": 0.6771835321277414, | |
| "grad_norm": 0.46721431612968445, | |
| "learning_rate": 9.961777804297514e-05, | |
| "loss": 0.0593, | |
| "step": 440 | |
| }, | |
| { | |
| "epoch": 0.6925740669488265, | |
| "grad_norm": 0.49613869190216064, | |
| "learning_rate": 9.954851907420009e-05, | |
| "loss": 0.0286, | |
| "step": 450 | |
| }, | |
| { | |
| "epoch": 0.6925740669488265, | |
| "eval_loss": 0.04055383428931236, | |
| "eval_runtime": 176.5544, | |
| "eval_samples_per_second": 6.548, | |
| "eval_steps_per_second": 6.548, | |
| "step": 450 | |
| }, | |
| { | |
| "epoch": 0.7079646017699115, | |
| "grad_norm": 0.16034717857837677, | |
| "learning_rate": 9.947352474742222e-05, | |
| "loss": 0.0358, | |
| "step": 460 | |
| }, | |
| { | |
| "epoch": 0.7233551365909965, | |
| "grad_norm": 0.3644324541091919, | |
| "learning_rate": 9.93928037434118e-05, | |
| "loss": 0.0325, | |
| "step": 470 | |
| }, | |
| { | |
| "epoch": 0.7387456714120816, | |
| "grad_norm": 0.665282666683197, | |
| "learning_rate": 9.930636540581556e-05, | |
| "loss": 0.0424, | |
| "step": 480 | |
| }, | |
| { | |
| "epoch": 0.7541362062331666, | |
| "grad_norm": 0.22589579224586487, | |
| "learning_rate": 9.921421974007506e-05, | |
| "loss": 0.0458, | |
| "step": 490 | |
| }, | |
| { | |
| "epoch": 0.7695267410542517, | |
| "grad_norm": 0.28870707750320435, | |
| "learning_rate": 9.911637741226849e-05, | |
| "loss": 0.0287, | |
| "step": 500 | |
| }, | |
| { | |
| "epoch": 0.7695267410542517, | |
| "eval_loss": 0.03942457213997841, | |
| "eval_runtime": 176.5603, | |
| "eval_samples_per_second": 6.547, | |
| "eval_steps_per_second": 6.547, | |
| "step": 500 | |
| }, | |
| { | |
| "epoch": 0.7849172758753367, | |
| "grad_norm": 0.43207934498786926, | |
| "learning_rate": 9.90128497478762e-05, | |
| "loss": 0.0307, | |
| "step": 510 | |
| }, | |
| { | |
| "epoch": 0.8003078106964217, | |
| "grad_norm": 0.29495513439178467, | |
| "learning_rate": 9.890364873046965e-05, | |
| "loss": 0.0427, | |
| "step": 520 | |
| }, | |
| { | |
| "epoch": 0.8156983455175068, | |
| "grad_norm": 0.40809714794158936, | |
| "learning_rate": 9.878878700032427e-05, | |
| "loss": 0.0535, | |
| "step": 530 | |
| }, | |
| { | |
| "epoch": 0.8310888803385917, | |
| "grad_norm": 0.3146349787712097, | |
| "learning_rate": 9.866827785295638e-05, | |
| "loss": 0.0423, | |
| "step": 540 | |
| }, | |
| { | |
| "epoch": 0.8464794151596768, | |
| "grad_norm": 0.24875745177268982, | |
| "learning_rate": 9.854213523758413e-05, | |
| "loss": 0.0426, | |
| "step": 550 | |
| }, | |
| { | |
| "epoch": 0.8464794151596768, | |
| "eval_loss": 0.035120412707328796, | |
| "eval_runtime": 176.5293, | |
| "eval_samples_per_second": 6.548, | |
| "eval_steps_per_second": 6.548, | |
| "step": 550 | |
| }, | |
| { | |
| "epoch": 0.8618699499807618, | |
| "grad_norm": 0.2829786241054535, | |
| "learning_rate": 9.841037375551294e-05, | |
| "loss": 0.0359, | |
| "step": 560 | |
| }, | |
| { | |
| "epoch": 0.8772604848018468, | |
| "grad_norm": 0.24912841618061066, | |
| "learning_rate": 9.827300865844527e-05, | |
| "loss": 0.0251, | |
| "step": 570 | |
| }, | |
| { | |
| "epoch": 0.8926510196229319, | |
| "grad_norm": 0.3470945954322815, | |
| "learning_rate": 9.813005584671522e-05, | |
| "loss": 0.0343, | |
| "step": 580 | |
| }, | |
| { | |
| "epoch": 0.9080415544440169, | |
| "grad_norm": 0.23871859908103943, | |
| "learning_rate": 9.79815318674481e-05, | |
| "loss": 0.0508, | |
| "step": 590 | |
| }, | |
| { | |
| "epoch": 0.923432089265102, | |
| "grad_norm": 0.29896286129951477, | |
| "learning_rate": 9.782745391264498e-05, | |
| "loss": 0.0338, | |
| "step": 600 | |
| }, | |
| { | |
| "epoch": 0.923432089265102, | |
| "eval_loss": 0.03507744148373604, | |
| "eval_runtime": 176.5725, | |
| "eval_samples_per_second": 6.547, | |
| "eval_steps_per_second": 6.547, | |
| "step": 600 | |
| }, | |
| { | |
| "epoch": 0.938822624086187, | |
| "grad_norm": 0.41820308566093445, | |
| "learning_rate": 9.76678398171927e-05, | |
| "loss": 0.0309, | |
| "step": 610 | |
| }, | |
| { | |
| "epoch": 0.954213158907272, | |
| "grad_norm": 0.28623440861701965, | |
| "learning_rate": 9.750270805679945e-05, | |
| "loss": 0.0429, | |
| "step": 620 | |
| }, | |
| { | |
| "epoch": 0.9696036937283571, | |
| "grad_norm": 0.45469871163368225, | |
| "learning_rate": 9.733207774585618e-05, | |
| "loss": 0.0426, | |
| "step": 630 | |
| }, | |
| { | |
| "epoch": 0.9849942285494421, | |
| "grad_norm": 0.2690652012825012, | |
| "learning_rate": 9.715596863522398e-05, | |
| "loss": 0.0332, | |
| "step": 640 | |
| }, | |
| { | |
| "epoch": 1.0003847633705272, | |
| "grad_norm": 0.2812378704547882, | |
| "learning_rate": 9.697440110994801e-05, | |
| "loss": 0.0301, | |
| "step": 650 | |
| }, | |
| { | |
| "epoch": 1.0003847633705272, | |
| "eval_loss": 0.036583807319402695, | |
| "eval_runtime": 176.5985, | |
| "eval_samples_per_second": 6.546, | |
| "eval_steps_per_second": 6.546, | |
| "step": 650 | |
| }, | |
| { | |
| "epoch": 1.0157752981916122, | |
| "grad_norm": 0.2946244180202484, | |
| "learning_rate": 9.678739618689775e-05, | |
| "loss": 0.0353, | |
| "step": 660 | |
| }, | |
| { | |
| "epoch": 1.0311658330126972, | |
| "grad_norm": 0.23367083072662354, | |
| "learning_rate": 9.659497551233431e-05, | |
| "loss": 0.0315, | |
| "step": 670 | |
| }, | |
| { | |
| "epoch": 1.0465563678337821, | |
| "grad_norm": 0.2701486647129059, | |
| "learning_rate": 9.639716135940485e-05, | |
| "loss": 0.0332, | |
| "step": 680 | |
| }, | |
| { | |
| "epoch": 1.0619469026548674, | |
| "grad_norm": 0.4597524404525757, | |
| "learning_rate": 9.619397662556435e-05, | |
| "loss": 0.043, | |
| "step": 690 | |
| }, | |
| { | |
| "epoch": 1.0773374374759523, | |
| "grad_norm": 0.3910011649131775, | |
| "learning_rate": 9.598544482992517e-05, | |
| "loss": 0.0339, | |
| "step": 700 | |
| }, | |
| { | |
| "epoch": 1.0773374374759523, | |
| "eval_loss": 0.037031788378953934, | |
| "eval_runtime": 176.6917, | |
| "eval_samples_per_second": 6.542, | |
| "eval_steps_per_second": 6.542, | |
| "step": 700 | |
| }, | |
| { | |
| "epoch": 1.0927279722970373, | |
| "grad_norm": 0.3245735168457031, | |
| "learning_rate": 9.57715901105348e-05, | |
| "loss": 0.0309, | |
| "step": 710 | |
| }, | |
| { | |
| "epoch": 1.1081185071181223, | |
| "grad_norm": 0.2758731245994568, | |
| "learning_rate": 9.555243722158158e-05, | |
| "loss": 0.031, | |
| "step": 720 | |
| }, | |
| { | |
| "epoch": 1.1235090419392073, | |
| "grad_norm": 0.22663231194019318, | |
| "learning_rate": 9.532801153052958e-05, | |
| "loss": 0.0266, | |
| "step": 730 | |
| }, | |
| { | |
| "epoch": 1.1388995767602923, | |
| "grad_norm": 0.5782523155212402, | |
| "learning_rate": 9.509833901518209e-05, | |
| "loss": 0.0361, | |
| "step": 740 | |
| }, | |
| { | |
| "epoch": 1.1542901115813775, | |
| "grad_norm": 0.3744742274284363, | |
| "learning_rate": 9.486344626067475e-05, | |
| "loss": 0.0269, | |
| "step": 750 | |
| }, | |
| { | |
| "epoch": 1.1542901115813775, | |
| "eval_loss": 0.035565976053476334, | |
| "eval_runtime": 176.7396, | |
| "eval_samples_per_second": 6.541, | |
| "eval_steps_per_second": 6.541, | |
| "step": 750 | |
| }, | |
| { | |
| "epoch": 1.1696806464024625, | |
| "grad_norm": 0.24146856367588043, | |
| "learning_rate": 9.462336045639815e-05, | |
| "loss": 0.0351, | |
| "step": 760 | |
| }, | |
| { | |
| "epoch": 1.1850711812235475, | |
| "grad_norm": 0.25894129276275635, | |
| "learning_rate": 9.437810939285068e-05, | |
| "loss": 0.0365, | |
| "step": 770 | |
| }, | |
| { | |
| "epoch": 1.2004617160446325, | |
| "grad_norm": 0.31034451723098755, | |
| "learning_rate": 9.412772145842165e-05, | |
| "loss": 0.0342, | |
| "step": 780 | |
| }, | |
| { | |
| "epoch": 1.2158522508657175, | |
| "grad_norm": 0.32283952832221985, | |
| "learning_rate": 9.387222563610535e-05, | |
| "loss": 0.035, | |
| "step": 790 | |
| }, | |
| { | |
| "epoch": 1.2312427856868027, | |
| "grad_norm": 0.22168344259262085, | |
| "learning_rate": 9.361165150014606e-05, | |
| "loss": 0.0276, | |
| "step": 800 | |
| }, | |
| { | |
| "epoch": 1.2312427856868027, | |
| "eval_loss": 0.034503836184740067, | |
| "eval_runtime": 176.7383, | |
| "eval_samples_per_second": 6.541, | |
| "eval_steps_per_second": 6.541, | |
| "step": 800 | |
| }, | |
| { | |
| "epoch": 1.2466333205078877, | |
| "grad_norm": 0.10351806879043579, | |
| "learning_rate": 9.334602921261492e-05, | |
| "loss": 0.0294, | |
| "step": 810 | |
| }, | |
| { | |
| "epoch": 1.2620238553289727, | |
| "grad_norm": 0.4381557106971741, | |
| "learning_rate": 9.307538951991852e-05, | |
| "loss": 0.0281, | |
| "step": 820 | |
| }, | |
| { | |
| "epoch": 1.2774143901500576, | |
| "grad_norm": 0.26532843708992004, | |
| "learning_rate": 9.279976374923988e-05, | |
| "loss": 0.0307, | |
| "step": 830 | |
| }, | |
| { | |
| "epoch": 1.2928049249711426, | |
| "grad_norm": 0.38265183568000793, | |
| "learning_rate": 9.251918380491238e-05, | |
| "loss": 0.0254, | |
| "step": 840 | |
| }, | |
| { | |
| "epoch": 1.3081954597922278, | |
| "grad_norm": 0.34733256697654724, | |
| "learning_rate": 9.223368216472668e-05, | |
| "loss": 0.0293, | |
| "step": 850 | |
| }, | |
| { | |
| "epoch": 1.3081954597922278, | |
| "eval_loss": 0.033552058041095734, | |
| "eval_runtime": 176.7151, | |
| "eval_samples_per_second": 6.542, | |
| "eval_steps_per_second": 6.542, | |
| "step": 850 | |
| }, | |
| { | |
| "epoch": 1.3235859946133128, | |
| "grad_norm": 0.23357051610946655, | |
| "learning_rate": 9.194329187617127e-05, | |
| "loss": 0.0261, | |
| "step": 860 | |
| }, | |
| { | |
| "epoch": 1.3389765294343978, | |
| "grad_norm": 0.2974819839000702, | |
| "learning_rate": 9.164804655260735e-05, | |
| "loss": 0.0264, | |
| "step": 870 | |
| }, | |
| { | |
| "epoch": 1.3543670642554828, | |
| "grad_norm": 0.1959962546825409, | |
| "learning_rate": 9.134798036937778e-05, | |
| "loss": 0.0292, | |
| "step": 880 | |
| }, | |
| { | |
| "epoch": 1.3697575990765678, | |
| "grad_norm": 0.31932732462882996, | |
| "learning_rate": 9.104312805985134e-05, | |
| "loss": 0.0362, | |
| "step": 890 | |
| }, | |
| { | |
| "epoch": 1.385148133897653, | |
| "grad_norm": 0.301615446805954, | |
| "learning_rate": 9.073352491140221e-05, | |
| "loss": 0.0216, | |
| "step": 900 | |
| }, | |
| { | |
| "epoch": 1.385148133897653, | |
| "eval_loss": 0.033949967473745346, | |
| "eval_runtime": 176.7728, | |
| "eval_samples_per_second": 6.539, | |
| "eval_steps_per_second": 6.539, | |
| "step": 900 | |
| }, | |
| { | |
| "epoch": 1.400538668718738, | |
| "grad_norm": 0.3114616572856903, | |
| "learning_rate": 9.041920676132543e-05, | |
| "loss": 0.0312, | |
| "step": 910 | |
| }, | |
| { | |
| "epoch": 1.415929203539823, | |
| "grad_norm": 0.2502421438694, | |
| "learning_rate": 9.010020999268854e-05, | |
| "loss": 0.0329, | |
| "step": 920 | |
| }, | |
| { | |
| "epoch": 1.431319738360908, | |
| "grad_norm": 0.2229824811220169, | |
| "learning_rate": 8.977657153012027e-05, | |
| "loss": 0.0283, | |
| "step": 930 | |
| }, | |
| { | |
| "epoch": 1.446710273181993, | |
| "grad_norm": 0.5619177222251892, | |
| "learning_rate": 8.944832883553633e-05, | |
| "loss": 0.0285, | |
| "step": 940 | |
| }, | |
| { | |
| "epoch": 1.4621008080030782, | |
| "grad_norm": 0.15374122560024261, | |
| "learning_rate": 8.91155199038032e-05, | |
| "loss": 0.036, | |
| "step": 950 | |
| }, | |
| { | |
| "epoch": 1.4621008080030782, | |
| "eval_loss": 0.03325812891125679, | |
| "eval_runtime": 176.7292, | |
| "eval_samples_per_second": 6.541, | |
| "eval_steps_per_second": 6.541, | |
| "step": 950 | |
| }, | |
| { | |
| "epoch": 1.4774913428241632, | |
| "grad_norm": 0.24114222824573517, | |
| "learning_rate": 8.877818325834006e-05, | |
| "loss": 0.0283, | |
| "step": 960 | |
| }, | |
| { | |
| "epoch": 1.4928818776452482, | |
| "grad_norm": 0.09029097855091095, | |
| "learning_rate": 8.843635794665961e-05, | |
| "loss": 0.0219, | |
| "step": 970 | |
| }, | |
| { | |
| "epoch": 1.5082724124663334, | |
| "grad_norm": 0.41925254464149475, | |
| "learning_rate": 8.809008353584829e-05, | |
| "loss": 0.0303, | |
| "step": 980 | |
| }, | |
| { | |
| "epoch": 1.5236629472874181, | |
| "grad_norm": 0.22500939667224884, | |
| "learning_rate": 8.773940010798627e-05, | |
| "loss": 0.0304, | |
| "step": 990 | |
| }, | |
| { | |
| "epoch": 1.5390534821085033, | |
| "grad_norm": 0.3530929386615753, | |
| "learning_rate": 8.738434825550785e-05, | |
| "loss": 0.0319, | |
| "step": 1000 | |
| }, | |
| { | |
| "epoch": 1.5390534821085033, | |
| "eval_loss": 0.03614142909646034, | |
| "eval_runtime": 176.7074, | |
| "eval_samples_per_second": 6.542, | |
| "eval_steps_per_second": 6.542, | |
| "step": 1000 | |
| }, | |
| { | |
| "epoch": 1.5544440169295883, | |
| "grad_norm": 0.3209225535392761, | |
| "learning_rate": 8.702496907650274e-05, | |
| "loss": 0.0337, | |
| "step": 1010 | |
| }, | |
| { | |
| "epoch": 1.5698345517506733, | |
| "grad_norm": 0.307273805141449, | |
| "learning_rate": 8.666130416995897e-05, | |
| "loss": 0.0311, | |
| "step": 1020 | |
| }, | |
| { | |
| "epoch": 1.5852250865717585, | |
| "grad_norm": 0.2404596507549286, | |
| "learning_rate": 8.629339563094758e-05, | |
| "loss": 0.0303, | |
| "step": 1030 | |
| }, | |
| { | |
| "epoch": 1.6006156213928433, | |
| "grad_norm": 0.40445876121520996, | |
| "learning_rate": 8.592128604575014e-05, | |
| "loss": 0.0271, | |
| "step": 1040 | |
| }, | |
| { | |
| "epoch": 1.6160061562139285, | |
| "grad_norm": 0.25610461831092834, | |
| "learning_rate": 8.554501848692921e-05, | |
| "loss": 0.0312, | |
| "step": 1050 | |
| }, | |
| { | |
| "epoch": 1.6160061562139285, | |
| "eval_loss": 0.032448507845401764, | |
| "eval_runtime": 176.7221, | |
| "eval_samples_per_second": 6.541, | |
| "eval_steps_per_second": 6.541, | |
| "step": 1050 | |
| }, | |
| { | |
| "epoch": 1.6313966910350135, | |
| "grad_norm": 0.17163515090942383, | |
| "learning_rate": 8.51646365083426e-05, | |
| "loss": 0.0215, | |
| "step": 1060 | |
| }, | |
| { | |
| "epoch": 1.6467872258560985, | |
| "grad_norm": 0.2591453194618225, | |
| "learning_rate": 8.478018414010191e-05, | |
| "loss": 0.0267, | |
| "step": 1070 | |
| }, | |
| { | |
| "epoch": 1.6621777606771835, | |
| "grad_norm": 0.4842621088027954, | |
| "learning_rate": 8.439170588347598e-05, | |
| "loss": 0.0223, | |
| "step": 1080 | |
| }, | |
| { | |
| "epoch": 1.6775682954982685, | |
| "grad_norm": 1.0715452432632446, | |
| "learning_rate": 8.399924670573969e-05, | |
| "loss": 0.0288, | |
| "step": 1090 | |
| }, | |
| { | |
| "epoch": 1.6929588303193537, | |
| "grad_norm": 0.1876203417778015, | |
| "learning_rate": 8.360285203496894e-05, | |
| "loss": 0.0333, | |
| "step": 1100 | |
| }, | |
| { | |
| "epoch": 1.6929588303193537, | |
| "eval_loss": 0.03803209587931633, | |
| "eval_runtime": 176.8671, | |
| "eval_samples_per_second": 6.536, | |
| "eval_steps_per_second": 6.536, | |
| "step": 1100 | |
| }, | |
| { | |
| "epoch": 1.7083493651404387, | |
| "grad_norm": 0.3779871165752411, | |
| "learning_rate": 8.320256775478228e-05, | |
| "loss": 0.0325, | |
| "step": 1110 | |
| }, | |
| { | |
| "epoch": 1.7237398999615237, | |
| "grad_norm": 0.2203492522239685, | |
| "learning_rate": 8.279844019902968e-05, | |
| "loss": 0.0237, | |
| "step": 1120 | |
| }, | |
| { | |
| "epoch": 1.7391304347826086, | |
| "grad_norm": 0.4551416039466858, | |
| "learning_rate": 8.239051614642934e-05, | |
| "loss": 0.0347, | |
| "step": 1130 | |
| }, | |
| { | |
| "epoch": 1.7545209696036936, | |
| "grad_norm": 0.2498016655445099, | |
| "learning_rate": 8.197884281515292e-05, | |
| "loss": 0.038, | |
| "step": 1140 | |
| }, | |
| { | |
| "epoch": 1.7699115044247788, | |
| "grad_norm": 0.16282948851585388, | |
| "learning_rate": 8.156346785735997e-05, | |
| "loss": 0.0228, | |
| "step": 1150 | |
| }, | |
| { | |
| "epoch": 1.7699115044247788, | |
| "eval_loss": 0.03305572643876076, | |
| "eval_runtime": 176.8475, | |
| "eval_samples_per_second": 6.537, | |
| "eval_steps_per_second": 6.537, | |
| "step": 1150 | |
| }, | |
| { | |
| "epoch": 1.7853020392458638, | |
| "grad_norm": 0.1479075402021408, | |
| "learning_rate": 8.114443935368198e-05, | |
| "loss": 0.0342, | |
| "step": 1160 | |
| }, | |
| { | |
| "epoch": 1.8006925740669488, | |
| "grad_norm": 0.2684459090232849, | |
| "learning_rate": 8.072180580765709e-05, | |
| "loss": 0.0235, | |
| "step": 1170 | |
| }, | |
| { | |
| "epoch": 1.8160831088880338, | |
| "grad_norm": 0.2641782760620117, | |
| "learning_rate": 8.02956161401155e-05, | |
| "loss": 0.0244, | |
| "step": 1180 | |
| }, | |
| { | |
| "epoch": 1.8314736437091188, | |
| "grad_norm": 0.2599339187145233, | |
| "learning_rate": 7.98659196835169e-05, | |
| "loss": 0.0295, | |
| "step": 1190 | |
| }, | |
| { | |
| "epoch": 1.846864178530204, | |
| "grad_norm": 0.12659390270709991, | |
| "learning_rate": 7.943276617624014e-05, | |
| "loss": 0.0217, | |
| "step": 1200 | |
| }, | |
| { | |
| "epoch": 1.846864178530204, | |
| "eval_loss": 0.03583278879523277, | |
| "eval_runtime": 176.7116, | |
| "eval_samples_per_second": 6.542, | |
| "eval_steps_per_second": 6.542, | |
| "step": 1200 | |
| }, | |
| { | |
| "epoch": 1.862254713351289, | |
| "grad_norm": 0.4283539056777954, | |
| "learning_rate": 7.899620575682578e-05, | |
| "loss": 0.0286, | |
| "step": 1210 | |
| }, | |
| { | |
| "epoch": 1.877645248172374, | |
| "grad_norm": 0.1819627583026886, | |
| "learning_rate": 7.85562889581725e-05, | |
| "loss": 0.0221, | |
| "step": 1220 | |
| }, | |
| { | |
| "epoch": 1.893035782993459, | |
| "grad_norm": 0.13169661164283752, | |
| "learning_rate": 7.811306670168788e-05, | |
| "loss": 0.0288, | |
| "step": 1230 | |
| }, | |
| { | |
| "epoch": 1.908426317814544, | |
| "grad_norm": 0.3242426812648773, | |
| "learning_rate": 7.766659029139393e-05, | |
| "loss": 0.0332, | |
| "step": 1240 | |
| }, | |
| { | |
| "epoch": 1.9238168526356292, | |
| "grad_norm": 0.38562312722206116, | |
| "learning_rate": 7.72169114079887e-05, | |
| "loss": 0.0272, | |
| "step": 1250 | |
| }, | |
| { | |
| "epoch": 1.9238168526356292, | |
| "eval_loss": 0.03237110376358032, | |
| "eval_runtime": 176.7523, | |
| "eval_samples_per_second": 6.54, | |
| "eval_steps_per_second": 6.54, | |
| "step": 1250 | |
| }, | |
| { | |
| "epoch": 1.9392073874567142, | |
| "grad_norm": 0.8540070056915283, | |
| "learning_rate": 7.676408210286407e-05, | |
| "loss": 0.0289, | |
| "step": 1260 | |
| }, | |
| { | |
| "epoch": 1.9545979222777992, | |
| "grad_norm": 0.23134426772594452, | |
| "learning_rate": 7.630815479208066e-05, | |
| "loss": 0.0284, | |
| "step": 1270 | |
| }, | |
| { | |
| "epoch": 1.9699884570988841, | |
| "grad_norm": 0.26955580711364746, | |
| "learning_rate": 7.584918225030053e-05, | |
| "loss": 0.0243, | |
| "step": 1280 | |
| }, | |
| { | |
| "epoch": 1.9853789919199691, | |
| "grad_norm": 0.22115769982337952, | |
| "learning_rate": 7.538721760467845e-05, | |
| "loss": 0.0319, | |
| "step": 1290 | |
| }, | |
| { | |
| "epoch": 2.0007695267410543, | |
| "grad_norm": 0.20801742374897003, | |
| "learning_rate": 7.49223143287122e-05, | |
| "loss": 0.0217, | |
| "step": 1300 | |
| }, | |
| { | |
| "epoch": 2.0007695267410543, | |
| "eval_loss": 0.03183276578783989, | |
| "eval_runtime": 176.8781, | |
| "eval_samples_per_second": 6.536, | |
| "eval_steps_per_second": 6.536, | |
| "step": 1300 | |
| }, | |
| { | |
| "epoch": 2.016160061562139, | |
| "grad_norm": 0.33942127227783203, | |
| "learning_rate": 7.445452623605307e-05, | |
| "loss": 0.0168, | |
| "step": 1310 | |
| }, | |
| { | |
| "epoch": 2.0315505963832243, | |
| "grad_norm": 0.3799745440483093, | |
| "learning_rate": 7.398390747427662e-05, | |
| "loss": 0.02, | |
| "step": 1320 | |
| }, | |
| { | |
| "epoch": 2.0469411312043095, | |
| "grad_norm": 0.15786810219287872, | |
| "learning_rate": 7.351051251861501e-05, | |
| "loss": 0.0175, | |
| "step": 1330 | |
| }, | |
| { | |
| "epoch": 2.0623316660253943, | |
| "grad_norm": 0.0710686594247818, | |
| "learning_rate": 7.303439616565146e-05, | |
| "loss": 0.0194, | |
| "step": 1340 | |
| }, | |
| { | |
| "epoch": 2.0777222008464795, | |
| "grad_norm": 0.2171255350112915, | |
| "learning_rate": 7.255561352697732e-05, | |
| "loss": 0.0175, | |
| "step": 1350 | |
| }, | |
| { | |
| "epoch": 2.0777222008464795, | |
| "eval_loss": 0.03119882568717003, | |
| "eval_runtime": 176.8301, | |
| "eval_samples_per_second": 6.537, | |
| "eval_steps_per_second": 6.537, | |
| "step": 1350 | |
| }, | |
| { | |
| "epoch": 2.0931127356675643, | |
| "grad_norm": 0.1561356782913208, | |
| "learning_rate": 7.207422002281281e-05, | |
| "loss": 0.017, | |
| "step": 1360 | |
| }, | |
| { | |
| "epoch": 2.1085032704886495, | |
| "grad_norm": 0.2565746307373047, | |
| "learning_rate": 7.159027137559197e-05, | |
| "loss": 0.0156, | |
| "step": 1370 | |
| }, | |
| { | |
| "epoch": 2.1238938053097347, | |
| "grad_norm": 0.2514025568962097, | |
| "learning_rate": 7.110382360351267e-05, | |
| "loss": 0.0127, | |
| "step": 1380 | |
| }, | |
| { | |
| "epoch": 2.1392843401308195, | |
| "grad_norm": 0.0824156105518341, | |
| "learning_rate": 7.061493301405244e-05, | |
| "loss": 0.0136, | |
| "step": 1390 | |
| }, | |
| { | |
| "epoch": 2.1546748749519047, | |
| "grad_norm": 0.6427235007286072, | |
| "learning_rate": 7.012365619745064e-05, | |
| "loss": 0.021, | |
| "step": 1400 | |
| }, | |
| { | |
| "epoch": 2.1546748749519047, | |
| "eval_loss": 0.03412633389234543, | |
| "eval_runtime": 176.7342, | |
| "eval_samples_per_second": 6.541, | |
| "eval_steps_per_second": 6.541, | |
| "step": 1400 | |
| }, | |
| { | |
| "epoch": 2.1700654097729895, | |
| "grad_norm": 0.08514008671045303, | |
| "learning_rate": 6.963005002015808e-05, | |
| "loss": 0.0164, | |
| "step": 1410 | |
| }, | |
| { | |
| "epoch": 2.1854559445940747, | |
| "grad_norm": 0.20415261387825012, | |
| "learning_rate": 6.91341716182545e-05, | |
| "loss": 0.0213, | |
| "step": 1420 | |
| }, | |
| { | |
| "epoch": 2.20084647941516, | |
| "grad_norm": 0.2123573273420334, | |
| "learning_rate": 6.863607839083507e-05, | |
| "loss": 0.0127, | |
| "step": 1430 | |
| }, | |
| { | |
| "epoch": 2.2162370142362446, | |
| "grad_norm": 0.39403170347213745, | |
| "learning_rate": 6.813582799336624e-05, | |
| "loss": 0.0191, | |
| "step": 1440 | |
| }, | |
| { | |
| "epoch": 2.23162754905733, | |
| "grad_norm": 0.0883919969201088, | |
| "learning_rate": 6.763347833101192e-05, | |
| "loss": 0.009, | |
| "step": 1450 | |
| }, | |
| { | |
| "epoch": 2.23162754905733, | |
| "eval_loss": 0.039153777062892914, | |
| "eval_runtime": 176.7616, | |
| "eval_samples_per_second": 6.54, | |
| "eval_steps_per_second": 6.54, | |
| "step": 1450 | |
| }, | |
| { | |
| "epoch": 2.2470180838784146, | |
| "grad_norm": 0.5361382365226746, | |
| "learning_rate": 6.712908755193094e-05, | |
| "loss": 0.0191, | |
| "step": 1460 | |
| }, | |
| { | |
| "epoch": 2.2624086186995, | |
| "grad_norm": 0.14046382904052734, | |
| "learning_rate": 6.662271404054621e-05, | |
| "loss": 0.0236, | |
| "step": 1470 | |
| }, | |
| { | |
| "epoch": 2.2777991535205846, | |
| "grad_norm": 0.25211387872695923, | |
| "learning_rate": 6.611441641078657e-05, | |
| "loss": 0.0183, | |
| "step": 1480 | |
| }, | |
| { | |
| "epoch": 2.29318968834167, | |
| "grad_norm": 0.21686653792858124, | |
| "learning_rate": 6.560425349930208e-05, | |
| "loss": 0.0205, | |
| "step": 1490 | |
| }, | |
| { | |
| "epoch": 2.308580223162755, | |
| "grad_norm": 0.6495646238327026, | |
| "learning_rate": 6.509228435865357e-05, | |
| "loss": 0.0186, | |
| "step": 1500 | |
| }, | |
| { | |
| "epoch": 2.308580223162755, | |
| "eval_loss": 0.034755948930978775, | |
| "eval_runtime": 176.7081, | |
| "eval_samples_per_second": 6.542, | |
| "eval_steps_per_second": 6.542, | |
| "step": 1500 | |
| }, | |
| { | |
| "epoch": 2.32397075798384, | |
| "grad_norm": 0.21111302077770233, | |
| "learning_rate": 6.45785682504772e-05, | |
| "loss": 0.0201, | |
| "step": 1510 | |
| }, | |
| { | |
| "epoch": 2.339361292804925, | |
| "grad_norm": 0.6884567141532898, | |
| "learning_rate": 6.406316463862467e-05, | |
| "loss": 0.0177, | |
| "step": 1520 | |
| }, | |
| { | |
| "epoch": 2.3547518276260098, | |
| "grad_norm": 0.09983156621456146, | |
| "learning_rate": 6.35461331822803e-05, | |
| "loss": 0.0145, | |
| "step": 1530 | |
| }, | |
| { | |
| "epoch": 2.370142362447095, | |
| "grad_norm": 0.23539000749588013, | |
| "learning_rate": 6.302753372905515e-05, | |
| "loss": 0.0176, | |
| "step": 1540 | |
| }, | |
| { | |
| "epoch": 2.38553289726818, | |
| "grad_norm": 0.5075021982192993, | |
| "learning_rate": 6.250742630805971e-05, | |
| "loss": 0.0163, | |
| "step": 1550 | |
| }, | |
| { | |
| "epoch": 2.38553289726818, | |
| "eval_loss": 0.03947930037975311, | |
| "eval_runtime": 176.7236, | |
| "eval_samples_per_second": 6.541, | |
| "eval_steps_per_second": 6.541, | |
| "step": 1550 | |
| }, | |
| { | |
| "epoch": 2.400923432089265, | |
| "grad_norm": 0.22470280528068542, | |
| "learning_rate": 6.198587112295526e-05, | |
| "loss": 0.0146, | |
| "step": 1560 | |
| }, | |
| { | |
| "epoch": 2.41631396691035, | |
| "grad_norm": 0.3534475862979889, | |
| "learning_rate": 6.14629285449852e-05, | |
| "loss": 0.0176, | |
| "step": 1570 | |
| }, | |
| { | |
| "epoch": 2.431704501731435, | |
| "grad_norm": 0.11134660243988037, | |
| "learning_rate": 6.093865910598688e-05, | |
| "loss": 0.0198, | |
| "step": 1580 | |
| }, | |
| { | |
| "epoch": 2.44709503655252, | |
| "grad_norm": 0.19116228818893433, | |
| "learning_rate": 6.0413123491385025e-05, | |
| "loss": 0.018, | |
| "step": 1590 | |
| }, | |
| { | |
| "epoch": 2.4624855713736054, | |
| "grad_norm": 0.181107759475708, | |
| "learning_rate": 5.988638253316713e-05, | |
| "loss": 0.0123, | |
| "step": 1600 | |
| }, | |
| { | |
| "epoch": 2.4624855713736054, | |
| "eval_loss": 0.035887934267520905, | |
| "eval_runtime": 176.7823, | |
| "eval_samples_per_second": 6.539, | |
| "eval_steps_per_second": 6.539, | |
| "step": 1600 | |
| }, | |
| { | |
| "epoch": 2.47787610619469, | |
| "grad_norm": 0.34913918375968933, | |
| "learning_rate": 5.935849720284206e-05, | |
| "loss": 0.0151, | |
| "step": 1610 | |
| }, | |
| { | |
| "epoch": 2.4932666410157753, | |
| "grad_norm": 0.22570717334747314, | |
| "learning_rate": 5.8829528604382436e-05, | |
| "loss": 0.0213, | |
| "step": 1620 | |
| }, | |
| { | |
| "epoch": 2.50865717583686, | |
| "grad_norm": 0.09938590228557587, | |
| "learning_rate": 5.829953796715176e-05, | |
| "loss": 0.0142, | |
| "step": 1630 | |
| }, | |
| { | |
| "epoch": 2.5240477106579453, | |
| "grad_norm": 0.43842482566833496, | |
| "learning_rate": 5.7768586638816924e-05, | |
| "loss": 0.0209, | |
| "step": 1640 | |
| }, | |
| { | |
| "epoch": 2.5394382454790305, | |
| "grad_norm": 0.1973314881324768, | |
| "learning_rate": 5.723673607824705e-05, | |
| "loss": 0.0196, | |
| "step": 1650 | |
| }, | |
| { | |
| "epoch": 2.5394382454790305, | |
| "eval_loss": 0.034880444407463074, | |
| "eval_runtime": 176.7697, | |
| "eval_samples_per_second": 6.54, | |
| "eval_steps_per_second": 6.54, | |
| "step": 1650 | |
| }, | |
| { | |
| "epoch": 2.5548287803001153, | |
| "grad_norm": 0.3357468247413635, | |
| "learning_rate": 5.670404784839953e-05, | |
| "loss": 0.0128, | |
| "step": 1660 | |
| }, | |
| { | |
| "epoch": 2.5702193151212005, | |
| "grad_norm": 0.3688250482082367, | |
| "learning_rate": 5.6170583609193984e-05, | |
| "loss": 0.0172, | |
| "step": 1670 | |
| }, | |
| { | |
| "epoch": 2.5856098499422853, | |
| "grad_norm": 0.3962181508541107, | |
| "learning_rate": 5.56364051103749e-05, | |
| "loss": 0.0221, | |
| "step": 1680 | |
| }, | |
| { | |
| "epoch": 2.6010003847633705, | |
| "grad_norm": 0.17332716286182404, | |
| "learning_rate": 5.5101574184364056e-05, | |
| "loss": 0.0179, | |
| "step": 1690 | |
| }, | |
| { | |
| "epoch": 2.6163909195844557, | |
| "grad_norm": 0.3918059468269348, | |
| "learning_rate": 5.4566152739103116e-05, | |
| "loss": 0.0201, | |
| "step": 1700 | |
| }, | |
| { | |
| "epoch": 2.6163909195844557, | |
| "eval_loss": 0.03516693413257599, | |
| "eval_runtime": 176.7671, | |
| "eval_samples_per_second": 6.54, | |
| "eval_steps_per_second": 6.54, | |
| "step": 1700 | |
| }, | |
| { | |
| "epoch": 2.6317814544055405, | |
| "grad_norm": 0.5600950717926025, | |
| "learning_rate": 5.403020275088786e-05, | |
| "loss": 0.0177, | |
| "step": 1710 | |
| }, | |
| { | |
| "epoch": 2.6471719892266257, | |
| "grad_norm": 0.5886516571044922, | |
| "learning_rate": 5.349378625719408e-05, | |
| "loss": 0.0247, | |
| "step": 1720 | |
| }, | |
| { | |
| "epoch": 2.6625625240477104, | |
| "grad_norm": 0.48264405131340027, | |
| "learning_rate": 5.2956965349496743e-05, | |
| "loss": 0.0188, | |
| "step": 1730 | |
| }, | |
| { | |
| "epoch": 2.6779530588687956, | |
| "grad_norm": 0.14684589207172394, | |
| "learning_rate": 5.241980216608261e-05, | |
| "loss": 0.0143, | |
| "step": 1740 | |
| }, | |
| { | |
| "epoch": 2.693343593689881, | |
| "grad_norm": 0.08048424124717712, | |
| "learning_rate": 5.188235888485773e-05, | |
| "loss": 0.0223, | |
| "step": 1750 | |
| }, | |
| { | |
| "epoch": 2.693343593689881, | |
| "eval_loss": 0.03291754424571991, | |
| "eval_runtime": 176.7369, | |
| "eval_samples_per_second": 6.541, | |
| "eval_steps_per_second": 6.541, | |
| "step": 1750 | |
| }, | |
| { | |
| "epoch": 2.7087341285109656, | |
| "grad_norm": 0.2262515276670456, | |
| "learning_rate": 5.13446977161501e-05, | |
| "loss": 0.0127, | |
| "step": 1760 | |
| }, | |
| { | |
| "epoch": 2.724124663332051, | |
| "grad_norm": 0.46145889163017273, | |
| "learning_rate": 5.0806880895508766e-05, | |
| "loss": 0.0179, | |
| "step": 1770 | |
| }, | |
| { | |
| "epoch": 2.7395151981531356, | |
| "grad_norm": 0.7554391622543335, | |
| "learning_rate": 5.026897067649976e-05, | |
| "loss": 0.0234, | |
| "step": 1780 | |
| }, | |
| { | |
| "epoch": 2.754905732974221, | |
| "grad_norm": 0.1861138641834259, | |
| "learning_rate": 4.973102932350026e-05, | |
| "loss": 0.0155, | |
| "step": 1790 | |
| }, | |
| { | |
| "epoch": 2.770296267795306, | |
| "grad_norm": 0.2975349724292755, | |
| "learning_rate": 4.9193119104491245e-05, | |
| "loss": 0.0192, | |
| "step": 1800 | |
| }, | |
| { | |
| "epoch": 2.770296267795306, | |
| "eval_loss": 0.03241715207695961, | |
| "eval_runtime": 176.853, | |
| "eval_samples_per_second": 6.537, | |
| "eval_steps_per_second": 6.537, | |
| "step": 1800 | |
| }, | |
| { | |
| "epoch": 2.785686802616391, | |
| "grad_norm": 0.42107677459716797, | |
| "learning_rate": 4.86553022838499e-05, | |
| "loss": 0.0181, | |
| "step": 1810 | |
| }, | |
| { | |
| "epoch": 2.801077337437476, | |
| "grad_norm": 0.351321280002594, | |
| "learning_rate": 4.8117641115142285e-05, | |
| "loss": 0.022, | |
| "step": 1820 | |
| }, | |
| { | |
| "epoch": 2.8164678722585608, | |
| "grad_norm": 0.4118291735649109, | |
| "learning_rate": 4.758019783391741e-05, | |
| "loss": 0.0191, | |
| "step": 1830 | |
| }, | |
| { | |
| "epoch": 2.831858407079646, | |
| "grad_norm": 0.30672571063041687, | |
| "learning_rate": 4.704303465050327e-05, | |
| "loss": 0.0197, | |
| "step": 1840 | |
| }, | |
| { | |
| "epoch": 2.847248941900731, | |
| "grad_norm": 0.1432432234287262, | |
| "learning_rate": 4.6506213742805924e-05, | |
| "loss": 0.0191, | |
| "step": 1850 | |
| }, | |
| { | |
| "epoch": 2.847248941900731, | |
| "eval_loss": 0.032423555850982666, | |
| "eval_runtime": 176.8384, | |
| "eval_samples_per_second": 6.537, | |
| "eval_steps_per_second": 6.537, | |
| "step": 1850 | |
| }, | |
| { | |
| "epoch": 2.862639476721816, | |
| "grad_norm": 0.28516244888305664, | |
| "learning_rate": 4.5969797249112146e-05, | |
| "loss": 0.014, | |
| "step": 1860 | |
| }, | |
| { | |
| "epoch": 2.878030011542901, | |
| "grad_norm": 0.3050757348537445, | |
| "learning_rate": 4.54338472608969e-05, | |
| "loss": 0.0225, | |
| "step": 1870 | |
| }, | |
| { | |
| "epoch": 2.893420546363986, | |
| "grad_norm": 0.15351629257202148, | |
| "learning_rate": 4.489842581563597e-05, | |
| "loss": 0.0149, | |
| "step": 1880 | |
| }, | |
| { | |
| "epoch": 2.908811081185071, | |
| "grad_norm": 0.12462982535362244, | |
| "learning_rate": 4.436359488962511e-05, | |
| "loss": 0.0163, | |
| "step": 1890 | |
| }, | |
| { | |
| "epoch": 2.9242016160061564, | |
| "grad_norm": 0.2019914984703064, | |
| "learning_rate": 4.382941639080602e-05, | |
| "loss": 0.0162, | |
| "step": 1900 | |
| }, | |
| { | |
| "epoch": 2.9242016160061564, | |
| "eval_loss": 0.03359019756317139, | |
| "eval_runtime": 176.7494, | |
| "eval_samples_per_second": 6.54, | |
| "eval_steps_per_second": 6.54, | |
| "step": 1900 | |
| }, | |
| { | |
| "epoch": 2.939592150827241, | |
| "grad_norm": 0.23269765079021454, | |
| "learning_rate": 4.3295952151600475e-05, | |
| "loss": 0.0204, | |
| "step": 1910 | |
| }, | |
| { | |
| "epoch": 2.9549826856483263, | |
| "grad_norm": 0.4043470323085785, | |
| "learning_rate": 4.276326392175296e-05, | |
| "loss": 0.0199, | |
| "step": 1920 | |
| }, | |
| { | |
| "epoch": 2.970373220469411, | |
| "grad_norm": 0.12517714500427246, | |
| "learning_rate": 4.2231413361183095e-05, | |
| "loss": 0.0145, | |
| "step": 1930 | |
| }, | |
| { | |
| "epoch": 2.9857637552904963, | |
| "grad_norm": 0.2903136610984802, | |
| "learning_rate": 4.1700462032848244e-05, | |
| "loss": 0.015, | |
| "step": 1940 | |
| }, | |
| { | |
| "epoch": 3.0011542901115815, | |
| "grad_norm": 0.15643754601478577, | |
| "learning_rate": 4.117047139561758e-05, | |
| "loss": 0.0238, | |
| "step": 1950 | |
| }, | |
| { | |
| "epoch": 3.0011542901115815, | |
| "eval_loss": 0.034210145473480225, | |
| "eval_runtime": 176.8536, | |
| "eval_samples_per_second": 6.536, | |
| "eval_steps_per_second": 6.536, | |
| "step": 1950 | |
| }, | |
| { | |
| "epoch": 3.0165448249326663, | |
| "grad_norm": 0.08787263184785843, | |
| "learning_rate": 4.0641502797157966e-05, | |
| "loss": 0.0064, | |
| "step": 1960 | |
| }, | |
| { | |
| "epoch": 3.0319353597537515, | |
| "grad_norm": 0.04949400946497917, | |
| "learning_rate": 4.011361746683288e-05, | |
| "loss": 0.0096, | |
| "step": 1970 | |
| }, | |
| { | |
| "epoch": 3.0473258945748363, | |
| "grad_norm": 0.4407104253768921, | |
| "learning_rate": 3.958687650861499e-05, | |
| "loss": 0.009, | |
| "step": 1980 | |
| }, | |
| { | |
| "epoch": 3.0627164293959215, | |
| "grad_norm": 0.28349077701568604, | |
| "learning_rate": 3.906134089401313e-05, | |
| "loss": 0.0055, | |
| "step": 1990 | |
| }, | |
| { | |
| "epoch": 3.0781069642170067, | |
| "grad_norm": 0.11285337805747986, | |
| "learning_rate": 3.853707145501482e-05, | |
| "loss": 0.0092, | |
| "step": 2000 | |
| }, | |
| { | |
| "epoch": 3.0781069642170067, | |
| "eval_loss": 0.03811664134263992, | |
| "eval_runtime": 176.7856, | |
| "eval_samples_per_second": 6.539, | |
| "eval_steps_per_second": 6.539, | |
| "step": 2000 | |
| }, | |
| { | |
| "epoch": 3.0934974990380915, | |
| "grad_norm": 0.40362584590911865, | |
| "learning_rate": 3.801412887704475e-05, | |
| "loss": 0.0049, | |
| "step": 2010 | |
| }, | |
| { | |
| "epoch": 3.1088880338591767, | |
| "grad_norm": 0.15941868722438812, | |
| "learning_rate": 3.749257369194029e-05, | |
| "loss": 0.0093, | |
| "step": 2020 | |
| }, | |
| { | |
| "epoch": 3.1242785686802614, | |
| "grad_norm": 0.06539350003004074, | |
| "learning_rate": 3.697246627094487e-05, | |
| "loss": 0.0082, | |
| "step": 2030 | |
| }, | |
| { | |
| "epoch": 3.1396691035013466, | |
| "grad_norm": 0.08355661481618881, | |
| "learning_rate": 3.6453866817719726e-05, | |
| "loss": 0.0088, | |
| "step": 2040 | |
| }, | |
| { | |
| "epoch": 3.155059638322432, | |
| "grad_norm": 0.5320833325386047, | |
| "learning_rate": 3.593683536137533e-05, | |
| "loss": 0.0105, | |
| "step": 2050 | |
| }, | |
| { | |
| "epoch": 3.155059638322432, | |
| "eval_loss": 0.040933817625045776, | |
| "eval_runtime": 176.6988, | |
| "eval_samples_per_second": 6.542, | |
| "eval_steps_per_second": 6.542, | |
| "step": 2050 | |
| }, | |
| { | |
| "epoch": 3.1704501731435166, | |
| "grad_norm": 0.049008555710315704, | |
| "learning_rate": 3.542143174952282e-05, | |
| "loss": 0.0043, | |
| "step": 2060 | |
| }, | |
| { | |
| "epoch": 3.185840707964602, | |
| "grad_norm": 0.42514464259147644, | |
| "learning_rate": 3.490771564134643e-05, | |
| "loss": 0.0092, | |
| "step": 2070 | |
| }, | |
| { | |
| "epoch": 3.2012312427856866, | |
| "grad_norm": 0.24654439091682434, | |
| "learning_rate": 3.4395746500697925e-05, | |
| "loss": 0.0115, | |
| "step": 2080 | |
| }, | |
| { | |
| "epoch": 3.216621777606772, | |
| "grad_norm": 0.408346027135849, | |
| "learning_rate": 3.388558358921343e-05, | |
| "loss": 0.0078, | |
| "step": 2090 | |
| }, | |
| { | |
| "epoch": 3.232012312427857, | |
| "grad_norm": 0.2585791051387787, | |
| "learning_rate": 3.337728595945378e-05, | |
| "loss": 0.0096, | |
| "step": 2100 | |
| }, | |
| { | |
| "epoch": 3.232012312427857, | |
| "eval_loss": 0.040773503482341766, | |
| "eval_runtime": 176.7674, | |
| "eval_samples_per_second": 6.54, | |
| "eval_steps_per_second": 6.54, | |
| "step": 2100 | |
| }, | |
| { | |
| "epoch": 3.247402847248942, | |
| "grad_norm": 0.14975428581237793, | |
| "learning_rate": 3.287091244806907e-05, | |
| "loss": 0.0088, | |
| "step": 2110 | |
| }, | |
| { | |
| "epoch": 3.262793382070027, | |
| "grad_norm": 0.47811585664749146, | |
| "learning_rate": 3.2366521668988106e-05, | |
| "loss": 0.0091, | |
| "step": 2120 | |
| }, | |
| { | |
| "epoch": 3.2781839168911118, | |
| "grad_norm": 0.4079134166240692, | |
| "learning_rate": 3.186417200663379e-05, | |
| "loss": 0.0075, | |
| "step": 2130 | |
| }, | |
| { | |
| "epoch": 3.293574451712197, | |
| "grad_norm": 0.14326900243759155, | |
| "learning_rate": 3.136392160916495e-05, | |
| "loss": 0.0072, | |
| "step": 2140 | |
| }, | |
| { | |
| "epoch": 3.308964986533282, | |
| "grad_norm": 0.4556676149368286, | |
| "learning_rate": 3.086582838174551e-05, | |
| "loss": 0.0078, | |
| "step": 2150 | |
| }, | |
| { | |
| "epoch": 3.308964986533282, | |
| "eval_loss": 0.04120837152004242, | |
| "eval_runtime": 176.7919, | |
| "eval_samples_per_second": 6.539, | |
| "eval_steps_per_second": 6.539, | |
| "step": 2150 | |
| }, | |
| { | |
| "epoch": 3.324355521354367, | |
| "grad_norm": 0.22372718155384064, | |
| "learning_rate": 3.036994997984194e-05, | |
| "loss": 0.0074, | |
| "step": 2160 | |
| }, | |
| { | |
| "epoch": 3.339746056175452, | |
| "grad_norm": 0.2346556931734085, | |
| "learning_rate": 2.9876343802549357e-05, | |
| "loss": 0.011, | |
| "step": 2170 | |
| }, | |
| { | |
| "epoch": 3.355136590996537, | |
| "grad_norm": 0.38766953349113464, | |
| "learning_rate": 2.938506698594755e-05, | |
| "loss": 0.0101, | |
| "step": 2180 | |
| }, | |
| { | |
| "epoch": 3.370527125817622, | |
| "grad_norm": 0.23224134743213654, | |
| "learning_rate": 2.8896176396487334e-05, | |
| "loss": 0.0062, | |
| "step": 2190 | |
| }, | |
| { | |
| "epoch": 3.3859176606387074, | |
| "grad_norm": 0.06096711382269859, | |
| "learning_rate": 2.8409728624408067e-05, | |
| "loss": 0.012, | |
| "step": 2200 | |
| }, | |
| { | |
| "epoch": 3.3859176606387074, | |
| "eval_loss": 0.04045013338327408, | |
| "eval_runtime": 176.7461, | |
| "eval_samples_per_second": 6.54, | |
| "eval_steps_per_second": 6.54, | |
| "step": 2200 | |
| }, | |
| { | |
| "epoch": 3.401308195459792, | |
| "grad_norm": 0.08051846921443939, | |
| "learning_rate": 2.7925779977187215e-05, | |
| "loss": 0.0073, | |
| "step": 2210 | |
| }, | |
| { | |
| "epoch": 3.4166987302808773, | |
| "grad_norm": 0.1640942096710205, | |
| "learning_rate": 2.7444386473022686e-05, | |
| "loss": 0.0063, | |
| "step": 2220 | |
| }, | |
| { | |
| "epoch": 3.432089265101962, | |
| "grad_norm": 0.11584623157978058, | |
| "learning_rate": 2.696560383434854e-05, | |
| "loss": 0.011, | |
| "step": 2230 | |
| }, | |
| { | |
| "epoch": 3.4474797999230473, | |
| "grad_norm": 0.2081407755613327, | |
| "learning_rate": 2.6489487481384988e-05, | |
| "loss": 0.012, | |
| "step": 2240 | |
| }, | |
| { | |
| "epoch": 3.4628703347441325, | |
| "grad_norm": 0.16449487209320068, | |
| "learning_rate": 2.601609252572339e-05, | |
| "loss": 0.0074, | |
| "step": 2250 | |
| }, | |
| { | |
| "epoch": 3.4628703347441325, | |
| "eval_loss": 0.0384063720703125, | |
| "eval_runtime": 176.7987, | |
| "eval_samples_per_second": 6.539, | |
| "eval_steps_per_second": 6.539, | |
| "step": 2250 | |
| }, | |
| { | |
| "epoch": 3.4782608695652173, | |
| "grad_norm": 0.04802235588431358, | |
| "learning_rate": 2.554547376394692e-05, | |
| "loss": 0.007, | |
| "step": 2260 | |
| }, | |
| { | |
| "epoch": 3.4936514043863025, | |
| "grad_norm": 0.23349222540855408, | |
| "learning_rate": 2.5077685671287808e-05, | |
| "loss": 0.0057, | |
| "step": 2270 | |
| }, | |
| { | |
| "epoch": 3.5090419392073873, | |
| "grad_norm": 0.11469706147909164, | |
| "learning_rate": 2.4612782395321593e-05, | |
| "loss": 0.0066, | |
| "step": 2280 | |
| }, | |
| { | |
| "epoch": 3.5244324740284725, | |
| "grad_norm": 0.21006856858730316, | |
| "learning_rate": 2.4150817749699496e-05, | |
| "loss": 0.0066, | |
| "step": 2290 | |
| }, | |
| { | |
| "epoch": 3.5398230088495577, | |
| "grad_norm": 0.1930118054151535, | |
| "learning_rate": 2.3691845207919357e-05, | |
| "loss": 0.0101, | |
| "step": 2300 | |
| }, | |
| { | |
| "epoch": 3.5398230088495577, | |
| "eval_loss": 0.04059465229511261, | |
| "eval_runtime": 176.7509, | |
| "eval_samples_per_second": 6.54, | |
| "eval_steps_per_second": 6.54, | |
| "step": 2300 | |
| }, | |
| { | |
| "epoch": 3.5552135436706425, | |
| "grad_norm": 0.05533791333436966, | |
| "learning_rate": 2.3235917897135934e-05, | |
| "loss": 0.0108, | |
| "step": 2310 | |
| }, | |
| { | |
| "epoch": 3.5706040784917277, | |
| "grad_norm": 0.6226391196250916, | |
| "learning_rate": 2.2783088592011303e-05, | |
| "loss": 0.0108, | |
| "step": 2320 | |
| }, | |
| { | |
| "epoch": 3.5859946133128124, | |
| "grad_norm": 0.2105931043624878, | |
| "learning_rate": 2.233340970860608e-05, | |
| "loss": 0.008, | |
| "step": 2330 | |
| }, | |
| { | |
| "epoch": 3.6013851481338977, | |
| "grad_norm": 0.07847414165735245, | |
| "learning_rate": 2.1886933298312125e-05, | |
| "loss": 0.0103, | |
| "step": 2340 | |
| }, | |
| { | |
| "epoch": 3.616775682954983, | |
| "grad_norm": 0.27414873242378235, | |
| "learning_rate": 2.1443711041827503e-05, | |
| "loss": 0.0056, | |
| "step": 2350 | |
| }, | |
| { | |
| "epoch": 3.616775682954983, | |
| "eval_loss": 0.03969665616750717, | |
| "eval_runtime": 176.7788, | |
| "eval_samples_per_second": 6.539, | |
| "eval_steps_per_second": 6.539, | |
| "step": 2350 | |
| }, | |
| { | |
| "epoch": 3.6321662177760676, | |
| "grad_norm": 0.27824512124061584, | |
| "learning_rate": 2.1003794243174252e-05, | |
| "loss": 0.0155, | |
| "step": 2360 | |
| }, | |
| { | |
| "epoch": 3.647556752597153, | |
| "grad_norm": 0.24925042688846588, | |
| "learning_rate": 2.0567233823759886e-05, | |
| "loss": 0.0096, | |
| "step": 2370 | |
| }, | |
| { | |
| "epoch": 3.6629472874182376, | |
| "grad_norm": 0.14463971555233002, | |
| "learning_rate": 2.0134080316483113e-05, | |
| "loss": 0.0091, | |
| "step": 2380 | |
| }, | |
| { | |
| "epoch": 3.678337822239323, | |
| "grad_norm": 0.21811449527740479, | |
| "learning_rate": 1.970438385988452e-05, | |
| "loss": 0.0117, | |
| "step": 2390 | |
| }, | |
| { | |
| "epoch": 3.693728357060408, | |
| "grad_norm": 0.18893787264823914, | |
| "learning_rate": 1.927819419234292e-05, | |
| "loss": 0.0114, | |
| "step": 2400 | |
| }, | |
| { | |
| "epoch": 3.693728357060408, | |
| "eval_loss": 0.03624710813164711, | |
| "eval_runtime": 176.7812, | |
| "eval_samples_per_second": 6.539, | |
| "eval_steps_per_second": 6.539, | |
| "step": 2400 | |
| }, | |
| { | |
| "epoch": 3.709118891881493, | |
| "grad_norm": 0.14122995734214783, | |
| "learning_rate": 1.885556064631801e-05, | |
| "loss": 0.0081, | |
| "step": 2410 | |
| }, | |
| { | |
| "epoch": 3.724509426702578, | |
| "grad_norm": 0.05854243040084839, | |
| "learning_rate": 1.843653214264003e-05, | |
| "loss": 0.0072, | |
| "step": 2420 | |
| }, | |
| { | |
| "epoch": 3.7398999615236628, | |
| "grad_norm": 0.2255101203918457, | |
| "learning_rate": 1.8021157184847087e-05, | |
| "loss": 0.0087, | |
| "step": 2430 | |
| }, | |
| { | |
| "epoch": 3.755290496344748, | |
| "grad_norm": 0.08764062821865082, | |
| "learning_rate": 1.7609483853570668e-05, | |
| "loss": 0.0053, | |
| "step": 2440 | |
| }, | |
| { | |
| "epoch": 3.770681031165833, | |
| "grad_norm": 0.02414390817284584, | |
| "learning_rate": 1.720155980097034e-05, | |
| "loss": 0.0056, | |
| "step": 2450 | |
| }, | |
| { | |
| "epoch": 3.770681031165833, | |
| "eval_loss": 0.03867615386843681, | |
| "eval_runtime": 176.7505, | |
| "eval_samples_per_second": 6.54, | |
| "eval_steps_per_second": 6.54, | |
| "step": 2450 | |
| }, | |
| { | |
| "epoch": 3.786071565986918, | |
| "grad_norm": 0.24228930473327637, | |
| "learning_rate": 1.6797432245217736e-05, | |
| "loss": 0.0067, | |
| "step": 2460 | |
| }, | |
| { | |
| "epoch": 3.801462100808003, | |
| "grad_norm": 0.2944065034389496, | |
| "learning_rate": 1.6397147965031072e-05, | |
| "loss": 0.0048, | |
| "step": 2470 | |
| }, | |
| { | |
| "epoch": 3.816852635629088, | |
| "grad_norm": 0.12245519459247589, | |
| "learning_rate": 1.600075329426033e-05, | |
| "loss": 0.0121, | |
| "step": 2480 | |
| }, | |
| { | |
| "epoch": 3.832243170450173, | |
| "grad_norm": 0.37902891635894775, | |
| "learning_rate": 1.560829411652403e-05, | |
| "loss": 0.0106, | |
| "step": 2490 | |
| }, | |
| { | |
| "epoch": 3.8476337052712584, | |
| "grad_norm": 0.17577596008777618, | |
| "learning_rate": 1.521981585989809e-05, | |
| "loss": 0.0074, | |
| "step": 2500 | |
| }, | |
| { | |
| "epoch": 3.8476337052712584, | |
| "eval_loss": 0.03889692202210426, | |
| "eval_runtime": 176.7082, | |
| "eval_samples_per_second": 6.542, | |
| "eval_steps_per_second": 6.542, | |
| "step": 2500 | |
| }, | |
| { | |
| "epoch": 3.863024240092343, | |
| "grad_norm": 0.03732967749238014, | |
| "learning_rate": 1.4835363491657412e-05, | |
| "loss": 0.0096, | |
| "step": 2510 | |
| }, | |
| { | |
| "epoch": 3.8784147749134283, | |
| "grad_norm": 0.21517157554626465, | |
| "learning_rate": 1.4454981513070791e-05, | |
| "loss": 0.0052, | |
| "step": 2520 | |
| }, | |
| { | |
| "epoch": 3.893805309734513, | |
| "grad_norm": 0.04260919243097305, | |
| "learning_rate": 1.4078713954249873e-05, | |
| "loss": 0.0065, | |
| "step": 2530 | |
| }, | |
| { | |
| "epoch": 3.9091958445555983, | |
| "grad_norm": 0.06845144182443619, | |
| "learning_rate": 1.3706604369052434e-05, | |
| "loss": 0.0039, | |
| "step": 2540 | |
| }, | |
| { | |
| "epoch": 3.9245863793766835, | |
| "grad_norm": 0.4723328948020935, | |
| "learning_rate": 1.3338695830041047e-05, | |
| "loss": 0.0089, | |
| "step": 2550 | |
| }, | |
| { | |
| "epoch": 3.9245863793766835, | |
| "eval_loss": 0.040084753185510635, | |
| "eval_runtime": 176.8028, | |
| "eval_samples_per_second": 6.538, | |
| "eval_steps_per_second": 6.538, | |
| "step": 2550 | |
| }, | |
| { | |
| "epoch": 3.9399769141977683, | |
| "grad_norm": 0.5228933095932007, | |
| "learning_rate": 1.2975030923497262e-05, | |
| "loss": 0.0088, | |
| "step": 2560 | |
| }, | |
| { | |
| "epoch": 3.9553674490188535, | |
| "grad_norm": 0.25889304280281067, | |
| "learning_rate": 1.2615651744492151e-05, | |
| "loss": 0.0058, | |
| "step": 2570 | |
| }, | |
| { | |
| "epoch": 3.9707579838399383, | |
| "grad_norm": 0.03479775786399841, | |
| "learning_rate": 1.2260599892013719e-05, | |
| "loss": 0.0117, | |
| "step": 2580 | |
| }, | |
| { | |
| "epoch": 3.9861485186610235, | |
| "grad_norm": 0.2841411828994751, | |
| "learning_rate": 1.1909916464151715e-05, | |
| "loss": 0.0069, | |
| "step": 2590 | |
| }, | |
| { | |
| "epoch": 4.001539053482109, | |
| "grad_norm": 0.017366070300340652, | |
| "learning_rate": 1.1563642053340406e-05, | |
| "loss": 0.0096, | |
| "step": 2600 | |
| }, | |
| { | |
| "epoch": 4.001539053482109, | |
| "eval_loss": 0.04020934924483299, | |
| "eval_runtime": 176.6968, | |
| "eval_samples_per_second": 6.542, | |
| "eval_steps_per_second": 6.542, | |
| "step": 2600 | |
| }, | |
| { | |
| "epoch": 4.016929588303194, | |
| "grad_norm": 0.031723860651254654, | |
| "learning_rate": 1.1221816741659964e-05, | |
| "loss": 0.0039, | |
| "step": 2610 | |
| }, | |
| { | |
| "epoch": 4.032320123124278, | |
| "grad_norm": 0.12695831060409546, | |
| "learning_rate": 1.0884480096196803e-05, | |
| "loss": 0.0022, | |
| "step": 2620 | |
| }, | |
| { | |
| "epoch": 4.047710657945363, | |
| "grad_norm": 0.12556283175945282, | |
| "learning_rate": 1.0551671164463666e-05, | |
| "loss": 0.0021, | |
| "step": 2630 | |
| }, | |
| { | |
| "epoch": 4.063101192766449, | |
| "grad_norm": 0.020965544506907463, | |
| "learning_rate": 1.0223428469879736e-05, | |
| "loss": 0.0044, | |
| "step": 2640 | |
| }, | |
| { | |
| "epoch": 4.078491727587534, | |
| "grad_norm": 0.03366950526833534, | |
| "learning_rate": 9.899790007311455e-06, | |
| "loss": 0.0019, | |
| "step": 2650 | |
| }, | |
| { | |
| "epoch": 4.078491727587534, | |
| "eval_loss": 0.04220307990908623, | |
| "eval_runtime": 176.7951, | |
| "eval_samples_per_second": 6.539, | |
| "eval_steps_per_second": 6.539, | |
| "step": 2650 | |
| }, | |
| { | |
| "epoch": 4.093882262408619, | |
| "grad_norm": 0.2073393017053604, | |
| "learning_rate": 9.580793238674573e-06, | |
| "loss": 0.0051, | |
| "step": 2660 | |
| }, | |
| { | |
| "epoch": 4.109272797229703, | |
| "grad_norm": 0.14802664518356323, | |
| "learning_rate": 9.266475088597781e-06, | |
| "loss": 0.0025, | |
| "step": 2670 | |
| }, | |
| { | |
| "epoch": 4.124663332050789, | |
| "grad_norm": 0.06581874936819077, | |
| "learning_rate": 8.956871940148681e-06, | |
| "loss": 0.0027, | |
| "step": 2680 | |
| }, | |
| { | |
| "epoch": 4.140053866871874, | |
| "grad_norm": 0.057812996208667755, | |
| "learning_rate": 8.65201963062225e-06, | |
| "loss": 0.0051, | |
| "step": 2690 | |
| }, | |
| { | |
| "epoch": 4.155444401692959, | |
| "grad_norm": 0.035848990082740784, | |
| "learning_rate": 8.351953447392674e-06, | |
| "loss": 0.0074, | |
| "step": 2700 | |
| }, | |
| { | |
| "epoch": 4.155444401692959, | |
| "eval_loss": 0.04460546746850014, | |
| "eval_runtime": 176.7731, | |
| "eval_samples_per_second": 6.539, | |
| "eval_steps_per_second": 6.539, | |
| "step": 2700 | |
| }, | |
| { | |
| "epoch": 4.170834936514044, | |
| "grad_norm": 0.029859701171517372, | |
| "learning_rate": 8.056708123828738e-06, | |
| "loss": 0.0038, | |
| "step": 2710 | |
| }, | |
| { | |
| "epoch": 4.186225471335129, | |
| "grad_norm": 0.07501061260700226, | |
| "learning_rate": 7.76631783527334e-06, | |
| "loss": 0.0082, | |
| "step": 2720 | |
| }, | |
| { | |
| "epoch": 4.201616006156214, | |
| "grad_norm": 0.04285607114434242, | |
| "learning_rate": 7.4808161950876145e-06, | |
| "loss": 0.0026, | |
| "step": 2730 | |
| }, | |
| { | |
| "epoch": 4.217006540977299, | |
| "grad_norm": 0.06884312629699707, | |
| "learning_rate": 7.200236250760111e-06, | |
| "loss": 0.0041, | |
| "step": 2740 | |
| }, | |
| { | |
| "epoch": 4.232397075798384, | |
| "grad_norm": 0.07443416863679886, | |
| "learning_rate": 6.924610480081478e-06, | |
| "loss": 0.0018, | |
| "step": 2750 | |
| }, | |
| { | |
| "epoch": 4.232397075798384, | |
| "eval_loss": 0.04528596252202988, | |
| "eval_runtime": 176.7651, | |
| "eval_samples_per_second": 6.54, | |
| "eval_steps_per_second": 6.54, | |
| "step": 2750 | |
| }, | |
| { | |
| "epoch": 4.247787610619469, | |
| "grad_norm": 0.04174703359603882, | |
| "learning_rate": 6.6539707873850774e-06, | |
| "loss": 0.0023, | |
| "step": 2760 | |
| }, | |
| { | |
| "epoch": 4.263178145440554, | |
| "grad_norm": 0.06728123128414154, | |
| "learning_rate": 6.3883484998539525e-06, | |
| "loss": 0.0016, | |
| "step": 2770 | |
| }, | |
| { | |
| "epoch": 4.278568680261639, | |
| "grad_norm": 0.10411453992128372, | |
| "learning_rate": 6.127774363894667e-06, | |
| "loss": 0.0021, | |
| "step": 2780 | |
| }, | |
| { | |
| "epoch": 4.293959215082724, | |
| "grad_norm": 0.07528620958328247, | |
| "learning_rate": 5.872278541578352e-06, | |
| "loss": 0.0034, | |
| "step": 2790 | |
| }, | |
| { | |
| "epoch": 4.309349749903809, | |
| "grad_norm": 0.07204550504684448, | |
| "learning_rate": 5.6218906071493275e-06, | |
| "loss": 0.0019, | |
| "step": 2800 | |
| }, | |
| { | |
| "epoch": 4.309349749903809, | |
| "eval_loss": 0.04676542431116104, | |
| "eval_runtime": 176.876, | |
| "eval_samples_per_second": 6.536, | |
| "eval_steps_per_second": 6.536, | |
| "step": 2800 | |
| }, | |
| { | |
| "epoch": 4.324740284724895, | |
| "grad_norm": 0.06287376582622528, | |
| "learning_rate": 5.376639543601858e-06, | |
| "loss": 0.0026, | |
| "step": 2810 | |
| }, | |
| { | |
| "epoch": 4.340130819545979, | |
| "grad_norm": 0.012383749708533287, | |
| "learning_rate": 5.136553739325256e-06, | |
| "loss": 0.0014, | |
| "step": 2820 | |
| }, | |
| { | |
| "epoch": 4.355521354367064, | |
| "grad_norm": 0.05571524053812027, | |
| "learning_rate": 4.901660984817908e-06, | |
| "loss": 0.0019, | |
| "step": 2830 | |
| }, | |
| { | |
| "epoch": 4.370911889188149, | |
| "grad_norm": 0.027088260278105736, | |
| "learning_rate": 4.671988469470434e-06, | |
| "loss": 0.0024, | |
| "step": 2840 | |
| }, | |
| { | |
| "epoch": 4.3863024240092345, | |
| "grad_norm": 0.02969769574701786, | |
| "learning_rate": 4.447562778418435e-06, | |
| "loss": 0.002, | |
| "step": 2850 | |
| }, | |
| { | |
| "epoch": 4.3863024240092345, | |
| "eval_loss": 0.048255208879709244, | |
| "eval_runtime": 176.7793, | |
| "eval_samples_per_second": 6.539, | |
| "eval_steps_per_second": 6.539, | |
| "step": 2850 | |
| }, | |
| { | |
| "epoch": 4.40169295883032, | |
| "grad_norm": 0.25782549381256104, | |
| "learning_rate": 4.228409889465218e-06, | |
| "loss": 0.0022, | |
| "step": 2860 | |
| }, | |
| { | |
| "epoch": 4.417083493651404, | |
| "grad_norm": 0.11440496146678925, | |
| "learning_rate": 4.014555170074824e-06, | |
| "loss": 0.0009, | |
| "step": 2870 | |
| }, | |
| { | |
| "epoch": 4.432474028472489, | |
| "grad_norm": 0.02885556034743786, | |
| "learning_rate": 3.8060233744356633e-06, | |
| "loss": 0.0072, | |
| "step": 2880 | |
| }, | |
| { | |
| "epoch": 4.4478645632935745, | |
| "grad_norm": 0.011130722239613533, | |
| "learning_rate": 3.602838640595152e-06, | |
| "loss": 0.0026, | |
| "step": 2890 | |
| }, | |
| { | |
| "epoch": 4.46325509811466, | |
| "grad_norm": 0.424197793006897, | |
| "learning_rate": 3.4050244876656866e-06, | |
| "loss": 0.0045, | |
| "step": 2900 | |
| }, | |
| { | |
| "epoch": 4.46325509811466, | |
| "eval_loss": 0.04861905798316002, | |
| "eval_runtime": 176.8295, | |
| "eval_samples_per_second": 6.537, | |
| "eval_steps_per_second": 6.537, | |
| "step": 2900 | |
| }, | |
| { | |
| "epoch": 4.478645632935745, | |
| "grad_norm": 0.1396138072013855, | |
| "learning_rate": 3.2126038131022552e-06, | |
| "loss": 0.0071, | |
| "step": 2910 | |
| }, | |
| { | |
| "epoch": 4.494036167756829, | |
| "grad_norm": 0.06447521597146988, | |
| "learning_rate": 3.025598890051995e-06, | |
| "loss": 0.0038, | |
| "step": 2920 | |
| }, | |
| { | |
| "epoch": 4.5094267025779144, | |
| "grad_norm": 0.0140528017655015, | |
| "learning_rate": 2.8440313647760288e-06, | |
| "loss": 0.003, | |
| "step": 2930 | |
| }, | |
| { | |
| "epoch": 4.524817237399, | |
| "grad_norm": 0.28481969237327576, | |
| "learning_rate": 2.6679222541438397e-06, | |
| "loss": 0.0058, | |
| "step": 2940 | |
| }, | |
| { | |
| "epoch": 4.540207772220085, | |
| "grad_norm": 0.025602344423532486, | |
| "learning_rate": 2.4972919432005582e-06, | |
| "loss": 0.002, | |
| "step": 2950 | |
| }, | |
| { | |
| "epoch": 4.540207772220085, | |
| "eval_loss": 0.04802686348557472, | |
| "eval_runtime": 176.8568, | |
| "eval_samples_per_second": 6.536, | |
| "eval_steps_per_second": 6.536, | |
| "step": 2950 | |
| }, | |
| { | |
| "epoch": 4.555598307041169, | |
| "grad_norm": 0.13335546851158142, | |
| "learning_rate": 2.3321601828073135e-06, | |
| "loss": 0.0012, | |
| "step": 2960 | |
| }, | |
| { | |
| "epoch": 4.570988841862254, | |
| "grad_norm": 0.0808529257774353, | |
| "learning_rate": 2.1725460873550317e-06, | |
| "loss": 0.0073, | |
| "step": 2970 | |
| }, | |
| { | |
| "epoch": 4.58637937668334, | |
| "grad_norm": 0.041818227618932724, | |
| "learning_rate": 2.0184681325519096e-06, | |
| "loss": 0.0027, | |
| "step": 2980 | |
| }, | |
| { | |
| "epoch": 4.601769911504425, | |
| "grad_norm": 0.020071454346179962, | |
| "learning_rate": 1.869944153284786e-06, | |
| "loss": 0.0026, | |
| "step": 2990 | |
| }, | |
| { | |
| "epoch": 4.61716044632551, | |
| "grad_norm": 0.06694600731134415, | |
| "learning_rate": 1.7269913415547456e-06, | |
| "loss": 0.0033, | |
| "step": 3000 | |
| }, | |
| { | |
| "epoch": 4.61716044632551, | |
| "eval_loss": 0.047850143164396286, | |
| "eval_runtime": 176.7617, | |
| "eval_samples_per_second": 6.54, | |
| "eval_steps_per_second": 6.54, | |
| "step": 3000 | |
| }, | |
| { | |
| "epoch": 4.632550981146595, | |
| "grad_norm": 0.16029523313045502, | |
| "learning_rate": 1.589626244487069e-06, | |
| "loss": 0.0024, | |
| "step": 3010 | |
| }, | |
| { | |
| "epoch": 4.64794151596768, | |
| "grad_norm": 0.3176231384277344, | |
| "learning_rate": 1.4578647624158726e-06, | |
| "loss": 0.0027, | |
| "step": 3020 | |
| }, | |
| { | |
| "epoch": 4.663332050788765, | |
| "grad_norm": 0.13648022711277008, | |
| "learning_rate": 1.3317221470436293e-06, | |
| "loss": 0.0041, | |
| "step": 3030 | |
| }, | |
| { | |
| "epoch": 4.67872258560985, | |
| "grad_norm": 0.5381929874420166, | |
| "learning_rate": 1.2112129996757315e-06, | |
| "loss": 0.0035, | |
| "step": 3040 | |
| }, | |
| { | |
| "epoch": 4.694113120430935, | |
| "grad_norm": 0.010145686566829681, | |
| "learning_rate": 1.0963512695303546e-06, | |
| "loss": 0.0054, | |
| "step": 3050 | |
| }, | |
| { | |
| "epoch": 4.694113120430935, | |
| "eval_loss": 0.04837736859917641, | |
| "eval_runtime": 176.7537, | |
| "eval_samples_per_second": 6.54, | |
| "eval_steps_per_second": 6.54, | |
| "step": 3050 | |
| }, | |
| { | |
| "epoch": 4.7095036552520195, | |
| "grad_norm": 0.19823475182056427, | |
| "learning_rate": 9.871502521237975e-07, | |
| "loss": 0.0023, | |
| "step": 3060 | |
| }, | |
| { | |
| "epoch": 4.724894190073105, | |
| "grad_norm": 0.2060529589653015, | |
| "learning_rate": 8.83622587731514e-07, | |
| "loss": 0.0027, | |
| "step": 3070 | |
| }, | |
| { | |
| "epoch": 4.74028472489419, | |
| "grad_norm": 0.6080601215362549, | |
| "learning_rate": 7.857802599249564e-07, | |
| "loss": 0.0072, | |
| "step": 3080 | |
| }, | |
| { | |
| "epoch": 4.755675259715275, | |
| "grad_norm": 0.11862636357545853, | |
| "learning_rate": 6.936345941844335e-07, | |
| "loss": 0.0028, | |
| "step": 3090 | |
| }, | |
| { | |
| "epoch": 4.77106579453636, | |
| "grad_norm": 0.06396199017763138, | |
| "learning_rate": 6.071962565881939e-07, | |
| "loss": 0.0055, | |
| "step": 3100 | |
| }, | |
| { | |
| "epoch": 4.77106579453636, | |
| "eval_loss": 0.04817047342658043, | |
| "eval_runtime": 176.8158, | |
| "eval_samples_per_second": 6.538, | |
| "eval_steps_per_second": 6.538, | |
| "step": 3100 | |
| }, | |
| { | |
| "epoch": 4.786456329357446, | |
| "grad_norm": 0.14032289385795593, | |
| "learning_rate": 5.264752525777961e-07, | |
| "loss": 0.0082, | |
| "step": 3110 | |
| }, | |
| { | |
| "epoch": 4.80184686417853, | |
| "grad_norm": 0.17890319228172302, | |
| "learning_rate": 4.5148092579991863e-07, | |
| "loss": 0.0046, | |
| "step": 3120 | |
| }, | |
| { | |
| "epoch": 4.817237398999615, | |
| "grad_norm": 0.008984181098639965, | |
| "learning_rate": 3.822219570248531e-07, | |
| "loss": 0.0043, | |
| "step": 3130 | |
| }, | |
| { | |
| "epoch": 4.8326279338207, | |
| "grad_norm": 0.012724678963422775, | |
| "learning_rate": 3.187063631416742e-07, | |
| "loss": 0.0015, | |
| "step": 3140 | |
| }, | |
| { | |
| "epoch": 4.8480184686417855, | |
| "grad_norm": 0.04591573029756546, | |
| "learning_rate": 2.6094149623024365e-07, | |
| "loss": 0.0034, | |
| "step": 3150 | |
| }, | |
| { | |
| "epoch": 4.8480184686417855, | |
| "eval_loss": 0.048183247447013855, | |
| "eval_runtime": 176.9728, | |
| "eval_samples_per_second": 6.532, | |
| "eval_steps_per_second": 6.532, | |
| "step": 3150 | |
| }, | |
| { | |
| "epoch": 4.86340900346287, | |
| "grad_norm": 0.05306975543498993, | |
| "learning_rate": 2.0893404271022376e-07, | |
| "loss": 0.0032, | |
| "step": 3160 | |
| }, | |
| { | |
| "epoch": 4.878799538283955, | |
| "grad_norm": 0.19906778633594513, | |
| "learning_rate": 1.626900225670691e-07, | |
| "loss": 0.0026, | |
| "step": 3170 | |
| }, | |
| { | |
| "epoch": 4.89419007310504, | |
| "grad_norm": 0.055820539593696594, | |
| "learning_rate": 1.2221478865524493e-07, | |
| "loss": 0.0018, | |
| "step": 3180 | |
| }, | |
| { | |
| "epoch": 4.9095806079261255, | |
| "grad_norm": 0.020210551097989082, | |
| "learning_rate": 8.751302607857836e-08, | |
| "loss": 0.0041, | |
| "step": 3190 | |
| }, | |
| { | |
| "epoch": 4.924971142747211, | |
| "grad_norm": 0.02825630083680153, | |
| "learning_rate": 5.8588751647975506e-08, | |
| "loss": 0.0038, | |
| "step": 3200 | |
| }, | |
| { | |
| "epoch": 4.924971142747211, | |
| "eval_loss": 0.04810521379113197, | |
| "eval_runtime": 177.0651, | |
| "eval_samples_per_second": 6.529, | |
| "eval_steps_per_second": 6.529, | |
| "step": 3200 | |
| }, | |
| { | |
| "epoch": 4.940361677568296, | |
| "grad_norm": 0.028862258419394493, | |
| "learning_rate": 3.544531341646007e-08, | |
| "loss": 0.0037, | |
| "step": 3210 | |
| }, | |
| { | |
| "epoch": 4.95575221238938, | |
| "grad_norm": 0.043836887925863266, | |
| "learning_rate": 1.808539029161116e-08, | |
| "loss": 0.0027, | |
| "step": 3220 | |
| }, | |
| { | |
| "epoch": 4.9711427472104655, | |
| "grad_norm": 0.05687952786684036, | |
| "learning_rate": 6.510991725500182e-09, | |
| "loss": 0.0058, | |
| "step": 3230 | |
| }, | |
| { | |
| "epoch": 4.986533282031551, | |
| "grad_norm": 0.06359147280454636, | |
| "learning_rate": 7.234574820769169e-10, | |
| "loss": 0.0032, | |
| "step": 3240 | |
| }, | |
| { | |
| "epoch": 4.994228549442093, | |
| "step": 3245, | |
| "total_flos": 8.169046237996646e+17, | |
| "train_loss": 0.03518175135534515, | |
| "train_runtime": 38986.9842, | |
| "train_samples_per_second": 1.333, | |
| "train_steps_per_second": 0.083 | |
| } | |
| ], | |
| "logging_steps": 10, | |
| "max_steps": 3245, | |
| "num_input_tokens_seen": 0, | |
| "num_train_epochs": 5, | |
| "save_steps": 50, | |
| "stateful_callbacks": { | |
| "TrainerControl": { | |
| "args": { | |
| "should_epoch_stop": false, | |
| "should_evaluate": false, | |
| "should_log": false, | |
| "should_save": true, | |
| "should_training_stop": true | |
| }, | |
| "attributes": {} | |
| } | |
| }, | |
| "total_flos": 8.169046237996646e+17, | |
| "train_batch_size": 1, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |
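
The listing above ends the JSON state file. For readers who want to inspect it programmatically, the sketch below is one possible way to do so; it is a minimal illustration, not part of the original file. Assumptions: the JSON is saved locally as `trainer_state.json`, and it follows the layout of a Hugging Face Trainer checkpoint state, where `log_history` mixes training records (carrying `loss` and `learning_rate`), evaluation records (carrying `eval_loss`), and a final summary record (carrying `train_loss` and `train_runtime`). Only the standard-library `json` module is used.

```python
# Minimal sketch (assumption: the state shown above is saved as "trainer_state.json").
import json

with open("trainer_state.json", "r", encoding="utf-8") as f:
    state = json.load(f)

# Split the log into training records (have "loss") and eval records (have "eval_loss").
train_log = [r for r in state["log_history"] if "loss" in r]
eval_log = [r for r in state["log_history"] if "eval_loss" in r]

# Find the evaluation minimum and compare it with the stored best_metric.
best = min(eval_log, key=lambda r: r["eval_loss"])
print(f'lowest eval_loss {best["eval_loss"]:.5f} at step {best["step"]}')
print(f'stored best_metric: {state.get("best_metric")}')

# The last record is the end-of-training summary (train_loss, runtime, throughput).
summary = state["log_history"][-1]
print(f'train_loss {summary.get("train_loss")}, runtime {summary.get("train_runtime")} s')
```

Running this against the file reproduces the evaluation curve logged every 50 steps and the final summary record at step 3245, which can be useful for comparing folds without re-opening the raw JSON.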