{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 10.0,
  "eval_steps": 500,
  "global_step": 1070,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.09345794392523364,
      "grad_norm": 2.035820722579956,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 1.0122,
      "step": 10
    },
    {
      "epoch": 0.18691588785046728,
      "grad_norm": 2.3610706329345703,
      "learning_rate": 3.518518518518519e-05,
      "loss": 0.3526,
      "step": 20
    },
    {
      "epoch": 0.2803738317757009,
      "grad_norm": 1.186436653137207,
      "learning_rate": 5.370370370370371e-05,
      "loss": 0.2888,
      "step": 30
    },
    {
      "epoch": 0.37383177570093457,
      "grad_norm": 2.2752561569213867,
      "learning_rate": 7.222222222222222e-05,
      "loss": 0.2957,
      "step": 40
    },
    {
      "epoch": 0.4672897196261682,
      "grad_norm": 1.9183247089385986,
      "learning_rate": 9.074074074074075e-05,
      "loss": 0.2282,
      "step": 50
    },
    {
      "epoch": 0.5607476635514018,
      "grad_norm": 1.8604793548583984,
      "learning_rate": 9.999402437003975e-05,
      "loss": 0.169,
      "step": 60
    },
    {
      "epoch": 0.6542056074766355,
      "grad_norm": 1.4165388345718384,
      "learning_rate": 9.99462278999732e-05,
      "loss": 0.1587,
      "step": 70
    },
    {
      "epoch": 0.7476635514018691,
      "grad_norm": 1.3570247888565063,
      "learning_rate": 9.985068065535225e-05,
      "loss": 0.1176,
      "step": 80
    },
    {
      "epoch": 0.8411214953271028,
      "grad_norm": 0.8269003033638,
      "learning_rate": 9.970747398351445e-05,
      "loss": 0.1031,
      "step": 90
    },
    {
      "epoch": 0.9345794392523364,
      "grad_norm": 1.9058301448822021,
      "learning_rate": 9.951674479629056e-05,
      "loss": 0.0991,
      "step": 100
    },
    {
      "epoch": 1.02803738317757,
      "grad_norm": 1.9390472173690796,
      "learning_rate": 9.927867543911091e-05,
      "loss": 0.0873,
      "step": 110
    },
    {
      "epoch": 1.1214953271028036,
      "grad_norm": 0.9300522208213806,
      "learning_rate": 9.899349351667522e-05,
      "loss": 0.0946,
      "step": 120
    },
    {
      "epoch": 1.2149532710280373,
      "grad_norm": 0.9239804148674011,
      "learning_rate": 9.866147167535254e-05,
      "loss": 0.0794,
      "step": 130
    },
    {
      "epoch": 1.308411214953271,
      "grad_norm": 1.4692777395248413,
      "learning_rate": 9.828292734251944e-05,
      "loss": 0.0898,
      "step": 140
    },
    {
      "epoch": 1.4018691588785046,
      "grad_norm": 0.8343071937561035,
      "learning_rate": 9.785822242308562e-05,
      "loss": 0.0819,
      "step": 150
    },
    {
      "epoch": 1.4953271028037383,
      "grad_norm": 0.7223255634307861,
      "learning_rate": 9.738776295349687e-05,
      "loss": 0.0803,
      "step": 160
    },
    {
      "epoch": 1.588785046728972,
      "grad_norm": 0.7587630748748779,
      "learning_rate": 9.687199871354669e-05,
      "loss": 0.0745,
      "step": 170
    },
    {
      "epoch": 1.6822429906542056,
      "grad_norm": 1.022656798362732,
      "learning_rate": 9.631142279636706e-05,
      "loss": 0.0894,
      "step": 180
    },
    {
      "epoch": 1.7757009345794392,
      "grad_norm": 0.8869289755821228,
      "learning_rate": 9.570657113700985e-05,
      "loss": 0.0859,
      "step": 190
    },
    {
      "epoch": 1.8691588785046729,
      "grad_norm": 0.9170877933502197,
      "learning_rate": 9.50580220000696e-05,
      "loss": 0.0737,
      "step": 200
    },
    {
      "epoch": 1.9626168224299065,
      "grad_norm": 1.0185060501098633,
      "learning_rate": 9.436639542683727e-05,
      "loss": 0.0696,
      "step": 210
    },
    {
      "epoch": 2.05607476635514,
      "grad_norm": 1.1690869331359863,
      "learning_rate": 9.363235264251369e-05,
      "loss": 0.0635,
      "step": 220
    },
    {
      "epoch": 2.149532710280374,
      "grad_norm": 0.940513551235199,
      "learning_rate": 9.285659542404941e-05,
      "loss": 0.072,
      "step": 230
    },
    {
      "epoch": 2.2429906542056073,
      "grad_norm": 0.5256121158599854,
      "learning_rate": 9.203986542921532e-05,
      "loss": 0.065,
      "step": 240
    },
    {
      "epoch": 2.336448598130841,
      "grad_norm": 0.7171943783760071,
      "learning_rate": 9.11829434875454e-05,
      "loss": 0.0594,
      "step": 250
    },
    {
      "epoch": 2.4299065420560746,
      "grad_norm": 0.6012755036354065,
      "learning_rate": 9.02866488538296e-05,
      "loss": 0.0657,
      "step": 260
    },
    {
      "epoch": 2.5233644859813085,
      "grad_norm": 0.496242493391037,
      "learning_rate": 8.93518384248705e-05,
      "loss": 0.0712,
      "step": 270
    },
    {
      "epoch": 2.616822429906542,
      "grad_norm": 0.7729496955871582,
      "learning_rate": 8.837940592025257e-05,
      "loss": 0.0581,
      "step": 280
    },
    {
      "epoch": 2.710280373831776,
      "grad_norm": 0.6967052817344666,
      "learning_rate": 8.737028102790723e-05,
      "loss": 0.065,
      "step": 290
    },
    {
      "epoch": 2.803738317757009,
      "grad_norm": 1.2209193706512451,
      "learning_rate": 8.632542851529051e-05,
      "loss": 0.0673,
      "step": 300
    },
    {
      "epoch": 2.897196261682243,
      "grad_norm": 0.8657803535461426,
      "learning_rate": 8.524584730702339e-05,
      "loss": 0.0778,
      "step": 310
    },
    {
      "epoch": 2.9906542056074765,
      "grad_norm": 0.6701989769935608,
      "learning_rate": 8.413256952987611e-05,
      "loss": 0.0617,
      "step": 320
    },
    {
      "epoch": 3.0841121495327104,
      "grad_norm": 0.46317973732948303,
      "learning_rate": 8.298665952600999e-05,
      "loss": 0.061,
      "step": 330
    },
    {
      "epoch": 3.177570093457944,
      "grad_norm": 0.549863338470459,
      "learning_rate": 8.180921283541986e-05,
      "loss": 0.0608,
      "step": 340
    },
    {
      "epoch": 3.2710280373831777,
      "grad_norm": 0.5789127349853516,
      "learning_rate": 8.060135514854994e-05,
      "loss": 0.0588,
      "step": 350
    },
    {
      "epoch": 3.364485981308411,
      "grad_norm": 0.5358679294586182,
      "learning_rate": 7.936424123008464e-05,
      "loss": 0.0584,
      "step": 360
    },
    {
      "epoch": 3.457943925233645,
      "grad_norm": 0.682597815990448,
      "learning_rate": 7.809905381494316e-05,
      "loss": 0.0585,
      "step": 370
    },
    {
      "epoch": 3.5514018691588785,
      "grad_norm": 0.7718446254730225,
      "learning_rate": 7.68070024775332e-05,
      "loss": 0.0525,
      "step": 380
    },
    {
      "epoch": 3.6448598130841123,
      "grad_norm": 0.5267370343208313,
      "learning_rate": 7.548932247534506e-05,
      "loss": 0.0537,
      "step": 390
    },
    {
      "epoch": 3.7383177570093458,
      "grad_norm": 0.8042123913764954,
      "learning_rate": 7.414727356799154e-05,
      "loss": 0.0613,
      "step": 400
    },
    {
      "epoch": 3.831775700934579,
      "grad_norm": 0.5799658298492432,
      "learning_rate": 7.27821388128227e-05,
      "loss": 0.0535,
      "step": 410
    },
    {
      "epoch": 3.925233644859813,
      "grad_norm": 0.8124668598175049,
      "learning_rate": 7.139522333826707e-05,
      "loss": 0.0493,
      "step": 420
    },
    {
      "epoch": 4.018691588785047,
      "grad_norm": 0.8771295547485352,
      "learning_rate": 6.99878530960719e-05,
      "loss": 0.0615,
      "step": 430
    },
    {
      "epoch": 4.11214953271028,
      "grad_norm": 0.7655673027038574,
      "learning_rate": 6.856137359363533e-05,
      "loss": 0.0605,
      "step": 440
    },
    {
      "epoch": 4.205607476635514,
      "grad_norm": 0.6487105488777161,
      "learning_rate": 6.711714860764266e-05,
      "loss": 0.0559,
      "step": 450
    },
    {
      "epoch": 4.299065420560748,
      "grad_norm": 0.7439901232719421,
      "learning_rate": 6.565655888023618e-05,
      "loss": 0.0562,
      "step": 460
    },
    {
      "epoch": 4.392523364485982,
      "grad_norm": 0.566832423210144,
      "learning_rate": 6.418100079896556e-05,
      "loss": 0.0535,
      "step": 470
    },
    {
      "epoch": 4.485981308411215,
      "grad_norm": 0.5109242796897888,
      "learning_rate": 6.269188506178019e-05,
      "loss": 0.0558,
      "step": 480
    },
    {
      "epoch": 4.579439252336448,
      "grad_norm": 0.5645363926887512,
      "learning_rate": 6.11906353283405e-05,
      "loss": 0.0445,
      "step": 490
    },
    {
      "epoch": 4.672897196261682,
      "grad_norm": 0.7012343406677246,
      "learning_rate": 5.967868685893715e-05,
      "loss": 0.0496,
      "step": 500
    },
    {
      "epoch": 4.766355140186916,
      "grad_norm": 0.47865885496139526,
      "learning_rate": 5.815748514231944e-05,
      "loss": 0.0486,
      "step": 510
    },
    {
      "epoch": 4.859813084112149,
      "grad_norm": 0.38202741742134094,
      "learning_rate": 5.6628484513745e-05,
      "loss": 0.0412,
      "step": 520
    },
    {
      "epoch": 4.953271028037383,
      "grad_norm": 0.7451114058494568,
      "learning_rate": 5.5093146764571866e-05,
      "loss": 0.0561,
      "step": 530
    },
    {
      "epoch": 5.046728971962617,
      "grad_norm": 0.29692840576171875,
      "learning_rate": 5.355293974472197e-05,
      "loss": 0.0432,
      "step": 540
    },
    {
      "epoch": 5.140186915887851,
      "grad_norm": 0.5470758080482483,
      "learning_rate": 5.2009335959352666e-05,
      "loss": 0.0456,
      "step": 550
    },
    {
      "epoch": 5.233644859813084,
      "grad_norm": 0.3530406057834625,
      "learning_rate": 5.046381116107742e-05,
      "loss": 0.05,
      "step": 560
    },
    {
      "epoch": 5.327102803738318,
      "grad_norm": 0.5904394388198853,
      "learning_rate": 4.891784293908192e-05,
      "loss": 0.0479,
      "step": 570
    },
    {
      "epoch": 5.420560747663552,
      "grad_norm": 0.6387467384338379,
      "learning_rate": 4.7372909306484276e-05,
      "loss": 0.0419,
      "step": 580
    },
    {
      "epoch": 5.5140186915887845,
      "grad_norm": 0.3460151255130768,
      "learning_rate": 4.5830487287289966e-05,
      "loss": 0.0457,
      "step": 590
    },
    {
      "epoch": 5.607476635514018,
      "grad_norm": 0.6098641157150269,
      "learning_rate": 4.429205150429241e-05,
      "loss": 0.038,
      "step": 600
    },
    {
      "epoch": 5.700934579439252,
      "grad_norm": 0.5131182670593262,
      "learning_rate": 4.275907276926918e-05,
      "loss": 0.0518,
      "step": 610
    },
    {
      "epoch": 5.794392523364486,
      "grad_norm": 0.5069635510444641,
      "learning_rate": 4.123301667682171e-05,
      "loss": 0.0447,
      "step": 620
    },
    {
      "epoch": 5.88785046728972,
      "grad_norm": 0.4461626708507538,
      "learning_rate": 3.971534220320291e-05,
      "loss": 0.0465,
      "step": 630
    },
    {
      "epoch": 5.981308411214953,
      "grad_norm": 0.5642176866531372,
      "learning_rate": 3.820750031147211e-05,
      "loss": 0.0415,
      "step": 640
    },
    {
      "epoch": 6.074766355140187,
      "grad_norm": 0.525589108467102,
      "learning_rate": 3.67109325643111e-05,
      "loss": 0.0398,
      "step": 650
    },
    {
      "epoch": 6.168224299065421,
      "grad_norm": 0.5913786292076111,
      "learning_rate": 3.522706974582717e-05,
      "loss": 0.0421,
      "step": 660
    },
    {
      "epoch": 6.261682242990654,
      "grad_norm": 0.44272592663764954,
      "learning_rate": 3.375733049366115e-05,
      "loss": 0.0377,
      "step": 670
    },
    {
      "epoch": 6.355140186915888,
      "grad_norm": 0.5991699695587158,
      "learning_rate": 3.2303119942707796e-05,
      "loss": 0.0401,
      "step": 680
    },
    {
      "epoch": 6.4485981308411215,
      "grad_norm": 0.4788666069507599,
      "learning_rate": 3.086582838174551e-05,
      "loss": 0.0344,
      "step": 690
    },
    {
      "epoch": 6.542056074766355,
      "grad_norm": 0.2347191423177719,
      "learning_rate": 2.944682992425959e-05,
      "loss": 0.0431,
      "step": 700
    },
    {
      "epoch": 6.635514018691588,
      "grad_norm": 0.4982701241970062,
      "learning_rate": 2.804748119472969e-05,
      "loss": 0.0431,
      "step": 710
    },
    {
      "epoch": 6.728971962616822,
      "grad_norm": 0.3474225103855133,
      "learning_rate": 2.6669120031637663e-05,
      "loss": 0.0312,
      "step": 720
    },
    {
      "epoch": 6.822429906542056,
      "grad_norm": 0.484762042760849,
      "learning_rate": 2.5313064208435423e-05,
      "loss": 0.0402,
      "step": 730
    },
    {
      "epoch": 6.91588785046729,
      "grad_norm": 0.3601996898651123,
      "learning_rate": 2.3980610173696255e-05,
      "loss": 0.0353,
      "step": 740
    },
    {
      "epoch": 7.009345794392523,
      "grad_norm": 0.4373137056827545,
      "learning_rate": 2.2673031811653034e-05,
      "loss": 0.0515,
      "step": 750
    },
    {
      "epoch": 7.102803738317757,
      "grad_norm": 0.4686773121356964,
      "learning_rate": 2.139157922430956e-05,
      "loss": 0.042,
      "step": 760
    },
    {
      "epoch": 7.196261682242991,
      "grad_norm": 0.4924180507659912,
      "learning_rate": 2.01374775362883e-05,
      "loss": 0.0396,
      "step": 770
    },
    {
      "epoch": 7.289719626168225,
      "grad_norm": 0.3904656767845154,
      "learning_rate": 1.8911925723557806e-05,
      "loss": 0.0399,
      "step": 780
    },
    {
      "epoch": 7.383177570093458,
      "grad_norm": 0.283329039812088,
      "learning_rate": 1.7716095467159393e-05,
      "loss": 0.0402,
      "step": 790
    },
    {
      "epoch": 7.4766355140186915,
      "grad_norm": 0.4036600887775421,
      "learning_rate": 1.6551130033028827e-05,
      "loss": 0.0373,
      "step": 800
    },
    {
      "epoch": 7.570093457943925,
      "grad_norm": 0.3786483407020569,
      "learning_rate": 1.541814317898425e-05,
      "loss": 0.0399,
      "step": 810
    },
    {
      "epoch": 7.663551401869158,
      "grad_norm": 0.34531161189079285,
      "learning_rate": 1.4318218089924962e-05,
      "loss": 0.04,
      "step": 820
    },
    {
      "epoch": 7.757009345794392,
      "grad_norm": 0.3333149254322052,
      "learning_rate": 1.3252406342259527e-05,
      "loss": 0.0328,
      "step": 830
    },
    {
      "epoch": 7.850467289719626,
      "grad_norm": 0.35911786556243896,
      "learning_rate": 1.2221726898552665e-05,
      "loss": 0.0359,
      "step": 840
    },
    {
      "epoch": 7.94392523364486,
      "grad_norm": 0.37050867080688477,
      "learning_rate": 1.122716513335262e-05,
      "loss": 0.0468,
      "step": 850
    },
    {
      "epoch": 8.037383177570094,
      "grad_norm": 0.27867504954338074,
      "learning_rate": 1.0269671891130123e-05,
      "loss": 0.0308,
      "step": 860
    },
    {
      "epoch": 8.130841121495328,
      "grad_norm": 0.36876147985458374,
      "learning_rate": 9.350162577229432e-06,
      "loss": 0.0379,
      "step": 870
    },
    {
      "epoch": 8.22429906542056,
      "grad_norm": 0.3114546835422516,
      "learning_rate": 8.46951628270098e-06,
      "loss": 0.0354,
      "step": 880
    },
    {
      "epoch": 8.317757009345794,
      "grad_norm": 0.26373207569122314,
      "learning_rate": 7.628574943851852e-06,
      "loss": 0.0296,
      "step": 890
    },
    {
      "epoch": 8.411214953271028,
      "grad_norm": 0.3879351019859314,
      "learning_rate": 6.82814253731801e-06,
      "loss": 0.0288,
      "step": 900
    },
    {
      "epoch": 8.504672897196262,
      "grad_norm": 0.3110659718513489,
      "learning_rate": 6.06898431142745e-06,
      "loss": 0.0293,
      "step": 910
    },
    {
      "epoch": 8.598130841121495,
      "grad_norm": 0.21370220184326172,
      "learning_rate": 5.351826054589393e-06,
      "loss": 0.0294,
      "step": 920
    },
    {
      "epoch": 8.69158878504673,
      "grad_norm": 0.304156094789505,
      "learning_rate": 4.677353401408974e-06,
      "loss": 0.0323,
      "step": 930
    },
    {
      "epoch": 8.785046728971963,
      "grad_norm": 0.30655941367149353,
      "learning_rate": 4.04621117719049e-06,
      "loss": 0.0356,
      "step": 940
    },
    {
      "epoch": 8.878504672897197,
      "grad_norm": 0.43658676743507385,
      "learning_rate": 3.459002781456344e-06,
      "loss": 0.0332,
      "step": 950
    },
    {
      "epoch": 8.97196261682243,
      "grad_norm": 0.486379474401474,
      "learning_rate": 2.9162896110707163e-06,
      "loss": 0.0336,
      "step": 960
    },
    {
      "epoch": 9.065420560747663,
      "grad_norm": 0.35680046677589417,
      "learning_rate": 2.418590523519687e-06,
      "loss": 0.0313,
      "step": 970
    },
    {
      "epoch": 9.158878504672897,
      "grad_norm": 0.2002888023853302,
      "learning_rate": 1.9663813408607845e-06,
      "loss": 0.0357,
      "step": 980
    },
    {
      "epoch": 9.25233644859813,
      "grad_norm": 0.27867501974105835,
      "learning_rate": 1.5600943948163527e-06,
      "loss": 0.0336,
      "step": 990
    },
    {
      "epoch": 9.345794392523365,
      "grad_norm": 0.2867244780063629,
      "learning_rate": 1.2001181134455475e-06,
      "loss": 0.0325,
      "step": 1000
    },
    {
      "epoch": 9.439252336448599,
      "grad_norm": 0.284397691488266,
      "learning_rate": 8.867966497901282e-07,
      "loss": 0.0278,
      "step": 1010
    },
    {
      "epoch": 9.532710280373832,
      "grad_norm": 0.24502161145210266,
      "learning_rate": 6.204295528491555e-07,
      "loss": 0.0377,
      "step": 1020
    },
    {
      "epoch": 9.626168224299064,
      "grad_norm": 0.3403957784175873,
      "learning_rate": 4.012714811970464e-07,
      "loss": 0.0294,
      "step": 1030
    },
    {
      "epoch": 9.719626168224298,
      "grad_norm": 0.311471551656723,
      "learning_rate": 2.295319595188805e-07,
      "loss": 0.0277,
      "step": 1040
    },
    {
      "epoch": 9.813084112149532,
      "grad_norm": 0.26186737418174744,
      "learning_rate": 1.0537517829562472e-07,
      "loss": 0.0281,
      "step": 1050
    },
    {
      "epoch": 9.906542056074766,
      "grad_norm": 0.20402012765407562,
      "learning_rate": 2.8919836830887392e-08,
      "loss": 0.0305,
      "step": 1060
    },
    {
      "epoch": 10.0,
      "grad_norm": 0.8117493391036987,
      "learning_rate": 2.3902976920009423e-10,
      "loss": 0.0289,
      "step": 1070
    },
    {
      "epoch": 10.0,
      "step": 1070,
      "total_flos": 0.0,
      "train_loss": 0.07138736554395372,
      "train_runtime": 1117.3117,
      "train_samples_per_second": 46.513,
      "train_steps_per_second": 0.958
    }
  ],
  "logging_steps": 10,
  "max_steps": 1070,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 1000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 49,
  "trial_name": null,
  "trial_params": null
}