{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.996011396011396,
  "eval_steps": 500,
  "global_step": 1314,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.022792022792022793,
      "grad_norm": 5.034788493406042,
      "learning_rate": 1e-06,
      "loss": 0.8037,
      "step": 10
    },
    {
      "epoch": 0.045584045584045586,
      "grad_norm": 1.1516983501374154,
      "learning_rate": 1e-06,
      "loss": 0.7481,
      "step": 20
    },
    {
      "epoch": 0.06837606837606838,
      "grad_norm": 0.9644157627889516,
      "learning_rate": 1e-06,
      "loss": 0.7182,
      "step": 30
    },
    {
      "epoch": 0.09116809116809117,
      "grad_norm": 0.9138466158983852,
      "learning_rate": 1e-06,
      "loss": 0.7142,
      "step": 40
    },
    {
      "epoch": 0.11396011396011396,
      "grad_norm": 0.8819965403325495,
      "learning_rate": 1e-06,
      "loss": 0.7104,
      "step": 50
    },
    {
      "epoch": 0.13675213675213677,
      "grad_norm": 0.8440638025989942,
      "learning_rate": 1e-06,
      "loss": 0.6912,
      "step": 60
    },
    {
      "epoch": 0.15954415954415954,
      "grad_norm": 0.7319419373524793,
      "learning_rate": 1e-06,
      "loss": 0.69,
      "step": 70
    },
    {
      "epoch": 0.18233618233618235,
      "grad_norm": 0.8692464858042348,
      "learning_rate": 1e-06,
      "loss": 0.6939,
      "step": 80
    },
    {
      "epoch": 0.20512820512820512,
      "grad_norm": 0.8198822622072399,
      "learning_rate": 1e-06,
      "loss": 0.6785,
      "step": 90
    },
    {
      "epoch": 0.22792022792022792,
      "grad_norm": 0.8964777538631004,
      "learning_rate": 1e-06,
      "loss": 0.6787,
      "step": 100
    },
    {
      "epoch": 0.25071225071225073,
      "grad_norm": 0.7914609606158064,
      "learning_rate": 1e-06,
      "loss": 0.6685,
      "step": 110
    },
    {
      "epoch": 0.27350427350427353,
      "grad_norm": 0.8495396983829627,
      "learning_rate": 1e-06,
      "loss": 0.6757,
      "step": 120
    },
    {
      "epoch": 0.2962962962962963,
      "grad_norm": 0.8256965578907156,
      "learning_rate": 1e-06,
      "loss": 0.6753,
      "step": 130
    },
    {
      "epoch": 0.3190883190883191,
      "grad_norm": 0.915388593142506,
      "learning_rate": 1e-06,
      "loss": 0.6767,
      "step": 140
    },
    {
      "epoch": 0.3418803418803419,
      "grad_norm": 0.7988576493690989,
      "learning_rate": 1e-06,
      "loss": 0.6681,
      "step": 150
    },
    {
      "epoch": 0.3646723646723647,
      "grad_norm": 0.7915015172676066,
      "learning_rate": 1e-06,
      "loss": 0.6715,
      "step": 160
    },
    {
      "epoch": 0.38746438746438744,
      "grad_norm": 0.8679944470731276,
      "learning_rate": 1e-06,
      "loss": 0.6634,
      "step": 170
    },
    {
      "epoch": 0.41025641025641024,
      "grad_norm": 0.7684645511702434,
      "learning_rate": 1e-06,
      "loss": 0.6677,
      "step": 180
    },
    {
      "epoch": 0.43304843304843305,
      "grad_norm": 0.8057726087504354,
      "learning_rate": 1e-06,
      "loss": 0.6591,
      "step": 190
    },
    {
      "epoch": 0.45584045584045585,
      "grad_norm": 0.8689057340663543,
      "learning_rate": 1e-06,
      "loss": 0.6666,
      "step": 200
    },
    {
      "epoch": 0.47863247863247865,
      "grad_norm": 0.7765639335225907,
      "learning_rate": 1e-06,
      "loss": 0.6618,
      "step": 210
    },
    {
      "epoch": 0.5014245014245015,
      "grad_norm": 0.7249039352079137,
      "learning_rate": 1e-06,
      "loss": 0.6645,
      "step": 220
    },
    {
      "epoch": 0.5242165242165242,
      "grad_norm": 0.7355294224095776,
      "learning_rate": 1e-06,
      "loss": 0.6587,
      "step": 230
    },
    {
      "epoch": 0.5470085470085471,
      "grad_norm": 0.6734465272392729,
      "learning_rate": 1e-06,
      "loss": 0.6538,
      "step": 240
    },
    {
      "epoch": 0.5698005698005698,
      "grad_norm": 0.6892725163579891,
      "learning_rate": 1e-06,
      "loss": 0.6582,
      "step": 250
    },
    {
      "epoch": 0.5925925925925926,
      "grad_norm": 0.6159757521740276,
      "learning_rate": 1e-06,
      "loss": 0.6558,
      "step": 260
    },
    {
      "epoch": 0.6153846153846154,
      "grad_norm": 0.595301294273934,
      "learning_rate": 1e-06,
      "loss": 0.6522,
      "step": 270
    },
    {
      "epoch": 0.6381766381766382,
      "grad_norm": 0.637130309442756,
      "learning_rate": 1e-06,
      "loss": 0.6626,
      "step": 280
    },
    {
      "epoch": 0.6609686609686609,
      "grad_norm": 0.5622146617139986,
      "learning_rate": 1e-06,
      "loss": 0.6506,
      "step": 290
    },
    {
      "epoch": 0.6837606837606838,
      "grad_norm": 0.515324053794224,
      "learning_rate": 1e-06,
      "loss": 0.6482,
      "step": 300
    },
    {
      "epoch": 0.7065527065527065,
      "grad_norm": 0.47443529295452097,
      "learning_rate": 1e-06,
      "loss": 0.6476,
      "step": 310
    },
    {
      "epoch": 0.7293447293447294,
      "grad_norm": 0.4574412781585292,
      "learning_rate": 1e-06,
      "loss": 0.6478,
      "step": 320
    },
    {
      "epoch": 0.7521367521367521,
      "grad_norm": 0.419400771069629,
      "learning_rate": 1e-06,
      "loss": 0.6443,
      "step": 330
    },
    {
      "epoch": 0.7749287749287749,
      "grad_norm": 0.4144316263340871,
      "learning_rate": 1e-06,
      "loss": 0.6499,
      "step": 340
    },
    {
      "epoch": 0.7977207977207977,
      "grad_norm": 0.3876622494990688,
      "learning_rate": 1e-06,
      "loss": 0.6572,
      "step": 350
    },
    {
      "epoch": 0.8205128205128205,
      "grad_norm": 0.39088107290030016,
      "learning_rate": 1e-06,
      "loss": 0.6488,
      "step": 360
    },
    {
      "epoch": 0.8433048433048433,
      "grad_norm": 0.39095214681946544,
      "learning_rate": 1e-06,
      "loss": 0.6423,
      "step": 370
    },
    {
      "epoch": 0.8660968660968661,
      "grad_norm": 0.37229603895098445,
      "learning_rate": 1e-06,
      "loss": 0.6544,
      "step": 380
    },
    {
      "epoch": 0.8888888888888888,
      "grad_norm": 0.33560492196360237,
      "learning_rate": 1e-06,
      "loss": 0.6529,
      "step": 390
    },
    {
      "epoch": 0.9116809116809117,
      "grad_norm": 0.3304669424027911,
      "learning_rate": 1e-06,
      "loss": 0.6564,
      "step": 400
    },
    {
      "epoch": 0.9344729344729344,
      "grad_norm": 0.306301682598447,
      "learning_rate": 1e-06,
      "loss": 0.6524,
      "step": 410
    },
    {
      "epoch": 0.9572649572649573,
      "grad_norm": 0.32339107821315766,
      "learning_rate": 1e-06,
      "loss": 0.639,
      "step": 420
    },
    {
      "epoch": 0.98005698005698,
      "grad_norm": 0.3270163954756601,
      "learning_rate": 1e-06,
      "loss": 0.6524,
      "step": 430
    },
    {
      "epoch": 0.9982905982905983,
      "eval_loss": 0.6427425146102905,
      "eval_runtime": 441.3018,
      "eval_samples_per_second": 26.791,
      "eval_steps_per_second": 0.419,
      "step": 438
    },
    {
      "epoch": 1.0034188034188034,
      "grad_norm": 0.31473038315707025,
      "learning_rate": 1e-06,
      "loss": 0.6687,
      "step": 440
    },
    {
      "epoch": 1.0262108262108263,
      "grad_norm": 0.3102379131144829,
      "learning_rate": 1e-06,
      "loss": 0.6408,
      "step": 450
    },
    {
      "epoch": 1.049002849002849,
      "grad_norm": 0.33300198859690955,
      "learning_rate": 1e-06,
      "loss": 0.6342,
      "step": 460
    },
    {
      "epoch": 1.0717948717948718,
      "grad_norm": 0.3105373509069977,
      "learning_rate": 1e-06,
      "loss": 0.6354,
      "step": 470
    },
    {
      "epoch": 1.0945868945868946,
      "grad_norm": 0.3034997057005275,
      "learning_rate": 1e-06,
      "loss": 0.6424,
      "step": 480
    },
    {
      "epoch": 1.1173789173789175,
      "grad_norm": 0.3190915396354621,
      "learning_rate": 1e-06,
      "loss": 0.6475,
      "step": 490
    },
    {
      "epoch": 1.1401709401709401,
      "grad_norm": 0.2912727676817975,
      "learning_rate": 1e-06,
      "loss": 0.6368,
      "step": 500
    },
    {
      "epoch": 1.162962962962963,
      "grad_norm": 0.30642725693588346,
      "learning_rate": 1e-06,
      "loss": 0.6342,
      "step": 510
    },
    {
      "epoch": 1.1857549857549858,
      "grad_norm": 0.3084602255916208,
      "learning_rate": 1e-06,
      "loss": 0.6397,
      "step": 520
    },
    {
      "epoch": 1.2085470085470085,
      "grad_norm": 0.29671604257938206,
      "learning_rate": 1e-06,
      "loss": 0.6321,
      "step": 530
    },
    {
      "epoch": 1.2313390313390313,
      "grad_norm": 0.2964976735469402,
      "learning_rate": 1e-06,
      "loss": 0.6426,
      "step": 540
    },
    {
      "epoch": 1.2541310541310542,
      "grad_norm": 0.2879899726201825,
      "learning_rate": 1e-06,
      "loss": 0.6389,
      "step": 550
    },
    {
      "epoch": 1.2769230769230768,
      "grad_norm": 0.28479549380394836,
      "learning_rate": 1e-06,
      "loss": 0.6402,
      "step": 560
    },
    {
      "epoch": 1.2997150997150997,
      "grad_norm": 0.2944407996223175,
      "learning_rate": 1e-06,
      "loss": 0.6336,
      "step": 570
    },
    {
      "epoch": 1.3225071225071225,
      "grad_norm": 0.277107161761051,
      "learning_rate": 1e-06,
      "loss": 0.6324,
      "step": 580
    },
    {
      "epoch": 1.3452991452991454,
      "grad_norm": 0.28528259113569654,
      "learning_rate": 1e-06,
      "loss": 0.632,
      "step": 590
    },
    {
      "epoch": 1.368091168091168,
      "grad_norm": 0.29174620128895756,
      "learning_rate": 1e-06,
      "loss": 0.6375,
      "step": 600
    },
    {
      "epoch": 1.390883190883191,
      "grad_norm": 0.2939947198692204,
      "learning_rate": 1e-06,
      "loss": 0.6375,
      "step": 610
    },
    {
      "epoch": 1.4136752136752135,
      "grad_norm": 0.28336925542335817,
      "learning_rate": 1e-06,
      "loss": 0.6397,
      "step": 620
    },
    {
      "epoch": 1.4364672364672364,
      "grad_norm": 0.2857142245692852,
      "learning_rate": 1e-06,
      "loss": 0.6376,
      "step": 630
    },
    {
      "epoch": 1.4592592592592593,
      "grad_norm": 0.2992594870554089,
      "learning_rate": 1e-06,
      "loss": 0.6325,
      "step": 640
    },
    {
      "epoch": 1.4820512820512821,
      "grad_norm": 0.3054173141100281,
      "learning_rate": 1e-06,
      "loss": 0.6301,
      "step": 650
    },
    {
      "epoch": 1.504843304843305,
      "grad_norm": 0.28115769943000585,
      "learning_rate": 1e-06,
      "loss": 0.6296,
      "step": 660
    },
    {
      "epoch": 1.5276353276353276,
      "grad_norm": 0.30107960985781584,
      "learning_rate": 1e-06,
      "loss": 0.6377,
      "step": 670
    },
    {
      "epoch": 1.5504273504273505,
      "grad_norm": 0.2703569722996903,
      "learning_rate": 1e-06,
      "loss": 0.6256,
      "step": 680
    },
    {
      "epoch": 1.573219373219373,
      "grad_norm": 0.3054477613186332,
      "learning_rate": 1e-06,
      "loss": 0.6298,
      "step": 690
    },
    {
      "epoch": 1.596011396011396,
      "grad_norm": 0.2772611617069955,
      "learning_rate": 1e-06,
      "loss": 0.6453,
      "step": 700
    },
    {
      "epoch": 1.6188034188034188,
      "grad_norm": 0.28420745248005985,
      "learning_rate": 1e-06,
      "loss": 0.632,
      "step": 710
    },
    {
      "epoch": 1.6415954415954417,
      "grad_norm": 0.2873599952500497,
      "learning_rate": 1e-06,
      "loss": 0.6368,
      "step": 720
    },
    {
      "epoch": 1.6643874643874645,
      "grad_norm": 0.2819204389557453,
      "learning_rate": 1e-06,
      "loss": 0.6294,
      "step": 730
    },
    {
      "epoch": 1.6871794871794872,
      "grad_norm": 0.28873352605213115,
      "learning_rate": 1e-06,
      "loss": 0.6305,
      "step": 740
    },
    {
      "epoch": 1.7099715099715098,
      "grad_norm": 0.26964085910780405,
      "learning_rate": 1e-06,
      "loss": 0.6275,
      "step": 750
    },
    {
      "epoch": 1.7327635327635327,
      "grad_norm": 0.30955521675230674,
      "learning_rate": 1e-06,
      "loss": 0.632,
      "step": 760
    },
    {
      "epoch": 1.7555555555555555,
      "grad_norm": 0.27202688381535234,
      "learning_rate": 1e-06,
      "loss": 0.628,
      "step": 770
    },
    {
      "epoch": 1.7783475783475784,
      "grad_norm": 0.27319532588993084,
      "learning_rate": 1e-06,
      "loss": 0.6352,
      "step": 780
    },
    {
      "epoch": 1.8011396011396013,
      "grad_norm": 0.2938366249126102,
      "learning_rate": 1e-06,
      "loss": 0.6323,
      "step": 790
    },
    {
      "epoch": 1.823931623931624,
      "grad_norm": 0.27504961448122345,
      "learning_rate": 1e-06,
      "loss": 0.6187,
      "step": 800
    },
    {
      "epoch": 1.8467236467236468,
      "grad_norm": 0.2893220159730567,
      "learning_rate": 1e-06,
      "loss": 0.6268,
      "step": 810
    },
    {
      "epoch": 1.8695156695156694,
      "grad_norm": 0.27897704295152365,
      "learning_rate": 1e-06,
      "loss": 0.6245,
      "step": 820
    },
    {
      "epoch": 1.8923076923076922,
      "grad_norm": 0.27613535391427213,
      "learning_rate": 1e-06,
      "loss": 0.6286,
      "step": 830
    },
    {
      "epoch": 1.915099715099715,
      "grad_norm": 0.3019988870606758,
      "learning_rate": 1e-06,
      "loss": 0.633,
      "step": 840
    },
    {
      "epoch": 1.937891737891738,
      "grad_norm": 0.29419466829470303,
      "learning_rate": 1e-06,
      "loss": 0.6308,
      "step": 850
    },
    {
      "epoch": 1.9606837606837608,
      "grad_norm": 0.28238349512023986,
      "learning_rate": 1e-06,
      "loss": 0.6322,
      "step": 860
    },
    {
      "epoch": 1.9834757834757835,
      "grad_norm": 0.31073538433094205,
      "learning_rate": 1e-06,
      "loss": 0.6297,
      "step": 870
    },
    {
      "epoch": 1.9994301994301993,
      "eval_loss": 0.6338208317756653,
      "eval_runtime": 443.4647,
      "eval_samples_per_second": 26.661,
      "eval_steps_per_second": 0.417,
      "step": 877
    },
    {
      "epoch": 2.006837606837607,
      "grad_norm": 0.26674695581365887,
      "learning_rate": 1e-06,
      "loss": 0.6567,
      "step": 880
    },
    {
      "epoch": 2.0296296296296297,
      "grad_norm": 0.2843780145864919,
      "learning_rate": 1e-06,
      "loss": 0.6093,
      "step": 890
    },
    {
      "epoch": 2.0524216524216525,
      "grad_norm": 0.2908163022995648,
      "learning_rate": 1e-06,
      "loss": 0.6259,
      "step": 900
    },
    {
      "epoch": 2.0752136752136754,
      "grad_norm": 0.2887371735906474,
      "learning_rate": 1e-06,
      "loss": 0.6235,
      "step": 910
    },
    {
      "epoch": 2.098005698005698,
      "grad_norm": 0.27747683694165937,
      "learning_rate": 1e-06,
      "loss": 0.6269,
      "step": 920
    },
    {
      "epoch": 2.1207977207977207,
      "grad_norm": 0.2772181529211208,
      "learning_rate": 1e-06,
      "loss": 0.6239,
      "step": 930
    },
    {
      "epoch": 2.1435897435897435,
      "grad_norm": 0.2855833901767544,
      "learning_rate": 1e-06,
      "loss": 0.6184,
      "step": 940
    },
    {
      "epoch": 2.1663817663817664,
      "grad_norm": 0.274366139495245,
      "learning_rate": 1e-06,
      "loss": 0.6196,
      "step": 950
    },
    {
      "epoch": 2.1891737891737892,
      "grad_norm": 0.2792807181801054,
      "learning_rate": 1e-06,
      "loss": 0.6245,
      "step": 960
    },
    {
      "epoch": 2.211965811965812,
      "grad_norm": 0.2976248839766054,
      "learning_rate": 1e-06,
      "loss": 0.6233,
      "step": 970
    },
    {
      "epoch": 2.234757834757835,
      "grad_norm": 0.30462982257877474,
      "learning_rate": 1e-06,
      "loss": 0.6119,
      "step": 980
    },
    {
      "epoch": 2.2575498575498574,
      "grad_norm": 0.28479471547789814,
      "learning_rate": 1e-06,
      "loss": 0.6209,
      "step": 990
    },
    {
      "epoch": 2.2803418803418802,
      "grad_norm": 0.2730705018832951,
      "learning_rate": 1e-06,
      "loss": 0.6211,
      "step": 1000
    },
    {
      "epoch": 2.303133903133903,
      "grad_norm": 0.2844791227372435,
      "learning_rate": 1e-06,
      "loss": 0.6183,
      "step": 1010
    },
    {
      "epoch": 2.325925925925926,
      "grad_norm": 0.30150544154410436,
      "learning_rate": 1e-06,
      "loss": 0.6216,
      "step": 1020
    },
    {
      "epoch": 2.348717948717949,
      "grad_norm": 0.28427861103469904,
      "learning_rate": 1e-06,
      "loss": 0.6233,
      "step": 1030
    },
    {
      "epoch": 2.3715099715099717,
      "grad_norm": 0.28141354085428233,
      "learning_rate": 1e-06,
      "loss": 0.6201,
      "step": 1040
    },
    {
      "epoch": 2.394301994301994,
      "grad_norm": 0.3108531442713316,
      "learning_rate": 1e-06,
      "loss": 0.6195,
      "step": 1050
    },
    {
      "epoch": 2.417094017094017,
      "grad_norm": 0.27759323128287305,
      "learning_rate": 1e-06,
      "loss": 0.6284,
      "step": 1060
    },
    {
      "epoch": 2.43988603988604,
      "grad_norm": 0.2723677631371072,
      "learning_rate": 1e-06,
      "loss": 0.6316,
      "step": 1070
    },
    {
      "epoch": 2.4626780626780627,
      "grad_norm": 0.2727344470936269,
      "learning_rate": 1e-06,
      "loss": 0.6264,
      "step": 1080
    },
    {
      "epoch": 2.4854700854700855,
      "grad_norm": 0.27628824767381605,
      "learning_rate": 1e-06,
      "loss": 0.6233,
      "step": 1090
    },
    {
      "epoch": 2.5082621082621084,
      "grad_norm": 0.30889885857224914,
      "learning_rate": 1e-06,
      "loss": 0.6307,
      "step": 1100
    },
    {
      "epoch": 2.5310541310541312,
      "grad_norm": 0.2777242491983728,
      "learning_rate": 1e-06,
      "loss": 0.6229,
      "step": 1110
    },
    {
      "epoch": 2.5538461538461537,
      "grad_norm": 0.2738888791962237,
      "learning_rate": 1e-06,
      "loss": 0.621,
      "step": 1120
    },
    {
      "epoch": 2.5766381766381765,
      "grad_norm": 0.28114438593954244,
      "learning_rate": 1e-06,
      "loss": 0.62,
      "step": 1130
    },
    {
      "epoch": 2.5994301994301994,
      "grad_norm": 0.27193085676704964,
      "learning_rate": 1e-06,
      "loss": 0.6197,
      "step": 1140
    },
    {
      "epoch": 2.6222222222222222,
      "grad_norm": 0.29287230344409826,
      "learning_rate": 1e-06,
      "loss": 0.6198,
      "step": 1150
    },
    {
      "epoch": 2.645014245014245,
      "grad_norm": 0.28077690045908144,
      "learning_rate": 1e-06,
      "loss": 0.6202,
      "step": 1160
    },
    {
      "epoch": 2.667806267806268,
      "grad_norm": 0.2743284835841236,
      "learning_rate": 1e-06,
      "loss": 0.6238,
      "step": 1170
    },
    {
      "epoch": 2.690598290598291,
      "grad_norm": 0.27477566990307356,
      "learning_rate": 1e-06,
      "loss": 0.6185,
      "step": 1180
    },
    {
      "epoch": 2.7133903133903132,
      "grad_norm": 0.2713788574187782,
      "learning_rate": 1e-06,
      "loss": 0.6191,
      "step": 1190
    },
    {
      "epoch": 2.736182336182336,
      "grad_norm": 0.27862320606665736,
      "learning_rate": 1e-06,
      "loss": 0.6227,
      "step": 1200
    },
    {
      "epoch": 2.758974358974359,
      "grad_norm": 0.27722113502385054,
      "learning_rate": 1e-06,
      "loss": 0.6259,
      "step": 1210
    },
    {
      "epoch": 2.781766381766382,
      "grad_norm": 0.27383337175966405,
      "learning_rate": 1e-06,
      "loss": 0.6152,
      "step": 1220
    },
    {
      "epoch": 2.8045584045584047,
      "grad_norm": 0.2698800509327578,
      "learning_rate": 1e-06,
      "loss": 0.6136,
      "step": 1230
    },
    {
      "epoch": 2.827350427350427,
      "grad_norm": 0.2921901437125658,
      "learning_rate": 1e-06,
      "loss": 0.6293,
      "step": 1240
    },
    {
      "epoch": 2.8501424501424504,
      "grad_norm": 0.28309763294075,
      "learning_rate": 1e-06,
      "loss": 0.6258,
      "step": 1250
    },
    {
      "epoch": 2.872934472934473,
      "grad_norm": 0.29008135704032817,
      "learning_rate": 1e-06,
      "loss": 0.6225,
      "step": 1260
    },
    {
      "epoch": 2.8957264957264957,
      "grad_norm": 0.29022489353027253,
      "learning_rate": 1e-06,
      "loss": 0.6233,
      "step": 1270
    },
    {
      "epoch": 2.9185185185185185,
      "grad_norm": 0.28365997860783704,
      "learning_rate": 1e-06,
      "loss": 0.6132,
      "step": 1280
    },
    {
      "epoch": 2.9413105413105414,
      "grad_norm": 0.26981910121457864,
      "learning_rate": 1e-06,
      "loss": 0.6129,
      "step": 1290
    },
    {
      "epoch": 2.9641025641025642,
      "grad_norm": 0.29493722207739875,
      "learning_rate": 1e-06,
      "loss": 0.6144,
      "step": 1300
    },
    {
      "epoch": 2.9868945868945866,
      "grad_norm": 0.27494371344536095,
      "learning_rate": 1e-06,
      "loss": 0.625,
      "step": 1310
    },
    {
      "epoch": 2.996011396011396,
      "eval_loss": 0.6290851831436157,
      "eval_runtime": 442.9565,
      "eval_samples_per_second": 26.691,
      "eval_steps_per_second": 0.418,
      "step": 1314
    },
    {
      "epoch": 2.996011396011396,
      "step": 1314,
      "total_flos": 2755219238682624.0,
      "train_loss": 0.642158996931857,
      "train_runtime": 70520.3628,
      "train_samples_per_second": 9.556,
      "train_steps_per_second": 0.019
    }
  ],
  "logging_steps": 10,
  "max_steps": 1314,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2755219238682624.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}
|
|