{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.996011396011396,
  "eval_steps": 500,
  "global_step": 1314,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.022792022792022793,
      "grad_norm": 2.864508008766109,
      "learning_rate": 2e-06,
      "loss": 0.779,
      "step": 10
    },
    {
      "epoch": 0.045584045584045586,
      "grad_norm": 0.9078935811597736,
      "learning_rate": 2e-06,
      "loss": 0.7215,
      "step": 20
    },
    {
      "epoch": 0.06837606837606838,
      "grad_norm": 0.9591168040971848,
      "learning_rate": 2e-06,
      "loss": 0.6975,
      "step": 30
    },
    {
      "epoch": 0.09116809116809117,
      "grad_norm": 0.8767413401192973,
      "learning_rate": 2e-06,
      "loss": 0.6978,
      "step": 40
    },
    {
      "epoch": 0.11396011396011396,
      "grad_norm": 0.8545665855899989,
      "learning_rate": 2e-06,
      "loss": 0.6958,
      "step": 50
    },
    {
      "epoch": 0.13675213675213677,
      "grad_norm": 0.8280986731912829,
      "learning_rate": 2e-06,
      "loss": 0.677,
      "step": 60
    },
    {
      "epoch": 0.15954415954415954,
      "grad_norm": 0.7119760771817697,
      "learning_rate": 2e-06,
      "loss": 0.6753,
      "step": 70
    },
    {
      "epoch": 0.18233618233618235,
      "grad_norm": 0.8493364147676038,
      "learning_rate": 2e-06,
      "loss": 0.6786,
      "step": 80
    },
    {
      "epoch": 0.20512820512820512,
      "grad_norm": 0.794423341017749,
      "learning_rate": 2e-06,
      "loss": 0.6624,
      "step": 90
    },
    {
      "epoch": 0.22792022792022792,
      "grad_norm": 0.8659153934871785,
      "learning_rate": 2e-06,
      "loss": 0.6618,
      "step": 100
    },
    {
      "epoch": 0.25071225071225073,
      "grad_norm": 0.7487650632319812,
      "learning_rate": 2e-06,
      "loss": 0.651,
      "step": 110
    },
    {
      "epoch": 0.27350427350427353,
      "grad_norm": 0.7717117299400736,
      "learning_rate": 2e-06,
      "loss": 0.6575,
      "step": 120
    },
    {
      "epoch": 0.2962962962962963,
      "grad_norm": 0.7004524658993033,
      "learning_rate": 2e-06,
      "loss": 0.6566,
      "step": 130
    },
    {
      "epoch": 0.3190883190883191,
      "grad_norm": 0.6752858564876342,
      "learning_rate": 2e-06,
      "loss": 0.6573,
      "step": 140
    },
    {
      "epoch": 0.3418803418803419,
      "grad_norm": 0.5562623802865255,
      "learning_rate": 2e-06,
      "loss": 0.6485,
      "step": 150
    },
    {
      "epoch": 0.3646723646723647,
      "grad_norm": 0.4789275636440992,
      "learning_rate": 2e-06,
      "loss": 0.6527,
      "step": 160
    },
    {
      "epoch": 0.38746438746438744,
      "grad_norm": 0.4387647670962861,
      "learning_rate": 2e-06,
      "loss": 0.644,
      "step": 170
    },
    {
      "epoch": 0.41025641025641024,
      "grad_norm": 0.3651502046396496,
      "learning_rate": 2e-06,
      "loss": 0.6491,
      "step": 180
    },
    {
      "epoch": 0.43304843304843305,
      "grad_norm": 0.3587478512482826,
      "learning_rate": 2e-06,
      "loss": 0.6412,
      "step": 190
    },
    {
      "epoch": 0.45584045584045585,
      "grad_norm": 0.35057238218897174,
      "learning_rate": 2e-06,
      "loss": 0.6492,
      "step": 200
    },
    {
      "epoch": 0.47863247863247865,
      "grad_norm": 0.316969119781051,
      "learning_rate": 2e-06,
      "loss": 0.6454,
      "step": 210
    },
    {
      "epoch": 0.5014245014245015,
      "grad_norm": 0.33352881670277756,
      "learning_rate": 2e-06,
      "loss": 0.649,
      "step": 220
    },
    {
      "epoch": 0.5242165242165242,
      "grad_norm": 0.33234583103322946,
      "learning_rate": 2e-06,
      "loss": 0.6443,
      "step": 230
    },
    {
      "epoch": 0.5470085470085471,
      "grad_norm": 0.29765701008940304,
      "learning_rate": 2e-06,
      "loss": 0.6402,
      "step": 240
    },
    {
      "epoch": 0.5698005698005698,
      "grad_norm": 0.3093961084707375,
      "learning_rate": 2e-06,
      "loss": 0.6451,
      "step": 250
    },
    {
      "epoch": 0.5925925925925926,
      "grad_norm": 0.3170813416727422,
      "learning_rate": 2e-06,
      "loss": 0.6433,
      "step": 260
    },
    {
      "epoch": 0.6153846153846154,
      "grad_norm": 0.2882757041299046,
      "learning_rate": 2e-06,
      "loss": 0.6404,
      "step": 270
    },
    {
      "epoch": 0.6381766381766382,
      "grad_norm": 0.3081992136303576,
      "learning_rate": 2e-06,
      "loss": 0.6512,
      "step": 280
    },
    {
      "epoch": 0.6609686609686609,
      "grad_norm": 0.3016510334414117,
      "learning_rate": 2e-06,
      "loss": 0.6398,
      "step": 290
    },
    {
      "epoch": 0.6837606837606838,
      "grad_norm": 0.29654738723801505,
      "learning_rate": 2e-06,
      "loss": 0.6379,
      "step": 300
    },
    {
      "epoch": 0.7065527065527065,
      "grad_norm": 0.2863099976470917,
      "learning_rate": 2e-06,
      "loss": 0.6376,
      "step": 310
    },
    {
      "epoch": 0.7293447293447294,
      "grad_norm": 0.3049298985658395,
      "learning_rate": 2e-06,
      "loss": 0.6381,
      "step": 320
    },
    {
      "epoch": 0.7521367521367521,
      "grad_norm": 0.29920289611631834,
      "learning_rate": 2e-06,
      "loss": 0.6349,
      "step": 330
    },
    {
      "epoch": 0.7749287749287749,
      "grad_norm": 0.28462260835565456,
      "learning_rate": 2e-06,
      "loss": 0.6407,
      "step": 340
    },
    {
      "epoch": 0.7977207977207977,
      "grad_norm": 0.31005901743663883,
      "learning_rate": 2e-06,
      "loss": 0.6479,
      "step": 350
    },
    {
      "epoch": 0.8205128205128205,
      "grad_norm": 0.29494933869606077,
      "learning_rate": 2e-06,
      "loss": 0.6396,
      "step": 360
    },
    {
      "epoch": 0.8433048433048433,
      "grad_norm": 0.33568791338440107,
      "learning_rate": 2e-06,
      "loss": 0.6334,
      "step": 370
    },
    {
      "epoch": 0.8660968660968661,
      "grad_norm": 0.3040287915954937,
      "learning_rate": 2e-06,
      "loss": 0.6456,
      "step": 380
    },
    {
      "epoch": 0.8888888888888888,
      "grad_norm": 0.28864498526565957,
      "learning_rate": 2e-06,
      "loss": 0.6442,
      "step": 390
    },
    {
      "epoch": 0.9116809116809117,
      "grad_norm": 0.293111101754528,
      "learning_rate": 2e-06,
      "loss": 0.6475,
      "step": 400
    },
    {
      "epoch": 0.9344729344729344,
      "grad_norm": 0.27087911354534383,
      "learning_rate": 2e-06,
      "loss": 0.6437,
      "step": 410
    },
    {
      "epoch": 0.9572649572649573,
      "grad_norm": 0.28437527266058993,
      "learning_rate": 2e-06,
      "loss": 0.6305,
      "step": 420
    },
    {
      "epoch": 0.98005698005698,
      "grad_norm": 0.3040557123080402,
      "learning_rate": 2e-06,
      "loss": 0.6437,
      "step": 430
    },
    {
      "epoch": 0.9982905982905983,
      "eval_loss": 0.634207010269165,
      "eval_runtime": 440.7496,
      "eval_samples_per_second": 26.825,
      "eval_steps_per_second": 0.42,
      "step": 438
    },
    {
      "epoch": 1.0034188034188034,
      "grad_norm": 0.30691349645034643,
      "learning_rate": 2e-06,
      "loss": 0.6589,
      "step": 440
    },
    {
      "epoch": 1.0262108262108263,
      "grad_norm": 0.29205865755410254,
      "learning_rate": 2e-06,
      "loss": 0.6262,
      "step": 450
    },
    {
      "epoch": 1.049002849002849,
      "grad_norm": 0.30392093777188467,
      "learning_rate": 2e-06,
      "loss": 0.6198,
      "step": 460
    },
    {
      "epoch": 1.0717948717948718,
      "grad_norm": 0.2828326519597703,
      "learning_rate": 2e-06,
      "loss": 0.6208,
      "step": 470
    },
    {
      "epoch": 1.0945868945868946,
      "grad_norm": 0.28162501980854593,
      "learning_rate": 2e-06,
      "loss": 0.6281,
      "step": 480
    },
    {
      "epoch": 1.1173789173789175,
      "grad_norm": 0.2939036352432325,
      "learning_rate": 2e-06,
      "loss": 0.6331,
      "step": 490
    },
    {
      "epoch": 1.1401709401709401,
      "grad_norm": 0.2763230121687841,
      "learning_rate": 2e-06,
      "loss": 0.6226,
      "step": 500
    },
    {
      "epoch": 1.162962962962963,
      "grad_norm": 0.2854865008524285,
      "learning_rate": 2e-06,
      "loss": 0.6201,
      "step": 510
    },
    {
      "epoch": 1.1857549857549858,
      "grad_norm": 0.2855187866369504,
      "learning_rate": 2e-06,
      "loss": 0.6257,
      "step": 520
    },
    {
      "epoch": 1.2085470085470085,
      "grad_norm": 0.28579952821101995,
      "learning_rate": 2e-06,
      "loss": 0.6183,
      "step": 530
    },
    {
      "epoch": 1.2313390313390313,
      "grad_norm": 0.31286133152047063,
      "learning_rate": 2e-06,
      "loss": 0.6287,
      "step": 540
    },
    {
      "epoch": 1.2541310541310542,
      "grad_norm": 0.27149328112131427,
      "learning_rate": 2e-06,
      "loss": 0.6252,
      "step": 550
    },
    {
      "epoch": 1.2769230769230768,
      "grad_norm": 0.28442376744542436,
      "learning_rate": 2e-06,
      "loss": 0.6264,
      "step": 560
    },
    {
      "epoch": 1.2997150997150997,
      "grad_norm": 0.27944308671830026,
      "learning_rate": 2e-06,
      "loss": 0.6201,
      "step": 570
    },
    {
      "epoch": 1.3225071225071225,
      "grad_norm": 0.2745193653156677,
      "learning_rate": 2e-06,
      "loss": 0.6187,
      "step": 580
    },
    {
      "epoch": 1.3452991452991454,
      "grad_norm": 0.28421785591908916,
      "learning_rate": 2e-06,
      "loss": 0.6183,
      "step": 590
    },
    {
      "epoch": 1.368091168091168,
      "grad_norm": 0.31615427394285256,
      "learning_rate": 2e-06,
      "loss": 0.6239,
      "step": 600
    },
    {
      "epoch": 1.390883190883191,
      "grad_norm": 0.2895588155201198,
      "learning_rate": 2e-06,
      "loss": 0.6239,
      "step": 610
    },
    {
      "epoch": 1.4136752136752135,
      "grad_norm": 0.2748857458986795,
      "learning_rate": 2e-06,
      "loss": 0.6263,
      "step": 620
    },
    {
      "epoch": 1.4364672364672364,
      "grad_norm": 0.29452945089403426,
      "learning_rate": 2e-06,
      "loss": 0.6242,
      "step": 630
    },
    {
      "epoch": 1.4592592592592593,
      "grad_norm": 0.29177001933506297,
      "learning_rate": 2e-06,
      "loss": 0.6191,
      "step": 640
    },
    {
      "epoch": 1.4820512820512821,
      "grad_norm": 0.3201830623263587,
      "learning_rate": 2e-06,
      "loss": 0.6167,
      "step": 650
    },
    {
      "epoch": 1.504843304843305,
      "grad_norm": 0.27587844668467365,
      "learning_rate": 2e-06,
      "loss": 0.6164,
      "step": 660
    },
    {
      "epoch": 1.5276353276353276,
      "grad_norm": 0.29954875579985213,
      "learning_rate": 2e-06,
      "loss": 0.6242,
      "step": 670
    },
    {
      "epoch": 1.5504273504273505,
      "grad_norm": 0.2731664505058775,
      "learning_rate": 2e-06,
      "loss": 0.6123,
      "step": 680
    },
    {
      "epoch": 1.573219373219373,
      "grad_norm": 0.31129992079722374,
      "learning_rate": 2e-06,
      "loss": 0.6164,
      "step": 690
    },
    {
      "epoch": 1.596011396011396,
      "grad_norm": 0.2772886517333268,
      "learning_rate": 2e-06,
      "loss": 0.632,
      "step": 700
    },
    {
      "epoch": 1.6188034188034188,
      "grad_norm": 0.2876973271112532,
      "learning_rate": 2e-06,
      "loss": 0.6188,
      "step": 710
    },
    {
      "epoch": 1.6415954415954417,
      "grad_norm": 0.30143918664865993,
      "learning_rate": 2e-06,
      "loss": 0.6235,
      "step": 720
    },
    {
      "epoch": 1.6643874643874645,
      "grad_norm": 0.2873047034444853,
      "learning_rate": 2e-06,
      "loss": 0.6163,
      "step": 730
    },
    {
      "epoch": 1.6871794871794872,
      "grad_norm": 0.28340344600988737,
      "learning_rate": 2e-06,
      "loss": 0.6173,
      "step": 740
    },
    {
      "epoch": 1.7099715099715098,
      "grad_norm": 0.2790689335967899,
      "learning_rate": 2e-06,
      "loss": 0.6145,
      "step": 750
    },
    {
      "epoch": 1.7327635327635327,
      "grad_norm": 0.28801564449039246,
      "learning_rate": 2e-06,
      "loss": 0.6188,
      "step": 760
    },
    {
      "epoch": 1.7555555555555555,
      "grad_norm": 0.28497713222065674,
      "learning_rate": 2e-06,
      "loss": 0.615,
      "step": 770
    },
    {
      "epoch": 1.7783475783475784,
      "grad_norm": 0.2853302771792979,
      "learning_rate": 2e-06,
      "loss": 0.6222,
      "step": 780
    },
    {
      "epoch": 1.8011396011396013,
      "grad_norm": 0.2825413400545415,
      "learning_rate": 2e-06,
      "loss": 0.6194,
      "step": 790
    },
    {
      "epoch": 1.823931623931624,
      "grad_norm": 0.2864348042850986,
      "learning_rate": 2e-06,
      "loss": 0.6059,
      "step": 800
    },
    {
      "epoch": 1.8467236467236468,
      "grad_norm": 0.29963979013514164,
      "learning_rate": 2e-06,
      "loss": 0.6139,
      "step": 810
    },
    {
      "epoch": 1.8695156695156694,
      "grad_norm": 0.27740105545017113,
      "learning_rate": 2e-06,
      "loss": 0.6116,
      "step": 820
    },
    {
      "epoch": 1.8923076923076922,
      "grad_norm": 0.27815438089274597,
      "learning_rate": 2e-06,
      "loss": 0.6156,
      "step": 830
    },
    {
      "epoch": 1.915099715099715,
      "grad_norm": 0.2873161616115944,
      "learning_rate": 2e-06,
      "loss": 0.6201,
      "step": 840
    },
    {
      "epoch": 1.937891737891738,
      "grad_norm": 0.2923191233832773,
      "learning_rate": 2e-06,
      "loss": 0.618,
      "step": 850
    },
    {
      "epoch": 1.9606837606837608,
      "grad_norm": 0.28243883384079016,
      "learning_rate": 2e-06,
      "loss": 0.6193,
      "step": 860
    },
    {
      "epoch": 1.9834757834757835,
      "grad_norm": 0.3024016197121533,
      "learning_rate": 2e-06,
      "loss": 0.6168,
      "step": 870
    },
    {
      "epoch": 1.9994301994301993,
      "eval_loss": 0.6261253356933594,
      "eval_runtime": 441.2387,
      "eval_samples_per_second": 26.795,
      "eval_steps_per_second": 0.419,
      "step": 877
    },
    {
      "epoch": 2.006837606837607,
      "grad_norm": 0.2780767198866166,
      "learning_rate": 2e-06,
      "loss": 0.6416,
      "step": 880
    },
    {
      "epoch": 2.0296296296296297,
      "grad_norm": 0.28632050007528936,
      "learning_rate": 2e-06,
      "loss": 0.5906,
      "step": 890
    },
    {
      "epoch": 2.0524216524216525,
      "grad_norm": 0.28591330449055385,
      "learning_rate": 2e-06,
      "loss": 0.6068,
      "step": 900
    },
    {
      "epoch": 2.0752136752136754,
      "grad_norm": 0.29332676336619445,
      "learning_rate": 2e-06,
      "loss": 0.6046,
      "step": 910
    },
    {
      "epoch": 2.098005698005698,
      "grad_norm": 0.28668389343499623,
      "learning_rate": 2e-06,
      "loss": 0.6078,
      "step": 920
    },
    {
      "epoch": 2.1207977207977207,
      "grad_norm": 0.289239255821366,
      "learning_rate": 2e-06,
      "loss": 0.6052,
      "step": 930
    },
    {
      "epoch": 2.1435897435897435,
      "grad_norm": 0.282673762278497,
      "learning_rate": 2e-06,
      "loss": 0.5997,
      "step": 940
    },
    {
      "epoch": 2.1663817663817664,
      "grad_norm": 0.28582015763960644,
      "learning_rate": 2e-06,
      "loss": 0.6009,
      "step": 950
    },
    {
      "epoch": 2.1891737891737892,
      "grad_norm": 0.2802200672060606,
      "learning_rate": 2e-06,
      "loss": 0.6058,
      "step": 960
    },
    {
      "epoch": 2.211965811965812,
      "grad_norm": 0.29000853156799317,
      "learning_rate": 2e-06,
      "loss": 0.6048,
      "step": 970
    },
    {
      "epoch": 2.234757834757835,
      "grad_norm": 0.3095113688403279,
      "learning_rate": 2e-06,
      "loss": 0.5937,
      "step": 980
    },
    {
      "epoch": 2.2575498575498574,
      "grad_norm": 0.296958733510943,
      "learning_rate": 2e-06,
      "loss": 0.6028,
      "step": 990
    },
    {
      "epoch": 2.2803418803418802,
      "grad_norm": 0.279130743733046,
      "learning_rate": 2e-06,
      "loss": 0.6029,
      "step": 1000
    },
    {
      "epoch": 2.303133903133903,
      "grad_norm": 0.2829565255627152,
      "learning_rate": 2e-06,
      "loss": 0.6002,
      "step": 1010
    },
    {
      "epoch": 2.325925925925926,
      "grad_norm": 0.30774789195383456,
      "learning_rate": 2e-06,
      "loss": 0.6034,
      "step": 1020
    },
    {
      "epoch": 2.348717948717949,
      "grad_norm": 0.27543037768068174,
      "learning_rate": 2e-06,
      "loss": 0.6049,
      "step": 1030
    },
    {
      "epoch": 2.3715099715099717,
      "grad_norm": 0.2800183284208171,
      "learning_rate": 2e-06,
      "loss": 0.6021,
      "step": 1040
    },
    {
      "epoch": 2.394301994301994,
      "grad_norm": 0.3013281475062929,
      "learning_rate": 2e-06,
      "loss": 0.6015,
      "step": 1050
    },
    {
      "epoch": 2.417094017094017,
      "grad_norm": 0.2905972470777562,
      "learning_rate": 2e-06,
      "loss": 0.6102,
      "step": 1060
    },
    {
      "epoch": 2.43988603988604,
      "grad_norm": 0.286494804323914,
      "learning_rate": 2e-06,
      "loss": 0.6133,
      "step": 1070
    },
    {
      "epoch": 2.4626780626780627,
      "grad_norm": 0.276343757422485,
      "learning_rate": 2e-06,
      "loss": 0.6082,
      "step": 1080
    },
    {
      "epoch": 2.4854700854700855,
      "grad_norm": 0.2708146708371793,
      "learning_rate": 2e-06,
      "loss": 0.6053,
      "step": 1090
    },
    {
      "epoch": 2.5082621082621084,
      "grad_norm": 0.2888450487275576,
      "learning_rate": 2e-06,
      "loss": 0.6124,
      "step": 1100
    },
    {
      "epoch": 2.5310541310541312,
      "grad_norm": 0.2825228879995427,
      "learning_rate": 2e-06,
      "loss": 0.605,
      "step": 1110
    },
    {
      "epoch": 2.5538461538461537,
      "grad_norm": 0.2803594149293884,
      "learning_rate": 2e-06,
      "loss": 0.6032,
      "step": 1120
    },
    {
      "epoch": 2.5766381766381765,
      "grad_norm": 0.2857182498890343,
      "learning_rate": 2e-06,
      "loss": 0.6023,
      "step": 1130
    },
    {
      "epoch": 2.5994301994301994,
      "grad_norm": 0.28352170043467456,
      "learning_rate": 2e-06,
      "loss": 0.6019,
      "step": 1140
    },
    {
      "epoch": 2.6222222222222222,
      "grad_norm": 0.3138498081549944,
      "learning_rate": 2e-06,
      "loss": 0.6021,
      "step": 1150
    },
    {
      "epoch": 2.645014245014245,
      "grad_norm": 0.2879058037646963,
      "learning_rate": 2e-06,
      "loss": 0.6028,
      "step": 1160
    },
    {
      "epoch": 2.667806267806268,
      "grad_norm": 0.2771838146813842,
      "learning_rate": 2e-06,
      "loss": 0.6061,
      "step": 1170
    },
    {
      "epoch": 2.690598290598291,
      "grad_norm": 0.2783128898098319,
      "learning_rate": 2e-06,
      "loss": 0.6009,
      "step": 1180
    },
    {
      "epoch": 2.7133903133903132,
      "grad_norm": 0.2815742815524037,
      "learning_rate": 2e-06,
      "loss": 0.6018,
      "step": 1190
    },
    {
      "epoch": 2.736182336182336,
      "grad_norm": 0.2891453557368429,
      "learning_rate": 2e-06,
      "loss": 0.6051,
      "step": 1200
    },
    {
      "epoch": 2.758974358974359,
      "grad_norm": 0.29088353404285416,
      "learning_rate": 2e-06,
      "loss": 0.6085,
      "step": 1210
    },
    {
      "epoch": 2.781766381766382,
      "grad_norm": 0.28833617475662054,
      "learning_rate": 2e-06,
      "loss": 0.5979,
      "step": 1220
    },
    {
      "epoch": 2.8045584045584047,
      "grad_norm": 0.2853141216724547,
      "learning_rate": 2e-06,
      "loss": 0.5963,
      "step": 1230
    },
    {
      "epoch": 2.827350427350427,
      "grad_norm": 0.29954457976745724,
      "learning_rate": 2e-06,
      "loss": 0.6117,
      "step": 1240
    },
    {
      "epoch": 2.8501424501424504,
      "grad_norm": 0.2791597452969322,
      "learning_rate": 2e-06,
      "loss": 0.6083,
      "step": 1250
    },
    {
      "epoch": 2.872934472934473,
      "grad_norm": 0.295287854084783,
      "learning_rate": 2e-06,
      "loss": 0.6052,
      "step": 1260
    },
    {
      "epoch": 2.8957264957264957,
      "grad_norm": 0.3030851891787806,
      "learning_rate": 2e-06,
      "loss": 0.606,
      "step": 1270
    },
    {
      "epoch": 2.9185185185185185,
      "grad_norm": 0.29008501545155757,
      "learning_rate": 2e-06,
      "loss": 0.5959,
      "step": 1280
    },
    {
      "epoch": 2.9413105413105414,
      "grad_norm": 0.28886132459109937,
      "learning_rate": 2e-06,
      "loss": 0.5958,
      "step": 1290
    },
    {
      "epoch": 2.9641025641025642,
      "grad_norm": 0.2930192014893465,
      "learning_rate": 2e-06,
      "loss": 0.5971,
      "step": 1300
    },
    {
      "epoch": 2.9868945868945866,
      "grad_norm": 0.2931492064869206,
      "learning_rate": 2e-06,
      "loss": 0.6075,
      "step": 1310
    },
    {
      "epoch": 2.996011396011396,
      "eval_loss": 0.6223183870315552,
      "eval_runtime": 439.7512,
      "eval_samples_per_second": 26.886,
      "eval_steps_per_second": 0.421,
      "step": 1314
    },
    {
      "epoch": 2.996011396011396,
      "step": 1314,
      "total_flos": 2755219238682624.0,
      "train_loss": 0.6269598571495741,
      "train_runtime": 70419.7857,
      "train_samples_per_second": 9.569,
      "train_steps_per_second": 0.019
    }
  ],
  "logging_steps": 10,
  "max_steps": 1314,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2755219238682624.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}