{
  "best_global_step": 700,
  "best_metric": 0.7593940496444702,
  "best_model_checkpoint": "./thinker_output/07-08_multi_audio/checkpoint-700",
  "epoch": 2.692489053553385,
  "eval_steps": 50,
  "global_step": 1000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02694509936005389,
      "grad_norm": 7.9127708678466355,
      "learning_rate": 5e-05,
      "loss": 2.2253,
      "step": 10
    },
    {
      "epoch": 0.05389019872010778,
      "grad_norm": 2.129170820552183,
      "learning_rate": 4.954792043399639e-05,
      "loss": 1.4156,
      "step": 20
    },
    {
      "epoch": 0.08083529808016167,
      "grad_norm": 2.213231596266166,
      "learning_rate": 4.909584086799277e-05,
      "loss": 1.2292,
      "step": 30
    },
    {
      "epoch": 0.10778039744021556,
      "grad_norm": 1.9446166933638902,
      "learning_rate": 4.864376130198916e-05,
      "loss": 1.1526,
      "step": 40
    },
    {
      "epoch": 0.13472549680026946,
      "grad_norm": 1.8624996582053734,
      "learning_rate": 4.8191681735985535e-05,
      "loss": 1.1116,
      "step": 50
    },
    {
      "epoch": 0.13472549680026946,
      "eval_loss": 1.0801820755004883,
      "eval_runtime": 116.0,
      "eval_samples_per_second": 43.103,
      "eval_steps_per_second": 0.681,
      "step": 50
    },
    {
      "epoch": 0.16167059616032334,
      "grad_norm": 1.644929460056369,
      "learning_rate": 4.773960216998192e-05,
      "loss": 1.0736,
      "step": 60
    },
    {
      "epoch": 0.18861569552037724,
      "grad_norm": 1.9154194240377669,
      "learning_rate": 4.7287522603978304e-05,
      "loss": 1.0532,
      "step": 70
    },
    {
      "epoch": 0.21556079488043112,
      "grad_norm": 1.5827665298198546,
      "learning_rate": 4.683544303797468e-05,
      "loss": 1.0312,
      "step": 80
    },
    {
      "epoch": 0.24250589424048502,
      "grad_norm": 1.429127675663522,
      "learning_rate": 4.638336347197107e-05,
      "loss": 1.0133,
      "step": 90
    },
    {
      "epoch": 0.2694509936005389,
      "grad_norm": 1.4187637596535974,
      "learning_rate": 4.593128390596745e-05,
      "loss": 0.9944,
      "step": 100
    },
    {
      "epoch": 0.2694509936005389,
      "eval_loss": 0.987108588218689,
      "eval_runtime": 113.3326,
      "eval_samples_per_second": 44.118,
      "eval_steps_per_second": 0.697,
      "step": 100
    },
    {
      "epoch": 0.29639609296059277,
      "grad_norm": 1.6143534620181745,
      "learning_rate": 4.547920433996384e-05,
      "loss": 0.9861,
      "step": 110
    },
    {
      "epoch": 0.3233411923206467,
      "grad_norm": 1.470799422498714,
      "learning_rate": 4.5027124773960215e-05,
      "loss": 0.9813,
      "step": 120
    },
    {
      "epoch": 0.3502862916807006,
      "grad_norm": 1.5329678036861214,
      "learning_rate": 4.45750452079566e-05,
      "loss": 0.9725,
      "step": 130
    },
    {
      "epoch": 0.3772313910407545,
      "grad_norm": 1.5683570099493096,
      "learning_rate": 4.4122965641952984e-05,
      "loss": 0.9604,
      "step": 140
    },
    {
      "epoch": 0.40417649040080833,
      "grad_norm": 1.4886000727881106,
      "learning_rate": 4.367088607594937e-05,
      "loss": 0.9494,
      "step": 150
    },
    {
      "epoch": 0.40417649040080833,
      "eval_loss": 0.9371287226676941,
      "eval_runtime": 147.0071,
      "eval_samples_per_second": 34.012,
      "eval_steps_per_second": 0.537,
      "step": 150
    },
    {
      "epoch": 0.43112158976086223,
      "grad_norm": 1.7059179320901345,
      "learning_rate": 4.3218806509945754e-05,
      "loss": 0.9409,
      "step": 160
    },
    {
      "epoch": 0.45806668912091614,
      "grad_norm": 1.3194635069630314,
      "learning_rate": 4.276672694394214e-05,
      "loss": 0.9244,
      "step": 170
    },
    {
      "epoch": 0.48501178848097004,
      "grad_norm": 1.3060771848163875,
      "learning_rate": 4.2314647377938523e-05,
      "loss": 0.9157,
      "step": 180
    },
    {
      "epoch": 0.5119568878410239,
      "grad_norm": 1.4749915311735549,
      "learning_rate": 4.186256781193491e-05,
      "loss": 0.9119,
      "step": 190
    },
    {
      "epoch": 0.5389019872010778,
      "grad_norm": 1.4869922949493382,
      "learning_rate": 4.1410488245931286e-05,
      "loss": 0.913,
      "step": 200
    },
    {
      "epoch": 0.5389019872010778,
      "eval_loss": 0.907091498374939,
      "eval_runtime": 111.8122,
      "eval_samples_per_second": 44.718,
      "eval_steps_per_second": 0.707,
      "step": 200
    },
    {
      "epoch": 0.5658470865611317,
      "grad_norm": 1.4160894102096444,
      "learning_rate": 4.095840867992767e-05,
      "loss": 0.901,
      "step": 210
    },
    {
      "epoch": 0.5927921859211855,
      "grad_norm": 1.5792102697843888,
      "learning_rate": 4.050632911392405e-05,
      "loss": 0.89,
      "step": 220
    },
    {
      "epoch": 0.6197372852812395,
      "grad_norm": 1.2974993115890197,
      "learning_rate": 4.0054249547920434e-05,
      "loss": 0.8863,
      "step": 230
    },
    {
      "epoch": 0.6466823846412934,
      "grad_norm": 1.3514939321309911,
      "learning_rate": 3.960216998191682e-05,
      "loss": 0.8854,
      "step": 240
    },
    {
      "epoch": 0.6736274840013473,
      "grad_norm": 1.266918300985221,
      "learning_rate": 3.9150090415913203e-05,
      "loss": 0.8845,
      "step": 250
    },
    {
      "epoch": 0.6736274840013473,
      "eval_loss": 0.8746693134307861,
      "eval_runtime": 144.3762,
      "eval_samples_per_second": 34.632,
      "eval_steps_per_second": 0.547,
      "step": 250
    },
    {
      "epoch": 0.7005725833614012,
      "grad_norm": 1.3519572583606572,
      "learning_rate": 3.869801084990959e-05,
      "loss": 0.8721,
      "step": 260
    },
    {
      "epoch": 0.727517682721455,
      "grad_norm": 1.2840451675298208,
      "learning_rate": 3.8245931283905966e-05,
      "loss": 0.8736,
      "step": 270
    },
    {
      "epoch": 0.754462782081509,
      "grad_norm": 1.2178137913182443,
      "learning_rate": 3.779385171790235e-05,
      "loss": 0.8664,
      "step": 280
    },
    {
      "epoch": 0.7814078814415628,
      "grad_norm": 1.371387951800443,
      "learning_rate": 3.7341772151898736e-05,
      "loss": 0.8554,
      "step": 290
    },
    {
      "epoch": 0.8083529808016167,
      "grad_norm": 1.344734634165615,
      "learning_rate": 3.688969258589512e-05,
      "loss": 0.8638,
      "step": 300
    },
    {
      "epoch": 0.8083529808016167,
      "eval_loss": 0.8491566181182861,
      "eval_runtime": 146.4614,
      "eval_samples_per_second": 34.139,
      "eval_steps_per_second": 0.539,
      "step": 300
    },
    {
      "epoch": 0.8352980801616706,
      "grad_norm": 1.272788750854563,
      "learning_rate": 3.6437613019891505e-05,
      "loss": 0.8541,
      "step": 310
    },
    {
      "epoch": 0.8622431795217245,
      "grad_norm": 1.3792910383877415,
      "learning_rate": 3.598553345388789e-05,
      "loss": 0.8481,
      "step": 320
    },
    {
      "epoch": 0.8891882788817784,
      "grad_norm": 1.203782825434394,
      "learning_rate": 3.553345388788427e-05,
      "loss": 0.8376,
      "step": 330
    },
    {
      "epoch": 0.9161333782418323,
      "grad_norm": 1.2646961187289552,
      "learning_rate": 3.508137432188065e-05,
      "loss": 0.839,
      "step": 340
    },
    {
      "epoch": 0.9430784776018861,
      "grad_norm": 1.24393119289486,
      "learning_rate": 3.462929475587703e-05,
      "loss": 0.8381,
      "step": 350
    },
    {
      "epoch": 0.9430784776018861,
      "eval_loss": 0.8305084109306335,
      "eval_runtime": 135.5403,
      "eval_samples_per_second": 36.889,
      "eval_steps_per_second": 0.583,
      "step": 350
    },
    {
      "epoch": 0.9700235769619401,
      "grad_norm": 1.2087402616528558,
      "learning_rate": 3.4177215189873416e-05,
      "loss": 0.8264,
      "step": 360
    },
    {
      "epoch": 0.9969686763219939,
      "grad_norm": 1.2771110484442496,
      "learning_rate": 3.37251356238698e-05,
      "loss": 0.8242,
      "step": 370
    },
    {
      "epoch": 1.0215560794880432,
      "grad_norm": 1.332831610585688,
      "learning_rate": 3.3273056057866185e-05,
      "loss": 0.6297,
      "step": 380
    },
    {
      "epoch": 1.048501178848097,
      "grad_norm": 1.2917409109187712,
      "learning_rate": 3.282097649186257e-05,
      "loss": 0.6539,
      "step": 390
    },
    {
      "epoch": 1.0754462782081509,
      "grad_norm": 1.3245733128879162,
      "learning_rate": 3.2368896925858955e-05,
      "loss": 0.6544,
      "step": 400
    },
    {
      "epoch": 1.0754462782081509,
      "eval_loss": 0.8334468007087708,
      "eval_runtime": 148.2965,
      "eval_samples_per_second": 33.716,
      "eval_steps_per_second": 0.533,
      "step": 400
    },
    {
      "epoch": 1.1023913775682048,
      "grad_norm": 1.1724140281525666,
      "learning_rate": 3.191681735985534e-05,
      "loss": 0.6473,
      "step": 410
    },
    {
      "epoch": 1.1293364769282586,
      "grad_norm": 1.2278740780504742,
      "learning_rate": 3.146473779385172e-05,
      "loss": 0.6459,
      "step": 420
    },
    {
      "epoch": 1.1562815762883125,
      "grad_norm": 1.1783670358458123,
      "learning_rate": 3.10126582278481e-05,
      "loss": 0.6573,
      "step": 430
    },
    {
      "epoch": 1.1832266756483665,
      "grad_norm": 1.29897205539554,
      "learning_rate": 3.056057866184449e-05,
      "loss": 0.6548,
      "step": 440
    },
    {
      "epoch": 1.2101717750084204,
      "grad_norm": 1.1568119957121505,
      "learning_rate": 3.010849909584087e-05,
      "loss": 0.65,
      "step": 450
    },
    {
      "epoch": 1.2101717750084204,
      "eval_loss": 0.8229044675827026,
      "eval_runtime": 113.6433,
      "eval_samples_per_second": 43.997,
      "eval_steps_per_second": 0.695,
      "step": 450
    },
    {
      "epoch": 1.2371168743684742,
      "grad_norm": 1.3004916817149637,
      "learning_rate": 2.9656419529837253e-05,
      "loss": 0.6474,
      "step": 460
    },
    {
      "epoch": 1.2640619737285281,
      "grad_norm": 1.212521588358061,
      "learning_rate": 2.9204339963833638e-05,
      "loss": 0.6518,
      "step": 470
    },
    {
      "epoch": 1.291007073088582,
      "grad_norm": 1.2553077221366877,
      "learning_rate": 2.8752260397830023e-05,
      "loss": 0.6456,
      "step": 480
    },
    {
      "epoch": 1.3179521724486358,
      "grad_norm": 1.184218802614123,
      "learning_rate": 2.83001808318264e-05,
      "loss": 0.6546,
      "step": 490
    },
    {
      "epoch": 1.3448972718086898,
      "grad_norm": 1.136986753079325,
      "learning_rate": 2.7848101265822786e-05,
      "loss": 0.6415,
      "step": 500
    },
    {
      "epoch": 1.3448972718086898,
      "eval_loss": 0.8110851049423218,
      "eval_runtime": 137.5711,
      "eval_samples_per_second": 36.345,
      "eval_steps_per_second": 0.574,
      "step": 500
    },
    {
      "epoch": 1.3718423711687437,
      "grad_norm": 1.2143646330707367,
      "learning_rate": 2.7396021699819167e-05,
      "loss": 0.6558,
      "step": 510
    },
    {
      "epoch": 1.3987874705287977,
      "grad_norm": 1.265789637913618,
      "learning_rate": 2.6943942133815552e-05,
      "loss": 0.6505,
      "step": 520
    },
    {
      "epoch": 1.4257325698888514,
      "grad_norm": 1.2191031554639078,
      "learning_rate": 2.6491862567811937e-05,
      "loss": 0.6426,
      "step": 530
    },
    {
      "epoch": 1.4526776692489054,
      "grad_norm": 1.222440942937553,
      "learning_rate": 2.603978300180832e-05,
      "loss": 0.644,
      "step": 540
    },
    {
      "epoch": 1.4796227686089591,
      "grad_norm": 1.1495752286958087,
      "learning_rate": 2.5587703435804706e-05,
      "loss": 0.6415,
      "step": 550
    },
    {
      "epoch": 1.4796227686089591,
      "eval_loss": 0.7958658337593079,
      "eval_runtime": 142.3592,
      "eval_samples_per_second": 35.122,
      "eval_steps_per_second": 0.555,
      "step": 550
    },
    {
      "epoch": 1.506567867969013,
      "grad_norm": 1.2049482092443289,
      "learning_rate": 2.5135623869801084e-05,
      "loss": 0.6395,
      "step": 560
    },
    {
      "epoch": 1.533512967329067,
      "grad_norm": 1.1839061997290048,
      "learning_rate": 2.468354430379747e-05,
      "loss": 0.6441,
      "step": 570
    },
    {
      "epoch": 1.560458066689121,
      "grad_norm": 1.2057965805365276,
      "learning_rate": 2.423146473779385e-05,
      "loss": 0.6335,
      "step": 580
    },
    {
      "epoch": 1.5874031660491748,
      "grad_norm": 1.1650282316989717,
      "learning_rate": 2.3779385171790235e-05,
      "loss": 0.6339,
      "step": 590
    },
    {
      "epoch": 1.6143482654092287,
      "grad_norm": 1.153394411032144,
      "learning_rate": 2.332730560578662e-05,
      "loss": 0.6311,
      "step": 600
    },
    {
      "epoch": 1.6143482654092287,
      "eval_loss": 0.784841775894165,
      "eval_runtime": 153.2005,
      "eval_samples_per_second": 32.637,
      "eval_steps_per_second": 0.516,
      "step": 600
    },
    {
      "epoch": 1.6412933647692824,
      "grad_norm": 1.1963943501637924,
      "learning_rate": 2.2875226039783005e-05,
      "loss": 0.6267,
      "step": 610
    },
    {
      "epoch": 1.6682384641293364,
      "grad_norm": 1.093066884488607,
      "learning_rate": 2.2423146473779386e-05,
      "loss": 0.6289,
      "step": 620
    },
    {
      "epoch": 1.6951835634893904,
      "grad_norm": 1.115011570991967,
      "learning_rate": 2.197106690777577e-05,
      "loss": 0.6299,
      "step": 630
    },
    {
      "epoch": 1.7221286628494443,
      "grad_norm": 1.1700606931618611,
      "learning_rate": 2.1518987341772153e-05,
      "loss": 0.6233,
      "step": 640
    },
    {
      "epoch": 1.7490737622094983,
      "grad_norm": 1.1565551201360744,
      "learning_rate": 2.1066907775768534e-05,
      "loss": 0.624,
      "step": 650
    },
    {
      "epoch": 1.7490737622094983,
      "eval_loss": 0.7739421129226685,
      "eval_runtime": 148.2001,
      "eval_samples_per_second": 33.738,
      "eval_steps_per_second": 0.533,
      "step": 650
    },
    {
      "epoch": 1.776018861569552,
      "grad_norm": 1.2558394571278007,
      "learning_rate": 2.061482820976492e-05,
      "loss": 0.621,
      "step": 660
    },
    {
      "epoch": 1.802963960929606,
      "grad_norm": 1.1487412967519839,
      "learning_rate": 2.0162748643761304e-05,
      "loss": 0.6201,
      "step": 670
    },
    {
      "epoch": 1.8299090602896597,
      "grad_norm": 1.1633700130604714,
      "learning_rate": 1.971066907775769e-05,
      "loss": 0.6182,
      "step": 680
    },
    {
      "epoch": 1.8568541596497137,
      "grad_norm": 1.0892080498580619,
      "learning_rate": 1.925858951175407e-05,
      "loss": 0.6218,
      "step": 690
    },
    {
      "epoch": 1.8837992590097676,
      "grad_norm": 1.162193434275119,
      "learning_rate": 1.8806509945750454e-05,
      "loss": 0.612,
      "step": 700
    },
    {
      "epoch": 1.8837992590097676,
      "eval_loss": 0.7593940496444702,
      "eval_runtime": 113.5973,
      "eval_samples_per_second": 44.015,
      "eval_steps_per_second": 0.695,
      "step": 700
    },
    {
      "epoch": 1.9107443583698216,
      "grad_norm": 1.477614030615022,
      "learning_rate": 1.8354430379746836e-05,
      "loss": 0.4361,
      "step": 710
    },
    {
      "epoch": 1.9376894577298753,
      "grad_norm": 1.211596696990758,
      "learning_rate": 1.7902350813743217e-05,
      "loss": 0.4174,
      "step": 720
    },
    {
      "epoch": 1.9646345570899293,
      "grad_norm": 1.208841417920371,
      "learning_rate": 1.7450271247739602e-05,
      "loss": 0.4162,
      "step": 730
    },
    {
      "epoch": 1.991579656449983,
      "grad_norm": 1.2740412194613278,
      "learning_rate": 1.6998191681735987e-05,
      "loss": 0.4115,
      "step": 740
    },
    {
      "epoch": 2.0188615695520378,
      "grad_norm": 1.1897059830697447,
      "learning_rate": 1.654611211573237e-05,
      "loss": 0.4224,
      "step": 750
    },
    {
      "epoch": 2.0188615695520378,
      "eval_loss": 0.8485522270202637,
      "eval_runtime": 115.805,
      "eval_samples_per_second": 43.176,
      "eval_steps_per_second": 0.682,
      "step": 750
    },
    {
      "epoch": 2.0458066689120917,
      "grad_norm": 1.2120571201563564,
      "learning_rate": 1.6094032549728753e-05,
      "loss": 0.4192,
      "step": 760
    },
    {
      "epoch": 2.0727517682721457,
      "grad_norm": 1.2006852199531266,
      "learning_rate": 1.5641952983725134e-05,
      "loss": 0.4101,
      "step": 770
    },
    {
      "epoch": 2.099696867632199,
      "grad_norm": 1.2005979870584054,
      "learning_rate": 1.5189873417721521e-05,
      "loss": 0.4164,
      "step": 780
    },
    {
      "epoch": 2.126641966992253,
      "grad_norm": 1.148665043003728,
      "learning_rate": 1.4737793851717904e-05,
      "loss": 0.4194,
      "step": 790
    },
    {
      "epoch": 2.153587066352307,
      "grad_norm": 1.134303382163784,
      "learning_rate": 1.4285714285714285e-05,
      "loss": 0.4213,
      "step": 800
    },
    {
      "epoch": 2.153587066352307,
      "eval_loss": 0.838707447052002,
      "eval_runtime": 139.7297,
      "eval_samples_per_second": 35.783,
      "eval_steps_per_second": 0.565,
      "step": 800
    },
    {
      "epoch": 2.180532165712361,
      "grad_norm": 1.2301409422954543,
      "learning_rate": 1.383363471971067e-05,
      "loss": 0.4179,
      "step": 810
    },
    {
      "epoch": 2.207477265072415,
      "grad_norm": 1.1312377076286286,
      "learning_rate": 1.3381555153707053e-05,
      "loss": 0.4178,
      "step": 820
    },
    {
      "epoch": 2.234422364432469,
      "grad_norm": 1.2707527556350058,
      "learning_rate": 1.2929475587703435e-05,
      "loss": 0.4125,
      "step": 830
    },
    {
      "epoch": 2.2613674637925225,
      "grad_norm": 1.185885154215189,
      "learning_rate": 1.247739602169982e-05,
      "loss": 0.414,
      "step": 840
    },
    {
      "epoch": 2.2883125631525765,
      "grad_norm": 1.1012295953218187,
      "learning_rate": 1.2025316455696203e-05,
      "loss": 0.414,
      "step": 850
    },
    {
      "epoch": 2.2883125631525765,
      "eval_loss": 0.831937313079834,
      "eval_runtime": 144.9314,
      "eval_samples_per_second": 34.499,
      "eval_steps_per_second": 0.545,
      "step": 850
    },
    {
      "epoch": 2.3152576625126304,
      "grad_norm": 1.1388819607995708,
      "learning_rate": 1.1573236889692586e-05,
      "loss": 0.4151,
      "step": 860
    },
    {
      "epoch": 2.3422027618726844,
      "grad_norm": 1.2140213674018405,
      "learning_rate": 1.112115732368897e-05,
      "loss": 0.4159,
      "step": 870
    },
    {
      "epoch": 2.3691478612327384,
      "grad_norm": 1.1631774357185438,
      "learning_rate": 1.0669077757685354e-05,
      "loss": 0.4136,
      "step": 880
    },
    {
      "epoch": 2.3960929605927923,
      "grad_norm": 1.24334716793248,
      "learning_rate": 1.0216998191681737e-05,
      "loss": 0.4076,
      "step": 890
    },
    {
      "epoch": 2.4230380599528463,
      "grad_norm": 1.156296740645565,
      "learning_rate": 9.76491862567812e-06,
      "loss": 0.4145,
      "step": 900
    },
    {
      "epoch": 2.4230380599528463,
      "eval_loss": 0.823464572429657,
      "eval_runtime": 141.3831,
      "eval_samples_per_second": 35.365,
      "eval_steps_per_second": 0.559,
      "step": 900
    },
    {
      "epoch": 2.4499831593129,
      "grad_norm": 1.2360058092342656,
      "learning_rate": 9.312839059674505e-06,
      "loss": 0.4071,
      "step": 910
    },
    {
      "epoch": 2.4769282586729537,
      "grad_norm": 1.1672627428975908,
      "learning_rate": 8.860759493670886e-06,
      "loss": 0.4052,
      "step": 920
    },
    {
      "epoch": 2.5038733580330077,
      "grad_norm": 1.142198240950353,
      "learning_rate": 8.408679927667269e-06,
      "loss": 0.3995,
      "step": 930
    },
    {
      "epoch": 2.5308184573930617,
      "grad_norm": 1.1336163759218327,
      "learning_rate": 7.956600361663654e-06,
      "loss": 0.4109,
      "step": 940
    },
    {
      "epoch": 2.5577635567531156,
      "grad_norm": 1.1520513715336371,
      "learning_rate": 7.504520795660036e-06,
      "loss": 0.4021,
      "step": 950
    },
    {
      "epoch": 2.5577635567531156,
      "eval_loss": 0.8257409930229187,
      "eval_runtime": 145.0235,
      "eval_samples_per_second": 34.477,
      "eval_steps_per_second": 0.545,
      "step": 950
    },
    {
      "epoch": 2.5847086561131696,
      "grad_norm": 1.1137257622476067,
      "learning_rate": 7.05244122965642e-06,
      "loss": 0.3979,
      "step": 960
    },
    {
      "epoch": 2.611653755473223,
      "grad_norm": 1.107368997807173,
      "learning_rate": 6.600361663652803e-06,
      "loss": 0.4011,
      "step": 970
    },
    {
      "epoch": 2.638598854833277,
      "grad_norm": 1.1780589934268402,
      "learning_rate": 6.148282097649186e-06,
      "loss": 0.4013,
      "step": 980
    },
    {
      "epoch": 2.665543954193331,
      "grad_norm": 1.174979594904003,
      "learning_rate": 5.69620253164557e-06,
      "loss": 0.3992,
      "step": 990
    },
    {
      "epoch": 2.692489053553385,
      "grad_norm": 1.1164061245257322,
      "learning_rate": 5.244122965641953e-06,
      "loss": 0.393,
      "step": 1000
    },
    {
      "epoch": 2.692489053553385,
      "eval_loss": 0.8172587156295776,
      "eval_runtime": 147.8118,
      "eval_samples_per_second": 33.827,
      "eval_steps_per_second": 0.534,
      "step": 1000
    }
  ],
  "logging_steps": 10,
  "max_steps": 1116,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 588328797732864.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}