{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9994454567060128,
  "eval_steps": 500,
  "global_step": 1577,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.006337637645567614,
      "grad_norm": 1.6142889261245728,
      "learning_rate": 0.0002993658845909955,
      "loss": 2.409,
      "step": 10
    },
    {
      "epoch": 0.012675275291135229,
      "grad_norm": 0.3150716722011566,
      "learning_rate": 0.0002987317691819911,
      "loss": 0.3274,
      "step": 20
    },
    {
      "epoch": 0.019012912936702844,
      "grad_norm": 0.3215585947036743,
      "learning_rate": 0.00029809765377298664,
      "loss": 0.2974,
      "step": 30
    },
    {
      "epoch": 0.025350550582270457,
      "grad_norm": 0.3300665318965912,
      "learning_rate": 0.00029746353836398223,
      "loss": 0.2816,
      "step": 40
    },
    {
      "epoch": 0.031688188227838074,
      "grad_norm": 0.5483732223510742,
      "learning_rate": 0.0002968294229549778,
      "loss": 0.2657,
      "step": 50
    },
    {
      "epoch": 0.03802582587340569,
      "grad_norm": 0.5116819143295288,
      "learning_rate": 0.00029619530754597335,
      "loss": 0.255,
      "step": 60
    },
    {
      "epoch": 0.0443634635189733,
      "grad_norm": 0.36696526408195496,
      "learning_rate": 0.0002955611921369689,
      "loss": 0.2486,
      "step": 70
    },
    {
      "epoch": 0.050701101164540914,
      "grad_norm": 0.6022621393203735,
      "learning_rate": 0.0002949270767279645,
      "loss": 0.2637,
      "step": 80
    },
    {
      "epoch": 0.057038738810108534,
      "grad_norm": 0.542329728603363,
      "learning_rate": 0.00029429296131896,
      "loss": 0.2494,
      "step": 90
    },
    {
      "epoch": 0.06337637645567615,
      "grad_norm": 0.5944090485572815,
      "learning_rate": 0.00029365884590995555,
      "loss": 0.2339,
      "step": 100
    },
    {
      "epoch": 0.06971401410124377,
      "grad_norm": 0.3428844213485718,
      "learning_rate": 0.00029302473050095114,
      "loss": 0.2463,
      "step": 110
    },
    {
      "epoch": 0.07605165174681137,
      "grad_norm": 0.7415475845336914,
      "learning_rate": 0.00029239061509194673,
      "loss": 0.2291,
      "step": 120
    },
    {
      "epoch": 0.082389289392379,
      "grad_norm": 0.7960870862007141,
      "learning_rate": 0.00029175649968294227,
      "loss": 0.2414,
      "step": 130
    },
    {
      "epoch": 0.0887269270379466,
      "grad_norm": 0.36076226830482483,
      "learning_rate": 0.00029112238427393786,
      "loss": 0.2427,
      "step": 140
    },
    {
      "epoch": 0.09506456468351422,
      "grad_norm": 0.9236595034599304,
      "learning_rate": 0.0002904882688649334,
      "loss": 0.2407,
      "step": 150
    },
    {
      "epoch": 0.10140220232908183,
      "grad_norm": 0.5588239431381226,
      "learning_rate": 0.00028985415345592893,
      "loss": 0.2228,
      "step": 160
    },
    {
      "epoch": 0.10773983997464945,
      "grad_norm": 0.2992999255657196,
      "learning_rate": 0.0002892200380469245,
      "loss": 0.2234,
      "step": 170
    },
    {
      "epoch": 0.11407747762021707,
      "grad_norm": 0.4005465805530548,
      "learning_rate": 0.00028858592263792006,
      "loss": 0.2246,
      "step": 180
    },
    {
      "epoch": 0.12041511526578468,
      "grad_norm": 0.9351511001586914,
      "learning_rate": 0.0002879518072289156,
      "loss": 0.2345,
      "step": 190
    },
    {
      "epoch": 0.1267527529113523,
      "grad_norm": 0.856308102607727,
      "learning_rate": 0.0002873176918199112,
      "loss": 0.2174,
      "step": 200
    },
    {
      "epoch": 0.1330903905569199,
      "grad_norm": 0.6678782105445862,
      "learning_rate": 0.0002866835764109068,
      "loss": 0.2325,
      "step": 210
    },
    {
      "epoch": 0.13942802820248754,
      "grad_norm": 0.43734946846961975,
      "learning_rate": 0.0002860494610019023,
      "loss": 0.2287,
      "step": 220
    },
    {
      "epoch": 0.14576566584805514,
      "grad_norm": 0.3104623854160309,
      "learning_rate": 0.0002854153455928979,
      "loss": 0.2104,
      "step": 230
    },
    {
      "epoch": 0.15210330349362275,
      "grad_norm": 0.2922423183917999,
      "learning_rate": 0.00028478123018389344,
      "loss": 0.2252,
      "step": 240
    },
    {
      "epoch": 0.15844094113919036,
      "grad_norm": 0.6300675272941589,
      "learning_rate": 0.000284147114774889,
      "loss": 0.2139,
      "step": 250
    },
    {
      "epoch": 0.164778578784758,
      "grad_norm": 0.586909830570221,
      "learning_rate": 0.00028351299936588457,
      "loss": 0.2269,
      "step": 260
    },
    {
      "epoch": 0.1711162164303256,
      "grad_norm": 0.9171651601791382,
      "learning_rate": 0.00028287888395688016,
      "loss": 0.2191,
      "step": 270
    },
    {
      "epoch": 0.1774538540758932,
      "grad_norm": 0.6419245004653931,
      "learning_rate": 0.0002822447685478757,
      "loss": 0.2265,
      "step": 280
    },
    {
      "epoch": 0.18379149172146084,
      "grad_norm": 0.3004674017429352,
      "learning_rate": 0.00028161065313887123,
      "loss": 0.2115,
      "step": 290
    },
    {
      "epoch": 0.19012912936702844,
      "grad_norm": 0.5889032483100891,
      "learning_rate": 0.0002809765377298668,
      "loss": 0.2069,
      "step": 300
    },
    {
      "epoch": 0.19646676701259605,
      "grad_norm": 0.7706992626190186,
      "learning_rate": 0.00028034242232086236,
      "loss": 0.2231,
      "step": 310
    },
    {
      "epoch": 0.20280440465816366,
      "grad_norm": 0.45326951146125793,
      "learning_rate": 0.00027970830691185795,
      "loss": 0.224,
      "step": 320
    },
    {
      "epoch": 0.2091420423037313,
      "grad_norm": 0.7560727596282959,
      "learning_rate": 0.0002790741915028535,
      "loss": 0.2133,
      "step": 330
    },
    {
      "epoch": 0.2154796799492989,
      "grad_norm": 0.38263845443725586,
      "learning_rate": 0.000278440076093849,
      "loss": 0.2166,
      "step": 340
    },
    {
      "epoch": 0.2218173175948665,
      "grad_norm": 0.3568837344646454,
      "learning_rate": 0.0002778059606848446,
      "loss": 0.216,
      "step": 350
    },
    {
      "epoch": 0.22815495524043414,
      "grad_norm": 0.7373816967010498,
      "learning_rate": 0.0002771718452758402,
      "loss": 0.2145,
      "step": 360
    },
    {
      "epoch": 0.23449259288600174,
      "grad_norm": 0.6037229299545288,
      "learning_rate": 0.00027653772986683574,
      "loss": 0.2094,
      "step": 370
    },
    {
      "epoch": 0.24083023053156935,
      "grad_norm": 0.4252951741218567,
      "learning_rate": 0.00027590361445783133,
      "loss": 0.212,
      "step": 380
    },
    {
      "epoch": 0.24716786817713698,
      "grad_norm": 0.5818207263946533,
      "learning_rate": 0.00027526949904882686,
      "loss": 0.2159,
      "step": 390
    },
    {
      "epoch": 0.2535055058227046,
      "grad_norm": 0.7819476127624512,
      "learning_rate": 0.0002746353836398224,
      "loss": 0.2112,
      "step": 400
    },
    {
      "epoch": 0.2598431434682722,
      "grad_norm": 0.6650575399398804,
      "learning_rate": 0.000274001268230818,
      "loss": 0.2081,
      "step": 410
    },
    {
      "epoch": 0.2661807811138398,
      "grad_norm": 0.4109363555908203,
      "learning_rate": 0.0002733671528218136,
      "loss": 0.2029,
      "step": 420
    },
    {
      "epoch": 0.2725184187594074,
      "grad_norm": 0.4742489755153656,
      "learning_rate": 0.0002727330374128091,
      "loss": 0.2229,
      "step": 430
    },
    {
      "epoch": 0.2788560564049751,
      "grad_norm": 0.33365684747695923,
      "learning_rate": 0.00027209892200380465,
      "loss": 0.2053,
      "step": 440
    },
    {
      "epoch": 0.2851936940505427,
      "grad_norm": 0.7879284620285034,
      "learning_rate": 0.00027146480659480024,
      "loss": 0.2081,
      "step": 450
    },
    {
      "epoch": 0.2915313316961103,
      "grad_norm": 17.686824798583984,
      "learning_rate": 0.0002708306911857958,
      "loss": 0.3581,
      "step": 460
    },
    {
      "epoch": 0.2978689693416779,
      "grad_norm": 1.2470951080322266,
      "learning_rate": 0.00027019657577679137,
      "loss": 0.2365,
      "step": 470
    },
    {
      "epoch": 0.3042066069872455,
      "grad_norm": 0.5552864074707031,
      "learning_rate": 0.0002695624603677869,
      "loss": 0.2207,
      "step": 480
    },
    {
      "epoch": 0.3105442446328131,
      "grad_norm": 0.9645744562149048,
      "learning_rate": 0.00026892834495878244,
      "loss": 0.2117,
      "step": 490
    },
    {
      "epoch": 0.3168818822783807,
      "grad_norm": 0.5058091878890991,
      "learning_rate": 0.00026829422954977803,
      "loss": 0.207,
      "step": 500
    },
    {
      "epoch": 0.3232195199239484,
      "grad_norm": 0.537139892578125,
      "learning_rate": 0.0002676601141407736,
      "loss": 0.2079,
      "step": 510
    },
    {
      "epoch": 0.329557157569516,
      "grad_norm": 0.41485828161239624,
      "learning_rate": 0.00026702599873176916,
      "loss": 0.1947,
      "step": 520
    },
    {
      "epoch": 0.3358947952150836,
      "grad_norm": 0.5344266891479492,
      "learning_rate": 0.0002663918833227647,
      "loss": 0.2161,
      "step": 530
    },
    {
      "epoch": 0.3422324328606512,
      "grad_norm": 0.5433288812637329,
      "learning_rate": 0.0002657577679137603,
      "loss": 0.2084,
      "step": 540
    },
    {
      "epoch": 0.3485700705062188,
      "grad_norm": 0.3724132776260376,
      "learning_rate": 0.0002651236525047558,
      "loss": 0.1986,
      "step": 550
    },
    {
      "epoch": 0.3549077081517864,
      "grad_norm": 0.3660731613636017,
      "learning_rate": 0.0002644895370957514,
      "loss": 0.2035,
      "step": 560
    },
    {
      "epoch": 0.361245345797354,
      "grad_norm": 0.5768698453903198,
      "learning_rate": 0.00026385542168674695,
      "loss": 0.2073,
      "step": 570
    },
    {
      "epoch": 0.3675829834429217,
      "grad_norm": 0.305505633354187,
      "learning_rate": 0.00026322130627774254,
      "loss": 0.2027,
      "step": 580
    },
    {
      "epoch": 0.3739206210884893,
      "grad_norm": 0.5128267407417297,
      "learning_rate": 0.0002625871908687381,
      "loss": 0.207,
      "step": 590
    },
    {
      "epoch": 0.3802582587340569,
      "grad_norm": 0.4295850694179535,
      "learning_rate": 0.00026195307545973367,
      "loss": 0.2154,
      "step": 600
    },
    {
      "epoch": 0.3865958963796245,
      "grad_norm": 0.5752875208854675,
      "learning_rate": 0.0002613189600507292,
      "loss": 0.2044,
      "step": 610
    },
    {
      "epoch": 0.3929335340251921,
      "grad_norm": 0.32594314217567444,
      "learning_rate": 0.00026068484464172474,
      "loss": 0.1926,
      "step": 620
    },
    {
      "epoch": 0.3992711716707597,
      "grad_norm": 0.34995755553245544,
      "learning_rate": 0.00026005072923272033,
      "loss": 0.2024,
      "step": 630
    },
    {
      "epoch": 0.4056088093163273,
      "grad_norm": 0.30470767617225647,
      "learning_rate": 0.00025941661382371587,
      "loss": 0.204,
      "step": 640
    },
    {
      "epoch": 0.411946446961895,
      "grad_norm": 0.9187641739845276,
      "learning_rate": 0.00025878249841471146,
      "loss": 0.211,
      "step": 650
    },
    {
      "epoch": 0.4182840846074626,
      "grad_norm": 0.8582721948623657,
      "learning_rate": 0.00025814838300570705,
      "loss": 0.1983,
      "step": 660
    },
    {
      "epoch": 0.4246217222530302,
      "grad_norm": 0.6197007894515991,
      "learning_rate": 0.0002575142675967026,
      "loss": 0.1975,
      "step": 670
    },
    {
      "epoch": 0.4309593598985978,
      "grad_norm": 0.4113485515117645,
      "learning_rate": 0.0002568801521876981,
      "loss": 0.1993,
      "step": 680
    },
    {
      "epoch": 0.4372969975441654,
      "grad_norm": 0.3764086663722992,
      "learning_rate": 0.0002562460367786937,
      "loss": 0.1884,
      "step": 690
    },
    {
      "epoch": 0.443634635189733,
      "grad_norm": 0.6583007574081421,
      "learning_rate": 0.00025561192136968925,
      "loss": 0.2092,
      "step": 700
    },
    {
      "epoch": 0.44997227283530067,
      "grad_norm": 0.6544474959373474,
      "learning_rate": 0.00025497780596068484,
      "loss": 0.1997,
      "step": 710
    },
    {
      "epoch": 0.4563099104808683,
      "grad_norm": 0.3778369128704071,
      "learning_rate": 0.0002543436905516804,
      "loss": 0.2019,
      "step": 720
    },
    {
      "epoch": 0.4626475481264359,
      "grad_norm": 0.3147338032722473,
      "learning_rate": 0.00025370957514267596,
      "loss": 0.1899,
      "step": 730
    },
    {
      "epoch": 0.4689851857720035,
      "grad_norm": 0.4564040005207062,
      "learning_rate": 0.0002530754597336715,
      "loss": 0.2029,
      "step": 740
    },
    {
      "epoch": 0.4753228234175711,
      "grad_norm": 0.5435693860054016,
      "learning_rate": 0.0002524413443246671,
      "loss": 0.2057,
      "step": 750
    },
    {
      "epoch": 0.4816604610631387,
      "grad_norm": 0.34006282687187195,
      "learning_rate": 0.0002518072289156626,
      "loss": 0.1909,
      "step": 760
    },
    {
      "epoch": 0.4879980987087063,
      "grad_norm": 0.46839743852615356,
      "learning_rate": 0.00025117311350665816,
      "loss": 0.1996,
      "step": 770
    },
    {
      "epoch": 0.49433573635427397,
      "grad_norm": 0.3562382459640503,
      "learning_rate": 0.00025053899809765375,
      "loss": 0.1937,
      "step": 780
    },
    {
      "epoch": 0.5006733739998416,
      "grad_norm": 0.35229429602622986,
      "learning_rate": 0.0002499048826886493,
      "loss": 0.198,
      "step": 790
    },
    {
      "epoch": 0.5070110116454092,
      "grad_norm": 0.539817750453949,
      "learning_rate": 0.0002492707672796449,
      "loss": 0.1942,
      "step": 800
    },
    {
      "epoch": 0.5133486492909768,
      "grad_norm": 0.3757067322731018,
      "learning_rate": 0.00024863665187064047,
      "loss": 0.1966,
      "step": 810
    },
    {
      "epoch": 0.5196862869365444,
      "grad_norm": 0.5510461926460266,
      "learning_rate": 0.000248002536461636,
      "loss": 0.2021,
      "step": 820
    },
    {
      "epoch": 0.526023924582112,
      "grad_norm": 0.515483558177948,
      "learning_rate": 0.00024736842105263154,
      "loss": 0.2015,
      "step": 830
    },
    {
      "epoch": 0.5323615622276796,
      "grad_norm": 0.4190600514411926,
      "learning_rate": 0.00024673430564362713,
      "loss": 0.1822,
      "step": 840
    },
    {
      "epoch": 0.5386991998732472,
      "grad_norm": 0.7067417502403259,
      "learning_rate": 0.00024610019023462267,
      "loss": 0.201,
      "step": 850
    },
    {
      "epoch": 0.5450368375188148,
      "grad_norm": 0.4557970464229584,
      "learning_rate": 0.0002454660748256182,
      "loss": 0.1944,
      "step": 860
    },
    {
      "epoch": 0.5513744751643824,
      "grad_norm": 0.2890704274177551,
      "learning_rate": 0.0002448319594166138,
      "loss": 0.188,
      "step": 870
    },
    {
      "epoch": 0.5577121128099501,
      "grad_norm": 0.7139009237289429,
      "learning_rate": 0.0002441978440076094,
      "loss": 0.1894,
      "step": 880
    },
    {
      "epoch": 0.5640497504555178,
      "grad_norm": 0.46737805008888245,
      "learning_rate": 0.00024356372859860492,
      "loss": 0.1858,
      "step": 890
    },
    {
      "epoch": 0.5703873881010854,
      "grad_norm": 0.5491992235183716,
      "learning_rate": 0.0002429296131896005,
      "loss": 0.2049,
      "step": 900
    },
    {
      "epoch": 0.576725025746653,
      "grad_norm": 0.4642735719680786,
      "learning_rate": 0.00024229549778059605,
      "loss": 0.1963,
      "step": 910
    },
    {
      "epoch": 0.5830626633922206,
      "grad_norm": 0.3879190981388092,
      "learning_rate": 0.0002416613823715916,
      "loss": 0.1952,
      "step": 920
    },
    {
      "epoch": 0.5894003010377882,
      "grad_norm": 0.3346266746520996,
      "learning_rate": 0.00024102726696258715,
      "loss": 0.1921,
      "step": 930
    },
    {
      "epoch": 0.5957379386833558,
      "grad_norm": 0.49900051951408386,
      "learning_rate": 0.0002403931515535827,
      "loss": 0.1809,
      "step": 940
    },
    {
      "epoch": 0.6020755763289234,
      "grad_norm": 0.4249061644077301,
      "learning_rate": 0.00023975903614457828,
      "loss": 0.1865,
      "step": 950
    },
    {
      "epoch": 0.608413213974491,
      "grad_norm": 0.39760246872901917,
      "learning_rate": 0.00023912492073557387,
      "loss": 0.1914,
      "step": 960
    },
    {
      "epoch": 0.6147508516200586,
      "grad_norm": 0.4665059745311737,
      "learning_rate": 0.00023849080532656943,
      "loss": 0.1857,
      "step": 970
    },
    {
      "epoch": 0.6210884892656262,
      "grad_norm": 0.4482426047325134,
      "learning_rate": 0.00023785668991756497,
      "loss": 0.1904,
      "step": 980
    },
    {
      "epoch": 0.6274261269111938,
      "grad_norm": 0.5875295996665955,
      "learning_rate": 0.00023722257450856053,
      "loss": 0.1961,
      "step": 990
    },
    {
      "epoch": 0.6337637645567614,
      "grad_norm": 0.3540550768375397,
      "learning_rate": 0.0002365884590995561,
      "loss": 0.1894,
      "step": 1000
    },
    {
      "epoch": 0.640101402202329,
      "grad_norm": 0.39407670497894287,
      "learning_rate": 0.00023595434369055166,
      "loss": 0.1868,
      "step": 1010
    },
    {
      "epoch": 0.6464390398478967,
      "grad_norm": 0.3376408517360687,
      "learning_rate": 0.00023532022828154722,
      "loss": 0.1892,
      "step": 1020
    },
    {
      "epoch": 0.6527766774934644,
      "grad_norm": 0.347345769405365,
      "learning_rate": 0.0002346861128725428,
      "loss": 0.1873,
      "step": 1030
    },
    {
      "epoch": 0.659114315139032,
      "grad_norm": 0.3818947374820709,
      "learning_rate": 0.00023405199746353835,
      "loss": 0.1836,
      "step": 1040
    },
    {
      "epoch": 0.6654519527845996,
      "grad_norm": 0.36459505558013916,
      "learning_rate": 0.0002334178820545339,
      "loss": 0.1976,
      "step": 1050
    },
    {
      "epoch": 0.6717895904301672,
      "grad_norm": 0.4520089030265808,
      "learning_rate": 0.00023278376664552947,
      "loss": 0.1964,
      "step": 1060
    },
    {
      "epoch": 0.6781272280757348,
      "grad_norm": 0.5031911134719849,
      "learning_rate": 0.00023214965123652504,
      "loss": 0.1892,
      "step": 1070
    },
    {
      "epoch": 0.6844648657213024,
      "grad_norm": 0.7651061415672302,
      "learning_rate": 0.00023151553582752057,
      "loss": 0.186,
      "step": 1080
    },
    {
      "epoch": 0.69080250336687,
      "grad_norm": 0.49742498993873596,
      "learning_rate": 0.00023088142041851614,
      "loss": 0.1812,
      "step": 1090
    },
    {
      "epoch": 0.6971401410124376,
      "grad_norm": 0.4136335551738739,
      "learning_rate": 0.0002302473050095117,
      "loss": 0.1944,
      "step": 1100
    },
    {
      "epoch": 0.7034777786580052,
      "grad_norm": 0.32826468348503113,
      "learning_rate": 0.0002296131896005073,
      "loss": 0.1959,
      "step": 1110
    },
    {
      "epoch": 0.7098154163035728,
      "grad_norm": 0.4136222004890442,
      "learning_rate": 0.00022897907419150285,
      "loss": 0.1817,
      "step": 1120
    },
    {
      "epoch": 0.7161530539491404,
      "grad_norm": 0.3633563816547394,
      "learning_rate": 0.0002283449587824984,
      "loss": 0.1899,
      "step": 1130
    },
    {
      "epoch": 0.722490691594708,
      "grad_norm": 0.4116598963737488,
      "learning_rate": 0.00022771084337349395,
      "loss": 0.1883,
      "step": 1140
    },
    {
      "epoch": 0.7288283292402757,
      "grad_norm": 0.43437546491622925,
      "learning_rate": 0.00022707672796448952,
      "loss": 0.1891,
      "step": 1150
    },
    {
      "epoch": 0.7351659668858433,
      "grad_norm": 0.7089232206344604,
      "learning_rate": 0.00022644261255548508,
      "loss": 0.1774,
      "step": 1160
    },
    {
      "epoch": 0.741503604531411,
      "grad_norm": 0.30157792568206787,
      "learning_rate": 0.00022580849714648062,
      "loss": 0.1912,
      "step": 1170
    },
    {
      "epoch": 0.7478412421769786,
      "grad_norm": 0.4012957215309143,
      "learning_rate": 0.0002251743817374762,
      "loss": 0.1821,
      "step": 1180
    },
    {
      "epoch": 0.7541788798225462,
      "grad_norm": 0.5319772958755493,
      "learning_rate": 0.00022454026632847177,
      "loss": 0.1748,
      "step": 1190
    },
    {
      "epoch": 0.7605165174681138,
      "grad_norm": 0.34556815028190613,
      "learning_rate": 0.00022390615091946733,
      "loss": 0.1946,
      "step": 1200
    },
    {
      "epoch": 0.7668541551136814,
      "grad_norm": 0.43295615911483765,
      "learning_rate": 0.0002232720355104629,
      "loss": 0.1799,
      "step": 1210
    },
    {
      "epoch": 0.773191792759249,
      "grad_norm": 0.3238721787929535,
      "learning_rate": 0.00022263792010145843,
      "loss": 0.1912,
      "step": 1220
    },
    {
      "epoch": 0.7795294304048166,
      "grad_norm": 0.39405253529548645,
      "learning_rate": 0.000222003804692454,
      "loss": 0.1902,
      "step": 1230
    },
    {
      "epoch": 0.7858670680503842,
      "grad_norm": 0.35559186339378357,
      "learning_rate": 0.00022136968928344956,
      "loss": 0.1756,
      "step": 1240
    },
    {
      "epoch": 0.7922047056959518,
      "grad_norm": 0.31320518255233765,
      "learning_rate": 0.00022073557387444512,
      "loss": 0.1948,
      "step": 1250
    },
    {
      "epoch": 0.7985423433415194,
      "grad_norm": 0.3586606979370117,
      "learning_rate": 0.0002201014584654407,
      "loss": 0.1768,
      "step": 1260
    },
    {
      "epoch": 0.804879980987087,
      "grad_norm": 0.37648022174835205,
      "learning_rate": 0.00021946734305643625,
      "loss": 0.1763,
      "step": 1270
    },
    {
      "epoch": 0.8112176186326546,
      "grad_norm": 0.4442378282546997,
      "learning_rate": 0.0002188332276474318,
      "loss": 0.1694,
      "step": 1280
    },
    {
      "epoch": 0.8175552562782223,
      "grad_norm": 0.5687398314476013,
      "learning_rate": 0.00021819911223842738,
      "loss": 0.1856,
      "step": 1290
    },
    {
      "epoch": 0.82389289392379,
      "grad_norm": 0.3056432008743286,
      "learning_rate": 0.00021756499682942294,
      "loss": 0.1847,
      "step": 1300
    },
    {
      "epoch": 0.8302305315693576,
      "grad_norm": 0.42267075181007385,
      "learning_rate": 0.0002169308814204185,
      "loss": 0.1889,
      "step": 1310
    },
    {
      "epoch": 0.8365681692149252,
      "grad_norm": 0.320975661277771,
      "learning_rate": 0.00021629676601141404,
      "loss": 0.187,
      "step": 1320
    },
    {
      "epoch": 0.8429058068604928,
      "grad_norm": 0.6148353815078735,
      "learning_rate": 0.0002156626506024096,
      "loss": 0.1748,
      "step": 1330
    },
    {
      "epoch": 0.8492434445060604,
      "grad_norm": 0.6128231883049011,
      "learning_rate": 0.0002150285351934052,
      "loss": 0.1797,
      "step": 1340
    },
    {
      "epoch": 0.855581082151628,
      "grad_norm": 0.32467806339263916,
      "learning_rate": 0.00021439441978440076,
      "loss": 0.1713,
      "step": 1350
    },
    {
      "epoch": 0.8619187197971956,
      "grad_norm": 0.34791630506515503,
      "learning_rate": 0.00021376030437539632,
      "loss": 0.1716,
      "step": 1360
    },
    {
      "epoch": 0.8682563574427632,
      "grad_norm": 0.36184781789779663,
      "learning_rate": 0.00021312618896639186,
      "loss": 0.1817,
      "step": 1370
    },
    {
      "epoch": 0.8745939950883308,
      "grad_norm": 0.2984809875488281,
      "learning_rate": 0.00021249207355738742,
      "loss": 0.1925,
      "step": 1380
    },
    {
      "epoch": 0.8809316327338984,
      "grad_norm": 0.3217010498046875,
      "learning_rate": 0.00021185795814838298,
      "loss": 0.1884,
      "step": 1390
    },
    {
      "epoch": 0.887269270379466,
      "grad_norm": 0.32817527651786804,
      "learning_rate": 0.00021122384273937855,
      "loss": 0.1797,
      "step": 1400
    },
    {
      "epoch": 0.8936069080250336,
      "grad_norm": 0.3350455164909363,
      "learning_rate": 0.00021058972733037414,
      "loss": 0.1746,
      "step": 1410
    },
    {
      "epoch": 0.8999445456706013,
      "grad_norm": 0.3384022116661072,
      "learning_rate": 0.00020995561192136967,
      "loss": 0.1796,
      "step": 1420
    },
    {
      "epoch": 0.9062821833161689,
      "grad_norm": 0.37266311049461365,
      "learning_rate": 0.00020932149651236524,
      "loss": 0.1798,
      "step": 1430
    },
    {
      "epoch": 0.9126198209617365,
      "grad_norm": 0.3353017270565033,
      "learning_rate": 0.0002086873811033608,
      "loss": 0.1824,
      "step": 1440
    },
    {
      "epoch": 0.9189574586073042,
      "grad_norm": 0.4691619277000427,
      "learning_rate": 0.00020805326569435636,
      "loss": 0.1722,
      "step": 1450
    },
    {
      "epoch": 0.9252950962528718,
      "grad_norm": 0.5046979784965515,
      "learning_rate": 0.0002074191502853519,
      "loss": 0.182,
      "step": 1460
    },
    {
      "epoch": 0.9316327338984394,
      "grad_norm": 0.43848350644111633,
      "learning_rate": 0.00020678503487634746,
      "loss": 0.1795,
      "step": 1470
    },
    {
      "epoch": 0.937970371544007,
      "grad_norm": 0.36225542426109314,
      "learning_rate": 0.00020615091946734303,
      "loss": 0.1834,
      "step": 1480
    },
    {
      "epoch": 0.9443080091895746,
      "grad_norm": 0.34802907705307007,
      "learning_rate": 0.00020551680405833862,
      "loss": 0.1809,
      "step": 1490
    },
    {
      "epoch": 0.9506456468351422,
      "grad_norm": 0.6984723806381226,
      "learning_rate": 0.00020488268864933418,
      "loss": 0.1767,
      "step": 1500
    },
    {
      "epoch": 0.9569832844807098,
      "grad_norm": 0.32185426354408264,
      "learning_rate": 0.00020424857324032972,
      "loss": 0.1801,
      "step": 1510
    },
    {
      "epoch": 0.9633209221262774,
      "grad_norm": 0.3562312424182892,
      "learning_rate": 0.00020361445783132528,
      "loss": 0.1962,
      "step": 1520
    },
    {
      "epoch": 0.969658559771845,
      "grad_norm": 0.4363330006599426,
      "learning_rate": 0.00020298034242232084,
      "loss": 0.1785,
      "step": 1530
    },
    {
      "epoch": 0.9759961974174126,
      "grad_norm": 0.3066689670085907,
      "learning_rate": 0.0002023462270133164,
      "loss": 0.1762,
      "step": 1540
    },
    {
      "epoch": 0.9823338350629802,
      "grad_norm": 0.3895578980445862,
      "learning_rate": 0.00020171211160431194,
      "loss": 0.1701,
      "step": 1550
    },
    {
      "epoch": 0.9886714727085479,
      "grad_norm": 0.3306889832019806,
      "learning_rate": 0.00020107799619530753,
      "loss": 0.1781,
      "step": 1560
    },
    {
      "epoch": 0.9950091103541155,
      "grad_norm": 0.40368223190307617,
      "learning_rate": 0.0002004438807863031,
      "loss": 0.1815,
      "step": 1570
    }
  ],
  "logging_steps": 10,
  "max_steps": 4731,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.8508092447949062e+19,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}