{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 5.0,
  "eval_steps": 500,
  "global_step": 2190,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.022860408629804256,
      "grad_norm": 0.31346356868743896,
      "learning_rate": 4.10958904109589e-06,
      "loss": 0.4615,
      "step": 10
    },
    {
      "epoch": 0.04572081725960851,
      "grad_norm": 0.3634890019893646,
      "learning_rate": 8.675799086757991e-06,
      "loss": 0.4707,
      "step": 20
    },
    {
      "epoch": 0.06858122588941278,
      "grad_norm": 0.26333874464035034,
      "learning_rate": 1.3242009132420092e-05,
      "loss": 0.3731,
      "step": 30
    },
    {
      "epoch": 0.09144163451921702,
      "grad_norm": 0.19899441301822662,
      "learning_rate": 1.780821917808219e-05,
      "loss": 0.2918,
      "step": 40
    },
    {
      "epoch": 0.11430204314902129,
      "grad_norm": 0.10445400327444077,
      "learning_rate": 2.237442922374429e-05,
      "loss": 0.1518,
      "step": 50
    },
    {
      "epoch": 0.13716245177882555,
      "grad_norm": 0.060141440480947495,
      "learning_rate": 2.6940639269406392e-05,
      "loss": 0.0826,
      "step": 60
    },
    {
      "epoch": 0.1600228604086298,
      "grad_norm": 0.11565729230642319,
      "learning_rate": 3.1506849315068496e-05,
      "loss": 0.0897,
      "step": 70
    },
    {
      "epoch": 0.18288326903843405,
      "grad_norm": 0.13073839247226715,
      "learning_rate": 3.60730593607306e-05,
      "loss": 0.0992,
      "step": 80
    },
    {
      "epoch": 0.2057436776682383,
      "grad_norm": 0.08385579288005829,
      "learning_rate": 4.063926940639269e-05,
      "loss": 0.0867,
      "step": 90
    },
    {
      "epoch": 0.22860408629804257,
      "grad_norm": 0.10868898034095764,
      "learning_rate": 4.520547945205479e-05,
      "loss": 0.1022,
      "step": 100
    },
    {
      "epoch": 0.25146449492784684,
      "grad_norm": 0.10515399277210236,
      "learning_rate": 4.977168949771689e-05,
      "loss": 0.0866,
      "step": 110
    },
    {
      "epoch": 0.2743249035576511,
      "grad_norm": 0.11809521168470383,
      "learning_rate": 5.4337899543379e-05,
      "loss": 0.0866,
      "step": 120
    },
    {
      "epoch": 0.29718531218745536,
      "grad_norm": 0.09987114369869232,
      "learning_rate": 5.89041095890411e-05,
      "loss": 0.0942,
      "step": 130
    },
    {
      "epoch": 0.3200457208172596,
      "grad_norm": 0.1236502081155777,
      "learning_rate": 6.34703196347032e-05,
      "loss": 0.0935,
      "step": 140
    },
    {
      "epoch": 0.3429061294470639,
      "grad_norm": 0.09345820546150208,
      "learning_rate": 6.803652968036531e-05,
      "loss": 0.1001,
      "step": 150
    },
    {
      "epoch": 0.3657665380768681,
      "grad_norm": 0.12369757890701294,
      "learning_rate": 7.26027397260274e-05,
      "loss": 0.0805,
      "step": 160
    },
    {
      "epoch": 0.38862694670667236,
      "grad_norm": 0.12654580175876617,
      "learning_rate": 7.716894977168951e-05,
      "loss": 0.0704,
      "step": 170
    },
    {
      "epoch": 0.4114873553364766,
      "grad_norm": 0.10067084431648254,
      "learning_rate": 8.17351598173516e-05,
      "loss": 0.0812,
      "step": 180
    },
    {
      "epoch": 0.4343477639662809,
      "grad_norm": 0.10523034632205963,
      "learning_rate": 8.630136986301371e-05,
      "loss": 0.0897,
      "step": 190
    },
    {
      "epoch": 0.45720817259608515,
      "grad_norm": 0.08805034309625626,
      "learning_rate": 9.08675799086758e-05,
      "loss": 0.1039,
      "step": 200
    },
    {
      "epoch": 0.4800685812258894,
      "grad_norm": 0.10564622282981873,
      "learning_rate": 9.543378995433791e-05,
      "loss": 0.0931,
      "step": 210
    },
    {
      "epoch": 0.5029289898556937,
      "grad_norm": 0.08823700249195099,
      "learning_rate": 0.0001,
      "loss": 0.0769,
      "step": 220
    },
    {
      "epoch": 0.5257893984854979,
      "grad_norm": 0.07316570729017258,
      "learning_rate": 9.999364877774174e-05,
      "loss": 0.0893,
      "step": 230
    },
    {
      "epoch": 0.5486498071153022,
      "grad_norm": 0.08555534482002258,
      "learning_rate": 9.997459672448794e-05,
      "loss": 0.0802,
      "step": 240
    },
    {
      "epoch": 0.5715102157451064,
      "grad_norm": 0.1843150109052658,
      "learning_rate": 9.994284868039156e-05,
      "loss": 0.0736,
      "step": 250
    },
    {
      "epoch": 0.5943706243749107,
      "grad_norm": 0.11401674896478653,
      "learning_rate": 9.9898412711008e-05,
      "loss": 0.0788,
      "step": 260
    },
    {
      "epoch": 0.6172310330047149,
      "grad_norm": 0.07360777258872986,
      "learning_rate": 9.984130010524597e-05,
      "loss": 0.0807,
      "step": 270
    },
    {
      "epoch": 0.6400914416345193,
      "grad_norm": 0.11163882166147232,
      "learning_rate": 9.977152537249958e-05,
      "loss": 0.072,
      "step": 280
    },
    {
      "epoch": 0.6629518502643235,
      "grad_norm": 0.12346392124891281,
      "learning_rate": 9.968910623896225e-05,
      "loss": 0.0712,
      "step": 290
    },
    {
      "epoch": 0.6858122588941278,
      "grad_norm": 0.13644632697105408,
      "learning_rate": 9.95940636431234e-05,
      "loss": 0.0765,
      "step": 300
    },
    {
      "epoch": 0.708672667523932,
      "grad_norm": 0.09245093911886215,
      "learning_rate": 9.948642173044905e-05,
      "loss": 0.0797,
      "step": 310
    },
    {
      "epoch": 0.7315330761537362,
      "grad_norm": 0.11467203497886658,
      "learning_rate": 9.936620784724766e-05,
      "loss": 0.0756,
      "step": 320
    },
    {
      "epoch": 0.7543934847835405,
      "grad_norm": 0.12245265394449234,
      "learning_rate": 9.923345253372287e-05,
      "loss": 0.0671,
      "step": 330
    },
    {
      "epoch": 0.7772538934133447,
      "grad_norm": 0.08666519820690155,
      "learning_rate": 9.908818951621474e-05,
      "loss": 0.0681,
      "step": 340
    },
    {
      "epoch": 0.800114302043149,
      "grad_norm": 0.16961519420146942,
      "learning_rate": 9.89304556986317e-05,
      "loss": 0.072,
      "step": 350
    },
    {
      "epoch": 0.8229747106729532,
      "grad_norm": 0.2313849776983261,
      "learning_rate": 9.876029115307506e-05,
      "loss": 0.0707,
      "step": 360
    },
    {
      "epoch": 0.8458351193027576,
      "grad_norm": 0.14014111459255219,
      "learning_rate": 9.85777391096588e-05,
      "loss": 0.0807,
      "step": 370
    },
    {
      "epoch": 0.8686955279325618,
      "grad_norm": 0.0991162583231926,
      "learning_rate": 9.838284594552697e-05,
      "loss": 0.0736,
      "step": 380
    },
    {
      "epoch": 0.8915559365623661,
      "grad_norm": 0.14600183069705963,
      "learning_rate": 9.817566117307167e-05,
      "loss": 0.073,
      "step": 390
    },
    {
      "epoch": 0.9144163451921703,
      "grad_norm": 0.12519070506095886,
      "learning_rate": 9.79562374273544e-05,
      "loss": 0.0573,
      "step": 400
    },
    {
      "epoch": 0.9372767538219746,
      "grad_norm": 0.14949887990951538,
      "learning_rate": 9.77246304527343e-05,
      "loss": 0.0691,
      "step": 410
    },
    {
      "epoch": 0.9601371624517788,
      "grad_norm": 0.13667669892311096,
      "learning_rate": 9.748089908870627e-05,
      "loss": 0.052,
      "step": 420
    },
    {
      "epoch": 0.982997571081583,
      "grad_norm": 0.07336350530385971,
      "learning_rate": 9.722510525495286e-05,
      "loss": 0.0698,
      "step": 430
    },
    {
      "epoch": 1.004572081725961,
      "grad_norm": 0.1134289801120758,
      "learning_rate": 9.69573139356137e-05,
      "loss": 0.0621,
      "step": 440
    },
    {
      "epoch": 1.027432490355765,
      "grad_norm": 0.11676439642906189,
      "learning_rate": 9.66775931627763e-05,
      "loss": 0.0474,
      "step": 450
    },
    {
      "epoch": 1.0502928989855693,
      "grad_norm": 0.14386767148971558,
      "learning_rate": 9.638601399919259e-05,
      "loss": 0.0496,
      "step": 460
    },
    {
      "epoch": 1.0731533076153736,
      "grad_norm": 0.16757084429264069,
      "learning_rate": 9.608265052022556e-05,
      "loss": 0.0493,
      "step": 470
    },
    {
      "epoch": 1.096013716245178,
      "grad_norm": 0.12976950407028198,
      "learning_rate": 9.576757979503037e-05,
      "loss": 0.0513,
      "step": 480
    },
    {
      "epoch": 1.118874124874982,
      "grad_norm": 0.21009308099746704,
      "learning_rate": 9.544088186697515e-05,
      "loss": 0.0464,
      "step": 490
    },
    {
      "epoch": 1.1417345335047864,
      "grad_norm": 0.1863221377134323,
      "learning_rate": 9.5102639733306e-05,
      "loss": 0.0388,
      "step": 500
    },
    {
      "epoch": 1.1645949421345907,
      "grad_norm": 0.1924988180398941,
      "learning_rate": 9.475293932406162e-05,
      "loss": 0.0517,
      "step": 510
    },
    {
      "epoch": 1.1874553507643948,
      "grad_norm": 0.1228746697306633,
      "learning_rate": 9.439186948024297e-05,
      "loss": 0.0475,
      "step": 520
    },
    {
      "epoch": 1.210315759394199,
      "grad_norm": 0.18384294211864471,
      "learning_rate": 9.401952193124315e-05,
      "loss": 0.046,
      "step": 530
    },
    {
      "epoch": 1.2331761680240034,
      "grad_norm": 0.1352134793996811,
      "learning_rate": 9.363599127154383e-05,
      "loss": 0.0454,
      "step": 540
    },
    {
      "epoch": 1.2560365766538077,
      "grad_norm": 0.07320581376552582,
      "learning_rate": 9.324137493668352e-05,
      "loss": 0.0379,
      "step": 550
    },
    {
      "epoch": 1.278896985283612,
      "grad_norm": 0.14689098298549652,
      "learning_rate": 9.283577317850419e-05,
      "loss": 0.0419,
      "step": 560
    },
    {
      "epoch": 1.3017573939134162,
      "grad_norm": 0.14588995277881622,
      "learning_rate": 9.24192890396824e-05,
      "loss": 0.0451,
      "step": 570
    },
    {
      "epoch": 1.3246178025432205,
      "grad_norm": 0.17893975973129272,
      "learning_rate": 9.19920283275515e-05,
      "loss": 0.0405,
      "step": 580
    },
    {
      "epoch": 1.3474782111730248,
      "grad_norm": 0.17961391806602478,
      "learning_rate": 9.155409958722124e-05,
      "loss": 0.0371,
      "step": 590
    },
    {
      "epoch": 1.370338619802829,
      "grad_norm": 0.18520987033843994,
      "learning_rate": 9.110561407400218e-05,
      "loss": 0.0447,
      "step": 600
    },
    {
      "epoch": 1.3931990284326332,
      "grad_norm": 0.14877571165561676,
      "learning_rate": 9.064668572514127e-05,
      "loss": 0.0429,
      "step": 610
    },
    {
      "epoch": 1.4160594370624375,
      "grad_norm": 0.1054820641875267,
      "learning_rate": 9.01774311308763e-05,
      "loss": 0.036,
      "step": 620
    },
    {
      "epoch": 1.4389198456922419,
      "grad_norm": 0.1029554158449173,
      "learning_rate": 8.96979695048162e-05,
      "loss": 0.0473,
      "step": 630
    },
    {
      "epoch": 1.461780254322046,
      "grad_norm": 0.26157063245773315,
      "learning_rate": 8.920842265365503e-05,
      "loss": 0.0343,
      "step": 640
    },
    {
      "epoch": 1.4846406629518503,
      "grad_norm": 0.12941773235797882,
      "learning_rate": 8.870891494622709e-05,
      "loss": 0.0522,
      "step": 650
    },
    {
      "epoch": 1.5075010715816544,
      "grad_norm": 0.11229275912046432,
      "learning_rate": 8.819957328191117e-05,
      "loss": 0.0385,
      "step": 660
    },
    {
      "epoch": 1.5303614802114587,
      "grad_norm": 0.2675265073776245,
      "learning_rate": 8.76805270583919e-05,
      "loss": 0.0492,
      "step": 670
    },
    {
      "epoch": 1.553221888841263,
      "grad_norm": 0.18951663374900818,
      "learning_rate": 8.715190813878637e-05,
      "loss": 0.0353,
      "step": 680
    },
    {
      "epoch": 1.5760822974710673,
      "grad_norm": 0.14573994278907776,
      "learning_rate": 8.661385081814453e-05,
      "loss": 0.0334,
      "step": 690
    },
    {
      "epoch": 1.5989427061008716,
      "grad_norm": 0.08593331277370453,
      "learning_rate": 8.606649178933163e-05,
      "loss": 0.0315,
      "step": 700
    },
    {
      "epoch": 1.621803114730676,
      "grad_norm": 0.20985154807567596,
      "learning_rate": 8.550997010830154e-05,
      "loss": 0.0369,
      "step": 710
    },
    {
      "epoch": 1.64466352336048,
      "grad_norm": 0.2964492738246918,
      "learning_rate": 8.494442715876976e-05,
      "loss": 0.0283,
      "step": 720
    },
    {
      "epoch": 1.6675239319902844,
      "grad_norm": 0.11921142041683197,
      "learning_rate": 8.437000661629506e-05,
      "loss": 0.0312,
      "step": 730
    },
    {
      "epoch": 1.6903843406200885,
      "grad_norm": 0.20576898753643036,
      "learning_rate": 8.378685441177886e-05,
      "loss": 0.0348,
      "step": 740
    },
    {
      "epoch": 1.7132447492498928,
      "grad_norm": 0.27900010347366333,
      "learning_rate": 8.31951186943916e-05,
      "loss": 0.0294,
      "step": 750
    },
    {
      "epoch": 1.736105157879697,
      "grad_norm": 0.18257662653923035,
      "learning_rate": 8.259494979393563e-05,
      "loss": 0.0264,
      "step": 760
    },
    {
      "epoch": 1.7589655665095014,
      "grad_norm": 0.16247795522212982,
      "learning_rate": 8.198650018265415e-05,
      "loss": 0.0385,
      "step": 770
    },
    {
      "epoch": 1.7818259751393057,
      "grad_norm": 0.18227055668830872,
      "learning_rate": 8.136992443649571e-05,
      "loss": 0.0333,
      "step": 780
    },
    {
      "epoch": 1.8046863837691098,
      "grad_norm": 0.1283257156610489,
      "learning_rate": 8.074537919584443e-05,
      "loss": 0.0373,
      "step": 790
    },
    {
      "epoch": 1.8275467923989142,
      "grad_norm": 0.21598616242408752,
      "learning_rate": 8.011302312572566e-05,
      "loss": 0.0427,
      "step": 800
    },
    {
      "epoch": 1.8504072010287183,
      "grad_norm": 0.18352819979190826,
      "learning_rate": 7.947301687549731e-05,
      "loss": 0.04,
      "step": 810
    },
    {
      "epoch": 1.8732676096585226,
      "grad_norm": 0.28778374195098877,
      "learning_rate": 7.882552303803705e-05,
      "loss": 0.035,
      "step": 820
    },
    {
      "epoch": 1.8961280182883269,
      "grad_norm": 0.11134298145771027,
      "learning_rate": 7.817070610843579e-05,
      "loss": 0.0281,
      "step": 830
    },
    {
      "epoch": 1.9189884269181312,
      "grad_norm": 0.2428896129131317,
      "learning_rate": 7.750873244220787e-05,
      "loss": 0.0326,
      "step": 840
    },
    {
      "epoch": 1.9418488355479355,
      "grad_norm": 0.21082095801830292,
      "learning_rate": 7.68397702130286e-05,
      "loss": 0.0316,
      "step": 850
    },
    {
      "epoch": 1.9647092441777396,
      "grad_norm": 0.14655885100364685,
      "learning_rate": 7.616398937000999e-05,
      "loss": 0.0292,
      "step": 860
    },
    {
      "epoch": 1.987569652807544,
      "grad_norm": 0.16203175485134125,
      "learning_rate": 7.548156159452531e-05,
      "loss": 0.0272,
      "step": 870
    },
    {
      "epoch": 2.009144163451922,
      "grad_norm": 0.1306605190038681,
      "learning_rate": 7.479266025659365e-05,
      "loss": 0.0209,
      "step": 880
    },
    {
      "epoch": 2.032004572081726,
      "grad_norm": 0.13496187329292297,
      "learning_rate": 7.409746037083548e-05,
      "loss": 0.0138,
      "step": 890
    },
    {
      "epoch": 2.05486498071153,
      "grad_norm": 0.18669717013835907,
      "learning_rate": 7.339613855201032e-05,
      "loss": 0.0149,
      "step": 900
    },
    {
      "epoch": 2.0777253893413343,
      "grad_norm": 0.1247486025094986,
      "learning_rate": 7.2688872970148e-05,
      "loss": 0.0128,
      "step": 910
    },
    {
      "epoch": 2.1005857979711386,
      "grad_norm": 0.1156638115644455,
      "learning_rate": 7.197584330528478e-05,
      "loss": 0.0117,
      "step": 920
    },
    {
      "epoch": 2.123446206600943,
      "grad_norm": 0.03096736967563629,
      "learning_rate": 7.125723070181576e-05,
      "loss": 0.0143,
      "step": 930
    },
    {
      "epoch": 2.1463066152307473,
      "grad_norm": 0.04739869013428688,
      "learning_rate": 7.053321772247545e-05,
      "loss": 0.0099,
      "step": 940
    },
    {
      "epoch": 2.1691670238605516,
      "grad_norm": 0.1429280787706375,
      "learning_rate": 6.980398830195785e-05,
      "loss": 0.0109,
      "step": 950
    },
    {
      "epoch": 2.192027432490356,
      "grad_norm": 0.2534593343734741,
      "learning_rate": 6.906972770018802e-05,
      "loss": 0.0163,
      "step": 960
    },
    {
      "epoch": 2.2148878411201602,
      "grad_norm": 0.1354377567768097,
      "learning_rate": 6.833062245525705e-05,
      "loss": 0.0147,
      "step": 970
    },
    {
      "epoch": 2.237748249749964,
      "grad_norm": 0.06828989833593369,
      "learning_rate": 6.758686033603225e-05,
      "loss": 0.0201,
      "step": 980
    },
    {
      "epoch": 2.2606086583797684,
      "grad_norm": 0.14169731736183167,
      "learning_rate": 6.683863029445469e-05,
      "loss": 0.0133,
      "step": 990
    },
    {
      "epoch": 2.2834690670095728,
      "grad_norm": 0.04132571816444397,
      "learning_rate": 6.608612241753614e-05,
      "loss": 0.0097,
      "step": 1000
    },
    {
      "epoch": 2.306329475639377,
      "grad_norm": 0.3469948172569275,
      "learning_rate": 6.532952787906771e-05,
      "loss": 0.0139,
      "step": 1010
    },
    {
      "epoch": 2.3291898842691814,
      "grad_norm": 0.04748811200261116,
      "learning_rate": 6.45690388910523e-05,
      "loss": 0.0153,
      "step": 1020
    },
    {
      "epoch": 2.3520502928989857,
      "grad_norm": 0.1330500990152359,
      "learning_rate": 6.380484865487347e-05,
      "loss": 0.015,
      "step": 1030
    },
    {
      "epoch": 2.3749107015287896,
      "grad_norm": 0.14051929116249084,
      "learning_rate": 6.303715131221264e-05,
      "loss": 0.0069,
      "step": 1040
    },
    {
      "epoch": 2.397771110158594,
      "grad_norm": 0.07501324266195297,
      "learning_rate": 6.22661418957279e-05,
      "loss": 0.0101,
      "step": 1050
    },
    {
      "epoch": 2.420631518788398,
      "grad_norm": 0.1099422499537468,
      "learning_rate": 6.149201627950593e-05,
      "loss": 0.013,
      "step": 1060
    },
    {
      "epoch": 2.4434919274182025,
      "grad_norm": 0.09372354298830032,
      "learning_rate": 6.071497112930047e-05,
      "loss": 0.0126,
      "step": 1070
    },
    {
      "epoch": 2.466352336048007,
      "grad_norm": 0.09480420500040054,
      "learning_rate": 5.99352038525697e-05,
      "loss": 0.0127,
      "step": 1080
    },
    {
      "epoch": 2.489212744677811,
      "grad_norm": 0.1486363410949707,
      "learning_rate": 5.9152912548324976e-05,
      "loss": 0.016,
      "step": 1090
    },
    {
      "epoch": 2.5120731533076155,
      "grad_norm": 0.10003609955310822,
      "learning_rate": 5.836829595680406e-05,
      "loss": 0.0128,
      "step": 1100
    },
    {
      "epoch": 2.53493356193742,
      "grad_norm": 0.08636961132287979,
      "learning_rate": 5.758155340898137e-05,
      "loss": 0.0119,
      "step": 1110
    },
    {
      "epoch": 2.557793970567224,
      "grad_norm": 0.11953787505626678,
      "learning_rate": 5.679288477592815e-05,
      "loss": 0.0097,
      "step": 1120
    },
    {
      "epoch": 2.580654379197028,
      "grad_norm": 0.06493639945983887,
      "learning_rate": 5.600249041803545e-05,
      "loss": 0.0105,
      "step": 1130
    },
    {
      "epoch": 2.6035147878268323,
      "grad_norm": 0.12084943801164627,
      "learning_rate": 5.5210571134112824e-05,
      "loss": 0.0122,
      "step": 1140
    },
    {
      "epoch": 2.6263751964566366,
      "grad_norm": 0.13135117292404175,
      "learning_rate": 5.44173281103756e-05,
      "loss": 0.0142,
      "step": 1150
    },
    {
      "epoch": 2.649235605086441,
      "grad_norm": 0.137589693069458,
      "learning_rate": 5.362296286933371e-05,
      "loss": 0.0126,
      "step": 1160
    },
    {
      "epoch": 2.6720960137162453,
      "grad_norm": 0.058550190180540085,
      "learning_rate": 5.282767721859516e-05,
      "loss": 0.0076,
      "step": 1170
    },
    {
      "epoch": 2.6949564223460496,
      "grad_norm": 0.07393407076597214,
      "learning_rate": 5.203167319959702e-05,
      "loss": 0.012,
      "step": 1180
    },
    {
      "epoch": 2.7178168309758535,
      "grad_norm": 0.1508752703666687,
      "learning_rate": 5.123515303627698e-05,
      "loss": 0.009,
      "step": 1190
    },
    {
      "epoch": 2.740677239605658,
      "grad_norm": 0.1534973382949829,
      "learning_rate": 5.0438319083698714e-05,
      "loss": 0.0101,
      "step": 1200
    },
    {
      "epoch": 2.763537648235462,
      "grad_norm": 0.14507421851158142,
      "learning_rate": 4.9641373776643616e-05,
      "loss": 0.0081,
      "step": 1210
    },
    {
      "epoch": 2.7863980568652664,
      "grad_norm": 0.1377381533384323,
      "learning_rate": 4.8844519578182604e-05,
      "loss": 0.0109,
      "step": 1220
    },
    {
      "epoch": 2.8092584654950707,
      "grad_norm": 0.2066635936498642,
      "learning_rate": 4.804795892824056e-05,
      "loss": 0.0135,
      "step": 1230
    },
    {
      "epoch": 2.832118874124875,
      "grad_norm": 0.05857717618346214,
      "learning_rate": 4.7251894192166654e-05,
      "loss": 0.0076,
      "step": 1240
    },
    {
      "epoch": 2.8549792827546794,
      "grad_norm": 0.168072909116745,
      "learning_rate": 4.645652760932376e-05,
      "loss": 0.009,
      "step": 1250
    },
    {
      "epoch": 2.8778396913844837,
      "grad_norm": 0.17164477705955505,
      "learning_rate": 4.566206124170963e-05,
      "loss": 0.0112,
      "step": 1260
    },
    {
      "epoch": 2.900700100014288,
      "grad_norm": 0.15097732841968536,
      "learning_rate": 4.486869692262337e-05,
      "loss": 0.0094,
      "step": 1270
    },
    {
      "epoch": 2.923560508644092,
      "grad_norm": 0.0871570035815239,
      "learning_rate": 4.407663620538985e-05,
      "loss": 0.0101,
      "step": 1280
    },
    {
      "epoch": 2.946420917273896,
      "grad_norm": 0.13305386900901794,
      "learning_rate": 4.328608031215539e-05,
      "loss": 0.0092,
      "step": 1290
    },
    {
      "epoch": 2.9692813259037005,
      "grad_norm": 0.23256619274616241,
      "learning_rate": 4.249723008276737e-05,
      "loss": 0.015,
      "step": 1300
    },
    {
      "epoch": 2.992141734533505,
      "grad_norm": 0.10129191726446152,
      "learning_rate": 4.171028592375125e-05,
      "loss": 0.0124,
      "step": 1310
    },
    {
      "epoch": 3.0137162451778825,
      "grad_norm": 0.15339592099189758,
      "learning_rate": 4.092544775739735e-05,
      "loss": 0.0067,
      "step": 1320
    },
    {
      "epoch": 3.036576653807687,
      "grad_norm": 0.03447636589407921,
      "learning_rate": 4.0142914970970926e-05,
      "loss": 0.0047,
      "step": 1330
    },
    {
      "epoch": 3.059437062437491,
      "grad_norm": 0.056881342083215714,
      "learning_rate": 3.9362886366058025e-05,
      "loss": 0.0074,
      "step": 1340
    },
    {
      "epoch": 3.0822974710672955,
      "grad_norm": 0.22134284675121307,
      "learning_rate": 3.858556010806013e-05,
      "loss": 0.004,
      "step": 1350
    },
    {
      "epoch": 3.1051578796970998,
      "grad_norm": 0.12356160581111908,
      "learning_rate": 3.781113367585053e-05,
      "loss": 0.0048,
      "step": 1360
    },
    {
      "epoch": 3.1280182883269037,
      "grad_norm": 0.03940752521157265,
      "learning_rate": 3.703980381160497e-05,
      "loss": 0.003,
      "step": 1370
    },
    {
      "epoch": 3.150878696956708,
      "grad_norm": 0.09747901558876038,
      "learning_rate": 3.627176647081954e-05,
      "loss": 0.0056,
      "step": 1380
    },
    {
      "epoch": 3.1737391055865123,
      "grad_norm": 0.016509221866726875,
      "learning_rate": 3.550721677252839e-05,
      "loss": 0.0017,
      "step": 1390
    },
    {
      "epoch": 3.1965995142163166,
      "grad_norm": 0.15755465626716614,
      "learning_rate": 3.4746348949733965e-05,
      "loss": 0.003,
      "step": 1400
    },
    {
      "epoch": 3.219459922846121,
      "grad_norm": 0.017669400200247765,
      "learning_rate": 3.398935630006236e-05,
      "loss": 0.0031,
      "step": 1410
    },
    {
      "epoch": 3.2423203314759252,
      "grad_norm": 0.07502660900354385,
      "learning_rate": 3.32364311366562e-05,
      "loss": 0.005,
      "step": 1420
    },
    {
      "epoch": 3.2651807401057296,
      "grad_norm": 0.4353109300136566,
      "learning_rate": 3.248776473931774e-05,
      "loss": 0.0058,
      "step": 1430
    },
    {
      "epoch": 3.288041148735534,
      "grad_norm": 0.06076102331280708,
      "learning_rate": 3.174354730591447e-05,
      "loss": 0.0055,
      "step": 1440
    },
    {
      "epoch": 3.3109015573653378,
      "grad_norm": 0.04932214692234993,
      "learning_rate": 3.100396790405948e-05,
      "loss": 0.0041,
      "step": 1450
    },
    {
      "epoch": 3.333761965995142,
      "grad_norm": 0.12315461784601212,
      "learning_rate": 3.026921442307916e-05,
      "loss": 0.0034,
      "step": 1460
    },
    {
      "epoch": 3.3566223746249464,
      "grad_norm": 0.08378969877958298,
      "learning_rate": 2.9539473526280005e-05,
      "loss": 0.0021,
      "step": 1470
    },
    {
      "epoch": 3.3794827832547507,
      "grad_norm": 0.1416824609041214,
      "learning_rate": 2.8814930603527068e-05,
      "loss": 0.0024,
      "step": 1480
    },
    {
      "epoch": 3.402343191884555,
      "grad_norm": 0.09372381865978241,
      "learning_rate": 2.809576972414587e-05,
      "loss": 0.0023,
      "step": 1490
    },
    {
      "epoch": 3.4252036005143593,
      "grad_norm": 0.16745346784591675,
      "learning_rate": 2.738217359015981e-05,
      "loss": 0.0041,
      "step": 1500
    },
    {
      "epoch": 3.4480640091441632,
      "grad_norm": 0.1455969363451004,
      "learning_rate": 2.6674323489874843e-05,
      "loss": 0.0056,
      "step": 1510
    },
    {
      "epoch": 3.4709244177739675,
      "grad_norm": 0.07298196107149124,
      "learning_rate": 2.5972399251823488e-05,
      "loss": 0.0034,
      "step": 1520
    },
    {
      "epoch": 3.493784826403772,
      "grad_norm": 0.024875745177268982,
      "learning_rate": 2.5276579199079486e-05,
      "loss": 0.0043,
      "step": 1530
    },
    {
      "epoch": 3.516645235033576,
      "grad_norm": 0.17137651145458221,
      "learning_rate": 2.4587040103955134e-05,
      "loss": 0.0041,
      "step": 1540
    },
    {
      "epoch": 3.5395056436633805,
      "grad_norm": 0.10219382494688034,
      "learning_rate": 2.39039571430924e-05,
      "loss": 0.0037,
      "step": 1550
    },
    {
      "epoch": 3.562366052293185,
      "grad_norm": 0.05997829884290695,
      "learning_rate": 2.3227503852959453e-05,
      "loss": 0.0025,
      "step": 1560
    },
    {
      "epoch": 3.585226460922989,
      "grad_norm": 0.022572757676243782,
      "learning_rate": 2.2557852085764053e-05,
      "loss": 0.0087,
      "step": 1570
    },
    {
      "epoch": 3.6080868695527935,
      "grad_norm": 0.025689126923680305,
      "learning_rate": 2.189517196579453e-05,
      "loss": 0.0046,
      "step": 1580
    },
    {
      "epoch": 3.6309472781825978,
      "grad_norm": 0.19351957738399506,
      "learning_rate": 2.1239631846200026e-05,
      "loss": 0.0041,
      "step": 1590
    },
    {
      "epoch": 3.6538076868124016,
      "grad_norm": 0.05244070664048195,
      "learning_rate": 2.0591398266220502e-05,
      "loss": 0.0025,
      "step": 1600
    },
    {
      "epoch": 3.676668095442206,
      "grad_norm": 0.05331341549754143,
      "learning_rate": 1.995063590887759e-05,
      "loss": 0.0045,
      "step": 1610
    },
    {
      "epoch": 3.6995285040720103,
      "grad_norm": 0.038474228233098984,
      "learning_rate": 1.9317507559137184e-05,
      "loss": 0.0039,
      "step": 1620
    },
    {
      "epoch": 3.7223889127018146,
      "grad_norm": 0.046009331941604614,
      "learning_rate": 1.8692174062553924e-05,
      "loss": 0.004,
      "step": 1630
    },
    {
      "epoch": 3.745249321331619,
      "grad_norm": 0.024187810719013214,
      "learning_rate": 1.807479428440873e-05,
      "loss": 0.0024,
      "step": 1640
    },
    {
      "epoch": 3.768109729961423,
      "grad_norm": 0.07836172729730606,
      "learning_rate": 1.746552506934917e-05,
      "loss": 0.0027,
      "step": 1650
    },
    {
      "epoch": 3.790970138591227,
      "grad_norm": 0.0379299558699131,
      "learning_rate": 1.6864521201543197e-05,
      "loss": 0.0032,
      "step": 1660
    },
    {
      "epoch": 3.8138305472210314,
      "grad_norm": 0.04367607831954956,
      "learning_rate": 1.627193536535656e-05,
      "loss": 0.002,
      "step": 1670
    },
    {
      "epoch": 3.8366909558508357,
      "grad_norm": 0.18003098666667938,
      "learning_rate": 1.5687918106563326e-05,
      "loss": 0.0025,
      "step": 1680
    },
    {
      "epoch": 3.85955136448064,
      "grad_norm": 0.04013814404606819,
      "learning_rate": 1.5112617794100047e-05,
      "loss": 0.0026,
      "step": 1690
    },
    {
      "epoch": 3.8824117731104444,
      "grad_norm": 0.030205566436052322,
      "learning_rate": 1.454618058237272e-05,
      "loss": 0.0029,
      "step": 1700
    },
    {
      "epoch": 3.9052721817402487,
      "grad_norm": 0.10214164853096008,
      "learning_rate": 1.3988750374126396e-05,
      "loss": 0.0026,
      "step": 1710
    },
    {
      "epoch": 3.928132590370053,
      "grad_norm": 0.027208510786294937,
      "learning_rate": 1.344046878388695e-05,
      "loss": 0.0026,
      "step": 1720
    },
    {
      "epoch": 3.9509929989998573,
      "grad_norm": 0.020535435527563095,
      "learning_rate": 1.2901475101983939e-05,
      "loss": 0.0018,
      "step": 1730
    },
    {
      "epoch": 3.973853407629661,
      "grad_norm": 0.03657793253660202,
      "learning_rate": 1.2371906259164168e-05,
      "loss": 0.0023,
      "step": 1740
    },
    {
      "epoch": 3.9967138162594655,
      "grad_norm": 0.037314701825380325,
      "learning_rate": 1.1851896791804507e-05,
      "loss": 0.004,
      "step": 1750
    },
    {
      "epoch": 4.018288326903844,
      "grad_norm": 0.01339447870850563,
      "learning_rate": 1.1341578807733088e-05,
      "loss": 0.0017,
      "step": 1760
    },
    {
      "epoch": 4.041148735533648,
      "grad_norm": 0.0763229951262474,
      "learning_rate": 1.0841081952667498e-05,
      "loss": 0.0018,
      "step": 1770
    },
    {
      "epoch": 4.064009144163452,
      "grad_norm": 0.029324688017368317,
      "learning_rate": 1.0350533377278353e-05,
      "loss": 0.0015,
      "step": 1780
    },
    {
      "epoch": 4.086869552793257,
      "grad_norm": 0.021862059831619263,
      "learning_rate": 9.870057704886908e-06,
      "loss": 0.0011,
      "step": 1790
    },
    {
      "epoch": 4.10972996142306,
      "grad_norm": 0.011852990835905075,
      "learning_rate": 9.399776999804566e-06,
      "loss": 0.0012,
      "step": 1800
    },
    {
      "epoch": 4.132590370052864,
      "grad_norm": 0.022478003054857254,
      "learning_rate": 8.939810736322574e-06,
      "loss": 0.0013,
      "step": 1810
    },
    {
      "epoch": 4.155450778682669,
      "grad_norm": 0.08652134984731674,
      "learning_rate": 8.490275768359784e-06,
      "loss": 0.0017,
      "step": 1820
    },
    {
      "epoch": 4.178311187312473,
      "grad_norm": 0.03378741443157196,
      "learning_rate": 8.051286299775951e-06,
      "loss": 0.0015,
      "step": 1830
    },
    {
      "epoch": 4.201171595942277,
      "grad_norm": 0.0582275353372097,
      "learning_rate": 7.6229538553584556e-06,
      "loss": 0.0011,
      "step": 1840
    },
    {
      "epoch": 4.224032004572082,
      "grad_norm": 0.016311751678586006,
      "learning_rate": 7.20538725248947e-06,
      "loss": 0.0024,
      "step": 1850
    },
    {
      "epoch": 4.246892413201886,
      "grad_norm": 0.017814885824918747,
      "learning_rate": 6.798692573501114e-06,
      "loss": 0.0011,
      "step": 1860
    },
    {
      "epoch": 4.26975282183169,
      "grad_norm": 0.029111934825778008,
      "learning_rate": 6.402973138725282e-06,
      "loss": 0.0013,
      "step": 1870
    },
    {
      "epoch": 4.292613230461495,
      "grad_norm": 0.08662671595811844,
      "learning_rate": 6.018329480245255e-06,
      "loss": 0.0017,
      "step": 1880
    },
    {
      "epoch": 4.315473639091299,
      "grad_norm": 0.026320841163396835,
      "learning_rate": 5.64485931635565e-06,
      "loss": 0.0014,
      "step": 1890
    },
    {
      "epoch": 4.338334047721103,
      "grad_norm": 0.01906999573111534,
      "learning_rate": 5.2826575267371615e-06,
      "loss": 0.0039,
      "step": 1900
    },
    {
      "epoch": 4.3611944563509075,
      "grad_norm": 0.019031250849366188,
      "learning_rate": 4.931816128352534e-06,
      "loss": 0.0011,
      "step": 1910
    },
    {
      "epoch": 4.384054864980712,
      "grad_norm": 0.14129675924777985,
      "learning_rate": 4.592424252069705e-06,
      "loss": 0.0017,
      "step": 1920
    },
    {
      "epoch": 4.406915273610516,
      "grad_norm": 0.012528040446341038,
      "learning_rate": 4.26456812001822e-06,
      "loss": 0.0014,
      "step": 1930
    },
    {
      "epoch": 4.4297756822403205,
      "grad_norm": 0.02767062373459339,
      "learning_rate": 3.948331023684637e-06,
      "loss": 0.0014,
      "step": 1940
    },
    {
      "epoch": 4.452636090870124,
      "grad_norm": 0.03427803888916969,
      "learning_rate": 3.64379330275233e-06,
      "loss": 0.0014,
      "step": 1950
    },
    {
      "epoch": 4.475496499499928,
      "grad_norm": 0.01602611131966114,
      "learning_rate": 3.3510323246913887e-06,
      "loss": 0.001,
      "step": 1960
    },
    {
      "epoch": 4.4983569081297325,
      "grad_norm": 0.031283311545848846,
      "learning_rate": 3.07012246510342e-06,
      "loss": 0.001,
      "step": 1970
    },
    {
      "epoch": 4.521217316759537,
      "grad_norm": 0.010646236129105091,
      "learning_rate": 2.801135088826529e-06,
      "loss": 0.0011,
      "step": 1980
    },
    {
      "epoch": 4.544077725389341,
      "grad_norm": 0.040134090930223465,
      "learning_rate": 2.5441385318051735e-06,
      "loss": 0.0012,
      "step": 1990
    },
    {
      "epoch": 4.5669381340191455,
      "grad_norm": 0.020084287971258163,
      "learning_rate": 2.2991980837294757e-06,
      "loss": 0.001,
      "step": 2000
    },
    {
      "epoch": 4.58979854264895,
      "grad_norm": 0.023815790191292763,
      "learning_rate": 2.066375971448481e-06,
      "loss": 0.0013,
      "step": 2010
    },
    {
      "epoch": 4.612658951278754,
      "grad_norm": 0.054296255111694336,
      "learning_rate": 1.8457313431614498e-06,
      "loss": 0.0015,
      "step": 2020
    },
    {
      "epoch": 4.6355193599085585,
      "grad_norm": 0.030289700254797935,
      "learning_rate": 1.6373202533913556e-06,
      "loss": 0.0009,
      "step": 2030
    },
    {
      "epoch": 4.658379768538363,
      "grad_norm": 0.01671276055276394,
      "learning_rate": 1.4411956487442925e-06,
      "loss": 0.0012,
      "step": 2040
    },
    {
      "epoch": 4.681240177168167,
      "grad_norm": 0.034442536532878876,
      "learning_rate": 1.2574073544584286e-06,
      "loss": 0.001,
      "step": 2050
    },
    {
      "epoch": 4.704100585797971,
      "grad_norm": 0.06515878438949585,
      "learning_rate": 1.0860020617459887e-06,
      "loss": 0.0018,
      "step": 2060
    },
    {
      "epoch": 4.726960994427776,
      "grad_norm": 0.008722376078367233,
      "learning_rate": 9.270233159313912e-07,
      "loss": 0.0013,
      "step": 2070
    },
    {
      "epoch": 4.749821403057579,
      "grad_norm": 0.006462674122303724,
      "learning_rate": 7.805115053885758e-07,
      "loss": 0.0016,
      "step": 2080
    },
    {
      "epoch": 4.772681811687384,
      "grad_norm": 0.018205828964710236,
      "learning_rate": 6.465038512804555e-07,
      "loss": 0.0009,
      "step": 2090
    },
    {
      "epoch": 4.795542220317188,
      "grad_norm": 0.011852984316647053,
      "learning_rate": 5.250343981028305e-07,
      "loss": 0.0009,
      "step": 2100
    },
    {
      "epoch": 4.818402628946992,
      "grad_norm": 0.04381529241800308,
      "learning_rate": 4.1613400503550114e-07,
      "loss": 0.001,
      "step": 2110
    },
    {
      "epoch": 4.841263037576796,
      "grad_norm": 0.06918981671333313,
      "learning_rate": 3.1983033810248366e-07,
      "loss": 0.0017,
      "step": 2120
    },
    {
      "epoch": 4.864123446206601,
      "grad_norm": 0.009174630045890808,
      "learning_rate": 2.3614786314348768e-07,
      "loss": 0.0011,
      "step": 2130
    },
    {
      "epoch": 4.886983854836405,
      "grad_norm": 0.0225895494222641,
      "learning_rate": 1.651078395984329e-07,
      "loss": 0.0018,
      "step": 2140
    },
    {
      "epoch": 4.909844263466209,
      "grad_norm": 0.02551046386361122,
      "learning_rate": 1.0672831510645242e-07,
      "loss": 0.0017,
      "step": 2150
    },
    {
      "epoch": 4.932704672096014,
      "grad_norm": 0.03379609063267708,
      "learning_rate": 6.102412092097165e-08,
      "loss": 0.0015,
      "step": 2160
    },
    {
      "epoch": 4.955565080725818,
      "grad_norm": 0.03675275668501854,
      "learning_rate": 2.8006868141805533e-08,
      "loss": 0.0011,
      "step": 2170
    },
    {
      "epoch": 4.978425489355622,
      "grad_norm": 0.12221638858318329,
      "learning_rate": 7.684944765379286e-09,
      "loss": 0.0016,
      "step": 2180
    },
    {
      "epoch": 5.0,
      "grad_norm": 0.0298867579549551,
      "learning_rate": 6.351355378297896e-11,
      "loss": 0.0018,
      "step": 2190
    }
  ],
  "logging_steps": 10,
  "max_steps": 2190,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 250,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.1069953280835584e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}