{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.9968798751950079,
  "eval_steps": 500,
  "global_step": 960,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02080083203328133,
      "grad_norm": 2.954287528991699,
      "learning_rate": 6.25e-06,
      "loss": 0.3901,
      "step": 10
    },
    {
      "epoch": 0.04160166406656266,
      "grad_norm": 0.9206520318984985,
      "learning_rate": 1.25e-05,
      "loss": 0.2205,
      "step": 20
    },
    {
      "epoch": 0.062402496099843996,
      "grad_norm": 0.7870351076126099,
      "learning_rate": 1.8750000000000002e-05,
      "loss": 0.1896,
      "step": 30
    },
    {
      "epoch": 0.08320332813312532,
      "grad_norm": 0.8045545220375061,
      "learning_rate": 2.5e-05,
      "loss": 0.1908,
      "step": 40
    },
    {
      "epoch": 0.10400416016640665,
      "grad_norm": 0.6852442622184753,
      "learning_rate": 2.9996796251818968e-05,
      "loss": 0.1623,
      "step": 50
    },
    {
      "epoch": 0.12480499219968799,
      "grad_norm": 0.6457542181015015,
      "learning_rate": 2.9884808696055675e-05,
      "loss": 0.164,
      "step": 60
    },
    {
      "epoch": 0.1456058242329693,
      "grad_norm": 0.6373040676116943,
      "learning_rate": 2.9613999639484314e-05,
      "loss": 0.1737,
      "step": 70
    },
    {
      "epoch": 0.16640665626625065,
      "grad_norm": 0.8400445580482483,
      "learning_rate": 2.9187258625509518e-05,
      "loss": 0.1637,
      "step": 80
    },
    {
      "epoch": 0.187207488299532,
      "grad_norm": 0.5773469805717468,
      "learning_rate": 2.86091389977234e-05,
      "loss": 0.1647,
      "step": 90
    },
    {
      "epoch": 0.2080083203328133,
      "grad_norm": 0.599101722240448,
      "learning_rate": 2.788580931554828e-05,
      "loss": 0.1653,
      "step": 100
    },
    {
      "epoch": 0.22880915236609464,
      "grad_norm": 1.818597435951233,
      "learning_rate": 2.7024987535462327e-05,
      "loss": 0.165,
      "step": 110
    },
    {
      "epoch": 0.24960998439937598,
      "grad_norm": 0.5002009272575378,
      "learning_rate": 2.6035858660096975e-05,
      "loss": 0.1638,
      "step": 120
    },
    {
      "epoch": 0.2704108164326573,
      "grad_norm": 0.44137582182884216,
      "learning_rate": 2.49289767338935e-05,
      "loss": 0.1355,
      "step": 130
    },
    {
      "epoch": 0.2912116484659386,
      "grad_norm": 0.49017268419265747,
      "learning_rate": 2.3716152231029077e-05,
      "loss": 0.1621,
      "step": 140
    },
    {
      "epoch": 0.31201248049922,
      "grad_norm": 0.42483949661254883,
      "learning_rate": 2.2410326037187558e-05,
      "loss": 0.1518,
      "step": 150
    },
    {
      "epoch": 0.3328133125325013,
      "grad_norm": 0.4569384455680847,
      "learning_rate": 2.1025431369794546e-05,
      "loss": 0.1346,
      "step": 160
    },
    {
      "epoch": 0.3536141445657826,
      "grad_norm": 0.4882504642009735,
      "learning_rate": 1.9576245110033233e-05,
      "loss": 0.1534,
      "step": 170
    },
    {
      "epoch": 0.374414976599064,
      "grad_norm": 0.4138477146625519,
      "learning_rate": 1.8078230132934514e-05,
      "loss": 0.1443,
      "step": 180
    },
    {
      "epoch": 0.3952158086323453,
      "grad_norm": 0.43387019634246826,
      "learning_rate": 1.6547370317885354e-05,
      "loss": 0.148,
      "step": 190
    },
    {
      "epoch": 0.4160166406656266,
      "grad_norm": 0.414253294467926,
      "learning_rate": 1.5e-05,
      "loss": 0.1412,
      "step": 200
    },
    {
      "epoch": 0.43681747269890797,
      "grad_norm": 0.43514806032180786,
      "learning_rate": 1.3452629682114646e-05,
      "loss": 0.1331,
      "step": 210
    },
    {
      "epoch": 0.4576183047321893,
      "grad_norm": 0.3686197102069855,
      "learning_rate": 1.1921769867065483e-05,
      "loss": 0.1317,
      "step": 220
    },
    {
      "epoch": 0.4784191367654706,
      "grad_norm": 0.4298912584781647,
      "learning_rate": 1.042375488996677e-05,
      "loss": 0.1332,
      "step": 230
    },
    {
      "epoch": 0.49921996879875197,
      "grad_norm": 0.4293174743652344,
      "learning_rate": 8.974568630205462e-06,
      "loss": 0.1402,
      "step": 240
    },
    {
      "epoch": 0.5200208008320333,
      "grad_norm": 0.4327556788921356,
      "learning_rate": 7.589673962812442e-06,
      "loss": 0.1433,
      "step": 250
    },
    {
      "epoch": 0.5408216328653146,
      "grad_norm": 0.3953148424625397,
      "learning_rate": 6.283847768970927e-06,
      "loss": 0.1416,
      "step": 260
    },
    {
      "epoch": 0.5616224648985959,
      "grad_norm": 0.3659592866897583,
      "learning_rate": 5.071023266106502e-06,
      "loss": 0.1262,
      "step": 270
    },
    {
      "epoch": 0.5824232969318772,
      "grad_norm": 0.36430105566978455,
      "learning_rate": 3.964141339903026e-06,
      "loss": 0.1153,
      "step": 280
    },
    {
      "epoch": 0.6032241289651586,
      "grad_norm": 0.38800716400146484,
      "learning_rate": 2.975012464537676e-06,
      "loss": 0.1293,
      "step": 290
    },
    {
      "epoch": 0.62402496099844,
      "grad_norm": 0.3738207519054413,
      "learning_rate": 2.1141906844517207e-06,
      "loss": 0.1308,
      "step": 300
    },
    {
      "epoch": 0.6448257930317213,
      "grad_norm": 0.3933092951774597,
      "learning_rate": 1.390861002276602e-06,
      "loss": 0.122,
      "step": 310
    },
    {
      "epoch": 0.6656266250650026,
      "grad_norm": 0.37102991342544556,
      "learning_rate": 8.127413744904805e-07,
      "loss": 0.1165,
      "step": 320
    },
    {
      "epoch": 0.6864274570982839,
      "grad_norm": 0.37362316250801086,
      "learning_rate": 3.860003605156881e-07,
      "loss": 0.1344,
      "step": 330
    },
    {
      "epoch": 0.7072282891315652,
      "grad_norm": 0.3693016469478607,
      "learning_rate": 1.1519130394432476e-07,
      "loss": 0.1144,
      "step": 340
    },
    {
      "epoch": 0.7280291211648466,
      "grad_norm": 0.3761012852191925,
      "learning_rate": 3.20374818103486e-09,
      "loss": 0.1156,
      "step": 350
    },
    {
      "epoch": 0.748829953198128,
      "grad_norm": 0.3676697909832001,
      "learning_rate": 5.1232604899952296e-08,
      "loss": 0.1182,
      "step": 360
    },
    {
      "epoch": 0.7696307852314093,
      "grad_norm": 0.3497138023376465,
      "learning_rate": 2.587654042896087e-07,
      "loss": 0.1222,
      "step": 370
    },
    {
      "epoch": 0.7904316172646906,
      "grad_norm": 0.35691773891448975,
      "learning_rate": 6.23587763126211e-07,
      "loss": 0.1155,
      "step": 380
    },
    {
      "epoch": 0.8112324492979719,
      "grad_norm": 0.3574424386024475,
      "learning_rate": 1.1418070123306974e-06,
      "loss": 0.1258,
      "step": 390
    },
    {
      "epoch": 0.8320332813312532,
      "grad_norm": 0.38320979475975037,
      "learning_rate": 1.8078937319026607e-06,
      "loss": 0.1278,
      "step": 400
    },
    {
      "epoch": 0.8528341133645346,
      "grad_norm": 0.36120763421058655,
      "learning_rate": 2.614740750051663e-06,
      "loss": 0.1271,
      "step": 410
    },
    {
      "epoch": 0.8736349453978159,
      "grad_norm": 0.409435510635376,
      "learning_rate": 3.5537389770028983e-06,
      "loss": 0.1471,
      "step": 420
    },
    {
      "epoch": 0.8944357774310973,
      "grad_norm": 0.386115163564682,
      "learning_rate": 4.614869264327555e-06,
      "loss": 0.1139,
      "step": 430
    },
    {
      "epoch": 0.9152366094643786,
      "grad_norm": 0.35207945108413696,
      "learning_rate": 5.786809309654979e-06,
      "loss": 0.1101,
      "step": 440
    },
    {
      "epoch": 0.9360374414976599,
      "grad_norm": 0.44765156507492065,
      "learning_rate": 7.0570544660893674e-06,
      "loss": 0.1315,
      "step": 450
    },
    {
      "epoch": 0.9568382735309412,
      "grad_norm": 0.41203129291534424,
      "learning_rate": 8.412051167289444e-06,
      "loss": 0.1179,
      "step": 460
    },
    {
      "epoch": 0.9776391055642226,
      "grad_norm": 0.41569167375564575,
      "learning_rate": 9.837341544560421e-06,
      "loss": 0.1252,
      "step": 470
    },
    {
      "epoch": 0.9984399375975039,
      "grad_norm": 0.46924883127212524,
      "learning_rate": 1.1317717692888014e-05,
      "loss": 0.1205,
      "step": 480
    },
    {
      "epoch": 1.0192407696307852,
      "grad_norm": 0.42828407883644104,
      "learning_rate": 1.28373839398898e-05,
      "loss": 0.0949,
      "step": 490
    },
    {
      "epoch": 1.0400416016640666,
      "grad_norm": 0.3947116434574127,
      "learning_rate": 1.4380125386267791e-05,
      "loss": 0.0831,
      "step": 500
    },
    {
      "epoch": 1.0608424336973479,
      "grad_norm": 0.38285788893699646,
      "learning_rate": 1.592948091942891e-05,
      "loss": 0.1127,
      "step": 510
    },
    {
      "epoch": 1.0816432657306292,
      "grad_norm": 0.4146939218044281,
      "learning_rate": 1.7468918854211013e-05,
      "loss": 0.1104,
      "step": 520
    },
    {
      "epoch": 1.1024440977639105,
      "grad_norm": 0.41506752371788025,
      "learning_rate": 1.898201332662109e-05,
      "loss": 0.1132,
      "step": 530
    },
    {
      "epoch": 1.1232449297971918,
      "grad_norm": 0.42785730957984924,
      "learning_rate": 2.045261955845744e-05,
      "loss": 0.1006,
      "step": 540
    },
    {
      "epoch": 1.1440457618304731,
      "grad_norm": 0.3749031126499176,
      "learning_rate": 2.186504612273522e-05,
      "loss": 0.1012,
      "step": 550
    },
    {
      "epoch": 1.1648465938637544,
      "grad_norm": 0.39465105533599854,
      "learning_rate": 2.3204222371836402e-05,
      "loss": 0.1132,
      "step": 560
    },
    {
      "epoch": 1.185647425897036,
      "grad_norm": 0.4099496006965637,
      "learning_rate": 2.4455859241919324e-05,
      "loss": 0.0986,
      "step": 570
    },
    {
      "epoch": 1.2064482579303173,
      "grad_norm": 0.41943153738975525,
      "learning_rate": 2.5606601717798212e-05,
      "loss": 0.1272,
      "step": 580
    },
    {
      "epoch": 1.2272490899635986,
      "grad_norm": 0.40004250407218933,
      "learning_rate": 2.6644171331486363e-05,
      "loss": 0.1255,
      "step": 590
    },
    {
      "epoch": 1.24804992199688,
      "grad_norm": 0.5004564523696899,
      "learning_rate": 2.7557497173937928e-05,
      "loss": 0.1217,
      "step": 600
    },
    {
      "epoch": 1.2688507540301612,
      "grad_norm": 0.4227616488933563,
      "learning_rate": 2.8336834022087772e-05,
      "loss": 0.1198,
      "step": 610
    },
    {
      "epoch": 1.2896515860634425,
      "grad_norm": 0.41835710406303406,
      "learning_rate": 2.8973866320769182e-05,
      "loss": 0.1207,
      "step": 620
    },
    {
      "epoch": 1.3104524180967239,
      "grad_norm": 0.40431123971939087,
      "learning_rate": 2.9461796910018204e-05,
      "loss": 0.1186,
      "step": 630
    },
    {
      "epoch": 1.3312532501300052,
      "grad_norm": 0.41547176241874695,
      "learning_rate": 2.979541955104084e-05,
      "loss": 0.1254,
      "step": 640
    },
    {
      "epoch": 1.3520540821632865,
      "grad_norm": 0.3922775387763977,
      "learning_rate": 2.997117447698802e-05,
      "loss": 0.1285,
      "step": 650
    },
    {
      "epoch": 1.3728549141965678,
      "grad_norm": 0.3523492217063904,
      "learning_rate": 2.9987186375809513e-05,
      "loss": 0.1148,
      "step": 660
    },
    {
      "epoch": 1.3936557462298491,
      "grad_norm": 0.44066208600997925,
      "learning_rate": 2.984328439990804e-05,
      "loss": 0.1137,
      "step": 670
    },
    {
      "epoch": 1.4144565782631306,
      "grad_norm": 0.4978786110877991,
      "learning_rate": 2.9541003989089956e-05,
      "loss": 0.1197,
      "step": 680
    },
    {
      "epoch": 1.435257410296412,
      "grad_norm": 0.4303026795387268,
      "learning_rate": 2.9083570487361445e-05,
      "loss": 0.1332,
      "step": 690
    },
    {
      "epoch": 1.4560582423296933,
      "grad_norm": 0.34892433881759644,
      "learning_rate": 2.8475864728379682e-05,
      "loss": 0.1246,
      "step": 700
    },
    {
      "epoch": 1.4768590743629746,
      "grad_norm": 0.41033118963241577,
      "learning_rate": 2.772437095676361e-05,
      "loss": 0.1384,
      "step": 710
    },
    {
      "epoch": 1.497659906396256,
      "grad_norm": 0.4040924310684204,
      "learning_rate": 2.683710764094591e-05,
      "loss": 0.1151,
      "step": 720
    },
    {
      "epoch": 1.5184607384295372,
      "grad_norm": 0.33467575907707214,
      "learning_rate": 2.5823541915795936e-05,
      "loss": 0.1196,
      "step": 730
    },
    {
      "epoch": 1.5392615704628185,
      "grad_norm": 0.3744722008705139,
      "learning_rate": 2.4694488567914113e-05,
      "loss": 0.107,
      "step": 740
    },
    {
      "epoch": 1.5600624024960998,
      "grad_norm": 0.3619571626186371,
      "learning_rate": 2.3461994641428768e-05,
      "loss": 0.1324,
      "step": 750
    },
    {
      "epoch": 1.5808632345293812,
      "grad_norm": 0.39949360489845276,
      "learning_rate": 2.213921089555611e-05,
      "loss": 0.1197,
      "step": 760
    },
    {
      "epoch": 1.6016640665626625,
      "grad_norm": 0.35969704389572144,
      "learning_rate": 2.074025148547636e-05,
      "loss": 0.1209,
      "step": 770
    },
    {
      "epoch": 1.6224648985959438,
      "grad_norm": 0.36468222737312317,
      "learning_rate": 1.9280043363736577e-05,
      "loss": 0.113,
      "step": 780
    },
    {
      "epoch": 1.643265730629225,
      "grad_norm": 0.3423938453197479,
      "learning_rate": 1.7774167009073373e-05,
      "loss": 0.1149,
      "step": 790
    },
    {
      "epoch": 1.6640665626625064,
      "grad_norm": 0.38151758909225464,
      "learning_rate": 1.6238690182084996e-05,
      "loss": 0.1083,
      "step": 800
    },
    {
      "epoch": 1.6848673946957877,
      "grad_norm": 0.36454877257347107,
      "learning_rate": 1.4689996481586692e-05,
      "loss": 0.1013,
      "step": 810
    },
    {
      "epoch": 1.705668226729069,
      "grad_norm": 0.3282698690891266,
      "learning_rate": 1.3144610530959793e-05,
      "loss": 0.1004,
      "step": 820
    },
    {
      "epoch": 1.7264690587623504,
      "grad_norm": 0.313281774520874,
      "learning_rate": 1.1619021659762912e-05,
      "loss": 0.1045,
      "step": 830
    },
    {
      "epoch": 1.7472698907956317,
      "grad_norm": 0.332051157951355,
      "learning_rate": 1.0129507961929739e-05,
      "loss": 0.0952,
      "step": 840
    },
    {
      "epoch": 1.7680707228289132,
      "grad_norm": 0.3139604330062866,
      "learning_rate": 8.69196260785939e-06,
      "loss": 0.0919,
      "step": 850
    },
    {
      "epoch": 1.7888715548621945,
      "grad_norm": 0.35599997639656067,
      "learning_rate": 7.321724263655999e-06,
      "loss": 0.1008,
      "step": 860
    },
    {
      "epoch": 1.8096723868954758,
      "grad_norm": 0.30996373295783997,
      "learning_rate": 6.033413426951388e-06,
      "loss": 0.0986,
      "step": 870
    },
    {
      "epoch": 1.8304732189287571,
      "grad_norm": 0.33788877725601196,
      "learning_rate": 4.840776425613894e-06,
      "loss": 0.1144,
      "step": 880
    },
    {
      "epoch": 1.8512740509620385,
      "grad_norm": 0.3212425708770752,
      "learning_rate": 3.7565387438831093e-06,
      "loss": 0.105,
      "step": 890
    },
    {
      "epoch": 1.8720748829953198,
      "grad_norm": 0.30934497714042664,
      "learning_rate": 2.792269240947083e-06,
      "loss": 0.1027,
      "step": 900
    },
    {
      "epoch": 1.8928757150286013,
      "grad_norm": 0.3211897015571594,
      "learning_rate": 1.958256710754496e-06,
      "loss": 0.1049,
      "step": 910
    },
    {
      "epoch": 1.9136765470618826,
      "grad_norm": 0.3027827739715576,
      "learning_rate": 1.2634001001741423e-06,
      "loss": 0.0987,
      "step": 920
    },
    {
      "epoch": 1.934477379095164,
      "grad_norm": 0.28623566031455994,
      "learning_rate": 7.151135568777889e-07,
      "loss": 0.1019,
      "step": 930
    },
    {
      "epoch": 1.9552782111284452,
      "grad_norm": 0.3042950928211212,
      "learning_rate": 3.192473200896845e-07,
      "loss": 0.1044,
      "step": 940
    },
    {
      "epoch": 1.9760790431617266,
      "grad_norm": 0.32153618335723877,
      "learning_rate": 8.002529830135996e-08,
      "loss": 0.1004,
      "step": 950
    },
    {
      "epoch": 1.9968798751950079,
      "grad_norm": 0.3439652621746063,
      "learning_rate": 0.0,
      "loss": 0.0966,
      "step": 960
    },
    {
      "epoch": 1.9968798751950079,
      "step": 960,
      "total_flos": 2.5563512438029025e+18,
      "train_loss": 0.12861698282261688,
      "train_runtime": 5934.2279,
      "train_samples_per_second": 10.368,
      "train_steps_per_second": 0.162
    }
  ],
  "logging_steps": 10,
  "max_steps": 960,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 2000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.5563512438029025e+18,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}