{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 8.0,
  "eval_steps": 500,
  "global_step": 4504,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    { "epoch": 0.017761989342806393, "grad_norm": 2.08616042137146, "learning_rate": 4.9900088809946715e-05, "loss": 5.9583, "step": 10 },
    { "epoch": 0.035523978685612786, "grad_norm": 2.0548300743103027, "learning_rate": 4.978907637655418e-05, "loss": 3.0383, "step": 20 },
    { "epoch": 0.05328596802841918, "grad_norm": 1.3866031169891357, "learning_rate": 4.967806394316163e-05, "loss": 2.0479, "step": 30 },
    { "epoch": 0.07104795737122557, "grad_norm": 1.5728237628936768, "learning_rate": 4.9567051509769094e-05, "loss": 1.3301, "step": 40 },
    { "epoch": 0.08880994671403197, "grad_norm": 1.3490098714828491, "learning_rate": 4.9456039076376556e-05, "loss": 0.8203, "step": 50 },
    { "epoch": 0.10657193605683836, "grad_norm": 1.1025753021240234, "learning_rate": 4.934502664298402e-05, "loss": 0.5509, "step": 60 },
    { "epoch": 0.12433392539964476, "grad_norm": 1.8544913530349731, "learning_rate": 4.923401420959148e-05, "loss": 0.3441, "step": 70 },
    { "epoch": 0.14209591474245115, "grad_norm": 0.7912121415138245, "learning_rate": 4.9123001776198936e-05, "loss": 0.2242, "step": 80 },
    { "epoch": 0.15985790408525755, "grad_norm": 0.6116934418678284, "learning_rate": 4.90119893428064e-05, "loss": 0.1493, "step": 90 },
    { "epoch": 0.17761989342806395, "grad_norm": 0.627920925617218, "learning_rate": 4.890097690941386e-05, "loss": 0.123, "step": 100 },
    { "epoch": 0.19538188277087035, "grad_norm": 0.5851693153381348, "learning_rate": 4.8789964476021315e-05, "loss": 0.0908, "step": 110 },
    { "epoch": 0.21314387211367672, "grad_norm": 0.4242522716522217, "learning_rate": 4.867895204262878e-05, "loss": 0.0668, "step": 120 },
    { "epoch": 0.23090586145648312, "grad_norm": 0.38480210304260254, "learning_rate": 4.856793960923623e-05, "loss": 0.0498, "step": 130 },
    { "epoch": 0.24866785079928952, "grad_norm": 0.4704955816268921, "learning_rate": 4.8456927175843695e-05, "loss": 0.0478, "step": 140 },
    { "epoch": 0.2664298401420959, "grad_norm": 0.33954229950904846, "learning_rate": 4.834591474245116e-05, "loss": 0.0383, "step": 150 },
    { "epoch": 0.2841918294849023, "grad_norm": 0.3331491947174072, "learning_rate": 4.823490230905862e-05, "loss": 0.0334, "step": 160 },
    { "epoch": 0.3019538188277087, "grad_norm": 0.22477354109287262, "learning_rate": 4.812388987566608e-05, "loss": 0.0285, "step": 170 },
    { "epoch": 0.3197158081705151, "grad_norm": 0.32435140013694763, "learning_rate": 4.8012877442273537e-05, "loss": 0.0262, "step": 180 },
    { "epoch": 0.33747779751332146, "grad_norm": 0.2808849513530731, "learning_rate": 4.7901865008881e-05, "loss": 0.0234, "step": 190 },
    { "epoch": 0.3552397868561279, "grad_norm": 0.24697023630142212, "learning_rate": 4.779085257548846e-05, "loss": 0.0214, "step": 200 },
    { "epoch": 0.37300177619893427, "grad_norm": 0.1854216456413269, "learning_rate": 4.7679840142095916e-05, "loss": 0.0188, "step": 210 },
    { "epoch": 0.3907637655417407, "grad_norm": 0.1791575402021408, "learning_rate": 4.756882770870338e-05, "loss": 0.0187, "step": 220 },
    { "epoch": 0.40852575488454707, "grad_norm": 0.2226918488740921, "learning_rate": 4.7457815275310833e-05, "loss": 0.0169, "step": 230 },
    { "epoch": 0.42628774422735344, "grad_norm": 0.2505394518375397, "learning_rate": 4.7346802841918296e-05, "loss": 0.017, "step": 240 },
    { "epoch": 0.44404973357015987, "grad_norm": 0.1905129998922348, "learning_rate": 4.723579040852576e-05, "loss": 0.0144, "step": 250 },
    { "epoch": 0.46181172291296624, "grad_norm": 0.22495870292186737, "learning_rate": 4.712477797513321e-05, "loss": 0.0151, "step": 260 },
    { "epoch": 0.47957371225577267, "grad_norm": 0.17531022429466248, "learning_rate": 4.701376554174068e-05, "loss": 0.0113, "step": 270 },
    { "epoch": 0.49733570159857904, "grad_norm": 0.18769726157188416, "learning_rate": 4.690275310834814e-05, "loss": 0.0115, "step": 280 },
    { "epoch": 0.5150976909413855, "grad_norm": 0.18548175692558289, "learning_rate": 4.67917406749556e-05, "loss": 0.0106, "step": 290 },
    { "epoch": 0.5328596802841918, "grad_norm": 0.19990290701389313, "learning_rate": 4.668072824156306e-05, "loss": 0.0102, "step": 300 },
    { "epoch": 0.5506216696269982, "grad_norm": 0.12933073937892914, "learning_rate": 4.656971580817052e-05, "loss": 0.0092, "step": 310 },
    { "epoch": 0.5683836589698046, "grad_norm": 0.1915041208267212, "learning_rate": 4.645870337477798e-05, "loss": 0.009, "step": 320 },
    { "epoch": 0.5861456483126111, "grad_norm": 0.1646175980567932, "learning_rate": 4.6347690941385434e-05, "loss": 0.0089, "step": 330 },
    { "epoch": 0.6039076376554174, "grad_norm": 0.13864779472351074, "learning_rate": 4.6236678507992896e-05, "loss": 0.0078, "step": 340 },
    { "epoch": 0.6216696269982238, "grad_norm": 0.20968131721019745, "learning_rate": 4.612566607460036e-05, "loss": 0.0084, "step": 350 },
    { "epoch": 0.6394316163410302, "grad_norm": 0.1403323858976364, "learning_rate": 4.6014653641207814e-05, "loss": 0.0067, "step": 360 },
    { "epoch": 0.6571936056838366, "grad_norm": 0.1056448444724083, "learning_rate": 4.5903641207815276e-05, "loss": 0.007, "step": 370 },
    { "epoch": 0.6749555950266429, "grad_norm": 0.09233298897743225, "learning_rate": 4.579262877442274e-05, "loss": 0.0063, "step": 380 },
    { "epoch": 0.6927175843694494, "grad_norm": 0.2610642910003662, "learning_rate": 4.56816163410302e-05, "loss": 0.0061, "step": 390 },
    { "epoch": 0.7104795737122558, "grad_norm": 0.13106778264045715, "learning_rate": 4.557060390763766e-05, "loss": 0.0061, "step": 400 },
    { "epoch": 0.7282415630550622, "grad_norm": 0.11065071076154709, "learning_rate": 4.545959147424512e-05, "loss": 0.0053, "step": 410 },
    { "epoch": 0.7460035523978685, "grad_norm": 0.17179323732852936, "learning_rate": 4.534857904085258e-05, "loss": 0.0064, "step": 420 },
    { "epoch": 0.7637655417406749, "grad_norm": 0.19529324769973755, "learning_rate": 4.5237566607460035e-05, "loss": 0.0045, "step": 430 },
    { "epoch": 0.7815275310834814, "grad_norm": 0.20533151924610138, "learning_rate": 4.51265541740675e-05, "loss": 0.005, "step": 440 },
    { "epoch": 0.7992895204262878, "grad_norm": 0.16531281173229218, "learning_rate": 4.501554174067496e-05, "loss": 0.0041, "step": 450 },
    { "epoch": 0.8170515097690941, "grad_norm": 0.13664154708385468, "learning_rate": 4.4904529307282414e-05, "loss": 0.004, "step": 460 },
    { "epoch": 0.8348134991119005, "grad_norm": 0.11034122854471207, "learning_rate": 4.4793516873889876e-05, "loss": 0.0039, "step": 470 },
    { "epoch": 0.8525754884547069, "grad_norm": 0.06444460153579712, "learning_rate": 4.468250444049734e-05, "loss": 0.0039, "step": 480 },
    { "epoch": 0.8703374777975134, "grad_norm": 0.2418302446603775, "learning_rate": 4.45714920071048e-05, "loss": 0.0039, "step": 490 },
    { "epoch": 0.8880994671403197, "grad_norm": 0.06368648260831833, "learning_rate": 4.446047957371226e-05, "loss": 0.0035, "step": 500 },
    { "epoch": 0.9058614564831261, "grad_norm": 0.08606679737567902, "learning_rate": 4.434946714031972e-05, "loss": 0.0033, "step": 510 },
    { "epoch": 0.9236234458259325, "grad_norm": 0.06212198734283447, "learning_rate": 4.423845470692718e-05, "loss": 0.0032, "step": 520 },
    { "epoch": 0.9413854351687388, "grad_norm": 0.05146817862987518, "learning_rate": 4.4127442273534635e-05, "loss": 0.0036, "step": 530 },
    { "epoch": 0.9591474245115453, "grad_norm": 0.10226184129714966, "learning_rate": 4.40164298401421e-05, "loss": 0.0035, "step": 540 },
    { "epoch": 0.9769094138543517, "grad_norm": 0.06877221167087555, "learning_rate": 4.390541740674956e-05, "loss": 0.0034, "step": 550 },
    { "epoch": 0.9946714031971581, "grad_norm": 0.08828417211771011, "learning_rate": 4.3794404973357015e-05, "loss": 0.0026, "step": 560 },
    { "epoch": 1.0, "eval_loss": 3.458987339399755e-05, "eval_runtime": 22.2952, "eval_samples_per_second": 44.853, "eval_steps_per_second": 2.826, "step": 563 },
    { "epoch": 1.0124333925399644, "grad_norm": 0.09242795407772064, "learning_rate": 4.368339253996448e-05, "loss": 0.003, "step": 570 },
    { "epoch": 1.030195381882771, "grad_norm": 0.0889139473438263, "learning_rate": 4.357238010657194e-05, "loss": 0.0028, "step": 580 },
    { "epoch": 1.0479573712255772, "grad_norm": 0.12308719754219055, "learning_rate": 4.34613676731794e-05, "loss": 0.0027, "step": 590 },
    { "epoch": 1.0657193605683837, "grad_norm": 0.05686354264616966, "learning_rate": 4.335035523978686e-05, "loss": 0.0024, "step": 600 },
    { "epoch": 1.0834813499111902, "grad_norm": 0.05174636095762253, "learning_rate": 4.323934280639432e-05, "loss": 0.0023, "step": 610 },
    { "epoch": 1.1012433392539964, "grad_norm": 0.1351872682571411, "learning_rate": 4.312833037300178e-05, "loss": 0.0024, "step": 620 },
    { "epoch": 1.119005328596803, "grad_norm": 0.11029157787561417, "learning_rate": 4.3017317939609236e-05, "loss": 0.0026, "step": 630 },
    { "epoch": 1.1367673179396092, "grad_norm": 0.041344061493873596, "learning_rate": 4.29063055062167e-05, "loss": 0.0018, "step": 640 },
    { "epoch": 1.1545293072824157, "grad_norm": 0.15868201851844788, "learning_rate": 4.279529307282416e-05, "loss": 0.0023, "step": 650 },
    { "epoch": 1.1722912966252221, "grad_norm": 0.06435976177453995, "learning_rate": 4.2684280639431615e-05, "loss": 0.0022, "step": 660 },
    { "epoch": 1.1900532859680284, "grad_norm": 0.05187324061989784, "learning_rate": 4.257326820603908e-05, "loss": 0.0018, "step": 670 },
    { "epoch": 1.2078152753108349, "grad_norm": 0.041784290224313736, "learning_rate": 4.246225577264654e-05, "loss": 0.0023, "step": 680 },
    { "epoch": 1.2255772646536411, "grad_norm": 0.031757429242134094, "learning_rate": 4.2351243339254e-05, "loss": 0.0021, "step": 690 },
    { "epoch": 1.2433392539964476, "grad_norm": 0.044060107320547104, "learning_rate": 4.2240230905861464e-05, "loss": 0.0019, "step": 700 },
    { "epoch": 1.261101243339254, "grad_norm": 0.11238252371549606, "learning_rate": 4.212921847246892e-05, "loss": 0.002, "step": 710 },
    { "epoch": 1.2788632326820604, "grad_norm": 0.041686106473207474, "learning_rate": 4.201820603907638e-05, "loss": 0.0015, "step": 720 },
    { "epoch": 1.2966252220248669, "grad_norm": 0.04672028496861458, "learning_rate": 4.1907193605683836e-05, "loss": 0.0015, "step": 730 },
    { "epoch": 1.3143872113676731, "grad_norm": 0.0358208566904068, "learning_rate": 4.17961811722913e-05, "loss": 0.0016, "step": 740 },
    { "epoch": 1.3321492007104796, "grad_norm": 0.17647922039031982, "learning_rate": 4.168516873889876e-05, "loss": 0.0015, "step": 750 },
    { "epoch": 1.349911190053286, "grad_norm": 0.10813446342945099, "learning_rate": 4.1574156305506216e-05, "loss": 0.0017, "step": 760 },
    { "epoch": 1.3676731793960923, "grad_norm": 0.03748601675033569, "learning_rate": 4.146314387211368e-05, "loss": 0.0018, "step": 770 },
    { "epoch": 1.3854351687388988, "grad_norm": 0.031867001205682755, "learning_rate": 4.135213143872114e-05, "loss": 0.0016, "step": 780 },
    { "epoch": 1.403197158081705, "grad_norm": 0.01816353015601635, "learning_rate": 4.12411190053286e-05, "loss": 0.0011, "step": 790 },
    { "epoch": 1.4209591474245116, "grad_norm": 0.05043243616819382, "learning_rate": 4.1130106571936064e-05, "loss": 0.0014, "step": 800 },
    { "epoch": 1.438721136767318, "grad_norm": 0.27319490909576416, "learning_rate": 4.101909413854352e-05, "loss": 0.0017, "step": 810 },
    { "epoch": 1.4564831261101243, "grad_norm": 0.04838378727436066, "learning_rate": 4.090808170515098e-05, "loss": 0.0014, "step": 820 },
    { "epoch": 1.4742451154529308, "grad_norm": 0.05813241004943848, "learning_rate": 4.079706927175844e-05, "loss": 0.0017, "step": 830 },
    { "epoch": 1.492007104795737, "grad_norm": 0.06169930100440979, "learning_rate": 4.06860568383659e-05, "loss": 0.0015, "step": 840 },
    { "epoch": 1.5097690941385435, "grad_norm": 0.01758798211812973, "learning_rate": 4.057504440497336e-05, "loss": 0.0012, "step": 850 },
    { "epoch": 1.52753108348135, "grad_norm": 0.07379154115915298, "learning_rate": 4.0464031971580816e-05, "loss": 0.0013, "step": 860 },
    { "epoch": 1.5452930728241563, "grad_norm": 0.1645466685295105, "learning_rate": 4.035301953818828e-05, "loss": 0.0015, "step": 870 },
    { "epoch": 1.5630550621669625, "grad_norm": 0.023952888324856758, "learning_rate": 4.0242007104795734e-05, "loss": 0.0011, "step": 880 },
    { "epoch": 1.580817051509769, "grad_norm": 0.012347780168056488, "learning_rate": 4.0130994671403196e-05, "loss": 0.0011, "step": 890 },
    { "epoch": 1.5985790408525755, "grad_norm": 0.12134626507759094, "learning_rate": 4.0019982238010665e-05, "loss": 0.0013, "step": 900 },
    { "epoch": 1.616341030195382, "grad_norm": 0.035019490867853165, "learning_rate": 3.990896980461812e-05, "loss": 0.001, "step": 910 },
    { "epoch": 1.6341030195381883, "grad_norm": 0.03914281353354454, "learning_rate": 3.979795737122558e-05, "loss": 0.001, "step": 920 },
    { "epoch": 1.6518650088809945, "grad_norm": 0.01977422647178173, "learning_rate": 3.968694493783304e-05, "loss": 0.0012, "step": 930 },
    { "epoch": 1.669626998223801, "grad_norm": 0.018485285341739655, "learning_rate": 3.95759325044405e-05, "loss": 0.0012, "step": 940 },
    { "epoch": 1.6873889875666075, "grad_norm": 0.2277858406305313, "learning_rate": 3.946492007104796e-05, "loss": 0.0011, "step": 950 },
    { "epoch": 1.705150976909414, "grad_norm": 0.012234285473823547, "learning_rate": 3.935390763765542e-05, "loss": 0.0011, "step": 960 },
    { "epoch": 1.7229129662522202, "grad_norm": 0.021199282258749008, "learning_rate": 3.924289520426288e-05, "loss": 0.001, "step": 970 },
    { "epoch": 1.7406749555950265, "grad_norm": 0.12197419255971909, "learning_rate": 3.9131882770870334e-05, "loss": 0.0011, "step": 980 },
    { "epoch": 1.758436944937833, "grad_norm": 0.03652465343475342, "learning_rate": 3.9020870337477797e-05, "loss": 0.0009, "step": 990 },
    { "epoch": 1.7761989342806395, "grad_norm": 0.03332207724452019, "learning_rate": 3.890985790408526e-05, "loss": 0.001, "step": 1000 },
    { "epoch": 1.793960923623446, "grad_norm": 0.05201757699251175, "learning_rate": 3.879884547069272e-05, "loss": 0.001, "step": 1010 },
    { "epoch": 1.8117229129662522, "grad_norm": 0.01703731343150139, "learning_rate": 3.868783303730018e-05, "loss": 0.001, "step": 1020 },
    { "epoch": 1.8294849023090585, "grad_norm": 0.027388030663132668, "learning_rate": 3.857682060390764e-05, "loss": 0.0008, "step": 1030 },
    { "epoch": 1.847246891651865, "grad_norm": 0.12894806265830994, "learning_rate": 3.84658081705151e-05, "loss": 0.0009, "step": 1040 },
    { "epoch": 1.8650088809946714, "grad_norm": 0.030342355370521545, "learning_rate": 3.835479573712256e-05, "loss": 0.001, "step": 1050 },
    { "epoch": 1.882770870337478, "grad_norm": 0.043612588196992874, "learning_rate": 3.824378330373002e-05, "loss": 0.0011, "step": 1060 },
    { "epoch": 1.9005328596802842, "grad_norm": 0.015847304835915565, "learning_rate": 3.813277087033748e-05, "loss": 0.0009, "step": 1070 },
    { "epoch": 1.9182948490230904, "grad_norm": 0.036328867077827454, "learning_rate": 3.8021758436944935e-05, "loss": 0.0009, "step": 1080 },
    { "epoch": 1.936056838365897, "grad_norm": 0.013609735295176506, "learning_rate": 3.79107460035524e-05, "loss": 0.0011, "step": 1090 },
    { "epoch": 1.9538188277087034, "grad_norm": 0.016604352742433548, "learning_rate": 3.779973357015986e-05, "loss": 0.0009, "step": 1100 },
    { "epoch": 1.97158081705151, "grad_norm": 0.030676284804940224, "learning_rate": 3.768872113676732e-05, "loss": 0.0009, "step": 1110 },
    { "epoch": 1.9893428063943162, "grad_norm": 0.010764030739665031, "learning_rate": 3.7577708703374783e-05, "loss": 0.0007, "step": 1120 },
    { "epoch": 2.0, "eval_loss": 7.020198609097861e-06, "eval_runtime": 22.303, "eval_samples_per_second": 44.837, "eval_steps_per_second": 2.825, "step": 1126 },
    { "epoch": 2.0071047957371224, "grad_norm": 0.015416032634675503, "learning_rate": 3.746669626998224e-05, "loss": 0.0008, "step": 1130 },
    { "epoch": 2.024866785079929, "grad_norm": 0.027968090027570724, "learning_rate": 3.73556838365897e-05, "loss": 0.0007, "step": 1140 },
    { "epoch": 2.0426287744227354, "grad_norm": 0.13390642404556274, "learning_rate": 3.724467140319716e-05, "loss": 0.001, "step": 1150 },
    { "epoch": 2.060390763765542, "grad_norm": 0.02235797978937626, "learning_rate": 3.713365896980462e-05, "loss": 0.0007, "step": 1160 },
    { "epoch": 2.0781527531083483, "grad_norm": 0.018844326958060265, "learning_rate": 3.702264653641208e-05, "loss": 0.0009, "step": 1170 },
    { "epoch": 2.0959147424511544, "grad_norm": 0.04496992379426956, "learning_rate": 3.6911634103019536e-05, "loss": 0.0011, "step": 1180 },
    { "epoch": 2.113676731793961, "grad_norm": 0.038956914097070694, "learning_rate": 3.6800621669627e-05, "loss": 0.0008, "step": 1190 },
    { "epoch": 2.1314387211367674, "grad_norm": 0.03798123076558113, "learning_rate": 3.668960923623446e-05, "loss": 0.0007, "step": 1200 },
    { "epoch": 2.149200710479574, "grad_norm": 0.02100321650505066, "learning_rate": 3.657859680284192e-05, "loss": 0.0006, "step": 1210 },
    { "epoch": 2.1669626998223803, "grad_norm": 0.021854564547538757, "learning_rate": 3.6467584369449384e-05, "loss": 0.0008, "step": 1220 },
    { "epoch": 2.1847246891651864, "grad_norm": 0.011918751522898674, "learning_rate": 3.635657193605684e-05, "loss": 0.0008, "step": 1230 },
    { "epoch": 2.202486678507993, "grad_norm": 0.06602652370929718, "learning_rate": 3.62455595026643e-05, "loss": 0.0006, "step": 1240 },
    { "epoch": 2.2202486678507993, "grad_norm": 0.007478422950953245, "learning_rate": 3.6134547069271763e-05, "loss": 0.0006, "step": 1250 },
    { "epoch": 2.238010657193606, "grad_norm": 0.02918918989598751, "learning_rate": 3.602353463587922e-05, "loss": 0.0007, "step": 1260 },
    { "epoch": 2.2557726465364123, "grad_norm": 0.05313832312822342, "learning_rate": 3.591252220248668e-05, "loss": 0.0008, "step": 1270 },
    { "epoch": 2.2735346358792183, "grad_norm": 0.04284721240401268, "learning_rate": 3.5801509769094136e-05, "loss": 0.0009, "step": 1280 },
    { "epoch": 2.291296625222025, "grad_norm": 0.027897143736481667, "learning_rate": 3.56904973357016e-05, "loss": 0.0008, "step": 1290 },
    { "epoch": 2.3090586145648313, "grad_norm": 0.009780477732419968, "learning_rate": 3.557948490230906e-05, "loss": 0.0006, "step": 1300 },
    { "epoch": 2.326820603907638, "grad_norm": 0.017684033140540123, "learning_rate": 3.546847246891652e-05, "loss": 0.0008, "step": 1310 },
    { "epoch": 2.3445825932504443, "grad_norm": 0.07929302006959915, "learning_rate": 3.5357460035523985e-05, "loss": 0.0006, "step": 1320 },
    { "epoch": 2.3623445825932503, "grad_norm": 0.06948840618133545, "learning_rate": 3.524644760213144e-05, "loss": 0.0006, "step": 1330 },
    { "epoch": 2.380106571936057, "grad_norm": 0.006130309775471687, "learning_rate": 3.51354351687389e-05, "loss": 0.0005, "step": 1340 },
    { "epoch": 2.3978685612788633, "grad_norm": 0.022212188690900803, "learning_rate": 3.5024422735346364e-05, "loss": 0.0006, "step": 1350 },
    { "epoch": 2.4156305506216698, "grad_norm": 0.015669459477066994, "learning_rate": 3.491341030195382e-05, "loss": 0.0006, "step": 1360 },
    { "epoch": 2.4333925399644762, "grad_norm": 0.026899907737970352, "learning_rate": 3.480239786856128e-05, "loss": 0.0005, "step": 1370 },
    { "epoch": 2.4511545293072823, "grad_norm": 0.02007845975458622, "learning_rate": 3.469138543516874e-05, "loss": 0.0006, "step": 1380 },
    { "epoch": 2.4689165186500888, "grad_norm": 0.017982805147767067, "learning_rate": 3.45803730017762e-05, "loss": 0.0005, "step": 1390 },
    { "epoch": 2.4866785079928952, "grad_norm": 0.05845437943935394, "learning_rate": 3.446936056838366e-05, "loss": 0.0007, "step": 1400 },
    { "epoch": 2.5044404973357017, "grad_norm": 0.006352486554533243, "learning_rate": 3.4358348134991116e-05, "loss": 0.0006, "step": 1410 },
    { "epoch": 2.522202486678508, "grad_norm": 0.004761823453009129, "learning_rate": 3.4247335701598585e-05, "loss": 0.0006, "step": 1420 },
    { "epoch": 2.5399644760213143, "grad_norm": 0.0643671378493309, "learning_rate": 3.413632326820604e-05, "loss": 0.0005, "step": 1430 },
    { "epoch": 2.5577264653641207, "grad_norm": 0.2012147605419159, "learning_rate": 3.40253108348135e-05, "loss": 0.0006, "step": 1440 },
    { "epoch": 2.575488454706927, "grad_norm": 0.005158168729394674, "learning_rate": 3.3914298401420965e-05, "loss": 0.0007, "step": 1450 },
    { "epoch": 2.5932504440497337, "grad_norm": 0.02005215547978878, "learning_rate": 3.380328596802842e-05, "loss": 0.0005, "step": 1460 },
    { "epoch": 2.61101243339254, "grad_norm": 0.011687914840877056, "learning_rate": 3.369227353463588e-05, "loss": 0.0005, "step": 1470 },
    { "epoch": 2.6287744227353462, "grad_norm": 0.028902605175971985, "learning_rate": 3.358126110124334e-05, "loss": 0.0007, "step": 1480 },
    { "epoch": 2.6465364120781527, "grad_norm": 0.004095068667083979, "learning_rate": 3.34702486678508e-05, "loss": 0.0005, "step": 1490 },
    { "epoch": 2.664298401420959, "grad_norm": 0.033792633563280106, "learning_rate": 3.335923623445826e-05, "loss": 0.0005, "step": 1500 },
    { "epoch": 2.6820603907637657, "grad_norm": 0.010024248622357845, "learning_rate": 3.324822380106572e-05, "loss": 0.0007, "step": 1510 },
    { "epoch": 2.699822380106572, "grad_norm": 0.04082302376627922, "learning_rate": 3.313721136767318e-05, "loss": 0.0005, "step": 1520 },
    { "epoch": 2.717584369449378, "grad_norm": 0.01643848419189453, "learning_rate": 3.302619893428064e-05, "loss": 0.0006, "step": 1530 },
    { "epoch": 2.7353463587921847, "grad_norm": 0.02243112586438656, "learning_rate": 3.29151865008881e-05, "loss": 0.0005, "step": 1540 },
    { "epoch": 2.753108348134991, "grad_norm": 0.015440504066646099, "learning_rate": 3.2804174067495565e-05, "loss": 0.0005, "step": 1550 },
    { "epoch": 2.7708703374777977, "grad_norm": 0.04903204366564751, "learning_rate": 3.269316163410302e-05, "loss": 0.0006, "step": 1560 },
    { "epoch": 2.788632326820604, "grad_norm": 0.04786301776766777, "learning_rate": 3.258214920071048e-05, "loss": 0.0006, "step": 1570 },
    { "epoch": 2.80639431616341, "grad_norm": 0.02142539992928505, "learning_rate": 3.247113676731794e-05, "loss": 0.0008, "step": 1580 },
    { "epoch": 2.8241563055062167, "grad_norm": 0.005326330661773682, "learning_rate": 3.23601243339254e-05, "loss": 0.0007, "step": 1590 },
    { "epoch": 2.841918294849023, "grad_norm": 0.016222188249230385, "learning_rate": 3.224911190053286e-05, "loss": 0.0005, "step": 1600 },
    { "epoch": 2.8596802841918296, "grad_norm": 0.005499828606843948, "learning_rate": 3.213809946714032e-05, "loss": 0.0005, "step": 1610 },
    { "epoch": 2.877442273534636, "grad_norm": 0.02295517548918724, "learning_rate": 3.202708703374778e-05, "loss": 0.0005, "step": 1620 },
    { "epoch": 2.895204262877442, "grad_norm": 0.12497241050004959, "learning_rate": 3.191607460035524e-05, "loss": 0.0005, "step": 1630 },
    { "epoch": 2.9129662522202486, "grad_norm": 0.04707463085651398, "learning_rate": 3.1805062166962704e-05, "loss": 0.0005, "step": 1640 },
    { "epoch": 2.930728241563055, "grad_norm": 0.030674880370497704, "learning_rate": 3.1694049733570166e-05, "loss": 0.0008, "step": 1650 },
    { "epoch": 2.9484902309058616, "grad_norm": 0.1597021222114563, "learning_rate": 3.158303730017762e-05, "loss": 0.0005, "step": 1660 },
    { "epoch": 2.966252220248668, "grad_norm": 0.08469359576702118, "learning_rate": 3.147202486678508e-05, "loss": 0.0005, "step": 1670 },
    { "epoch": 2.984014209591474, "grad_norm": 0.10349100828170776, "learning_rate": 3.136101243339254e-05, "loss": 0.0005, "step": 1680 },
    { "epoch": 3.0, "eval_loss": 3.2967063816613518e-06, "eval_runtime": 22.3238, "eval_samples_per_second": 44.795, "eval_steps_per_second": 2.822, "step": 1689 },
    { "epoch": 3.0017761989342806, "grad_norm": 0.020849984139204025, "learning_rate": 3.125e-05, "loss": 0.0005, "step": 1690 },
    { "epoch": 3.019538188277087, "grad_norm": 0.017703084275126457, "learning_rate": 3.113898756660746e-05, "loss": 0.0004, "step": 1700 },
    { "epoch": 3.0373001776198936, "grad_norm": 0.004325501620769501, "learning_rate": 3.102797513321492e-05, "loss": 0.0003, "step": 1710 },
    { "epoch": 3.0550621669626996, "grad_norm": 0.007536259479820728, "learning_rate": 3.091696269982238e-05, "loss": 0.0004, "step": 1720 },
    { "epoch": 3.072824156305506, "grad_norm": 0.008604285307228565, "learning_rate": 3.080595026642984e-05, "loss": 0.0004, "step": 1730 },
    { "epoch": 3.0905861456483126, "grad_norm": 0.011171748861670494, "learning_rate": 3.0694937833037304e-05, "loss": 0.0005, "step": 1740 },
    { "epoch": 3.108348134991119, "grad_norm": 0.012916183099150658, "learning_rate": 3.0583925399644766e-05, "loss": 0.0003, "step": 1750 },
    { "epoch": 3.1261101243339255, "grad_norm": 0.010790830478072166, "learning_rate": 3.0472912966252222e-05, "loss": 0.0004, "step": 1760 },
    { "epoch": 3.143872113676732, "grad_norm": 0.007941185496747494, "learning_rate": 3.0361900532859684e-05, "loss": 0.0005, "step": 1770 },
    { "epoch": 3.161634103019538, "grad_norm": 0.022536130622029305, "learning_rate": 3.025088809946714e-05, "loss": 0.0003, "step": 1780 },
    { "epoch": 3.1793960923623446, "grad_norm": 0.008146955631673336, "learning_rate": 3.01398756660746e-05, "loss": 0.0003, "step": 1790 },
    { "epoch": 3.197158081705151, "grad_norm": 0.01531197503209114, "learning_rate": 3.0028863232682063e-05, "loss": 0.0005, "step": 1800 },
    { "epoch": 3.2149200710479575, "grad_norm": 0.007879276759922504, "learning_rate": 2.9917850799289522e-05, "loss": 0.0003, "step": 1810 },
    { "epoch": 3.232682060390764, "grad_norm": 0.031879961490631104, "learning_rate": 2.9806838365896984e-05, "loss": 0.0005, "step": 1820 },
    { "epoch": 3.25044404973357, "grad_norm": 0.023382920771837234, "learning_rate": 2.969582593250444e-05, "loss": 0.0005, "step": 1830 },
    { "epoch": 3.2682060390763765, "grad_norm": 0.011471973732113838, "learning_rate": 2.95848134991119e-05, "loss": 0.0004, "step": 1840 },
    { "epoch": 3.285968028419183, "grad_norm": 0.009325550869107246, "learning_rate": 2.9473801065719364e-05, "loss": 0.0005, "step": 1850 },
    { "epoch": 3.3037300177619895, "grad_norm": 0.047994911670684814, "learning_rate": 2.9362788632326822e-05, "loss": 0.0004, "step": 1860 },
    { "epoch": 3.321492007104796, "grad_norm": 0.004230279475450516, "learning_rate": 2.9251776198934284e-05, "loss": 0.0004, "step": 1870 },
    { "epoch": 3.339253996447602, "grad_norm": 0.008561152964830399, "learning_rate": 2.914076376554174e-05, "loss": 0.0003, "step": 1880 },
    { "epoch": 3.3570159857904085, "grad_norm": 0.004857722204178572, "learning_rate": 2.9029751332149202e-05, "loss": 0.0006, "step": 1890 },
    { "epoch": 3.374777975133215, "grad_norm": 0.012910543009638786, "learning_rate": 2.8918738898756664e-05, "loss": 0.0003, "step": 1900 },
    { "epoch": 3.3925399644760215, "grad_norm": 0.010736320167779922, "learning_rate": 2.8807726465364123e-05, "loss": 0.0005, "step": 1910 },
    { "epoch": 3.410301953818828, "grad_norm": 0.010209358297288418, "learning_rate": 2.8696714031971585e-05, "loss": 0.0003, "step": 1920 },
    { "epoch": 3.428063943161634, "grad_norm": 0.010665121488273144, "learning_rate": 2.858570159857904e-05, "loss": 0.0004, "step": 1930 },
    { "epoch": 3.4458259325044405, "grad_norm": 0.008989960886538029, "learning_rate": 2.8474689165186502e-05, "loss": 0.0003, "step": 1940 },
    { "epoch": 3.463587921847247, "grad_norm": 0.003941075410693884, "learning_rate": 2.8363676731793964e-05, "loss": 0.0003, "step": 1950 },
    { "epoch": 3.4813499111900534, "grad_norm": 0.047086674720048904, "learning_rate": 2.825266429840142e-05, "loss": 0.0003, "step": 1960 },
    { "epoch": 3.49911190053286, "grad_norm": 0.009249791502952576, "learning_rate": 2.8141651865008885e-05, "loss": 0.0004, "step": 1970 },
    { "epoch": 3.516873889875666, "grad_norm": 0.013919523917138577, "learning_rate": 2.803063943161634e-05, "loss": 0.0005, "step": 1980 },
    { "epoch": 3.5346358792184724, "grad_norm": 0.05363338813185692, "learning_rate": 2.7919626998223802e-05, "loss": 0.0004, "step": 1990 },
    { "epoch": 3.552397868561279, "grad_norm": 0.027678849175572395, "learning_rate": 2.7808614564831264e-05, "loss": 0.0003, "step": 2000 },
    { "epoch": 3.5701598579040854, "grad_norm": 0.02696838229894638, "learning_rate": 2.769760213143872e-05, "loss": 0.0004, "step": 2010 },
    { "epoch": 3.587921847246892, "grad_norm": 0.048736572265625, "learning_rate": 2.7586589698046182e-05, "loss": 0.0003, "step": 2020 },
    { "epoch": 3.605683836589698, "grad_norm": 0.007270255126059055, "learning_rate": 2.747557726465364e-05, "loss": 0.0003, "step": 2030 },
    { "epoch": 3.6234458259325044, "grad_norm": 0.006998795084655285, "learning_rate": 2.7364564831261103e-05, "loss": 0.0006, "step": 2040 },
    { "epoch": 3.641207815275311, "grad_norm": 0.004529398865997791, "learning_rate": 2.7253552397868565e-05, "loss": 0.0002, "step": 2050 },
    { "epoch": 3.6589698046181174, "grad_norm": 0.009528383612632751, "learning_rate": 2.714253996447602e-05, "loss": 0.0003, "step": 2060 },
    { "epoch": 3.676731793960924, "grad_norm": 0.056139055639505386, "learning_rate": 2.7031527531083482e-05, "loss": 0.0003, "step": 2070 },
    { "epoch": 3.69449378330373, "grad_norm": 0.014499225653707981, "learning_rate": 2.692051509769094e-05, "loss": 0.0004, "step": 2080 },
    { "epoch": 3.7122557726465364, "grad_norm": 0.005347531288862228, "learning_rate": 2.6809502664298403e-05, "loss": 0.0005, "step": 2090 },
    { "epoch": 3.730017761989343, "grad_norm": 0.010448618791997433, "learning_rate": 2.6698490230905865e-05, "loss": 0.0003, "step": 2100 },
    { "epoch": 3.7477797513321494, "grad_norm": 0.009756255894899368, "learning_rate": 2.658747779751332e-05, "loss": 0.0004, "step": 2110 },
    { "epoch": 3.765541740674956, "grad_norm": 0.0032908094581216574, "learning_rate": 2.6476465364120782e-05, "loss": 0.0006, "step": 2120 },
    { "epoch": 3.783303730017762, "grad_norm": 0.045266490429639816, "learning_rate": 2.636545293072824e-05, "loss": 0.0005, "step": 2130 },
    { "epoch": 3.8010657193605684, "grad_norm": 0.007182170171290636, "learning_rate": 2.6254440497335703e-05, "loss": 0.0003, "step": 2140 },
    { "epoch": 3.818827708703375, "grad_norm": 0.00983410980552435, "learning_rate": 2.6143428063943165e-05, "loss": 0.0004, "step": 2150 },
    { "epoch": 3.8365896980461813, "grad_norm": 0.00767460698261857, "learning_rate": 2.603241563055062e-05, "loss": 0.0004, "step": 2160 },
    { "epoch": 3.854351687388988, "grad_norm": 0.01133924163877964, "learning_rate": 2.5921403197158083e-05, "loss": 0.0004, "step": 2170 },
    { "epoch": 3.872113676731794, "grad_norm": 0.005284077022224665, "learning_rate": 2.581039076376554e-05, "loss": 0.0003, "step": 2180 },
    { "epoch": 3.8898756660746003, "grad_norm": 0.010912826284766197, "learning_rate": 2.5699378330373004e-05, "loss": 0.0002, "step": 2190 },
    { "epoch": 3.907637655417407, "grad_norm": 0.008936459198594093, "learning_rate": 2.5588365896980466e-05, "loss": 0.0003, "step": 2200 },
    { "epoch": 3.9253996447602133, "grad_norm": 0.008787784725427628, "learning_rate": 2.547735346358792e-05, "loss": 0.0003, "step": 2210 },
    { "epoch": 3.94316163410302, "grad_norm": 0.0062966118566691875, "learning_rate": 2.5366341030195383e-05, "loss": 0.0003, "step": 2220 },
    { "epoch": 3.960923623445826, "grad_norm": 0.005867836996912956, "learning_rate": 2.5255328596802842e-05, "loss": 0.0002, "step": 2230 },
    { "epoch": 3.9786856127886323, "grad_norm": 0.006783599965274334, "learning_rate": 2.5144316163410304e-05, "loss": 0.0003, "step": 2240 },
    { "epoch": 3.996447602131439, "grad_norm": 0.009927009232342243, "learning_rate": 2.5033303730017766e-05, "loss": 0.0003, "step": 2250 },
    { "epoch": 4.0, "eval_loss": 2.0054706055816496e-06, "eval_runtime": 22.009, "eval_samples_per_second": 45.436, "eval_steps_per_second": 2.862, "step": 2252 },
    { "epoch": 4.014209591474245, "grad_norm": 0.026025472208857536, "learning_rate": 2.492229129662522e-05, "loss": 0.0003, "step": 2260 },
    { "epoch": 4.031971580817052, "grad_norm": 0.0020729824900627136, "learning_rate": 2.4811278863232683e-05, "loss": 0.0002, "step": 2270 },
    { "epoch": 4.049733570159858, "grad_norm": 0.07728759199380875, "learning_rate": 2.4700266429840142e-05, "loss": 0.0003, "step": 2280 },
    { "epoch": 4.067495559502665, "grad_norm": 0.006721894256770611, "learning_rate": 2.4589253996447604e-05, "loss": 0.0003, "step": 2290 },
    { "epoch": 4.085257548845471, "grad_norm": 0.0029023478273302317, "learning_rate": 2.4478241563055063e-05, "loss": 0.0005, "step": 2300 },
    { "epoch": 4.103019538188277, "grad_norm": 0.03222382813692093, "learning_rate": 2.436722912966252e-05, "loss": 0.0003, "step": 2310 },
    { "epoch": 4.120781527531084, "grad_norm": 0.015039577148854733, "learning_rate": 2.4256216696269984e-05, "loss": 0.0003, "step": 2320 },
    { "epoch": 4.13854351687389, "grad_norm": 0.04993987828493118, "learning_rate": 2.4145204262877442e-05, "loss": 0.0003, "step": 2330 },
    { "epoch": 4.156305506216697, "grad_norm": 0.002969494555145502, "learning_rate": 2.4034191829484904e-05, "loss": 0.0002, "step": 2340 },
    { "epoch": 4.174067495559503, "grad_norm": 0.019309891387820244, "learning_rate": 2.3923179396092363e-05, "loss": 0.0003, "step": 2350 },
    { "epoch": 4.191829484902309, "grad_norm": 0.004141306970268488, "learning_rate": 2.3812166962699822e-05, "loss": 0.0002, "step": 2360 },
    { "epoch": 4.209591474245116, "grad_norm": 0.0039856103248894215, "learning_rate": 2.3701154529307284e-05, "loss": 0.0003, "step": 2370 },
    { "epoch": 4.227353463587922, "grad_norm": 0.005168092902749777, "learning_rate": 2.3590142095914743e-05, "loss": 0.0002, "step": 2380 },
    { "epoch": 4.245115452930728, "grad_norm": 0.009566891007125378, "learning_rate": 2.3479129662522205e-05, "loss": 0.0002, "step": 2390 },
    { "epoch": 4.262877442273535, "grad_norm": 0.05170539766550064, "learning_rate": 2.3368117229129663e-05, "loss": 0.0004, "step": 2400 },
    { "epoch": 4.280639431616341, "grad_norm": 0.003199364058673382, "learning_rate": 2.3257104795737122e-05, "loss": 0.0002, "step": 2410 },
    { "epoch": 4.298401420959148, "grad_norm": 0.11592520773410797, "learning_rate": 2.3146092362344584e-05, "loss": 0.0005, "step": 2420 },
    { "epoch": 4.316163410301954, "grad_norm": 0.003450137795880437, "learning_rate": 2.3035079928952043e-05, "loss": 0.0002, "step": 2430 },
    { "epoch": 4.333925399644761, "grad_norm": 0.018514085561037064, "learning_rate": 2.2924067495559505e-05, "loss": 0.0002, "step": 2440 },
    { "epoch": 4.351687388987567, "grad_norm": 0.023660879582166672, "learning_rate": 2.2813055062166964e-05, "loss": 0.0003, "step": 2450 },
    { "epoch": 4.369449378330373, "grad_norm": 0.0032503483816981316, "learning_rate": 2.2702042628774422e-05, "loss": 0.0002, "step": 2460 },
    { "epoch": 4.38721136767318, "grad_norm": 0.00737254461273551, "learning_rate": 2.2591030195381885e-05, "loss": 0.0003, "step": 2470 },
    { "epoch": 4.404973357015986, "grad_norm": 0.004189962055534124, "learning_rate": 2.2480017761989343e-05, "loss": 0.0002, "step": 2480 },
    { "epoch": 4.422735346358792, "grad_norm": 0.00830498244613409, "learning_rate": 2.2369005328596805e-05, "loss": 0.0004, "step": 2490 },
    { "epoch": 4.440497335701599, "grad_norm": 0.004922557156533003, "learning_rate": 2.2257992895204264e-05, "loss": 0.0002, "step": 2500 },
    { "epoch": 4.458259325044405, "grad_norm": 0.018229391425848007, "learning_rate": 2.2146980461811723e-05, "loss": 0.0002, "step": 2510 },
    { "epoch": 4.476021314387212, "grad_norm": 0.006720603443682194, "learning_rate": 2.2035968028419185e-05, "loss": 0.0002, "step": 2520 },
    { "epoch": 4.493783303730018, "grad_norm": 0.0020187098998576403, "learning_rate": 2.1924955595026644e-05, "loss": 0.0003, "step": 2530 },
    { "epoch": 4.511545293072825, "grad_norm": 0.0030220877379179, "learning_rate": 2.1813943161634102e-05, "loss": 0.0003, "step": 2540 },
    { "epoch": 4.529307282415631, "grad_norm": 0.016860356554389, "learning_rate": 2.1702930728241564e-05, "loss": 0.0003, "step": 2550 },
    { "epoch": 4.547069271758437, "grad_norm": 0.07945937663316727, "learning_rate": 2.1591918294849023e-05, "loss": 0.0004, "step": 2560 },
    { "epoch": 4.564831261101244, "grad_norm": 0.010131836868822575, "learning_rate": 2.1480905861456485e-05, "loss": 0.0003, "step": 2570 },
    { "epoch": 4.58259325044405, "grad_norm": 0.016588937491178513, "learning_rate": 2.1369893428063944e-05, "loss": 0.0002, "step": 2580 },
    { "epoch": 4.600355239786856, "grad_norm": 0.004532839171588421, "learning_rate": 2.1258880994671403e-05, "loss": 0.0003, "step": 2590 },
    { "epoch": 4.618117229129663, "grad_norm": 0.005671780090779066, "learning_rate": 2.1147868561278865e-05, "loss": 0.0002, "step": 2600 },
    { "epoch": 4.635879218472469, "grad_norm": 0.1407405436038971, "learning_rate": 2.1036856127886323e-05, "loss": 0.0004, "step": 2610 },
    { "epoch": 4.653641207815276, "grad_norm": 0.009644228033721447, "learning_rate": 2.0925843694493785e-05, "loss": 0.0002, "step": 2620 },
    { "epoch": 4.671403197158082, "grad_norm": 0.021375441923737526, "learning_rate": 2.0814831261101244e-05, "loss": 0.0002, "step": 2630 },
    { "epoch": 4.6891651865008885, "grad_norm": 0.05786694213747978, "learning_rate": 2.0703818827708703e-05, "loss": 0.0002, "step": 2640 },
    { "epoch": 4.706927175843695, "grad_norm": 0.01768909953534603, "learning_rate": 2.0592806394316165e-05, "loss": 0.0005, "step": 2650 },
    { "epoch": 4.724689165186501, "grad_norm": 0.20010802149772644, "learning_rate": 2.0481793960923624e-05, "loss": 0.0005, "step": 2660 },
    { "epoch": 4.7424511545293075, "grad_norm": 0.0016011944971978664, "learning_rate": 2.0370781527531086e-05, "loss": 0.0002, "step": 2670 },
    { "epoch": 4.760213143872114, "grad_norm": 0.0031138239428400993, "learning_rate": 2.0259769094138544e-05, "loss": 0.0003, "step": 2680 },
    { "epoch": 4.77797513321492, "grad_norm": 0.007894694805145264, "learning_rate": 2.0148756660746003e-05, "loss": 0.0002, "step": 2690 },
    { "epoch": 4.7957371225577266, "grad_norm": 0.006601590663194656, "learning_rate": 2.0037744227353465e-05, "loss": 0.0005, "step": 2700 },
    { "epoch": 4.813499111900533, "grad_norm": 0.004905109293758869, "learning_rate": 1.9926731793960924e-05, "loss": 0.0002, "step": 2710 },
    { "epoch": 4.8312611012433395, "grad_norm": 0.007573388516902924, "learning_rate": 1.9815719360568386e-05, "loss": 0.0005, "step": 2720 },
    { "epoch": 4.849023090586146, "grad_norm": 0.004600901156663895, "learning_rate": 1.9704706927175845e-05, "loss": 0.0002, "step": 2730 },
    { "epoch": 4.8667850799289525, "grad_norm": 0.004953782539814711, "learning_rate": 1.9593694493783303e-05, "loss": 0.0002, "step": 2740 },
    { "epoch": 4.8845470692717585, "grad_norm": 0.005551377311348915, "learning_rate": 1.9482682060390765e-05, "loss": 0.0002, "step": 2750 },
    { "epoch": 4.902309058614565, "grad_norm": 0.0048238011077046394, "learning_rate": 1.9371669626998224e-05, "loss": 0.0002, "step": 2760 },
    { "epoch": 4.9200710479573715, "grad_norm": 0.009099510498344898, "learning_rate": 1.9260657193605686e-05, "loss": 0.0002, "step": 2770 },
    { "epoch": 4.9378330373001775, "grad_norm": 0.0018844065489247441, "learning_rate": 1.9149644760213145e-05, "loss": 0.0002, "step": 2780 },
    { "epoch": 4.955595026642984, "grad_norm": 0.0071139088831841946, "learning_rate": 1.9038632326820604e-05, "loss": 0.0002, "step": 2790 },
    { "epoch": 4.9733570159857905, "grad_norm": 0.0148090161383152, "learning_rate": 1.8927619893428062e-05, "loss": 0.0002, "step": 2800 },
    { "epoch": 4.9911190053285965, "grad_norm": 0.014955924823880196, "learning_rate": 1.8816607460035524e-05, "loss": 0.0002, "step": 2810 },
    { "epoch": 5.0, "eval_loss": 1.5623232911821106e-06, "eval_runtime": 22.0179, "eval_samples_per_second": 45.418, "eval_steps_per_second": 2.861, "step": 2815 },
    { "epoch": 5.0088809946714035, "grad_norm": 0.008464839309453964, "learning_rate": 1.8705595026642987e-05, "loss": 0.0002, "step": 2820 },
    { "epoch": 5.0266429840142095, "grad_norm": 0.010644927620887756, "learning_rate": 1.8594582593250445e-05, "loss": 0.0003, "step": 2830 },
    { "epoch": 5.044404973357016, "grad_norm": 0.004363817162811756, "learning_rate": 1.8483570159857904e-05, "loss": 0.0002, "step": 2840 },
    { "epoch": 5.0621669626998225, "grad_norm": 0.013293485157191753, "learning_rate": 1.8372557726465363e-05, "loss": 0.0002, "step": 2850 },
    { "epoch": 5.0799289520426285, "grad_norm": 0.05282105877995491, "learning_rate": 1.8261545293072825e-05, "loss": 0.0004, "step": 2860 },
    { "epoch": 5.097690941385435, "grad_norm": 0.002226222539320588, "learning_rate": 1.8150532859680287e-05, "loss": 0.0003, "step": 2870 },
    { "epoch": 5.1154529307282415, "grad_norm": 0.004167128819972277, "learning_rate": 1.8039520426287746e-05, "loss": 0.0002, "step": 2880 },
    { "epoch": 5.1332149200710475, "grad_norm": 0.10440316051244736, "learning_rate": 1.7928507992895204e-05, "loss": 0.0003, "step": 2890 },
    { "epoch": 5.150976909413854, "grad_norm": 0.007494487799704075, "learning_rate": 1.7817495559502663e-05, "loss": 0.0002, "step": 2900 },
    { "epoch": 5.1687388987566605, "grad_norm": 0.004491967149078846, "learning_rate": 1.7706483126110125e-05, "loss": 0.0002, "step": 2910 },
    { "epoch": 5.186500888099467, "grad_norm": 0.0517878532409668, "learning_rate": 1.7595470692717587e-05, "loss": 0.0002, "step": 2920 },
    { "epoch": 5.2042628774422734, "grad_norm": 0.006387591827660799, "learning_rate": 1.7484458259325046e-05, "loss": 0.0002, "step": 2930 },
    { "epoch": 5.22202486678508, "grad_norm": 0.008753631263971329, "learning_rate": 1.7373445825932505e-05, "loss": 0.0003, "step": 2940 },
    { "epoch": 5.239786856127886, "grad_norm": 0.0052266488783061504, "learning_rate": 1.7262433392539963e-05, "loss": 0.0003, "step": 2950 },
    { "epoch": 5.2575488454706925, "grad_norm": 0.004134784918278456, "learning_rate": 1.7151420959147425e-05, "loss": 0.0002, "step": 2960 },
    { "epoch": 5.275310834813499, "grad_norm": 0.003984558861702681, "learning_rate": 1.7040408525754887e-05, "loss": 0.0002, "step": 2970 },
    { "epoch": 5.293072824156305, "grad_norm": 0.011758877895772457, "learning_rate": 1.6929396092362346e-05, "loss": 0.0002, "step": 2980 },
    { "epoch": 5.3108348134991115, "grad_norm": 0.005662502255290747, "learning_rate": 1.6818383658969805e-05, "loss": 0.0002, "step": 2990 },
    { "epoch": 5.328596802841918, "grad_norm": 0.0018373008351773024, "learning_rate": 1.6707371225577264e-05, "loss": 0.0002, "step": 3000 },
    { "epoch": 5.346358792184724, "grad_norm": 0.0203489288687706, "learning_rate": 1.6596358792184726e-05, "loss": 0.0002, "step": 3010 },
    { "epoch": 5.364120781527531, "grad_norm": 0.006496446672827005, "learning_rate": 1.6485346358792188e-05, "loss": 0.0002, "step": 3020 },
    { "epoch": 5.381882770870337, "grad_norm": 0.0037881876341998577, "learning_rate": 1.6374333925399646e-05, "loss": 0.0002, "step": 3030 },
    { "epoch": 5.399644760213144, "grad_norm": 0.006189519539475441, "learning_rate": 1.6263321492007105e-05, "loss": 0.0002, "step": 3040 },
    { "epoch": 5.41740674955595, "grad_norm": 0.016199415549635887, "learning_rate": 1.6152309058614564e-05, "loss": 0.0002, "step": 3050 },
    { "epoch": 5.435168738898756, "grad_norm": 0.0038983342237770557, "learning_rate": 1.6041296625222023e-05, "loss": 0.0002, "step": 3060 },
    { "epoch": 5.452930728241563, "grad_norm": 0.12250888347625732, "learning_rate": 1.5930284191829488e-05, "loss": 0.0002, "step": 3070 },
    { "epoch": 5.470692717584369, "grad_norm": 0.008953145705163479, "learning_rate": 1.5819271758436947e-05, "loss": 0.0002, "step": 3080 },
    { "epoch": 5.488454706927175, "grad_norm": 0.0029956751968711615, "learning_rate": 1.5708259325044405e-05, "loss": 0.0003, "step": 3090 },
    { "epoch": 5.506216696269982, "grad_norm": 0.011769605800509453, "learning_rate": 1.5597246891651864e-05, "loss": 0.0003, "step": 3100 },
    { "epoch": 5.523978685612788, "grad_norm": 0.006393752060830593, "learning_rate": 1.5486234458259326e-05, "loss": 0.0002, "step": 3110 },
    { "epoch": 5.541740674955595, "grad_norm": 0.0033896728418767452, "learning_rate": 1.537522202486679e-05, "loss": 0.0002, "step": 3120 },
    { "epoch": 5.559502664298401, "grad_norm": 0.004390314687043428, "learning_rate": 1.5264209591474247e-05, "loss": 0.0002, "step": 3130 },
    { "epoch": 5.577264653641208, "grad_norm": 0.005353247281163931, "learning_rate": 1.5153197158081706e-05, "loss": 0.0002, "step": 3140 },
    { "epoch": 5.595026642984014, "grad_norm": 0.006205048877745867, "learning_rate": 1.5042184724689164e-05, "loss": 0.0002, "step": 3150 },
    { "epoch": 5.61278863232682, "grad_norm": 0.0020661200396716595, "learning_rate": 1.4931172291296627e-05, "loss": 0.0001, "step": 3160 },
    { "epoch": 5.630550621669627, "grad_norm": 0.004063829779624939, "learning_rate": 1.4820159857904087e-05, "loss": 0.0001, "step": 3170 },
    { "epoch": 5.648312611012433, "grad_norm": 0.0014260923489928246, "learning_rate": 1.4709147424511546e-05, "loss": 0.0001, "step": 3180 },
    { "epoch": 5.666074600355239, "grad_norm": 0.0031196416821330786, "learning_rate": 1.4598134991119006e-05, "loss": 0.0002, "step": 3190 },
    { "epoch": 5.683836589698046, "grad_norm": 0.0019640398677438498, "learning_rate": 1.4487122557726465e-05, "loss": 0.0003, "step": 3200 },
    { "epoch": 5.701598579040852, "grad_norm": 0.009311804547905922, "learning_rate": 1.4376110124333927e-05, "loss": 0.0001, "step": 3210 },
    { "epoch": 5.719360568383659, "grad_norm": 0.002296882914379239, "learning_rate": 1.4265097690941387e-05, "loss": 0.0002, "step": 3220 },
    { "epoch": 5.737122557726465, "grad_norm": 0.008231076411902905, "learning_rate": 1.4154085257548846e-05, "loss": 0.0001, "step": 3230 },
    { "epoch": 5.754884547069272, "grad_norm": 0.02695680968463421, "learning_rate": 1.4043072824156306e-05, "loss": 0.0002, "step": 3240 },
    { "epoch": 5.772646536412078, "grad_norm": 0.005848235916346312, "learning_rate": 1.3932060390763765e-05, "loss": 0.0001, "step": 3250 },
    { "epoch": 5.790408525754884, "grad_norm": 0.00468071224167943, "learning_rate": 1.3821047957371227e-05, "loss": 0.0001, "step": 3260 },
    { "epoch": 5.808170515097691, "grad_norm": 0.0027211324777454138, "learning_rate": 1.3710035523978688e-05, "loss": 0.0002, "step": 3270 },
    { "epoch": 5.825932504440497, "grad_norm": 0.007977406494319439, "learning_rate": 1.3599023090586146e-05, "loss": 0.0002, "step": 3280 },
    { "epoch": 5.843694493783303, "grad_norm": 0.0070778182707726955, "learning_rate": 1.3488010657193607e-05, "loss": 0.0002, "step": 3290 },
    { "epoch": 5.86145648312611, "grad_norm": 0.014064283110201359, "learning_rate": 1.3376998223801065e-05, "loss": 0.0002, "step": 3300 },
    { "epoch": 5.879218472468916,
| "grad_norm": 0.0018223852384835482, | |
| "learning_rate": 1.3265985790408527e-05, | |
| "loss": 0.0001, | |
| "step": 3310 | |
| }, | |
| { | |
| "epoch": 5.896980461811723, | |
| "grad_norm": 0.070171058177948, | |
| "learning_rate": 1.3154973357015988e-05, | |
| "loss": 0.0002, | |
| "step": 3320 | |
| }, | |
| { | |
| "epoch": 5.914742451154529, | |
| "grad_norm": 0.03238040581345558, | |
| "learning_rate": 1.3043960923623447e-05, | |
| "loss": 0.0002, | |
| "step": 3330 | |
| }, | |
| { | |
| "epoch": 5.932504440497336, | |
| "grad_norm": 0.003422616748139262, | |
| "learning_rate": 1.2932948490230907e-05, | |
| "loss": 0.0002, | |
| "step": 3340 | |
| }, | |
| { | |
| "epoch": 5.950266429840142, | |
| "grad_norm": 0.0016958152409642935, | |
| "learning_rate": 1.2821936056838366e-05, | |
| "loss": 0.0001, | |
| "step": 3350 | |
| }, | |
| { | |
| "epoch": 5.968028419182948, | |
| "grad_norm": 0.0023135722149163485, | |
| "learning_rate": 1.2710923623445828e-05, | |
| "loss": 0.0001, | |
| "step": 3360 | |
| }, | |
| { | |
| "epoch": 5.985790408525755, | |
| "grad_norm": 0.00846858974546194, | |
| "learning_rate": 1.2599911190053288e-05, | |
| "loss": 0.0002, | |
| "step": 3370 | |
| }, | |
| { | |
| "epoch": 6.0, | |
| "eval_loss": 1.150123352999799e-06, | |
| "eval_runtime": 22.2281, | |
| "eval_samples_per_second": 44.988, | |
| "eval_steps_per_second": 2.834, | |
| "step": 3378 | |
| }, | |
| { | |
| "epoch": 6.003552397868561, | |
| "grad_norm": 0.059037767350673676, | |
| "learning_rate": 1.2488898756660747e-05, | |
| "loss": 0.0003, | |
| "step": 3380 | |
| }, | |
| { | |
| "epoch": 6.021314387211367, | |
| "grad_norm": 0.010334952734410763, | |
| "learning_rate": 1.2377886323268206e-05, | |
| "loss": 0.0002, | |
| "step": 3390 | |
| }, | |
| { | |
| "epoch": 6.039076376554174, | |
| "grad_norm": 0.002199666341766715, | |
| "learning_rate": 1.2266873889875668e-05, | |
| "loss": 0.0001, | |
| "step": 3400 | |
| }, | |
| { | |
| "epoch": 6.05683836589698, | |
| "grad_norm": 0.0027150341775268316, | |
| "learning_rate": 1.2155861456483126e-05, | |
| "loss": 0.0002, | |
| "step": 3410 | |
| }, | |
| { | |
| "epoch": 6.074600355239787, | |
| "grad_norm": 0.06314147263765335, | |
| "learning_rate": 1.2044849023090587e-05, | |
| "loss": 0.0002, | |
| "step": 3420 | |
| }, | |
| { | |
| "epoch": 6.092362344582593, | |
| "grad_norm": 0.10585487633943558, | |
| "learning_rate": 1.1933836589698047e-05, | |
| "loss": 0.0002, | |
| "step": 3430 | |
| }, | |
| { | |
| "epoch": 6.110124333925399, | |
| "grad_norm": 0.005490344017744064, | |
| "learning_rate": 1.1822824156305506e-05, | |
| "loss": 0.0001, | |
| "step": 3440 | |
| }, | |
| { | |
| "epoch": 6.127886323268206, | |
| "grad_norm": 0.03931137174367905, | |
| "learning_rate": 1.1711811722912968e-05, | |
| "loss": 0.0001, | |
| "step": 3450 | |
| }, | |
| { | |
| "epoch": 6.145648312611012, | |
| "grad_norm": 0.001892774016596377, | |
| "learning_rate": 1.1600799289520427e-05, | |
| "loss": 0.0001, | |
| "step": 3460 | |
| }, | |
| { | |
| "epoch": 6.163410301953819, | |
| "grad_norm": 0.002348232315853238, | |
| "learning_rate": 1.1489786856127887e-05, | |
| "loss": 0.0002, | |
| "step": 3470 | |
| }, | |
| { | |
| "epoch": 6.181172291296625, | |
| "grad_norm": 0.0030624247156083584, | |
| "learning_rate": 1.1378774422735347e-05, | |
| "loss": 0.0002, | |
| "step": 3480 | |
| }, | |
| { | |
| "epoch": 6.198934280639431, | |
| "grad_norm": 0.003177167382091284, | |
| "learning_rate": 1.1267761989342806e-05, | |
| "loss": 0.0001, | |
| "step": 3490 | |
| }, | |
| { | |
| "epoch": 6.216696269982238, | |
| "grad_norm": 0.003824736224487424, | |
| "learning_rate": 1.1156749555950268e-05, | |
| "loss": 0.0002, | |
| "step": 3500 | |
| }, | |
| { | |
| "epoch": 6.234458259325044, | |
| "grad_norm": 0.00209417543374002, | |
| "learning_rate": 1.1045737122557727e-05, | |
| "loss": 0.0001, | |
| "step": 3510 | |
| }, | |
| { | |
| "epoch": 6.252220248667851, | |
| "grad_norm": 0.011512521654367447, | |
| "learning_rate": 1.0934724689165186e-05, | |
| "loss": 0.0001, | |
| "step": 3520 | |
| }, | |
| { | |
| "epoch": 6.269982238010657, | |
| "grad_norm": 0.01817052625119686, | |
| "learning_rate": 1.0823712255772648e-05, | |
| "loss": 0.0002, | |
| "step": 3530 | |
| }, | |
| { | |
| "epoch": 6.287744227353464, | |
| "grad_norm": 0.004132445901632309, | |
| "learning_rate": 1.0712699822380106e-05, | |
| "loss": 0.0001, | |
| "step": 3540 | |
| }, | |
| { | |
| "epoch": 6.30550621669627, | |
| "grad_norm": 0.0017249000957235694, | |
| "learning_rate": 1.0601687388987567e-05, | |
| "loss": 0.0001, | |
| "step": 3550 | |
| }, | |
| { | |
| "epoch": 6.323268206039076, | |
| "grad_norm": 0.0024557355791330338, | |
| "learning_rate": 1.0490674955595027e-05, | |
| "loss": 0.0002, | |
| "step": 3560 | |
| }, | |
| { | |
| "epoch": 6.341030195381883, | |
| "grad_norm": 0.02593265473842621, | |
| "learning_rate": 1.0379662522202486e-05, | |
| "loss": 0.0002, | |
| "step": 3570 | |
| }, | |
| { | |
| "epoch": 6.358792184724689, | |
| "grad_norm": 0.00831576343625784, | |
| "learning_rate": 1.0268650088809948e-05, | |
| "loss": 0.0001, | |
| "step": 3580 | |
| }, | |
| { | |
| "epoch": 6.376554174067495, | |
| "grad_norm": 0.12613391876220703, | |
| "learning_rate": 1.0157637655417407e-05, | |
| "loss": 0.0002, | |
| "step": 3590 | |
| }, | |
| { | |
| "epoch": 6.394316163410302, | |
| "grad_norm": 0.00139847572427243, | |
| "learning_rate": 1.0046625222024867e-05, | |
| "loss": 0.0002, | |
| "step": 3600 | |
| }, | |
| { | |
| "epoch": 6.412078152753108, | |
| "grad_norm": 0.0011393679305911064, | |
| "learning_rate": 9.935612788632327e-06, | |
| "loss": 0.0001, | |
| "step": 3610 | |
| }, | |
| { | |
| "epoch": 6.429840142095915, | |
| "grad_norm": 0.031851690262556076, | |
| "learning_rate": 9.824600355239786e-06, | |
| "loss": 0.0002, | |
| "step": 3620 | |
| }, | |
| { | |
| "epoch": 6.447602131438721, | |
| "grad_norm": 0.002192731713876128, | |
| "learning_rate": 9.713587921847248e-06, | |
| "loss": 0.0002, | |
| "step": 3630 | |
| }, | |
| { | |
| "epoch": 6.465364120781528, | |
| "grad_norm": 0.012485062703490257, | |
| "learning_rate": 9.602575488454707e-06, | |
| "loss": 0.0002, | |
| "step": 3640 | |
| }, | |
| { | |
| "epoch": 6.483126110124334, | |
| "grad_norm": 0.02442704513669014, | |
| "learning_rate": 9.491563055062167e-06, | |
| "loss": 0.0002, | |
| "step": 3650 | |
| }, | |
| { | |
| "epoch": 6.50088809946714, | |
| "grad_norm": 0.0015811693156138062, | |
| "learning_rate": 9.380550621669628e-06, | |
| "loss": 0.0001, | |
| "step": 3660 | |
| }, | |
| { | |
| "epoch": 6.518650088809947, | |
| "grad_norm": 0.039409998804330826, | |
| "learning_rate": 9.269538188277086e-06, | |
| "loss": 0.0001, | |
| "step": 3670 | |
| }, | |
| { | |
| "epoch": 6.536412078152753, | |
| "grad_norm": 0.002665013074874878, | |
| "learning_rate": 9.158525754884547e-06, | |
| "loss": 0.0002, | |
| "step": 3680 | |
| }, | |
| { | |
| "epoch": 6.554174067495559, | |
| "grad_norm": 0.003954344894737005, | |
| "learning_rate": 9.047513321492007e-06, | |
| "loss": 0.0002, | |
| "step": 3690 | |
| }, | |
| { | |
| "epoch": 6.571936056838366, | |
| "grad_norm": 0.005066856276243925, | |
| "learning_rate": 8.936500888099468e-06, | |
| "loss": 0.0002, | |
| "step": 3700 | |
| }, | |
| { | |
| "epoch": 6.589698046181172, | |
| "grad_norm": 0.007488698698580265, | |
| "learning_rate": 8.825488454706928e-06, | |
| "loss": 0.0001, | |
| "step": 3710 | |
| }, | |
| { | |
| "epoch": 6.607460035523979, | |
| "grad_norm": 0.00137425831053406, | |
| "learning_rate": 8.714476021314387e-06, | |
| "loss": 0.0002, | |
| "step": 3720 | |
| }, | |
| { | |
| "epoch": 6.625222024866785, | |
| "grad_norm": 0.005670055281370878, | |
| "learning_rate": 8.603463587921847e-06, | |
| "loss": 0.0001, | |
| "step": 3730 | |
| }, | |
| { | |
| "epoch": 6.642984014209592, | |
| "grad_norm": 0.0011902772821485996, | |
| "learning_rate": 8.492451154529308e-06, | |
| "loss": 0.0001, | |
| "step": 3740 | |
| }, | |
| { | |
| "epoch": 6.660746003552398, | |
| "grad_norm": 0.0016062385402619839, | |
| "learning_rate": 8.381438721136768e-06, | |
| "loss": 0.0001, | |
| "step": 3750 | |
| }, | |
| { | |
| "epoch": 6.678507992895204, | |
| "grad_norm": 0.0020727047231048346, | |
| "learning_rate": 8.270426287744228e-06, | |
| "loss": 0.0002, | |
| "step": 3760 | |
| }, | |
| { | |
| "epoch": 6.696269982238011, | |
| "grad_norm": 0.0027436264790594578, | |
| "learning_rate": 8.159413854351687e-06, | |
| "loss": 0.0004, | |
| "step": 3770 | |
| }, | |
| { | |
| "epoch": 6.714031971580817, | |
| "grad_norm": 0.0016453347634524107, | |
| "learning_rate": 8.048401420959147e-06, | |
| "loss": 0.0001, | |
| "step": 3780 | |
| }, | |
| { | |
| "epoch": 6.731793960923623, | |
| "grad_norm": 0.00958060473203659, | |
| "learning_rate": 7.937388987566608e-06, | |
| "loss": 0.0002, | |
| "step": 3790 | |
| }, | |
| { | |
| "epoch": 6.74955595026643, | |
| "grad_norm": 0.003439374268054962, | |
| "learning_rate": 7.826376554174068e-06, | |
| "loss": 0.0001, | |
| "step": 3800 | |
| }, | |
| { | |
| "epoch": 6.767317939609236, | |
| "grad_norm": 0.007251431699842215, | |
| "learning_rate": 7.715364120781527e-06, | |
| "loss": 0.0002, | |
| "step": 3810 | |
| }, | |
| { | |
| "epoch": 6.785079928952043, | |
| "grad_norm": 0.004211882594972849, | |
| "learning_rate": 7.604351687388989e-06, | |
| "loss": 0.0001, | |
| "step": 3820 | |
| }, | |
| { | |
| "epoch": 6.802841918294849, | |
| "grad_norm": 0.004078486934304237, | |
| "learning_rate": 7.493339253996448e-06, | |
| "loss": 0.0001, | |
| "step": 3830 | |
| }, | |
| { | |
| "epoch": 6.820603907637656, | |
| "grad_norm": 0.0059041669592261314, | |
| "learning_rate": 7.382326820603907e-06, | |
| "loss": 0.0001, | |
| "step": 3840 | |
| }, | |
| { | |
| "epoch": 6.838365896980462, | |
| "grad_norm": 0.00534476526081562, | |
| "learning_rate": 7.2713143872113685e-06, | |
| "loss": 0.0001, | |
| "step": 3850 | |
| }, | |
| { | |
| "epoch": 6.856127886323268, | |
| "grad_norm": 0.012589712627232075, | |
| "learning_rate": 7.160301953818828e-06, | |
| "loss": 0.0003, | |
| "step": 3860 | |
| }, | |
| { | |
| "epoch": 6.873889875666075, | |
| "grad_norm": 0.010835024528205395, | |
| "learning_rate": 7.0492895204262885e-06, | |
| "loss": 0.0002, | |
| "step": 3870 | |
| }, | |
| { | |
| "epoch": 6.891651865008881, | |
| "grad_norm": 0.0016127921408042312, | |
| "learning_rate": 6.938277087033748e-06, | |
| "loss": 0.0001, | |
| "step": 3880 | |
| }, | |
| { | |
| "epoch": 6.909413854351687, | |
| "grad_norm": 0.00635075569152832, | |
| "learning_rate": 6.827264653641208e-06, | |
| "loss": 0.0004, | |
| "step": 3890 | |
| }, | |
| { | |
| "epoch": 6.927175843694494, | |
| "grad_norm": 0.002242429880425334, | |
| "learning_rate": 6.716252220248669e-06, | |
| "loss": 0.0001, | |
| "step": 3900 | |
| }, | |
| { | |
| "epoch": 6.9449378330373, | |
| "grad_norm": 0.03803255409002304, | |
| "learning_rate": 6.605239786856128e-06, | |
| "loss": 0.0001, | |
| "step": 3910 | |
| }, | |
| { | |
| "epoch": 6.962699822380107, | |
| "grad_norm": 0.0017812428995966911, | |
| "learning_rate": 6.494227353463589e-06, | |
| "loss": 0.0001, | |
| "step": 3920 | |
| }, | |
| { | |
| "epoch": 6.980461811722913, | |
| "grad_norm": 0.07593787461519241, | |
| "learning_rate": 6.383214920071048e-06, | |
| "loss": 0.0001, | |
| "step": 3930 | |
| }, | |
| { | |
| "epoch": 6.99822380106572, | |
| "grad_norm": 0.004818908404558897, | |
| "learning_rate": 6.272202486678508e-06, | |
| "loss": 0.0001, | |
| "step": 3940 | |
| }, | |
| { | |
| "epoch": 7.0, | |
| "eval_loss": 9.495445283391746e-07, | |
| "eval_runtime": 22.2592, | |
| "eval_samples_per_second": 44.925, | |
| "eval_steps_per_second": 2.83, | |
| "step": 3941 | |
| }, | |
| { | |
| "epoch": 7.015985790408526, | |
| "grad_norm": 0.13469889760017395, | |
| "learning_rate": 6.161190053285968e-06, | |
| "loss": 0.0002, | |
| "step": 3950 | |
| }, | |
| { | |
| "epoch": 7.033747779751332, | |
| "grad_norm": 0.001866370439529419, | |
| "learning_rate": 6.050177619893428e-06, | |
| "loss": 0.0001, | |
| "step": 3960 | |
| }, | |
| { | |
| "epoch": 7.051509769094139, | |
| "grad_norm": 0.0034974406007677317, | |
| "learning_rate": 5.939165186500888e-06, | |
| "loss": 0.0001, | |
| "step": 3970 | |
| }, | |
| { | |
| "epoch": 7.069271758436945, | |
| "grad_norm": 0.04713987559080124, | |
| "learning_rate": 5.828152753108349e-06, | |
| "loss": 0.0001, | |
| "step": 3980 | |
| }, | |
| { | |
| "epoch": 7.087033747779751, | |
| "grad_norm": 0.01695702224969864, | |
| "learning_rate": 5.717140319715809e-06, | |
| "loss": 0.0001, | |
| "step": 3990 | |
| }, | |
| { | |
| "epoch": 7.104795737122558, | |
| "grad_norm": 0.0627860575914383, | |
| "learning_rate": 5.6061278863232686e-06, | |
| "loss": 0.0001, | |
| "step": 4000 | |
| }, | |
| { | |
| "epoch": 7.122557726465364, | |
| "grad_norm": 0.002155202440917492, | |
| "learning_rate": 5.495115452930728e-06, | |
| "loss": 0.0002, | |
| "step": 4010 | |
| }, | |
| { | |
| "epoch": 7.140319715808171, | |
| "grad_norm": 0.007705147843807936, | |
| "learning_rate": 5.3841030195381885e-06, | |
| "loss": 0.0001, | |
| "step": 4020 | |
| }, | |
| { | |
| "epoch": 7.158081705150977, | |
| "grad_norm": 0.0011336603201925755, | |
| "learning_rate": 5.273090586145649e-06, | |
| "loss": 0.0002, | |
| "step": 4030 | |
| }, | |
| { | |
| "epoch": 7.175843694493783, | |
| "grad_norm": 0.006822965107858181, | |
| "learning_rate": 5.1620781527531085e-06, | |
| "loss": 0.0001, | |
| "step": 4040 | |
| }, | |
| { | |
| "epoch": 7.19360568383659, | |
| "grad_norm": 0.001960517605766654, | |
| "learning_rate": 5.051065719360568e-06, | |
| "loss": 0.0001, | |
| "step": 4050 | |
| }, | |
| { | |
| "epoch": 7.211367673179396, | |
| "grad_norm": 0.027150465175509453, | |
| "learning_rate": 4.940053285968028e-06, | |
| "loss": 0.0001, | |
| "step": 4060 | |
| }, | |
| { | |
| "epoch": 7.229129662522203, | |
| "grad_norm": 0.011790856719017029, | |
| "learning_rate": 4.829040852575489e-06, | |
| "loss": 0.0002, | |
| "step": 4070 | |
| }, | |
| { | |
| "epoch": 7.246891651865009, | |
| "grad_norm": 0.0008871167083270848, | |
| "learning_rate": 4.718028419182949e-06, | |
| "loss": 0.0001, | |
| "step": 4080 | |
| }, | |
| { | |
| "epoch": 7.264653641207815, | |
| "grad_norm": 0.001180154737085104, | |
| "learning_rate": 4.607015985790409e-06, | |
| "loss": 0.0001, | |
| "step": 4090 | |
| }, | |
| { | |
| "epoch": 7.282415630550622, | |
| "grad_norm": 0.0056028300896286964, | |
| "learning_rate": 4.496003552397868e-06, | |
| "loss": 0.0001, | |
| "step": 4100 | |
| }, | |
| { | |
| "epoch": 7.300177619893428, | |
| "grad_norm": 0.009414936415851116, | |
| "learning_rate": 4.384991119005329e-06, | |
| "loss": 0.0002, | |
| "step": 4110 | |
| }, | |
| { | |
| "epoch": 7.317939609236235, | |
| "grad_norm": 0.003175571095198393, | |
| "learning_rate": 4.273978685612789e-06, | |
| "loss": 0.0001, | |
| "step": 4120 | |
| }, | |
| { | |
| "epoch": 7.335701598579041, | |
| "grad_norm": 0.0012675018515437841, | |
| "learning_rate": 4.162966252220249e-06, | |
| "loss": 0.0003, | |
| "step": 4130 | |
| }, | |
| { | |
| "epoch": 7.353463587921847, | |
| "grad_norm": 0.006813838612288237, | |
| "learning_rate": 4.051953818827709e-06, | |
| "loss": 0.0001, | |
| "step": 4140 | |
| }, | |
| { | |
| "epoch": 7.371225577264654, | |
| "grad_norm": 0.059542424976825714, | |
| "learning_rate": 3.940941385435169e-06, | |
| "loss": 0.0001, | |
| "step": 4150 | |
| }, | |
| { | |
| "epoch": 7.38898756660746, | |
| "grad_norm": 0.12738512456417084, | |
| "learning_rate": 3.829928952042629e-06, | |
| "loss": 0.0002, | |
| "step": 4160 | |
| }, | |
| { | |
| "epoch": 7.406749555950267, | |
| "grad_norm": 0.0020928082522004843, | |
| "learning_rate": 3.718916518650089e-06, | |
| "loss": 0.0002, | |
| "step": 4170 | |
| }, | |
| { | |
| "epoch": 7.424511545293073, | |
| "grad_norm": 0.03172928839921951, | |
| "learning_rate": 3.607904085257549e-06, | |
| "loss": 0.0002, | |
| "step": 4180 | |
| }, | |
| { | |
| "epoch": 7.442273534635879, | |
| "grad_norm": 0.002771994797512889, | |
| "learning_rate": 3.4968916518650093e-06, | |
| "loss": 0.0001, | |
| "step": 4190 | |
| }, | |
| { | |
| "epoch": 7.460035523978686, | |
| "grad_norm": 0.002538390224799514, | |
| "learning_rate": 3.3858792184724693e-06, | |
| "loss": 0.0002, | |
| "step": 4200 | |
| }, | |
| { | |
| "epoch": 7.477797513321492, | |
| "grad_norm": 0.004362764302641153, | |
| "learning_rate": 3.274866785079929e-06, | |
| "loss": 0.0001, | |
| "step": 4210 | |
| }, | |
| { | |
| "epoch": 7.495559502664299, | |
| "grad_norm": 0.0017780765192583203, | |
| "learning_rate": 3.1638543516873893e-06, | |
| "loss": 0.0001, | |
| "step": 4220 | |
| }, | |
| { | |
| "epoch": 7.513321492007105, | |
| "grad_norm": 0.004568871576339006, | |
| "learning_rate": 3.0528419182948492e-06, | |
| "loss": 0.0001, | |
| "step": 4230 | |
| }, | |
| { | |
| "epoch": 7.531083481349912, | |
| "grad_norm": 0.007695276755839586, | |
| "learning_rate": 2.9418294849023092e-06, | |
| "loss": 0.0001, | |
| "step": 4240 | |
| }, | |
| { | |
| "epoch": 7.548845470692718, | |
| "grad_norm": 0.002825086936354637, | |
| "learning_rate": 2.830817051509769e-06, | |
| "loss": 0.0001, | |
| "step": 4250 | |
| }, | |
| { | |
| "epoch": 7.566607460035524, | |
| "grad_norm": 0.0010516445618122816, | |
| "learning_rate": 2.7198046181172296e-06, | |
| "loss": 0.0001, | |
| "step": 4260 | |
| }, | |
| { | |
| "epoch": 7.584369449378331, | |
| "grad_norm": 0.0022022314369678497, | |
| "learning_rate": 2.608792184724689e-06, | |
| "loss": 0.0001, | |
| "step": 4270 | |
| }, | |
| { | |
| "epoch": 7.602131438721137, | |
| "grad_norm": 0.00462978845462203, | |
| "learning_rate": 2.4977797513321495e-06, | |
| "loss": 0.0001, | |
| "step": 4280 | |
| }, | |
| { | |
| "epoch": 7.619893428063943, | |
| "grad_norm": 0.002377378987148404, | |
| "learning_rate": 2.386767317939609e-06, | |
| "loss": 0.0001, | |
| "step": 4290 | |
| }, | |
| { | |
| "epoch": 7.63765541740675, | |
| "grad_norm": 0.10261301696300507, | |
| "learning_rate": 2.2757548845470695e-06, | |
| "loss": 0.0002, | |
| "step": 4300 | |
| }, | |
| { | |
| "epoch": 7.655417406749556, | |
| "grad_norm": 0.005298789124935865, | |
| "learning_rate": 2.1647424511545295e-06, | |
| "loss": 0.0002, | |
| "step": 4310 | |
| }, | |
| { | |
| "epoch": 7.673179396092363, | |
| "grad_norm": 0.0017874079057946801, | |
| "learning_rate": 2.0537300177619894e-06, | |
| "loss": 0.0001, | |
| "step": 4320 | |
| }, | |
| { | |
| "epoch": 7.690941385435169, | |
| "grad_norm": 0.002110840054228902, | |
| "learning_rate": 1.9427175843694494e-06, | |
| "loss": 0.0001, | |
| "step": 4330 | |
| }, | |
| { | |
| "epoch": 7.708703374777976, | |
| "grad_norm": 0.009644666686654091, | |
| "learning_rate": 1.8317051509769096e-06, | |
| "loss": 0.0001, | |
| "step": 4340 | |
| }, | |
| { | |
| "epoch": 7.726465364120782, | |
| "grad_norm": 0.005328277125954628, | |
| "learning_rate": 1.7206927175843696e-06, | |
| "loss": 0.0001, | |
| "step": 4350 | |
| }, | |
| { | |
| "epoch": 7.744227353463588, | |
| "grad_norm": 0.0009074486442841589, | |
| "learning_rate": 1.6096802841918297e-06, | |
| "loss": 0.0001, | |
| "step": 4360 | |
| }, | |
| { | |
| "epoch": 7.761989342806395, | |
| "grad_norm": 0.002376771066337824, | |
| "learning_rate": 1.4986678507992897e-06, | |
| "loss": 0.0001, | |
| "step": 4370 | |
| }, | |
| { | |
| "epoch": 7.779751332149201, | |
| "grad_norm": 0.009661192074418068, | |
| "learning_rate": 1.3876554174067497e-06, | |
| "loss": 0.0001, | |
| "step": 4380 | |
| }, | |
| { | |
| "epoch": 7.797513321492007, | |
| "grad_norm": 0.0024153704289346933, | |
| "learning_rate": 1.2766429840142097e-06, | |
| "loss": 0.0001, | |
| "step": 4390 | |
| }, | |
| { | |
| "epoch": 7.815275310834814, | |
| "grad_norm": 0.008802085183560848, | |
| "learning_rate": 1.1656305506216696e-06, | |
| "loss": 0.0001, | |
| "step": 4400 | |
| }, | |
| { | |
| "epoch": 7.83303730017762, | |
| "grad_norm": 0.0018315628403797746, | |
| "learning_rate": 1.0546181172291296e-06, | |
| "loss": 0.0001, | |
| "step": 4410 | |
| }, | |
| { | |
| "epoch": 7.850799289520427, | |
| "grad_norm": 0.002987775718793273, | |
| "learning_rate": 9.436056838365898e-07, | |
| "loss": 0.0001, | |
| "step": 4420 | |
| }, | |
| { | |
| "epoch": 7.868561278863233, | |
| "grad_norm": 0.010026910342276096, | |
| "learning_rate": 8.325932504440498e-07, | |
| "loss": 0.0001, | |
| "step": 4430 | |
| }, | |
| { | |
| "epoch": 7.88632326820604, | |
| "grad_norm": 0.012213947251439095, | |
| "learning_rate": 7.215808170515097e-07, | |
| "loss": 0.0001, | |
| "step": 4440 | |
| }, | |
| { | |
| "epoch": 7.904085257548846, | |
| "grad_norm": 0.0036439860705286264, | |
| "learning_rate": 6.105683836589698e-07, | |
| "loss": 0.0001, | |
| "step": 4450 | |
| }, | |
| { | |
| "epoch": 7.921847246891652, | |
| "grad_norm": 0.04297882691025734, | |
| "learning_rate": 4.995559502664299e-07, | |
| "loss": 0.0001, | |
| "step": 4460 | |
| }, | |
| { | |
| "epoch": 7.939609236234459, | |
| "grad_norm": 0.004080113489180803, | |
| "learning_rate": 3.885435168738899e-07, | |
| "loss": 0.0001, | |
| "step": 4470 | |
| }, | |
| { | |
| "epoch": 7.957371225577265, | |
| "grad_norm": 0.0037819864228367805, | |
| "learning_rate": 2.775310834813499e-07, | |
| "loss": 0.0001, | |
| "step": 4480 | |
| }, | |
| { | |
| "epoch": 7.975133214920071, | |
| "grad_norm": 0.008480883203446865, | |
| "learning_rate": 1.6651865008880993e-07, | |
| "loss": 0.0001, | |
| "step": 4490 | |
| }, | |
| { | |
| "epoch": 7.992895204262878, | |
| "grad_norm": 0.01645360141992569, | |
| "learning_rate": 5.550621669626998e-08, | |
| "loss": 0.0002, | |
| "step": 4500 | |
| }, | |
| { | |
| "epoch": 8.0, | |
| "eval_loss": 8.977843890534132e-07, | |
| "eval_runtime": 21.9896, | |
| "eval_samples_per_second": 45.476, | |
| "eval_steps_per_second": 2.865, | |
| "step": 4504 | |
| } | |
| ], | |
| "logging_steps": 10, | |
| "max_steps": 4504, | |
| "num_input_tokens_seen": 0, | |
| "num_train_epochs": 8, | |
| "save_steps": 500, | |
| "stateful_callbacks": { | |
| "TrainerControl": { | |
| "args": { | |
| "should_epoch_stop": false, | |
| "should_evaluate": false, | |
| "should_log": false, | |
| "should_save": true, | |
| "should_training_stop": true | |
| }, | |
| "attributes": {} | |
| } | |
| }, | |
| "total_flos": 1.105854208303104e+16, | |
| "train_batch_size": 16, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |
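
The state above matches the `trainer_state.json` layout that the Hugging Face `transformers` Trainer writes into each checkpoint directory. As a minimal sketch of how such a file can be read back, assuming it is saved locally as `trainer_state.json` (the filename and the printed summary are illustrative, not part of this log), `log_history` can be split into training entries (keyed by `loss`) and evaluation entries (keyed by `eval_loss`):

```python
import json

# Load the trainer state; the path is an assumption for illustration.
with open("trainer_state.json") as f:
    state = json.load(f)

# log_history interleaves training logs (which carry "loss") and
# per-epoch evaluation logs (which carry "eval_loss").
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"completed step {state['global_step']} of {state['max_steps']}")
print(f"last logged training loss: {train_logs[-1]['loss']}")
for e in eval_logs:
    print(f"epoch {e['epoch']:.0f}: eval_loss = {e['eval_loss']:.3e}")
```

For this run, the loop would report eval losses of about 1.150e-06, 9.495e-07, and 8.978e-07 at epochs 6, 7, and 8, matching the evaluation entries logged above.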