{
  "best_global_step": 2000,
  "best_metric": 0.8613524744994333,
  "best_model_checkpoint": "./SALAMA_C5/checkpoint-2000",
  "epoch": 2.0,
  "eval_steps": 1000,
  "global_step": 2938,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.013614703880190605,
      "grad_norm": 3.235334873199463,
      "learning_rate": 3.8e-07,
      "loss": 0.0315,
      "step": 20
    },
    {
      "epoch": 0.02722940776038121,
      "grad_norm": 2.3482489585876465,
      "learning_rate": 7.8e-07,
      "loss": 0.0253,
      "step": 40
    },
    {
      "epoch": 0.04084411164057182,
      "grad_norm": 2.1805593967437744,
      "learning_rate": 1.1800000000000001e-06,
      "loss": 0.0319,
      "step": 60
    },
    {
      "epoch": 0.05445881552076242,
      "grad_norm": 2.770758867263794,
      "learning_rate": 1.5800000000000001e-06,
      "loss": 0.0201,
      "step": 80
    },
    {
      "epoch": 0.06807351940095303,
      "grad_norm": 2.3989436626434326,
      "learning_rate": 1.98e-06,
      "loss": 0.0276,
      "step": 100
    },
    {
      "epoch": 0.08168822328114364,
      "grad_norm": 5.180119037628174,
      "learning_rate": 2.38e-06,
      "loss": 0.0331,
      "step": 120
    },
    {
      "epoch": 0.09530292716133425,
      "grad_norm": 1.9398974180221558,
      "learning_rate": 2.7800000000000005e-06,
      "loss": 0.0292,
      "step": 140
    },
    {
      "epoch": 0.10891763104152484,
      "grad_norm": 2.7253212928771973,
      "learning_rate": 3.1800000000000005e-06,
      "loss": 0.0242,
      "step": 160
    },
    {
      "epoch": 0.12253233492171545,
      "grad_norm": 0.4866615831851959,
      "learning_rate": 3.58e-06,
      "loss": 0.0165,
      "step": 180
    },
    {
      "epoch": 0.13614703880190607,
      "grad_norm": 3.720773696899414,
      "learning_rate": 3.980000000000001e-06,
      "loss": 0.031,
      "step": 200
    },
    {
      "epoch": 0.14976174268209666,
      "grad_norm": 2.4779279232025146,
      "learning_rate": 4.38e-06,
      "loss": 0.0203,
      "step": 220
    },
    {
      "epoch": 0.16337644656228728,
      "grad_norm": 6.39409875869751,
      "learning_rate": 4.78e-06,
      "loss": 0.027,
      "step": 240
    },
    {
      "epoch": 0.17699115044247787,
      "grad_norm": 3.8380324840545654,
      "learning_rate": 5.18e-06,
      "loss": 0.0267,
      "step": 260
    },
    {
      "epoch": 0.1906058543226685,
      "grad_norm": 2.130601167678833,
      "learning_rate": 5.580000000000001e-06,
      "loss": 0.0218,
      "step": 280
    },
    {
      "epoch": 0.2042205582028591,
      "grad_norm": 2.812141180038452,
      "learning_rate": 5.98e-06,
      "loss": 0.0278,
      "step": 300
    },
    {
      "epoch": 0.21783526208304968,
      "grad_norm": 3.1364989280700684,
      "learning_rate": 6.380000000000001e-06,
      "loss": 0.0246,
      "step": 320
    },
    {
      "epoch": 0.2314499659632403,
      "grad_norm": 2.2158021926879883,
      "learning_rate": 6.780000000000001e-06,
      "loss": 0.0231,
      "step": 340
    },
    {
      "epoch": 0.2450646698434309,
      "grad_norm": 3.147629737854004,
      "learning_rate": 7.180000000000001e-06,
      "loss": 0.0257,
      "step": 360
    },
    {
      "epoch": 0.2586793737236215,
      "grad_norm": 2.9662864208221436,
      "learning_rate": 7.58e-06,
      "loss": 0.0326,
      "step": 380
    },
    {
      "epoch": 0.27229407760381213,
      "grad_norm": 3.3279476165771484,
      "learning_rate": 7.980000000000002e-06,
      "loss": 0.0304,
      "step": 400
    },
    {
      "epoch": 0.2859087814840027,
      "grad_norm": 2.976351737976074,
      "learning_rate": 8.380000000000001e-06,
      "loss": 0.0285,
      "step": 420
    },
    {
      "epoch": 0.2995234853641933,
      "grad_norm": 2.575556516647339,
      "learning_rate": 8.78e-06,
      "loss": 0.0313,
      "step": 440
    },
    {
      "epoch": 0.3131381892443839,
      "grad_norm": 4.082836627960205,
      "learning_rate": 9.180000000000002e-06,
      "loss": 0.0297,
      "step": 460
    },
    {
      "epoch": 0.32675289312457456,
      "grad_norm": 1.92853581905365,
      "learning_rate": 9.58e-06,
      "loss": 0.0312,
      "step": 480
    },
    {
      "epoch": 0.34036759700476515,
      "grad_norm": 4.263063430786133,
      "learning_rate": 9.980000000000001e-06,
      "loss": 0.036,
      "step": 500
    },
    {
      "epoch": 0.35398230088495575,
      "grad_norm": 2.9727425575256348,
      "learning_rate": 9.922067268252667e-06,
      "loss": 0.0402,
      "step": 520
    },
    {
      "epoch": 0.36759700476514634,
      "grad_norm": 1.8520768880844116,
      "learning_rate": 9.84003281378179e-06,
      "loss": 0.0326,
      "step": 540
    },
    {
      "epoch": 0.381211708645337,
      "grad_norm": 3.8913838863372803,
      "learning_rate": 9.757998359310912e-06,
      "loss": 0.0394,
      "step": 560
    },
    {
      "epoch": 0.3948264125255276,
      "grad_norm": 1.2734198570251465,
      "learning_rate": 9.675963904840033e-06,
      "loss": 0.037,
      "step": 580
    },
    {
      "epoch": 0.4084411164057182,
      "grad_norm": 3.6827642917633057,
      "learning_rate": 9.593929450369155e-06,
      "loss": 0.0344,
      "step": 600
    },
    {
      "epoch": 0.42205582028590877,
      "grad_norm": 2.391993284225464,
      "learning_rate": 9.511894995898278e-06,
      "loss": 0.0348,
      "step": 620
    },
    {
      "epoch": 0.43567052416609936,
      "grad_norm": 3.092235565185547,
      "learning_rate": 9.4298605414274e-06,
      "loss": 0.0385,
      "step": 640
    },
    {
      "epoch": 0.44928522804629,
      "grad_norm": 3.21338152885437,
      "learning_rate": 9.347826086956523e-06,
      "loss": 0.0366,
      "step": 660
    },
    {
      "epoch": 0.4628999319264806,
      "grad_norm": 1.9083036184310913,
      "learning_rate": 9.265791632485645e-06,
      "loss": 0.04,
      "step": 680
    },
    {
      "epoch": 0.4765146358066712,
      "grad_norm": 2.700929641723633,
      "learning_rate": 9.183757178014768e-06,
      "loss": 0.046,
      "step": 700
    },
    {
      "epoch": 0.4901293396868618,
      "grad_norm": 2.0987277030944824,
      "learning_rate": 9.101722723543888e-06,
      "loss": 0.0309,
      "step": 720
    },
    {
      "epoch": 0.5037440435670524,
      "grad_norm": 3.4542877674102783,
      "learning_rate": 9.01968826907301e-06,
      "loss": 0.0341,
      "step": 740
    },
    {
      "epoch": 0.517358747447243,
      "grad_norm": 1.712745189666748,
      "learning_rate": 8.937653814602135e-06,
      "loss": 0.0226,
      "step": 760
    },
    {
      "epoch": 0.5309734513274337,
      "grad_norm": 4.331600189208984,
      "learning_rate": 8.855619360131256e-06,
      "loss": 0.0363,
      "step": 780
    },
    {
      "epoch": 0.5445881552076243,
      "grad_norm": 2.4707260131835938,
      "learning_rate": 8.773584905660378e-06,
      "loss": 0.041,
      "step": 800
    },
    {
      "epoch": 0.5582028590878149,
      "grad_norm": 2.3577475547790527,
      "learning_rate": 8.6915504511895e-06,
      "loss": 0.035,
      "step": 820
    },
    {
      "epoch": 0.5718175629680055,
      "grad_norm": 1.8985271453857422,
      "learning_rate": 8.609515996718623e-06,
      "loss": 0.0303,
      "step": 840
    },
    {
      "epoch": 0.585432266848196,
      "grad_norm": 3.2735111713409424,
      "learning_rate": 8.527481542247746e-06,
      "loss": 0.0441,
      "step": 860
    },
    {
      "epoch": 0.5990469707283866,
      "grad_norm": 3.249154567718506,
      "learning_rate": 8.445447087776866e-06,
      "loss": 0.0341,
      "step": 880
    },
    {
      "epoch": 0.6126616746085772,
      "grad_norm": 3.929332733154297,
      "learning_rate": 8.363412633305989e-06,
      "loss": 0.0351,
      "step": 900
    },
    {
      "epoch": 0.6262763784887678,
      "grad_norm": 2.7367618083953857,
      "learning_rate": 8.281378178835111e-06,
      "loss": 0.0341,
      "step": 920
    },
    {
      "epoch": 0.6398910823689585,
      "grad_norm": 3.0282886028289795,
      "learning_rate": 8.199343724364234e-06,
      "loss": 0.0324,
      "step": 940
    },
    {
      "epoch": 0.6535057862491491,
      "grad_norm": 4.974277496337891,
      "learning_rate": 8.117309269893356e-06,
      "loss": 0.0535,
      "step": 960
    },
    {
      "epoch": 0.6671204901293397,
      "grad_norm": 2.943775177001953,
      "learning_rate": 8.035274815422479e-06,
      "loss": 0.0403,
      "step": 980
    },
    {
      "epoch": 0.6807351940095303,
      "grad_norm": 6.06272029876709,
      "learning_rate": 7.953240360951601e-06,
      "loss": 0.0358,
      "step": 1000
    },
    {
      "epoch": 0.6807351940095303,
      "eval_loss": 0.025179551914334297,
      "eval_runtime": 4655.1371,
      "eval_samples_per_second": 2.524,
      "eval_steps_per_second": 0.316,
      "eval_wer": 1.7803173403853418,
      "step": 1000
    },
    {
      "epoch": 0.6943498978897209,
      "grad_norm": 3.8485658168792725,
      "learning_rate": 7.871205906480722e-06,
      "loss": 0.0315,
      "step": 1020
    },
    {
      "epoch": 0.7079646017699115,
      "grad_norm": 2.3332481384277344,
      "learning_rate": 7.789171452009844e-06,
      "loss": 0.0395,
      "step": 1040
    },
    {
      "epoch": 0.7215793056501021,
      "grad_norm": 3.6540513038635254,
      "learning_rate": 7.707136997538967e-06,
      "loss": 0.0408,
      "step": 1060
    },
    {
      "epoch": 0.7351940095302927,
      "grad_norm": 2.713531494140625,
      "learning_rate": 7.625102543068089e-06,
      "loss": 0.0351,
      "step": 1080
    },
    {
      "epoch": 0.7488087134104833,
      "grad_norm": 4.37577486038208,
      "learning_rate": 7.543068088597212e-06,
      "loss": 0.0411,
      "step": 1100
    },
    {
      "epoch": 0.762423417290674,
      "grad_norm": 2.9798600673675537,
      "learning_rate": 7.461033634126333e-06,
      "loss": 0.0354,
      "step": 1120
    },
    {
      "epoch": 0.7760381211708646,
      "grad_norm": 3.0978410243988037,
      "learning_rate": 7.378999179655456e-06,
      "loss": 0.0341,
      "step": 1140
    },
    {
      "epoch": 0.7896528250510552,
      "grad_norm": 4.264355659484863,
      "learning_rate": 7.296964725184577e-06,
      "loss": 0.0488,
      "step": 1160
    },
    {
      "epoch": 0.8032675289312458,
      "grad_norm": 4.381528854370117,
      "learning_rate": 7.214930270713701e-06,
      "loss": 0.0408,
      "step": 1180
    },
    {
      "epoch": 0.8168822328114363,
      "grad_norm": 3.541025161743164,
      "learning_rate": 7.132895816242823e-06,
      "loss": 0.0339,
      "step": 1200
    },
    {
      "epoch": 0.8304969366916269,
      "grad_norm": 4.371401786804199,
      "learning_rate": 7.050861361771945e-06,
      "loss": 0.0374,
      "step": 1220
    },
    {
      "epoch": 0.8441116405718175,
      "grad_norm": 2.3789560794830322,
      "learning_rate": 6.968826907301067e-06,
      "loss": 0.0286,
      "step": 1240
    },
    {
      "epoch": 0.8577263444520081,
      "grad_norm": 3.898851156234741,
      "learning_rate": 6.886792452830189e-06,
      "loss": 0.0289,
      "step": 1260
    },
    {
      "epoch": 0.8713410483321987,
      "grad_norm": 3.1753439903259277,
      "learning_rate": 6.804757998359311e-06,
      "loss": 0.0297,
      "step": 1280
    },
    {
      "epoch": 0.8849557522123894,
      "grad_norm": 2.0378472805023193,
      "learning_rate": 6.722723543888434e-06,
      "loss": 0.024,
      "step": 1300
    },
    {
      "epoch": 0.89857045609258,
      "grad_norm": 2.899442195892334,
      "learning_rate": 6.640689089417555e-06,
      "loss": 0.0452,
      "step": 1320
    },
    {
      "epoch": 0.9121851599727706,
      "grad_norm": 3.1962156295776367,
      "learning_rate": 6.558654634946679e-06,
      "loss": 0.0334,
      "step": 1340
    },
    {
      "epoch": 0.9257998638529612,
      "grad_norm": 1.9463192224502563,
      "learning_rate": 6.4766201804758e-06,
      "loss": 0.0324,
      "step": 1360
    },
    {
      "epoch": 0.9394145677331518,
      "grad_norm": 1.6102110147476196,
      "learning_rate": 6.394585726004923e-06,
      "loss": 0.0251,
      "step": 1380
    },
    {
      "epoch": 0.9530292716133424,
      "grad_norm": 2.6284947395324707,
      "learning_rate": 6.312551271534045e-06,
      "loss": 0.0413,
      "step": 1400
    },
    {
      "epoch": 0.966643975493533,
      "grad_norm": 1.6867362260818481,
      "learning_rate": 6.230516817063167e-06,
      "loss": 0.0358,
      "step": 1420
    },
    {
      "epoch": 0.9802586793737236,
      "grad_norm": 2.854414939880371,
      "learning_rate": 6.148482362592289e-06,
      "loss": 0.0278,
      "step": 1440
    },
    {
      "epoch": 0.9938733832539143,
      "grad_norm": 1.794249415397644,
      "learning_rate": 6.066447908121411e-06,
      "loss": 0.0283,
      "step": 1460
    },
    {
      "epoch": 1.0074880871341048,
      "grad_norm": 6.022786617279053,
      "learning_rate": 5.984413453650534e-06,
      "loss": 0.0266,
      "step": 1480
    },
    {
      "epoch": 1.0211027910142954,
      "grad_norm": 1.6846201419830322,
      "learning_rate": 5.902378999179655e-06,
      "loss": 0.0115,
      "step": 1500
    },
    {
      "epoch": 1.034717494894486,
      "grad_norm": 1.9217748641967773,
      "learning_rate": 5.820344544708778e-06,
      "loss": 0.0137,
      "step": 1520
    },
    {
      "epoch": 1.0483321987746765,
      "grad_norm": 0.8108400702476501,
      "learning_rate": 5.738310090237901e-06,
      "loss": 0.0132,
      "step": 1540
    },
    {
      "epoch": 1.0619469026548674,
      "grad_norm": 2.2375566959381104,
      "learning_rate": 5.656275635767022e-06,
      "loss": 0.0146,
      "step": 1560
    },
    {
      "epoch": 1.075561606535058,
      "grad_norm": 1.1262922286987305,
      "learning_rate": 5.574241181296145e-06,
      "loss": 0.0137,
      "step": 1580
    },
    {
      "epoch": 1.0891763104152485,
      "grad_norm": 1.472326397895813,
      "learning_rate": 5.4922067268252665e-06,
      "loss": 0.011,
      "step": 1600
    },
    {
      "epoch": 1.1027910142954391,
      "grad_norm": 1.3766967058181763,
      "learning_rate": 5.410172272354389e-06,
      "loss": 0.0118,
      "step": 1620
    },
    {
      "epoch": 1.1164057181756297,
      "grad_norm": 1.1501350402832031,
      "learning_rate": 5.328137817883512e-06,
      "loss": 0.0153,
      "step": 1640
    },
    {
      "epoch": 1.1300204220558203,
      "grad_norm": 2.6213340759277344,
      "learning_rate": 5.246103363412634e-06,
      "loss": 0.0165,
      "step": 1660
    },
    {
      "epoch": 1.143635125936011,
      "grad_norm": 0.9999692440032959,
      "learning_rate": 5.164068908941756e-06,
      "loss": 0.013,
      "step": 1680
    },
    {
      "epoch": 1.1572498298162015,
      "grad_norm": 1.7493408918380737,
      "learning_rate": 5.082034454470878e-06,
      "loss": 0.0128,
      "step": 1700
    },
    {
      "epoch": 1.170864533696392,
      "grad_norm": 1.7462941408157349,
      "learning_rate": 5e-06,
      "loss": 0.0115,
      "step": 1720
    },
    {
      "epoch": 1.1844792375765827,
      "grad_norm": 0.8824525475502014,
      "learning_rate": 4.917965545529123e-06,
      "loss": 0.0128,
      "step": 1740
    },
    {
      "epoch": 1.1980939414567733,
      "grad_norm": 3.0848796367645264,
      "learning_rate": 4.8359310910582445e-06,
      "loss": 0.0198,
      "step": 1760
    },
    {
      "epoch": 1.2117086453369639,
      "grad_norm": 1.641610026359558,
      "learning_rate": 4.753896636587367e-06,
      "loss": 0.0189,
      "step": 1780
    },
    {
      "epoch": 1.2253233492171545,
      "grad_norm": 0.760683000087738,
      "learning_rate": 4.671862182116489e-06,
      "loss": 0.0126,
      "step": 1800
    },
    {
      "epoch": 1.238938053097345,
      "grad_norm": 1.876564621925354,
      "learning_rate": 4.589827727645612e-06,
      "loss": 0.0121,
      "step": 1820
    },
    {
      "epoch": 1.2525527569775359,
      "grad_norm": 1.66615891456604,
      "learning_rate": 4.5077932731747335e-06,
      "loss": 0.0115,
      "step": 1840
    },
    {
      "epoch": 1.2661674608577265,
      "grad_norm": 3.3713815212249756,
      "learning_rate": 4.425758818703856e-06,
      "loss": 0.015,
      "step": 1860
    },
    {
      "epoch": 1.279782164737917,
      "grad_norm": 1.1276594400405884,
      "learning_rate": 4.3437243642329776e-06,
      "loss": 0.0098,
      "step": 1880
    },
    {
      "epoch": 1.2933968686181077,
      "grad_norm": 1.7784072160720825,
      "learning_rate": 4.261689909762101e-06,
      "loss": 0.0121,
      "step": 1900
    },
    {
      "epoch": 1.3070115724982982,
      "grad_norm": 1.2826921939849854,
      "learning_rate": 4.1796554552912225e-06,
      "loss": 0.0174,
      "step": 1920
    },
    {
      "epoch": 1.3206262763784888,
      "grad_norm": 1.4002376794815063,
      "learning_rate": 4.097621000820345e-06,
      "loss": 0.0114,
      "step": 1940
    },
    {
      "epoch": 1.3342409802586794,
      "grad_norm": 1.2641770839691162,
      "learning_rate": 4.0155865463494666e-06,
      "loss": 0.0147,
      "step": 1960
    },
    {
      "epoch": 1.34785568413887,
      "grad_norm": 0.7290912866592407,
      "learning_rate": 3.933552091878589e-06,
      "loss": 0.0124,
      "step": 1980
    },
    {
      "epoch": 1.3614703880190606,
      "grad_norm": 1.4693269729614258,
      "learning_rate": 3.8515176374077115e-06,
      "loss": 0.0117,
      "step": 2000
    },
    {
      "epoch": 1.3614703880190606,
      "eval_loss": 0.012558691203594208,
      "eval_runtime": 4786.693,
      "eval_samples_per_second": 2.455,
      "eval_steps_per_second": 0.307,
      "eval_wer": 0.8613524744994333,
      "step": 2000
    },
    {
      "epoch": 1.3750850918992512,
      "grad_norm": 2.861137866973877,
      "learning_rate": 3.769483182936834e-06,
      "loss": 0.0117,
      "step": 2020
    },
    {
      "epoch": 1.3886997957794418,
      "grad_norm": 1.5316095352172852,
      "learning_rate": 3.687448728465956e-06,
      "loss": 0.0153,
      "step": 2040
    },
    {
      "epoch": 1.4023144996596324,
      "grad_norm": 3.5821139812469482,
      "learning_rate": 3.6095159967186222e-06,
      "loss": 0.0094,
      "step": 2060
    },
    {
      "epoch": 1.415929203539823,
      "grad_norm": 2.0754270553588867,
      "learning_rate": 3.5274815422477443e-06,
      "loss": 0.0101,
      "step": 2080
    },
    {
      "epoch": 1.4295439074200136,
      "grad_norm": 1.8299485445022583,
      "learning_rate": 3.4454470877768663e-06,
      "loss": 0.0084,
      "step": 2100
    },
    {
      "epoch": 1.4431586113002042,
      "grad_norm": 1.1152702569961548,
      "learning_rate": 3.3634126333059884e-06,
      "loss": 0.0095,
      "step": 2120
    },
    {
      "epoch": 1.4567733151803948,
      "grad_norm": 1.150465965270996,
      "learning_rate": 3.2813781788351113e-06,
      "loss": 0.0081,
      "step": 2140
    },
    {
      "epoch": 1.4703880190605854,
      "grad_norm": 2.0751559734344482,
      "learning_rate": 3.1993437243642333e-06,
      "loss": 0.01,
      "step": 2160
    },
    {
      "epoch": 1.484002722940776,
      "grad_norm": 2.7779345512390137,
      "learning_rate": 3.1173092698933553e-06,
      "loss": 0.0101,
      "step": 2180
    },
    {
      "epoch": 1.4976174268209665,
      "grad_norm": 0.4274977743625641,
      "learning_rate": 3.035274815422478e-06,
      "loss": 0.0118,
      "step": 2200
    },
    {
      "epoch": 1.5112321307011571,
      "grad_norm": 1.2149277925491333,
      "learning_rate": 2.9532403609516e-06,
      "loss": 0.0102,
      "step": 2220
    },
    {
      "epoch": 1.5248468345813477,
      "grad_norm": 0.752834141254425,
      "learning_rate": 2.8712059064807223e-06,
      "loss": 0.0089,
      "step": 2240
    },
    {
      "epoch": 1.5384615384615383,
      "grad_norm": 0.41771841049194336,
      "learning_rate": 2.7891714520098443e-06,
      "loss": 0.0099,
      "step": 2260
    },
    {
      "epoch": 1.552076242341729,
      "grad_norm": 2.4532768726348877,
      "learning_rate": 2.707136997538967e-06,
      "loss": 0.0077,
      "step": 2280
    },
    {
      "epoch": 1.5656909462219195,
      "grad_norm": 1.496887445449829,
      "learning_rate": 2.625102543068089e-06,
      "loss": 0.013,
      "step": 2300
    },
    {
      "epoch": 1.5793056501021103,
      "grad_norm": 1.720847487449646,
      "learning_rate": 2.543068088597211e-06,
      "loss": 0.01,
      "step": 2320
    },
    {
      "epoch": 1.592920353982301,
      "grad_norm": 1.2190160751342773,
      "learning_rate": 2.4610336341263333e-06,
      "loss": 0.0082,
      "step": 2340
    },
    {
      "epoch": 1.6065350578624915,
      "grad_norm": 0.5788622498512268,
      "learning_rate": 2.3789991796554554e-06,
      "loss": 0.0066,
      "step": 2360
    },
    {
      "epoch": 1.620149761742682,
      "grad_norm": 1.7323695421218872,
      "learning_rate": 2.296964725184578e-06,
      "loss": 0.01,
      "step": 2380
    },
    {
      "epoch": 1.6337644656228727,
      "grad_norm": 1.7555092573165894,
      "learning_rate": 2.2149302707137e-06,
      "loss": 0.0152,
      "step": 2400
    },
    {
      "epoch": 1.6473791695030633,
      "grad_norm": 1.1416444778442383,
      "learning_rate": 2.132895816242822e-06,
      "loss": 0.0131,
      "step": 2420
    },
    {
      "epoch": 1.6609938733832539,
      "grad_norm": 3.30466365814209,
      "learning_rate": 2.0508613617719444e-06,
      "loss": 0.0073,
      "step": 2440
    },
    {
      "epoch": 1.6746085772634445,
      "grad_norm": 0.8352093696594238,
      "learning_rate": 1.9688269073010664e-06,
      "loss": 0.0061,
      "step": 2460
    },
    {
      "epoch": 1.6882232811436353,
      "grad_norm": 1.2296479940414429,
      "learning_rate": 1.8867924528301889e-06,
      "loss": 0.0094,
      "step": 2480
    },
    {
      "epoch": 1.7018379850238259,
      "grad_norm": 0.8584756255149841,
      "learning_rate": 1.8047579983593111e-06,
      "loss": 0.0089,
      "step": 2500
    },
    {
      "epoch": 1.7154526889040165,
      "grad_norm": 1.1400012969970703,
      "learning_rate": 1.7227235438884332e-06,
      "loss": 0.011,
      "step": 2520
    },
    {
      "epoch": 1.729067392784207,
      "grad_norm": 0.4385014474391937,
      "learning_rate": 1.6406890894175556e-06,
      "loss": 0.0087,
      "step": 2540
    },
    {
      "epoch": 1.7426820966643977,
      "grad_norm": 1.0549437999725342,
      "learning_rate": 1.5586546349466777e-06,
      "loss": 0.01,
      "step": 2560
    },
    {
      "epoch": 1.7562968005445883,
      "grad_norm": 2.075157403945923,
      "learning_rate": 1.4766201804758e-06,
      "loss": 0.0113,
      "step": 2580
    },
    {
      "epoch": 1.7699115044247788,
      "grad_norm": 0.4366838037967682,
      "learning_rate": 1.3945857260049222e-06,
      "loss": 0.0104,
      "step": 2600
    },
    {
      "epoch": 1.7835262083049694,
      "grad_norm": 0.24587690830230713,
      "learning_rate": 1.3125512715340444e-06,
      "loss": 0.0095,
      "step": 2620
    },
    {
      "epoch": 1.79714091218516,
      "grad_norm": 3.6478662490844727,
      "learning_rate": 1.2305168170631667e-06,
      "loss": 0.0092,
      "step": 2640
    },
    {
      "epoch": 1.8107556160653506,
      "grad_norm": 1.4280874729156494,
      "learning_rate": 1.148482362592289e-06,
      "loss": 0.0085,
      "step": 2660
    },
    {
      "epoch": 1.8243703199455412,
      "grad_norm": 1.3123339414596558,
      "learning_rate": 1.066447908121411e-06,
      "loss": 0.0082,
      "step": 2680
    },
    {
      "epoch": 1.8379850238257318,
      "grad_norm": 0.4807271659374237,
      "learning_rate": 9.844134536505332e-07,
      "loss": 0.0073,
      "step": 2700
    },
    {
      "epoch": 1.8515997277059224,
      "grad_norm": 2.657691478729248,
      "learning_rate": 9.023789991796556e-07,
      "loss": 0.009,
      "step": 2720
    },
    {
      "epoch": 1.865214431586113,
      "grad_norm": 0.9591490626335144,
      "learning_rate": 8.203445447087778e-07,
      "loss": 0.008,
      "step": 2740
    },
    {
      "epoch": 1.8788291354663036,
      "grad_norm": 0.7290093898773193,
      "learning_rate": 7.383100902379e-07,
      "loss": 0.0072,
      "step": 2760
    },
    {
      "epoch": 1.8924438393464942,
      "grad_norm": 0.3491244316101074,
      "learning_rate": 6.562756357670222e-07,
      "loss": 0.0072,
      "step": 2780
    },
    {
      "epoch": 1.9060585432266848,
      "grad_norm": 4.011445045471191,
      "learning_rate": 5.742411812961445e-07,
      "loss": 0.0096,
      "step": 2800
    },
    {
      "epoch": 1.9196732471068754,
      "grad_norm": 3.5024807453155518,
      "learning_rate": 4.922067268252666e-07,
      "loss": 0.0081,
      "step": 2820
    },
    {
      "epoch": 1.933287950987066,
      "grad_norm": 6.232346534729004,
      "learning_rate": 4.101722723543889e-07,
      "loss": 0.0105,
      "step": 2840
    },
    {
      "epoch": 1.9469026548672566,
      "grad_norm": 0.20569662749767303,
      "learning_rate": 3.281378178835111e-07,
      "loss": 0.0104,
      "step": 2860
    },
    {
      "epoch": 1.9605173587474471,
      "grad_norm": 0.38452497124671936,
      "learning_rate": 2.461033634126333e-07,
      "loss": 0.0072,
      "step": 2880
    },
    {
      "epoch": 1.9741320626276377,
      "grad_norm": 0.5276004076004028,
      "learning_rate": 1.6406890894175555e-07,
      "loss": 0.0077,
      "step": 2900
    },
    {
      "epoch": 1.9877467665078283,
      "grad_norm": 1.7120985984802246,
      "learning_rate": 8.203445447087778e-08,
      "loss": 0.0109,
      "step": 2920
    }
  ],
  "logging_steps": 20,
  "max_steps": 2938,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 1000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.712529611767808e+19,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}