Invalid JSON: Unexpected token 'N', ..."ad_norm": NaN,
"... is not valid JSON
| { | |
| "best_metric": 0.5302808284759521, | |
| "best_model_checkpoint": "Human_action_classifier/checkpoint-4300", | |
| "epoch": 7.0, | |
| "eval_steps": 100, | |
| "global_step": 4410, | |
| "is_hyper_param_search": false, | |
| "is_local_process_zero": true, | |
| "is_world_process_zero": true, | |
| "log_history": [ | |
| { | |
| "epoch": 0.02, | |
| "grad_norm": 1.9168592691421509, | |
| "learning_rate": 0.00019954648526077098, | |
| "loss": 2.6612, | |
| "step": 10 | |
| }, | |
| { | |
| "epoch": 0.03, | |
| "grad_norm": 2.498575210571289, | |
| "learning_rate": 0.00019909297052154195, | |
| "loss": 2.4529, | |
| "step": 20 | |
| }, | |
| { | |
| "epoch": 0.05, | |
| "grad_norm": 2.4278714656829834, | |
| "learning_rate": 0.00019863945578231293, | |
| "loss": 2.3062, | |
| "step": 30 | |
| }, | |
| { | |
| "epoch": 0.06, | |
| "grad_norm": 2.5514345169067383, | |
| "learning_rate": 0.0001981859410430839, | |
| "loss": 2.169, | |
| "step": 40 | |
| }, | |
| { | |
| "epoch": 0.08, | |
| "grad_norm": 2.1051814556121826, | |
| "learning_rate": 0.0001977324263038549, | |
| "loss": 1.9571, | |
| "step": 50 | |
| }, | |
| { | |
| "epoch": 0.1, | |
| "grad_norm": 2.5904359817504883, | |
| "learning_rate": 0.00019727891156462587, | |
| "loss": 1.7926, | |
| "step": 60 | |
| }, | |
| { | |
| "epoch": 0.11, | |
| "grad_norm": 2.9325740337371826, | |
| "learning_rate": 0.00019682539682539682, | |
| "loss": 1.6761, | |
| "step": 70 | |
| }, | |
| { | |
| "epoch": 0.13, | |
| "grad_norm": 3.636162519454956, | |
| "learning_rate": 0.00019637188208616781, | |
| "loss": 1.6388, | |
| "step": 80 | |
| }, | |
| { | |
| "epoch": 0.14, | |
| "grad_norm": 3.7232675552368164, | |
| "learning_rate": 0.0001959183673469388, | |
| "loss": 1.527, | |
| "step": 90 | |
| }, | |
| { | |
| "epoch": 0.16, | |
| "grad_norm": 3.467459201812744, | |
| "learning_rate": 0.00019546485260770976, | |
| "loss": 1.4545, | |
| "step": 100 | |
| }, | |
| { | |
| "epoch": 0.16, | |
| "eval_accuracy": 0.6706349206349206, | |
| "eval_loss": 1.3145380020141602, | |
| "eval_runtime": 93.6176, | |
| "eval_samples_per_second": 26.918, | |
| "eval_steps_per_second": 3.365, | |
| "step": 100 | |
| }, | |
| { | |
| "epoch": 0.17, | |
| "grad_norm": 2.4409241676330566, | |
| "learning_rate": 0.00019501133786848073, | |
| "loss": 1.331, | |
| "step": 110 | |
| }, | |
| { | |
| "epoch": 0.19, | |
| "grad_norm": 3.841000556945801, | |
| "learning_rate": 0.0001945578231292517, | |
| "loss": 1.4273, | |
| "step": 120 | |
| }, | |
| { | |
| "epoch": 0.21, | |
| "grad_norm": 4.918443202972412, | |
| "learning_rate": 0.0001941043083900227, | |
| "loss": 1.4078, | |
| "step": 130 | |
| }, | |
| { | |
| "epoch": 0.22, | |
| "grad_norm": 3.317399740219116, | |
| "learning_rate": 0.00019365079365079365, | |
| "loss": 1.3905, | |
| "step": 140 | |
| }, | |
| { | |
| "epoch": 0.24, | |
| "grad_norm": 5.5322418212890625, | |
| "learning_rate": 0.00019319727891156462, | |
| "loss": 1.5453, | |
| "step": 150 | |
| }, | |
| { | |
| "epoch": 0.25, | |
| "grad_norm": 4.116984844207764, | |
| "learning_rate": 0.00019274376417233562, | |
| "loss": 1.394, | |
| "step": 160 | |
| }, | |
| { | |
| "epoch": 0.27, | |
| "grad_norm": 3.141216278076172, | |
| "learning_rate": 0.0001922902494331066, | |
| "loss": 1.2375, | |
| "step": 170 | |
| }, | |
| { | |
| "epoch": 0.29, | |
| "grad_norm": 6.414323329925537, | |
| "learning_rate": 0.00019183673469387756, | |
| "loss": 1.3073, | |
| "step": 180 | |
| }, | |
| { | |
| "epoch": 0.3, | |
| "grad_norm": 5.794140338897705, | |
| "learning_rate": 0.00019138321995464854, | |
| "loss": 1.3405, | |
| "step": 190 | |
| }, | |
| { | |
| "epoch": 0.32, | |
| "grad_norm": 4.384269714355469, | |
| "learning_rate": 0.0001909297052154195, | |
| "loss": 1.2568, | |
| "step": 200 | |
| }, | |
| { | |
| "epoch": 0.32, | |
| "eval_accuracy": 0.7178571428571429, | |
| "eval_loss": 1.0386934280395508, | |
| "eval_runtime": 94.8513, | |
| "eval_samples_per_second": 26.568, | |
| "eval_steps_per_second": 3.321, | |
| "step": 200 | |
| }, | |
| { | |
| "epoch": 0.33, | |
| "grad_norm": 2.881287097930908, | |
| "learning_rate": 0.00019047619047619048, | |
| "loss": 1.2329, | |
| "step": 210 | |
| }, | |
| { | |
| "epoch": 0.35, | |
| "grad_norm": 4.77938985824585, | |
| "learning_rate": 0.00019002267573696145, | |
| "loss": 1.1439, | |
| "step": 220 | |
| }, | |
| { | |
| "epoch": 0.37, | |
| "grad_norm": 5.865462303161621, | |
| "learning_rate": 0.00018956916099773243, | |
| "loss": 1.2314, | |
| "step": 230 | |
| }, | |
| { | |
| "epoch": 0.38, | |
| "grad_norm": 3.464250326156616, | |
| "learning_rate": 0.00018911564625850343, | |
| "loss": 1.329, | |
| "step": 240 | |
| }, | |
| { | |
| "epoch": 0.4, | |
| "grad_norm": 3.2501161098480225, | |
| "learning_rate": 0.0001886621315192744, | |
| "loss": 1.3277, | |
| "step": 250 | |
| }, | |
| { | |
| "epoch": 0.41, | |
| "grad_norm": 5.26331901550293, | |
| "learning_rate": 0.00018820861678004534, | |
| "loss": 1.2876, | |
| "step": 260 | |
| }, | |
| { | |
| "epoch": 0.43, | |
| "grad_norm": 2.62630295753479, | |
| "learning_rate": 0.00018775510204081634, | |
| "loss": 1.1428, | |
| "step": 270 | |
| }, | |
| { | |
| "epoch": 0.44, | |
| "grad_norm": 6.121326923370361, | |
| "learning_rate": 0.00018730158730158731, | |
| "loss": 1.2277, | |
| "step": 280 | |
| }, | |
| { | |
| "epoch": 0.46, | |
| "grad_norm": 5.415524005889893, | |
| "learning_rate": 0.0001868480725623583, | |
| "loss": 1.1641, | |
| "step": 290 | |
| }, | |
| { | |
| "epoch": 0.48, | |
| "grad_norm": 7.849498271942139, | |
| "learning_rate": 0.00018639455782312926, | |
| "loss": 1.3145, | |
| "step": 300 | |
| }, | |
| { | |
| "epoch": 0.48, | |
| "eval_accuracy": 0.7134920634920635, | |
| "eval_loss": 1.0026524066925049, | |
| "eval_runtime": 89.5917, | |
| "eval_samples_per_second": 28.128, | |
| "eval_steps_per_second": 3.516, | |
| "step": 300 | |
| }, | |
| { | |
| "epoch": 0.49, | |
| "grad_norm": 3.9149835109710693, | |
| "learning_rate": 0.00018594104308390023, | |
| "loss": 1.1359, | |
| "step": 310 | |
| }, | |
| { | |
| "epoch": 0.51, | |
| "grad_norm": 4.712550163269043, | |
| "learning_rate": 0.0001854875283446712, | |
| "loss": 1.1196, | |
| "step": 320 | |
| }, | |
| { | |
| "epoch": 0.52, | |
| "grad_norm": 4.665726184844971, | |
| "learning_rate": 0.0001850340136054422, | |
| "loss": 1.1178, | |
| "step": 330 | |
| }, | |
| { | |
| "epoch": 0.54, | |
| "grad_norm": 4.421199798583984, | |
| "learning_rate": 0.00018458049886621315, | |
| "loss": 1.093, | |
| "step": 340 | |
| }, | |
| { | |
| "epoch": 0.56, | |
| "grad_norm": 5.4558305740356445, | |
| "learning_rate": 0.00018412698412698412, | |
| "loss": 1.0638, | |
| "step": 350 | |
| }, | |
| { | |
| "epoch": 0.57, | |
| "grad_norm": 6.7150559425354, | |
| "learning_rate": 0.00018367346938775512, | |
| "loss": 1.21, | |
| "step": 360 | |
| }, | |
| { | |
| "epoch": 0.59, | |
| "grad_norm": 4.524730205535889, | |
| "learning_rate": 0.0001832199546485261, | |
| "loss": 1.199, | |
| "step": 370 | |
| }, | |
| { | |
| "epoch": 0.6, | |
| "grad_norm": 5.457512855529785, | |
| "learning_rate": 0.00018276643990929706, | |
| "loss": 1.202, | |
| "step": 380 | |
| }, | |
| { | |
| "epoch": 0.62, | |
| "grad_norm": 5.54859733581543, | |
| "learning_rate": 0.00018231292517006804, | |
| "loss": 1.1671, | |
| "step": 390 | |
| }, | |
| { | |
| "epoch": 0.63, | |
| "grad_norm": 3.217099905014038, | |
| "learning_rate": 0.000181859410430839, | |
| "loss": 1.0866, | |
| "step": 400 | |
| }, | |
| { | |
| "epoch": 0.63, | |
| "eval_accuracy": 0.7376984126984127, | |
| "eval_loss": 0.8882665038108826, | |
| "eval_runtime": 94.504, | |
| "eval_samples_per_second": 26.666, | |
| "eval_steps_per_second": 3.333, | |
| "step": 400 | |
| }, | |
| { | |
| "epoch": 0.65, | |
| "grad_norm": 5.335367202758789, | |
| "learning_rate": 0.00018140589569161, | |
| "loss": 1.0176, | |
| "step": 410 | |
| }, | |
| { | |
| "epoch": 0.67, | |
| "grad_norm": 7.274291515350342, | |
| "learning_rate": 0.00018099773242630387, | |
| "loss": 1.2467, | |
| "step": 420 | |
| }, | |
| { | |
| "epoch": 0.68, | |
| "grad_norm": 5.159976959228516, | |
| "learning_rate": 0.00018054421768707484, | |
| "loss": 1.127, | |
| "step": 430 | |
| }, | |
| { | |
| "epoch": 0.7, | |
| "grad_norm": 5.351009845733643, | |
| "learning_rate": 0.0001800907029478458, | |
| "loss": 1.2382, | |
| "step": 440 | |
| }, | |
| { | |
| "epoch": 0.71, | |
| "grad_norm": 4.435301303863525, | |
| "learning_rate": 0.00017963718820861678, | |
| "loss": 1.061, | |
| "step": 450 | |
| }, | |
| { | |
| "epoch": 0.73, | |
| "grad_norm": 4.027517795562744, | |
| "learning_rate": 0.00017918367346938776, | |
| "loss": 1.0432, | |
| "step": 460 | |
| }, | |
| { | |
| "epoch": 0.75, | |
| "grad_norm": 4.309961795806885, | |
| "learning_rate": 0.00017873015873015876, | |
| "loss": 0.9949, | |
| "step": 470 | |
| }, | |
| { | |
| "epoch": 0.76, | |
| "grad_norm": 4.835757732391357, | |
| "learning_rate": 0.00017827664399092973, | |
| "loss": 1.1899, | |
| "step": 480 | |
| }, | |
| { | |
| "epoch": 0.78, | |
| "grad_norm": 5.26571798324585, | |
| "learning_rate": 0.00017782312925170067, | |
| "loss": 1.1235, | |
| "step": 490 | |
| }, | |
| { | |
| "epoch": 0.79, | |
| "grad_norm": 3.8406684398651123, | |
| "learning_rate": 0.00017736961451247167, | |
| "loss": 1.0036, | |
| "step": 500 | |
| }, | |
| { | |
| "epoch": 0.79, | |
| "eval_accuracy": 0.7321428571428571, | |
| "eval_loss": 0.897292971611023, | |
| "eval_runtime": 96.2225, | |
| "eval_samples_per_second": 26.189, | |
| "eval_steps_per_second": 3.274, | |
| "step": 500 | |
| }, | |
| { | |
| "epoch": 0.81, | |
| "grad_norm": 4.993399143218994, | |
| "learning_rate": 0.00017691609977324264, | |
| "loss": 0.8682, | |
| "step": 510 | |
| }, | |
| { | |
| "epoch": 0.83, | |
| "grad_norm": 4.4026875495910645, | |
| "learning_rate": 0.00017646258503401362, | |
| "loss": 1.0603, | |
| "step": 520 | |
| }, | |
| { | |
| "epoch": 0.84, | |
| "grad_norm": 4.832537651062012, | |
| "learning_rate": 0.0001760090702947846, | |
| "loss": 1.034, | |
| "step": 530 | |
| }, | |
| { | |
| "epoch": 0.86, | |
| "grad_norm": 5.253382682800293, | |
| "learning_rate": 0.00017555555555555556, | |
| "loss": 0.9938, | |
| "step": 540 | |
| }, | |
| { | |
| "epoch": 0.87, | |
| "grad_norm": 3.681997060775757, | |
| "learning_rate": 0.00017510204081632653, | |
| "loss": 1.0082, | |
| "step": 550 | |
| }, | |
| { | |
| "epoch": 0.89, | |
| "grad_norm": 6.324045181274414, | |
| "learning_rate": 0.0001746485260770975, | |
| "loss": 0.9629, | |
| "step": 560 | |
| }, | |
| { | |
| "epoch": 0.9, | |
| "grad_norm": 5.0340352058410645, | |
| "learning_rate": 0.00017419501133786848, | |
| "loss": 1.1184, | |
| "step": 570 | |
| }, | |
| { | |
| "epoch": 0.92, | |
| "grad_norm": 3.532378673553467, | |
| "learning_rate": 0.00017374149659863948, | |
| "loss": 0.9167, | |
| "step": 580 | |
| }, | |
| { | |
| "epoch": 0.94, | |
| "grad_norm": 5.029895305633545, | |
| "learning_rate": 0.00017328798185941045, | |
| "loss": 1.1213, | |
| "step": 590 | |
| }, | |
| { | |
| "epoch": 0.95, | |
| "grad_norm": 4.585740566253662, | |
| "learning_rate": 0.00017283446712018142, | |
| "loss": 1.1811, | |
| "step": 600 | |
| }, | |
| { | |
| "epoch": 0.95, | |
| "eval_accuracy": 0.7571428571428571, | |
| "eval_loss": 0.8048315644264221, | |
| "eval_runtime": 95.9399, | |
| "eval_samples_per_second": 26.266, | |
| "eval_steps_per_second": 3.283, | |
| "step": 600 | |
| }, | |
| { | |
| "epoch": 0.97, | |
| "grad_norm": 2.9866397380828857, | |
| "learning_rate": 0.0001723809523809524, | |
| "loss": 1.2417, | |
| "step": 610 | |
| }, | |
| { | |
| "epoch": 0.98, | |
| "grad_norm": 4.54673433303833, | |
| "learning_rate": 0.00017192743764172337, | |
| "loss": 1.1588, | |
| "step": 620 | |
| }, | |
| { | |
| "epoch": 1.0, | |
| "grad_norm": 4.411011695861816, | |
| "learning_rate": 0.00017147392290249434, | |
| "loss": 0.9512, | |
| "step": 630 | |
| }, | |
| { | |
| "epoch": 1.02, | |
| "grad_norm": 4.393170356750488, | |
| "learning_rate": 0.0001710204081632653, | |
| "loss": 0.9679, | |
| "step": 640 | |
| }, | |
| { | |
| "epoch": 1.03, | |
| "grad_norm": 5.261455535888672, | |
| "learning_rate": 0.00017056689342403628, | |
| "loss": 0.9089, | |
| "step": 650 | |
| }, | |
| { | |
| "epoch": 1.05, | |
| "grad_norm": 4.93878173828125, | |
| "learning_rate": 0.00017011337868480726, | |
| "loss": 0.8341, | |
| "step": 660 | |
| }, | |
| { | |
| "epoch": 1.06, | |
| "grad_norm": 4.5593156814575195, | |
| "learning_rate": 0.00016965986394557825, | |
| "loss": 0.9952, | |
| "step": 670 | |
| }, | |
| { | |
| "epoch": 1.08, | |
| "grad_norm": 3.9355390071868896, | |
| "learning_rate": 0.0001692063492063492, | |
| "loss": 0.9956, | |
| "step": 680 | |
| }, | |
| { | |
| "epoch": 1.1, | |
| "grad_norm": 4.096402645111084, | |
| "learning_rate": 0.00016875283446712017, | |
| "loss": 0.9566, | |
| "step": 690 | |
| }, | |
| { | |
| "epoch": 1.11, | |
| "grad_norm": 5.853695869445801, | |
| "learning_rate": 0.00016829931972789117, | |
| "loss": 0.9242, | |
| "step": 700 | |
| }, | |
| { | |
| "epoch": 1.11, | |
| "eval_accuracy": 0.7273809523809524, | |
| "eval_loss": 0.9095195531845093, | |
| "eval_runtime": 92.7769, | |
| "eval_samples_per_second": 27.162, | |
| "eval_steps_per_second": 3.395, | |
| "step": 700 | |
| }, | |
| { | |
| "epoch": 1.13, | |
| "grad_norm": 5.18225622177124, | |
| "learning_rate": 0.00016784580498866214, | |
| "loss": 1.1815, | |
| "step": 710 | |
| }, | |
| { | |
| "epoch": 1.14, | |
| "grad_norm": 6.094394683837891, | |
| "learning_rate": 0.00016739229024943312, | |
| "loss": 1.0088, | |
| "step": 720 | |
| }, | |
| { | |
| "epoch": 1.16, | |
| "grad_norm": 3.388333320617676, | |
| "learning_rate": 0.0001669387755102041, | |
| "loss": 1.0219, | |
| "step": 730 | |
| }, | |
| { | |
| "epoch": 1.17, | |
| "grad_norm": 2.702335834503174, | |
| "learning_rate": 0.00016648526077097506, | |
| "loss": 0.995, | |
| "step": 740 | |
| }, | |
| { | |
| "epoch": 1.19, | |
| "grad_norm": 6.5921735763549805, | |
| "learning_rate": 0.00016603174603174606, | |
| "loss": 1.0336, | |
| "step": 750 | |
| }, | |
| { | |
| "epoch": 1.21, | |
| "grad_norm": 7.6583781242370605, | |
| "learning_rate": 0.000165578231292517, | |
| "loss": 0.8889, | |
| "step": 760 | |
| }, | |
| { | |
| "epoch": 1.22, | |
| "grad_norm": 5.093008518218994, | |
| "learning_rate": 0.00016512471655328798, | |
| "loss": 0.8492, | |
| "step": 770 | |
| }, | |
| { | |
| "epoch": 1.24, | |
| "grad_norm": 4.372059345245361, | |
| "learning_rate": 0.00016467120181405898, | |
| "loss": 0.8115, | |
| "step": 780 | |
| }, | |
| { | |
| "epoch": 1.25, | |
| "grad_norm": 4.1783857345581055, | |
| "learning_rate": 0.00016421768707482995, | |
| "loss": 0.834, | |
| "step": 790 | |
| }, | |
| { | |
| "epoch": 1.27, | |
| "grad_norm": 4.295966148376465, | |
| "learning_rate": 0.0001638095238095238, | |
| "loss": 0.9477, | |
| "step": 800 | |
| }, | |
| { | |
| "epoch": 1.27, | |
| "eval_accuracy": 0.7619047619047619, | |
| "eval_loss": 0.8036767244338989, | |
| "eval_runtime": 89.9677, | |
| "eval_samples_per_second": 28.01, | |
| "eval_steps_per_second": 3.501, | |
| "step": 800 | |
| }, | |
| { | |
| "epoch": 1.29, | |
| "grad_norm": 4.976074695587158, | |
| "learning_rate": 0.0001633560090702948, | |
| "loss": 0.8778, | |
| "step": 810 | |
| }, | |
| { | |
| "epoch": 1.3, | |
| "grad_norm": 3.34804105758667, | |
| "learning_rate": 0.00016290249433106578, | |
| "loss": 0.9856, | |
| "step": 820 | |
| }, | |
| { | |
| "epoch": 1.32, | |
| "grad_norm": 6.416792869567871, | |
| "learning_rate": 0.00016244897959183672, | |
| "loss": 0.9829, | |
| "step": 830 | |
| }, | |
| { | |
| "epoch": 1.33, | |
| "grad_norm": 5.707892894744873, | |
| "learning_rate": 0.00016199546485260772, | |
| "loss": 0.9397, | |
| "step": 840 | |
| }, | |
| { | |
| "epoch": 1.35, | |
| "grad_norm": 4.620942115783691, | |
| "learning_rate": 0.0001615419501133787, | |
| "loss": 1.078, | |
| "step": 850 | |
| }, | |
| { | |
| "epoch": 1.37, | |
| "grad_norm": 4.452626705169678, | |
| "learning_rate": 0.00016108843537414967, | |
| "loss": 0.9531, | |
| "step": 860 | |
| }, | |
| { | |
| "epoch": 1.38, | |
| "grad_norm": 5.697545528411865, | |
| "learning_rate": 0.00016063492063492064, | |
| "loss": 1.0438, | |
| "step": 870 | |
| }, | |
| { | |
| "epoch": 1.4, | |
| "grad_norm": 4.365375518798828, | |
| "learning_rate": 0.0001601814058956916, | |
| "loss": 0.9024, | |
| "step": 880 | |
| }, | |
| { | |
| "epoch": 1.41, | |
| "grad_norm": 5.501079082489014, | |
| "learning_rate": 0.00015972789115646259, | |
| "loss": 0.9091, | |
| "step": 890 | |
| }, | |
| { | |
| "epoch": 1.43, | |
| "grad_norm": 4.409488201141357, | |
| "learning_rate": 0.00015927437641723358, | |
| "loss": 0.8634, | |
| "step": 900 | |
| }, | |
| { | |
| "epoch": 1.43, | |
| "eval_accuracy": 0.7642857142857142, | |
| "eval_loss": 0.7937940359115601, | |
| "eval_runtime": 92.0164, | |
| "eval_samples_per_second": 27.386, | |
| "eval_steps_per_second": 3.423, | |
| "step": 900 | |
| }, | |
| { | |
| "epoch": 1.44, | |
| "grad_norm": 4.543234825134277, | |
| "learning_rate": 0.00015882086167800453, | |
| "loss": 0.7896, | |
| "step": 910 | |
| }, | |
| { | |
| "epoch": 1.46, | |
| "grad_norm": 3.8954508304595947, | |
| "learning_rate": 0.0001583673469387755, | |
| "loss": 0.9597, | |
| "step": 920 | |
| }, | |
| { | |
| "epoch": 1.48, | |
| "grad_norm": 7.744637489318848, | |
| "learning_rate": 0.0001579138321995465, | |
| "loss": 0.9585, | |
| "step": 930 | |
| }, | |
| { | |
| "epoch": 1.49, | |
| "grad_norm": 3.619973659515381, | |
| "learning_rate": 0.00015746031746031747, | |
| "loss": 0.826, | |
| "step": 940 | |
| }, | |
| { | |
| "epoch": 1.51, | |
| "grad_norm": 10.431225776672363, | |
| "learning_rate": 0.00015700680272108845, | |
| "loss": 0.9651, | |
| "step": 950 | |
| }, | |
| { | |
| "epoch": 1.52, | |
| "grad_norm": 4.897637367248535, | |
| "learning_rate": 0.00015655328798185942, | |
| "loss": 0.9136, | |
| "step": 960 | |
| }, | |
| { | |
| "epoch": 1.54, | |
| "grad_norm": 2.429625988006592, | |
| "learning_rate": 0.0001560997732426304, | |
| "loss": 0.9676, | |
| "step": 970 | |
| }, | |
| { | |
| "epoch": 1.56, | |
| "grad_norm": 2.8244125843048096, | |
| "learning_rate": 0.00015564625850340136, | |
| "loss": 0.8796, | |
| "step": 980 | |
| }, | |
| { | |
| "epoch": 1.57, | |
| "grad_norm": 3.9079954624176025, | |
| "learning_rate": 0.00015519274376417234, | |
| "loss": 0.7595, | |
| "step": 990 | |
| }, | |
| { | |
| "epoch": 1.59, | |
| "grad_norm": 8.262408256530762, | |
| "learning_rate": 0.0001547392290249433, | |
| "loss": 1.0098, | |
| "step": 1000 | |
| }, | |
| { | |
| "epoch": 1.59, | |
| "eval_accuracy": 0.7765873015873016, | |
| "eval_loss": 0.7327961325645447, | |
| "eval_runtime": 90.6084, | |
| "eval_samples_per_second": 27.812, | |
| "eval_steps_per_second": 3.477, | |
| "step": 1000 | |
| }, | |
| { | |
| "epoch": 1.6, | |
| "grad_norm": 5.290775299072266, | |
| "learning_rate": 0.0001542857142857143, | |
| "loss": 0.8282, | |
| "step": 1010 | |
| }, | |
| { | |
| "epoch": 1.62, | |
| "grad_norm": 5.40395450592041, | |
| "learning_rate": 0.00015383219954648528, | |
| "loss": 1.0232, | |
| "step": 1020 | |
| }, | |
| { | |
| "epoch": 1.63, | |
| "grad_norm": 6.36408805847168, | |
| "learning_rate": 0.00015337868480725622, | |
| "loss": 0.9572, | |
| "step": 1030 | |
| }, | |
| { | |
| "epoch": 1.65, | |
| "grad_norm": 3.8068315982818604, | |
| "learning_rate": 0.00015292517006802722, | |
| "loss": 0.8457, | |
| "step": 1040 | |
| }, | |
| { | |
| "epoch": 1.67, | |
| "grad_norm": 5.893327236175537, | |
| "learning_rate": 0.0001524716553287982, | |
| "loss": 0.915, | |
| "step": 1050 | |
| }, | |
| { | |
| "epoch": 1.68, | |
| "grad_norm": 4.604687213897705, | |
| "learning_rate": 0.00015201814058956917, | |
| "loss": 0.9143, | |
| "step": 1060 | |
| }, | |
| { | |
| "epoch": 1.7, | |
| "grad_norm": 4.973442077636719, | |
| "learning_rate": 0.00015156462585034014, | |
| "loss": 0.8211, | |
| "step": 1070 | |
| }, | |
| { | |
| "epoch": 1.71, | |
| "grad_norm": 4.463946342468262, | |
| "learning_rate": 0.0001511111111111111, | |
| "loss": 0.8649, | |
| "step": 1080 | |
| }, | |
| { | |
| "epoch": 1.73, | |
| "grad_norm": 4.718800067901611, | |
| "learning_rate": 0.0001506575963718821, | |
| "loss": 0.7986, | |
| "step": 1090 | |
| }, | |
| { | |
| "epoch": 1.75, | |
| "grad_norm": 4.523365020751953, | |
| "learning_rate": 0.00015020408163265306, | |
| "loss": 0.8176, | |
| "step": 1100 | |
| }, | |
| { | |
| "epoch": 1.75, | |
| "eval_accuracy": 0.7515873015873016, | |
| "eval_loss": 0.8064602017402649, | |
| "eval_runtime": 93.1844, | |
| "eval_samples_per_second": 27.043, | |
| "eval_steps_per_second": 3.38, | |
| "step": 1100 | |
| }, | |
| { | |
| "epoch": 1.76, | |
| "grad_norm": 5.417792797088623, | |
| "learning_rate": 0.00014975056689342403, | |
| "loss": 1.0616, | |
| "step": 1110 | |
| }, | |
| { | |
| "epoch": 1.78, | |
| "grad_norm": 6.2752275466918945, | |
| "learning_rate": 0.00014929705215419503, | |
| "loss": 0.9484, | |
| "step": 1120 | |
| }, | |
| { | |
| "epoch": 1.79, | |
| "grad_norm": 6.395713806152344, | |
| "learning_rate": 0.000148843537414966, | |
| "loss": 1.0357, | |
| "step": 1130 | |
| }, | |
| { | |
| "epoch": 1.81, | |
| "grad_norm": 4.707973003387451, | |
| "learning_rate": 0.00014839002267573697, | |
| "loss": 0.922, | |
| "step": 1140 | |
| }, | |
| { | |
| "epoch": 1.83, | |
| "grad_norm": 6.663784980773926, | |
| "learning_rate": 0.00014793650793650795, | |
| "loss": 1.141, | |
| "step": 1150 | |
| }, | |
| { | |
| "epoch": 1.84, | |
| "grad_norm": 4.041499137878418, | |
| "learning_rate": 0.00014748299319727892, | |
| "loss": 0.9529, | |
| "step": 1160 | |
| }, | |
| { | |
| "epoch": 1.86, | |
| "grad_norm": 5.212307453155518, | |
| "learning_rate": 0.0001470294784580499, | |
| "loss": 0.6781, | |
| "step": 1170 | |
| }, | |
| { | |
| "epoch": 1.87, | |
| "grad_norm": 5.991590976715088, | |
| "learning_rate": 0.00014657596371882086, | |
| "loss": 0.8701, | |
| "step": 1180 | |
| }, | |
| { | |
| "epoch": 1.89, | |
| "grad_norm": 8.68984317779541, | |
| "learning_rate": 0.00014612244897959183, | |
| "loss": 1.0802, | |
| "step": 1190 | |
| }, | |
| { | |
| "epoch": 1.9, | |
| "grad_norm": 6.660585403442383, | |
| "learning_rate": 0.0001456689342403628, | |
| "loss": 0.8072, | |
| "step": 1200 | |
| }, | |
| { | |
| "epoch": 1.9, | |
| "eval_accuracy": 0.7694444444444445, | |
| "eval_loss": 0.77680903673172, | |
| "eval_runtime": 91.7003, | |
| "eval_samples_per_second": 27.481, | |
| "eval_steps_per_second": 3.435, | |
| "step": 1200 | |
| }, | |
| { | |
| "epoch": 1.92, | |
| "grad_norm": 3.435088872909546, | |
| "learning_rate": 0.0001452154195011338, | |
| "loss": 1.0272, | |
| "step": 1210 | |
| }, | |
| { | |
| "epoch": 1.94, | |
| "grad_norm": 3.200514078140259, | |
| "learning_rate": 0.00014476190476190475, | |
| "loss": 1.0246, | |
| "step": 1220 | |
| }, | |
| { | |
| "epoch": 1.95, | |
| "grad_norm": 3.882340669631958, | |
| "learning_rate": 0.00014430839002267575, | |
| "loss": 0.7745, | |
| "step": 1230 | |
| }, | |
| { | |
| "epoch": 1.97, | |
| "grad_norm": 3.1602838039398193, | |
| "learning_rate": 0.00014385487528344672, | |
| "loss": 0.9499, | |
| "step": 1240 | |
| }, | |
| { | |
| "epoch": 1.98, | |
| "grad_norm": 2.896543502807617, | |
| "learning_rate": 0.0001434013605442177, | |
| "loss": 0.9155, | |
| "step": 1250 | |
| }, | |
| { | |
| "epoch": 2.0, | |
| "grad_norm": 4.671875476837158, | |
| "learning_rate": 0.00014294784580498867, | |
| "loss": 0.8183, | |
| "step": 1260 | |
| }, | |
| { | |
| "epoch": 2.02, | |
| "grad_norm": 6.149994373321533, | |
| "learning_rate": 0.00014249433106575964, | |
| "loss": 0.8043, | |
| "step": 1270 | |
| }, | |
| { | |
| "epoch": 2.03, | |
| "grad_norm": 4.373509407043457, | |
| "learning_rate": 0.0001420408163265306, | |
| "loss": 0.7205, | |
| "step": 1280 | |
| }, | |
| { | |
| "epoch": 2.05, | |
| "grad_norm": 5.70250940322876, | |
| "learning_rate": 0.0001415873015873016, | |
| "loss": 0.8109, | |
| "step": 1290 | |
| }, | |
| { | |
| "epoch": 2.06, | |
| "grad_norm": 4.3769683837890625, | |
| "learning_rate": 0.00014113378684807256, | |
| "loss": 0.7739, | |
| "step": 1300 | |
| }, | |
| { | |
| "epoch": 2.06, | |
| "eval_accuracy": 0.7726190476190476, | |
| "eval_loss": 0.7623938322067261, | |
| "eval_runtime": 108.4142, | |
| "eval_samples_per_second": 23.244, | |
| "eval_steps_per_second": 2.906, | |
| "step": 1300 | |
| }, | |
| { | |
| "epoch": 2.08, | |
| "grad_norm": 8.936156272888184, | |
| "learning_rate": 0.00014068027210884353, | |
| "loss": 0.7361, | |
| "step": 1310 | |
| }, | |
| { | |
| "epoch": 2.1, | |
| "grad_norm": 6.855538845062256, | |
| "learning_rate": 0.00014022675736961453, | |
| "loss": 0.7778, | |
| "step": 1320 | |
| }, | |
| { | |
| "epoch": 2.11, | |
| "grad_norm": 2.5195322036743164, | |
| "learning_rate": 0.0001397732426303855, | |
| "loss": 0.7778, | |
| "step": 1330 | |
| }, | |
| { | |
| "epoch": 2.13, | |
| "grad_norm": 4.916295051574707, | |
| "learning_rate": 0.00013931972789115645, | |
| "loss": 0.8522, | |
| "step": 1340 | |
| }, | |
| { | |
| "epoch": 2.14, | |
| "grad_norm": 5.055403232574463, | |
| "learning_rate": 0.00013886621315192745, | |
| "loss": 0.6851, | |
| "step": 1350 | |
| }, | |
| { | |
| "epoch": 2.16, | |
| "grad_norm": 5.334274768829346, | |
| "learning_rate": 0.00013841269841269842, | |
| "loss": 0.9657, | |
| "step": 1360 | |
| }, | |
| { | |
| "epoch": 2.17, | |
| "grad_norm": 6.083943843841553, | |
| "learning_rate": 0.00013795918367346942, | |
| "loss": 0.6776, | |
| "step": 1370 | |
| }, | |
| { | |
| "epoch": 2.19, | |
| "grad_norm": 4.362452983856201, | |
| "learning_rate": 0.00013750566893424036, | |
| "loss": 0.7703, | |
| "step": 1380 | |
| }, | |
| { | |
| "epoch": 2.21, | |
| "grad_norm": 6.978400707244873, | |
| "learning_rate": 0.00013705215419501133, | |
| "loss": 0.7911, | |
| "step": 1390 | |
| }, | |
| { | |
| "epoch": 2.22, | |
| "grad_norm": 4.561004638671875, | |
| "learning_rate": 0.00013659863945578233, | |
| "loss": 0.6851, | |
| "step": 1400 | |
| }, | |
| { | |
| "epoch": 2.22, | |
| "eval_accuracy": 0.794047619047619, | |
| "eval_loss": 0.668690025806427, | |
| "eval_runtime": 91.4622, | |
| "eval_samples_per_second": 27.552, | |
| "eval_steps_per_second": 3.444, | |
| "step": 1400 | |
| }, | |
| { | |
| "epoch": 2.24, | |
| "grad_norm": 4.068347930908203, | |
| "learning_rate": 0.0001361451247165533, | |
| "loss": 0.6705, | |
| "step": 1410 | |
| }, | |
| { | |
| "epoch": 2.25, | |
| "grad_norm": 4.628905773162842, | |
| "learning_rate": 0.00013569160997732425, | |
| "loss": 0.8955, | |
| "step": 1420 | |
| }, | |
| { | |
| "epoch": 2.27, | |
| "grad_norm": 6.214323997497559, | |
| "learning_rate": 0.00013523809523809525, | |
| "loss": 0.785, | |
| "step": 1430 | |
| }, | |
| { | |
| "epoch": 2.29, | |
| "grad_norm": 3.4356954097747803, | |
| "learning_rate": 0.00013478458049886622, | |
| "loss": 0.6415, | |
| "step": 1440 | |
| }, | |
| { | |
| "epoch": 2.3, | |
| "grad_norm": 6.76524019241333, | |
| "learning_rate": 0.0001343310657596372, | |
| "loss": 0.8914, | |
| "step": 1450 | |
| }, | |
| { | |
| "epoch": 2.32, | |
| "grad_norm": 4.742693901062012, | |
| "learning_rate": 0.00013387755102040817, | |
| "loss": 0.8758, | |
| "step": 1460 | |
| }, | |
| { | |
| "epoch": 2.33, | |
| "grad_norm": 3.940936326980591, | |
| "learning_rate": 0.00013342403628117914, | |
| "loss": 0.6984, | |
| "step": 1470 | |
| }, | |
| { | |
| "epoch": 2.35, | |
| "grad_norm": 7.005272388458252, | |
| "learning_rate": 0.0001329705215419501, | |
| "loss": 0.833, | |
| "step": 1480 | |
| }, | |
| { | |
| "epoch": 2.37, | |
| "grad_norm": 5.6659722328186035, | |
| "learning_rate": 0.0001325170068027211, | |
| "loss": 0.7352, | |
| "step": 1490 | |
| }, | |
| { | |
| "epoch": 2.38, | |
| "grad_norm": 4.9742045402526855, | |
| "learning_rate": 0.00013206349206349206, | |
| "loss": 0.7496, | |
| "step": 1500 | |
| }, | |
| { | |
| "epoch": 2.38, | |
| "eval_accuracy": 0.7948412698412698, | |
| "eval_loss": 0.6806091070175171, | |
| "eval_runtime": 91.1192, | |
| "eval_samples_per_second": 27.656, | |
| "eval_steps_per_second": 3.457, | |
| "step": 1500 | |
| }, | |
| { | |
| "epoch": 2.4, | |
| "grad_norm": 5.499560832977295, | |
| "learning_rate": 0.00013160997732426303, | |
| "loss": 0.8274, | |
| "step": 1510 | |
| }, | |
| { | |
| "epoch": 2.41, | |
| "grad_norm": 4.391964435577393, | |
| "learning_rate": 0.00013115646258503403, | |
| "loss": 0.7892, | |
| "step": 1520 | |
| }, | |
| { | |
| "epoch": 2.43, | |
| "grad_norm": 4.317266464233398, | |
| "learning_rate": 0.000130702947845805, | |
| "loss": 0.7462, | |
| "step": 1530 | |
| }, | |
| { | |
| "epoch": 2.44, | |
| "grad_norm": 3.7989251613616943, | |
| "learning_rate": 0.00013024943310657597, | |
| "loss": 0.8322, | |
| "step": 1540 | |
| }, | |
| { | |
| "epoch": 2.46, | |
| "grad_norm": 4.737931251525879, | |
| "learning_rate": 0.00012979591836734695, | |
| "loss": 0.9, | |
| "step": 1550 | |
| }, | |
| { | |
| "epoch": 2.48, | |
| "grad_norm": 2.4748082160949707, | |
| "learning_rate": 0.00012934240362811792, | |
| "loss": 0.7613, | |
| "step": 1560 | |
| }, | |
| { | |
| "epoch": 2.49, | |
| "grad_norm": 5.137518882751465, | |
| "learning_rate": 0.00012888888888888892, | |
| "loss": 0.8596, | |
| "step": 1570 | |
| }, | |
| { | |
| "epoch": 2.51, | |
| "grad_norm": 3.9287266731262207, | |
| "learning_rate": 0.00012843537414965986, | |
| "loss": 0.9221, | |
| "step": 1580 | |
| }, | |
| { | |
| "epoch": 2.52, | |
| "grad_norm": 3.864816665649414, | |
| "learning_rate": 0.00012798185941043083, | |
| "loss": 0.6858, | |
| "step": 1590 | |
| }, | |
| { | |
| "epoch": 2.54, | |
| "grad_norm": 3.6453895568847656, | |
| "learning_rate": 0.00012752834467120183, | |
| "loss": 0.7352, | |
| "step": 1600 | |
| }, | |
| { | |
| "epoch": 2.54, | |
| "eval_accuracy": 0.7896825396825397, | |
| "eval_loss": 0.6942620277404785, | |
| "eval_runtime": 90.1988, | |
| "eval_samples_per_second": 27.938, | |
| "eval_steps_per_second": 3.492, | |
| "step": 1600 | |
| }, | |
| { | |
| "epoch": 2.56, | |
| "grad_norm": 6.48276424407959, | |
| "learning_rate": 0.0001270748299319728, | |
| "loss": 0.7989, | |
| "step": 1610 | |
| }, | |
| { | |
| "epoch": 2.57, | |
| "grad_norm": 3.8373072147369385, | |
| "learning_rate": 0.00012662131519274375, | |
| "loss": 0.7562, | |
| "step": 1620 | |
| }, | |
| { | |
| "epoch": 2.59, | |
| "grad_norm": 5.370635986328125, | |
| "learning_rate": 0.00012616780045351475, | |
| "loss": 0.8958, | |
| "step": 1630 | |
| }, | |
| { | |
| "epoch": 2.6, | |
| "grad_norm": 2.2488410472869873, | |
| "learning_rate": 0.00012571428571428572, | |
| "loss": 0.8911, | |
| "step": 1640 | |
| }, | |
| { | |
| "epoch": 2.62, | |
| "grad_norm": 4.261588096618652, | |
| "learning_rate": 0.0001252607709750567, | |
| "loss": 0.7508, | |
| "step": 1650 | |
| }, | |
| { | |
| "epoch": 2.63, | |
| "grad_norm": 4.3199286460876465, | |
| "learning_rate": 0.00012480725623582767, | |
| "loss": 0.6107, | |
| "step": 1660 | |
| }, | |
| { | |
| "epoch": 2.65, | |
| "grad_norm": 5.610477447509766, | |
| "learning_rate": 0.00012435374149659864, | |
| "loss": 0.5786, | |
| "step": 1670 | |
| }, | |
| { | |
| "epoch": 2.67, | |
| "grad_norm": 6.289084434509277, | |
| "learning_rate": 0.00012390022675736964, | |
| "loss": 0.5845, | |
| "step": 1680 | |
| }, | |
| { | |
| "epoch": 2.68, | |
| "grad_norm": 3.4954001903533936, | |
| "learning_rate": 0.0001234467120181406, | |
| "loss": 0.8207, | |
| "step": 1690 | |
| }, | |
| { | |
| "epoch": 2.7, | |
| "grad_norm": 3.2665460109710693, | |
| "learning_rate": 0.00012299319727891156, | |
| "loss": 0.7311, | |
| "step": 1700 | |
| }, | |
| { | |
| "epoch": 2.7, | |
| "eval_accuracy": 0.7714285714285715, | |
| "eval_loss": 0.7353097200393677, | |
| "eval_runtime": 91.0228, | |
| "eval_samples_per_second": 27.685, | |
| "eval_steps_per_second": 3.461, | |
| "step": 1700 | |
| }, | |
| { | |
| "epoch": 2.71, | |
| "grad_norm": 4.803609848022461, | |
| "learning_rate": 0.00012253968253968256, | |
| "loss": 0.7369, | |
| "step": 1710 | |
| }, | |
| { | |
| "epoch": 2.73, | |
| "grad_norm": 4.8724565505981445, | |
| "learning_rate": 0.00012208616780045353, | |
| "loss": 0.623, | |
| "step": 1720 | |
| }, | |
| { | |
| "epoch": 2.75, | |
| "grad_norm": 5.754215240478516, | |
| "learning_rate": 0.00012163265306122449, | |
| "loss": 0.6377, | |
| "step": 1730 | |
| }, | |
| { | |
| "epoch": 2.76, | |
| "grad_norm": 7.614988803863525, | |
| "learning_rate": 0.00012117913832199547, | |
| "loss": 0.8485, | |
| "step": 1740 | |
| }, | |
| { | |
| "epoch": 2.78, | |
| "grad_norm": 2.6383018493652344, | |
| "learning_rate": 0.00012072562358276644, | |
| "loss": 0.6942, | |
| "step": 1750 | |
| }, | |
| { | |
| "epoch": 2.79, | |
| "grad_norm": 5.747374057769775, | |
| "learning_rate": 0.00012027210884353742, | |
| "loss": 0.7562, | |
| "step": 1760 | |
| }, | |
| { | |
| "epoch": 2.81, | |
| "grad_norm": 5.084746360778809, | |
| "learning_rate": 0.0001198185941043084, | |
| "loss": 0.69, | |
| "step": 1770 | |
| }, | |
| { | |
| "epoch": 2.83, | |
| "grad_norm": 6.089473247528076, | |
| "learning_rate": 0.00011936507936507938, | |
| "loss": 0.9495, | |
| "step": 1780 | |
| }, | |
| { | |
| "epoch": 2.84, | |
| "grad_norm": 5.547908306121826, | |
| "learning_rate": 0.00011891156462585033, | |
| "loss": 0.6505, | |
| "step": 1790 | |
| }, | |
| { | |
| "epoch": 2.86, | |
| "grad_norm": 3.5899226665496826, | |
| "learning_rate": 0.00011845804988662132, | |
| "loss": 0.7181, | |
| "step": 1800 | |
| }, | |
| { | |
| "epoch": 2.86, | |
| "eval_accuracy": 0.792063492063492, | |
| "eval_loss": 0.6831231713294983, | |
| "eval_runtime": 91.1983, | |
| "eval_samples_per_second": 27.632, | |
| "eval_steps_per_second": 3.454, | |
| "step": 1800 | |
| }, | |
| { | |
| "epoch": 2.87, | |
| "grad_norm": 4.794887542724609, | |
| "learning_rate": 0.00011800453514739229, | |
| "loss": 0.6245, | |
| "step": 1810 | |
| }, | |
| { | |
| "epoch": 2.89, | |
| "grad_norm": 7.641270160675049, | |
| "learning_rate": 0.00011755102040816328, | |
| "loss": 0.8966, | |
| "step": 1820 | |
| }, | |
| { | |
| "epoch": 2.9, | |
| "grad_norm": 4.0006608963012695, | |
| "learning_rate": 0.00011709750566893425, | |
| "loss": 0.6106, | |
| "step": 1830 | |
| }, | |
| { | |
| "epoch": 2.92, | |
| "grad_norm": 5.983084201812744, | |
| "learning_rate": 0.00011664399092970522, | |
| "loss": 0.887, | |
| "step": 1840 | |
| }, | |
| { | |
| "epoch": 2.94, | |
| "grad_norm": 4.492617130279541, | |
| "learning_rate": 0.00011619047619047621, | |
| "loss": 0.7974, | |
| "step": 1850 | |
| }, | |
| { | |
| "epoch": 2.95, | |
| "grad_norm": 4.350939750671387, | |
| "learning_rate": 0.00011573696145124717, | |
| "loss": 0.8817, | |
| "step": 1860 | |
| }, | |
| { | |
| "epoch": 2.97, | |
| "grad_norm": 4.855531692504883, | |
| "learning_rate": 0.00011528344671201814, | |
| "loss": 0.9235, | |
| "step": 1870 | |
| }, | |
| { | |
| "epoch": 2.98, | |
| "grad_norm": 5.735949993133545, | |
| "learning_rate": 0.00011482993197278912, | |
| "loss": 0.6679, | |
| "step": 1880 | |
| }, | |
| { | |
| "epoch": 3.0, | |
| "grad_norm": 3.4668216705322266, | |
| "learning_rate": 0.0001143764172335601, | |
| "loss": 0.6783, | |
| "step": 1890 | |
| }, | |
| { | |
| "epoch": 3.02, | |
| "grad_norm": 4.096149921417236, | |
| "learning_rate": 0.00011392290249433107, | |
| "loss": 0.5986, | |
| "step": 1900 | |
| }, | |
| { | |
| "epoch": 3.02, | |
| "eval_accuracy": 0.7896825396825397, | |
| "eval_loss": 0.6930129528045654, | |
| "eval_runtime": 91.4004, | |
| "eval_samples_per_second": 27.571, | |
| "eval_steps_per_second": 3.446, | |
| "step": 1900 | |
| }, | |
| { | |
| "epoch": 3.03, | |
| "grad_norm": 3.9420833587646484, | |
| "learning_rate": 0.00011346938775510206, | |
| "loss": 0.5611, | |
| "step": 1910 | |
| }, | |
| { | |
| "epoch": 3.05, | |
| "grad_norm": 2.5048274993896484, | |
| "learning_rate": 0.00011301587301587301, | |
| "loss": 0.6619, | |
| "step": 1920 | |
| }, | |
| { | |
| "epoch": 3.06, | |
| "grad_norm": 4.899613380432129, | |
| "learning_rate": 0.00011256235827664399, | |
| "loss": 0.6889, | |
| "step": 1930 | |
| }, | |
| { | |
| "epoch": 3.08, | |
| "grad_norm": 4.282281398773193, | |
| "learning_rate": 0.00011210884353741497, | |
| "loss": 0.6182, | |
| "step": 1940 | |
| }, | |
| { | |
| "epoch": 3.1, | |
| "grad_norm": 3.1164541244506836, | |
| "learning_rate": 0.00011165532879818594, | |
| "loss": 0.6822, | |
| "step": 1950 | |
| }, | |
| { | |
| "epoch": 3.11, | |
| "grad_norm": 4.760287761688232, | |
| "learning_rate": 0.00011120181405895693, | |
| "loss": 0.6557, | |
| "step": 1960 | |
| }, | |
| { | |
| "epoch": 3.13, | |
| "grad_norm": 3.0668411254882812, | |
| "learning_rate": 0.0001107482993197279, | |
| "loss": 0.6537, | |
| "step": 1970 | |
| }, | |
| { | |
| "epoch": 3.14, | |
| "grad_norm": 4.527184009552002, | |
| "learning_rate": 0.00011029478458049886, | |
| "loss": 0.5513, | |
| "step": 1980 | |
| }, | |
| { | |
| "epoch": 3.16, | |
| "grad_norm": 2.029935598373413, | |
| "learning_rate": 0.00010984126984126986, | |
| "loss": 0.5102, | |
| "step": 1990 | |
| }, | |
| { | |
| "epoch": 3.17, | |
| "grad_norm": 4.294469833374023, | |
| "learning_rate": 0.00010938775510204082, | |
| "loss": 0.5716, | |
| "step": 2000 | |
| }, | |
| { | |
| "epoch": 3.17, | |
| "eval_accuracy": 0.8047619047619048, | |
| "eval_loss": 0.6684977412223816, | |
| "eval_runtime": 90.6284, | |
| "eval_samples_per_second": 27.806, | |
| "eval_steps_per_second": 3.476, | |
| "step": 2000 | |
| }, | |
| { | |
| "epoch": 3.19, | |
| "grad_norm": 6.498531818389893, | |
| "learning_rate": 0.00010893424036281179, | |
| "loss": 0.6862, | |
| "step": 2010 | |
| }, | |
| { | |
| "epoch": 3.21, | |
| "grad_norm": 5.384933948516846, | |
| "learning_rate": 0.00010848072562358278, | |
| "loss": 0.4793, | |
| "step": 2020 | |
| }, | |
| { | |
| "epoch": 3.22, | |
| "grad_norm": 4.124419212341309, | |
| "learning_rate": 0.00010802721088435375, | |
| "loss": 0.7055, | |
| "step": 2030 | |
| }, | |
| { | |
| "epoch": 3.24, | |
| "grad_norm": 4.198364734649658, | |
| "learning_rate": 0.00010757369614512471, | |
| "loss": 0.7231, | |
| "step": 2040 | |
| }, | |
| { | |
| "epoch": 3.25, | |
| "grad_norm": 4.410750389099121, | |
| "learning_rate": 0.00010712018140589571, | |
| "loss": 0.751, | |
| "step": 2050 | |
| }, | |
| { | |
| "epoch": 3.27, | |
| "grad_norm": 6.240994930267334, | |
| "learning_rate": 0.00010666666666666667, | |
| "loss": 0.6299, | |
| "step": 2060 | |
| }, | |
| { | |
| "epoch": 3.29, | |
| "grad_norm": 4.845404148101807, | |
| "learning_rate": 0.00010621315192743764, | |
| "loss": 0.6496, | |
| "step": 2070 | |
| }, | |
| { | |
| "epoch": 3.3, | |
| "grad_norm": 4.447664260864258, | |
| "learning_rate": 0.00010575963718820862, | |
| "loss": 0.5909, | |
| "step": 2080 | |
| }, | |
| { | |
| "epoch": 3.32, | |
| "grad_norm": 4.779788494110107, | |
| "learning_rate": 0.0001053061224489796, | |
| "loss": 0.6505, | |
| "step": 2090 | |
| }, | |
| { | |
| "epoch": 3.33, | |
| "grad_norm": 3.946617841720581, | |
| "learning_rate": 0.00010485260770975056, | |
| "loss": 0.5218, | |
| "step": 2100 | |
| }, | |
| { | |
| "epoch": 3.33, | |
| "eval_accuracy": 0.7916666666666666, | |
| "eval_loss": 0.7152296900749207, | |
| "eval_runtime": 89.3624, | |
| "eval_samples_per_second": 28.2, | |
| "eval_steps_per_second": 3.525, | |
| "step": 2100 | |
| }, | |
| { | |
| "epoch": 3.35, | |
| "grad_norm": 4.13429069519043, | |
| "learning_rate": 0.00010439909297052155, | |
| "loss": 0.6193, | |
| "step": 2110 | |
| }, | |
| { | |
| "epoch": 3.37, | |
| "grad_norm": 5.503479480743408, | |
| "learning_rate": 0.00010394557823129251, | |
| "loss": 0.6206, | |
| "step": 2120 | |
| }, | |
| { | |
| "epoch": 3.38, | |
| "grad_norm": 6.610657215118408, | |
| "learning_rate": 0.00010349206349206351, | |
| "loss": 0.7244, | |
| "step": 2130 | |
| }, | |
| { | |
| "epoch": 3.4, | |
| "grad_norm": 3.592276096343994, | |
| "learning_rate": 0.00010303854875283447, | |
| "loss": 0.502, | |
| "step": 2140 | |
| }, | |
| { | |
| "epoch": 3.41, | |
| "grad_norm": 7.070047378540039, | |
| "learning_rate": 0.00010258503401360544, | |
| "loss": 0.8749, | |
| "step": 2150 | |
| }, | |
| { | |
| "epoch": 3.43, | |
| "grad_norm": 8.58852481842041, | |
| "learning_rate": 0.00010213151927437643, | |
| "loss": 0.5832, | |
| "step": 2160 | |
| }, | |
| { | |
| "epoch": 3.44, | |
| "grad_norm": 5.416906356811523, | |
| "learning_rate": 0.0001016780045351474, | |
| "loss": 0.5901, | |
| "step": 2170 | |
| }, | |
| { | |
| "epoch": 3.46, | |
| "grad_norm": 6.415400981903076, | |
| "learning_rate": 0.00010122448979591836, | |
| "loss": 0.4842, | |
| "step": 2180 | |
| }, | |
| { | |
| "epoch": 3.48, | |
| "grad_norm": 1.809209942817688, | |
| "learning_rate": 0.00010077097505668936, | |
| "loss": 0.6591, | |
| "step": 2190 | |
| }, | |
| { | |
| "epoch": 3.49, | |
| "grad_norm": 4.30819034576416, | |
| "learning_rate": 0.00010031746031746032, | |
| "loss": 0.8469, | |
| "step": 2200 | |
| }, | |
| { | |
| "epoch": 3.49, | |
| "eval_accuracy": 0.801984126984127, | |
| "eval_loss": 0.6404625177383423, | |
| "eval_runtime": 91.7544, | |
| "eval_samples_per_second": 27.465, | |
| "eval_steps_per_second": 3.433, | |
| "step": 2200 | |
| }, | |
| { | |
| "epoch": 3.51, | |
| "grad_norm": 1.2012885808944702, | |
| "learning_rate": 9.98639455782313e-05, | |
| "loss": 0.5602, | |
| "step": 2210 | |
| }, | |
| { | |
| "epoch": 3.52, | |
| "grad_norm": 6.166879653930664, | |
| "learning_rate": 9.941043083900228e-05, | |
| "loss": 0.693, | |
| "step": 2220 | |
| }, | |
| { | |
| "epoch": 3.54, | |
| "grad_norm": 2.257598876953125, | |
| "learning_rate": 9.895691609977325e-05, | |
| "loss": 0.6969, | |
| "step": 2230 | |
| }, | |
| { | |
| "epoch": 3.56, | |
| "grad_norm": 3.866694450378418, | |
| "learning_rate": 9.850340136054422e-05, | |
| "loss": 0.7577, | |
| "step": 2240 | |
| }, | |
| { | |
| "epoch": 3.57, | |
| "grad_norm": 4.493105888366699, | |
| "learning_rate": 9.804988662131521e-05, | |
| "loss": 0.7494, | |
| "step": 2250 | |
| }, | |
| { | |
| "epoch": 3.59, | |
| "grad_norm": 6.990530490875244, | |
| "learning_rate": 9.759637188208617e-05, | |
| "loss": 0.6278, | |
| "step": 2260 | |
| }, | |
| { | |
| "epoch": 3.6, | |
| "grad_norm": 3.6842026710510254, | |
| "learning_rate": 9.714285714285715e-05, | |
| "loss": 0.6444, | |
| "step": 2270 | |
| }, | |
| { | |
| "epoch": 3.62, | |
| "grad_norm": 3.766533851623535, | |
| "learning_rate": 9.668934240362812e-05, | |
| "loss": 0.6254, | |
| "step": 2280 | |
| }, | |
| { | |
| "epoch": 3.63, | |
| "grad_norm": 3.9097561836242676, | |
| "learning_rate": 9.62358276643991e-05, | |
| "loss": 0.6871, | |
| "step": 2290 | |
| }, | |
| { | |
| "epoch": 3.65, | |
| "grad_norm": 5.504273414611816, | |
| "learning_rate": 9.578231292517007e-05, | |
| "loss": 0.5783, | |
| "step": 2300 | |
| }, | |
| { | |
| "epoch": 3.65, | |
| "eval_accuracy": 0.7956349206349206, | |
| "eval_loss": 0.6727890968322754, | |
| "eval_runtime": 91.691, | |
| "eval_samples_per_second": 27.484, | |
| "eval_steps_per_second": 3.435, | |
| "step": 2300 | |
| }, | |
| { | |
| "epoch": 3.67, | |
| "grad_norm": 6.009402751922607, | |
| "learning_rate": 9.532879818594105e-05, | |
| "loss": 0.6296, | |
| "step": 2310 | |
| }, | |
| { | |
| "epoch": 3.68, | |
| "grad_norm": 3.723788022994995, | |
| "learning_rate": 9.487528344671203e-05, | |
| "loss": 0.61, | |
| "step": 2320 | |
| }, | |
| { | |
| "epoch": 3.7, | |
| "grad_norm": 1.6886146068572998, | |
| "learning_rate": 9.4421768707483e-05, | |
| "loss": 0.6042, | |
| "step": 2330 | |
| }, | |
| { | |
| "epoch": 3.71, | |
| "grad_norm": 3.2953665256500244, | |
| "learning_rate": 9.396825396825397e-05, | |
| "loss": 0.5417, | |
| "step": 2340 | |
| }, | |
| { | |
| "epoch": 3.73, | |
| "grad_norm": 5.710081100463867, | |
| "learning_rate": 9.351473922902494e-05, | |
| "loss": 0.4744, | |
| "step": 2350 | |
| }, | |
| { | |
| "epoch": 3.75, | |
| "grad_norm": 8.341416358947754, | |
| "learning_rate": 9.306122448979592e-05, | |
| "loss": 0.6468, | |
| "step": 2360 | |
| }, | |
| { | |
| "epoch": 3.76, | |
| "grad_norm": 5.668067455291748, | |
| "learning_rate": 9.26077097505669e-05, | |
| "loss": 0.7058, | |
| "step": 2370 | |
| }, | |
| { | |
| "epoch": 3.78, | |
| "grad_norm": 7.821765422821045, | |
| "learning_rate": 9.215419501133787e-05, | |
| "loss": 0.6014, | |
| "step": 2380 | |
| }, | |
| { | |
| "epoch": 3.79, | |
| "grad_norm": 3.075150728225708, | |
| "learning_rate": 9.170068027210885e-05, | |
| "loss": 0.5857, | |
| "step": 2390 | |
| }, | |
| { | |
| "epoch": 3.81, | |
| "grad_norm": 5.349681377410889, | |
| "learning_rate": 9.124716553287982e-05, | |
| "loss": 0.7202, | |
| "step": 2400 | |
| }, | |
| { | |
| "epoch": 3.81, | |
| "eval_accuracy": 0.8154761904761905, | |
| "eval_loss": 0.6007378697395325, | |
| "eval_runtime": 91.6882, | |
| "eval_samples_per_second": 27.484, | |
| "eval_steps_per_second": 3.436, | |
| "step": 2400 | |
| }, | |
| { | |
| "epoch": 3.83, | |
| "grad_norm": 6.9100117683410645, | |
| "learning_rate": 9.079365079365079e-05, | |
| "loss": 0.5335, | |
| "step": 2410 | |
| }, | |
| { | |
| "epoch": 3.84, | |
| "grad_norm": null, | |
| "learning_rate": 9.038548752834468e-05, | |
| "loss": 0.7311, | |
| "step": 2420 | |
| }, | |
| { | |
| "epoch": 3.86, | |
| "grad_norm": 3.347836494445801, | |
| "learning_rate": 8.993197278911565e-05, | |
| "loss": 0.5405, | |
| "step": 2430 | |
| }, | |
| { | |
| "epoch": 3.87, | |
| "grad_norm": 10.232234954833984, | |
| "learning_rate": 8.947845804988662e-05, | |
| "loss": 0.7061, | |
| "step": 2440 | |
| }, | |
| { | |
| "epoch": 3.89, | |
| "grad_norm": 5.486011981964111, | |
| "learning_rate": 8.902494331065761e-05, | |
| "loss": 0.6905, | |
| "step": 2450 | |
| }, | |
| { | |
| "epoch": 3.9, | |
| "grad_norm": 4.699936389923096, | |
| "learning_rate": 8.857142857142857e-05, | |
| "loss": 0.5979, | |
| "step": 2460 | |
| }, | |
| { | |
| "epoch": 3.92, | |
| "grad_norm": 7.7097296714782715, | |
| "learning_rate": 8.811791383219955e-05, | |
| "loss": 0.5904, | |
| "step": 2470 | |
| }, | |
| { | |
| "epoch": 3.94, | |
| "grad_norm": 3.3451244831085205, | |
| "learning_rate": 8.766439909297052e-05, | |
| "loss": 0.5359, | |
| "step": 2480 | |
| }, | |
| { | |
| "epoch": 3.95, | |
| "grad_norm": 7.01662540435791, | |
| "learning_rate": 8.72108843537415e-05, | |
| "loss": 0.728, | |
| "step": 2490 | |
| }, | |
| { | |
| "epoch": 3.97, | |
| "grad_norm": 4.137801170349121, | |
| "learning_rate": 8.675736961451247e-05, | |
| "loss": 0.5525, | |
| "step": 2500 | |
| }, | |
| { | |
| "epoch": 3.97, | |
| "eval_accuracy": 0.8055555555555556, | |
| "eval_loss": 0.6558998823165894, | |
| "eval_runtime": 94.8951, | |
| "eval_samples_per_second": 26.556, | |
| "eval_steps_per_second": 3.319, | |
| "step": 2500 | |
| }, | |
| { | |
| "epoch": 3.98, | |
| "grad_norm": 6.564231872558594, | |
| "learning_rate": 8.630385487528345e-05, | |
| "loss": 0.6735, | |
| "step": 2510 | |
| }, | |
| { | |
| "epoch": 4.0, | |
| "grad_norm": 5.252976894378662, | |
| "learning_rate": 8.585034013605443e-05, | |
| "loss": 0.6903, | |
| "step": 2520 | |
| }, | |
| { | |
| "epoch": 4.02, | |
| "grad_norm": 4.289461612701416, | |
| "learning_rate": 8.53968253968254e-05, | |
| "loss": 0.5104, | |
| "step": 2530 | |
| }, | |
| { | |
| "epoch": 4.03, | |
| "grad_norm": 4.131557941436768, | |
| "learning_rate": 8.494331065759637e-05, | |
| "loss": 0.5064, | |
| "step": 2540 | |
| }, | |
| { | |
| "epoch": 4.05, | |
| "grad_norm": 3.5607643127441406, | |
| "learning_rate": 8.448979591836736e-05, | |
| "loss": 0.4825, | |
| "step": 2550 | |
| }, | |
| { | |
| "epoch": 4.06, | |
| "grad_norm": 2.5051770210266113, | |
| "learning_rate": 8.403628117913832e-05, | |
| "loss": 0.5668, | |
| "step": 2560 | |
| }, | |
| { | |
| "epoch": 4.08, | |
| "grad_norm": 1.6936321258544922, | |
| "learning_rate": 8.35827664399093e-05, | |
| "loss": 0.4412, | |
| "step": 2570 | |
| }, | |
| { | |
| "epoch": 4.1, | |
| "grad_norm": 3.99070143699646, | |
| "learning_rate": 8.312925170068027e-05, | |
| "loss": 0.5046, | |
| "step": 2580 | |
| }, | |
| { | |
| "epoch": 4.11, | |
| "grad_norm": 4.099004745483398, | |
| "learning_rate": 8.267573696145126e-05, | |
| "loss": 0.5753, | |
| "step": 2590 | |
| }, | |
| { | |
| "epoch": 4.13, | |
| "grad_norm": 3.5789458751678467, | |
| "learning_rate": 8.222222222222222e-05, | |
| "loss": 0.519, | |
| "step": 2600 | |
| }, | |
| { | |
| "epoch": 4.13, | |
| "eval_accuracy": 0.8222222222222222, | |
| "eval_loss": 0.5868101716041565, | |
| "eval_runtime": 93.686, | |
| "eval_samples_per_second": 26.898, | |
| "eval_steps_per_second": 3.362, | |
| "step": 2600 | |
| }, | |
| { | |
| "epoch": 4.14, | |
| "grad_norm": 4.785660266876221, | |
| "learning_rate": 8.17687074829932e-05, | |
| "loss": 0.4747, | |
| "step": 2610 | |
| }, | |
| { | |
| "epoch": 4.16, | |
| "grad_norm": 3.662282705307007, | |
| "learning_rate": 8.131519274376418e-05, | |
| "loss": 0.4822, | |
| "step": 2620 | |
| }, | |
| { | |
| "epoch": 4.17, | |
| "grad_norm": 3.897036075592041, | |
| "learning_rate": 8.086167800453515e-05, | |
| "loss": 0.4848, | |
| "step": 2630 | |
| }, | |
| { | |
| "epoch": 4.19, | |
| "grad_norm": 5.990245342254639, | |
| "learning_rate": 8.040816326530612e-05, | |
| "loss": 0.5569, | |
| "step": 2640 | |
| }, | |
| { | |
| "epoch": 4.21, | |
| "grad_norm": 4.004576206207275, | |
| "learning_rate": 7.99546485260771e-05, | |
| "loss": 0.4929, | |
| "step": 2650 | |
| }, | |
| { | |
| "epoch": 4.22, | |
| "grad_norm": 5.0893754959106445, | |
| "learning_rate": 7.950113378684808e-05, | |
| "loss": 0.5564, | |
| "step": 2660 | |
| }, | |
| { | |
| "epoch": 4.24, | |
| "grad_norm": 3.9271037578582764, | |
| "learning_rate": 7.904761904761905e-05, | |
| "loss": 0.382, | |
| "step": 2670 | |
| }, | |
| { | |
| "epoch": 4.25, | |
| "grad_norm": 4.929361820220947, | |
| "learning_rate": 7.859410430839002e-05, | |
| "loss": 0.5502, | |
| "step": 2680 | |
| }, | |
| { | |
| "epoch": 4.27, | |
| "grad_norm": 4.333121299743652, | |
| "learning_rate": 7.814058956916101e-05, | |
| "loss": 0.5877, | |
| "step": 2690 | |
| }, | |
| { | |
| "epoch": 4.29, | |
| "grad_norm": 3.787369728088379, | |
| "learning_rate": 7.768707482993197e-05, | |
| "loss": 0.6171, | |
| "step": 2700 | |
| }, | |
| { | |
| "epoch": 4.29, | |
| "eval_accuracy": 0.8103174603174603, | |
| "eval_loss": 0.6157482266426086, | |
| "eval_runtime": 91.6071, | |
| "eval_samples_per_second": 27.509, | |
| "eval_steps_per_second": 3.439, | |
| "step": 2700 | |
| }, | |
| { | |
| "epoch": 4.3, | |
| "grad_norm": 4.089489936828613, | |
| "learning_rate": 7.723356009070295e-05, | |
| "loss": 0.5279, | |
| "step": 2710 | |
| }, | |
| { | |
| "epoch": 4.32, | |
| "grad_norm": 5.687109470367432, | |
| "learning_rate": 7.678004535147393e-05, | |
| "loss": 0.4443, | |
| "step": 2720 | |
| }, | |
| { | |
| "epoch": 4.33, | |
| "grad_norm": 3.6936216354370117, | |
| "learning_rate": 7.632653061224491e-05, | |
| "loss": 0.533, | |
| "step": 2730 | |
| }, | |
| { | |
| "epoch": 4.35, | |
| "grad_norm": 2.403999090194702, | |
| "learning_rate": 7.587301587301587e-05, | |
| "loss": 0.5214, | |
| "step": 2740 | |
| }, | |
| { | |
| "epoch": 4.37, | |
| "grad_norm": 5.553544044494629, | |
| "learning_rate": 7.541950113378686e-05, | |
| "loss": 0.5621, | |
| "step": 2750 | |
| }, | |
| { | |
| "epoch": 4.38, | |
| "grad_norm": 2.7590301036834717, | |
| "learning_rate": 7.496598639455783e-05, | |
| "loss": 0.4752, | |
| "step": 2760 | |
| }, | |
| { | |
| "epoch": 4.4, | |
| "grad_norm": 6.257105350494385, | |
| "learning_rate": 7.45124716553288e-05, | |
| "loss": 0.5196, | |
| "step": 2770 | |
| }, | |
| { | |
| "epoch": 4.41, | |
| "grad_norm": 6.366973400115967, | |
| "learning_rate": 7.405895691609977e-05, | |
| "loss": 0.4779, | |
| "step": 2780 | |
| }, | |
| { | |
| "epoch": 4.43, | |
| "grad_norm": 4.746866703033447, | |
| "learning_rate": 7.360544217687076e-05, | |
| "loss": 0.4031, | |
| "step": 2790 | |
| }, | |
| { | |
| "epoch": 4.44, | |
| "grad_norm": 3.278367519378662, | |
| "learning_rate": 7.315192743764173e-05, | |
| "loss": 0.5401, | |
| "step": 2800 | |
| }, | |
| { | |
| "epoch": 4.44, | |
| "eval_accuracy": 0.8083333333333333, | |
| "eval_loss": 0.6119987368583679, | |
| "eval_runtime": 91.505, | |
| "eval_samples_per_second": 27.539, | |
| "eval_steps_per_second": 3.442, | |
| "step": 2800 | |
| }, | |
| { | |
| "epoch": 4.46, | |
| "grad_norm": 5.5712504386901855, | |
| "learning_rate": 7.26984126984127e-05, | |
| "loss": 0.5565, | |
| "step": 2810 | |
| }, | |
| { | |
| "epoch": 4.48, | |
| "grad_norm": 4.8718767166137695, | |
| "learning_rate": 7.224489795918368e-05, | |
| "loss": 0.5219, | |
| "step": 2820 | |
| }, | |
| { | |
| "epoch": 4.49, | |
| "grad_norm": 5.685264587402344, | |
| "learning_rate": 7.179138321995466e-05, | |
| "loss": 0.5021, | |
| "step": 2830 | |
| }, | |
| { | |
| "epoch": 4.51, | |
| "grad_norm": 6.191750526428223, | |
| "learning_rate": 7.133786848072562e-05, | |
| "loss": 0.5115, | |
| "step": 2840 | |
| }, | |
| { | |
| "epoch": 4.52, | |
| "grad_norm": 5.250429630279541, | |
| "learning_rate": 7.08843537414966e-05, | |
| "loss": 0.5805, | |
| "step": 2850 | |
| }, | |
| { | |
| "epoch": 4.54, | |
| "grad_norm": 5.834912300109863, | |
| "learning_rate": 7.043083900226758e-05, | |
| "loss": 0.584, | |
| "step": 2860 | |
| }, | |
| { | |
| "epoch": 4.56, | |
| "grad_norm": 2.52066707611084, | |
| "learning_rate": 6.997732426303855e-05, | |
| "loss": 0.3752, | |
| "step": 2870 | |
| }, | |
| { | |
| "epoch": 4.57, | |
| "grad_norm": 2.4411842823028564, | |
| "learning_rate": 6.952380952380952e-05, | |
| "loss": 0.5748, | |
| "step": 2880 | |
| }, | |
| { | |
| "epoch": 4.59, | |
| "grad_norm": 4.865506649017334, | |
| "learning_rate": 6.907029478458051e-05, | |
| "loss": 0.6119, | |
| "step": 2890 | |
| }, | |
| { | |
| "epoch": 4.6, | |
| "grad_norm": 2.109516143798828, | |
| "learning_rate": 6.861678004535148e-05, | |
| "loss": 0.6105, | |
| "step": 2900 | |
| }, | |
| { | |
| "epoch": 4.6, | |
| "eval_accuracy": 0.8325396825396826, | |
| "eval_loss": 0.5618996024131775, | |
| "eval_runtime": 90.5734, | |
| "eval_samples_per_second": 27.823, | |
| "eval_steps_per_second": 3.478, | |
| "step": 2900 | |
| }, | |
| { | |
| "epoch": 4.62, | |
| "grad_norm": 6.931108474731445, | |
| "learning_rate": 6.816326530612245e-05, | |
| "loss": 0.6452, | |
| "step": 2910 | |
| }, | |
| { | |
| "epoch": 4.63, | |
| "grad_norm": 5.011793613433838, | |
| "learning_rate": 6.770975056689343e-05, | |
| "loss": 0.3178, | |
| "step": 2920 | |
| }, | |
| { | |
| "epoch": 4.65, | |
| "grad_norm": 2.996354818344116, | |
| "learning_rate": 6.72562358276644e-05, | |
| "loss": 0.3436, | |
| "step": 2930 | |
| }, | |
| { | |
| "epoch": 4.67, | |
| "grad_norm": 4.3052754402160645, | |
| "learning_rate": 6.680272108843538e-05, | |
| "loss": 0.4743, | |
| "step": 2940 | |
| }, | |
| { | |
| "epoch": 4.68, | |
| "grad_norm": 5.777819633483887, | |
| "learning_rate": 6.634920634920636e-05, | |
| "loss": 0.6294, | |
| "step": 2950 | |
| }, | |
| { | |
| "epoch": 4.7, | |
| "grad_norm": 4.6029157638549805, | |
| "learning_rate": 6.589569160997733e-05, | |
| "loss": 0.495, | |
| "step": 2960 | |
| }, | |
| { | |
| "epoch": 4.71, | |
| "grad_norm": 3.34942889213562, | |
| "learning_rate": 6.54421768707483e-05, | |
| "loss": 0.5216, | |
| "step": 2970 | |
| }, | |
| { | |
| "epoch": 4.73, | |
| "grad_norm": 3.747777223587036, | |
| "learning_rate": 6.498866213151927e-05, | |
| "loss": 0.4908, | |
| "step": 2980 | |
| }, | |
| { | |
| "epoch": 4.75, | |
| "grad_norm": 3.217052459716797, | |
| "learning_rate": 6.453514739229024e-05, | |
| "loss": 0.5847, | |
| "step": 2990 | |
| }, | |
| { | |
| "epoch": 4.76, | |
| "grad_norm": 2.1214661598205566, | |
| "learning_rate": 6.408163265306123e-05, | |
| "loss": 0.7497, | |
| "step": 3000 | |
| }, | |
| { | |
| "epoch": 4.76, | |
| "eval_accuracy": 0.8301587301587302, | |
| "eval_loss": 0.5859270095825195, | |
| "eval_runtime": 89.9674, | |
| "eval_samples_per_second": 28.01, | |
| "eval_steps_per_second": 3.501, | |
| "step": 3000 | |
| }, | |
| { | |
| "epoch": 4.78, | |
| "grad_norm": 3.495818614959717, | |
| "learning_rate": 6.36281179138322e-05, | |
| "loss": 0.4944, | |
| "step": 3010 | |
| }, | |
| { | |
| "epoch": 4.79, | |
| "grad_norm": 6.128993034362793, | |
| "learning_rate": 6.317460317460318e-05, | |
| "loss": 0.5474, | |
| "step": 3020 | |
| }, | |
| { | |
| "epoch": 4.81, | |
| "grad_norm": 2.1895864009857178, | |
| "learning_rate": 6.272108843537415e-05, | |
| "loss": 0.5447, | |
| "step": 3030 | |
| }, | |
| { | |
| "epoch": 4.83, | |
| "grad_norm": 4.312340259552002, | |
| "learning_rate": 6.226757369614513e-05, | |
| "loss": 0.5194, | |
| "step": 3040 | |
| }, | |
| { | |
| "epoch": 4.84, | |
| "grad_norm": 5.473392009735107, | |
| "learning_rate": 6.181405895691609e-05, | |
| "loss": 0.5538, | |
| "step": 3050 | |
| }, | |
| { | |
| "epoch": 4.86, | |
| "grad_norm": 2.302345037460327, | |
| "learning_rate": 6.136054421768708e-05, | |
| "loss": 0.4733, | |
| "step": 3060 | |
| }, | |
| { | |
| "epoch": 4.87, | |
| "grad_norm": 3.3499696254730225, | |
| "learning_rate": 6.090702947845806e-05, | |
| "loss": 0.755, | |
| "step": 3070 | |
| }, | |
| { | |
| "epoch": 4.89, | |
| "grad_norm": 3.382891893386841, | |
| "learning_rate": 6.045351473922902e-05, | |
| "loss": 0.4829, | |
| "step": 3080 | |
| }, | |
| { | |
| "epoch": 4.9, | |
| "grad_norm": 3.4998888969421387, | |
| "learning_rate": 6e-05, | |
| "loss": 0.4543, | |
| "step": 3090 | |
| }, | |
| { | |
| "epoch": 4.92, | |
| "grad_norm": 3.2224600315093994, | |
| "learning_rate": 5.954648526077098e-05, | |
| "loss": 0.4856, | |
| "step": 3100 | |
| }, | |
| { | |
| "epoch": 4.92, | |
| "eval_accuracy": 0.8261904761904761, | |
| "eval_loss": 0.5833402872085571, | |
| "eval_runtime": 90.9659, | |
| "eval_samples_per_second": 27.703, | |
| "eval_steps_per_second": 3.463, | |
| "step": 3100 | |
| }, | |
| { | |
| "epoch": 4.94, | |
| "grad_norm": 2.1335177421569824, | |
| "learning_rate": 5.909297052154196e-05, | |
| "loss": 0.3995, | |
| "step": 3110 | |
| }, | |
| { | |
| "epoch": 4.95, | |
| "grad_norm": 4.156594753265381, | |
| "learning_rate": 5.8639455782312925e-05, | |
| "loss": 0.5539, | |
| "step": 3120 | |
| }, | |
| { | |
| "epoch": 4.97, | |
| "grad_norm": 5.811089515686035, | |
| "learning_rate": 5.8185941043083904e-05, | |
| "loss": 0.579, | |
| "step": 3130 | |
| }, | |
| { | |
| "epoch": 4.98, | |
| "grad_norm": 1.908595323562622, | |
| "learning_rate": 5.773242630385488e-05, | |
| "loss": 0.5315, | |
| "step": 3140 | |
| }, | |
| { | |
| "epoch": 5.0, | |
| "grad_norm": 2.8674533367156982, | |
| "learning_rate": 5.727891156462585e-05, | |
| "loss": 0.5577, | |
| "step": 3150 | |
| }, | |
| { | |
| "epoch": 5.02, | |
| "grad_norm": 1.5174939632415771, | |
| "learning_rate": 5.682539682539683e-05, | |
| "loss": 0.5052, | |
| "step": 3160 | |
| }, | |
| { | |
| "epoch": 5.03, | |
| "grad_norm": 3.2664871215820312, | |
| "learning_rate": 5.637188208616781e-05, | |
| "loss": 0.3135, | |
| "step": 3170 | |
| }, | |
| { | |
| "epoch": 5.05, | |
| "grad_norm": 5.35206937789917, | |
| "learning_rate": 5.5918367346938786e-05, | |
| "loss": 0.5859, | |
| "step": 3180 | |
| }, | |
| { | |
| "epoch": 5.06, | |
| "grad_norm": 2.6170833110809326, | |
| "learning_rate": 5.546485260770975e-05, | |
| "loss": 0.4511, | |
| "step": 3190 | |
| }, | |
| { | |
| "epoch": 5.08, | |
| "grad_norm": 3.2739861011505127, | |
| "learning_rate": 5.501133786848073e-05, | |
| "loss": 0.4959, | |
| "step": 3200 | |
| }, | |
| { | |
| "epoch": 5.08, | |
| "eval_accuracy": 0.832936507936508, | |
| "eval_loss": 0.5703846216201782, | |
| "eval_runtime": 92.0072, | |
| "eval_samples_per_second": 27.389, | |
| "eval_steps_per_second": 3.424, | |
| "step": 3200 | |
| }, | |
| { | |
| "epoch": 5.1, | |
| "grad_norm": 0.7258143424987793, | |
| "learning_rate": 5.455782312925171e-05, | |
| "loss": 0.4922, | |
| "step": 3210 | |
| }, | |
| { | |
| "epoch": 5.11, | |
| "grad_norm": 1.8463611602783203, | |
| "learning_rate": 5.4104308390022675e-05, | |
| "loss": 0.3888, | |
| "step": 3220 | |
| }, | |
| { | |
| "epoch": 5.13, | |
| "grad_norm": 4.082030296325684, | |
| "learning_rate": 5.3650793650793654e-05, | |
| "loss": 0.4486, | |
| "step": 3230 | |
| }, | |
| { | |
| "epoch": 5.14, | |
| "grad_norm": 1.123199462890625, | |
| "learning_rate": 5.319727891156463e-05, | |
| "loss": 0.4229, | |
| "step": 3240 | |
| }, | |
| { | |
| "epoch": 5.16, | |
| "grad_norm": 2.546860694885254, | |
| "learning_rate": 5.2743764172335605e-05, | |
| "loss": 0.5514, | |
| "step": 3250 | |
| }, | |
| { | |
| "epoch": 5.17, | |
| "grad_norm": 6.097323417663574, | |
| "learning_rate": 5.229024943310658e-05, | |
| "loss": 0.4391, | |
| "step": 3260 | |
| }, | |
| { | |
| "epoch": 5.19, | |
| "grad_norm": 3.1347315311431885, | |
| "learning_rate": 5.1836734693877557e-05, | |
| "loss": 0.4455, | |
| "step": 3270 | |
| }, | |
| { | |
| "epoch": 5.21, | |
| "grad_norm": 2.0431602001190186, | |
| "learning_rate": 5.138321995464853e-05, | |
| "loss": 0.3076, | |
| "step": 3280 | |
| }, | |
| { | |
| "epoch": 5.22, | |
| "grad_norm": 4.452468395233154, | |
| "learning_rate": 5.09297052154195e-05, | |
| "loss": 0.4442, | |
| "step": 3290 | |
| }, | |
| { | |
| "epoch": 5.24, | |
| "grad_norm": 5.282511234283447, | |
| "learning_rate": 5.047619047619048e-05, | |
| "loss": 0.4413, | |
| "step": 3300 | |
| }, | |
| { | |
| "epoch": 5.24, | |
| "eval_accuracy": 0.819047619047619, | |
| "eval_loss": 0.6217456459999084, | |
| "eval_runtime": 91.9773, | |
| "eval_samples_per_second": 27.398, | |
| "eval_steps_per_second": 3.425, | |
| "step": 3300 | |
| }, | |
| { | |
| "epoch": 5.25, | |
| "grad_norm": 8.806018829345703, | |
| "learning_rate": 5.002267573696145e-05, | |
| "loss": 0.4263, | |
| "step": 3310 | |
| }, | |
| { | |
| "epoch": 5.27, | |
| "grad_norm": 4.782093048095703, | |
| "learning_rate": 4.9569160997732425e-05, | |
| "loss": 0.6431, | |
| "step": 3320 | |
| }, | |
| { | |
| "epoch": 5.29, | |
| "grad_norm": 5.115506172180176, | |
| "learning_rate": 4.9115646258503404e-05, | |
| "loss": 0.4876, | |
| "step": 3330 | |
| }, | |
| { | |
| "epoch": 5.3, | |
| "grad_norm": 4.416604995727539, | |
| "learning_rate": 4.8662131519274376e-05, | |
| "loss": 0.371, | |
| "step": 3340 | |
| }, | |
| { | |
| "epoch": 5.32, | |
| "grad_norm": 3.7609243392944336, | |
| "learning_rate": 4.820861678004535e-05, | |
| "loss": 0.3873, | |
| "step": 3350 | |
| }, | |
| { | |
| "epoch": 5.33, | |
| "grad_norm": 4.652336120605469, | |
| "learning_rate": 4.775510204081633e-05, | |
| "loss": 0.4134, | |
| "step": 3360 | |
| }, | |
| { | |
| "epoch": 5.35, | |
| "grad_norm": 5.700862407684326, | |
| "learning_rate": 4.73015873015873e-05, | |
| "loss": 0.4463, | |
| "step": 3370 | |
| }, | |
| { | |
| "epoch": 5.37, | |
| "grad_norm": 4.792759895324707, | |
| "learning_rate": 4.684807256235828e-05, | |
| "loss": 0.4243, | |
| "step": 3380 | |
| }, | |
| { | |
| "epoch": 5.38, | |
| "grad_norm": 4.60031795501709, | |
| "learning_rate": 4.639455782312925e-05, | |
| "loss": 0.3602, | |
| "step": 3390 | |
| }, | |
| { | |
| "epoch": 5.4, | |
| "grad_norm": 6.657435417175293, | |
| "learning_rate": 4.594104308390023e-05, | |
| "loss": 0.4513, | |
| "step": 3400 | |
| }, | |
| { | |
| "epoch": 5.4, | |
| "eval_accuracy": 0.8293650793650794, | |
| "eval_loss": 0.5750200748443604, | |
| "eval_runtime": 96.412, | |
| "eval_samples_per_second": 26.138, | |
| "eval_steps_per_second": 3.267, | |
| "step": 3400 | |
| }, | |
| { | |
| "epoch": 5.41, | |
| "grad_norm": 3.526585578918457, | |
| "learning_rate": 4.54875283446712e-05, | |
| "loss": 0.374, | |
| "step": 3410 | |
| }, | |
| { | |
| "epoch": 5.43, | |
| "grad_norm": 2.742906332015991, | |
| "learning_rate": 4.5034013605442174e-05, | |
| "loss": 0.3822, | |
| "step": 3420 | |
| }, | |
| { | |
| "epoch": 5.44, | |
| "grad_norm": 3.6437864303588867, | |
| "learning_rate": 4.4580498866213154e-05, | |
| "loss": 0.4369, | |
| "step": 3430 | |
| }, | |
| { | |
| "epoch": 5.46, | |
| "grad_norm": 2.621497869491577, | |
| "learning_rate": 4.4126984126984126e-05, | |
| "loss": 0.3399, | |
| "step": 3440 | |
| }, | |
| { | |
| "epoch": 5.48, | |
| "grad_norm": 1.8672789335250854, | |
| "learning_rate": 4.3673469387755105e-05, | |
| "loss": 0.584, | |
| "step": 3450 | |
| }, | |
| { | |
| "epoch": 5.49, | |
| "grad_norm": 1.4387513399124146, | |
| "learning_rate": 4.321995464852608e-05, | |
| "loss": 0.4242, | |
| "step": 3460 | |
| }, | |
| { | |
| "epoch": 5.51, | |
| "grad_norm": 0.7556703090667725, | |
| "learning_rate": 4.2766439909297056e-05, | |
| "loss": 0.462, | |
| "step": 3470 | |
| }, | |
| { | |
| "epoch": 5.52, | |
| "grad_norm": 2.0518500804901123, | |
| "learning_rate": 4.231292517006803e-05, | |
| "loss": 0.4239, | |
| "step": 3480 | |
| }, | |
| { | |
| "epoch": 5.54, | |
| "grad_norm": 4.876095771789551, | |
| "learning_rate": 4.1859410430839e-05, | |
| "loss": 0.5488, | |
| "step": 3490 | |
| }, | |
| { | |
| "epoch": 5.56, | |
| "grad_norm": 1.6563076972961426, | |
| "learning_rate": 4.140589569160998e-05, | |
| "loss": 0.3987, | |
| "step": 3500 | |
| }, | |
| { | |
| "epoch": 5.56, | |
| "eval_accuracy": 0.8341269841269842, | |
| "eval_loss": 0.5825861096382141, | |
| "eval_runtime": 98.7535, | |
| "eval_samples_per_second": 25.518, | |
| "eval_steps_per_second": 3.19, | |
| "step": 3500 | |
| }, | |
| { | |
| "epoch": 5.57, | |
| "grad_norm": 6.538485050201416, | |
| "learning_rate": 4.095238095238095e-05, | |
| "loss": 0.5583, | |
| "step": 3510 | |
| }, | |
| { | |
| "epoch": 5.59, | |
| "grad_norm": 4.381786823272705, | |
| "learning_rate": 4.049886621315193e-05, | |
| "loss": 0.408, | |
| "step": 3520 | |
| }, | |
| { | |
| "epoch": 5.6, | |
| "grad_norm": 3.696018695831299, | |
| "learning_rate": 4.00453514739229e-05, | |
| "loss": 0.4421, | |
| "step": 3530 | |
| }, | |
| { | |
| "epoch": 5.62, | |
| "grad_norm": 3.3752057552337646, | |
| "learning_rate": 3.9591836734693876e-05, | |
| "loss": 0.4057, | |
| "step": 3540 | |
| }, | |
| { | |
| "epoch": 5.63, | |
| "grad_norm": 6.889618396759033, | |
| "learning_rate": 3.9138321995464855e-05, | |
| "loss": 0.4479, | |
| "step": 3550 | |
| }, | |
| { | |
| "epoch": 5.65, | |
| "grad_norm": 5.5090131759643555, | |
| "learning_rate": 3.868480725623583e-05, | |
| "loss": 0.6009, | |
| "step": 3560 | |
| }, | |
| { | |
| "epoch": 5.67, | |
| "grad_norm": 2.368633985519409, | |
| "learning_rate": 3.8231292517006806e-05, | |
| "loss": 0.4276, | |
| "step": 3570 | |
| }, | |
| { | |
| "epoch": 5.68, | |
| "grad_norm": 4.297762393951416, | |
| "learning_rate": 3.777777777777778e-05, | |
| "loss": 0.4368, | |
| "step": 3580 | |
| }, | |
| { | |
| "epoch": 5.7, | |
| "grad_norm": 3.7877378463745117, | |
| "learning_rate": 3.732426303854876e-05, | |
| "loss": 0.3481, | |
| "step": 3590 | |
| }, | |
| { | |
| "epoch": 5.71, | |
| "grad_norm": 2.9667625427246094, | |
| "learning_rate": 3.687074829931973e-05, | |
| "loss": 0.4395, | |
| "step": 3600 | |
| }, | |
| { | |
| "epoch": 5.71, | |
| "eval_accuracy": 0.8384920634920635, | |
| "eval_loss": 0.5753714442253113, | |
| "eval_runtime": 97.0735, | |
| "eval_samples_per_second": 25.96, | |
| "eval_steps_per_second": 3.245, | |
| "step": 3600 | |
| }, | |
| { | |
| "epoch": 5.73, | |
| "grad_norm": 5.830836296081543, | |
| "learning_rate": 3.64172335600907e-05, | |
| "loss": 0.4859, | |
| "step": 3610 | |
| }, | |
| { | |
| "epoch": 5.75, | |
| "grad_norm": 5.262144088745117, | |
| "learning_rate": 3.596371882086168e-05, | |
| "loss": 0.4409, | |
| "step": 3620 | |
| }, | |
| { | |
| "epoch": 5.76, | |
| "grad_norm": 0.9806166291236877, | |
| "learning_rate": 3.551020408163265e-05, | |
| "loss": 0.3884, | |
| "step": 3630 | |
| }, | |
| { | |
| "epoch": 5.78, | |
| "grad_norm": 3.1864867210388184, | |
| "learning_rate": 3.505668934240363e-05, | |
| "loss": 0.3753, | |
| "step": 3640 | |
| }, | |
| { | |
| "epoch": 5.79, | |
| "grad_norm": 2.679213762283325, | |
| "learning_rate": 3.4603174603174604e-05, | |
| "loss": 0.3764, | |
| "step": 3650 | |
| }, | |
| { | |
| "epoch": 5.81, | |
| "grad_norm": 1.3040461540222168, | |
| "learning_rate": 3.4149659863945583e-05, | |
| "loss": 0.3632, | |
| "step": 3660 | |
| }, | |
| { | |
| "epoch": 5.83, | |
| "grad_norm": 6.803734302520752, | |
| "learning_rate": 3.3696145124716556e-05, | |
| "loss": 0.6695, | |
| "step": 3670 | |
| }, | |
| { | |
| "epoch": 5.84, | |
| "grad_norm": 3.717195510864258, | |
| "learning_rate": 3.324263038548753e-05, | |
| "loss": 0.4226, | |
| "step": 3680 | |
| }, | |
| { | |
| "epoch": 5.86, | |
| "grad_norm": 0.3749280273914337, | |
| "learning_rate": 3.278911564625851e-05, | |
| "loss": 0.4404, | |
| "step": 3690 | |
| }, | |
| { | |
| "epoch": 5.87, | |
| "grad_norm": 3.4548332691192627, | |
| "learning_rate": 3.233560090702948e-05, | |
| "loss": 0.4669, | |
| "step": 3700 | |
| }, | |
| { | |
| "epoch": 5.87, | |
| "eval_accuracy": 0.8357142857142857, | |
| "eval_loss": 0.5652737021446228, | |
| "eval_runtime": 98.0382, | |
| "eval_samples_per_second": 25.704, | |
| "eval_steps_per_second": 3.213, | |
| "step": 3700 | |
| }, | |
| { | |
| "epoch": 5.89, | |
| "grad_norm": 2.5312252044677734, | |
| "learning_rate": 3.188208616780046e-05, | |
| "loss": 0.4786, | |
| "step": 3710 | |
| }, | |
| { | |
| "epoch": 5.9, | |
| "grad_norm": 4.841963291168213, | |
| "learning_rate": 3.142857142857143e-05, | |
| "loss": 0.5179, | |
| "step": 3720 | |
| }, | |
| { | |
| "epoch": 5.92, | |
| "grad_norm": 4.394898414611816, | |
| "learning_rate": 3.097505668934241e-05, | |
| "loss": 0.5062, | |
| "step": 3730 | |
| }, | |
| { | |
| "epoch": 5.94, | |
| "grad_norm": 4.590970516204834, | |
| "learning_rate": 3.052154195011338e-05, | |
| "loss": 0.3639, | |
| "step": 3740 | |
| }, | |
| { | |
| "epoch": 5.95, | |
| "grad_norm": 4.6838459968566895, | |
| "learning_rate": 3.0068027210884354e-05, | |
| "loss": 0.3552, | |
| "step": 3750 | |
| }, | |
| { | |
| "epoch": 5.97, | |
| "grad_norm": 4.303285121917725, | |
| "learning_rate": 2.961451247165533e-05, | |
| "loss": 0.3526, | |
| "step": 3760 | |
| }, | |
| { | |
| "epoch": 5.98, | |
| "grad_norm": 6.06368350982666, | |
| "learning_rate": 2.9160997732426306e-05, | |
| "loss": 0.3266, | |
| "step": 3770 | |
| }, | |
| { | |
| "epoch": 6.0, | |
| "grad_norm": 4.715054988861084, | |
| "learning_rate": 2.870748299319728e-05, | |
| "loss": 0.4612, | |
| "step": 3780 | |
| }, | |
| { | |
| "epoch": 6.02, | |
| "grad_norm": 3.029750108718872, | |
| "learning_rate": 2.8253968253968253e-05, | |
| "loss": 0.3938, | |
| "step": 3790 | |
| }, | |
| { | |
| "epoch": 6.03, | |
| "grad_norm": 6.53373384475708, | |
| "learning_rate": 2.7800453514739233e-05, | |
| "loss": 0.4005, | |
| "step": 3800 | |
| }, | |
| { | |
| "epoch": 6.03, | |
| "eval_accuracy": 0.8376984126984127, | |
| "eval_loss": 0.542424201965332, | |
| "eval_runtime": 92.2864, | |
| "eval_samples_per_second": 27.306, | |
| "eval_steps_per_second": 3.413, | |
| "step": 3800 | |
| }, | |
| { | |
| "epoch": 6.05, | |
| "grad_norm": 2.6752915382385254, | |
| "learning_rate": 2.7346938775510205e-05, | |
| "loss": 0.311, | |
| "step": 3810 | |
| }, | |
| { | |
| "epoch": 6.06, | |
| "grad_norm": 5.5302863121032715, | |
| "learning_rate": 2.6893424036281177e-05, | |
| "loss": 0.4392, | |
| "step": 3820 | |
| }, | |
| { | |
| "epoch": 6.08, | |
| "grad_norm": 3.140334367752075, | |
| "learning_rate": 2.6439909297052156e-05, | |
| "loss": 0.3301, | |
| "step": 3830 | |
| }, | |
| { | |
| "epoch": 6.1, | |
| "grad_norm": 7.531820774078369, | |
| "learning_rate": 2.598639455782313e-05, | |
| "loss": 0.4208, | |
| "step": 3840 | |
| }, | |
| { | |
| "epoch": 6.11, | |
| "grad_norm": 4.852546215057373, | |
| "learning_rate": 2.5532879818594107e-05, | |
| "loss": 0.3858, | |
| "step": 3850 | |
| }, | |
| { | |
| "epoch": 6.13, | |
| "grad_norm": 6.866227149963379, | |
| "learning_rate": 2.507936507936508e-05, | |
| "loss": 0.4882, | |
| "step": 3860 | |
| }, | |
| { | |
| "epoch": 6.14, | |
| "grad_norm": 2.053617477416992, | |
| "learning_rate": 2.4625850340136055e-05, | |
| "loss": 0.3131, | |
| "step": 3870 | |
| }, | |
| { | |
| "epoch": 6.16, | |
| "grad_norm": 3.9806272983551025, | |
| "learning_rate": 2.417233560090703e-05, | |
| "loss": 0.4078, | |
| "step": 3880 | |
| }, | |
| { | |
| "epoch": 6.17, | |
| "grad_norm": 3.6454029083251953, | |
| "learning_rate": 2.3718820861678007e-05, | |
| "loss": 0.4362, | |
| "step": 3890 | |
| }, | |
| { | |
| "epoch": 6.19, | |
| "grad_norm": 7.963522434234619, | |
| "learning_rate": 2.326530612244898e-05, | |
| "loss": 0.4457, | |
| "step": 3900 | |
| }, | |
| { | |
| "epoch": 6.19, | |
| "eval_accuracy": 0.8392857142857143, | |
| "eval_loss": 0.5619771480560303, | |
| "eval_runtime": 91.8917, | |
| "eval_samples_per_second": 27.424, | |
| "eval_steps_per_second": 3.428, | |
| "step": 3900 | |
| }, | |
| { | |
| "epoch": 6.21, | |
| "grad_norm": 0.9147405624389648, | |
| "learning_rate": 2.2811791383219955e-05, | |
| "loss": 0.278, | |
| "step": 3910 | |
| }, | |
| { | |
| "epoch": 6.22, | |
| "grad_norm": 7.86151647567749, | |
| "learning_rate": 2.235827664399093e-05, | |
| "loss": 0.3866, | |
| "step": 3920 | |
| }, | |
| { | |
| "epoch": 6.24, | |
| "grad_norm": 5.7085371017456055, | |
| "learning_rate": 2.1904761904761906e-05, | |
| "loss": 0.3633, | |
| "step": 3930 | |
| }, | |
| { | |
| "epoch": 6.25, | |
| "grad_norm": 4.3009772300720215, | |
| "learning_rate": 2.145124716553288e-05, | |
| "loss": 0.3932, | |
| "step": 3940 | |
| }, | |
| { | |
| "epoch": 6.27, | |
| "grad_norm": 0.3253590166568756, | |
| "learning_rate": 2.0997732426303857e-05, | |
| "loss": 0.3084, | |
| "step": 3950 | |
| }, | |
| { | |
| "epoch": 6.29, | |
| "grad_norm": 4.880115509033203, | |
| "learning_rate": 2.0544217687074833e-05, | |
| "loss": 0.3723, | |
| "step": 3960 | |
| }, | |
| { | |
| "epoch": 6.3, | |
| "grad_norm": 5.515774250030518, | |
| "learning_rate": 2.0090702947845805e-05, | |
| "loss": 0.4312, | |
| "step": 3970 | |
| }, | |
| { | |
| "epoch": 6.32, | |
| "grad_norm": 4.72555685043335, | |
| "learning_rate": 1.963718820861678e-05, | |
| "loss": 0.2305, | |
| "step": 3980 | |
| }, | |
| { | |
| "epoch": 6.33, | |
| "grad_norm": 0.13027626276016235, | |
| "learning_rate": 1.9183673469387756e-05, | |
| "loss": 0.3037, | |
| "step": 3990 | |
| }, | |
| { | |
| "epoch": 6.35, | |
| "grad_norm": 4.166107654571533, | |
| "learning_rate": 1.8730158730158732e-05, | |
| "loss": 0.3693, | |
| "step": 4000 | |
| }, | |
| { | |
| "epoch": 6.35, | |
| "eval_accuracy": 0.8412698412698413, | |
| "eval_loss": 0.5411426424980164, | |
| "eval_runtime": 91.5694, | |
| "eval_samples_per_second": 27.52, | |
| "eval_steps_per_second": 3.44, | |
| "step": 4000 | |
| }, | |
| { | |
| "epoch": 6.37, | |
| "grad_norm": 3.1201322078704834, | |
| "learning_rate": 1.8276643990929708e-05, | |
| "loss": 0.3472, | |
| "step": 4010 | |
| }, | |
| { | |
| "epoch": 6.38, | |
| "grad_norm": 5.773028373718262, | |
| "learning_rate": 1.7823129251700683e-05, | |
| "loss": 0.376, | |
| "step": 4020 | |
| }, | |
| { | |
| "epoch": 6.4, | |
| "grad_norm": 4.617589473724365, | |
| "learning_rate": 1.736961451247166e-05, | |
| "loss": 0.4044, | |
| "step": 4030 | |
| }, | |
| { | |
| "epoch": 6.41, | |
| "grad_norm": 2.2319741249084473, | |
| "learning_rate": 1.691609977324263e-05, | |
| "loss": 0.2603, | |
| "step": 4040 | |
| }, | |
| { | |
| "epoch": 6.43, | |
| "grad_norm": 4.363328456878662, | |
| "learning_rate": 1.6462585034013607e-05, | |
| "loss": 0.3736, | |
| "step": 4050 | |
| }, | |
| { | |
| "epoch": 6.44, | |
| "grad_norm": 1.4275892972946167, | |
| "learning_rate": 1.6009070294784583e-05, | |
| "loss": 0.2221, | |
| "step": 4060 | |
| }, | |
| { | |
| "epoch": 6.46, | |
| "grad_norm": 1.1315315961837769, | |
| "learning_rate": 1.5555555555555555e-05, | |
| "loss": 0.2594, | |
| "step": 4070 | |
| }, | |
| { | |
| "epoch": 6.48, | |
| "grad_norm": 2.161555051803589, | |
| "learning_rate": 1.5102040816326532e-05, | |
| "loss": 0.3224, | |
| "step": 4080 | |
| }, | |
| { | |
| "epoch": 6.49, | |
| "grad_norm": 5.551158428192139, | |
| "learning_rate": 1.4648526077097508e-05, | |
| "loss": 0.2861, | |
| "step": 4090 | |
| }, | |
| { | |
| "epoch": 6.51, | |
| "grad_norm": 4.5997419357299805, | |
| "learning_rate": 1.419501133786848e-05, | |
| "loss": 0.2933, | |
| "step": 4100 | |
| }, | |
| { | |
| "epoch": 6.51, | |
| "eval_accuracy": 0.8484126984126984, | |
| "eval_loss": 0.5324992537498474, | |
| "eval_runtime": 92.004, | |
| "eval_samples_per_second": 27.39, | |
| "eval_steps_per_second": 3.424, | |
| "step": 4100 | |
| }, | |
| { | |
| "epoch": 6.52, | |
| "grad_norm": 4.983388900756836, | |
| "learning_rate": 1.3741496598639456e-05, | |
| "loss": 0.3076, | |
| "step": 4110 | |
| }, | |
| { | |
| "epoch": 6.54, | |
| "grad_norm": 7.082801818847656, | |
| "learning_rate": 1.3287981859410432e-05, | |
| "loss": 0.4137, | |
| "step": 4120 | |
| }, | |
| { | |
| "epoch": 6.56, | |
| "grad_norm": 5.25059700012207, | |
| "learning_rate": 1.2834467120181407e-05, | |
| "loss": 0.3036, | |
| "step": 4130 | |
| }, | |
| { | |
| "epoch": 6.57, | |
| "grad_norm": 4.526573181152344, | |
| "learning_rate": 1.2380952380952381e-05, | |
| "loss": 0.3122, | |
| "step": 4140 | |
| }, | |
| { | |
| "epoch": 6.59, | |
| "grad_norm": 3.637713670730591, | |
| "learning_rate": 1.1927437641723357e-05, | |
| "loss": 0.4162, | |
| "step": 4150 | |
| }, | |
| { | |
| "epoch": 6.6, | |
| "grad_norm": 7.545217514038086, | |
| "learning_rate": 1.147392290249433e-05, | |
| "loss": 0.5283, | |
| "step": 4160 | |
| }, | |
| { | |
| "epoch": 6.62, | |
| "grad_norm": 3.526754856109619, | |
| "learning_rate": 1.1020408163265306e-05, | |
| "loss": 0.4345, | |
| "step": 4170 | |
| }, | |
| { | |
| "epoch": 6.63, | |
| "grad_norm": 5.48629093170166, | |
| "learning_rate": 1.056689342403628e-05, | |
| "loss": 0.3755, | |
| "step": 4180 | |
| }, | |
| { | |
| "epoch": 6.65, | |
| "grad_norm": 3.289555788040161, | |
| "learning_rate": 1.0113378684807256e-05, | |
| "loss": 0.3603, | |
| "step": 4190 | |
| }, | |
| { | |
| "epoch": 6.67, | |
| "grad_norm": 3.701138496398926, | |
| "learning_rate": 9.659863945578232e-06, | |
| "loss": 0.2603, | |
| "step": 4200 | |
| }, | |
| { | |
| "epoch": 6.67, | |
| "eval_accuracy": 0.8476190476190476, | |
| "eval_loss": 0.5360472202301025, | |
| "eval_runtime": 94.6291, | |
| "eval_samples_per_second": 26.63, | |
| "eval_steps_per_second": 3.329, | |
| "step": 4200 | |
| }, | |
| { | |
| "epoch": 6.68, | |
| "grad_norm": 4.533103942871094, | |
| "learning_rate": 9.206349206349207e-06, | |
| "loss": 0.3483, | |
| "step": 4210 | |
| }, | |
| { | |
| "epoch": 6.7, | |
| "grad_norm": 6.598170280456543, | |
| "learning_rate": 8.752834467120181e-06, | |
| "loss": 0.4429, | |
| "step": 4220 | |
| }, | |
| { | |
| "epoch": 6.71, | |
| "grad_norm": 3.7935194969177246, | |
| "learning_rate": 8.299319727891157e-06, | |
| "loss": 0.2892, | |
| "step": 4230 | |
| }, | |
| { | |
| "epoch": 6.73, | |
| "grad_norm": 4.123006343841553, | |
| "learning_rate": 7.845804988662133e-06, | |
| "loss": 0.3482, | |
| "step": 4240 | |
| }, | |
| { | |
| "epoch": 6.75, | |
| "grad_norm": 0.6726520657539368, | |
| "learning_rate": 7.392290249433107e-06, | |
| "loss": 0.3813, | |
| "step": 4250 | |
| }, | |
| { | |
| "epoch": 6.76, | |
| "grad_norm": 7.85457181930542, | |
| "learning_rate": 6.938775510204082e-06, | |
| "loss": 0.3737, | |
| "step": 4260 | |
| }, | |
| { | |
| "epoch": 6.78, | |
| "grad_norm": 1.485037922859192, | |
| "learning_rate": 6.485260770975057e-06, | |
| "loss": 0.2864, | |
| "step": 4270 | |
| }, | |
| { | |
| "epoch": 6.79, | |
| "grad_norm": 2.535890817642212, | |
| "learning_rate": 6.031746031746032e-06, | |
| "loss": 0.2424, | |
| "step": 4280 | |
| }, | |
| { | |
| "epoch": 6.81, | |
| "grad_norm": 1.3501815795898438, | |
| "learning_rate": 5.578231292517007e-06, | |
| "loss": 0.3013, | |
| "step": 4290 | |
| }, | |
| { | |
| "epoch": 6.83, | |
| "grad_norm": 2.961240291595459, | |
| "learning_rate": 5.124716553287982e-06, | |
| "loss": 0.3364, | |
| "step": 4300 | |
| }, | |
| { | |
| "epoch": 6.83, | |
| "eval_accuracy": 0.8496031746031746, | |
| "eval_loss": 0.5302808284759521, | |
| "eval_runtime": 99.1164, | |
| "eval_samples_per_second": 25.425, | |
| "eval_steps_per_second": 3.178, | |
| "step": 4300 | |
| }, | |
| { | |
| "epoch": 6.84, | |
| "grad_norm": 3.593552589416504, | |
| "learning_rate": 4.671201814058957e-06, | |
| "loss": 0.3273, | |
| "step": 4310 | |
| }, | |
| { | |
| "epoch": 6.86, | |
| "grad_norm": 3.5346760749816895, | |
| "learning_rate": 4.217687074829932e-06, | |
| "loss": 0.3345, | |
| "step": 4320 | |
| }, | |
| { | |
| "epoch": 6.87, | |
| "grad_norm": 3.0319371223449707, | |
| "learning_rate": 3.764172335600907e-06, | |
| "loss": 0.4083, | |
| "step": 4330 | |
| }, | |
| { | |
| "epoch": 6.89, | |
| "grad_norm": 2.639801263809204, | |
| "learning_rate": 3.310657596371882e-06, | |
| "loss": 0.2986, | |
| "step": 4340 | |
| }, | |
| { | |
| "epoch": 6.9, | |
| "grad_norm": 1.1220003366470337, | |
| "learning_rate": 2.8571428571428573e-06, | |
| "loss": 0.3088, | |
| "step": 4350 | |
| }, | |
| { | |
| "epoch": 6.92, | |
| "grad_norm": 5.006831645965576, | |
| "learning_rate": 2.4036281179138325e-06, | |
| "loss": 0.4156, | |
| "step": 4360 | |
| }, | |
| { | |
| "epoch": 6.94, | |
| "grad_norm": 4.62202262878418, | |
| "learning_rate": 1.9501133786848073e-06, | |
| "loss": 0.3888, | |
| "step": 4370 | |
| }, | |
| { | |
| "epoch": 6.95, | |
| "grad_norm": 6.1172590255737305, | |
| "learning_rate": 1.4965986394557823e-06, | |
| "loss": 0.3449, | |
| "step": 4380 | |
| }, | |
| { | |
| "epoch": 6.97, | |
| "grad_norm": 6.193305969238281, | |
| "learning_rate": 1.0430839002267576e-06, | |
| "loss": 0.3253, | |
| "step": 4390 | |
| }, | |
| { | |
| "epoch": 6.98, | |
| "grad_norm": 1.3442612886428833, | |
| "learning_rate": 5.895691609977325e-07, | |
| "loss": 0.3639, | |
| "step": 4400 | |
| }, | |
| { | |
| "epoch": 6.98, | |
| "eval_accuracy": 0.8492063492063492, | |
| "eval_loss": 0.5315643548965454, | |
| "eval_runtime": 94.3134, | |
| "eval_samples_per_second": 26.719, | |
| "eval_steps_per_second": 3.34, | |
| "step": 4400 | |
| }, | |
| { | |
| "epoch": 7.0, | |
| "grad_norm": 4.398177146911621, | |
| "learning_rate": 1.3605442176870747e-07, | |
| "loss": 0.2582, | |
| "step": 4410 | |
| }, | |
| { | |
| "epoch": 7.0, | |
| "step": 4410, | |
| "total_flos": 5.468471871363809e+18, | |
| "train_loss": 0.7062997149772384, | |
| "train_runtime": 5016.6762, | |
| "train_samples_per_second": 14.065, | |
| "train_steps_per_second": 0.879 | |
| } | |
| ], | |
| "logging_steps": 10, | |
| "max_steps": 4410, | |
| "num_input_tokens_seen": 0, | |
| "num_train_epochs": 7, | |
| "save_steps": 100, | |
| "total_flos": 5.468471871363809e+18, | |
| "train_batch_size": 16, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |