{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 1890,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.010582010582010581,
      "grad_norm": 39.13801574707031,
      "learning_rate": 1.9952380952380953e-05,
      "loss": 5.4194,
      "step": 10
    },
    {
      "epoch": 0.021164021164021163,
      "grad_norm": 24.028564453125,
      "learning_rate": 1.98994708994709e-05,
      "loss": 1.773,
      "step": 20
    },
    {
      "epoch": 0.031746031746031744,
      "grad_norm": 16.863679885864258,
      "learning_rate": 1.9846560846560846e-05,
      "loss": 0.7073,
      "step": 30
    },
    {
      "epoch": 0.042328042328042326,
      "grad_norm": 10.789055824279785,
      "learning_rate": 1.9793650793650796e-05,
      "loss": 0.3436,
      "step": 40
    },
    {
      "epoch": 0.05291005291005291,
      "grad_norm": 8.038490295410156,
      "learning_rate": 1.9740740740740743e-05,
      "loss": 0.2155,
      "step": 50
    },
    {
      "epoch": 0.06349206349206349,
      "grad_norm": 4.677557945251465,
      "learning_rate": 1.968783068783069e-05,
      "loss": 0.1287,
      "step": 60
    },
    {
      "epoch": 0.07407407407407407,
      "grad_norm": 3.4516615867614746,
      "learning_rate": 1.9634920634920636e-05,
      "loss": 0.0824,
      "step": 70
    },
    {
      "epoch": 0.08465608465608465,
      "grad_norm": 1.6580854654312134,
      "learning_rate": 1.9582010582010586e-05,
      "loss": 0.0501,
      "step": 80
    },
    {
      "epoch": 0.09523809523809523,
      "grad_norm": 1.1410599946975708,
      "learning_rate": 1.9529100529100532e-05,
      "loss": 0.0316,
      "step": 90
    },
    {
      "epoch": 0.10582010582010581,
      "grad_norm": 1.6213871240615845,
      "learning_rate": 1.947619047619048e-05,
      "loss": 0.025,
      "step": 100
    },
    {
      "epoch": 0.1164021164021164,
      "grad_norm": 0.5977617502212524,
      "learning_rate": 1.9423280423280425e-05,
      "loss": 0.0153,
      "step": 110
    },
    {
      "epoch": 0.12698412698412698,
      "grad_norm": 2.150684118270874,
      "learning_rate": 1.9370370370370372e-05,
      "loss": 0.0162,
      "step": 120
    },
    {
      "epoch": 0.13756613756613756,
      "grad_norm": 0.3688974678516388,
      "learning_rate": 1.931746031746032e-05,
      "loss": 0.0116,
      "step": 130
    },
    {
      "epoch": 0.14814814814814814,
      "grad_norm": 10.174367904663086,
      "learning_rate": 1.9264550264550265e-05,
      "loss": 0.0149,
      "step": 140
    },
    {
      "epoch": 0.15873015873015872,
      "grad_norm": 0.2287941724061966,
      "learning_rate": 1.921164021164021e-05,
      "loss": 0.0068,
      "step": 150
    },
    {
      "epoch": 0.1693121693121693,
      "grad_norm": 0.7957000136375427,
      "learning_rate": 1.915873015873016e-05,
      "loss": 0.01,
      "step": 160
    },
    {
      "epoch": 0.17989417989417988,
      "grad_norm": 0.24841707944869995,
      "learning_rate": 1.9105820105820108e-05,
      "loss": 0.0161,
      "step": 170
    },
    {
      "epoch": 0.19047619047619047,
      "grad_norm": 1.3141834735870361,
      "learning_rate": 1.9052910052910055e-05,
      "loss": 0.0057,
      "step": 180
    },
    {
      "epoch": 0.20105820105820105,
      "grad_norm": 0.17035521566867828,
      "learning_rate": 1.9e-05,
      "loss": 0.012,
      "step": 190
    },
    {
      "epoch": 0.21164021164021163,
      "grad_norm": 3.853283405303955,
      "learning_rate": 1.894708994708995e-05,
      "loss": 0.0148,
      "step": 200
    },
    {
      "epoch": 0.2222222222222222,
      "grad_norm": 0.13835227489471436,
      "learning_rate": 1.8894179894179898e-05,
      "loss": 0.0139,
      "step": 210
    },
    {
      "epoch": 0.2328042328042328,
      "grad_norm": 0.3647116422653198,
      "learning_rate": 1.8841269841269844e-05,
      "loss": 0.0117,
      "step": 220
    },
    {
      "epoch": 0.24338624338624337,
      "grad_norm": 4.365599632263184,
      "learning_rate": 1.878835978835979e-05,
      "loss": 0.0102,
      "step": 230
    },
    {
      "epoch": 0.25396825396825395,
      "grad_norm": 0.1187911406159401,
      "learning_rate": 1.8735449735449737e-05,
      "loss": 0.0053,
      "step": 240
    },
    {
      "epoch": 0.26455026455026454,
      "grad_norm": 5.611083507537842,
      "learning_rate": 1.8682539682539684e-05,
      "loss": 0.0052,
      "step": 250
    },
    {
      "epoch": 0.2751322751322751,
      "grad_norm": 1.2097302675247192,
      "learning_rate": 1.862962962962963e-05,
      "loss": 0.0066,
      "step": 260
    },
    {
      "epoch": 0.2857142857142857,
      "grad_norm": 0.08928469568490982,
      "learning_rate": 1.8576719576719577e-05,
      "loss": 0.0183,
      "step": 270
    },
    {
      "epoch": 0.2962962962962963,
      "grad_norm": 1.7315622568130493,
      "learning_rate": 1.8523809523809527e-05,
      "loss": 0.0049,
      "step": 280
    },
    {
      "epoch": 0.30687830687830686,
      "grad_norm": 0.10508707910776138,
      "learning_rate": 1.8470899470899473e-05,
      "loss": 0.0174,
      "step": 290
    },
    {
      "epoch": 0.31746031746031744,
      "grad_norm": 0.08319881558418274,
      "learning_rate": 1.841798941798942e-05,
      "loss": 0.0068,
      "step": 300
    },
    {
      "epoch": 0.328042328042328,
      "grad_norm": 3.9523701667785645,
      "learning_rate": 1.8365079365079366e-05,
      "loss": 0.0545,
      "step": 310
    },
    {
      "epoch": 0.3386243386243386,
      "grad_norm": 0.11226499825716019,
      "learning_rate": 1.8312169312169313e-05,
      "loss": 0.0066,
      "step": 320
    },
    {
      "epoch": 0.3492063492063492,
      "grad_norm": 1.6739437580108643,
      "learning_rate": 1.825925925925926e-05,
      "loss": 0.0075,
      "step": 330
    },
    {
      "epoch": 0.35978835978835977,
      "grad_norm": 4.503218173980713,
      "learning_rate": 1.8206349206349206e-05,
      "loss": 0.0051,
      "step": 340
    },
    {
      "epoch": 0.37037037037037035,
      "grad_norm": 0.07483673095703125,
      "learning_rate": 1.8153439153439152e-05,
      "loss": 0.0035,
      "step": 350
    },
    {
      "epoch": 0.38095238095238093,
      "grad_norm": 0.06276049464941025,
      "learning_rate": 1.8100529100529102e-05,
      "loss": 0.0368,
      "step": 360
    },
    {
      "epoch": 0.3915343915343915,
      "grad_norm": 3.8753137588500977,
      "learning_rate": 1.804761904761905e-05,
      "loss": 0.0103,
      "step": 370
    },
    {
      "epoch": 0.4021164021164021,
      "grad_norm": 0.8073577880859375,
      "learning_rate": 1.7994708994708995e-05,
      "loss": 0.0112,
      "step": 380
    },
    {
      "epoch": 0.4126984126984127,
      "grad_norm": 5.514145374298096,
      "learning_rate": 1.7941798941798942e-05,
      "loss": 0.0038,
      "step": 390
    },
    {
      "epoch": 0.42328042328042326,
      "grad_norm": 0.3400741517543793,
      "learning_rate": 1.7888888888888892e-05,
      "loss": 0.0027,
      "step": 400
    },
    {
      "epoch": 0.43386243386243384,
      "grad_norm": 6.407121181488037,
      "learning_rate": 1.783597883597884e-05,
      "loss": 0.0061,
      "step": 410
    },
    {
      "epoch": 0.4444444444444444,
      "grad_norm": 0.0525721050798893,
      "learning_rate": 1.7783068783068785e-05,
      "loss": 0.0031,
      "step": 420
    },
    {
      "epoch": 0.455026455026455,
      "grad_norm": 9.535446166992188,
      "learning_rate": 1.773015873015873e-05,
      "loss": 0.006,
      "step": 430
    },
    {
      "epoch": 0.4656084656084656,
      "grad_norm": 0.08640318363904953,
      "learning_rate": 1.7677248677248678e-05,
      "loss": 0.0062,
      "step": 440
    },
    {
      "epoch": 0.47619047619047616,
      "grad_norm": 0.06083179637789726,
      "learning_rate": 1.7624338624338625e-05,
      "loss": 0.0036,
      "step": 450
    },
    {
      "epoch": 0.48677248677248675,
      "grad_norm": 0.0371096171438694,
      "learning_rate": 1.757142857142857e-05,
      "loss": 0.0009,
      "step": 460
    },
    {
      "epoch": 0.4973544973544973,
      "grad_norm": 0.7781650424003601,
      "learning_rate": 1.7518518518518518e-05,
      "loss": 0.0113,
      "step": 470
    },
    {
      "epoch": 0.5079365079365079,
      "grad_norm": 0.3946099877357483,
      "learning_rate": 1.7465608465608468e-05,
      "loss": 0.004,
      "step": 480
    },
    {
      "epoch": 0.5185185185185185,
      "grad_norm": 0.039193294942379,
      "learning_rate": 1.7412698412698414e-05,
      "loss": 0.0013,
      "step": 490
    },
    {
      "epoch": 0.5291005291005291,
      "grad_norm": 0.037502456456422806,
      "learning_rate": 1.735978835978836e-05,
      "loss": 0.003,
      "step": 500
    },
    {
      "epoch": 0.5396825396825397,
      "grad_norm": 0.03162345662713051,
      "learning_rate": 1.7306878306878307e-05,
      "loss": 0.0011,
      "step": 510
    },
    {
      "epoch": 0.5502645502645502,
      "grad_norm": 0.026782995089888573,
      "learning_rate": 1.7253968253968257e-05,
      "loss": 0.0006,
      "step": 520
    },
    {
      "epoch": 0.5608465608465608,
      "grad_norm": 0.027304725721478462,
      "learning_rate": 1.7201058201058204e-05,
      "loss": 0.0024,
      "step": 530
    },
    {
      "epoch": 0.5714285714285714,
      "grad_norm": 0.045405562967061996,
      "learning_rate": 1.714814814814815e-05,
      "loss": 0.0079,
      "step": 540
    },
    {
      "epoch": 0.582010582010582,
      "grad_norm": 0.026607122272253036,
      "learning_rate": 1.7095238095238097e-05,
      "loss": 0.0024,
      "step": 550
    },
    {
      "epoch": 0.5925925925925926,
      "grad_norm": 0.1648360937833786,
      "learning_rate": 1.7042328042328043e-05,
      "loss": 0.0038,
      "step": 560
    },
    {
      "epoch": 0.6031746031746031,
      "grad_norm": 0.2880645990371704,
      "learning_rate": 1.698941798941799e-05,
      "loss": 0.0069,
      "step": 570
    },
    {
      "epoch": 0.6137566137566137,
      "grad_norm": 0.08049669116735458,
      "learning_rate": 1.6936507936507936e-05,
      "loss": 0.0091,
      "step": 580
    },
    {
      "epoch": 0.6243386243386243,
      "grad_norm": 0.16391621530056,
      "learning_rate": 1.6883597883597883e-05,
      "loss": 0.003,
      "step": 590
    },
    {
      "epoch": 0.6349206349206349,
      "grad_norm": 0.31218671798706055,
      "learning_rate": 1.6830687830687833e-05,
      "loss": 0.0009,
      "step": 600
    },
    {
      "epoch": 0.6455026455026455,
      "grad_norm": 0.02184475213289261,
      "learning_rate": 1.677777777777778e-05,
      "loss": 0.0133,
      "step": 610
    },
    {
      "epoch": 0.656084656084656,
      "grad_norm": 0.025240115821361542,
      "learning_rate": 1.6724867724867726e-05,
      "loss": 0.0013,
      "step": 620
    },
    {
      "epoch": 0.6666666666666666,
      "grad_norm": 0.021431762725114822,
      "learning_rate": 1.6671957671957672e-05,
      "loss": 0.0164,
      "step": 630
    },
    {
      "epoch": 0.6772486772486772,
      "grad_norm": 0.3141108751296997,
      "learning_rate": 1.6619047619047622e-05,
      "loss": 0.0026,
      "step": 640
    },
    {
      "epoch": 0.6878306878306878,
      "grad_norm": 0.021496519446372986,
      "learning_rate": 1.656613756613757e-05,
      "loss": 0.0023,
      "step": 650
    },
    {
      "epoch": 0.6984126984126984,
      "grad_norm": 0.06723565608263016,
      "learning_rate": 1.6513227513227515e-05,
      "loss": 0.0019,
      "step": 660
    },
    {
      "epoch": 0.708994708994709,
      "grad_norm": 0.02188393473625183,
      "learning_rate": 1.6460317460317462e-05,
      "loss": 0.0036,
      "step": 670
    },
    {
      "epoch": 0.7195767195767195,
      "grad_norm": 0.07254693657159805,
      "learning_rate": 1.640740740740741e-05,
      "loss": 0.0061,
      "step": 680
    },
    {
      "epoch": 0.7301587301587301,
      "grad_norm": 3.8045756816864014,
      "learning_rate": 1.6354497354497355e-05,
      "loss": 0.0041,
      "step": 690
    },
    {
      "epoch": 0.7407407407407407,
      "grad_norm": 0.019957900047302246,
      "learning_rate": 1.63015873015873e-05,
      "loss": 0.0005,
      "step": 700
    },
    {
      "epoch": 0.7513227513227513,
      "grad_norm": 0.017905883491039276,
      "learning_rate": 1.6248677248677248e-05,
      "loss": 0.0099,
      "step": 710
    },
    {
      "epoch": 0.7619047619047619,
      "grad_norm": 0.580219030380249,
      "learning_rate": 1.6195767195767198e-05,
      "loss": 0.0033,
      "step": 720
    },
    {
      "epoch": 0.7724867724867724,
      "grad_norm": 0.0343511626124382,
      "learning_rate": 1.6142857142857145e-05,
      "loss": 0.0057,
      "step": 730
    },
    {
      "epoch": 0.783068783068783,
      "grad_norm": 0.6010801196098328,
      "learning_rate": 1.608994708994709e-05,
      "loss": 0.0009,
      "step": 740
    },
    {
      "epoch": 0.7936507936507936,
      "grad_norm": 0.020727235823869705,
      "learning_rate": 1.6037037037037038e-05,
      "loss": 0.0024,
      "step": 750
    },
    {
      "epoch": 0.8042328042328042,
      "grad_norm": 5.676093101501465,
      "learning_rate": 1.5984126984126988e-05,
      "loss": 0.0052,
      "step": 760
    },
    {
      "epoch": 0.8148148148148148,
      "grad_norm": 0.22317780554294586,
      "learning_rate": 1.5931216931216934e-05,
      "loss": 0.0063,
      "step": 770
    },
    {
      "epoch": 0.8253968253968254,
      "grad_norm": 0.983028769493103,
      "learning_rate": 1.587830687830688e-05,
      "loss": 0.0046,
      "step": 780
    },
    {
      "epoch": 0.8359788359788359,
      "grad_norm": 0.018990248441696167,
      "learning_rate": 1.5825396825396827e-05,
      "loss": 0.0044,
      "step": 790
    },
    {
      "epoch": 0.8465608465608465,
      "grad_norm": 6.899256706237793,
      "learning_rate": 1.5772486772486774e-05,
      "loss": 0.004,
      "step": 800
    },
    {
      "epoch": 0.8571428571428571,
      "grad_norm": 4.200356483459473,
      "learning_rate": 1.571957671957672e-05,
      "loss": 0.0046,
      "step": 810
    },
    {
      "epoch": 0.8677248677248677,
      "grad_norm": 2.085758686065674,
      "learning_rate": 1.5666666666666667e-05,
      "loss": 0.0029,
      "step": 820
    },
    {
      "epoch": 0.8783068783068783,
      "grad_norm": 0.07632625102996826,
      "learning_rate": 1.5613756613756613e-05,
      "loss": 0.0054,
      "step": 830
    },
    {
      "epoch": 0.8888888888888888,
      "grad_norm": 5.698914051055908,
      "learning_rate": 1.5560846560846563e-05,
      "loss": 0.0037,
      "step": 840
    },
    {
      "epoch": 0.8994708994708994,
      "grad_norm": 0.16661055386066437,
      "learning_rate": 1.550793650793651e-05,
      "loss": 0.0033,
      "step": 850
    },
    {
      "epoch": 0.91005291005291,
      "grad_norm": 0.061884596943855286,
      "learning_rate": 1.5455026455026456e-05,
      "loss": 0.0021,
      "step": 860
    },
    {
      "epoch": 0.9206349206349206,
      "grad_norm": 1.1297595500946045,
      "learning_rate": 1.5402116402116403e-05,
      "loss": 0.0022,
      "step": 870
    },
    {
      "epoch": 0.9312169312169312,
      "grad_norm": 0.3115411698818207,
      "learning_rate": 1.5349206349206353e-05,
      "loss": 0.0052,
      "step": 880
    },
    {
      "epoch": 0.9417989417989417,
      "grad_norm": 2.6712639331817627,
      "learning_rate": 1.52962962962963e-05,
      "loss": 0.0029,
      "step": 890
    },
    {
      "epoch": 0.9523809523809523,
      "grad_norm": 0.01280083879828453,
      "learning_rate": 1.5243386243386244e-05,
      "loss": 0.0005,
      "step": 900
    },
    {
      "epoch": 0.9629629629629629,
      "grad_norm": 5.4594645500183105,
      "learning_rate": 1.519047619047619e-05,
      "loss": 0.007,
      "step": 910
    },
    {
      "epoch": 0.9735449735449735,
      "grad_norm": 0.01234627328813076,
      "learning_rate": 1.5137566137566139e-05,
      "loss": 0.007,
      "step": 920
    },
    {
      "epoch": 0.9841269841269841,
      "grad_norm": 0.12818680703639984,
      "learning_rate": 1.5084656084656086e-05,
      "loss": 0.0005,
      "step": 930
    },
    {
      "epoch": 0.9947089947089947,
      "grad_norm": 0.010110250674188137,
      "learning_rate": 1.5031746031746032e-05,
      "loss": 0.0011,
      "step": 940
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.03604938089847565,
      "eval_runtime": 462.3812,
      "eval_samples_per_second": 0.227,
      "eval_steps_per_second": 0.115,
      "step": 945
    },
    {
      "epoch": 1.0052910052910053,
      "grad_norm": 0.012420996092259884,
      "learning_rate": 1.4978835978835979e-05,
      "loss": 0.0003,
      "step": 950
    },
    {
      "epoch": 1.0158730158730158,
      "grad_norm": 0.010969682596623898,
      "learning_rate": 1.4925925925925929e-05,
      "loss": 0.0002,
      "step": 960
    },
    {
      "epoch": 1.0264550264550265,
      "grad_norm": 0.009813076816499233,
      "learning_rate": 1.4873015873015875e-05,
      "loss": 0.0064,
      "step": 970
    },
    {
      "epoch": 1.037037037037037,
      "grad_norm": 0.08147811144590378,
      "learning_rate": 1.4820105820105822e-05,
      "loss": 0.0009,
      "step": 980
    },
    {
      "epoch": 1.0476190476190477,
      "grad_norm": 0.010766289196908474,
      "learning_rate": 1.4767195767195768e-05,
      "loss": 0.0025,
      "step": 990
    },
    {
      "epoch": 1.0582010582010581,
      "grad_norm": 0.009957353584468365,
      "learning_rate": 1.4714285714285716e-05,
      "loss": 0.0032,
      "step": 1000
    },
    {
      "epoch": 1.0687830687830688,
      "grad_norm": 7.969147682189941,
      "learning_rate": 1.4661375661375663e-05,
      "loss": 0.0028,
      "step": 1010
    },
    {
      "epoch": 1.0793650793650793,
      "grad_norm": 4.326008319854736,
      "learning_rate": 1.460846560846561e-05,
      "loss": 0.0048,
      "step": 1020
    },
    {
      "epoch": 1.08994708994709,
      "grad_norm": 0.00999133475124836,
      "learning_rate": 1.4555555555555556e-05,
      "loss": 0.0026,
      "step": 1030
    },
    {
      "epoch": 1.1005291005291005,
      "grad_norm": 0.010022982954978943,
      "learning_rate": 1.4502645502645504e-05,
      "loss": 0.0084,
      "step": 1040
    },
    {
      "epoch": 1.1111111111111112,
      "grad_norm": 0.013834713026881218,
      "learning_rate": 1.444973544973545e-05,
      "loss": 0.0043,
      "step": 1050
    },
    {
      "epoch": 1.1216931216931216,
      "grad_norm": 0.011380772106349468,
      "learning_rate": 1.4396825396825397e-05,
      "loss": 0.0352,
      "step": 1060
    },
    {
      "epoch": 1.1322751322751323,
      "grad_norm": 0.07020097970962524,
      "learning_rate": 1.4343915343915344e-05,
      "loss": 0.0018,
      "step": 1070
    },
    {
      "epoch": 1.1428571428571428,
      "grad_norm": 0.02143331617116928,
      "learning_rate": 1.4291005291005294e-05,
      "loss": 0.0017,
      "step": 1080
    },
    {
      "epoch": 1.1534391534391535,
      "grad_norm": 3.8044309616088867,
      "learning_rate": 1.423809523809524e-05,
      "loss": 0.0009,
      "step": 1090
    },
    {
      "epoch": 1.164021164021164,
      "grad_norm": 0.010620811954140663,
      "learning_rate": 1.4185185185185187e-05,
      "loss": 0.0022,
      "step": 1100
    },
    {
      "epoch": 1.1746031746031746,
      "grad_norm": 0.013597151264548302,
      "learning_rate": 1.4132275132275133e-05,
      "loss": 0.0002,
      "step": 1110
    },
    {
      "epoch": 1.1851851851851851,
      "grad_norm": 0.009412454441189766,
      "learning_rate": 1.4079365079365082e-05,
      "loss": 0.003,
      "step": 1120
    },
    {
      "epoch": 1.1957671957671958,
      "grad_norm": 9.870640754699707,
      "learning_rate": 1.4026455026455028e-05,
      "loss": 0.0051,
      "step": 1130
    },
    {
      "epoch": 1.2063492063492063,
      "grad_norm": 0.15585137903690338,
      "learning_rate": 1.3973544973544975e-05,
      "loss": 0.0005,
      "step": 1140
    },
    {
      "epoch": 1.216931216931217,
      "grad_norm": 0.011549165472388268,
      "learning_rate": 1.3920634920634921e-05,
      "loss": 0.0017,
      "step": 1150
    },
    {
      "epoch": 1.2275132275132274,
      "grad_norm": 0.022659868001937866,
      "learning_rate": 1.386772486772487e-05,
      "loss": 0.0076,
      "step": 1160
    },
    {
      "epoch": 1.2380952380952381,
      "grad_norm": 1.041445255279541,
      "learning_rate": 1.3814814814814816e-05,
      "loss": 0.0011,
      "step": 1170
    },
    {
      "epoch": 1.2486772486772486,
      "grad_norm": 0.00932463351637125,
      "learning_rate": 1.3761904761904763e-05,
      "loss": 0.0024,
      "step": 1180
    },
    {
      "epoch": 1.2592592592592593,
      "grad_norm": 0.0319746658205986,
      "learning_rate": 1.3708994708994709e-05,
      "loss": 0.0012,
      "step": 1190
    },
    {
      "epoch": 1.2698412698412698,
      "grad_norm": 0.0502803809940815,
      "learning_rate": 1.3656084656084659e-05,
      "loss": 0.0173,
      "step": 1200
    },
    {
      "epoch": 1.2804232804232805,
      "grad_norm": 3.397770881652832,
      "learning_rate": 1.3603174603174606e-05,
      "loss": 0.0135,
      "step": 1210
    },
    {
      "epoch": 1.291005291005291,
      "grad_norm": 0.014460274949669838,
      "learning_rate": 1.3550264550264552e-05,
      "loss": 0.0008,
      "step": 1220
    },
    {
      "epoch": 1.3015873015873016,
      "grad_norm": 0.04834635928273201,
      "learning_rate": 1.3497354497354499e-05,
      "loss": 0.0053,
      "step": 1230
    },
    {
      "epoch": 1.312169312169312,
      "grad_norm": 0.09482118487358093,
      "learning_rate": 1.3444444444444447e-05,
      "loss": 0.0017,
      "step": 1240
    },
    {
      "epoch": 1.3227513227513228,
      "grad_norm": 0.007359776180237532,
      "learning_rate": 1.3391534391534393e-05,
      "loss": 0.0003,
      "step": 1250
    },
    {
      "epoch": 1.3333333333333333,
      "grad_norm": 0.013864404521882534,
      "learning_rate": 1.333862433862434e-05,
      "loss": 0.0003,
      "step": 1260
    },
    {
      "epoch": 1.343915343915344,
      "grad_norm": 1.8579610586166382,
      "learning_rate": 1.3285714285714287e-05,
      "loss": 0.0014,
      "step": 1270
    },
    {
      "epoch": 1.3544973544973544,
      "grad_norm": 0.007739638909697533,
      "learning_rate": 1.3232804232804235e-05,
      "loss": 0.0005,
      "step": 1280
    },
    {
      "epoch": 1.3650793650793651,
      "grad_norm": 0.010888740420341492,
      "learning_rate": 1.3179894179894181e-05,
      "loss": 0.0003,
      "step": 1290
    },
    {
      "epoch": 1.3756613756613756,
      "grad_norm": 0.007155210245400667,
      "learning_rate": 1.3126984126984128e-05,
      "loss": 0.0093,
      "step": 1300
    },
    {
      "epoch": 1.3862433862433863,
      "grad_norm": 0.014670108444988728,
      "learning_rate": 1.3074074074074074e-05,
      "loss": 0.0056,
      "step": 1310
    },
    {
      "epoch": 1.3968253968253967,
      "grad_norm": 0.007492662873119116,
      "learning_rate": 1.3021164021164023e-05,
      "loss": 0.0031,
      "step": 1320
    },
    {
      "epoch": 1.4074074074074074,
      "grad_norm": 4.942596912384033,
      "learning_rate": 1.296825396825397e-05,
      "loss": 0.0026,
      "step": 1330
    },
    {
      "epoch": 1.417989417989418,
      "grad_norm": 0.007958627305924892,
      "learning_rate": 1.2915343915343916e-05,
      "loss": 0.0089,
      "step": 1340
    },
    {
      "epoch": 1.4285714285714286,
      "grad_norm": 2.9070186614990234,
      "learning_rate": 1.2862433862433862e-05,
      "loss": 0.0021,
      "step": 1350
    },
    {
      "epoch": 1.439153439153439,
      "grad_norm": 0.9513811469078064,
      "learning_rate": 1.2809523809523809e-05,
      "loss": 0.0011,
      "step": 1360
    },
    {
      "epoch": 1.4497354497354498,
      "grad_norm": 0.0836050733923912,
      "learning_rate": 1.2756613756613759e-05,
      "loss": 0.0089,
      "step": 1370
    },
    {
      "epoch": 1.4603174603174602,
      "grad_norm": 0.03550349175930023,
      "learning_rate": 1.2703703703703705e-05,
      "loss": 0.003,
      "step": 1380
    },
    {
      "epoch": 1.470899470899471,
      "grad_norm": 0.8934294581413269,
      "learning_rate": 1.2650793650793652e-05,
      "loss": 0.0032,
      "step": 1390
    },
    {
      "epoch": 1.4814814814814814,
      "grad_norm": 0.006408127024769783,
      "learning_rate": 1.2597883597883598e-05,
      "loss": 0.0088,
      "step": 1400
    },
    {
      "epoch": 1.492063492063492,
      "grad_norm": 0.15951858460903168,
      "learning_rate": 1.2544973544973547e-05,
      "loss": 0.0008,
      "step": 1410
    },
    {
      "epoch": 1.5026455026455028,
      "grad_norm": 0.011819873005151749,
      "learning_rate": 1.2492063492063493e-05,
      "loss": 0.0285,
      "step": 1420
    },
    {
      "epoch": 1.5132275132275133,
      "grad_norm": 0.005755511112511158,
      "learning_rate": 1.243915343915344e-05,
      "loss": 0.0006,
      "step": 1430
    },
    {
      "epoch": 1.5238095238095237,
      "grad_norm": 0.0067981211468577385,
      "learning_rate": 1.2386243386243386e-05,
      "loss": 0.0023,
      "step": 1440
    },
    {
      "epoch": 1.5343915343915344,
      "grad_norm": 0.0054528010077774525,
      "learning_rate": 1.2333333333333334e-05,
      "loss": 0.0002,
      "step": 1450
    },
    {
      "epoch": 1.544973544973545,
      "grad_norm": 0.00653385603800416,
      "learning_rate": 1.2280423280423281e-05,
      "loss": 0.0057,
      "step": 1460
    },
    {
      "epoch": 1.5555555555555556,
      "grad_norm": 0.0077536082826554775,
      "learning_rate": 1.2227513227513227e-05,
      "loss": 0.0002,
      "step": 1470
    },
    {
      "epoch": 1.566137566137566,
      "grad_norm": 0.0057713366113603115,
      "learning_rate": 1.2174603174603174e-05,
      "loss": 0.0004,
      "step": 1480
    },
    {
      "epoch": 1.5767195767195767,
      "grad_norm": 0.019029010087251663,
      "learning_rate": 1.2121693121693124e-05,
      "loss": 0.0053,
      "step": 1490
    },
    {
      "epoch": 1.5873015873015874,
      "grad_norm": 0.783202052116394,
      "learning_rate": 1.206878306878307e-05,
      "loss": 0.0014,
      "step": 1500
    },
    {
      "epoch": 1.597883597883598,
      "grad_norm": 0.18570241332054138,
      "learning_rate": 1.2015873015873017e-05,
      "loss": 0.0019,
      "step": 1510
    },
    {
      "epoch": 1.6084656084656084,
      "grad_norm": 3.245318651199341,
      "learning_rate": 1.1962962962962964e-05,
      "loss": 0.0061,
      "step": 1520
    },
    {
      "epoch": 1.619047619047619,
      "grad_norm": 0.009805947542190552,
      "learning_rate": 1.1910052910052912e-05,
      "loss": 0.0008,
      "step": 1530
    },
    {
      "epoch": 1.6296296296296298,
      "grad_norm": 0.1963416039943695,
      "learning_rate": 1.1857142857142858e-05,
      "loss": 0.0008,
      "step": 1540
    },
    {
      "epoch": 1.6402116402116402,
      "grad_norm": 0.03911126032471657,
      "learning_rate": 1.1804232804232805e-05,
      "loss": 0.0015,
      "step": 1550
    },
    {
      "epoch": 1.6507936507936507,
      "grad_norm": 0.12514500319957733,
      "learning_rate": 1.1751322751322751e-05,
      "loss": 0.0002,
      "step": 1560
    },
    {
      "epoch": 1.6613756613756614,
      "grad_norm": 0.005236633587628603,
      "learning_rate": 1.16984126984127e-05,
      "loss": 0.0037,
      "step": 1570
    },
    {
      "epoch": 1.671957671957672,
      "grad_norm": 0.004985412582755089,
      "learning_rate": 1.1645502645502646e-05,
      "loss": 0.0025,
      "step": 1580
    },
    {
      "epoch": 1.6825396825396826,
      "grad_norm": 0.008958135731518269,
      "learning_rate": 1.1592592592592593e-05,
      "loss": 0.0001,
      "step": 1590
    },
    {
      "epoch": 1.693121693121693,
      "grad_norm": 0.050911322236061096,
      "learning_rate": 1.153968253968254e-05,
      "loss": 0.0018,
      "step": 1600
    },
    {
      "epoch": 1.7037037037037037,
      "grad_norm": 0.8171196579933167,
      "learning_rate": 1.148677248677249e-05,
      "loss": 0.0006,
      "step": 1610
    },
    {
      "epoch": 1.7142857142857144,
      "grad_norm": 0.00528077082708478,
      "learning_rate": 1.1433862433862436e-05,
      "loss": 0.0001,
      "step": 1620
    },
    {
      "epoch": 1.7248677248677249,
      "grad_norm": 1.8344557285308838,
      "learning_rate": 1.1380952380952382e-05,
      "loss": 0.0012,
      "step": 1630
    },
    {
      "epoch": 1.7354497354497354,
      "grad_norm": 0.1585739254951477,
      "learning_rate": 1.1328042328042327e-05,
      "loss": 0.0002,
      "step": 1640
    },
    {
      "epoch": 1.746031746031746,
      "grad_norm": 12.171996116638184,
      "learning_rate": 1.1275132275132277e-05,
      "loss": 0.0022,
      "step": 1650
    },
    {
      "epoch": 1.7566137566137567,
      "grad_norm": 2.625952959060669,
      "learning_rate": 1.1222222222222224e-05,
      "loss": 0.0063,
      "step": 1660
    },
    {
      "epoch": 1.7671957671957672,
      "grad_norm": 0.4654531180858612,
      "learning_rate": 1.116931216931217e-05,
      "loss": 0.0025,
      "step": 1670
    },
    {
      "epoch": 1.7777777777777777,
      "grad_norm": 3.298426628112793,
      "learning_rate": 1.1116402116402117e-05,
      "loss": 0.0042,
      "step": 1680
    },
    {
      "epoch": 1.7883597883597884,
      "grad_norm": 0.06672941148281097,
      "learning_rate": 1.1063492063492065e-05,
      "loss": 0.0002,
      "step": 1690
    },
    {
      "epoch": 1.798941798941799,
      "grad_norm": 0.9771575927734375,
      "learning_rate": 1.1010582010582011e-05,
      "loss": 0.0019,
      "step": 1700
    },
    {
      "epoch": 1.8095238095238095,
      "grad_norm": 0.1068960428237915,
      "learning_rate": 1.0957671957671958e-05,
      "loss": 0.0039,
      "step": 1710
    },
    {
      "epoch": 1.82010582010582,
      "grad_norm": 0.0055111791007220745,
      "learning_rate": 1.0904761904761905e-05,
      "loss": 0.0014,
      "step": 1720
    },
    {
      "epoch": 1.8306878306878307,
      "grad_norm": 0.34078916907310486,
      "learning_rate": 1.0851851851851853e-05,
      "loss": 0.0007,
      "step": 1730
    },
    {
      "epoch": 1.8412698412698414,
      "grad_norm": 0.02196904458105564,
      "learning_rate": 1.07989417989418e-05,
      "loss": 0.0007,
      "step": 1740
    },
    {
      "epoch": 1.8518518518518519,
      "grad_norm": 0.004520466551184654,
      "learning_rate": 1.0746031746031746e-05,
      "loss": 0.0007,
      "step": 1750
    },
    {
      "epoch": 1.8624338624338623,
      "grad_norm": 0.13541559875011444,
      "learning_rate": 1.0693121693121692e-05,
      "loss": 0.0023,
      "step": 1760
    },
    {
      "epoch": 1.873015873015873,
      "grad_norm": 0.6529462337493896,
      "learning_rate": 1.0640211640211642e-05,
      "loss": 0.0012,
      "step": 1770
    },
    {
      "epoch": 1.8835978835978837,
      "grad_norm": 0.04629633575677872,
      "learning_rate": 1.0587301587301589e-05,
      "loss": 0.0482,
      "step": 1780
    },
    {
      "epoch": 1.8941798941798942,
      "grad_norm": 0.2736707925796509,
      "learning_rate": 1.0534391534391535e-05,
      "loss": 0.0045,
      "step": 1790
    },
    {
      "epoch": 1.9047619047619047,
      "grad_norm": 0.010693887248635292,
      "learning_rate": 1.0481481481481482e-05,
      "loss": 0.0118,
      "step": 1800
    },
    {
      "epoch": 1.9153439153439153,
      "grad_norm": 0.31277382373809814,
      "learning_rate": 1.042857142857143e-05,
      "loss": 0.0004,
      "step": 1810
    },
    {
      "epoch": 1.925925925925926,
      "grad_norm": 0.00564979063346982,
      "learning_rate": 1.0375661375661377e-05,
      "loss": 0.0038,
      "step": 1820
    },
    {
      "epoch": 1.9365079365079365,
      "grad_norm": 0.01478109136223793,
      "learning_rate": 1.0322751322751323e-05,
      "loss": 0.0002,
      "step": 1830
    },
    {
      "epoch": 1.947089947089947,
      "grad_norm": 0.1157880499958992,
      "learning_rate": 1.026984126984127e-05,
      "loss": 0.0007,
      "step": 1840
    },
    {
      "epoch": 1.9576719576719577,
      "grad_norm": 0.011665510945022106,
      "learning_rate": 1.0216931216931218e-05,
      "loss": 0.0003,
      "step": 1850
    },
    {
      "epoch": 1.9682539682539684,
      "grad_norm": 0.005074594635516405,
      "learning_rate": 1.0164021164021165e-05,
      "loss": 0.0002,
      "step": 1860
    },
    {
      "epoch": 1.9788359788359788,
      "grad_norm": 0.004943141248077154,
      "learning_rate": 1.0111111111111111e-05,
      "loss": 0.0003,
      "step": 1870
    },
    {
      "epoch": 1.9894179894179893,
      "grad_norm": 0.004243962932378054,
      "learning_rate": 1.0058201058201058e-05,
      "loss": 0.0027,
      "step": 1880
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.2805013060569763,
      "learning_rate": 1.0005291005291008e-05,
      "loss": 0.0015,
      "step": 1890
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.016391731798648834,
      "eval_runtime": 459.5859,
      "eval_samples_per_second": 0.228,
      "eval_steps_per_second": 0.115,
      "step": 1890
    }
  ],
  "logging_steps": 10,
  "max_steps": 3780,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1023956924497920.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}