{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 152,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.013157894736842105,
      "grad_norm": 37.79440689086914,
      "learning_rate": 5.0000000000000004e-08,
      "loss": 3.1402,
      "step": 1
    },
    {
      "epoch": 0.02631578947368421,
      "grad_norm": 38.45823287963867,
      "learning_rate": 1.0000000000000001e-07,
      "loss": 3.1787,
      "step": 2
    },
    {
      "epoch": 0.039473684210526314,
      "grad_norm": 38.25625228881836,
      "learning_rate": 1.5000000000000002e-07,
      "loss": 3.1316,
      "step": 3
    },
    {
      "epoch": 0.05263157894736842,
      "grad_norm": 37.2024040222168,
      "learning_rate": 2.0000000000000002e-07,
      "loss": 3.1011,
      "step": 4
    },
    {
      "epoch": 0.06578947368421052,
      "grad_norm": 38.17294692993164,
      "learning_rate": 2.5000000000000004e-07,
      "loss": 3.133,
      "step": 5
    },
    {
      "epoch": 0.07894736842105263,
      "grad_norm": 37.374794006347656,
      "learning_rate": 3.0000000000000004e-07,
      "loss": 3.0731,
      "step": 6
    },
    {
      "epoch": 0.09210526315789473,
      "grad_norm": 37.226966857910156,
      "learning_rate": 3.5000000000000004e-07,
      "loss": 3.069,
      "step": 7
    },
    {
      "epoch": 0.10526315789473684,
      "grad_norm": 38.40094757080078,
      "learning_rate": 4.0000000000000003e-07,
      "loss": 3.1223,
      "step": 8
    },
    {
      "epoch": 0.11842105263157894,
      "grad_norm": 37.86320877075195,
      "learning_rate": 4.5000000000000003e-07,
      "loss": 3.062,
      "step": 9
    },
    {
      "epoch": 0.13157894736842105,
      "grad_norm": 38.02171325683594,
      "learning_rate": 5.000000000000001e-07,
      "loss": 3.0008,
      "step": 10
    },
    {
      "epoch": 0.14473684210526316,
      "grad_norm": 38.5522346496582,
      "learning_rate": 5.5e-07,
      "loss": 3.0047,
      "step": 11
    },
    {
      "epoch": 0.15789473684210525,
      "grad_norm": 37.72829818725586,
      "learning_rate": 6.000000000000001e-07,
      "loss": 2.9274,
      "step": 12
    },
    {
      "epoch": 0.17105263157894737,
      "grad_norm": 38.488494873046875,
      "learning_rate": 6.5e-07,
      "loss": 2.8727,
      "step": 13
    },
    {
      "epoch": 0.18421052631578946,
      "grad_norm": 38.87471389770508,
      "learning_rate": 7.000000000000001e-07,
      "loss": 2.8422,
      "step": 14
    },
    {
      "epoch": 0.19736842105263158,
      "grad_norm": 37.584896087646484,
      "learning_rate": 7.5e-07,
      "loss": 2.6728,
      "step": 15
    },
    {
      "epoch": 0.21052631578947367,
      "grad_norm": 37.04607391357422,
      "learning_rate": 8.000000000000001e-07,
      "loss": 2.5215,
      "step": 16
    },
    {
      "epoch": 0.2236842105263158,
      "grad_norm": 37.30121994018555,
      "learning_rate": 8.500000000000001e-07,
      "loss": 2.4689,
      "step": 17
    },
    {
      "epoch": 0.23684210526315788,
      "grad_norm": 35.99961853027344,
      "learning_rate": 9.000000000000001e-07,
      "loss": 2.3,
      "step": 18
    },
    {
      "epoch": 0.25,
      "grad_norm": 35.817543029785156,
      "learning_rate": 9.500000000000001e-07,
      "loss": 2.1423,
      "step": 19
    },
    {
      "epoch": 0.2631578947368421,
      "grad_norm": 35.056915283203125,
      "learning_rate": 1.0000000000000002e-06,
      "loss": 1.9639,
      "step": 20
    },
    {
      "epoch": 0.27631578947368424,
      "grad_norm": 34.83850860595703,
      "learning_rate": 1.0500000000000001e-06,
      "loss": 1.7845,
      "step": 21
    },
    {
      "epoch": 0.2894736842105263,
      "grad_norm": 34.32366943359375,
      "learning_rate": 1.1e-06,
      "loss": 1.5864,
      "step": 22
    },
    {
      "epoch": 0.3026315789473684,
      "grad_norm": 33.79611587524414,
      "learning_rate": 1.1500000000000002e-06,
      "loss": 1.4011,
      "step": 23
    },
    {
      "epoch": 0.3157894736842105,
      "grad_norm": 32.596031188964844,
      "learning_rate": 1.2000000000000002e-06,
      "loss": 1.195,
      "step": 24
    },
    {
      "epoch": 0.32894736842105265,
      "grad_norm": 30.045007705688477,
      "learning_rate": 1.25e-06,
      "loss": 0.9883,
      "step": 25
    },
    {
      "epoch": 0.34210526315789475,
      "grad_norm": 24.89093589782715,
      "learning_rate": 1.3e-06,
      "loss": 0.7669,
      "step": 26
    },
    {
      "epoch": 0.35526315789473684,
      "grad_norm": 23.454408645629883,
      "learning_rate": 1.3500000000000002e-06,
      "loss": 0.6304,
      "step": 27
    },
    {
      "epoch": 0.3684210526315789,
      "grad_norm": 19.837312698364258,
      "learning_rate": 1.4000000000000001e-06,
      "loss": 0.4717,
      "step": 28
    },
    {
      "epoch": 0.3815789473684211,
      "grad_norm": 15.185093879699707,
      "learning_rate": 1.45e-06,
      "loss": 0.363,
      "step": 29
    },
    {
      "epoch": 0.39473684210526316,
      "grad_norm": 9.057796478271484,
      "learning_rate": 1.5e-06,
      "loss": 0.2439,
      "step": 30
    },
    {
      "epoch": 0.40789473684210525,
      "grad_norm": 5.976982593536377,
      "learning_rate": 1.5500000000000002e-06,
      "loss": 0.1864,
      "step": 31
    },
    {
      "epoch": 0.42105263157894735,
      "grad_norm": 3.067375421524048,
      "learning_rate": 1.6000000000000001e-06,
      "loss": 0.1134,
      "step": 32
    },
    {
      "epoch": 0.4342105263157895,
      "grad_norm": 2.3589119911193848,
      "learning_rate": 1.6500000000000003e-06,
      "loss": 0.0985,
      "step": 33
    },
    {
      "epoch": 0.4473684210526316,
      "grad_norm": 2.0044353008270264,
      "learning_rate": 1.7000000000000002e-06,
      "loss": 0.0859,
      "step": 34
    },
    {
      "epoch": 0.4605263157894737,
      "grad_norm": 1.4279972314834595,
      "learning_rate": 1.75e-06,
      "loss": 0.0728,
      "step": 35
    },
    {
      "epoch": 0.47368421052631576,
      "grad_norm": 0.9807674288749695,
      "learning_rate": 1.8000000000000001e-06,
      "loss": 0.061,
      "step": 36
    },
    {
      "epoch": 0.4868421052631579,
      "grad_norm": 0.906160295009613,
      "learning_rate": 1.85e-06,
      "loss": 0.0676,
      "step": 37
    },
    {
      "epoch": 0.5,
      "grad_norm": 0.8837690353393555,
      "learning_rate": 1.9000000000000002e-06,
      "loss": 0.0622,
      "step": 38
    },
    {
      "epoch": 0.5131578947368421,
      "grad_norm": 0.9579435586929321,
      "learning_rate": 1.9500000000000004e-06,
      "loss": 0.0557,
      "step": 39
    },
    {
      "epoch": 0.5263157894736842,
      "grad_norm": 0.8149510622024536,
      "learning_rate": 2.0000000000000003e-06,
      "loss": 0.0555,
      "step": 40
    },
    {
      "epoch": 0.5394736842105263,
      "grad_norm": 0.8899760246276855,
      "learning_rate": 2.05e-06,
      "loss": 0.0517,
      "step": 41
    },
    {
      "epoch": 0.5526315789473685,
      "grad_norm": 0.6007645130157471,
      "learning_rate": 2.1000000000000002e-06,
      "loss": 0.0518,
      "step": 42
    },
    {
      "epoch": 0.5657894736842105,
      "grad_norm": 0.48819127678871155,
      "learning_rate": 2.15e-06,
      "loss": 0.0429,
      "step": 43
    },
    {
      "epoch": 0.5789473684210527,
      "grad_norm": 0.42939358949661255,
      "learning_rate": 2.2e-06,
      "loss": 0.0459,
      "step": 44
    },
    {
      "epoch": 0.5921052631578947,
      "grad_norm": 0.5706579685211182,
      "learning_rate": 2.25e-06,
      "loss": 0.0453,
      "step": 45
    },
    {
      "epoch": 0.6052631578947368,
      "grad_norm": 0.3034597337245941,
      "learning_rate": 2.3000000000000004e-06,
      "loss": 0.0421,
      "step": 46
    },
    {
      "epoch": 0.618421052631579,
      "grad_norm": 0.5601783394813538,
      "learning_rate": 2.35e-06,
      "loss": 0.0411,
      "step": 47
    },
    {
      "epoch": 0.631578947368421,
      "grad_norm": 0.35388317704200745,
      "learning_rate": 2.4000000000000003e-06,
      "loss": 0.04,
      "step": 48
    },
    {
      "epoch": 0.6447368421052632,
      "grad_norm": 0.48609891533851624,
      "learning_rate": 2.4500000000000003e-06,
      "loss": 0.04,
      "step": 49
    },
    {
      "epoch": 0.6578947368421053,
      "grad_norm": 0.4638507068157196,
      "learning_rate": 2.5e-06,
      "loss": 0.0369,
      "step": 50
    },
    {
      "epoch": 0.6710526315789473,
      "grad_norm": 0.5685771703720093,
      "learning_rate": 2.55e-06,
      "loss": 0.0428,
      "step": 51
    },
    {
      "epoch": 0.6842105263157895,
      "grad_norm": 0.46358174085617065,
      "learning_rate": 2.6e-06,
      "loss": 0.0483,
      "step": 52
    },
    {
      "epoch": 0.6973684210526315,
      "grad_norm": 0.35054436326026917,
      "learning_rate": 2.6500000000000005e-06,
      "loss": 0.0391,
      "step": 53
    },
    {
      "epoch": 0.7105263157894737,
      "grad_norm": 0.3350559175014496,
      "learning_rate": 2.7000000000000004e-06,
      "loss": 0.039,
      "step": 54
    },
    {
      "epoch": 0.7236842105263158,
      "grad_norm": 0.2875112295150757,
      "learning_rate": 2.7500000000000004e-06,
      "loss": 0.0383,
      "step": 55
    },
    {
      "epoch": 0.7368421052631579,
      "grad_norm": 0.4492928683757782,
      "learning_rate": 2.8000000000000003e-06,
      "loss": 0.0358,
      "step": 56
    },
    {
      "epoch": 0.75,
      "grad_norm": 0.29484888911247253,
      "learning_rate": 2.85e-06,
      "loss": 0.0355,
      "step": 57
    },
    {
      "epoch": 0.7631578947368421,
      "grad_norm": 0.36551928520202637,
      "learning_rate": 2.9e-06,
      "loss": 0.0403,
      "step": 58
    },
    {
      "epoch": 0.7763157894736842,
      "grad_norm": 0.4458053708076477,
      "learning_rate": 2.95e-06,
      "loss": 0.0342,
      "step": 59
    },
    {
      "epoch": 0.7894736842105263,
      "grad_norm": 0.34047460556030273,
      "learning_rate": 3e-06,
      "loss": 0.0302,
      "step": 60
    },
    {
      "epoch": 0.8026315789473685,
      "grad_norm": 0.3420606255531311,
      "learning_rate": 3.05e-06,
      "loss": 0.034,
      "step": 61
    },
    {
      "epoch": 0.8157894736842105,
      "grad_norm": 0.3902851939201355,
      "learning_rate": 3.1000000000000004e-06,
      "loss": 0.0327,
      "step": 62
    },
    {
      "epoch": 0.8289473684210527,
      "grad_norm": 0.29165828227996826,
      "learning_rate": 3.1500000000000003e-06,
      "loss": 0.0341,
      "step": 63
    },
    {
      "epoch": 0.8421052631578947,
      "grad_norm": 0.40872958302497864,
      "learning_rate": 3.2000000000000003e-06,
      "loss": 0.035,
      "step": 64
    },
    {
      "epoch": 0.8552631578947368,
      "grad_norm": 0.36295783519744873,
      "learning_rate": 3.2500000000000002e-06,
      "loss": 0.0323,
      "step": 65
    },
    {
      "epoch": 0.868421052631579,
      "grad_norm": 0.3857724368572235,
      "learning_rate": 3.3000000000000006e-06,
      "loss": 0.0336,
      "step": 66
    },
    {
      "epoch": 0.881578947368421,
      "grad_norm": 0.3207017481327057,
      "learning_rate": 3.3500000000000005e-06,
      "loss": 0.0332,
      "step": 67
    },
    {
      "epoch": 0.8947368421052632,
      "grad_norm": 0.2903987169265747,
      "learning_rate": 3.4000000000000005e-06,
      "loss": 0.0327,
      "step": 68
    },
    {
      "epoch": 0.9078947368421053,
      "grad_norm": 0.3386954963207245,
      "learning_rate": 3.45e-06,
      "loss": 0.0308,
      "step": 69
    },
    {
      "epoch": 0.9210526315789473,
      "grad_norm": 0.4339621365070343,
      "learning_rate": 3.5e-06,
      "loss": 0.0361,
      "step": 70
    },
    {
      "epoch": 0.9342105263157895,
      "grad_norm": 0.28095564246177673,
      "learning_rate": 3.5500000000000003e-06,
      "loss": 0.0306,
      "step": 71
    },
    {
      "epoch": 0.9473684210526315,
      "grad_norm": 0.4141469895839691,
      "learning_rate": 3.6000000000000003e-06,
      "loss": 0.028,
      "step": 72
    },
    {
      "epoch": 0.9605263157894737,
      "grad_norm": 0.35212820768356323,
      "learning_rate": 3.65e-06,
      "loss": 0.032,
      "step": 73
    },
    {
      "epoch": 0.9736842105263158,
      "grad_norm": 0.26956063508987427,
      "learning_rate": 3.7e-06,
      "loss": 0.0294,
      "step": 74
    },
    {
      "epoch": 0.9868421052631579,
      "grad_norm": 0.32735681533813477,
      "learning_rate": 3.7500000000000005e-06,
      "loss": 0.0272,
      "step": 75
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.4906782805919647,
      "learning_rate": 3.8000000000000005e-06,
      "loss": 0.0324,
      "step": 76
    },
    {
      "epoch": 1.013157894736842,
      "grad_norm": 0.3451901078224182,
      "learning_rate": 3.85e-06,
      "loss": 0.0288,
      "step": 77
    },
    {
      "epoch": 1.0263157894736843,
      "grad_norm": 0.30598726868629456,
      "learning_rate": 3.900000000000001e-06,
      "loss": 0.0305,
      "step": 78
    },
    {
      "epoch": 1.0394736842105263,
      "grad_norm": 0.31189921498298645,
      "learning_rate": 3.95e-06,
      "loss": 0.0274,
      "step": 79
    },
    {
      "epoch": 1.0526315789473684,
      "grad_norm": 0.31895947456359863,
      "learning_rate": 4.000000000000001e-06,
      "loss": 0.0236,
      "step": 80
    },
    {
      "epoch": 1.0657894736842106,
      "grad_norm": 0.3290308117866516,
      "learning_rate": 4.05e-06,
      "loss": 0.0284,
      "step": 81
    },
    {
      "epoch": 1.0789473684210527,
      "grad_norm": 0.3651576638221741,
      "learning_rate": 4.1e-06,
      "loss": 0.0274,
      "step": 82
    },
    {
      "epoch": 1.0921052631578947,
      "grad_norm": 0.2393084615468979,
      "learning_rate": 4.15e-06,
      "loss": 0.0301,
      "step": 83
    },
    {
      "epoch": 1.1052631578947367,
      "grad_norm": 0.333898663520813,
      "learning_rate": 4.2000000000000004e-06,
      "loss": 0.0235,
      "step": 84
    },
    {
      "epoch": 1.118421052631579,
      "grad_norm": 0.3287582993507385,
      "learning_rate": 4.25e-06,
      "loss": 0.0248,
      "step": 85
    },
    {
      "epoch": 1.131578947368421,
      "grad_norm": 0.3432455360889435,
      "learning_rate": 4.3e-06,
      "loss": 0.026,
      "step": 86
    },
    {
      "epoch": 1.1447368421052633,
      "grad_norm": 0.3176783621311188,
      "learning_rate": 4.350000000000001e-06,
      "loss": 0.0249,
      "step": 87
    },
    {
      "epoch": 1.1578947368421053,
      "grad_norm": 0.33373433351516724,
      "learning_rate": 4.4e-06,
      "loss": 0.0251,
      "step": 88
    },
    {
      "epoch": 1.1710526315789473,
      "grad_norm": 0.36087968945503235,
      "learning_rate": 4.450000000000001e-06,
      "loss": 0.0251,
      "step": 89
    },
    {
      "epoch": 1.1842105263157894,
      "grad_norm": 0.3681696057319641,
      "learning_rate": 4.5e-06,
      "loss": 0.0276,
      "step": 90
    },
    {
      "epoch": 1.1973684210526316,
      "grad_norm": 0.46539774537086487,
      "learning_rate": 4.5500000000000005e-06,
      "loss": 0.0229,
      "step": 91
    },
    {
      "epoch": 1.2105263157894737,
      "grad_norm": 0.23368288576602936,
      "learning_rate": 4.600000000000001e-06,
      "loss": 0.021,
      "step": 92
    },
    {
      "epoch": 1.2236842105263157,
      "grad_norm": 0.26623716950416565,
      "learning_rate": 4.65e-06,
      "loss": 0.0265,
      "step": 93
    },
    {
      "epoch": 1.236842105263158,
      "grad_norm": 0.28750717639923096,
      "learning_rate": 4.7e-06,
      "loss": 0.0221,
      "step": 94
    },
    {
      "epoch": 1.25,
      "grad_norm": 0.46578383445739746,
      "learning_rate": 4.75e-06,
      "loss": 0.0236,
      "step": 95
    },
    {
      "epoch": 1.263157894736842,
      "grad_norm": 0.33406543731689453,
      "learning_rate": 4.800000000000001e-06,
      "loss": 0.0239,
      "step": 96
    },
    {
      "epoch": 1.2763157894736843,
      "grad_norm": 0.21247217059135437,
      "learning_rate": 4.85e-06,
      "loss": 0.0188,
      "step": 97
    },
    {
      "epoch": 1.2894736842105263,
      "grad_norm": 0.26229164004325867,
      "learning_rate": 4.9000000000000005e-06,
      "loss": 0.022,
      "step": 98
    },
    {
      "epoch": 1.3026315789473684,
      "grad_norm": 0.2967258393764496,
      "learning_rate": 4.95e-06,
      "loss": 0.0218,
      "step": 99
    },
    {
      "epoch": 1.3157894736842106,
      "grad_norm": 0.419189453125,
      "learning_rate": 5e-06,
      "loss": 0.0247,
      "step": 100
    },
    {
      "epoch": 1.3289473684210527,
      "grad_norm": 0.25418952107429504,
      "learning_rate": 4.999902656502973e-06,
      "loss": 0.0223,
      "step": 101
    },
    {
      "epoch": 1.3421052631578947,
      "grad_norm": 0.20174147188663483,
      "learning_rate": 4.9996106335924965e-06,
      "loss": 0.0266,
      "step": 102
    },
    {
      "epoch": 1.3552631578947367,
      "grad_norm": 0.21732494235038757,
      "learning_rate": 4.999123954009797e-06,
      "loss": 0.0188,
      "step": 103
    },
    {
      "epoch": 1.368421052631579,
      "grad_norm": 0.2683119773864746,
      "learning_rate": 4.998442655654946e-06,
      "loss": 0.0203,
      "step": 104
    },
    {
      "epoch": 1.381578947368421,
      "grad_norm": 0.18175765872001648,
      "learning_rate": 4.997566791583916e-06,
      "loss": 0.0185,
      "step": 105
    },
    {
      "epoch": 1.3947368421052633,
      "grad_norm": 0.3932501971721649,
      "learning_rate": 4.996496430004446e-06,
      "loss": 0.0238,
      "step": 106
    },
    {
      "epoch": 1.4078947368421053,
      "grad_norm": 0.31145599484443665,
      "learning_rate": 4.995231654270726e-06,
      "loss": 0.0199,
      "step": 107
    },
    {
      "epoch": 1.4210526315789473,
      "grad_norm": 0.41356661915779114,
      "learning_rate": 4.993772562876909e-06,
      "loss": 0.0187,
      "step": 108
    },
    {
      "epoch": 1.4342105263157894,
      "grad_norm": 0.22484919428825378,
      "learning_rate": 4.992119269449445e-06,
      "loss": 0.0182,
      "step": 109
    },
    {
      "epoch": 1.4473684210526316,
      "grad_norm": 0.28703081607818604,
      "learning_rate": 4.990271902738223e-06,
      "loss": 0.0239,
      "step": 110
    },
    {
      "epoch": 1.4605263157894737,
      "grad_norm": 0.2394670695066452,
      "learning_rate": 4.988230606606552e-06,
      "loss": 0.0171,
      "step": 111
    },
    {
      "epoch": 1.4736842105263157,
      "grad_norm": 0.3552885949611664,
      "learning_rate": 4.985995540019956e-06,
      "loss": 0.0226,
      "step": 112
    },
    {
      "epoch": 1.486842105263158,
      "grad_norm": 0.24968908727169037,
      "learning_rate": 4.983566877033791e-06,
      "loss": 0.0193,
      "step": 113
    },
    {
      "epoch": 1.5,
      "grad_norm": 0.24420695006847382,
      "learning_rate": 4.980944806779698e-06,
      "loss": 0.0226,
      "step": 114
    },
    {
      "epoch": 1.513157894736842,
      "grad_norm": 0.34696799516677856,
      "learning_rate": 4.9781295334508664e-06,
      "loss": 0.02,
      "step": 115
    },
    {
      "epoch": 1.526315789473684,
      "grad_norm": 0.23682132363319397,
      "learning_rate": 4.975121276286136e-06,
      "loss": 0.0194,
      "step": 116
    },
    {
      "epoch": 1.5394736842105263,
      "grad_norm": 0.2485751509666443,
      "learning_rate": 4.9719202695529265e-06,
      "loss": 0.0149,
      "step": 117
    },
    {
      "epoch": 1.5526315789473686,
      "grad_norm": 0.2815033495426178,
      "learning_rate": 4.968526762528988e-06,
      "loss": 0.0153,
      "step": 118
    },
    {
      "epoch": 1.5657894736842106,
      "grad_norm": 0.24127744138240814,
      "learning_rate": 4.964941019482995e-06,
      "loss": 0.019,
      "step": 119
    },
    {
      "epoch": 1.5789473684210527,
      "grad_norm": 0.2987695038318634,
      "learning_rate": 4.961163319653959e-06,
      "loss": 0.0165,
      "step": 120
    },
    {
      "epoch": 1.5921052631578947,
      "grad_norm": 0.33492133021354675,
      "learning_rate": 4.9571939572294914e-06,
      "loss": 0.0185,
      "step": 121
    },
    {
      "epoch": 1.6052631578947367,
      "grad_norm": 0.20466521382331848,
      "learning_rate": 4.953033241322887e-06,
      "loss": 0.0151,
      "step": 122
    },
    {
      "epoch": 1.618421052631579,
      "grad_norm": 0.36396247148513794,
      "learning_rate": 4.948681495949055e-06,
      "loss": 0.0138,
      "step": 123
    },
    {
      "epoch": 1.631578947368421,
      "grad_norm": 0.2000381350517273,
      "learning_rate": 4.944139059999286e-06,
      "loss": 0.0125,
      "step": 124
    },
    {
      "epoch": 1.6447368421052633,
      "grad_norm": 0.24977952241897583,
      "learning_rate": 4.939406287214861e-06,
      "loss": 0.0152,
      "step": 125
    },
    {
      "epoch": 1.6578947368421053,
      "grad_norm": 0.26705336570739746,
      "learning_rate": 4.9344835461595016e-06,
      "loss": 0.0148,
      "step": 126
    },
    {
      "epoch": 1.6710526315789473,
      "grad_norm": 0.26699599623680115,
      "learning_rate": 4.929371220190671e-06,
      "loss": 0.0151,
      "step": 127
    },
    {
      "epoch": 1.6842105263157894,
      "grad_norm": 0.20149633288383484,
      "learning_rate": 4.9240697074297205e-06,
      "loss": 0.0151,
      "step": 128
    },
    {
      "epoch": 1.6973684210526314,
      "grad_norm": 0.1961003988981247,
      "learning_rate": 4.918579420730884e-06,
      "loss": 0.0163,
      "step": 129
    },
    {
      "epoch": 1.7105263157894737,
      "grad_norm": 0.2148503214120865,
      "learning_rate": 4.912900787649124e-06,
      "loss": 0.0137,
      "step": 130
    },
    {
      "epoch": 1.723684210526316,
      "grad_norm": 0.20505128800868988,
      "learning_rate": 4.907034250406846e-06,
      "loss": 0.0136,
      "step": 131
    },
    {
      "epoch": 1.736842105263158,
      "grad_norm": 0.19462467730045319,
      "learning_rate": 4.900980265859449e-06,
      "loss": 0.0139,
      "step": 132
    },
    {
      "epoch": 1.75,
      "grad_norm": 0.21602794528007507,
      "learning_rate": 4.894739305459754e-06,
      "loss": 0.015,
      "step": 133
    },
    {
      "epoch": 1.763157894736842,
      "grad_norm": 0.22933153808116913,
      "learning_rate": 4.88831185522129e-06,
      "loss": 0.0142,
      "step": 134
    },
    {
      "epoch": 1.776315789473684,
      "grad_norm": 0.1785646229982376,
      "learning_rate": 4.881698415680442e-06,
      "loss": 0.0097,
      "step": 135
    },
    {
      "epoch": 1.7894736842105263,
      "grad_norm": 0.21535581350326538,
      "learning_rate": 4.874899501857477e-06,
      "loss": 0.0106,
      "step": 136
    },
    {
      "epoch": 1.8026315789473686,
      "grad_norm": 0.2360723614692688,
      "learning_rate": 4.867915643216434e-06,
      "loss": 0.0123,
      "step": 137
    },
    {
      "epoch": 1.8157894736842106,
      "grad_norm": 0.18098825216293335,
      "learning_rate": 4.860747383623889e-06,
      "loss": 0.0126,
      "step": 138
    },
    {
      "epoch": 1.8289473684210527,
      "grad_norm": 0.1836131066083908,
      "learning_rate": 4.85339528130661e-06,
      "loss": 0.0125,
      "step": 139
    },
    {
      "epoch": 1.8421052631578947,
      "grad_norm": 0.34765973687171936,
      "learning_rate": 4.845859908808074e-06,
      "loss": 0.0158,
      "step": 140
    },
    {
      "epoch": 1.8552631578947367,
      "grad_norm": 0.22595159709453583,
      "learning_rate": 4.838141852943891e-06,
      "loss": 0.0101,
      "step": 141
    },
    {
      "epoch": 1.868421052631579,
      "grad_norm": 0.2811257243156433,
      "learning_rate": 4.830241714756099e-06,
      "loss": 0.0111,
      "step": 142
    },
    {
      "epoch": 1.881578947368421,
      "grad_norm": 0.1875840127468109,
      "learning_rate": 4.822160109466361e-06,
      "loss": 0.0086,
      "step": 143
    },
    {
      "epoch": 1.8947368421052633,
      "grad_norm": 0.19390800595283508,
      "learning_rate": 4.813897666428054e-06,
      "loss": 0.0106,
      "step": 144
    },
    {
      "epoch": 1.9078947368421053,
      "grad_norm": 0.3725268244743347,
      "learning_rate": 4.805455029077255e-06,
      "loss": 0.0095,
      "step": 145
    },
    {
      "epoch": 1.9210526315789473,
      "grad_norm": 0.2201736867427826,
      "learning_rate": 4.79683285488264e-06,
      "loss": 0.0074,
      "step": 146
    },
    {
      "epoch": 1.9342105263157894,
      "grad_norm": 0.17423805594444275,
      "learning_rate": 4.788031815294282e-06,
      "loss": 0.0072,
      "step": 147
    },
    {
      "epoch": 1.9473684210526314,
      "grad_norm": 0.22169643640518188,
      "learning_rate": 4.779052595691355e-06,
      "loss": 0.0121,
      "step": 148
    },
    {
      "epoch": 1.9605263157894737,
      "grad_norm": 0.3247295618057251,
      "learning_rate": 4.76989589532877e-06,
      "loss": 0.0121,
      "step": 149
    },
    {
      "epoch": 1.973684210526316,
      "grad_norm": 0.1830369532108307,
      "learning_rate": 4.7605624272827125e-06,
      "loss": 0.0077,
      "step": 150
    },
    {
      "epoch": 1.986842105263158,
      "grad_norm": 0.2967239022254944,
      "learning_rate": 4.75105291839512e-06,
      "loss": 0.0104,
      "step": 151
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.17589347064495087,
      "learning_rate": 4.741368109217072e-06,
      "loss": 0.0075,
      "step": 152
    }
  ],
  "logging_steps": 1,
  "max_steps": 456,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 6,
  "save_steps": 76,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 9.77843856062441e+18,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}