{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 76,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.013157894736842105,
      "grad_norm": 37.79440689086914,
      "learning_rate": 5.0000000000000004e-08,
      "loss": 3.1402,
      "step": 1
    },
    {
      "epoch": 0.02631578947368421,
      "grad_norm": 38.45823287963867,
      "learning_rate": 1.0000000000000001e-07,
      "loss": 3.1787,
      "step": 2
    },
    {
      "epoch": 0.039473684210526314,
      "grad_norm": 38.25625228881836,
      "learning_rate": 1.5000000000000002e-07,
      "loss": 3.1316,
      "step": 3
    },
    {
      "epoch": 0.05263157894736842,
      "grad_norm": 37.2024040222168,
      "learning_rate": 2.0000000000000002e-07,
      "loss": 3.1011,
      "step": 4
    },
    {
      "epoch": 0.06578947368421052,
      "grad_norm": 38.17294692993164,
      "learning_rate": 2.5000000000000004e-07,
      "loss": 3.133,
      "step": 5
    },
    {
      "epoch": 0.07894736842105263,
      "grad_norm": 37.374794006347656,
      "learning_rate": 3.0000000000000004e-07,
      "loss": 3.0731,
      "step": 6
    },
    {
      "epoch": 0.09210526315789473,
      "grad_norm": 37.226966857910156,
      "learning_rate": 3.5000000000000004e-07,
      "loss": 3.069,
      "step": 7
    },
    {
      "epoch": 0.10526315789473684,
      "grad_norm": 38.40094757080078,
      "learning_rate": 4.0000000000000003e-07,
      "loss": 3.1223,
      "step": 8
    },
    {
      "epoch": 0.11842105263157894,
      "grad_norm": 37.86320877075195,
      "learning_rate": 4.5000000000000003e-07,
      "loss": 3.062,
      "step": 9
    },
    {
      "epoch": 0.13157894736842105,
      "grad_norm": 38.02171325683594,
      "learning_rate": 5.000000000000001e-07,
      "loss": 3.0008,
      "step": 10
    },
    {
      "epoch": 0.14473684210526316,
      "grad_norm": 38.5522346496582,
      "learning_rate": 5.5e-07,
      "loss": 3.0047,
      "step": 11
    },
    {
      "epoch": 0.15789473684210525,
      "grad_norm": 37.72829818725586,
      "learning_rate": 6.000000000000001e-07,
      "loss": 2.9274,
      "step": 12
    },
    {
      "epoch": 0.17105263157894737,
      "grad_norm": 38.488494873046875,
      "learning_rate": 6.5e-07,
      "loss": 2.8727,
      "step": 13
    },
    {
      "epoch": 0.18421052631578946,
      "grad_norm": 38.87471389770508,
      "learning_rate": 7.000000000000001e-07,
      "loss": 2.8422,
      "step": 14
    },
    {
      "epoch": 0.19736842105263158,
      "grad_norm": 37.584896087646484,
      "learning_rate": 7.5e-07,
      "loss": 2.6728,
      "step": 15
    },
    {
      "epoch": 0.21052631578947367,
      "grad_norm": 37.04607391357422,
      "learning_rate": 8.000000000000001e-07,
      "loss": 2.5215,
      "step": 16
    },
    {
      "epoch": 0.2236842105263158,
      "grad_norm": 37.30121994018555,
      "learning_rate": 8.500000000000001e-07,
      "loss": 2.4689,
      "step": 17
    },
    {
      "epoch": 0.23684210526315788,
      "grad_norm": 35.99961853027344,
      "learning_rate": 9.000000000000001e-07,
      "loss": 2.3,
      "step": 18
    },
    {
      "epoch": 0.25,
      "grad_norm": 35.817543029785156,
      "learning_rate": 9.500000000000001e-07,
      "loss": 2.1423,
      "step": 19
    },
    {
      "epoch": 0.2631578947368421,
      "grad_norm": 35.056915283203125,
      "learning_rate": 1.0000000000000002e-06,
      "loss": 1.9639,
      "step": 20
    },
    {
      "epoch": 0.27631578947368424,
      "grad_norm": 34.83850860595703,
      "learning_rate": 1.0500000000000001e-06,
      "loss": 1.7845,
      "step": 21
    },
    {
      "epoch": 0.2894736842105263,
      "grad_norm": 34.32366943359375,
      "learning_rate": 1.1e-06,
      "loss": 1.5864,
      "step": 22
    },
    {
      "epoch": 0.3026315789473684,
      "grad_norm": 33.79611587524414,
      "learning_rate": 1.1500000000000002e-06,
      "loss": 1.4011,
      "step": 23
    },
    {
      "epoch": 0.3157894736842105,
      "grad_norm": 32.596031188964844,
      "learning_rate": 1.2000000000000002e-06,
      "loss": 1.195,
      "step": 24
    },
    {
      "epoch": 0.32894736842105265,
      "grad_norm": 30.045007705688477,
      "learning_rate": 1.25e-06,
      "loss": 0.9883,
      "step": 25
    },
    {
      "epoch": 0.34210526315789475,
      "grad_norm": 24.89093589782715,
      "learning_rate": 1.3e-06,
      "loss": 0.7669,
      "step": 26
    },
    {
      "epoch": 0.35526315789473684,
      "grad_norm": 23.454408645629883,
      "learning_rate": 1.3500000000000002e-06,
      "loss": 0.6304,
      "step": 27
    },
    {
      "epoch": 0.3684210526315789,
      "grad_norm": 19.837312698364258,
      "learning_rate": 1.4000000000000001e-06,
      "loss": 0.4717,
      "step": 28
    },
    {
      "epoch": 0.3815789473684211,
      "grad_norm": 15.185093879699707,
      "learning_rate": 1.45e-06,
      "loss": 0.363,
      "step": 29
    },
    {
      "epoch": 0.39473684210526316,
      "grad_norm": 9.057796478271484,
      "learning_rate": 1.5e-06,
      "loss": 0.2439,
      "step": 30
    },
    {
      "epoch": 0.40789473684210525,
      "grad_norm": 5.976982593536377,
      "learning_rate": 1.5500000000000002e-06,
      "loss": 0.1864,
      "step": 31
    },
    {
      "epoch": 0.42105263157894735,
      "grad_norm": 3.067375421524048,
      "learning_rate": 1.6000000000000001e-06,
      "loss": 0.1134,
      "step": 32
    },
    {
      "epoch": 0.4342105263157895,
      "grad_norm": 2.3589119911193848,
      "learning_rate": 1.6500000000000003e-06,
      "loss": 0.0985,
      "step": 33
    },
    {
      "epoch": 0.4473684210526316,
      "grad_norm": 2.0044353008270264,
      "learning_rate": 1.7000000000000002e-06,
      "loss": 0.0859,
      "step": 34
    },
    {
      "epoch": 0.4605263157894737,
      "grad_norm": 1.4279972314834595,
      "learning_rate": 1.75e-06,
      "loss": 0.0728,
      "step": 35
    },
    {
      "epoch": 0.47368421052631576,
      "grad_norm": 0.9807674288749695,
      "learning_rate": 1.8000000000000001e-06,
      "loss": 0.061,
      "step": 36
    },
    {
      "epoch": 0.4868421052631579,
      "grad_norm": 0.906160295009613,
      "learning_rate": 1.85e-06,
      "loss": 0.0676,
      "step": 37
    },
    {
      "epoch": 0.5,
      "grad_norm": 0.8837690353393555,
      "learning_rate": 1.9000000000000002e-06,
      "loss": 0.0622,
      "step": 38
    },
    {
      "epoch": 0.5131578947368421,
      "grad_norm": 0.9579435586929321,
      "learning_rate": 1.9500000000000004e-06,
      "loss": 0.0557,
      "step": 39
    },
    {
      "epoch": 0.5263157894736842,
      "grad_norm": 0.8149510622024536,
      "learning_rate": 2.0000000000000003e-06,
      "loss": 0.0555,
      "step": 40
    },
    {
      "epoch": 0.5394736842105263,
      "grad_norm": 0.8899760246276855,
      "learning_rate": 2.05e-06,
      "loss": 0.0517,
      "step": 41
    },
    {
      "epoch": 0.5526315789473685,
      "grad_norm": 0.6007645130157471,
      "learning_rate": 2.1000000000000002e-06,
      "loss": 0.0518,
      "step": 42
    },
    {
      "epoch": 0.5657894736842105,
      "grad_norm": 0.48819127678871155,
      "learning_rate": 2.15e-06,
      "loss": 0.0429,
      "step": 43
    },
    {
      "epoch": 0.5789473684210527,
      "grad_norm": 0.42939358949661255,
      "learning_rate": 2.2e-06,
      "loss": 0.0459,
      "step": 44
    },
    {
      "epoch": 0.5921052631578947,
      "grad_norm": 0.5706579685211182,
      "learning_rate": 2.25e-06,
      "loss": 0.0453,
      "step": 45
    },
    {
      "epoch": 0.6052631578947368,
      "grad_norm": 0.3034597337245941,
      "learning_rate": 2.3000000000000004e-06,
      "loss": 0.0421,
      "step": 46
    },
    {
      "epoch": 0.618421052631579,
      "grad_norm": 0.5601783394813538,
      "learning_rate": 2.35e-06,
      "loss": 0.0411,
      "step": 47
    },
    {
      "epoch": 0.631578947368421,
      "grad_norm": 0.35388317704200745,
      "learning_rate": 2.4000000000000003e-06,
      "loss": 0.04,
      "step": 48
    },
    {
      "epoch": 0.6447368421052632,
      "grad_norm": 0.48609891533851624,
      "learning_rate": 2.4500000000000003e-06,
      "loss": 0.04,
      "step": 49
    },
    {
      "epoch": 0.6578947368421053,
      "grad_norm": 0.4638507068157196,
      "learning_rate": 2.5e-06,
      "loss": 0.0369,
      "step": 50
    },
    {
      "epoch": 0.6710526315789473,
      "grad_norm": 0.5685771703720093,
      "learning_rate": 2.55e-06,
      "loss": 0.0428,
      "step": 51
    },
    {
      "epoch": 0.6842105263157895,
      "grad_norm": 0.46358174085617065,
      "learning_rate": 2.6e-06,
      "loss": 0.0483,
      "step": 52
    },
    {
      "epoch": 0.6973684210526315,
      "grad_norm": 0.35054436326026917,
      "learning_rate": 2.6500000000000005e-06,
      "loss": 0.0391,
      "step": 53
    },
    {
      "epoch": 0.7105263157894737,
      "grad_norm": 0.3350559175014496,
      "learning_rate": 2.7000000000000004e-06,
      "loss": 0.039,
      "step": 54
    },
    {
      "epoch": 0.7236842105263158,
      "grad_norm": 0.2875112295150757,
      "learning_rate": 2.7500000000000004e-06,
      "loss": 0.0383,
      "step": 55
    },
    {
      "epoch": 0.7368421052631579,
      "grad_norm": 0.4492928683757782,
      "learning_rate": 2.8000000000000003e-06,
      "loss": 0.0358,
      "step": 56
    },
    {
      "epoch": 0.75,
      "grad_norm": 0.29484888911247253,
      "learning_rate": 2.85e-06,
      "loss": 0.0355,
      "step": 57
    },
    {
      "epoch": 0.7631578947368421,
      "grad_norm": 0.36551928520202637,
      "learning_rate": 2.9e-06,
      "loss": 0.0403,
      "step": 58
    },
    {
      "epoch": 0.7763157894736842,
      "grad_norm": 0.4458053708076477,
      "learning_rate": 2.95e-06,
      "loss": 0.0342,
      "step": 59
    },
    {
      "epoch": 0.7894736842105263,
      "grad_norm": 0.34047460556030273,
      "learning_rate": 3e-06,
      "loss": 0.0302,
      "step": 60
    },
    {
      "epoch": 0.8026315789473685,
      "grad_norm": 0.3420606255531311,
      "learning_rate": 3.05e-06,
      "loss": 0.034,
      "step": 61
    },
    {
      "epoch": 0.8157894736842105,
      "grad_norm": 0.3902851939201355,
      "learning_rate": 3.1000000000000004e-06,
      "loss": 0.0327,
      "step": 62
    },
    {
      "epoch": 0.8289473684210527,
      "grad_norm": 0.29165828227996826,
      "learning_rate": 3.1500000000000003e-06,
      "loss": 0.0341,
      "step": 63
    },
    {
      "epoch": 0.8421052631578947,
      "grad_norm": 0.40872958302497864,
      "learning_rate": 3.2000000000000003e-06,
      "loss": 0.035,
      "step": 64
    },
    {
      "epoch": 0.8552631578947368,
      "grad_norm": 0.36295783519744873,
      "learning_rate": 3.2500000000000002e-06,
      "loss": 0.0323,
      "step": 65
    },
    {
      "epoch": 0.868421052631579,
      "grad_norm": 0.3857724368572235,
      "learning_rate": 3.3000000000000006e-06,
      "loss": 0.0336,
      "step": 66
    },
    {
      "epoch": 0.881578947368421,
      "grad_norm": 0.3207017481327057,
      "learning_rate": 3.3500000000000005e-06,
      "loss": 0.0332,
      "step": 67
    },
    {
      "epoch": 0.8947368421052632,
      "grad_norm": 0.2903987169265747,
      "learning_rate": 3.4000000000000005e-06,
      "loss": 0.0327,
      "step": 68
    },
    {
      "epoch": 0.9078947368421053,
      "grad_norm": 0.3386954963207245,
      "learning_rate": 3.45e-06,
      "loss": 0.0308,
      "step": 69
    },
    {
      "epoch": 0.9210526315789473,
      "grad_norm": 0.4339621365070343,
      "learning_rate": 3.5e-06,
      "loss": 0.0361,
      "step": 70
    },
    {
      "epoch": 0.9342105263157895,
      "grad_norm": 0.28095564246177673,
      "learning_rate": 3.5500000000000003e-06,
      "loss": 0.0306,
      "step": 71
    },
    {
      "epoch": 0.9473684210526315,
      "grad_norm": 0.4141469895839691,
      "learning_rate": 3.6000000000000003e-06,
      "loss": 0.028,
      "step": 72
    },
    {
      "epoch": 0.9605263157894737,
      "grad_norm": 0.35212820768356323,
      "learning_rate": 3.65e-06,
      "loss": 0.032,
      "step": 73
    },
    {
      "epoch": 0.9736842105263158,
      "grad_norm": 0.26956063508987427,
      "learning_rate": 3.7e-06,
      "loss": 0.0294,
      "step": 74
    },
    {
      "epoch": 0.9868421052631579,
      "grad_norm": 0.32735681533813477,
      "learning_rate": 3.7500000000000005e-06,
      "loss": 0.0272,
      "step": 75
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.4906782805919647,
      "learning_rate": 3.8000000000000005e-06,
      "loss": 0.0324,
      "step": 76
    }
  ],
  "logging_steps": 1,
  "max_steps": 456,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 6,
  "save_steps": 76,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 4.889219280312205e+18,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}