{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 90.9090909090909,
  "global_step": 2000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 2.27,
      "learning_rate": 3e-06,
      "loss": 6.9841,
      "step": 50
    },
    {
      "epoch": 4.55,
      "learning_rate": 6e-06,
      "loss": 3.6549,
      "step": 100
    },
    {
      "epoch": 6.82,
      "learning_rate": 9e-06,
      "loss": 2.3101,
      "step": 150
    },
    {
      "epoch": 9.09,
      "learning_rate": 1.2e-05,
      "loss": 1.9692,
      "step": 200
    },
    {
      "epoch": 11.36,
      "learning_rate": 1.5e-05,
      "loss": 1.7899,
      "step": 250
    },
    {
      "epoch": 13.64,
      "learning_rate": 1.8e-05,
      "loss": 1.6814,
      "step": 300
    },
    {
      "epoch": 15.91,
      "learning_rate": 2.1e-05,
      "loss": 1.6234,
      "step": 350
    },
    {
      "epoch": 18.18,
      "learning_rate": 2.4e-05,
      "loss": 1.5821,
      "step": 400
    },
    {
      "epoch": 20.45,
      "learning_rate": 2.7000000000000002e-05,
      "loss": 1.5501,
      "step": 450
    },
    {
      "epoch": 22.73,
      "learning_rate": 3e-05,
      "loss": 1.5385,
      "step": 500
    },
    {
      "epoch": 25.0,
      "learning_rate": 2.911764705882353e-05,
      "loss": 1.5289,
      "step": 550
    },
    {
      "epoch": 27.27,
      "learning_rate": 2.823529411764706e-05,
      "loss": 1.5101,
      "step": 600
    },
    {
      "epoch": 29.55,
      "learning_rate": 2.735294117647059e-05,
      "loss": 1.4921,
      "step": 650
    },
    {
      "epoch": 31.82,
      "learning_rate": 2.647058823529412e-05,
      "loss": 1.4881,
      "step": 700
    },
    {
      "epoch": 34.09,
      "learning_rate": 2.5588235294117648e-05,
      "loss": 1.4834,
      "step": 750
    },
    {
      "epoch": 36.36,
      "learning_rate": 2.4705882352941174e-05,
      "loss": 1.4783,
      "step": 800
    },
    {
      "epoch": 38.64,
      "learning_rate": 2.3823529411764704e-05,
      "loss": 1.4742,
      "step": 850
    },
    {
      "epoch": 40.91,
      "learning_rate": 2.2941176470588233e-05,
      "loss": 1.4671,
      "step": 900
    },
    {
      "epoch": 43.18,
      "learning_rate": 2.2058823529411766e-05,
      "loss": 1.4711,
      "step": 950
    },
    {
      "epoch": 45.45,
      "learning_rate": 2.1176470588235296e-05,
      "loss": 1.465,
      "step": 1000
    },
    {
      "epoch": 47.73,
      "learning_rate": 2.0294117647058825e-05,
      "loss": 1.4592,
      "step": 1050
    },
    {
      "epoch": 50.0,
      "learning_rate": 1.9411764705882355e-05,
      "loss": 1.4607,
      "step": 1100
    },
    {
      "epoch": 52.27,
      "learning_rate": 1.8529411764705884e-05,
      "loss": 1.4629,
      "step": 1150
    },
    {
      "epoch": 54.55,
      "learning_rate": 1.7647058823529414e-05,
      "loss": 1.4577,
      "step": 1200
    },
    {
      "epoch": 56.82,
      "learning_rate": 1.6764705882352943e-05,
      "loss": 1.4557,
      "step": 1250
    },
    {
      "epoch": 59.09,
      "learning_rate": 1.5882352941176473e-05,
      "loss": 1.4525,
      "step": 1300
    },
    {
      "epoch": 61.36,
      "learning_rate": 1.5e-05,
      "loss": 1.4519,
      "step": 1350
    },
    {
      "epoch": 63.64,
      "learning_rate": 1.411764705882353e-05,
      "loss": 1.4513,
      "step": 1400
    },
    {
      "epoch": 65.91,
      "learning_rate": 1.323529411764706e-05,
      "loss": 1.4517,
      "step": 1450
    },
    {
      "epoch": 68.18,
      "learning_rate": 1.2352941176470587e-05,
      "loss": 1.4493,
      "step": 1500
    },
    {
      "epoch": 70.45,
      "learning_rate": 1.1470588235294117e-05,
      "loss": 1.4502,
      "step": 1550
    },
    {
      "epoch": 72.73,
      "learning_rate": 1.0588235294117648e-05,
      "loss": 1.4598,
      "step": 1600
    },
    {
      "epoch": 75.0,
      "learning_rate": 9.705882352941177e-06,
      "loss": 1.4471,
      "step": 1650
    },
    {
      "epoch": 77.27,
      "learning_rate": 8.823529411764707e-06,
      "loss": 1.446,
      "step": 1700
    },
    {
      "epoch": 79.55,
      "learning_rate": 7.941176470588236e-06,
      "loss": 1.4473,
      "step": 1750
    },
    {
      "epoch": 81.82,
      "learning_rate": 7.058823529411765e-06,
      "loss": 1.4453,
      "step": 1800
    },
    {
      "epoch": 84.09,
      "learning_rate": 6.176470588235294e-06,
      "loss": 1.4462,
      "step": 1850
    },
    {
      "epoch": 86.36,
      "learning_rate": 5.294117647058824e-06,
      "loss": 1.4449,
      "step": 1900
    },
    {
      "epoch": 88.64,
      "learning_rate": 4.411764705882353e-06,
      "loss": 1.4489,
      "step": 1950
    },
    {
      "epoch": 90.91,
      "learning_rate": 3.5294117647058825e-06,
      "loss": 1.4447,
      "step": 2000
    }
  ],
  "max_steps": 2200,
  "num_train_epochs": 100,
  "total_flos": 4713262822195200.0,
  "trial_name": null,
  "trial_params": null
}