{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 90.9090909090909,
  "global_step": 2000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 2.27,
      "learning_rate": 3e-06,
      "loss": 7.7201,
      "step": 50
    },
    {
      "epoch": 4.55,
      "learning_rate": 6e-06,
      "loss": 4.0276,
      "step": 100
    },
    {
      "epoch": 6.82,
      "learning_rate": 9e-06,
      "loss": 2.3807,
      "step": 150
    },
    {
      "epoch": 9.09,
      "learning_rate": 1.2e-05,
      "loss": 2.0072,
      "step": 200
    },
    {
      "epoch": 11.36,
      "learning_rate": 1.5e-05,
      "loss": 1.8108,
      "step": 250
    },
    {
      "epoch": 13.64,
      "learning_rate": 1.8e-05,
      "loss": 1.7074,
      "step": 300
    },
    {
      "epoch": 15.91,
      "learning_rate": 2.1e-05,
      "loss": 1.6441,
      "step": 350
    },
    {
      "epoch": 18.18,
      "learning_rate": 2.4e-05,
      "loss": 1.5972,
      "step": 400
    },
    {
      "epoch": 20.45,
      "learning_rate": 2.7000000000000002e-05,
      "loss": 1.5593,
      "step": 450
    },
    {
      "epoch": 22.73,
      "learning_rate": 3e-05,
      "loss": 1.5459,
      "step": 500
    },
    {
      "epoch": 25.0,
      "learning_rate": 2.911764705882353e-05,
      "loss": 1.5295,
      "step": 550
    },
    {
      "epoch": 27.27,
      "learning_rate": 2.823529411764706e-05,
      "loss": 1.5236,
      "step": 600
    },
    {
      "epoch": 29.55,
      "learning_rate": 2.735294117647059e-05,
      "loss": 1.4994,
      "step": 650
    },
    {
      "epoch": 31.82,
      "learning_rate": 2.647058823529412e-05,
      "loss": 1.4965,
      "step": 700
    },
    {
      "epoch": 34.09,
      "learning_rate": 2.5588235294117648e-05,
      "loss": 1.4875,
      "step": 750
    },
    {
      "epoch": 36.36,
      "learning_rate": 2.4705882352941174e-05,
      "loss": 1.4824,
      "step": 800
    },
    {
      "epoch": 38.64,
      "learning_rate": 2.3823529411764704e-05,
      "loss": 1.481,
      "step": 850
    },
    {
      "epoch": 40.91,
      "learning_rate": 2.2941176470588233e-05,
      "loss": 1.4717,
      "step": 900
    },
    {
      "epoch": 43.18,
      "learning_rate": 2.2058823529411766e-05,
      "loss": 1.4692,
      "step": 950
    },
    {
      "epoch": 45.45,
      "learning_rate": 2.1176470588235296e-05,
      "loss": 1.4677,
      "step": 1000
    },
    {
      "epoch": 47.73,
      "learning_rate": 2.0294117647058825e-05,
      "loss": 1.4637,
      "step": 1050
    },
    {
      "epoch": 50.0,
      "learning_rate": 1.9411764705882355e-05,
      "loss": 1.4649,
      "step": 1100
    },
    {
      "epoch": 52.27,
      "learning_rate": 1.8529411764705884e-05,
      "loss": 1.4665,
      "step": 1150
    },
    {
      "epoch": 54.55,
      "learning_rate": 1.7647058823529414e-05,
      "loss": 1.4584,
      "step": 1200
    },
    {
      "epoch": 56.82,
      "learning_rate": 1.6764705882352943e-05,
      "loss": 1.4576,
      "step": 1250
    },
    {
      "epoch": 59.09,
      "learning_rate": 1.5882352941176473e-05,
      "loss": 1.455,
      "step": 1300
    },
    {
      "epoch": 61.36,
      "learning_rate": 1.5e-05,
      "loss": 1.4541,
      "step": 1350
    },
    {
      "epoch": 63.64,
      "learning_rate": 1.411764705882353e-05,
      "loss": 1.4532,
      "step": 1400
    },
    {
      "epoch": 65.91,
      "learning_rate": 1.323529411764706e-05,
      "loss": 1.4557,
      "step": 1450
    },
    {
      "epoch": 68.18,
      "learning_rate": 1.2352941176470587e-05,
      "loss": 1.452,
      "step": 1500
    },
    {
      "epoch": 70.45,
      "learning_rate": 1.1470588235294117e-05,
      "loss": 1.4513,
      "step": 1550
    },
    {
      "epoch": 72.73,
      "learning_rate": 1.0588235294117648e-05,
      "loss": 1.4697,
      "step": 1600
    },
    {
      "epoch": 75.0,
      "learning_rate": 9.705882352941177e-06,
      "loss": 1.4492,
      "step": 1650
    },
    {
      "epoch": 77.27,
      "learning_rate": 8.823529411764707e-06,
      "loss": 1.447,
      "step": 1700
    },
    {
      "epoch": 79.55,
      "learning_rate": 7.941176470588236e-06,
      "loss": 1.4489,
      "step": 1750
    },
    {
      "epoch": 81.82,
      "learning_rate": 7.058823529411765e-06,
      "loss": 1.4475,
      "step": 1800
    },
    {
      "epoch": 84.09,
      "learning_rate": 6.176470588235294e-06,
      "loss": 1.447,
      "step": 1850
    },
    {
      "epoch": 86.36,
      "learning_rate": 5.294117647058824e-06,
      "loss": 1.4472,
      "step": 1900
    },
    {
      "epoch": 88.64,
      "learning_rate": 4.411764705882353e-06,
      "loss": 1.4602,
      "step": 1950
    },
    {
      "epoch": 90.91,
      "learning_rate": 3.5294117647058825e-06,
      "loss": 1.447,
      "step": 2000
    }
  ],
  "max_steps": 2200,
  "num_train_epochs": 100,
  "total_flos": 4713262822195200.0,
  "trial_name": null,
  "trial_params": null
}