{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.3440860215053765,
  "global_step": 500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03,
      "learning_rate": 1.9731182795698928e-05,
      "loss": 9.7093,
      "step": 10
    },
    {
      "epoch": 0.05,
      "learning_rate": 1.946236559139785e-05,
      "loss": 0.7333,
      "step": 20
    },
    {
      "epoch": 0.08,
      "learning_rate": 1.9193548387096777e-05,
      "loss": 0.3919,
      "step": 30
    },
    {
      "epoch": 0.11,
      "learning_rate": 1.89247311827957e-05,
      "loss": 0.4332,
      "step": 40
    },
    {
      "epoch": 0.13,
      "learning_rate": 1.8655913978494623e-05,
      "loss": 0.4742,
      "step": 50
    },
    {
      "epoch": 0.16,
      "learning_rate": 1.838709677419355e-05,
      "loss": 0.4272,
      "step": 60
    },
    {
      "epoch": 0.19,
      "learning_rate": 1.8118279569892473e-05,
      "loss": 0.4382,
      "step": 70
    },
    {
      "epoch": 0.22,
      "learning_rate": 1.78494623655914e-05,
      "loss": 0.3068,
      "step": 80
    },
    {
      "epoch": 0.24,
      "learning_rate": 1.7580645161290325e-05,
      "loss": 0.3431,
      "step": 90
    },
    {
      "epoch": 0.27,
      "learning_rate": 1.7311827956989248e-05,
      "loss": 0.3652,
      "step": 100
    },
    {
      "epoch": 0.3,
      "learning_rate": 1.7043010752688175e-05,
      "loss": 0.3845,
      "step": 110
    },
    {
      "epoch": 0.32,
      "learning_rate": 1.6774193548387098e-05,
      "loss": 0.3613,
      "step": 120
    },
    {
      "epoch": 0.35,
      "learning_rate": 1.6505376344086024e-05,
      "loss": 0.4529,
      "step": 130
    },
    {
      "epoch": 0.38,
      "learning_rate": 1.6236559139784947e-05,
      "loss": 0.3652,
      "step": 140
    },
    {
      "epoch": 0.4,
      "learning_rate": 1.596774193548387e-05,
      "loss": 0.3286,
      "step": 150
    },
    {
      "epoch": 0.43,
      "learning_rate": 1.5698924731182796e-05,
      "loss": 0.4203,
      "step": 160
    },
    {
      "epoch": 0.46,
      "learning_rate": 1.5430107526881723e-05,
      "loss": 0.347,
      "step": 170
    },
    {
      "epoch": 0.48,
      "learning_rate": 1.5161290322580646e-05,
      "loss": 0.3657,
      "step": 180
    },
    {
      "epoch": 0.51,
      "learning_rate": 1.4892473118279572e-05,
      "loss": 0.3914,
      "step": 190
    },
    {
      "epoch": 0.54,
      "learning_rate": 1.4623655913978497e-05,
      "loss": 0.3301,
      "step": 200
    },
    {
      "epoch": 0.56,
      "learning_rate": 1.4354838709677421e-05,
      "loss": 0.3266,
      "step": 210
    },
    {
      "epoch": 0.59,
      "learning_rate": 1.4086021505376346e-05,
      "loss": 0.3893,
      "step": 220
    },
    {
      "epoch": 0.62,
      "learning_rate": 1.381720430107527e-05,
      "loss": 0.331,
      "step": 230
    },
    {
      "epoch": 0.65,
      "learning_rate": 1.3548387096774194e-05,
      "loss": 0.3853,
      "step": 240
    },
    {
      "epoch": 0.67,
      "learning_rate": 1.3279569892473118e-05,
      "loss": 0.3697,
      "step": 250
    },
    {
      "epoch": 0.7,
      "learning_rate": 1.3010752688172043e-05,
      "loss": 0.4314,
      "step": 260
    },
    {
      "epoch": 0.73,
      "learning_rate": 1.274193548387097e-05,
      "loss": 0.3491,
      "step": 270
    },
    {
      "epoch": 0.75,
      "learning_rate": 1.2473118279569894e-05,
      "loss": 0.3355,
      "step": 280
    },
    {
      "epoch": 0.78,
      "learning_rate": 1.2204301075268819e-05,
      "loss": 0.353,
      "step": 290
    },
    {
      "epoch": 0.81,
      "learning_rate": 1.1935483870967743e-05,
      "loss": 0.3984,
      "step": 300
    },
    {
      "epoch": 0.83,
      "learning_rate": 1.1666666666666668e-05,
      "loss": 0.3151,
      "step": 310
    },
    {
      "epoch": 0.86,
      "learning_rate": 1.1397849462365593e-05,
      "loss": 0.3339,
      "step": 320
    },
    {
      "epoch": 0.89,
      "learning_rate": 1.1129032258064516e-05,
      "loss": 0.3183,
      "step": 330
    },
    {
      "epoch": 0.91,
      "learning_rate": 1.086021505376344e-05,
      "loss": 0.3476,
      "step": 340
    },
    {
      "epoch": 0.94,
      "learning_rate": 1.0591397849462367e-05,
      "loss": 0.4169,
      "step": 350
    },
    {
      "epoch": 0.97,
      "learning_rate": 1.0322580645161291e-05,
      "loss": 0.3493,
      "step": 360
    },
    {
      "epoch": 0.99,
      "learning_rate": 1.0053763440860216e-05,
      "loss": 0.3481,
      "step": 370
    },
    {
      "epoch": 1.02,
      "learning_rate": 9.78494623655914e-06,
      "loss": 0.2469,
      "step": 380
    },
    {
      "epoch": 1.05,
      "learning_rate": 9.516129032258065e-06,
      "loss": 0.2227,
      "step": 390
    },
    {
      "epoch": 1.08,
      "learning_rate": 9.24731182795699e-06,
      "loss": 0.232,
      "step": 400
    },
    {
      "epoch": 1.1,
      "learning_rate": 8.978494623655915e-06,
      "loss": 0.1878,
      "step": 410
    },
    {
      "epoch": 1.13,
      "learning_rate": 8.70967741935484e-06,
      "loss": 0.2655,
      "step": 420
    },
    {
      "epoch": 1.16,
      "learning_rate": 8.440860215053764e-06,
      "loss": 0.1786,
      "step": 430
    },
    {
      "epoch": 1.18,
      "learning_rate": 8.172043010752689e-06,
      "loss": 0.1902,
      "step": 440
    },
    {
      "epoch": 1.21,
      "learning_rate": 7.903225806451613e-06,
      "loss": 0.227,
      "step": 450
    },
    {
      "epoch": 1.24,
      "learning_rate": 7.634408602150538e-06,
      "loss": 0.2045,
      "step": 460
    },
    {
      "epoch": 1.26,
      "learning_rate": 7.365591397849463e-06,
      "loss": 0.2533,
      "step": 470
    },
    {
      "epoch": 1.29,
      "learning_rate": 7.096774193548388e-06,
      "loss": 0.1642,
      "step": 480
    },
    {
      "epoch": 1.32,
      "learning_rate": 6.827956989247312e-06,
      "loss": 0.1756,
      "step": 490
    },
    {
      "epoch": 1.34,
      "learning_rate": 6.5591397849462365e-06,
      "loss": 0.1804,
      "step": 500
    }
  ],
  "max_steps": 744,
  "num_train_epochs": 2,
  "total_flos": 928706985984000.0,
  "trial_name": null,
  "trial_params": null
}