{
  "best_metric": 0.9530809205642168,
  "best_model_checkpoint": "./save_models/sst2/roberta-base_lr1e-05_run1/checkpoint-22734",
  "epoch": 10.0,
  "eval_steps": 500,
  "global_step": 37890,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.13,
      "learning_rate": 2.198768689533861e-06,
      "loss": 0.6593,
      "step": 500
    },
    {
      "epoch": 0.26,
      "learning_rate": 4.397537379067722e-06,
      "loss": 0.3243,
      "step": 1000
    },
    {
      "epoch": 0.4,
      "learning_rate": 6.596306068601583e-06,
      "loss": 0.2873,
      "step": 1500
    },
    {
      "epoch": 0.53,
      "learning_rate": 8.795074758135444e-06,
      "loss": 0.2614,
      "step": 2000
    },
    {
      "epoch": 0.66,
      "learning_rate": 9.936545372866128e-06,
      "loss": 0.2533,
      "step": 2500
    },
    {
      "epoch": 0.79,
      "learning_rate": 9.796159029649596e-06,
      "loss": 0.2394,
      "step": 3000
    },
    {
      "epoch": 0.92,
      "learning_rate": 9.655772686433064e-06,
      "loss": 0.2448,
      "step": 3500
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.9278396436525612,
      "eval_loss": 0.25612303614616394,
      "eval_runtime": 4.9386,
      "eval_samples_per_second": 1363.746,
      "eval_steps_per_second": 85.247,
      "step": 3789
    },
    {
      "epoch": 1.06,
      "learning_rate": 9.515386343216533e-06,
      "loss": 0.2235,
      "step": 4000
    },
    {
      "epoch": 1.19,
      "learning_rate": 9.375000000000001e-06,
      "loss": 0.1893,
      "step": 4500
    },
    {
      "epoch": 1.32,
      "learning_rate": 9.234613656783469e-06,
      "loss": 0.2059,
      "step": 5000
    },
    {
      "epoch": 1.45,
      "learning_rate": 9.094227313566937e-06,
      "loss": 0.2096,
      "step": 5500
    },
    {
      "epoch": 1.58,
      "learning_rate": 8.953840970350406e-06,
      "loss": 0.1936,
      "step": 6000
    },
    {
      "epoch": 1.72,
      "learning_rate": 8.813454627133872e-06,
      "loss": 0.2067,
      "step": 6500
    },
    {
      "epoch": 1.85,
      "learning_rate": 8.67306828391734e-06,
      "loss": 0.1879,
      "step": 7000
    },
    {
      "epoch": 1.98,
      "learning_rate": 8.532681940700809e-06,
      "loss": 0.2037,
      "step": 7500
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.948181143281366,
      "eval_loss": 0.17774701118469238,
      "eval_runtime": 4.9236,
      "eval_samples_per_second": 1367.913,
      "eval_steps_per_second": 85.507,
      "step": 7578
    },
    {
      "epoch": 2.11,
      "learning_rate": 8.392295597484278e-06,
      "loss": 0.1544,
      "step": 8000
    },
    {
      "epoch": 2.24,
      "learning_rate": 8.251909254267747e-06,
      "loss": 0.1502,
      "step": 8500
    },
    {
      "epoch": 2.38,
      "learning_rate": 8.111522911051213e-06,
      "loss": 0.1551,
      "step": 9000
    },
    {
      "epoch": 2.51,
      "learning_rate": 7.971136567834682e-06,
      "loss": 0.1464,
      "step": 9500
    },
    {
      "epoch": 2.64,
      "learning_rate": 7.83075022461815e-06,
      "loss": 0.1668,
      "step": 10000
    },
    {
      "epoch": 2.77,
      "learning_rate": 7.690363881401618e-06,
      "loss": 0.1452,
      "step": 10500
    },
    {
      "epoch": 2.9,
      "learning_rate": 7.549977538185086e-06,
      "loss": 0.1558,
      "step": 11000
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.9496659242761692,
      "eval_loss": 0.2051115483045578,
      "eval_runtime": 4.9481,
      "eval_samples_per_second": 1361.12,
      "eval_steps_per_second": 85.083,
      "step": 11367
    },
    {
      "epoch": 3.04,
      "learning_rate": 7.409591194968554e-06,
      "loss": 0.1437,
      "step": 11500
    },
    {
      "epoch": 3.17,
      "learning_rate": 7.269204851752022e-06,
      "loss": 0.1174,
      "step": 12000
    },
    {
      "epoch": 3.3,
      "learning_rate": 7.12881850853549e-06,
      "loss": 0.135,
      "step": 12500
    },
    {
      "epoch": 3.43,
      "learning_rate": 6.988432165318958e-06,
      "loss": 0.126,
      "step": 13000
    },
    {
      "epoch": 3.56,
      "learning_rate": 6.848045822102426e-06,
      "loss": 0.1249,
      "step": 13500
    },
    {
      "epoch": 3.69,
      "learning_rate": 6.707659478885895e-06,
      "loss": 0.1366,
      "step": 14000
    },
    {
      "epoch": 3.83,
      "learning_rate": 6.567273135669363e-06,
      "loss": 0.1355,
      "step": 14500
    },
    {
      "epoch": 3.96,
      "learning_rate": 6.426886792452831e-06,
      "loss": 0.1239,
      "step": 15000
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.9512991833704528,
      "eval_loss": 0.2515639364719391,
      "eval_runtime": 4.9353,
      "eval_samples_per_second": 1364.649,
      "eval_steps_per_second": 85.303,
      "step": 15156
    },
    {
      "epoch": 4.09,
      "learning_rate": 6.2865004492362994e-06,
      "loss": 0.0957,
      "step": 15500
    },
    {
      "epoch": 4.22,
      "learning_rate": 6.146114106019767e-06,
      "loss": 0.1014,
      "step": 16000
    },
    {
      "epoch": 4.35,
      "learning_rate": 6.005727762803235e-06,
      "loss": 0.0923,
      "step": 16500
    },
    {
      "epoch": 4.49,
      "learning_rate": 5.865341419586703e-06,
      "loss": 0.1169,
      "step": 17000
    },
    {
      "epoch": 4.62,
      "learning_rate": 5.7249550763701715e-06,
      "loss": 0.1123,
      "step": 17500
    },
    {
      "epoch": 4.75,
      "learning_rate": 5.584568733153639e-06,
      "loss": 0.1148,
      "step": 18000
    },
    {
      "epoch": 4.88,
      "learning_rate": 5.444182389937107e-06,
      "loss": 0.109,
      "step": 18500
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.9508537490720119,
      "eval_loss": 0.2747756838798523,
      "eval_runtime": 4.9452,
      "eval_samples_per_second": 1361.92,
      "eval_steps_per_second": 85.133,
      "step": 18945
    },
    {
      "epoch": 5.01,
      "learning_rate": 5.303796046720575e-06,
      "loss": 0.0971,
      "step": 19000
    },
    {
      "epoch": 5.15,
      "learning_rate": 5.163409703504043e-06,
      "loss": 0.0867,
      "step": 19500
    },
    {
      "epoch": 5.28,
      "learning_rate": 5.023023360287511e-06,
      "loss": 0.0813,
      "step": 20000
    },
    {
      "epoch": 5.41,
      "learning_rate": 4.88263701707098e-06,
      "loss": 0.0938,
      "step": 20500
    },
    {
      "epoch": 5.54,
      "learning_rate": 4.7422506738544475e-06,
      "loss": 0.0912,
      "step": 21000
    },
    {
      "epoch": 5.67,
      "learning_rate": 4.601864330637916e-06,
      "loss": 0.0787,
      "step": 21500
    },
    {
      "epoch": 5.81,
      "learning_rate": 4.461477987421384e-06,
      "loss": 0.0773,
      "step": 22000
    },
    {
      "epoch": 5.94,
      "learning_rate": 4.321091644204852e-06,
      "loss": 0.0915,
      "step": 22500
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.9530809205642168,
      "eval_loss": 0.27420464158058167,
      "eval_runtime": 4.9297,
      "eval_samples_per_second": 1366.207,
      "eval_steps_per_second": 85.401,
      "step": 22734
    },
    {
      "epoch": 6.07,
      "learning_rate": 4.18070530098832e-06,
      "loss": 0.0761,
      "step": 23000
    },
    {
      "epoch": 6.2,
      "learning_rate": 4.040318957771789e-06,
      "loss": 0.0694,
      "step": 23500
    },
    {
      "epoch": 6.33,
      "learning_rate": 3.899932614555256e-06,
      "loss": 0.0657,
      "step": 24000
    },
    {
      "epoch": 6.47,
      "learning_rate": 3.7595462713387242e-06,
      "loss": 0.0793,
      "step": 24500
    },
    {
      "epoch": 6.6,
      "learning_rate": 3.6191599281221925e-06,
      "loss": 0.0697,
      "step": 25000
    },
    {
      "epoch": 6.73,
      "learning_rate": 3.4787735849056607e-06,
      "loss": 0.0667,
      "step": 25500
    },
    {
      "epoch": 6.86,
      "learning_rate": 3.338387241689129e-06,
      "loss": 0.0666,
      "step": 26000
    },
    {
      "epoch": 6.99,
      "learning_rate": 3.1980008984725967e-06,
      "loss": 0.0661,
      "step": 26500
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.9515961395694135,
      "eval_loss": 0.32847702503204346,
      "eval_runtime": 4.9266,
      "eval_samples_per_second": 1367.065,
      "eval_steps_per_second": 85.454,
      "step": 26523
    },
    {
      "epoch": 7.13,
      "learning_rate": 3.057614555256065e-06,
      "loss": 0.0561,
      "step": 27000
    },
    {
      "epoch": 7.26,
      "learning_rate": 2.9172282120395328e-06,
      "loss": 0.0596,
      "step": 27500
    },
    {
      "epoch": 7.39,
      "learning_rate": 2.776841868823001e-06,
      "loss": 0.053,
      "step": 28000
    },
    {
      "epoch": 7.52,
      "learning_rate": 2.6364555256064692e-06,
      "loss": 0.0546,
      "step": 28500
    },
    {
      "epoch": 7.65,
      "learning_rate": 2.496069182389937e-06,
      "loss": 0.0559,
      "step": 29000
    },
    {
      "epoch": 7.79,
      "learning_rate": 2.3556828391734053e-06,
      "loss": 0.0686,
      "step": 29500
    },
    {
      "epoch": 7.92,
      "learning_rate": 2.2152964959568735e-06,
      "loss": 0.0632,
      "step": 30000
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.9504083147735709,
      "eval_loss": 0.35768425464630127,
      "eval_runtime": 4.9267,
      "eval_samples_per_second": 1367.037,
      "eval_steps_per_second": 85.452,
      "step": 30312
    },
    {
      "epoch": 8.05,
      "learning_rate": 2.0749101527403413e-06,
      "loss": 0.0457,
      "step": 30500
    },
    {
      "epoch": 8.18,
      "learning_rate": 1.9345238095238096e-06,
      "loss": 0.0512,
      "step": 31000
    },
    {
      "epoch": 8.31,
      "learning_rate": 1.7941374663072778e-06,
      "loss": 0.0426,
      "step": 31500
    },
    {
      "epoch": 8.45,
      "learning_rate": 1.6537511230907458e-06,
      "loss": 0.0536,
      "step": 32000
    },
    {
      "epoch": 8.58,
      "learning_rate": 1.513364779874214e-06,
      "loss": 0.0429,
      "step": 32500
    },
    {
      "epoch": 8.71,
      "learning_rate": 1.372978436657682e-06,
      "loss": 0.0431,
      "step": 33000
    },
    {
      "epoch": 8.84,
      "learning_rate": 1.2325920934411503e-06,
      "loss": 0.0471,
      "step": 33500
    },
    {
      "epoch": 8.97,
      "learning_rate": 1.092205750224618e-06,
      "loss": 0.049,
      "step": 34000
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.9505567928730512,
      "eval_loss": 0.35209420323371887,
      "eval_runtime": 4.9373,
      "eval_samples_per_second": 1364.109,
      "eval_steps_per_second": 85.269,
      "step": 34101
    },
    {
      "epoch": 9.11,
      "learning_rate": 9.518194070080863e-07,
      "loss": 0.0449,
      "step": 34500
    },
    {
      "epoch": 9.24,
      "learning_rate": 8.114330637915545e-07,
      "loss": 0.0357,
      "step": 35000
    },
    {
      "epoch": 9.37,
      "learning_rate": 6.710467205750225e-07,
      "loss": 0.0494,
      "step": 35500
    },
    {
      "epoch": 9.5,
      "learning_rate": 5.306603773584906e-07,
      "loss": 0.0331,
      "step": 36000
    },
    {
      "epoch": 9.63,
      "learning_rate": 3.902740341419587e-07,
      "loss": 0.0367,
      "step": 36500
    },
    {
      "epoch": 9.77,
      "learning_rate": 2.4988769092542675e-07,
      "loss": 0.0395,
      "step": 37000
    },
    {
      "epoch": 9.9,
      "learning_rate": 1.095013477088949e-07,
      "loss": 0.0326,
      "step": 37500
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.949072011878248,
      "eval_loss": 0.37647131085395813,
      "eval_runtime": 4.935,
      "eval_samples_per_second": 1364.743,
      "eval_steps_per_second": 85.309,
      "step": 37890
    },
    {
      "epoch": 10.0,
      "step": 37890,
      "total_flos": 1.11573681913992e+16,
      "train_loss": 0.11990570215349557,
      "train_runtime": 1605.649,
      "train_samples_per_second": 377.505,
      "train_steps_per_second": 23.598
    }
  ],
  "logging_steps": 500,
  "max_steps": 37890,
  "num_train_epochs": 10,
  "save_steps": 500,
  "total_flos": 1.11573681913992e+16,
  "trial_name": null,
  "trial_params": null
}