{
  "best_metric": 0.33508616900126137,
  "best_model_checkpoint": "distilbert-base-uncased-finetuned-cola/run-2/checkpoint-8552",
  "epoch": 4.0,
  "eval_steps": 500,
  "global_step": 8552,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.23,
      "learning_rate": 5.6188127124741755e-05,
      "loss": 0.6368,
      "step": 500
    },
    {
      "epoch": 0.47,
      "learning_rate": 5.269904819250494e-05,
      "loss": 0.6054,
      "step": 1000
    },
    {
      "epoch": 0.7,
      "learning_rate": 4.920996926026812e-05,
      "loss": 0.6131,
      "step": 1500
    },
    {
      "epoch": 0.94,
      "learning_rate": 4.57208903280313e-05,
      "loss": 0.5971,
      "step": 2000
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.6043549180030823,
      "eval_matthews_correlation": 0.16979966395795365,
      "eval_runtime": 0.8177,
      "eval_samples_per_second": 1275.601,
      "eval_steps_per_second": 80.719,
      "step": 2138
    },
    {
      "epoch": 1.17,
      "learning_rate": 4.2231811395794476e-05,
      "loss": 0.5802,
      "step": 2500
    },
    {
      "epoch": 1.4,
      "learning_rate": 3.874273246355766e-05,
      "loss": 0.5465,
      "step": 3000
    },
    {
      "epoch": 1.64,
      "learning_rate": 3.525365353132083e-05,
      "loss": 0.575,
      "step": 3500
    },
    {
      "epoch": 1.87,
      "learning_rate": 3.1764574599084014e-05,
      "loss": 0.56,
      "step": 4000
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.8283206820487976,
      "eval_matthews_correlation": 0.3112801106717032,
      "eval_runtime": 0.7366,
      "eval_samples_per_second": 1416.049,
      "eval_steps_per_second": 89.606,
      "step": 4276
    },
    {
      "epoch": 2.1,
      "learning_rate": 2.8275495666847193e-05,
      "loss": 0.4468,
      "step": 4500
    },
    {
      "epoch": 2.34,
      "learning_rate": 2.4786416734610375e-05,
      "loss": 0.4467,
      "step": 5000
    },
    {
      "epoch": 2.57,
      "learning_rate": 2.1297337802373553e-05,
      "loss": 0.4318,
      "step": 5500
    },
    {
      "epoch": 2.81,
      "learning_rate": 1.7808258870136732e-05,
      "loss": 0.4282,
      "step": 6000
    },
    {
      "epoch": 3.0,
      "eval_loss": 1.206721544265747,
      "eval_matthews_correlation": 0.3321332844589781,
      "eval_runtime": 0.7912,
      "eval_samples_per_second": 1318.307,
      "eval_steps_per_second": 83.421,
      "step": 6414
    },
    {
      "epoch": 3.04,
      "learning_rate": 1.4319179937899912e-05,
      "loss": 0.3788,
      "step": 6500
    },
    {
      "epoch": 3.27,
      "learning_rate": 1.083010100566309e-05,
      "loss": 0.2532,
      "step": 7000
    },
    {
      "epoch": 3.51,
      "learning_rate": 7.341022073426271e-06,
      "loss": 0.2491,
      "step": 7500
    },
    {
      "epoch": 3.74,
      "learning_rate": 3.85194314118945e-06,
      "loss": 0.2683,
      "step": 8000
    },
    {
      "epoch": 3.98,
      "learning_rate": 3.6286420895262934e-07,
      "loss": 0.2636,
      "step": 8500
    },
    {
      "epoch": 4.0,
      "eval_loss": 1.352795124053955,
      "eval_matthews_correlation": 0.33508616900126137,
      "eval_runtime": 0.7254,
      "eval_samples_per_second": 1437.829,
      "eval_steps_per_second": 90.984,
      "step": 8552
    }
  ],
  "logging_steps": 500,
  "max_steps": 8552,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 500,
  "total_flos": 138798926351136.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": {
    "learning_rate": 5.967720605697858e-05,
    "num_train_epochs": 4,
    "per_device_train_batch_size": 4,
    "seed": 37
  }
}