{
  "best_metric": 0.6923967003822327,
  "best_model_checkpoint": "tiny_bert_rand_50_v1_wnli/checkpoint-6",
  "epoch": 7.0,
  "eval_steps": 500,
  "global_step": 21,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "grad_norm": 0.956745445728302,
      "learning_rate": 4.9e-05,
      "loss": 0.6996,
      "step": 3
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.5352112676056338,
      "eval_loss": 0.6930016279220581,
      "eval_runtime": 0.036,
      "eval_samples_per_second": 1973.842,
      "eval_steps_per_second": 27.801,
      "step": 3
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.7020816206932068,
      "learning_rate": 4.8e-05,
      "loss": 0.6965,
      "step": 6
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.5633802816901409,
      "eval_loss": 0.6923967003822327,
      "eval_runtime": 0.0355,
      "eval_samples_per_second": 2000.79,
      "eval_steps_per_second": 28.18,
      "step": 6
    },
    {
      "epoch": 3.0,
      "grad_norm": 0.49778902530670166,
      "learning_rate": 4.7e-05,
      "loss": 0.6961,
      "step": 9
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.36619718309859156,
      "eval_loss": 0.7064810991287231,
      "eval_runtime": 0.0772,
      "eval_samples_per_second": 919.852,
      "eval_steps_per_second": 12.956,
      "step": 9
    },
    {
      "epoch": 4.0,
      "grad_norm": 0.6517056226730347,
      "learning_rate": 4.600000000000001e-05,
      "loss": 0.6933,
      "step": 12
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.352112676056338,
      "eval_loss": 0.704830527305603,
      "eval_runtime": 0.037,
      "eval_samples_per_second": 1921.187,
      "eval_steps_per_second": 27.059,
      "step": 12
    },
    {
      "epoch": 5.0,
      "grad_norm": 0.8543090224266052,
      "learning_rate": 4.5e-05,
      "loss": 0.6941,
      "step": 15
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.4507042253521127,
      "eval_loss": 0.703125,
      "eval_runtime": 0.0372,
      "eval_samples_per_second": 1910.796,
      "eval_steps_per_second": 26.913,
      "step": 15
    },
    {
      "epoch": 6.0,
      "grad_norm": 0.6381711959838867,
      "learning_rate": 4.4000000000000006e-05,
      "loss": 0.6911,
      "step": 18
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.39436619718309857,
      "eval_loss": 0.7073062658309937,
      "eval_runtime": 0.044,
      "eval_samples_per_second": 1614.322,
      "eval_steps_per_second": 22.737,
      "step": 18
    },
    {
      "epoch": 7.0,
      "grad_norm": 0.4603327214717865,
      "learning_rate": 4.3e-05,
      "loss": 0.6905,
      "step": 21
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.4084507042253521,
      "eval_loss": 0.710222065448761,
      "eval_runtime": 0.0359,
      "eval_samples_per_second": 1978.182,
      "eval_steps_per_second": 27.862,
      "step": 21
    },
    {
      "epoch": 7.0,
      "step": 21,
      "total_flos": 116563804707840.0,
      "train_loss": 0.6944480964115688,
      "train_runtime": 8.3963,
      "train_samples_per_second": 3781.441,
      "train_steps_per_second": 17.865
    }
  ],
  "logging_steps": 1,
  "max_steps": 150,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 50,
  "save_steps": 500,
  "total_flos": 116563804707840.0,
  "train_batch_size": 256,
  "trial_name": null,
  "trial_params": null
}