{
  "best_metric": 1.6017142534255981,
  "best_model_checkpoint": "arbitropy/mt5-base-bcoqa/checkpoint-2100",
  "epoch": 0.060544904137235116,
  "eval_steps": 700,
  "global_step": 2100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "grad_norm": 14.746796607971191,
      "learning_rate": 4.9639613665849795e-05,
      "loss": 4.3627,
      "step": 500
    },
    {
      "epoch": 0.02,
      "eval_loss": 1.7135968208312988,
      "eval_runtime": 62.9933,
      "eval_samples_per_second": 75.183,
      "eval_steps_per_second": 18.796,
      "step": 700
    },
    {
      "epoch": 0.03,
      "grad_norm": 3.6309075355529785,
      "learning_rate": 4.927922733169959e-05,
      "loss": 2.2652,
      "step": 1000
    },
    {
      "epoch": 0.04,
      "eval_loss": 1.6376169919967651,
      "eval_runtime": 63.2403,
      "eval_samples_per_second": 74.889,
      "eval_steps_per_second": 18.722,
      "step": 1400
    },
    {
      "epoch": 0.04,
      "grad_norm": 6.434725761413574,
      "learning_rate": 4.891884099754937e-05,
      "loss": 2.0917,
      "step": 1500
    },
    {
      "epoch": 0.06,
      "grad_norm": 10.990394592285156,
      "learning_rate": 4.8558454663399166e-05,
      "loss": 1.9203,
      "step": 2000
    },
    {
      "epoch": 0.06,
      "eval_loss": 1.6017142534255981,
      "eval_runtime": 63.1369,
      "eval_samples_per_second": 75.012,
      "eval_steps_per_second": 18.753,
      "step": 2100
    }
  ],
  "logging_steps": 500,
  "max_steps": 69370,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 700,
  "total_flos": 1.956696216451891e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}