{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 4.860524091293322,
  "global_step": 11500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.21,
      "learning_rate": 2.8732037193575658e-05,
      "loss": 3.5507,
      "step": 500
    },
    {
      "epoch": 0.42,
      "learning_rate": 2.7464074387151312e-05,
      "loss": 3.139,
      "step": 1000
    },
    {
      "epoch": 0.63,
      "learning_rate": 2.6196111580726966e-05,
      "loss": 3.0681,
      "step": 1500
    },
    {
      "epoch": 0.85,
      "learning_rate": 2.492814877430262e-05,
      "loss": 3.0193,
      "step": 2000
    },
    {
      "epoch": 1.0,
      "eval_gen_len": 7.8559,
      "eval_loss": 2.918958902359009,
      "eval_rouge1": 48.6486,
      "eval_rouge2": 0.0,
      "eval_rougeL": 48.6486,
      "eval_rougeLsum": 48.6486,
      "eval_runtime": 1.4313,
      "eval_samples_per_second": 77.553,
      "eval_steps_per_second": 4.891,
      "step": 2366
    },
    {
      "epoch": 1.06,
      "learning_rate": 2.3660185967878277e-05,
      "loss": 2.9837,
      "step": 2500
    },
    {
      "epoch": 1.27,
      "learning_rate": 2.239222316145393e-05,
      "loss": 2.9458,
      "step": 3000
    },
    {
      "epoch": 1.48,
      "learning_rate": 2.112426035502959e-05,
      "loss": 2.9247,
      "step": 3500
    },
    {
      "epoch": 1.69,
      "learning_rate": 1.985629754860524e-05,
      "loss": 2.9004,
      "step": 4000
    },
    {
      "epoch": 1.9,
      "learning_rate": 1.8588334742180896e-05,
      "loss": 2.9038,
      "step": 4500
    },
    {
      "epoch": 2.0,
      "eval_gen_len": 7.7387,
      "eval_loss": 2.838879346847534,
      "eval_rouge1": 48.6486,
      "eval_rouge2": 0.0,
      "eval_rougeL": 48.6486,
      "eval_rougeLsum": 48.6486,
      "eval_runtime": 1.069,
      "eval_samples_per_second": 103.833,
      "eval_steps_per_second": 6.548,
      "step": 4732
    },
    {
      "epoch": 2.11,
      "learning_rate": 1.732037193575655e-05,
      "loss": 2.8876,
      "step": 5000
    },
    {
      "epoch": 2.32,
      "learning_rate": 1.6052409129332207e-05,
      "loss": 2.8646,
      "step": 5500
    },
    {
      "epoch": 2.54,
      "learning_rate": 1.4784446322907861e-05,
      "loss": 2.8488,
      "step": 6000
    },
    {
      "epoch": 2.75,
      "learning_rate": 1.3516483516483517e-05,
      "loss": 2.8431,
      "step": 6500
    },
    {
      "epoch": 2.96,
      "learning_rate": 1.224852071005917e-05,
      "loss": 2.8369,
      "step": 7000
    },
    {
      "epoch": 3.0,
      "eval_gen_len": 7.8468,
      "eval_loss": 2.808067560195923,
      "eval_rouge1": 48.6486,
      "eval_rouge2": 0.0,
      "eval_rougeL": 48.6486,
      "eval_rougeLsum": 48.6486,
      "eval_runtime": 1.0729,
      "eval_samples_per_second": 103.457,
      "eval_steps_per_second": 6.524,
      "step": 7098
    },
    {
      "epoch": 3.17,
      "learning_rate": 1.0980557903634826e-05,
      "loss": 2.8347,
      "step": 7500
    },
    {
      "epoch": 3.38,
      "learning_rate": 9.712595097210482e-06,
      "loss": 2.82,
      "step": 8000
    },
    {
      "epoch": 3.59,
      "learning_rate": 8.444632290786136e-06,
      "loss": 2.826,
      "step": 8500
    },
    {
      "epoch": 3.8,
      "learning_rate": 7.1766694843617924e-06,
      "loss": 2.7988,
      "step": 9000
    },
    {
      "epoch": 4.0,
      "eval_gen_len": 8.027,
      "eval_loss": 2.785550832748413,
      "eval_rouge1": 48.6486,
      "eval_rouge2": 0.0,
      "eval_rougeL": 48.6486,
      "eval_rougeLsum": 48.6486,
      "eval_runtime": 1.0854,
      "eval_samples_per_second": 102.268,
      "eval_steps_per_second": 6.449,
      "step": 9464
    },
    {
      "epoch": 4.02,
      "learning_rate": 5.908706677937447e-06,
      "loss": 2.8132,
      "step": 9500
    },
    {
      "epoch": 4.23,
      "learning_rate": 4.640743871513102e-06,
      "loss": 2.7946,
      "step": 10000
    },
    {
      "epoch": 4.44,
      "learning_rate": 3.3727810650887576e-06,
      "loss": 2.8009,
      "step": 10500
    },
    {
      "epoch": 4.65,
      "learning_rate": 2.1048182586644128e-06,
      "loss": 2.7986,
      "step": 11000
    },
    {
      "epoch": 4.86,
      "learning_rate": 8.368554522400676e-07,
      "loss": 2.7982,
      "step": 11500
    }
  ],
  "max_steps": 11830,
  "num_train_epochs": 5,
  "total_flos": 5833380202217472.0,
  "trial_name": null,
  "trial_params": null
}