{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.504424778761062,
  "eval_steps": 500,
  "global_step": 170,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.08849557522123894,
      "grad_norm": 0.8676792979240417,
      "learning_rate": 9.91645696388268e-05,
      "loss": 1.6736,
      "step": 10
    },
    {
      "epoch": 0.17699115044247787,
      "grad_norm": 1.35064697265625,
      "learning_rate": 9.665301848904975e-05,
      "loss": 1.1231,
      "step": 20
    },
    {
      "epoch": 0.26548672566371684,
      "grad_norm": 1.436787486076355,
      "learning_rate": 9.255088935784784e-05,
      "loss": 1.0192,
      "step": 30
    },
    {
      "epoch": 0.35398230088495575,
      "grad_norm": 0.7928199172019958,
      "learning_rate": 8.699803935381485e-05,
      "loss": 0.8938,
      "step": 40
    },
    {
      "epoch": 0.4424778761061947,
      "grad_norm": 1.052542805671692,
      "learning_rate": 8.018378615106108e-05,
      "loss": 0.9706,
      "step": 50
    },
    {
      "epoch": 0.5309734513274337,
      "grad_norm": 1.4108525514602661,
      "learning_rate": 7.23404534331376e-05,
      "loss": 0.8909,
      "step": 60
    },
    {
      "epoch": 0.6194690265486725,
      "grad_norm": 1.1198198795318604,
      "learning_rate": 6.373545009932168e-05,
      "loss": 0.8943,
      "step": 70
    },
    {
      "epoch": 0.7079646017699115,
      "grad_norm": 0.9904882907867432,
      "learning_rate": 5.466215328310079e-05,
      "loss": 0.873,
      "step": 80
    },
    {
      "epoch": 0.7964601769911505,
      "grad_norm": 1.4658399820327759,
      "learning_rate": 4.542990601526297e-05,
      "loss": 0.8852,
      "step": 90
    },
    {
      "epoch": 0.8849557522123894,
      "grad_norm": 1.0678004026412964,
      "learning_rate": 3.635347054911746e-05,
      "loss": 0.8028,
      "step": 100
    },
    {
      "epoch": 0.9734513274336283,
      "grad_norm": 1.1287755966186523,
      "learning_rate": 2.774229692390805e-05,
      "loss": 0.7924,
      "step": 110
    },
    {
      "epoch": 1.0619469026548674,
      "grad_norm": 0.9173304438591003,
      "learning_rate": 1.9889972641710248e-05,
      "loss": 0.7311,
      "step": 120
    },
    {
      "epoch": 1.1504424778761062,
      "grad_norm": 0.9768574833869934,
      "learning_rate": 1.3064213158260386e-05,
      "loss": 0.7295,
      "step": 130
    },
    {
      "epoch": 1.238938053097345,
      "grad_norm": 1.0783871412277222,
      "learning_rate": 7.497734449769639e-06,
      "loss": 0.7283,
      "step": 140
    },
    {
      "epoch": 1.3274336283185841,
      "grad_norm": 0.9281437993049622,
      "learning_rate": 3.380318844467728e-06,
      "loss": 0.7425,
      "step": 150
    },
    {
      "epoch": 1.415929203539823,
      "grad_norm": 0.9174750447273254,
      "learning_rate": 8.523446247096445e-07,
      "loss": 0.7629,
      "step": 160
    },
    {
      "epoch": 1.504424778761062,
      "grad_norm": 1.350212812423706,
      "learning_rate": 0.0,
      "loss": 0.6129,
      "step": 170
    },
    {
      "epoch": 1.504424778761062,
      "step": 170,
      "total_flos": 8891638258335744.0,
      "train_loss": 0.8897645473480225,
      "train_runtime": 144.6489,
      "train_samples_per_second": 9.333,
      "train_steps_per_second": 1.175
    }
  ],
  "logging_steps": 10,
  "max_steps": 170,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 500,
  "total_flos": 8891638258335744.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}