{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 50,
  "global_step": 222,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04509582863585118,
      "grad_norm": 7.656381058041006e-05,
      "learning_rate": 3.9130434782608694e-07,
      "loss": 1.6138,
      "step": 10
    },
    {
      "epoch": 0.09019165727170236,
      "grad_norm": 6.489222141681239e-05,
      "learning_rate": 8.260869565217391e-07,
      "loss": 1.6747,
      "step": 20
    },
    {
      "epoch": 0.13528748590755355,
      "grad_norm": 6.760261749150231e-05,
      "learning_rate": 9.97758641300553e-07,
      "loss": 1.6401,
      "step": 30
    },
    {
      "epoch": 0.18038331454340473,
      "grad_norm": 5.8056666603079066e-05,
      "learning_rate": 9.841341526992535e-07,
      "loss": 1.8594,
      "step": 40
    },
    {
      "epoch": 0.2254791431792559,
      "grad_norm": 4.639743929146789e-05,
      "learning_rate": 9.584688140963944e-07,
      "loss": 1.5358,
      "step": 50
    },
    {
      "epoch": 0.2254791431792559,
      "eval_loss": 1.7225253582000732,
      "eval_runtime": 54.904,
      "eval_samples_per_second": 3.406,
      "eval_steps_per_second": 1.712,
      "step": 50
    },
    {
      "epoch": 0.2705749718151071,
      "grad_norm": 4.406080552143976e-05,
      "learning_rate": 9.214009454506752e-07,
      "loss": 1.5081,
      "step": 60
    },
    {
      "epoch": 0.3156708004509583,
      "grad_norm": 4.9922884500119835e-05,
      "learning_rate": 8.738524578558546e-07,
      "loss": 1.6377,
      "step": 70
    },
    {
      "epoch": 0.36076662908680945,
      "grad_norm": 4.485138924792409e-05,
      "learning_rate": 8.170059247861193e-07,
      "loss": 1.5535,
      "step": 80
    },
    {
      "epoch": 0.40586245772266066,
      "grad_norm": 3.757755985134281e-05,
      "learning_rate": 7.522751704345887e-07,
      "loss": 1.3739,
      "step": 90
    },
    {
      "epoch": 0.4509582863585118,
      "grad_norm": 4.152490146225318e-05,
      "learning_rate": 6.812701066393123e-07,
      "loss": 1.5175,
      "step": 100
    },
    {
      "epoch": 0.4509582863585118,
      "eval_loss": 1.597386360168457,
      "eval_runtime": 54.3789,
      "eval_samples_per_second": 3.439,
      "eval_steps_per_second": 1.729,
      "step": 100
    },
    {
      "epoch": 0.496054114994363,
      "grad_norm": 3.326448131701909e-05,
      "learning_rate": 6.057566929339095e-07,
      "loss": 1.414,
      "step": 110
    },
    {
      "epoch": 0.5411499436302142,
      "grad_norm": 3.826636020676233e-05,
      "learning_rate": 5.27613015552254e-07,
      "loss": 1.4865,
      "step": 120
    },
    {
      "epoch": 0.5862457722660653,
      "grad_norm": 3.5070326703134924e-05,
      "learning_rate": 4.4878257774169345e-07,
      "loss": 1.4087,
      "step": 130
    },
    {
      "epoch": 0.6313416009019166,
      "grad_norm": 3.05857029161416e-05,
      "learning_rate": 3.7122596309655174e-07,
      "loss": 1.4122,
      "step": 140
    },
    {
      "epoch": 0.6764374295377678,
      "grad_norm": 2.6550629627308808e-05,
      "learning_rate": 2.9687207408810555e-07,
      "loss": 1.4691,
      "step": 150
    },
    {
      "epoch": 0.6764374295377678,
      "eval_loss": 1.5566362142562866,
      "eval_runtime": 54.2124,
      "eval_samples_per_second": 3.449,
      "eval_steps_per_second": 1.734,
      "step": 150
    },
    {
      "epoch": 0.7215332581736189,
      "grad_norm": 2.8728065444738604e-05,
      "learning_rate": 2.275701585324649e-07,
      "loss": 1.3447,
      "step": 160
    },
    {
      "epoch": 0.7666290868094702,
      "grad_norm": 2.87340644717915e-05,
      "learning_rate": 1.6504381714107252e-07,
      "loss": 1.4244,
      "step": 170
    },
    {
      "epoch": 0.8117249154453213,
      "grad_norm": 2.3698501536273398e-05,
      "learning_rate": 1.1084813602723514e-07,
      "loss": 1.4641,
      "step": 180
    },
    {
      "epoch": 0.8568207440811725,
      "grad_norm": 2.6557932869764045e-05,
      "learning_rate": 6.633101032164273e-08,
      "loss": 1.5457,
      "step": 190
    },
    {
      "epoch": 0.9019165727170236,
      "grad_norm": 2.557658990554046e-05,
      "learning_rate": 3.2599620813200835e-08,
      "loss": 1.4515,
      "step": 200
    },
    {
      "epoch": 0.9019165727170236,
      "eval_loss": 1.547265648841858,
      "eval_runtime": 54.2134,
      "eval_samples_per_second": 3.449,
      "eval_steps_per_second": 1.734,
      "step": 200
    },
    {
      "epoch": 0.9470124013528749,
      "grad_norm": 2.975752249767538e-05,
      "learning_rate": 1.0492897371142728e-08,
      "loss": 1.3338,
      "step": 210
    },
    {
      "epoch": 0.992108229988726,
      "grad_norm": 2.890920586651191e-05,
      "learning_rate": 5.606540077782162e-10,
      "loss": 1.5025,
      "step": 220
    }
  ],
  "logging_steps": 10,
  "max_steps": 222,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3.5912091671706624e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}