{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 50,
  "global_step": 222,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04509582863585118,
      "grad_norm": 6.924273475306109e-05,
      "learning_rate": 3.9130434782608694e-07,
      "loss": 1.6552,
      "step": 10
    },
    {
      "epoch": 0.09019165727170236,
      "grad_norm": 5.8883713791146874e-05,
      "learning_rate": 8.260869565217391e-07,
      "loss": 1.7092,
      "step": 20
    },
    {
      "epoch": 0.13528748590755355,
      "grad_norm": 6.821321585448459e-05,
      "learning_rate": 9.97758641300553e-07,
      "loss": 1.6896,
      "step": 30
    },
    {
      "epoch": 0.18038331454340473,
      "grad_norm": 5.448404044727795e-05,
      "learning_rate": 9.841341526992535e-07,
      "loss": 1.9092,
      "step": 40
    },
    {
      "epoch": 0.2254791431792559,
      "grad_norm": 4.814426938537508e-05,
      "learning_rate": 9.584688140963944e-07,
      "loss": 1.5902,
      "step": 50
    },
    {
      "epoch": 0.2254791431792559,
      "eval_loss": 1.7812573909759521,
      "eval_runtime": 39.7364,
      "eval_samples_per_second": 4.706,
      "eval_steps_per_second": 2.366,
      "step": 50
    },
    {
      "epoch": 0.2705749718151071,
      "grad_norm": 4.571067256620154e-05,
      "learning_rate": 9.214009454506752e-07,
      "loss": 1.5572,
      "step": 60
    },
    {
      "epoch": 0.3156708004509583,
      "grad_norm": 5.38822278031148e-05,
      "learning_rate": 8.738524578558546e-07,
      "loss": 1.6925,
      "step": 70
    },
    {
      "epoch": 0.36076662908680945,
      "grad_norm": 4.7335557610495016e-05,
      "learning_rate": 8.170059247861193e-07,
      "loss": 1.604,
      "step": 80
    },
    {
      "epoch": 0.40586245772266066,
      "grad_norm": 3.958986417273991e-05,
      "learning_rate": 7.522751704345887e-07,
      "loss": 1.4225,
      "step": 90
    },
    {
      "epoch": 0.4509582863585118,
      "grad_norm": 4.49267536168918e-05,
      "learning_rate": 6.812701066393123e-07,
      "loss": 1.5621,
      "step": 100
    },
    {
      "epoch": 0.4509582863585118,
      "eval_loss": 1.6484391689300537,
      "eval_runtime": 39.1649,
      "eval_samples_per_second": 4.775,
      "eval_steps_per_second": 2.4,
      "step": 100
    },
    {
      "epoch": 0.496054114994363,
      "grad_norm": 3.838773773168214e-05,
      "learning_rate": 6.057566929339095e-07,
      "loss": 1.4543,
      "step": 110
    },
    {
      "epoch": 0.5411499436302142,
      "grad_norm": 4.512808664003387e-05,
      "learning_rate": 5.27613015552254e-07,
      "loss": 1.5298,
      "step": 120
    },
    {
      "epoch": 0.5862457722660653,
      "grad_norm": 1.84769305633381e-05,
      "learning_rate": 4.4878257774169345e-07,
      "loss": 1.4496,
      "step": 130
    },
    {
      "epoch": 0.6313416009019166,
      "grad_norm": 3.5642162401927635e-05,
      "learning_rate": 3.7122596309655174e-07,
      "loss": 1.4475,
      "step": 140
    },
    {
      "epoch": 0.6764374295377678,
      "grad_norm": 2.6416861146572046e-05,
      "learning_rate": 2.9687207408810555e-07,
      "loss": 1.4962,
      "step": 150
    },
    {
      "epoch": 0.6764374295377678,
      "eval_loss": 1.5978729724884033,
      "eval_runtime": 39.1896,
      "eval_samples_per_second": 4.772,
      "eval_steps_per_second": 2.399,
      "step": 150
    },
    {
      "epoch": 0.7215332581736189,
      "grad_norm": 3.0987648642621934e-05,
      "learning_rate": 2.275701585324649e-07,
      "loss": 1.3739,
      "step": 160
    },
    {
      "epoch": 0.7666290868094702,
      "grad_norm": 3.293091504019685e-05,
      "learning_rate": 1.6504381714107252e-07,
      "loss": 1.4553,
      "step": 170
    },
    {
      "epoch": 0.8117249154453213,
      "grad_norm": 2.840998968167696e-05,
      "learning_rate": 1.1084813602723514e-07,
      "loss": 1.4978,
      "step": 180
    },
    {
      "epoch": 0.8568207440811725,
      "grad_norm": 3.1241466786013916e-05,
      "learning_rate": 6.633101032164273e-08,
      "loss": 1.5816,
      "step": 190
    },
    {
      "epoch": 0.9019165727170236,
      "grad_norm": 2.8312277208897285e-05,
      "learning_rate": 3.2599620813200835e-08,
      "loss": 1.484,
      "step": 200
    },
    {
      "epoch": 0.9019165727170236,
      "eval_loss": 1.5860079526901245,
      "eval_runtime": 39.1241,
      "eval_samples_per_second": 4.78,
      "eval_steps_per_second": 2.403,
      "step": 200
    },
    {
      "epoch": 0.9470124013528749,
      "grad_norm": 3.578228279366158e-05,
      "learning_rate": 1.0492897371142728e-08,
      "loss": 1.3656,
      "step": 210
    },
    {
      "epoch": 0.992108229988726,
      "grad_norm": 3.2024283427745104e-05,
      "learning_rate": 5.606540077782162e-10,
      "loss": 1.5348,
      "step": 220
    }
  ],
  "logging_steps": 10,
  "max_steps": 222,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3.4466834382336e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}
|
|