{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 100,
  "global_step": 193,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.025906735751295335,
      "grad_norm": 2.361337485007518,
      "learning_rate": 5e-06,
      "loss": 1.0953,
      "step": 5
    },
    {
      "epoch": 0.05181347150259067,
      "grad_norm": 1.8793520800812682,
      "learning_rate": 1e-05,
      "loss": 1.0616,
      "step": 10
    },
    {
      "epoch": 0.07772020725388601,
      "grad_norm": 1.1858193047272028,
      "learning_rate": 1.5000000000000002e-05,
      "loss": 0.9693,
      "step": 15
    },
    {
      "epoch": 0.10362694300518134,
      "grad_norm": 0.7387291447998859,
      "learning_rate": 2e-05,
      "loss": 0.9092,
      "step": 20
    },
    {
      "epoch": 0.12953367875647667,
      "grad_norm": 0.6192531094778085,
      "learning_rate": 1.9958807403786452e-05,
      "loss": 0.8608,
      "step": 25
    },
    {
      "epoch": 0.15544041450777202,
      "grad_norm": 0.5667677037077908,
      "learning_rate": 1.9835568981142376e-05,
      "loss": 0.8458,
      "step": 30
    },
    {
      "epoch": 0.18134715025906736,
      "grad_norm": 0.4996267697921315,
      "learning_rate": 1.9631300034184155e-05,
      "loss": 0.8224,
      "step": 35
    },
    {
      "epoch": 0.20725388601036268,
      "grad_norm": 0.43618384389551423,
      "learning_rate": 1.9347683436562e-05,
      "loss": 0.8119,
      "step": 40
    },
    {
      "epoch": 0.23316062176165803,
      "grad_norm": 0.3931133310858494,
      "learning_rate": 1.8987055769072973e-05,
      "loss": 0.8148,
      "step": 45
    },
    {
      "epoch": 0.25906735751295334,
      "grad_norm": 0.40316612152446546,
      "learning_rate": 1.855238806969513e-05,
      "loss": 0.8131,
      "step": 50
    },
    {
      "epoch": 0.2849740932642487,
      "grad_norm": 0.3990540539011308,
      "learning_rate": 1.804726135663399e-05,
      "loss": 0.783,
      "step": 55
    },
    {
      "epoch": 0.31088082901554404,
      "grad_norm": 0.3705450948569945,
      "learning_rate": 1.7475837126035105e-05,
      "loss": 0.7952,
      "step": 60
    },
    {
      "epoch": 0.33678756476683935,
      "grad_norm": 0.40117396147358353,
      "learning_rate": 1.684282306741802e-05,
      "loss": 0.7869,
      "step": 65
    },
    {
      "epoch": 0.3626943005181347,
      "grad_norm": 0.3970006009715918,
      "learning_rate": 1.615343427928555e-05,
      "loss": 0.7872,
      "step": 70
    },
    {
      "epoch": 0.38860103626943004,
      "grad_norm": 0.3669722762471545,
      "learning_rate": 1.541335030443444e-05,
      "loss": 0.7667,
      "step": 75
    },
    {
      "epoch": 0.41450777202072536,
      "grad_norm": 0.37983092792870526,
      "learning_rate": 1.4628668338932721e-05,
      "loss": 0.7713,
      "step": 80
    },
    {
      "epoch": 0.44041450777202074,
      "grad_norm": 0.3786015996009629,
      "learning_rate": 1.3805853000252584e-05,
      "loss": 0.7811,
      "step": 85
    },
    {
      "epoch": 0.46632124352331605,
      "grad_norm": 0.40866822056608465,
      "learning_rate": 1.2951683068394941e-05,
      "loss": 0.7688,
      "step": 90
    },
    {
      "epoch": 0.49222797927461137,
      "grad_norm": 0.3661767415183916,
      "learning_rate": 1.2073195638779944e-05,
      "loss": 0.7612,
      "step": 95
    },
    {
      "epoch": 0.5181347150259067,
      "grad_norm": 0.38416291561910765,
      "learning_rate": 1.1177628147000961e-05,
      "loss": 0.7633,
      "step": 100
    },
    {
      "epoch": 0.5181347150259067,
      "eval_loss": 0.7889683246612549,
      "eval_runtime": 4.4368,
      "eval_samples_per_second": 28.85,
      "eval_steps_per_second": 1.127,
      "step": 100
    },
    {
      "epoch": 0.5440414507772021,
      "grad_norm": 0.38739496057146355,
      "learning_rate": 1.0272358743072152e-05,
      "loss": 0.7753,
      "step": 105
    },
    {
      "epoch": 0.5699481865284974,
      "grad_norm": 0.3663559543332141,
      "learning_rate": 9.364845506397625e-06,
      "loss": 0.7625,
      "step": 110
    },
    {
      "epoch": 0.5958549222797928,
      "grad_norm": 0.36329749184394017,
      "learning_rate": 8.462565002240733e-06,
      "loss": 0.752,
      "step": 115
    },
    {
      "epoch": 0.6217616580310881,
      "grad_norm": 0.3708780988768332,
      "learning_rate": 7.572950685897295e-06,
      "loss": 0.7565,
      "step": 120
    },
    {
      "epoch": 0.6476683937823834,
      "grad_norm": 0.35690693505459087,
      "learning_rate": 6.7033316620310985e-06,
      "loss": 0.7634,
      "step": 125
    },
    {
      "epoch": 0.6735751295336787,
      "grad_norm": 0.3428496499757748,
      "learning_rate": 5.8608723037040894e-06,
      "loss": 0.7655,
      "step": 130
    },
    {
      "epoch": 0.6994818652849741,
      "grad_norm": 0.32806907567717136,
      "learning_rate": 5.052513228551048e-06,
      "loss": 0.7478,
      "step": 135
    },
    {
      "epoch": 0.7253886010362695,
      "grad_norm": 0.3398915194393529,
      "learning_rate": 4.284914118367637e-06,
      "loss": 0.7458,
      "step": 140
    },
    {
      "epoch": 0.7512953367875648,
      "grad_norm": 0.3347891147189906,
      "learning_rate": 3.5643988531937923e-06,
      "loss": 0.7512,
      "step": 145
    },
    {
      "epoch": 0.7772020725388601,
      "grad_norm": 0.3234147393280456,
      "learning_rate": 2.8969034119063176e-06,
      "loss": 0.7498,
      "step": 150
    },
    {
      "epoch": 0.8031088082901554,
      "grad_norm": 0.3201461199560322,
      "learning_rate": 2.2879269685426742e-06,
      "loss": 0.7587,
      "step": 155
    },
    {
      "epoch": 0.8290155440414507,
      "grad_norm": 0.326179350230441,
      "learning_rate": 1.742486587249873e-06,
      "loss": 0.7461,
      "step": 160
    },
    {
      "epoch": 0.8549222797927462,
      "grad_norm": 0.32259239143729646,
      "learning_rate": 1.2650758891049464e-06,
      "loss": 0.7519,
      "step": 165
    },
    {
      "epoch": 0.8808290155440415,
      "grad_norm": 0.3117286785796205,
      "learning_rate": 8.596280313312355e-07,
      "loss": 0.7609,
      "step": 170
    },
    {
      "epoch": 0.9067357512953368,
      "grad_norm": 0.32745395184601234,
      "learning_rate": 5.294833039069269e-07,
      "loss": 0.7529,
      "step": 175
    },
    {
      "epoch": 0.9326424870466321,
      "grad_norm": 0.29318306308239345,
      "learning_rate": 2.773616105217836e-07,
      "loss": 0.765,
      "step": 180
    },
    {
      "epoch": 0.9585492227979274,
      "grad_norm": 0.30031542714063897,
      "learning_rate": 1.053400605982613e-07,
      "loss": 0.7455,
      "step": 185
    },
    {
      "epoch": 0.9844559585492227,
      "grad_norm": 0.3457344392324936,
      "learning_rate": 1.4835856985568887e-08,
      "loss": 0.7511,
      "step": 190
    },
    {
      "epoch": 1.0,
      "step": 193,
      "total_flos": 76874092904448.0,
      "train_loss": 0.7989139248052409,
      "train_runtime": 2563.2883,
      "train_samples_per_second": 8.431,
      "train_steps_per_second": 0.075
    }
  ],
  "logging_steps": 5,
  "max_steps": 193,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 76874092904448.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}