{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9969230769230769,
  "eval_steps": 100,
  "global_step": 243,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0041025641025641026,
      "grad_norm": 86.8332649639965,
      "learning_rate": 8e-08,
      "loss": 7.2742,
      "step": 1
    },
    {
      "epoch": 0.020512820512820513,
      "grad_norm": 74.05727804792163,
      "learning_rate": 4e-07,
      "loss": 6.4673,
      "step": 5
    },
    {
      "epoch": 0.041025641025641026,
      "grad_norm": 38.97594404855973,
      "learning_rate": 8e-07,
      "loss": 4.526,
      "step": 10
    },
    {
      "epoch": 0.06153846153846154,
      "grad_norm": 5.6840773038859655,
      "learning_rate": 1.2e-06,
      "loss": 1.6883,
      "step": 15
    },
    {
      "epoch": 0.08205128205128205,
      "grad_norm": 1.3276030305734883,
      "learning_rate": 1.6e-06,
      "loss": 1.1059,
      "step": 20
    },
    {
      "epoch": 0.10256410256410256,
      "grad_norm": 2.343635823419092,
      "learning_rate": 2e-06,
      "loss": 1.1063,
      "step": 25
    },
    {
      "epoch": 0.12307692307692308,
      "grad_norm": 2.289270560431258,
      "learning_rate": 1.9974051702905273e-06,
      "loss": 1.0758,
      "step": 30
    },
    {
      "epoch": 0.14358974358974358,
      "grad_norm": 1.5816159769523035,
      "learning_rate": 1.9896341474445524e-06,
      "loss": 1.01,
      "step": 35
    },
    {
      "epoch": 0.1641025641025641,
      "grad_norm": 2.3893777455781637,
      "learning_rate": 1.976727260423982e-06,
      "loss": 0.9688,
      "step": 40
    },
    {
      "epoch": 0.18461538461538463,
      "grad_norm": 1.6349839241564788,
      "learning_rate": 1.9587514915766123e-06,
      "loss": 0.9678,
      "step": 45
    },
    {
      "epoch": 0.20512820512820512,
      "grad_norm": 3.191279307503814,
      "learning_rate": 1.935800129020554e-06,
      "loss": 0.9354,
      "step": 50
    },
    {
      "epoch": 0.22564102564102564,
      "grad_norm": 1.6448779128510165,
      "learning_rate": 1.907992282510675e-06,
      "loss": 0.9385,
      "step": 55
    },
    {
      "epoch": 0.24615384615384617,
      "grad_norm": 1.9829387005126868,
      "learning_rate": 1.8754722652995345e-06,
      "loss": 0.9058,
      "step": 60
    },
    {
      "epoch": 0.26666666666666666,
      "grad_norm": 2.29111150139111,
      "learning_rate": 1.8384088452007577e-06,
      "loss": 0.8891,
      "step": 65
    },
    {
      "epoch": 0.28717948717948716,
      "grad_norm": 1.5810061706904381,
      "learning_rate": 1.7969943687415575e-06,
      "loss": 0.8934,
      "step": 70
    },
    {
      "epoch": 0.3076923076923077,
      "grad_norm": 3.073136058331183,
      "learning_rate": 1.751443762949772e-06,
      "loss": 0.9122,
      "step": 75
    },
    {
      "epoch": 0.3282051282051282,
      "grad_norm": 1.7225381141190559,
      "learning_rate": 1.7019934199557866e-06,
      "loss": 0.9145,
      "step": 80
    },
    {
      "epoch": 0.3487179487179487,
      "grad_norm": 1.3310212541816835,
      "learning_rate": 1.6488999701978903e-06,
      "loss": 0.8261,
      "step": 85
    },
    {
      "epoch": 0.36923076923076925,
      "grad_norm": 1.6745334659944915,
      "learning_rate": 1.5924389505977035e-06,
      "loss": 0.8751,
      "step": 90
    },
    {
      "epoch": 0.38974358974358975,
      "grad_norm": 1.8233491777788193,
      "learning_rate": 1.5329033746173974e-06,
      "loss": 0.8872,
      "step": 95
    },
    {
      "epoch": 0.41025641025641024,
      "grad_norm": 1.3524539580904948,
      "learning_rate": 1.4706022116196205e-06,
      "loss": 0.9192,
      "step": 100
    },
    {
      "epoch": 0.4307692307692308,
      "grad_norm": 1.2310835465382302,
      "learning_rate": 1.4058587834217354e-06,
      "loss": 0.8605,
      "step": 105
    },
    {
      "epoch": 0.4512820512820513,
      "grad_norm": 1.4801328470056587,
      "learning_rate": 1.3390090863657047e-06,
      "loss": 0.8715,
      "step": 110
    },
    {
      "epoch": 0.4717948717948718,
      "grad_norm": 1.117288039177199,
      "learning_rate": 1.2704000476115078e-06,
      "loss": 0.8293,
      "step": 115
    },
    {
      "epoch": 0.49230769230769234,
      "grad_norm": 2.3897644821265698,
      "learning_rate": 1.200387724703341e-06,
      "loss": 0.8803,
      "step": 120
    },
    {
      "epoch": 0.5128205128205128,
      "grad_norm": 1.270827623469446,
      "learning_rate": 1.1293354577522264e-06,
      "loss": 0.8735,
      "step": 125
    },
    {
      "epoch": 0.5333333333333333,
      "grad_norm": 1.2548143617737169,
      "learning_rate": 1.0576119838245842e-06,
      "loss": 0.8359,
      "step": 130
    },
    {
      "epoch": 0.5538461538461539,
      "grad_norm": 1.184800287371971,
      "learning_rate": 9.85589523322443e-07,
      "loss": 0.883,
      "step": 135
    },
    {
      "epoch": 0.5743589743589743,
      "grad_norm": 1.5024665213422632,
      "learning_rate": 9.136418482863228e-07,
      "loss": 0.85,
      "step": 140
    },
    {
      "epoch": 0.5948717948717949,
      "grad_norm": 1.0627183442814103,
      "learning_rate": 8.42142342645646e-07,
      "loss": 0.8154,
      "step": 145
    },
    {
      "epoch": 0.6153846153846154,
      "grad_norm": 1.7796232405974886,
      "learning_rate": 7.714620644833109e-07,
      "loss": 0.8369,
      "step": 150
    },
    {
      "epoch": 0.6358974358974359,
      "grad_norm": 1.3421106550942563,
      "learning_rate": 7.019678203706163e-07,
      "loss": 0.8171,
      "step": 155
    },
    {
      "epoch": 0.6564102564102564,
      "grad_norm": 1.1941093068038942,
      "learning_rate": 6.340202617660841e-07,
      "loss": 0.8119,
      "step": 160
    },
    {
      "epoch": 0.676923076923077,
      "grad_norm": 0.910589742578592,
      "learning_rate": 5.679720133572206e-07,
      "loss": 0.7591,
      "step": 165
    },
    {
      "epoch": 0.6974358974358974,
      "grad_norm": 1.1777506504426953,
      "learning_rate": 5.041658430584852e-07,
      "loss": 0.7912,
      "step": 170
    },
    {
      "epoch": 0.717948717948718,
      "grad_norm": 1.0581504812865925,
      "learning_rate": 4.429328831625565e-07,
      "loss": 0.8096,
      "step": 175
    },
    {
      "epoch": 0.7384615384615385,
      "grad_norm": 0.9582774191881785,
      "learning_rate": 3.8459091187650726e-07,
      "loss": 0.827,
      "step": 180
    },
    {
      "epoch": 0.7589743589743589,
      "grad_norm": 1.1383169940367925,
      "learning_rate": 3.294427041611425e-07,
      "loss": 0.7893,
      "step": 185
    },
    {
      "epoch": 0.7794871794871795,
      "grad_norm": 1.6396416587696268,
      "learning_rate": 2.777744604320705e-07,
      "loss": 0.7929,
      "step": 190
    },
    {
      "epoch": 0.8,
      "grad_norm": 1.2294302106292587,
      "learning_rate": 2.2985432127701941e-07,
      "loss": 0.7893,
      "step": 195
    },
    {
      "epoch": 0.8205128205128205,
      "grad_norm": 1.8335365896381306,
      "learning_rate": 1.8593097589751316e-07,
      "loss": 0.7996,
      "step": 200
    },
    {
      "epoch": 0.841025641025641,
      "grad_norm": 1.2025469183045263,
      "learning_rate": 1.4623237149661137e-07,
      "loss": 0.7809,
      "step": 205
    },
    {
      "epoch": 0.8615384615384616,
      "grad_norm": 1.1758082969534152,
      "learning_rate": 1.1096453031056264e-07,
      "loss": 0.7489,
      "step": 210
    },
    {
      "epoch": 0.882051282051282,
      "grad_norm": 0.9606044331683303,
      "learning_rate": 8.031048042356392e-08,
      "loss": 0.7648,
      "step": 215
    },
    {
      "epoch": 0.9025641025641026,
      "grad_norm": 1.1443378986807335,
      "learning_rate": 5.442930591433992e-08,
      "loss": 0.7949,
      "step": 220
    },
    {
      "epoch": 0.9230769230769231,
      "grad_norm": 1.0819245011051657,
      "learning_rate": 3.345532126395578e-08,
      "loss": 0.7707,
      "step": 225
    },
    {
      "epoch": 0.9435897435897436,
      "grad_norm": 1.1813637949996902,
      "learning_rate": 1.7497374309405344e-08,
      "loss": 0.7604,
      "step": 230
    },
    {
      "epoch": 0.9641025641025641,
      "grad_norm": 1.057572311636513,
      "learning_rate": 6.6382813604083375e-09,
      "loss": 0.7773,
      "step": 235
    },
    {
      "epoch": 0.9846153846153847,
      "grad_norm": 1.1501667694643198,
      "learning_rate": 9.343974109685682e-10,
      "loss": 0.7849,
      "step": 240
    },
    {
      "epoch": 0.9969230769230769,
      "step": 243,
      "total_flos": 33199876669440.0,
      "train_loss": 1.074208640267329,
      "train_runtime": 3530.8079,
      "train_samples_per_second": 8.837,
      "train_steps_per_second": 0.069
    }
  ],
  "logging_steps": 5,
  "max_steps": 243,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 1000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 33199876669440.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}