{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9969230769230769,
  "eval_steps": 100,
  "global_step": 243,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0041025641025641026,
      "grad_norm": 144.718941674609,
      "learning_rate": 8e-08,
      "loss": 38.5112,
      "step": 1
    },
    {
      "epoch": 0.020512820512820513,
      "grad_norm": 154.18045796294427,
      "learning_rate": 4e-07,
      "loss": 35.9902,
      "step": 5
    },
    {
      "epoch": 0.041025641025641026,
      "grad_norm": 92.7196382330663,
      "learning_rate": 8e-07,
      "loss": 29.7759,
      "step": 10
    },
    {
      "epoch": 0.06153846153846154,
      "grad_norm": 35.20462752075206,
      "learning_rate": 1.2e-06,
      "loss": 12.1467,
      "step": 15
    },
    {
      "epoch": 0.08205128205128205,
      "grad_norm": 4.452197350562835,
      "learning_rate": 1.6e-06,
      "loss": 4.8547,
      "step": 20
    },
    {
      "epoch": 0.10256410256410256,
      "grad_norm": 3.44782142029081,
      "learning_rate": 2e-06,
      "loss": 1.8181,
      "step": 25
    },
    {
      "epoch": 0.12307692307692308,
      "grad_norm": 1.1315878217409587,
      "learning_rate": 1.9974051702905273e-06,
      "loss": 1.7755,
      "step": 30
    },
    {
      "epoch": 0.14358974358974358,
      "grad_norm": 1.6309192793868679,
      "learning_rate": 1.9896341474445524e-06,
      "loss": 1.7732,
      "step": 35
    },
    {
      "epoch": 0.1641025641025641,
      "grad_norm": 0.2387421624472641,
      "learning_rate": 1.976727260423982e-06,
      "loss": 1.7657,
      "step": 40
    },
    {
      "epoch": 0.18461538461538463,
      "grad_norm": 0.9151595286542283,
      "learning_rate": 1.9587514915766123e-06,
      "loss": 1.759,
      "step": 45
    },
    {
      "epoch": 0.20512820512820512,
      "grad_norm": 0.2928676536099991,
      "learning_rate": 1.935800129020554e-06,
      "loss": 1.7765,
      "step": 50
    },
    {
      "epoch": 0.22564102564102564,
      "grad_norm": 0.9206336707113131,
      "learning_rate": 1.907992282510675e-06,
      "loss": 1.7559,
      "step": 55
    },
    {
      "epoch": 0.24615384615384617,
      "grad_norm": 0.17973694321521183,
      "learning_rate": 1.8754722652995345e-06,
      "loss": 1.7596,
      "step": 60
    },
    {
      "epoch": 0.26666666666666666,
      "grad_norm": 1.0994344778442904,
      "learning_rate": 1.8384088452007577e-06,
      "loss": 1.767,
      "step": 65
    },
    {
      "epoch": 0.28717948717948716,
      "grad_norm": 1.2516396310176934,
      "learning_rate": 1.7969943687415575e-06,
      "loss": 1.7595,
      "step": 70
    },
    {
      "epoch": 0.3076923076923077,
      "grad_norm": 0.8825826957973506,
      "learning_rate": 1.751443762949772e-06,
      "loss": 1.7603,
      "step": 75
    },
    {
      "epoch": 0.3282051282051282,
      "grad_norm": 0.5684243878200888,
      "learning_rate": 1.7019934199557866e-06,
      "loss": 1.7601,
      "step": 80
    },
    {
      "epoch": 0.3487179487179487,
      "grad_norm": 1.4561019039630394,
      "learning_rate": 1.6488999701978903e-06,
      "loss": 1.7539,
      "step": 85
    },
    {
      "epoch": 0.36923076923076925,
      "grad_norm": 2.2253113492840897,
      "learning_rate": 1.5924389505977035e-06,
      "loss": 1.7631,
      "step": 90
    },
    {
      "epoch": 0.38974358974358975,
      "grad_norm": 1.105842930732609,
      "learning_rate": 1.5329033746173974e-06,
      "loss": 1.7499,
      "step": 95
    },
    {
      "epoch": 0.41025641025641024,
      "grad_norm": 1.7513422442593327,
      "learning_rate": 1.4706022116196205e-06,
      "loss": 1.73,
      "step": 100
    },
    {
      "epoch": 0.4307692307692308,
      "grad_norm": 1.9495284124044943,
      "learning_rate": 1.4058587834217354e-06,
      "loss": 1.7023,
      "step": 105
    },
    {
      "epoch": 0.4512820512820513,
      "grad_norm": 1.023939099304097,
      "learning_rate": 1.3390090863657047e-06,
      "loss": 1.705,
      "step": 110
    },
    {
      "epoch": 0.4717948717948718,
      "grad_norm": 1.791995404705104,
      "learning_rate": 1.2704000476115078e-06,
      "loss": 1.6624,
      "step": 115
    },
    {
      "epoch": 0.49230769230769234,
      "grad_norm": 1.3515164464369838,
      "learning_rate": 1.200387724703341e-06,
      "loss": 1.6314,
      "step": 120
    },
    {
      "epoch": 0.5128205128205128,
      "grad_norm": 1.427350231430973,
      "learning_rate": 1.1293354577522264e-06,
      "loss": 1.6233,
      "step": 125
    },
    {
      "epoch": 0.5333333333333333,
      "grad_norm": 2.4313788240604417,
      "learning_rate": 1.0576119838245842e-06,
      "loss": 1.5465,
      "step": 130
    },
    {
      "epoch": 0.5538461538461539,
      "grad_norm": 1.728140266408429,
      "learning_rate": 9.85589523322443e-07,
      "loss": 1.5467,
      "step": 135
    },
    {
      "epoch": 0.5743589743589743,
      "grad_norm": 0.7953641344440756,
      "learning_rate": 9.136418482863228e-07,
      "loss": 1.5656,
      "step": 140
    },
    {
      "epoch": 0.5948717948717949,
      "grad_norm": 1.5885192238418508,
      "learning_rate": 8.42142342645646e-07,
      "loss": 1.481,
      "step": 145
    },
    {
      "epoch": 0.6153846153846154,
      "grad_norm": 1.2682164305701722,
      "learning_rate": 7.714620644833109e-07,
      "loss": 1.5413,
      "step": 150
    },
    {
      "epoch": 0.6358974358974359,
      "grad_norm": 1.6587698354477947,
      "learning_rate": 7.019678203706163e-07,
      "loss": 1.5597,
      "step": 155
    },
    {
      "epoch": 0.6564102564102564,
      "grad_norm": 1.7321894272960383,
      "learning_rate": 6.340202617660841e-07,
      "loss": 1.4949,
      "step": 160
    },
    {
      "epoch": 0.676923076923077,
      "grad_norm": 1.1918404151514075,
      "learning_rate": 5.679720133572206e-07,
      "loss": 1.5773,
      "step": 165
    },
    {
      "epoch": 0.6974358974358974,
      "grad_norm": 0.9685225265758775,
      "learning_rate": 5.041658430584852e-07,
      "loss": 1.4437,
      "step": 170
    },
    {
      "epoch": 0.717948717948718,
      "grad_norm": 1.7447510850016987,
      "learning_rate": 4.429328831625565e-07,
      "loss": 1.4728,
      "step": 175
    },
    {
      "epoch": 0.7384615384615385,
      "grad_norm": 1.0929907826100385,
      "learning_rate": 3.8459091187650726e-07,
      "loss": 1.395,
      "step": 180
    },
    {
      "epoch": 0.7589743589743589,
      "grad_norm": 1.5828947310433559,
      "learning_rate": 3.294427041611425e-07,
      "loss": 1.4572,
      "step": 185
    },
    {
      "epoch": 0.7794871794871795,
      "grad_norm": 1.0668250330355,
      "learning_rate": 2.777744604320705e-07,
      "loss": 1.4795,
      "step": 190
    },
    {
      "epoch": 0.8,
      "grad_norm": 0.9455195351653852,
      "learning_rate": 2.2985432127701941e-07,
      "loss": 1.452,
      "step": 195
    },
    {
      "epoch": 0.8205128205128205,
      "grad_norm": 0.8697579664779012,
      "learning_rate": 1.8593097589751316e-07,
      "loss": 1.4309,
      "step": 200
    },
    {
      "epoch": 0.841025641025641,
      "grad_norm": 1.1623912397754987,
      "learning_rate": 1.4623237149661137e-07,
      "loss": 1.4005,
      "step": 205
    },
    {
      "epoch": 0.8615384615384616,
      "grad_norm": 1.9536059558063326,
      "learning_rate": 1.1096453031056264e-07,
      "loss": 1.3974,
      "step": 210
    },
    {
      "epoch": 0.882051282051282,
      "grad_norm": 1.158578839970839,
      "learning_rate": 8.031048042356392e-08,
      "loss": 1.4343,
      "step": 215
    },
    {
      "epoch": 0.9025641025641026,
      "grad_norm": 1.3389828616732187,
      "learning_rate": 5.442930591433992e-08,
      "loss": 1.3794,
      "step": 220
    },
    {
      "epoch": 0.9230769230769231,
      "grad_norm": 1.1383780851429117,
      "learning_rate": 3.345532126395578e-08,
      "loss": 1.3479,
      "step": 225
    },
    {
      "epoch": 0.9435897435897436,
      "grad_norm": 1.6621281942279627,
      "learning_rate": 1.7497374309405344e-08,
      "loss": 1.5059,
      "step": 230
    },
    {
      "epoch": 0.9641025641025641,
      "grad_norm": 1.547913879246758,
      "learning_rate": 6.6382813604083375e-09,
      "loss": 1.399,
      "step": 235
    },
    {
      "epoch": 0.9846153846153847,
      "grad_norm": 1.2658520924196741,
      "learning_rate": 9.343974109685682e-10,
      "loss": 1.4923,
      "step": 240
    },
    {
      "epoch": 0.9969230769230769,
      "step": 243,
      "total_flos": 40267023106048.0,
      "train_loss": 3.1795749114864647,
      "train_runtime": 3149.6275,
      "train_samples_per_second": 9.906,
      "train_steps_per_second": 0.077
    }
  ],
  "logging_steps": 5,
  "max_steps": 243,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 1000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 40267023106048.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}