{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 10.0,
  "eval_steps": 500,
  "global_step": 320,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.16,
      "grad_norm": 3.140199899673462,
      "learning_rate": 6.25e-06,
      "loss": 2.7646,
      "step": 5
    },
    {
      "epoch": 0.32,
      "grad_norm": 2.039125680923462,
      "learning_rate": 1.4062500000000001e-05,
      "loss": 1.9534,
      "step": 10
    },
    {
      "epoch": 0.48,
      "grad_norm": 1.139061450958252,
      "learning_rate": 2.1875e-05,
      "loss": 1.4779,
      "step": 15
    },
    {
      "epoch": 0.64,
      "grad_norm": 1.319356918334961,
      "learning_rate": 2.96875e-05,
      "loss": 1.1318,
      "step": 20
    },
    {
      "epoch": 0.8,
      "grad_norm": 1.038979172706604,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 1.0328,
      "step": 25
    },
    {
      "epoch": 0.96,
      "grad_norm": 0.9851827621459961,
      "learning_rate": 4.5312500000000004e-05,
      "loss": 0.8692,
      "step": 30
    },
    {
      "epoch": 1.096,
      "grad_norm": 1.206039309501648,
      "learning_rate": 4.999405067699773e-05,
      "loss": 0.7644,
      "step": 35
    },
    {
      "epoch": 1.256,
      "grad_norm": 0.9264540672302246,
      "learning_rate": 4.992715330761167e-05,
      "loss": 0.5543,
      "step": 40
    },
    {
      "epoch": 1.416,
      "grad_norm": 0.8003137707710266,
      "learning_rate": 4.9786121534345265e-05,
      "loss": 0.5479,
      "step": 45
    },
    {
      "epoch": 1.576,
      "grad_norm": 0.9653586149215698,
      "learning_rate": 4.957137479163253e-05,
      "loss": 0.5097,
      "step": 50
    },
    {
      "epoch": 1.736,
      "grad_norm": 1.0603554248809814,
      "learning_rate": 4.9283551745331534e-05,
      "loss": 0.534,
      "step": 55
    },
    {
      "epoch": 1.896,
      "grad_norm": 0.9365988373756409,
      "learning_rate": 4.892350839330522e-05,
      "loss": 0.4533,
      "step": 60
    },
    {
      "epoch": 2.032,
      "grad_norm": 0.7057751417160034,
      "learning_rate": 4.849231551964771e-05,
      "loss": 0.4508,
      "step": 65
    },
    {
      "epoch": 2.192,
      "grad_norm": 0.9021510481834412,
      "learning_rate": 4.7991255510127306e-05,
      "loss": 0.3168,
      "step": 70
    },
    {
      "epoch": 2.352,
      "grad_norm": 0.827077329158783,
      "learning_rate": 4.742181853831721e-05,
      "loss": 0.2477,
      "step": 75
    },
    {
      "epoch": 2.512,
      "grad_norm": 0.6666144728660583,
      "learning_rate": 4.678569813375654e-05,
      "loss": 0.2526,
      "step": 80
    },
    {
      "epoch": 2.672,
      "grad_norm": 0.6521950960159302,
      "learning_rate": 4.608478614532215e-05,
      "loss": 0.2086,
      "step": 85
    },
    {
      "epoch": 2.832,
      "grad_norm": 0.8377714157104492,
      "learning_rate": 4.54788011072248e-05,
      "loss": 0.2177,
      "step": 90
    },
    {
      "epoch": 2.992,
      "grad_norm": 0.8437433838844299,
      "learning_rate": 4.466664285921542e-05,
      "loss": 0.221,
      "step": 95
    },
    {
      "epoch": 3.128,
      "grad_norm": 0.9066725969314575,
      "learning_rate": 4.379599518697444e-05,
      "loss": 0.1249,
      "step": 100
    },
    {
      "epoch": 3.288,
      "grad_norm": 0.5545098185539246,
      "learning_rate": 4.2869447433351165e-05,
      "loss": 0.1105,
      "step": 105
    },
    {
      "epoch": 3.448,
      "grad_norm": 0.7694709897041321,
      "learning_rate": 4.188975519039151e-05,
      "loss": 0.1135,
      "step": 110
    },
    {
      "epoch": 3.608,
      "grad_norm": 0.6357588171958923,
      "learning_rate": 4.085983210409114e-05,
      "loss": 0.1076,
      "step": 115
    },
    {
      "epoch": 3.768,
      "grad_norm": 0.6851034760475159,
      "learning_rate": 3.978274120908956e-05,
      "loss": 0.1155,
      "step": 120
    },
    {
      "epoch": 3.928,
      "grad_norm": 0.5832521319389343,
      "learning_rate": 3.8661685819076085e-05,
      "loss": 0.1101,
      "step": 125
    },
    {
      "epoch": 4.064,
      "grad_norm": 0.8762519359588623,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 0.0852,
      "step": 130
    },
    {
      "epoch": 4.224,
      "grad_norm": 0.5241655707359314,
      "learning_rate": 3.6301138654418e-05,
      "loss": 0.066,
      "step": 135
    },
    {
      "epoch": 4.384,
      "grad_norm": 1.1077111959457397,
      "learning_rate": 3.5068667246468436e-05,
      "loss": 0.067,
      "step": 140
    },
    {
      "epoch": 4.5440000000000005,
      "grad_norm": 0.6022670865058899,
      "learning_rate": 3.380625119803084e-05,
      "loss": 0.0631,
      "step": 145
    },
    {
      "epoch": 4.704,
      "grad_norm": 0.7928010821342468,
      "learning_rate": 3.251764498760683e-05,
      "loss": 0.0536,
      "step": 150
    },
    {
      "epoch": 4.864,
      "grad_norm": 0.6389479041099548,
      "learning_rate": 3.120668098434291e-05,
      "loss": 0.0528,
      "step": 155
    },
    {
      "epoch": 5.0,
      "grad_norm": 1.1353836059570312,
      "learning_rate": 2.9877258050403212e-05,
      "loss": 0.0553,
      "step": 160
    },
    {
      "epoch": 5.16,
      "grad_norm": 0.4007839858531952,
      "learning_rate": 2.8533329945589194e-05,
      "loss": 0.0291,
      "step": 165
    },
    {
      "epoch": 5.32,
      "grad_norm": 0.48751428723335266,
      "learning_rate": 2.717889356869146e-05,
      "loss": 0.0383,
      "step": 170
    },
    {
      "epoch": 5.48,
      "grad_norm": 0.6208081841468811,
      "learning_rate": 2.5817977070544407e-05,
      "loss": 0.0242,
      "step": 175
    },
    {
      "epoch": 5.64,
      "grad_norm": 0.46939215064048767,
      "learning_rate": 2.4454627874135974e-05,
      "loss": 0.0264,
      "step": 180
    },
    {
      "epoch": 5.8,
      "grad_norm": 0.5824191570281982,
      "learning_rate": 2.309290063740119e-05,
      "loss": 0.027,
      "step": 185
    },
    {
      "epoch": 5.96,
      "grad_norm": 0.5354934334754944,
      "learning_rate": 2.173684519449872e-05,
      "loss": 0.026,
      "step": 190
    },
    {
      "epoch": 6.096,
      "grad_norm": 0.20214977860450745,
      "learning_rate": 2.0390494511433416e-05,
      "loss": 0.0142,
      "step": 195
    },
    {
      "epoch": 6.256,
      "grad_norm": 0.17667493224143982,
      "learning_rate": 1.9057852691845677e-05,
      "loss": 0.0088,
      "step": 200
    },
    {
      "epoch": 6.416,
      "grad_norm": 0.17028746008872986,
      "learning_rate": 1.7742883068638447e-05,
      "loss": 0.0122,
      "step": 205
    },
    {
      "epoch": 6.576,
      "grad_norm": 0.34028640389442444,
      "learning_rate": 1.6449496416858284e-05,
      "loss": 0.0092,
      "step": 210
    },
    {
      "epoch": 6.736,
      "grad_norm": 0.259472131729126,
      "learning_rate": 1.5181539322885651e-05,
      "loss": 0.0121,
      "step": 215
    },
    {
      "epoch": 6.896,
      "grad_norm": 0.8269719481468201,
      "learning_rate": 1.3942782744524973e-05,
      "loss": 0.0113,
      "step": 220
    },
    {
      "epoch": 7.032,
      "grad_norm": 0.05031565576791763,
      "learning_rate": 1.27369107960173e-05,
      "loss": 0.0102,
      "step": 225
    },
    {
      "epoch": 7.192,
      "grad_norm": 0.3213302493095398,
      "learning_rate": 1.1567509791329401e-05,
      "loss": 0.0041,
      "step": 230
    },
    {
      "epoch": 7.352,
      "grad_norm": 0.12773875892162323,
      "learning_rate": 1.043805757830495e-05,
      "loss": 0.0033,
      "step": 235
    },
    {
      "epoch": 7.5120000000000005,
      "grad_norm": 0.09073451161384583,
      "learning_rate": 9.351913195398524e-06,
      "loss": 0.0043,
      "step": 240
    },
    {
      "epoch": 7.672,
      "grad_norm": 0.22024847567081451,
      "learning_rate": 8.31230688175382e-06,
      "loss": 0.0056,
      "step": 245
    },
    {
      "epoch": 7.832,
      "grad_norm": 0.04149346798658371,
      "learning_rate": 7.3223304703363135e-06,
      "loss": 0.0027,
      "step": 250
    },
    {
      "epoch": 7.992,
      "grad_norm": 0.07035554200410843,
      "learning_rate": 6.384928192691844e-06,
      "loss": 0.0036,
      "step": 255
    },
    {
      "epoch": 8.128,
      "grad_norm": 0.06894655525684357,
      "learning_rate": 5.50288792267796e-06,
      "loss": 0.0029,
      "step": 260
    },
    {
      "epoch": 8.288,
      "grad_norm": 0.03905967250466347,
      "learning_rate": 4.678832885209622e-06,
      "loss": 0.002,
      "step": 265
    },
    {
      "epoch": 8.448,
      "grad_norm": 0.0546087920665741,
      "learning_rate": 3.9152138546778625e-06,
      "loss": 0.0027,
      "step": 270
    },
    {
      "epoch": 8.608,
      "grad_norm": 0.07655919343233109,
      "learning_rate": 3.2143018662434687e-06,
      "loss": 0.0022,
      "step": 275
    },
    {
      "epoch": 8.768,
      "grad_norm": 0.05733692646026611,
      "learning_rate": 2.578181461682794e-06,
      "loss": 0.0022,
      "step": 280
    },
    {
      "epoch": 8.928,
      "grad_norm": 0.07310225069522858,
      "learning_rate": 2.0087444898726937e-06,
      "loss": 0.0024,
      "step": 285
    },
    {
      "epoch": 9.064,
      "grad_norm": 0.04006943106651306,
      "learning_rate": 1.5076844803522922e-06,
      "loss": 0.002,
      "step": 290
    },
    {
      "epoch": 9.224,
      "grad_norm": 0.0350627638399601,
      "learning_rate": 1.0764916066947794e-06,
      "loss": 0.002,
      "step": 295
    },
    {
      "epoch": 9.384,
      "grad_norm": 0.06751354783773422,
      "learning_rate": 7.164482546684642e-07,
      "loss": 0.0023,
      "step": 300
    },
    {
      "epoch": 9.544,
      "grad_norm": 0.048498354852199554,
      "learning_rate": 4.2862520836747246e-07,
      "loss": 0.0023,
      "step": 305
    },
    {
      "epoch": 9.704,
      "grad_norm": 0.024826915934681892,
      "learning_rate": 2.1387846565474045e-07,
      "loss": 0.0017,
      "step": 310
    },
    {
      "epoch": 9.864,
      "grad_norm": 0.030059821903705597,
      "learning_rate": 7.284669238833419e-08,
      "loss": 0.0017,
      "step": 315
    },
    {
      "epoch": 10.0,
      "grad_norm": 0.04423270374536514,
      "learning_rate": 5.94932300227169e-09,
      "loss": 0.0019,
      "step": 320
    },
    {
      "epoch": 10.0,
      "step": 320,
      "total_flos": 5.065494092578816e+16,
      "train_loss": 0.24897580899705646,
      "train_runtime": 4607.0277,
      "train_samples_per_second": 4.341,
      "train_steps_per_second": 0.069
    }
  ],
  "logging_steps": 5,
  "max_steps": 320,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 1000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.065494092578816e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}