{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.997779422649889,
  "eval_steps": 100,
  "global_step": 337,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.014803849000740192,
      "grad_norm": 0.6499706242009551,
      "learning_rate": 2.9411764705882355e-06,
      "loss": 1.09,
      "step": 5
    },
    {
      "epoch": 0.029607698001480384,
      "grad_norm": 0.38165756890239044,
      "learning_rate": 5.882352941176471e-06,
      "loss": 1.0792,
      "step": 10
    },
    {
      "epoch": 0.04441154700222058,
      "grad_norm": 0.39503550657875475,
      "learning_rate": 8.823529411764707e-06,
      "loss": 1.0222,
      "step": 15
    },
    {
      "epoch": 0.05921539600296077,
      "grad_norm": 0.2808612725161364,
      "learning_rate": 1.1764705882352942e-05,
      "loss": 0.9451,
      "step": 20
    },
    {
      "epoch": 0.07401924500370097,
      "grad_norm": 0.2269678181481597,
      "learning_rate": 1.4705882352941179e-05,
      "loss": 0.9125,
      "step": 25
    },
    {
      "epoch": 0.08882309400444116,
      "grad_norm": 0.17821230151080844,
      "learning_rate": 1.7647058823529414e-05,
      "loss": 0.893,
      "step": 30
    },
    {
      "epoch": 0.10362694300518134,
      "grad_norm": 0.17392408164611256,
      "learning_rate": 1.9999462497359468e-05,
      "loss": 0.8651,
      "step": 35
    },
    {
      "epoch": 0.11843079200592153,
      "grad_norm": 0.14916473743012665,
      "learning_rate": 1.9980655971335944e-05,
      "loss": 0.8452,
      "step": 40
    },
    {
      "epoch": 0.13323464100666174,
      "grad_norm": 0.12420164455302427,
      "learning_rate": 1.993503206718859e-05,
      "loss": 0.8229,
      "step": 45
    },
    {
      "epoch": 0.14803849000740193,
      "grad_norm": 0.1446428727492868,
      "learning_rate": 1.986271337340182e-05,
      "loss": 0.8277,
      "step": 50
    },
    {
      "epoch": 0.16284233900814213,
      "grad_norm": 0.12252511264514188,
      "learning_rate": 1.976389420563607e-05,
      "loss": 0.8105,
      "step": 55
    },
    {
      "epoch": 0.17764618800888232,
      "grad_norm": 0.1259201476801368,
      "learning_rate": 1.9638840084614182e-05,
      "loss": 0.7964,
      "step": 60
    },
    {
      "epoch": 0.19245003700962252,
      "grad_norm": 0.12286505969436663,
      "learning_rate": 1.9487887022684336e-05,
      "loss": 0.8063,
      "step": 65
    },
    {
      "epoch": 0.20725388601036268,
      "grad_norm": 0.14045793441433746,
      "learning_rate": 1.9311440620976597e-05,
      "loss": 0.799,
      "step": 70
    },
    {
      "epoch": 0.22205773501110287,
      "grad_norm": 0.12510849706002214,
      "learning_rate": 1.9109974979578852e-05,
      "loss": 0.7899,
      "step": 75
    },
    {
      "epoch": 0.23686158401184307,
      "grad_norm": 0.1273375341999532,
      "learning_rate": 1.8884031423660492e-05,
      "loss": 0.8184,
      "step": 80
    },
    {
      "epoch": 0.25166543301258326,
      "grad_norm": 0.12248634943055726,
      "learning_rate": 1.8634217048966638e-05,
      "loss": 0.801,
      "step": 85
    },
    {
      "epoch": 0.2664692820133235,
      "grad_norm": 0.11799767120963099,
      "learning_rate": 1.836120309059107e-05,
      "loss": 0.7838,
      "step": 90
    },
    {
      "epoch": 0.28127313101406365,
      "grad_norm": 0.12704036807880528,
      "learning_rate": 1.8065723119410885e-05,
      "loss": 0.7809,
      "step": 95
    },
    {
      "epoch": 0.29607698001480387,
      "grad_norm": 0.132940387624838,
      "learning_rate": 1.77485710710289e-05,
      "loss": 0.7879,
      "step": 100
    },
    {
      "epoch": 0.29607698001480387,
      "eval_loss": 0.804416298866272,
      "eval_runtime": 2.4988,
      "eval_samples_per_second": 51.226,
      "eval_steps_per_second": 3.202,
      "step": 100
    },
    {
      "epoch": 0.31088082901554404,
      "grad_norm": 0.12007130504030676,
      "learning_rate": 1.741059911251997e-05,
      "loss": 0.7786,
      "step": 105
    },
    {
      "epoch": 0.32568467801628426,
      "grad_norm": 0.12568516417746936,
      "learning_rate": 1.7052715352713076e-05,
      "loss": 0.7727,
      "step": 110
    },
    {
      "epoch": 0.3404885270170244,
      "grad_norm": 0.1241647005253993,
      "learning_rate": 1.667588140216154e-05,
      "loss": 0.7995,
      "step": 115
    },
    {
      "epoch": 0.35529237601776464,
      "grad_norm": 0.13523203037491746,
      "learning_rate": 1.628110978935756e-05,
      "loss": 0.774,
      "step": 120
    },
    {
      "epoch": 0.3700962250185048,
      "grad_norm": 0.13517719792691274,
      "learning_rate": 1.586946124013354e-05,
      "loss": 0.7734,
      "step": 125
    },
    {
      "epoch": 0.38490007401924503,
      "grad_norm": 0.12106953246682954,
      "learning_rate": 1.5442041827560274e-05,
      "loss": 0.7498,
      "step": 130
    },
    {
      "epoch": 0.3997039230199852,
      "grad_norm": 0.11633776169372793,
      "learning_rate": 1.5000000000000002e-05,
      "loss": 0.7608,
      "step": 135
    },
    {
      "epoch": 0.41450777202072536,
      "grad_norm": 0.13112519420936772,
      "learning_rate": 1.4544523495299843e-05,
      "loss": 0.7669,
      "step": 140
    },
    {
      "epoch": 0.4293116210214656,
      "grad_norm": 0.1352433251746082,
      "learning_rate": 1.4076836149416889e-05,
      "loss": 0.7829,
      "step": 145
    },
    {
      "epoch": 0.44411547002220575,
      "grad_norm": 0.13349442949251494,
      "learning_rate": 1.3598194608050011e-05,
      "loss": 0.7678,
      "step": 150
    },
    {
      "epoch": 0.45891931902294597,
      "grad_norm": 0.11675179880699883,
      "learning_rate": 1.3109884950114007e-05,
      "loss": 0.7568,
      "step": 155
    },
    {
      "epoch": 0.47372316802368614,
      "grad_norm": 0.12218364687774884,
      "learning_rate": 1.2613219232128608e-05,
      "loss": 0.7569,
      "step": 160
    },
    {
      "epoch": 0.48852701702442636,
      "grad_norm": 0.11905324687514315,
      "learning_rate": 1.2109531962807333e-05,
      "loss": 0.7583,
      "step": 165
    },
    {
      "epoch": 0.5033308660251665,
      "grad_norm": 0.11892329139771698,
      "learning_rate": 1.1600176517318742e-05,
      "loss": 0.7631,
      "step": 170
    },
    {
      "epoch": 0.5181347150259067,
      "grad_norm": 0.12183891017667803,
      "learning_rate": 1.1086521500854746e-05,
      "loss": 0.75,
      "step": 175
    },
    {
      "epoch": 0.532938564026647,
      "grad_norm": 0.12127027337823368,
      "learning_rate": 1.0569947071276847e-05,
      "loss": 0.7708,
      "step": 180
    },
    {
      "epoch": 0.5477424130273871,
      "grad_norm": 0.13083590208483745,
      "learning_rate": 1.0051841230721065e-05,
      "loss": 0.764,
      "step": 185
    },
    {
      "epoch": 0.5625462620281273,
      "grad_norm": 0.13174671759780987,
      "learning_rate": 9.533596096125826e-06,
      "loss": 0.7706,
      "step": 190
    },
    {
      "epoch": 0.5773501110288675,
      "grad_norm": 0.1220448436691762,
      "learning_rate": 9.016604158703654e-06,
      "loss": 0.7443,
      "step": 195
    },
    {
      "epoch": 0.5921539600296077,
      "grad_norm": 0.13106396851882912,
      "learning_rate": 8.502254542407186e-06,
      "loss": 0.7423,
      "step": 200
    },
    {
      "epoch": 0.5921539600296077,
      "eval_loss": 0.7784367799758911,
      "eval_runtime": 2.4997,
      "eval_samples_per_second": 51.207,
      "eval_steps_per_second": 3.2,
      "step": 200
    },
    {
      "epoch": 0.6069578090303479,
      "grad_norm": 0.11871336145982754,
      "learning_rate": 7.991929271442817e-06,
      "loss": 0.7461,
      "step": 205
    },
    {
      "epoch": 0.6217616580310881,
      "grad_norm": 0.11632683967768367,
      "learning_rate": 7.48699955686089e-06,
      "loss": 0.7485,
      "step": 210
    },
    {
      "epoch": 0.6365655070318282,
      "grad_norm": 0.11504656091733292,
      "learning_rate": 6.988822112200157e-06,
      "loss": 0.7566,
      "step": 215
    },
    {
      "epoch": 0.6513693560325685,
      "grad_norm": 0.12812897728770611,
      "learning_rate": 6.498735508086094e-06,
      "loss": 0.7597,
      "step": 220
    },
    {
      "epoch": 0.6661732050333087,
      "grad_norm": 0.11417300140197907,
      "learning_rate": 6.018056575578075e-06,
      "loss": 0.7536,
      "step": 225
    },
    {
      "epoch": 0.6809770540340488,
      "grad_norm": 0.1077245580808127,
      "learning_rate": 5.548076867929331e-06,
      "loss": 0.7503,
      "step": 230
    },
    {
      "epoch": 0.695780903034789,
      "grad_norm": 0.11620684188189162,
      "learning_rate": 5.090059190266779e-06,
      "loss": 0.7385,
      "step": 235
    },
    {
      "epoch": 0.7105847520355293,
      "grad_norm": 0.1081898710920247,
      "learning_rate": 4.645234206515171e-06,
      "loss": 0.7435,
      "step": 240
    },
    {
      "epoch": 0.7253886010362695,
      "grad_norm": 0.113110089955297,
      "learning_rate": 4.214797132682597e-06,
      "loss": 0.7401,
      "step": 245
    },
    {
      "epoch": 0.7401924500370096,
      "grad_norm": 0.11722649443013206,
      "learning_rate": 3.799904525392251e-06,
      "loss": 0.747,
      "step": 250
    },
    {
      "epoch": 0.7549962990377498,
      "grad_norm": 0.11326872763696812,
      "learning_rate": 3.401671174289469e-06,
      "loss": 0.7371,
      "step": 255
    },
    {
      "epoch": 0.7698001480384901,
      "grad_norm": 0.1103886571880932,
      "learning_rate": 3.021167106673928e-06,
      "loss": 0.7532,
      "step": 260
    },
    {
      "epoch": 0.7846039970392302,
      "grad_norm": 0.10849690094930604,
      "learning_rate": 2.6594147124053983e-06,
      "loss": 0.7421,
      "step": 265
    },
    {
      "epoch": 0.7994078460399704,
      "grad_norm": 0.11049589855370454,
      "learning_rate": 2.317385996808195e-06,
      "loss": 0.7537,
      "step": 270
    },
    {
      "epoch": 0.8142116950407106,
      "grad_norm": 0.10346918207046948,
      "learning_rate": 1.9959999689556407e-06,
      "loss": 0.7463,
      "step": 275
    },
    {
      "epoch": 0.8290155440414507,
      "grad_norm": 0.10572545555016111,
      "learning_rate": 1.6961201723520248e-06,
      "loss": 0.7321,
      "step": 280
    },
    {
      "epoch": 0.843819393042191,
      "grad_norm": 0.10092477302569877,
      "learning_rate": 1.4185523646469822e-06,
      "loss": 0.757,
      "step": 285
    },
    {
      "epoch": 0.8586232420429312,
      "grad_norm": 0.10759503659652209,
      "learning_rate": 1.1640423526166987e-06,
      "loss": 0.7347,
      "step": 290
    },
    {
      "epoch": 0.8734270910436713,
      "grad_norm": 0.101660660973576,
      "learning_rate": 9.332739882292752e-07,
      "loss": 0.7608,
      "step": 295
    },
    {
      "epoch": 0.8882309400444115,
      "grad_norm": 0.10112277305894785,
      "learning_rate": 7.268673311786378e-07,
      "loss": 0.7509,
      "step": 300
    },
    {
      "epoch": 0.8882309400444115,
      "eval_loss": 0.7699846029281616,
      "eval_runtime": 2.5012,
      "eval_samples_per_second": 51.175,
      "eval_steps_per_second": 3.198,
      "step": 300
    },
    {
      "epoch": 0.9030347890451518,
      "grad_norm": 0.09838159633157796,
      "learning_rate": 5.453769828241872e-07,
      "loss": 0.7343,
      "step": 305
    },
    {
      "epoch": 0.9178386380458919,
      "grad_norm": 0.10410657593109854,
      "learning_rate": 3.8929059601275463e-07,
      "loss": 0.7669,
      "step": 310
    },
    {
      "epoch": 0.9326424870466321,
      "grad_norm": 0.09907836834688523,
      "learning_rate": 2.5902756478688674e-07,
      "loss": 0.7491,
      "step": 315
    },
    {
      "epoch": 0.9474463360473723,
      "grad_norm": 0.10070683540317808,
      "learning_rate": 1.5493789750014032e-07,
      "loss": 0.7509,
      "step": 320
    },
    {
      "epoch": 0.9622501850481125,
      "grad_norm": 0.10747978746967968,
      "learning_rate": 7.730127636723539e-08,
      "loss": 0.7315,
      "step": 325
    },
    {
      "epoch": 0.9770540340488527,
      "grad_norm": 0.10485191992215714,
      "learning_rate": 2.6326305976001054e-08,
      "loss": 0.7362,
      "step": 330
    },
    {
      "epoch": 0.9918578830495929,
      "grad_norm": 0.11259867302232492,
      "learning_rate": 2.149952780321485e-09,
      "loss": 0.7576,
      "step": 335
    },
    {
      "epoch": 0.997779422649889,
      "step": 337,
      "total_flos": 5.209337905293558e+17,
      "train_loss": 0.787128531260731,
      "train_runtime": 1669.3848,
      "train_samples_per_second": 12.945,
      "train_steps_per_second": 0.202
    }
  ],
  "logging_steps": 5,
  "max_steps": 337,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 5.209337905293558e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}