{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.997779422649889,
  "eval_steps": 100,
  "global_step": 337,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.014803849000740192,
      "grad_norm": 0.6503643989562988,
      "learning_rate": 2.9411764705882355e-06,
      "loss": 1.09,
      "step": 5
    },
    {
      "epoch": 0.029607698001480384,
      "grad_norm": 0.3810490071773529,
      "learning_rate": 5.882352941176471e-06,
      "loss": 1.0792,
      "step": 10
    },
    {
      "epoch": 0.04441154700222058,
      "grad_norm": 0.3948555886745453,
      "learning_rate": 8.823529411764707e-06,
      "loss": 1.0222,
      "step": 15
    },
    {
      "epoch": 0.05921539600296077,
      "grad_norm": 0.2805577516555786,
      "learning_rate": 1.1764705882352942e-05,
      "loss": 0.9451,
      "step": 20
    },
    {
      "epoch": 0.07401924500370097,
      "grad_norm": 0.2285250872373581,
      "learning_rate": 1.4705882352941179e-05,
      "loss": 0.9125,
      "step": 25
    },
    {
      "epoch": 0.08882309400444116,
      "grad_norm": 0.1779119372367859,
      "learning_rate": 1.7647058823529414e-05,
      "loss": 0.893,
      "step": 30
    },
    {
      "epoch": 0.10362694300518134,
      "grad_norm": 0.17365849018096924,
      "learning_rate": 1.9999462497359468e-05,
      "loss": 0.8651,
      "step": 35
    },
    {
      "epoch": 0.11843079200592153,
      "grad_norm": 0.14850875735282898,
      "learning_rate": 1.9980655971335944e-05,
      "loss": 0.8452,
      "step": 40
    },
    {
      "epoch": 0.13323464100666174,
      "grad_norm": 0.1241055279970169,
      "learning_rate": 1.993503206718859e-05,
      "loss": 0.8228,
      "step": 45
    },
    {
      "epoch": 0.14803849000740193,
      "grad_norm": 0.14281505346298218,
      "learning_rate": 1.986271337340182e-05,
      "loss": 0.8277,
      "step": 50
    },
    {
      "epoch": 0.16284233900814213,
      "grad_norm": 0.12098833918571472,
      "learning_rate": 1.976389420563607e-05,
      "loss": 0.8106,
      "step": 55
    },
    {
      "epoch": 0.17764618800888232,
      "grad_norm": 0.12549127638339996,
      "learning_rate": 1.9638840084614182e-05,
      "loss": 0.7963,
      "step": 60
    },
    {
      "epoch": 0.19245003700962252,
      "grad_norm": 0.12255293875932693,
      "learning_rate": 1.9487887022684336e-05,
      "loss": 0.8063,
      "step": 65
    },
    {
      "epoch": 0.20725388601036268,
      "grad_norm": 0.1393764466047287,
      "learning_rate": 1.9311440620976597e-05,
      "loss": 0.7989,
      "step": 70
    },
    {
      "epoch": 0.22205773501110287,
      "grad_norm": 0.12465377151966095,
      "learning_rate": 1.9109974979578852e-05,
      "loss": 0.7899,
      "step": 75
    },
    {
      "epoch": 0.23686158401184307,
      "grad_norm": 0.12621091306209564,
      "learning_rate": 1.8884031423660492e-05,
      "loss": 0.8185,
      "step": 80
    },
    {
      "epoch": 0.25166543301258326,
      "grad_norm": 0.12274689227342606,
      "learning_rate": 1.8634217048966638e-05,
      "loss": 0.801,
      "step": 85
    },
    {
      "epoch": 0.2664692820133235,
      "grad_norm": 0.11692527681589127,
      "learning_rate": 1.836120309059107e-05,
      "loss": 0.7837,
      "step": 90
    },
    {
      "epoch": 0.28127313101406365,
      "grad_norm": 0.12686526775360107,
      "learning_rate": 1.8065723119410885e-05,
      "loss": 0.7808,
      "step": 95
    },
    {
      "epoch": 0.29607698001480387,
      "grad_norm": 0.13229221105575562,
      "learning_rate": 1.77485710710289e-05,
      "loss": 0.7879,
      "step": 100
    },
    {
      "epoch": 0.29607698001480387,
      "eval_loss": 0.8044255375862122,
      "eval_runtime": 5.859,
      "eval_samples_per_second": 21.847,
      "eval_steps_per_second": 1.365,
      "step": 100
    },
    {
      "epoch": 0.31088082901554404,
      "grad_norm": 0.11952897906303406,
      "learning_rate": 1.741059911251997e-05,
      "loss": 0.7786,
      "step": 105
    },
    {
      "epoch": 0.32568467801628426,
      "grad_norm": 0.1261080503463745,
      "learning_rate": 1.7052715352713076e-05,
      "loss": 0.7727,
      "step": 110
    },
    {
      "epoch": 0.3404885270170244,
      "grad_norm": 0.12398428469896317,
      "learning_rate": 1.667588140216154e-05,
      "loss": 0.7995,
      "step": 115
    },
    {
      "epoch": 0.35529237601776464,
      "grad_norm": 0.13477057218551636,
      "learning_rate": 1.628110978935756e-05,
      "loss": 0.774,
      "step": 120
    },
    {
      "epoch": 0.3700962250185048,
      "grad_norm": 0.1344415843486786,
      "learning_rate": 1.586946124013354e-05,
      "loss": 0.7734,
      "step": 125
    },
    {
      "epoch": 0.38490007401924503,
      "grad_norm": 0.12036354839801788,
      "learning_rate": 1.5442041827560274e-05,
      "loss": 0.7498,
      "step": 130
    },
    {
      "epoch": 0.3997039230199852,
      "grad_norm": 0.11690913140773773,
      "learning_rate": 1.5000000000000002e-05,
      "loss": 0.7607,
      "step": 135
    },
    {
      "epoch": 0.41450777202072536,
      "grad_norm": 0.1300031542778015,
      "learning_rate": 1.4544523495299843e-05,
      "loss": 0.7669,
      "step": 140
    },
    {
      "epoch": 0.4293116210214656,
      "grad_norm": 0.13464853167533875,
      "learning_rate": 1.4076836149416889e-05,
      "loss": 0.7829,
      "step": 145
    },
    {
      "epoch": 0.44411547002220575,
      "grad_norm": 0.13280808925628662,
      "learning_rate": 1.3598194608050011e-05,
      "loss": 0.7678,
      "step": 150
    },
    {
      "epoch": 0.45891931902294597,
      "grad_norm": 0.11657247692346573,
      "learning_rate": 1.3109884950114007e-05,
      "loss": 0.7567,
      "step": 155
    },
    {
      "epoch": 0.47372316802368614,
      "grad_norm": 0.1221541091799736,
      "learning_rate": 1.2613219232128608e-05,
      "loss": 0.7568,
      "step": 160
    },
    {
      "epoch": 0.48852701702442636,
      "grad_norm": 0.11861217021942139,
      "learning_rate": 1.2109531962807333e-05,
      "loss": 0.7583,
      "step": 165
    },
    {
      "epoch": 0.5033308660251665,
      "grad_norm": 0.11882445961236954,
      "learning_rate": 1.1600176517318742e-05,
      "loss": 0.7631,
      "step": 170
    },
    {
      "epoch": 0.5181347150259067,
      "grad_norm": 0.12230531871318817,
      "learning_rate": 1.1086521500854746e-05,
      "loss": 0.7499,
      "step": 175
    },
    {
      "epoch": 0.532938564026647,
      "grad_norm": 0.12119855731725693,
      "learning_rate": 1.0569947071276847e-05,
      "loss": 0.7708,
      "step": 180
    },
    {
      "epoch": 0.5477424130273871,
      "grad_norm": 0.12843738496303558,
      "learning_rate": 1.0051841230721065e-05,
      "loss": 0.7639,
      "step": 185
    },
    {
      "epoch": 0.5625462620281273,
      "grad_norm": 0.13090410828590393,
      "learning_rate": 9.533596096125826e-06,
      "loss": 0.7705,
      "step": 190
    },
    {
      "epoch": 0.5773501110288675,
      "grad_norm": 0.12135408818721771,
      "learning_rate": 9.016604158703654e-06,
      "loss": 0.7443,
      "step": 195
    },
    {
      "epoch": 0.5921539600296077,
      "grad_norm": 0.13062784075737,
      "learning_rate": 8.502254542407186e-06,
      "loss": 0.7423,
      "step": 200
    },
    {
      "epoch": 0.5921539600296077,
      "eval_loss": 0.7782678604125977,
      "eval_runtime": 5.8957,
      "eval_samples_per_second": 21.711,
      "eval_steps_per_second": 1.357,
      "step": 200
    },
    {
      "epoch": 0.6069578090303479,
      "grad_norm": 0.11821803450584412,
      "learning_rate": 7.991929271442817e-06,
      "loss": 0.7461,
      "step": 205
    },
    {
      "epoch": 0.6217616580310881,
      "grad_norm": 0.11570143699645996,
      "learning_rate": 7.48699955686089e-06,
      "loss": 0.7483,
      "step": 210
    },
    {
      "epoch": 0.6365655070318282,
      "grad_norm": 0.11539588123559952,
      "learning_rate": 6.988822112200157e-06,
      "loss": 0.7566,
      "step": 215
    },
    {
      "epoch": 0.6513693560325685,
      "grad_norm": 0.12663376331329346,
      "learning_rate": 6.498735508086094e-06,
      "loss": 0.7597,
      "step": 220
    },
    {
      "epoch": 0.6661732050333087,
      "grad_norm": 0.11310730874538422,
      "learning_rate": 6.018056575578075e-06,
      "loss": 0.7536,
      "step": 225
    },
    {
      "epoch": 0.6809770540340488,
      "grad_norm": 0.10681577771902084,
      "learning_rate": 5.548076867929331e-06,
      "loss": 0.7503,
      "step": 230
    },
    {
      "epoch": 0.695780903034789,
      "grad_norm": 0.11569629609584808,
      "learning_rate": 5.090059190266779e-06,
      "loss": 0.7384,
      "step": 235
    },
    {
      "epoch": 0.7105847520355293,
      "grad_norm": 0.10818663239479065,
      "learning_rate": 4.645234206515171e-06,
      "loss": 0.7436,
      "step": 240
    },
    {
      "epoch": 0.7253886010362695,
      "grad_norm": 0.1134616956114769,
      "learning_rate": 4.214797132682597e-06,
      "loss": 0.7401,
      "step": 245
    },
    {
      "epoch": 0.7401924500370096,
      "grad_norm": 0.1172085627913475,
      "learning_rate": 3.799904525392251e-06,
      "loss": 0.747,
      "step": 250
    },
    {
      "epoch": 0.7549962990377498,
      "grad_norm": 0.11327671259641647,
      "learning_rate": 3.401671174289469e-06,
      "loss": 0.7371,
      "step": 255
    },
    {
      "epoch": 0.7698001480384901,
      "grad_norm": 0.11018561571836472,
      "learning_rate": 3.021167106673928e-06,
      "loss": 0.7531,
      "step": 260
    },
    {
      "epoch": 0.7846039970392302,
      "grad_norm": 0.10802847146987915,
      "learning_rate": 2.6594147124053983e-06,
      "loss": 0.742,
      "step": 265
    },
    {
      "epoch": 0.7994078460399704,
      "grad_norm": 0.11024806648492813,
      "learning_rate": 2.317385996808195e-06,
      "loss": 0.7537,
      "step": 270
    },
    {
      "epoch": 0.8142116950407106,
      "grad_norm": 0.10315828770399094,
      "learning_rate": 1.9959999689556407e-06,
      "loss": 0.7463,
      "step": 275
    },
    {
      "epoch": 0.8290155440414507,
      "grad_norm": 0.10550706088542938,
      "learning_rate": 1.6961201723520248e-06,
      "loss": 0.732,
      "step": 280
    },
    {
      "epoch": 0.843819393042191,
      "grad_norm": 0.10042756050825119,
      "learning_rate": 1.4185523646469822e-06,
      "loss": 0.757,
      "step": 285
    },
    {
      "epoch": 0.8586232420429312,
      "grad_norm": 0.10768315196037292,
      "learning_rate": 1.1640423526166987e-06,
      "loss": 0.7347,
      "step": 290
    },
    {
      "epoch": 0.8734270910436713,
      "grad_norm": 0.10151806473731995,
      "learning_rate": 9.332739882292752e-07,
      "loss": 0.7608,
      "step": 295
    },
    {
      "epoch": 0.8882309400444115,
      "grad_norm": 0.10100872814655304,
      "learning_rate": 7.268673311786378e-07,
      "loss": 0.7508,
      "step": 300
    },
    {
      "epoch": 0.8882309400444115,
      "eval_loss": 0.7698503732681274,
      "eval_runtime": 5.9196,
      "eval_samples_per_second": 21.623,
      "eval_steps_per_second": 1.351,
      "step": 300
    },
    {
      "epoch": 0.9030347890451518,
      "grad_norm": 0.09824325144290924,
      "learning_rate": 5.453769828241872e-07,
      "loss": 0.7343,
      "step": 305
    },
    {
      "epoch": 0.9178386380458919,
      "grad_norm": 0.1036561131477356,
      "learning_rate": 3.8929059601275463e-07,
      "loss": 0.7668,
      "step": 310
    },
    {
      "epoch": 0.9326424870466321,
      "grad_norm": 0.09889034926891327,
      "learning_rate": 2.5902756478688674e-07,
      "loss": 0.749,
      "step": 315
    },
    {
      "epoch": 0.9474463360473723,
      "grad_norm": 0.10057399421930313,
      "learning_rate": 1.5493789750014032e-07,
      "loss": 0.7509,
      "step": 320
    },
    {
      "epoch": 0.9622501850481125,
      "grad_norm": 0.10745055228471756,
      "learning_rate": 7.730127636723539e-08,
      "loss": 0.7315,
      "step": 325
    },
    {
      "epoch": 0.9770540340488527,
      "grad_norm": 0.10501035302877426,
      "learning_rate": 2.6326305976001054e-08,
      "loss": 0.7362,
      "step": 330
    },
    {
      "epoch": 0.9918578830495929,
      "grad_norm": 0.11271944642066956,
      "learning_rate": 2.149952780321485e-09,
      "loss": 0.7575,
      "step": 335
    },
    {
      "epoch": 0.997779422649889,
      "step": 337,
      "total_flos": 76745898196992.0,
      "train_loss": 0.7870961399389658,
      "train_runtime": 4760.8197,
      "train_samples_per_second": 4.539,
      "train_steps_per_second": 0.071
    }
  ],
  "logging_steps": 5,
  "max_steps": 337,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 76745898196992.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}