{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9981515711645101,
  "eval_steps": 500,
  "global_step": 270,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.018484288354898338,
      "grad_norm": 314.0,
      "learning_rate": 3.7037037037037037e-06,
      "loss": 20.7105,
      "step": 5
    },
    {
      "epoch": 0.036968576709796676,
      "grad_norm": 177.0,
      "learning_rate": 7.4074074074074075e-06,
      "loss": 19.245,
      "step": 10
    },
    {
      "epoch": 0.05545286506469501,
      "grad_norm": 189.0,
      "learning_rate": 1.1111111111111113e-05,
      "loss": 16.7313,
      "step": 15
    },
    {
      "epoch": 0.07393715341959335,
      "grad_norm": 85.0,
      "learning_rate": 1.4814814814814815e-05,
      "loss": 14.1818,
      "step": 20
    },
    {
      "epoch": 0.09242144177449169,
      "grad_norm": 54.25,
      "learning_rate": 1.851851851851852e-05,
      "loss": 11.905,
      "step": 25
    },
    {
      "epoch": 0.11090573012939002,
      "grad_norm": 91.0,
      "learning_rate": 1.9992479525042305e-05,
      "loss": 10.2358,
      "step": 30
    },
    {
      "epoch": 0.12939001848428835,
      "grad_norm": 60.0,
      "learning_rate": 1.9946562024066018e-05,
      "loss": 9.522,
      "step": 35
    },
    {
      "epoch": 0.1478743068391867,
      "grad_norm": 22.125,
      "learning_rate": 1.9859096633447965e-05,
      "loss": 9.0956,
      "step": 40
    },
    {
      "epoch": 0.16635859519408502,
      "grad_norm": 98.5,
      "learning_rate": 1.973044870579824e-05,
      "loss": 8.5045,
      "step": 45
    },
    {
      "epoch": 0.18484288354898337,
      "grad_norm": 22.5,
      "learning_rate": 1.95611556177388e-05,
      "loss": 8.2548,
      "step": 50
    },
    {
      "epoch": 0.2033271719038817,
      "grad_norm": 46.5,
      "learning_rate": 1.93519245252219e-05,
      "loss": 7.7123,
      "step": 55
    },
    {
      "epoch": 0.22181146025878004,
      "grad_norm": 34.75,
      "learning_rate": 1.9103629409661468e-05,
      "loss": 7.3372,
      "step": 60
    },
    {
      "epoch": 0.24029574861367836,
      "grad_norm": 45.75,
      "learning_rate": 1.881730742721608e-05,
      "loss": 7.2639,
      "step": 65
    },
    {
      "epoch": 0.2587800369685767,
      "grad_norm": 99.5,
      "learning_rate": 1.8494154576472976e-05,
      "loss": 6.9563,
      "step": 70
    },
    {
      "epoch": 0.27726432532347506,
      "grad_norm": 58.75,
      "learning_rate": 1.8135520702629677e-05,
      "loss": 6.8706,
      "step": 75
    },
    {
      "epoch": 0.2957486136783734,
      "grad_norm": 189.0,
      "learning_rate": 1.7742903859041324e-05,
      "loss": 6.8386,
      "step": 80
    },
    {
      "epoch": 0.3142329020332717,
      "grad_norm": 31.0,
      "learning_rate": 1.7317944049686125e-05,
      "loss": 6.7403,
      "step": 85
    },
    {
      "epoch": 0.33271719038817005,
      "grad_norm": 55.25,
      "learning_rate": 1.686241637868734e-05,
      "loss": 6.554,
      "step": 90
    },
    {
      "epoch": 0.3512014787430684,
      "grad_norm": 58.5,
      "learning_rate": 1.637822363550706e-05,
      "loss": 6.5162,
      "step": 95
    },
    {
      "epoch": 0.36968576709796674,
      "grad_norm": 18.875,
      "learning_rate": 1.586738834678418e-05,
      "loss": 6.3662,
      "step": 100
    },
    {
      "epoch": 0.38817005545286504,
      "grad_norm": 25.125,
      "learning_rate": 1.5332044328016916e-05,
      "loss": 6.4888,
      "step": 105
    },
    {
      "epoch": 0.4066543438077634,
      "grad_norm": 17.0,
      "learning_rate": 1.4774427770379492e-05,
      "loss": 6.5204,
      "step": 110
    },
    {
      "epoch": 0.42513863216266173,
      "grad_norm": 125.5,
      "learning_rate": 1.4196867899904292e-05,
      "loss": 6.3338,
      "step": 115
    },
    {
      "epoch": 0.4436229205175601,
      "grad_norm": 17.5,
      "learning_rate": 1.3601777248047105e-05,
      "loss": 6.0951,
      "step": 120
    },
    {
      "epoch": 0.46210720887245843,
      "grad_norm": 10.0,
      "learning_rate": 1.2991641574276419e-05,
      "loss": 6.1339,
      "step": 125
    },
    {
      "epoch": 0.4805914972273567,
      "grad_norm": 24.0,
      "learning_rate": 1.2369009482781191e-05,
      "loss": 6.1337,
      "step": 130
    },
    {
      "epoch": 0.49907578558225507,
      "grad_norm": 40.75,
      "learning_rate": 1.1736481776669307e-05,
      "loss": 5.997,
      "step": 135
    },
    {
      "epoch": 0.5175600739371534,
      "grad_norm": 61.5,
      "learning_rate": 1.1096700594125318e-05,
      "loss": 6.0472,
      "step": 140
    },
    {
      "epoch": 0.5360443622920518,
      "grad_norm": 18.125,
      "learning_rate": 1.0452338371907065e-05,
      "loss": 5.8697,
      "step": 145
    },
    {
      "epoch": 0.5545286506469501,
      "grad_norm": 85.0,
      "learning_rate": 9.806086682281759e-06,
      "loss": 5.9026,
      "step": 150
    },
    {
      "epoch": 0.5730129390018485,
      "grad_norm": 28.0,
      "learning_rate": 9.160644990030932e-06,
      "loss": 6.016,
      "step": 155
    },
    {
      "epoch": 0.5914972273567468,
      "grad_norm": 12.75,
      "learning_rate": 8.518709376487515e-06,
      "loss": 5.9233,
      "step": 160
    },
    {
      "epoch": 0.609981515711645,
      "grad_norm": 77.5,
      "learning_rate": 7.882961277705897e-06,
      "loss": 5.8483,
      "step": 165
    },
    {
      "epoch": 0.6284658040665434,
      "grad_norm": 27.25,
      "learning_rate": 7.256056283806987e-06,
      "loss": 5.9308,
      "step": 170
    },
    {
      "epoch": 0.6469500924214417,
      "grad_norm": 58.0,
      "learning_rate": 6.640613046284581e-06,
      "loss": 5.9355,
      "step": 175
    },
    {
      "epoch": 0.6654343807763401,
      "grad_norm": 31.125,
      "learning_rate": 6.039202339608432e-06,
      "loss": 5.6434,
      "step": 180
    },
    {
      "epoch": 0.6839186691312384,
      "grad_norm": 25.0,
      "learning_rate": 5.454336322814995e-06,
      "loss": 5.6888,
      "step": 185
    },
    {
      "epoch": 0.7024029574861368,
      "grad_norm": 34.75,
      "learning_rate": 4.888458045941269e-06,
      "loss": 5.7455,
      "step": 190
    },
    {
      "epoch": 0.7208872458410351,
      "grad_norm": 23.375,
      "learning_rate": 4.343931245134616e-06,
      "loss": 5.7362,
      "step": 195
    },
    {
      "epoch": 0.7393715341959335,
      "grad_norm": 12.8125,
      "learning_rate": 3.823030469065431e-06,
      "loss": 5.6849,
      "step": 200
    },
    {
      "epoch": 0.7578558225508318,
      "grad_norm": 24.875,
      "learning_rate": 3.3279315778858034e-06,
      "loss": 5.862,
      "step": 205
    },
    {
      "epoch": 0.7763401109057301,
      "grad_norm": 36.0,
      "learning_rate": 2.8607026544210115e-06,
      "loss": 5.7541,
      "step": 210
    },
    {
      "epoch": 0.7948243992606284,
      "grad_norm": 38.25,
      "learning_rate": 2.423295365558821e-06,
      "loss": 5.7035,
      "step": 215
    },
    {
      "epoch": 0.8133086876155268,
      "grad_norm": 19.5,
      "learning_rate": 2.01753680992107e-06,
      "loss": 5.8542,
      "step": 220
    },
    {
      "epoch": 0.8317929759704251,
      "grad_norm": 33.25,
      "learning_rate": 1.6451218858706374e-06,
      "loss": 5.7262,
      "step": 225
    },
    {
      "epoch": 0.8502772643253235,
      "grad_norm": 47.0,
      "learning_rate": 1.307606211733522e-06,
      "loss": 5.6701,
      "step": 230
    },
    {
      "epoch": 0.8687615526802218,
      "grad_norm": 15.3125,
      "learning_rate": 1.0063996278090704e-06,
      "loss": 5.6769,
      "step": 235
    },
    {
      "epoch": 0.8872458410351202,
      "grad_norm": 16.0,
      "learning_rate": 7.427603073110967e-07,
      "loss": 5.868,
      "step": 240
    },
    {
      "epoch": 0.9057301293900185,
      "grad_norm": 7.71875,
      "learning_rate": 5.177895008392353e-07,
      "loss": 5.7182,
      "step": 245
    },
    {
      "epoch": 0.9242144177449169,
      "grad_norm": 62.5,
      "learning_rate": 3.3242693633337986e-07,
      "loss": 5.9037,
      "step": 250
    },
    {
      "epoch": 0.9426987060998152,
      "grad_norm": 12.875,
      "learning_rate": 1.874468937261531e-07,
      "loss": 5.6952,
      "step": 255
    },
    {
      "epoch": 0.9611829944547134,
      "grad_norm": 16.875,
      "learning_rate": 8.345497068998897e-08,
      "loss": 5.7802,
      "step": 260
    },
    {
      "epoch": 0.9796672828096118,
      "grad_norm": 13.75,
      "learning_rate": 2.088555298867978e-08,
      "loss": 5.7397,
      "step": 265
    },
    {
      "epoch": 0.9981515711645101,
      "grad_norm": 26.625,
      "learning_rate": 0.0,
      "loss": 5.8055,
      "step": 270
    },
    {
      "epoch": 0.9981515711645101,
      "eval_loss": 5.764816761016846,
      "eval_runtime": 319.0279,
      "eval_samples_per_second": 3.044,
      "eval_steps_per_second": 0.382,
      "step": 270
    },
    {
      "epoch": 0.9981515711645101,
      "step": 270,
      "total_flos": 5.260333472022528e+16,
      "train_loss": 7.369897298459653,
      "train_runtime": 2195.5957,
      "train_samples_per_second": 3.941,
      "train_steps_per_second": 0.123
    }
  ],
  "logging_steps": 5,
  "max_steps": 270,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 5.260333472022528e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}
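The file above is the `trainer_state.json` that the Hugging Face `transformers` Trainer writes alongside a checkpoint: per-step training records live in `log_history` (keyed by `loss`, `learning_rate`, `grad_norm`, `step`), followed by one evaluation record (`eval_loss`, ...) and one run summary (`train_loss`, `total_flos`, ...). As a minimal sketch of reading it back, assuming the file is saved locally as `trainer_state.json` (the path is illustrative, not part of the original run):

```python
import json

# Load the trainer state shown above (the filename is an assumption).
with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only per-step training records; the eval record uses "eval_loss"
# and the run summary uses "train_loss", so filtering on "loss" works.
train_logs = [e for e in state["log_history"] if "loss" in e]

for entry in train_logs:
    print(f'step {entry["step"]:>3}  lr {entry["learning_rate"]:.3e}  loss {entry["loss"]:.4f}')
```

On this log the printed curve falls from 20.7105 at step 5 to 5.8055 at step 270, matching the warmup-then-decay learning-rate schedule recorded in the same entries.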