{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 4.951456310679612,
  "eval_steps": 500,
  "global_step": 255,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.1941747572815534,
      "grad_norm": 3.1270775891219507,
      "learning_rate": 6.493506493506493e-07,
      "loss": 1.8796,
      "step": 10
    },
    {
      "epoch": 0.3883495145631068,
      "grad_norm": 4.0478996237342,
      "learning_rate": 1.2987012987012986e-06,
      "loss": 1.9555,
      "step": 20
    },
    {
      "epoch": 0.5825242718446602,
      "grad_norm": 3.706686831129688,
      "learning_rate": 1.9480519480519483e-06,
      "loss": 1.8677,
      "step": 30
    },
    {
      "epoch": 0.7766990291262136,
      "grad_norm": 1.800540642872872,
      "learning_rate": 2.597402597402597e-06,
      "loss": 1.6047,
      "step": 40
    },
    {
      "epoch": 0.970873786407767,
      "grad_norm": 1.5787493617396333,
      "learning_rate": 3.246753246753247e-06,
      "loss": 1.3922,
      "step": 50
    },
    {
      "epoch": 1.1650485436893203,
      "grad_norm": 1.207810791025872,
      "learning_rate": 3.896103896103897e-06,
      "loss": 1.2978,
      "step": 60
    },
    {
      "epoch": 1.3592233009708738,
      "grad_norm": 0.9175578117993997,
      "learning_rate": 4.5454545454545455e-06,
      "loss": 1.1015,
      "step": 70
    },
    {
      "epoch": 1.5533980582524272,
      "grad_norm": 0.9409417065870398,
      "learning_rate": 4.999765432089186e-06,
      "loss": 0.9465,
      "step": 80
    },
    {
      "epoch": 1.7475728155339807,
      "grad_norm": 0.7673543415725506,
      "learning_rate": 4.995596560308607e-06,
      "loss": 0.802,
      "step": 90
    },
    {
      "epoch": 1.941747572815534,
      "grad_norm": 0.6596047063364,
      "learning_rate": 4.986225072382357e-06,
      "loss": 0.8013,
      "step": 100
    },
    {
      "epoch": 2.1359223300970873,
      "grad_norm": 0.6761088913203434,
      "learning_rate": 4.971670505224043e-06,
      "loss": 0.8454,
      "step": 110
    },
    {
      "epoch": 2.3300970873786406,
      "grad_norm": 0.8375974153078517,
      "learning_rate": 4.9519632010080765e-06,
      "loss": 0.7326,
      "step": 120
    },
    {
      "epoch": 2.524271844660194,
      "grad_norm": 0.7531376616419518,
      "learning_rate": 4.927144243914781e-06,
      "loss": 0.7275,
      "step": 130
    },
    {
      "epoch": 2.7184466019417477,
      "grad_norm": 0.6285281076273438,
      "learning_rate": 4.897265374481447e-06,
      "loss": 0.7418,
      "step": 140
    },
    {
      "epoch": 2.912621359223301,
      "grad_norm": 0.602398570146741,
      "learning_rate": 4.862388881737883e-06,
      "loss": 0.7094,
      "step": 150
    },
    {
      "epoch": 3.1067961165048543,
      "grad_norm": 0.6666767672393625,
      "learning_rate": 4.822587473351317e-06,
      "loss": 0.6429,
      "step": 160
    },
    {
      "epoch": 3.3009708737864076,
      "grad_norm": 0.5205954520717115,
      "learning_rate": 4.777944124051395e-06,
      "loss": 0.6477,
      "step": 170
    },
    {
      "epoch": 3.4951456310679614,
      "grad_norm": 0.7568300500075249,
      "learning_rate": 4.728551902651227e-06,
      "loss": 0.6659,
      "step": 180
    },
    {
      "epoch": 3.6893203883495147,
      "grad_norm": 0.7044549203105918,
      "learning_rate": 4.6745137780251125e-06,
      "loss": 0.656,
      "step": 190
    },
    {
      "epoch": 3.883495145631068,
      "grad_norm": 0.7556811208616407,
      "learning_rate": 4.615942404447439e-06,
      "loss": 0.6441,
      "step": 200
    },
    {
      "epoch": 4.077669902912621,
      "grad_norm": 0.5819276993822524,
      "learning_rate": 4.552959886740232e-06,
      "loss": 0.6552,
      "step": 210
    },
    {
      "epoch": 4.271844660194175,
      "grad_norm": 0.7872228459586639,
      "learning_rate": 4.48569752571899e-06,
      "loss": 0.5654,
      "step": 220
    },
    {
      "epoch": 4.466019417475728,
      "grad_norm": 0.6601391677800797,
      "learning_rate": 4.414295544467447e-06,
      "loss": 0.5617,
      "step": 230
    },
    {
      "epoch": 4.660194174757281,
      "grad_norm": 0.760089090328379,
      "learning_rate": 4.338902796011929e-06,
      "loss": 0.5924,
      "step": 240
    },
    {
      "epoch": 4.854368932038835,
      "grad_norm": 0.6323283888532482,
      "learning_rate": 4.259676453004709e-06,
      "loss": 0.5691,
      "step": 250
    }
  ],
  "logging_steps": 10,
  "max_steps": 765,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 15,
  "save_steps": 255,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 56467474219008.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}