{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 2617,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03821169277799007,
      "grad_norm": 0.09013137966394424,
      "learning_rate": 6.666666666666667e-06,
      "loss": 2.4195,
      "step": 100
    },
    {
      "epoch": 0.07642338555598013,
      "grad_norm": 0.2500901520252228,
      "learning_rate": 1.3333333333333333e-05,
      "loss": 2.4204,
      "step": 200
    },
    {
      "epoch": 0.1146350783339702,
      "grad_norm": 0.3224215507507324,
      "learning_rate": 2e-05,
      "loss": 2.3633,
      "step": 300
    },
    {
      "epoch": 0.15284677111196027,
      "grad_norm": 0.37588340044021606,
      "learning_rate": 1.9908219117491004e-05,
      "loss": 2.2832,
      "step": 400
    },
    {
      "epoch": 0.19105846388995032,
      "grad_norm": 0.4413728713989258,
      "learning_rate": 1.9634561216042834e-05,
      "loss": 2.2425,
      "step": 500
    },
    {
      "epoch": 0.2292701566679404,
      "grad_norm": 0.4971752166748047,
      "learning_rate": 1.9184049608395596e-05,
      "loss": 2.2089,
      "step": 600
    },
    {
      "epoch": 0.26748184944593045,
      "grad_norm": 0.6140124797821045,
      "learning_rate": 1.8564953965135358e-05,
      "loss": 2.1991,
      "step": 700
    },
    {
      "epoch": 0.30569354222392053,
      "grad_norm": 0.6006307601928711,
      "learning_rate": 1.778863851516131e-05,
      "loss": 2.182,
      "step": 800
    },
    {
      "epoch": 0.34390523500191056,
      "grad_norm": 0.6290389895439148,
      "learning_rate": 1.6869353441894245e-05,
      "loss": 2.1591,
      "step": 900
    },
    {
      "epoch": 0.38211692777990064,
      "grad_norm": 0.7398427724838257,
      "learning_rate": 1.5823973304394526e-05,
      "loss": 2.1383,
      "step": 1000
    },
    {
      "epoch": 0.4203286205578907,
      "grad_norm": 0.600612461566925,
      "learning_rate": 1.4671687284977572e-05,
      "loss": 2.1325,
      "step": 1100
    },
    {
      "epoch": 0.4585403133358808,
      "grad_norm": 0.7145066857337952,
      "learning_rate": 1.3433646949196354e-05,
      "loss": 2.1227,
      "step": 1200
    },
    {
      "epoch": 0.4967520061138708,
      "grad_norm": 0.8655490279197693,
      "learning_rate": 1.2132577983970828e-05,
      "loss": 2.1309,
      "step": 1300
    },
    {
      "epoch": 0.5349636988918609,
      "grad_norm": 0.7973862886428833,
      "learning_rate": 1.0792363040867677e-05,
      "loss": 2.0986,
      "step": 1400
    },
    {
      "epoch": 0.573175391669851,
      "grad_norm": 0.788762629032135,
      "learning_rate": 9.437603341932861e-06,
      "loss": 2.0651,
      "step": 1500
    },
    {
      "epoch": 0.6113870844478411,
      "grad_norm": 0.8126798868179321,
      "learning_rate": 8.093167095317543e-06,
      "loss": 2.0953,
      "step": 1600
    },
    {
      "epoch": 0.6495987772258311,
      "grad_norm": 0.8075258135795593,
      "learning_rate": 6.783733010060018e-06,
      "loss": 2.096,
      "step": 1700
    },
    {
      "epoch": 0.6878104700038211,
      "grad_norm": 0.9124458432197571,
      "learning_rate": 5.533337289346743e-06,
      "loss": 2.0671,
      "step": 1800
    },
    {
      "epoch": 0.7260221627818112,
      "grad_norm": 0.9505221843719482,
      "learning_rate": 4.364932417724222e-06,
      "loss": 2.0891,
      "step": 1900
    },
    {
      "epoch": 0.7642338555598013,
      "grad_norm": 0.8759971261024475,
      "learning_rate": 3.299965841241525e-06,
      "loss": 2.0694,
      "step": 2000
    },
    {
      "epoch": 0.8024455483377914,
      "grad_norm": 0.7681768536567688,
      "learning_rate": 2.3579862743450877e-06,
      "loss": 2.0927,
      "step": 2100
    },
    {
      "epoch": 0.8406572411157814,
      "grad_norm": 0.7481808662414551,
      "learning_rate": 1.5562848602259473e-06,
      "loss": 2.0987,
      "step": 2200
    },
    {
      "epoch": 0.8788689338937715,
      "grad_norm": 1.056018590927124,
      "learning_rate": 9.095777715434162e-07,
      "loss": 2.0576,
      "step": 2300
    },
    {
      "epoch": 0.9170806266717616,
      "grad_norm": 0.7692207098007202,
      "learning_rate": 4.297360777623161e-07,
      "loss": 2.0724,
      "step": 2400
    },
    {
      "epoch": 0.9552923194497516,
      "grad_norm": 0.8094775676727295,
      "learning_rate": 1.2556783770661497e-07,
      "loss": 2.09,
      "step": 2500
    },
    {
      "epoch": 0.9935040122277417,
      "grad_norm": 0.8155607581138611,
      "learning_rate": 2.656417277018264e-09,
      "loss": 2.0653,
      "step": 2600
    },
    {
      "epoch": 1.0,
      "step": 2617,
      "total_flos": 4.75502420361216e+16,
      "train_loss": 2.155636970715346,
      "train_runtime": 827.7262,
      "train_samples_per_second": 6.322,
      "train_steps_per_second": 3.162
    }
  ],
  "logging_steps": 100,
  "max_steps": 2617,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 4.75502420361216e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}