{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.967741935483871,
  "eval_steps": 500,
  "global_step": 69,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.21505376344086022,
      "grad_norm": 5.693103313446045,
      "learning_rate": 1.9954719225730847e-05,
      "loss": 1.1669,
      "step": 5
    },
    {
      "epoch": 0.43010752688172044,
      "grad_norm": 1.963355302810669,
      "learning_rate": 1.9450008187146685e-05,
      "loss": 0.6522,
      "step": 10
    },
    {
      "epoch": 0.6451612903225806,
      "grad_norm": 1.6932282447814941,
      "learning_rate": 1.8412535328311813e-05,
      "loss": 0.4912,
      "step": 15
    },
    {
      "epoch": 0.8602150537634409,
      "grad_norm": 1.487468957901001,
      "learning_rate": 1.6900790114821122e-05,
      "loss": 0.415,
      "step": 20
    },
    {
      "epoch": 1.075268817204301,
      "grad_norm": 1.5301791429519653,
      "learning_rate": 1.5000000000000002e-05,
      "loss": 0.3687,
      "step": 25
    },
    {
      "epoch": 1.2903225806451613,
      "grad_norm": 1.3023558855056763,
      "learning_rate": 1.2817325568414299e-05,
      "loss": 0.2981,
      "step": 30
    },
    {
      "epoch": 1.5053763440860215,
      "grad_norm": 1.2679604291915894,
      "learning_rate": 1.0475819158237426e-05,
      "loss": 0.2836,
      "step": 35
    },
    {
      "epoch": 1.7204301075268817,
      "grad_norm": 1.180540919303894,
      "learning_rate": 8.107487556395902e-06,
      "loss": 0.2772,
      "step": 40
    },
    {
      "epoch": 1.935483870967742,
      "grad_norm": 1.1995508670806885,
      "learning_rate": 5.845849869981137e-06,
      "loss": 0.268,
      "step": 45
    },
    {
      "epoch": 2.150537634408602,
      "grad_norm": 1.1225168704986572,
      "learning_rate": 3.818410137793947e-06,
      "loss": 0.2306,
      "step": 50
    },
    {
      "epoch": 2.3655913978494625,
      "grad_norm": 1.166407823562622,
      "learning_rate": 2.1394690525721275e-06,
      "loss": 0.2085,
      "step": 55
    },
    {
      "epoch": 2.5806451612903225,
      "grad_norm": 1.157459020614624,
      "learning_rate": 9.036800464548157e-07,
      "loss": 0.2009,
      "step": 60
    },
    {
      "epoch": 2.795698924731183,
      "grad_norm": 1.1147010326385498,
      "learning_rate": 1.8071302737293294e-07,
      "loss": 0.2012,
      "step": 65
    },
    {
      "epoch": 2.967741935483871,
      "step": 69,
      "total_flos": 2.640330688875725e+16,
      "train_loss": 0.37769337063250336,
      "train_runtime": 647.3014,
      "train_samples_per_second": 6.873,
      "train_steps_per_second": 0.107
    }
  ],
  "logging_steps": 5,
  "max_steps": 69,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2.640330688875725e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}