{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 3000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.1,
      "grad_norm": 18.73765754699707,
      "learning_rate": 2.7500000000000004e-05,
      "loss": 2.1984,
      "step": 100
    },
    {
      "epoch": 0.2,
      "grad_norm": 51.54933166503906,
      "learning_rate": 4.966312056737589e-05,
      "loss": 2.1209,
      "step": 200
    },
    {
      "epoch": 0.3,
      "grad_norm": 32.52346420288086,
      "learning_rate": 4.789007092198582e-05,
      "loss": 2.1384,
      "step": 300
    },
    {
      "epoch": 0.4,
      "grad_norm": 35.90546417236328,
      "learning_rate": 4.6117021276595746e-05,
      "loss": 2.095,
      "step": 400
    },
    {
      "epoch": 0.5,
      "grad_norm": 29.83684730529785,
      "learning_rate": 4.4343971631205674e-05,
      "loss": 1.9609,
      "step": 500
    },
    {
      "epoch": 0.6,
      "grad_norm": 25.847257614135742,
      "learning_rate": 4.25709219858156e-05,
      "loss": 1.8644,
      "step": 600
    },
    {
      "epoch": 0.7,
      "grad_norm": 44.531307220458984,
      "learning_rate": 4.079787234042554e-05,
      "loss": 1.8201,
      "step": 700
    },
    {
      "epoch": 0.8,
      "grad_norm": 36.09579849243164,
      "learning_rate": 3.902482269503546e-05,
      "loss": 1.6903,
      "step": 800
    },
    {
      "epoch": 0.9,
      "grad_norm": 25.78478240966797,
      "learning_rate": 3.7251773049645395e-05,
      "loss": 1.5981,
      "step": 900
    },
    {
      "epoch": 1.0,
      "grad_norm": 26.695831298828125,
      "learning_rate": 3.547872340425532e-05,
      "loss": 1.6652,
      "step": 1000
    },
    {
      "epoch": 1.1,
      "grad_norm": 31.150781631469727,
      "learning_rate": 3.3705673758865245e-05,
      "loss": 1.5633,
      "step": 1100
    },
    {
      "epoch": 1.2,
      "grad_norm": 37.66817092895508,
      "learning_rate": 3.193262411347518e-05,
      "loss": 1.5541,
      "step": 1200
    },
    {
      "epoch": 1.3,
      "grad_norm": 26.20791244506836,
      "learning_rate": 3.0159574468085105e-05,
      "loss": 1.516,
      "step": 1300
    },
    {
      "epoch": 1.4,
      "grad_norm": 22.03032684326172,
      "learning_rate": 2.8386524822695037e-05,
      "loss": 1.6995,
      "step": 1400
    },
    {
      "epoch": 1.5,
      "grad_norm": 19.463829040527344,
      "learning_rate": 2.6613475177304965e-05,
      "loss": 1.4198,
      "step": 1500
    },
    {
      "epoch": 1.6,
      "grad_norm": 12.715448379516602,
      "learning_rate": 2.4840425531914897e-05,
      "loss": 1.2346,
      "step": 1600
    },
    {
      "epoch": 1.7,
      "grad_norm": 23.795001983642578,
      "learning_rate": 2.3067375886524825e-05,
      "loss": 1.4306,
      "step": 1700
    },
    {
      "epoch": 1.8,
      "grad_norm": 35.95954132080078,
      "learning_rate": 2.129432624113475e-05,
      "loss": 1.5738,
      "step": 1800
    },
    {
      "epoch": 1.9,
      "grad_norm": 35.67465591430664,
      "learning_rate": 1.9521276595744682e-05,
      "loss": 1.4514,
      "step": 1900
    },
    {
      "epoch": 2.0,
      "grad_norm": 19.942232131958008,
      "learning_rate": 1.774822695035461e-05,
      "loss": 1.4458,
      "step": 2000
    },
    {
      "epoch": 2.1,
      "grad_norm": 18.1514949798584,
      "learning_rate": 1.597517730496454e-05,
      "loss": 1.5136,
      "step": 2100
    },
    {
      "epoch": 2.2,
      "grad_norm": 28.748600006103516,
      "learning_rate": 1.420212765957447e-05,
      "loss": 1.5163,
      "step": 2200
    },
    {
      "epoch": 2.3,
      "grad_norm": 16.781105041503906,
      "learning_rate": 1.2429078014184398e-05,
      "loss": 1.3713,
      "step": 2300
    },
    {
      "epoch": 2.4,
      "grad_norm": 13.730096817016602,
      "learning_rate": 1.0656028368794328e-05,
      "loss": 1.5527,
      "step": 2400
    },
    {
      "epoch": 2.5,
      "grad_norm": 40.0937614440918,
      "learning_rate": 8.882978723404256e-06,
      "loss": 1.3158,
      "step": 2500
    },
    {
      "epoch": 2.6,
      "grad_norm": 58.328670501708984,
      "learning_rate": 7.109929078014185e-06,
      "loss": 1.4293,
      "step": 2600
    },
    {
      "epoch": 2.7,
      "grad_norm": 22.44051742553711,
      "learning_rate": 5.336879432624114e-06,
      "loss": 1.3321,
      "step": 2700
    },
    {
      "epoch": 2.8,
      "grad_norm": 38.144752502441406,
      "learning_rate": 3.563829787234043e-06,
      "loss": 1.2445,
      "step": 2800
    },
    {
      "epoch": 2.9,
      "grad_norm": 21.987308502197266,
      "learning_rate": 1.790780141843972e-06,
      "loss": 1.5435,
      "step": 2900
    },
    {
      "epoch": 3.0,
      "grad_norm": 32.320709228515625,
      "learning_rate": 1.773049645390071e-08,
      "loss": 1.3325,
      "step": 3000
    }
  ],
  "logging_steps": 100,
  "max_steps": 3000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 4092733440000.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}