{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.010602392606598222,
  "eval_steps": 500,
  "global_step": 600,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0008835327172165185,
      "grad_norm": 5.665971279144287,
      "learning_rate": 4.3286219081272084e-07,
      "loss": 1.3738,
      "step": 50
    },
    {
      "epoch": 0.001767065434433037,
      "grad_norm": 5.6161651611328125,
      "learning_rate": 8.745583038869259e-07,
      "loss": 1.1661,
      "step": 100
    },
    {
      "epoch": 0.0026505981516495554,
      "grad_norm": 7.866199970245361,
      "learning_rate": 1.3162544169611309e-06,
      "loss": 1.2107,
      "step": 150
    },
    {
      "epoch": 0.003534130868866074,
      "grad_norm": 5.07379674911499,
      "learning_rate": 1.7579505300353357e-06,
      "loss": 0.9855,
      "step": 200
    },
    {
      "epoch": 0.004417663586082593,
      "grad_norm": 3.2607851028442383,
      "learning_rate": 2.199646643109541e-06,
      "loss": 0.9431,
      "step": 250
    },
    {
      "epoch": 0.005301196303299111,
      "grad_norm": 6.517599105834961,
      "learning_rate": 2.6413427561837457e-06,
      "loss": 0.8566,
      "step": 300
    },
    {
      "epoch": 0.00618472902051563,
      "grad_norm": 2.8523333072662354,
      "learning_rate": 3.0830388692579506e-06,
      "loss": 0.8697,
      "step": 350
    },
    {
      "epoch": 0.007068261737732148,
      "grad_norm": 3.460226058959961,
      "learning_rate": 3.5247349823321555e-06,
      "loss": 0.8099,
      "step": 400
    },
    {
      "epoch": 0.007951794454948667,
      "grad_norm": 3.2528891563415527,
      "learning_rate": 3.966431095406361e-06,
      "loss": 0.766,
      "step": 450
    },
    {
      "epoch": 0.008835327172165185,
      "grad_norm": 4.1086039543151855,
      "learning_rate": 4.408127208480566e-06,
      "loss": 0.7402,
      "step": 500
    },
    {
      "epoch": 0.009718859889381704,
      "grad_norm": 3.8160510063171387,
      "learning_rate": 4.849823321554771e-06,
      "loss": 0.8769,
      "step": 550
    },
    {
      "epoch": 0.010602392606598222,
      "grad_norm": 2.901653289794922,
      "learning_rate": 5.291519434628975e-06,
      "loss": 0.6827,
      "step": 600
    }
  ],
  "logging_steps": 50,
  "max_steps": 56591,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 200,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 256,
  "trial_name": null,
  "trial_params": null
}