{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 7048,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.14188422247446084,
      "grad_norm": 24.020404815673828,
      "learning_rate": 2.787173666288309e-05,
      "loss": 1.2482,
      "step": 500
    },
    {
      "epoch": 0.28376844494892167,
      "grad_norm": 15.585792541503906,
      "learning_rate": 2.5743473325766176e-05,
      "loss": 0.921,
      "step": 1000
    },
    {
      "epoch": 0.42565266742338254,
      "grad_norm": 15.092517852783203,
      "learning_rate": 2.3615209988649265e-05,
      "loss": 0.8583,
      "step": 1500
    },
    {
      "epoch": 0.5675368898978433,
      "grad_norm": 12.11979866027832,
      "learning_rate": 2.1486946651532347e-05,
      "loss": 0.8274,
      "step": 2000
    },
    {
      "epoch": 0.7094211123723042,
      "grad_norm": 18.533437728881836,
      "learning_rate": 1.9358683314415437e-05,
      "loss": 0.7728,
      "step": 2500
    },
    {
      "epoch": 0.8513053348467651,
      "grad_norm": 16.716705322265625,
      "learning_rate": 1.7230419977298526e-05,
      "loss": 0.7614,
      "step": 3000
    },
    {
      "epoch": 0.9931895573212258,
      "grad_norm": 18.170560836791992,
      "learning_rate": 1.5102156640181612e-05,
      "loss": 0.7471,
      "step": 3500
    },
    {
      "epoch": 1.1350737797956867,
      "grad_norm": 11.996767044067383,
      "learning_rate": 1.29738933030647e-05,
      "loss": 0.6015,
      "step": 4000
    },
    {
      "epoch": 1.2769580022701477,
      "grad_norm": 15.390630722045898,
      "learning_rate": 1.0845629965947787e-05,
      "loss": 0.6034,
      "step": 4500
    },
    {
      "epoch": 1.4188422247446084,
      "grad_norm": 14.796677589416504,
      "learning_rate": 8.717366628830874e-06,
      "loss": 0.5894,
      "step": 5000
    },
    {
      "epoch": 1.5607264472190692,
      "grad_norm": 11.31975269317627,
      "learning_rate": 6.589103291713962e-06,
      "loss": 0.5715,
      "step": 5500
    },
    {
      "epoch": 1.70261066969353,
      "grad_norm": 13.917352676391602,
      "learning_rate": 4.4608399545970485e-06,
      "loss": 0.5731,
      "step": 6000
    },
    {
      "epoch": 1.844494892167991,
      "grad_norm": 16.910314559936523,
      "learning_rate": 2.332576617480136e-06,
      "loss": 0.5691,
      "step": 6500
    },
    {
      "epoch": 1.9863791146424519,
      "grad_norm": 14.70508098602295,
      "learning_rate": 2.043132803632236e-07,
      "loss": 0.5752,
      "step": 7000
    },
    {
      "epoch": 2.0,
      "step": 7048,
      "total_flos": 5601854796788736.0,
      "train_loss": 0.7287338001909375,
      "train_runtime": 2646.7279,
      "train_samples_per_second": 127.787,
      "train_steps_per_second": 2.663
    }
  ],
  "logging_steps": 500,
  "max_steps": 7048,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5601854796788736.0,
  "train_batch_size": 48,
  "trial_name": null,
  "trial_params": null
}