{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 100.0,
  "eval_steps": 500,
  "global_step": 7700,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 6.4935064935064934,
      "grad_norm": 29.42378044128418,
      "learning_rate": 4.675324675324675e-05,
      "loss": 1.5516,
      "step": 500
    },
    {
      "epoch": 12.987012987012987,
      "grad_norm": 14.185086250305176,
      "learning_rate": 4.3506493506493503e-05,
      "loss": 0.9416,
      "step": 1000
    },
    {
      "epoch": 19.48051948051948,
      "grad_norm": 3.7115426063537598,
      "learning_rate": 4.025974025974026e-05,
      "loss": 0.5566,
      "step": 1500
    },
    {
      "epoch": 25.974025974025974,
      "grad_norm": 8.48068618774414,
      "learning_rate": 3.701298701298702e-05,
      "loss": 0.3817,
      "step": 2000
    },
    {
      "epoch": 32.467532467532465,
      "grad_norm": 3.017573118209839,
      "learning_rate": 3.376623376623377e-05,
      "loss": 0.2771,
      "step": 2500
    },
    {
      "epoch": 38.96103896103896,
      "grad_norm": 0.5344293117523193,
      "learning_rate": 3.051948051948052e-05,
      "loss": 0.2127,
      "step": 3000
    },
    {
      "epoch": 45.45454545454545,
      "grad_norm": 1.3606529235839844,
      "learning_rate": 2.7272727272727273e-05,
      "loss": 0.19,
      "step": 3500
    },
    {
      "epoch": 51.94805194805195,
      "grad_norm": 2.6488523483276367,
      "learning_rate": 2.4025974025974027e-05,
      "loss": 0.1353,
      "step": 4000
    },
    {
      "epoch": 58.44155844155844,
      "grad_norm": 0.12463207542896271,
      "learning_rate": 2.077922077922078e-05,
      "loss": 0.1148,
      "step": 4500
    },
    {
      "epoch": 64.93506493506493,
      "grad_norm": 0.8130776286125183,
      "learning_rate": 1.7532467532467535e-05,
      "loss": 0.0934,
      "step": 5000
    },
    {
      "epoch": 71.42857142857143,
      "grad_norm": 0.03147471696138382,
      "learning_rate": 1.4285714285714285e-05,
      "loss": 0.0735,
      "step": 5500
    },
    {
      "epoch": 77.92207792207792,
      "grad_norm": 6.990360260009766,
      "learning_rate": 1.103896103896104e-05,
      "loss": 0.0427,
      "step": 6000
    },
    {
      "epoch": 84.41558441558442,
      "grad_norm": 2.2498698234558105,
      "learning_rate": 7.792207792207792e-06,
      "loss": 0.0373,
      "step": 6500
    },
    {
      "epoch": 90.9090909090909,
      "grad_norm": 0.016162734478712082,
      "learning_rate": 4.5454545454545455e-06,
      "loss": 0.028,
      "step": 7000
    },
    {
      "epoch": 97.40259740259741,
      "grad_norm": 0.5883862972259521,
      "learning_rate": 1.2987012987012988e-06,
      "loss": 0.0206,
      "step": 7500
    },
    {
      "epoch": 100.0,
      "step": 7700,
      "total_flos": 1.1008605548556e+16,
      "train_loss": 0.30270214981846993,
      "train_runtime": 8124.9371,
      "train_samples_per_second": 7.532,
      "train_steps_per_second": 0.948
    }
  ],
  "logging_steps": 500,
  "max_steps": 7700,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 100,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.1008605548556e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}