{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.4496088357109986,
  "eval_steps": 500,
  "global_step": 6300,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05752416014726185,
      "grad_norm": 0.5827537775039673,
      "learning_rate": 0.00016513000460193283,
      "loss": 5.0544,
      "step": 250
    },
    {
      "epoch": 0.1150483202945237,
      "grad_norm": 0.5847251415252686,
      "learning_rate": 0.00016024045098941557,
      "loss": 4.2362,
      "step": 500
    },
    {
      "epoch": 0.1150483202945237,
      "eval_loss": 3.4322922229766846,
      "eval_runtime": 35.9031,
      "eval_samples_per_second": 109.74,
      "eval_steps_per_second": 3.231,
      "step": 500
    },
    {
      "epoch": 0.17257248044178555,
      "grad_norm": 0.6557937264442444,
      "learning_rate": 0.0001553508973768983,
      "loss": 4.0566,
      "step": 750
    },
    {
      "epoch": 0.2300966405890474,
      "grad_norm": 0.6734243631362915,
      "learning_rate": 0.00015046134376438104,
      "loss": 3.9349,
      "step": 1000
    },
    {
      "epoch": 0.2300966405890474,
      "eval_loss": 3.1976470947265625,
      "eval_runtime": 35.3562,
      "eval_samples_per_second": 111.437,
      "eval_steps_per_second": 3.281,
      "step": 1000
    },
    {
      "epoch": 0.28762080073630925,
      "grad_norm": 0.6826881766319275,
      "learning_rate": 0.0001455717901518638,
      "loss": 3.856,
      "step": 1250
    },
    {
      "epoch": 0.3451449608835711,
      "grad_norm": 0.6653180122375488,
      "learning_rate": 0.00014068223653934653,
      "loss": 3.7903,
      "step": 1500
    },
    {
      "epoch": 0.3451449608835711,
      "eval_loss": 3.0786449909210205,
      "eval_runtime": 36.0086,
      "eval_samples_per_second": 109.418,
      "eval_steps_per_second": 3.221,
      "step": 1500
    },
    {
      "epoch": 0.40266912103083297,
      "grad_norm": 0.7236284613609314,
      "learning_rate": 0.0001357926829268293,
      "loss": 3.7295,
      "step": 1750
    },
    {
      "epoch": 0.4601932811780948,
      "grad_norm": 0.7614400386810303,
      "learning_rate": 0.000130903129314312,
      "loss": 3.692,
      "step": 2000
    },
    {
      "epoch": 0.4601932811780948,
      "eval_loss": 3.0163052082061768,
      "eval_runtime": 35.3697,
      "eval_samples_per_second": 111.395,
      "eval_steps_per_second": 3.28,
      "step": 2000
    },
    {
      "epoch": 0.5177174413253567,
      "grad_norm": 0.7231118083000183,
      "learning_rate": 0.00012601357570179476,
      "loss": 3.6578,
      "step": 2250
    },
    {
      "epoch": 0.5752416014726185,
      "grad_norm": 0.7403096556663513,
      "learning_rate": 0.00012112402208927751,
      "loss": 3.6348,
      "step": 2500
    },
    {
      "epoch": 0.5752416014726185,
      "eval_loss": 2.9728314876556396,
      "eval_runtime": 35.3987,
      "eval_samples_per_second": 111.303,
      "eval_steps_per_second": 3.277,
      "step": 2500
    },
    {
      "epoch": 0.6327657616198803,
      "grad_norm": 0.8307600021362305,
      "learning_rate": 0.00011623446847676025,
      "loss": 3.5985,
      "step": 2750
    },
    {
      "epoch": 0.6902899217671422,
      "grad_norm": 0.692379891872406,
      "learning_rate": 0.000111344914864243,
      "loss": 3.5853,
      "step": 3000
    },
    {
      "epoch": 0.6902899217671422,
      "eval_loss": 2.949444055557251,
      "eval_runtime": 35.5037,
      "eval_samples_per_second": 110.974,
      "eval_steps_per_second": 3.267,
      "step": 3000
    },
    {
      "epoch": 0.747814081914404,
      "grad_norm": 0.6413500905036926,
      "learning_rate": 0.00010645536125172573,
      "loss": 3.553,
      "step": 3250
    },
    {
      "epoch": 0.8053382420616659,
      "grad_norm": 0.747322678565979,
      "learning_rate": 0.00010156580763920848,
      "loss": 3.5418,
      "step": 3500
    },
    {
      "epoch": 0.8053382420616659,
      "eval_loss": 2.9215950965881348,
      "eval_runtime": 35.7029,
      "eval_samples_per_second": 110.355,
      "eval_steps_per_second": 3.249,
      "step": 3500
    },
    {
      "epoch": 0.8628624022089277,
      "grad_norm": 0.7153074741363525,
      "learning_rate": 9.667625402669122e-05,
      "loss": 3.5174,
      "step": 3750
    },
    {
      "epoch": 0.9203865623561897,
      "grad_norm": 0.7129898071289062,
      "learning_rate": 9.178670041417396e-05,
      "loss": 3.5023,
      "step": 4000
    },
    {
      "epoch": 0.9203865623561897,
      "eval_loss": 2.8931143283843994,
      "eval_runtime": 35.6334,
      "eval_samples_per_second": 110.57,
      "eval_steps_per_second": 3.255,
      "step": 4000
    },
    {
      "epoch": 0.9779107225034515,
      "grad_norm": 0.6831061244010925,
      "learning_rate": 8.68971468016567e-05,
      "loss": 3.4839,
      "step": 4250
    },
    {
      "epoch": 1.0354348826507134,
      "grad_norm": 0.6215568780899048,
      "learning_rate": 8.200759318913945e-05,
      "loss": 3.4785,
      "step": 4500
    },
    {
      "epoch": 1.0354348826507134,
      "eval_loss": 2.888288974761963,
      "eval_runtime": 35.5637,
      "eval_samples_per_second": 110.787,
      "eval_steps_per_second": 3.262,
      "step": 4500
    },
    {
      "epoch": 1.0929590427979752,
      "grad_norm": 0.7072056531906128,
      "learning_rate": 7.711803957662219e-05,
      "loss": 3.4706,
      "step": 4750
    },
    {
      "epoch": 1.150483202945237,
      "grad_norm": 0.7484083771705627,
      "learning_rate": 7.222848596410494e-05,
      "loss": 3.4573,
      "step": 5000
    },
    {
      "epoch": 1.150483202945237,
      "eval_loss": 2.8630568981170654,
      "eval_runtime": 34.7986,
      "eval_samples_per_second": 113.223,
      "eval_steps_per_second": 3.333,
      "step": 5000
    },
    {
      "epoch": 1.2080073630924988,
      "grad_norm": 0.8469358682632446,
      "learning_rate": 6.733893235158767e-05,
      "loss": 3.4449,
      "step": 5250
    },
    {
      "epoch": 1.2655315232397606,
      "grad_norm": 0.7269773483276367,
      "learning_rate": 6.244937873907042e-05,
      "loss": 3.43,
      "step": 5500
    },
    {
      "epoch": 1.2655315232397606,
      "eval_loss": 2.853081226348877,
      "eval_runtime": 34.7036,
      "eval_samples_per_second": 113.533,
      "eval_steps_per_second": 3.343,
      "step": 5500
    },
    {
      "epoch": 1.3230556833870226,
      "grad_norm": 0.653972864151001,
      "learning_rate": 5.755982512655316e-05,
      "loss": 3.4265,
      "step": 5750
    },
    {
      "epoch": 1.3805798435342844,
      "grad_norm": 0.6091383695602417,
      "learning_rate": 5.267027151403589e-05,
      "loss": 3.4206,
      "step": 6000
    },
    {
      "epoch": 1.3805798435342844,
      "eval_loss": 2.84901762008667,
      "eval_runtime": 34.7272,
      "eval_samples_per_second": 113.456,
      "eval_steps_per_second": 3.34,
      "step": 6000
    },
    {
      "epoch": 1.4381040036815462,
      "grad_norm": 0.8110650181770325,
      "learning_rate": 4.778071790151864e-05,
      "loss": 3.4127,
      "step": 6250
    }
  ],
  "logging_steps": 250,
  "max_steps": 8692,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 350,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 4833295532556288.0,
  "train_batch_size": 34,
  "trial_name": null,
  "trial_params": null
}