{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.362204724409449,
  "eval_steps": 100,
  "global_step": 300,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07874015748031496,
      "grad_norm": 3.9880661873949093,
      "learning_rate": 2.564102564102564e-06,
      "loss": 0.8813,
      "step": 10
    },
    {
      "epoch": 0.15748031496062992,
      "grad_norm": 3.6693616620597256,
      "learning_rate": 5.128205128205128e-06,
      "loss": 0.6636,
      "step": 20
    },
    {
      "epoch": 0.23622047244094488,
      "grad_norm": 2.199540616164596,
      "learning_rate": 7.692307692307694e-06,
      "loss": 0.4772,
      "step": 30
    },
    {
      "epoch": 0.31496062992125984,
      "grad_norm": 1.967802188964899,
      "learning_rate": 9.999789047591563e-06,
      "loss": 0.416,
      "step": 40
    },
    {
      "epoch": 0.3937007874015748,
      "grad_norm": 1.9989660316683298,
      "learning_rate": 9.974496289936769e-06,
      "loss": 0.3917,
      "step": 50
    },
    {
      "epoch": 0.47244094488188976,
      "grad_norm": 2.0075383328381164,
      "learning_rate": 9.90725746626209e-06,
      "loss": 0.3851,
      "step": 60
    },
    {
      "epoch": 0.5511811023622047,
      "grad_norm": 1.8508299028417263,
      "learning_rate": 9.798639549376946e-06,
      "loss": 0.3912,
      "step": 70
    },
    {
      "epoch": 0.6299212598425197,
      "grad_norm": 2.02221509797565,
      "learning_rate": 9.64955842986544e-06,
      "loss": 0.3666,
      "step": 80
    },
    {
      "epoch": 0.7086614173228346,
      "grad_norm": 2.074540853778448,
      "learning_rate": 9.461271193091971e-06,
      "loss": 0.3573,
      "step": 90
    },
    {
      "epoch": 0.7874015748031497,
      "grad_norm": 1.7251665239071,
      "learning_rate": 9.23536551917611e-06,
      "loss": 0.367,
      "step": 100
    },
    {
      "epoch": 0.7874015748031497,
      "eval_loss": 0.35987070202827454,
      "eval_runtime": 44.8478,
      "eval_samples_per_second": 5.017,
      "eval_steps_per_second": 0.647,
      "step": 100
    },
    {
      "epoch": 0.8661417322834646,
      "grad_norm": 1.797771958721456,
      "learning_rate": 8.973746295318499e-06,
      "loss": 0.3645,
      "step": 110
    },
    {
      "epoch": 0.9448818897637795,
      "grad_norm": 1.8039826304958573,
      "learning_rate": 8.67861955336566e-06,
      "loss": 0.3521,
      "step": 120
    },
    {
      "epoch": 1.0236220472440944,
      "grad_norm": 4.883591010598164,
      "learning_rate": 8.352473868055746e-06,
      "loss": 0.3345,
      "step": 130
    },
    {
      "epoch": 1.1023622047244095,
      "grad_norm": 1.4568352538600138,
      "learning_rate": 7.998059372799409e-06,
      "loss": 0.2637,
      "step": 140
    },
    {
      "epoch": 1.1811023622047245,
      "grad_norm": 1.5145500701439651,
      "learning_rate": 7.61836456993939e-06,
      "loss": 0.2607,
      "step": 150
    },
    {
      "epoch": 1.2598425196850394,
      "grad_norm": 1.6758721014591884,
      "learning_rate": 7.2165911310299305e-06,
      "loss": 0.2653,
      "step": 160
    },
    {
      "epoch": 1.3385826771653544,
      "grad_norm": 1.4962522667578417,
      "learning_rate": 6.796126899625688e-06,
      "loss": 0.2614,
      "step": 170
    },
    {
      "epoch": 1.4173228346456692,
      "grad_norm": 1.662185871879351,
      "learning_rate": 6.360517324226676e-06,
      "loss": 0.2528,
      "step": 180
    },
    {
      "epoch": 1.4960629921259843,
      "grad_norm": 1.5992237972859082,
      "learning_rate": 5.913435562263036e-06,
      "loss": 0.2614,
      "step": 190
    },
    {
      "epoch": 1.574803149606299,
      "grad_norm": 1.6376215727810626,
      "learning_rate": 5.458651507209518e-06,
      "loss": 0.2476,
      "step": 200
    },
    {
      "epoch": 1.574803149606299,
      "eval_loss": 0.35231101512908936,
      "eval_runtime": 44.7548,
      "eval_samples_per_second": 5.027,
      "eval_steps_per_second": 0.648,
      "step": 200
    },
    {
      "epoch": 1.6535433070866141,
      "grad_norm": 1.5292608519655557,
      "learning_rate": 5e-06,
      "loss": 0.2528,
      "step": 210
    },
    {
      "epoch": 1.7322834645669292,
      "grad_norm": 1.5751478691748728,
      "learning_rate": 4.541348492790482e-06,
      "loss": 0.2432,
      "step": 220
    },
    {
      "epoch": 1.811023622047244,
      "grad_norm": 1.5142862198021734,
      "learning_rate": 4.0865644377369666e-06,
      "loss": 0.2498,
      "step": 230
    },
    {
      "epoch": 1.889763779527559,
      "grad_norm": 1.4779231579448862,
      "learning_rate": 3.639482675773324e-06,
      "loss": 0.2525,
      "step": 240
    },
    {
      "epoch": 1.968503937007874,
      "grad_norm": 1.4163589243219818,
      "learning_rate": 3.203873100374314e-06,
      "loss": 0.2503,
      "step": 250
    },
    {
      "epoch": 2.047244094488189,
      "grad_norm": 1.1803155886068133,
      "learning_rate": 2.783408868970071e-06,
      "loss": 0.1876,
      "step": 260
    },
    {
      "epoch": 2.1259842519685037,
      "grad_norm": 1.2574667771327577,
      "learning_rate": 2.381635430060611e-06,
      "loss": 0.1608,
      "step": 270
    },
    {
      "epoch": 2.204724409448819,
      "grad_norm": 1.3942256471439354,
      "learning_rate": 2.0019406272005913e-06,
      "loss": 0.1622,
      "step": 280
    },
    {
      "epoch": 2.283464566929134,
      "grad_norm": 1.1867880815644567,
      "learning_rate": 1.6475261319442553e-06,
      "loss": 0.1618,
      "step": 290
    },
    {
      "epoch": 2.362204724409449,
      "grad_norm": 1.2093026133477982,
      "learning_rate": 1.321380446634342e-06,
      "loss": 0.1736,
      "step": 300
    },
    {
      "epoch": 2.362204724409449,
      "eval_loss": 0.37573304772377014,
      "eval_runtime": 45.0097,
      "eval_samples_per_second": 4.999,
      "eval_steps_per_second": 0.644,
      "step": 300
    }
  ],
  "logging_steps": 10,
  "max_steps": 381,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 93586546753536.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}