{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.362204724409449,
  "eval_steps": 100,
  "global_step": 300,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07874015748031496,
      "grad_norm": 4.788692220904152,
      "learning_rate": 2.564102564102564e-06,
      "loss": 0.8915,
      "step": 10
    },
    {
      "epoch": 0.15748031496062992,
      "grad_norm": 3.9681155417558394,
      "learning_rate": 5.128205128205128e-06,
      "loss": 0.6845,
      "step": 20
    },
    {
      "epoch": 0.23622047244094488,
      "grad_norm": 2.445419744467811,
      "learning_rate": 7.692307692307694e-06,
      "loss": 0.4929,
      "step": 30
    },
    {
      "epoch": 0.31496062992125984,
      "grad_norm": 1.8819767354510506,
      "learning_rate": 9.999789047591563e-06,
      "loss": 0.4298,
      "step": 40
    },
    {
      "epoch": 0.3937007874015748,
      "grad_norm": 2.0760338370227167,
      "learning_rate": 9.974496289936769e-06,
      "loss": 0.3971,
      "step": 50
    },
    {
      "epoch": 0.47244094488188976,
      "grad_norm": 2.0489392968439364,
      "learning_rate": 9.90725746626209e-06,
      "loss": 0.4066,
      "step": 60
    },
    {
      "epoch": 0.5511811023622047,
      "grad_norm": 2.161729558547336,
      "learning_rate": 9.798639549376946e-06,
      "loss": 0.408,
      "step": 70
    },
    {
      "epoch": 0.6299212598425197,
      "grad_norm": 2.1931128647505984,
      "learning_rate": 9.64955842986544e-06,
      "loss": 0.3848,
      "step": 80
    },
    {
      "epoch": 0.7086614173228346,
      "grad_norm": 2.0899020274437192,
      "learning_rate": 9.461271193091971e-06,
      "loss": 0.3953,
      "step": 90
    },
    {
      "epoch": 0.7874015748031497,
      "grad_norm": 1.808423743095037,
      "learning_rate": 9.23536551917611e-06,
      "loss": 0.376,
      "step": 100
    },
    {
      "epoch": 0.7874015748031497,
      "eval_loss": 0.38672611117362976,
      "eval_runtime": 42.5118,
      "eval_samples_per_second": 5.293,
      "eval_steps_per_second": 0.682,
      "step": 100
    },
    {
      "epoch": 0.8661417322834646,
      "grad_norm": 1.9725731315599209,
      "learning_rate": 8.973746295318499e-06,
      "loss": 0.3838,
      "step": 110
    },
    {
      "epoch": 0.9448818897637795,
      "grad_norm": 1.7672987008455687,
      "learning_rate": 8.67861955336566e-06,
      "loss": 0.3682,
      "step": 120
    },
    {
      "epoch": 1.0236220472440944,
      "grad_norm": 1.6213272515956259,
      "learning_rate": 8.352473868055746e-06,
      "loss": 0.3452,
      "step": 130
    },
    {
      "epoch": 1.1023622047244095,
      "grad_norm": 1.5697803097516594,
      "learning_rate": 7.998059372799409e-06,
      "loss": 0.2646,
      "step": 140
    },
    {
      "epoch": 1.1811023622047245,
      "grad_norm": 1.7481930921294027,
      "learning_rate": 7.61836456993939e-06,
      "loss": 0.2756,
      "step": 150
    },
    {
      "epoch": 1.2598425196850394,
      "grad_norm": 1.6109919715034606,
      "learning_rate": 7.2165911310299305e-06,
      "loss": 0.2839,
      "step": 160
    },
    {
      "epoch": 1.3385826771653544,
      "grad_norm": 1.4432436412940057,
      "learning_rate": 6.796126899625688e-06,
      "loss": 0.2727,
      "step": 170
    },
    {
      "epoch": 1.4173228346456692,
      "grad_norm": 1.5903811190023818,
      "learning_rate": 6.360517324226676e-06,
      "loss": 0.2734,
      "step": 180
    },
    {
      "epoch": 1.4960629921259843,
      "grad_norm": 1.687429133617154,
      "learning_rate": 5.913435562263036e-06,
      "loss": 0.2699,
      "step": 190
    },
    {
      "epoch": 1.574803149606299,
      "grad_norm": 1.6632044555521164,
      "learning_rate": 5.458651507209518e-06,
      "loss": 0.2693,
      "step": 200
    },
    {
      "epoch": 1.574803149606299,
      "eval_loss": 0.37786543369293213,
      "eval_runtime": 42.6361,
      "eval_samples_per_second": 5.277,
      "eval_steps_per_second": 0.68,
      "step": 200
    },
    {
      "epoch": 1.6535433070866141,
      "grad_norm": 1.8654466909346823,
      "learning_rate": 5e-06,
      "loss": 0.273,
      "step": 210
    },
    {
      "epoch": 1.7322834645669292,
      "grad_norm": 1.3940696116583995,
      "learning_rate": 4.541348492790482e-06,
      "loss": 0.2531,
      "step": 220
    },
    {
      "epoch": 1.811023622047244,
      "grad_norm": 1.7698182117778762,
      "learning_rate": 4.0865644377369666e-06,
      "loss": 0.2593,
      "step": 230
    },
    {
      "epoch": 1.889763779527559,
      "grad_norm": 1.4818408307002349,
      "learning_rate": 3.639482675773324e-06,
      "loss": 0.2552,
      "step": 240
    },
    {
      "epoch": 1.968503937007874,
      "grad_norm": 1.6024667186022836,
      "learning_rate": 3.203873100374314e-06,
      "loss": 0.2612,
      "step": 250
    },
    {
      "epoch": 2.047244094488189,
      "grad_norm": 1.197399928515418,
      "learning_rate": 2.783408868970071e-06,
      "loss": 0.1957,
      "step": 260
    },
    {
      "epoch": 2.1259842519685037,
      "grad_norm": 1.6721162188557013,
      "learning_rate": 2.381635430060611e-06,
      "loss": 0.1744,
      "step": 270
    },
    {
      "epoch": 2.204724409448819,
      "grad_norm": 1.447487806573841,
      "learning_rate": 2.0019406272005913e-06,
      "loss": 0.1643,
      "step": 280
    },
    {
      "epoch": 2.283464566929134,
      "grad_norm": 1.2708402939697436,
      "learning_rate": 1.6475261319442553e-06,
      "loss": 0.1708,
      "step": 290
    },
    {
      "epoch": 2.362204724409449,
      "grad_norm": 1.4299763336477376,
      "learning_rate": 1.321380446634342e-06,
      "loss": 0.1756,
      "step": 300
    },
    {
      "epoch": 2.362204724409449,
      "eval_loss": 0.40329092741012573,
      "eval_runtime": 42.5101,
      "eval_samples_per_second": 5.293,
      "eval_steps_per_second": 0.682,
      "step": 300
    }
  ],
  "logging_steps": 10,
  "max_steps": 381,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 92652502515712.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}