{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 309,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.032362459546925564,
      "grad_norm": 3.921274423599243,
      "learning_rate": 5.806451612903226e-06,
      "loss": 0.5979,
      "step": 10
    },
    {
      "epoch": 0.06472491909385113,
      "grad_norm": 1.9820393323898315,
      "learning_rate": 1.2258064516129034e-05,
      "loss": 0.2221,
      "step": 20
    },
    {
      "epoch": 0.0970873786407767,
      "grad_norm": 1.2062925100326538,
      "learning_rate": 1.870967741935484e-05,
      "loss": 0.1165,
      "step": 30
    },
    {
      "epoch": 0.12944983818770225,
      "grad_norm": 0.9094163179397583,
      "learning_rate": 1.9959162014075553e-05,
      "loss": 0.0929,
      "step": 40
    },
    {
      "epoch": 0.16181229773462782,
      "grad_norm": 0.5544825792312622,
      "learning_rate": 1.9793829188147406e-05,
      "loss": 0.078,
      "step": 50
    },
    {
      "epoch": 0.1941747572815534,
      "grad_norm": 0.6911172270774841,
      "learning_rate": 1.9503556665478066e-05,
      "loss": 0.0822,
      "step": 60
    },
    {
      "epoch": 0.22653721682847897,
      "grad_norm": 0.6673680543899536,
      "learning_rate": 1.9092047447238775e-05,
      "loss": 0.0634,
      "step": 70
    },
    {
      "epoch": 0.2588996763754045,
      "grad_norm": 0.4891921281814575,
      "learning_rate": 1.856455114887056e-05,
      "loss": 0.0594,
      "step": 80
    },
    {
      "epoch": 0.2912621359223301,
      "grad_norm": 0.4860777258872986,
      "learning_rate": 1.792779703083777e-05,
      "loss": 0.0668,
      "step": 90
    },
    {
      "epoch": 0.32362459546925565,
      "grad_norm": 0.5248063802719116,
      "learning_rate": 1.7189908153577473e-05,
      "loss": 0.0601,
      "step": 100
    },
    {
      "epoch": 0.3559870550161812,
      "grad_norm": 0.5064206123352051,
      "learning_rate": 1.636029775176862e-05,
      "loss": 0.0585,
      "step": 110
    },
    {
      "epoch": 0.3883495145631068,
      "grad_norm": 0.420142263174057,
      "learning_rate": 1.544954914987238e-05,
      "loss": 0.0571,
      "step": 120
    },
    {
      "epoch": 0.42071197411003236,
      "grad_norm": 0.36789119243621826,
      "learning_rate": 1.4469280750858854e-05,
      "loss": 0.0523,
      "step": 130
    },
    {
      "epoch": 0.45307443365695793,
      "grad_norm": 0.3841588497161865,
      "learning_rate": 1.3431997820456592e-05,
      "loss": 0.0488,
      "step": 140
    },
    {
      "epoch": 0.4854368932038835,
      "grad_norm": 0.6112226247787476,
      "learning_rate": 1.2350932957710322e-05,
      "loss": 0.047,
      "step": 150
    },
    {
      "epoch": 0.517799352750809,
      "grad_norm": 0.45051074028015137,
      "learning_rate": 1.1239877286961123e-05,
      "loss": 0.0462,
      "step": 160
    },
    {
      "epoch": 0.5501618122977346,
      "grad_norm": 0.3347930312156677,
      "learning_rate": 1.01130045247298e-05,
      "loss": 0.0497,
      "step": 170
    },
    {
      "epoch": 0.5825242718446602,
      "grad_norm": 0.42931899428367615,
      "learning_rate": 8.98469016587892e-06,
      "loss": 0.0437,
      "step": 180
    },
    {
      "epoch": 0.6148867313915858,
      "grad_norm": 0.3714495599269867,
      "learning_rate": 7.869328095692313e-06,
      "loss": 0.0426,
      "step": 190
    },
    {
      "epoch": 0.6472491909385113,
      "grad_norm": 0.379412978887558,
      "learning_rate": 6.781146967348283e-06,
      "loss": 0.0431,
      "step": 200
    },
    {
      "epoch": 0.6796116504854369,
      "grad_norm": 0.34879937767982483,
      "learning_rate": 5.7340286872557515e-06,
      "loss": 0.0364,
      "step": 210
    },
    {
      "epoch": 0.7119741100323624,
      "grad_norm": 0.3119371235370636,
      "learning_rate": 4.7413313238324556e-06,
      "loss": 0.042,
      "step": 220
    },
    {
      "epoch": 0.7443365695792881,
      "grad_norm": 0.391166627407074,
      "learning_rate": 3.815718698874672e-06,
      "loss": 0.0425,
      "step": 230
    },
    {
      "epoch": 0.7766990291262136,
      "grad_norm": 0.2979751229286194,
      "learning_rate": 2.9689988354181742e-06,
      "loss": 0.0383,
      "step": 240
    },
    {
      "epoch": 0.8090614886731392,
      "grad_norm": 0.3025205433368683,
      "learning_rate": 2.211973323008041e-06,
      "loss": 0.0422,
      "step": 250
    },
    {
      "epoch": 0.8414239482200647,
      "grad_norm": 0.409769743680954,
      "learning_rate": 1.5542995220217961e-06,
      "loss": 0.0416,
      "step": 260
    },
    {
      "epoch": 0.8737864077669902,
      "grad_norm": 0.31411364674568176,
      "learning_rate": 1.0043673649027519e-06,
      "loss": 0.0382,
      "step": 270
    },
    {
      "epoch": 0.9061488673139159,
      "grad_norm": 0.256271630525589,
      "learning_rate": 5.691923259479093e-07,
      "loss": 0.0376,
      "step": 280
    },
    {
      "epoch": 0.9385113268608414,
      "grad_norm": 0.276463121175766,
      "learning_rate": 2.5432592503288e-07,
      "loss": 0.0383,
      "step": 290
    },
    {
      "epoch": 0.970873786407767,
      "grad_norm": 0.33534082770347595,
      "learning_rate": 6.378490697611761e-08,
      "loss": 0.041,
      "step": 300
    },
    {
      "epoch": 1.0,
      "step": 309,
      "total_flos": 3.151751154761728e+17,
      "train_loss": 0.07602016984644831,
      "train_runtime": 2131.4563,
      "train_samples_per_second": 4.639,
      "train_steps_per_second": 0.145
    }
  ],
  "logging_steps": 10,
  "max_steps": 309,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3.151751154761728e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}