{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9968051118210862,
  "eval_steps": 500,
  "global_step": 39,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.025559105431309903,
      "grad_norm": 0.2626091909599136,
      "learning_rate": 0.0,
      "loss": 0.3458,
      "step": 1
    },
    {
      "epoch": 0.051118210862619806,
      "grad_norm": 0.27651781915246215,
      "learning_rate": 5e-06,
      "loss": 0.3415,
      "step": 2
    },
    {
      "epoch": 0.07667731629392971,
      "grad_norm": 0.25637322332981016,
      "learning_rate": 1e-05,
      "loss": 0.3421,
      "step": 3
    },
    {
      "epoch": 0.10223642172523961,
      "grad_norm": 0.28364039020678244,
      "learning_rate": 9.981987442712634e-06,
      "loss": 0.3483,
      "step": 4
    },
    {
      "epoch": 0.12779552715654952,
      "grad_norm": 0.27301365865538696,
      "learning_rate": 9.928079551738542e-06,
      "loss": 0.3431,
      "step": 5
    },
    {
      "epoch": 0.15335463258785942,
      "grad_norm": 0.28350562964567716,
      "learning_rate": 9.838664734667496e-06,
      "loss": 0.3396,
      "step": 6
    },
    {
      "epoch": 0.17891373801916932,
      "grad_norm": 0.25297373231701475,
      "learning_rate": 9.714387227305422e-06,
      "loss": 0.3419,
      "step": 7
    },
    {
      "epoch": 0.20447284345047922,
      "grad_norm": 0.25767126322896966,
      "learning_rate": 9.55614245194068e-06,
      "loss": 0.3448,
      "step": 8
    },
    {
      "epoch": 0.23003194888178913,
      "grad_norm": 0.2540798667409847,
      "learning_rate": 9.365070565805941e-06,
      "loss": 0.3386,
      "step": 9
    },
    {
      "epoch": 0.25559105431309903,
      "grad_norm": 0.26453457604819547,
      "learning_rate": 9.142548246219212e-06,
      "loss": 0.3403,
      "step": 10
    },
    {
      "epoch": 0.28115015974440893,
      "grad_norm": 0.26317235471809736,
      "learning_rate": 8.890178771592198e-06,
      "loss": 0.3356,
      "step": 11
    },
    {
      "epoch": 0.30670926517571884,
      "grad_norm": 0.2721259282527859,
      "learning_rate": 8.609780469772623e-06,
      "loss": 0.3381,
      "step": 12
    },
    {
      "epoch": 0.33226837060702874,
      "grad_norm": 0.2564865411853346,
      "learning_rate": 8.303373616950408e-06,
      "loss": 0.3425,
      "step": 13
    },
    {
      "epoch": 0.35782747603833864,
      "grad_norm": 0.24760846201065675,
      "learning_rate": 7.973165881521435e-06,
      "loss": 0.3392,
      "step": 14
    },
    {
      "epoch": 0.38338658146964855,
      "grad_norm": 0.2947138038022994,
      "learning_rate": 7.621536417786159e-06,
      "loss": 0.3388,
      "step": 15
    },
    {
      "epoch": 0.40894568690095845,
      "grad_norm": 0.25395376472369197,
      "learning_rate": 7.251018724088367e-06,
      "loss": 0.346,
      "step": 16
    },
    {
      "epoch": 0.43450479233226835,
      "grad_norm": 0.25872237613089877,
      "learning_rate": 6.864282388901544e-06,
      "loss": 0.3364,
      "step": 17
    },
    {
      "epoch": 0.46006389776357826,
      "grad_norm": 0.26103012316330376,
      "learning_rate": 6.464113856382752e-06,
      "loss": 0.3412,
      "step": 18
    },
    {
      "epoch": 0.48562300319488816,
      "grad_norm": 0.27160328792849525,
      "learning_rate": 6.053396349978632e-06,
      "loss": 0.3412,
      "step": 19
    },
    {
      "epoch": 0.5111821086261981,
      "grad_norm": 0.3024596185766711,
      "learning_rate": 5.635089098734394e-06,
      "loss": 0.3409,
      "step": 20
    },
    {
      "epoch": 0.536741214057508,
      "grad_norm": 0.258560809986074,
      "learning_rate": 5.212206015980742e-06,
      "loss": 0.3293,
      "step": 21
    },
    {
      "epoch": 0.5623003194888179,
      "grad_norm": 0.2797900071508153,
      "learning_rate": 4.78779398401926e-06,
      "loss": 0.3415,
      "step": 22
    },
    {
      "epoch": 0.5878594249201278,
      "grad_norm": 0.2754521173124183,
      "learning_rate": 4.364910901265607e-06,
      "loss": 0.3351,
      "step": 23
    },
    {
      "epoch": 0.6134185303514377,
      "grad_norm": 0.2586707256698628,
      "learning_rate": 3.94660365002137e-06,
      "loss": 0.3345,
      "step": 24
    },
    {
      "epoch": 0.6389776357827476,
      "grad_norm": 0.2413286522184864,
      "learning_rate": 3.5358861436172487e-06,
      "loss": 0.3361,
      "step": 25
    },
    {
      "epoch": 0.6645367412140575,
      "grad_norm": 0.2616355975835233,
      "learning_rate": 3.1357176110984578e-06,
      "loss": 0.3345,
      "step": 26
    },
    {
      "epoch": 0.6900958466453674,
      "grad_norm": 0.2707932050967337,
      "learning_rate": 2.748981275911633e-06,
      "loss": 0.3418,
      "step": 27
    },
    {
      "epoch": 0.7156549520766773,
      "grad_norm": 0.23620680655540707,
      "learning_rate": 2.3784635822138424e-06,
      "loss": 0.3347,
      "step": 28
    },
    {
      "epoch": 0.7412140575079872,
      "grad_norm": 0.2552890215499816,
      "learning_rate": 2.0268341184785674e-06,
      "loss": 0.3398,
      "step": 29
    },
    {
      "epoch": 0.7667731629392971,
      "grad_norm": 0.26647550560624106,
      "learning_rate": 1.6966263830495939e-06,
      "loss": 0.3383,
      "step": 30
    },
    {
      "epoch": 0.792332268370607,
      "grad_norm": 0.2697021410413706,
      "learning_rate": 1.390219530227378e-06,
      "loss": 0.3396,
      "step": 31
    },
    {
      "epoch": 0.8178913738019169,
      "grad_norm": 0.23926034170768884,
      "learning_rate": 1.1098212284078037e-06,
      "loss": 0.3331,
      "step": 32
    },
    {
      "epoch": 0.8434504792332268,
      "grad_norm": 0.25008124806457394,
      "learning_rate": 8.574517537807897e-07,
      "loss": 0.3358,
      "step": 33
    },
    {
      "epoch": 0.8690095846645367,
      "grad_norm": 0.24627612678594885,
      "learning_rate": 6.349294341940593e-07,
      "loss": 0.3405,
      "step": 34
    },
    {
      "epoch": 0.8945686900958466,
      "grad_norm": 0.2604020013506715,
      "learning_rate": 4.43857548059321e-07,
      "loss": 0.3319,
      "step": 35
    },
    {
      "epoch": 0.9201277955271565,
      "grad_norm": 0.25570313872707945,
      "learning_rate": 2.85612772694579e-07,
      "loss": 0.3361,
      "step": 36
    },
    {
      "epoch": 0.9456869009584664,
      "grad_norm": 0.2604114741212292,
      "learning_rate": 1.6133526533250566e-07,
      "loss": 0.3336,
      "step": 37
    },
    {
      "epoch": 0.9712460063897763,
      "grad_norm": 0.26071349297521423,
      "learning_rate": 7.192044826145772e-08,
      "loss": 0.333,
      "step": 38
    },
    {
      "epoch": 0.9968051118210862,
      "grad_norm": 0.31718449956055733,
      "learning_rate": 1.8012557287367394e-08,
      "loss": 0.3367,
      "step": 39
    },
    {
      "epoch": 0.9968051118210862,
      "step": 39,
      "total_flos": 114766825914368.0,
      "train_loss": 0.33876595283165956,
      "train_runtime": 482.5701,
      "train_samples_per_second": 20.722,
      "train_steps_per_second": 0.081
    }
  ],
  "logging_steps": 1,
  "max_steps": 39,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 40,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 114766825914368.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}