{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.6996921046844073,
  "eval_steps": 500,
  "global_step": 12726,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.027490653177919506,
      "grad_norm": 7.844790935516357,
      "learning_rate": 1.98e-05,
      "loss": 3.3532,
      "step": 500
    },
    {
      "epoch": 0.05498130635583901,
      "grad_norm": 7.924389362335205,
      "learning_rate": 1.9816883693400415e-05,
      "loss": 2.6056,
      "step": 1000
    },
    {
      "epoch": 0.08247195953375852,
      "grad_norm": 7.497577667236328,
      "learning_rate": 1.963191772713821e-05,
      "loss": 2.5447,
      "step": 1500
    },
    {
      "epoch": 0.10996261271167802,
      "grad_norm": 6.73529577255249,
      "learning_rate": 1.9446951760876e-05,
      "loss": 2.4492,
      "step": 2000
    },
    {
      "epoch": 0.13745326588959753,
      "grad_norm": 6.306326866149902,
      "learning_rate": 1.9261985794613793e-05,
      "loss": 2.4463,
      "step": 2500
    },
    {
      "epoch": 0.16494391906751704,
      "grad_norm": 5.651560306549072,
      "learning_rate": 1.9077019828351584e-05,
      "loss": 2.3291,
      "step": 3000
    },
    {
      "epoch": 0.19243457224543656,
      "grad_norm": 4.0863471031188965,
      "learning_rate": 1.8892053862089378e-05,
      "loss": 2.3461,
      "step": 3500
    },
    {
      "epoch": 0.21992522542335605,
      "grad_norm": 4.746509075164795,
      "learning_rate": 1.8707087895827168e-05,
      "loss": 2.2806,
      "step": 4000
    },
    {
      "epoch": 0.24741587860127556,
      "grad_norm": 4.339619159698486,
      "learning_rate": 1.8522121929564962e-05,
      "loss": 2.2746,
      "step": 4500
    },
    {
      "epoch": 0.27490653177919505,
      "grad_norm": 4.018620014190674,
      "learning_rate": 1.8337155963302752e-05,
      "loss": 2.2403,
      "step": 5000
    },
    {
      "epoch": 0.3023971849571146,
      "grad_norm": 4.380566120147705,
      "learning_rate": 1.8152189997040546e-05,
      "loss": 2.226,
      "step": 5500
    },
    {
      "epoch": 0.3298878381350341,
      "grad_norm": 4.302157402038574,
      "learning_rate": 1.796722403077834e-05,
      "loss": 2.2119,
      "step": 6000
    },
    {
      "epoch": 0.3573784913129536,
      "grad_norm": 4.434130668640137,
      "learning_rate": 1.778225806451613e-05,
      "loss": 2.2138,
      "step": 6500
    },
    {
      "epoch": 0.3848691444908731,
      "grad_norm": 3.770848035812378,
      "learning_rate": 1.759729209825392e-05,
      "loss": 2.2103,
      "step": 7000
    },
    {
      "epoch": 0.4123597976687926,
      "grad_norm": 2.9288535118103027,
      "learning_rate": 1.7412326131991715e-05,
      "loss": 2.1397,
      "step": 7500
    },
    {
      "epoch": 0.4398504508467121,
      "grad_norm": 3.2082479000091553,
      "learning_rate": 1.722736016572951e-05,
      "loss": 2.2021,
      "step": 8000
    },
    {
      "epoch": 0.46734110402463164,
      "grad_norm": 3.6131439208984375,
      "learning_rate": 1.70423941994673e-05,
      "loss": 2.2106,
      "step": 8500
    },
    {
      "epoch": 0.4948317572025511,
      "grad_norm": 2.992415189743042,
      "learning_rate": 1.6857798165137616e-05,
      "loss": 2.1246,
      "step": 9000
    },
    {
      "epoch": 0.5223224103804707,
      "grad_norm": 3.4553306102752686,
      "learning_rate": 1.667283219887541e-05,
      "loss": 2.0973,
      "step": 9500
    },
    {
      "epoch": 0.5498130635583901,
      "grad_norm": 2.82578182220459,
      "learning_rate": 1.64878662326132e-05,
      "loss": 2.1257,
      "step": 10000
    },
    {
      "epoch": 0.5773037167363096,
      "grad_norm": 2.674267292022705,
      "learning_rate": 1.630290026635099e-05,
      "loss": 2.13,
      "step": 10500
    },
    {
      "epoch": 0.6047943699142292,
      "grad_norm": 3.27205491065979,
      "learning_rate": 1.6117934300088784e-05,
      "loss": 2.1855,
      "step": 11000
    },
    {
      "epoch": 0.6322850230921486,
      "grad_norm": 3.042849540710449,
      "learning_rate": 1.5932968333826578e-05,
      "loss": 2.1532,
      "step": 11500
    },
    {
      "epoch": 0.6597756762700682,
      "grad_norm": 3.618957042694092,
      "learning_rate": 1.5748372299496894e-05,
      "loss": 2.1202,
      "step": 12000
    },
    {
      "epoch": 0.6872663294479877,
      "grad_norm": 3.6390328407287598,
      "learning_rate": 1.5563406333234685e-05,
      "loss": 2.0902,
      "step": 12500
    }
  ],
  "logging_steps": 500,
  "max_steps": 54564,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 1818,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3325202399232000.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}
|
|