{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.12358882947118241,
  "eval_steps": 62,
  "global_step": 40,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0030897207367795603,
      "grad_norm": 60.04155349731445,
      "learning_rate": 4e-07,
      "loss": 23.0268,
      "step": 1
    },
    {
      "epoch": 0.006179441473559121,
      "grad_norm": 58.97097396850586,
      "learning_rate": 8e-07,
      "loss": 22.9042,
      "step": 2
    },
    {
      "epoch": 0.009269162210338681,
      "grad_norm": 59.939247131347656,
      "learning_rate": 1.2e-06,
      "loss": 22.4922,
      "step": 3
    },
    {
      "epoch": 0.012358882947118241,
      "grad_norm": 60.71556091308594,
      "learning_rate": 1.6e-06,
      "loss": 23.6818,
      "step": 4
    },
    {
      "epoch": 0.015448603683897801,
      "grad_norm": 60.084556579589844,
      "learning_rate": 2e-06,
      "loss": 22.6323,
      "step": 5
    },
    {
      "epoch": 0.018538324420677363,
      "grad_norm": 58.05175018310547,
      "learning_rate": 1.998481516433316e-06,
      "loss": 21.7936,
      "step": 6
    },
    {
      "epoch": 0.02162804515745692,
      "grad_norm": 59.613494873046875,
      "learning_rate": 1.9939306773179494e-06,
      "loss": 22.5545,
      "step": 7
    },
    {
      "epoch": 0.024717765894236483,
      "grad_norm": 60.87885284423828,
      "learning_rate": 1.9863613034027223e-06,
      "loss": 22.6495,
      "step": 8
    },
    {
      "epoch": 0.027807486631016044,
      "grad_norm": 59.92291259765625,
      "learning_rate": 1.9757963826274354e-06,
      "loss": 22.562,
      "step": 9
    },
    {
      "epoch": 0.030897207367795602,
      "grad_norm": 59.16581344604492,
      "learning_rate": 1.9622680003092503e-06,
      "loss": 20.2875,
      "step": 10
    },
    {
      "epoch": 0.03398692810457516,
      "grad_norm": 59.31634521484375,
      "learning_rate": 1.9458172417006346e-06,
      "loss": 21.7414,
      "step": 11
    },
    {
      "epoch": 0.037076648841354726,
      "grad_norm": 59.62112045288086,
      "learning_rate": 1.9264940672148015e-06,
      "loss": 24.1894,
      "step": 12
    },
    {
      "epoch": 0.040166369578134284,
      "grad_norm": 59.239402770996094,
      "learning_rate": 1.9043571606975775e-06,
      "loss": 21.5377,
      "step": 13
    },
    {
      "epoch": 0.04325609031491384,
      "grad_norm": 61.078468322753906,
      "learning_rate": 1.8794737512064888e-06,
      "loss": 22.3836,
      "step": 14
    },
    {
      "epoch": 0.04634581105169341,
      "grad_norm": 60.2807502746582,
      "learning_rate": 1.851919408838327e-06,
      "loss": 22.8097,
      "step": 15
    },
    {
      "epoch": 0.049435531788472965,
      "grad_norm": 60.678627014160156,
      "learning_rate": 1.821777815225245e-06,
      "loss": 21.4694,
      "step": 16
    },
    {
      "epoch": 0.052525252525252523,
      "grad_norm": 61.19229507446289,
      "learning_rate": 1.7891405093963937e-06,
      "loss": 21.6569,
      "step": 17
    },
    {
      "epoch": 0.05561497326203209,
      "grad_norm": 60.128238677978516,
      "learning_rate": 1.754106609776896e-06,
      "loss": 23.511,
      "step": 18
    },
    {
      "epoch": 0.05870469399881165,
      "grad_norm": 59.77021408081055,
      "learning_rate": 1.7167825131684511e-06,
      "loss": 22.5047,
      "step": 19
    },
    {
      "epoch": 0.061794414735591205,
      "grad_norm": 61.9268684387207,
      "learning_rate": 1.6772815716257411e-06,
      "loss": 22.4478,
      "step": 20
    },
    {
      "epoch": 0.06488413547237076,
      "grad_norm": 59.85469436645508,
      "learning_rate": 1.6357237482099683e-06,
      "loss": 22.8151,
      "step": 21
    },
    {
      "epoch": 0.06797385620915032,
      "grad_norm": 60.153926849365234,
      "learning_rate": 1.5922352526649801e-06,
      "loss": 22.5061,
      "step": 22
    },
    {
      "epoch": 0.0710635769459299,
      "grad_norm": 59.1512565612793,
      "learning_rate": 1.5469481581224271e-06,
      "loss": 22.5223,
      "step": 23
    },
    {
      "epoch": 0.07415329768270945,
      "grad_norm": 59.45036697387695,
      "learning_rate": 1.5e-06,
      "loss": 22.095,
      "step": 24
    },
    {
      "epoch": 0.07724301841948901,
      "grad_norm": 59.37210464477539,
      "learning_rate": 1.4515333583108893e-06,
      "loss": 21.9364,
      "step": 25
    },
    {
      "epoch": 0.08033273915626857,
      "grad_norm": 60.37602996826172,
      "learning_rate": 1.4016954246529694e-06,
      "loss": 22.7162,
      "step": 26
    },
    {
      "epoch": 0.08342245989304813,
      "grad_norm": 59.79236602783203,
      "learning_rate": 1.3506375551927544e-06,
      "loss": 22.3541,
      "step": 27
    },
    {
      "epoch": 0.08651218062982768,
      "grad_norm": 59.140647888183594,
      "learning_rate": 1.2985148110016947e-06,
      "loss": 22.8465,
      "step": 28
    },
    {
      "epoch": 0.08960190136660724,
      "grad_norm": 59.66078567504883,
      "learning_rate": 1.245485487140799e-06,
      "loss": 22.3047,
      "step": 29
    },
    {
      "epoch": 0.09269162210338681,
      "grad_norm": 58.10475158691406,
      "learning_rate": 1.1917106319237384e-06,
      "loss": 23.073,
      "step": 30
    },
    {
      "epoch": 0.09578134284016637,
      "grad_norm": 60.376319885253906,
      "learning_rate": 1.1373535578184082e-06,
      "loss": 22.5172,
      "step": 31
    },
    {
      "epoch": 0.09887106357694593,
      "grad_norm": 61.557090759277344,
      "learning_rate": 1.0825793454723324e-06,
      "loss": 22.0322,
      "step": 32
    },
    {
      "epoch": 0.10196078431372549,
      "grad_norm": 58.49174118041992,
      "learning_rate": 1.0275543423681621e-06,
      "loss": 23.4697,
      "step": 33
    },
    {
      "epoch": 0.10505050505050505,
      "grad_norm": 59.54422378540039,
      "learning_rate": 9.72445657631838e-07,
      "loss": 22.6532,
      "step": 34
    },
    {
      "epoch": 0.1081402257872846,
      "grad_norm": 59.265708923339844,
      "learning_rate": 9.174206545276677e-07,
      "loss": 23.3989,
      "step": 35
    },
    {
      "epoch": 0.11122994652406418,
      "grad_norm": 60.273563385009766,
      "learning_rate": 8.626464421815918e-07,
      "loss": 23.3648,
      "step": 36
    },
    {
      "epoch": 0.11431966726084374,
      "grad_norm": 60.52754592895508,
      "learning_rate": 8.082893680762618e-07,
      "loss": 22.2345,
      "step": 37
    },
    {
      "epoch": 0.1174093879976233,
      "grad_norm": 60.50078582763672,
      "learning_rate": 7.545145128592008e-07,
      "loss": 21.6119,
      "step": 38
    },
    {
      "epoch": 0.12049910873440285,
      "grad_norm": 59.05815505981445,
      "learning_rate": 7.014851889983057e-07,
      "loss": 22.5614,
      "step": 39
    },
    {
      "epoch": 0.12358882947118241,
      "grad_norm": 61.67491149902344,
      "learning_rate": 6.493624448072457e-07,
      "loss": 23.6093,
      "step": 40
    }
  ],
  "logging_steps": 1,
  "max_steps": 62,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 10,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 6.637517228998656e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}
|
|