{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.12843565373747753,
  "eval_steps": 500,
  "global_step": 2000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.003210891343436938,
      "grad_norm": 58.007633209228516,
      "learning_rate": 4.000000000000001e-06,
      "loss": 10.2661,
      "step": 50
    },
    {
      "epoch": 0.006421782686873876,
      "grad_norm": 7.404308319091797,
      "learning_rate": 8.000000000000001e-06,
      "loss": 5.1561,
      "step": 100
    },
    {
      "epoch": 0.009632674030310814,
      "grad_norm": 4.782848834991455,
      "learning_rate": 1.2e-05,
      "loss": 1.4976,
      "step": 150
    },
    {
      "epoch": 0.012843565373747753,
      "grad_norm": 2.344947576522827,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 0.7909,
      "step": 200
    },
    {
      "epoch": 0.016054456717184692,
      "grad_norm": 1.2314115762710571,
      "learning_rate": 2e-05,
      "loss": 0.5032,
      "step": 250
    },
    {
      "epoch": 0.019265348060621627,
      "grad_norm": 0.564213752746582,
      "learning_rate": 1.9975640502598243e-05,
      "loss": 0.3497,
      "step": 300
    },
    {
      "epoch": 0.022476239404058566,
      "grad_norm": 0.48756763339042664,
      "learning_rate": 1.9902680687415704e-05,
      "loss": 0.2661,
      "step": 350
    },
    {
      "epoch": 0.025687130747495505,
      "grad_norm": 0.2983498275279999,
      "learning_rate": 1.9781476007338058e-05,
      "loss": 0.2259,
      "step": 400
    },
    {
      "epoch": 0.028898022090932445,
      "grad_norm": 0.23339591920375824,
      "learning_rate": 1.961261695938319e-05,
      "loss": 0.2106,
      "step": 450
    },
    {
      "epoch": 0.032108913434369384,
      "grad_norm": 0.20773403346538544,
      "learning_rate": 1.9396926207859085e-05,
      "loss": 0.1925,
      "step": 500
    },
    {
      "epoch": 0.035319804777806316,
      "grad_norm": 0.14296875894069672,
      "learning_rate": 1.913545457642601e-05,
      "loss": 0.19,
      "step": 550
    },
    {
      "epoch": 0.038530696121243255,
      "grad_norm": 0.15217605233192444,
      "learning_rate": 1.8829475928589272e-05,
      "loss": 0.1887,
      "step": 600
    },
    {
      "epoch": 0.041741587464680194,
      "grad_norm": 0.12879417836666107,
      "learning_rate": 1.848048096156426e-05,
      "loss": 0.1825,
      "step": 650
    },
    {
      "epoch": 0.04495247880811713,
      "grad_norm": 0.11689207702875137,
      "learning_rate": 1.8090169943749477e-05,
      "loss": 0.1791,
      "step": 700
    },
    {
      "epoch": 0.04816337015155407,
      "grad_norm": 0.11093489825725555,
      "learning_rate": 1.766044443118978e-05,
      "loss": 0.1763,
      "step": 750
    },
    {
      "epoch": 0.05137426149499101,
      "grad_norm": 0.12710417807102203,
      "learning_rate": 1.7193398003386514e-05,
      "loss": 0.176,
      "step": 800
    },
    {
      "epoch": 0.05458515283842795,
      "grad_norm": 0.11389511078596115,
      "learning_rate": 1.6691306063588583e-05,
      "loss": 0.178,
      "step": 850
    },
    {
      "epoch": 0.05779604418186489,
      "grad_norm": 0.1070656105875969,
      "learning_rate": 1.6156614753256583e-05,
      "loss": 0.1713,
      "step": 900
    },
    {
      "epoch": 0.06100693552530182,
      "grad_norm": 0.1175711378455162,
      "learning_rate": 1.5591929034707468e-05,
      "loss": 0.1762,
      "step": 950
    },
    {
      "epoch": 0.06421782686873877,
      "grad_norm": 0.11087647080421448,
      "learning_rate": 1.5000000000000002e-05,
      "loss": 0.1732,
      "step": 1000
    },
    {
      "epoch": 0.0674287182121757,
      "grad_norm": 0.11354115605354309,
      "learning_rate": 1.4383711467890776e-05,
      "loss": 0.1696,
      "step": 1050
    },
    {
      "epoch": 0.07063960955561263,
      "grad_norm": 0.11086148023605347,
      "learning_rate": 1.3746065934159123e-05,
      "loss": 0.169,
      "step": 1100
    },
    {
      "epoch": 0.07385050089904957,
      "grad_norm": 0.11717818677425385,
      "learning_rate": 1.3090169943749475e-05,
      "loss": 0.1734,
      "step": 1150
    },
    {
      "epoch": 0.07706139224248651,
      "grad_norm": 0.10582283139228821,
      "learning_rate": 1.2419218955996677e-05,
      "loss": 0.1698,
      "step": 1200
    },
    {
      "epoch": 0.08027228358592345,
      "grad_norm": 0.11204802244901657,
      "learning_rate": 1.1736481776669307e-05,
      "loss": 0.1688,
      "step": 1250
    },
    {
      "epoch": 0.08348317492936039,
      "grad_norm": 0.12435556203126907,
      "learning_rate": 1.1045284632676535e-05,
      "loss": 0.1661,
      "step": 1300
    },
    {
      "epoch": 0.08669406627279733,
      "grad_norm": 0.10137902945280075,
      "learning_rate": 1.0348994967025012e-05,
      "loss": 0.1646,
      "step": 1350
    },
    {
      "epoch": 0.08990495761623427,
      "grad_norm": 0.11211050301790237,
      "learning_rate": 9.651005032974994e-06,
      "loss": 0.1676,
      "step": 1400
    },
    {
      "epoch": 0.0931158489596712,
      "grad_norm": 0.10578905791044235,
      "learning_rate": 8.954715367323468e-06,
      "loss": 0.1676,
      "step": 1450
    },
    {
      "epoch": 0.09632674030310814,
      "grad_norm": 0.10442763566970825,
      "learning_rate": 8.263518223330698e-06,
      "loss": 0.164,
      "step": 1500
    },
    {
      "epoch": 0.09953763164654508,
      "grad_norm": 0.11188539862632751,
      "learning_rate": 7.580781044003324e-06,
      "loss": 0.1683,
      "step": 1550
    },
    {
      "epoch": 0.10274852298998202,
      "grad_norm": 0.10560841858386993,
      "learning_rate": 6.909830056250527e-06,
      "loss": 0.1672,
      "step": 1600
    },
    {
      "epoch": 0.10595941433341896,
      "grad_norm": 0.11095188558101654,
      "learning_rate": 6.25393406584088e-06,
      "loss": 0.1653,
      "step": 1650
    },
    {
      "epoch": 0.1091703056768559,
      "grad_norm": 0.10521899163722992,
      "learning_rate": 5.616288532109225e-06,
      "loss": 0.1646,
      "step": 1700
    },
    {
      "epoch": 0.11238119702029284,
      "grad_norm": 0.0966481938958168,
      "learning_rate": 5.000000000000003e-06,
      "loss": 0.1649,
      "step": 1750
    },
    {
      "epoch": 0.11559208836372978,
      "grad_norm": 0.10581862181425095,
      "learning_rate": 4.408070965292534e-06,
      "loss": 0.1641,
      "step": 1800
    },
    {
      "epoch": 0.1188029797071667,
      "grad_norm": 0.11266249418258667,
      "learning_rate": 3.8433852467434175e-06,
      "loss": 0.1604,
      "step": 1850
    },
    {
      "epoch": 0.12201387105060364,
      "grad_norm": 0.10841681063175201,
      "learning_rate": 3.308693936411421e-06,
      "loss": 0.1639,
      "step": 1900
    },
    {
      "epoch": 0.1252247623940406,
      "grad_norm": 0.10658198595046997,
      "learning_rate": 2.8066019966134907e-06,
      "loss": 0.1629,
      "step": 1950
    },
    {
      "epoch": 0.12843565373747753,
      "grad_norm": 0.10766281187534332,
      "learning_rate": 2.339555568810221e-06,
      "loss": 0.1676,
      "step": 2000
    }
  ],
  "logging_steps": 50,
  "max_steps": 2500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 5.209439921504256e+18,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}