{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.12843565373747753,
  "eval_steps": 500,
  "global_step": 2000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.003210891343436938,
      "grad_norm": 13.043695449829102,
      "learning_rate": 4.000000000000001e-06,
      "loss": 2.1383,
      "step": 50
    },
    {
      "epoch": 0.006421782686873876,
      "grad_norm": 0.6080975532531738,
      "learning_rate": 8.000000000000001e-06,
      "loss": 0.243,
      "step": 100
    },
    {
      "epoch": 0.009632674030310814,
      "grad_norm": 0.49613457918167114,
      "learning_rate": 1.2e-05,
      "loss": 0.1628,
      "step": 150
    },
    {
      "epoch": 0.012843565373747753,
      "grad_norm": 0.3426452875137329,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 0.1486,
      "step": 200
    },
    {
      "epoch": 0.016054456717184692,
      "grad_norm": 0.387260377407074,
      "learning_rate": 2e-05,
      "loss": 0.1687,
      "step": 250
    },
    {
      "epoch": 0.019265348060621627,
      "grad_norm": 0.3262602388858795,
      "learning_rate": 1.9975640502598243e-05,
      "loss": 0.1448,
      "step": 300
    },
    {
      "epoch": 0.022476239404058566,
      "grad_norm": 0.2525831460952759,
      "learning_rate": 1.9902680687415704e-05,
      "loss": 0.1396,
      "step": 350
    },
    {
      "epoch": 0.025687130747495505,
      "grad_norm": 0.22410941123962402,
      "learning_rate": 1.9781476007338058e-05,
      "loss": 0.1338,
      "step": 400
    },
    {
      "epoch": 0.028898022090932445,
      "grad_norm": 0.24438028037548065,
      "learning_rate": 1.961261695938319e-05,
      "loss": 0.134,
      "step": 450
    },
    {
      "epoch": 0.032108913434369384,
      "grad_norm": 0.27699851989746094,
      "learning_rate": 1.9396926207859085e-05,
      "loss": 0.1277,
      "step": 500
    },
    {
      "epoch": 0.035319804777806316,
      "grad_norm": 0.22139626741409302,
      "learning_rate": 1.913545457642601e-05,
      "loss": 0.128,
      "step": 550
    },
    {
      "epoch": 0.038530696121243255,
      "grad_norm": 0.22160793840885162,
      "learning_rate": 1.8829475928589272e-05,
      "loss": 0.1281,
      "step": 600
    },
    {
      "epoch": 0.041741587464680194,
      "grad_norm": 0.2036096453666687,
      "learning_rate": 1.848048096156426e-05,
      "loss": 0.1261,
      "step": 650
    },
    {
      "epoch": 0.04495247880811713,
      "grad_norm": 0.1912582367658615,
      "learning_rate": 1.8090169943749477e-05,
      "loss": 0.1222,
      "step": 700
    },
    {
      "epoch": 0.04816337015155407,
      "grad_norm": 0.2080652415752411,
      "learning_rate": 1.766044443118978e-05,
      "loss": 0.1208,
      "step": 750
    },
    {
      "epoch": 0.05137426149499101,
      "grad_norm": 0.22128906846046448,
      "learning_rate": 1.7193398003386514e-05,
      "loss": 0.1205,
      "step": 800
    },
    {
      "epoch": 0.05458515283842795,
      "grad_norm": 0.18952251970767975,
      "learning_rate": 1.6691306063588583e-05,
      "loss": 0.1228,
      "step": 850
    },
    {
      "epoch": 0.05779604418186489,
      "grad_norm": 0.20455780625343323,
      "learning_rate": 1.6156614753256583e-05,
      "loss": 0.1169,
      "step": 900
    },
    {
      "epoch": 0.06100693552530182,
      "grad_norm": 0.19518840312957764,
      "learning_rate": 1.5591929034707468e-05,
      "loss": 0.1207,
      "step": 950
    },
    {
      "epoch": 0.06421782686873877,
      "grad_norm": 0.16957563161849976,
      "learning_rate": 1.5000000000000002e-05,
      "loss": 0.1177,
      "step": 1000
    },
    {
      "epoch": 0.0674287182121757,
      "grad_norm": 0.20795853435993195,
      "learning_rate": 1.4383711467890776e-05,
      "loss": 0.1141,
      "step": 1050
    },
    {
      "epoch": 0.07063960955561263,
      "grad_norm": 0.21261201798915863,
      "learning_rate": 1.3746065934159123e-05,
      "loss": 0.1144,
      "step": 1100
    },
    {
      "epoch": 0.07385050089904957,
      "grad_norm": 0.2101193517446518,
      "learning_rate": 1.3090169943749475e-05,
      "loss": 0.1172,
      "step": 1150
    },
    {
      "epoch": 0.07706139224248651,
      "grad_norm": 0.17617927491664886,
      "learning_rate": 1.2419218955996677e-05,
      "loss": 0.1133,
      "step": 1200
    },
    {
      "epoch": 0.08027228358592345,
      "grad_norm": 0.2035655379295349,
      "learning_rate": 1.1736481776669307e-05,
      "loss": 0.1132,
      "step": 1250
    },
    {
      "epoch": 0.08348317492936039,
      "grad_norm": 0.20784136652946472,
      "learning_rate": 1.1045284632676535e-05,
      "loss": 0.11,
      "step": 1300
    },
    {
      "epoch": 0.08669406627279733,
      "grad_norm": 0.17047621309757233,
      "learning_rate": 1.0348994967025012e-05,
      "loss": 0.1082,
      "step": 1350
    },
    {
      "epoch": 0.08990495761623427,
      "grad_norm": 0.17806276679039001,
      "learning_rate": 9.651005032974994e-06,
      "loss": 0.1101,
      "step": 1400
    },
    {
      "epoch": 0.0931158489596712,
      "grad_norm": 0.19702066481113434,
      "learning_rate": 8.954715367323468e-06,
      "loss": 0.1099,
      "step": 1450
    },
    {
      "epoch": 0.09632674030310814,
      "grad_norm": 0.16987255215644836,
      "learning_rate": 8.263518223330698e-06,
      "loss": 0.1063,
      "step": 1500
    },
    {
      "epoch": 0.09953763164654508,
      "grad_norm": 0.188665971159935,
      "learning_rate": 7.580781044003324e-06,
      "loss": 0.1093,
      "step": 1550
    },
    {
      "epoch": 0.10274852298998202,
      "grad_norm": 0.18021029233932495,
      "learning_rate": 6.909830056250527e-06,
      "loss": 0.1079,
      "step": 1600
    },
    {
      "epoch": 0.10595941433341896,
      "grad_norm": 0.19734810292720795,
      "learning_rate": 6.25393406584088e-06,
      "loss": 0.1062,
      "step": 1650
    },
    {
      "epoch": 0.1091703056768559,
      "grad_norm": 0.184854194521904,
      "learning_rate": 5.616288532109225e-06,
      "loss": 0.1051,
      "step": 1700
    },
    {
      "epoch": 0.11238119702029284,
      "grad_norm": 0.17506983876228333,
      "learning_rate": 5.000000000000003e-06,
      "loss": 0.1049,
      "step": 1750
    },
    {
      "epoch": 0.11559208836372978,
      "grad_norm": 0.19541677832603455,
      "learning_rate": 4.408070965292534e-06,
      "loss": 0.1033,
      "step": 1800
    },
    {
      "epoch": 0.1188029797071667,
      "grad_norm": 0.19553428888320923,
      "learning_rate": 3.8433852467434175e-06,
      "loss": 0.1012,
      "step": 1850
    },
    {
      "epoch": 0.12201387105060364,
      "grad_norm": 0.17606262862682343,
      "learning_rate": 3.308693936411421e-06,
      "loss": 0.1035,
      "step": 1900
    },
    {
      "epoch": 0.1252247623940406,
      "grad_norm": 0.17461085319519043,
      "learning_rate": 2.8066019966134907e-06,
      "loss": 0.101,
      "step": 1950
    },
    {
      "epoch": 0.12843565373747753,
      "grad_norm": 0.19402888417243958,
      "learning_rate": 2.339555568810221e-06,
      "loss": 0.1059,
      "step": 2000
    }
  ],
  "logging_steps": 50,
  "max_steps": 2500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 5.196245781970944e+18,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}