{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.12843565373747753,
  "eval_steps": 500,
  "global_step": 2000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.003210891343436938,
      "grad_norm": 8.177531242370605,
      "learning_rate": 4.000000000000001e-06,
      "loss": 2.2773,
      "step": 50
    },
    {
      "epoch": 0.006421782686873876,
      "grad_norm": 0.6572615504264832,
      "learning_rate": 8.000000000000001e-06,
      "loss": 0.2291,
      "step": 100
    },
    {
      "epoch": 0.009632674030310814,
      "grad_norm": 0.6884369254112244,
      "learning_rate": 1.2e-05,
      "loss": 0.1612,
      "step": 150
    },
    {
      "epoch": 0.012843565373747753,
      "grad_norm": 0.32834213972091675,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 0.1469,
      "step": 200
    },
    {
      "epoch": 0.016054456717184692,
      "grad_norm": 0.3822823166847229,
      "learning_rate": 2e-05,
      "loss": 0.1699,
      "step": 250
    },
    {
      "epoch": 0.019265348060621627,
      "grad_norm": 0.3158350884914398,
      "learning_rate": 1.9975640502598243e-05,
      "loss": 0.1448,
      "step": 300
    },
    {
      "epoch": 0.022476239404058566,
      "grad_norm": 984.7891845703125,
      "learning_rate": 1.9902680687415704e-05,
      "loss": 0.1638,
      "step": 350
    },
    {
      "epoch": 0.025687130747495505,
      "grad_norm": 0.2283572256565094,
      "learning_rate": 1.9781476007338058e-05,
      "loss": 0.1346,
      "step": 400
    },
    {
      "epoch": 0.028898022090932445,
      "grad_norm": 0.24141617119312286,
      "learning_rate": 1.961261695938319e-05,
      "loss": 0.1347,
      "step": 450
    },
    {
      "epoch": 0.032108913434369384,
      "grad_norm": 0.2759339213371277,
      "learning_rate": 1.9396926207859085e-05,
      "loss": 0.1269,
      "step": 500
    },
    {
      "epoch": 0.035319804777806316,
      "grad_norm": 0.23010869324207306,
      "learning_rate": 1.913545457642601e-05,
      "loss": 0.1286,
      "step": 550
    },
    {
      "epoch": 0.038530696121243255,
      "grad_norm": 0.22603748738765717,
      "learning_rate": 1.8829475928589272e-05,
      "loss": 0.1288,
      "step": 600
    },
    {
      "epoch": 0.041741587464680194,
      "grad_norm": 0.20579472184181213,
      "learning_rate": 1.848048096156426e-05,
      "loss": 0.1253,
      "step": 650
    },
    {
      "epoch": 0.04495247880811713,
      "grad_norm": 0.19978702068328857,
      "learning_rate": 1.8090169943749477e-05,
      "loss": 0.1231,
      "step": 700
    },
    {
      "epoch": 0.04816337015155407,
      "grad_norm": 0.24899648129940033,
      "learning_rate": 1.766044443118978e-05,
      "loss": 0.1225,
      "step": 750
    },
    {
      "epoch": 0.05137426149499101,
      "grad_norm": 0.2297627180814743,
      "learning_rate": 1.7193398003386514e-05,
      "loss": 0.1214,
      "step": 800
    },
    {
      "epoch": 0.05458515283842795,
      "grad_norm": 0.1948223114013672,
      "learning_rate": 1.6691306063588583e-05,
      "loss": 0.1233,
      "step": 850
    },
    {
      "epoch": 0.05779604418186489,
      "grad_norm": 0.2076019048690796,
      "learning_rate": 1.6156614753256583e-05,
      "loss": 0.1173,
      "step": 900
    },
    {
      "epoch": 0.06100693552530182,
      "grad_norm": 0.1943691223859787,
      "learning_rate": 1.5591929034707468e-05,
      "loss": 0.121,
      "step": 950
    },
    {
      "epoch": 0.06421782686873877,
      "grad_norm": 0.1736869364976883,
      "learning_rate": 1.5000000000000002e-05,
      "loss": 0.1181,
      "step": 1000
    },
    {
      "epoch": 0.0674287182121757,
      "grad_norm": 0.2161734253168106,
      "learning_rate": 1.4383711467890776e-05,
      "loss": 0.1145,
      "step": 1050
    },
    {
      "epoch": 0.07063960955561263,
      "grad_norm": 0.21289649605751038,
      "learning_rate": 1.3746065934159123e-05,
      "loss": 0.1148,
      "step": 1100
    },
    {
      "epoch": 0.07385050089904957,
      "grad_norm": 0.21228502690792084,
      "learning_rate": 1.3090169943749475e-05,
      "loss": 0.1176,
      "step": 1150
    },
    {
      "epoch": 0.07706139224248651,
      "grad_norm": 0.16968905925750732,
      "learning_rate": 1.2419218955996677e-05,
      "loss": 0.1136,
      "step": 1200
    },
    {
      "epoch": 0.08027228358592345,
      "grad_norm": 0.20333637297153473,
      "learning_rate": 1.1736481776669307e-05,
      "loss": 0.1135,
      "step": 1250
    },
    {
      "epoch": 0.08348317492936039,
      "grad_norm": 0.21027198433876038,
      "learning_rate": 1.1045284632676535e-05,
      "loss": 0.1103,
      "step": 1300
    },
    {
      "epoch": 0.08669406627279733,
      "grad_norm": 0.16708673536777496,
      "learning_rate": 1.0348994967025012e-05,
      "loss": 0.1085,
      "step": 1350
    },
    {
      "epoch": 0.08990495761623427,
      "grad_norm": 0.1804681122303009,
      "learning_rate": 9.651005032974994e-06,
      "loss": 0.1104,
      "step": 1400
    },
    {
      "epoch": 0.0931158489596712,
      "grad_norm": 0.19710887968540192,
      "learning_rate": 8.954715367323468e-06,
      "loss": 0.1102,
      "step": 1450
    },
    {
      "epoch": 0.09632674030310814,
      "grad_norm": 0.17004291713237762,
      "learning_rate": 8.263518223330698e-06,
      "loss": 0.1065,
      "step": 1500
    },
    {
      "epoch": 0.09953763164654508,
      "grad_norm": 0.184707373380661,
      "learning_rate": 7.580781044003324e-06,
      "loss": 0.1095,
      "step": 1550
    },
    {
      "epoch": 0.10274852298998202,
      "grad_norm": 0.1840941607952118,
      "learning_rate": 6.909830056250527e-06,
      "loss": 0.1082,
      "step": 1600
    },
    {
      "epoch": 0.10595941433341896,
      "grad_norm": 0.1997130811214447,
      "learning_rate": 6.25393406584088e-06,
      "loss": 0.1065,
      "step": 1650
    },
    {
      "epoch": 0.1091703056768559,
      "grad_norm": 0.18672604858875275,
      "learning_rate": 5.616288532109225e-06,
      "loss": 0.1053,
      "step": 1700
    },
    {
      "epoch": 0.11238119702029284,
      "grad_norm": 0.17353491485118866,
      "learning_rate": 5.000000000000003e-06,
      "loss": 0.1051,
      "step": 1750
    },
    {
      "epoch": 0.11559208836372978,
      "grad_norm": 0.1928931623697281,
      "learning_rate": 4.408070965292534e-06,
      "loss": 0.1036,
      "step": 1800
    },
    {
      "epoch": 0.1188029797071667,
      "grad_norm": 0.19479507207870483,
      "learning_rate": 3.8433852467434175e-06,
      "loss": 0.1014,
      "step": 1850
    },
    {
      "epoch": 0.12201387105060364,
      "grad_norm": 0.17657093703746796,
      "learning_rate": 3.308693936411421e-06,
      "loss": 0.1036,
      "step": 1900
    },
    {
      "epoch": 0.1252247623940406,
      "grad_norm": 0.17746590077877045,
      "learning_rate": 2.8066019966134907e-06,
      "loss": 0.1012,
      "step": 1950
    },
    {
      "epoch": 0.12843565373747753,
      "grad_norm": 0.18518978357315063,
      "learning_rate": 2.339555568810221e-06,
      "loss": 0.106,
      "step": 2000
    }
  ],
  "logging_steps": 50,
  "max_steps": 2500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 5.196245781970944e+18,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}