Grogros-llama-2-7b-hf-weightsmark-std0.005-key0-ft-OpenMathInstruct / checkpoint-2000 / trainer_state.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.12843565373747753,
  "eval_steps": 500,
  "global_step": 2000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.003210891343436938,
      "grad_norm": 4.611270904541016,
      "learning_rate": 4.000000000000001e-06,
      "loss": 2.0089,
      "step": 50
    },
    {
      "epoch": 0.006421782686873876,
      "grad_norm": 1.1913059949874878,
      "learning_rate": 8.000000000000001e-06,
      "loss": 0.2243,
      "step": 100
    },
    {
      "epoch": 0.009632674030310814,
      "grad_norm": 0.6590245366096497,
      "learning_rate": 1.2e-05,
      "loss": 0.1619,
      "step": 150
    },
    {
      "epoch": 0.012843565373747753,
      "grad_norm": 0.38334786891937256,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 0.1455,
      "step": 200
    },
    {
      "epoch": 0.016054456717184692,
      "grad_norm": 0.3391067683696747,
      "learning_rate": 2e-05,
      "loss": 0.4212,
      "step": 250
    },
    {
      "epoch": 0.019265348060621627,
      "grad_norm": 0.30926135182380676,
      "learning_rate": 1.9975640502598243e-05,
      "loss": 0.1444,
      "step": 300
    },
    {
      "epoch": 0.022476239404058566,
      "grad_norm": 0.26479780673980713,
      "learning_rate": 1.9902680687415704e-05,
      "loss": 0.1393,
      "step": 350
    },
    {
      "epoch": 0.025687130747495505,
      "grad_norm": 0.22582417726516724,
      "learning_rate": 1.9781476007338058e-05,
      "loss": 0.1415,
      "step": 400
    },
    {
      "epoch": 0.028898022090932445,
      "grad_norm": 0.29131439328193665,
      "learning_rate": 1.961261695938319e-05,
      "loss": 0.1339,
      "step": 450
    },
    {
      "epoch": 0.032108913434369384,
      "grad_norm": 0.2689756453037262,
      "learning_rate": 1.9396926207859085e-05,
      "loss": 0.1292,
      "step": 500
    },
    {
      "epoch": 0.035319804777806316,
      "grad_norm": 0.2167866826057434,
      "learning_rate": 1.913545457642601e-05,
      "loss": 0.1278,
      "step": 550
    },
    {
      "epoch": 0.038530696121243255,
      "grad_norm": 0.21978875994682312,
      "learning_rate": 1.8829475928589272e-05,
      "loss": 0.1292,
      "step": 600
    },
    {
      "epoch": 0.041741587464680194,
      "grad_norm": 0.2072092741727829,
      "learning_rate": 1.848048096156426e-05,
      "loss": 0.1245,
      "step": 650
    },
    {
      "epoch": 0.04495247880811713,
      "grad_norm": 0.19041530787944794,
      "learning_rate": 1.8090169943749477e-05,
      "loss": 0.1224,
      "step": 700
    },
    {
      "epoch": 0.04816337015155407,
      "grad_norm": 0.20300881564617157,
      "learning_rate": 1.766044443118978e-05,
      "loss": 0.1209,
      "step": 750
    },
    {
      "epoch": 0.05137426149499101,
      "grad_norm": 0.22919495403766632,
      "learning_rate": 1.7193398003386514e-05,
      "loss": 0.1204,
      "step": 800
    },
    {
      "epoch": 0.05458515283842795,
      "grad_norm": 0.19864174723625183,
      "learning_rate": 1.6691306063588583e-05,
      "loss": 0.123,
      "step": 850
    },
    {
      "epoch": 0.05779604418186489,
      "grad_norm": 0.20735569298267365,
      "learning_rate": 1.6156614753256583e-05,
      "loss": 0.1171,
      "step": 900
    },
    {
      "epoch": 0.06100693552530182,
      "grad_norm": 0.1952231377363205,
      "learning_rate": 1.5591929034707468e-05,
      "loss": 0.1209,
      "step": 950
    },
    {
      "epoch": 0.06421782686873877,
      "grad_norm": 0.1777598112821579,
      "learning_rate": 1.5000000000000002e-05,
      "loss": 0.1178,
      "step": 1000
    },
    {
      "epoch": 0.0674287182121757,
      "grad_norm": 0.21301181614398956,
      "learning_rate": 1.4383711467890776e-05,
      "loss": 0.1142,
      "step": 1050
    },
    {
      "epoch": 0.07063960955561263,
      "grad_norm": 0.2080482542514801,
      "learning_rate": 1.3746065934159123e-05,
      "loss": 0.1146,
      "step": 1100
    },
    {
      "epoch": 0.07385050089904957,
      "grad_norm": 0.21014921367168427,
      "learning_rate": 1.3090169943749475e-05,
      "loss": 0.1174,
      "step": 1150
    },
    {
      "epoch": 0.07706139224248651,
      "grad_norm": 0.17024265229701996,
      "learning_rate": 1.2419218955996677e-05,
      "loss": 0.1135,
      "step": 1200
    },
    {
      "epoch": 0.08027228358592345,
      "grad_norm": 0.20533761382102966,
      "learning_rate": 1.1736481776669307e-05,
      "loss": 0.1133,
      "step": 1250
    },
    {
      "epoch": 0.08348317492936039,
      "grad_norm": 0.2093597799539566,
      "learning_rate": 1.1045284632676535e-05,
      "loss": 0.1102,
      "step": 1300
    },
    {
      "epoch": 0.08669406627279733,
      "grad_norm": 0.170538991689682,
      "learning_rate": 1.0348994967025012e-05,
      "loss": 0.1084,
      "step": 1350
    },
    {
      "epoch": 0.08990495761623427,
      "grad_norm": 0.18303751945495605,
      "learning_rate": 9.651005032974994e-06,
      "loss": 0.1102,
      "step": 1400
    },
    {
      "epoch": 0.0931158489596712,
      "grad_norm": 0.19629621505737305,
      "learning_rate": 8.954715367323468e-06,
      "loss": 0.11,
      "step": 1450
    },
    {
      "epoch": 0.09632674030310814,
      "grad_norm": 0.17346934974193573,
      "learning_rate": 8.263518223330698e-06,
      "loss": 0.1064,
      "step": 1500
    },
    {
      "epoch": 0.09953763164654508,
      "grad_norm": 0.18598853051662445,
      "learning_rate": 7.580781044003324e-06,
      "loss": 0.1094,
      "step": 1550
    },
    {
      "epoch": 0.10274852298998202,
      "grad_norm": 0.1827567219734192,
      "learning_rate": 6.909830056250527e-06,
      "loss": 0.1081,
      "step": 1600
    },
    {
      "epoch": 0.10595941433341896,
      "grad_norm": 0.20123536884784698,
      "learning_rate": 6.25393406584088e-06,
      "loss": 0.1064,
      "step": 1650
    },
    {
      "epoch": 0.1091703056768559,
      "grad_norm": 0.18791522085666656,
      "learning_rate": 5.616288532109225e-06,
      "loss": 0.1052,
      "step": 1700
    },
    {
      "epoch": 0.11238119702029284,
      "grad_norm": 0.17722520232200623,
      "learning_rate": 5.000000000000003e-06,
      "loss": 0.105,
      "step": 1750
    },
    {
      "epoch": 0.11559208836372978,
      "grad_norm": 0.19842611253261566,
      "learning_rate": 4.408070965292534e-06,
      "loss": 0.1035,
      "step": 1800
    },
    {
      "epoch": 0.1188029797071667,
      "grad_norm": 0.19638550281524658,
      "learning_rate": 3.8433852467434175e-06,
      "loss": 0.1012,
      "step": 1850
    },
    {
      "epoch": 0.12201387105060364,
      "grad_norm": 0.17620830237865448,
      "learning_rate": 3.308693936411421e-06,
      "loss": 0.1036,
      "step": 1900
    },
    {
      "epoch": 0.1252247623940406,
      "grad_norm": 0.17403462529182434,
      "learning_rate": 2.8066019966134907e-06,
      "loss": 0.1011,
      "step": 1950
    },
    {
      "epoch": 0.12843565373747753,
      "grad_norm": 0.18668928742408752,
      "learning_rate": 2.339555568810221e-06,
      "loss": 0.1059,
      "step": 2000
    }
  ],
  "logging_steps": 50,
  "max_steps": 2500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 5.196245781970944e+18,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}
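
For reference, the state above is straightforward to inspect programmatically. The sketch below is a minimal Python example, assuming the file is available locally at checkpoint-2000/trainer_state.json (the path is an assumption). It also checks a plausible reading of the learning_rate column: the values are consistent with transformers' "cosine" scheduler, i.e. linear warmup followed by cosine decay, with an inferred base_lr of 2e-05, 250 warmup steps, and the max_steps of 2500 recorded in the file; those hyperparameters are reverse-engineered from the log, not stored in trainer_state.json itself.

import json
import math

# Load the checkpoint's trainer state (path is an assumption; point it at
# wherever checkpoint-2000/trainer_state.json lives locally).
with open("checkpoint-2000/trainer_state.json") as f:
    state = json.load(f)

history = state["log_history"]
print(f"entries logged: {len(history)}, final loss: {history[-1]['loss']}")

# Inferred schedule hyperparameters (not read from the file): base LR and
# warmup length are deduced from the logged learning_rate values.
base_lr, warmup, max_steps = 2e-05, 250, state["max_steps"]

def expected_lr(step: int) -> float:
    """Linear warmup to base_lr, then cosine decay to 0 at max_steps."""
    if step < warmup:
        return base_lr * step / warmup
    progress = (step - warmup) / (max_steps - warmup)
    return base_lr * 0.5 * (1.0 + math.cos(math.pi * progress))

for entry in history:
    assert math.isclose(entry["learning_rate"], expected_lr(entry["step"]),
                        rel_tol=1e-9), entry
print("learning_rate column matches linear warmup + cosine decay")

Under these assumed hyperparameters every logged value reproduces exactly, e.g. step 2000 gives 2e-05 * 0.5 * (1 + cos(pi * 1750/2250)) = 2.3395...e-06, matching the final entry above.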