{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.212565734846388,
  "eval_steps": 500,
  "global_step": 2000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05535566011624689,
      "grad_norm": 0.7017408609390259,
      "learning_rate": 9.8e-05,
      "loss": 1.0841,
      "step": 50
    },
    {
      "epoch": 0.11071132023249378,
      "grad_norm": 0.1890324503183365,
      "learning_rate": 0.00019800000000000002,
      "loss": 0.1941,
      "step": 100
    },
    {
      "epoch": 0.16606698034874065,
      "grad_norm": 0.13201060891151428,
      "learning_rate": 0.0001998263839556516,
      "loss": 0.1889,
      "step": 150
    },
    {
      "epoch": 0.22142264046498755,
      "grad_norm": 0.1292983591556549,
      "learning_rate": 0.00019929192281085555,
      "loss": 0.1859,
      "step": 200
    },
    {
      "epoch": 0.2767783005812344,
      "grad_norm": 0.1033887267112732,
      "learning_rate": 0.0001983984765530473,
      "loss": 0.1853,
      "step": 250
    },
    {
      "epoch": 0.3321339606974813,
      "grad_norm": 0.1498780995607376,
      "learning_rate": 0.0001971492753936756,
      "loss": 0.1859,
      "step": 300
    },
    {
      "epoch": 0.38748962081372823,
      "grad_norm": 0.15029650926589966,
      "learning_rate": 0.0001955488357587162,
      "loss": 0.1849,
      "step": 350
    },
    {
      "epoch": 0.4428452809299751,
      "grad_norm": 0.1121128648519516,
      "learning_rate": 0.00019360294395975392,
      "loss": 0.1852,
      "step": 400
    },
    {
      "epoch": 0.498200941046222,
      "grad_norm": 0.09638246148824692,
      "learning_rate": 0.00019131863527385433,
      "loss": 0.1828,
      "step": 450
    },
    {
      "epoch": 0.5535566011624689,
      "grad_norm": 0.12960121035575867,
      "learning_rate": 0.0001887041685078625,
      "loss": 0.1832,
      "step": 500
    },
    {
      "epoch": 0.6089122612787158,
      "grad_norm": 0.10663899779319763,
      "learning_rate": 0.0001857689961390886,
      "loss": 0.1805,
      "step": 550
    },
    {
      "epoch": 0.6642679213949626,
      "grad_norm": 0.890275776386261,
      "learning_rate": 0.00018252373014033646,
      "loss": 0.1954,
      "step": 600
    },
    {
      "epoch": 0.7196235815112095,
      "grad_norm": 0.1450059562921524,
      "learning_rate": 0.0001789801036128327,
      "loss": 0.2407,
      "step": 650
    },
    {
      "epoch": 0.7749792416274565,
      "grad_norm": 0.09030313044786453,
      "learning_rate": 0.0001751509283657702,
      "loss": 0.1873,
      "step": 700
    },
    {
      "epoch": 0.8303349017437033,
      "grad_norm": 0.13033421337604523,
      "learning_rate": 0.00017105004859583578,
      "loss": 0.1832,
      "step": 750
    },
    {
      "epoch": 0.8856905618599502,
      "grad_norm": 0.0832882970571518,
      "learning_rate": 0.00016669229083419114,
      "loss": 0.1808,
      "step": 800
    },
    {
      "epoch": 0.941046221976197,
      "grad_norm": 0.06407424807548523,
      "learning_rate": 0.00016209341034187125,
      "loss": 0.1785,
      "step": 850
    },
    {
      "epoch": 0.996401882092444,
      "grad_norm": 0.09908290952444077,
      "learning_rate": 0.00015727003414740492,
      "loss": 0.1803,
      "step": 900
    },
    {
      "epoch": 1.050927207306947,
      "grad_norm": 0.0823020339012146,
      "learning_rate": 0.00015223960093260294,
      "loss": 0.1804,
      "step": 950
    },
    {
      "epoch": 1.106282867423194,
      "grad_norm": 0.06759258359670639,
      "learning_rate": 0.00014702029798385264,
      "loss": 0.1805,
      "step": 1000
    },
    {
      "epoch": 1.161638527539441,
      "grad_norm": 0.09001895040273666,
      "learning_rate": 0.00014163099543686964,
      "loss": 0.1794,
      "step": 1050
    },
    {
      "epoch": 1.2169941876556878,
      "grad_norm": 0.06886181980371475,
      "learning_rate": 0.00013609117805264063,
      "loss": 0.1789,
      "step": 1100
    },
    {
      "epoch": 1.2723498477719346,
      "grad_norm": 0.10757990926504135,
      "learning_rate": 0.0001304208747712189,
      "loss": 0.178,
      "step": 1150
    },
    {
      "epoch": 1.3277055078881816,
      "grad_norm": 0.08470198512077332,
      "learning_rate": 0.00012464058629806633,
      "loss": 0.1798,
      "step": 1200
    },
    {
      "epoch": 1.3830611680044285,
      "grad_norm": 0.07767840474843979,
      "learning_rate": 0.00011877121098475106,
      "loss": 0.1785,
      "step": 1250
    },
    {
      "epoch": 1.4384168281206753,
      "grad_norm": 0.07554102689027786,
      "learning_rate": 0.00011283396927197472,
      "loss": 0.1771,
      "step": 1300
    },
    {
      "epoch": 1.4937724882369223,
      "grad_norm": 0.05920924246311188,
      "learning_rate": 0.00010685032696810226,
      "loss": 0.1782,
      "step": 1350
    },
    {
      "epoch": 1.5491281483531691,
      "grad_norm": 0.10883475095033646,
      "learning_rate": 0.00010084191764057676,
      "loss": 0.1777,
      "step": 1400
    },
    {
      "epoch": 1.604483808469416,
      "grad_norm": 0.0732041448354721,
      "learning_rate": 9.483046440080949e-05,
      "loss": 0.1767,
      "step": 1450
    },
    {
      "epoch": 1.659839468585663,
      "grad_norm": 0.052436333149671555,
      "learning_rate": 8.883770136532834e-05,
      "loss": 0.1772,
      "step": 1500
    },
    {
      "epoch": 1.7151951287019098,
      "grad_norm": 0.05955791100859642,
      "learning_rate": 8.288529507713752e-05,
      "loss": 0.177,
      "step": 1550
    },
    {
      "epoch": 1.7705507888181566,
      "grad_norm": 0.06988000869750977,
      "learning_rate": 7.699476617138598e-05,
      "loss": 0.1768,
      "step": 1600
    },
    {
      "epoch": 1.8259064489344037,
      "grad_norm": 0.06447959691286087,
      "learning_rate": 7.118741156855904e-05,
      "loss": 0.1778,
      "step": 1650
    },
    {
      "epoch": 1.8812621090506503,
      "grad_norm": 0.06343986093997955,
      "learning_rate": 6.548422747649902e-05,
      "loss": 0.1778,
      "step": 1700
    },
    {
      "epoch": 1.9366177691668973,
      "grad_norm": 0.054386403411626816,
      "learning_rate": 5.990583347963793e-05,
      "loss": 0.1789,
      "step": 1750
    },
    {
      "epoch": 1.9919734292831444,
      "grad_norm": 0.06645604968070984,
      "learning_rate": 5.44723979898939e-05,
      "loss": 0.1774,
      "step": 1800
    },
    {
      "epoch": 2.046498754497647,
      "grad_norm": 0.08125816285610199,
      "learning_rate": 4.9203565328759604e-05,
      "loss": 0.1747,
      "step": 1850
    },
    {
      "epoch": 2.101854414613894,
      "grad_norm": 0.05272316932678223,
      "learning_rate": 4.411838470421454e-05,
      "loss": 0.1767,
      "step": 1900
    },
    {
      "epoch": 2.1572100747301413,
      "grad_norm": 0.0572994239628315,
      "learning_rate": 3.923524133924069e-05,
      "loss": 0.1751,
      "step": 1950
    },
    {
      "epoch": 2.212565734846388,
      "grad_norm": 0.055648934096097946,
      "learning_rate": 3.4571790000943973e-05,
      "loss": 0.1742,
      "step": 2000
    }
  ],
  "logging_steps": 50,
  "max_steps": 2712,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3.1463227590937805e+17,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}