{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9746588693957114,
  "eval_steps": 500,
  "global_step": 500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01949317738791423,
      "grad_norm": 9.292141914367676,
      "learning_rate": 4.967511371020143e-05,
      "loss": 6.2548,
      "step": 10
    },
    {
      "epoch": 0.03898635477582846,
      "grad_norm": 7.766495227813721,
      "learning_rate": 4.935022742040286e-05,
      "loss": 5.0175,
      "step": 20
    },
    {
      "epoch": 0.05847953216374269,
      "grad_norm": 6.4375128746032715,
      "learning_rate": 4.902534113060429e-05,
      "loss": 4.59,
      "step": 30
    },
    {
      "epoch": 0.07797270955165692,
      "grad_norm": 7.395759105682373,
      "learning_rate": 4.8700454840805724e-05,
      "loss": 4.2891,
      "step": 40
    },
    {
      "epoch": 0.09746588693957114,
      "grad_norm": 5.542599678039551,
      "learning_rate": 4.837556855100715e-05,
      "loss": 4.1904,
      "step": 50
    },
    {
      "epoch": 0.11695906432748537,
      "grad_norm": 8.749999046325684,
      "learning_rate": 4.805068226120858e-05,
      "loss": 4.1512,
      "step": 60
    },
    {
      "epoch": 0.1364522417153996,
      "grad_norm": 6.6702446937561035,
      "learning_rate": 4.772579597141001e-05,
      "loss": 3.9695,
      "step": 70
    },
    {
      "epoch": 0.15594541910331383,
      "grad_norm": 13.8934326171875,
      "learning_rate": 4.740090968161144e-05,
      "loss": 3.9657,
      "step": 80
    },
    {
      "epoch": 0.17543859649122806,
      "grad_norm": 15.161931037902832,
      "learning_rate": 4.707602339181287e-05,
      "loss": 3.9312,
      "step": 90
    },
    {
      "epoch": 0.1949317738791423,
      "grad_norm": 8.096311569213867,
      "learning_rate": 4.67511371020143e-05,
      "loss": 3.9407,
      "step": 100
    },
    {
      "epoch": 0.21442495126705652,
      "grad_norm": 6.077565670013428,
      "learning_rate": 4.642625081221573e-05,
      "loss": 3.7438,
      "step": 110
    },
    {
      "epoch": 0.23391812865497075,
      "grad_norm": 5.909548282623291,
      "learning_rate": 4.610136452241716e-05,
      "loss": 3.8495,
      "step": 120
    },
    {
      "epoch": 0.253411306042885,
      "grad_norm": 6.919253349304199,
      "learning_rate": 4.577647823261859e-05,
      "loss": 3.8178,
      "step": 130
    },
    {
      "epoch": 0.2729044834307992,
      "grad_norm": 5.3416218757629395,
      "learning_rate": 4.545159194282001e-05,
      "loss": 3.6388,
      "step": 140
    },
    {
      "epoch": 0.29239766081871343,
      "grad_norm": 6.484707832336426,
      "learning_rate": 4.512670565302144e-05,
      "loss": 3.8361,
      "step": 150
    },
    {
      "epoch": 0.31189083820662766,
      "grad_norm": 6.068100929260254,
      "learning_rate": 4.480181936322287e-05,
      "loss": 3.7164,
      "step": 160
    },
    {
      "epoch": 0.3313840155945419,
      "grad_norm": 6.298447608947754,
      "learning_rate": 4.4476933073424304e-05,
      "loss": 3.6589,
      "step": 170
    },
    {
      "epoch": 0.3508771929824561,
      "grad_norm": 4.752441883087158,
      "learning_rate": 4.4152046783625734e-05,
      "loss": 3.7328,
      "step": 180
    },
    {
      "epoch": 0.37037037037037035,
      "grad_norm": 5.050961971282959,
      "learning_rate": 4.3827160493827164e-05,
      "loss": 3.7162,
      "step": 190
    },
    {
      "epoch": 0.3898635477582846,
      "grad_norm": 4.73190975189209,
      "learning_rate": 4.3502274204028594e-05,
      "loss": 3.6748,
      "step": 200
    },
    {
      "epoch": 0.4093567251461988,
      "grad_norm": 10.872115135192871,
      "learning_rate": 4.3177387914230025e-05,
      "loss": 3.7618,
      "step": 210
    },
    {
      "epoch": 0.42884990253411304,
      "grad_norm": 6.119602680206299,
      "learning_rate": 4.285250162443145e-05,
      "loss": 3.747,
      "step": 220
    },
    {
      "epoch": 0.44834307992202727,
      "grad_norm": 5.332015514373779,
      "learning_rate": 4.252761533463288e-05,
      "loss": 3.6214,
      "step": 230
    },
    {
      "epoch": 0.4678362573099415,
      "grad_norm": 5.08021354675293,
      "learning_rate": 4.220272904483431e-05,
      "loss": 3.5122,
      "step": 240
    },
    {
      "epoch": 0.4873294346978557,
      "grad_norm": 4.609364986419678,
      "learning_rate": 4.187784275503574e-05,
      "loss": 3.5281,
      "step": 250
    },
    {
      "epoch": 0.50682261208577,
      "grad_norm": 24.73488998413086,
      "learning_rate": 4.155295646523717e-05,
      "loss": 3.7142,
      "step": 260
    },
    {
      "epoch": 0.5263157894736842,
      "grad_norm": 5.869173526763916,
      "learning_rate": 4.12280701754386e-05,
      "loss": 3.577,
      "step": 270
    },
    {
      "epoch": 0.5458089668615984,
      "grad_norm": 5.6849470138549805,
      "learning_rate": 4.090318388564003e-05,
      "loss": 3.6146,
      "step": 280
    },
    {
      "epoch": 0.5653021442495126,
      "grad_norm": 5.919517517089844,
      "learning_rate": 4.057829759584146e-05,
      "loss": 3.5417,
      "step": 290
    },
    {
      "epoch": 0.5847953216374269,
      "grad_norm": 5.177787780761719,
      "learning_rate": 4.025341130604289e-05,
      "loss": 3.5453,
      "step": 300
    },
    {
      "epoch": 0.6042884990253411,
      "grad_norm": 5.569265842437744,
      "learning_rate": 3.9928525016244314e-05,
      "loss": 3.5203,
      "step": 310
    },
    {
      "epoch": 0.6237816764132553,
      "grad_norm": 6.63994026184082,
      "learning_rate": 3.9603638726445744e-05,
      "loss": 3.5504,
      "step": 320
    },
    {
      "epoch": 0.6432748538011696,
      "grad_norm": 5.785546779632568,
      "learning_rate": 3.9278752436647174e-05,
      "loss": 3.5587,
      "step": 330
    },
    {
      "epoch": 0.6627680311890838,
      "grad_norm": 5.489815711975098,
      "learning_rate": 3.8953866146848605e-05,
      "loss": 3.5612,
      "step": 340
    },
    {
      "epoch": 0.682261208576998,
      "grad_norm": 4.650988578796387,
      "learning_rate": 3.8628979857050035e-05,
      "loss": 3.5062,
      "step": 350
    },
    {
      "epoch": 0.7017543859649122,
      "grad_norm": 5.7823872566223145,
      "learning_rate": 3.8304093567251465e-05,
      "loss": 3.5819,
      "step": 360
    },
    {
      "epoch": 0.7212475633528265,
      "grad_norm": 7.364371299743652,
      "learning_rate": 3.7979207277452896e-05,
      "loss": 3.5845,
      "step": 370
    },
    {
      "epoch": 0.7407407407407407,
      "grad_norm": 6.099256992340088,
      "learning_rate": 3.7654320987654326e-05,
      "loss": 3.5787,
      "step": 380
    },
    {
      "epoch": 0.7602339181286549,
      "grad_norm": 5.115094184875488,
      "learning_rate": 3.7329434697855756e-05,
      "loss": 3.5901,
      "step": 390
    },
    {
      "epoch": 0.7797270955165692,
      "grad_norm": 5.4442830085754395,
      "learning_rate": 3.700454840805718e-05,
      "loss": 3.4565,
      "step": 400
    },
    {
      "epoch": 0.7992202729044834,
      "grad_norm": 6.6567816734313965,
      "learning_rate": 3.667966211825861e-05,
      "loss": 3.5481,
      "step": 410
    },
    {
      "epoch": 0.8187134502923976,
      "grad_norm": 5.831213474273682,
      "learning_rate": 3.635477582846004e-05,
      "loss": 3.4274,
      "step": 420
    },
    {
      "epoch": 0.8382066276803118,
      "grad_norm": 5.754021167755127,
      "learning_rate": 3.602988953866147e-05,
      "loss": 3.423,
      "step": 430
    },
    {
      "epoch": 0.8576998050682261,
      "grad_norm": 5.590385437011719,
      "learning_rate": 3.57050032488629e-05,
      "loss": 3.6208,
      "step": 440
    },
    {
      "epoch": 0.8771929824561403,
      "grad_norm": 4.159304141998291,
      "learning_rate": 3.538011695906433e-05,
      "loss": 3.4107,
      "step": 450
    },
    {
      "epoch": 0.8966861598440545,
      "grad_norm": 5.699474334716797,
      "learning_rate": 3.505523066926576e-05,
      "loss": 3.3759,
      "step": 460
    },
    {
      "epoch": 0.9161793372319688,
      "grad_norm": 5.0284929275512695,
      "learning_rate": 3.473034437946719e-05,
      "loss": 3.4719,
      "step": 470
    },
    {
      "epoch": 0.935672514619883,
      "grad_norm": 4.884040832519531,
      "learning_rate": 3.440545808966862e-05,
      "loss": 3.5534,
      "step": 480
    },
    {
      "epoch": 0.9551656920077972,
      "grad_norm": 5.428916931152344,
      "learning_rate": 3.4080571799870045e-05,
      "loss": 3.3919,
      "step": 490
    },
    {
      "epoch": 0.9746588693957114,
      "grad_norm": 5.549111843109131,
      "learning_rate": 3.3755685510071476e-05,
      "loss": 3.2799,
      "step": 500
    }
  ],
  "logging_steps": 10,
  "max_steps": 1539,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1045168128000000.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}