{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 40686,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03686771862557145,
      "grad_norm": 8.30236530303955,
      "learning_rate": 4.938676694686133e-05,
      "loss": 0.7878,
      "step": 500
    },
    {
      "epoch": 0.0737354372511429,
      "grad_norm": 6.442229270935059,
      "learning_rate": 4.8772304969768473e-05,
      "loss": 0.5945,
      "step": 1000
    },
    {
      "epoch": 0.11060315587671435,
      "grad_norm": 5.837690830230713,
      "learning_rate": 4.815784299267562e-05,
      "loss": 0.5546,
      "step": 1500
    },
    {
      "epoch": 0.1474708745022858,
      "grad_norm": 6.377337455749512,
      "learning_rate": 4.7543381015582754e-05,
      "loss": 0.5348,
      "step": 2000
    },
    {
      "epoch": 0.18433859312785725,
      "grad_norm": 6.086786270141602,
      "learning_rate": 4.69289190384899e-05,
      "loss": 0.5203,
      "step": 2500
    },
    {
      "epoch": 0.2212063117534287,
      "grad_norm": 8.050482749938965,
      "learning_rate": 4.631445706139704e-05,
      "loss": 0.506,
      "step": 3000
    },
    {
      "epoch": 0.2580740303790002,
      "grad_norm": 6.228991508483887,
      "learning_rate": 4.5699995084304186e-05,
      "loss": 0.49,
      "step": 3500
    },
    {
      "epoch": 0.2949417490045716,
      "grad_norm": 9.439841270446777,
      "learning_rate": 4.508553310721132e-05,
      "loss": 0.4804,
      "step": 4000
    },
    {
      "epoch": 0.3318094676301431,
      "grad_norm": 8.305259704589844,
      "learning_rate": 4.4471071130118466e-05,
      "loss": 0.4785,
      "step": 4500
    },
    {
      "epoch": 0.3686771862557145,
      "grad_norm": 9.281148910522461,
      "learning_rate": 4.385660915302561e-05,
      "loss": 0.4656,
      "step": 5000
    },
    {
      "epoch": 0.405544904881286,
      "grad_norm": 10.092153549194336,
      "learning_rate": 4.3242147175932754e-05,
      "loss": 0.4575,
      "step": 5500
    },
    {
      "epoch": 0.4424126235068574,
      "grad_norm": 5.881900787353516,
      "learning_rate": 4.26276851988399e-05,
      "loss": 0.4653,
      "step": 6000
    },
    {
      "epoch": 0.47928034213242887,
      "grad_norm": 5.321037769317627,
      "learning_rate": 4.201322322174704e-05,
      "loss": 0.4597,
      "step": 6500
    },
    {
      "epoch": 0.5161480607580003,
      "grad_norm": 3.0871853828430176,
      "learning_rate": 4.1398761244654185e-05,
      "loss": 0.4398,
      "step": 7000
    },
    {
      "epoch": 0.5530157793835717,
      "grad_norm": 6.368654251098633,
      "learning_rate": 4.078429926756133e-05,
      "loss": 0.4489,
      "step": 7500
    },
    {
      "epoch": 0.5898834980091432,
      "grad_norm": 5.613560676574707,
      "learning_rate": 4.0169837290468466e-05,
      "loss": 0.4438,
      "step": 8000
    },
    {
      "epoch": 0.6267512166347147,
      "grad_norm": 6.292891979217529,
      "learning_rate": 3.955537531337561e-05,
      "loss": 0.4335,
      "step": 8500
    },
    {
      "epoch": 0.6636189352602861,
      "grad_norm": 3.034226179122925,
      "learning_rate": 3.8940913336282753e-05,
      "loss": 0.4331,
      "step": 9000
    },
    {
      "epoch": 0.7004866538858575,
      "grad_norm": 3.5885934829711914,
      "learning_rate": 3.83264513591899e-05,
      "loss": 0.4289,
      "step": 9500
    },
    {
      "epoch": 0.737354372511429,
      "grad_norm": 7.238613128662109,
      "learning_rate": 3.7711989382097034e-05,
      "loss": 0.4218,
      "step": 10000
    },
    {
      "epoch": 0.7742220911370005,
      "grad_norm": 6.084052562713623,
      "learning_rate": 3.709752740500418e-05,
      "loss": 0.4196,
      "step": 10500
    },
    {
      "epoch": 0.811089809762572,
      "grad_norm": 8.307782173156738,
      "learning_rate": 3.648306542791132e-05,
      "loss": 0.4255,
      "step": 11000
    },
    {
      "epoch": 0.8479575283881433,
      "grad_norm": 4.905970573425293,
      "learning_rate": 3.5868603450818465e-05,
      "loss": 0.4222,
      "step": 11500
    },
    {
      "epoch": 0.8848252470137148,
      "grad_norm": 4.488884449005127,
      "learning_rate": 3.52541414737256e-05,
      "loss": 0.4149,
      "step": 12000
    },
    {
      "epoch": 0.9216929656392863,
      "grad_norm": 9.317034721374512,
      "learning_rate": 3.4639679496632746e-05,
      "loss": 0.4005,
      "step": 12500
    },
    {
      "epoch": 0.9585606842648577,
      "grad_norm": 3.8271901607513428,
      "learning_rate": 3.402521751953989e-05,
      "loss": 0.4114,
      "step": 13000
    },
    {
      "epoch": 0.9954284028904291,
      "grad_norm": 5.5162482261657715,
      "learning_rate": 3.3410755542447034e-05,
      "loss": 0.4051,
      "step": 13500
    },
    {
      "epoch": 1.0322961215160005,
      "grad_norm": 5.347577095031738,
      "learning_rate": 3.279629356535418e-05,
      "loss": 0.3568,
      "step": 14000
    },
    {
      "epoch": 1.069163840141572,
      "grad_norm": 4.08383321762085,
      "learning_rate": 3.218183158826132e-05,
      "loss": 0.3536,
      "step": 14500
    },
    {
      "epoch": 1.1060315587671434,
      "grad_norm": 3.5846598148345947,
      "learning_rate": 3.1567369611168465e-05,
      "loss": 0.3755,
      "step": 15000
    },
    {
      "epoch": 1.142899277392715,
      "grad_norm": 11.44876480102539,
      "learning_rate": 3.095290763407561e-05,
      "loss": 0.3682,
      "step": 15500
    },
    {
      "epoch": 1.1797669960182864,
      "grad_norm": 4.519806385040283,
      "learning_rate": 3.0338445656982746e-05,
      "loss": 0.3671,
      "step": 16000
    },
    {
      "epoch": 1.2166347146438579,
      "grad_norm": 7.143647193908691,
      "learning_rate": 2.972398367988989e-05,
      "loss": 0.3667,
      "step": 16500
    },
    {
      "epoch": 1.2535024332694293,
      "grad_norm": 10.113604545593262,
      "learning_rate": 2.9109521702797033e-05,
      "loss": 0.3656,
      "step": 17000
    },
    {
      "epoch": 1.2903701518950008,
      "grad_norm": 3.7289226055145264,
      "learning_rate": 2.8495059725704177e-05,
      "loss": 0.3593,
      "step": 17500
    },
    {
      "epoch": 1.3272378705205723,
      "grad_norm": 4.099955081939697,
      "learning_rate": 2.7880597748611314e-05,
      "loss": 0.3634,
      "step": 18000
    },
    {
      "epoch": 1.3641055891461438,
      "grad_norm": 5.327935218811035,
      "learning_rate": 2.7266135771518458e-05,
      "loss": 0.3661,
      "step": 18500
    },
    {
      "epoch": 1.400973307771715,
      "grad_norm": 4.683801174163818,
      "learning_rate": 2.66516737944256e-05,
      "loss": 0.3525,
      "step": 19000
    },
    {
      "epoch": 1.4378410263972865,
      "grad_norm": 6.552036762237549,
      "learning_rate": 2.6037211817332745e-05,
      "loss": 0.3589,
      "step": 19500
    },
    {
      "epoch": 1.474708745022858,
      "grad_norm": 6.014011859893799,
      "learning_rate": 2.5422749840239886e-05,
      "loss": 0.3609,
      "step": 20000
    },
    {
      "epoch": 1.5115764636484295,
      "grad_norm": 4.739955902099609,
      "learning_rate": 2.480828786314703e-05,
      "loss": 0.3574,
      "step": 20500
    },
    {
      "epoch": 1.548444182274001,
      "grad_norm": 8.343321800231934,
      "learning_rate": 2.4193825886054173e-05,
      "loss": 0.3513,
      "step": 21000
    },
    {
      "epoch": 1.5853119008995722,
      "grad_norm": 5.646194934844971,
      "learning_rate": 2.3579363908961314e-05,
      "loss": 0.3614,
      "step": 21500
    },
    {
      "epoch": 1.6221796195251437,
      "grad_norm": 5.9467973709106445,
      "learning_rate": 2.2964901931868457e-05,
      "loss": 0.3548,
      "step": 22000
    },
    {
      "epoch": 1.6590473381507151,
      "grad_norm": 7.322890281677246,
      "learning_rate": 2.2350439954775598e-05,
      "loss": 0.3493,
      "step": 22500
    },
    {
      "epoch": 1.6959150567762866,
      "grad_norm": 6.996487617492676,
      "learning_rate": 2.173597797768274e-05,
      "loss": 0.3569,
      "step": 23000
    },
    {
      "epoch": 1.732782775401858,
      "grad_norm": 6.08064079284668,
      "learning_rate": 2.1121516000589882e-05,
      "loss": 0.3471,
      "step": 23500
    },
    {
      "epoch": 1.7696504940274296,
      "grad_norm": 4.0622711181640625,
      "learning_rate": 2.0507054023497026e-05,
      "loss": 0.3391,
      "step": 24000
    },
    {
      "epoch": 1.806518212653001,
      "grad_norm": 5.080845355987549,
      "learning_rate": 1.989259204640417e-05,
      "loss": 0.3456,
      "step": 24500
    },
    {
      "epoch": 1.8433859312785725,
      "grad_norm": 6.117412090301514,
      "learning_rate": 1.9278130069311313e-05,
      "loss": 0.3486,
      "step": 25000
    },
    {
      "epoch": 1.880253649904144,
      "grad_norm": 5.242644786834717,
      "learning_rate": 1.8663668092218454e-05,
      "loss": 0.3445,
      "step": 25500
    },
    {
      "epoch": 1.9171213685297155,
      "grad_norm": 10.171354293823242,
      "learning_rate": 1.8049206115125597e-05,
      "loss": 0.3462,
      "step": 26000
    },
    {
      "epoch": 1.953989087155287,
      "grad_norm": 4.752163887023926,
      "learning_rate": 1.7434744138032738e-05,
      "loss": 0.3495,
      "step": 26500
    },
    {
      "epoch": 1.9908568057808584,
      "grad_norm": 5.205504417419434,
      "learning_rate": 1.682028216093988e-05,
      "loss": 0.349,
      "step": 27000
    },
    {
      "epoch": 2.02772452440643,
      "grad_norm": 5.852423191070557,
      "learning_rate": 1.6205820183847022e-05,
      "loss": 0.3112,
      "step": 27500
    },
    {
      "epoch": 2.064592243032001,
      "grad_norm": 9.391754150390625,
      "learning_rate": 1.5591358206754166e-05,
      "loss": 0.309,
      "step": 28000
    },
    {
      "epoch": 2.1014599616575724,
      "grad_norm": 7.519664287567139,
      "learning_rate": 1.4976896229661308e-05,
      "loss": 0.2971,
      "step": 28500
    },
    {
      "epoch": 2.138327680283144,
      "grad_norm": 4.092874526977539,
      "learning_rate": 1.4362434252568452e-05,
      "loss": 0.3057,
      "step": 29000
    },
    {
      "epoch": 2.1751953989087154,
      "grad_norm": 8.466115951538086,
      "learning_rate": 1.3747972275475594e-05,
      "loss": 0.2992,
      "step": 29500
    },
    {
      "epoch": 2.212063117534287,
      "grad_norm": 4.548240661621094,
      "learning_rate": 1.3133510298382737e-05,
      "loss": 0.3031,
      "step": 30000
    },
    {
      "epoch": 2.2489308361598583,
      "grad_norm": 8.980031967163086,
      "learning_rate": 1.2519048321289878e-05,
      "loss": 0.2981,
      "step": 30500
    },
    {
      "epoch": 2.28579855478543,
      "grad_norm": 7.443713188171387,
      "learning_rate": 1.1904586344197021e-05,
      "loss": 0.3039,
      "step": 31000
    },
    {
      "epoch": 2.3226662734110013,
      "grad_norm": 4.782769203186035,
      "learning_rate": 1.1290124367104164e-05,
      "loss": 0.3038,
      "step": 31500
    },
    {
      "epoch": 2.3595339920365728,
      "grad_norm": 7.365405082702637,
      "learning_rate": 1.0675662390011307e-05,
      "loss": 0.3013,
      "step": 32000
    },
    {
      "epoch": 2.3964017106621442,
      "grad_norm": 4.001774787902832,
      "learning_rate": 1.006120041291845e-05,
      "loss": 0.3031,
      "step": 32500
    },
    {
      "epoch": 2.4332694292877157,
      "grad_norm": 4.557787895202637,
      "learning_rate": 9.446738435825591e-06,
      "loss": 0.3076,
      "step": 33000
    },
    {
      "epoch": 2.470137147913287,
      "grad_norm": 6.75930118560791,
      "learning_rate": 8.832276458732734e-06,
      "loss": 0.2999,
      "step": 33500
    },
    {
      "epoch": 2.5070048665388587,
      "grad_norm": 9.001293182373047,
      "learning_rate": 8.217814481639877e-06,
      "loss": 0.3024,
      "step": 34000
    },
    {
      "epoch": 2.54387258516443,
      "grad_norm": 9.55263900756836,
      "learning_rate": 7.603352504547019e-06,
      "loss": 0.2994,
      "step": 34500
    },
    {
      "epoch": 2.5807403037900016,
      "grad_norm": 5.571417808532715,
      "learning_rate": 6.9888905274541614e-06,
      "loss": 0.3043,
      "step": 35000
    },
    {
      "epoch": 2.617608022415573,
      "grad_norm": 5.260648727416992,
      "learning_rate": 6.374428550361304e-06,
      "loss": 0.2964,
      "step": 35500
    },
    {
      "epoch": 2.6544757410411446,
      "grad_norm": 9.309138298034668,
      "learning_rate": 5.7599665732684464e-06,
      "loss": 0.302,
      "step": 36000
    },
    {
      "epoch": 2.691343459666716,
      "grad_norm": 6.128640651702881,
      "learning_rate": 5.145504596175589e-06,
      "loss": 0.2999,
      "step": 36500
    },
    {
      "epoch": 2.7282111782922875,
      "grad_norm": 8.606386184692383,
      "learning_rate": 4.5310426190827314e-06,
      "loss": 0.2916,
      "step": 37000
    },
    {
      "epoch": 2.7650788969178586,
      "grad_norm": 5.320854663848877,
      "learning_rate": 3.916580641989874e-06,
      "loss": 0.2931,
      "step": 37500
    },
    {
      "epoch": 2.80194661554343,
      "grad_norm": 8.016927719116211,
      "learning_rate": 3.3021186648970164e-06,
      "loss": 0.2912,
      "step": 38000
    },
    {
      "epoch": 2.8388143341690015,
      "grad_norm": 4.088160514831543,
      "learning_rate": 2.687656687804159e-06,
      "loss": 0.3015,
      "step": 38500
    },
    {
      "epoch": 2.875682052794573,
      "grad_norm": 9.513310432434082,
      "learning_rate": 2.0731947107113014e-06,
      "loss": 0.3003,
      "step": 39000
    },
    {
      "epoch": 2.9125497714201445,
      "grad_norm": 3.679173231124878,
      "learning_rate": 1.4587327336184437e-06,
      "loss": 0.2886,
      "step": 39500
    },
    {
      "epoch": 2.949417490045716,
      "grad_norm": 6.186450481414795,
      "learning_rate": 8.442707565255862e-07,
      "loss": 0.2975,
      "step": 40000
    },
    {
      "epoch": 2.9862852086712874,
      "grad_norm": 7.218328475952148,
      "learning_rate": 2.298087794327287e-07,
      "loss": 0.2925,
      "step": 40500
    }
  ],
  "logging_steps": 500,
  "max_steps": 40686,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 9575666049362688.0,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}