{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 14.544,
  "eval_steps": 500,
  "global_step": 465,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.32,
      "grad_norm": 72.67546081542969,
      "learning_rate": 3.8297872340425535e-06,
      "loss": 10.0348,
      "step": 10
    },
    {
      "epoch": 0.64,
      "grad_norm": 41.243839263916016,
      "learning_rate": 8.085106382978723e-06,
      "loss": 4.7312,
      "step": 20
    },
    {
      "epoch": 0.96,
      "grad_norm": 34.776241302490234,
      "learning_rate": 1.2340425531914895e-05,
      "loss": 3.4362,
      "step": 30
    },
    {
      "epoch": 1.256,
      "grad_norm": 12.210166931152344,
      "learning_rate": 1.6595744680851064e-05,
      "loss": 2.5739,
      "step": 40
    },
    {
      "epoch": 1.576,
      "grad_norm": 11.936144828796387,
      "learning_rate": 1.9998870284726968e-05,
      "loss": 2.4828,
      "step": 50
    },
    {
      "epoch": 1.896,
      "grad_norm": 5.305131912231445,
      "learning_rate": 1.9959357045100764e-05,
      "loss": 2.1904,
      "step": 60
    },
    {
      "epoch": 2.192,
      "grad_norm": 6.481784820556641,
      "learning_rate": 1.9863613034027224e-05,
      "loss": 1.893,
      "step": 70
    },
    {
      "epoch": 2.512,
      "grad_norm": 2.9933059215545654,
      "learning_rate": 1.971217882451521e-05,
      "loss": 1.9328,
      "step": 80
    },
    {
      "epoch": 2.832,
      "grad_norm": 5.0720906257629395,
      "learning_rate": 1.9505909417784758e-05,
      "loss": 1.8361,
      "step": 90
    },
    {
      "epoch": 3.128,
      "grad_norm": 4.453537940979004,
      "learning_rate": 1.9245969415909464e-05,
      "loss": 1.6677,
      "step": 100
    },
    {
      "epoch": 3.448,
      "grad_norm": 3.9254183769226074,
      "learning_rate": 1.8933826446444933e-05,
      "loss": 1.7492,
      "step": 110
    },
    {
      "epoch": 3.768,
      "grad_norm": 2.421752691268921,
      "learning_rate": 1.8571242876167995e-05,
      "loss": 1.6435,
      "step": 120
    },
    {
      "epoch": 4.064,
      "grad_norm": 3.5012094974517822,
      "learning_rate": 1.8160265860711134e-05,
      "loss": 1.478,
      "step": 130
    },
    {
      "epoch": 4.384,
      "grad_norm": 4.192974090576172,
      "learning_rate": 1.770321578627213e-05,
      "loss": 1.4807,
      "step": 140
    },
    {
      "epoch": 4.704,
      "grad_norm": 2.528031826019287,
      "learning_rate": 1.7202673168657318e-05,
      "loss": 1.375,
      "step": 150
    },
    {
      "epoch": 5.0,
      "grad_norm": 1.0680221319198608,
      "learning_rate": 1.6661464083626734e-05,
      "loss": 1.2751,
      "step": 160
    },
    {
      "epoch": 5.32,
      "grad_norm": 2.499868392944336,
      "learning_rate": 1.6082644210801846e-05,
      "loss": 1.2887,
      "step": 170
    },
    {
      "epoch": 5.64,
      "grad_norm": 2.785773754119873,
      "learning_rate": 1.5469481581224274e-05,
      "loss": 1.2852,
      "step": 180
    },
    {
      "epoch": 5.96,
      "grad_norm": 2.820884943008423,
      "learning_rate": 1.4825438125973263e-05,
      "loss": 1.2329,
      "step": 190
    },
    {
      "epoch": 6.256,
      "grad_norm": 2.5425021648406982,
      "learning_rate": 1.4154150130018867e-05,
      "loss": 1.1001,
      "step": 200
    },
    {
      "epoch": 6.576,
      "grad_norm": 2.7974514961242676,
      "learning_rate": 1.3459407701668762e-05,
      "loss": 1.1543,
      "step": 210
    },
    {
      "epoch": 6.896,
      "grad_norm": 2.438002586364746,
      "learning_rate": 1.2745133373524855e-05,
      "loss": 1.132,
      "step": 220
    },
    {
      "epoch": 7.192,
      "grad_norm": 2.9781107902526855,
      "learning_rate": 1.2015359955769021e-05,
      "loss": 1.0284,
      "step": 230
    },
    {
      "epoch": 7.5120000000000005,
      "grad_norm": 3.271495819091797,
      "learning_rate": 1.127420776681905e-05,
      "loss": 1.0392,
      "step": 240
    },
    {
      "epoch": 7.832,
      "grad_norm": 3.1173477172851562,
      "learning_rate": 1.0525861369910877e-05,
      "loss": 1.0436,
      "step": 250
    },
    {
      "epoch": 8.128,
      "grad_norm": 2.84490704536438,
      "learning_rate": 9.77454594695308e-06,
      "loss": 0.8973,
      "step": 260
    },
    {
      "epoch": 8.448,
      "grad_norm": 3.3770253658294678,
      "learning_rate": 9.024503443047318e-06,
      "loss": 0.9455,
      "step": 270
    },
    {
      "epoch": 8.768,
      "grad_norm": 3.168492317199707,
      "learning_rate": 8.279968616363417e-06,
      "loss": 0.9297,
      "step": 280
    },
    {
      "epoch": 9.064,
      "grad_norm": 3.5850722789764404,
      "learning_rate": 7.545145128592009e-06,
      "loss": 0.8414,
      "step": 290
    },
    {
      "epoch": 9.384,
      "grad_norm": 2.5484402179718018,
      "learning_rate": 6.824181810968675e-06,
      "loss": 0.8513,
      "step": 300
    },
    {
      "epoch": 9.704,
      "grad_norm": 3.354924201965332,
      "learning_rate": 6.121149239872151e-06,
      "loss": 0.8351,
      "step": 310
    },
    {
      "epoch": 10.0,
      "grad_norm": 1.4634054899215698,
      "learning_rate": 5.440016754251364e-06,
      "loss": 0.7886,
      "step": 320
    },
    {
      "epoch": 10.32,
      "grad_norm": 3.3187077045440674,
      "learning_rate": 4.784630044641435e-06,
      "loss": 0.7832,
      "step": 330
    },
    {
      "epoch": 10.64,
      "grad_norm": 2.827324390411377,
      "learning_rate": 4.1586894403016576e-06,
      "loss": 0.7555,
      "step": 340
    },
    {
      "epoch": 10.96,
      "grad_norm": 2.74627685546875,
      "learning_rate": 3.565729017066729e-06,
      "loss": 0.7529,
      "step": 350
    },
    {
      "epoch": 11.256,
      "grad_norm": 2.8250672817230225,
      "learning_rate": 3.0090966438688774e-06,
      "loss": 0.6507,
      "step": 360
    },
    {
      "epoch": 11.576,
      "grad_norm": 3.070279359817505,
      "learning_rate": 2.491935080588658e-06,
      "loss": 0.7048,
      "step": 370
    },
    {
      "epoch": 11.896,
      "grad_norm": 2.929745674133301,
      "learning_rate": 2.01716423395644e-06,
      "loss": 0.6911,
      "step": 380
    },
    {
      "epoch": 12.192,
      "grad_norm": 2.552579879760742,
      "learning_rate": 1.587464671688187e-06,
      "loss": 0.6039,
      "step": 390
    },
    {
      "epoch": 12.512,
      "grad_norm": 2.608180046081543,
      "learning_rate": 1.2052624879351105e-06,
      "loss": 0.6456,
      "step": 400
    },
    {
      "epoch": 12.832,
      "grad_norm": 2.6534199714660645,
      "learning_rate": 8.727156054972374e-07,
      "loss": 0.6479,
      "step": 410
    },
    {
      "epoch": 13.128,
      "grad_norm": 2.4939088821411133,
      "learning_rate": 5.917015921389569e-07,
      "loss": 0.6013,
      "step": 420
    },
    {
      "epoch": 13.448,
      "grad_norm": 2.7369675636291504,
      "learning_rate": 3.638070597958665e-07,
      "loss": 0.6168,
      "step": 430
    },
    {
      "epoch": 13.768,
      "grad_norm": 2.4181711673736572,
      "learning_rate": 1.903187065253076e-07,
      "loss": 0.6093,
      "step": 440
    },
    {
      "epoch": 14.064,
      "grad_norm": 2.7222864627838135,
      "learning_rate": 7.22160517779169e-08,
      "loss": 0.5911,
      "step": 450
    },
    {
      "epoch": 14.384,
      "grad_norm": 2.8161678314208984,
      "learning_rate": 1.0165906007056914e-08,
      "loss": 0.6135,
      "step": 460
    }
  ],
  "logging_steps": 10,
  "max_steps": 465,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 15,
  "save_steps": 1000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.565274744469586e+17,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}
|
|