{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 1485,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04040404040404041,
      "grad_norm": 0.41251227259635925,
      "learning_rate": 3.3670033670033673e-07,
      "loss": 1.6394,
      "step": 20
    },
    {
      "epoch": 0.08080808080808081,
      "grad_norm": 0.4119182229042053,
      "learning_rate": 6.734006734006735e-07,
      "loss": 1.6705,
      "step": 40
    },
    {
      "epoch": 0.12121212121212122,
      "grad_norm": 0.4101647734642029,
      "learning_rate": 1.01010101010101e-06,
      "loss": 1.6682,
      "step": 60
    },
    {
      "epoch": 0.16161616161616163,
      "grad_norm": 0.4267776310443878,
      "learning_rate": 1.346801346801347e-06,
      "loss": 1.55,
      "step": 80
    },
    {
      "epoch": 0.20202020202020202,
      "grad_norm": 0.444989413022995,
      "learning_rate": 1.6835016835016838e-06,
      "loss": 1.6651,
      "step": 100
    },
    {
      "epoch": 0.24242424242424243,
      "grad_norm": 0.49537068605422974,
      "learning_rate": 2.02020202020202e-06,
      "loss": 1.5103,
      "step": 120
    },
    {
      "epoch": 0.2828282828282828,
      "grad_norm": 0.4137594699859619,
      "learning_rate": 2.3569023569023572e-06,
      "loss": 1.5489,
      "step": 140
    },
    {
      "epoch": 0.32323232323232326,
      "grad_norm": 0.4259663224220276,
      "learning_rate": 2.693602693602694e-06,
      "loss": 1.6416,
      "step": 160
    },
    {
      "epoch": 0.36363636363636365,
      "grad_norm": 0.39349138736724854,
      "learning_rate": 3.0303030303030305e-06,
      "loss": 1.554,
      "step": 180
    },
    {
      "epoch": 0.40404040404040403,
      "grad_norm": 0.4005858302116394,
      "learning_rate": 3.3670033670033675e-06,
      "loss": 1.523,
      "step": 200
    },
    {
      "epoch": 0.4444444444444444,
      "grad_norm": 0.4026505947113037,
      "learning_rate": 3.7037037037037037e-06,
      "loss": 1.4523,
      "step": 220
    },
    {
      "epoch": 0.48484848484848486,
      "grad_norm": 0.48675334453582764,
      "learning_rate": 4.04040404040404e-06,
      "loss": 1.3475,
      "step": 240
    },
    {
      "epoch": 0.5252525252525253,
      "grad_norm": 0.7367889881134033,
      "learning_rate": 4.377104377104377e-06,
      "loss": 1.2313,
      "step": 260
    },
    {
      "epoch": 0.5656565656565656,
      "grad_norm": 0.47958847880363464,
      "learning_rate": 4.7138047138047145e-06,
      "loss": 0.9929,
      "step": 280
    },
    {
      "epoch": 0.6060606060606061,
      "grad_norm": 0.5394216179847717,
      "learning_rate": 4.999921328558333e-06,
      "loss": 0.974,
      "step": 300
    },
    {
      "epoch": 0.6464646464646465,
      "grad_norm": 0.6015712022781372,
      "learning_rate": 4.995377268577495e-06,
      "loss": 0.8598,
      "step": 320
    },
    {
      "epoch": 0.6868686868686869,
      "grad_norm": 0.45018038153648376,
      "learning_rate": 4.983854712613647e-06,
      "loss": 0.8335,
      "step": 340
    },
    {
      "epoch": 0.7272727272727273,
      "grad_norm": 0.5190030336380005,
      "learning_rate": 4.965385884295467e-06,
      "loss": 0.8485,
      "step": 360
    },
    {
      "epoch": 0.7676767676767676,
      "grad_norm": 0.504775881767273,
      "learning_rate": 4.941453335558682e-06,
      "loss": 0.8446,
      "step": 380
    },
    {
      "epoch": 0.8080808080808081,
      "grad_norm": 0.5671610832214355,
      "learning_rate": 4.9096053963998555e-06,
      "loss": 0.796,
      "step": 400
    },
    {
      "epoch": 0.8484848484848485,
      "grad_norm": 0.6207985281944275,
      "learning_rate": 4.871018828260492e-06,
      "loss": 0.8135,
      "step": 420
    },
    {
      "epoch": 0.8888888888888888,
      "grad_norm": 0.6469517350196838,
      "learning_rate": 4.825801541160509e-06,
      "loss": 0.8143,
      "step": 440
    },
    {
      "epoch": 0.9292929292929293,
      "grad_norm": 0.5718827247619629,
      "learning_rate": 4.7740799883862966e-06,
      "loss": 0.79,
      "step": 460
    },
    {
      "epoch": 0.9696969696969697,
      "grad_norm": 0.9612074494361877,
      "learning_rate": 4.715998812855305e-06,
      "loss": 0.7133,
      "step": 480
    },
    {
      "epoch": 1.0101010101010102,
      "grad_norm": 0.6769607067108154,
      "learning_rate": 4.651720442612076e-06,
      "loss": 0.765,
      "step": 500
    },
    {
      "epoch": 1.0505050505050506,
      "grad_norm": 0.6824682354927063,
      "learning_rate": 4.5814246365869285e-06,
      "loss": 0.7051,
      "step": 520
    },
    {
      "epoch": 1.0909090909090908,
      "grad_norm": 0.4644620716571808,
      "learning_rate": 4.50530798188761e-06,
      "loss": 0.7534,
      "step": 540
    },
    {
      "epoch": 1.1313131313131313,
      "grad_norm": 0.8265301585197449,
      "learning_rate": 4.423583344029786e-06,
      "loss": 0.7873,
      "step": 560
    },
    {
      "epoch": 1.1717171717171717,
      "grad_norm": 0.6619002819061279,
      "learning_rate": 4.336479271643833e-06,
      "loss": 0.7264,
      "step": 580
    },
    {
      "epoch": 1.2121212121212122,
      "grad_norm": 0.5203114151954651,
      "learning_rate": 4.244239357322705e-06,
      "loss": 0.7754,
      "step": 600
    },
    {
      "epoch": 1.2525252525252526,
      "grad_norm": 0.5513920783996582,
      "learning_rate": 4.1471215563983125e-06,
      "loss": 0.694,
      "step": 620
    },
    {
      "epoch": 1.2929292929292928,
      "grad_norm": 0.6095554232597351,
      "learning_rate": 4.045397465551513e-06,
      "loss": 0.7402,
      "step": 640
    },
    {
      "epoch": 1.3333333333333333,
      "grad_norm": 0.46472665667533875,
      "learning_rate": 3.93935156327311e-06,
      "loss": 0.741,
      "step": 660
    },
    {
      "epoch": 1.3737373737373737,
      "grad_norm": 0.6970565915107727,
      "learning_rate": 3.82928041429998e-06,
      "loss": 0.7332,
      "step": 680
    },
    {
      "epoch": 1.4141414141414141,
      "grad_norm": 0.46008697152137756,
      "learning_rate": 3.715491840251172e-06,
      "loss": 0.7401,
      "step": 700
    },
    {
      "epoch": 1.4545454545454546,
      "grad_norm": 0.5285646319389343,
      "learning_rate": 3.598304058783357e-06,
      "loss": 0.7809,
      "step": 720
    },
    {
      "epoch": 1.494949494949495,
      "grad_norm": 0.46231013536453247,
      "learning_rate": 3.478044793673025e-06,
      "loss": 0.7506,
      "step": 740
    },
    {
      "epoch": 1.5353535353535355,
      "grad_norm": 0.5669680833816528,
      "learning_rate": 3.3550503583141726e-06,
      "loss": 0.732,
      "step": 760
    },
    {
      "epoch": 1.5757575757575757,
      "grad_norm": 0.6222017407417297,
      "learning_rate": 3.2296647151945116e-06,
      "loss": 0.7184,
      "step": 780
    },
    {
      "epoch": 1.6161616161616161,
      "grad_norm": 0.5148311853408813,
      "learning_rate": 3.102238513980471e-06,
      "loss": 0.7674,
      "step": 800
    },
    {
      "epoch": 1.6565656565656566,
      "grad_norm": 0.5587109327316284,
      "learning_rate": 2.973128110901026e-06,
      "loss": 0.7514,
      "step": 820
    },
    {
      "epoch": 1.696969696969697,
      "grad_norm": 0.6635349988937378,
      "learning_rate": 2.842694572172737e-06,
      "loss": 0.7388,
      "step": 840
    },
    {
      "epoch": 1.7373737373737375,
      "grad_norm": 0.5663965344429016,
      "learning_rate": 2.7113026642529733e-06,
      "loss": 0.7061,
      "step": 860
    },
    {
      "epoch": 1.7777777777777777,
      "grad_norm": 0.5994731783866882,
      "learning_rate": 2.57931983374517e-06,
      "loss": 0.726,
      "step": 880
    },
    {
      "epoch": 1.8181818181818183,
      "grad_norm": 0.5646547079086304,
      "learning_rate": 2.4471151798088465e-06,
      "loss": 0.7363,
      "step": 900
    },
    {
      "epoch": 1.8585858585858586,
      "grad_norm": 0.575791597366333,
      "learning_rate": 2.3150584219481644e-06,
      "loss": 0.7105,
      "step": 920
    },
    {
      "epoch": 1.898989898989899,
      "grad_norm": 0.43835175037384033,
      "learning_rate": 2.183518866065627e-06,
      "loss": 0.688,
      "step": 940
    },
    {
      "epoch": 1.9393939393939394,
      "grad_norm": 0.6634537577629089,
      "learning_rate": 2.0528643716724572e-06,
      "loss": 0.62,
      "step": 960
    },
    {
      "epoch": 1.9797979797979797,
      "grad_norm": 0.45726099610328674,
      "learning_rate": 1.9234603231439e-06,
      "loss": 0.7451,
      "step": 980
    },
    {
      "epoch": 2.0202020202020203,
      "grad_norm": 0.564659833908081,
      "learning_rate": 1.7956686078964257e-06,
      "loss": 0.6724,
      "step": 1000
    },
    {
      "epoch": 2.0606060606060606,
      "grad_norm": 0.6070213317871094,
      "learning_rate": 1.6698466043444122e-06,
      "loss": 0.7313,
      "step": 1020
    },
    {
      "epoch": 2.101010101010101,
      "grad_norm": 0.5348501801490784,
      "learning_rate": 1.546346182466566e-06,
      "loss": 0.7431,
      "step": 1040
    },
    {
      "epoch": 2.1414141414141414,
      "grad_norm": 0.5485573410987854,
      "learning_rate": 1.425512719777071e-06,
      "loss": 0.7365,
      "step": 1060
    },
    {
      "epoch": 2.1818181818181817,
      "grad_norm": 0.5143328905105591,
      "learning_rate": 1.3076841354533658e-06,
      "loss": 0.7516,
      "step": 1080
    },
    {
      "epoch": 2.2222222222222223,
      "grad_norm": 0.4357577860355377,
      "learning_rate": 1.1931899453216698e-06,
      "loss": 0.6963,
      "step": 1100
    },
    {
      "epoch": 2.2626262626262625,
      "grad_norm": 0.5489456057548523,
      "learning_rate": 1.0823503403430736e-06,
      "loss": 0.6867,
      "step": 1120
    },
    {
      "epoch": 2.303030303030303,
      "grad_norm": 0.5727762579917908,
      "learning_rate": 9.754752911772616e-07,
      "loss": 0.6873,
      "step": 1140
    },
    {
      "epoch": 2.3434343434343434,
      "grad_norm": 0.469957172870636,
      "learning_rate": 8.728636813280164e-07,
      "loss": 0.691,
      "step": 1160
    },
    {
      "epoch": 2.3838383838383836,
      "grad_norm": 0.7846599817276001,
      "learning_rate": 7.748024712947205e-07,
      "loss": 0.7064,
      "step": 1180
    },
    {
      "epoch": 2.4242424242424243,
      "grad_norm": 0.5549547076225281,
      "learning_rate": 6.815658960673782e-07,
      "loss": 0.7707,
      "step": 1200
    },
    {
      "epoch": 2.4646464646464645,
      "grad_norm": 0.4853402078151703,
      "learning_rate": 5.934146982094049e-07,
      "loss": 0.7368,
      "step": 1220
    },
    {
      "epoch": 2.505050505050505,
      "grad_norm": 0.6494936943054199,
      "learning_rate": 5.105953986729196e-07,
      "loss": 0.7046,
      "step": 1240
    },
    {
      "epoch": 2.5454545454545454,
      "grad_norm": 0.48534753918647766,
      "learning_rate": 4.3333960738577236e-07,
      "loss": 0.6873,
      "step": 1260
    },
    {
      "epoch": 2.5858585858585856,
      "grad_norm": 0.6183688640594482,
      "learning_rate": 3.6186337553827747e-07,
      "loss": 0.7095,
      "step": 1280
    },
    {
      "epoch": 2.6262626262626263,
      "grad_norm": 0.5592140555381775,
      "learning_rate": 2.963665913810451e-07,
      "loss": 0.6618,
      "step": 1300
    },
    {
      "epoch": 2.6666666666666665,
      "grad_norm": 0.5510610938072205,
      "learning_rate": 2.370324212235936e-07,
      "loss": 0.7339,
      "step": 1320
    },
    {
      "epoch": 2.707070707070707,
      "grad_norm": 0.6054675579071045,
      "learning_rate": 1.840267971970344e-07,
      "loss": 0.7237,
      "step": 1340
    },
    {
      "epoch": 2.7474747474747474,
      "grad_norm": 0.6097862720489502,
      "learning_rate": 1.3749795321332887e-07,
      "loss": 0.7374,
      "step": 1360
    },
    {
      "epoch": 2.787878787878788,
      "grad_norm": 0.4065544605255127,
      "learning_rate": 9.757601041885694e-08,
      "loss": 0.7664,
      "step": 1380
    },
    {
      "epoch": 2.8282828282828283,
      "grad_norm": 0.573664665222168,
      "learning_rate": 6.437261330158206e-08,
      "loss": 0.7463,
      "step": 1400
    },
    {
      "epoch": 2.8686868686868685,
      "grad_norm": 0.47344183921813965,
      "learning_rate": 3.798061746947995e-08,
      "loss": 0.6859,
      "step": 1420
    },
    {
      "epoch": 2.909090909090909,
      "grad_norm": 0.48462924361228943,
      "learning_rate": 1.847382997337943e-08,
      "loss": 0.659,
      "step": 1440
    },
    {
      "epoch": 2.9494949494949494,
      "grad_norm": 0.6021179556846619,
      "learning_rate": 5.906802900412789e-09,
      "loss": 0.6971,
      "step": 1460
    },
    {
      "epoch": 2.98989898989899,
      "grad_norm": 0.5301674008369446,
      "learning_rate": 3.146808153123293e-10,
      "loss": 0.6606,
      "step": 1480
    },
    {
      "epoch": 3.0,
      "step": 1485,
      "total_flos": 6.836552056302797e+16,
      "train_loss": 0.8828093713381475,
      "train_runtime": 1774.0294,
      "train_samples_per_second": 3.348,
      "train_steps_per_second": 0.837
    }
  ],
  "logging_steps": 20,
  "max_steps": 1485,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 6.836552056302797e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}