{
  "best_metric": 0.8851567498400512,
  "best_model_checkpoint": "./checkpoints/beomi_kcbert-base\\checkpoint-4689",
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 4689,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03198976327575176,
      "grad_norm": 8.217513084411621,
      "learning_rate": 1.9786734911494988e-05,
      "loss": 0.89,
      "step": 50
    },
    {
      "epoch": 0.06397952655150352,
      "grad_norm": 14.240294456481934,
      "learning_rate": 1.9573469822989978e-05,
      "loss": 0.6064,
      "step": 100
    },
    {
      "epoch": 0.09596928982725528,
      "grad_norm": 13.745306015014648,
      "learning_rate": 1.9360204734484968e-05,
      "loss": 0.5601,
      "step": 150
    },
    {
      "epoch": 0.12795905310300704,
      "grad_norm": 15.16039752960205,
      "learning_rate": 1.9146939645979955e-05,
      "loss": 0.4447,
      "step": 200
    },
    {
      "epoch": 0.1599488163787588,
      "grad_norm": 13.24716567993164,
      "learning_rate": 1.8933674557474945e-05,
      "loss": 0.5015,
      "step": 250
    },
    {
      "epoch": 0.19193857965451055,
      "grad_norm": 5.5529327392578125,
      "learning_rate": 1.872040946896993e-05,
      "loss": 0.4449,
      "step": 300
    },
    {
      "epoch": 0.22392834293026231,
      "grad_norm": 13.999655723571777,
      "learning_rate": 1.8507144380464918e-05,
      "loss": 0.4436,
      "step": 350
    },
    {
      "epoch": 0.2559181062060141,
      "grad_norm": 7.681272029876709,
      "learning_rate": 1.8293879291959908e-05,
      "loss": 0.4384,
      "step": 400
    },
    {
      "epoch": 0.28790786948176583,
      "grad_norm": 10.44015121459961,
      "learning_rate": 1.8080614203454897e-05,
      "loss": 0.4037,
      "step": 450
    },
    {
      "epoch": 0.3198976327575176,
      "grad_norm": 10.103533744812012,
      "learning_rate": 1.7867349114949884e-05,
      "loss": 0.4119,
      "step": 500
    },
    {
      "epoch": 0.35188739603326935,
      "grad_norm": 10.231204986572266,
      "learning_rate": 1.7654084026444874e-05,
      "loss": 0.4122,
      "step": 550
    },
    {
      "epoch": 0.3838771593090211,
      "grad_norm": 5.318388938903809,
      "learning_rate": 1.744081893793986e-05,
      "loss": 0.3774,
      "step": 600
    },
    {
      "epoch": 0.41586692258477287,
      "grad_norm": 4.911611557006836,
      "learning_rate": 1.7227553849434847e-05,
      "loss": 0.4413,
      "step": 650
    },
    {
      "epoch": 0.44785668586052463,
      "grad_norm": 9.968073844909668,
      "learning_rate": 1.7014288760929837e-05,
      "loss": 0.3951,
      "step": 700
    },
    {
      "epoch": 0.4798464491362764,
      "grad_norm": 9.88436222076416,
      "learning_rate": 1.6801023672424827e-05,
      "loss": 0.3998,
      "step": 750
    },
    {
      "epoch": 0.5118362124120281,
      "grad_norm": 9.33141803741455,
      "learning_rate": 1.6587758583919813e-05,
      "loss": 0.3632,
      "step": 800
    },
    {
      "epoch": 0.5438259756877799,
      "grad_norm": 9.803372383117676,
      "learning_rate": 1.6374493495414803e-05,
      "loss": 0.4311,
      "step": 850
    },
    {
      "epoch": 0.5758157389635317,
      "grad_norm": 6.760705947875977,
      "learning_rate": 1.616122840690979e-05,
      "loss": 0.4055,
      "step": 900
    },
    {
      "epoch": 0.6078055022392834,
      "grad_norm": 6.0470290184021,
      "learning_rate": 1.5947963318404776e-05,
      "loss": 0.384,
      "step": 950
    },
    {
      "epoch": 0.6397952655150352,
      "grad_norm": 11.182646751403809,
      "learning_rate": 1.5734698229899766e-05,
      "loss": 0.4165,
      "step": 1000
    },
    {
      "epoch": 0.6717850287907869,
      "grad_norm": 15.545323371887207,
      "learning_rate": 1.5521433141394756e-05,
      "loss": 0.3785,
      "step": 1050
    },
    {
      "epoch": 0.7037747920665387,
      "grad_norm": 9.248072624206543,
      "learning_rate": 1.5308168052889743e-05,
      "loss": 0.3709,
      "step": 1100
    },
    {
      "epoch": 0.7357645553422905,
      "grad_norm": 1.7462440729141235,
      "learning_rate": 1.5094902964384733e-05,
      "loss": 0.4148,
      "step": 1150
    },
    {
      "epoch": 0.7677543186180422,
      "grad_norm": 7.342015266418457,
      "learning_rate": 1.488163787587972e-05,
      "loss": 0.402,
      "step": 1200
    },
    {
      "epoch": 0.799744081893794,
      "grad_norm": 8.367056846618652,
      "learning_rate": 1.4668372787374708e-05,
      "loss": 0.3647,
      "step": 1250
    },
    {
      "epoch": 0.8317338451695457,
      "grad_norm": 20.332612991333008,
      "learning_rate": 1.4455107698869698e-05,
      "loss": 0.3474,
      "step": 1300
    },
    {
      "epoch": 0.8637236084452975,
      "grad_norm": 5.561351776123047,
      "learning_rate": 1.4241842610364684e-05,
      "loss": 0.3877,
      "step": 1350
    },
    {
      "epoch": 0.8957133717210493,
      "grad_norm": 6.848100185394287,
      "learning_rate": 1.4028577521859672e-05,
      "loss": 0.3215,
      "step": 1400
    },
    {
      "epoch": 0.927703134996801,
      "grad_norm": 3.406212568283081,
      "learning_rate": 1.3815312433354662e-05,
      "loss": 0.3549,
      "step": 1450
    },
    {
      "epoch": 0.9596928982725528,
      "grad_norm": 12.684820175170898,
      "learning_rate": 1.3602047344849649e-05,
      "loss": 0.3429,
      "step": 1500
    },
    {
      "epoch": 0.9916826615483045,
      "grad_norm": 7.167674541473389,
      "learning_rate": 1.3388782256344637e-05,
      "loss": 0.2946,
      "step": 1550
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.8694817658349329,
      "eval_f1": 0.8560898036594571,
      "eval_loss": 0.3504190444946289,
      "eval_runtime": 65.935,
      "eval_samples_per_second": 47.41,
      "eval_steps_per_second": 2.973,
      "step": 1563
    },
    {
      "epoch": 1.0236724248240563,
      "grad_norm": 6.036362171173096,
      "learning_rate": 1.3175517167839627e-05,
      "loss": 0.2343,
      "step": 1600
    },
    {
      "epoch": 1.055662188099808,
      "grad_norm": 12.397319793701172,
      "learning_rate": 1.2962252079334613e-05,
      "loss": 0.2676,
      "step": 1650
    },
    {
      "epoch": 1.0876519513755598,
      "grad_norm": 14.472246170043945,
      "learning_rate": 1.2748986990829602e-05,
      "loss": 0.2351,
      "step": 1700
    },
    {
      "epoch": 1.1196417146513116,
      "grad_norm": 4.455846786499023,
      "learning_rate": 1.2535721902324592e-05,
      "loss": 0.2011,
      "step": 1750
    },
    {
      "epoch": 1.1516314779270633,
      "grad_norm": 2.5196011066436768,
      "learning_rate": 1.2322456813819578e-05,
      "loss": 0.2774,
      "step": 1800
    },
    {
      "epoch": 1.183621241202815,
      "grad_norm": 4.304745197296143,
      "learning_rate": 1.2109191725314566e-05,
      "loss": 0.2192,
      "step": 1850
    },
    {
      "epoch": 1.2156110044785668,
      "grad_norm": 3.972710609436035,
      "learning_rate": 1.1895926636809556e-05,
      "loss": 0.2787,
      "step": 1900
    },
    {
      "epoch": 1.2476007677543186,
      "grad_norm": 10.098494529724121,
      "learning_rate": 1.1682661548304543e-05,
      "loss": 0.2402,
      "step": 1950
    },
    {
      "epoch": 1.2795905310300704,
      "grad_norm": 3.2702131271362305,
      "learning_rate": 1.1469396459799531e-05,
      "loss": 0.2567,
      "step": 2000
    },
    {
      "epoch": 1.3115802943058221,
      "grad_norm": 3.6833343505859375,
      "learning_rate": 1.1256131371294521e-05,
      "loss": 0.2429,
      "step": 2050
    },
    {
      "epoch": 1.3435700575815739,
      "grad_norm": 8.543336868286133,
      "learning_rate": 1.1042866282789508e-05,
      "loss": 0.2178,
      "step": 2100
    },
    {
      "epoch": 1.3755598208573256,
      "grad_norm": 8.93952751159668,
      "learning_rate": 1.0829601194284496e-05,
      "loss": 0.223,
      "step": 2150
    },
    {
      "epoch": 1.4075495841330774,
      "grad_norm": 0.37753650546073914,
      "learning_rate": 1.0616336105779486e-05,
      "loss": 0.202,
      "step": 2200
    },
    {
      "epoch": 1.4395393474088292,
      "grad_norm": 11.612641334533691,
      "learning_rate": 1.0403071017274472e-05,
      "loss": 0.242,
      "step": 2250
    },
    {
      "epoch": 1.471529110684581,
      "grad_norm": 28.171804428100586,
      "learning_rate": 1.018980592876946e-05,
      "loss": 0.2103,
      "step": 2300
    },
    {
      "epoch": 1.5035188739603327,
      "grad_norm": 4.860315322875977,
      "learning_rate": 9.976540840264449e-06,
      "loss": 0.2168,
      "step": 2350
    },
    {
      "epoch": 1.5355086372360844,
      "grad_norm": 6.9845499992370605,
      "learning_rate": 9.763275751759437e-06,
      "loss": 0.2225,
      "step": 2400
    },
    {
      "epoch": 1.5674984005118362,
      "grad_norm": 9.760077476501465,
      "learning_rate": 9.550010663254427e-06,
      "loss": 0.2069,
      "step": 2450
    },
    {
      "epoch": 1.599488163787588,
      "grad_norm": 19.529109954833984,
      "learning_rate": 9.336745574749414e-06,
      "loss": 0.2546,
      "step": 2500
    },
    {
      "epoch": 1.6314779270633397,
      "grad_norm": 13.040373802185059,
      "learning_rate": 9.123480486244403e-06,
      "loss": 0.2327,
      "step": 2550
    },
    {
      "epoch": 1.6634676903390915,
      "grad_norm": 30.015762329101562,
      "learning_rate": 8.910215397739392e-06,
      "loss": 0.2421,
      "step": 2600
    },
    {
      "epoch": 1.6954574536148432,
      "grad_norm": 7.028976917266846,
      "learning_rate": 8.696950309234378e-06,
      "loss": 0.227,
      "step": 2650
    },
    {
      "epoch": 1.727447216890595,
      "grad_norm": 8.363308906555176,
      "learning_rate": 8.483685220729368e-06,
      "loss": 0.226,
      "step": 2700
    },
    {
      "epoch": 1.7594369801663468,
      "grad_norm": 6.349699020385742,
      "learning_rate": 8.270420132224356e-06,
      "loss": 0.2472,
      "step": 2750
    },
    {
      "epoch": 1.7914267434420985,
      "grad_norm": 6.191136837005615,
      "learning_rate": 8.057155043719343e-06,
      "loss": 0.2457,
      "step": 2800
    },
    {
      "epoch": 1.8234165067178503,
      "grad_norm": 0.29677829146385193,
      "learning_rate": 7.843889955214333e-06,
      "loss": 0.2324,
      "step": 2850
    },
    {
      "epoch": 1.855406269993602,
      "grad_norm": 10.708757400512695,
      "learning_rate": 7.630624866709321e-06,
      "loss": 0.2525,
      "step": 2900
    },
    {
      "epoch": 1.8873960332693538,
      "grad_norm": 9.20060920715332,
      "learning_rate": 7.4173597782043085e-06,
      "loss": 0.2107,
      "step": 2950
    },
    {
      "epoch": 1.9193857965451055,
      "grad_norm": 17.06194305419922,
      "learning_rate": 7.204094689699297e-06,
      "loss": 0.2025,
      "step": 3000
    },
    {
      "epoch": 1.9513755598208573,
      "grad_norm": 8.665574073791504,
      "learning_rate": 6.990829601194286e-06,
      "loss": 0.2297,
      "step": 3050
    },
    {
      "epoch": 1.983365323096609,
      "grad_norm": 6.722887992858887,
      "learning_rate": 6.777564512689273e-06,
      "loss": 0.195,
      "step": 3100
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.8829174664107485,
      "eval_f1": 0.8726527951413109,
      "eval_loss": 0.3595542907714844,
      "eval_runtime": 66.0769,
      "eval_samples_per_second": 47.309,
      "eval_steps_per_second": 2.966,
      "step": 3126
    },
    {
      "epoch": 2.015355086372361,
      "grad_norm": 7.179947853088379,
      "learning_rate": 6.5642994241842614e-06,
      "loss": 0.1669,
      "step": 3150
    },
    {
      "epoch": 2.0473448496481126,
      "grad_norm": 12.716062545776367,
      "learning_rate": 6.3510343356792505e-06,
      "loss": 0.1207,
      "step": 3200
    },
    {
      "epoch": 2.0793346129238643,
      "grad_norm": 10.967968940734863,
      "learning_rate": 6.137769247174238e-06,
      "loss": 0.1268,
      "step": 3250
    },
    {
      "epoch": 2.111324376199616,
      "grad_norm": 0.44536763429641724,
      "learning_rate": 5.924504158669226e-06,
      "loss": 0.1287,
      "step": 3300
    },
    {
      "epoch": 2.143314139475368,
      "grad_norm": 12.086326599121094,
      "learning_rate": 5.711239070164215e-06,
      "loss": 0.1008,
      "step": 3350
    },
    {
      "epoch": 2.1753039027511196,
      "grad_norm": 21.13951301574707,
      "learning_rate": 5.497973981659203e-06,
      "loss": 0.1798,
      "step": 3400
    },
    {
      "epoch": 2.2072936660268714,
      "grad_norm": 24.906211853027344,
      "learning_rate": 5.284708893154191e-06,
      "loss": 0.1484,
      "step": 3450
    },
    {
      "epoch": 2.239283429302623,
      "grad_norm": 20.485675811767578,
      "learning_rate": 5.07144380464918e-06,
      "loss": 0.1568,
      "step": 3500
    },
    {
      "epoch": 2.271273192578375,
      "grad_norm": 12.844356536865234,
      "learning_rate": 4.858178716144167e-06,
      "loss": 0.1197,
      "step": 3550
    },
    {
      "epoch": 2.3032629558541267,
      "grad_norm": 16.984413146972656,
      "learning_rate": 4.644913627639156e-06,
      "loss": 0.1296,
      "step": 3600
    },
    {
      "epoch": 2.3352527191298784,
      "grad_norm": 0.032408133149147034,
      "learning_rate": 4.431648539134144e-06,
      "loss": 0.1151,
      "step": 3650
    },
    {
      "epoch": 2.36724248240563,
      "grad_norm": 0.1846800148487091,
      "learning_rate": 4.218383450629132e-06,
      "loss": 0.1797,
      "step": 3700
    },
    {
      "epoch": 2.399232245681382,
      "grad_norm": 7.689809799194336,
      "learning_rate": 4.005118362124121e-06,
      "loss": 0.1725,
      "step": 3750
    },
    {
      "epoch": 2.4312220089571337,
      "grad_norm": 10.447749137878418,
      "learning_rate": 3.791853273619109e-06,
      "loss": 0.1244,
      "step": 3800
    },
    {
      "epoch": 2.4632117722328855,
      "grad_norm": 11.911157608032227,
      "learning_rate": 3.5785881851140968e-06,
      "loss": 0.1316,
      "step": 3850
    },
    {
      "epoch": 2.495201535508637,
      "grad_norm": 8.964936256408691,
      "learning_rate": 3.3653230966090854e-06,
      "loss": 0.1384,
      "step": 3900
    },
    {
      "epoch": 2.527191298784389,
      "grad_norm": 1.8026219606399536,
      "learning_rate": 3.1520580081040737e-06,
      "loss": 0.1148,
      "step": 3950
    },
    {
      "epoch": 2.5591810620601407,
      "grad_norm": 13.85440444946289,
      "learning_rate": 2.9387929195990615e-06,
      "loss": 0.1436,
      "step": 4000
    },
    {
      "epoch": 2.5911708253358925,
      "grad_norm": 0.33746129274368286,
      "learning_rate": 2.72552783109405e-06,
      "loss": 0.1175,
      "step": 4050
    },
    {
      "epoch": 2.6231605886116443,
      "grad_norm": 23.944438934326172,
      "learning_rate": 2.5122627425890384e-06,
      "loss": 0.1361,
      "step": 4100
    },
    {
      "epoch": 2.655150351887396,
      "grad_norm": 46.74479293823242,
      "learning_rate": 2.2989976540840266e-06,
      "loss": 0.1624,
      "step": 4150
    },
    {
      "epoch": 2.6871401151631478,
      "grad_norm": 1.4517056941986084,
      "learning_rate": 2.085732565579015e-06,
      "loss": 0.1308,
      "step": 4200
    },
    {
      "epoch": 2.7191298784388995,
      "grad_norm": 16.07023048400879,
      "learning_rate": 1.872467477074003e-06,
      "loss": 0.1477,
      "step": 4250
    },
    {
      "epoch": 2.7511196417146513,
      "grad_norm": 0.09992417693138123,
      "learning_rate": 1.6592023885689915e-06,
      "loss": 0.1426,
      "step": 4300
    },
    {
      "epoch": 2.783109404990403,
      "grad_norm": 24.815753936767578,
      "learning_rate": 1.4459373000639796e-06,
      "loss": 0.1494,
      "step": 4350
    },
    {
      "epoch": 2.815099168266155,
      "grad_norm": 0.5908675789833069,
      "learning_rate": 1.2326722115589678e-06,
      "loss": 0.1505,
      "step": 4400
    },
    {
      "epoch": 2.8470889315419066,
      "grad_norm": 19.279033660888672,
      "learning_rate": 1.019407123053956e-06,
      "loss": 0.1432,
      "step": 4450
    },
    {
      "epoch": 2.8790786948176583,
      "grad_norm": 20.35901641845703,
      "learning_rate": 8.061420345489445e-07,
      "loss": 0.1479,
      "step": 4500
    },
    {
      "epoch": 2.91106845809341,
      "grad_norm": 38.45198059082031,
      "learning_rate": 5.928769460439326e-07,
      "loss": 0.1184,
      "step": 4550
    },
    {
      "epoch": 2.943058221369162,
      "grad_norm": 15.500663757324219,
      "learning_rate": 3.796118575389209e-07,
      "loss": 0.1638,
      "step": 4600
    },
    {
      "epoch": 2.9750479846449136,
      "grad_norm": 7.924103260040283,
      "learning_rate": 1.6634676903390917e-07,
      "loss": 0.1474,
      "step": 4650
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.8851567498400512,
      "eval_f1": 0.8744039386565773,
      "eval_loss": 0.48145365715026855,
      "eval_runtime": 67.22,
      "eval_samples_per_second": 46.504,
      "eval_steps_per_second": 2.916,
      "step": 4689
    }
  ],
  "logging_steps": 50,
  "max_steps": 4689,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 766896986987838.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}