{
  "best_metric": 0.8976327575175944,
  "best_model_checkpoint": "./checkpoints/klue_bert-base\\checkpoint-4689",
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 4689,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03198976327575176,
      "grad_norm": 8.30168342590332,
      "learning_rate": 1.9786734911494988e-05,
      "loss": 0.7464,
      "step": 50
    },
    {
      "epoch": 0.06397952655150352,
      "grad_norm": 9.065665245056152,
      "learning_rate": 1.9573469822989978e-05,
      "loss": 0.539,
      "step": 100
    },
    {
      "epoch": 0.09596928982725528,
      "grad_norm": 14.672710418701172,
      "learning_rate": 1.9360204734484968e-05,
      "loss": 0.4953,
      "step": 150
    },
    {
      "epoch": 0.12795905310300704,
      "grad_norm": 11.273133277893066,
      "learning_rate": 1.9146939645979955e-05,
      "loss": 0.4002,
      "step": 200
    },
    {
      "epoch": 0.1599488163787588,
      "grad_norm": 10.856703758239746,
      "learning_rate": 1.8933674557474945e-05,
      "loss": 0.4415,
      "step": 250
    },
    {
      "epoch": 0.19193857965451055,
      "grad_norm": 6.359602451324463,
      "learning_rate": 1.872040946896993e-05,
      "loss": 0.4559,
      "step": 300
    },
    {
      "epoch": 0.22392834293026231,
      "grad_norm": 9.03753662109375,
      "learning_rate": 1.8507144380464918e-05,
      "loss": 0.4145,
      "step": 350
    },
    {
      "epoch": 0.2559181062060141,
      "grad_norm": 10.679155349731445,
      "learning_rate": 1.8293879291959908e-05,
      "loss": 0.3769,
      "step": 400
    },
    {
      "epoch": 0.28790786948176583,
      "grad_norm": 11.417706489562988,
      "learning_rate": 1.8080614203454897e-05,
      "loss": 0.3864,
      "step": 450
    },
    {
      "epoch": 0.3198976327575176,
      "grad_norm": 7.758769512176514,
      "learning_rate": 1.7867349114949884e-05,
      "loss": 0.3608,
      "step": 500
    },
    {
      "epoch": 0.35188739603326935,
      "grad_norm": 10.617319107055664,
      "learning_rate": 1.7654084026444874e-05,
      "loss": 0.4189,
      "step": 550
    },
    {
      "epoch": 0.3838771593090211,
      "grad_norm": 7.695286273956299,
      "learning_rate": 1.744081893793986e-05,
      "loss": 0.3739,
      "step": 600
    },
    {
      "epoch": 0.41586692258477287,
      "grad_norm": 4.655588626861572,
      "learning_rate": 1.7227553849434847e-05,
      "loss": 0.3978,
      "step": 650
    },
    {
      "epoch": 0.44785668586052463,
      "grad_norm": 7.375668525695801,
      "learning_rate": 1.7014288760929837e-05,
      "loss": 0.3624,
      "step": 700
    },
    {
      "epoch": 0.4798464491362764,
      "grad_norm": 8.715352058410645,
      "learning_rate": 1.6801023672424827e-05,
      "loss": 0.3983,
      "step": 750
    },
    {
      "epoch": 0.5118362124120281,
      "grad_norm": 4.942613124847412,
      "learning_rate": 1.6587758583919813e-05,
      "loss": 0.3264,
      "step": 800
    },
    {
      "epoch": 0.5438259756877799,
      "grad_norm": 9.237791061401367,
      "learning_rate": 1.6374493495414803e-05,
      "loss": 0.3811,
      "step": 850
    },
    {
      "epoch": 0.5758157389635317,
      "grad_norm": 10.856432914733887,
      "learning_rate": 1.616122840690979e-05,
      "loss": 0.3965,
      "step": 900
    },
    {
      "epoch": 0.6078055022392834,
      "grad_norm": 7.043545246124268,
      "learning_rate": 1.5947963318404776e-05,
      "loss": 0.3702,
      "step": 950
    },
    {
      "epoch": 0.6397952655150352,
      "grad_norm": 7.454297065734863,
      "learning_rate": 1.5734698229899766e-05,
      "loss": 0.3946,
      "step": 1000
    },
    {
      "epoch": 0.6717850287907869,
      "grad_norm": 17.558452606201172,
      "learning_rate": 1.5521433141394756e-05,
      "loss": 0.3569,
      "step": 1050
    },
    {
      "epoch": 0.7037747920665387,
      "grad_norm": 10.30020809173584,
      "learning_rate": 1.5308168052889743e-05,
      "loss": 0.3621,
      "step": 1100
    },
    {
      "epoch": 0.7357645553422905,
      "grad_norm": 8.323447227478027,
      "learning_rate": 1.5094902964384733e-05,
      "loss": 0.3507,
      "step": 1150
    },
    {
      "epoch": 0.7677543186180422,
      "grad_norm": 5.383088111877441,
      "learning_rate": 1.488163787587972e-05,
      "loss": 0.3927,
      "step": 1200
    },
    {
      "epoch": 0.799744081893794,
      "grad_norm": 6.308414936065674,
      "learning_rate": 1.4668372787374708e-05,
      "loss": 0.3469,
      "step": 1250
    },
    {
      "epoch": 0.8317338451695457,
      "grad_norm": 8.680594444274902,
      "learning_rate": 1.4455107698869698e-05,
      "loss": 0.3442,
      "step": 1300
    },
    {
      "epoch": 0.8637236084452975,
      "grad_norm": 3.560561418533325,
      "learning_rate": 1.4241842610364684e-05,
      "loss": 0.3829,
      "step": 1350
    },
    {
      "epoch": 0.8957133717210493,
      "grad_norm": 8.998127937316895,
      "learning_rate": 1.4028577521859672e-05,
      "loss": 0.3245,
      "step": 1400
    },
    {
      "epoch": 0.927703134996801,
      "grad_norm": 2.6510281562805176,
      "learning_rate": 1.3815312433354662e-05,
      "loss": 0.3591,
      "step": 1450
    },
    {
      "epoch": 0.9596928982725528,
      "grad_norm": 10.488450050354004,
      "learning_rate": 1.3602047344849649e-05,
      "loss": 0.3355,
      "step": 1500
    },
    {
      "epoch": 0.9916826615483045,
      "grad_norm": 6.973298072814941,
      "learning_rate": 1.3388782256344637e-05,
      "loss": 0.3017,
      "step": 1550
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.8835572616762636,
      "eval_f1": 0.8734376826237961,
      "eval_loss": 0.3096177279949188,
      "eval_runtime": 70.7512,
      "eval_samples_per_second": 44.183,
      "eval_steps_per_second": 2.77,
      "step": 1563
    },
    {
      "epoch": 1.0236724248240563,
      "grad_norm": 7.356038570404053,
      "learning_rate": 1.3175517167839627e-05,
      "loss": 0.2326,
      "step": 1600
    },
    {
      "epoch": 1.055662188099808,
      "grad_norm": 12.944537162780762,
      "learning_rate": 1.2962252079334613e-05,
      "loss": 0.2559,
      "step": 1650
    },
    {
      "epoch": 1.0876519513755598,
      "grad_norm": 29.164777755737305,
      "learning_rate": 1.2748986990829602e-05,
      "loss": 0.2468,
      "step": 1700
    },
    {
      "epoch": 1.1196417146513116,
      "grad_norm": 4.75251579284668,
      "learning_rate": 1.2535721902324592e-05,
      "loss": 0.2388,
      "step": 1750
    },
    {
      "epoch": 1.1516314779270633,
      "grad_norm": 4.9411773681640625,
      "learning_rate": 1.2322456813819578e-05,
      "loss": 0.2788,
      "step": 1800
    },
    {
      "epoch": 1.183621241202815,
      "grad_norm": 12.609339714050293,
      "learning_rate": 1.2109191725314566e-05,
      "loss": 0.2343,
      "step": 1850
    },
    {
      "epoch": 1.2156110044785668,
      "grad_norm": 6.632409572601318,
      "learning_rate": 1.1895926636809556e-05,
      "loss": 0.2845,
      "step": 1900
    },
    {
      "epoch": 1.2476007677543186,
      "grad_norm": 13.227570533752441,
      "learning_rate": 1.1682661548304543e-05,
      "loss": 0.2484,
      "step": 1950
    },
    {
      "epoch": 1.2795905310300704,
      "grad_norm": 9.043356895446777,
      "learning_rate": 1.1469396459799531e-05,
      "loss": 0.2781,
      "step": 2000
    },
    {
      "epoch": 1.3115802943058221,
      "grad_norm": 1.4863983392715454,
      "learning_rate": 1.1256131371294521e-05,
      "loss": 0.2486,
      "step": 2050
    },
    {
      "epoch": 1.3435700575815739,
      "grad_norm": 11.874574661254883,
      "learning_rate": 1.1042866282789508e-05,
      "loss": 0.2319,
      "step": 2100
    },
    {
      "epoch": 1.3755598208573256,
      "grad_norm": 10.544360160827637,
      "learning_rate": 1.0829601194284496e-05,
      "loss": 0.2629,
      "step": 2150
    },
    {
      "epoch": 1.4075495841330774,
      "grad_norm": 1.597615122795105,
      "learning_rate": 1.0616336105779486e-05,
      "loss": 0.2424,
      "step": 2200
    },
    {
      "epoch": 1.4395393474088292,
      "grad_norm": 6.933614730834961,
      "learning_rate": 1.0403071017274472e-05,
      "loss": 0.2596,
      "step": 2250
    },
    {
      "epoch": 1.471529110684581,
      "grad_norm": 2.243976593017578,
      "learning_rate": 1.018980592876946e-05,
      "loss": 0.2746,
      "step": 2300
    },
    {
      "epoch": 1.5035188739603327,
      "grad_norm": 3.0834619998931885,
      "learning_rate": 9.976540840264449e-06,
      "loss": 0.2143,
      "step": 2350
    },
    {
      "epoch": 1.5355086372360844,
      "grad_norm": 21.907583236694336,
      "learning_rate": 9.763275751759437e-06,
      "loss": 0.2311,
      "step": 2400
    },
    {
      "epoch": 1.5674984005118362,
      "grad_norm": 1.5895259380340576,
      "learning_rate": 9.550010663254427e-06,
      "loss": 0.2487,
      "step": 2450
    },
    {
      "epoch": 1.599488163787588,
      "grad_norm": 10.364677429199219,
      "learning_rate": 9.336745574749414e-06,
      "loss": 0.2623,
      "step": 2500
    },
    {
      "epoch": 1.6314779270633397,
      "grad_norm": 10.95889663696289,
      "learning_rate": 9.123480486244403e-06,
      "loss": 0.2233,
      "step": 2550
    },
    {
      "epoch": 1.6634676903390915,
      "grad_norm": 21.338903427124023,
      "learning_rate": 8.910215397739392e-06,
      "loss": 0.25,
      "step": 2600
    },
    {
      "epoch": 1.6954574536148432,
      "grad_norm": 5.0549139976501465,
      "learning_rate": 8.696950309234378e-06,
      "loss": 0.2466,
      "step": 2650
    },
    {
      "epoch": 1.727447216890595,
      "grad_norm": 5.613836765289307,
      "learning_rate": 8.483685220729368e-06,
      "loss": 0.2603,
      "step": 2700
    },
    {
      "epoch": 1.7594369801663468,
      "grad_norm": 11.103569030761719,
      "learning_rate": 8.270420132224356e-06,
      "loss": 0.259,
      "step": 2750
    },
    {
      "epoch": 1.7914267434420985,
      "grad_norm": 3.3913040161132812,
      "learning_rate": 8.057155043719343e-06,
      "loss": 0.2458,
      "step": 2800
    },
    {
      "epoch": 1.8234165067178503,
      "grad_norm": 0.5389057993888855,
      "learning_rate": 7.843889955214333e-06,
      "loss": 0.2366,
      "step": 2850
    },
    {
      "epoch": 1.855406269993602,
      "grad_norm": 8.419164657592773,
      "learning_rate": 7.630624866709321e-06,
      "loss": 0.2716,
      "step": 2900
    },
    {
      "epoch": 1.8873960332693538,
      "grad_norm": 5.326470375061035,
      "learning_rate": 7.4173597782043085e-06,
      "loss": 0.2302,
      "step": 2950
    },
    {
      "epoch": 1.9193857965451055,
      "grad_norm": 6.134453296661377,
      "learning_rate": 7.204094689699297e-06,
      "loss": 0.2236,
      "step": 3000
    },
    {
      "epoch": 1.9513755598208573,
      "grad_norm": 13.053471565246582,
      "learning_rate": 6.990829601194286e-06,
      "loss": 0.2549,
      "step": 3050
    },
    {
      "epoch": 1.983365323096609,
      "grad_norm": 16.08262825012207,
      "learning_rate": 6.777564512689273e-06,
      "loss": 0.2065,
      "step": 3100
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.891234804862444,
      "eval_f1": 0.8816158583539794,
      "eval_loss": 0.32736125588417053,
      "eval_runtime": 69.7253,
      "eval_samples_per_second": 44.833,
      "eval_steps_per_second": 2.811,
      "step": 3126
    },
    {
      "epoch": 2.015355086372361,
      "grad_norm": 12.55622386932373,
      "learning_rate": 6.5642994241842614e-06,
      "loss": 0.1925,
      "step": 3150
    },
    {
      "epoch": 2.0473448496481126,
      "grad_norm": 9.97314167022705,
      "learning_rate": 6.3510343356792505e-06,
      "loss": 0.1697,
      "step": 3200
    },
    {
      "epoch": 2.0793346129238643,
      "grad_norm": 13.542362213134766,
      "learning_rate": 6.137769247174238e-06,
      "loss": 0.1759,
      "step": 3250
    },
    {
      "epoch": 2.111324376199616,
      "grad_norm": 2.1672303676605225,
      "learning_rate": 5.924504158669226e-06,
      "loss": 0.163,
      "step": 3300
    },
    {
      "epoch": 2.143314139475368,
      "grad_norm": 10.153268814086914,
      "learning_rate": 5.711239070164215e-06,
      "loss": 0.152,
      "step": 3350
    },
    {
      "epoch": 2.1753039027511196,
      "grad_norm": 30.200542449951172,
      "learning_rate": 5.497973981659203e-06,
      "loss": 0.2203,
      "step": 3400
    },
    {
      "epoch": 2.2072936660268714,
      "grad_norm": 7.482994079589844,
      "learning_rate": 5.284708893154191e-06,
      "loss": 0.1699,
      "step": 3450
    },
    {
      "epoch": 2.239283429302623,
      "grad_norm": 18.278284072875977,
      "learning_rate": 5.07144380464918e-06,
      "loss": 0.1624,
      "step": 3500
    },
    {
      "epoch": 2.271273192578375,
      "grad_norm": 15.992013931274414,
      "learning_rate": 4.858178716144167e-06,
      "loss": 0.1354,
      "step": 3550
    },
    {
      "epoch": 2.3032629558541267,
      "grad_norm": 8.946208953857422,
      "learning_rate": 4.644913627639156e-06,
      "loss": 0.176,
      "step": 3600
    },
    {
      "epoch": 2.3352527191298784,
      "grad_norm": 0.24096710979938507,
      "learning_rate": 4.431648539134144e-06,
      "loss": 0.1859,
      "step": 3650
    },
    {
      "epoch": 2.36724248240563,
      "grad_norm": 1.182032585144043,
      "learning_rate": 4.218383450629132e-06,
      "loss": 0.1643,
      "step": 3700
    },
    {
      "epoch": 2.399232245681382,
      "grad_norm": 14.64736270904541,
      "learning_rate": 4.005118362124121e-06,
      "loss": 0.1802,
      "step": 3750
    },
    {
      "epoch": 2.4312220089571337,
      "grad_norm": 28.632488250732422,
      "learning_rate": 3.791853273619109e-06,
      "loss": 0.1754,
      "step": 3800
    },
    {
      "epoch": 2.4632117722328855,
      "grad_norm": 12.297914505004883,
      "learning_rate": 3.5785881851140968e-06,
      "loss": 0.1654,
      "step": 3850
    },
    {
      "epoch": 2.495201535508637,
      "grad_norm": 8.145957946777344,
      "learning_rate": 3.3653230966090854e-06,
      "loss": 0.1754,
      "step": 3900
    },
    {
      "epoch": 2.527191298784389,
      "grad_norm": 1.9005142450332642,
      "learning_rate": 3.1520580081040737e-06,
      "loss": 0.1292,
      "step": 3950
    },
    {
      "epoch": 2.5591810620601407,
      "grad_norm": 23.53730010986328,
      "learning_rate": 2.9387929195990615e-06,
      "loss": 0.1386,
      "step": 4000
    },
    {
      "epoch": 2.5911708253358925,
      "grad_norm": 2.064774751663208,
      "learning_rate": 2.72552783109405e-06,
      "loss": 0.1667,
      "step": 4050
    },
    {
      "epoch": 2.6231605886116443,
      "grad_norm": 34.96377182006836,
      "learning_rate": 2.5122627425890384e-06,
      "loss": 0.2021,
      "step": 4100
    },
    {
      "epoch": 2.655150351887396,
      "grad_norm": 20.485219955444336,
      "learning_rate": 2.2989976540840266e-06,
      "loss": 0.1614,
      "step": 4150
    },
    {
      "epoch": 2.6871401151631478,
      "grad_norm": 13.900922775268555,
      "learning_rate": 2.085732565579015e-06,
      "loss": 0.1185,
      "step": 4200
    },
    {
      "epoch": 2.7191298784388995,
      "grad_norm": 0.96761155128479,
      "learning_rate": 1.872467477074003e-06,
      "loss": 0.1539,
      "step": 4250
    },
    {
      "epoch": 2.7511196417146513,
      "grad_norm": 1.9782915115356445,
      "learning_rate": 1.6592023885689915e-06,
      "loss": 0.1883,
      "step": 4300
    },
    {
      "epoch": 2.783109404990403,
      "grad_norm": 14.365890502929688,
      "learning_rate": 1.4459373000639796e-06,
      "loss": 0.2019,
      "step": 4350
    },
    {
      "epoch": 2.815099168266155,
      "grad_norm": 17.920162200927734,
      "learning_rate": 1.2326722115589678e-06,
      "loss": 0.1614,
      "step": 4400
    },
    {
      "epoch": 2.8470889315419066,
      "grad_norm": 7.565415859222412,
      "learning_rate": 1.019407123053956e-06,
      "loss": 0.1581,
      "step": 4450
    },
    {
      "epoch": 2.8790786948176583,
      "grad_norm": 4.440770149230957,
      "learning_rate": 8.061420345489445e-07,
      "loss": 0.1759,
      "step": 4500
    },
    {
      "epoch": 2.91106845809341,
      "grad_norm": 14.82466983795166,
      "learning_rate": 5.928769460439326e-07,
      "loss": 0.1288,
      "step": 4550
    },
    {
      "epoch": 2.943058221369162,
      "grad_norm": 15.672795295715332,
      "learning_rate": 3.796118575389209e-07,
      "loss": 0.205,
      "step": 4600
    },
    {
      "epoch": 2.9750479846449136,
      "grad_norm": 13.70653247833252,
      "learning_rate": 1.6634676903390917e-07,
      "loss": 0.1756,
      "step": 4650
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.8976327575175944,
      "eval_f1": 0.8871674993824495,
      "eval_loss": 0.3958832323551178,
      "eval_runtime": 71.1516,
      "eval_samples_per_second": 43.934,
      "eval_steps_per_second": 2.755,
      "step": 4689
    }
  ],
  "logging_steps": 50,
  "max_steps": 4689,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 824050668588336.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}