{
  "best_global_step": 4491,
  "best_metric": 0.889795918367347,
  "best_model_checkpoint": "./bert-tiny-heading-classifier\\checkpoint-4491",
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 4491,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.033400133600534405,
      "grad_norm": 4.833153247833252,
      "learning_rate": 2.45e-05,
      "loss": 0.6188,
      "step": 50
    },
    {
      "epoch": 0.06680026720106881,
      "grad_norm": 1.177682876586914,
      "learning_rate": 4.9500000000000004e-05,
      "loss": 0.412,
      "step": 100
    },
    {
      "epoch": 0.10020040080160321,
      "grad_norm": 1.452412724494934,
      "learning_rate": 4.979370158302459e-05,
      "loss": 0.2571,
      "step": 150
    },
    {
      "epoch": 0.13360053440213762,
      "grad_norm": 1.0122822523117065,
      "learning_rate": 4.958319299427417e-05,
      "loss": 0.2227,
      "step": 200
    },
    {
      "epoch": 0.16700066800267202,
      "grad_norm": 2.77670955657959,
      "learning_rate": 4.937268440552375e-05,
      "loss": 0.1801,
      "step": 250
    },
    {
      "epoch": 0.20040080160320642,
      "grad_norm": 1.1728415489196777,
      "learning_rate": 4.9162175816773325e-05,
      "loss": 0.214,
      "step": 300
    },
    {
      "epoch": 0.23380093520374082,
      "grad_norm": 1.3484389781951904,
      "learning_rate": 4.8951667228022907e-05,
      "loss": 0.183,
      "step": 350
    },
    {
      "epoch": 0.26720106880427524,
      "grad_norm": 1.534583330154419,
      "learning_rate": 4.874115863927248e-05,
      "loss": 0.2521,
      "step": 400
    },
    {
      "epoch": 0.30060120240480964,
      "grad_norm": 1.8582364320755005,
      "learning_rate": 4.853065005052206e-05,
      "loss": 0.2323,
      "step": 450
    },
    {
      "epoch": 0.33400133600534404,
      "grad_norm": 1.7614449262619019,
      "learning_rate": 4.832014146177164e-05,
      "loss": 0.1383,
      "step": 500
    },
    {
      "epoch": 0.36740146960587844,
      "grad_norm": 3.3362653255462646,
      "learning_rate": 4.810963287302122e-05,
      "loss": 0.1536,
      "step": 550
    },
    {
      "epoch": 0.40080160320641284,
      "grad_norm": 0.37206974625587463,
      "learning_rate": 4.78991242842708e-05,
      "loss": 0.1457,
      "step": 600
    },
    {
      "epoch": 0.43420173680694724,
      "grad_norm": 4.668218612670898,
      "learning_rate": 4.768861569552038e-05,
      "loss": 0.1306,
      "step": 650
    },
    {
      "epoch": 0.46760187040748163,
      "grad_norm": 0.343142569065094,
      "learning_rate": 4.747810710676996e-05,
      "loss": 0.1527,
      "step": 700
    },
    {
      "epoch": 0.501002004008016,
      "grad_norm": 1.6645559072494507,
      "learning_rate": 4.726759851801954e-05,
      "loss": 0.2129,
      "step": 750
    },
    {
      "epoch": 0.5344021376085505,
      "grad_norm": 0.23233528435230255,
      "learning_rate": 4.7057089929269117e-05,
      "loss": 0.109,
      "step": 800
    },
    {
      "epoch": 0.5678022712090849,
      "grad_norm": 0.19723999500274658,
      "learning_rate": 4.68465813405187e-05,
      "loss": 0.0968,
      "step": 850
    },
    {
      "epoch": 0.6012024048096193,
      "grad_norm": 0.19006595015525818,
      "learning_rate": 4.663607275176827e-05,
      "loss": 0.1053,
      "step": 900
    },
    {
      "epoch": 0.6346025384101537,
      "grad_norm": 0.1779528111219406,
      "learning_rate": 4.642556416301785e-05,
      "loss": 0.1444,
      "step": 950
    },
    {
      "epoch": 0.6680026720106881,
      "grad_norm": 0.2541033923625946,
      "learning_rate": 4.621505557426743e-05,
      "loss": 0.1217,
      "step": 1000
    },
    {
      "epoch": 0.7014028056112225,
      "grad_norm": 0.24320191144943237,
      "learning_rate": 4.600454698551701e-05,
      "loss": 0.1758,
      "step": 1050
    },
    {
      "epoch": 0.7348029392117569,
      "grad_norm": 0.15954767167568207,
      "learning_rate": 4.579403839676659e-05,
      "loss": 0.0936,
      "step": 1100
    },
    {
      "epoch": 0.7682030728122913,
      "grad_norm": 0.17993752658367157,
      "learning_rate": 4.5583529808016164e-05,
      "loss": 0.098,
      "step": 1150
    },
    {
      "epoch": 0.8016032064128257,
      "grad_norm": 0.22059965133666992,
      "learning_rate": 4.5373021219265746e-05,
      "loss": 0.1256,
      "step": 1200
    },
    {
      "epoch": 0.8350033400133601,
      "grad_norm": 0.3228837549686432,
      "learning_rate": 4.516251263051533e-05,
      "loss": 0.1258,
      "step": 1250
    },
    {
      "epoch": 0.8684034736138945,
      "grad_norm": 7.371569633483887,
      "learning_rate": 4.495200404176491e-05,
      "loss": 0.1075,
      "step": 1300
    },
    {
      "epoch": 0.9018036072144289,
      "grad_norm": 0.17234943807125092,
      "learning_rate": 4.474149545301449e-05,
      "loss": 0.1442,
      "step": 1350
    },
    {
      "epoch": 0.9352037408149633,
      "grad_norm": 6.882818222045898,
      "learning_rate": 4.453098686426406e-05,
      "loss": 0.0995,
      "step": 1400
    },
    {
      "epoch": 0.9686038744154977,
      "grad_norm": 0.19052863121032715,
      "learning_rate": 4.4320478275513645e-05,
      "loss": 0.1307,
      "step": 1450
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.9730241362991008,
      "eval_f1": 0.7373271889400922,
      "eval_loss": 0.07805395871400833,
      "eval_runtime": 1.8035,
      "eval_samples_per_second": 1171.611,
      "eval_steps_per_second": 146.937,
      "step": 1497
    },
    {
      "epoch": 1.002004008016032,
      "grad_norm": 4.44326639175415,
      "learning_rate": 4.410996968676322e-05,
      "loss": 0.1296,
      "step": 1500
    },
    {
      "epoch": 1.0354041416165665,
      "grad_norm": 0.25975069403648376,
      "learning_rate": 4.38994610980128e-05,
      "loss": 0.104,
      "step": 1550
    },
    {
      "epoch": 1.0688042752171008,
      "grad_norm": 0.07822130620479584,
      "learning_rate": 4.368895250926238e-05,
      "loss": 0.0573,
      "step": 1600
    },
    {
      "epoch": 1.1022044088176353,
      "grad_norm": 0.12303607165813446,
      "learning_rate": 4.3478443920511956e-05,
      "loss": 0.0852,
      "step": 1650
    },
    {
      "epoch": 1.1356045424181698,
      "grad_norm": 0.557543158531189,
      "learning_rate": 4.326793533176154e-05,
      "loss": 0.1073,
      "step": 1700
    },
    {
      "epoch": 1.169004676018704,
      "grad_norm": 0.113058902323246,
      "learning_rate": 4.305742674301111e-05,
      "loss": 0.1551,
      "step": 1750
    },
    {
      "epoch": 1.2024048096192386,
      "grad_norm": 13.832112312316895,
      "learning_rate": 4.284691815426069e-05,
      "loss": 0.0685,
      "step": 1800
    },
    {
      "epoch": 1.2358049432197729,
      "grad_norm": 0.07243803143501282,
      "learning_rate": 4.263640956551028e-05,
      "loss": 0.0749,
      "step": 1850
    },
    {
      "epoch": 1.2692050768203074,
      "grad_norm": 0.13568474352359772,
      "learning_rate": 4.2425900976759855e-05,
      "loss": 0.0547,
      "step": 1900
    },
    {
      "epoch": 1.3026052104208417,
      "grad_norm": 2.009340524673462,
      "learning_rate": 4.2215392388009436e-05,
      "loss": 0.1446,
      "step": 1950
    },
    {
      "epoch": 1.3360053440213762,
      "grad_norm": 19.283615112304688,
      "learning_rate": 4.200488379925901e-05,
      "loss": 0.0845,
      "step": 2000
    },
    {
      "epoch": 1.3694054776219104,
      "grad_norm": 0.06987571716308594,
      "learning_rate": 4.179437521050859e-05,
      "loss": 0.0538,
      "step": 2050
    },
    {
      "epoch": 1.402805611222445,
      "grad_norm": 7.164772987365723,
      "learning_rate": 4.158386662175817e-05,
      "loss": 0.0849,
      "step": 2100
    },
    {
      "epoch": 1.4362057448229792,
      "grad_norm": 0.1325785368680954,
      "learning_rate": 4.137335803300775e-05,
      "loss": 0.0688,
      "step": 2150
    },
    {
      "epoch": 1.4696058784235138,
      "grad_norm": 10.17591667175293,
      "learning_rate": 4.116284944425733e-05,
      "loss": 0.0725,
      "step": 2200
    },
    {
      "epoch": 1.503006012024048,
      "grad_norm": 42.686309814453125,
      "learning_rate": 4.09523408555069e-05,
      "loss": 0.1241,
      "step": 2250
    },
    {
      "epoch": 1.5364061456245826,
      "grad_norm": 0.5147813558578491,
      "learning_rate": 4.0741832266756484e-05,
      "loss": 0.1731,
      "step": 2300
    },
    {
      "epoch": 1.569806279225117,
      "grad_norm": 6.496396064758301,
      "learning_rate": 4.0531323678006065e-05,
      "loss": 0.0728,
      "step": 2350
    },
    {
      "epoch": 1.6032064128256514,
      "grad_norm": 0.08844903111457825,
      "learning_rate": 4.0320815089255646e-05,
      "loss": 0.0764,
      "step": 2400
    },
    {
      "epoch": 1.6366065464261856,
      "grad_norm": 0.055337030440568924,
      "learning_rate": 4.011030650050523e-05,
      "loss": 0.0978,
      "step": 2450
    },
    {
      "epoch": 1.6700066800267201,
      "grad_norm": 1.4330482482910156,
      "learning_rate": 3.98997979117548e-05,
      "loss": 0.1094,
      "step": 2500
    },
    {
      "epoch": 1.7034068136272547,
      "grad_norm": 0.0550062395632267,
      "learning_rate": 3.968928932300438e-05,
      "loss": 0.073,
      "step": 2550
    },
    {
      "epoch": 1.736806947227789,
      "grad_norm": 25.3029842376709,
      "learning_rate": 3.9478780734253964e-05,
      "loss": 0.0979,
      "step": 2600
    },
    {
      "epoch": 1.7702070808283232,
      "grad_norm": 0.13691256940364838,
      "learning_rate": 3.926827214550354e-05,
      "loss": 0.0936,
      "step": 2650
    },
    {
      "epoch": 1.8036072144288577,
      "grad_norm": 0.05481144040822983,
      "learning_rate": 3.905776355675312e-05,
      "loss": 0.0592,
      "step": 2700
    },
    {
      "epoch": 1.8370073480293923,
      "grad_norm": 0.1150580495595932,
      "learning_rate": 3.8847254968002694e-05,
      "loss": 0.0874,
      "step": 2750
    },
    {
      "epoch": 1.8704074816299265,
      "grad_norm": 0.04762636497616768,
      "learning_rate": 3.8636746379252275e-05,
      "loss": 0.0511,
      "step": 2800
    },
    {
      "epoch": 1.9038076152304608,
      "grad_norm": 0.08567818254232407,
      "learning_rate": 3.8426237790501856e-05,
      "loss": 0.1435,
      "step": 2850
    },
    {
      "epoch": 1.9372077488309953,
      "grad_norm": 0.05470910295844078,
      "learning_rate": 3.821572920175143e-05,
      "loss": 0.0494,
      "step": 2900
    },
    {
      "epoch": 1.9706078824315298,
      "grad_norm": 0.04921048507094383,
      "learning_rate": 3.800522061300101e-05,
      "loss": 0.0849,
      "step": 2950
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.9834358731661146,
      "eval_f1": 0.8535564853556485,
      "eval_loss": 0.05979253724217415,
      "eval_runtime": 1.8042,
      "eval_samples_per_second": 1171.183,
      "eval_steps_per_second": 146.883,
      "step": 2994
    },
    {
      "epoch": 2.004008016032064,
      "grad_norm": 0.28419962525367737,
      "learning_rate": 3.779471202425059e-05,
      "loss": 0.1224,
      "step": 3000
    },
    {
      "epoch": 2.0374081496325984,
      "grad_norm": 0.06919294595718384,
      "learning_rate": 3.7584203435500174e-05,
      "loss": 0.0558,
      "step": 3050
    },
    {
      "epoch": 2.070808283233133,
      "grad_norm": 0.04579733684659004,
      "learning_rate": 3.7373694846749755e-05,
      "loss": 0.0493,
      "step": 3100
    },
    {
      "epoch": 2.1042084168336674,
      "grad_norm": 17.734146118164062,
      "learning_rate": 3.716318625799933e-05,
      "loss": 0.0765,
      "step": 3150
    },
    {
      "epoch": 2.1376085504342015,
      "grad_norm": 0.1230553463101387,
      "learning_rate": 3.695267766924891e-05,
      "loss": 0.0449,
      "step": 3200
    },
    {
      "epoch": 2.171008684034736,
      "grad_norm": 0.10777395218610764,
      "learning_rate": 3.6742169080498485e-05,
      "loss": 0.0994,
      "step": 3250
    },
    {
      "epoch": 2.2044088176352705,
      "grad_norm": 0.058282237499952316,
      "learning_rate": 3.6531660491748066e-05,
      "loss": 0.0603,
      "step": 3300
    },
    {
      "epoch": 2.237808951235805,
      "grad_norm": 0.03964553028345108,
      "learning_rate": 3.632115190299765e-05,
      "loss": 0.0696,
      "step": 3350
    },
    {
      "epoch": 2.2712090848363395,
      "grad_norm": 0.07650401443243027,
      "learning_rate": 3.611064331424722e-05,
      "loss": 0.0969,
      "step": 3400
    },
    {
      "epoch": 2.3046092184368736,
      "grad_norm": 43.522613525390625,
      "learning_rate": 3.59001347254968e-05,
      "loss": 0.0784,
      "step": 3450
    },
    {
      "epoch": 2.338009352037408,
      "grad_norm": 1.7156099081039429,
      "learning_rate": 3.568962613674638e-05,
      "loss": 0.0947,
      "step": 3500
    },
    {
      "epoch": 2.3714094856379426,
      "grad_norm": 0.03075309470295906,
      "learning_rate": 3.547911754799596e-05,
      "loss": 0.0782,
      "step": 3550
    },
    {
      "epoch": 2.404809619238477,
      "grad_norm": 0.032964251935482025,
      "learning_rate": 3.526860895924554e-05,
      "loss": 0.0692,
      "step": 3600
    },
    {
      "epoch": 2.438209752839011,
      "grad_norm": 0.038821104913949966,
      "learning_rate": 3.505810037049512e-05,
      "loss": 0.086,
      "step": 3650
    },
    {
      "epoch": 2.4716098864395457,
      "grad_norm": 0.03800545632839203,
      "learning_rate": 3.48475917817447e-05,
      "loss": 0.0151,
      "step": 3700
    },
    {
      "epoch": 2.50501002004008,
      "grad_norm": 0.027952782809734344,
      "learning_rate": 3.4637083192994276e-05,
      "loss": 0.0373,
      "step": 3750
    },
    {
      "epoch": 2.5384101536406147,
      "grad_norm": 0.02669738233089447,
      "learning_rate": 3.442657460424386e-05,
      "loss": 0.073,
      "step": 3800
    },
    {
      "epoch": 2.5718102872411492,
      "grad_norm": 1.2310236692428589,
      "learning_rate": 3.421606601549344e-05,
      "loss": 0.0687,
      "step": 3850
    },
    {
      "epoch": 2.6052104208416833,
      "grad_norm": 0.035898588597774506,
      "learning_rate": 3.400555742674301e-05,
      "loss": 0.0668,
      "step": 3900
    },
    {
      "epoch": 2.638610554442218,
      "grad_norm": 64.2850112915039,
      "learning_rate": 3.3795048837992594e-05,
      "loss": 0.0715,
      "step": 3950
    },
    {
      "epoch": 2.6720106880427523,
      "grad_norm": 2.408921480178833,
      "learning_rate": 3.358454024924217e-05,
      "loss": 0.0703,
      "step": 4000
    },
    {
      "epoch": 2.7054108216432864,
      "grad_norm": 0.048116762191057205,
      "learning_rate": 3.337403166049175e-05,
      "loss": 0.0608,
      "step": 4050
    },
    {
      "epoch": 2.738810955243821,
      "grad_norm": 0.0268548671156168,
      "learning_rate": 3.3163523071741324e-05,
      "loss": 0.056,
      "step": 4100
    },
    {
      "epoch": 2.7722110888443554,
      "grad_norm": 0.02951057441532612,
      "learning_rate": 3.2953014482990905e-05,
      "loss": 0.0716,
      "step": 4150
    },
    {
      "epoch": 2.80561122244489,
      "grad_norm": 0.04967193678021431,
      "learning_rate": 3.2742505894240486e-05,
      "loss": 0.0821,
      "step": 4200
    },
    {
      "epoch": 2.8390113560454244,
      "grad_norm": 0.06555014103651047,
      "learning_rate": 3.253199730549007e-05,
      "loss": 0.0894,
      "step": 4250
    },
    {
      "epoch": 2.8724114896459585,
      "grad_norm": 0.07022852450609207,
      "learning_rate": 3.232148871673965e-05,
      "loss": 0.0818,
      "step": 4300
    },
    {
      "epoch": 2.905811623246493,
      "grad_norm": 0.031209150329232216,
      "learning_rate": 3.211098012798922e-05,
      "loss": 0.0808,
      "step": 4350
    },
    {
      "epoch": 2.9392117568470275,
      "grad_norm": 0.026557113975286484,
      "learning_rate": 3.1900471539238804e-05,
      "loss": 0.0345,
      "step": 4400
    },
    {
      "epoch": 2.9726118904475616,
      "grad_norm": 0.24660851061344147,
      "learning_rate": 3.1689962950488385e-05,
      "loss": 0.0685,
      "step": 4450
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.987221959299574,
      "eval_f1": 0.889795918367347,
      "eval_loss": 0.0543304942548275,
      "eval_runtime": 1.7917,
      "eval_samples_per_second": 1179.344,
      "eval_steps_per_second": 147.906,
      "step": 4491
    }
  ],
  "logging_steps": 50,
  "max_steps": 11976,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 8,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 22815406264320.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}