{
  "best_global_step": 2667,
  "best_metric": 0.8418311044589408,
  "best_model_checkpoint": "3class_classification_results/run_20250610_161638/checkpoints/checkpoint-2667",
  "epoch": 10.0,
  "eval_steps": 500,
  "global_step": 8890,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.056274620146314014,
      "grad_norm": 1.2899854183197021,
      "learning_rate": 1.1023622047244096e-06,
      "loss": 1.1147,
      "step": 50
    },
    {
      "epoch": 0.11254924029262803,
      "grad_norm": 2.2132718563079834,
      "learning_rate": 2.227221597300338e-06,
      "loss": 1.0842,
      "step": 100
    },
    {
      "epoch": 0.16882386043894204,
      "grad_norm": 2.2031383514404297,
      "learning_rate": 3.3520809898762658e-06,
      "loss": 0.9439,
      "step": 150
    },
    {
      "epoch": 0.22509848058525606,
      "grad_norm": 2.492532730102539,
      "learning_rate": 4.476940382452194e-06,
      "loss": 0.7497,
      "step": 200
    },
    {
      "epoch": 0.28137310073157007,
      "grad_norm": 5.415732383728027,
      "learning_rate": 5.6017997750281215e-06,
      "loss": 0.5953,
      "step": 250
    },
    {
      "epoch": 0.3376477208778841,
      "grad_norm": 3.040374755859375,
      "learning_rate": 6.726659167604051e-06,
      "loss": 0.47,
      "step": 300
    },
    {
      "epoch": 0.3939223410241981,
      "grad_norm": 2.420459747314453,
      "learning_rate": 7.851518560179978e-06,
      "loss": 0.4128,
      "step": 350
    },
    {
      "epoch": 0.4501969611705121,
      "grad_norm": 2.526780128479004,
      "learning_rate": 8.976377952755906e-06,
      "loss": 0.3899,
      "step": 400
    },
    {
      "epoch": 0.5064715813168261,
      "grad_norm": 6.837190628051758,
      "learning_rate": 1.0101237345331833e-05,
      "loss": 0.4092,
      "step": 450
    },
    {
      "epoch": 0.5627462014631401,
      "grad_norm": 2.692833185195923,
      "learning_rate": 1.1226096737907763e-05,
      "loss": 0.3887,
      "step": 500
    },
    {
      "epoch": 0.6190208216094542,
      "grad_norm": 1.8825379610061646,
      "learning_rate": 1.2350956130483691e-05,
      "loss": 0.3891,
      "step": 550
    },
    {
      "epoch": 0.6752954417557682,
      "grad_norm": 3.277323007583618,
      "learning_rate": 1.3453318335208101e-05,
      "loss": 0.3878,
      "step": 600
    },
    {
      "epoch": 0.7315700619020822,
      "grad_norm": 2.1386890411376953,
      "learning_rate": 1.4578177727784028e-05,
      "loss": 0.3823,
      "step": 650
    },
    {
      "epoch": 0.7878446820483962,
      "grad_norm": 5.464720249176025,
      "learning_rate": 1.5703037120359956e-05,
      "loss": 0.3854,
      "step": 700
    },
    {
      "epoch": 0.8441193021947102,
      "grad_norm": 2.195767402648926,
      "learning_rate": 1.6827896512935883e-05,
      "loss": 0.383,
      "step": 750
    },
    {
      "epoch": 0.9003939223410242,
      "grad_norm": 3.1674859523773193,
      "learning_rate": 1.7952755905511813e-05,
      "loss": 0.3622,
      "step": 800
    },
    {
      "epoch": 0.9566685424873382,
      "grad_norm": 2.137552499771118,
      "learning_rate": 1.9077615298087743e-05,
      "loss": 0.3679,
      "step": 850
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.8270935960591133,
      "eval_f1_fix": 0.9979395604395604,
      "eval_f1_issue": 0.8027574563871693,
      "eval_f1_symptom": 0.77491961414791,
      "eval_f1_weighted": 0.8262810094793769,
      "eval_loss": 0.3870583176612854,
      "eval_precision_fix": 0.9965706447187929,
      "eval_precision_issue": 0.7557615894039735,
      "eval_precision_symptom": 0.8347765846899896,
      "eval_precision_weighted": 0.8313149571029931,
      "eval_recall_fix": 0.9993122420907841,
      "eval_recall_issue": 0.855985598559856,
      "eval_recall_symptom": 0.723072307230723,
      "eval_recall_weighted": 0.8270935960591133,
      "eval_runtime": 50.1716,
      "eval_samples_per_second": 161.845,
      "eval_steps_per_second": 2.531,
      "step": 889
    },
    {
      "epoch": 1.012380416432189,
      "grad_norm": 2.139568567276001,
      "learning_rate": 1.9977502812148483e-05,
      "loss": 0.3606,
      "step": 900
    },
    {
      "epoch": 1.068655036578503,
      "grad_norm": 1.6121031045913696,
      "learning_rate": 1.9852518435195604e-05,
      "loss": 0.3454,
      "step": 950
    },
    {
      "epoch": 1.124929656724817,
      "grad_norm": 2.136821985244751,
      "learning_rate": 1.972753405824272e-05,
      "loss": 0.3622,
      "step": 1000
    },
    {
      "epoch": 1.181204276871131,
      "grad_norm": 2.97175669670105,
      "learning_rate": 1.960254968128984e-05,
      "loss": 0.3678,
      "step": 1050
    },
    {
      "epoch": 1.237478897017445,
      "grad_norm": 2.862729549407959,
      "learning_rate": 1.947756530433696e-05,
      "loss": 0.3742,
      "step": 1100
    },
    {
      "epoch": 1.293753517163759,
      "grad_norm": 1.6733105182647705,
      "learning_rate": 1.935258092738408e-05,
      "loss": 0.3575,
      "step": 1150
    },
    {
      "epoch": 1.350028137310073,
      "grad_norm": 2.2922866344451904,
      "learning_rate": 1.92275965504312e-05,
      "loss": 0.3593,
      "step": 1200
    },
    {
      "epoch": 1.406302757456387,
      "grad_norm": 1.7616901397705078,
      "learning_rate": 1.9102612173478316e-05,
      "loss": 0.371,
      "step": 1250
    },
    {
      "epoch": 1.4625773776027011,
      "grad_norm": 3.089038610458374,
      "learning_rate": 1.8977627796525436e-05,
      "loss": 0.3577,
      "step": 1300
    },
    {
      "epoch": 1.5188519977490151,
      "grad_norm": 2.3721745014190674,
      "learning_rate": 1.8852643419572557e-05,
      "loss": 0.3542,
      "step": 1350
    },
    {
      "epoch": 1.5751266178953292,
      "grad_norm": 4.346639156341553,
      "learning_rate": 1.8727659042619674e-05,
      "loss": 0.3562,
      "step": 1400
    },
    {
      "epoch": 1.6314012380416432,
      "grad_norm": 1.6947968006134033,
      "learning_rate": 1.8602674665666794e-05,
      "loss": 0.344,
      "step": 1450
    },
    {
      "epoch": 1.6876758581879572,
      "grad_norm": 3.7869679927825928,
      "learning_rate": 1.847769028871391e-05,
      "loss": 0.3459,
      "step": 1500
    },
    {
      "epoch": 1.7439504783342712,
      "grad_norm": 3.708456039428711,
      "learning_rate": 1.835270591176103e-05,
      "loss": 0.3502,
      "step": 1550
    },
    {
      "epoch": 1.8002250984805852,
      "grad_norm": 1.927826166152954,
      "learning_rate": 1.8227721534808152e-05,
      "loss": 0.343,
      "step": 1600
    },
    {
      "epoch": 1.8564997186268992,
      "grad_norm": 3.222015142440796,
      "learning_rate": 1.810273715785527e-05,
      "loss": 0.3358,
      "step": 1650
    },
    {
      "epoch": 1.9127743387732132,
      "grad_norm": 2.057603597640991,
      "learning_rate": 1.797775278090239e-05,
      "loss": 0.3532,
      "step": 1700
    },
    {
      "epoch": 1.9690489589195272,
      "grad_norm": 3.3761789798736572,
      "learning_rate": 1.7852768403949506e-05,
      "loss": 0.3392,
      "step": 1750
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.8397783251231528,
      "eval_f1_fix": 0.9979395604395604,
      "eval_f1_issue": 0.7946936197094125,
      "eval_f1_symptom": 0.8147512864493996,
      "eval_f1_weighted": 0.839320688806212,
      "eval_loss": 0.3833693265914917,
      "eval_precision_fix": 0.9965706447187929,
      "eval_precision_issue": 0.8389463154384795,
      "eval_precision_symptom": 0.778050778050778,
      "eval_precision_weighted": 0.8421754963079828,
      "eval_recall_fix": 0.9993122420907841,
      "eval_recall_issue": 0.7548754875487549,
      "eval_recall_symptom": 0.8550855085508551,
      "eval_recall_weighted": 0.8397783251231528,
      "eval_runtime": 51.072,
      "eval_samples_per_second": 158.991,
      "eval_steps_per_second": 2.487,
      "step": 1778
    },
    {
      "epoch": 2.024760832864378,
      "grad_norm": 1.9975008964538574,
      "learning_rate": 1.7727784026996627e-05,
      "loss": 0.3269,
      "step": 1800
    },
    {
      "epoch": 2.081035453010692,
      "grad_norm": 1.5703338384628296,
      "learning_rate": 1.7602799650043747e-05,
      "loss": 0.3253,
      "step": 1850
    },
    {
      "epoch": 2.137310073157006,
      "grad_norm": 1.7055504322052002,
      "learning_rate": 1.7477815273090864e-05,
      "loss": 0.3076,
      "step": 1900
    },
    {
      "epoch": 2.19358469330332,
      "grad_norm": 2.055391550064087,
      "learning_rate": 1.7352830896137985e-05,
      "loss": 0.3175,
      "step": 1950
    },
    {
      "epoch": 2.249859313449634,
      "grad_norm": 2.3307132720947266,
      "learning_rate": 1.72278465191851e-05,
      "loss": 0.3166,
      "step": 2000
    },
    {
      "epoch": 2.306133933595948,
      "grad_norm": 1.9831819534301758,
      "learning_rate": 1.7102862142232222e-05,
      "loss": 0.3272,
      "step": 2050
    },
    {
      "epoch": 2.362408553742262,
      "grad_norm": 2.037224292755127,
      "learning_rate": 1.6977877765279342e-05,
      "loss": 0.3217,
      "step": 2100
    },
    {
      "epoch": 2.418683173888576,
      "grad_norm": 2.2483575344085693,
      "learning_rate": 1.6852893388326463e-05,
      "loss": 0.3483,
      "step": 2150
    },
    {
      "epoch": 2.47495779403489,
      "grad_norm": 2.124046564102173,
      "learning_rate": 1.672790901137358e-05,
      "loss": 0.3032,
      "step": 2200
    },
    {
      "epoch": 2.531232414181204,
      "grad_norm": 2.424370288848877,
      "learning_rate": 1.6602924634420697e-05,
      "loss": 0.3173,
      "step": 2250
    },
    {
      "epoch": 2.587507034327518,
      "grad_norm": 3.231492757797241,
      "learning_rate": 1.6477940257467817e-05,
      "loss": 0.3346,
      "step": 2300
    },
    {
      "epoch": 2.643781654473832,
      "grad_norm": 1.850974440574646,
      "learning_rate": 1.6355455568053996e-05,
      "loss": 0.3341,
      "step": 2350
    },
    {
      "epoch": 2.700056274620146,
      "grad_norm": 2.1213784217834473,
      "learning_rate": 1.6230471191101113e-05,
      "loss": 0.3137,
      "step": 2400
    },
    {
      "epoch": 2.75633089476646,
      "grad_norm": 1.9203131198883057,
      "learning_rate": 1.6105486814148233e-05,
      "loss": 0.3434,
      "step": 2450
    },
    {
      "epoch": 2.812605514912774,
      "grad_norm": 2.571561336517334,
      "learning_rate": 1.598050243719535e-05,
      "loss": 0.311,
      "step": 2500
    },
    {
      "epoch": 2.8688801350590882,
      "grad_norm": 1.646263837814331,
      "learning_rate": 1.585551806024247e-05,
      "loss": 0.3265,
      "step": 2550
    },
    {
      "epoch": 2.9251547552054022,
      "grad_norm": 3.992446184158325,
      "learning_rate": 1.573053368328959e-05,
      "loss": 0.3231,
      "step": 2600
    },
    {
      "epoch": 2.9814293753517163,
      "grad_norm": 3.225480794906616,
      "learning_rate": 1.5605549306336708e-05,
      "loss": 0.3164,
      "step": 2650
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.8419950738916256,
      "eval_f1_fix": 0.998282377189969,
      "eval_f1_issue": 0.8020994133991973,
      "eval_f1_symptom": 0.8133119252663845,
      "eval_f1_weighted": 0.8418311044589408,
      "eval_loss": 0.3592279255390167,
      "eval_precision_fix": 0.997254632807138,
      "eval_precision_issue": 0.8260731319554849,
      "eval_precision_symptom": 0.7919272313814667,
      "eval_precision_weighted": 0.8427097841260638,
      "eval_recall_fix": 0.9993122420907841,
      "eval_recall_issue": 0.7794779477947795,
      "eval_recall_symptom": 0.8358835883588359,
      "eval_recall_weighted": 0.8419950738916256,
      "eval_runtime": 51.3031,
      "eval_samples_per_second": 158.275,
      "eval_steps_per_second": 2.475,
      "step": 2667
    },
    {
      "epoch": 3.037141249296567,
      "grad_norm": 2.5545248985290527,
      "learning_rate": 1.548056492938383e-05,
      "loss": 0.3147,
      "step": 2700
    },
    {
      "epoch": 3.093415869442881,
      "grad_norm": 1.628539800643921,
      "learning_rate": 1.5355580552430946e-05,
      "loss": 0.2979,
      "step": 2750
    },
    {
      "epoch": 3.1496904895891955,
      "grad_norm": 1.9369264841079712,
      "learning_rate": 1.5230596175478068e-05,
      "loss": 0.2869,
      "step": 2800
    },
    {
      "epoch": 3.205965109735509,
      "grad_norm": 2.5903725624084473,
      "learning_rate": 1.5105611798525186e-05,
      "loss": 0.272,
      "step": 2850
    },
    {
      "epoch": 3.2622397298818235,
      "grad_norm": 2.560624361038208,
      "learning_rate": 1.4980627421572305e-05,
      "loss": 0.2914,
      "step": 2900
    },
    {
      "epoch": 3.3185143500281375,
      "grad_norm": 3.008889675140381,
      "learning_rate": 1.4855643044619424e-05,
      "loss": 0.2858,
      "step": 2950
    },
    {
      "epoch": 3.3747889701744516,
      "grad_norm": 2.466245651245117,
      "learning_rate": 1.4730658667666542e-05,
      "loss": 0.3021,
      "step": 3000
    },
    {
      "epoch": 3.4310635903207656,
      "grad_norm": 2.1926143169403076,
      "learning_rate": 1.4605674290713663e-05,
      "loss": 0.2961,
      "step": 3050
    },
    {
      "epoch": 3.4873382104670796,
      "grad_norm": 2.4998419284820557,
      "learning_rate": 1.4480689913760782e-05,
      "loss": 0.2851,
      "step": 3100
    },
    {
      "epoch": 3.5436128306133936,
      "grad_norm": 2.801224946975708,
      "learning_rate": 1.43557055368079e-05,
      "loss": 0.3003,
      "step": 3150
    },
    {
      "epoch": 3.5998874507597076,
      "grad_norm": 1.9596877098083496,
      "learning_rate": 1.4230721159855019e-05,
      "loss": 0.3019,
      "step": 3200
    },
    {
      "epoch": 3.6561620709060216,
      "grad_norm": 1.780312418937683,
      "learning_rate": 1.4105736782902138e-05,
      "loss": 0.2962,
      "step": 3250
    },
    {
      "epoch": 3.7124366910523356,
      "grad_norm": 3.569025754928589,
      "learning_rate": 1.3980752405949258e-05,
      "loss": 0.2923,
      "step": 3300
    },
    {
      "epoch": 3.7687113111986497,
      "grad_norm": 2.3551599979400635,
      "learning_rate": 1.3855768028996377e-05,
      "loss": 0.3113,
      "step": 3350
    },
    {
      "epoch": 3.8249859313449637,
      "grad_norm": 1.77907133102417,
      "learning_rate": 1.3730783652043495e-05,
      "loss": 0.3056,
      "step": 3400
    },
    {
      "epoch": 3.8812605514912777,
      "grad_norm": 4.4045610427856445,
      "learning_rate": 1.3605799275090614e-05,
      "loss": 0.2942,
      "step": 3450
    },
    {
      "epoch": 3.9375351716375917,
      "grad_norm": 1.8489115238189697,
      "learning_rate": 1.3480814898137733e-05,
      "loss": 0.3136,
      "step": 3500
    },
    {
      "epoch": 3.9938097917839057,
      "grad_norm": 1.9342581033706665,
      "learning_rate": 1.3355830521184853e-05,
      "loss": 0.3061,
      "step": 3550
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.84064039408867,
      "eval_f1_fix": 0.998282377189969,
      "eval_f1_issue": 0.7974306752310826,
      "eval_f1_symptom": 0.8142816009213936,
      "eval_f1_weighted": 0.8403127577401992,
      "eval_loss": 0.3966975510120392,
      "eval_precision_fix": 0.997254632807138,
      "eval_precision_issue": 0.8344262295081967,
      "eval_precision_symptom": 0.782729034043731,
      "eval_precision_weighted": 0.8423628977241568,
      "eval_recall_fix": 0.9993122420907841,
      "eval_recall_issue": 0.7635763576357636,
      "eval_recall_symptom": 0.8484848484848485,
      "eval_recall_weighted": 0.84064039408867,
      "eval_runtime": 50.3322,
      "eval_samples_per_second": 161.328,
      "eval_steps_per_second": 2.523,
      "step": 3556
    },
    {
      "epoch": 4.049521665728756,
      "grad_norm": 2.619023084640503,
      "learning_rate": 1.3230846144231972e-05,
      "loss": 0.2784,
      "step": 3600
    },
    {
      "epoch": 4.10579628587507,
      "grad_norm": 2.1567516326904297,
      "learning_rate": 1.310586176727909e-05,
      "loss": 0.2836,
      "step": 3650
    },
    {
      "epoch": 4.162070906021384,
      "grad_norm": 1.734316110610962,
      "learning_rate": 1.298087739032621e-05,
      "loss": 0.2725,
      "step": 3700
    },
    {
      "epoch": 4.218345526167698,
      "grad_norm": 2.4064743518829346,
      "learning_rate": 1.2855893013373328e-05,
      "loss": 0.2674,
      "step": 3750
    },
    {
      "epoch": 4.274620146314012,
      "grad_norm": 4.878196716308594,
      "learning_rate": 1.2730908636420448e-05,
      "loss": 0.2614,
      "step": 3800
    },
    {
      "epoch": 4.330894766460326,
      "grad_norm": 3.776557683944702,
      "learning_rate": 1.2605924259467567e-05,
      "loss": 0.2759,
      "step": 3850
    },
    {
      "epoch": 4.38716938660664,
      "grad_norm": 2.9769930839538574,
      "learning_rate": 1.2480939882514686e-05,
      "loss": 0.27,
      "step": 3900
    },
    {
      "epoch": 4.443444006752954,
      "grad_norm": 2.369786500930786,
      "learning_rate": 1.2355955505561805e-05,
      "loss": 0.2661,
      "step": 3950
    },
    {
      "epoch": 4.499718626899268,
      "grad_norm": 3.501908540725708,
      "learning_rate": 1.2230971128608923e-05,
      "loss": 0.2756,
      "step": 4000
    },
    {
      "epoch": 4.555993247045582,
      "grad_norm": 3.2472310066223145,
      "learning_rate": 1.2105986751656045e-05,
      "loss": 0.2638,
      "step": 4050
    },
    {
      "epoch": 4.612267867191896,
      "grad_norm": 1.875646710395813,
      "learning_rate": 1.1981002374703164e-05,
      "loss": 0.2455,
      "step": 4100
    },
    {
      "epoch": 4.66854248733821,
      "grad_norm": 2.917334794998169,
      "learning_rate": 1.1856017997750283e-05,
      "loss": 0.2608,
      "step": 4150
    },
    {
      "epoch": 4.724817107484524,
      "grad_norm": 2.597090482711792,
      "learning_rate": 1.1731033620797401e-05,
      "loss": 0.2565,
      "step": 4200
    },
    {
      "epoch": 4.781091727630838,
      "grad_norm": 2.790834903717041,
      "learning_rate": 1.160604924384452e-05,
      "loss": 0.2724,
      "step": 4250
    },
    {
      "epoch": 4.837366347777152,
      "grad_norm": 2.6425554752349854,
      "learning_rate": 1.148106486689164e-05,
      "loss": 0.2826,
      "step": 4300
    },
    {
      "epoch": 4.893640967923466,
      "grad_norm": 2.039649486541748,
      "learning_rate": 1.135608048993876e-05,
      "loss": 0.2584,
      "step": 4350
    },
    {
      "epoch": 4.94991558806978,
      "grad_norm": 3.3404409885406494,
      "learning_rate": 1.1231096112985878e-05,
      "loss": 0.2704,
      "step": 4400
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.8385467980295567,
      "eval_f1_fix": 0.9986254295532646,
      "eval_f1_issue": 0.7967101179391682,
      "eval_f1_symptom": 0.8100493755445832,
      "eval_f1_weighted": 0.8383412273832256,
      "eval_loss": 0.3984265625476837,
      "eval_precision_fix": 0.9979395604395604,
      "eval_precision_issue": 0.825136612021858,
      "eval_precision_symptom": 0.7849704475091472,
      "eval_precision_weighted": 0.839592481563542,
      "eval_recall_fix": 0.9993122420907841,
      "eval_recall_issue": 0.7701770177017702,
      "eval_recall_symptom": 0.8367836783678367,
      "eval_recall_weighted": 0.8385467980295567,
      "eval_runtime": 103.4526,
      "eval_samples_per_second": 78.49,
      "eval_steps_per_second": 1.228,
      "step": 4445
    },
    {
      "epoch": 5.0056274620146315,
      "grad_norm": 1.412779688835144,
      "learning_rate": 1.1106111736032997e-05,
      "loss": 0.2558,
      "step": 4450
    },
    {
      "epoch": 5.0619020821609455,
      "grad_norm": 3.5638949871063232,
      "learning_rate": 1.0981127359080115e-05,
      "loss": 0.2201,
      "step": 4500
    },
    {
      "epoch": 5.1181767023072595,
      "grad_norm": 2.8439533710479736,
      "learning_rate": 1.0856142982127236e-05,
      "loss": 0.2406,
      "step": 4550
    },
    {
      "epoch": 5.1744513224535735,
      "grad_norm": 3.5135629177093506,
      "learning_rate": 1.0731158605174354e-05,
      "loss": 0.2282,
      "step": 4600
    },
    {
      "epoch": 5.2307259425998875,
      "grad_norm": 3.6669528484344482,
      "learning_rate": 1.0606174228221473e-05,
      "loss": 0.2266,
      "step": 4650
    },
    {
      "epoch": 5.287000562746202,
      "grad_norm": 3.3074584007263184,
      "learning_rate": 1.0481189851268592e-05,
      "loss": 0.2635,
      "step": 4700
    },
    {
      "epoch": 5.343275182892516,
      "grad_norm": 2.5459887981414795,
      "learning_rate": 1.035620547431571e-05,
      "loss": 0.2471,
      "step": 4750
    },
    {
      "epoch": 5.39954980303883,
      "grad_norm": 3.6159589290618896,
      "learning_rate": 1.0231221097362831e-05,
      "loss": 0.2443,
      "step": 4800
    },
    {
      "epoch": 5.455824423185144,
      "grad_norm": 4.78350830078125,
      "learning_rate": 1.010623672040995e-05,
      "loss": 0.2465,
      "step": 4850
    },
    {
      "epoch": 5.512099043331458,
      "grad_norm": 3.147178888320923,
      "learning_rate": 9.981252343457068e-06,
      "loss": 0.2523,
      "step": 4900
    },
    {
      "epoch": 5.568373663477772,
      "grad_norm": 3.182088613510132,
      "learning_rate": 9.856267966504189e-06,
      "loss": 0.2382,
      "step": 4950
    },
    {
      "epoch": 5.624648283624086,
      "grad_norm": 4.077524662017822,
      "learning_rate": 9.733783277090364e-06,
      "loss": 0.2342,
      "step": 5000
    },
    {
      "epoch": 5.6809229037704,
      "grad_norm": 4.5491509437561035,
      "learning_rate": 9.608798900137485e-06,
      "loss": 0.2438,
      "step": 5050
    },
    {
      "epoch": 5.737197523916714,
      "grad_norm": 3.9461286067962646,
      "learning_rate": 9.483814523184603e-06,
      "loss": 0.2294,
      "step": 5100
    },
    {
      "epoch": 5.793472144063028,
      "grad_norm": 2.644874095916748,
      "learning_rate": 9.358830146231722e-06,
      "loss": 0.2431,
      "step": 5150
    },
    {
      "epoch": 5.849746764209342,
      "grad_norm": 3.191740036010742,
      "learning_rate": 9.23384576927884e-06,
      "loss": 0.2455,
      "step": 5200
    },
    {
      "epoch": 5.906021384355656,
      "grad_norm": 2.2712607383728027,
      "learning_rate": 9.10886139232596e-06,
      "loss": 0.2407,
      "step": 5250
    },
    {
      "epoch": 5.96229600450197,
      "grad_norm": 2.775583505630493,
      "learning_rate": 8.98387701537308e-06,
      "loss": 0.2551,
      "step": 5300
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.8370689655172414,
      "eval_f1_fix": 0.9979395604395604,
      "eval_f1_issue": 0.8008436275986743,
      "eval_f1_symptom": 0.8029895366218236,
      "eval_f1_weighted": 0.8370172484268523,
      "eval_loss": 0.4547211527824402,
      "eval_precision_fix": 0.9965706447187929,
      "eval_precision_issue": 0.8042360060514372,
      "eval_precision_symptom": 0.8001191540065534,
      "eval_precision_weighted": 0.8369863874254196,
      "eval_recall_fix": 0.9993122420907841,
      "eval_recall_issue": 0.7974797479747975,
      "eval_recall_symptom": 0.8058805880588059,
      "eval_recall_weighted": 0.8370689655172414,
      "eval_runtime": 52.2924,
      "eval_samples_per_second": 155.281,
      "eval_steps_per_second": 2.429,
      "step": 5334
    },
    {
      "epoch": 6.01800787844682,
      "grad_norm": 2.528533697128296,
      "learning_rate": 8.858892638420198e-06,
      "loss": 0.2138,
      "step": 5350
    },
    {
      "epoch": 6.074282498593134,
      "grad_norm": 5.1876091957092285,
      "learning_rate": 8.733908261467317e-06,
      "loss": 0.2319,
      "step": 5400
    },
    {
      "epoch": 6.130557118739448,
      "grad_norm": 1.4345381259918213,
      "learning_rate": 8.608923884514436e-06,
      "loss": 0.2118,
      "step": 5450
    },
    {
      "epoch": 6.186831738885762,
      "grad_norm": 6.736217021942139,
      "learning_rate": 8.483939507561556e-06,
      "loss": 0.2171,
      "step": 5500
    },
    {
      "epoch": 6.243106359032076,
      "grad_norm": 3.6418821811676025,
      "learning_rate": 8.358955130608675e-06,
      "loss": 0.2339,
      "step": 5550
    },
    {
      "epoch": 6.299380979178391,
      "grad_norm": 3.062988758087158,
      "learning_rate": 8.233970753655794e-06,
      "loss": 0.2121,
      "step": 5600
    },
    {
      "epoch": 6.355655599324704,
      "grad_norm": 3.603121519088745,
      "learning_rate": 8.108986376702912e-06,
      "loss": 0.211,
      "step": 5650
    },
    {
      "epoch": 6.411930219471018,
      "grad_norm": 3.1732518672943115,
      "learning_rate": 7.984001999750031e-06,
      "loss": 0.1988,
      "step": 5700
    },
    {
      "epoch": 6.468204839617332,
      "grad_norm": 4.5081071853637695,
      "learning_rate": 7.859017622797151e-06,
      "loss": 0.2006,
      "step": 5750
    },
    {
      "epoch": 6.524479459763647,
      "grad_norm": 3.192850112915039,
      "learning_rate": 7.73403324584427e-06,
      "loss": 0.2225,
      "step": 5800
    },
    {
      "epoch": 6.58075407990996,
      "grad_norm": 4.354557514190674,
      "learning_rate": 7.609048868891389e-06,
      "loss": 0.2232,
      "step": 5850
    },
    {
      "epoch": 6.637028700056275,
      "grad_norm": 2.804410934448242,
      "learning_rate": 7.484064491938508e-06,
      "loss": 0.2207,
      "step": 5900
    },
    {
      "epoch": 6.693303320202588,
      "grad_norm": 4.296571731567383,
      "learning_rate": 7.359080114985627e-06,
      "loss": 0.21,
      "step": 5950
    },
    {
      "epoch": 6.749577940348903,
      "grad_norm": 2.1833271980285645,
      "learning_rate": 7.234095738032747e-06,
      "loss": 0.2101,
      "step": 6000
    },
    {
      "epoch": 6.805852560495216,
      "grad_norm": 4.7521138191223145,
      "learning_rate": 7.109111361079865e-06,
      "loss": 0.2263,
      "step": 6050
    },
    {
      "epoch": 6.862127180641531,
      "grad_norm": 4.192446231842041,
      "learning_rate": 6.984126984126984e-06,
      "loss": 0.2127,
      "step": 6100
    },
    {
      "epoch": 6.918401800787844,
      "grad_norm": 3.3775792121887207,
      "learning_rate": 6.859142607174104e-06,
      "loss": 0.2254,
      "step": 6150
    },
    {
      "epoch": 6.974676420934159,
      "grad_norm": 3.5780911445617676,
      "learning_rate": 6.734158230221223e-06,
      "loss": 0.1976,
      "step": 6200
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.8341133004926108,
      "eval_f1_fix": 0.998282377189969,
      "eval_f1_issue": 0.7951293759512937,
      "eval_f1_symptom": 0.8013019677467081,
      "eval_f1_weighted": 0.8340404242585783,
      "eval_loss": 0.45761045813560486,
      "eval_precision_fix": 0.997254632807138,
      "eval_precision_issue": 0.8069199876428792,
      "eval_precision_symptom": 0.7904261529480444,
      "eval_precision_weighted": 0.8342318870309269,
      "eval_recall_fix": 0.9993122420907841,
      "eval_recall_issue": 0.7836783678367837,
      "eval_recall_symptom": 0.8124812481248125,
      "eval_recall_weighted": 0.8341133004926108,
      "eval_runtime": 50.9303,
      "eval_samples_per_second": 159.433,
      "eval_steps_per_second": 2.494,
      "step": 6223
    },
    {
      "epoch": 7.0303882948790095,
      "grad_norm": 6.685973167419434,
      "learning_rate": 6.609173853268343e-06,
      "loss": 0.197,
      "step": 6250
    },
    {
      "epoch": 7.0866629150253235,
      "grad_norm": 3.8742218017578125,
      "learning_rate": 6.484189476315461e-06,
      "loss": 0.183,
      "step": 6300
    },
    {
      "epoch": 7.1429375351716375,
      "grad_norm": 5.564633846282959,
      "learning_rate": 6.35920509936258e-06,
      "loss": 0.2029,
      "step": 6350
    },
    {
      "epoch": 7.1992121553179516,
      "grad_norm": 4.8350677490234375,
      "learning_rate": 6.2342207224097e-06,
      "loss": 0.1889,
      "step": 6400
    },
    {
      "epoch": 7.255486775464266,
      "grad_norm": 2.719174385070801,
      "learning_rate": 6.109236345456818e-06,
      "loss": 0.2021,
      "step": 6450
    },
    {
      "epoch": 7.31176139561058,
      "grad_norm": 3.9081249237060547,
      "learning_rate": 5.984251968503938e-06,
      "loss": 0.1916,
      "step": 6500
    },
    {
      "epoch": 7.368036015756894,
      "grad_norm": 4.187448024749756,
      "learning_rate": 5.859267591551057e-06,
      "loss": 0.2077,
      "step": 6550
    },
    {
      "epoch": 7.424310635903208,
      "grad_norm": 2.6563658714294434,
      "learning_rate": 5.734283214598175e-06,
      "loss": 0.2,
      "step": 6600
    },
    {
      "epoch": 7.480585256049522,
      "grad_norm": 5.274929523468018,
      "learning_rate": 5.609298837645295e-06,
      "loss": 0.203,
      "step": 6650
    },
    {
      "epoch": 7.536859876195836,
      "grad_norm": 4.843599319458008,
      "learning_rate": 5.4843144606924135e-06,
      "loss": 0.1909,
      "step": 6700
    },
    {
      "epoch": 7.59313449634215,
      "grad_norm": 3.966811418533325,
      "learning_rate": 5.359330083739533e-06,
      "loss": 0.1953,
      "step": 6750
    },
    {
      "epoch": 7.649409116488464,
      "grad_norm": 3.038121461868286,
      "learning_rate": 5.234345706786652e-06,
      "loss": 0.2036,
      "step": 6800
    },
    {
      "epoch": 7.705683736634778,
      "grad_norm": 4.253869533538818,
      "learning_rate": 5.1093613298337705e-06,
      "loss": 0.1942,
      "step": 6850
    },
    {
      "epoch": 7.761958356781092,
      "grad_norm": 2.9078307151794434,
      "learning_rate": 4.984376952880891e-06,
      "loss": 0.1957,
      "step": 6900
    },
    {
      "epoch": 7.818232976927406,
      "grad_norm": 4.637989044189453,
      "learning_rate": 4.85939257592801e-06,
      "loss": 0.1803,
      "step": 6950
    },
    {
      "epoch": 7.87450759707372,
      "grad_norm": 4.39274787902832,
      "learning_rate": 4.734408198975128e-06,
      "loss": 0.2025,
      "step": 7000
    },
    {
      "epoch": 7.930782217220034,
      "grad_norm": 4.669205665588379,
      "learning_rate": 4.609423822022248e-06,
      "loss": 0.195,
      "step": 7050
    },
    {
      "epoch": 7.987056837366348,
      "grad_norm": 5.979293346405029,
      "learning_rate": 4.4844394450693665e-06,
      "loss": 0.1966,
      "step": 7100
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.8283251231527093,
      "eval_f1_fix": 0.998282377189969,
      "eval_f1_issue": 0.7821735731039875,
      "eval_f1_symptom": 0.799538505912893,
      "eval_f1_weighted": 0.8279986374134825,
      "eval_loss": 0.5127028226852417,
      "eval_precision_fix": 0.997254632807138,
      "eval_precision_issue": 0.8167864141084259,
      "eval_precision_symptom": 0.7697861705081922,
      "eval_precision_weighted": 0.8298096872695525,
      "eval_recall_fix": 0.9993122420907841,
      "eval_recall_issue": 0.7503750375037503,
      "eval_recall_symptom": 0.8316831683168316,
      "eval_recall_weighted": 0.8283251231527093,
      "eval_runtime": 51.6048,
      "eval_samples_per_second": 157.35,
      "eval_steps_per_second": 2.461,
      "step": 7112
    },
    {
      "epoch": 8.042768711311199,
      "grad_norm": 4.806234836578369,
      "learning_rate": 4.359455068116486e-06,
      "loss": 0.1907,
      "step": 7150
    },
    {
      "epoch": 8.099043331457512,
      "grad_norm": 4.466273784637451,
      "learning_rate": 4.234470691163605e-06,
      "loss": 0.1822,
      "step": 7200
    },
    {
      "epoch": 8.155317951603827,
      "grad_norm": 6.873985767364502,
      "learning_rate": 4.1094863142107235e-06,
      "loss": 0.174,
      "step": 7250
    },
    {
      "epoch": 8.21159257175014,
      "grad_norm": 8.243498802185059,
      "learning_rate": 3.984501937257843e-06,
      "loss": 0.1684,
      "step": 7300
    },
    {
      "epoch": 8.267867191896455,
      "grad_norm": 4.151490211486816,
      "learning_rate": 3.859517560304963e-06,
      "loss": 0.1736,
      "step": 7350
    },
    {
      "epoch": 8.324141812042768,
      "grad_norm": 5.426071643829346,
      "learning_rate": 3.7345331833520813e-06,
      "loss": 0.1849,
      "step": 7400
    },
    {
      "epoch": 8.380416432189083,
      "grad_norm": 3.12210750579834,
      "learning_rate": 3.6095488063992e-06,
      "loss": 0.1703,
      "step": 7450
    },
    {
      "epoch": 8.436691052335396,
      "grad_norm": 4.037646770477295,
      "learning_rate": 3.4845644294463195e-06,
      "loss": 0.1632,
      "step": 7500
    },
    {
      "epoch": 8.492965672481711,
      "grad_norm": 3.56674861907959,
      "learning_rate": 3.3595800524934387e-06,
      "loss": 0.1709,
      "step": 7550
    },
    {
      "epoch": 8.549240292628024,
      "grad_norm": 4.258781433105469,
      "learning_rate": 3.234595675540558e-06,
      "loss": 0.1754,
      "step": 7600
    },
    {
      "epoch": 8.605514912774339,
      "grad_norm": 4.218986511230469,
      "learning_rate": 3.109611298587677e-06,
      "loss": 0.1683,
      "step": 7650
    },
    {
      "epoch": 8.661789532920652,
      "grad_norm": 4.169147491455078,
      "learning_rate": 2.984626921634796e-06,
      "loss": 0.1865,
      "step": 7700
    },
    {
      "epoch": 8.718064153066967,
      "grad_norm": 2.8161799907684326,
      "learning_rate": 2.8596425446819147e-06,
      "loss": 0.1877,
      "step": 7750
    },
    {
      "epoch": 8.77433877321328,
      "grad_norm": 3.580389976501465,
      "learning_rate": 2.734658167729034e-06,
      "loss": 0.1694,
      "step": 7800
    },
    {
      "epoch": 8.830613393359595,
      "grad_norm": 5.852592468261719,
      "learning_rate": 2.6096737907761534e-06,
      "loss": 0.1801,
      "step": 7850
    },
    {
      "epoch": 8.886888013505908,
      "grad_norm": 3.4175920486450195,
      "learning_rate": 2.484689413823272e-06,
      "loss": 0.1768,
      "step": 7900
    },
    {
      "epoch": 8.943162633652223,
      "grad_norm": 6.1865081787109375,
      "learning_rate": 2.3597050368703912e-06,
      "loss": 0.1811,
      "step": 7950
    },
    {
      "epoch": 8.999437253798536,
      "grad_norm": 5.233773708343506,
      "learning_rate": 2.2347206599175104e-06,
      "loss": 0.1829,
      "step": 8000
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.8298029556650246,
      "eval_f1_fix": 0.9986254295532646,
      "eval_f1_issue": 0.7910425177787865,
      "eval_f1_symptom": 0.7948221990775183,
      "eval_f1_weighted": 0.8297645906222303,
      "eval_loss": 0.5134788751602173,
      "eval_precision_fix": 0.9979395604395604,
      "eval_precision_issue": 0.797924297924298,
      "eval_precision_symptom": 0.788370720188902,
      "eval_precision_weighted": 0.8298184010160612,
      "eval_recall_fix": 0.9993122420907841,
      "eval_recall_issue": 0.7842784278427842,
      "eval_recall_symptom": 0.8013801380138014,
      "eval_recall_weighted": 0.8298029556650246,
      "eval_runtime": 53.8444,
      "eval_samples_per_second": 150.805,
      "eval_steps_per_second": 2.359,
      "step": 8001
    },
    {
      "epoch": 9.055149127743388,
      "grad_norm": 3.5759809017181396,
      "learning_rate": 2.1097362829646295e-06,
      "loss": 0.1629,
      "step": 8050
    },
    {
      "epoch": 9.111423747889702,
      "grad_norm": 3.6214778423309326,
      "learning_rate": 1.987251593550806e-06,
      "loss": 0.1694,
      "step": 8100
    },
    {
      "epoch": 9.167698368036016,
      "grad_norm": 6.99995756149292,
      "learning_rate": 1.8622672165979255e-06,
      "loss": 0.1775,
      "step": 8150
    },
    {
      "epoch": 9.22397298818233,
      "grad_norm": 4.065187931060791,
      "learning_rate": 1.7372828396450444e-06,
      "loss": 0.182,
      "step": 8200
    },
    {
      "epoch": 9.280247608328644,
      "grad_norm": 2.9803073406219482,
      "learning_rate": 1.6122984626921637e-06,
      "loss": 0.168,
      "step": 8250
    },
    {
      "epoch": 9.336522228474958,
      "grad_norm": 5.370956897735596,
      "learning_rate": 1.4873140857392826e-06,
      "loss": 0.1493,
      "step": 8300
    },
    {
      "epoch": 9.392796848621272,
      "grad_norm": 5.8644890785217285,
      "learning_rate": 1.3623297087864017e-06,
      "loss": 0.1511,
      "step": 8350
    },
    {
      "epoch": 9.449071468767587,
      "grad_norm": 4.59880256652832,
      "learning_rate": 1.2373453318335209e-06,
      "loss": 0.1626,
      "step": 8400
    },
    {
      "epoch": 9.5053460889139,
      "grad_norm": 3.982597827911377,
      "learning_rate": 1.11236095488064e-06,
      "loss": 0.165,
      "step": 8450
    },
    {
      "epoch": 9.561620709060215,
      "grad_norm": 6.357668876647949,
      "learning_rate": 9.873765779277591e-07,
      "loss": 0.1659,
      "step": 8500
    },
    {
      "epoch": 9.617895329206528,
      "grad_norm": 8.072972297668457,
      "learning_rate": 8.623922009748782e-07,
      "loss": 0.186,
      "step": 8550
    },
    {
      "epoch": 9.674169949352843,
      "grad_norm": 4.884040832519531,
      "learning_rate": 7.374078240219973e-07,
      "loss": 0.1702,
      "step": 8600
    },
    {
      "epoch": 9.730444569499156,
      "grad_norm": 7.7868123054504395,
      "learning_rate": 6.124234470691164e-07,
      "loss": 0.1603,
      "step": 8650
    },
    {
      "epoch": 9.78671918964547,
      "grad_norm": 5.416253089904785,
      "learning_rate": 4.874390701162355e-07,
      "loss": 0.152,
      "step": 8700
    },
    {
      "epoch": 9.842993809791784,
      "grad_norm": 5.989937782287598,
      "learning_rate": 3.6245469316335464e-07,
      "loss": 0.1558,
      "step": 8750
    },
    {
      "epoch": 9.899268429938099,
      "grad_norm": 5.493338108062744,
      "learning_rate": 2.374703162104737e-07,
      "loss": 0.1645,
      "step": 8800
    },
    {
      "epoch": 9.955543050084412,
      "grad_norm": 6.584114074707031,
      "learning_rate": 1.124859392575928e-07,
      "loss": 0.1569,
      "step": 8850
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.8299261083743842,
      "eval_f1_fix": 0.998282377189969,
      "eval_f1_issue": 0.7886029411764706,
      "eval_f1_symptom": 0.7975297750330834,
      "eval_f1_weighted": 0.8298131674335786,
      "eval_loss": 0.5292249321937561,
      "eval_precision_fix": 0.997254632807138,
      "eval_precision_issue": 0.8056338028169014,
      "eval_precision_symptom": 0.7820069204152249,
      "eval_precision_weighted": 0.8302481239697359,
      "eval_recall_fix": 0.9993122420907841,
      "eval_recall_issue": 0.7722772277227723,
      "eval_recall_symptom": 0.8136813681368137,
      "eval_recall_weighted": 0.8299261083743842,
      "eval_runtime": 51.5276,
      "eval_samples_per_second": 157.585,
      "eval_steps_per_second": 2.465,
      "step": 8890
    }
  ],
  "logging_steps": 50,
  "max_steps": 8890,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3.73930339431168e+16,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}