{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9996925914540424,
  "eval_steps": 40,
  "global_step": 813,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0,
      "eval_accuracy": 0.7486631016042781,
      "eval_f1": 0.11320754716981132,
      "eval_loss": 0.6173437237739563,
      "eval_precision": 1.0,
      "eval_recall": 0.06,
      "eval_runtime": 21.3395,
      "eval_samples_per_second": 2.343,
      "eval_steps_per_second": 0.187,
      "step": 0
    },
    {
      "epoch": 0.0012296341838303104,
      "grad_norm": 0.16947800993918843,
      "learning_rate": 2.439024390243903e-07,
      "loss": 0.4396,
      "step": 1
    },
    {
      "epoch": 0.002459268367660621,
      "grad_norm": 0.10672431054661773,
      "learning_rate": 4.878048780487805e-07,
      "loss": 0.3938,
      "step": 2
    },
    {
      "epoch": 0.0036889025514909315,
      "grad_norm": 0.10764229431289964,
      "learning_rate": 7.317073170731707e-07,
      "loss": 0.4124,
      "step": 3
    },
    {
      "epoch": 0.004918536735321242,
      "grad_norm": 0.13601545597787354,
      "learning_rate": 9.75609756097561e-07,
      "loss": 0.3352,
      "step": 4
    },
    {
      "epoch": 0.006148170919151552,
      "grad_norm": 0.11702963264673152,
      "learning_rate": 1.2195121951219514e-06,
      "loss": 0.3986,
      "step": 5
    },
    {
      "epoch": 0.007377805102981863,
      "grad_norm": 0.07207375174046018,
      "learning_rate": 1.4634146341463414e-06,
      "loss": 0.3112,
      "step": 6
    },
    {
      "epoch": 0.008607439286812173,
      "grad_norm": 0.08042057453295026,
      "learning_rate": 1.707317073170732e-06,
      "loss": 0.2993,
      "step": 7
    },
    {
      "epoch": 0.009837073470642483,
      "grad_norm": 0.10719283571963431,
      "learning_rate": 1.951219512195122e-06,
      "loss": 0.3187,
      "step": 8
    },
    {
      "epoch": 0.011066707654472794,
      "grad_norm": 0.12045461326343893,
      "learning_rate": 2.1951219512195125e-06,
      "loss": 0.3989,
      "step": 9
    },
    {
      "epoch": 0.012296341838303105,
      "grad_norm": 0.12729033097448758,
      "learning_rate": 2.4390243902439027e-06,
      "loss": 0.3585,
      "step": 10
    },
    {
      "epoch": 0.013525976022133415,
      "grad_norm": 0.11623558174754572,
      "learning_rate": 2.682926829268293e-06,
      "loss": 0.3616,
      "step": 11
    },
    {
      "epoch": 0.014755610205963726,
      "grad_norm": 0.13211929335210473,
      "learning_rate": 2.926829268292683e-06,
      "loss": 0.4033,
      "step": 12
    },
    {
      "epoch": 0.015985244389794036,
      "grad_norm": 0.11019370723942552,
      "learning_rate": 3.1707317073170736e-06,
      "loss": 0.3872,
      "step": 13
    },
    {
      "epoch": 0.017214878573624345,
      "grad_norm": 0.09793475856878364,
      "learning_rate": 3.414634146341464e-06,
      "loss": 0.3411,
      "step": 14
    },
    {
      "epoch": 0.018444512757454658,
      "grad_norm": 0.08007568466635859,
      "learning_rate": 3.6585365853658537e-06,
      "loss": 0.3438,
      "step": 15
    },
    {
      "epoch": 0.019674146941284967,
      "grad_norm": 0.16273549094219145,
      "learning_rate": 3.902439024390244e-06,
      "loss": 0.4152,
      "step": 16
    },
    {
      "epoch": 0.02090378112511528,
      "grad_norm": 0.15484255697817179,
      "learning_rate": 4.146341463414634e-06,
      "loss": 0.412,
      "step": 17
    },
    {
      "epoch": 0.022133415308945588,
      "grad_norm": 0.1722427296318043,
      "learning_rate": 4.390243902439025e-06,
      "loss": 0.4369,
      "step": 18
    },
    {
      "epoch": 0.0233630494927759,
      "grad_norm": 0.1677143599141389,
      "learning_rate": 4.634146341463416e-06,
      "loss": 0.4091,
      "step": 19
    },
    {
      "epoch": 0.02459268367660621,
      "grad_norm": 0.14218623937593805,
      "learning_rate": 4.8780487804878055e-06,
      "loss": 0.3716,
      "step": 20
    },
    {
      "epoch": 0.02582231786043652,
      "grad_norm": 0.16686175502369677,
      "learning_rate": 5.121951219512195e-06,
      "loss": 0.4109,
      "step": 21
    },
    {
      "epoch": 0.02705195204426683,
      "grad_norm": 0.127157894593906,
      "learning_rate": 5.365853658536586e-06,
      "loss": 0.3755,
      "step": 22
    },
    {
      "epoch": 0.028281586228097143,
      "grad_norm": 0.17122211737299453,
      "learning_rate": 5.609756097560977e-06,
      "loss": 0.3889,
      "step": 23
    },
    {
      "epoch": 0.02951122041192745,
      "grad_norm": 0.10318832215404608,
      "learning_rate": 5.853658536585366e-06,
      "loss": 0.3629,
      "step": 24
    },
    {
      "epoch": 0.03074085459575776,
      "grad_norm": 0.11234944728899221,
      "learning_rate": 6.0975609756097564e-06,
      "loss": 0.3423,
      "step": 25
    },
    {
      "epoch": 0.03197048877958807,
      "grad_norm": 0.217020076775547,
      "learning_rate": 6.341463414634147e-06,
      "loss": 0.4437,
      "step": 26
    },
    {
      "epoch": 0.033200122963418385,
      "grad_norm": 0.1274610275221283,
      "learning_rate": 6.585365853658538e-06,
      "loss": 0.3765,
      "step": 27
    },
    {
      "epoch": 0.03442975714724869,
      "grad_norm": 0.12731101294980054,
      "learning_rate": 6.829268292682928e-06,
      "loss": 0.3493,
      "step": 28
    },
    {
      "epoch": 0.035659391331079,
      "grad_norm": 0.2033936468227057,
      "learning_rate": 7.0731707317073175e-06,
      "loss": 0.4386,
      "step": 29
    },
    {
      "epoch": 0.036889025514909315,
      "grad_norm": 0.16896657855949457,
      "learning_rate": 7.317073170731707e-06,
      "loss": 0.4073,
      "step": 30
    },
    {
      "epoch": 0.03811865969873963,
      "grad_norm": 0.21928137355117602,
      "learning_rate": 7.560975609756098e-06,
      "loss": 0.4677,
      "step": 31
    },
    {
      "epoch": 0.03934829388256993,
      "grad_norm": 0.17279040555232075,
      "learning_rate": 7.804878048780489e-06,
      "loss": 0.3658,
      "step": 32
    },
    {
      "epoch": 0.040577928066400246,
      "grad_norm": 0.16581055578879605,
      "learning_rate": 8.048780487804879e-06,
      "loss": 0.4077,
      "step": 33
    },
    {
      "epoch": 0.04180756225023056,
      "grad_norm": 0.074521083746626,
      "learning_rate": 8.292682926829268e-06,
      "loss": 0.3224,
      "step": 34
    },
    {
      "epoch": 0.04303719643406086,
      "grad_norm": 0.21248215267519446,
      "learning_rate": 8.536585365853658e-06,
      "loss": 0.4573,
      "step": 35
    },
    {
      "epoch": 0.044266830617891176,
      "grad_norm": 0.23670156299545972,
      "learning_rate": 8.78048780487805e-06,
      "loss": 0.4131,
      "step": 36
    },
    {
      "epoch": 0.04549646480172149,
      "grad_norm": 0.11101522460056251,
      "learning_rate": 9.02439024390244e-06,
      "loss": 0.3579,
      "step": 37
    },
    {
      "epoch": 0.0467260989855518,
      "grad_norm": 0.19435184126121027,
      "learning_rate": 9.268292682926831e-06,
      "loss": 0.3918,
      "step": 38
    },
    {
      "epoch": 0.047955733169382106,
      "grad_norm": 0.1863872389043058,
      "learning_rate": 9.51219512195122e-06,
      "loss": 0.4052,
      "step": 39
    },
    {
      "epoch": 0.04918536735321242,
      "grad_norm": 0.18622429844059213,
      "learning_rate": 9.756097560975611e-06,
      "loss": 0.3808,
      "step": 40
    },
    {
      "epoch": 0.04918536735321242,
      "eval_accuracy": 0.7486631016042781,
      "eval_f1": 0.14545454545454545,
      "eval_loss": 0.569531261920929,
      "eval_precision": 0.8,
      "eval_recall": 0.08,
      "eval_runtime": 22.9388,
      "eval_samples_per_second": 2.18,
      "eval_steps_per_second": 0.174,
      "step": 40
    },
    {
      "epoch": 0.05041500153704273,
      "grad_norm": 0.17516035028283702,
      "learning_rate": 1e-05,
      "loss": 0.4407,
      "step": 41
    },
    {
      "epoch": 0.05164463572087304,
      "grad_norm": 0.21385095115475125,
      "learning_rate": 1.024390243902439e-05,
      "loss": 0.4446,
      "step": 42
    },
    {
      "epoch": 0.05287426990470335,
      "grad_norm": 0.1148381370483627,
      "learning_rate": 1.0487804878048782e-05,
      "loss": 0.3364,
      "step": 43
    },
    {
      "epoch": 0.05410390408853366,
      "grad_norm": 0.25017332978297385,
      "learning_rate": 1.0731707317073172e-05,
      "loss": 0.4267,
      "step": 44
    },
    {
      "epoch": 0.05533353827236397,
      "grad_norm": 0.09329103801649505,
      "learning_rate": 1.0975609756097562e-05,
      "loss": 0.3926,
      "step": 45
    },
    {
      "epoch": 0.056563172456194286,
      "grad_norm": 0.11081599392042418,
      "learning_rate": 1.1219512195121953e-05,
      "loss": 0.2938,
      "step": 46
    },
    {
      "epoch": 0.05779280664002459,
      "grad_norm": 0.13608091036505457,
      "learning_rate": 1.1463414634146342e-05,
      "loss": 0.3615,
      "step": 47
    },
    {
      "epoch": 0.0590224408238549,
      "grad_norm": 0.07959994638087453,
      "learning_rate": 1.1707317073170731e-05,
      "loss": 0.3459,
      "step": 48
    },
    {
      "epoch": 0.060252075007685216,
      "grad_norm": 0.10921168296006675,
      "learning_rate": 1.1951219512195123e-05,
      "loss": 0.3312,
      "step": 49
    },
    {
      "epoch": 0.06148170919151552,
      "grad_norm": 0.06644688369030896,
      "learning_rate": 1.2195121951219513e-05,
      "loss": 0.3201,
      "step": 50
    },
    {
      "epoch": 0.06271134337534584,
      "grad_norm": 0.14882782025175664,
      "learning_rate": 1.2439024390243903e-05,
      "loss": 0.3776,
      "step": 51
    },
    {
      "epoch": 0.06394097755917615,
      "grad_norm": 0.0871782676219407,
      "learning_rate": 1.2682926829268294e-05,
      "loss": 0.3376,
      "step": 52
    },
    {
      "epoch": 0.06517061174300645,
      "grad_norm": 0.0948350188983306,
      "learning_rate": 1.2926829268292684e-05,
      "loss": 0.3633,
      "step": 53
    },
    {
      "epoch": 0.06640024592683677,
      "grad_norm": 0.09736024653545118,
      "learning_rate": 1.3170731707317076e-05,
      "loss": 0.2994,
      "step": 54
    },
    {
      "epoch": 0.06762988011066708,
      "grad_norm": 0.07422207202677711,
      "learning_rate": 1.3414634146341466e-05,
      "loss": 0.334,
      "step": 55
    },
    {
      "epoch": 0.06885951429449738,
      "grad_norm": 0.09007615228726544,
      "learning_rate": 1.3658536585365855e-05,
      "loss": 0.408,
      "step": 56
    },
    {
      "epoch": 0.0700891484783277,
      "grad_norm": 0.08808964527252541,
      "learning_rate": 1.3902439024390244e-05,
      "loss": 0.3315,
      "step": 57
    },
    {
      "epoch": 0.071318782662158,
      "grad_norm": 0.09549009522835197,
      "learning_rate": 1.4146341463414635e-05,
      "loss": 0.3246,
      "step": 58
    },
    {
      "epoch": 0.07254841684598831,
      "grad_norm": 0.10965982967611958,
      "learning_rate": 1.4390243902439025e-05,
      "loss": 0.319,
      "step": 59
    },
    {
      "epoch": 0.07377805102981863,
      "grad_norm": 0.1567439779382465,
      "learning_rate": 1.4634146341463415e-05,
      "loss": 0.2774,
      "step": 60
    },
    {
      "epoch": 0.07500768521364894,
      "grad_norm": 0.10296359556505037,
      "learning_rate": 1.4878048780487806e-05,
      "loss": 0.2978,
      "step": 61
    },
    {
      "epoch": 0.07623731939747926,
      "grad_norm": 0.14416206880229074,
      "learning_rate": 1.5121951219512196e-05,
      "loss": 0.3291,
      "step": 62
    },
    {
      "epoch": 0.07746695358130956,
      "grad_norm": 0.108732502578673,
      "learning_rate": 1.5365853658536586e-05,
      "loss": 0.3499,
      "step": 63
    },
    {
      "epoch": 0.07869658776513987,
      "grad_norm": 0.08767021283689419,
      "learning_rate": 1.5609756097560978e-05,
      "loss": 0.3501,
      "step": 64
    },
    {
      "epoch": 0.07992622194897019,
      "grad_norm": 0.09677893965132829,
      "learning_rate": 1.585365853658537e-05,
      "loss": 0.3347,
      "step": 65
    },
    {
      "epoch": 0.08115585613280049,
      "grad_norm": 0.16955814556761048,
      "learning_rate": 1.6097560975609757e-05,
      "loss": 0.3004,
      "step": 66
    },
    {
      "epoch": 0.0823854903166308,
      "grad_norm": 0.15293030890798887,
      "learning_rate": 1.6341463414634145e-05,
      "loss": 0.2939,
      "step": 67
    },
    {
      "epoch": 0.08361512450046112,
      "grad_norm": 0.16572500214125407,
      "learning_rate": 1.6585365853658537e-05,
      "loss": 0.2839,
      "step": 68
    },
    {
      "epoch": 0.08484475868429142,
      "grad_norm": 0.2146030454559332,
      "learning_rate": 1.682926829268293e-05,
      "loss": 0.3618,
      "step": 69
    },
    {
      "epoch": 0.08607439286812173,
      "grad_norm": 0.12462259952049407,
      "learning_rate": 1.7073170731707317e-05,
      "loss": 0.3311,
      "step": 70
    },
    {
      "epoch": 0.08730402705195205,
      "grad_norm": 0.20365001675320832,
      "learning_rate": 1.7317073170731708e-05,
      "loss": 0.3036,
      "step": 71
    },
    {
      "epoch": 0.08853366123578235,
      "grad_norm": 0.09346661832479064,
      "learning_rate": 1.75609756097561e-05,
      "loss": 0.3072,
      "step": 72
    },
    {
      "epoch": 0.08976329541961267,
      "grad_norm": 0.11110374080050489,
      "learning_rate": 1.7804878048780488e-05,
      "loss": 0.3134,
      "step": 73
    },
    {
      "epoch": 0.09099292960344298,
      "grad_norm": 0.10890373451648423,
      "learning_rate": 1.804878048780488e-05,
      "loss": 0.356,
      "step": 74
    },
    {
      "epoch": 0.09222256378727328,
      "grad_norm": 0.11734882617850208,
      "learning_rate": 1.829268292682927e-05,
      "loss": 0.3112,
      "step": 75
    },
    {
      "epoch": 0.0934521979711036,
      "grad_norm": 0.11663520206712856,
      "learning_rate": 1.8536585365853663e-05,
      "loss": 0.284,
      "step": 76
    },
    {
      "epoch": 0.0946818321549339,
      "grad_norm": 0.19501804198639125,
      "learning_rate": 1.878048780487805e-05,
      "loss": 0.3381,
      "step": 77
    },
    {
      "epoch": 0.09591146633876421,
      "grad_norm": 0.1206669623358565,
      "learning_rate": 1.902439024390244e-05,
      "loss": 0.3051,
      "step": 78
    },
    {
      "epoch": 0.09714110052259453,
      "grad_norm": 0.11698791484099845,
      "learning_rate": 1.926829268292683e-05,
      "loss": 0.3404,
      "step": 79
    },
    {
      "epoch": 0.09837073470642484,
      "grad_norm": 0.12615671370962322,
      "learning_rate": 1.9512195121951222e-05,
      "loss": 0.3036,
      "step": 80
    },
    {
      "epoch": 0.09837073470642484,
      "eval_accuracy": 0.7647058823529411,
      "eval_f1": 0.3888888888888889,
      "eval_loss": 0.4815624952316284,
      "eval_precision": 0.6363636363636364,
      "eval_recall": 0.28,
      "eval_runtime": 23.4521,
      "eval_samples_per_second": 2.132,
      "eval_steps_per_second": 0.171,
      "step": 80
    },
    {
      "epoch": 0.09960036889025516,
      "grad_norm": 0.1379739378249637,
      "learning_rate": 1.975609756097561e-05,
      "loss": 0.3091,
      "step": 81
    },
    {
      "epoch": 0.10083000307408546,
      "grad_norm": 0.12376996023773659,
      "learning_rate": 2e-05,
      "loss": 0.3565,
      "step": 82
    },
    {
      "epoch": 0.10205963725791577,
      "grad_norm": 0.16407428492825843,
      "learning_rate": 1.9999907650547006e-05,
      "loss": 0.2773,
      "step": 83
    },
    {
      "epoch": 0.10328927144174609,
      "grad_norm": 0.15385060040106613,
      "learning_rate": 1.999963060389371e-05,
      "loss": 0.2907,
      "step": 84
    },
    {
      "epoch": 0.10451890562557639,
      "grad_norm": 0.12708853586504387,
      "learning_rate": 1.9999168865157137e-05,
      "loss": 0.293,
      "step": 85
    },
    {
      "epoch": 0.1057485398094067,
      "grad_norm": 0.14362096666809113,
      "learning_rate": 1.999852244286554e-05,
      "loss": 0.2501,
      "step": 86
    },
    {
      "epoch": 0.10697817399323702,
      "grad_norm": 0.2774555130276722,
      "learning_rate": 1.9997691348958278e-05,
      "loss": 0.3742,
      "step": 87
    },
    {
      "epoch": 0.10820780817706732,
      "grad_norm": 0.16184014678377695,
      "learning_rate": 1.999667559878556e-05,
      "loss": 0.2938,
      "step": 88
    },
    {
      "epoch": 0.10943744236089763,
      "grad_norm": 0.1423227248902307,
      "learning_rate": 1.9995475211108183e-05,
      "loss": 0.2442,
      "step": 89
    },
    {
      "epoch": 0.11066707654472795,
      "grad_norm": 0.16870686952180433,
      "learning_rate": 1.9994090208097176e-05,
      "loss": 0.317,
      "step": 90
    },
    {
      "epoch": 0.11189671072855825,
      "grad_norm": 0.13779932314182178,
      "learning_rate": 1.9992520615333393e-05,
      "loss": 0.2261,
      "step": 91
    },
    {
      "epoch": 0.11312634491238857,
      "grad_norm": 0.15241908530978227,
      "learning_rate": 1.9990766461807037e-05,
      "loss": 0.285,
      "step": 92
    },
    {
      "epoch": 0.11435597909621888,
      "grad_norm": 0.1874412460711097,
      "learning_rate": 1.9988827779917138e-05,
      "loss": 0.3128,
      "step": 93
    },
    {
      "epoch": 0.11558561328004918,
      "grad_norm": 0.14378093589386245,
      "learning_rate": 1.9986704605470932e-05,
      "loss": 0.2551,
      "step": 94
    },
    {
      "epoch": 0.1168152474638795,
      "grad_norm": 0.19017415285786005,
      "learning_rate": 1.9984396977683223e-05,
      "loss": 0.2813,
      "step": 95
    },
    {
      "epoch": 0.1180448816477098,
      "grad_norm": 0.18031748830920308,
      "learning_rate": 1.998190493917564e-05,
      "loss": 0.3183,
      "step": 96
    },
    {
      "epoch": 0.11927451583154011,
      "grad_norm": 0.2342185870662447,
      "learning_rate": 1.9979228535975866e-05,
      "loss": 0.2367,
      "step": 97
    },
    {
      "epoch": 0.12050415001537043,
      "grad_norm": 0.19670363968815147,
      "learning_rate": 1.9976367817516773e-05,
      "loss": 0.216,
      "step": 98
    },
    {
      "epoch": 0.12173378419920074,
      "grad_norm": 0.1727744553981885,
      "learning_rate": 1.9973322836635517e-05,
      "loss": 0.2246,
      "step": 99
    },
    {
      "epoch": 0.12296341838303104,
      "grad_norm": 0.1616269560443287,
      "learning_rate": 1.9970093649572567e-05,
      "loss": 0.2257,
      "step": 100
    },
    {
      "epoch": 0.12419305256686136,
      "grad_norm": 0.18210033905044673,
      "learning_rate": 1.9966680315970647e-05,
      "loss": 0.2629,
      "step": 101
    },
    {
      "epoch": 0.12542268675069168,
      "grad_norm": 0.22735479010308166,
      "learning_rate": 1.996308289887366e-05,
      "loss": 0.3055,
      "step": 102
    },
    {
      "epoch": 0.12665232093452197,
      "grad_norm": 0.21880883838789394,
      "learning_rate": 1.9959301464725507e-05,
      "loss": 0.2896,
      "step": 103
    },
    {
      "epoch": 0.1278819551183523,
      "grad_norm": 0.1568188763941854,
      "learning_rate": 1.995533608336886e-05,
      "loss": 0.1856,
      "step": 104
    },
    {
      "epoch": 0.1291115893021826,
      "grad_norm": 0.21046557286925857,
      "learning_rate": 1.995118682804388e-05,
      "loss": 0.2328,
      "step": 105
    },
    {
      "epoch": 0.1303412234860129,
      "grad_norm": 0.23586873079680978,
      "learning_rate": 1.9946853775386857e-05,
      "loss": 0.2608,
      "step": 106
    },
    {
      "epoch": 0.13157085766984322,
      "grad_norm": 0.19655270590211088,
      "learning_rate": 1.9942337005428805e-05,
      "loss": 0.2982,
      "step": 107
    },
    {
      "epoch": 0.13280049185367354,
      "grad_norm": 0.31014757888444056,
      "learning_rate": 1.9937636601593965e-05,
      "loss": 0.3041,
      "step": 108
    },
    {
      "epoch": 0.13403012603750383,
      "grad_norm": 0.3337300802942654,
      "learning_rate": 1.9932752650698285e-05,
      "loss": 0.3085,
      "step": 109
    },
    {
      "epoch": 0.13525976022133415,
      "grad_norm": 0.2637379526580471,
      "learning_rate": 1.9927685242947804e-05,
      "loss": 0.301,
      "step": 110
    },
    {
      "epoch": 0.13648939440516447,
      "grad_norm": 0.22577400909500966,
      "learning_rate": 1.9922434471936987e-05,
      "loss": 0.2234,
      "step": 111
    },
    {
      "epoch": 0.13771902858899476,
      "grad_norm": 0.21966929441456037,
      "learning_rate": 1.9917000434647e-05,
      "loss": 0.2743,
      "step": 112
    },
    {
      "epoch": 0.13894866277282508,
      "grad_norm": 0.2540802763063313,
      "learning_rate": 1.991138323144392e-05,
      "loss": 0.2307,
      "step": 113
    },
    {
      "epoch": 0.1401782969566554,
      "grad_norm": 0.3204592419266727,
      "learning_rate": 1.990558296607687e-05,
      "loss": 0.2156,
      "step": 114
    },
    {
      "epoch": 0.1414079311404857,
      "grad_norm": 0.24090237444938584,
      "learning_rate": 1.9899599745676123e-05,
      "loss": 0.2809,
      "step": 115
    },
    {
      "epoch": 0.142637565324316,
      "grad_norm": 0.2611220625407227,
      "learning_rate": 1.9893433680751105e-05,
      "loss": 0.2307,
      "step": 116
    },
    {
      "epoch": 0.14386719950814633,
      "grad_norm": 0.37292140730199286,
      "learning_rate": 1.9887084885188354e-05,
      "loss": 0.2051,
      "step": 117
    },
    {
      "epoch": 0.14509683369197662,
      "grad_norm": 0.21515044559789018,
      "learning_rate": 1.9880553476249437e-05,
      "loss": 0.2416,
      "step": 118
    },
    {
      "epoch": 0.14632646787580694,
      "grad_norm": 0.27918682412366513,
      "learning_rate": 1.9873839574568756e-05,
      "loss": 0.2711,
      "step": 119
    },
    {
      "epoch": 0.14755610205963726,
      "grad_norm": 0.23334267872599612,
      "learning_rate": 1.9866943304151346e-05,
      "loss": 0.305,
      "step": 120
    },
    {
      "epoch": 0.14755610205963726,
      "eval_accuracy": 0.8021390374331551,
      "eval_f1": 0.5316455696202531,
      "eval_loss": 0.4852343797683716,
      "eval_precision": 0.7241379310344828,
      "eval_recall": 0.42,
      "eval_runtime": 22.4489,
      "eval_samples_per_second": 2.227,
      "eval_steps_per_second": 0.178,
      "step": 120
    },
    {
      "epoch": 0.14878573624346758,
      "grad_norm": 0.255894846030308,
      "learning_rate": 1.9859864792370565e-05,
      "loss": 0.2925,
      "step": 121
    },
    {
      "epoch": 0.15001537042729787,
      "grad_norm": 0.2894368730807284,
      "learning_rate": 1.985260416996575e-05,
      "loss": 0.3092,
      "step": 122
    },
    {
      "epoch": 0.1512450046111282,
      "grad_norm": 0.2819736938873003,
      "learning_rate": 1.9845161571039805e-05,
      "loss": 0.2827,
      "step": 123
    },
    {
      "epoch": 0.1524746387949585,
      "grad_norm": 0.2849595623136817,
      "learning_rate": 1.983753713305672e-05,
      "loss": 0.3301,
      "step": 124
    },
    {
      "epoch": 0.1537042729787888,
      "grad_norm": 0.36826872994416193,
      "learning_rate": 1.982973099683902e-05,
      "loss": 0.2657,
      "step": 125
    },
    {
      "epoch": 0.15493390716261912,
      "grad_norm": 0.26692338354049955,
      "learning_rate": 1.98217433065652e-05,
      "loss": 0.1732,
      "step": 126
    },
    {
      "epoch": 0.15616354134644944,
      "grad_norm": 0.2573459669567664,
      "learning_rate": 1.9813574209767013e-05,
      "loss": 0.2586,
      "step": 127
    },
    {
      "epoch": 0.15739317553027973,
      "grad_norm": 0.3781515395257974,
      "learning_rate": 1.9805223857326794e-05,
      "loss": 0.2687,
      "step": 128
    },
    {
      "epoch": 0.15862280971411005,
      "grad_norm": 0.2609213888795932,
      "learning_rate": 1.9796692403474632e-05,
      "loss": 0.243,
      "step": 129
    },
    {
      "epoch": 0.15985244389794037,
      "grad_norm": 0.33148152190355884,
      "learning_rate": 1.9787980005785553e-05,
      "loss": 0.284,
      "step": 130
    },
    {
      "epoch": 0.16108207808177066,
      "grad_norm": 0.24671664914766978,
      "learning_rate": 1.977908682517658e-05,
      "loss": 0.2743,
      "step": 131
    },
    {
      "epoch": 0.16231171226560098,
      "grad_norm": 0.23136891800428114,
      "learning_rate": 1.9770013025903797e-05,
      "loss": 0.1988,
      "step": 132
    },
    {
      "epoch": 0.1635413464494313,
      "grad_norm": 0.3625913614143042,
      "learning_rate": 1.9760758775559275e-05,
      "loss": 0.3571,
      "step": 133
    },
    {
      "epoch": 0.1647709806332616,
      "grad_norm": 0.4654838486463352,
      "learning_rate": 1.9751324245068008e-05,
      "loss": 0.2483,
      "step": 134
    },
    {
      "epoch": 0.1660006148170919,
      "grad_norm": 0.23389863720470436,
      "learning_rate": 1.974170960868474e-05,
      "loss": 0.2121,
      "step": 135
    },
    {
      "epoch": 0.16723024900092223,
      "grad_norm": 0.26709730418277217,
      "learning_rate": 1.973191504399076e-05,
      "loss": 0.2277,
      "step": 136
    },
    {
      "epoch": 0.16845988318475252,
      "grad_norm": 0.3065908377026066,
      "learning_rate": 1.97219407318906e-05,
      "loss": 0.2811,
      "step": 137
    },
    {
      "epoch": 0.16968951736858284,
      "grad_norm": 0.2561047976359151,
      "learning_rate": 1.9711786856608714e-05,
      "loss": 0.2702,
      "step": 138
    },
    {
      "epoch": 0.17091915155241316,
      "grad_norm": 0.3029014908375809,
      "learning_rate": 1.970145360568607e-05,
      "loss": 0.2703,
      "step": 139
    },
    {
      "epoch": 0.17214878573624345,
      "grad_norm": 0.2928125726746547,
      "learning_rate": 1.969094116997668e-05,
      "loss": 0.2925,
      "step": 140
    },
    {
      "epoch": 0.17337841992007377,
      "grad_norm": 0.417971783345298,
      "learning_rate": 1.968024974364408e-05,
      "loss": 0.2496,
      "step": 141
    },
    {
      "epoch": 0.1746080541039041,
      "grad_norm": 0.2496018935199012,
      "learning_rate": 1.9669379524157755e-05,
      "loss": 0.2279,
      "step": 142
    },
    {
      "epoch": 0.1758376882877344,
      "grad_norm": 0.3714257892665527,
      "learning_rate": 1.9658330712289456e-05,
      "loss": 0.295,
      "step": 143
    },
    {
      "epoch": 0.1770673224715647,
      "grad_norm": 0.2580656993421598,
      "learning_rate": 1.9647103512109535e-05,
      "loss": 0.253,
      "step": 144
    },
    {
      "epoch": 0.17829695665539502,
      "grad_norm": 0.2560142798427468,
      "learning_rate": 1.9635698130983153e-05,
      "loss": 0.251,
      "step": 145
    },
    {
      "epoch": 0.17952659083922534,
      "grad_norm": 0.2607452518822491,
      "learning_rate": 1.962411477956645e-05,
      "loss": 0.2395,
      "step": 146
    },
    {
      "epoch": 0.18075622502305563,
      "grad_norm": 0.25191931122780475,
      "learning_rate": 1.9612353671802658e-05,
      "loss": 0.2389,
      "step": 147
    },
    {
      "epoch": 0.18198585920688595,
      "grad_norm": 0.2991928933187448,
      "learning_rate": 1.960041502491815e-05,
      "loss": 0.3016,
      "step": 148
    },
    {
      "epoch": 0.18321549339071627,
      "grad_norm": 0.2775853628798545,
      "learning_rate": 1.9588299059418434e-05,
      "loss": 0.2558,
      "step": 149
    },
    {
      "epoch": 0.18444512757454656,
      "grad_norm": 0.25029731861303234,
      "learning_rate": 1.957600599908406e-05,
      "loss": 0.221,
      "step": 150
    },
    {
      "epoch": 0.18567476175837688,
      "grad_norm": 0.3473969966642877,
      "learning_rate": 1.9563536070966513e-05,
      "loss": 0.3639,
      "step": 151
    },
    {
      "epoch": 0.1869043959422072,
      "grad_norm": 0.21700372590221412,
      "learning_rate": 1.9550889505383996e-05,
      "loss": 0.2122,
      "step": 152
    },
    {
      "epoch": 0.1881340301260375,
      "grad_norm": 0.26057622594959473,
      "learning_rate": 1.9538066535917196e-05,
      "loss": 0.2631,
      "step": 153
    },
    {
      "epoch": 0.1893636643098678,
      "grad_norm": 0.31462240972233546,
      "learning_rate": 1.952506739940496e-05,
      "loss": 0.2576,
      "step": 154
    },
    {
      "epoch": 0.19059329849369813,
      "grad_norm": 0.26307248972641967,
      "learning_rate": 1.9511892335939904e-05,
      "loss": 0.2419,
      "step": 155
    },
    {
      "epoch": 0.19182293267752842,
      "grad_norm": 0.3849365062506917,
      "learning_rate": 1.9498541588864022e-05,
      "loss": 0.2316,
      "step": 156
    },
    {
      "epoch": 0.19305256686135874,
      "grad_norm": 0.2858572602811596,
      "learning_rate": 1.948501540476414e-05,
      "loss": 0.2242,
      "step": 157
    },
    {
      "epoch": 0.19428220104518906,
      "grad_norm": 0.20651885971329154,
      "learning_rate": 1.9471314033467413e-05,
      "loss": 0.2597,
      "step": 158
    },
    {
      "epoch": 0.19551183522901935,
      "grad_norm": 0.24219070077131208,
      "learning_rate": 1.945743772803666e-05,
      "loss": 0.1932,
      "step": 159
    },
    {
      "epoch": 0.19674146941284967,
      "grad_norm": 0.2870212902155916,
      "learning_rate": 1.9443386744765726e-05,
      "loss": 0.256,
      "step": 160
    },
    {
      "epoch": 0.19674146941284967,
      "eval_accuracy": 0.8021390374331551,
      "eval_f1": 0.4931506849315068,
      "eval_loss": 0.43281251192092896,
      "eval_precision": 0.782608695652174,
      "eval_recall": 0.36,
      "eval_runtime": 23.7371,
      "eval_samples_per_second": 2.106,
      "eval_steps_per_second": 0.169,
      "step": 160
    },
    {
      "epoch": 0.19797110359668,
      "grad_norm": 0.2614284036745401,
      "learning_rate": 1.942916134317473e-05,
      "loss": 0.2436,
      "step": 161
    },
    {
      "epoch": 0.1992007377805103,
      "grad_norm": 0.26937001401772626,
      "learning_rate": 1.9414761786005293e-05,
      "loss": 0.1725,
      "step": 162
    },
    {
      "epoch": 0.2004303719643406,
      "grad_norm": 0.28202676187309017,
      "learning_rate": 1.9400188339215657e-05,
      "loss": 0.2591,
      "step": 163
    },
    {
      "epoch": 0.20166000614817092,
      "grad_norm": 0.27016058532381143,
      "learning_rate": 1.9385441271975786e-05,
      "loss": 0.2003,
      "step": 164
    },
    {
      "epoch": 0.20288964033200124,
      "grad_norm": 0.20605906416711317,
      "learning_rate": 1.9370520856662406e-05,
      "loss": 0.1778,
      "step": 165
    },
    {
      "epoch": 0.20411927451583153,
      "grad_norm": 0.21687941485697337,
      "learning_rate": 1.9355427368853946e-05,
      "loss": 0.2145,
      "step": 166
    },
    {
      "epoch": 0.20534890869966185,
      "grad_norm": 0.4233260372619336,
      "learning_rate": 1.9340161087325483e-05,
      "loss": 0.1657,
      "step": 167
    },
    {
      "epoch": 0.20657854288349217,
      "grad_norm": 0.26680222767798134,
      "learning_rate": 1.932472229404356e-05,
      "loss": 0.1846,
      "step": 168
    },
    {
      "epoch": 0.20780817706732246,
      "grad_norm": 0.3380713604084801,
      "learning_rate": 1.9309111274161005e-05,
      "loss": 0.2896,
      "step": 169
    },
    {
      "epoch": 0.20903781125115278,
      "grad_norm": 0.3308557221991788,
      "learning_rate": 1.9293328316011645e-05,
      "loss": 0.2199,
      "step": 170
    },
    {
      "epoch": 0.2102674454349831,
      "grad_norm": 0.3136020181316013,
      "learning_rate": 1.927737371110499e-05,
      "loss": 0.213,
      "step": 171
    },
    {
      "epoch": 0.2114970796188134,
      "grad_norm": 0.35343090913220965,
      "learning_rate": 1.9261247754120846e-05,
      "loss": 0.2322,
      "step": 172
    },
    {
      "epoch": 0.2127267138026437,
      "grad_norm": 0.2824826725729394,
      "learning_rate": 1.924495074290388e-05,
      "loss": 0.2523,
      "step": 173
    },
    {
      "epoch": 0.21395634798647403,
      "grad_norm": 0.34041228997887535,
      "learning_rate": 1.92284829784581e-05,
      "loss": 0.2854,
      "step": 174
    },
    {
      "epoch": 0.21518598217030432,
      "grad_norm": 0.3318426940507861,
      "learning_rate": 1.9211844764941318e-05,
      "loss": 0.1669,
      "step": 175
    },
    {
      "epoch": 0.21641561635413464,
      "grad_norm": 0.3090750522717658,
      "learning_rate": 1.919503640965951e-05,
      "loss": 0.1843,
      "step": 176
    },
    {
      "epoch": 0.21764525053796496,
      "grad_norm": 0.297018364631407,
      "learning_rate": 1.917805822306117e-05,
      "loss": 0.2038,
      "step": 177
    },
    {
      "epoch": 0.21887488472179525,
      "grad_norm": 0.2679923976809244,
      "learning_rate": 1.916091051873154e-05,
      "loss": 0.14,
      "step": 178
    },
    {
      "epoch": 0.22010451890562557,
      "grad_norm": 0.3130518688020483,
      "learning_rate": 1.9143593613386845e-05,
      "loss": 0.1871,
      "step": 179
    },
    {
      "epoch": 0.2213341530894559,
      "grad_norm": 0.38588801765002845,
      "learning_rate": 1.9126107826868436e-05,
      "loss": 0.275,
      "step": 180
    },
    {
      "epoch": 0.22256378727328618,
      "grad_norm": 0.3157899959865116,
      "learning_rate": 1.9108453482136866e-05,
      "loss": 0.2098,
      "step": 181
    },
    {
      "epoch": 0.2237934214571165,
      "grad_norm": 0.3517837295222883,
      "learning_rate": 1.9090630905265963e-05,
      "loss": 0.2855,
      "step": 182
    },
    {
      "epoch": 0.22502305564094682,
      "grad_norm": 0.3092532190930959,
      "learning_rate": 1.9072640425436762e-05,
      "loss": 0.2278,
      "step": 183
    },
    {
      "epoch": 0.22625268982477714,
      "grad_norm": 0.38543714174275906,
      "learning_rate": 1.905448237493147e-05,
      "loss": 0.289,
      "step": 184
    },
    {
      "epoch": 0.22748232400860743,
      "grad_norm": 0.3569146643137087,
      "learning_rate": 1.9036157089127278e-05,
      "loss": 0.2716,
      "step": 185
    },
    {
      "epoch": 0.22871195819243775,
      "grad_norm": 0.40683148192292634,
      "learning_rate": 1.901766490649022e-05,
      "loss": 0.2983,
      "step": 186
    },
    {
      "epoch": 0.22994159237626807,
      "grad_norm": 0.4253940265898542,
      "learning_rate": 1.8999006168568883e-05,
      "loss": 0.2284,
      "step": 187
    },
    {
      "epoch": 0.23117122656009836,
      "grad_norm": 0.26669631723987736,
      "learning_rate": 1.8980181219988117e-05,
      "loss": 0.1757,
      "step": 188
    },
    {
      "epoch": 0.23240086074392868,
      "grad_norm": 0.2721222099304509,
      "learning_rate": 1.8961190408442662e-05,
      "loss": 0.2298,
      "step": 189
    },
    {
      "epoch": 0.233630494927759,
      "grad_norm": 0.36072785713712757,
      "learning_rate": 1.8942034084690727e-05,
      "loss": 0.2847,
      "step": 190
    },
    {
      "epoch": 0.2348601291115893,
      "grad_norm": 0.34669408899608534,
      "learning_rate": 1.8922712602547516e-05,
      "loss": 0.2453,
      "step": 191
    },
    {
      "epoch": 0.2360897632954196,
      "grad_norm": 0.3295391150787403,
      "learning_rate": 1.89032263188787e-05,
      "loss": 0.2633,
      "step": 192
    },
    {
      "epoch": 0.23731939747924993,
      "grad_norm": 0.2836153191531997,
      "learning_rate": 1.8883575593593793e-05,
      "loss": 0.2218,
      "step": 193
    },
    {
      "epoch": 0.23854903166308022,
      "grad_norm": 0.2680039523838409,
      "learning_rate": 1.8863760789639548e-05,
      "loss": 0.2422,
      "step": 194
    },
    {
      "epoch": 0.23977866584691054,
      "grad_norm": 0.18616539199723142,
      "learning_rate": 1.8843782272993225e-05,
      "loss": 0.1552,
      "step": 195
    },
    {
      "epoch": 0.24100830003074086,
      "grad_norm": 0.2810251721018552,
      "learning_rate": 1.8823640412655844e-05,
      "loss": 0.1982,
      "step": 196
    },
    {
      "epoch": 0.24223793421457115,
      "grad_norm": 0.2586309972920896,
      "learning_rate": 1.880333558064536e-05,
      "loss": 0.2115,
      "step": 197
    },
    {
      "epoch": 0.24346756839840147,
      "grad_norm": 0.2529365933222149,
      "learning_rate": 1.878286815198979e-05,
      "loss": 0.2142,
      "step": 198
    },
    {
      "epoch": 0.2446972025822318,
      "grad_norm": 0.376229567543972,
      "learning_rate": 1.876223850472032e-05,
      "loss": 0.2328,
      "step": 199
    },
    {
      "epoch": 0.24592683676606208,
      "grad_norm": 0.23504541587554245,
      "learning_rate": 1.8741447019864263e-05,
      "loss": 0.2062,
      "step": 200
    },
    {
      "epoch": 0.24592683676606208,
      "eval_accuracy": 0.786096256684492,
      "eval_f1": 0.42857142857142855,
      "eval_loss": 0.46992188692092896,
      "eval_precision": 0.75,
      "eval_recall": 0.3,
      "eval_runtime": 23.5849,
      "eval_samples_per_second": 2.12,
      "eval_steps_per_second": 0.17,
      "step": 200
    },
    {
      "epoch": 0.2471564709498924,
      "grad_norm": 0.26939804480510793,
      "learning_rate": 1.872049408143808e-05,
      "loss": 0.1785,
      "step": 201
    },
    {
      "epoch": 0.24838610513372272,
      "grad_norm": 0.3016410055860768,
      "learning_rate": 1.8699380076440242e-05,
      "loss": 0.2045,
      "step": 202
    },
    {
      "epoch": 0.24961573931755301,
      "grad_norm": 0.3469258635915736,
      "learning_rate": 1.8678105394844114e-05,
      "loss": 0.2027,
      "step": 203
    },
    {
      "epoch": 0.25084537350138336,
      "grad_norm": 0.32607531852363275,
      "learning_rate": 1.8656670429590745e-05,
      "loss": 0.1564,
      "step": 204
    },
    {
      "epoch": 0.2520750076852136,
      "grad_norm": 0.2760811969255309,
      "learning_rate": 1.8635075576581587e-05,
      "loss": 0.1723,
      "step": 205
    },
    {
      "epoch": 0.25330464186904394,
      "grad_norm": 0.25212411563493814,
      "learning_rate": 1.861332123467122e-05,
      "loss": 0.2283,
      "step": 206
    },
    {
      "epoch": 0.25453427605287426,
      "grad_norm": 0.3448353672449361,
      "learning_rate": 1.859140780565996e-05,
      "loss": 0.2186,
      "step": 207
    },
    {
      "epoch": 0.2557639102367046,
      "grad_norm": 0.35723591175806685,
      "learning_rate": 1.856933569428644e-05,
      "loss": 0.2346,
      "step": 208
    },
    {
      "epoch": 0.2569935444205349,
      "grad_norm": 0.2721994057338676,
      "learning_rate": 1.8547105308220142e-05,
      "loss": 0.2408,
      "step": 209
    },
    {
      "epoch": 0.2582231786043652,
      "grad_norm": 0.2946335121583992,
      "learning_rate": 1.852471705805387e-05,
      "loss": 0.179,
      "step": 210
    },
    {
      "epoch": 0.2594528127881955,
      "grad_norm": 0.44468973852329186,
      "learning_rate": 1.8502171357296144e-05,
      "loss": 0.2007,
      "step": 211
    },
    {
      "epoch": 0.2606824469720258,
      "grad_norm": 0.4480447012946234,
      "learning_rate": 1.84794686223636e-05,
      "loss": 0.2752,
      "step": 212
    },
    {
      "epoch": 0.2619120811558561,
      "grad_norm": 0.28800115930201,
      "learning_rate": 1.8456609272573268e-05,
      "loss": 0.2106,
      "step": 213
    },
    {
      "epoch": 0.26314171533968644,
      "grad_norm": 0.4124378870162344,
      "learning_rate": 1.8433593730134835e-05,
      "loss": 0.2648,
      "step": 214
    },
    {
      "epoch": 0.26437134952351676,
      "grad_norm": 0.26984478766637626,
      "learning_rate": 1.841042242014285e-05,
      "loss": 0.1578,
      "step": 215
    },
    {
      "epoch": 0.2656009837073471,
      "grad_norm": 0.4271224156022197,
      "learning_rate": 1.838709577056888e-05,
      "loss": 0.241,
      "step": 216
    },
    {
      "epoch": 0.2668306178911774,
      "grad_norm": 0.32441622820215904,
      "learning_rate": 1.8363614212253585e-05,
      "loss": 0.1615,
      "step": 217
    },
    {
      "epoch": 0.26806025207500767,
      "grad_norm": 0.35733117717955937,
      "learning_rate": 1.833997817889878e-05,
      "loss": 0.234,
      "step": 218
    },
    {
      "epoch": 0.269289886258838,
      "grad_norm": 0.3419406717090276,
      "learning_rate": 1.8316188107059418e-05,
      "loss": 0.2538,
      "step": 219
    },
    {
      "epoch": 0.2705195204426683,
      "grad_norm": 0.31666227946792574,
      "learning_rate": 1.8292244436135517e-05,
      "loss": 0.2518,
      "step": 220
    },
    {
      "epoch": 0.2717491546264986,
      "grad_norm": 0.25628519133100447,
      "learning_rate": 1.8268147608364068e-05,
      "loss": 0.1488,
      "step": 221
    },
    {
      "epoch": 0.27297878881032894,
      "grad_norm": 0.40851754881119196,
      "learning_rate": 1.8243898068810833e-05,
      "loss": 0.2662,
      "step": 222
    },
    {
      "epoch": 0.27420842299415926,
      "grad_norm": 0.38439452767162985,
      "learning_rate": 1.8219496265362164e-05,
      "loss": 0.2033,
      "step": 223
    },
    {
      "epoch": 0.2754380571779895,
      "grad_norm": 0.39575941152172794,
      "learning_rate": 1.81949426487167e-05,
      "loss": 0.2481,
      "step": 224
    },
    {
      "epoch": 0.27666769136181985,
      "grad_norm": 0.39720852466696316,
      "learning_rate": 1.8170237672377046e-05,
      "loss": 0.2712,
      "step": 225
    },
    {
      "epoch": 0.27789732554565016,
      "grad_norm": 0.56556586568128,
      "learning_rate": 1.814538179264142e-05,
      "loss": 0.2856,
      "step": 226
    },
    {
      "epoch": 0.2791269597294805,
      "grad_norm": 0.4627206566124822,
      "learning_rate": 1.81203754685952e-05,
      "loss": 0.2353,
      "step": 227
    },
    {
      "epoch": 0.2803565939133108,
      "grad_norm": 0.2862726313485586,
      "learning_rate": 1.8095219162102453e-05,
      "loss": 0.1773,
      "step": 228
    },
    {
      "epoch": 0.2815862280971411,
      "grad_norm": 0.36128900613055054,
      "learning_rate": 1.8069913337797414e-05,
      "loss": 0.2265,
      "step": 229
    },
    {
      "epoch": 0.2828158622809714,
      "grad_norm": 0.3598391415884193,
      "learning_rate": 1.804445846307588e-05,
      "loss": 0.2167,
      "step": 230
    },
    {
      "epoch": 0.2840454964648017,
      "grad_norm": 0.3038583906156817,
      "learning_rate": 1.801885500808661e-05,
      "loss": 0.1929,
      "step": 231
    },
    {
      "epoch": 0.285275130648632,
      "grad_norm": 0.2684590976129711,
      "learning_rate": 1.7993103445722615e-05,
      "loss": 0.1955,
      "step": 232
    },
    {
      "epoch": 0.28650476483246234,
      "grad_norm": 0.3650334815395792,
      "learning_rate": 1.7967204251612432e-05,
      "loss": 0.2021,
      "step": 233
    },
    {
      "epoch": 0.28773439901629266,
      "grad_norm": 0.3529582774863101,
      "learning_rate": 1.7941157904111346e-05,
      "loss": 0.2396,
      "step": 234
    },
    {
      "epoch": 0.288964033200123,
      "grad_norm": 0.24658124615059435,
      "learning_rate": 1.7914964884292543e-05,
      "loss": 0.1841,
      "step": 235
    },
    {
      "epoch": 0.29019366738395325,
      "grad_norm": 0.3897045768921742,
      "learning_rate": 1.7888625675938237e-05,
      "loss": 0.2233,
      "step": 236
    },
    {
      "epoch": 0.29142330156778357,
      "grad_norm": 0.24436873250999808,
      "learning_rate": 1.7862140765530718e-05,
      "loss": 0.1587,
      "step": 237
    },
    {
      "epoch": 0.2926529357516139,
      "grad_norm": 0.4324084079497792,
      "learning_rate": 1.783551064224339e-05,
      "loss": 0.1914,
      "step": 238
    },
    {
      "epoch": 0.2938825699354442,
      "grad_norm": 0.31640333118588265,
      "learning_rate": 1.7808735797931715e-05,
      "loss": 0.1512,
      "step": 239
    },
    {
      "epoch": 0.2951122041192745,
      "grad_norm": 0.34244615386284427,
      "learning_rate": 1.7781816727124138e-05,
      "loss": 0.2004,
      "step": 240
    },
    {
      "epoch": 0.2951122041192745,
      "eval_accuracy": 0.7807486631016043,
      "eval_f1": 0.4225352112676056,
      "eval_loss": 0.4479687511920929,
      "eval_precision": 0.7142857142857143,
      "eval_recall": 0.3,
      "eval_runtime": 22.697,
      "eval_samples_per_second": 2.203,
      "eval_steps_per_second": 0.176,
      "step": 240
    },
    {
      "epoch": 0.29634183830310484,
      "grad_norm": 0.24395125168807116,
      "learning_rate": 1.7754753927012955e-05,
      "loss": 0.1768,
      "step": 241
    },
    {
      "epoch": 0.29757147248693516,
      "grad_norm": 0.2464638831816697,
      "learning_rate": 1.7727547897445117e-05,
      "loss": 0.1461,
      "step": 242
    },
    {
      "epoch": 0.2988011066707654,
      "grad_norm": 0.33968511169569393,
      "learning_rate": 1.770019914091302e-05,
      "loss": 0.161,
      "step": 243
    },
    {
      "epoch": 0.30003074085459575,
      "grad_norm": 0.6034876950604299,
      "learning_rate": 1.76727081625452e-05,
      "loss": 0.2023,
      "step": 244
    },
    {
      "epoch": 0.30126037503842606,
      "grad_norm": 0.4452183735534574,
      "learning_rate": 1.7645075470097024e-05,
      "loss": 0.2207,
      "step": 245
    },
    {
      "epoch": 0.3024900092222564,
      "grad_norm": 0.29139249592342986,
      "learning_rate": 1.7617301573941296e-05,
      "loss": 0.1763,
      "step": 246
    },
    {
      "epoch": 0.3037196434060867,
      "grad_norm": 0.33614977158297427,
      "learning_rate": 1.758938698705884e-05,
      "loss": 0.2381,
      "step": 247
    },
    {
      "epoch": 0.304949277589917,
      "grad_norm": 0.45585634062118247,
      "learning_rate": 1.7561332225029022e-05,
      "loss": 0.2215,
      "step": 248
    },
    {
      "epoch": 0.3061789117737473,
      "grad_norm": 0.3889096135056135,
      "learning_rate": 1.7533137806020226e-05,
      "loss": 0.2512,
      "step": 249
    },
    {
      "epoch": 0.3074085459575776,
      "grad_norm": 0.3698201068005034,
      "learning_rate": 1.7504804250780292e-05,
      "loss": 0.1962,
      "step": 250
    },
    {
      "epoch": 0.3086381801414079,
      "grad_norm": 0.23954623770426672,
      "learning_rate": 1.747633208262688e-05,
      "loss": 0.1669,
      "step": 251
    },
    {
      "epoch": 0.30986781432523824,
      "grad_norm": 0.3069850649077184,
      "learning_rate": 1.744772182743782e-05,
      "loss": 0.173,
      "step": 252
    },
    {
      "epoch": 0.31109744850906856,
      "grad_norm": 0.5346208398414731,
      "learning_rate": 1.74189740136414e-05,
      "loss": 0.2977,
      "step": 253
    },
    {
      "epoch": 0.3123270826928989,
      "grad_norm": 0.4383899410481399,
      "learning_rate": 1.7390089172206594e-05,
      "loss": 0.1867,
      "step": 254
    },
    {
      "epoch": 0.31355671687672915,
      "grad_norm": 0.34336119858301023,
      "learning_rate": 1.736106783663326e-05,
      "loss": 0.2143,
      "step": 255
    },
    {
      "epoch": 0.31478635106055947,
      "grad_norm": 0.2605528903698455,
      "learning_rate": 1.7331910542942298e-05,
      "loss": 0.2061,
      "step": 256
    },
    {
      "epoch": 0.3160159852443898,
      "grad_norm": 0.29064553728173864,
      "learning_rate": 1.7302617829665725e-05,
      "loss": 0.1888,
      "step": 257
    },
    {
      "epoch": 0.3172456194282201,
      "grad_norm": 0.42180400278336094,
      "learning_rate": 1.7273190237836757e-05,
      "loss": 0.1727,
      "step": 258
    },
    {
      "epoch": 0.3184752536120504,
      "grad_norm": 0.5419289309694173,
      "learning_rate": 1.7243628310979793e-05,
      "loss": 0.2215,
      "step": 259
    },
    {
      "epoch": 0.31970488779588074,
      "grad_norm": 0.5826869965037216,
      "learning_rate": 1.7213932595100384e-05,
      "loss": 0.2394,
      "step": 260
    },
    {
      "epoch": 0.32093452197971106,
      "grad_norm": 0.35549156755432715,
      "learning_rate": 1.7184103638675157e-05,
      "loss": 0.2212,
      "step": 261
    },
    {
      "epoch": 0.3221641561635413,
      "grad_norm": 0.403587329803515,
      "learning_rate": 1.715414199264168e-05,
      "loss": 0.1709,
      "step": 262
    },
    {
      "epoch": 0.32339379034737165,
      "grad_norm": 0.568711959322274,
      "learning_rate": 1.7124048210388268e-05,
      "loss": 0.1972,
      "step": 263
    },
    {
      "epoch": 0.32462342453120197,
      "grad_norm": 0.23810294888269123,
      "learning_rate": 1.709382284774379e-05,
      "loss": 0.1846,
      "step": 264
    },
    {
      "epoch": 0.3258530587150323,
      "grad_norm": 0.3902059118868234,
      "learning_rate": 1.706346646296739e-05,
      "loss": 0.2197,
      "step": 265
    },
    {
      "epoch": 0.3270826928988626,
      "grad_norm": 0.30966956204734636,
      "learning_rate": 1.7032979616738167e-05,
      "loss": 0.1728,
      "step": 266
    },
    {
      "epoch": 0.3283123270826929,
      "grad_norm": 0.3364767329741329,
      "learning_rate": 1.7002362872144843e-05,
      "loss": 0.236,
      "step": 267
    },
    {
      "epoch": 0.3295419612665232,
      "grad_norm": 0.2575556372712217,
      "learning_rate": 1.697161679467534e-05,
      "loss": 0.2166,
      "step": 268
    },
    {
      "epoch": 0.3307715954503535,
      "grad_norm": 0.6139887685526306,
      "learning_rate": 1.6940741952206342e-05,
      "loss": 0.217,
      "step": 269
    },
    {
      "epoch": 0.3320012296341838,
      "grad_norm": 0.3192026684409325,
      "learning_rate": 1.6909738914992812e-05,
      "loss": 0.228,
      "step": 270
    },
    {
      "epoch": 0.33323086381801414,
      "grad_norm": 0.3191098195045691,
      "learning_rate": 1.6878608255657457e-05,
      "loss": 0.2148,
      "step": 271
    },
    {
      "epoch": 0.33446049800184446,
      "grad_norm": 0.3876596561870598,
      "learning_rate": 1.6847350549180148e-05,
      "loss": 0.2191,
      "step": 272
    },
    {
      "epoch": 0.3356901321856748,
      "grad_norm": 0.38903679735549906,
      "learning_rate": 1.6815966372887305e-05,
      "loss": 0.2205,
      "step": 273
    },
    {
      "epoch": 0.33691976636950505,
      "grad_norm": 0.38626350258480885,
      "learning_rate": 1.6784456306441234e-05,
      "loss": 0.2672,
      "step": 274
    },
    {
      "epoch": 0.33814940055333537,
      "grad_norm": 0.3297053884257871,
      "learning_rate": 1.675282093182941e-05,
      "loss": 0.1686,
      "step": 275
    },
    {
      "epoch": 0.3393790347371657,
      "grad_norm": 0.4263354582166456,
      "learning_rate": 1.672106083335374e-05,
      "loss": 0.1873,
      "step": 276
    },
    {
      "epoch": 0.340608668920996,
      "grad_norm": 0.2796799138264646,
      "learning_rate": 1.6689176597619773e-05,
      "loss": 0.1861,
      "step": 277
    },
    {
      "epoch": 0.3418383031048263,
      "grad_norm": 0.2741188381316141,
      "learning_rate": 1.6657168813525855e-05,
      "loss": 0.2212,
      "step": 278
    },
    {
      "epoch": 0.34306793728865664,
      "grad_norm": 0.3392975873746286,
      "learning_rate": 1.662503807225225e-05,
      "loss": 0.2087,
      "step": 279
    },
    {
      "epoch": 0.3442975714724869,
      "grad_norm": 0.3420260746414455,
      "learning_rate": 1.659278496725024e-05,
      "loss": 0.2241,
      "step": 280
    },
    {
      "epoch": 0.3442975714724869,
      "eval_accuracy": 0.7807486631016043,
      "eval_f1": 0.4225352112676056,
      "eval_loss": 0.4449218809604645,
      "eval_precision": 0.7142857142857143,
      "eval_recall": 0.3,
      "eval_runtime": 22.916,
      "eval_samples_per_second": 2.182,
      "eval_steps_per_second": 0.175,
      "step": 280
    },
    {
      "epoch": 0.3455272056563172,
      "grad_norm": 0.34182478583361364,
      "learning_rate": 1.6560410094231144e-05,
      "loss": 0.2257,
      "step": 281
    },
    {
      "epoch": 0.34675683984014755,
      "grad_norm": 0.3981831055380401,
      "learning_rate": 1.6527914051155328e-05,
      "loss": 0.2593,
      "step": 282
    },
    {
      "epoch": 0.34798647402397787,
      "grad_norm": 0.3376266386557847,
      "learning_rate": 1.6495297438221145e-05,
      "loss": 0.1899,
      "step": 283
    },
    {
      "epoch": 0.3492161082078082,
      "grad_norm": 0.3924075494987157,
      "learning_rate": 1.6462560857853876e-05,
      "loss": 0.2032,
      "step": 284
    },
    {
      "epoch": 0.3504457423916385,
      "grad_norm": 0.5277347648864814,
      "learning_rate": 1.6429704914694573e-05,
      "loss": 0.1546,
      "step": 285
    },
    {
      "epoch": 0.3516753765754688,
      "grad_norm": 0.32345022677167856,
      "learning_rate": 1.6396730215588913e-05,
      "loss": 0.2559,
      "step": 286
    },
    {
      "epoch": 0.3529050107592991,
      "grad_norm": 0.44254976480188857,
      "learning_rate": 1.6363637369575984e-05,
      "loss": 0.2794,
      "step": 287
    },
    {
      "epoch": 0.3541346449431294,
      "grad_norm": 0.4548791128214442,
      "learning_rate": 1.633042698787703e-05,
      "loss": 0.2879,
      "step": 288
    },
    {
      "epoch": 0.3553642791269597,
      "grad_norm": 0.27873749207646975,
      "learning_rate": 1.6297099683884163e-05,
      "loss": 0.1636,
      "step": 289
    },
    {
      "epoch": 0.35659391331079004,
      "grad_norm": 0.3258495849943053,
      "learning_rate": 1.626365607314905e-05,
      "loss": 0.2007,
      "step": 290
    },
    {
      "epoch": 0.35782354749462036,
      "grad_norm": 0.4098308834550629,
      "learning_rate": 1.6230096773371514e-05,
      "loss": 0.2787,
      "step": 291
    },
    {
      "epoch": 0.3590531816784507,
      "grad_norm": 0.2722654753472478,
      "learning_rate": 1.619642240438816e-05,
      "loss": 0.19,
      "step": 292
    },
    {
      "epoch": 0.36028281586228095,
      "grad_norm": 0.28423296754063243,
      "learning_rate": 1.616263358816089e-05,
      "loss": 0.1652,
      "step": 293
    },
    {
      "epoch": 0.36151245004611127,
      "grad_norm": 0.29897633728182227,
      "learning_rate": 1.612873094876545e-05,
      "loss": 0.2339,
      "step": 294
    },
    {
      "epoch": 0.3627420842299416,
      "grad_norm": 0.31056546478547936,
      "learning_rate": 1.6094715112379874e-05,
      "loss": 0.1838,
      "step": 295
    },
    {
      "epoch": 0.3639717184137719,
      "grad_norm": 0.2931202881800801,
      "learning_rate": 1.6060586707272943e-05,
      "loss": 0.1744,
      "step": 296
    },
    {
      "epoch": 0.3652013525976022,
      "grad_norm": 0.37366320482050214,
      "learning_rate": 1.6026346363792565e-05,
      "loss": 0.1915,
      "step": 297
    },
    {
      "epoch": 0.36643098678143254,
      "grad_norm": 0.3528926787344543,
      "learning_rate": 1.599199471435414e-05,
      "loss": 0.1589,
      "step": 298
    },
    {
      "epoch": 0.3676606209652628,
      "grad_norm": 0.3024625428052122,
      "learning_rate": 1.5957532393428872e-05,
      "loss": 0.2285,
      "step": 299
    },
    {
      "epoch": 0.3688902551490931,
      "grad_norm": 0.3986663078423136,
      "learning_rate": 1.5922960037532057e-05,
      "loss": 0.281,
      "step": 300
    },
    {
      "epoch": 0.37011988933292345,
      "grad_norm": 0.36134533123850165,
      "learning_rate": 1.588827828521133e-05,
      "loss": 0.1997,
      "step": 301
    },
    {
      "epoch": 0.37134952351675377,
      "grad_norm": 0.40175135868371165,
      "learning_rate": 1.585348777703486e-05,
      "loss": 0.2047,
      "step": 302
    },
    {
      "epoch": 0.3725791577005841,
      "grad_norm": 0.3610033083703328,
      "learning_rate": 1.581858915557953e-05,
      "loss": 0.2037,
      "step": 303
    },
    {
      "epoch": 0.3738087918844144,
      "grad_norm": 0.42325801209258107,
      "learning_rate": 1.5783583065419054e-05,
      "loss": 0.1871,
      "step": 304
    },
    {
      "epoch": 0.3750384260682447,
      "grad_norm": 0.28821395654955023,
      "learning_rate": 1.5748470153112093e-05,
      "loss": 0.1934,
      "step": 305
    },
    {
      "epoch": 0.376268060252075,
      "grad_norm": 0.4052634070520442,
      "learning_rate": 1.57132510671903e-05,
      "loss": 0.2351,
      "step": 306
    },
    {
      "epoch": 0.3774976944359053,
      "grad_norm": 0.48496008691724585,
      "learning_rate": 1.5677926458146327e-05,
      "loss": 0.2527,
      "step": 307
    },
    {
      "epoch": 0.3787273286197356,
      "grad_norm": 0.4026838585087856,
      "learning_rate": 1.5642496978421842e-05,
      "loss": 0.2554,
      "step": 308
    },
    {
      "epoch": 0.37995696280356595,
      "grad_norm": 0.34534876497819145,
      "learning_rate": 1.560696328239547e-05,
      "loss": 0.1656,
      "step": 309
    },
    {
      "epoch": 0.38118659698739626,
      "grad_norm": 0.2751030687579328,
      "learning_rate": 1.5571326026370676e-05,
      "loss": 0.162,
      "step": 310
    },
    {
      "epoch": 0.3824162311712266,
      "grad_norm": 0.38621022206094485,
      "learning_rate": 1.5535585868563688e-05,
      "loss": 0.2212,
      "step": 311
    },
    {
      "epoch": 0.38364586535505685,
      "grad_norm": 0.3039979678378701,
      "learning_rate": 1.5499743469091303e-05,
      "loss": 0.2413,
      "step": 312
    },
    {
      "epoch": 0.38487549953888717,
      "grad_norm": 0.31620852109871317,
      "learning_rate": 1.5463799489958727e-05,
      "loss": 0.1701,
      "step": 313
    },
    {
      "epoch": 0.3861051337227175,
      "grad_norm": 0.3036007341488487,
      "learning_rate": 1.542775459504732e-05,
      "loss": 0.1718,
      "step": 314
    },
    {
      "epoch": 0.3873347679065478,
      "grad_norm": 0.4288199829474487,
      "learning_rate": 1.5391609450102346e-05,
      "loss": 0.2237,
      "step": 315
    },
    {
      "epoch": 0.3885644020903781,
      "grad_norm": 0.3798815943379263,
      "learning_rate": 1.5355364722720674e-05,
      "loss": 0.2177,
      "step": 316
    },
    {
      "epoch": 0.38979403627420844,
      "grad_norm": 0.30645321053750946,
      "learning_rate": 1.5319021082338458e-05,
      "loss": 0.2356,
      "step": 317
    },
    {
      "epoch": 0.3910236704580387,
      "grad_norm": 0.3377360946626629,
      "learning_rate": 1.5282579200218762e-05,
      "loss": 0.21,
      "step": 318
    },
    {
      "epoch": 0.392253304641869,
      "grad_norm": 0.3645715594028636,
      "learning_rate": 1.5246039749439159e-05,
      "loss": 0.1733,
      "step": 319
    },
    {
      "epoch": 0.39348293882569935,
      "grad_norm": 0.29596291916382467,
      "learning_rate": 1.5209403404879305e-05,
      "loss": 0.1505,
      "step": 320
    },
    {
      "epoch": 0.39348293882569935,
      "eval_accuracy": 0.8181818181818182,
      "eval_f1": 0.5853658536585366,
      "eval_loss": 0.40882813930511475,
      "eval_precision": 0.75,
      "eval_recall": 0.48,
      "eval_runtime": 23.2002,
| "eval_samples_per_second": 2.155, | |
| "eval_steps_per_second": 0.172, | |
| "step": 320 | |
| }, | |
| { | |
| "epoch": 0.39471257300952967, | |
| "grad_norm": 0.4078103453153395, | |
| "learning_rate": 1.5172670843208477e-05, | |
| "loss": 0.2415, | |
| "step": 321 | |
| }, | |
| { | |
| "epoch": 0.39594220719336, | |
| "grad_norm": 0.3270612364847978, | |
| "learning_rate": 1.5135842742873077e-05, | |
| "loss": 0.2059, | |
| "step": 322 | |
| }, | |
| { | |
| "epoch": 0.3971718413771903, | |
| "grad_norm": 0.3309279944550533, | |
| "learning_rate": 1.5098919784084083e-05, | |
| "loss": 0.1569, | |
| "step": 323 | |
| }, | |
| { | |
| "epoch": 0.3984014755610206, | |
| "grad_norm": 0.49875353395381333, | |
| "learning_rate": 1.5061902648804503e-05, | |
| "loss": 0.2477, | |
| "step": 324 | |
| }, | |
| { | |
| "epoch": 0.3996311097448509, | |
| "grad_norm": 0.3787412609953064, | |
| "learning_rate": 1.502479202073678e-05, | |
| "loss": 0.179, | |
| "step": 325 | |
| }, | |
| { | |
| "epoch": 0.4008607439286812, | |
| "grad_norm": 0.3919230580283582, | |
| "learning_rate": 1.4987588585310154e-05, | |
| "loss": 0.2249, | |
| "step": 326 | |
| }, | |
| { | |
| "epoch": 0.4020903781125115, | |
| "grad_norm": 0.40414791953417883, | |
| "learning_rate": 1.4950293029668004e-05, | |
| "loss": 0.1772, | |
| "step": 327 | |
| }, | |
| { | |
| "epoch": 0.40332001229634185, | |
| "grad_norm": 0.3778228440356831, | |
| "learning_rate": 1.4912906042655164e-05, | |
| "loss": 0.208, | |
| "step": 328 | |
| }, | |
| { | |
| "epoch": 0.40454964648017216, | |
| "grad_norm": 0.4107607344666035, | |
| "learning_rate": 1.4875428314805195e-05, | |
| "loss": 0.1716, | |
| "step": 329 | |
| }, | |
| { | |
| "epoch": 0.4057792806640025, | |
| "grad_norm": 0.3671542265326293, | |
| "learning_rate": 1.483786053832763e-05, | |
| "loss": 0.1805, | |
| "step": 330 | |
| }, | |
| { | |
| "epoch": 0.40700891484783275, | |
| "grad_norm": 0.404957322338137, | |
| "learning_rate": 1.4800203407095194e-05, | |
| "loss": 0.1842, | |
| "step": 331 | |
| }, | |
| { | |
| "epoch": 0.40823854903166307, | |
| "grad_norm": 0.48189780697782403, | |
| "learning_rate": 1.4762457616630972e-05, | |
| "loss": 0.2277, | |
| "step": 332 | |
| }, | |
| { | |
| "epoch": 0.4094681832154934, | |
| "grad_norm": 0.30773052153479974, | |
| "learning_rate": 1.4724623864095595e-05, | |
| "loss": 0.1833, | |
| "step": 333 | |
| }, | |
| { | |
| "epoch": 0.4106978173993237, | |
| "grad_norm": 0.4223729302065043, | |
| "learning_rate": 1.4686702848274328e-05, | |
| "loss": 0.2219, | |
| "step": 334 | |
| }, | |
| { | |
| "epoch": 0.411927451583154, | |
| "grad_norm": 0.2902399210528883, | |
| "learning_rate": 1.4648695269564182e-05, | |
| "loss": 0.1785, | |
| "step": 335 | |
| }, | |
| { | |
| "epoch": 0.41315708576698434, | |
| "grad_norm": 0.297435572010037, | |
| "learning_rate": 1.461060182996098e-05, | |
| "loss": 0.2441, | |
| "step": 336 | |
| }, | |
| { | |
| "epoch": 0.4143867199508146, | |
| "grad_norm": 0.4498936613858582, | |
| "learning_rate": 1.4572423233046386e-05, | |
| "loss": 0.1765, | |
| "step": 337 | |
| }, | |
| { | |
| "epoch": 0.4156163541346449, | |
| "grad_norm": 0.476353253435308, | |
| "learning_rate": 1.4534160183974908e-05, | |
| "loss": 0.1711, | |
| "step": 338 | |
| }, | |
| { | |
| "epoch": 0.41684598831847525, | |
| "grad_norm": 0.4136384364300188, | |
| "learning_rate": 1.4495813389460875e-05, | |
| "loss": 0.213, | |
| "step": 339 | |
| }, | |
| { | |
| "epoch": 0.41807562250230557, | |
| "grad_norm": 0.457702192015087, | |
| "learning_rate": 1.4457383557765385e-05, | |
| "loss": 0.2056, | |
| "step": 340 | |
| }, | |
| { | |
| "epoch": 0.4193052566861359, | |
| "grad_norm": 0.32316243545279294, | |
| "learning_rate": 1.4418871398683227e-05, | |
| "loss": 0.2445, | |
| "step": 341 | |
| }, | |
| { | |
| "epoch": 0.4205348908699662, | |
| "grad_norm": 0.3952237252439469, | |
| "learning_rate": 1.4380277623529766e-05, | |
| "loss": 0.1789, | |
| "step": 342 | |
| }, | |
| { | |
| "epoch": 0.42176452505379647, | |
| "grad_norm": 0.45979858964325293, | |
| "learning_rate": 1.4341602945127806e-05, | |
| "loss": 0.225, | |
| "step": 343 | |
| }, | |
| { | |
| "epoch": 0.4229941592376268, | |
| "grad_norm": 0.456535265546009, | |
| "learning_rate": 1.4302848077794427e-05, | |
| "loss": 0.2245, | |
| "step": 344 | |
| }, | |
| { | |
| "epoch": 0.4242237934214571, | |
| "grad_norm": 0.3244747375904321, | |
| "learning_rate": 1.426401373732779e-05, | |
| "loss": 0.1801, | |
| "step": 345 | |
| }, | |
| { | |
| "epoch": 0.4254534276052874, | |
| "grad_norm": 0.40300301462416604, | |
| "learning_rate": 1.422510064099391e-05, | |
| "loss": 0.2212, | |
| "step": 346 | |
| }, | |
| { | |
| "epoch": 0.42668306178911775, | |
| "grad_norm": 0.5264557343629197, | |
| "learning_rate": 1.4186109507513425e-05, | |
| "loss": 0.2202, | |
| "step": 347 | |
| }, | |
| { | |
| "epoch": 0.42791269597294807, | |
| "grad_norm": 0.4350217170052354, | |
| "learning_rate": 1.4147041057048303e-05, | |
| "loss": 0.2061, | |
| "step": 348 | |
| }, | |
| { | |
| "epoch": 0.4291423301567784, | |
| "grad_norm": 0.6810095448654682, | |
| "learning_rate": 1.4107896011188546e-05, | |
| "loss": 0.1782, | |
| "step": 349 | |
| }, | |
| { | |
| "epoch": 0.43037196434060865, | |
| "grad_norm": 0.25807154769833757, | |
| "learning_rate": 1.4068675092938872e-05, | |
| "loss": 0.156, | |
| "step": 350 | |
| }, | |
| { | |
| "epoch": 0.43160159852443897, | |
| "grad_norm": 0.29519781987132376, | |
| "learning_rate": 1.4029379026705352e-05, | |
| "loss": 0.2078, | |
| "step": 351 | |
| }, | |
| { | |
| "epoch": 0.4328312327082693, | |
| "grad_norm": 0.4580606963993864, | |
| "learning_rate": 1.3990008538282027e-05, | |
| "loss": 0.2024, | |
| "step": 352 | |
| }, | |
| { | |
| "epoch": 0.4340608668920996, | |
| "grad_norm": 0.4014942247225734, | |
| "learning_rate": 1.3950564354837512e-05, | |
| "loss": 0.1801, | |
| "step": 353 | |
| }, | |
| { | |
| "epoch": 0.4352905010759299, | |
| "grad_norm": 0.40131332657814234, | |
| "learning_rate": 1.391104720490156e-05, | |
| "loss": 0.214, | |
| "step": 354 | |
| }, | |
| { | |
| "epoch": 0.43652013525976024, | |
| "grad_norm": 0.3560030996515948, | |
| "learning_rate": 1.387145781835161e-05, | |
| "loss": 0.2126, | |
| "step": 355 | |
| }, | |
| { | |
| "epoch": 0.4377497694435905, | |
| "grad_norm": 0.3729681090326996, | |
| "learning_rate": 1.3831796926399295e-05, | |
| "loss": 0.2055, | |
| "step": 356 | |
| }, | |
| { | |
| "epoch": 0.43897940362742083, | |
| "grad_norm": 0.4172829333159122, | |
| "learning_rate": 1.3792065261576953e-05, | |
| "loss": 0.2326, | |
| "step": 357 | |
| }, | |
| { | |
| "epoch": 0.44020903781125115, | |
| "grad_norm": 0.30998074509681783, | |
| "learning_rate": 1.3752263557724088e-05, | |
| "loss": 0.1633, | |
| "step": 358 | |
| }, | |
| { | |
| "epoch": 0.44143867199508147, | |
| "grad_norm": 0.345268920537541, | |
| "learning_rate": 1.3712392549973814e-05, | |
| "loss": 0.1765, | |
| "step": 359 | |
| }, | |
| { | |
| "epoch": 0.4426683061789118, | |
| "grad_norm": 0.3508481090946454, | |
| "learning_rate": 1.3672452974739278e-05, | |
| "loss": 0.1752, | |
| "step": 360 | |
| }, | |
| { | |
| "epoch": 0.4426683061789118, | |
| "eval_accuracy": 0.786096256684492, | |
| "eval_f1": 0.42857142857142855, | |
| "eval_loss": 0.4385937452316284, | |
| "eval_precision": 0.75, | |
| "eval_recall": 0.3, | |
| "eval_runtime": 23.4097, | |
| "eval_samples_per_second": 2.136, | |
| "eval_steps_per_second": 0.171, | |
| "step": 360 | |
| }, | |
| { | |
| "epoch": 0.4438979403627421, | |
| "grad_norm": 0.41404798323100117, | |
| "learning_rate": 1.3632445569700078e-05, | |
| "loss": 0.1745, | |
| "step": 361 | |
| }, | |
| { | |
| "epoch": 0.44512757454657237, | |
| "grad_norm": 0.39572877694784087, | |
| "learning_rate": 1.3592371073788595e-05, | |
| "loss": 0.216, | |
| "step": 362 | |
| }, | |
| { | |
| "epoch": 0.4463572087304027, | |
| "grad_norm": 0.3927819000312662, | |
| "learning_rate": 1.355223022717639e-05, | |
| "loss": 0.164, | |
| "step": 363 | |
| }, | |
| { | |
| "epoch": 0.447586842914233, | |
| "grad_norm": 0.36355749807156507, | |
| "learning_rate": 1.3512023771260507e-05, | |
| "loss": 0.2439, | |
| "step": 364 | |
| }, | |
| { | |
| "epoch": 0.4488164770980633, | |
| "grad_norm": 0.37694180103294717, | |
| "learning_rate": 1.347175244864979e-05, | |
| "loss": 0.2009, | |
| "step": 365 | |
| }, | |
| { | |
| "epoch": 0.45004611128189365, | |
| "grad_norm": 0.3566619922589067, | |
| "learning_rate": 1.3431417003151162e-05, | |
| "loss": 0.2045, | |
| "step": 366 | |
| }, | |
| { | |
| "epoch": 0.45127574546572397, | |
| "grad_norm": 0.36473757399388024, | |
| "learning_rate": 1.3391018179755886e-05, | |
| "loss": 0.1711, | |
| "step": 367 | |
| }, | |
| { | |
| "epoch": 0.4525053796495543, | |
| "grad_norm": 0.3559479590045658, | |
| "learning_rate": 1.3350556724625809e-05, | |
| "loss": 0.2061, | |
| "step": 368 | |
| }, | |
| { | |
| "epoch": 0.45373501383338455, | |
| "grad_norm": 0.34195723156033303, | |
| "learning_rate": 1.3310033385079589e-05, | |
| "loss": 0.1761, | |
| "step": 369 | |
| }, | |
| { | |
| "epoch": 0.45496464801721487, | |
| "grad_norm": 0.5770960005405298, | |
| "learning_rate": 1.3269448909578866e-05, | |
| "loss": 0.227, | |
| "step": 370 | |
| }, | |
| { | |
| "epoch": 0.4561942822010452, | |
| "grad_norm": 0.6125705002122824, | |
| "learning_rate": 1.3228804047714462e-05, | |
| "loss": 0.2351, | |
| "step": 371 | |
| }, | |
| { | |
| "epoch": 0.4574239163848755, | |
| "grad_norm": 0.48363918457013794, | |
| "learning_rate": 1.3188099550192537e-05, | |
| "loss": 0.1847, | |
| "step": 372 | |
| }, | |
| { | |
| "epoch": 0.4586535505687058, | |
| "grad_norm": 0.6610021081352014, | |
| "learning_rate": 1.31473361688207e-05, | |
| "loss": 0.2129, | |
| "step": 373 | |
| }, | |
| { | |
| "epoch": 0.45988318475253614, | |
| "grad_norm": 0.4341786311855446, | |
| "learning_rate": 1.3106514656494147e-05, | |
| "loss": 0.2426, | |
| "step": 374 | |
| }, | |
| { | |
| "epoch": 0.4611128189363664, | |
| "grad_norm": 0.3013643837535933, | |
| "learning_rate": 1.3065635767181748e-05, | |
| "loss": 0.1596, | |
| "step": 375 | |
| }, | |
| { | |
| "epoch": 0.46234245312019673, | |
| "grad_norm": 0.29963722757790967, | |
| "learning_rate": 1.302470025591211e-05, | |
| "loss": 0.1821, | |
| "step": 376 | |
| }, | |
| { | |
| "epoch": 0.46357208730402705, | |
| "grad_norm": 0.5856707955354147, | |
| "learning_rate": 1.2983708878759655e-05, | |
| "loss": 0.2024, | |
| "step": 377 | |
| }, | |
| { | |
| "epoch": 0.46480172148785737, | |
| "grad_norm": 0.37812820220102683, | |
| "learning_rate": 1.2942662392830632e-05, | |
| "loss": 0.2049, | |
| "step": 378 | |
| }, | |
| { | |
| "epoch": 0.4660313556716877, | |
| "grad_norm": 0.41966854124526104, | |
| "learning_rate": 1.290156155624914e-05, | |
| "loss": 0.227, | |
| "step": 379 | |
| }, | |
| { | |
| "epoch": 0.467260989855518, | |
| "grad_norm": 0.39476761658009546, | |
| "learning_rate": 1.286040712814314e-05, | |
| "loss": 0.1552, | |
| "step": 380 | |
| }, | |
| { | |
| "epoch": 0.46849062403934827, | |
| "grad_norm": 0.4381036125394883, | |
| "learning_rate": 1.2819199868630419e-05, | |
| "loss": 0.1686, | |
| "step": 381 | |
| }, | |
| { | |
| "epoch": 0.4697202582231786, | |
| "grad_norm": 0.2834280308233097, | |
| "learning_rate": 1.2777940538804545e-05, | |
| "loss": 0.1292, | |
| "step": 382 | |
| }, | |
| { | |
| "epoch": 0.4709498924070089, | |
| "grad_norm": 0.3708781081449464, | |
| "learning_rate": 1.2736629900720832e-05, | |
| "loss": 0.1575, | |
| "step": 383 | |
| }, | |
| { | |
| "epoch": 0.4721795265908392, | |
| "grad_norm": 0.37089620983594307, | |
| "learning_rate": 1.2695268717382242e-05, | |
| "loss": 0.1923, | |
| "step": 384 | |
| }, | |
| { | |
| "epoch": 0.47340916077466955, | |
| "grad_norm": 0.37850202105410397, | |
| "learning_rate": 1.2653857752725305e-05, | |
| "loss": 0.1751, | |
| "step": 385 | |
| }, | |
| { | |
| "epoch": 0.47463879495849987, | |
| "grad_norm": 0.29465463630363, | |
| "learning_rate": 1.2612397771606015e-05, | |
| "loss": 0.1792, | |
| "step": 386 | |
| }, | |
| { | |
| "epoch": 0.47586842914233013, | |
| "grad_norm": 0.35698339352314057, | |
| "learning_rate": 1.2570889539785683e-05, | |
| "loss": 0.1325, | |
| "step": 387 | |
| }, | |
| { | |
| "epoch": 0.47709806332616045, | |
| "grad_norm": 0.43664575464965527, | |
| "learning_rate": 1.2529333823916807e-05, | |
| "loss": 0.1764, | |
| "step": 388 | |
| }, | |
| { | |
| "epoch": 0.47832769750999077, | |
| "grad_norm": 0.4130236851643136, | |
| "learning_rate": 1.2487731391528919e-05, | |
| "loss": 0.2261, | |
| "step": 389 | |
| }, | |
| { | |
| "epoch": 0.4795573316938211, | |
| "grad_norm": 0.37099838030443905, | |
| "learning_rate": 1.2446083011014389e-05, | |
| "loss": 0.187, | |
| "step": 390 | |
| }, | |
| { | |
| "epoch": 0.4807869658776514, | |
| "grad_norm": 0.241662388149488, | |
| "learning_rate": 1.2404389451614253e-05, | |
| "loss": 0.1671, | |
| "step": 391 | |
| }, | |
| { | |
| "epoch": 0.4820166000614817, | |
| "grad_norm": 0.40938373429888714, | |
| "learning_rate": 1.2362651483403985e-05, | |
| "loss": 0.1861, | |
| "step": 392 | |
| }, | |
| { | |
| "epoch": 0.48324623424531205, | |
| "grad_norm": 0.3623538192567374, | |
| "learning_rate": 1.2320869877279297e-05, | |
| "loss": 0.1743, | |
| "step": 393 | |
| }, | |
| { | |
| "epoch": 0.4844758684291423, | |
| "grad_norm": 0.4437845258381219, | |
| "learning_rate": 1.2279045404941883e-05, | |
| "loss": 0.2337, | |
| "step": 394 | |
| }, | |
| { | |
| "epoch": 0.48570550261297263, | |
| "grad_norm": 0.3749955717955905, | |
| "learning_rate": 1.2237178838885168e-05, | |
| "loss": 0.2027, | |
| "step": 395 | |
| }, | |
| { | |
| "epoch": 0.48693513679680295, | |
| "grad_norm": 0.3505048992054775, | |
| "learning_rate": 1.2195270952380052e-05, | |
| "loss": 0.1571, | |
| "step": 396 | |
| }, | |
| { | |
| "epoch": 0.48816477098063327, | |
| "grad_norm": 0.3713688621968288, | |
| "learning_rate": 1.215332251946061e-05, | |
| "loss": 0.1915, | |
| "step": 397 | |
| }, | |
| { | |
| "epoch": 0.4893944051644636, | |
| "grad_norm": 0.4834162644214324, | |
| "learning_rate": 1.2111334314909811e-05, | |
| "loss": 0.2389, | |
| "step": 398 | |
| }, | |
| { | |
| "epoch": 0.4906240393482939, | |
| "grad_norm": 0.38954625041703195, | |
| "learning_rate": 1.2069307114245197e-05, | |
| "loss": 0.1682, | |
| "step": 399 | |
| }, | |
| { | |
| "epoch": 0.49185367353212417, | |
| "grad_norm": 0.46232563297646273, | |
| "learning_rate": 1.2027241693704567e-05, | |
| "loss": 0.2382, | |
| "step": 400 | |
| }, | |
| { | |
| "epoch": 0.49185367353212417, | |
| "eval_accuracy": 0.8128342245989305, | |
| "eval_f1": 0.5454545454545454, | |
| "eval_loss": 0.41859376430511475, | |
| "eval_precision": 0.7777777777777778, | |
| "eval_recall": 0.42, | |
| "eval_runtime": 22.7158, | |
| "eval_samples_per_second": 2.201, | |
| "eval_steps_per_second": 0.176, | |
| "step": 400 | |
| }, | |
| { | |
| "epoch": 0.4930833077159545, | |
| "grad_norm": 0.4925378998198651, | |
| "learning_rate": 1.1985138830231638e-05, | |
| "loss": 0.2452, | |
| "step": 401 | |
| }, | |
| { | |
| "epoch": 0.4943129418997848, | |
| "grad_norm": 0.43350371819373273, | |
| "learning_rate": 1.1942999301461694e-05, | |
| "loss": 0.2145, | |
| "step": 402 | |
| }, | |
| { | |
| "epoch": 0.4955425760836151, | |
| "grad_norm": 0.4788710711794286, | |
| "learning_rate": 1.1900823885707216e-05, | |
| "loss": 0.2355, | |
| "step": 403 | |
| }, | |
| { | |
| "epoch": 0.49677221026744545, | |
| "grad_norm": 0.48489068989863693, | |
| "learning_rate": 1.1858613361943518e-05, | |
| "loss": 0.1636, | |
| "step": 404 | |
| }, | |
| { | |
| "epoch": 0.49800184445127577, | |
| "grad_norm": 0.38739819249024443, | |
| "learning_rate": 1.1816368509794365e-05, | |
| "loss": 0.1768, | |
| "step": 405 | |
| }, | |
| { | |
| "epoch": 0.49923147863510603, | |
| "grad_norm": 0.6455001321481116, | |
| "learning_rate": 1.177409010951755e-05, | |
| "loss": 0.2625, | |
| "step": 406 | |
| }, | |
| { | |
| "epoch": 0.5004611128189363, | |
| "grad_norm": 0.6173490831722124, | |
| "learning_rate": 1.1731778941990497e-05, | |
| "loss": 0.1915, | |
| "step": 407 | |
| }, | |
| { | |
| "epoch": 0.5016907470027667, | |
| "grad_norm": 0.4814901038602925, | |
| "learning_rate": 1.1689435788695844e-05, | |
| "loss": 0.1646, | |
| "step": 408 | |
| }, | |
| { | |
| "epoch": 0.502920381186597, | |
| "grad_norm": 0.4617832240658794, | |
| "learning_rate": 1.1647061431707e-05, | |
| "loss": 0.1671, | |
| "step": 409 | |
| }, | |
| { | |
| "epoch": 0.5041500153704273, | |
| "grad_norm": 0.31842872427334545, | |
| "learning_rate": 1.1604656653673707e-05, | |
| "loss": 0.1587, | |
| "step": 410 | |
| }, | |
| { | |
| "epoch": 0.5053796495542576, | |
| "grad_norm": 0.4310369489277566, | |
| "learning_rate": 1.156222223780757e-05, | |
| "loss": 0.2058, | |
| "step": 411 | |
| }, | |
| { | |
| "epoch": 0.5066092837380879, | |
| "grad_norm": 0.3434568907080096, | |
| "learning_rate": 1.1519758967867608e-05, | |
| "loss": 0.1748, | |
| "step": 412 | |
| }, | |
| { | |
| "epoch": 0.5078389179219183, | |
| "grad_norm": 0.3212603412265404, | |
| "learning_rate": 1.1477267628145777e-05, | |
| "loss": 0.1998, | |
| "step": 413 | |
| }, | |
| { | |
| "epoch": 0.5090685521057485, | |
| "grad_norm": 0.3727679962149267, | |
| "learning_rate": 1.1434749003452467e-05, | |
| "loss": 0.1962, | |
| "step": 414 | |
| }, | |
| { | |
| "epoch": 0.5102981862895789, | |
| "grad_norm": 0.5738583382215159, | |
| "learning_rate": 1.1392203879102027e-05, | |
| "loss": 0.1792, | |
| "step": 415 | |
| }, | |
| { | |
| "epoch": 0.5115278204734092, | |
| "grad_norm": 0.3242461291494194, | |
| "learning_rate": 1.1349633040898246e-05, | |
| "loss": 0.1407, | |
| "step": 416 | |
| }, | |
| { | |
| "epoch": 0.5127574546572394, | |
| "grad_norm": 0.39000456676333345, | |
| "learning_rate": 1.1307037275119854e-05, | |
| "loss": 0.1644, | |
| "step": 417 | |
| }, | |
| { | |
| "epoch": 0.5139870888410698, | |
| "grad_norm": 0.3257334260528596, | |
| "learning_rate": 1.1264417368505981e-05, | |
| "loss": 0.2097, | |
| "step": 418 | |
| }, | |
| { | |
| "epoch": 0.5152167230249001, | |
| "grad_norm": 0.37892769382651426, | |
| "learning_rate": 1.1221774108241646e-05, | |
| "loss": 0.1604, | |
| "step": 419 | |
| }, | |
| { | |
| "epoch": 0.5164463572087304, | |
| "grad_norm": 0.424302046031257, | |
| "learning_rate": 1.117910828194319e-05, | |
| "loss": 0.1472, | |
| "step": 420 | |
| }, | |
| { | |
| "epoch": 0.5176759913925607, | |
| "grad_norm": 0.36620159075013065, | |
| "learning_rate": 1.1136420677643763e-05, | |
| "loss": 0.1656, | |
| "step": 421 | |
| }, | |
| { | |
| "epoch": 0.518905625576391, | |
| "grad_norm": 0.3640140474940355, | |
| "learning_rate": 1.1093712083778748e-05, | |
| "loss": 0.1676, | |
| "step": 422 | |
| }, | |
| { | |
| "epoch": 0.5201352597602213, | |
| "grad_norm": 0.5095089795756096, | |
| "learning_rate": 1.1050983289171195e-05, | |
| "loss": 0.2301, | |
| "step": 423 | |
| }, | |
| { | |
| "epoch": 0.5213648939440516, | |
| "grad_norm": 0.3606729676571787, | |
| "learning_rate": 1.1008235083017272e-05, | |
| "loss": 0.1519, | |
| "step": 424 | |
| }, | |
| { | |
| "epoch": 0.522594528127882, | |
| "grad_norm": 0.6045563512444339, | |
| "learning_rate": 1.096546825487167e-05, | |
| "loss": 0.2366, | |
| "step": 425 | |
| }, | |
| { | |
| "epoch": 0.5238241623117122, | |
| "grad_norm": 0.33927673864133545, | |
| "learning_rate": 1.092268359463302e-05, | |
| "loss": 0.2344, | |
| "step": 426 | |
| }, | |
| { | |
| "epoch": 0.5250537964955426, | |
| "grad_norm": 0.42112411326748034, | |
| "learning_rate": 1.0879881892529325e-05, | |
| "loss": 0.1428, | |
| "step": 427 | |
| }, | |
| { | |
| "epoch": 0.5262834306793729, | |
| "grad_norm": 0.5237196748492299, | |
| "learning_rate": 1.0837063939103332e-05, | |
| "loss": 0.2371, | |
| "step": 428 | |
| }, | |
| { | |
| "epoch": 0.5275130648632032, | |
| "grad_norm": 0.45934815233301857, | |
| "learning_rate": 1.0794230525197959e-05, | |
| "loss": 0.2438, | |
| "step": 429 | |
| }, | |
| { | |
| "epoch": 0.5287426990470335, | |
| "grad_norm": 0.6638366111080263, | |
| "learning_rate": 1.0751382441941677e-05, | |
| "loss": 0.1915, | |
| "step": 430 | |
| }, | |
| { | |
| "epoch": 0.5299723332308638, | |
| "grad_norm": 0.4535025325333235, | |
| "learning_rate": 1.0708520480733895e-05, | |
| "loss": 0.194, | |
| "step": 431 | |
| }, | |
| { | |
| "epoch": 0.5312019674146942, | |
| "grad_norm": 0.5223418583566981, | |
| "learning_rate": 1.0665645433230345e-05, | |
| "loss": 0.2608, | |
| "step": 432 | |
| }, | |
| { | |
| "epoch": 0.5324316015985244, | |
| "grad_norm": 0.5251894549889435, | |
| "learning_rate": 1.0622758091328469e-05, | |
| "loss": 0.1438, | |
| "step": 433 | |
| }, | |
| { | |
| "epoch": 0.5336612357823548, | |
| "grad_norm": 0.4790588612821075, | |
| "learning_rate": 1.0579859247152774e-05, | |
| "loss": 0.1627, | |
| "step": 434 | |
| }, | |
| { | |
| "epoch": 0.5348908699661851, | |
| "grad_norm": 0.44616175922026896, | |
| "learning_rate": 1.0536949693040224e-05, | |
| "loss": 0.2082, | |
| "step": 435 | |
| }, | |
| { | |
| "epoch": 0.5361205041500153, | |
| "grad_norm": 0.46980201755709106, | |
| "learning_rate": 1.0494030221525582e-05, | |
| "loss": 0.2491, | |
| "step": 436 | |
| }, | |
| { | |
| "epoch": 0.5373501383338457, | |
| "grad_norm": 0.31327453178418485, | |
| "learning_rate": 1.0451101625326798e-05, | |
| "loss": 0.1715, | |
| "step": 437 | |
| }, | |
| { | |
| "epoch": 0.538579772517676, | |
| "grad_norm": 0.4945036398444153, | |
| "learning_rate": 1.0408164697330348e-05, | |
| "loss": 0.2311, | |
| "step": 438 | |
| }, | |
| { | |
| "epoch": 0.5398094067015063, | |
| "grad_norm": 0.49262428836934985, | |
| "learning_rate": 1.0365220230576592e-05, | |
| "loss": 0.2812, | |
| "step": 439 | |
| }, | |
| { | |
| "epoch": 0.5410390408853366, | |
| "grad_norm": 0.3787467213860296, | |
| "learning_rate": 1.0322269018245128e-05, | |
| "loss": 0.238, | |
| "step": 440 | |
| }, | |
| { | |
| "epoch": 0.5410390408853366, | |
| "eval_accuracy": 0.7914438502673797, | |
| "eval_f1": 0.4657534246575342, | |
| "eval_loss": 0.4313281178474426, | |
| "eval_precision": 0.7391304347826086, | |
| "eval_recall": 0.34, | |
| "eval_runtime": 22.6212, | |
| "eval_samples_per_second": 2.21, | |
| "eval_steps_per_second": 0.177, | |
| "step": 440 | |
| }, | |
| { | |
| "epoch": 0.5422686750691669, | |
| "grad_norm": 0.37936214504954746, | |
| "learning_rate": 1.0279311853640157e-05, | |
| "loss": 0.2168, | |
| "step": 441 | |
| }, | |
| { | |
| "epoch": 0.5434983092529972, | |
| "grad_norm": 0.3206038847726103, | |
| "learning_rate": 1.0236349530175807e-05, | |
| "loss": 0.1876, | |
| "step": 442 | |
| }, | |
| { | |
| "epoch": 0.5447279434368275, | |
| "grad_norm": 0.36229470962362703, | |
| "learning_rate": 1.019338284136149e-05, | |
| "loss": 0.1736, | |
| "step": 443 | |
| }, | |
| { | |
| "epoch": 0.5459575776206579, | |
| "grad_norm": 0.4075063721479331, | |
| "learning_rate": 1.015041258078725e-05, | |
| "loss": 0.2283, | |
| "step": 444 | |
| }, | |
| { | |
| "epoch": 0.5471872118044881, | |
| "grad_norm": 0.2649302419780058, | |
| "learning_rate": 1.0107439542109097e-05, | |
| "loss": 0.1342, | |
| "step": 445 | |
| }, | |
| { | |
| "epoch": 0.5484168459883185, | |
| "grad_norm": 0.4132655463506092, | |
| "learning_rate": 1.0064464519034358e-05, | |
| "loss": 0.2227, | |
| "step": 446 | |
| }, | |
| { | |
| "epoch": 0.5496464801721488, | |
| "grad_norm": 0.3696111104824441, | |
| "learning_rate": 1.0021488305307003e-05, | |
| "loss": 0.16, | |
| "step": 447 | |
| }, | |
| { | |
| "epoch": 0.550876114355979, | |
| "grad_norm": 0.33812235844778665, | |
| "learning_rate": 9.978511694692999e-06, | |
| "loss": 0.1787, | |
| "step": 448 | |
| }, | |
| { | |
| "epoch": 0.5521057485398094, | |
| "grad_norm": 0.4571938907087761, | |
| "learning_rate": 9.935535480965647e-06, | |
| "loss": 0.176, | |
| "step": 449 | |
| }, | |
| { | |
| "epoch": 0.5533353827236397, | |
| "grad_norm": 0.4297886084823541, | |
| "learning_rate": 9.892560457890907e-06, | |
| "loss": 0.1822, | |
| "step": 450 | |
| }, | |
| { | |
| "epoch": 0.5545650169074701, | |
| "grad_norm": 0.39629426107785415, | |
| "learning_rate": 9.849587419212751e-06, | |
| "loss": 0.1982, | |
| "step": 451 | |
| }, | |
| { | |
| "epoch": 0.5557946510913003, | |
| "grad_norm": 0.4188025429095887, | |
| "learning_rate": 9.806617158638515e-06, | |
| "loss": 0.263, | |
| "step": 452 | |
| }, | |
| { | |
| "epoch": 0.5570242852751307, | |
| "grad_norm": 0.5523505686855087, | |
| "learning_rate": 9.763650469824198e-06, | |
| "loss": 0.3381, | |
| "step": 453 | |
| }, | |
| { | |
| "epoch": 0.558253919458961, | |
| "grad_norm": 0.3507777011017874, | |
| "learning_rate": 9.720688146359843e-06, | |
| "loss": 0.2102, | |
| "step": 454 | |
| }, | |
| { | |
| "epoch": 0.5594835536427912, | |
| "grad_norm": 0.321426765671579, | |
| "learning_rate": 9.677730981754875e-06, | |
| "loss": 0.1714, | |
| "step": 455 | |
| }, | |
| { | |
| "epoch": 0.5607131878266216, | |
| "grad_norm": 0.41323958284891527, | |
| "learning_rate": 9.634779769423412e-06, | |
| "loss": 0.236, | |
| "step": 456 | |
| }, | |
| { | |
| "epoch": 0.5619428220104519, | |
| "grad_norm": 0.49361377740156237, | |
| "learning_rate": 9.591835302669657e-06, | |
| "loss": 0.1847, | |
| "step": 457 | |
| }, | |
| { | |
| "epoch": 0.5631724561942822, | |
| "grad_norm": 0.40610616606906547, | |
| "learning_rate": 9.548898374673205e-06, | |
| "loss": 0.2124, | |
| "step": 458 | |
| }, | |
| { | |
| "epoch": 0.5644020903781125, | |
| "grad_norm": 0.6542907937118401, | |
| "learning_rate": 9.505969778474418e-06, | |
| "loss": 0.2063, | |
| "step": 459 | |
| }, | |
| { | |
| "epoch": 0.5656317245619428, | |
| "grad_norm": 0.34192984136591886, | |
| "learning_rate": 9.463050306959782e-06, | |
| "loss": 0.1661, | |
| "step": 460 | |
| }, | |
| { | |
| "epoch": 0.5668613587457731, | |
| "grad_norm": 0.4336541383620103, | |
| "learning_rate": 9.42014075284723e-06, | |
| "loss": 0.1666, | |
| "step": 461 | |
| }, | |
| { | |
| "epoch": 0.5680909929296034, | |
| "grad_norm": 0.3660193380978002, | |
| "learning_rate": 9.377241908671533e-06, | |
| "loss": 0.189, | |
| "step": 462 | |
| }, | |
| { | |
| "epoch": 0.5693206271134338, | |
| "grad_norm": 0.41494751800096213, | |
| "learning_rate": 9.334354566769658e-06, | |
| "loss": 0.2119, | |
| "step": 463 | |
| }, | |
| { | |
| "epoch": 0.570550261297264, | |
| "grad_norm": 0.5184551716970647, | |
| "learning_rate": 9.291479519266108e-06, | |
| "loss": 0.2418, | |
| "step": 464 | |
| }, | |
| { | |
| "epoch": 0.5717798954810944, | |
| "grad_norm": 0.46046449118174804, | |
| "learning_rate": 9.248617558058328e-06, | |
| "loss": 0.1998, | |
| "step": 465 | |
| }, | |
| { | |
| "epoch": 0.5730095296649247, | |
| "grad_norm": 0.3975308500337465, | |
| "learning_rate": 9.205769474802045e-06, | |
| "loss": 0.1907, | |
| "step": 466 | |
| }, | |
| { | |
| "epoch": 0.574239163848755, | |
| "grad_norm": 0.4237299624678481, | |
| "learning_rate": 9.162936060896672e-06, | |
| "loss": 0.2087, | |
| "step": 467 | |
| }, | |
| { | |
| "epoch": 0.5754687980325853, | |
| "grad_norm": 0.47636428636745337, | |
| "learning_rate": 9.12011810747068e-06, | |
| "loss": 0.161, | |
| "step": 468 | |
| }, | |
| { | |
| "epoch": 0.5766984322164156, | |
| "grad_norm": 0.5257948156557456, | |
| "learning_rate": 9.07731640536698e-06, | |
| "loss": 0.3091, | |
| "step": 469 | |
| }, | |
| { | |
| "epoch": 0.577928066400246, | |
| "grad_norm": 0.4479489865327413, | |
| "learning_rate": 9.034531745128334e-06, | |
| "loss": 0.2383, | |
| "step": 470 | |
| }, | |
| { | |
| "epoch": 0.5791577005840762, | |
| "grad_norm": 0.40206612139181935, | |
| "learning_rate": 8.991764916982731e-06, | |
| "loss": 0.2534, | |
| "step": 471 | |
| }, | |
| { | |
| "epoch": 0.5803873347679065, | |
| "grad_norm": 0.3870155682325831, | |
| "learning_rate": 8.949016710828808e-06, | |
| "loss": 0.1743, | |
| "step": 472 | |
| }, | |
| { | |
| "epoch": 0.5816169689517369, | |
| "grad_norm": 0.35857102895539483, | |
| "learning_rate": 8.906287916221259e-06, | |
| "loss": 0.1541, | |
| "step": 473 | |
| }, | |
| { | |
| "epoch": 0.5828466031355671, | |
| "grad_norm": 0.3297878518016191, | |
| "learning_rate": 8.863579322356242e-06, | |
| "loss": 0.184, | |
| "step": 474 | |
| }, | |
| { | |
| "epoch": 0.5840762373193975, | |
| "grad_norm": 0.2983994696232135, | |
| "learning_rate": 8.820891718056815e-06, | |
| "loss": 0.1836, | |
| "step": 475 | |
| }, | |
| { | |
| "epoch": 0.5853058715032278, | |
| "grad_norm": 0.40664636398131304, | |
| "learning_rate": 8.77822589175836e-06, | |
| "loss": 0.1594, | |
| "step": 476 | |
| }, | |
| { | |
| "epoch": 0.5865355056870581, | |
| "grad_norm": 0.37741982285626524, | |
| "learning_rate": 8.73558263149402e-06, | |
| "loss": 0.236, | |
| "step": 477 | |
| }, | |
| { | |
| "epoch": 0.5877651398708884, | |
| "grad_norm": 0.4536143968466746, | |
| "learning_rate": 8.692962724880148e-06, | |
| "loss": 0.1882, | |
| "step": 478 | |
| }, | |
| { | |
| "epoch": 0.5889947740547187, | |
| "grad_norm": 0.3713741062634612, | |
| "learning_rate": 8.650366959101757e-06, | |
| "loss": 0.2143, | |
| "step": 479 | |
| }, | |
| { | |
| "epoch": 0.590224408238549, | |
| "grad_norm": 0.27112035186414296, | |
| "learning_rate": 8.607796120897978e-06, | |
| "loss": 0.1448, | |
| "step": 480 | |
| }, | |
| { | |
| "epoch": 0.590224408238549, | |
| "eval_accuracy": 0.8128342245989305, | |
| "eval_f1": 0.5454545454545454, | |
| "eval_loss": 0.41609373688697815, | |
| "eval_precision": 0.7777777777777778, | |
| "eval_recall": 0.42, | |
| "eval_runtime": 23.0163, | |
| "eval_samples_per_second": 2.172, | |
| "eval_steps_per_second": 0.174, | |
| "step": 480 | |
| }, | |
| { | |
| "epoch": 0.5914540424223793, | |
| "grad_norm": 0.3731696583376688, | |
| "learning_rate": 8.565250996547538e-06, | |
| "loss": 0.1521, | |
| "step": 481 | |
| }, | |
| { | |
| "epoch": 0.5926836766062097, | |
| "grad_norm": 0.3715423359630779, | |
| "learning_rate": 8.522732371854228e-06, | |
| "loss": 0.1561, | |
| "step": 482 | |
| }, | |
| { | |
| "epoch": 0.59391331079004, | |
| "grad_norm": 0.4445264123453298, | |
| "learning_rate": 8.480241032132394e-06, | |
| "loss": 0.1772, | |
| "step": 483 | |
| }, | |
| { | |
| "epoch": 0.5951429449738703, | |
| "grad_norm": 0.417726015870805, | |
| "learning_rate": 8.437777762192434e-06, | |
| "loss": 0.2099, | |
| "step": 484 | |
| }, | |
| { | |
| "epoch": 0.5963725791577006, | |
| "grad_norm": 0.5443603401932148, | |
| "learning_rate": 8.395343346326295e-06, | |
| "loss": 0.2194, | |
| "step": 485 | |
| }, | |
| { | |
| "epoch": 0.5976022133415309, | |
| "grad_norm": 0.357174859123756, | |
| "learning_rate": 8.352938568293e-06, | |
| "loss": 0.1994, | |
| "step": 486 | |
| }, | |
| { | |
| "epoch": 0.5988318475253612, | |
| "grad_norm": 0.4122807392874181, | |
| "learning_rate": 8.310564211304159e-06, | |
| "loss": 0.2326, | |
| "step": 487 | |
| }, | |
| { | |
| "epoch": 0.6000614817091915, | |
| "grad_norm": 0.3030477984709631, | |
| "learning_rate": 8.268221058009506e-06, | |
| "loss": 0.1678, | |
| "step": 488 | |
| }, | |
| { | |
| "epoch": 0.6012911158930219, | |
| "grad_norm": 0.32220394621350373, | |
| "learning_rate": 8.225909890482456e-06, | |
| "loss": 0.1442, | |
| "step": 489 | |
| }, | |
| { | |
| "epoch": 0.6025207500768521, | |
| "grad_norm": 0.4724721543903087, | |
| "learning_rate": 8.183631490205636e-06, | |
| "loss": 0.1962, | |
| "step": 490 | |
| }, | |
| { | |
| "epoch": 0.6037503842606824, | |
| "grad_norm": 0.3600147393401425, | |
| "learning_rate": 8.141386638056482e-06, | |
| "loss": 0.1876, | |
| "step": 491 | |
| }, | |
| { | |
| "epoch": 0.6049800184445128, | |
| "grad_norm": 0.4000861620375486, | |
| "learning_rate": 8.09917611429279e-06, | |
| "loss": 0.2202, | |
| "step": 492 | |
| }, | |
| { | |
| "epoch": 0.606209652628343, | |
| "grad_norm": 0.3772045360701519, | |
| "learning_rate": 8.057000698538311e-06, | |
| "loss": 0.1606, | |
| "step": 493 | |
| }, | |
| { | |
| "epoch": 0.6074392868121734, | |
| "grad_norm": 0.37207256879831996, | |
| "learning_rate": 8.014861169768362e-06, | |
| "loss": 0.2028, | |
| "step": 494 | |
| }, | |
| { | |
| "epoch": 0.6086689209960037, | |
| "grad_norm": 0.3479563238322552, | |
| "learning_rate": 7.972758306295436e-06, | |
| "loss": 0.1574, | |
| "step": 495 | |
| }, | |
| { | |
| "epoch": 0.609898555179834, | |
| "grad_norm": 0.4796467447401053, | |
| "learning_rate": 7.930692885754806e-06, | |
| "loss": 0.224, | |
| "step": 496 | |
| }, | |
| { | |
| "epoch": 0.6111281893636643, | |
| "grad_norm": 0.3477407213433725, | |
| "learning_rate": 7.888665685090194e-06, | |
| "loss": 0.2333, | |
| "step": 497 | |
| }, | |
| { | |
| "epoch": 0.6123578235474946, | |
| "grad_norm": 0.3793733407594908, | |
| "learning_rate": 7.846677480539392e-06, | |
| "loss": 0.2162, | |
| "step": 498 | |
| }, | |
| { | |
| "epoch": 0.613587457731325, | |
| "grad_norm": 0.3805341456576239, | |
| "learning_rate": 7.80472904761995e-06, | |
| "loss": 0.141, | |
| "step": 499 | |
| }, | |
| { | |
| "epoch": 0.6148170919151552, | |
| "grad_norm": 0.29991453914514893, | |
| "learning_rate": 7.762821161114834e-06, | |
| "loss": 0.1767, | |
| "step": 500 | |
| }, | |
| { | |
| "epoch": 0.6160467260989856, | |
| "grad_norm": 0.49958166386531483, | |
| "learning_rate": 7.720954595058118e-06, | |
| "loss": 0.1951, | |
| "step": 501 | |
| }, | |
| { | |
| "epoch": 0.6172763602828159, | |
| "grad_norm": 0.3970045573640798, | |
| "learning_rate": 7.679130122720704e-06, | |
| "loss": 0.1961, | |
| "step": 502 | |
| }, | |
| { | |
| "epoch": 0.6185059944666462, | |
| "grad_norm": 0.4623993081995553, | |
| "learning_rate": 7.637348516596016e-06, | |
| "loss": 0.2396, | |
| "step": 503 | |
| }, | |
| { | |
| "epoch": 0.6197356286504765, | |
| "grad_norm": 0.3615937641177032, | |
| "learning_rate": 7.59561054838575e-06, | |
| "loss": 0.1549, | |
| "step": 504 | |
| }, | |
| { | |
| "epoch": 0.6209652628343068, | |
| "grad_norm": 0.4101127611696136, | |
| "learning_rate": 7.5539169889856135e-06, | |
| "loss": 0.211, | |
| "step": 505 | |
| }, | |
| { | |
| "epoch": 0.6221948970181371, | |
| "grad_norm": 0.42525165037118207, | |
| "learning_rate": 7.512268608471083e-06, | |
| "loss": 0.1805, | |
| "step": 506 | |
| }, | |
| { | |
| "epoch": 0.6234245312019674, | |
| "grad_norm": 0.37275006951534, | |
| "learning_rate": 7.470666176083193e-06, | |
| "loss": 0.1708, | |
| "step": 507 | |
| }, | |
| { | |
| "epoch": 0.6246541653857978, | |
| "grad_norm": 0.3457729667153079, | |
| "learning_rate": 7.42911046021432e-06, | |
| "loss": 0.1653, | |
| "step": 508 | |
| }, | |
| { | |
| "epoch": 0.625883799569628, | |
| "grad_norm": 0.3278339387702758, | |
| "learning_rate": 7.387602228393987e-06, | |
| "loss": 0.2193, | |
| "step": 509 | |
| }, | |
| { | |
| "epoch": 0.6271134337534583, | |
| "grad_norm": 0.4458446440488868, | |
| "learning_rate": 7.346142247274695e-06, | |
| "loss": 0.1969, | |
| "step": 510 | |
| }, | |
| { | |
| "epoch": 0.6283430679372887, | |
| "grad_norm": 0.39189734844551366, | |
| "learning_rate": 7.304731282617762e-06, | |
| "loss": 0.2064, | |
| "step": 511 | |
| }, | |
| { | |
| "epoch": 0.6295727021211189, | |
| "grad_norm": 0.5420020058867324, | |
| "learning_rate": 7.263370099279173e-06, | |
| "loss": 0.1818, | |
| "step": 512 | |
| }, | |
| { | |
| "epoch": 0.6308023363049493, | |
| "grad_norm": 0.29997454975691457, | |
| "learning_rate": 7.2220594611954606e-06, | |
| "loss": 0.1301, | |
| "step": 513 | |
| }, | |
| { | |
| "epoch": 0.6320319704887796, | |
| "grad_norm": 0.5869237816795814, | |
| "learning_rate": 7.1808001313695855e-06, | |
| "loss": 0.2408, | |
| "step": 514 | |
| }, | |
| { | |
| "epoch": 0.63326160467261, | |
| "grad_norm": 0.4123986176767839, | |
| "learning_rate": 7.1395928718568605e-06, | |
| "loss": 0.1633, | |
| "step": 515 | |
| }, | |
| { | |
| "epoch": 0.6344912388564402, | |
| "grad_norm": 0.33349921575102887, | |
| "learning_rate": 7.098438443750865e-06, | |
| "loss": 0.1433, | |
| "step": 516 | |
| }, | |
| { | |
| "epoch": 0.6357208730402705, | |
| "grad_norm": 0.38968457601021717, | |
| "learning_rate": 7.057337607169373e-06, | |
| "loss": 0.1385, | |
| "step": 517 | |
| }, | |
| { | |
| "epoch": 0.6369505072241008, | |
| "grad_norm": 0.4844956623703022, | |
| "learning_rate": 7.016291121240346e-06, | |
| "loss": 0.2059, | |
| "step": 518 | |
| }, | |
| { | |
| "epoch": 0.6381801414079311, | |
| "grad_norm": 0.5044663407494435, | |
| "learning_rate": 6.975299744087891e-06, | |
| "loss": 0.1627, | |
| "step": 519 | |
| }, | |
| { | |
| "epoch": 0.6394097755917615, | |
| "grad_norm": 0.38557915218071376, | |
| "learning_rate": 6.934364232818254e-06, | |
| "loss": 0.2096, | |
| "step": 520 | |
| }, | |
| { | |
| "epoch": 0.6394097755917615, | |
| "eval_accuracy": 0.7967914438502673, | |
| "eval_f1": 0.4864864864864865, | |
| "eval_loss": 0.4250781238079071, | |
| "eval_precision": 0.75, | |
| "eval_recall": 0.36, | |
| "eval_runtime": 22.9637, | |
| "eval_samples_per_second": 2.177, | |
| "eval_steps_per_second": 0.174, | |
| "step": 520 | |
| }, | |
| { | |
| "epoch": 0.6406394097755918, | |
| "grad_norm": 0.5448951647380567, | |
| "learning_rate": 6.8934853435058566e-06, | |
| "loss": 0.1752, | |
| "step": 521 | |
| }, | |
| { | |
| "epoch": 0.6418690439594221, | |
| "grad_norm": 0.4644280248680503, | |
| "learning_rate": 6.852663831179303e-06, | |
| "loss": 0.207, | |
| "step": 522 | |
| }, | |
| { | |
| "epoch": 0.6430986781432524, | |
| "grad_norm": 0.4087824167359295, | |
| "learning_rate": 6.811900449807465e-06, | |
| "loss": 0.2231, | |
| "step": 523 | |
| }, | |
| { | |
| "epoch": 0.6443283123270827, | |
| "grad_norm": 0.3257107890885892, | |
| "learning_rate": 6.771195952285541e-06, | |
| "loss": 0.2224, | |
| "step": 524 | |
| }, | |
| { | |
| "epoch": 0.645557946510913, | |
| "grad_norm": 0.3772013974334331, | |
| "learning_rate": 6.730551090421137e-06, | |
| "loss": 0.2488, | |
| "step": 525 | |
| }, | |
| { | |
| "epoch": 0.6467875806947433, | |
| "grad_norm": 0.36235955730436165, | |
| "learning_rate": 6.689966614920414e-06, | |
| "loss": 0.243, | |
| "step": 526 | |
| }, | |
| { | |
| "epoch": 0.6480172148785737, | |
| "grad_norm": 0.3264893573428842, | |
| "learning_rate": 6.6494432753741935e-06, | |
| "loss": 0.1759, | |
| "step": 527 | |
| }, | |
| { | |
| "epoch": 0.6492468490624039, | |
| "grad_norm": 0.5043057888131116, | |
| "learning_rate": 6.608981820244116e-06, | |
| "loss": 0.2345, | |
| "step": 528 | |
| }, | |
| { | |
| "epoch": 0.6504764832462342, | |
| "grad_norm": 0.3463558045689586, | |
| "learning_rate": 6.568582996848844e-06, | |
| "loss": 0.1618, | |
| "step": 529 | |
| }, | |
| { | |
| "epoch": 0.6517061174300646, | |
| "grad_norm": 0.38166358521794713, | |
| "learning_rate": 6.528247551350213e-06, | |
| "loss": 0.1684, | |
| "step": 530 | |
| }, | |
| { | |
| "epoch": 0.6529357516138948, | |
| "grad_norm": 0.3156591052948769, | |
| "learning_rate": 6.487976228739493e-06, | |
| "loss": 0.1579, | |
| "step": 531 | |
| }, | |
| { | |
| "epoch": 0.6541653857977252, | |
| "grad_norm": 0.3332487495737034, | |
| "learning_rate": 6.4477697728236146e-06, | |
| "loss": 0.1551, | |
| "step": 532 | |
| }, | |
| { | |
| "epoch": 0.6553950199815555, | |
| "grad_norm": 0.4211650929646287, | |
| "learning_rate": 6.407628926211409e-06, | |
| "loss": 0.2312, | |
| "step": 533 | |
| }, | |
| { | |
| "epoch": 0.6566246541653858, | |
| "grad_norm": 0.2935734822708652, | |
| "learning_rate": 6.367554430299924e-06, | |
| "loss": 0.115, | |
| "step": 534 | |
| }, | |
| { | |
| "epoch": 0.6578542883492161, | |
| "grad_norm": 0.3109438577457444, | |
| "learning_rate": 6.327547025260723e-06, | |
| "loss": 0.1732, | |
| "step": 535 | |
| }, | |
| { | |
| "epoch": 0.6590839225330464, | |
| "grad_norm": 0.5251627863608039, | |
| "learning_rate": 6.287607450026189e-06, | |
| "loss": 0.2232, | |
| "step": 536 | |
| }, | |
| { | |
| "epoch": 0.6603135567168767, | |
| "grad_norm": 0.5272463447754631, | |
| "learning_rate": 6.247736442275918e-06, | |
| "loss": 0.225, | |
| "step": 537 | |
| }, | |
| { | |
| "epoch": 0.661543190900707, | |
| "grad_norm": 0.49530017803033505, | |
| "learning_rate": 6.2079347384230505e-06, | |
| "loss": 0.2101, | |
| "step": 538 | |
| }, | |
| { | |
| "epoch": 0.6627728250845374, | |
| "grad_norm": 0.36399232198883885, | |
| "learning_rate": 6.168203073600706e-06, | |
| "loss": 0.2412, | |
| "step": 539 | |
| }, | |
| { | |
| "epoch": 0.6640024592683677, | |
| "grad_norm": 0.3761278210204055, | |
| "learning_rate": 6.128542181648395e-06, | |
| "loss": 0.1548, | |
| "step": 540 | |
| }, | |
| { | |
| "epoch": 0.665232093452198, | |
| "grad_norm": 0.3600939725825308, | |
| "learning_rate": 6.088952795098442e-06, | |
| "loss": 0.1784, | |
| "step": 541 | |
| }, | |
| { | |
| "epoch": 0.6664617276360283, | |
| "grad_norm": 0.27977025283181606, | |
| "learning_rate": 6.049435645162487e-06, | |
| "loss": 0.1274, | |
| "step": 542 | |
| }, | |
| { | |
| "epoch": 0.6676913618198586, | |
| "grad_norm": 0.42480514662361724, | |
| "learning_rate": 6.009991461717977e-06, | |
| "loss": 0.1807, | |
| "step": 543 | |
| }, | |
| { | |
| "epoch": 0.6689209960036889, | |
| "grad_norm": 0.48627506473268806, | |
| "learning_rate": 5.9706209732946495e-06, | |
| "loss": 0.2132, | |
| "step": 544 | |
| }, | |
| { | |
| "epoch": 0.6701506301875192, | |
| "grad_norm": 0.42975103927248237, | |
| "learning_rate": 5.931324907061131e-06, | |
| "loss": 0.1755, | |
| "step": 545 | |
| }, | |
| { | |
| "epoch": 0.6713802643713496, | |
| "grad_norm": 0.4272914218675845, | |
| "learning_rate": 5.892103988811457e-06, | |
| "loss": 0.2238, | |
| "step": 546 | |
| }, | |
| { | |
| "epoch": 0.6726098985551798, | |
| "grad_norm": 0.35364469492194867, | |
| "learning_rate": 5.852958942951701e-06, | |
| "loss": 0.1657, | |
| "step": 547 | |
| }, | |
| { | |
| "epoch": 0.6738395327390101, | |
| "grad_norm": 0.3205399017187773, | |
| "learning_rate": 5.8138904924865766e-06, | |
| "loss": 0.1284, | |
| "step": 548 | |
| }, | |
| { | |
| "epoch": 0.6750691669228405, | |
| "grad_norm": 0.488644078540224, | |
| "learning_rate": 5.774899359006092e-06, | |
| "loss": 0.2515, | |
| "step": 549 | |
| }, | |
| { | |
| "epoch": 0.6762988011066707, | |
| "grad_norm": 0.3649190122100428, | |
| "learning_rate": 5.735986262672211e-06, | |
| "loss": 0.1812, | |
| "step": 550 | |
| }, | |
| { | |
| "epoch": 0.6775284352905011, | |
| "grad_norm": 0.38288184293561706, | |
| "learning_rate": 5.697151922205575e-06, | |
| "loss": 0.2015, | |
| "step": 551 | |
| }, | |
| { | |
| "epoch": 0.6787580694743314, | |
| "grad_norm": 0.4564212700510477, | |
| "learning_rate": 5.658397054872197e-06, | |
| "loss": 0.1518, | |
| "step": 552 | |
| }, | |
| { | |
| "epoch": 0.6799877036581617, | |
| "grad_norm": 0.6778561540995489, | |
| "learning_rate": 5.619722376470238e-06, | |
| "loss": 0.2669, | |
| "step": 553 | |
| }, | |
| { | |
| "epoch": 0.681217337841992, | |
| "grad_norm": 0.38820523141101543, | |
| "learning_rate": 5.581128601316774e-06, | |
| "loss": 0.1773, | |
| "step": 554 | |
| }, | |
| { | |
| "epoch": 0.6824469720258223, | |
| "grad_norm": 0.47361205634888237, | |
| "learning_rate": 5.542616442234618e-06, | |
| "loss": 0.1582, | |
| "step": 555 | |
| }, | |
| { | |
| "epoch": 0.6836766062096526, | |
| "grad_norm": 0.4441223497761333, | |
| "learning_rate": 5.504186610539131e-06, | |
| "loss": 0.2571, | |
| "step": 556 | |
| }, | |
| { | |
| "epoch": 0.6849062403934829, | |
| "grad_norm": 0.34475269069193953, | |
| "learning_rate": 5.465839816025093e-06, | |
| "loss": 0.1777, | |
| "step": 557 | |
| }, | |
| { | |
| "epoch": 0.6861358745773133, | |
| "grad_norm": 0.5033044850685292, | |
| "learning_rate": 5.427576766953615e-06, | |
| "loss": 0.2281, | |
| "step": 558 | |
| }, | |
| { | |
| "epoch": 0.6873655087611436, | |
| "grad_norm": 0.5766890545879761, | |
| "learning_rate": 5.3893981700390215e-06, | |
| "loss": 0.1924, | |
| "step": 559 | |
| }, | |
| { | |
| "epoch": 0.6885951429449738, | |
| "grad_norm": 0.41086152620647287, | |
| "learning_rate": 5.35130473043582e-06, | |
| "loss": 0.204, | |
| "step": 560 | |
| }, | |
| { | |
| "epoch": 0.6885951429449738, | |
| "eval_accuracy": 0.7914438502673797, | |
| "eval_f1": 0.4657534246575342, | |
| "eval_loss": 0.44132813811302185, | |
| "eval_precision": 0.7391304347826086, | |
| "eval_recall": 0.34, | |
| "eval_runtime": 22.903, | |
| "eval_samples_per_second": 2.183, | |
| "eval_steps_per_second": 0.175, | |
| "step": 560 | |
| }, | |
| { | |
| "epoch": 0.6898247771288042, | |
| "grad_norm": 0.3726661976685923, | |
| "learning_rate": 5.313297151725679e-06, | |
| "loss": 0.1644, | |
| "step": 561 | |
| }, | |
| { | |
| "epoch": 0.6910544113126345, | |
| "grad_norm": 0.5219762547061035, | |
| "learning_rate": 5.275376135904408e-06, | |
| "loss": 0.2288, | |
| "step": 562 | |
| }, | |
| { | |
| "epoch": 0.6922840454964648, | |
| "grad_norm": 0.5504038731015495, | |
| "learning_rate": 5.2375423833690255e-06, | |
| "loss": 0.2808, | |
| "step": 563 | |
| }, | |
| { | |
| "epoch": 0.6935136796802951, | |
| "grad_norm": 0.5113724900506852, | |
| "learning_rate": 5.1997965929048125e-06, | |
| "loss": 0.24, | |
| "step": 564 | |
| }, | |
| { | |
| "epoch": 0.6947433138641255, | |
| "grad_norm": 0.3227481790414618, | |
| "learning_rate": 5.1621394616723705e-06, | |
| "loss": 0.1381, | |
| "step": 565 | |
| }, | |
| { | |
| "epoch": 0.6959729480479557, | |
| "grad_norm": 0.4136723306908959, | |
| "learning_rate": 5.124571685194804e-06, | |
| "loss": 0.1984, | |
| "step": 566 | |
| }, | |
| { | |
| "epoch": 0.697202582231786, | |
| "grad_norm": 0.366222168153406, | |
| "learning_rate": 5.087093957344841e-06, | |
| "loss": 0.1981, | |
| "step": 567 | |
| }, | |
| { | |
| "epoch": 0.6984322164156164, | |
| "grad_norm": 0.45034331558707047, | |
| "learning_rate": 5.049706970332e-06, | |
| "loss": 0.1971, | |
| "step": 568 | |
| }, | |
| { | |
| "epoch": 0.6996618505994466, | |
| "grad_norm": 0.4100657746036488, | |
| "learning_rate": 5.0124114146898505e-06, | |
| "loss": 0.2322, | |
| "step": 569 | |
| }, | |
| { | |
| "epoch": 0.700891484783277, | |
| "grad_norm": 0.44211705459477657, | |
| "learning_rate": 4.9752079792632244e-06, | |
| "loss": 0.1571, | |
| "step": 570 | |
| }, | |
| { | |
| "epoch": 0.7021211189671073, | |
| "grad_norm": 0.5266522352800783, | |
| "learning_rate": 4.938097351195499e-06, | |
| "loss": 0.1818, | |
| "step": 571 | |
| }, | |
| { | |
| "epoch": 0.7033507531509376, | |
| "grad_norm": 0.598616618137633, | |
| "learning_rate": 4.9010802159159224e-06, | |
| "loss": 0.2259, | |
| "step": 572 | |
| }, | |
| { | |
| "epoch": 0.7045803873347679, | |
| "grad_norm": 0.5779733430038166, | |
| "learning_rate": 4.864157257126928e-06, | |
| "loss": 0.1429, | |
| "step": 573 | |
| }, | |
| { | |
| "epoch": 0.7058100215185982, | |
| "grad_norm": 0.34649772674980184, | |
| "learning_rate": 4.8273291567915225e-06, | |
| "loss": 0.16, | |
| "step": 574 | |
| }, | |
| { | |
| "epoch": 0.7070396557024285, | |
| "grad_norm": 0.34316766175643343, | |
| "learning_rate": 4.790596595120699e-06, | |
| "loss": 0.1754, | |
| "step": 575 | |
| }, | |
| { | |
| "epoch": 0.7082692898862588, | |
| "grad_norm": 0.522847657230503, | |
| "learning_rate": 4.753960250560843e-06, | |
| "loss": 0.1942, | |
| "step": 576 | |
| }, | |
| { | |
| "epoch": 0.7094989240700892, | |
| "grad_norm": 0.3962657447065765, | |
| "learning_rate": 4.7174207997812436e-06, | |
| "loss": 0.2201, | |
| "step": 577 | |
| }, | |
| { | |
| "epoch": 0.7107285582539195, | |
| "grad_norm": 0.5773303103813715, | |
| "learning_rate": 4.680978917661544e-06, | |
| "loss": 0.1883, | |
| "step": 578 | |
| }, | |
| { | |
| "epoch": 0.7119581924377497, | |
| "grad_norm": 0.49036496077969033, | |
| "learning_rate": 4.6446352772793256e-06, | |
| "loss": 0.2064, | |
| "step": 579 | |
| }, | |
| { | |
| "epoch": 0.7131878266215801, | |
| "grad_norm": 0.6072300426360661, | |
| "learning_rate": 4.608390549897661e-06, | |
| "loss": 0.2741, | |
| "step": 580 | |
| }, | |
| { | |
| "epoch": 0.7144174608054104, | |
| "grad_norm": 0.30903774667825634, | |
| "learning_rate": 4.5722454049526825e-06, | |
| "loss": 0.1321, | |
| "step": 581 | |
| }, | |
| { | |
| "epoch": 0.7156470949892407, | |
| "grad_norm": 0.41353484327403556, | |
| "learning_rate": 4.536200510041271e-06, | |
| "loss": 0.1589, | |
| "step": 582 | |
| }, | |
| { | |
| "epoch": 0.716876729173071, | |
| "grad_norm": 0.3781198095284841, | |
| "learning_rate": 4.5002565309087e-06, | |
| "loss": 0.1996, | |
| "step": 583 | |
| }, | |
| { | |
| "epoch": 0.7181063633569014, | |
| "grad_norm": 0.39752189123610254, | |
| "learning_rate": 4.4644141314363165e-06, | |
| "loss": 0.1437, | |
| "step": 584 | |
| }, | |
| { | |
| "epoch": 0.7193359975407316, | |
| "grad_norm": 0.3854738335798155, | |
| "learning_rate": 4.4286739736293285e-06, | |
| "loss": 0.1397, | |
| "step": 585 | |
| }, | |
| { | |
| "epoch": 0.7205656317245619, | |
| "grad_norm": 0.431269796547053, | |
| "learning_rate": 4.393036717604536e-06, | |
| "loss": 0.1486, | |
| "step": 586 | |
| }, | |
| { | |
| "epoch": 0.7217952659083923, | |
| "grad_norm": 0.49871333432848314, | |
| "learning_rate": 4.357503021578158e-06, | |
| "loss": 0.1652, | |
| "step": 587 | |
| }, | |
| { | |
| "epoch": 0.7230249000922225, | |
| "grad_norm": 0.4737833916251569, | |
| "learning_rate": 4.322073541853677e-06, | |
| "loss": 0.2164, | |
| "step": 588 | |
| }, | |
| { | |
| "epoch": 0.7242545342760529, | |
| "grad_norm": 0.36804258048411215, | |
| "learning_rate": 4.286748932809707e-06, | |
| "loss": 0.2091, | |
| "step": 589 | |
| }, | |
| { | |
| "epoch": 0.7254841684598832, | |
| "grad_norm": 0.3785583210042997, | |
| "learning_rate": 4.2515298468879064e-06, | |
| "loss": 0.2258, | |
| "step": 590 | |
| }, | |
| { | |
| "epoch": 0.7267138026437135, | |
| "grad_norm": 0.3271430669263614, | |
| "learning_rate": 4.216416934580947e-06, | |
| "loss": 0.1902, | |
| "step": 591 | |
| }, | |
| { | |
| "epoch": 0.7279434368275438, | |
| "grad_norm": 0.3725205335835651, | |
| "learning_rate": 4.181410844420473e-06, | |
| "loss": 0.1469, | |
| "step": 592 | |
| }, | |
| { | |
| "epoch": 0.7291730710113741, | |
| "grad_norm": 0.5679833065078629, | |
| "learning_rate": 4.146512222965144e-06, | |
| "loss": 0.2159, | |
| "step": 593 | |
| }, | |
| { | |
| "epoch": 0.7304027051952044, | |
| "grad_norm": 0.6073546495930695, | |
| "learning_rate": 4.111721714788671e-06, | |
| "loss": 0.1222, | |
| "step": 594 | |
| }, | |
| { | |
| "epoch": 0.7316323393790347, | |
| "grad_norm": 0.32166243146523565, | |
| "learning_rate": 4.077039962467946e-06, | |
| "loss": 0.1224, | |
| "step": 595 | |
| }, | |
| { | |
| "epoch": 0.7328619735628651, | |
| "grad_norm": 0.322565114053672, | |
| "learning_rate": 4.042467606571134e-06, | |
| "loss": 0.1714, | |
| "step": 596 | |
| }, | |
| { | |
| "epoch": 0.7340916077466954, | |
| "grad_norm": 0.3390381359494193, | |
| "learning_rate": 4.008005285645863e-06, | |
| "loss": 0.1638, | |
| "step": 597 | |
| }, | |
| { | |
| "epoch": 0.7353212419305256, | |
| "grad_norm": 0.3966925455964005, | |
| "learning_rate": 3.973653636207437e-06, | |
| "loss": 0.1508, | |
| "step": 598 | |
| }, | |
| { | |
| "epoch": 0.736550876114356, | |
| "grad_norm": 0.6143649916164688, | |
| "learning_rate": 3.939413292727061e-06, | |
| "loss": 0.2691, | |
| "step": 599 | |
| }, | |
| { | |
| "epoch": 0.7377805102981863, | |
| "grad_norm": 0.3681398679016445, | |
| "learning_rate": 3.9052848876201285e-06, | |
| "loss": 0.1545, | |
| "step": 600 | |
| }, | |
| { | |
| "epoch": 0.7377805102981863, | |
| "eval_accuracy": 0.7967914438502673, | |
| "eval_f1": 0.4864864864864865, | |
| "eval_loss": 0.43117186427116394, | |
| "eval_precision": 0.75, | |
| "eval_recall": 0.36, | |
| "eval_runtime": 23.5078, | |
| "eval_samples_per_second": 2.127, | |
| "eval_steps_per_second": 0.17, | |
| "step": 600 | |
| }, | |
| { | |
| "epoch": 0.7390101444820166, | |
| "grad_norm": 0.2943806410067254, | |
| "learning_rate": 3.8712690512345555e-06, | |
| "loss": 0.1728, | |
| "step": 601 | |
| }, | |
| { | |
| "epoch": 0.7402397786658469, | |
| "grad_norm": 0.42925890423626006, | |
| "learning_rate": 3.837366411839114e-06, | |
| "loss": 0.1948, | |
| "step": 602 | |
| }, | |
| { | |
| "epoch": 0.7414694128496773, | |
| "grad_norm": 0.3527421902997394, | |
| "learning_rate": 3.8035775956118416e-06, | |
| "loss": 0.1413, | |
| "step": 603 | |
| }, | |
| { | |
| "epoch": 0.7426990470335075, | |
| "grad_norm": 0.4175876107718813, | |
| "learning_rate": 3.7699032266284863e-06, | |
| "loss": 0.2727, | |
| "step": 604 | |
| }, | |
| { | |
| "epoch": 0.7439286812173378, | |
| "grad_norm": 0.4187920324268778, | |
| "learning_rate": 3.736343926850954e-06, | |
| "loss": 0.1588, | |
| "step": 605 | |
| }, | |
| { | |
| "epoch": 0.7451583154011682, | |
| "grad_norm": 0.3852760473136735, | |
| "learning_rate": 3.702900316115836e-06, | |
| "loss": 0.174, | |
| "step": 606 | |
| }, | |
| { | |
| "epoch": 0.7463879495849984, | |
| "grad_norm": 0.42823407651531814, | |
| "learning_rate": 3.6695730121229734e-06, | |
| "loss": 0.1938, | |
| "step": 607 | |
| }, | |
| { | |
| "epoch": 0.7476175837688288, | |
| "grad_norm": 0.3509868875989032, | |
| "learning_rate": 3.6363626304240185e-06, | |
| "loss": 0.1475, | |
| "step": 608 | |
| }, | |
| { | |
| "epoch": 0.7488472179526591, | |
| "grad_norm": 0.2971798682387744, | |
| "learning_rate": 3.6032697844110896e-06, | |
| "loss": 0.1767, | |
| "step": 609 | |
| }, | |
| { | |
| "epoch": 0.7500768521364894, | |
| "grad_norm": 0.6072125452251376, | |
| "learning_rate": 3.5702950853054284e-06, | |
| "loss": 0.1699, | |
| "step": 610 | |
| }, | |
| { | |
| "epoch": 0.7513064863203197, | |
| "grad_norm": 0.42164026185503256, | |
| "learning_rate": 3.5374391421461273e-06, | |
| "loss": 0.1412, | |
| "step": 611 | |
| }, | |
| { | |
| "epoch": 0.75253612050415, | |
| "grad_norm": 0.3486983507433236, | |
| "learning_rate": 3.5047025617788578e-06, | |
| "loss": 0.1936, | |
| "step": 612 | |
| }, | |
| { | |
| "epoch": 0.7537657546879803, | |
| "grad_norm": 0.4729724505869417, | |
| "learning_rate": 3.4720859488446744e-06, | |
| "loss": 0.2232, | |
| "step": 613 | |
| }, | |
| { | |
| "epoch": 0.7549953888718106, | |
| "grad_norm": 0.4712358882570717, | |
| "learning_rate": 3.4395899057688575e-06, | |
| "loss": 0.1957, | |
| "step": 614 | |
| }, | |
| { | |
| "epoch": 0.756225023055641, | |
| "grad_norm": 0.30705322198688406, | |
| "learning_rate": 3.407215032749763e-06, | |
| "loss": 0.1771, | |
| "step": 615 | |
| }, | |
| { | |
| "epoch": 0.7574546572394713, | |
| "grad_norm": 0.4492814208789423, | |
| "learning_rate": 3.374961927747751e-06, | |
| "loss": 0.2017, | |
| "step": 616 | |
| }, | |
| { | |
| "epoch": 0.7586842914233015, | |
| "grad_norm": 0.4380941744123555, | |
| "learning_rate": 3.342831186474149e-06, | |
| "loss": 0.2032, | |
| "step": 617 | |
| }, | |
| { | |
| "epoch": 0.7599139256071319, | |
| "grad_norm": 0.3711476811320281, | |
| "learning_rate": 3.31082340238023e-06, | |
| "loss": 0.139, | |
| "step": 618 | |
| }, | |
| { | |
| "epoch": 0.7611435597909622, | |
| "grad_norm": 0.4366197359235773, | |
| "learning_rate": 3.27893916664626e-06, | |
| "loss": 0.1726, | |
| "step": 619 | |
| }, | |
| { | |
| "epoch": 0.7623731939747925, | |
| "grad_norm": 0.3831073094979708, | |
| "learning_rate": 3.2471790681705928e-06, | |
| "loss": 0.1734, | |
| "step": 620 | |
| }, | |
| { | |
| "epoch": 0.7636028281586228, | |
| "grad_norm": 0.3832625976759797, | |
| "learning_rate": 3.215543693558769e-06, | |
| "loss": 0.1326, | |
| "step": 621 | |
| }, | |
| { | |
| "epoch": 0.7648324623424532, | |
| "grad_norm": 0.4637885564290134, | |
| "learning_rate": 3.1840336271126935e-06, | |
| "loss": 0.213, | |
| "step": 622 | |
| }, | |
| { | |
| "epoch": 0.7660620965262834, | |
| "grad_norm": 0.5509391377682509, | |
| "learning_rate": 3.152649450819852e-06, | |
| "loss": 0.202, | |
| "step": 623 | |
| }, | |
| { | |
| "epoch": 0.7672917307101137, | |
| "grad_norm": 0.4604352454314464, | |
| "learning_rate": 3.1213917443425456e-06, | |
| "loss": 0.2395, | |
| "step": 624 | |
| }, | |
| { | |
| "epoch": 0.7685213648939441, | |
| "grad_norm": 0.5005650818328251, | |
| "learning_rate": 3.0902610850071922e-06, | |
| "loss": 0.1712, | |
| "step": 625 | |
| }, | |
| { | |
| "epoch": 0.7697509990777743, | |
| "grad_norm": 0.3297795229391836, | |
| "learning_rate": 3.0592580477936606e-06, | |
| "loss": 0.1249, | |
| "step": 626 | |
| }, | |
| { | |
| "epoch": 0.7709806332616047, | |
| "grad_norm": 0.37133417357695125, | |
| "learning_rate": 3.0283832053246644e-06, | |
| "loss": 0.1496, | |
| "step": 627 | |
| }, | |
| { | |
| "epoch": 0.772210267445435, | |
| "grad_norm": 1.0851806228661502, | |
| "learning_rate": 2.99763712785516e-06, | |
| "loss": 0.1834, | |
| "step": 628 | |
| }, | |
| { | |
| "epoch": 0.7734399016292653, | |
| "grad_norm": 0.5871194480383413, | |
| "learning_rate": 2.967020383261834e-06, | |
| "loss": 0.2054, | |
| "step": 629 | |
| }, | |
| { | |
| "epoch": 0.7746695358130956, | |
| "grad_norm": 0.5149728889777226, | |
| "learning_rate": 2.9365335370326143e-06, | |
| "loss": 0.1972, | |
| "step": 630 | |
| }, | |
| { | |
| "epoch": 0.7758991699969259, | |
| "grad_norm": 0.37527398302282, | |
| "learning_rate": 2.9061771522562143e-06, | |
| "loss": 0.1492, | |
| "step": 631 | |
| }, | |
| { | |
| "epoch": 0.7771288041807562, | |
| "grad_norm": 0.4284583342223879, | |
| "learning_rate": 2.875951789611734e-06, | |
| "loss": 0.1937, | |
| "step": 632 | |
| }, | |
| { | |
| "epoch": 0.7783584383645865, | |
| "grad_norm": 0.4328792148070332, | |
| "learning_rate": 2.8458580073583262e-06, | |
| "loss": 0.1905, | |
| "step": 633 | |
| }, | |
| { | |
| "epoch": 0.7795880725484169, | |
| "grad_norm": 0.4067822771383594, | |
| "learning_rate": 2.8158963613248437e-06, | |
| "loss": 0.2048, | |
| "step": 634 | |
| }, | |
| { | |
| "epoch": 0.7808177067322472, | |
| "grad_norm": 0.5475925840409395, | |
| "learning_rate": 2.7860674048996174e-06, | |
| "loss": 0.2014, | |
| "step": 635 | |
| }, | |
| { | |
| "epoch": 0.7820473409160774, | |
| "grad_norm": 0.3714863801891058, | |
| "learning_rate": 2.756371689020214e-06, | |
| "loss": 0.1597, | |
| "step": 636 | |
| }, | |
| { | |
| "epoch": 0.7832769750999078, | |
| "grad_norm": 0.45403846500036404, | |
| "learning_rate": 2.7268097621632473e-06, | |
| "loss": 0.1588, | |
| "step": 637 | |
| }, | |
| { | |
| "epoch": 0.784506609283738, | |
| "grad_norm": 0.2750476426300895, | |
| "learning_rate": 2.697382170334275e-06, | |
| "loss": 0.1456, | |
| "step": 638 | |
| }, | |
| { | |
| "epoch": 0.7857362434675684, | |
| "grad_norm": 0.4122155448314921, | |
| "learning_rate": 2.6680894570577042e-06, | |
| "loss": 0.165, | |
| "step": 639 | |
| }, | |
| { | |
| "epoch": 0.7869658776513987, | |
| "grad_norm": 0.44104871745668295, | |
| "learning_rate": 2.638932163366742e-06, | |
| "loss": 0.1883, | |
| "step": 640 | |
| }, | |
| { | |
| "epoch": 0.7869658776513987, | |
| "eval_accuracy": 0.8021390374331551, | |
| "eval_f1": 0.5066666666666667, | |
| "eval_loss": 0.42875000834465027, | |
| "eval_precision": 0.76, | |
| "eval_recall": 0.38, | |
| "eval_runtime": 22.3064, | |
| "eval_samples_per_second": 2.242, | |
| "eval_steps_per_second": 0.179, | |
| "step": 640 | |
| }, | |
| { | |
| "epoch": 0.7881955118352291, | |
| "grad_norm": 0.38537966631812437, | |
| "learning_rate": 2.6099108277934105e-06, | |
| "loss": 0.1942, | |
| "step": 641 | |
| }, | |
| { | |
| "epoch": 0.7894251460190593, | |
| "grad_norm": 0.47302017581744826, | |
| "learning_rate": 2.581025986358602e-06, | |
| "loss": 0.2733, | |
| "step": 642 | |
| }, | |
| { | |
| "epoch": 0.7906547802028896, | |
| "grad_norm": 0.4006638675446945, | |
| "learning_rate": 2.5522781725621814e-06, | |
| "loss": 0.1905, | |
| "step": 643 | |
| }, | |
| { | |
| "epoch": 0.79188441438672, | |
| "grad_norm": 0.4264868084266065, | |
| "learning_rate": 2.523667917373125e-06, | |
| "loss": 0.2047, | |
| "step": 644 | |
| }, | |
| { | |
| "epoch": 0.7931140485705502, | |
| "grad_norm": 0.3954441386492838, | |
| "learning_rate": 2.4951957492197097e-06, | |
| "loss": 0.1377, | |
| "step": 645 | |
| }, | |
| { | |
| "epoch": 0.7943436827543806, | |
| "grad_norm": 0.39481889488214283, | |
| "learning_rate": 2.4668621939797745e-06, | |
| "loss": 0.1402, | |
| "step": 646 | |
| }, | |
| { | |
| "epoch": 0.7955733169382109, | |
| "grad_norm": 0.5271696297567287, | |
| "learning_rate": 2.438667774970981e-06, | |
| "loss": 0.2091, | |
| "step": 647 | |
| }, | |
| { | |
| "epoch": 0.7968029511220412, | |
| "grad_norm": 0.40581144727582685, | |
| "learning_rate": 2.4106130129411608e-06, | |
| "loss": 0.1898, | |
| "step": 648 | |
| }, | |
| { | |
| "epoch": 0.7980325853058715, | |
| "grad_norm": 0.4102532645005857, | |
| "learning_rate": 2.3826984260587084e-06, | |
| "loss": 0.2066, | |
| "step": 649 | |
| }, | |
| { | |
| "epoch": 0.7992622194897018, | |
| "grad_norm": 0.388703790445828, | |
| "learning_rate": 2.354924529902978e-06, | |
| "loss": 0.1987, | |
| "step": 650 | |
| }, | |
| { | |
| "epoch": 0.8004918536735321, | |
| "grad_norm": 0.4906618445456134, | |
| "learning_rate": 2.327291837454799e-06, | |
| "loss": 0.1837, | |
| "step": 651 | |
| }, | |
| { | |
| "epoch": 0.8017214878573624, | |
| "grad_norm": 0.37536494595757913, | |
| "learning_rate": 2.2998008590869838e-06, | |
| "loss": 0.1657, | |
| "step": 652 | |
| }, | |
| { | |
| "epoch": 0.8029511220411928, | |
| "grad_norm": 0.3812431916923574, | |
| "learning_rate": 2.2724521025548828e-06, | |
| "loss": 0.1008, | |
| "step": 653 | |
| }, | |
| { | |
| "epoch": 0.804180756225023, | |
| "grad_norm": 0.3734890292027527, | |
| "learning_rate": 2.245246072987045e-06, | |
| "loss": 0.1343, | |
| "step": 654 | |
| }, | |
| { | |
| "epoch": 0.8054103904088533, | |
| "grad_norm": 0.4423063838480555, | |
| "learning_rate": 2.2181832728758635e-06, | |
| "loss": 0.2222, | |
| "step": 655 | |
| }, | |
| { | |
| "epoch": 0.8066400245926837, | |
| "grad_norm": 0.3896545849527162, | |
| "learning_rate": 2.191264202068286e-06, | |
| "loss": 0.1766, | |
| "step": 656 | |
| }, | |
| { | |
| "epoch": 0.807869658776514, | |
| "grad_norm": 0.6024032080378133, | |
| "learning_rate": 2.1644893577566118e-06, | |
| "loss": 0.231, | |
| "step": 657 | |
| }, | |
| { | |
| "epoch": 0.8090992929603443, | |
| "grad_norm": 0.43861748495389236, | |
| "learning_rate": 2.137859234469286e-06, | |
| "loss": 0.2467, | |
| "step": 658 | |
| }, | |
| { | |
| "epoch": 0.8103289271441746, | |
| "grad_norm": 0.37033226791746354, | |
| "learning_rate": 2.1113743240617668e-06, | |
| "loss": 0.1337, | |
| "step": 659 | |
| }, | |
| { | |
| "epoch": 0.811558561328005, | |
| "grad_norm": 0.6398820179734428, | |
| "learning_rate": 2.08503511570746e-06, | |
| "loss": 0.1954, | |
| "step": 660 | |
| }, | |
| { | |
| "epoch": 0.8127881955118352, | |
| "grad_norm": 0.4504933775118792, | |
| "learning_rate": 2.058842095888658e-06, | |
| "loss": 0.18, | |
| "step": 661 | |
| }, | |
| { | |
| "epoch": 0.8140178296956655, | |
| "grad_norm": 0.361212739042047, | |
| "learning_rate": 2.0327957483875693e-06, | |
| "loss": 0.1489, | |
| "step": 662 | |
| }, | |
| { | |
| "epoch": 0.8152474638794959, | |
| "grad_norm": 0.307913369177724, | |
| "learning_rate": 2.006896554277388e-06, | |
| "loss": 0.1572, | |
| "step": 663 | |
| }, | |
| { | |
| "epoch": 0.8164770980633261, | |
| "grad_norm": 0.25426740831645195, | |
| "learning_rate": 1.981144991913392e-06, | |
| "loss": 0.12, | |
| "step": 664 | |
| }, | |
| { | |
| "epoch": 0.8177067322471565, | |
| "grad_norm": 0.3663288109181175, | |
| "learning_rate": 1.9555415369241228e-06, | |
| "loss": 0.1571, | |
| "step": 665 | |
| }, | |
| { | |
| "epoch": 0.8189363664309868, | |
| "grad_norm": 0.41662449029107057, | |
| "learning_rate": 1.930086662202589e-06, | |
| "loss": 0.1873, | |
| "step": 666 | |
| }, | |
| { | |
| "epoch": 0.820166000614817, | |
| "grad_norm": 0.40845173743188795, | |
| "learning_rate": 1.9047808378975485e-06, | |
| "loss": 0.1534, | |
| "step": 667 | |
| }, | |
| { | |
| "epoch": 0.8213956347986474, | |
| "grad_norm": 0.6212434671550456, | |
| "learning_rate": 1.8796245314048046e-06, | |
| "loss": 0.2374, | |
| "step": 668 | |
| }, | |
| { | |
| "epoch": 0.8226252689824777, | |
| "grad_norm": 0.3337054400199707, | |
| "learning_rate": 1.8546182073585828e-06, | |
| "loss": 0.184, | |
| "step": 669 | |
| }, | |
| { | |
| "epoch": 0.823854903166308, | |
| "grad_norm": 0.37408116822647747, | |
| "learning_rate": 1.829762327622958e-06, | |
| "loss": 0.1627, | |
| "step": 670 | |
| }, | |
| { | |
| "epoch": 0.8250845373501383, | |
| "grad_norm": 0.41291954814345744, | |
| "learning_rate": 1.805057351283307e-06, | |
| "loss": 0.1426, | |
| "step": 671 | |
| }, | |
| { | |
| "epoch": 0.8263141715339687, | |
| "grad_norm": 0.6232928915412197, | |
| "learning_rate": 1.7805037346378384e-06, | |
| "loss": 0.1939, | |
| "step": 672 | |
| }, | |
| { | |
| "epoch": 0.827543805717799, | |
| "grad_norm": 0.43962963164293384, | |
| "learning_rate": 1.756101931189169e-06, | |
| "loss": 0.2049, | |
| "step": 673 | |
| }, | |
| { | |
| "epoch": 0.8287734399016292, | |
| "grad_norm": 0.3747672424266052, | |
| "learning_rate": 1.7318523916359376e-06, | |
| "loss": 0.1644, | |
| "step": 674 | |
| }, | |
| { | |
| "epoch": 0.8300030740854596, | |
| "grad_norm": 0.4713865050667868, | |
| "learning_rate": 1.7077555638644838e-06, | |
| "loss": 0.2924, | |
| "step": 675 | |
| }, | |
| { | |
| "epoch": 0.8312327082692899, | |
| "grad_norm": 0.5391745289921438, | |
| "learning_rate": 1.6838118929405856e-06, | |
| "loss": 0.1767, | |
| "step": 676 | |
| }, | |
| { | |
| "epoch": 0.8324623424531202, | |
| "grad_norm": 0.35807178811591905, | |
| "learning_rate": 1.660021821101222e-06, | |
| "loss": 0.1718, | |
| "step": 677 | |
| }, | |
| { | |
| "epoch": 0.8336919766369505, | |
| "grad_norm": 0.5700152695384362, | |
| "learning_rate": 1.6363857877464161e-06, | |
| "loss": 0.1505, | |
| "step": 678 | |
| }, | |
| { | |
| "epoch": 0.8349216108207809, | |
| "grad_norm": 0.521349273286693, | |
| "learning_rate": 1.6129042294311227e-06, | |
| "loss": 0.1893, | |
| "step": 679 | |
| }, | |
| { | |
| "epoch": 0.8361512450046111, | |
| "grad_norm": 0.4881174981503527, | |
| "learning_rate": 1.5895775798571523e-06, | |
| "loss": 0.2403, | |
| "step": 680 | |
| }, | |
| { | |
| "epoch": 0.8361512450046111, | |
| "eval_accuracy": 0.8021390374331551, | |
| "eval_f1": 0.5066666666666667, | |
| "eval_loss": 0.42875000834465027, | |
| "eval_precision": 0.76, | |
| "eval_recall": 0.38, | |
| "eval_runtime": 23.134, | |
| "eval_samples_per_second": 2.161, | |
| "eval_steps_per_second": 0.173, | |
| "step": 680 | |
| }, | |
| { | |
| "epoch": 0.8373808791884414, | |
| "grad_norm": 0.43157618057929154, | |
| "learning_rate": 1.5664062698651706e-06, | |
| "loss": 0.1824, | |
| "step": 681 | |
| }, | |
| { | |
| "epoch": 0.8386105133722718, | |
| "grad_norm": 0.5760272230077988, | |
| "learning_rate": 1.5433907274267357e-06, | |
| "loss": 0.2397, | |
| "step": 682 | |
| }, | |
| { | |
| "epoch": 0.839840147556102, | |
| "grad_norm": 0.5350905991023048, | |
| "learning_rate": 1.5205313776364028e-06, | |
| "loss": 0.1892, | |
| "step": 683 | |
| }, | |
| { | |
| "epoch": 0.8410697817399324, | |
| "grad_norm": 0.61137934990804, | |
| "learning_rate": 1.4978286427038602e-06, | |
| "loss": 0.2348, | |
| "step": 684 | |
| }, | |
| { | |
| "epoch": 0.8422994159237627, | |
| "grad_norm": 0.4331644305139785, | |
| "learning_rate": 1.4752829419461357e-06, | |
| "loss": 0.1937, | |
| "step": 685 | |
| }, | |
| { | |
| "epoch": 0.8435290501075929, | |
| "grad_norm": 0.3640781076289279, | |
| "learning_rate": 1.4528946917798603e-06, | |
| "loss": 0.1962, | |
| "step": 686 | |
| }, | |
| { | |
| "epoch": 0.8447586842914233, | |
| "grad_norm": 0.4244637100420945, | |
| "learning_rate": 1.4306643057135638e-06, | |
| "loss": 0.193, | |
| "step": 687 | |
| }, | |
| { | |
| "epoch": 0.8459883184752536, | |
| "grad_norm": 0.27253213925489794, | |
| "learning_rate": 1.4085921943400416e-06, | |
| "loss": 0.1582, | |
| "step": 688 | |
| }, | |
| { | |
| "epoch": 0.847217952659084, | |
| "grad_norm": 0.7026492760941759, | |
| "learning_rate": 1.3866787653287804e-06, | |
| "loss": 0.2727, | |
| "step": 689 | |
| }, | |
| { | |
| "epoch": 0.8484475868429142, | |
| "grad_norm": 0.3357057600160637, | |
| "learning_rate": 1.3649244234184157e-06, | |
| "loss": 0.1395, | |
| "step": 690 | |
| }, | |
| { | |
| "epoch": 0.8496772210267446, | |
| "grad_norm": 0.38849185683759185, | |
| "learning_rate": 1.3433295704092586e-06, | |
| "loss": 0.1367, | |
| "step": 691 | |
| }, | |
| { | |
| "epoch": 0.8509068552105749, | |
| "grad_norm": 0.5532934868131949, | |
| "learning_rate": 1.3218946051558867e-06, | |
| "loss": 0.2007, | |
| "step": 692 | |
| }, | |
| { | |
| "epoch": 0.8521364893944051, | |
| "grad_norm": 0.4093414023233572, | |
| "learning_rate": 1.3006199235597628e-06, | |
| "loss": 0.199, | |
| "step": 693 | |
| }, | |
| { | |
| "epoch": 0.8533661235782355, | |
| "grad_norm": 0.5800657790788337, | |
| "learning_rate": 1.279505918561923e-06, | |
| "loss": 0.1786, | |
| "step": 694 | |
| }, | |
| { | |
| "epoch": 0.8545957577620658, | |
| "grad_norm": 0.5604353644860381, | |
| "learning_rate": 1.2585529801357377e-06, | |
| "loss": 0.2597, | |
| "step": 695 | |
| }, | |
| { | |
| "epoch": 0.8558253919458961, | |
| "grad_norm": 0.4944214492031985, | |
| "learning_rate": 1.2377614952796825e-06, | |
| "loss": 0.1578, | |
| "step": 696 | |
| }, | |
| { | |
| "epoch": 0.8570550261297264, | |
| "grad_norm": 0.3580298395044867, | |
| "learning_rate": 1.217131848010209e-06, | |
| "loss": 0.145, | |
| "step": 697 | |
| }, | |
| { | |
| "epoch": 0.8582846603135568, | |
| "grad_norm": 0.49696207588289626, | |
| "learning_rate": 1.196664419354644e-06, | |
| "loss": 0.1847, | |
| "step": 698 | |
| }, | |
| { | |
| "epoch": 0.859514294497387, | |
| "grad_norm": 0.5676831498828142, | |
| "learning_rate": 1.176359587344158e-06, | |
| "loss": 0.2467, | |
| "step": 699 | |
| }, | |
| { | |
| "epoch": 0.8607439286812173, | |
| "grad_norm": 0.4791316046608471, | |
| "learning_rate": 1.1562177270067766e-06, | |
| "loss": 0.2128, | |
| "step": 700 | |
| }, | |
| { | |
| "epoch": 0.8619735628650477, | |
| "grad_norm": 0.4252111135080987, | |
| "learning_rate": 1.1362392103604536e-06, | |
| "loss": 0.1746, | |
| "step": 701 | |
| }, | |
| { | |
| "epoch": 0.8632031970488779, | |
| "grad_norm": 0.45840370226979704, | |
| "learning_rate": 1.1164244064062101e-06, | |
| "loss": 0.2294, | |
| "step": 702 | |
| }, | |
| { | |
| "epoch": 0.8644328312327083, | |
| "grad_norm": 0.4054117908741426, | |
| "learning_rate": 1.0967736811213048e-06, | |
| "loss": 0.2092, | |
| "step": 703 | |
| }, | |
| { | |
| "epoch": 0.8656624654165386, | |
| "grad_norm": 0.39243641295826015, | |
| "learning_rate": 1.0772873974524833e-06, | |
| "loss": 0.2066, | |
| "step": 704 | |
| }, | |
| { | |
| "epoch": 0.8668920996003688, | |
| "grad_norm": 0.354816693403516, | |
| "learning_rate": 1.0579659153092759e-06, | |
| "loss": 0.1898, | |
| "step": 705 | |
| }, | |
| { | |
| "epoch": 0.8681217337841992, | |
| "grad_norm": 0.3898934913509522, | |
| "learning_rate": 1.0388095915573427e-06, | |
| "loss": 0.1295, | |
| "step": 706 | |
| }, | |
| { | |
| "epoch": 0.8693513679680295, | |
| "grad_norm": 0.5938780642245854, | |
| "learning_rate": 1.0198187800118842e-06, | |
| "loss": 0.2201, | |
| "step": 707 | |
| }, | |
| { | |
| "epoch": 0.8705810021518599, | |
| "grad_norm": 0.4475510981424918, | |
| "learning_rate": 1.0009938314311186e-06, | |
| "loss": 0.1617, | |
| "step": 708 | |
| }, | |
| { | |
| "epoch": 0.8718106363356901, | |
| "grad_norm": 0.6094389342778289, | |
| "learning_rate": 9.82335093509782e-07, | |
| "loss": 0.2703, | |
| "step": 709 | |
| }, | |
| { | |
| "epoch": 0.8730402705195205, | |
| "grad_norm": 0.3623911413159473, | |
| "learning_rate": 9.638429108727232e-07, | |
| "loss": 0.1882, | |
| "step": 710 | |
| }, | |
| { | |
| "epoch": 0.8742699047033508, | |
| "grad_norm": 0.4538866026680618, | |
| "learning_rate": 9.455176250685338e-07, | |
| "loss": 0.2013, | |
| "step": 711 | |
| }, | |
| { | |
| "epoch": 0.875499538887181, | |
| "grad_norm": 0.3194360166861715, | |
| "learning_rate": 9.27359574563238e-07, | |
| "loss": 0.1851, | |
| "step": 712 | |
| }, | |
| { | |
| "epoch": 0.8767291730710114, | |
| "grad_norm": 0.38338959194718086, | |
| "learning_rate": 9.093690947340406e-07, | |
| "loss": 0.1927, | |
| "step": 713 | |
| }, | |
| { | |
| "epoch": 0.8779588072548417, | |
| "grad_norm": 0.3879298822682774, | |
| "learning_rate": 8.915465178631344e-07, | |
| "loss": 0.1238, | |
| "step": 714 | |
| }, | |
| { | |
| "epoch": 0.879188441438672, | |
| "grad_norm": 0.34738835843437105, | |
| "learning_rate": 8.738921731315686e-07, | |
| "loss": 0.1616, | |
| "step": 715 | |
| }, | |
| { | |
| "epoch": 0.8804180756225023, | |
| "grad_norm": 0.42388132636624687, | |
| "learning_rate": 8.564063866131567e-07, | |
| "loss": 0.1793, | |
| "step": 716 | |
| }, | |
| { | |
| "epoch": 0.8816477098063327, | |
| "grad_norm": 0.5595604732568557, | |
| "learning_rate": 8.390894812684602e-07, | |
| "loss": 0.2188, | |
| "step": 717 | |
| }, | |
| { | |
| "epoch": 0.8828773439901629, | |
| "grad_norm": 0.4324638530503791, | |
| "learning_rate": 8.219417769388316e-07, | |
| "loss": 0.1984, | |
| "step": 718 | |
| }, | |
| { | |
| "epoch": 0.8841069781739932, | |
| "grad_norm": 0.5485490850760583, | |
| "learning_rate": 8.049635903404907e-07, | |
| "loss": 0.1978, | |
| "step": 719 | |
| }, | |
| { | |
| "epoch": 0.8853366123578236, | |
| "grad_norm": 0.34294438952407474, | |
| "learning_rate": 7.881552350586863e-07, | |
| "loss": 0.1937, | |
| "step": 720 | |
| }, | |
| { | |
| "epoch": 0.8853366123578236, | |
| "eval_accuracy": 0.8021390374331551, | |
| "eval_f1": 0.5066666666666667, | |
| "eval_loss": 0.42445313930511475, | |
| "eval_precision": 0.76, | |
| "eval_recall": 0.38, | |
| "eval_runtime": 23.5507, | |
| "eval_samples_per_second": 2.123, | |
| "eval_steps_per_second": 0.17, | |
| "step": 720 | |
| }, | |
| { | |
| "epoch": 0.8865662465416538, | |
| "grad_norm": 0.3459770483320799, | |
| "learning_rate": 7.715170215419043e-07, | |
| "loss": 0.1599, | |
| "step": 721 | |
| }, | |
| { | |
| "epoch": 0.8877958807254842, | |
| "grad_norm": 0.3720600631339784, | |
| "learning_rate": 7.550492570961243e-07, | |
| "loss": 0.1482, | |
| "step": 722 | |
| }, | |
| { | |
| "epoch": 0.8890255149093145, | |
| "grad_norm": 0.3994271301853499, | |
| "learning_rate": 7.387522458791552e-07, | |
| "loss": 0.1792, | |
| "step": 723 | |
| }, | |
| { | |
| "epoch": 0.8902551490931447, | |
| "grad_norm": 0.3640144769183845, | |
| "learning_rate": 7.226262888950153e-07, | |
| "loss": 0.1466, | |
| "step": 724 | |
| }, | |
| { | |
| "epoch": 0.8914847832769751, | |
| "grad_norm": 0.37455447234355393, | |
| "learning_rate": 7.066716839883592e-07, | |
| "loss": 0.1584, | |
| "step": 725 | |
| }, | |
| { | |
| "epoch": 0.8927144174608054, | |
| "grad_norm": 0.333702811711787, | |
| "learning_rate": 6.908887258389974e-07, | |
| "loss": 0.186, | |
| "step": 726 | |
| }, | |
| { | |
| "epoch": 0.8939440516446358, | |
| "grad_norm": 0.4224450151535308, | |
| "learning_rate": 6.752777059564431e-07, | |
| "loss": 0.2272, | |
| "step": 727 | |
| }, | |
| { | |
| "epoch": 0.895173685828466, | |
| "grad_norm": 0.3600232430765048, | |
| "learning_rate": 6.598389126745209e-07, | |
| "loss": 0.2266, | |
| "step": 728 | |
| }, | |
| { | |
| "epoch": 0.8964033200122964, | |
| "grad_norm": 0.5012149171996233, | |
| "learning_rate": 6.445726311460553e-07, | |
| "loss": 0.2236, | |
| "step": 729 | |
| }, | |
| { | |
| "epoch": 0.8976329541961267, | |
| "grad_norm": 0.39536858077842535, | |
| "learning_rate": 6.29479143337598e-07, | |
| "loss": 0.145, | |
| "step": 730 | |
| }, | |
| { | |
| "epoch": 0.8988625883799569, | |
| "grad_norm": 0.2991426563682339, | |
| "learning_rate": 6.145587280242138e-07, | |
| "loss": 0.175, | |
| "step": 731 | |
| }, | |
| { | |
| "epoch": 0.9000922225637873, | |
| "grad_norm": 0.3676759279938805, | |
| "learning_rate": 5.99811660784344e-07, | |
| "loss": 0.1776, | |
| "step": 732 | |
| }, | |
| { | |
| "epoch": 0.9013218567476176, | |
| "grad_norm": 0.3708523019855745, | |
| "learning_rate": 5.852382139947077e-07, | |
| "loss": 0.1736, | |
| "step": 733 | |
| }, | |
| { | |
| "epoch": 0.9025514909314479, | |
| "grad_norm": 0.43018990463756845, | |
| "learning_rate": 5.708386568252688e-07, | |
| "loss": 0.1436, | |
| "step": 734 | |
| }, | |
| { | |
| "epoch": 0.9037811251152782, | |
| "grad_norm": 0.4802111636137182, | |
| "learning_rate": 5.566132552342784e-07, | |
| "loss": 0.2092, | |
| "step": 735 | |
| }, | |
| { | |
| "epoch": 0.9050107592991086, | |
| "grad_norm": 0.2782286925340434, | |
| "learning_rate": 5.425622719633428e-07, | |
| "loss": 0.1204, | |
| "step": 736 | |
| }, | |
| { | |
| "epoch": 0.9062403934829388, | |
| "grad_norm": 0.42051906131253536, | |
| "learning_rate": 5.286859665325905e-07, | |
| "loss": 0.2128, | |
| "step": 737 | |
| }, | |
| { | |
| "epoch": 0.9074700276667691, | |
| "grad_norm": 0.5089981907905707, | |
| "learning_rate": 5.149845952358589e-07, | |
| "loss": 0.2393, | |
| "step": 738 | |
| }, | |
| { | |
| "epoch": 0.9086996618505995, | |
| "grad_norm": 0.41726717657714646, | |
| "learning_rate": 5.014584111359811e-07, | |
| "loss": 0.1784, | |
| "step": 739 | |
| }, | |
| { | |
| "epoch": 0.9099292960344297, | |
| "grad_norm": 0.4603202083257241, | |
| "learning_rate": 4.881076640600979e-07, | |
| "loss": 0.1913, | |
| "step": 740 | |
| }, | |
| { | |
| "epoch": 0.9111589302182601, | |
| "grad_norm": 0.4130280700492162, | |
| "learning_rate": 4.7493260059504497e-07, | |
| "loss": 0.238, | |
| "step": 741 | |
| }, | |
| { | |
| "epoch": 0.9123885644020904, | |
| "grad_norm": 0.5036196124164085, | |
| "learning_rate": 4.6193346408280216e-07, | |
| "loss": 0.1926, | |
| "step": 742 | |
| }, | |
| { | |
| "epoch": 0.9136181985859206, | |
| "grad_norm": 0.36883928715030756, | |
| "learning_rate": 4.491104946160052e-07, | |
| "loss": 0.1659, | |
| "step": 743 | |
| }, | |
| { | |
| "epoch": 0.914847832769751, | |
| "grad_norm": 0.4945934881547631, | |
| "learning_rate": 4.3646392903348823e-07, | |
| "loss": 0.1546, | |
| "step": 744 | |
| }, | |
| { | |
| "epoch": 0.9160774669535813, | |
| "grad_norm": 0.33228048042062874, | |
| "learning_rate": 4.2399400091594154e-07, | |
| "loss": 0.174, | |
| "step": 745 | |
| }, | |
| { | |
| "epoch": 0.9173071011374117, | |
| "grad_norm": 0.3446047000937293, | |
| "learning_rate": 4.117009405815686e-07, | |
| "loss": 0.1422, | |
| "step": 746 | |
| }, | |
| { | |
| "epoch": 0.9185367353212419, | |
| "grad_norm": 0.4483830076000804, | |
| "learning_rate": 3.9958497508185036e-07, | |
| "loss": 0.2321, | |
| "step": 747 | |
| }, | |
| { | |
| "epoch": 0.9197663695050723, | |
| "grad_norm": 0.6123522355957743, | |
| "learning_rate": 3.8764632819734526e-07, | |
| "loss": 0.2343, | |
| "step": 748 | |
| }, | |
| { | |
| "epoch": 0.9209960036889026, | |
| "grad_norm": 0.4578579557499873, | |
| "learning_rate": 3.758852204335539e-07, | |
| "loss": 0.2546, | |
| "step": 749 | |
| }, | |
| { | |
| "epoch": 0.9222256378727328, | |
| "grad_norm": 0.5396510643509638, | |
| "learning_rate": 3.643018690168487e-07, | |
| "loss": 0.2142, | |
| "step": 750 | |
| }, | |
| { | |
| "epoch": 0.9234552720565632, | |
| "grad_norm": 0.4439335617291929, | |
| "learning_rate": 3.5289648789046616e-07, | |
| "loss": 0.1763, | |
| "step": 751 | |
| }, | |
| { | |
| "epoch": 0.9246849062403935, | |
| "grad_norm": 0.3529798858955445, | |
| "learning_rate": 3.4166928771054653e-07, | |
| "loss": 0.2423, | |
| "step": 752 | |
| }, | |
| { | |
| "epoch": 0.9259145404242238, | |
| "grad_norm": 0.44409731091250987, | |
| "learning_rate": 3.3062047584224934e-07, | |
| "loss": 0.2102, | |
| "step": 753 | |
| }, | |
| { | |
| "epoch": 0.9271441746080541, | |
| "grad_norm": 0.5097303302568637, | |
| "learning_rate": 3.197502563559185e-07, | |
| "loss": 0.157, | |
| "step": 754 | |
| }, | |
| { | |
| "epoch": 0.9283738087918845, | |
| "grad_norm": 0.312449073047315, | |
| "learning_rate": 3.0905883002332213e-07, | |
| "loss": 0.1459, | |
| "step": 755 | |
| }, | |
| { | |
| "epoch": 0.9296034429757147, | |
| "grad_norm": 0.5047008458688232, | |
| "learning_rate": 2.985463943139322e-07, | |
| "loss": 0.1738, | |
| "step": 756 | |
| }, | |
| { | |
| "epoch": 0.930833077159545, | |
| "grad_norm": 0.5557205879162439, | |
| "learning_rate": 2.882131433912883e-07, | |
| "loss": 0.2488, | |
| "step": 757 | |
| }, | |
| { | |
| "epoch": 0.9320627113433754, | |
| "grad_norm": 0.43487988536317196, | |
| "learning_rate": 2.7805926810940297e-07, | |
| "loss": 0.1683, | |
| "step": 758 | |
| }, | |
| { | |
| "epoch": 0.9332923455272056, | |
| "grad_norm": 0.41756639561772235, | |
| "learning_rate": 2.6808495600924355e-07, | |
| "loss": 0.1387, | |
| "step": 759 | |
| }, | |
| { | |
| "epoch": 0.934521979711036, | |
| "grad_norm": 0.3700165943051797, | |
| "learning_rate": 2.582903913152612e-07, | |
| "loss": 0.164, | |
| "step": 760 | |
| }, | |
| { | |
| "epoch": 0.934521979711036, | |
| "eval_accuracy": 0.8074866310160428, | |
| "eval_f1": 0.5263157894736842, | |
| "eval_loss": 0.41820311546325684, | |
| "eval_precision": 0.7692307692307693, | |
| "eval_recall": 0.4, | |
| "eval_runtime": 23.5561, | |
| "eval_samples_per_second": 2.123, | |
| "eval_steps_per_second": 0.17, | |
| "step": 760 | |
| }, | |
| { | |
| "epoch": 0.9357516138948663, | |
| "grad_norm": 0.375235976764425, | |
| "learning_rate": 2.4867575493199515e-07, | |
| "loss": 0.1546, | |
| "step": 761 | |
| }, | |
| { | |
| "epoch": 0.9369812480786965, | |
| "grad_norm": 0.43981480426316444, | |
| "learning_rate": 2.392412244407294e-07, | |
| "loss": 0.1917, | |
| "step": 762 | |
| }, | |
| { | |
| "epoch": 0.9382108822625269, | |
| "grad_norm": 0.6482748347069781, | |
| "learning_rate": 2.2998697409620573e-07, | |
| "loss": 0.2225, | |
| "step": 763 | |
| }, | |
| { | |
| "epoch": 0.9394405164463572, | |
| "grad_norm": 0.42061880501285365, | |
| "learning_rate": 2.2091317482342056e-07, | |
| "loss": 0.23, | |
| "step": 764 | |
| }, | |
| { | |
| "epoch": 0.9406701506301876, | |
| "grad_norm": 0.33119896763642076, | |
| "learning_rate": 2.1201999421445074e-07, | |
| "loss": 0.1174, | |
| "step": 765 | |
| }, | |
| { | |
| "epoch": 0.9418997848140178, | |
| "grad_norm": 0.3630099682147168, | |
| "learning_rate": 2.0330759652536835e-07, | |
| "loss": 0.1809, | |
| "step": 766 | |
| }, | |
| { | |
| "epoch": 0.9431294189978482, | |
| "grad_norm": 0.5867052799544186, | |
| "learning_rate": 1.9477614267320867e-07, | |
| "loss": 0.258, | |
| "step": 767 | |
| }, | |
| { | |
| "epoch": 0.9443590531816785, | |
| "grad_norm": 0.4296071075770851, | |
| "learning_rate": 1.8642579023298913e-07, | |
| "loss": 0.2059, | |
| "step": 768 | |
| }, | |
| { | |
| "epoch": 0.9455886873655087, | |
| "grad_norm": 0.49292796300633096, | |
| "learning_rate": 1.7825669343480624e-07, | |
| "loss": 0.2087, | |
| "step": 769 | |
| }, | |
| { | |
| "epoch": 0.9468183215493391, | |
| "grad_norm": 0.4595855430483603, | |
| "learning_rate": 1.7026900316098217e-07, | |
| "loss": 0.1969, | |
| "step": 770 | |
| }, | |
| { | |
| "epoch": 0.9480479557331694, | |
| "grad_norm": 0.35921138730577007, | |
| "learning_rate": 1.6246286694328594e-07, | |
| "loss": 0.1353, | |
| "step": 771 | |
| }, | |
| { | |
| "epoch": 0.9492775899169997, | |
| "grad_norm": 0.41891935550025844, | |
| "learning_rate": 1.5483842896019675e-07, | |
| "loss": 0.144, | |
| "step": 772 | |
| }, | |
| { | |
| "epoch": 0.95050722410083, | |
| "grad_norm": 0.3608543106060588, | |
| "learning_rate": 1.473958300342504e-07, | |
| "loss": 0.1887, | |
| "step": 773 | |
| }, | |
| { | |
| "epoch": 0.9517368582846603, | |
| "grad_norm": 0.29802095126527695, | |
| "learning_rate": 1.401352076294371e-07, | |
| "loss": 0.114, | |
| "step": 774 | |
| }, | |
| { | |
| "epoch": 0.9529664924684906, | |
| "grad_norm": 0.4605125584516114, | |
| "learning_rate": 1.3305669584865565e-07, | |
| "loss": 0.226, | |
| "step": 775 | |
| }, | |
| { | |
| "epoch": 0.9541961266523209, | |
| "grad_norm": 0.4384971669173196, | |
| "learning_rate": 1.261604254312454e-07, | |
| "loss": 0.2221, | |
| "step": 776 | |
| }, | |
| { | |
| "epoch": 0.9554257608361513, | |
| "grad_norm": 0.5042450469158528, | |
| "learning_rate": 1.1944652375056597e-07, | |
| "loss": 0.1903, | |
| "step": 777 | |
| }, | |
| { | |
| "epoch": 0.9566553950199815, | |
| "grad_norm": 0.34864348698758946, | |
| "learning_rate": 1.1291511481164807e-07, | |
| "loss": 0.1514, | |
| "step": 778 | |
| }, | |
| { | |
| "epoch": 0.9578850292038119, | |
| "grad_norm": 0.39006673270014214, | |
| "learning_rate": 1.0656631924889749e-07, | |
| "loss": 0.1849, | |
| "step": 779 | |
| }, | |
| { | |
| "epoch": 0.9591146633876422, | |
| "grad_norm": 0.37108179891285153, | |
| "learning_rate": 1.0040025432387801e-07, | |
| "loss": 0.1699, | |
| "step": 780 | |
| }, | |
| { | |
| "epoch": 0.9603442975714724, | |
| "grad_norm": 0.4840858020566464, | |
| "learning_rate": 9.441703392313095e-08, | |
| "loss": 0.1975, | |
| "step": 781 | |
| }, | |
| { | |
| "epoch": 0.9615739317553028, | |
| "grad_norm": 0.4069300865488938, | |
| "learning_rate": 8.861676855608237e-08, | |
| "loss": 0.1635, | |
| "step": 782 | |
| }, | |
| { | |
| "epoch": 0.9628035659391331, | |
| "grad_norm": 0.43469366649519353, | |
| "learning_rate": 8.299956535300135e-08, | |
| "loss": 0.2255, | |
| "step": 783 | |
| }, | |
| { | |
| "epoch": 0.9640332001229635, | |
| "grad_norm": 0.4875682606006585, | |
| "learning_rate": 7.756552806301498e-08, | |
| "loss": 0.2556, | |
| "step": 784 | |
| }, | |
| { | |
| "epoch": 0.9652628343067937, | |
| "grad_norm": 0.37118130700190005, | |
| "learning_rate": 7.23147570521987e-08, | |
| "loss": 0.1849, | |
| "step": 785 | |
| }, | |
| { | |
| "epoch": 0.9664924684906241, | |
| "grad_norm": 0.40788113934634646, | |
| "learning_rate": 6.724734930171561e-08, | |
| "loss": 0.132, | |
| "step": 786 | |
| }, | |
| { | |
| "epoch": 0.9677221026744544, | |
| "grad_norm": 0.40100916877522913, | |
| "learning_rate": 6.236339840603677e-08, | |
| "loss": 0.1737, | |
| "step": 787 | |
| }, | |
| { | |
| "epoch": 0.9689517368582846, | |
| "grad_norm": 0.288332597781413, | |
| "learning_rate": 5.766299457119817e-08, | |
| "loss": 0.1717, | |
| "step": 788 | |
| }, | |
| { | |
| "epoch": 0.970181371042115, | |
| "grad_norm": 0.3844917502681006, | |
| "learning_rate": 5.3146224613144225e-08, | |
| "loss": 0.2025, | |
| "step": 789 | |
| }, | |
| { | |
| "epoch": 0.9714110052259453, | |
| "grad_norm": 0.39531701055460605, | |
| "learning_rate": 4.8813171956123565e-08, | |
| "loss": 0.1683, | |
| "step": 790 | |
| }, | |
| { | |
| "epoch": 0.9726406394097756, | |
| "grad_norm": 0.4042396380118405, | |
| "learning_rate": 4.4663916631143554e-08, | |
| "loss": 0.1624, | |
| "step": 791 | |
| }, | |
| { | |
| "epoch": 0.9738702735936059, | |
| "grad_norm": 0.2908881244181945, | |
| "learning_rate": 4.069853527449596e-08, | |
| "loss": 0.1629, | |
| "step": 792 | |
| }, | |
| { | |
| "epoch": 0.9750999077774362, | |
| "grad_norm": 0.4056597153568874, | |
| "learning_rate": 3.691710112634139e-08, | |
| "loss": 0.2174, | |
| "step": 793 | |
| }, | |
| { | |
| "epoch": 0.9763295419612665, | |
| "grad_norm": 0.45435351641201505, | |
| "learning_rate": 3.3319684029354815e-08, | |
| "loss": 0.1799, | |
| "step": 794 | |
| }, | |
| { | |
| "epoch": 0.9775591761450968, | |
| "grad_norm": 0.300609635946807, | |
| "learning_rate": 2.9906350427435505e-08, | |
| "loss": 0.1744, | |
| "step": 795 | |
| }, | |
| { | |
| "epoch": 0.9787888103289272, | |
| "grad_norm": 0.3475286198999752, | |
| "learning_rate": 2.667716336448356e-08, | |
| "loss": 0.1171, | |
| "step": 796 | |
| }, | |
| { | |
| "epoch": 0.9800184445127574, | |
| "grad_norm": 0.3272831675072718, | |
| "learning_rate": 2.3632182483228628e-08, | |
| "loss": 0.1556, | |
| "step": 797 | |
| }, | |
| { | |
| "epoch": 0.9812480786965878, | |
| "grad_norm": 0.48277032481078636, | |
| "learning_rate": 2.077146402413521e-08, | |
| "loss": 0.2464, | |
| "step": 798 | |
| }, | |
| { | |
| "epoch": 0.9824777128804181, | |
| "grad_norm": 0.32237492877791696, | |
| "learning_rate": 1.80950608243613e-08, | |
| "loss": 0.1548, | |
| "step": 799 | |
| }, | |
| { | |
| "epoch": 0.9837073470642483, | |
| "grad_norm": 0.4196152906906351, | |
| "learning_rate": 1.5603022316780235e-08, | |
| "loss": 0.2185, | |
| "step": 800 | |
| }, | |
| { | |
| "epoch": 0.9837073470642483, | |
| "eval_accuracy": 0.8128342245989305, | |
| "eval_f1": 0.5454545454545454, | |
| "eval_loss": 0.4195312559604645, | |
| "eval_precision": 0.7777777777777778, | |
| "eval_recall": 0.42, | |
| "eval_runtime": 23.0969, | |
| "eval_samples_per_second": 2.165, | |
| "eval_steps_per_second": 0.173, | |
| "step": 800 | |
| }, | |
| { | |
| "epoch": 0.9849369812480787, | |
| "grad_norm": 0.4737970220793768, | |
| "learning_rate": 1.329539452907036e-08, | |
| "loss": 0.2434, | |
| "step": 801 | |
| }, | |
| { | |
| "epoch": 0.986166615431909, | |
| "grad_norm": 0.27628624700617177, | |
| "learning_rate": 1.117222008286456e-08, | |
| "loss": 0.1413, | |
| "step": 802 | |
| }, | |
| { | |
| "epoch": 0.9873962496157394, | |
| "grad_norm": 0.4484471355218967, | |
| "learning_rate": 9.233538192963132e-09, | |
| "loss": 0.2062, | |
| "step": 803 | |
| }, | |
| { | |
| "epoch": 0.9886258837995696, | |
| "grad_norm": 0.3948729656766095, | |
| "learning_rate": 7.479384666608802e-09, | |
| "loss": 0.1296, | |
| "step": 804 | |
| }, | |
| { | |
| "epoch": 0.9898555179834, | |
| "grad_norm": 0.32852852414095784, | |
| "learning_rate": 5.909791902823925e-09, | |
| "loss": 0.219, | |
| "step": 805 | |
| }, | |
| { | |
| "epoch": 0.9910851521672303, | |
| "grad_norm": 0.42167582713648133, | |
| "learning_rate": 4.524788891816512e-09, | |
| "loss": 0.1242, | |
| "step": 806 | |
| }, | |
| { | |
| "epoch": 0.9923147863510605, | |
| "grad_norm": 0.4186302604931252, | |
| "learning_rate": 3.3244012144395545e-09, | |
| "loss": 0.2346, | |
| "step": 807 | |
| }, | |
| { | |
| "epoch": 0.9935444205348909, | |
| "grad_norm": 0.5241641959513683, | |
| "learning_rate": 2.3086510417225093e-09, | |
| "loss": 0.22, | |
| "step": 808 | |
| }, | |
| { | |
| "epoch": 0.9947740547187212, | |
| "grad_norm": 0.5432971120883086, | |
| "learning_rate": 1.4775571344605167e-09, | |
| "loss": 0.1391, | |
| "step": 809 | |
| }, | |
| { | |
| "epoch": 0.9960036889025515, | |
| "grad_norm": 0.36894298305824874, | |
| "learning_rate": 8.311348428657884e-10, | |
| "loss": 0.1366, | |
| "step": 810 | |
| }, | |
| { | |
| "epoch": 0.9972333230863818, | |
| "grad_norm": 0.49102427432843837, | |
| "learning_rate": 3.6939610628894396e-10, | |
| "loss": 0.3079, | |
| "step": 811 | |
| }, | |
| { | |
| "epoch": 0.9984629572702121, | |
| "grad_norm": 0.6049830604783123, | |
| "learning_rate": 9.234945299363418e-11, | |
| "loss": 0.2577, | |
| "step": 812 | |
| }, | |
| { | |
| "epoch": 0.9996925914540424, | |
| "grad_norm": 0.4658029933791719, | |
| "learning_rate": 0.0, | |
| "loss": 0.2186, | |
| "step": 813 | |
| } | |
| ], | |
| "logging_steps": 1, | |
| "max_steps": 813, | |
| "num_input_tokens_seen": 0, | |
| "num_train_epochs": 1, | |
| "save_steps": 100, | |
| "stateful_callbacks": { | |
| "TrainerControl": { | |
| "args": { | |
| "should_epoch_stop": false, | |
| "should_evaluate": false, | |
| "should_log": false, | |
| "should_save": true, | |
| "should_training_stop": true | |
| }, | |
| "attributes": {} | |
| } | |
| }, | |
| "total_flos": 798189487620096.0, | |
| "train_batch_size": 4, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |