{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9995771670190274,
  "eval_steps": 20,
  "global_step": 788,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0,
      "eval_accuracy": 0.7339955849889624,
      "eval_f1": 0.2445141065830721,
      "eval_loss": 0.5994934439659119,
      "eval_precision": 0.6,
      "eval_recall": 0.15354330708661418,
      "eval_runtime": 52.3271,
      "eval_samples_per_second": 5.294,
      "eval_steps_per_second": 0.172,
      "step": 0
    },
    {
      "epoch": 0.0012684989429175475,
      "grad_norm": 2.466090202331543,
      "learning_rate": 2.5316455696202533e-07,
      "loss": 0.7205,
      "step": 1
    },
    {
      "epoch": 0.002536997885835095,
      "grad_norm": 1.759368896484375,
      "learning_rate": 5.063291139240507e-07,
      "loss": 0.6242,
      "step": 2
    },
    {
      "epoch": 0.0038054968287526427,
      "grad_norm": 1.8663183450698853,
      "learning_rate": 7.59493670886076e-07,
      "loss": 0.6844,
      "step": 3
    },
    {
      "epoch": 0.00507399577167019,
      "grad_norm": 2.109837293624878,
      "learning_rate": 1.0126582278481013e-06,
      "loss": 0.6534,
      "step": 4
    },
    {
      "epoch": 0.006342494714587738,
      "grad_norm": 2.1454806327819824,
      "learning_rate": 1.2658227848101267e-06,
      "loss": 0.6766,
      "step": 5
    },
    {
      "epoch": 0.007610993657505285,
      "grad_norm": 2.128432512283325,
      "learning_rate": 1.518987341772152e-06,
      "loss": 0.6845,
      "step": 6
    },
    {
      "epoch": 0.008879492600422832,
      "grad_norm": 2.348109722137451,
      "learning_rate": 1.7721518987341774e-06,
      "loss": 0.6689,
      "step": 7
    },
    {
      "epoch": 0.01014799154334038,
      "grad_norm": 2.1332168579101562,
      "learning_rate": 2.0253164556962026e-06,
      "loss": 0.6957,
      "step": 8
    },
    {
      "epoch": 0.011416490486257928,
      "grad_norm": 2.224982976913452,
      "learning_rate": 2.278481012658228e-06,
      "loss": 0.6278,
      "step": 9
    },
    {
      "epoch": 0.012684989429175475,
      "grad_norm": 1.956710934638977,
      "learning_rate": 2.5316455696202535e-06,
      "loss": 0.6717,
      "step": 10
    },
    {
      "epoch": 0.013953488372093023,
      "grad_norm": 2.072009325027466,
      "learning_rate": 2.7848101265822785e-06,
      "loss": 0.6349,
      "step": 11
    },
    {
      "epoch": 0.01522198731501057,
      "grad_norm": 1.9736876487731934,
      "learning_rate": 3.037974683544304e-06,
      "loss": 0.6542,
      "step": 12
    },
    {
      "epoch": 0.01649048625792812,
      "grad_norm": 2.037200689315796,
      "learning_rate": 3.2911392405063294e-06,
      "loss": 0.5939,
      "step": 13
    },
    {
      "epoch": 0.017758985200845664,
      "grad_norm": 2.2937052249908447,
      "learning_rate": 3.544303797468355e-06,
      "loss": 0.6793,
      "step": 14
    },
    {
      "epoch": 0.019027484143763214,
      "grad_norm": 2.500192403793335,
      "learning_rate": 3.7974683544303802e-06,
      "loss": 0.6384,
      "step": 15
    },
    {
      "epoch": 0.02029598308668076,
      "grad_norm": 2.0733048915863037,
      "learning_rate": 4.050632911392405e-06,
      "loss": 0.6608,
      "step": 16
    },
    {
      "epoch": 0.02156448202959831,
      "grad_norm": 2.258061170578003,
      "learning_rate": 4.303797468354431e-06,
      "loss": 0.7128,
      "step": 17
    },
    {
      "epoch": 0.022832980972515855,
      "grad_norm": 1.9154382944107056,
      "learning_rate": 4.556962025316456e-06,
      "loss": 0.6418,
      "step": 18
    },
    {
      "epoch": 0.024101479915433405,
      "grad_norm": 2.2492971420288086,
      "learning_rate": 4.8101265822784815e-06,
      "loss": 0.6382,
      "step": 19
    },
    {
      "epoch": 0.02536997885835095,
      "grad_norm": 1.9588701725006104,
      "learning_rate": 5.063291139240507e-06,
      "loss": 0.6057,
      "step": 20
    },
    {
      "epoch": 0.02536997885835095,
      "eval_accuracy": 0.7373068432671082,
      "eval_f1": 0.25157232704402516,
      "eval_loss": 0.591836154460907,
      "eval_precision": 0.625,
      "eval_recall": 0.15748031496062992,
      "eval_runtime": 52.8906,
      "eval_samples_per_second": 5.237,
      "eval_steps_per_second": 0.17,
      "step": 20
    },
    {
      "epoch": 0.0266384778012685,
      "grad_norm": 2.0472803115844727,
      "learning_rate": 5.3164556962025316e-06,
      "loss": 0.6482,
      "step": 21
    },
    {
      "epoch": 0.027906976744186046,
      "grad_norm": 2.471733570098877,
      "learning_rate": 5.569620253164557e-06,
      "loss": 0.6122,
      "step": 22
    },
    {
      "epoch": 0.029175475687103596,
      "grad_norm": 2.041800022125244,
      "learning_rate": 5.8227848101265824e-06,
      "loss": 0.7014,
      "step": 23
    },
    {
      "epoch": 0.03044397463002114,
      "grad_norm": 1.9668631553649902,
      "learning_rate": 6.075949367088608e-06,
      "loss": 0.6282,
      "step": 24
    },
    {
      "epoch": 0.03171247357293869,
      "grad_norm": 1.7734192609786987,
      "learning_rate": 6.329113924050634e-06,
      "loss": 0.6623,
      "step": 25
    },
    {
      "epoch": 0.03298097251585624,
      "grad_norm": 2.1032049655914307,
      "learning_rate": 6.582278481012659e-06,
      "loss": 0.6185,
      "step": 26
    },
    {
      "epoch": 0.034249471458773786,
      "grad_norm": 1.768869400024414,
      "learning_rate": 6.835443037974684e-06,
      "loss": 0.6106,
      "step": 27
    },
    {
      "epoch": 0.03551797040169133,
      "grad_norm": 1.874267339706421,
      "learning_rate": 7.08860759493671e-06,
      "loss": 0.6634,
      "step": 28
    },
    {
      "epoch": 0.03678646934460888,
      "grad_norm": 1.702706217765808,
      "learning_rate": 7.341772151898735e-06,
      "loss": 0.5611,
      "step": 29
    },
    {
      "epoch": 0.03805496828752643,
      "grad_norm": 1.583400845527649,
      "learning_rate": 7.5949367088607605e-06,
      "loss": 0.5927,
      "step": 30
    },
    {
      "epoch": 0.03932346723044398,
      "grad_norm": 1.9068820476531982,
      "learning_rate": 7.848101265822786e-06,
      "loss": 0.6602,
      "step": 31
    },
    {
      "epoch": 0.04059196617336152,
      "grad_norm": 1.8527209758758545,
      "learning_rate": 8.10126582278481e-06,
      "loss": 0.5679,
      "step": 32
    },
    {
      "epoch": 0.04186046511627907,
      "grad_norm": 2.0851504802703857,
      "learning_rate": 8.354430379746837e-06,
      "loss": 0.5437,
      "step": 33
    },
    {
      "epoch": 0.04312896405919662,
      "grad_norm": 1.8767458200454712,
      "learning_rate": 8.607594936708861e-06,
      "loss": 0.5738,
      "step": 34
    },
    {
      "epoch": 0.04439746300211417,
      "grad_norm": 2.203929901123047,
      "learning_rate": 8.860759493670886e-06,
      "loss": 0.6506,
      "step": 35
    },
    {
      "epoch": 0.04566596194503171,
      "grad_norm": 1.9388232231140137,
      "learning_rate": 9.113924050632912e-06,
      "loss": 0.5631,
      "step": 36
    },
    {
      "epoch": 0.04693446088794926,
      "grad_norm": 1.4374593496322632,
      "learning_rate": 9.367088607594937e-06,
      "loss": 0.529,
      "step": 37
    },
    {
      "epoch": 0.04820295983086681,
      "grad_norm": 1.4115264415740967,
      "learning_rate": 9.620253164556963e-06,
      "loss": 0.501,
      "step": 38
    },
    {
      "epoch": 0.04947145877378435,
      "grad_norm": 1.4638575315475464,
      "learning_rate": 9.87341772151899e-06,
      "loss": 0.5635,
      "step": 39
    },
    {
      "epoch": 0.0507399577167019,
      "grad_norm": 1.6112034320831299,
      "learning_rate": 1.0126582278481014e-05,
      "loss": 0.5356,
      "step": 40
    },
    {
      "epoch": 0.0507399577167019,
      "eval_accuracy": 0.7516556291390728,
      "eval_f1": 0.3283582089552239,
      "eval_loss": 0.5521447062492371,
      "eval_precision": 0.6790123456790124,
      "eval_recall": 0.21653543307086615,
      "eval_runtime": 52.1586,
      "eval_samples_per_second": 5.311,
      "eval_steps_per_second": 0.173,
      "step": 40
    },
    {
      "epoch": 0.05200845665961945,
      "grad_norm": 1.6395788192749023,
      "learning_rate": 1.0379746835443039e-05,
      "loss": 0.5908,
      "step": 41
    },
    {
      "epoch": 0.053276955602537,
      "grad_norm": 1.3024195432662964,
      "learning_rate": 1.0632911392405063e-05,
      "loss": 0.5528,
      "step": 42
    },
    {
      "epoch": 0.05454545454545454,
      "grad_norm": 1.3428813219070435,
      "learning_rate": 1.088607594936709e-05,
      "loss": 0.5177,
      "step": 43
    },
    {
      "epoch": 0.05581395348837209,
      "grad_norm": 2.2030651569366455,
      "learning_rate": 1.1139240506329114e-05,
      "loss": 0.5615,
      "step": 44
    },
    {
      "epoch": 0.05708245243128964,
      "grad_norm": 1.491940975189209,
      "learning_rate": 1.139240506329114e-05,
      "loss": 0.5186,
      "step": 45
    },
    {
      "epoch": 0.05835095137420719,
      "grad_norm": 2.706481695175171,
      "learning_rate": 1.1645569620253165e-05,
      "loss": 0.5678,
      "step": 46
    },
    {
      "epoch": 0.059619450317124734,
      "grad_norm": 1.7402979135513306,
      "learning_rate": 1.189873417721519e-05,
      "loss": 0.5478,
      "step": 47
    },
    {
      "epoch": 0.06088794926004228,
      "grad_norm": 1.7591081857681274,
      "learning_rate": 1.2151898734177216e-05,
      "loss": 0.5296,
      "step": 48
    },
    {
      "epoch": 0.06215644820295983,
      "grad_norm": 1.5506426095962524,
      "learning_rate": 1.240506329113924e-05,
      "loss": 0.5186,
      "step": 49
    },
    {
      "epoch": 0.06342494714587738,
      "grad_norm": 1.4400081634521484,
      "learning_rate": 1.2658227848101268e-05,
      "loss": 0.5178,
      "step": 50
    },
    {
      "epoch": 0.06469344608879493,
      "grad_norm": 1.6742987632751465,
      "learning_rate": 1.2911392405063293e-05,
      "loss": 0.5777,
      "step": 51
    },
    {
      "epoch": 0.06596194503171247,
      "grad_norm": 1.9172340631484985,
      "learning_rate": 1.3164556962025317e-05,
      "loss": 0.5675,
      "step": 52
    },
    {
      "epoch": 0.06723044397463002,
      "grad_norm": 2.08730149269104,
      "learning_rate": 1.3417721518987344e-05,
      "loss": 0.5832,
      "step": 53
    },
    {
      "epoch": 0.06849894291754757,
      "grad_norm": 1.4685484170913696,
      "learning_rate": 1.3670886075949368e-05,
      "loss": 0.4869,
      "step": 54
    },
    {
      "epoch": 0.06976744186046512,
      "grad_norm": 1.7338310480117798,
      "learning_rate": 1.3924050632911395e-05,
      "loss": 0.5132,
      "step": 55
    },
    {
      "epoch": 0.07103594080338266,
      "grad_norm": 2.538879156112671,
      "learning_rate": 1.417721518987342e-05,
      "loss": 0.5726,
      "step": 56
    },
    {
      "epoch": 0.07230443974630021,
      "grad_norm": 2.6859829425811768,
      "learning_rate": 1.4430379746835444e-05,
      "loss": 0.5126,
      "step": 57
    },
    {
      "epoch": 0.07357293868921776,
      "grad_norm": 2.0791189670562744,
      "learning_rate": 1.468354430379747e-05,
      "loss": 0.5027,
      "step": 58
    },
    {
      "epoch": 0.07484143763213531,
      "grad_norm": 2.305135726928711,
      "learning_rate": 1.4936708860759495e-05,
      "loss": 0.5724,
      "step": 59
    },
    {
      "epoch": 0.07610993657505286,
      "grad_norm": 1.8028602600097656,
      "learning_rate": 1.5189873417721521e-05,
      "loss": 0.5141,
      "step": 60
    },
    {
      "epoch": 0.07610993657505286,
      "eval_accuracy": 0.7626931567328918,
      "eval_f1": 0.5454545454545454,
      "eval_loss": 0.502124547958374,
      "eval_precision": 0.589041095890411,
      "eval_recall": 0.5078740157480315,
      "eval_runtime": 52.5689,
      "eval_samples_per_second": 5.269,
      "eval_steps_per_second": 0.171,
      "step": 60
    },
    {
      "epoch": 0.0773784355179704,
      "grad_norm": 2.500967025756836,
      "learning_rate": 1.5443037974683546e-05,
      "loss": 0.4764,
      "step": 61
    },
    {
      "epoch": 0.07864693446088795,
      "grad_norm": 1.9441800117492676,
      "learning_rate": 1.5696202531645572e-05,
      "loss": 0.575,
      "step": 62
    },
    {
      "epoch": 0.0799154334038055,
      "grad_norm": 2.2089622020721436,
      "learning_rate": 1.5949367088607598e-05,
      "loss": 0.4854,
      "step": 63
    },
    {
      "epoch": 0.08118393234672304,
      "grad_norm": 1.727460503578186,
      "learning_rate": 1.620253164556962e-05,
      "loss": 0.4829,
      "step": 64
    },
    {
      "epoch": 0.0824524312896406,
      "grad_norm": 2.3789401054382324,
      "learning_rate": 1.6455696202531647e-05,
      "loss": 0.5193,
      "step": 65
    },
    {
      "epoch": 0.08372093023255814,
      "grad_norm": 1.817589282989502,
      "learning_rate": 1.6708860759493674e-05,
      "loss": 0.5115,
      "step": 66
    },
    {
      "epoch": 0.08498942917547568,
      "grad_norm": 1.985190987586975,
      "learning_rate": 1.6962025316455696e-05,
      "loss": 0.5309,
      "step": 67
    },
    {
      "epoch": 0.08625792811839324,
      "grad_norm": 2.5799453258514404,
      "learning_rate": 1.7215189873417723e-05,
      "loss": 0.455,
      "step": 68
    },
    {
      "epoch": 0.08752642706131078,
      "grad_norm": 1.9298819303512573,
      "learning_rate": 1.746835443037975e-05,
      "loss": 0.4207,
      "step": 69
    },
    {
      "epoch": 0.08879492600422834,
      "grad_norm": 1.6359901428222656,
      "learning_rate": 1.7721518987341772e-05,
      "loss": 0.4287,
      "step": 70
    },
    {
      "epoch": 0.09006342494714588,
      "grad_norm": 3.6526267528533936,
      "learning_rate": 1.7974683544303798e-05,
      "loss": 0.5659,
      "step": 71
    },
    {
      "epoch": 0.09133192389006342,
      "grad_norm": 2.615048408508301,
      "learning_rate": 1.8227848101265824e-05,
      "loss": 0.4652,
      "step": 72
    },
    {
      "epoch": 0.09260042283298098,
      "grad_norm": 2.702597141265869,
      "learning_rate": 1.848101265822785e-05,
      "loss": 0.404,
      "step": 73
    },
    {
      "epoch": 0.09386892177589852,
      "grad_norm": 2.3155453205108643,
      "learning_rate": 1.8734177215189874e-05,
      "loss": 0.4769,
      "step": 74
    },
    {
      "epoch": 0.09513742071881606,
      "grad_norm": 2.9285926818847656,
      "learning_rate": 1.89873417721519e-05,
      "loss": 0.4264,
      "step": 75
    },
    {
      "epoch": 0.09640591966173362,
      "grad_norm": 1.856951117515564,
      "learning_rate": 1.9240506329113926e-05,
      "loss": 0.4323,
      "step": 76
    },
    {
      "epoch": 0.09767441860465116,
      "grad_norm": 1.961795449256897,
      "learning_rate": 1.949367088607595e-05,
      "loss": 0.3662,
      "step": 77
    },
    {
      "epoch": 0.0989429175475687,
      "grad_norm": 2.0264902114868164,
      "learning_rate": 1.974683544303798e-05,
      "loss": 0.3677,
      "step": 78
    },
    {
      "epoch": 0.10021141649048626,
      "grad_norm": 2.0433084964752197,
      "learning_rate": 2e-05,
      "loss": 0.4287,
      "step": 79
    },
    {
      "epoch": 0.1014799154334038,
      "grad_norm": 2.14385724067688,
      "learning_rate": 1.999990183050232e-05,
      "loss": 0.3594,
      "step": 80
    },
    {
      "epoch": 0.1014799154334038,
      "eval_accuracy": 0.7980132450331126,
      "eval_f1": 0.5413533834586466,
      "eval_loss": 0.4426968991756439,
      "eval_precision": 0.7448275862068966,
      "eval_recall": 0.4251968503937008,
      "eval_runtime": 50.9956,
      "eval_samples_per_second": 5.432,
      "eval_steps_per_second": 0.176,
      "step": 80
    },
    {
      "epoch": 0.10274841437632136,
      "grad_norm": 3.1786880493164062,
      "learning_rate": 1.9999607323936722e-05,
      "loss": 0.4351,
      "step": 81
    },
    {
      "epoch": 0.1040169133192389,
      "grad_norm": 2.1636154651641846,
      "learning_rate": 1.9999116486085527e-05,
      "loss": 0.3564,
      "step": 82
    },
    {
      "epoch": 0.10528541226215644,
      "grad_norm": 2.686142921447754,
      "learning_rate": 1.999842932658579e-05,
      "loss": 0.3642,
      "step": 83
    },
    {
      "epoch": 0.106553911205074,
      "grad_norm": 3.7086920738220215,
      "learning_rate": 1.9997545858929135e-05,
      "loss": 0.3724,
      "step": 84
    },
    {
      "epoch": 0.10782241014799154,
      "grad_norm": 3.1358773708343506,
      "learning_rate": 1.9996466100461473e-05,
      "loss": 0.302,
      "step": 85
    },
    {
      "epoch": 0.10909090909090909,
      "grad_norm": 5.590456962585449,
      "learning_rate": 1.9995190072382676e-05,
      "loss": 0.3492,
      "step": 86
    },
    {
      "epoch": 0.11035940803382664,
      "grad_norm": 3.6015288829803467,
      "learning_rate": 1.9993717799746152e-05,
      "loss": 0.3645,
      "step": 87
    },
    {
      "epoch": 0.11162790697674418,
      "grad_norm": 4.678368091583252,
      "learning_rate": 1.9992049311458353e-05,
      "loss": 0.254,
      "step": 88
    },
    {
      "epoch": 0.11289640591966173,
      "grad_norm": 3.184845447540283,
      "learning_rate": 1.9990184640278212e-05,
      "loss": 0.3374,
      "step": 89
    },
    {
      "epoch": 0.11416490486257928,
      "grad_norm": 2.6212780475616455,
      "learning_rate": 1.998812382281649e-05,
      "loss": 0.3546,
      "step": 90
    },
    {
      "epoch": 0.11543340380549683,
      "grad_norm": 4.838207721710205,
      "learning_rate": 1.9985866899535076e-05,
      "loss": 0.3734,
      "step": 91
    },
    {
      "epoch": 0.11670190274841438,
      "grad_norm": 3.615828037261963,
      "learning_rate": 1.9983413914746175e-05,
      "loss": 0.3111,
      "step": 92
    },
    {
      "epoch": 0.11797040169133192,
      "grad_norm": 3.7412872314453125,
      "learning_rate": 1.998076491661144e-05,
      "loss": 0.3374,
      "step": 93
    },
    {
      "epoch": 0.11923890063424947,
      "grad_norm": 5.049579620361328,
      "learning_rate": 1.997791995714104e-05,
      "loss": 0.3831,
      "step": 94
    },
    {
      "epoch": 0.12050739957716702,
      "grad_norm": 3.293745279312134,
      "learning_rate": 1.9974879092192618e-05,
      "loss": 0.2982,
      "step": 95
    },
    {
      "epoch": 0.12177589852008457,
      "grad_norm": 4.374386310577393,
      "learning_rate": 1.9971642381470207e-05,
      "loss": 0.367,
      "step": 96
    },
    {
      "epoch": 0.12304439746300211,
      "grad_norm": 4.193471431732178,
      "learning_rate": 1.996820988852307e-05,
      "loss": 0.3096,
      "step": 97
    },
    {
      "epoch": 0.12431289640591967,
      "grad_norm": 4.823442459106445,
      "learning_rate": 1.9964581680744425e-05,
      "loss": 0.3464,
      "step": 98
    },
    {
      "epoch": 0.12558139534883722,
      "grad_norm": 4.056851387023926,
      "learning_rate": 1.9960757829370138e-05,
      "loss": 0.3194,
      "step": 99
    },
    {
      "epoch": 0.12684989429175475,
      "grad_norm": 5.191165924072266,
      "learning_rate": 1.995673840947732e-05,
      "loss": 0.3988,
      "step": 100
    },
    {
      "epoch": 0.12684989429175475,
      "eval_accuracy": 0.8245033112582781,
      "eval_f1": 0.6310904872389791,
      "eval_loss": 0.40671199560165405,
      "eval_precision": 0.768361581920904,
      "eval_recall": 0.5354330708661418,
      "eval_runtime": 50.9945,
      "eval_samples_per_second": 5.432,
      "eval_steps_per_second": 0.176,
      "step": 100
    },
    {
      "epoch": 0.1281183932346723,
      "grad_norm": 3.31948184967041,
      "learning_rate": 1.9952523499982866e-05,
      "loss": 0.2718,
      "step": 101
    },
    {
      "epoch": 0.12938689217758986,
      "grad_norm": 3.6124682426452637,
      "learning_rate": 1.9948113183641875e-05,
      "loss": 0.3701,
      "step": 102
    },
    {
      "epoch": 0.1306553911205074,
      "grad_norm": 4.924780368804932,
      "learning_rate": 1.9943507547046063e-05,
      "loss": 0.3369,
      "step": 103
    },
    {
      "epoch": 0.13192389006342495,
      "grad_norm": 2.997985363006592,
      "learning_rate": 1.9938706680622035e-05,
      "loss": 0.3062,
      "step": 104
    },
    {
      "epoch": 0.1331923890063425,
      "grad_norm": 3.6868772506713867,
      "learning_rate": 1.9933710678629512e-05,
      "loss": 0.332,
      "step": 105
    },
    {
      "epoch": 0.13446088794926003,
      "grad_norm": 3.6897387504577637,
      "learning_rate": 1.9928519639159508e-05,
      "loss": 0.3083,
      "step": 106
    },
    {
      "epoch": 0.1357293868921776,
      "grad_norm": 3.886378049850464,
      "learning_rate": 1.992313366413236e-05,
      "loss": 0.3331,
      "step": 107
    },
    {
      "epoch": 0.13699788583509515,
      "grad_norm": 4.580509662628174,
      "learning_rate": 1.9917552859295764e-05,
      "loss": 0.2677,
      "step": 108
    },
    {
      "epoch": 0.13826638477801267,
      "grad_norm": 3.936422348022461,
      "learning_rate": 1.9911777334222682e-05,
      "loss": 0.3305,
      "step": 109
    },
    {
      "epoch": 0.13953488372093023,
      "grad_norm": 3.6973016262054443,
      "learning_rate": 1.9905807202309196e-05,
      "loss": 0.319,
      "step": 110
    },
    {
      "epoch": 0.1408033826638478,
      "grad_norm": 3.311084270477295,
      "learning_rate": 1.9899642580772274e-05,
      "loss": 0.3307,
      "step": 111
    },
    {
      "epoch": 0.14207188160676532,
      "grad_norm": 6.772805690765381,
      "learning_rate": 1.9893283590647473e-05,
      "loss": 0.2844,
      "step": 112
    },
    {
      "epoch": 0.14334038054968287,
      "grad_norm": 3.7769129276275635,
      "learning_rate": 1.9886730356786567e-05,
      "loss": 0.3657,
      "step": 113
    },
    {
      "epoch": 0.14460887949260043,
      "grad_norm": 3.9703691005706787,
      "learning_rate": 1.9879983007855095e-05,
      "loss": 0.3125,
      "step": 114
    },
    {
      "epoch": 0.14587737843551796,
      "grad_norm": 4.326747417449951,
      "learning_rate": 1.9873041676329825e-05,
      "loss": 0.3402,
      "step": 115
    },
    {
      "epoch": 0.1471458773784355,
      "grad_norm": 3.051367998123169,
      "learning_rate": 1.9865906498496164e-05,
      "loss": 0.3178,
      "step": 116
    },
    {
      "epoch": 0.14841437632135307,
      "grad_norm": 3.126115083694458,
      "learning_rate": 1.9858577614445476e-05,
      "loss": 0.2708,
      "step": 117
    },
    {
      "epoch": 0.14968287526427063,
      "grad_norm": 3.873002052307129,
      "learning_rate": 1.9851055168072334e-05,
      "loss": 0.3772,
      "step": 118
    },
    {
      "epoch": 0.15095137420718815,
      "grad_norm": 4.09826135635376,
      "learning_rate": 1.9843339307071697e-05,
      "loss": 0.3596,
      "step": 119
    },
    {
      "epoch": 0.1522198731501057,
      "grad_norm": 3.2259912490844727,
      "learning_rate": 1.9835430182935998e-05,
      "loss": 0.3205,
      "step": 120
    },
    {
      "epoch": 0.1522198731501057,
      "eval_accuracy": 0.8200883002207505,
      "eval_f1": 0.5914786967418546,
      "eval_loss": 0.3738110363483429,
      "eval_precision": 0.8137931034482758,
      "eval_recall": 0.4645669291338583,
      "eval_runtime": 52.2662,
      "eval_samples_per_second": 5.3,
      "eval_steps_per_second": 0.172,
      "step": 120
    },
    {
      "epoch": 0.15348837209302327,
      "grad_norm": 3.804227590560913,
      "learning_rate": 1.9827327950952197e-05,
      "loss": 0.339,
      "step": 121
    },
    {
      "epoch": 0.1547568710359408,
      "grad_norm": 4.679238796234131,
      "learning_rate": 1.9819032770198692e-05,
      "loss": 0.3301,
      "step": 122
    },
    {
      "epoch": 0.15602536997885835,
      "grad_norm": 3.627534866333008,
      "learning_rate": 1.981054480354223e-05,
      "loss": 0.3087,
      "step": 123
    },
    {
      "epoch": 0.1572938689217759,
      "grad_norm": 3.4424080848693848,
      "learning_rate": 1.9801864217634703e-05,
      "loss": 0.3192,
      "step": 124
    },
    {
      "epoch": 0.15856236786469344,
      "grad_norm": 4.500999450683594,
      "learning_rate": 1.9792991182909857e-05,
      "loss": 0.3274,
      "step": 125
    },
    {
      "epoch": 0.159830866807611,
      "grad_norm": 3.3262839317321777,
      "learning_rate": 1.9783925873579967e-05,
      "loss": 0.306,
      "step": 126
    },
    {
      "epoch": 0.16109936575052855,
      "grad_norm": 4.585197448730469,
      "learning_rate": 1.9774668467632397e-05,
      "loss": 0.3007,
      "step": 127
    },
    {
      "epoch": 0.16236786469344608,
      "grad_norm": 4.023516654968262,
      "learning_rate": 1.976521914682614e-05,
      "loss": 0.2867,
      "step": 128
    },
    {
      "epoch": 0.16363636363636364,
      "grad_norm": 2.462892770767212,
      "learning_rate": 1.97555780966882e-05,
      "loss": 0.2486,
      "step": 129
    },
    {
      "epoch": 0.1649048625792812,
      "grad_norm": 3.1057772636413574,
      "learning_rate": 1.974574550650999e-05,
      "loss": 0.2761,
      "step": 130
    },
    {
      "epoch": 0.16617336152219872,
      "grad_norm": 4.618252754211426,
      "learning_rate": 1.97357215693436e-05,
      "loss": 0.2376,
      "step": 131
    },
    {
      "epoch": 0.16744186046511628,
      "grad_norm": 3.48673152923584,
      "learning_rate": 1.9725506481998003e-05,
      "loss": 0.2945,
      "step": 132
    },
    {
      "epoch": 0.16871035940803383,
      "grad_norm": 2.9228079319000244,
      "learning_rate": 1.9715100445035197e-05,
      "loss": 0.307,
      "step": 133
    },
    {
      "epoch": 0.16997885835095136,
      "grad_norm": 6.0457611083984375,
      "learning_rate": 1.970450366276627e-05,
      "loss": 0.2083,
      "step": 134
    },
    {
      "epoch": 0.17124735729386892,
      "grad_norm": 2.6761562824249268,
      "learning_rate": 1.9693716343247374e-05,
      "loss": 0.2134,
      "step": 135
    },
    {
      "epoch": 0.17251585623678647,
      "grad_norm": 4.628048419952393,
      "learning_rate": 1.9682738698275663e-05,
      "loss": 0.2501,
      "step": 136
    },
    {
      "epoch": 0.173784355179704,
      "grad_norm": 4.1137590408325195,
      "learning_rate": 1.9671570943385118e-05,
      "loss": 0.2737,
      "step": 137
    },
    {
      "epoch": 0.17505285412262156,
      "grad_norm": 4.245756149291992,
      "learning_rate": 1.9660213297842304e-05,
      "loss": 0.3059,
      "step": 138
    },
    {
      "epoch": 0.17632135306553912,
      "grad_norm": 4.939450263977051,
      "learning_rate": 1.9648665984642104e-05,
      "loss": 0.2831,
      "step": 139
    },
    {
      "epoch": 0.17758985200845667,
      "grad_norm": 4.256004333496094,
      "learning_rate": 1.96369292305033e-05,
      "loss": 0.3026,
      "step": 140
    },
    {
      "epoch": 0.17758985200845667,
      "eval_accuracy": 0.8289183222958058,
      "eval_f1": 0.6191646191646192,
      "eval_loss": 0.36796802282333374,
      "eval_precision": 0.8235294117647058,
      "eval_recall": 0.49606299212598426,
      "eval_runtime": 52.4513,
      "eval_samples_per_second": 5.281,
      "eval_steps_per_second": 0.172,
      "step": 140
    },
    {
      "epoch": 0.1788583509513742,
      "grad_norm": 3.7234766483306885,
      "learning_rate": 1.9625003265864148e-05,
      "loss": 0.2006,
      "step": 141
    },
    {
      "epoch": 0.18012684989429176,
      "grad_norm": 4.156630039215088,
      "learning_rate": 1.9612888324877834e-05,
      "loss": 0.2776,
      "step": 142
    },
    {
      "epoch": 0.1813953488372093,
      "grad_norm": 3.76343035697937,
      "learning_rate": 1.9600584645407897e-05,
      "loss": 0.3226,
      "step": 143
    },
    {
      "epoch": 0.18266384778012684,
      "grad_norm": 4.899490833282471,
      "learning_rate": 1.958809246902354e-05,
      "loss": 0.2916,
      "step": 144
    },
    {
      "epoch": 0.1839323467230444,
      "grad_norm": 3.958932638168335,
      "learning_rate": 1.9575412040994902e-05,
      "loss": 0.2549,
      "step": 145
    },
    {
      "epoch": 0.18520084566596196,
      "grad_norm": 2.987992525100708,
      "learning_rate": 1.956254361028823e-05,
      "loss": 0.2549,
      "step": 146
    },
    {
      "epoch": 0.18646934460887948,
      "grad_norm": 4.582171440124512,
      "learning_rate": 1.9549487429561003e-05,
      "loss": 0.3249,
      "step": 147
    },
    {
      "epoch": 0.18773784355179704,
      "grad_norm": 3.215688705444336,
      "learning_rate": 1.953624375515696e-05,
      "loss": 0.2555,
      "step": 148
    },
    {
      "epoch": 0.1890063424947146,
      "grad_norm": 4.125192642211914,
      "learning_rate": 1.9522812847101077e-05,
      "loss": 0.3004,
      "step": 149
    },
    {
      "epoch": 0.19027484143763213,
      "grad_norm": 4.248358726501465,
      "learning_rate": 1.9509194969094447e-05,
      "loss": 0.2206,
      "step": 150
    },
    {
      "epoch": 0.19154334038054968,
      "grad_norm": 5.806552886962891,
      "learning_rate": 1.9495390388509123e-05,
      "loss": 0.2926,
      "step": 151
    },
    {
      "epoch": 0.19281183932346724,
      "grad_norm": 3.595050096511841,
      "learning_rate": 1.948139937638285e-05,
      "loss": 0.2746,
      "step": 152
    },
    {
      "epoch": 0.19408033826638477,
      "grad_norm": 4.2712836265563965,
      "learning_rate": 1.9467222207413763e-05,
      "loss": 0.275,
      "step": 153
    },
    {
      "epoch": 0.19534883720930232,
      "grad_norm": 3.809781074523926,
      "learning_rate": 1.945285915995496e-05,
      "loss": 0.2291,
      "step": 154
    },
    {
      "epoch": 0.19661733615221988,
      "grad_norm": 3.5505263805389404,
      "learning_rate": 1.9438310516009084e-05,
      "loss": 0.2637,
      "step": 155
    },
    {
      "epoch": 0.1978858350951374,
      "grad_norm": 2.8975532054901123,
      "learning_rate": 1.9423576561222742e-05,
      "loss": 0.2066,
      "step": 156
    },
    {
      "epoch": 0.19915433403805496,
      "grad_norm": 4.945555686950684,
      "learning_rate": 1.9408657584880924e-05,
      "loss": 0.2785,
      "step": 157
    },
    {
      "epoch": 0.20042283298097252,
      "grad_norm": 6.6838274002075195,
      "learning_rate": 1.9393553879901314e-05,
      "loss": 0.3395,
      "step": 158
    },
    {
      "epoch": 0.20169133192389008,
      "grad_norm": 4.039582252502441,
      "learning_rate": 1.9378265742828533e-05,
      "loss": 0.3043,
      "step": 159
    },
    {
      "epoch": 0.2029598308668076,
      "grad_norm": 3.475989580154419,
      "learning_rate": 1.9362793473828338e-05,
      "loss": 0.2886,
      "step": 160
    },
    {
      "epoch": 0.2029598308668076,
      "eval_accuracy": 0.8432671081677704,
      "eval_f1": 0.6536585365853659,
      "eval_loss": 0.34669193625450134,
      "eval_precision": 0.8589743589743589,
      "eval_recall": 0.5275590551181102,
      "eval_runtime": 52.3104,
      "eval_samples_per_second": 5.295,
      "eval_steps_per_second": 0.172,
      "step": 160
    },
    {
      "epoch": 0.20422832980972516,
      "grad_norm": 5.376806735992432,
      "learning_rate": 1.9347137376681692e-05,
      "loss": 0.2629,
      "step": 161
    },
    {
      "epoch": 0.20549682875264272,
      "grad_norm": 5.16301155090332,
      "learning_rate": 1.933129775877884e-05,
      "loss": 0.3097,
      "step": 162
    },
    {
      "epoch": 0.20676532769556025,
      "grad_norm": 6.509466648101807,
      "learning_rate": 1.9315274931113253e-05,
      "loss": 0.2566,
      "step": 163
    },
    {
      "epoch": 0.2080338266384778,
      "grad_norm": 4.453103065490723,
      "learning_rate": 1.929906920827551e-05,
      "loss": 0.2863,
      "step": 164
    },
    {
      "epoch": 0.20930232558139536,
      "grad_norm": 3.955231189727783,
      "learning_rate": 1.9282680908447152e-05,
      "loss": 0.3102,
      "step": 165
    },
    {
      "epoch": 0.2105708245243129,
      "grad_norm": 5.5910258293151855,
      "learning_rate": 1.926611035339441e-05,
      "loss": 0.2271,
      "step": 166
    },
    {
      "epoch": 0.21183932346723044,
      "grad_norm": 4.199539661407471,
      "learning_rate": 1.9249357868461896e-05,
      "loss": 0.27,
      "step": 167
    },
    {
      "epoch": 0.213107822410148,
      "grad_norm": 3.3116366863250732,
      "learning_rate": 1.9232423782566217e-05,
      "loss": 0.3221,
      "step": 168
    },
    {
      "epoch": 0.21437632135306553,
      "grad_norm": 4.038525104522705,
      "learning_rate": 1.9215308428189514e-05,
      "loss": 0.2448,
      "step": 169
    },
    {
      "epoch": 0.2156448202959831,
      "grad_norm": 4.98528528213501,
      "learning_rate": 1.9198012141372934e-05,
      "loss": 0.317,
      "step": 170
    },
    {
      "epoch": 0.21691331923890064,
      "grad_norm": 3.0479323863983154,
      "learning_rate": 1.918053526171004e-05,
      "loss": 0.2867,
      "step": 171
    },
    {
      "epoch": 0.21818181818181817,
      "grad_norm": 3.0250513553619385,
      "learning_rate": 1.916287813234012e-05,
      "loss": 0.1996,
      "step": 172
    },
    {
      "epoch": 0.21945031712473573,
      "grad_norm": 2.8498682975769043,
      "learning_rate": 1.914504109994149e-05,
      "loss": 0.2383,
      "step": 173
    },
    {
      "epoch": 0.22071881606765328,
      "grad_norm": 3.0769264698028564,
      "learning_rate": 1.912702451472465e-05,
      "loss": 0.2481,
      "step": 174
    },
    {
      "epoch": 0.2219873150105708,
      "grad_norm": 3.5798423290252686,
      "learning_rate": 1.910882873042542e-05,
      "loss": 0.2883,
      "step": 175
    },
    {
      "epoch": 0.22325581395348837,
      "grad_norm": 4.30120849609375,
      "learning_rate": 1.9090454104298003e-05,
      "loss": 0.2893,
      "step": 176
    },
    {
      "epoch": 0.22452431289640593,
      "grad_norm": 4.033363342285156,
      "learning_rate": 1.907190099710796e-05,
      "loss": 0.2408,
      "step": 177
    },
    {
      "epoch": 0.22579281183932345,
      "grad_norm": 6.6475348472595215,
      "learning_rate": 1.9053169773125143e-05,
      "loss": 0.2549,
      "step": 178
    },
    {
      "epoch": 0.227061310782241,
      "grad_norm": 5.1474199295043945,
      "learning_rate": 1.903426080011651e-05,
      "loss": 0.2493,
      "step": 179
    },
    {
      "epoch": 0.22832980972515857,
      "grad_norm": 4.56413459777832,
      "learning_rate": 1.901517444933894e-05,
      "loss": 0.2345,
      "step": 180
    },
    {
      "epoch": 0.22832980972515857,
      "eval_accuracy": 0.8587196467991169,
      "eval_f1": 0.7253218884120172,
      "eval_loss": 0.3289457857608795,
      "eval_precision": 0.7971698113207547,
      "eval_recall": 0.6653543307086615,
      "eval_runtime": 51.489,
      "eval_samples_per_second": 5.38,
      "eval_steps_per_second": 0.175,
      "step": 180
    },
    {
      "epoch": 0.22959830866807612,
      "grad_norm": 6.834506988525391,
      "learning_rate": 1.8995911095531932e-05,
      "loss": 0.2355,
      "step": 181
    },
    {
      "epoch": 0.23086680761099365,
      "grad_norm": 6.830471038818359,
      "learning_rate": 1.8976471116910236e-05,
      "loss": 0.2727,
      "step": 182
    },
    {
      "epoch": 0.2321353065539112,
      "grad_norm": 4.903042316436768,
      "learning_rate": 1.895685489515644e-05,
      "loss": 0.25,
      "step": 183
    },
    {
      "epoch": 0.23340380549682876,
      "grad_norm": 3.5585076808929443,
      "learning_rate": 1.8937062815413466e-05,
      "loss": 0.2276,
      "step": 184
    },
    {
      "epoch": 0.2346723044397463,
      "grad_norm": 3.2517757415771484,
      "learning_rate": 1.891709526627703e-05,
      "loss": 0.2492,
      "step": 185
    },
    {
      "epoch": 0.23594080338266385,
      "grad_norm": 5.2381181716918945,
      "learning_rate": 1.8896952639787977e-05,
      "loss": 0.1813,
      "step": 186
    },
    {
      "epoch": 0.2372093023255814,
      "grad_norm": 6.287901401519775,
      "learning_rate": 1.8876635331424616e-05,
      "loss": 0.2281,
      "step": 187
    },
    {
      "epoch": 0.23847780126849893,
      "grad_norm": 4.748634338378906,
      "learning_rate": 1.8856143740094938e-05,
      "loss": 0.2654,
      "step": 188
    },
    {
      "epoch": 0.2397463002114165,
      "grad_norm": 7.864667892456055,
      "learning_rate": 1.8835478268128784e-05,
      "loss": 0.2791,
      "step": 189
    },
    {
      "epoch": 0.24101479915433405,
      "grad_norm": 5.30699348449707,
      "learning_rate": 1.881463932126996e-05,
      "loss": 0.2772,
      "step": 190
    },
    {
      "epoch": 0.24228329809725158,
      "grad_norm": 4.305821895599365,
      "learning_rate": 1.879362730866825e-05,
      "loss": 0.2552,
      "step": 191
    },
    {
      "epoch": 0.24355179704016913,
      "grad_norm": 9.22236442565918,
      "learning_rate": 1.8772442642871405e-05,
      "loss": 0.3735,
      "step": 192
    },
    {
      "epoch": 0.2448202959830867,
      "grad_norm": 3.3223769664764404,
      "learning_rate": 1.8751085739817017e-05,
      "loss": 0.2447,
      "step": 193
    },
    {
      "epoch": 0.24608879492600422,
      "grad_norm": 4.844041347503662,
      "learning_rate": 1.8729557018824382e-05,
      "loss": 0.3255,
      "step": 194
    },
    {
      "epoch": 0.24735729386892177,
      "grad_norm": 3.9604134559631348,
      "learning_rate": 1.8707856902586244e-05,
      "loss": 0.2706,
      "step": 195
    },
    {
      "epoch": 0.24862579281183933,
      "grad_norm": 3.333854913711548,
      "learning_rate": 1.8685985817160503e-05,
      "loss": 0.2665,
      "step": 196
    },
    {
      "epoch": 0.24989429175475686,
      "grad_norm": 2.807447910308838,
      "learning_rate": 1.8663944191961853e-05,
      "loss": 0.2699,
      "step": 197
    },
    {
      "epoch": 0.25116279069767444,
      "grad_norm": 4.559386730194092,
      "learning_rate": 1.864173245975335e-05,
      "loss": 0.2557,
      "step": 198
    },
    {
      "epoch": 0.25243128964059197,
      "grad_norm": 3.0040392875671387,
      "learning_rate": 1.861935105663791e-05,
      "loss": 0.2343,
      "step": 199
    },
    {
      "epoch": 0.2536997885835095,
      "grad_norm": 3.234032154083252,
      "learning_rate": 1.8596800422049756e-05,
      "loss": 0.2964,
      "step": 200
    },
    {
      "epoch": 0.2536997885835095,
      "eval_accuracy": 0.8377483443708609,
      "eval_f1": 0.6388206388206388,
      "eval_loss": 0.33216196298599243,
      "eval_precision": 0.8496732026143791,
      "eval_recall": 0.5118110236220472,
      "eval_runtime": 51.0562,
      "eval_samples_per_second": 5.425,
      "eval_steps_per_second": 0.176,
      "step": 200
    },
    {
      "epoch": 0.2549682875264271,
      "grad_norm": 2.9896929264068604,
      "learning_rate": 1.8574080998745785e-05,
      "loss": 0.2425,
      "step": 201
    },
    {
      "epoch": 0.2562367864693446,
      "grad_norm": 4.189578056335449,
      "learning_rate": 1.8551193232796862e-05,
      "loss": 0.274,
      "step": 202
    },
    {
      "epoch": 0.25750528541226214,
      "grad_norm": 3.2401742935180664,
      "learning_rate": 1.852813757357909e-05,
      "loss": 0.2548,
      "step": 203
    },
    {
      "epoch": 0.2587737843551797,
      "grad_norm": 3.4946937561035156,
      "learning_rate": 1.8504914473764973e-05,
      "loss": 0.2924,
      "step": 204
    },
    {
      "epoch": 0.26004228329809725,
      "grad_norm": 2.9049434661865234,
      "learning_rate": 1.8481524389314508e-05,
      "loss": 0.2132,
      "step": 205
    },
    {
      "epoch": 0.2613107822410148,
      "grad_norm": 3.942204475402832,
      "learning_rate": 1.845796777946627e-05,
      "loss": 0.251,
      "step": 206
    },
    {
      "epoch": 0.26257928118393237,
      "grad_norm": 3.098745822906494,
      "learning_rate": 1.8434245106728367e-05,
      "loss": 0.2698,
      "step": 207
    },
    {
      "epoch": 0.2638477801268499,
      "grad_norm": 3.4184677600860596,
      "learning_rate": 1.8410356836869374e-05,
      "loss": 0.2239,
      "step": 208
    },
    {
      "epoch": 0.2651162790697674,
      "grad_norm": 2.8000166416168213,
      "learning_rate": 1.8386303438909182e-05,
      "loss": 0.2144,
      "step": 209
    },
    {
      "epoch": 0.266384778012685,
      "grad_norm": 4.3039937019348145,
      "learning_rate": 1.8362085385109788e-05,
      "loss": 0.2572,
      "step": 210
    },
    {
      "epoch": 0.26765327695560254,
      "grad_norm": 3.855637311935425,
      "learning_rate": 1.8337703150966025e-05,
      "loss": 0.2392,
      "step": 211
    },
    {
      "epoch": 0.26892177589852007,
      "grad_norm": 3.2118887901306152,
      "learning_rate": 1.8313157215196237e-05,
      "loss": 0.21,
      "step": 212
    },
    {
      "epoch": 0.27019027484143765,
      "grad_norm": 4.038395881652832,
      "learning_rate": 1.8288448059732852e-05,
      "loss": 0.1917,
      "step": 213
    },
    {
      "epoch": 0.2714587737843552,
      "grad_norm": 6.29147481918335,
      "learning_rate": 1.8263576169712955e-05,
      "loss": 0.2542,
      "step": 214
    },
    {
      "epoch": 0.2727272727272727,
      "grad_norm": 6.392573356628418,
      "learning_rate": 1.823854203346872e-05,
      "loss": 0.2953,
      "step": 215
    },
    {
      "epoch": 0.2739957716701903,
      "grad_norm": 5.477741718292236,
      "learning_rate": 1.8213346142517884e-05,
      "loss": 0.3066,
      "step": 216
    },
    {
      "epoch": 0.2752642706131078,
      "grad_norm": 7.538582801818848,
      "learning_rate": 1.8187988991554027e-05,
      "loss": 0.303,
      "step": 217
    },
    {
      "epoch": 0.27653276955602535,
      "grad_norm": 4.740074634552002,
      "learning_rate": 1.8162471078436903e-05,
      "loss": 0.2752,
      "step": 218
    },
    {
      "epoch": 0.27780126849894293,
      "grad_norm": 3.831221580505371,
      "learning_rate": 1.8136792904182657e-05,
      "loss": 0.2749,
      "step": 219
    },
    {
      "epoch": 0.27906976744186046,
      "grad_norm": 3.5587270259857178,
      "learning_rate": 1.8110954972953984e-05,
      "loss": 0.2655,
      "step": 220
    },
    {
      "epoch": 0.27906976744186046,
      "eval_accuracy": 0.8278145695364238,
      "eval_f1": 0.5979381443298969,
      "eval_loss": 0.3495219349861145,
      "eval_precision": 0.8656716417910447,
      "eval_recall": 0.4566929133858268,
      "eval_runtime": 51.5039,
      "eval_samples_per_second": 5.378,
      "eval_steps_per_second": 0.175,
      "step": 220
    },
    {
      "epoch": 0.280338266384778,
      "grad_norm": 4.122521877288818,
      "learning_rate": 1.8084957792050226e-05,
      "loss": 0.2141,
      "step": 221
    },
    {
      "epoch": 0.2816067653276956,
      "grad_norm": 3.4336111545562744,
      "learning_rate": 1.8058801871897423e-05,
      "loss": 0.1997,
      "step": 222
    },
    {
      "epoch": 0.2828752642706131,
      "grad_norm": 4.062242031097412,
      "learning_rate": 1.803248772603828e-05,
      "loss": 0.2811,
      "step": 223
    },
    {
      "epoch": 0.28414376321353063,
      "grad_norm": 4.814560413360596,
      "learning_rate": 1.8006015871122105e-05,
      "loss": 0.1923,
      "step": 224
    },
    {
      "epoch": 0.2854122621564482,
      "grad_norm": 3.1431543827056885,
      "learning_rate": 1.797938682689462e-05,
      "loss": 0.223,
      "step": 225
    },
    {
      "epoch": 0.28668076109936574,
      "grad_norm": 4.914591312408447,
      "learning_rate": 1.7952601116187822e-05,
      "loss": 0.2928,
      "step": 226
    },
    {
      "epoch": 0.2879492600422833,
      "grad_norm": 2.977471113204956,
      "learning_rate": 1.7925659264909657e-05,
      "loss": 0.2562,
      "step": 227
    },
    {
      "epoch": 0.28921775898520086,
      "grad_norm": 5.882057189941406,
      "learning_rate": 1.7898561802033717e-05,
      "loss": 0.2599,
      "step": 228
    },
    {
      "epoch": 0.2904862579281184,
      "grad_norm": 7.490647792816162,
      "learning_rate": 1.7871309259588884e-05,
      "loss": 0.2616,
      "step": 229
    },
    {
      "epoch": 0.2917547568710359,
      "grad_norm": 8.199599266052246,
      "learning_rate": 1.784390217264882e-05,
      "loss": 0.2665,
      "step": 230
    },
    {
      "epoch": 0.2930232558139535,
      "grad_norm": 2.947110414505005,
      "learning_rate": 1.781634107932153e-05,
      "loss": 0.183,
      "step": 231
    },
    {
      "epoch": 0.294291754756871,
      "grad_norm": 5.069705486297607,
      "learning_rate": 1.7788626520738745e-05,
      "loss": 0.2423,
      "step": 232
    },
    {
      "epoch": 0.2955602536997886,
      "grad_norm": 3.6471478939056396,
      "learning_rate": 1.776075904104533e-05,
      "loss": 0.24,
      "step": 233
    },
    {
      "epoch": 0.29682875264270614,
      "grad_norm": 3.3612494468688965,
      "learning_rate": 1.773273918738857e-05,
      "loss": 0.2007,
      "step": 234
    },
    {
      "epoch": 0.29809725158562367,
      "grad_norm": 3.5518970489501953,
      "learning_rate": 1.7704567509907468e-05,
      "loss": 0.2526,
      "step": 235
    },
    {
      "epoch": 0.29936575052854125,
      "grad_norm": 5.473038673400879,
      "learning_rate": 1.7676244561721905e-05,
      "loss": 0.2802,
      "step": 236
    },
    {
      "epoch": 0.3006342494714588,
      "grad_norm": 4.366296768188477,
      "learning_rate": 1.7647770898921798e-05,
      "loss": 0.2698,
      "step": 237
    },
    {
      "epoch": 0.3019027484143763,
      "grad_norm": 6.12213659286499,
      "learning_rate": 1.7619147080556188e-05,
      "loss": 0.2266,
      "step": 238
    },
    {
      "epoch": 0.3031712473572939,
      "grad_norm": 4.8955864906311035,
      "learning_rate": 1.759037366862224e-05,
      "loss": 0.2574,
      "step": 239
    },
    {
      "epoch": 0.3044397463002114,
      "grad_norm": 4.285499572753906,
      "learning_rate": 1.7561451228054247e-05,
      "loss": 0.3252,
      "step": 240
    },
    {
      "epoch": 0.3044397463002114,
      "eval_accuracy": 0.8454746136865342,
      "eval_f1": 0.6713615023474179,
      "eval_loss": 0.31889277696609497,
      "eval_precision": 0.8313953488372093,
      "eval_recall": 0.562992125984252,
      "eval_runtime": 51.2215,
      "eval_samples_per_second": 5.408,
      "eval_steps_per_second": 0.176,
      "step": 240
    },
    {
      "epoch": 0.30570824524312895,
      "grad_norm": 3.903001308441162,
      "learning_rate": 1.7532380326712487e-05,
      "loss": 0.2511,
      "step": 241
    },
    {
      "epoch": 0.30697674418604654,
      "grad_norm": 5.383487701416016,
      "learning_rate": 1.7503161535372126e-05,
      "loss": 0.2861,
      "step": 242
    },
    {
      "epoch": 0.30824524312896406,
      "grad_norm": 5.267589569091797,
      "learning_rate": 1.7473795427711975e-05,
      "loss": 0.1947,
      "step": 243
    },
    {
      "epoch": 0.3095137420718816,
      "grad_norm": 6.931105613708496,
      "learning_rate": 1.7444282580303237e-05,
      "loss": 0.2462,
      "step": 244
    },
    {
      "epoch": 0.3107822410147992,
      "grad_norm": 7.340319633483887,
      "learning_rate": 1.74146235725982e-05,
      "loss": 0.2709,
      "step": 245
    },
    {
      "epoch": 0.3120507399577167,
      "grad_norm": 4.371819972991943,
      "learning_rate": 1.738481898691884e-05,
      "loss": 0.2763,
      "step": 246
    },
    {
      "epoch": 0.31331923890063423,
      "grad_norm": 4.855105400085449,
      "learning_rate": 1.7354869408445393e-05,
      "loss": 0.2306,
      "step": 247
    },
    {
      "epoch": 0.3145877378435518,
      "grad_norm": 2.958298683166504,
      "learning_rate": 1.7324775425204877e-05,
      "loss": 0.2072,
      "step": 248
    },
    {
      "epoch": 0.31585623678646935,
      "grad_norm": 3.649681806564331,
      "learning_rate": 1.729453762805954e-05,
      "loss": 0.2535,
      "step": 249
    },
    {
      "epoch": 0.3171247357293869,
      "grad_norm": 3.00809383392334,
      "learning_rate": 1.7264156610695247e-05,
      "loss": 0.2002,
      "step": 250
    },
    {
      "epoch": 0.31839323467230446,
      "grad_norm": 2.804321050643921,
      "learning_rate": 1.7233632969609844e-05,
      "loss": 0.2373,
      "step": 251
    },
    {
      "epoch": 0.319661733615222,
      "grad_norm": 4.483391761779785,
      "learning_rate": 1.7202967304101434e-05,
      "loss": 0.226,
      "step": 252
    },
    {
      "epoch": 0.3209302325581395,
      "grad_norm": 4.620050430297852,
      "learning_rate": 1.7172160216256607e-05,
      "loss": 0.2272,
      "step": 253
    },
    {
      "epoch": 0.3221987315010571,
      "grad_norm": 4.082276821136475,
      "learning_rate": 1.714121231093864e-05,
      "loss": 0.2021,
      "step": 254
    },
    {
      "epoch": 0.32346723044397463,
      "grad_norm": 4.720620632171631,
      "learning_rate": 1.7110124195775592e-05,
      "loss": 0.2471,
      "step": 255
    },
    {
      "epoch": 0.32473572938689216,
      "grad_norm": 3.695242166519165,
      "learning_rate": 1.707889648114839e-05,
      "loss": 0.2629,
      "step": 256
    },
    {
      "epoch": 0.32600422832980974,
      "grad_norm": 5.539944648742676,
      "learning_rate": 1.704752978017885e-05,
      "loss": 0.219,
      "step": 257
    },
    {
      "epoch": 0.32727272727272727,
      "grad_norm": 4.941290378570557,
      "learning_rate": 1.701602470871763e-05,
      "loss": 0.2,
      "step": 258
    },
    {
      "epoch": 0.3285412262156448,
      "grad_norm": 5.238450527191162,
      "learning_rate": 1.698438188533213e-05,
      "loss": 0.1941,
      "step": 259
    },
    {
      "epoch": 0.3298097251585624,
      "grad_norm": 3.7015936374664307,
      "learning_rate": 1.6952601931294372e-05,
      "loss": 0.2561,
      "step": 260
    },
    {
      "epoch": 0.3298097251585624,
      "eval_accuracy": 0.8532008830022075,
      "eval_f1": 0.6997742663656885,
      "eval_loss": 0.3227859139442444,
      "eval_precision": 0.8201058201058201,
      "eval_recall": 0.610236220472441,
      "eval_runtime": 51.1034,
      "eval_samples_per_second": 5.42,
      "eval_steps_per_second": 0.176,
      "step": 260
    },
    {
      "epoch": 0.3310782241014799,
      "grad_norm": 3.9863884449005127,
      "learning_rate": 1.6920685470568775e-05,
      "loss": 0.2591,
      "step": 261
    },
    {
      "epoch": 0.33234672304439744,
      "grad_norm": 3.281353712081909,
      "learning_rate": 1.6888633129799932e-05,
      "loss": 0.1717,
      "step": 262
    },
    {
      "epoch": 0.333615221987315,
      "grad_norm": 3.945847749710083,
      "learning_rate": 1.6856445538300273e-05,
      "loss": 0.2916,
      "step": 263
    },
    {
      "epoch": 0.33488372093023255,
      "grad_norm": 4.858996391296387,
      "learning_rate": 1.6824123328037737e-05,
      "loss": 0.2521,
      "step": 264
    },
    {
      "epoch": 0.3361522198731501,
      "grad_norm": 7.984918594360352,
      "learning_rate": 1.6791667133623354e-05,
      "loss": 0.2878,
      "step": 265
    },
    {
      "epoch": 0.33742071881606767,
      "grad_norm": 2.9552502632141113,
      "learning_rate": 1.675907759229879e-05,
      "loss": 0.2246,
      "step": 266
    },
    {
      "epoch": 0.3386892177589852,
      "grad_norm": 5.446991443634033,
      "learning_rate": 1.6726355343923816e-05,
      "loss": 0.2362,
      "step": 267
    },
    {
      "epoch": 0.3399577167019027,
      "grad_norm": 4.872383117675781,
      "learning_rate": 1.6693501030963775e-05,
      "loss": 0.2282,
      "step": 268
    },
    {
      "epoch": 0.3412262156448203,
      "grad_norm": 4.628652095794678,
      "learning_rate": 1.6660515298476943e-05,
      "loss": 0.2489,
      "step": 269
    },
    {
      "epoch": 0.34249471458773784,
      "grad_norm": 3.3619189262390137,
      "learning_rate": 1.6627398794101883e-05,
      "loss": 0.2158,
      "step": 270
    },
    {
      "epoch": 0.34376321353065536,
      "grad_norm": 4.453815937042236,
      "learning_rate": 1.659415216804471e-05,
      "loss": 0.267,
      "step": 271
    },
    {
      "epoch": 0.34503171247357295,
      "grad_norm": 4.466940879821777,
      "learning_rate": 1.6560776073066343e-05,
      "loss": 0.2187,
      "step": 272
    },
    {
      "epoch": 0.3463002114164905,
      "grad_norm": 4.6378397941589355,
      "learning_rate": 1.652727116446968e-05,
      "loss": 0.2104,
      "step": 273
    },
    {
      "epoch": 0.347568710359408,
      "grad_norm": 3.28175950050354,
      "learning_rate": 1.6493638100086726e-05,
      "loss": 0.2269,
      "step": 274
    },
    {
      "epoch": 0.3488372093023256,
      "grad_norm": 5.272284507751465,
      "learning_rate": 1.6459877540265686e-05,
      "loss": 0.257,
      "step": 275
    },
    {
      "epoch": 0.3501057082452431,
      "grad_norm": 2.739626407623291,
      "learning_rate": 1.6425990147858004e-05,
      "loss": 0.1847,
      "step": 276
    },
    {
      "epoch": 0.3513742071881607,
      "grad_norm": 4.007655620574951,
      "learning_rate": 1.639197658820534e-05,
      "loss": 0.2498,
      "step": 277
    },
    {
      "epoch": 0.35264270613107823,
      "grad_norm": 2.493644952774048,
      "learning_rate": 1.6357837529126502e-05,
      "loss": 0.1925,
      "step": 278
    },
    {
      "epoch": 0.35391120507399576,
      "grad_norm": 5.886753559112549,
      "learning_rate": 1.6323573640904352e-05,
      "loss": 0.2331,
      "step": 279
    },
    {
      "epoch": 0.35517970401691334,
      "grad_norm": 3.9009897708892822,
      "learning_rate": 1.628918559627262e-05,
      "loss": 0.1661,
      "step": 280
    },
    {
      "epoch": 0.35517970401691334,
      "eval_accuracy": 0.8498896247240618,
      "eval_f1": 0.6746411483253588,
      "eval_loss": 0.31411004066467285,
      "eval_precision": 0.8597560975609756,
      "eval_recall": 0.5551181102362205,
      "eval_runtime": 51.0464,
      "eval_samples_per_second": 5.426,
      "eval_steps_per_second": 0.176,
      "step": 280
    },
    {
      "epoch": 0.3564482029598309,
      "grad_norm": 5.003305912017822,
      "learning_rate": 1.625467407040273e-05,
      "loss": 0.2532,
      "step": 281
    },
    {
      "epoch": 0.3577167019027484,
      "grad_norm": 3.5860018730163574,
      "learning_rate": 1.6220039740890507e-05,
      "loss": 0.2194,
      "step": 282
    },
    {
      "epoch": 0.358985200845666,
      "grad_norm": 3.8231117725372314,
      "learning_rate": 1.6185283287742902e-05,
      "loss": 0.2183,
      "step": 283
    },
    {
      "epoch": 0.3602536997885835,
      "grad_norm": 5.111902236938477,
      "learning_rate": 1.615040539336462e-05,
      "loss": 0.2831,
      "step": 284
    },
    {
      "epoch": 0.36152219873150104,
      "grad_norm": 7.390100002288818,
      "learning_rate": 1.6115406742544736e-05,
      "loss": 0.2675,
      "step": 285
    },
    {
      "epoch": 0.3627906976744186,
      "grad_norm": 4.8447489738464355,
      "learning_rate": 1.6080288022443242e-05,
      "loss": 0.184,
      "step": 286
    },
    {
      "epoch": 0.36405919661733616,
      "grad_norm": 4.323414325714111,
      "learning_rate": 1.6045049922577567e-05,
      "loss": 0.2267,
      "step": 287
    },
    {
      "epoch": 0.3653276955602537,
      "grad_norm": 4.628475189208984,
      "learning_rate": 1.6009693134809018e-05,
      "loss": 0.2527,
      "step": 288
    },
    {
      "epoch": 0.36659619450317127,
      "grad_norm": 5.454392910003662,
      "learning_rate": 1.597421835332922e-05,
      "loss": 0.2165,
      "step": 289
    },
    {
      "epoch": 0.3678646934460888,
      "grad_norm": 3.9720702171325684,
      "learning_rate": 1.5938626274646457e-05,
      "loss": 0.2671,
      "step": 290
    },
    {
      "epoch": 0.3691331923890063,
      "grad_norm": 4.625940322875977,
      "learning_rate": 1.590291759757204e-05,
      "loss": 0.242,
      "step": 291
    },
    {
      "epoch": 0.3704016913319239,
      "grad_norm": 3.9591894149780273,
      "learning_rate": 1.5867093023206538e-05,
      "loss": 0.185,
      "step": 292
    },
    {
      "epoch": 0.37167019027484144,
      "grad_norm": 9.09524154663086,
      "learning_rate": 1.5831153254926048e-05,
      "loss": 0.2799,
      "step": 293
    },
    {
      "epoch": 0.37293868921775897,
      "grad_norm": 3.936962127685547,
      "learning_rate": 1.579509899836837e-05,
      "loss": 0.2206,
      "step": 294
    },
    {
      "epoch": 0.37420718816067655,
      "grad_norm": 3.293510913848877,
      "learning_rate": 1.5758930961419154e-05,
      "loss": 0.202,
      "step": 295
    },
    {
      "epoch": 0.3754756871035941,
      "grad_norm": 4.638730525970459,
      "learning_rate": 1.5722649854198007e-05,
      "loss": 0.2745,
      "step": 296
    },
    {
      "epoch": 0.3767441860465116,
      "grad_norm": 11.325010299682617,
      "learning_rate": 1.568625638904454e-05,
      "loss": 0.258,
      "step": 297
    },
    {
      "epoch": 0.3780126849894292,
      "grad_norm": 3.792722702026367,
      "learning_rate": 1.564975128050439e-05,
      "loss": 0.2376,
      "step": 298
    },
    {
      "epoch": 0.3792811839323467,
      "grad_norm": 4.620094299316406,
      "learning_rate": 1.5613135245315196e-05,
      "loss": 0.2361,
      "step": 299
    },
    {
      "epoch": 0.38054968287526425,
      "grad_norm": 4.535460948944092,
      "learning_rate": 1.557640900239251e-05,
      "loss": 0.1812,
      "step": 300
    },
    {
      "epoch": 0.38054968287526425,
      "eval_accuracy": 0.8300220750551877,
      "eval_f1": 0.6169154228855721,
      "eval_loss": 0.33304014801979065,
      "eval_precision": 0.8378378378378378,
      "eval_recall": 0.4881889763779528,
      "eval_runtime": 50.8332,
      "eval_samples_per_second": 5.449,
      "eval_steps_per_second": 0.177,
      "step": 300
    },
    {
      "epoch": 0.38181818181818183,
      "grad_norm": 3.9105491638183594,
      "learning_rate": 1.5539573272815698e-05,
      "loss": 0.2252,
      "step": 301
    },
    {
      "epoch": 0.38308668076109936,
      "grad_norm": 5.97371244430542,
      "learning_rate": 1.5502628779813774e-05,
      "loss": 0.3113,
      "step": 302
    },
    {
      "epoch": 0.3843551797040169,
      "grad_norm": 2.844496965408325,
      "learning_rate": 1.54655762487512e-05,
      "loss": 0.1796,
      "step": 303
    },
    {
      "epoch": 0.3856236786469345,
      "grad_norm": 5.323549270629883,
      "learning_rate": 1.542841640711365e-05,
      "loss": 0.2381,
      "step": 304
    },
    {
      "epoch": 0.386892177589852,
      "grad_norm": 3.897357940673828,
      "learning_rate": 1.539114998449372e-05,
      "loss": 0.2174,
      "step": 305
    },
    {
      "epoch": 0.38816067653276953,
      "grad_norm": 3.345202922821045,
| "learning_rate": 1.5353777712576613e-05, | |
| "loss": 0.2236, | |
| "step": 306 | |
| }, | |
| { | |
| "epoch": 0.3894291754756871, | |
| "grad_norm": 4.982361316680908, | |
| "learning_rate": 1.531630032512575e-05, | |
| "loss": 0.2484, | |
| "step": 307 | |
| }, | |
| { | |
| "epoch": 0.39069767441860465, | |
| "grad_norm": 3.730459213256836, | |
| "learning_rate": 1.5278718557968405e-05, | |
| "loss": 0.2516, | |
| "step": 308 | |
| }, | |
| { | |
| "epoch": 0.3919661733615222, | |
| "grad_norm": 5.202997207641602, | |
| "learning_rate": 1.5241033148981209e-05, | |
| "loss": 0.2951, | |
| "step": 309 | |
| }, | |
| { | |
| "epoch": 0.39323467230443976, | |
| "grad_norm": 3.5829482078552246, | |
| "learning_rate": 1.5203244838075702e-05, | |
| "loss": 0.2491, | |
| "step": 310 | |
| }, | |
| { | |
| "epoch": 0.3945031712473573, | |
| "grad_norm": 6.405603408813477, | |
| "learning_rate": 1.5165354367183778e-05, | |
| "loss": 0.2377, | |
| "step": 311 | |
| }, | |
| { | |
| "epoch": 0.3957716701902748, | |
| "grad_norm": 4.980127334594727, | |
| "learning_rate": 1.5127362480243139e-05, | |
| "loss": 0.1729, | |
| "step": 312 | |
| }, | |
| { | |
| "epoch": 0.3970401691331924, | |
| "grad_norm": 3.1744191646575928, | |
| "learning_rate": 1.508926992318268e-05, | |
| "loss": 0.2174, | |
| "step": 313 | |
| }, | |
| { | |
| "epoch": 0.39830866807610993, | |
| "grad_norm": 5.434924125671387, | |
| "learning_rate": 1.505107744390783e-05, | |
| "loss": 0.2544, | |
| "step": 314 | |
| }, | |
| { | |
| "epoch": 0.39957716701902746, | |
| "grad_norm": 4.3313212394714355, | |
| "learning_rate": 1.5012785792285901e-05, | |
| "loss": 0.229, | |
| "step": 315 | |
| }, | |
| { | |
| "epoch": 0.40084566596194504, | |
| "grad_norm": 3.850680351257324, | |
| "learning_rate": 1.4974395720131328e-05, | |
| "loss": 0.2952, | |
| "step": 316 | |
| }, | |
| { | |
| "epoch": 0.40211416490486257, | |
| "grad_norm": 3.698664665222168, | |
| "learning_rate": 1.4935907981190931e-05, | |
| "loss": 0.2758, | |
| "step": 317 | |
| }, | |
| { | |
| "epoch": 0.40338266384778015, | |
| "grad_norm": 4.980349540710449, | |
| "learning_rate": 1.4897323331129113e-05, | |
| "loss": 0.2672, | |
| "step": 318 | |
| }, | |
| { | |
| "epoch": 0.4046511627906977, | |
| "grad_norm": 4.738137245178223, | |
| "learning_rate": 1.4858642527513009e-05, | |
| "loss": 0.2359, | |
| "step": 319 | |
| }, | |
| { | |
| "epoch": 0.4059196617336152, | |
| "grad_norm": 5.489634990692139, | |
| "learning_rate": 1.4819866329797639e-05, | |
| "loss": 0.3265, | |
| "step": 320 | |
| }, | |
| { | |
| "epoch": 0.4059196617336152, | |
| "eval_accuracy": 0.8543046357615894, | |
| "eval_f1": 0.7, | |
| "eval_loss": 0.2961479127407074, | |
| "eval_precision": 0.8279569892473119, | |
| "eval_recall": 0.6062992125984252, | |
| "eval_runtime": 51.0166, | |
| "eval_samples_per_second": 5.43, | |
| "eval_steps_per_second": 0.176, | |
| "step": 320 | |
| }, | |
| { | |
| "epoch": 0.4071881606765328, | |
| "grad_norm": 3.285163164138794, | |
| "learning_rate": 1.4780995499310973e-05, | |
| "loss": 0.1567, | |
| "step": 321 | |
| }, | |
| { | |
| "epoch": 0.4084566596194503, | |
| "grad_norm": 3.621706485748291, | |
| "learning_rate": 1.4742030799238989e-05, | |
| "loss": 0.2204, | |
| "step": 322 | |
| }, | |
| { | |
| "epoch": 0.40972515856236785, | |
| "grad_norm": 4.652901649475098, | |
| "learning_rate": 1.4702972994610694e-05, | |
| "loss": 0.1918, | |
| "step": 323 | |
| }, | |
| { | |
| "epoch": 0.41099365750528544, | |
| "grad_norm": 3.95822811126709, | |
| "learning_rate": 1.46638228522831e-05, | |
| "loss": 0.2537, | |
| "step": 324 | |
| }, | |
| { | |
| "epoch": 0.41226215644820297, | |
| "grad_norm": 6.265679836273193, | |
| "learning_rate": 1.4624581140926169e-05, | |
| "loss": 0.2054, | |
| "step": 325 | |
| }, | |
| { | |
| "epoch": 0.4135306553911205, | |
| "grad_norm": 3.0921337604522705, | |
| "learning_rate": 1.458524863100772e-05, | |
| "loss": 0.2118, | |
| "step": 326 | |
| }, | |
| { | |
| "epoch": 0.4147991543340381, | |
| "grad_norm": 3.311448812484741, | |
| "learning_rate": 1.45458260947783e-05, | |
| "loss": 0.1814, | |
| "step": 327 | |
| }, | |
| { | |
| "epoch": 0.4160676532769556, | |
| "grad_norm": 4.024649620056152, | |
| "learning_rate": 1.4506314306256025e-05, | |
| "loss": 0.2323, | |
| "step": 328 | |
| }, | |
| { | |
| "epoch": 0.41733615221987314, | |
| "grad_norm": 4.3980536460876465, | |
| "learning_rate": 1.4466714041211383e-05, | |
| "loss": 0.2106, | |
| "step": 329 | |
| }, | |
| { | |
| "epoch": 0.4186046511627907, | |
| "grad_norm": 5.472320079803467, | |
| "learning_rate": 1.4427026077151998e-05, | |
| "loss": 0.2769, | |
| "step": 330 | |
| }, | |
| { | |
| "epoch": 0.41987315010570825, | |
| "grad_norm": 5.5313401222229, | |
| "learning_rate": 1.4387251193307369e-05, | |
| "loss": 0.2501, | |
| "step": 331 | |
| }, | |
| { | |
| "epoch": 0.4211416490486258, | |
| "grad_norm": 5.064823627471924, | |
| "learning_rate": 1.434739017061357e-05, | |
| "loss": 0.3063, | |
| "step": 332 | |
| }, | |
| { | |
| "epoch": 0.42241014799154336, | |
| "grad_norm": 7.245492458343506, | |
| "learning_rate": 1.4307443791697915e-05, | |
| "loss": 0.251, | |
| "step": 333 | |
| }, | |
| { | |
| "epoch": 0.4236786469344609, | |
| "grad_norm": 4.918938636779785, | |
| "learning_rate": 1.4267412840863597e-05, | |
| "loss": 0.2897, | |
| "step": 334 | |
| }, | |
| { | |
| "epoch": 0.4249471458773784, | |
| "grad_norm": 7.725310325622559, | |
| "learning_rate": 1.4227298104074279e-05, | |
| "loss": 0.2339, | |
| "step": 335 | |
| }, | |
| { | |
| "epoch": 0.426215644820296, | |
| "grad_norm": 3.9810047149658203, | |
| "learning_rate": 1.4187100368938678e-05, | |
| "loss": 0.2219, | |
| "step": 336 | |
| }, | |
| { | |
| "epoch": 0.42748414376321353, | |
| "grad_norm": 4.304353713989258, | |
| "learning_rate": 1.4146820424695082e-05, | |
| "loss": 0.2547, | |
| "step": 337 | |
| }, | |
| { | |
| "epoch": 0.42875264270613106, | |
| "grad_norm": 6.903933525085449, | |
| "learning_rate": 1.4106459062195873e-05, | |
| "loss": 0.2123, | |
| "step": 338 | |
| }, | |
| { | |
| "epoch": 0.43002114164904864, | |
| "grad_norm": 4.345834732055664, | |
| "learning_rate": 1.4066017073891988e-05, | |
| "loss": 0.2094, | |
| "step": 339 | |
| }, | |
| { | |
| "epoch": 0.4312896405919662, | |
| "grad_norm": 4.44146203994751, | |
| "learning_rate": 1.4025495253817359e-05, | |
| "loss": 0.2217, | |
| "step": 340 | |
| }, | |
| { | |
| "epoch": 0.4312896405919662, | |
| "eval_accuracy": 0.8664459161147903, | |
| "eval_f1": 0.7430997876857749, | |
| "eval_loss": 0.29700928926467896, | |
| "eval_precision": 0.8064516129032258, | |
| "eval_recall": 0.6889763779527559, | |
| "eval_runtime": 51.4883, | |
| "eval_samples_per_second": 5.38, | |
| "eval_steps_per_second": 0.175, | |
| "step": 340 | |
| }, | |
| { | |
| "epoch": 0.4325581395348837, | |
| "grad_norm": 4.186248779296875, | |
| "learning_rate": 1.3984894397573332e-05, | |
| "loss": 0.2687, | |
| "step": 341 | |
| }, | |
| { | |
| "epoch": 0.4338266384778013, | |
| "grad_norm": 4.99129056930542, | |
| "learning_rate": 1.3944215302313042e-05, | |
| "loss": 0.202, | |
| "step": 342 | |
| }, | |
| { | |
| "epoch": 0.4350951374207188, | |
| "grad_norm": 4.0888848304748535, | |
| "learning_rate": 1.3903458766725757e-05, | |
| "loss": 0.2868, | |
| "step": 343 | |
| }, | |
| { | |
| "epoch": 0.43636363636363634, | |
| "grad_norm": 6.3171515464782715, | |
| "learning_rate": 1.3862625591021201e-05, | |
| "loss": 0.2175, | |
| "step": 344 | |
| }, | |
| { | |
| "epoch": 0.4376321353065539, | |
| "grad_norm": 5.681170463562012, | |
| "learning_rate": 1.3821716576913841e-05, | |
| "loss": 0.2224, | |
| "step": 345 | |
| }, | |
| { | |
| "epoch": 0.43890063424947146, | |
| "grad_norm": 3.9973504543304443, | |
| "learning_rate": 1.3780732527607157e-05, | |
| "loss": 0.2135, | |
| "step": 346 | |
| }, | |
| { | |
| "epoch": 0.440169133192389, | |
| "grad_norm": 3.7838385105133057, | |
| "learning_rate": 1.3739674247777853e-05, | |
| "loss": 0.2475, | |
| "step": 347 | |
| }, | |
| { | |
| "epoch": 0.44143763213530657, | |
| "grad_norm": 3.6724958419799805, | |
| "learning_rate": 1.3698542543560068e-05, | |
| "loss": 0.2295, | |
| "step": 348 | |
| }, | |
| { | |
| "epoch": 0.4427061310782241, | |
| "grad_norm": 3.7347805500030518, | |
| "learning_rate": 1.3657338222529553e-05, | |
| "loss": 0.2553, | |
| "step": 349 | |
| }, | |
| { | |
| "epoch": 0.4439746300211416, | |
| "grad_norm": 4.058570384979248, | |
| "learning_rate": 1.3616062093687803e-05, | |
| "loss": 0.2698, | |
| "step": 350 | |
| }, | |
| { | |
| "epoch": 0.4452431289640592, | |
| "grad_norm": 2.770801067352295, | |
| "learning_rate": 1.3574714967446193e-05, | |
| "loss": 0.1499, | |
| "step": 351 | |
| }, | |
| { | |
| "epoch": 0.44651162790697674, | |
| "grad_norm": 5.817844867706299, | |
| "learning_rate": 1.3533297655610038e-05, | |
| "loss": 0.2548, | |
| "step": 352 | |
| }, | |
| { | |
| "epoch": 0.44778012684989427, | |
| "grad_norm": 3.270364999771118, | |
| "learning_rate": 1.3491810971362682e-05, | |
| "loss": 0.2621, | |
| "step": 353 | |
| }, | |
| { | |
| "epoch": 0.44904862579281185, | |
| "grad_norm": 4.292186260223389, | |
| "learning_rate": 1.3450255729249513e-05, | |
| "loss": 0.2542, | |
| "step": 354 | |
| }, | |
| { | |
| "epoch": 0.4503171247357294, | |
| "grad_norm": 3.6082029342651367, | |
| "learning_rate": 1.340863274516198e-05, | |
| "loss": 0.2861, | |
| "step": 355 | |
| }, | |
| { | |
| "epoch": 0.4515856236786469, | |
| "grad_norm": 5.060634136199951, | |
| "learning_rate": 1.3366942836321574e-05, | |
| "loss": 0.2482, | |
| "step": 356 | |
| }, | |
| { | |
| "epoch": 0.4528541226215645, | |
| "grad_norm": 3.972484827041626, | |
| "learning_rate": 1.3325186821263777e-05, | |
| "loss": 0.2121, | |
| "step": 357 | |
| }, | |
| { | |
| "epoch": 0.454122621564482, | |
| "grad_norm": 3.285651445388794, | |
| "learning_rate": 1.3283365519821987e-05, | |
| "loss": 0.1904, | |
| "step": 358 | |
| }, | |
| { | |
| "epoch": 0.4553911205073996, | |
| "grad_norm": 3.950873374938965, | |
| "learning_rate": 1.3241479753111441e-05, | |
| "loss": 0.2098, | |
| "step": 359 | |
| }, | |
| { | |
| "epoch": 0.45665961945031713, | |
| "grad_norm": 2.7246978282928467, | |
| "learning_rate": 1.3199530343513077e-05, | |
| "loss": 0.2058, | |
| "step": 360 | |
| }, | |
| { | |
| "epoch": 0.45665961945031713, | |
| "eval_accuracy": 0.8520971302428256, | |
| "eval_f1": 0.6912442396313364, | |
| "eval_loss": 0.3054248094558716, | |
| "eval_precision": 0.8333333333333334, | |
| "eval_recall": 0.5905511811023622, | |
| "eval_runtime": 50.5884, | |
| "eval_samples_per_second": 5.476, | |
| "eval_steps_per_second": 0.178, | |
| "step": 360 | |
| }, | |
| { | |
| "epoch": 0.45792811839323466, | |
| "grad_norm": 4.469756603240967, | |
| "learning_rate": 1.3157518114657382e-05, | |
| "loss": 0.2589, | |
| "step": 361 | |
| }, | |
| { | |
| "epoch": 0.45919661733615225, | |
| "grad_norm": 4.308989524841309, | |
| "learning_rate": 1.311544389140824e-05, | |
| "loss": 0.2859, | |
| "step": 362 | |
| }, | |
| { | |
| "epoch": 0.4604651162790698, | |
| "grad_norm": 3.6170761585235596, | |
| "learning_rate": 1.3073308499846722e-05, | |
| "loss": 0.2399, | |
| "step": 363 | |
| }, | |
| { | |
| "epoch": 0.4617336152219873, | |
| "grad_norm": 4.052624225616455, | |
| "learning_rate": 1.3031112767254874e-05, | |
| "loss": 0.272, | |
| "step": 364 | |
| }, | |
| { | |
| "epoch": 0.4630021141649049, | |
| "grad_norm": 3.574326992034912, | |
| "learning_rate": 1.2988857522099471e-05, | |
| "loss": 0.2331, | |
| "step": 365 | |
| }, | |
| { | |
| "epoch": 0.4642706131078224, | |
| "grad_norm": 3.4199771881103516, | |
| "learning_rate": 1.2946543594015752e-05, | |
| "loss": 0.2147, | |
| "step": 366 | |
| }, | |
| { | |
| "epoch": 0.46553911205073994, | |
| "grad_norm": 4.001230716705322, | |
| "learning_rate": 1.2904171813791127e-05, | |
| "loss": 0.2274, | |
| "step": 367 | |
| }, | |
| { | |
| "epoch": 0.46680761099365753, | |
| "grad_norm": 3.2300639152526855, | |
| "learning_rate": 1.2861743013348873e-05, | |
| "loss": 0.257, | |
| "step": 368 | |
| }, | |
| { | |
| "epoch": 0.46807610993657506, | |
| "grad_norm": 3.6694083213806152, | |
| "learning_rate": 1.2819258025731798e-05, | |
| "loss": 0.175, | |
| "step": 369 | |
| }, | |
| { | |
| "epoch": 0.4693446088794926, | |
| "grad_norm": 4.965548992156982, | |
| "learning_rate": 1.2776717685085878e-05, | |
| "loss": 0.2061, | |
| "step": 370 | |
| }, | |
| { | |
| "epoch": 0.47061310782241017, | |
| "grad_norm": 2.8742663860321045, | |
| "learning_rate": 1.2734122826643884e-05, | |
| "loss": 0.1796, | |
| "step": 371 | |
| }, | |
| { | |
| "epoch": 0.4718816067653277, | |
| "grad_norm": 5.175198078155518, | |
| "learning_rate": 1.2691474286708995e-05, | |
| "loss": 0.2637, | |
| "step": 372 | |
| }, | |
| { | |
| "epoch": 0.4731501057082452, | |
| "grad_norm": 3.802945613861084, | |
| "learning_rate": 1.2648772902638358e-05, | |
| "loss": 0.2745, | |
| "step": 373 | |
| }, | |
| { | |
| "epoch": 0.4744186046511628, | |
| "grad_norm": 3.0676567554473877, | |
| "learning_rate": 1.2606019512826655e-05, | |
| "loss": 0.1904, | |
| "step": 374 | |
| }, | |
| { | |
| "epoch": 0.47568710359408034, | |
| "grad_norm": 4.040560722351074, | |
| "learning_rate": 1.2563214956689646e-05, | |
| "loss": 0.2882, | |
| "step": 375 | |
| }, | |
| { | |
| "epoch": 0.47695560253699787, | |
| "grad_norm": 5.133072376251221, | |
| "learning_rate": 1.2520360074647686e-05, | |
| "loss": 0.1883, | |
| "step": 376 | |
| }, | |
| { | |
| "epoch": 0.47822410147991545, | |
| "grad_norm": 3.334303379058838, | |
| "learning_rate": 1.2477455708109228e-05, | |
| "loss": 0.2287, | |
| "step": 377 | |
| }, | |
| { | |
| "epoch": 0.479492600422833, | |
| "grad_norm": 3.445974349975586, | |
| "learning_rate": 1.243450269945429e-05, | |
| "loss": 0.2616, | |
| "step": 378 | |
| }, | |
| { | |
| "epoch": 0.4807610993657505, | |
| "grad_norm": 4.057299613952637, | |
| "learning_rate": 1.239150189201793e-05, | |
| "loss": 0.2113, | |
| "step": 379 | |
| }, | |
| { | |
| "epoch": 0.4820295983086681, | |
| "grad_norm": 3.4984474182128906, | |
| "learning_rate": 1.2348454130073679e-05, | |
| "loss": 0.225, | |
| "step": 380 | |
| }, | |
| { | |
| "epoch": 0.4820295983086681, | |
| "eval_accuracy": 0.8576158940397351, | |
| "eval_f1": 0.7006960556844548, | |
| "eval_loss": 0.30184322595596313, | |
| "eval_precision": 0.8531073446327684, | |
| "eval_recall": 0.594488188976378, | |
| "eval_runtime": 51.4684, | |
| "eval_samples_per_second": 5.382, | |
| "eval_steps_per_second": 0.175, | |
| "step": 380 | |
| }, | |
| { | |
| "epoch": 0.4832980972515856, | |
| "grad_norm": 5.044597625732422, | |
| "learning_rate": 1.2305360258816976e-05, | |
| "loss": 0.1806, | |
| "step": 381 | |
| }, | |
| { | |
| "epoch": 0.48456659619450315, | |
| "grad_norm": 5.378483295440674, | |
| "learning_rate": 1.2262221124348555e-05, | |
| "loss": 0.2141, | |
| "step": 382 | |
| }, | |
| { | |
| "epoch": 0.48583509513742074, | |
| "grad_norm": 2.9310171604156494, | |
| "learning_rate": 1.2219037573657848e-05, | |
| "loss": 0.2164, | |
| "step": 383 | |
| }, | |
| { | |
| "epoch": 0.48710359408033826, | |
| "grad_norm": 3.2981674671173096, | |
| "learning_rate": 1.2175810454606354e-05, | |
| "loss": 0.2024, | |
| "step": 384 | |
| }, | |
| { | |
| "epoch": 0.4883720930232558, | |
| "grad_norm": 2.9548192024230957, | |
| "learning_rate": 1.2132540615910984e-05, | |
| "loss": 0.1725, | |
| "step": 385 | |
| }, | |
| { | |
| "epoch": 0.4896405919661734, | |
| "grad_norm": 3.742029905319214, | |
| "learning_rate": 1.2089228907127403e-05, | |
| "loss": 0.2754, | |
| "step": 386 | |
| }, | |
| { | |
| "epoch": 0.4909090909090909, | |
| "grad_norm": 3.1863484382629395, | |
| "learning_rate": 1.2045876178633354e-05, | |
| "loss": 0.1871, | |
| "step": 387 | |
| }, | |
| { | |
| "epoch": 0.49217758985200843, | |
| "grad_norm": 3.8084633350372314, | |
| "learning_rate": 1.2002483281611945e-05, | |
| "loss": 0.2319, | |
| "step": 388 | |
| }, | |
| { | |
| "epoch": 0.493446088794926, | |
| "grad_norm": 5.892749786376953, | |
| "learning_rate": 1.1959051068034965e-05, | |
| "loss": 0.2589, | |
| "step": 389 | |
| }, | |
| { | |
| "epoch": 0.49471458773784355, | |
| "grad_norm": 3.523524045944214, | |
| "learning_rate": 1.1915580390646129e-05, | |
| "loss": 0.1949, | |
| "step": 390 | |
| }, | |
| { | |
| "epoch": 0.4959830866807611, | |
| "grad_norm": 3.9826090335845947, | |
| "learning_rate": 1.1872072102944345e-05, | |
| "loss": 0.202, | |
| "step": 391 | |
| }, | |
| { | |
| "epoch": 0.49725158562367866, | |
| "grad_norm": 4.0874810218811035, | |
| "learning_rate": 1.182852705916697e-05, | |
| "loss": 0.1757, | |
| "step": 392 | |
| }, | |
| { | |
| "epoch": 0.4985200845665962, | |
| "grad_norm": 3.664313316345215, | |
| "learning_rate": 1.1784946114273015e-05, | |
| "loss": 0.2164, | |
| "step": 393 | |
| }, | |
| { | |
| "epoch": 0.4997885835095137, | |
| "grad_norm": 3.309058427810669, | |
| "learning_rate": 1.1741330123926376e-05, | |
| "loss": 0.1956, | |
| "step": 394 | |
| }, | |
| { | |
| "epoch": 0.5010570824524313, | |
| "grad_norm": 4.173009872436523, | |
| "learning_rate": 1.1697679944479023e-05, | |
| "loss": 0.2445, | |
| "step": 395 | |
| }, | |
| { | |
| "epoch": 0.5023255813953489, | |
| "grad_norm": 3.826289653778076, | |
| "learning_rate": 1.1653996432954194e-05, | |
| "loss": 0.2082, | |
| "step": 396 | |
| }, | |
| { | |
| "epoch": 0.5035940803382664, | |
| "grad_norm": 4.404917240142822, | |
| "learning_rate": 1.161028044702957e-05, | |
| "loss": 0.2502, | |
| "step": 397 | |
| }, | |
| { | |
| "epoch": 0.5048625792811839, | |
| "grad_norm": 4.247323036193848, | |
| "learning_rate": 1.1566532845020422e-05, | |
| "loss": 0.1902, | |
| "step": 398 | |
| }, | |
| { | |
| "epoch": 0.5061310782241015, | |
| "grad_norm": 3.4806225299835205, | |
| "learning_rate": 1.1522754485862773e-05, | |
| "loss": 0.2221, | |
| "step": 399 | |
| }, | |
| { | |
| "epoch": 0.507399577167019, | |
| "grad_norm": 5.911139965057373, | |
| "learning_rate": 1.1478946229096532e-05, | |
| "loss": 0.2045, | |
| "step": 400 | |
| }, | |
| { | |
| "epoch": 0.507399577167019, | |
| "eval_accuracy": 0.8509933774834437, | |
| "eval_f1": 0.6731234866828087, | |
| "eval_loss": 0.3174249827861786, | |
| "eval_precision": 0.8742138364779874, | |
| "eval_recall": 0.547244094488189, | |
| "eval_runtime": 51.0242, | |
| "eval_samples_per_second": 5.429, | |
| "eval_steps_per_second": 0.176, | |
| "step": 400 | |
| }, | |
| { | |
| "epoch": 0.5086680761099366, | |
| "grad_norm": 4.422440528869629, | |
| "learning_rate": 1.1435108934848609e-05, | |
| "loss": 0.2941, | |
| "step": 401 | |
| }, | |
| { | |
| "epoch": 0.5099365750528542, | |
| "grad_norm": 3.987483024597168, | |
| "learning_rate": 1.1391243463816036e-05, | |
| "loss": 0.2109, | |
| "step": 402 | |
| }, | |
| { | |
| "epoch": 0.5112050739957716, | |
| "grad_norm": 4.6511640548706055, | |
| "learning_rate": 1.1347350677249063e-05, | |
| "loss": 0.2082, | |
| "step": 403 | |
| }, | |
| { | |
| "epoch": 0.5124735729386892, | |
| "grad_norm": 5.202207565307617, | |
| "learning_rate": 1.1303431436934254e-05, | |
| "loss": 0.1939, | |
| "step": 404 | |
| }, | |
| { | |
| "epoch": 0.5137420718816068, | |
| "grad_norm": 3.7034809589385986, | |
| "learning_rate": 1.125948660517756e-05, | |
| "loss": 0.1794, | |
| "step": 405 | |
| }, | |
| { | |
| "epoch": 0.5150105708245243, | |
| "grad_norm": 4.559153079986572, | |
| "learning_rate": 1.1215517044787387e-05, | |
| "loss": 0.2288, | |
| "step": 406 | |
| }, | |
| { | |
| "epoch": 0.5162790697674419, | |
| "grad_norm": 6.425766944885254, | |
| "learning_rate": 1.1171523619057678e-05, | |
| "loss": 0.3279, | |
| "step": 407 | |
| }, | |
| { | |
| "epoch": 0.5175475687103595, | |
| "grad_norm": 3.744201898574829, | |
| "learning_rate": 1.1127507191750926e-05, | |
| "loss": 0.1917, | |
| "step": 408 | |
| }, | |
| { | |
| "epoch": 0.5188160676532769, | |
| "grad_norm": 4.557126522064209, | |
| "learning_rate": 1.1083468627081241e-05, | |
| "loss": 0.2885, | |
| "step": 409 | |
| }, | |
| { | |
| "epoch": 0.5200845665961945, | |
| "grad_norm": 3.1752729415893555, | |
| "learning_rate": 1.1039408789697385e-05, | |
| "loss": 0.1734, | |
| "step": 410 | |
| }, | |
| { | |
| "epoch": 0.5213530655391121, | |
| "grad_norm": 3.41485595703125, | |
| "learning_rate": 1.0995328544665776e-05, | |
| "loss": 0.2242, | |
| "step": 411 | |
| }, | |
| { | |
| "epoch": 0.5226215644820296, | |
| "grad_norm": 3.8267111778259277, | |
| "learning_rate": 1.0951228757453514e-05, | |
| "loss": 0.1678, | |
| "step": 412 | |
| }, | |
| { | |
| "epoch": 0.5238900634249471, | |
| "grad_norm": 3.5933799743652344, | |
| "learning_rate": 1.0907110293911391e-05, | |
| "loss": 0.1935, | |
| "step": 413 | |
| }, | |
| { | |
| "epoch": 0.5251585623678647, | |
| "grad_norm": 4.974501609802246, | |
| "learning_rate": 1.086297402025689e-05, | |
| "loss": 0.2342, | |
| "step": 414 | |
| }, | |
| { | |
| "epoch": 0.5264270613107822, | |
| "grad_norm": 5.614207744598389, | |
| "learning_rate": 1.0818820803057172e-05, | |
| "loss": 0.1686, | |
| "step": 415 | |
| }, | |
| { | |
| "epoch": 0.5276955602536998, | |
| "grad_norm": 4.421863555908203, | |
| "learning_rate": 1.077465150921207e-05, | |
| "loss": 0.2413, | |
| "step": 416 | |
| }, | |
| { | |
| "epoch": 0.5289640591966174, | |
| "grad_norm": 3.7212207317352295, | |
| "learning_rate": 1.073046700593706e-05, | |
| "loss": 0.2188, | |
| "step": 417 | |
| }, | |
| { | |
| "epoch": 0.5302325581395348, | |
| "grad_norm": 3.744774580001831, | |
| "learning_rate": 1.068626816074624e-05, | |
| "loss": 0.2309, | |
| "step": 418 | |
| }, | |
| { | |
| "epoch": 0.5315010570824524, | |
| "grad_norm": 3.859792709350586, | |
| "learning_rate": 1.0642055841435297e-05, | |
| "loss": 0.1562, | |
| "step": 419 | |
| }, | |
| { | |
| "epoch": 0.53276955602537, | |
| "grad_norm": 7.018131256103516, | |
| "learning_rate": 1.0597830916064466e-05, | |
| "loss": 0.2368, | |
| "step": 420 | |
| }, | |
| { | |
| "epoch": 0.53276955602537, | |
| "eval_accuracy": 0.847682119205298, | |
| "eval_f1": 0.6698564593301436, | |
| "eval_loss": 0.315592885017395, | |
| "eval_precision": 0.8536585365853658, | |
| "eval_recall": 0.5511811023622047, | |
| "eval_runtime": 51.9783, | |
| "eval_samples_per_second": 5.329, | |
| "eval_steps_per_second": 0.173, | |
| "step": 420 | |
| }, | |
| { | |
| "epoch": 0.5340380549682875, | |
| "grad_norm": 5.954317569732666, | |
| "learning_rate": 1.0553594252941489e-05, | |
| "loss": 0.2667, | |
| "step": 421 | |
| }, | |
| { | |
| "epoch": 0.5353065539112051, | |
| "grad_norm": 2.7260313034057617, | |
| "learning_rate": 1.0509346720604568e-05, | |
| "loss": 0.1736, | |
| "step": 422 | |
| }, | |
| { | |
| "epoch": 0.5365750528541227, | |
| "grad_norm": 6.12712287902832, | |
| "learning_rate": 1.0465089187805305e-05, | |
| "loss": 0.2367, | |
| "step": 423 | |
| }, | |
| { | |
| "epoch": 0.5378435517970401, | |
| "grad_norm": 4.741700172424316, | |
| "learning_rate": 1.0420822523491651e-05, | |
| "loss": 0.2163, | |
| "step": 424 | |
| }, | |
| { | |
| "epoch": 0.5391120507399577, | |
| "grad_norm": 8.61501407623291, | |
| "learning_rate": 1.037654759679085e-05, | |
| "loss": 0.2217, | |
| "step": 425 | |
| }, | |
| { | |
| "epoch": 0.5403805496828753, | |
| "grad_norm": 4.60408353805542, | |
| "learning_rate": 1.0332265276992361e-05, | |
| "loss": 0.2595, | |
| "step": 426 | |
| }, | |
| { | |
| "epoch": 0.5416490486257928, | |
| "grad_norm": 5.619015693664551, | |
| "learning_rate": 1.0287976433530803e-05, | |
| "loss": 0.2045, | |
| "step": 427 | |
| }, | |
| { | |
| "epoch": 0.5429175475687104, | |
| "grad_norm": 6.060495376586914, | |
| "learning_rate": 1.0243681935968885e-05, | |
| "loss": 0.2218, | |
| "step": 428 | |
| }, | |
| { | |
| "epoch": 0.5441860465116279, | |
| "grad_norm": 4.551842212677002, | |
| "learning_rate": 1.0199382653980317e-05, | |
| "loss": 0.2108, | |
| "step": 429 | |
| }, | |
| { | |
| "epoch": 0.5454545454545454, | |
| "grad_norm": 6.0335564613342285, | |
| "learning_rate": 1.0155079457332747e-05, | |
| "loss": 0.2044, | |
| "step": 430 | |
| }, | |
| { | |
| "epoch": 0.546723044397463, | |
| "grad_norm": 3.9287242889404297, | |
| "learning_rate": 1.0110773215870695e-05, | |
| "loss": 0.1748, | |
| "step": 431 | |
| }, | |
| { | |
| "epoch": 0.5479915433403806, | |
| "grad_norm": 5.402927398681641, | |
| "learning_rate": 1.0066464799498452e-05, | |
| "loss": 0.2594, | |
| "step": 432 | |
| }, | |
| { | |
| "epoch": 0.5492600422832981, | |
| "grad_norm": 4.90135383605957, | |
| "learning_rate": 1.0022155078163012e-05, | |
| "loss": 0.1855, | |
| "step": 433 | |
| }, | |
| { | |
| "epoch": 0.5505285412262156, | |
| "grad_norm": 2.856055736541748, | |
| "learning_rate": 9.977844921836992e-06, | |
| "loss": 0.1365, | |
| "step": 434 | |
| }, | |
| { | |
| "epoch": 0.5517970401691332, | |
| "grad_norm": 5.056857109069824, | |
| "learning_rate": 9.933535200501552e-06, | |
| "loss": 0.1966, | |
| "step": 435 | |
| }, | |
| { | |
| "epoch": 0.5530655391120507, | |
| "grad_norm": 5.74174690246582, | |
| "learning_rate": 9.889226784129307e-06, | |
| "loss": 0.2867, | |
| "step": 436 | |
| }, | |
| { | |
| "epoch": 0.5543340380549683, | |
| "grad_norm": 4.972617149353027, | |
| "learning_rate": 9.844920542667254e-06, | |
| "loss": 0.2438, | |
| "step": 437 | |
| }, | |
| { | |
| "epoch": 0.5556025369978859, | |
| "grad_norm": 5.448729038238525, | |
| "learning_rate": 9.800617346019687e-06, | |
| "loss": 0.2221, | |
| "step": 438 | |
| }, | |
| { | |
| "epoch": 0.5568710359408033, | |
| "grad_norm": 4.500537395477295, | |
| "learning_rate": 9.756318064031118e-06, | |
| "loss": 0.2215, | |
| "step": 439 | |
| }, | |
| { | |
| "epoch": 0.5581395348837209, | |
| "grad_norm": 4.469764232635498, | |
| "learning_rate": 9.712023566469198e-06, | |
| "loss": 0.2162, | |
| "step": 440 | |
| }, | |
| { | |
| "epoch": 0.5581395348837209, | |
| "eval_accuracy": 0.8609271523178808, | |
| "eval_f1": 0.7136363636363636, | |
| "eval_loss": 0.2927682101726532, | |
| "eval_precision": 0.8440860215053764, | |
| "eval_recall": 0.6181102362204725, | |
| "eval_runtime": 52.7005, | |
| "eval_samples_per_second": 5.256, | |
| "eval_steps_per_second": 0.171, | |
| "step": 440 | |
| }, | |
| { | |
| "epoch": 0.5594080338266385, | |
| "grad_norm": 4.238883018493652, | |
| "learning_rate": 9.66773472300764e-06, | |
| "loss": 0.1607, | |
| "step": 441 | |
| }, | |
| { | |
| "epoch": 0.560676532769556, | |
| "grad_norm": 3.689420223236084, | |
| "learning_rate": 9.623452403209152e-06, | |
| "loss": 0.2105, | |
| "step": 442 | |
| }, | |
| { | |
| "epoch": 0.5619450317124736, | |
| "grad_norm": 3.286783456802368, | |
| "learning_rate": 9.579177476508352e-06, | |
| "loss": 0.2194, | |
| "step": 443 | |
| }, | |
| { | |
| "epoch": 0.5632135306553911, | |
| "grad_norm": 4.903306484222412, | |
| "learning_rate": 9.5349108121947e-06, | |
| "loss": 0.2095, | |
| "step": 444 | |
| }, | |
| { | |
| "epoch": 0.5644820295983086, | |
| "grad_norm": 3.390174627304077, | |
| "learning_rate": 9.490653279395436e-06, | |
| "loss": 0.1972, | |
| "step": 445 | |
| }, | |
| { | |
| "epoch": 0.5657505285412262, | |
| "grad_norm": 4.267228603363037, | |
| "learning_rate": 9.446405747058513e-06, | |
| "loss": 0.2665, | |
| "step": 446 | |
| }, | |
| { | |
| "epoch": 0.5670190274841438, | |
| "grad_norm": 4.4697675704956055, | |
| "learning_rate": 9.402169083935539e-06, | |
| "loss": 0.2574, | |
| "step": 447 | |
| }, | |
| { | |
| "epoch": 0.5682875264270613, | |
| "grad_norm": 4.458617210388184, | |
| "learning_rate": 9.357944158564708e-06, | |
| "loss": 0.1765, | |
| "step": 448 | |
| }, | |
| { | |
| "epoch": 0.5695560253699788, | |
| "grad_norm": 4.2114715576171875, | |
| "learning_rate": 9.313731839253766e-06, | |
| "loss": 0.2469, | |
| "step": 449 | |
| }, | |
| { | |
| "epoch": 0.5708245243128964, | |
| "grad_norm": 3.167578935623169, | |
| "learning_rate": 9.269532994062945e-06, | |
| "loss": 0.2217, | |
| "step": 450 | |
| }, | |
| { | |
| "epoch": 0.5720930232558139, | |
| "grad_norm": 3.5999159812927246, | |
| "learning_rate": 9.225348490787935e-06, | |
| "loss": 0.2433, | |
| "step": 451 | |
| }, | |
| { | |
| "epoch": 0.5733615221987315, | |
| "grad_norm": 4.339929580688477, | |
| "learning_rate": 9.181179196942831e-06, | |
| "loss": 0.2543, | |
| "step": 452 | |
| }, | |
| { | |
| "epoch": 0.5746300211416491, | |
| "grad_norm": 3.657891035079956, | |
| "learning_rate": 9.137025979743114e-06, | |
| "loss": 0.1899, | |
| "step": 453 | |
| }, | |
| { | |
| "epoch": 0.5758985200845665, | |
| "grad_norm": 3.0815606117248535, | |
| "learning_rate": 9.092889706088615e-06, | |
| "loss": 0.1921, | |
| "step": 454 | |
| }, | |
| { | |
| "epoch": 0.5771670190274841, | |
| "grad_norm": 3.4993326663970947, | |
| "learning_rate": 9.048771242546493e-06, | |
| "loss": 0.1811, | |
| "step": 455 | |
| }, | |
| { | |
| "epoch": 0.5784355179704017, | |
| "grad_norm": 3.1785824298858643, | |
| "learning_rate": 9.004671455334227e-06, | |
| "loss": 0.1451, | |
| "step": 456 | |
| }, | |
| { | |
| "epoch": 0.5797040169133192, | |
| "grad_norm": 4.485751152038574, | |
| "learning_rate": 8.960591210302616e-06, | |
| "loss": 0.2327, | |
| "step": 457 | |
| }, | |
| { | |
| "epoch": 0.5809725158562368, | |
| "grad_norm": 3.2318646907806396, | |
| "learning_rate": 8.91653137291876e-06, | |
| "loss": 0.2231, | |
| "step": 458 | |
| }, | |
| { | |
| "epoch": 0.5822410147991544, | |
| "grad_norm": 3.670243740081787, | |
| "learning_rate": 8.872492808249077e-06, | |
| "loss": 0.192, | |
| "step": 459 | |
| }, | |
| { | |
| "epoch": 0.5835095137420718, | |
| "grad_norm": 3.487762928009033, | |
| "learning_rate": 8.828476380942326e-06, | |
| "loss": 0.1664, | |
| "step": 460 | |
| }, | |
| { | |
| "epoch": 0.5835095137420718, | |
| "eval_accuracy": 0.8598233995584988, | |
| "eval_f1": 0.7146067415730337, | |
| "eval_loss": 0.29775571823120117, | |
| "eval_precision": 0.8324607329842932, | |
| "eval_recall": 0.6259842519685039, | |
| "eval_runtime": 52.4539, | |
| "eval_samples_per_second": 5.281, | |
| "eval_steps_per_second": 0.172, | |
| "step": 460 | |
| }, | |
| { | |
| "epoch": 0.5847780126849894, | |
| "grad_norm": 3.1561360359191895, | |
| "learning_rate": 8.784482955212615e-06, | |
| "loss": 0.1727, | |
| "step": 461 | |
| }, | |
| { | |
| "epoch": 0.586046511627907, | |
| "grad_norm": 4.375553131103516, | |
| "learning_rate": 8.740513394822442e-06, | |
| "loss": 0.2016, | |
| "step": 462 | |
| }, | |
| { | |
| "epoch": 0.5873150105708245, | |
| "grad_norm": 3.2783806324005127, | |
| "learning_rate": 8.696568563065748e-06, | |
| "loss": 0.1808, | |
| "step": 463 | |
| }, | |
| { | |
| "epoch": 0.588583509513742, | |
| "grad_norm": 5.312812328338623, | |
| "learning_rate": 8.652649322750937e-06, | |
| "loss": 0.2486, | |
| "step": 464 | |
| }, | |
| { | |
| "epoch": 0.5898520084566596, | |
| "grad_norm": 3.248567819595337, | |
| "learning_rate": 8.608756536183964e-06, | |
| "loss": 0.2222, | |
| "step": 465 | |
| }, | |
| { | |
| "epoch": 0.5911205073995772, | |
| "grad_norm": 4.083889007568359, | |
| "learning_rate": 8.564891065151391e-06, | |
| "loss": 0.2293, | |
| "step": 466 | |
| }, | |
| { | |
| "epoch": 0.5923890063424947, | |
| "grad_norm": 4.238209247589111, | |
| "learning_rate": 8.521053770903468e-06, | |
| "loss": 0.225, | |
| "step": 467 | |
| }, | |
| { | |
| "epoch": 0.5936575052854123, | |
| "grad_norm": 5.558248043060303, | |
| "learning_rate": 8.477245514137227e-06, | |
| "loss": 0.2204, | |
| "step": 468 | |
| }, | |
| { | |
| "epoch": 0.5949260042283299, | |
| "grad_norm": 4.5489420890808105, | |
| "learning_rate": 8.43346715497958e-06, | |
| "loss": 0.2116, | |
| "step": 469 | |
| }, | |
| { | |
| "epoch": 0.5961945031712473, | |
| "grad_norm": 3.887915849685669, | |
| "learning_rate": 8.389719552970432e-06, | |
| "loss": 0.1674, | |
| "step": 470 | |
| }, | |
| { | |
| "epoch": 0.5974630021141649, | |
| "grad_norm": 5.154160976409912, | |
| "learning_rate": 8.346003567045806e-06, | |
| "loss": 0.2238, | |
| "step": 471 | |
| }, | |
| { | |
| "epoch": 0.5987315010570825, | |
| "grad_norm": 4.397689342498779, | |
| "learning_rate": 8.30232005552098e-06, | |
| "loss": 0.2215, | |
| "step": 472 | |
| }, | |
| { | |
| "epoch": 0.6, | |
| "grad_norm": 5.090795040130615, | |
| "learning_rate": 8.258669876073626e-06, | |
| "loss": 0.1966, | |
| "step": 473 | |
| }, | |
| { | |
| "epoch": 0.6012684989429176, | |
| "grad_norm": 2.8848166465759277, | |
| "learning_rate": 8.215053885726986e-06, | |
| "loss": 0.1757, | |
| "step": 474 | |
| }, | |
| { | |
| "epoch": 0.6025369978858351, | |
| "grad_norm": 5.922531604766846, | |
| "learning_rate": 8.171472940833033e-06, | |
| "loss": 0.2721, | |
| "step": 475 | |
| }, | |
| { | |
| "epoch": 0.6038054968287526, | |
| "grad_norm": 3.6184239387512207, | |
| "learning_rate": 8.127927897055658e-06, | |
| "loss": 0.1644, | |
| "step": 476 | |
| }, | |
| { | |
| "epoch": 0.6050739957716702, | |
| "grad_norm": 3.496631145477295, | |
| "learning_rate": 8.084419609353875e-06, | |
| "loss": 0.1849, | |
| "step": 477 | |
| }, | |
| { | |
| "epoch": 0.6063424947145878, | |
| "grad_norm": 4.242512226104736, | |
| "learning_rate": 8.040948931965038e-06, | |
| "loss": 0.2105, | |
| "step": 478 | |
| }, | |
| { | |
| "epoch": 0.6076109936575053, | |
| "grad_norm": 3.4095211029052734, | |
| "learning_rate": 7.997516718388056e-06, | |
| "loss": 0.1628, | |
| "step": 479 | |
| }, | |
| { | |
| "epoch": 0.6088794926004228, | |
| "grad_norm": 5.149474143981934, | |
| "learning_rate": 7.95412382136665e-06, | |
| "loss": 0.2282, | |
| "step": 480 | |
| }, | |
| { | |
| "epoch": 0.6088794926004228, | |
| "eval_accuracy": 0.8587196467991169, | |
| "eval_f1": 0.7037037037037037, | |
| "eval_loss": 0.3031023144721985, | |
| "eval_precision": 0.8539325842696629, | |
| "eval_recall": 0.5984251968503937, | |
| "eval_runtime": 51.9749, | |
| "eval_samples_per_second": 5.329, | |
| "eval_steps_per_second": 0.173, | |
| "step": 480 | |
| }, | |
| { | |
| "epoch": 0.6101479915433404, | |
| "grad_norm": 4.154394149780273, | |
| "learning_rate": 7.910771092872598e-06, | |
| "loss": 0.2157, | |
| "step": 481 | |
| }, | |
| { | |
| "epoch": 0.6114164904862579, | |
| "grad_norm": 5.618874549865723, | |
| "learning_rate": 7.86745938408902e-06, | |
| "loss": 0.2325, | |
| "step": 482 | |
| }, | |
| { | |
| "epoch": 0.6126849894291755, | |
| "grad_norm": 4.56056547164917, | |
| "learning_rate": 7.82418954539365e-06, | |
| "loss": 0.2452, | |
| "step": 483 | |
| }, | |
| { | |
| "epoch": 0.6139534883720931, | |
| "grad_norm": 5.774714946746826, | |
| "learning_rate": 7.780962426342155e-06, | |
| "loss": 0.2435, | |
| "step": 484 | |
| }, | |
| { | |
| "epoch": 0.6152219873150105, | |
| "grad_norm": 5.51974630355835, | |
| "learning_rate": 7.737778875651448e-06, | |
| "loss": 0.1966, | |
| "step": 485 | |
| }, | |
| { | |
| "epoch": 0.6164904862579281, | |
| "grad_norm": 3.314197063446045, | |
| "learning_rate": 7.694639741183027e-06, | |
| "loss": 0.1392, | |
| "step": 486 | |
| }, | |
| { | |
| "epoch": 0.6177589852008457, | |
| "grad_norm": 4.631697654724121, | |
| "learning_rate": 7.651545869926323e-06, | |
| "loss": 0.2346, | |
| "step": 487 | |
| }, | |
| { | |
| "epoch": 0.6190274841437632, | |
| "grad_norm": 4.110776424407959, | |
| "learning_rate": 7.608498107982074e-06, | |
| "loss": 0.221, | |
| "step": 488 | |
| }, | |
| { | |
| "epoch": 0.6202959830866808, | |
| "grad_norm": 5.305356502532959, | |
| "learning_rate": 7.565497300545714e-06, | |
| "loss": 0.2169, | |
| "step": 489 | |
| }, | |
| { | |
| "epoch": 0.6215644820295984, | |
| "grad_norm": 4.338992595672607, | |
| "learning_rate": 7.5225442918907765e-06, | |
| "loss": 0.1901, | |
| "step": 490 | |
| }, | |
| { | |
| "epoch": 0.6228329809725158, | |
| "grad_norm": 6.824976921081543, | |
| "learning_rate": 7.479639925352318e-06, | |
| "loss": 0.2836, | |
| "step": 491 | |
| }, | |
| { | |
| "epoch": 0.6241014799154334, | |
| "grad_norm": 3.5278079509735107, | |
| "learning_rate": 7.436785043310357e-06, | |
| "loss": 0.1524, | |
| "step": 492 | |
| }, | |
| { | |
| "epoch": 0.625369978858351, | |
| "grad_norm": 4.220887184143066, | |
| "learning_rate": 7.393980487173349e-06, | |
| "loss": 0.253, | |
| "step": 493 | |
| }, | |
| { | |
| "epoch": 0.6266384778012685, | |
| "grad_norm": 4.364911079406738, | |
| "learning_rate": 7.351227097361645e-06, | |
| "loss": 0.215, | |
| "step": 494 | |
| }, | |
| { | |
| "epoch": 0.627906976744186, | |
| "grad_norm": 3.2984468936920166, | |
| "learning_rate": 7.308525713291006e-06, | |
| "loss": 0.2252, | |
| "step": 495 | |
| }, | |
| { | |
| "epoch": 0.6291754756871036, | |
| "grad_norm": 4.866385459899902, | |
| "learning_rate": 7.2658771733561175e-06, | |
| "loss": 0.2572, | |
| "step": 496 | |
| }, | |
| { | |
| "epoch": 0.6304439746300211, | |
| "grad_norm": 4.756896495819092, | |
| "learning_rate": 7.223282314914128e-06, | |
| "loss": 0.1821, | |
| "step": 497 | |
| }, | |
| { | |
| "epoch": 0.6317124735729387, | |
| "grad_norm": 3.7380802631378174, | |
| "learning_rate": 7.180741974268207e-06, | |
| "loss": 0.1703, | |
| "step": 498 | |
| }, | |
| { | |
| "epoch": 0.6329809725158563, | |
| "grad_norm": 3.99318265914917, | |
| "learning_rate": 7.138256986651131e-06, | |
| "loss": 0.2035, | |
| "step": 499 | |
| }, | |
| { | |
| "epoch": 0.6342494714587738, | |
| "grad_norm": 4.226932525634766, | |
| "learning_rate": 7.095828186208879e-06, | |
| "loss": 0.1983, | |
| "step": 500 | |
| }, | |
| { | |
| "epoch": 0.6342494714587738, | |
| "eval_accuracy": 0.8543046357615894, | |
| "eval_f1": 0.7040358744394619, | |
| "eval_loss": 0.2958061695098877, | |
| "eval_precision": 0.8177083333333334, | |
| "eval_recall": 0.6181102362204725, | |
| "eval_runtime": 51.9773, | |
| "eval_samples_per_second": 5.329, | |
| "eval_steps_per_second": 0.173, | |
| "step": 500 | |
| }, | |
| { | |
| "epoch": 0.6355179704016913, | |
| "grad_norm": 2.9807305335998535, | |
| "learning_rate": 7.053456405984254e-06, | |
| "loss": 0.2006, | |
| "step": 501 | |
| }, | |
| { | |
| "epoch": 0.6367864693446089, | |
| "grad_norm": 3.490462303161621, | |
| "learning_rate": 7.011142477900534e-06, | |
| "loss": 0.209, | |
| "step": 502 | |
| }, | |
| { | |
| "epoch": 0.6380549682875264, | |
| "grad_norm": 4.7009782791137695, | |
| "learning_rate": 6.9688872327451295e-06, | |
| "loss": 0.1958, | |
| "step": 503 | |
| }, | |
| { | |
| "epoch": 0.639323467230444, | |
| "grad_norm": 4.062927722930908, | |
| "learning_rate": 6.926691500153283e-06, | |
| "loss": 0.2643, | |
| "step": 504 | |
| }, | |
| { | |
| "epoch": 0.6405919661733616, | |
| "grad_norm": 5.526817321777344, | |
| "learning_rate": 6.884556108591766e-06, | |
| "loss": 0.2787, | |
| "step": 505 | |
| }, | |
| { | |
| "epoch": 0.641860465116279, | |
| "grad_norm": 5.635672569274902, | |
| "learning_rate": 6.842481885342624e-06, | |
| "loss": 0.2394, | |
| "step": 506 | |
| }, | |
| { | |
| "epoch": 0.6431289640591966, | |
| "grad_norm": 3.4105381965637207, | |
| "learning_rate": 6.8004696564869275e-06, | |
| "loss": 0.1787, | |
| "step": 507 | |
| }, | |
| { | |
| "epoch": 0.6443974630021142, | |
| "grad_norm": 4.923336982727051, | |
| "learning_rate": 6.75852024688856e-06, | |
| "loss": 0.1556, | |
| "step": 508 | |
| }, | |
| { | |
| "epoch": 0.6456659619450317, | |
| "grad_norm": 4.841919422149658, | |
| "learning_rate": 6.716634480178016e-06, | |
| "loss": 0.2467, | |
| "step": 509 | |
| }, | |
| { | |
| "epoch": 0.6469344608879493, | |
| "grad_norm": 3.464397668838501, | |
| "learning_rate": 6.674813178736226e-06, | |
| "loss": 0.1951, | |
| "step": 510 | |
| }, | |
| { | |
| "epoch": 0.6482029598308668, | |
| "grad_norm": 4.985055923461914, | |
| "learning_rate": 6.633057163678427e-06, | |
| "loss": 0.2217, | |
| "step": 511 | |
| }, | |
| { | |
| "epoch": 0.6494714587737843, | |
| "grad_norm": 4.595572471618652, | |
| "learning_rate": 6.5913672548380215e-06, | |
| "loss": 0.2207, | |
| "step": 512 | |
| }, | |
| { | |
| "epoch": 0.6507399577167019, | |
| "grad_norm": 3.702177047729492, | |
| "learning_rate": 6.549744270750489e-06, | |
| "loss": 0.1988, | |
| "step": 513 | |
| }, | |
| { | |
| "epoch": 0.6520084566596195, | |
| "grad_norm": 6.06284236907959, | |
| "learning_rate": 6.50818902863732e-06, | |
| "loss": 0.2363, | |
| "step": 514 | |
| }, | |
| { | |
| "epoch": 0.653276955602537, | |
| "grad_norm": 3.6575632095336914, | |
| "learning_rate": 6.466702344389963e-06, | |
| "loss": 0.1872, | |
| "step": 515 | |
| }, | |
| { | |
| "epoch": 0.6545454545454545, | |
| "grad_norm": 3.904543161392212, | |
| "learning_rate": 6.42528503255381e-06, | |
| "loss": 0.2535, | |
| "step": 516 | |
| }, | |
| { | |
| "epoch": 0.6558139534883721, | |
| "grad_norm": 2.966963291168213, | |
| "learning_rate": 6.383937906312196e-06, | |
| "loss": 0.1866, | |
| "step": 517 | |
| }, | |
| { | |
| "epoch": 0.6570824524312896, | |
| "grad_norm": 3.8866353034973145, | |
| "learning_rate": 6.342661777470449e-06, | |
| "loss": 0.2196, | |
| "step": 518 | |
| }, | |
| { | |
| "epoch": 0.6583509513742072, | |
| "grad_norm": 4.250112533569336, | |
| "learning_rate": 6.301457456439932e-06, | |
| "loss": 0.1956, | |
| "step": 519 | |
| }, | |
| { | |
| "epoch": 0.6596194503171248, | |
| "grad_norm": 4.357515335083008, | |
| "learning_rate": 6.260325752222148e-06, | |
| "loss": 0.1843, | |
| "step": 520 | |
| }, | |
| { | |
| "epoch": 0.6596194503171248, | |
| "eval_accuracy": 0.8609271523178808, | |
| "eval_f1": 0.7096774193548387, | |
| "eval_loss": 0.30549532175064087, | |
| "eval_precision": 0.8555555555555555, | |
| "eval_recall": 0.6062992125984252, | |
| "eval_runtime": 50.4215, | |
| "eval_samples_per_second": 5.494, | |
| "eval_steps_per_second": 0.178, | |
| "step": 520 | |
| }, | |
| { | |
| "epoch": 0.6608879492600422, | |
| "grad_norm": 3.0417070388793945, | |
| "learning_rate": 6.2192674723928425e-06, | |
| "loss": 0.2046, | |
| "step": 521 | |
| }, | |
| { | |
| "epoch": 0.6621564482029598, | |
| "grad_norm": 3.6613922119140625, | |
| "learning_rate": 6.178283423086159e-06, | |
| "loss": 0.1782, | |
| "step": 522 | |
| }, | |
| { | |
| "epoch": 0.6634249471458774, | |
| "grad_norm": 3.165771722793579, | |
| "learning_rate": 6.137374408978803e-06, | |
| "loss": 0.2041, | |
| "step": 523 | |
| }, | |
| { | |
| "epoch": 0.6646934460887949, | |
| "grad_norm": 4.003574371337891, | |
| "learning_rate": 6.096541233274247e-06, | |
| "loss": 0.2098, | |
| "step": 524 | |
| }, | |
| { | |
| "epoch": 0.6659619450317125, | |
| "grad_norm": 3.318516492843628, | |
| "learning_rate": 6.05578469768696e-06, | |
| "loss": 0.1887, | |
| "step": 525 | |
| }, | |
| { | |
| "epoch": 0.66723044397463, | |
| "grad_norm": 4.255181312561035, | |
| "learning_rate": 6.01510560242667e-06, | |
| "loss": 0.2046, | |
| "step": 526 | |
| }, | |
| { | |
| "epoch": 0.6684989429175475, | |
| "grad_norm": 3.862027883529663, | |
| "learning_rate": 5.974504746182644e-06, | |
| "loss": 0.1841, | |
| "step": 527 | |
| }, | |
| { | |
| "epoch": 0.6697674418604651, | |
| "grad_norm": 3.570117950439453, | |
| "learning_rate": 5.933982926108015e-06, | |
| "loss": 0.1597, | |
| "step": 528 | |
| }, | |
| { | |
| "epoch": 0.6710359408033827, | |
| "grad_norm": 6.440456867218018, | |
| "learning_rate": 5.893540937804128e-06, | |
| "loss": 0.2544, | |
| "step": 529 | |
| }, | |
| { | |
| "epoch": 0.6723044397463002, | |
| "grad_norm": 4.021892547607422, | |
| "learning_rate": 5.85317957530492e-06, | |
| "loss": 0.2522, | |
| "step": 530 | |
| }, | |
| { | |
| "epoch": 0.6735729386892177, | |
| "grad_norm": 4.242708206176758, | |
| "learning_rate": 5.812899631061328e-06, | |
| "loss": 0.1897, | |
| "step": 531 | |
| }, | |
| { | |
| "epoch": 0.6748414376321353, | |
| "grad_norm": 4.2478814125061035, | |
| "learning_rate": 5.772701895925725e-06, | |
| "loss": 0.2376, | |
| "step": 532 | |
| }, | |
| { | |
| "epoch": 0.6761099365750528, | |
| "grad_norm": 3.1216530799865723, | |
| "learning_rate": 5.732587159136409e-06, | |
| "loss": 0.1965, | |
| "step": 533 | |
| }, | |
| { | |
| "epoch": 0.6773784355179704, | |
| "grad_norm": 4.511528968811035, | |
| "learning_rate": 5.692556208302089e-06, | |
| "loss": 0.2142, | |
| "step": 534 | |
| }, | |
| { | |
| "epoch": 0.678646934460888, | |
| "grad_norm": 3.4524171352386475, | |
| "learning_rate": 5.652609829386431e-06, | |
| "loss": 0.2075, | |
| "step": 535 | |
| }, | |
| { | |
| "epoch": 0.6799154334038054, | |
| "grad_norm": 5.186423301696777, | |
| "learning_rate": 5.612748806692631e-06, | |
| "loss": 0.2504, | |
| "step": 536 | |
| }, | |
| { | |
| "epoch": 0.681183932346723, | |
| "grad_norm": 7.290851593017578, | |
| "learning_rate": 5.572973922848003e-06, | |
| "loss": 0.2263, | |
| "step": 537 | |
| }, | |
| { | |
| "epoch": 0.6824524312896406, | |
| "grad_norm": 7.404147624969482, | |
| "learning_rate": 5.5332859587886174e-06, | |
| "loss": 0.2877, | |
| "step": 538 | |
| }, | |
| { | |
| "epoch": 0.6837209302325581, | |
| "grad_norm": 3.5259296894073486, | |
| "learning_rate": 5.493685693743975e-06, | |
| "loss": 0.2011, | |
| "step": 539 | |
| }, | |
| { | |
| "epoch": 0.6849894291754757, | |
| "grad_norm": 5.679382801055908, | |
| "learning_rate": 5.4541739052217015e-06, | |
| "loss": 0.1915, | |
| "step": 540 | |
| }, | |
| { | |
| "epoch": 0.6849894291754757, | |
| "eval_accuracy": 0.8675496688741722, | |
| "eval_f1": 0.7424892703862661, | |
| "eval_loss": 0.28183531761169434, | |
| "eval_precision": 0.8160377358490566, | |
| "eval_recall": 0.6811023622047244, | |
| "eval_runtime": 50.4206, | |
| "eval_samples_per_second": 5.494, | |
| "eval_steps_per_second": 0.178, | |
| "step": 540 | |
| }, | |
| { | |
| "epoch": 0.6862579281183933, | |
| "grad_norm": 4.955303192138672, | |
| "learning_rate": 5.414751368992281e-06, | |
| "loss": 0.1991, | |
| "step": 541 | |
| }, | |
| { | |
| "epoch": 0.6875264270613107, | |
| "grad_norm": 5.0703349113464355, | |
| "learning_rate": 5.3754188590738335e-06, | |
| "loss": 0.1959, | |
| "step": 542 | |
| }, | |
| { | |
| "epoch": 0.6887949260042283, | |
| "grad_norm": 3.5751047134399414, | |
| "learning_rate": 5.3361771477169025e-06, | |
| "loss": 0.2537, | |
| "step": 543 | |
| }, | |
| { | |
| "epoch": 0.6900634249471459, | |
| "grad_norm": 4.129851341247559, | |
| "learning_rate": 5.297027005389309e-06, | |
| "loss": 0.2031, | |
| "step": 544 | |
| }, | |
| { | |
| "epoch": 0.6913319238900634, | |
| "grad_norm": 3.951871633529663, | |
| "learning_rate": 5.257969200761015e-06, | |
| "loss": 0.1888, | |
| "step": 545 | |
| }, | |
| { | |
| "epoch": 0.692600422832981, | |
| "grad_norm": 4.308104515075684, | |
| "learning_rate": 5.219004500689031e-06, | |
| "loss": 0.2216, | |
| "step": 546 | |
| }, | |
| { | |
| "epoch": 0.6938689217758985, | |
| "grad_norm": 3.6957151889801025, | |
| "learning_rate": 5.180133670202363e-06, | |
| "loss": 0.2113, | |
| "step": 547 | |
| }, | |
| { | |
| "epoch": 0.695137420718816, | |
| "grad_norm": 4.605830669403076, | |
| "learning_rate": 5.141357472486994e-06, | |
| "loss": 0.1663, | |
| "step": 548 | |
| }, | |
| { | |
| "epoch": 0.6964059196617336, | |
| "grad_norm": 4.787519931793213, | |
| "learning_rate": 5.102676668870894e-06, | |
| "loss": 0.2201, | |
| "step": 549 | |
| }, | |
| { | |
| "epoch": 0.6976744186046512, | |
| "grad_norm": 4.204073905944824, | |
| "learning_rate": 5.064092018809074e-06, | |
| "loss": 0.2575, | |
| "step": 550 | |
| }, | |
| { | |
| "epoch": 0.6989429175475688, | |
| "grad_norm": 5.7078962326049805, | |
| "learning_rate": 5.025604279868677e-06, | |
| "loss": 0.3108, | |
| "step": 551 | |
| }, | |
| { | |
| "epoch": 0.7002114164904862, | |
| "grad_norm": 4.210063457489014, | |
| "learning_rate": 4.987214207714103e-06, | |
| "loss": 0.1687, | |
| "step": 552 | |
| }, | |
| { | |
| "epoch": 0.7014799154334038, | |
| "grad_norm": 3.3465261459350586, | |
| "learning_rate": 4.948922556092173e-06, | |
| "loss": 0.1805, | |
| "step": 553 | |
| }, | |
| { | |
| "epoch": 0.7027484143763214, | |
| "grad_norm": 3.795522928237915, | |
| "learning_rate": 4.910730076817326e-06, | |
| "loss": 0.229, | |
| "step": 554 | |
| }, | |
| { | |
| "epoch": 0.7040169133192389, | |
| "grad_norm": 4.867710113525391, | |
| "learning_rate": 4.872637519756865e-06, | |
| "loss": 0.2243, | |
| "step": 555 | |
| }, | |
| { | |
| "epoch": 0.7052854122621565, | |
| "grad_norm": 4.932121753692627, | |
| "learning_rate": 4.834645632816227e-06, | |
| "loss": 0.2373, | |
| "step": 556 | |
| }, | |
| { | |
| "epoch": 0.706553911205074, | |
| "grad_norm": 4.272938251495361, | |
| "learning_rate": 4.796755161924305e-06, | |
| "loss": 0.2484, | |
| "step": 557 | |
| }, | |
| { | |
| "epoch": 0.7078224101479915, | |
| "grad_norm": 6.413752555847168, | |
| "learning_rate": 4.7589668510187955e-06, | |
| "loss": 0.2344, | |
| "step": 558 | |
| }, | |
| { | |
| "epoch": 0.7090909090909091, | |
| "grad_norm": 3.5056440830230713, | |
| "learning_rate": 4.721281442031601e-06, | |
| "loss": 0.1616, | |
| "step": 559 | |
| }, | |
| { | |
| "epoch": 0.7103594080338267, | |
| "grad_norm": 3.480790615081787, | |
| "learning_rate": 4.683699674874253e-06, | |
| "loss": 0.1582, | |
| "step": 560 | |
| }, | |
| { | |
| "epoch": 0.7103594080338267, | |
| "eval_accuracy": 0.8675496688741722, | |
| "eval_f1": 0.726027397260274, | |
| "eval_loss": 0.288669228553772, | |
| "eval_precision": 0.8641304347826086, | |
| "eval_recall": 0.6259842519685039, | |
| "eval_runtime": 52.0991, | |
| "eval_samples_per_second": 5.317, | |
| "eval_steps_per_second": 0.173, | |
| "step": 560 | |
| }, | |
| { | |
| "epoch": 0.7116279069767442, | |
| "grad_norm": 3.7166647911071777, | |
| "learning_rate": 4.646222287423391e-06, | |
| "loss": 0.2111, | |
| "step": 561 | |
| }, | |
| { | |
| "epoch": 0.7128964059196617, | |
| "grad_norm": 5.089128017425537, | |
| "learning_rate": 4.608850015506282e-06, | |
| "loss": 0.2095, | |
| "step": 562 | |
| }, | |
| { | |
| "epoch": 0.7141649048625793, | |
| "grad_norm": 4.740091800689697, | |
| "learning_rate": 4.571583592886352e-06, | |
| "loss": 0.1926, | |
| "step": 563 | |
| }, | |
| { | |
| "epoch": 0.7154334038054968, | |
| "grad_norm": 3.8879525661468506, | |
| "learning_rate": 4.534423751248803e-06, | |
| "loss": 0.2297, | |
| "step": 564 | |
| }, | |
| { | |
| "epoch": 0.7167019027484144, | |
| "grad_norm": 4.693061351776123, | |
| "learning_rate": 4.497371220186229e-06, | |
| "loss": 0.2001, | |
| "step": 565 | |
| }, | |
| { | |
| "epoch": 0.717970401691332, | |
| "grad_norm": 4.66520881652832, | |
| "learning_rate": 4.460426727184305e-06, | |
| "loss": 0.2073, | |
| "step": 566 | |
| }, | |
| { | |
| "epoch": 0.7192389006342494, | |
| "grad_norm": 3.9349865913391113, | |
| "learning_rate": 4.4235909976074935e-06, | |
| "loss": 0.2098, | |
| "step": 567 | |
| }, | |
| { | |
| "epoch": 0.720507399577167, | |
| "grad_norm": 5.9813618659973145, | |
| "learning_rate": 4.386864754684808e-06, | |
| "loss": 0.2579, | |
| "step": 568 | |
| }, | |
| { | |
| "epoch": 0.7217758985200846, | |
| "grad_norm": 4.006646633148193, | |
| "learning_rate": 4.350248719495612e-06, | |
| "loss": 0.1935, | |
| "step": 569 | |
| }, | |
| { | |
| "epoch": 0.7230443974630021, | |
| "grad_norm": 4.767092227935791, | |
| "learning_rate": 4.3137436109554645e-06, | |
| "loss": 0.2556, | |
| "step": 570 | |
| }, | |
| { | |
| "epoch": 0.7243128964059197, | |
| "grad_norm": 4.517501354217529, | |
| "learning_rate": 4.277350145801994e-06, | |
| "loss": 0.185, | |
| "step": 571 | |
| }, | |
| { | |
| "epoch": 0.7255813953488373, | |
| "grad_norm": 3.7447240352630615, | |
| "learning_rate": 4.241069038580845e-06, | |
| "loss": 0.2017, | |
| "step": 572 | |
| }, | |
| { | |
| "epoch": 0.7268498942917547, | |
| "grad_norm": 5.2144904136657715, | |
| "learning_rate": 4.204901001631631e-06, | |
| "loss": 0.2049, | |
| "step": 573 | |
| }, | |
| { | |
| "epoch": 0.7281183932346723, | |
| "grad_norm": 4.404168128967285, | |
| "learning_rate": 4.168846745073952e-06, | |
| "loss": 0.266, | |
| "step": 574 | |
| }, | |
| { | |
| "epoch": 0.7293868921775899, | |
| "grad_norm": 4.323419570922852, | |
| "learning_rate": 4.132906976793463e-06, | |
| "loss": 0.1812, | |
| "step": 575 | |
| }, | |
| { | |
| "epoch": 0.7306553911205074, | |
| "grad_norm": 3.2697091102600098, | |
| "learning_rate": 4.097082402427962e-06, | |
| "loss": 0.1758, | |
| "step": 576 | |
| }, | |
| { | |
| "epoch": 0.731923890063425, | |
| "grad_norm": 4.7810959815979, | |
| "learning_rate": 4.061373725353542e-06, | |
| "loss": 0.2368, | |
| "step": 577 | |
| }, | |
| { | |
| "epoch": 0.7331923890063425, | |
| "grad_norm": 4.363092422485352, | |
| "learning_rate": 4.025781646670784e-06, | |
| "loss": 0.1726, | |
| "step": 578 | |
| }, | |
| { | |
| "epoch": 0.73446088794926, | |
| "grad_norm": 4.322168827056885, | |
| "learning_rate": 3.990306865190983e-06, | |
| "loss": 0.2365, | |
| "step": 579 | |
| }, | |
| { | |
| "epoch": 0.7357293868921776, | |
| "grad_norm": 3.4661788940429688, | |
| "learning_rate": 3.954950077422435e-06, | |
| "loss": 0.2003, | |
| "step": 580 | |
| }, | |
| { | |
| "epoch": 0.7357293868921776, | |
| "eval_accuracy": 0.8653421633554084, | |
| "eval_f1": 0.7239819004524887, | |
| "eval_loss": 0.2871930003166199, | |
| "eval_precision": 0.851063829787234, | |
| "eval_recall": 0.6299212598425197, | |
| "eval_runtime": 51.4881, | |
| "eval_samples_per_second": 5.38, | |
| "eval_steps_per_second": 0.175, | |
| "step": 580 | |
| }, | |
| { | |
| "epoch": 0.7369978858350952, | |
| "grad_norm": 4.783055305480957, | |
| "learning_rate": 3.9197119775567595e-06, | |
| "loss": 0.2522, | |
| "step": 581 | |
| }, | |
| { | |
| "epoch": 0.7382663847780127, | |
| "grad_norm": 4.873471260070801, | |
| "learning_rate": 3.884593257455268e-06, | |
| "loss": 0.2188, | |
| "step": 582 | |
| }, | |
| { | |
| "epoch": 0.7395348837209302, | |
| "grad_norm": 4.4479289054870605, | |
| "learning_rate": 3.849594606635384e-06, | |
| "loss": 0.1991, | |
| "step": 583 | |
| }, | |
| { | |
| "epoch": 0.7408033826638478, | |
| "grad_norm": 2.947563409805298, | |
| "learning_rate": 3.814716712257101e-06, | |
| "loss": 0.1545, | |
| "step": 584 | |
| }, | |
| { | |
| "epoch": 0.7420718816067653, | |
| "grad_norm": 3.4630439281463623, | |
| "learning_rate": 3.7799602591094953e-06, | |
| "loss": 0.1736, | |
| "step": 585 | |
| }, | |
| { | |
| "epoch": 0.7433403805496829, | |
| "grad_norm": 3.2017877101898193, | |
| "learning_rate": 3.745325929597272e-06, | |
| "loss": 0.1384, | |
| "step": 586 | |
| }, | |
| { | |
| "epoch": 0.7446088794926005, | |
| "grad_norm": 3.4015791416168213, | |
| "learning_rate": 3.7108144037273806e-06, | |
| "loss": 0.2033, | |
| "step": 587 | |
| }, | |
| { | |
| "epoch": 0.7458773784355179, | |
| "grad_norm": 3.5852890014648438, | |
| "learning_rate": 3.676426359095653e-06, | |
| "loss": 0.1888, | |
| "step": 588 | |
| }, | |
| { | |
| "epoch": 0.7471458773784355, | |
| "grad_norm": 3.599536657333374, | |
| "learning_rate": 3.6421624708735002e-06, | |
| "loss": 0.1998, | |
| "step": 589 | |
| }, | |
| { | |
| "epoch": 0.7484143763213531, | |
| "grad_norm": 5.890554904937744, | |
| "learning_rate": 3.6080234117946634e-06, | |
| "loss": 0.2303, | |
| "step": 590 | |
| }, | |
| { | |
| "epoch": 0.7496828752642706, | |
| "grad_norm": 4.978075981140137, | |
| "learning_rate": 3.5740098521419985e-06, | |
| "loss": 0.2299, | |
| "step": 591 | |
| }, | |
| { | |
| "epoch": 0.7509513742071882, | |
| "grad_norm": 4.91259241104126, | |
| "learning_rate": 3.5401224597343175e-06, | |
| "loss": 0.222, | |
| "step": 592 | |
| }, | |
| { | |
| "epoch": 0.7522198731501057, | |
| "grad_norm": 3.010634422302246, | |
| "learning_rate": 3.5063618999132798e-06, | |
| "loss": 0.2077, | |
| "step": 593 | |
| }, | |
| { | |
| "epoch": 0.7534883720930232, | |
| "grad_norm": 4.121917724609375, | |
| "learning_rate": 3.4727288355303247e-06, | |
| "loss": 0.2408, | |
| "step": 594 | |
| }, | |
| { | |
| "epoch": 0.7547568710359408, | |
| "grad_norm": 6.061969757080078, | |
| "learning_rate": 3.4392239269336592e-06, | |
| "loss": 0.227, | |
| "step": 595 | |
| }, | |
| { | |
| "epoch": 0.7560253699788584, | |
| "grad_norm": 6.199504375457764, | |
| "learning_rate": 3.4058478319552935e-06, | |
| "loss": 0.2553, | |
| "step": 596 | |
| }, | |
| { | |
| "epoch": 0.7572938689217759, | |
| "grad_norm": 3.981107473373413, | |
| "learning_rate": 3.372601205898122e-06, | |
| "loss": 0.2189, | |
| "step": 597 | |
| }, | |
| { | |
| "epoch": 0.7585623678646934, | |
| "grad_norm": 5.17703104019165, | |
| "learning_rate": 3.3394847015230613e-06, | |
| "loss": 0.2252, | |
| "step": 598 | |
| }, | |
| { | |
| "epoch": 0.759830866807611, | |
| "grad_norm": 3.7564640045166016, | |
| "learning_rate": 3.3064989690362314e-06, | |
| "loss": 0.2137, | |
| "step": 599 | |
| }, | |
| { | |
| "epoch": 0.7610993657505285, | |
| "grad_norm": 5.007497310638428, | |
| "learning_rate": 3.2736446560761903e-06, | |
| "loss": 0.2345, | |
| "step": 600 | |
| }, | |
| { | |
| "epoch": 0.7610993657505285, | |
| "eval_accuracy": 0.8686534216335541, | |
| "eval_f1": 0.7407407407407407, | |
| "eval_loss": 0.282692551612854, | |
| "eval_precision": 0.8292682926829268, | |
| "eval_recall": 0.6692913385826772, | |
| "eval_runtime": 52.302, | |
| "eval_samples_per_second": 5.296, | |
| "eval_steps_per_second": 0.172, | |
| "step": 600 | |
| }, | |
| { | |
| "epoch": 0.7623678646934461, | |
| "grad_norm": 3.266761064529419, | |
| "learning_rate": 3.2409224077012135e-06, | |
| "loss": 0.1771, | |
| "step": 601 | |
| }, | |
| { | |
| "epoch": 0.7636363636363637, | |
| "grad_norm": 4.591704368591309, | |
| "learning_rate": 3.2083328663766466e-06, | |
| "loss": 0.241, | |
| "step": 602 | |
| }, | |
| { | |
| "epoch": 0.7649048625792811, | |
| "grad_norm": 4.731637954711914, | |
| "learning_rate": 3.175876671962266e-06, | |
| "loss": 0.2559, | |
| "step": 603 | |
| }, | |
| { | |
| "epoch": 0.7661733615221987, | |
| "grad_norm": 9.054510116577148, | |
| "learning_rate": 3.1435544616997303e-06, | |
| "loss": 0.2459, | |
| "step": 604 | |
| }, | |
| { | |
| "epoch": 0.7674418604651163, | |
| "grad_norm": 5.0422821044921875, | |
| "learning_rate": 3.111366870200071e-06, | |
| "loss": 0.1812, | |
| "step": 605 | |
| }, | |
| { | |
| "epoch": 0.7687103594080338, | |
| "grad_norm": 5.838403701782227, | |
| "learning_rate": 3.0793145294312253e-06, | |
| "loss": 0.2237, | |
| "step": 606 | |
| }, | |
| { | |
| "epoch": 0.7699788583509514, | |
| "grad_norm": 3.5189685821533203, | |
| "learning_rate": 3.0473980687056314e-06, | |
| "loss": 0.1696, | |
| "step": 607 | |
| }, | |
| { | |
| "epoch": 0.771247357293869, | |
| "grad_norm": 3.204148530960083, | |
| "learning_rate": 3.015618114667873e-06, | |
| "loss": 0.1476, | |
| "step": 608 | |
| }, | |
| { | |
| "epoch": 0.7725158562367864, | |
| "grad_norm": 5.796388149261475, | |
| "learning_rate": 2.983975291282375e-06, | |
| "loss": 0.1914, | |
| "step": 609 | |
| }, | |
| { | |
| "epoch": 0.773784355179704, | |
| "grad_norm": 4.04534912109375, | |
| "learning_rate": 2.952470219821152e-06, | |
| "loss": 0.2216, | |
| "step": 610 | |
| }, | |
| { | |
| "epoch": 0.7750528541226216, | |
| "grad_norm": 3.9704835414886475, | |
| "learning_rate": 2.921103518851609e-06, | |
| "loss": 0.2576, | |
| "step": 611 | |
| }, | |
| { | |
| "epoch": 0.7763213530655391, | |
| "grad_norm": 3.9379115104675293, | |
| "learning_rate": 2.889875804224409e-06, | |
| "loss": 0.2255, | |
| "step": 612 | |
| }, | |
| { | |
| "epoch": 0.7775898520084567, | |
| "grad_norm": 4.200435161590576, | |
| "learning_rate": 2.8587876890613597e-06, | |
| "loss": 0.2399, | |
| "step": 613 | |
| }, | |
| { | |
| "epoch": 0.7788583509513742, | |
| "grad_norm": 3.6293070316314697, | |
| "learning_rate": 2.827839783743391e-06, | |
| "loss": 0.2346, | |
| "step": 614 | |
| }, | |
| { | |
| "epoch": 0.7801268498942917, | |
| "grad_norm": 4.450516700744629, | |
| "learning_rate": 2.7970326958985683e-06, | |
| "loss": 0.2134, | |
| "step": 615 | |
| }, | |
| { | |
| "epoch": 0.7813953488372093, | |
| "grad_norm": 3.8171873092651367, | |
| "learning_rate": 2.766367030390157e-06, | |
| "loss": 0.1628, | |
| "step": 616 | |
| }, | |
| { | |
| "epoch": 0.7826638477801269, | |
| "grad_norm": 4.238905906677246, | |
| "learning_rate": 2.7358433893047543e-06, | |
| "loss": 0.2054, | |
| "step": 617 | |
| }, | |
| { | |
| "epoch": 0.7839323467230443, | |
| "grad_norm": 3.8484349250793457, | |
| "learning_rate": 2.7054623719404617e-06, | |
| "loss": 0.1907, | |
| "step": 618 | |
| }, | |
| { | |
| "epoch": 0.7852008456659619, | |
| "grad_norm": 4.630876541137695, | |
| "learning_rate": 2.675224574795123e-06, | |
| "loss": 0.2193, | |
| "step": 619 | |
| }, | |
| { | |
| "epoch": 0.7864693446088795, | |
| "grad_norm": 5.129254341125488, | |
| "learning_rate": 2.645130591554609e-06, | |
| "loss": 0.2107, | |
| "step": 620 | |
| }, | |
| { | |
| "epoch": 0.7864693446088795, | |
| "eval_accuracy": 0.8642384105960265, | |
| "eval_f1": 0.7146171693735499, | |
| "eval_loss": 0.2954389750957489, | |
| "eval_precision": 0.8700564971751412, | |
| "eval_recall": 0.6062992125984252, | |
| "eval_runtime": 51.5152, | |
| "eval_samples_per_second": 5.377, | |
| "eval_steps_per_second": 0.175, | |
| "step": 620 | |
| }, | |
| { | |
| "epoch": 0.787737843551797, | |
| "grad_norm": 4.9152750968933105, | |
| "learning_rate": 2.615181013081164e-06, | |
| "loss": 0.1496, | |
| "step": 621 | |
| }, | |
| { | |
| "epoch": 0.7890063424947146, | |
| "grad_norm": 4.203990459442139, | |
| "learning_rate": 2.5853764274018024e-06, | |
| "loss": 0.1628, | |
| "step": 622 | |
| }, | |
| { | |
| "epoch": 0.7902748414376322, | |
| "grad_norm": 4.1775641441345215, | |
| "learning_rate": 2.555717419696764e-06, | |
| "loss": 0.1943, | |
| "step": 623 | |
| }, | |
| { | |
| "epoch": 0.7915433403805496, | |
| "grad_norm": 3.536635398864746, | |
| "learning_rate": 2.526204572288029e-06, | |
| "loss": 0.1927, | |
| "step": 624 | |
| }, | |
| { | |
| "epoch": 0.7928118393234672, | |
| "grad_norm": 3.426269769668579, | |
| "learning_rate": 2.4968384646278765e-06, | |
| "loss": 0.202, | |
| "step": 625 | |
| }, | |
| { | |
| "epoch": 0.7940803382663848, | |
| "grad_norm": 7.433694362640381, | |
| "learning_rate": 2.4676196732875147e-06, | |
| "loss": 0.2374, | |
| "step": 626 | |
| }, | |
| { | |
| "epoch": 0.7953488372093023, | |
| "grad_norm": 4.034791946411133, | |
| "learning_rate": 2.4385487719457568e-06, | |
| "loss": 0.2249, | |
| "step": 627 | |
| }, | |
| { | |
| "epoch": 0.7966173361522199, | |
| "grad_norm": 4.299747467041016, | |
| "learning_rate": 2.4096263313777603e-06, | |
| "loss": 0.1816, | |
| "step": 628 | |
| }, | |
| { | |
| "epoch": 0.7978858350951374, | |
| "grad_norm": 5.804561614990234, | |
| "learning_rate": 2.3808529194438156e-06, | |
| "loss": 0.3159, | |
| "step": 629 | |
| }, | |
| { | |
| "epoch": 0.7991543340380549, | |
| "grad_norm": 4.759912014007568, | |
| "learning_rate": 2.352229101078205e-06, | |
| "loss": 0.2168, | |
| "step": 630 | |
| }, | |
| { | |
| "epoch": 0.8004228329809725, | |
| "grad_norm": 4.604716777801514, | |
| "learning_rate": 2.3237554382781004e-06, | |
| "loss": 0.2306, | |
| "step": 631 | |
| }, | |
| { | |
| "epoch": 0.8016913319238901, | |
| "grad_norm": 4.394404411315918, | |
| "learning_rate": 2.2954324900925362e-06, | |
| "loss": 0.226, | |
| "step": 632 | |
| }, | |
| { | |
| "epoch": 0.8029598308668076, | |
| "grad_norm": 4.407333850860596, | |
| "learning_rate": 2.2672608126114337e-06, | |
| "loss": 0.2125, | |
| "step": 633 | |
| }, | |
| { | |
| "epoch": 0.8042283298097251, | |
| "grad_norm": 4.021424770355225, | |
| "learning_rate": 2.239240958954677e-06, | |
| "loss": 0.1941, | |
| "step": 634 | |
| }, | |
| { | |
| "epoch": 0.8054968287526427, | |
| "grad_norm": 5.0576066970825195, | |
| "learning_rate": 2.2113734792612586e-06, | |
| "loss": 0.1864, | |
| "step": 635 | |
| }, | |
| { | |
| "epoch": 0.8067653276955603, | |
| "grad_norm": 5.43579626083374, | |
| "learning_rate": 2.1836589206784742e-06, | |
| "loss": 0.2176, | |
| "step": 636 | |
| }, | |
| { | |
| "epoch": 0.8080338266384778, | |
| "grad_norm": 4.382997035980225, | |
| "learning_rate": 2.15609782735118e-06, | |
| "loss": 0.1978, | |
| "step": 637 | |
| }, | |
| { | |
| "epoch": 0.8093023255813954, | |
| "grad_norm": 3.2525434494018555, | |
| "learning_rate": 2.12869074041112e-06, | |
| "loss": 0.1882, | |
| "step": 638 | |
| }, | |
| { | |
| "epoch": 0.810570824524313, | |
| "grad_norm": 4.6330790519714355, | |
| "learning_rate": 2.1014381979662823e-06, | |
| "loss": 0.2146, | |
| "step": 639 | |
| }, | |
| { | |
| "epoch": 0.8118393234672304, | |
| "grad_norm": 4.439876079559326, | |
| "learning_rate": 2.0743407350903465e-06, | |
| "loss": 0.2562, | |
| "step": 640 | |
| }, | |
| { | |
| "epoch": 0.8118393234672304, | |
| "eval_accuracy": 0.8642384105960265, | |
| "eval_f1": 0.7210884353741497, | |
| "eval_loss": 0.293789803981781, | |
| "eval_precision": 0.8502673796791443, | |
| "eval_recall": 0.6259842519685039, | |
| "eval_runtime": 51.7868, | |
| "eval_samples_per_second": 5.349, | |
| "eval_steps_per_second": 0.174, | |
| "step": 640 | |
| }, | |
| { | |
| "epoch": 0.813107822410148, | |
| "grad_norm": 3.350806713104248, | |
| "learning_rate": 2.0473988838121783e-06, | |
| "loss": 0.1801, | |
| "step": 641 | |
| }, | |
| { | |
| "epoch": 0.8143763213530656, | |
| "grad_norm": 3.780141592025757, | |
| "learning_rate": 2.020613173105379e-06, | |
| "loss": 0.1665, | |
| "step": 642 | |
| }, | |
| { | |
| "epoch": 0.8156448202959831, | |
| "grad_norm": 3.851416826248169, | |
| "learning_rate": 1.9939841288778996e-06, | |
| "loss": 0.1559, | |
| "step": 643 | |
| }, | |
| { | |
| "epoch": 0.8169133192389006, | |
| "grad_norm": 3.916234254837036, | |
| "learning_rate": 1.96751227396172e-06, | |
| "loss": 0.2272, | |
| "step": 644 | |
| }, | |
| { | |
| "epoch": 0.8181818181818182, | |
| "grad_norm": 4.235809326171875, | |
| "learning_rate": 1.9411981281025818e-06, | |
| "loss": 0.2591, | |
| "step": 645 | |
| }, | |
| { | |
| "epoch": 0.8194503171247357, | |
| "grad_norm": 3.97944974899292, | |
| "learning_rate": 1.9150422079497787e-06, | |
| "loss": 0.1772, | |
| "step": 646 | |
| }, | |
| { | |
| "epoch": 0.8207188160676533, | |
| "grad_norm": 3.336775064468384, | |
| "learning_rate": 1.8890450270460204e-06, | |
| "loss": 0.1648, | |
| "step": 647 | |
| }, | |
| { | |
| "epoch": 0.8219873150105709, | |
| "grad_norm": 3.8211421966552734, | |
| "learning_rate": 1.8632070958173453e-06, | |
| "loss": 0.1483, | |
| "step": 648 | |
| }, | |
| { | |
| "epoch": 0.8232558139534883, | |
| "grad_norm": 4.630310535430908, | |
| "learning_rate": 1.8375289215630998e-06, | |
| "loss": 0.232, | |
| "step": 649 | |
| }, | |
| { | |
| "epoch": 0.8245243128964059, | |
| "grad_norm": 3.9007372856140137, | |
| "learning_rate": 1.8120110084459763e-06, | |
| "loss": 0.2163, | |
| "step": 650 | |
| }, | |
| { | |
| "epoch": 0.8257928118393235, | |
| "grad_norm": 4.006324291229248, | |
| "learning_rate": 1.786653857482118e-06, | |
| "loss": 0.2104, | |
| "step": 651 | |
| }, | |
| { | |
| "epoch": 0.827061310782241, | |
| "grad_norm": 4.888721942901611, | |
| "learning_rate": 1.7614579665312792e-06, | |
| "loss": 0.2257, | |
| "step": 652 | |
| }, | |
| { | |
| "epoch": 0.8283298097251586, | |
| "grad_norm": 5.345705032348633, | |
| "learning_rate": 1.7364238302870517e-06, | |
| "loss": 0.2251, | |
| "step": 653 | |
| }, | |
| { | |
| "epoch": 0.8295983086680762, | |
| "grad_norm": 3.589813709259033, | |
| "learning_rate": 1.711551940267151e-06, | |
| "loss": 0.1877, | |
| "step": 654 | |
| }, | |
| { | |
| "epoch": 0.8308668076109936, | |
| "grad_norm": 5.65714168548584, | |
| "learning_rate": 1.6868427848037672e-06, | |
| "loss": 0.2736, | |
| "step": 655 | |
| }, | |
| { | |
| "epoch": 0.8321353065539112, | |
| "grad_norm": 5.815731048583984, | |
| "learning_rate": 1.6622968490339775e-06, | |
| "loss": 0.2564, | |
| "step": 656 | |
| }, | |
| { | |
| "epoch": 0.8334038054968288, | |
| "grad_norm": 3.979933738708496, | |
| "learning_rate": 1.637914614890217e-06, | |
| "loss": 0.1795, | |
| "step": 657 | |
| }, | |
| { | |
| "epoch": 0.8346723044397463, | |
| "grad_norm": 4.708816051483154, | |
| "learning_rate": 1.613696561090823e-06, | |
| "loss": 0.2817, | |
| "step": 658 | |
| }, | |
| { | |
| "epoch": 0.8359408033826639, | |
| "grad_norm": 4.505849838256836, | |
| "learning_rate": 1.5896431631306298e-06, | |
| "loss": 0.2302, | |
| "step": 659 | |
| }, | |
| { | |
| "epoch": 0.8372093023255814, | |
| "grad_norm": 4.055660247802734, | |
| "learning_rate": 1.565754893271636e-06, | |
| "loss": 0.1054, | |
| "step": 660 | |
| }, | |
| { | |
| "epoch": 0.8372093023255814, | |
| "eval_accuracy": 0.8642384105960265, | |
| "eval_f1": 0.7210884353741497, | |
| "eval_loss": 0.29165971279144287, | |
| "eval_precision": 0.8502673796791443, | |
| "eval_recall": 0.6259842519685039, | |
| "eval_runtime": 52.8549, | |
| "eval_samples_per_second": 5.241, | |
| "eval_steps_per_second": 0.17, | |
| "step": 660 | |
| }, | |
| { | |
| "epoch": 0.8384778012684989, | |
| "grad_norm": 3.554976224899292, | |
| "learning_rate": 1.5420322205337335e-06, | |
| "loss": 0.2036, | |
| "step": 661 | |
| }, | |
| { | |
| "epoch": 0.8397463002114165, | |
| "grad_norm": 5.035533905029297, | |
| "learning_rate": 1.5184756106854925e-06, | |
| "loss": 0.1991, | |
| "step": 662 | |
| }, | |
| { | |
| "epoch": 0.8410147991543341, | |
| "grad_norm": 3.7902748584747314, | |
| "learning_rate": 1.4950855262350282e-06, | |
| "loss": 0.1813, | |
| "step": 663 | |
| }, | |
| { | |
| "epoch": 0.8422832980972516, | |
| "grad_norm": 3.7442333698272705, | |
| "learning_rate": 1.471862426420908e-06, | |
| "loss": 0.2316, | |
| "step": 664 | |
| }, | |
| { | |
| "epoch": 0.8435517970401691, | |
| "grad_norm": 3.83044171333313, | |
| "learning_rate": 1.4488067672031391e-06, | |
| "loss": 0.2185, | |
| "step": 665 | |
| }, | |
| { | |
| "epoch": 0.8448202959830867, | |
| "grad_norm": 3.863065719604492, | |
| "learning_rate": 1.425919001254219e-06, | |
| "loss": 0.2586, | |
| "step": 666 | |
| }, | |
| { | |
| "epoch": 0.8460887949260042, | |
| "grad_norm": 4.449268341064453, | |
| "learning_rate": 1.403199577950245e-06, | |
| "loss": 0.1982, | |
| "step": 667 | |
| }, | |
| { | |
| "epoch": 0.8473572938689218, | |
| "grad_norm": 4.1120123863220215, | |
| "learning_rate": 1.3806489433620917e-06, | |
| "loss": 0.2196, | |
| "step": 668 | |
| }, | |
| { | |
| "epoch": 0.8486257928118394, | |
| "grad_norm": 3.667243242263794, | |
| "learning_rate": 1.3582675402466538e-06, | |
| "loss": 0.1903, | |
| "step": 669 | |
| }, | |
| { | |
| "epoch": 0.8498942917547568, | |
| "grad_norm": 4.294753551483154, | |
| "learning_rate": 1.336055808038149e-06, | |
| "loss": 0.2223, | |
| "step": 670 | |
| }, | |
| { | |
| "epoch": 0.8511627906976744, | |
| "grad_norm": 3.5263938903808594, | |
| "learning_rate": 1.3140141828394993e-06, | |
| "loss": 0.1916, | |
| "step": 671 | |
| }, | |
| { | |
| "epoch": 0.852431289640592, | |
| "grad_norm": 4.649272918701172, | |
| "learning_rate": 1.2921430974137562e-06, | |
| "loss": 0.278, | |
| "step": 672 | |
| }, | |
| { | |
| "epoch": 0.8536997885835095, | |
| "grad_norm": 5.0053558349609375, | |
| "learning_rate": 1.270442981175617e-06, | |
| "loss": 0.2298, | |
| "step": 673 | |
| }, | |
| { | |
| "epoch": 0.8549682875264271, | |
| "grad_norm": 6.003551483154297, | |
| "learning_rate": 1.2489142601829819e-06, | |
| "loss": 0.3026, | |
| "step": 674 | |
| }, | |
| { | |
| "epoch": 0.8562367864693446, | |
| "grad_norm": 5.016626834869385, | |
| "learning_rate": 1.227557357128597e-06, | |
| "loss": 0.234, | |
| "step": 675 | |
| }, | |
| { | |
| "epoch": 0.8575052854122621, | |
| "grad_norm": 4.0852251052856445, | |
| "learning_rate": 1.2063726913317508e-06, | |
| "loss": 0.1984, | |
| "step": 676 | |
| }, | |
| { | |
| "epoch": 0.8587737843551797, | |
| "grad_norm": 3.9487900733947754, | |
| "learning_rate": 1.185360678730043e-06, | |
| "loss": 0.191, | |
| "step": 677 | |
| }, | |
| { | |
| "epoch": 0.8600422832980973, | |
| "grad_norm": 5.6874213218688965, | |
| "learning_rate": 1.1645217318712187e-06, | |
| "loss": 0.2541, | |
| "step": 678 | |
| }, | |
| { | |
| "epoch": 0.8613107822410148, | |
| "grad_norm": 4.008146286010742, | |
| "learning_rate": 1.1438562599050661e-06, | |
| "loss": 0.1698, | |
| "step": 679 | |
| }, | |
| { | |
| "epoch": 0.8625792811839323, | |
| "grad_norm": 5.791302680969238, | |
| "learning_rate": 1.1233646685753864e-06, | |
| "loss": 0.2837, | |
| "step": 680 | |
| }, | |
| { | |
| "epoch": 0.8625792811839323, | |
| "eval_accuracy": 0.8664459161147903, | |
| "eval_f1": 0.7317073170731707, | |
| "eval_loss": 0.2841557264328003, | |
| "eval_precision": 0.8375634517766497, | |
| "eval_recall": 0.6496062992125984, | |
| "eval_runtime": 51.6664, | |
| "eval_samples_per_second": 5.361, | |
| "eval_steps_per_second": 0.174, | |
| "step": 680 | |
| }, | |
| { | |
| "epoch": 0.8638477801268499, | |
| "grad_norm": 4.153045177459717, | |
| "learning_rate": 1.103047360212024e-06, | |
| "loss": 0.1929, | |
| "step": 681 | |
| }, | |
| { | |
| "epoch": 0.8651162790697674, | |
| "grad_norm": 6.1054511070251465, | |
| "learning_rate": 1.0829047337229714e-06, | |
| "loss": 0.2017, | |
| "step": 682 | |
| }, | |
| { | |
| "epoch": 0.866384778012685, | |
| "grad_norm": 4.814021110534668, | |
| "learning_rate": 1.0629371845865333e-06, | |
| "loss": 0.2109, | |
| "step": 683 | |
| }, | |
| { | |
| "epoch": 0.8676532769556026, | |
| "grad_norm": 3.665745258331299, | |
| "learning_rate": 1.0431451048435637e-06, | |
| "loss": 0.1851, | |
| "step": 684 | |
| }, | |
| { | |
| "epoch": 0.86892177589852, | |
| "grad_norm": 4.5687127113342285, | |
| "learning_rate": 1.023528883089766e-06, | |
| "loss": 0.2229, | |
| "step": 685 | |
| }, | |
| { | |
| "epoch": 0.8701902748414376, | |
| "grad_norm": 7.112755298614502, | |
| "learning_rate": 1.0040889044680702e-06, | |
| "loss": 0.2175, | |
| "step": 686 | |
| }, | |
| { | |
| "epoch": 0.8714587737843552, | |
| "grad_norm": 4.810848236083984, | |
| "learning_rate": 9.848255506610616e-07, | |
| "loss": 0.2445, | |
| "step": 687 | |
| }, | |
| { | |
| "epoch": 0.8727272727272727, | |
| "grad_norm": 3.9013190269470215, | |
| "learning_rate": 9.657391998834942e-07, | |
| "loss": 0.2054, | |
| "step": 688 | |
| }, | |
| { | |
| "epoch": 0.8739957716701903, | |
| "grad_norm": 4.112692832946777, | |
| "learning_rate": 9.468302268748608e-07, | |
| "loss": 0.2084, | |
| "step": 689 | |
| }, | |
| { | |
| "epoch": 0.8752642706131079, | |
| "grad_norm": 3.5854508876800537, | |
| "learning_rate": 9.2809900289204e-07, | |
| "loss": 0.1765, | |
| "step": 690 | |
| }, | |
| { | |
| "epoch": 0.8765327695560253, | |
| "grad_norm": 4.269070625305176, | |
| "learning_rate": 9.095458957019986e-07, | |
| "loss": 0.1872, | |
| "step": 691 | |
| }, | |
| { | |
| "epoch": 0.8778012684989429, | |
| "grad_norm": 6.264438152313232, | |
| "learning_rate": 8.911712695745823e-07, | |
| "loss": 0.1691, | |
| "step": 692 | |
| }, | |
| { | |
| "epoch": 0.8790697674418605, | |
| "grad_norm": 4.607624530792236, | |
| "learning_rate": 8.729754852753525e-07, | |
| "loss": 0.2539, | |
| "step": 693 | |
| }, | |
| { | |
| "epoch": 0.880338266384778, | |
| "grad_norm": 3.378829002380371, | |
| "learning_rate": 8.549589000585101e-07, | |
| "loss": 0.2015, | |
| "step": 694 | |
| }, | |
| { | |
| "epoch": 0.8816067653276956, | |
| "grad_norm": 4.111599922180176, | |
| "learning_rate": 8.371218676598814e-07, | |
| "loss": 0.212, | |
| "step": 695 | |
| }, | |
| { | |
| "epoch": 0.8828752642706131, | |
| "grad_norm": 7.777480125427246, | |
| "learning_rate": 8.194647382899657e-07, | |
| "loss": 0.2435, | |
| "step": 696 | |
| }, | |
| { | |
| "epoch": 0.8841437632135306, | |
| "grad_norm": 3.5605111122131348, | |
| "learning_rate": 8.019878586270691e-07, | |
| "loss": 0.2158, | |
| "step": 697 | |
| }, | |
| { | |
| "epoch": 0.8854122621564482, | |
| "grad_norm": 3.6146066188812256, | |
| "learning_rate": 7.846915718104897e-07, | |
| "loss": 0.2118, | |
| "step": 698 | |
| }, | |
| { | |
| "epoch": 0.8866807610993658, | |
| "grad_norm": 3.586723804473877, | |
| "learning_rate": 7.675762174337864e-07, | |
| "loss": 0.1818, | |
| "step": 699 | |
| }, | |
| { | |
| "epoch": 0.8879492600422833, | |
| "grad_norm": 4.1939167976379395, | |
| "learning_rate": 7.506421315381074e-07, | |
| "loss": 0.1779, | |
| "step": 700 | |
| }, | |
| { | |
| "epoch": 0.8879492600422833, | |
| "eval_accuracy": 0.8708609271523179, | |
| "eval_f1": 0.7405764966740577, | |
| "eval_loss": 0.28408411145210266, | |
| "eval_precision": 0.8477157360406091, | |
| "eval_recall": 0.65748031496063, | |
| "eval_runtime": 51.6724, | |
| "eval_samples_per_second": 5.361, | |
| "eval_steps_per_second": 0.174, | |
| "step": 700 | |
| }, | |
| { | |
| "epoch": 0.8892177589852008, | |
| "grad_norm": 3.024763584136963, | |
| "learning_rate": 7.338896466055934e-07, | |
| "loss": 0.1503, | |
| "step": 701 | |
| }, | |
| { | |
| "epoch": 0.8904862579281184, | |
| "grad_norm": 5.726973533630371, | |
| "learning_rate": 7.173190915528494e-07, | |
| "loss": 0.2625, | |
| "step": 702 | |
| }, | |
| { | |
| "epoch": 0.8917547568710359, | |
| "grad_norm": 3.9381308555603027, | |
| "learning_rate": 7.009307917244912e-07, | |
| "loss": 0.1853, | |
| "step": 703 | |
| }, | |
| { | |
| "epoch": 0.8930232558139535, | |
| "grad_norm": 6.115716457366943, | |
| "learning_rate": 6.847250688867512e-07, | |
| "loss": 0.204, | |
| "step": 704 | |
| }, | |
| { | |
| "epoch": 0.8942917547568711, | |
| "grad_norm": 6.009361743927002, | |
| "learning_rate": 6.687022412211608e-07, | |
| "loss": 0.2365, | |
| "step": 705 | |
| }, | |
| { | |
| "epoch": 0.8955602536997885, | |
| "grad_norm": 4.4307332038879395, | |
| "learning_rate": 6.52862623318311e-07, | |
| "loss": 0.1889, | |
| "step": 706 | |
| }, | |
| { | |
| "epoch": 0.8968287526427061, | |
| "grad_norm": 4.24178409576416, | |
| "learning_rate": 6.372065261716664e-07, | |
| "loss": 0.1632, | |
| "step": 707 | |
| }, | |
| { | |
| "epoch": 0.8980972515856237, | |
| "grad_norm": 3.5372025966644287, | |
| "learning_rate": 6.217342571714679e-07, | |
| "loss": 0.1961, | |
| "step": 708 | |
| }, | |
| { | |
| "epoch": 0.8993657505285412, | |
| "grad_norm": 4.413094997406006, | |
| "learning_rate": 6.064461200986893e-07, | |
| "loss": 0.2418, | |
| "step": 709 | |
| }, | |
| { | |
| "epoch": 0.9006342494714588, | |
| "grad_norm": 5.046213150024414, | |
| "learning_rate": 5.913424151190772e-07, | |
| "loss": 0.2449, | |
| "step": 710 | |
| }, | |
| { | |
| "epoch": 0.9019027484143763, | |
| "grad_norm": 4.061814785003662, | |
| "learning_rate": 5.764234387772593e-07, | |
| "loss": 0.2165, | |
| "step": 711 | |
| }, | |
| { | |
| "epoch": 0.9031712473572938, | |
| "grad_norm": 4.079103469848633, | |
| "learning_rate": 5.616894839909181e-07, | |
| "loss": 0.2452, | |
| "step": 712 | |
| }, | |
| { | |
| "epoch": 0.9044397463002114, | |
| "grad_norm": 3.2785000801086426, | |
| "learning_rate": 5.471408400450395e-07, | |
| "loss": 0.1731, | |
| "step": 713 | |
| }, | |
| { | |
| "epoch": 0.905708245243129, | |
| "grad_norm": 5.047578811645508, | |
| "learning_rate": 5.327777925862399e-07, | |
| "loss": 0.2437, | |
| "step": 714 | |
| }, | |
| { | |
| "epoch": 0.9069767441860465, | |
| "grad_norm": 4.548726558685303, | |
| "learning_rate": 5.186006236171492e-07, | |
| "loss": 0.2111, | |
| "step": 715 | |
| }, | |
| { | |
| "epoch": 0.908245243128964, | |
| "grad_norm": 3.2352497577667236, | |
| "learning_rate": 5.046096114908783e-07, | |
| "loss": 0.1582, | |
| "step": 716 | |
| }, | |
| { | |
| "epoch": 0.9095137420718816, | |
| "grad_norm": 4.062112331390381, | |
| "learning_rate": 4.908050309055545e-07, | |
| "loss": 0.1853, | |
| "step": 717 | |
| }, | |
| { | |
| "epoch": 0.9107822410147992, | |
| "grad_norm": 3.6328508853912354, | |
| "learning_rate": 4.771871528989247e-07, | |
| "loss": 0.2202, | |
| "step": 718 | |
| }, | |
| { | |
| "epoch": 0.9120507399577167, | |
| "grad_norm": 5.491158962249756, | |
| "learning_rate": 4.6375624484303993e-07, | |
| "loss": 0.245, | |
| "step": 719 | |
| }, | |
| { | |
| "epoch": 0.9133192389006343, | |
| "grad_norm": 4.557428359985352, | |
| "learning_rate": 4.505125704389968e-07, | |
| "loss": 0.2277, | |
| "step": 720 | |
| }, | |
| { | |
| "epoch": 0.9133192389006343, | |
| "eval_accuracy": 0.8675496688741722, | |
| "eval_f1": 0.7345132743362832, | |
| "eval_loss": 0.2847309410572052, | |
| "eval_precision": 0.8383838383838383, | |
| "eval_recall": 0.6535433070866141, | |
| "eval_runtime": 50.2843, | |
| "eval_samples_per_second": 5.509, | |
| "eval_steps_per_second": 0.179, | |
| "step": 720 | |
| }, | |
| { | |
| "epoch": 0.9145877378435519, | |
| "grad_norm": 6.393286228179932, | |
| "learning_rate": 4.374563897117701e-07, | |
| "loss": 0.2023, | |
| "step": 721 | |
| }, | |
| { | |
| "epoch": 0.9158562367864693, | |
| "grad_norm": 3.4379963874816895, | |
| "learning_rate": 4.2458795900509943e-07, | |
| "loss": 0.2034, | |
| "step": 722 | |
| }, | |
| { | |
| "epoch": 0.9171247357293869, | |
| "grad_norm": 3.457280397415161, | |
| "learning_rate": 4.11907530976462e-07, | |
| "loss": 0.1882, | |
| "step": 723 | |
| }, | |
| { | |
| "epoch": 0.9183932346723045, | |
| "grad_norm": 5.479604244232178, | |
| "learning_rate": 3.9941535459210535e-07, | |
| "loss": 0.1896, | |
| "step": 724 | |
| }, | |
| { | |
| "epoch": 0.919661733615222, | |
| "grad_norm": 4.309389591217041, | |
| "learning_rate": 3.8711167512216816e-07, | |
| "loss": 0.2101, | |
| "step": 725 | |
| }, | |
| { | |
| "epoch": 0.9209302325581395, | |
| "grad_norm": 3.776045799255371, | |
| "learning_rate": 3.7499673413585516e-07, | |
| "loss": 0.2342, | |
| "step": 726 | |
| }, | |
| { | |
| "epoch": 0.9221987315010571, | |
| "grad_norm": 4.307753086090088, | |
| "learning_rate": 3.6307076949670173e-07, | |
| "loss": 0.2191, | |
| "step": 727 | |
| }, | |
| { | |
| "epoch": 0.9234672304439746, | |
| "grad_norm": 4.025116920471191, | |
| "learning_rate": 3.513340153578981e-07, | |
| "loss": 0.2486, | |
| "step": 728 | |
| }, | |
| { | |
| "epoch": 0.9247357293868922, | |
| "grad_norm": 5.146588325500488, | |
| "learning_rate": 3.3978670215769726e-07, | |
| "loss": 0.2288, | |
| "step": 729 | |
| }, | |
| { | |
| "epoch": 0.9260042283298098, | |
| "grad_norm": 4.41669225692749, | |
| "learning_rate": 3.284290566148873e-07, | |
| "loss": 0.3089, | |
| "step": 730 | |
| }, | |
| { | |
| "epoch": 0.9272727272727272, | |
| "grad_norm": 4.287278175354004, | |
| "learning_rate": 3.1726130172433756e-07, | |
| "loss": 0.2162, | |
| "step": 731 | |
| }, | |
| { | |
| "epoch": 0.9285412262156448, | |
| "grad_norm": 6.512703895568848, | |
| "learning_rate": 3.062836567526262e-07, | |
| "loss": 0.3019, | |
| "step": 732 | |
| }, | |
| { | |
| "epoch": 0.9298097251585624, | |
| "grad_norm": 4.87605094909668, | |
| "learning_rate": 2.954963372337327e-07, | |
| "loss": 0.2448, | |
| "step": 733 | |
| }, | |
| { | |
| "epoch": 0.9310782241014799, | |
| "grad_norm": 5.300022125244141, | |
| "learning_rate": 2.848995549648048e-07, | |
| "loss": 0.1657, | |
| "step": 734 | |
| }, | |
| { | |
| "epoch": 0.9323467230443975, | |
| "grad_norm": 3.4938764572143555, | |
| "learning_rate": 2.744935180019992e-07, | |
| "loss": 0.1821, | |
| "step": 735 | |
| }, | |
| { | |
| "epoch": 0.9336152219873151, | |
| "grad_norm": 4.725379467010498, | |
| "learning_rate": 2.642784306564017e-07, | |
| "loss": 0.2246, | |
| "step": 736 | |
| }, | |
| { | |
| "epoch": 0.9348837209302325, | |
| "grad_norm": 4.498754024505615, | |
| "learning_rate": 2.542544934900115e-07, | |
| "loss": 0.1903, | |
| "step": 737 | |
| }, | |
| { | |
| "epoch": 0.9361522198731501, | |
| "grad_norm": 3.992305040359497, | |
| "learning_rate": 2.444219033118012e-07, | |
| "loss": 0.1987, | |
| "step": 738 | |
| }, | |
| { | |
| "epoch": 0.9374207188160677, | |
| "grad_norm": 4.835594654083252, | |
| "learning_rate": 2.3478085317386157e-07, | |
| "loss": 0.1839, | |
| "step": 739 | |
| }, | |
| { | |
| "epoch": 0.9386892177589852, | |
| "grad_norm": 4.230025768280029, | |
| "learning_rate": 2.2533153236760197e-07, | |
| "loss": 0.2099, | |
| "step": 740 | |
| }, | |
| { | |
| "epoch": 0.9386892177589852, | |
| "eval_accuracy": 0.8719646799116998, | |
| "eval_f1": 0.7433628318584071, | |
| "eval_loss": 0.2828395962715149, | |
| "eval_precision": 0.8484848484848485, | |
| "eval_recall": 0.6614173228346457, | |
| "eval_runtime": 52.0079, | |
| "eval_samples_per_second": 5.326, | |
| "eval_steps_per_second": 0.173, | |
| "step": 740 | |
| }, | |
| { | |
| "epoch": 0.9399577167019028, | |
| "grad_norm": 3.8719146251678467, | |
| "learning_rate": 2.160741264200361e-07, | |
| "loss": 0.1284, | |
| "step": 741 | |
| }, | |
| { | |
| "epoch": 0.9412262156448203, | |
| "grad_norm": 3.105536699295044, | |
| "learning_rate": 2.070088170901441e-07, | |
| "loss": 0.198, | |
| "step": 742 | |
| }, | |
| { | |
| "epoch": 0.9424947145877378, | |
| "grad_norm": 5.078315258026123, | |
| "learning_rate": 1.981357823652974e-07, | |
| "loss": 0.2043, | |
| "step": 743 | |
| }, | |
| { | |
| "epoch": 0.9437632135306554, | |
| "grad_norm": 2.7954623699188232, | |
| "learning_rate": 1.8945519645776955e-07, | |
| "loss": 0.1437, | |
| "step": 744 | |
| }, | |
| { | |
| "epoch": 0.945031712473573, | |
| "grad_norm": 4.234827995300293, | |
| "learning_rate": 1.8096722980131097e-07, | |
| "loss": 0.2144, | |
| "step": 745 | |
| }, | |
| { | |
| "epoch": 0.9463002114164905, | |
| "grad_norm": 4.860174655914307, | |
| "learning_rate": 1.7267204904780621e-07, | |
| "loss": 0.203, | |
| "step": 746 | |
| }, | |
| { | |
| "epoch": 0.947568710359408, | |
| "grad_norm": 4.2235188484191895, | |
| "learning_rate": 1.64569817064002e-07, | |
| "loss": 0.2186, | |
| "step": 747 | |
| }, | |
| { | |
| "epoch": 0.9488372093023256, | |
| "grad_norm": 4.688875675201416, | |
| "learning_rate": 1.5666069292830544e-07, | |
| "loss": 0.299, | |
| "step": 748 | |
| }, | |
| { | |
| "epoch": 0.9501057082452431, | |
| "grad_norm": 3.522006034851074, | |
| "learning_rate": 1.489448319276676e-07, | |
| "loss": 0.1852, | |
| "step": 749 | |
| }, | |
| { | |
| "epoch": 0.9513742071881607, | |
| "grad_norm": 3.4337916374206543, | |
| "learning_rate": 1.4142238555452603e-07, | |
| "loss": 0.2085, | |
| "step": 750 | |
| }, | |
| { | |
| "epoch": 0.9526427061310783, | |
| "grad_norm": 4.442638874053955, | |
| "learning_rate": 1.3409350150383804e-07, | |
| "loss": 0.3052, | |
| "step": 751 | |
| }, | |
| { | |
| "epoch": 0.9539112050739957, | |
| "grad_norm": 4.747457981109619, | |
| "learning_rate": 1.2695832367017657e-07, | |
| "loss": 0.157, | |
| "step": 752 | |
| }, | |
| { | |
| "epoch": 0.9551797040169133, | |
| "grad_norm": 4.382355213165283, | |
| "learning_rate": 1.200169921449068e-07, | |
| "loss": 0.139, | |
| "step": 753 | |
| }, | |
| { | |
| "epoch": 0.9564482029598309, | |
| "grad_norm": 4.102878570556641, | |
| "learning_rate": 1.1326964321343503e-07, | |
| "loss": 0.2203, | |
| "step": 754 | |
| }, | |
| { | |
| "epoch": 0.9577167019027484, | |
| "grad_norm": 5.611064434051514, | |
| "learning_rate": 1.0671640935252969e-07, | |
| "loss": 0.279, | |
| "step": 755 | |
| }, | |
| { | |
| "epoch": 0.958985200845666, | |
| "grad_norm": 3.508052110671997, | |
| "learning_rate": 1.0035741922772902e-07, | |
| "loss": 0.1757, | |
| "step": 756 | |
| }, | |
| { | |
| "epoch": 0.9602536997885835, | |
| "grad_norm": 8.137269973754883, | |
| "learning_rate": 9.419279769080524e-08, | |
| "loss": 0.3384, | |
| "step": 757 | |
| }, | |
| { | |
| "epoch": 0.961522198731501, | |
| "grad_norm": 5.806704521179199, | |
| "learning_rate": 8.822266577731775e-08, | |
| "loss": 0.1737, | |
| "step": 758 | |
| }, | |
| { | |
| "epoch": 0.9627906976744186, | |
| "grad_norm": 3.4643378257751465, | |
| "learning_rate": 8.244714070423709e-08, | |
| "loss": 0.2046, | |
| "step": 759 | |
| }, | |
| { | |
| "epoch": 0.9640591966173362, | |
| "grad_norm": 4.203879356384277, | |
| "learning_rate": 7.686633586764247e-08, | |
| "loss": 0.2167, | |
| "step": 760 | |
| }, | |
| { | |
| "epoch": 0.9640591966173362, | |
| "eval_accuracy": 0.8708609271523179, | |
| "eval_f1": 0.7405764966740577, | |
| "eval_loss": 0.2835051715373993, | |
| "eval_precision": 0.8477157360406091, | |
| "eval_recall": 0.65748031496063, | |
| "eval_runtime": 51.121, | |
| "eval_samples_per_second": 5.419, | |
| "eval_steps_per_second": 0.176, | |
| "step": 760 | |
| }, | |
| { | |
| "epoch": 0.9653276955602537, | |
| "grad_norm": 3.5984137058258057, | |
| "learning_rate": 7.14803608404957e-08, | |
| "loss": 0.2524, | |
| "step": 761 | |
| }, | |
| { | |
| "epoch": 0.9665961945031712, | |
| "grad_norm": 3.3036506175994873, | |
| "learning_rate": 6.62893213704885e-08, | |
| "loss": 0.2024, | |
| "step": 762 | |
| }, | |
| { | |
| "epoch": 0.9678646934460888, | |
| "grad_norm": 4.572678089141846, | |
| "learning_rate": 6.129331937796856e-08, | |
| "loss": 0.1583, | |
| "step": 763 | |
| }, | |
| { | |
| "epoch": 0.9691331923890063, | |
| "grad_norm": 5.103546619415283, | |
| "learning_rate": 5.64924529539379e-08, | |
| "loss": 0.2218, | |
| "step": 764 | |
| }, | |
| { | |
| "epoch": 0.9704016913319239, | |
| "grad_norm": 5.696028232574463, | |
| "learning_rate": 5.1886816358125425e-08, | |
| "loss": 0.1784, | |
| "step": 765 | |
| }, | |
| { | |
| "epoch": 0.9716701902748415, | |
| "grad_norm": 4.081172466278076, | |
| "learning_rate": 4.747650001713622e-08, | |
| "loss": 0.2345, | |
| "step": 766 | |
| }, | |
| { | |
| "epoch": 0.972938689217759, | |
| "grad_norm": 3.9841372966766357, | |
| "learning_rate": 4.326159052267964e-08, | |
| "loss": 0.1854, | |
| "step": 767 | |
| }, | |
| { | |
| "epoch": 0.9742071881606765, | |
| "grad_norm": 4.337159156799316, | |
| "learning_rate": 3.924217062986402e-08, | |
| "loss": 0.2464, | |
| "step": 768 | |
| }, | |
| { | |
| "epoch": 0.9754756871035941, | |
| "grad_norm": 3.1016838550567627, | |
| "learning_rate": 3.541831925557571e-08, | |
| "loss": 0.1883, | |
| "step": 769 | |
| }, | |
| { | |
| "epoch": 0.9767441860465116, | |
| "grad_norm": 4.459488391876221, | |
| "learning_rate": 3.179011147693034e-08, | |
| "loss": 0.2386, | |
| "step": 770 | |
| }, | |
| { | |
| "epoch": 0.9780126849894292, | |
| "grad_norm": 6.270811080932617, | |
| "learning_rate": 2.835761852979291e-08, | |
| "loss": 0.2679, | |
| "step": 771 | |
| }, | |
| { | |
| "epoch": 0.9792811839323468, | |
| "grad_norm": 5.3437604904174805, | |
| "learning_rate": 2.512090780738552e-08, | |
| "loss": 0.2517, | |
| "step": 772 | |
| }, | |
| { | |
| "epoch": 0.9805496828752642, | |
| "grad_norm": 2.9482016563415527, | |
| "learning_rate": 2.2080042858961815e-08, | |
| "loss": 0.1528, | |
| "step": 773 | |
| }, | |
| { | |
| "epoch": 0.9818181818181818, | |
| "grad_norm": 6.166524887084961, | |
| "learning_rate": 1.9235083388559063e-08, | |
| "loss": 0.2392, | |
| "step": 774 | |
| }, | |
| { | |
| "epoch": 0.9830866807610994, | |
| "grad_norm": 3.6311111450195312, | |
| "learning_rate": 1.6586085253824657e-08, | |
| "loss": 0.2192, | |
| "step": 775 | |
| }, | |
| { | |
| "epoch": 0.9843551797040169, | |
| "grad_norm": 3.79732084274292, | |
| "learning_rate": 1.4133100464922556e-08, | |
| "loss": 0.1825, | |
| "step": 776 | |
| }, | |
| { | |
| "epoch": 0.9856236786469345, | |
| "grad_norm": 2.6432785987854004, | |
| "learning_rate": 1.187617718350853e-08, | |
| "loss": 0.1475, | |
| "step": 777 | |
| }, | |
| { | |
| "epoch": 0.986892177589852, | |
| "grad_norm": 2.565079927444458, | |
| "learning_rate": 9.81535972178871e-09, | |
| "loss": 0.157, | |
| "step": 778 | |
| }, | |
| { | |
| "epoch": 0.9881606765327695, | |
| "grad_norm": 4.657979965209961, | |
| "learning_rate": 7.950688541646934e-09, | |
| "loss": 0.2102, | |
| "step": 779 | |
| }, | |
| { | |
| "epoch": 0.9894291754756871, | |
| "grad_norm": 5.906298637390137, | |
| "learning_rate": 6.282200253847625e-09, | |
| "loss": 0.1901, | |
| "step": 780 | |
| }, | |
| { | |
| "epoch": 0.9894291754756871, | |
| "eval_accuracy": 0.8686534216335541, | |
| "eval_f1": 0.7373068432671082, | |
| "eval_loss": 0.28413623571395874, | |
| "eval_precision": 0.8391959798994975, | |
| "eval_recall": 0.65748031496063, | |
| "eval_runtime": 51.8309, | |
| "eval_samples_per_second": 5.344, | |
| "eval_steps_per_second": 0.174, | |
| "step": 780 | |
| }, | |
| { | |
| "epoch": 0.9906976744186047, | |
| "grad_norm": 4.27111291885376, | |
| "learning_rate": 4.809927617324128e-09, | |
| "loss": 0.2112, | |
| "step": 781 | |
| }, | |
| { | |
| "epoch": 0.9919661733615222, | |
| "grad_norm": 6.234564781188965, | |
| "learning_rate": 3.533899538528118e-09, | |
| "loss": 0.2754, | |
| "step": 782 | |
| }, | |
| { | |
| "epoch": 0.9932346723044397, | |
| "grad_norm": 5.112316131591797, | |
| "learning_rate": 2.4541410708667223e-09, | |
| "loss": 0.2337, | |
| "step": 783 | |
| }, | |
| { | |
| "epoch": 0.9945031712473573, | |
| "grad_norm": 4.032815933227539, | |
| "learning_rate": 1.5706734142106883e-09, | |
| "loss": 0.2389, | |
| "step": 784 | |
| }, | |
| { | |
| "epoch": 0.9957716701902748, | |
| "grad_norm": 3.928197145462036, | |
| "learning_rate": 8.835139144736104e-10, | |
| "loss": 0.2295, | |
| "step": 785 | |
| }, | |
| { | |
| "epoch": 0.9970401691331924, | |
| "grad_norm": 5.0506815910339355, | |
| "learning_rate": 3.9267606327775266e-10, | |
| "loss": 0.1851, | |
| "step": 786 | |
| }, | |
| { | |
| "epoch": 0.99830866807611, | |
| "grad_norm": 4.563787937164307, | |
| "learning_rate": 9.816949768204354e-11, | |
| "loss": 0.2297, | |
| "step": 787 | |
| }, | |
| { | |
| "epoch": 0.9995771670190274, | |
| "grad_norm": 5.677037715911865, | |
| "learning_rate": 0.0, | |
| "loss": 0.19, | |
| "step": 788 | |
| } | |
| ], | |
| "logging_steps": 1, | |
| "max_steps": 788, | |
| "num_input_tokens_seen": 0, | |
| "num_train_epochs": 1, | |
| "save_steps": 100, | |
| "stateful_callbacks": { | |
| "TrainerControl": { | |
| "args": { | |
| "should_epoch_stop": false, | |
| "should_evaluate": false, | |
| "should_log": false, | |
| "should_save": true, | |
| "should_training_stop": true | |
| }, | |
| "attributes": {} | |
| } | |
| }, | |
| "total_flos": 2.9528591072598426e+17, | |
| "train_batch_size": 6, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |
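
The structure above matches a Hugging Face transformers `trainer_state.json`: `log_history` interleaves per-step training entries (`loss`, `grad_norm`, `learning_rate`) with periodic `eval_*` snapshots. Below is a minimal sketch for summarizing such a log offline; the filename `trainer_state.json` and the stdlib-only approach are assumptions for illustration, not part of the file itself.

```python
# Minimal sketch: summarize the training log above.
# Assumptions (not part of the original file): the JSON is saved as
# "trainer_state.json" in the working directory; stdlib only.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Per-step training entries carry "loss"; evaluation snapshots carry
# "eval_loss"/"eval_f1" instead, so a key test cleanly splits the two.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_f1" in e]

# Best evaluation pass by F1 across the logged snapshots.
best = max(eval_logs, key=lambda e: e["eval_f1"])
print(f"best eval_f1 {best['eval_f1']:.4f} at step {best['step']}")
print(f"  accuracy={best['eval_accuracy']:.4f}  "
      f"precision={best['eval_precision']:.4f}  "
      f"recall={best['eval_recall']:.4f}")

# Final training loss as a quick sanity check against max_steps.
last = train_logs[-1]
print(f"final loss {last['loss']:.4f} at step {last['step']} / {state['max_steps']}")
```

Note that with `save_steps` at 100 and evaluation every 20 steps, the step with the best metric need not coincide with a saved checkpoint, so the printed step is a guide to the eval history rather than a guarantee of an on-disk checkpoint.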