{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 20,
  "global_step": 1078,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0,
      "eval_accuracy": 0.738359201773836,
      "eval_f1": 0.23376623376623376,
      "eval_loss": 0.5994226336479187,
      "eval_precision": 0.6545454545454545,
      "eval_recall": 0.1422924901185771,
      "eval_runtime": 47.8594,
      "eval_samples_per_second": 5.767,
      "eval_steps_per_second": 0.188,
      "step": 0
    },
    {
      "epoch": 0.0009276437847866419,
      "grad_norm": 2.113847255706787,
      "learning_rate": 1.8518518518518518e-07,
      "loss": 0.6464,
      "step": 1
    },
    {
      "epoch": 0.0018552875695732839,
      "grad_norm": 2.337275505065918,
      "learning_rate": 3.7037037037037036e-07,
      "loss": 0.7325,
      "step": 2
    },
    {
      "epoch": 0.0027829313543599257,
      "grad_norm": 2.0731401443481445,
      "learning_rate": 5.555555555555555e-07,
      "loss": 0.6441,
      "step": 3
    },
    {
      "epoch": 0.0037105751391465678,
      "grad_norm": 2.075726270675659,
      "learning_rate": 7.407407407407407e-07,
      "loss": 0.6722,
      "step": 4
    },
    {
      "epoch": 0.00463821892393321,
      "grad_norm": 2.2193410396575928,
      "learning_rate": 9.259259259259259e-07,
      "loss": 0.6676,
      "step": 5
    },
    {
      "epoch": 0.0055658627087198514,
      "grad_norm": 2.2222299575805664,
      "learning_rate": 1.111111111111111e-06,
      "loss": 0.5932,
      "step": 6
    },
    {
      "epoch": 0.006493506493506494,
      "grad_norm": 2.5532214641571045,
      "learning_rate": 1.2962962962962962e-06,
      "loss": 0.6558,
      "step": 7
    },
    {
      "epoch": 0.0074211502782931356,
      "grad_norm": 1.7259494066238403,
      "learning_rate": 1.4814814814814815e-06,
      "loss": 0.5976,
      "step": 8
    },
    {
      "epoch": 0.008348794063079777,
      "grad_norm": 2.020770788192749,
      "learning_rate": 1.6666666666666667e-06,
      "loss": 0.6576,
      "step": 9
    },
    {
      "epoch": 0.00927643784786642,
      "grad_norm": 2.5885586738586426,
      "learning_rate": 1.8518518518518519e-06,
      "loss": 0.6656,
      "step": 10
    },
    {
      "epoch": 0.01020408163265306,
      "grad_norm": 1.901673674583435,
      "learning_rate": 2.037037037037037e-06,
      "loss": 0.6293,
      "step": 11
    },
    {
      "epoch": 0.011131725417439703,
      "grad_norm": 1.867441177368164,
      "learning_rate": 2.222222222222222e-06,
      "loss": 0.5698,
      "step": 12
    },
    {
      "epoch": 0.012059369202226345,
      "grad_norm": 2.405125141143799,
      "learning_rate": 2.4074074074074075e-06,
      "loss": 0.5984,
      "step": 13
    },
    {
      "epoch": 0.012987012987012988,
      "grad_norm": 2.0719292163848877,
      "learning_rate": 2.5925925925925925e-06,
      "loss": 0.5933,
      "step": 14
    },
    {
      "epoch": 0.013914656771799629,
      "grad_norm": 2.2819459438323975,
      "learning_rate": 2.7777777777777783e-06,
      "loss": 0.6439,
      "step": 15
    },
    {
      "epoch": 0.014842300556586271,
      "grad_norm": 2.3114988803863525,
      "learning_rate": 2.962962962962963e-06,
      "loss": 0.6118,
      "step": 16
    },
    {
      "epoch": 0.015769944341372914,
      "grad_norm": 2.1615548133850098,
      "learning_rate": 3.1481481481481483e-06,
      "loss": 0.5185,
      "step": 17
    },
    {
      "epoch": 0.016697588126159554,
      "grad_norm": 2.3003029823303223,
      "learning_rate": 3.3333333333333333e-06,
      "loss": 0.6267,
      "step": 18
    },
    {
      "epoch": 0.017625231910946195,
      "grad_norm": 2.284081220626831,
      "learning_rate": 3.5185185185185187e-06,
      "loss": 0.6659,
      "step": 19
    },
    {
      "epoch": 0.01855287569573284,
      "grad_norm": 1.8381303548812866,
      "learning_rate": 3.7037037037037037e-06,
      "loss": 0.572,
      "step": 20
    },
    {
      "epoch": 0.01855287569573284,
      "eval_accuracy": 0.7394678492239468,
      "eval_f1": 0.22950819672131148,
      "eval_loss": 0.5945659279823303,
      "eval_precision": 0.6730769230769231,
      "eval_recall": 0.1383399209486166,
      "eval_runtime": 47.6156,
      "eval_samples_per_second": 5.796,
      "eval_steps_per_second": 0.189,
      "step": 20
    },
    {
      "epoch": 0.01948051948051948,
      "grad_norm": 2.036238670349121,
      "learning_rate": 3.88888888888889e-06,
      "loss": 0.6409,
      "step": 21
    },
    {
      "epoch": 0.02040816326530612,
      "grad_norm": 1.9110214710235596,
      "learning_rate": 4.074074074074074e-06,
      "loss": 0.6008,
      "step": 22
    },
    {
      "epoch": 0.021335807050092765,
      "grad_norm": 2.4081571102142334,
      "learning_rate": 4.2592592592592596e-06,
      "loss": 0.632,
      "step": 23
    },
    {
      "epoch": 0.022263450834879406,
      "grad_norm": 1.9782453775405884,
      "learning_rate": 4.444444444444444e-06,
      "loss": 0.6474,
      "step": 24
    },
    {
      "epoch": 0.023191094619666047,
      "grad_norm": 1.7934902906417847,
      "learning_rate": 4.62962962962963e-06,
      "loss": 0.5803,
      "step": 25
    },
    {
      "epoch": 0.02411873840445269,
      "grad_norm": 2.6915359497070312,
      "learning_rate": 4.814814814814815e-06,
      "loss": 0.6505,
      "step": 26
    },
    {
      "epoch": 0.02504638218923933,
      "grad_norm": 1.806140422821045,
      "learning_rate": 5e-06,
      "loss": 0.5283,
      "step": 27
    },
    {
      "epoch": 0.025974025974025976,
      "grad_norm": 1.7927738428115845,
      "learning_rate": 5.185185185185185e-06,
      "loss": 0.5696,
      "step": 28
    },
    {
      "epoch": 0.026901669758812616,
      "grad_norm": 1.7386640310287476,
      "learning_rate": 5.370370370370371e-06,
      "loss": 0.4902,
      "step": 29
    },
    {
      "epoch": 0.027829313543599257,
      "grad_norm": 2.015052318572998,
      "learning_rate": 5.555555555555557e-06,
      "loss": 0.6123,
      "step": 30
    },
    {
      "epoch": 0.0287569573283859,
      "grad_norm": 1.620802402496338,
      "learning_rate": 5.740740740740741e-06,
      "loss": 0.5611,
      "step": 31
    },
    {
      "epoch": 0.029684601113172542,
      "grad_norm": 1.8895862102508545,
      "learning_rate": 5.925925925925926e-06,
      "loss": 0.6024,
      "step": 32
    },
    {
      "epoch": 0.030612244897959183,
      "grad_norm": 2.030715227127075,
      "learning_rate": 6.111111111111112e-06,
      "loss": 0.6245,
      "step": 33
    },
    {
      "epoch": 0.03153988868274583,
      "grad_norm": 2.208017349243164,
      "learning_rate": 6.296296296296297e-06,
      "loss": 0.5331,
      "step": 34
    },
    {
      "epoch": 0.032467532467532464,
      "grad_norm": 2.373842239379883,
      "learning_rate": 6.481481481481482e-06,
      "loss": 0.638,
      "step": 35
    },
    {
      "epoch": 0.03339517625231911,
      "grad_norm": 2.170443296432495,
      "learning_rate": 6.666666666666667e-06,
      "loss": 0.563,
      "step": 36
    },
    {
      "epoch": 0.03432282003710575,
      "grad_norm": 1.4894213676452637,
      "learning_rate": 6.851851851851853e-06,
      "loss": 0.5338,
      "step": 37
    },
    {
      "epoch": 0.03525046382189239,
      "grad_norm": 1.9410276412963867,
      "learning_rate": 7.0370370370370375e-06,
      "loss": 0.5609,
      "step": 38
    },
    {
      "epoch": 0.036178107606679034,
      "grad_norm": 2.315661668777466,
      "learning_rate": 7.222222222222223e-06,
      "loss": 0.4866,
      "step": 39
    },
    {
      "epoch": 0.03710575139146568,
      "grad_norm": 1.8483999967575073,
      "learning_rate": 7.4074074074074075e-06,
      "loss": 0.5829,
      "step": 40
    },
    {
      "epoch": 0.03710575139146568,
      "eval_accuracy": 0.7494456762749445,
      "eval_f1": 0.2848101265822785,
      "eval_loss": 0.5677995681762695,
      "eval_precision": 0.7142857142857143,
      "eval_recall": 0.17786561264822134,
      "eval_runtime": 46.6917,
      "eval_samples_per_second": 5.911,
      "eval_steps_per_second": 0.193,
      "step": 40
    },
    {
      "epoch": 0.038033395176252316,
      "grad_norm": 2.0759365558624268,
      "learning_rate": 7.592592592592594e-06,
      "loss": 0.5541,
      "step": 41
    },
    {
      "epoch": 0.03896103896103896,
      "grad_norm": 1.7480107545852661,
      "learning_rate": 7.77777777777778e-06,
      "loss": 0.5236,
      "step": 42
    },
    {
      "epoch": 0.039888682745825604,
      "grad_norm": 1.8141703605651855,
      "learning_rate": 7.962962962962963e-06,
      "loss": 0.519,
      "step": 43
    },
    {
      "epoch": 0.04081632653061224,
      "grad_norm": 1.8323687314987183,
      "learning_rate": 8.148148148148148e-06,
      "loss": 0.5472,
      "step": 44
    },
    {
      "epoch": 0.041743970315398886,
      "grad_norm": 1.6657449007034302,
      "learning_rate": 8.333333333333334e-06,
      "loss": 0.4735,
      "step": 45
    },
    {
      "epoch": 0.04267161410018553,
      "grad_norm": 2.900596857070923,
      "learning_rate": 8.518518518518519e-06,
      "loss": 0.5968,
      "step": 46
    },
    {
      "epoch": 0.04359925788497217,
      "grad_norm": 1.9013563394546509,
      "learning_rate": 8.703703703703705e-06,
      "loss": 0.5436,
      "step": 47
    },
    {
      "epoch": 0.04452690166975881,
      "grad_norm": 1.7270978689193726,
      "learning_rate": 8.888888888888888e-06,
      "loss": 0.4987,
      "step": 48
    },
    {
      "epoch": 0.045454545454545456,
      "grad_norm": 1.4679995775222778,
      "learning_rate": 9.074074074074075e-06,
      "loss": 0.4405,
      "step": 49
    },
    {
      "epoch": 0.04638218923933209,
      "grad_norm": 2.332545042037964,
      "learning_rate": 9.25925925925926e-06,
      "loss": 0.5181,
      "step": 50
    },
    {
      "epoch": 0.04730983302411874,
      "grad_norm": 2.0095181465148926,
      "learning_rate": 9.444444444444445e-06,
      "loss": 0.5561,
      "step": 51
    },
    {
      "epoch": 0.04823747680890538,
      "grad_norm": 2.628974199295044,
      "learning_rate": 9.62962962962963e-06,
      "loss": 0.5791,
      "step": 52
    },
    {
      "epoch": 0.04916512059369202,
      "grad_norm": 1.918746829032898,
      "learning_rate": 9.814814814814815e-06,
      "loss": 0.5171,
      "step": 53
    },
    {
      "epoch": 0.05009276437847866,
      "grad_norm": 1.907528281211853,
      "learning_rate": 1e-05,
      "loss": 0.5104,
      "step": 54
    },
    {
      "epoch": 0.05102040816326531,
      "grad_norm": 1.70274817943573,
      "learning_rate": 1.0185185185185186e-05,
      "loss": 0.5104,
      "step": 55
    },
    {
      "epoch": 0.05194805194805195,
      "grad_norm": 2.177898406982422,
      "learning_rate": 1.037037037037037e-05,
      "loss": 0.4683,
      "step": 56
    },
    {
      "epoch": 0.05287569573283859,
      "grad_norm": 2.1687746047973633,
      "learning_rate": 1.0555555555555557e-05,
      "loss": 0.4888,
      "step": 57
    },
    {
      "epoch": 0.05380333951762523,
      "grad_norm": 3.19343900680542,
      "learning_rate": 1.0740740740740742e-05,
      "loss": 0.5476,
      "step": 58
    },
    {
      "epoch": 0.05473098330241188,
      "grad_norm": 2.7876338958740234,
      "learning_rate": 1.0925925925925926e-05,
      "loss": 0.4876,
      "step": 59
    },
    {
      "epoch": 0.055658627087198514,
      "grad_norm": 2.028104066848755,
      "learning_rate": 1.1111111111111113e-05,
      "loss": 0.4808,
      "step": 60
    },
    {
      "epoch": 0.055658627087198514,
      "eval_accuracy": 0.7694013303769401,
      "eval_f1": 0.45549738219895286,
      "eval_loss": 0.512853741645813,
      "eval_precision": 0.6744186046511628,
      "eval_recall": 0.3438735177865613,
      "eval_runtime": 46.8717,
      "eval_samples_per_second": 5.888,
      "eval_steps_per_second": 0.192,
      "step": 60
    },
    {
      "epoch": 0.05658627087198516,
      "grad_norm": 2.1539430618286133,
      "learning_rate": 1.1296296296296297e-05,
      "loss": 0.5098,
      "step": 61
    },
    {
      "epoch": 0.0575139146567718,
      "grad_norm": 2.1614112854003906,
      "learning_rate": 1.1481481481481482e-05,
      "loss": 0.4424,
      "step": 62
    },
    {
      "epoch": 0.05844155844155844,
      "grad_norm": 2.3000025749206543,
      "learning_rate": 1.1666666666666668e-05,
      "loss": 0.4228,
      "step": 63
    },
    {
      "epoch": 0.059369202226345084,
      "grad_norm": 2.695854663848877,
      "learning_rate": 1.1851851851851852e-05,
      "loss": 0.4409,
      "step": 64
    },
    {
      "epoch": 0.06029684601113173,
      "grad_norm": 2.0516905784606934,
      "learning_rate": 1.2037037037037039e-05,
      "loss": 0.5061,
      "step": 65
    },
    {
      "epoch": 0.061224489795918366,
      "grad_norm": 2.1683146953582764,
      "learning_rate": 1.2222222222222224e-05,
      "loss": 0.4346,
      "step": 66
    },
    {
      "epoch": 0.06215213358070501,
      "grad_norm": 1.7861865758895874,
      "learning_rate": 1.2407407407407408e-05,
      "loss": 0.4451,
      "step": 67
    },
    {
      "epoch": 0.06307977736549165,
      "grad_norm": 2.505061626434326,
      "learning_rate": 1.2592592592592593e-05,
      "loss": 0.4545,
      "step": 68
    },
    {
      "epoch": 0.0640074211502783,
      "grad_norm": 2.663344621658325,
      "learning_rate": 1.2777777777777777e-05,
      "loss": 0.4741,
      "step": 69
    },
    {
      "epoch": 0.06493506493506493,
      "grad_norm": 2.4229860305786133,
      "learning_rate": 1.2962962962962964e-05,
      "loss": 0.4564,
      "step": 70
    },
    {
      "epoch": 0.06586270871985157,
      "grad_norm": 1.9748098850250244,
      "learning_rate": 1.314814814814815e-05,
      "loss": 0.4832,
      "step": 71
    },
    {
      "epoch": 0.06679035250463822,
      "grad_norm": 2.0413177013397217,
      "learning_rate": 1.3333333333333333e-05,
      "loss": 0.4377,
      "step": 72
    },
    {
      "epoch": 0.06771799628942486,
      "grad_norm": 2.965312957763672,
      "learning_rate": 1.351851851851852e-05,
      "loss": 0.3847,
      "step": 73
    },
    {
      "epoch": 0.0686456400742115,
      "grad_norm": 3.3258345127105713,
      "learning_rate": 1.3703703703703706e-05,
      "loss": 0.441,
      "step": 74
    },
    {
      "epoch": 0.06957328385899815,
      "grad_norm": 4.911780834197998,
      "learning_rate": 1.388888888888889e-05,
      "loss": 0.5277,
      "step": 75
    },
    {
      "epoch": 0.07050092764378478,
      "grad_norm": 3.556264638900757,
      "learning_rate": 1.4074074074074075e-05,
      "loss": 0.5102,
      "step": 76
    },
    {
      "epoch": 0.07142857142857142,
      "grad_norm": 2.313514232635498,
      "learning_rate": 1.4259259259259259e-05,
      "loss": 0.3874,
      "step": 77
    },
    {
      "epoch": 0.07235621521335807,
      "grad_norm": 2.3410682678222656,
      "learning_rate": 1.4444444444444446e-05,
      "loss": 0.3794,
      "step": 78
    },
    {
      "epoch": 0.07328385899814471,
      "grad_norm": 1.974550724029541,
      "learning_rate": 1.4629629629629631e-05,
      "loss": 0.3691,
      "step": 79
    },
    {
      "epoch": 0.07421150278293136,
      "grad_norm": 3.7399516105651855,
      "learning_rate": 1.4814814814814815e-05,
      "loss": 0.498,
      "step": 80
    },
    {
      "epoch": 0.07421150278293136,
      "eval_accuracy": 0.7804878048780488,
      "eval_f1": 0.5217391304347826,
      "eval_loss": 0.46577775478363037,
      "eval_precision": 0.6708074534161491,
      "eval_recall": 0.4268774703557312,
      "eval_runtime": 47.3211,
      "eval_samples_per_second": 5.832,
      "eval_steps_per_second": 0.19,
      "step": 80
    },
    {
      "epoch": 0.075139146567718,
      "grad_norm": 4.030659198760986,
      "learning_rate": 1.5000000000000002e-05,
      "loss": 0.4012,
      "step": 81
    },
    {
      "epoch": 0.07606679035250463,
      "grad_norm": 4.128874778747559,
      "learning_rate": 1.5185185185185187e-05,
      "loss": 0.4411,
      "step": 82
    },
    {
      "epoch": 0.07699443413729128,
      "grad_norm": 5.405181884765625,
      "learning_rate": 1.537037037037037e-05,
      "loss": 0.3886,
      "step": 83
    },
    {
      "epoch": 0.07792207792207792,
      "grad_norm": 2.473198175430298,
      "learning_rate": 1.555555555555556e-05,
      "loss": 0.3348,
      "step": 84
    },
    {
      "epoch": 0.07884972170686456,
      "grad_norm": 3.9382104873657227,
      "learning_rate": 1.5740740740740744e-05,
      "loss": 0.3732,
      "step": 85
    },
    {
      "epoch": 0.07977736549165121,
      "grad_norm": 2.209444761276245,
      "learning_rate": 1.5925925925925926e-05,
      "loss": 0.3247,
      "step": 86
    },
    {
      "epoch": 0.08070500927643785,
      "grad_norm": 4.216385364532471,
      "learning_rate": 1.6111111111111115e-05,
      "loss": 0.4295,
      "step": 87
    },
    {
      "epoch": 0.08163265306122448,
      "grad_norm": 4.730212211608887,
      "learning_rate": 1.6296296296296297e-05,
      "loss": 0.3214,
      "step": 88
    },
    {
      "epoch": 0.08256029684601113,
      "grad_norm": 2.821192741394043,
      "learning_rate": 1.6481481481481482e-05,
      "loss": 0.3864,
      "step": 89
    },
    {
      "epoch": 0.08348794063079777,
      "grad_norm": 2.9178500175476074,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 0.3257,
      "step": 90
    },
    {
      "epoch": 0.08441558441558442,
      "grad_norm": 4.074183464050293,
      "learning_rate": 1.6851851851851853e-05,
      "loss": 0.3916,
      "step": 91
    },
    {
      "epoch": 0.08534322820037106,
      "grad_norm": 2.721669912338257,
      "learning_rate": 1.7037037037037038e-05,
      "loss": 0.3153,
      "step": 92
    },
    {
      "epoch": 0.0862708719851577,
      "grad_norm": 4.657402992248535,
      "learning_rate": 1.7222222222222224e-05,
      "loss": 0.4462,
      "step": 93
    },
    {
      "epoch": 0.08719851576994433,
      "grad_norm": 2.9709548950195312,
      "learning_rate": 1.740740740740741e-05,
      "loss": 0.3124,
      "step": 94
    },
    {
      "epoch": 0.08812615955473098,
      "grad_norm": 5.8010711669921875,
      "learning_rate": 1.7592592592592595e-05,
      "loss": 0.2713,
      "step": 95
    },
    {
      "epoch": 0.08905380333951762,
      "grad_norm": 3.4263553619384766,
      "learning_rate": 1.7777777777777777e-05,
      "loss": 0.3161,
      "step": 96
    },
    {
      "epoch": 0.08998144712430427,
      "grad_norm": 3.680510997772217,
      "learning_rate": 1.7962962962962965e-05,
      "loss": 0.3464,
      "step": 97
    },
    {
      "epoch": 0.09090909090909091,
      "grad_norm": 5.526573657989502,
      "learning_rate": 1.814814814814815e-05,
      "loss": 0.2899,
      "step": 98
    },
    {
      "epoch": 0.09183673469387756,
      "grad_norm": 3.2282707691192627,
      "learning_rate": 1.8333333333333333e-05,
      "loss": 0.2088,
      "step": 99
    },
    {
      "epoch": 0.09276437847866419,
      "grad_norm": 3.8928277492523193,
      "learning_rate": 1.851851851851852e-05,
      "loss": 0.2531,
      "step": 100
    },
    {
      "epoch": 0.09276437847866419,
      "eval_accuracy": 0.8015521064301552,
      "eval_f1": 0.5665859564164649,
      "eval_loss": 0.4834875166416168,
      "eval_precision": 0.73125,
      "eval_recall": 0.4624505928853755,
      "eval_runtime": 48.1977,
      "eval_samples_per_second": 5.726,
      "eval_steps_per_second": 0.187,
      "step": 100
    },
    {
      "epoch": 0.09369202226345083,
      "grad_norm": 4.2124924659729,
      "learning_rate": 1.8703703703703707e-05,
      "loss": 0.2609,
      "step": 101
    },
    {
      "epoch": 0.09461966604823747,
      "grad_norm": 5.753302097320557,
      "learning_rate": 1.888888888888889e-05,
      "loss": 0.3886,
      "step": 102
    },
    {
      "epoch": 0.09554730983302412,
      "grad_norm": 4.0034050941467285,
      "learning_rate": 1.9074074074074075e-05,
      "loss": 0.2714,
      "step": 103
    },
    {
      "epoch": 0.09647495361781076,
      "grad_norm": 7.041585922241211,
      "learning_rate": 1.925925925925926e-05,
      "loss": 0.3665,
      "step": 104
    },
    {
      "epoch": 0.09740259740259741,
      "grad_norm": 5.10760498046875,
      "learning_rate": 1.9444444444444445e-05,
      "loss": 0.3047,
      "step": 105
    },
    {
      "epoch": 0.09833024118738404,
      "grad_norm": 7.445008754730225,
      "learning_rate": 1.962962962962963e-05,
      "loss": 0.2991,
      "step": 106
    },
    {
      "epoch": 0.09925788497217068,
      "grad_norm": 5.59462833404541,
      "learning_rate": 1.9814814814814816e-05,
      "loss": 0.3124,
      "step": 107
    },
    {
      "epoch": 0.10018552875695733,
      "grad_norm": 4.407949447631836,
      "learning_rate": 2e-05,
      "loss": 0.2371,
      "step": 108
    },
    {
      "epoch": 0.10111317254174397,
      "grad_norm": 7.777821063995361,
      "learning_rate": 1.999994755236596e-05,
      "loss": 0.4208,
      "step": 109
    },
    {
      "epoch": 0.10204081632653061,
      "grad_norm": 9.282930374145508,
      "learning_rate": 1.999979021001399e-05,
      "loss": 0.2394,
      "step": 110
    },
    {
      "epoch": 0.10296846011131726,
      "grad_norm": 3.985445976257324,
      "learning_rate": 1.999952797459453e-05,
      "loss": 0.3059,
      "step": 111
    },
    {
      "epoch": 0.1038961038961039,
      "grad_norm": 8.832866668701172,
      "learning_rate": 1.999916084885832e-05,
      "loss": 0.2748,
      "step": 112
    },
    {
      "epoch": 0.10482374768089053,
      "grad_norm": 3.676673173904419,
      "learning_rate": 1.9998688836656322e-05,
      "loss": 0.2271,
      "step": 113
    },
    {
      "epoch": 0.10575139146567718,
      "grad_norm": 4.632993698120117,
      "learning_rate": 1.9998111942939727e-05,
      "loss": 0.292,
      "step": 114
    },
    {
      "epoch": 0.10667903525046382,
      "grad_norm": 4.095834732055664,
      "learning_rate": 1.9997430173759876e-05,
      "loss": 0.2222,
      "step": 115
    },
    {
      "epoch": 0.10760667903525047,
      "grad_norm": 5.404327392578125,
      "learning_rate": 1.9996643536268202e-05,
      "loss": 0.3083,
      "step": 116
    },
    {
      "epoch": 0.10853432282003711,
      "grad_norm": 5.261657238006592,
      "learning_rate": 1.9995752038716166e-05,
      "loss": 0.2628,
      "step": 117
    },
    {
      "epoch": 0.10946196660482375,
      "grad_norm": 5.081439018249512,
      "learning_rate": 1.9994755690455154e-05,
      "loss": 0.3403,
      "step": 118
    },
    {
      "epoch": 0.11038961038961038,
      "grad_norm": 8.73869800567627,
      "learning_rate": 1.999365450193638e-05,
      "loss": 0.2949,
      "step": 119
    },
    {
      "epoch": 0.11131725417439703,
      "grad_norm": 6.3659586906433105,
      "learning_rate": 1.99924484847108e-05,
      "loss": 0.2925,
      "step": 120
    },
    {
      "epoch": 0.11131725417439703,
      "eval_accuracy": 0.8015521064301552,
      "eval_f1": 0.5095890410958904,
      "eval_loss": 0.500335693359375,
      "eval_precision": 0.8303571428571429,
      "eval_recall": 0.3675889328063241,
      "eval_runtime": 48.0846,
      "eval_samples_per_second": 5.74,
      "eval_steps_per_second": 0.187,
      "step": 120
    },
    {
      "epoch": 0.11224489795918367,
      "grad_norm": 4.58190393447876,
      "learning_rate": 1.9991137651428957e-05,
      "loss": 0.2265,
      "step": 121
    },
    {
      "epoch": 0.11317254174397032,
      "grad_norm": 5.173889636993408,
      "learning_rate": 1.998972201584088e-05,
      "loss": 0.2239,
      "step": 122
    },
    {
      "epoch": 0.11410018552875696,
      "grad_norm": 4.783614158630371,
      "learning_rate": 1.998820159279591e-05,
      "loss": 0.2998,
      "step": 123
    },
    {
      "epoch": 0.1150278293135436,
      "grad_norm": 3.5412709712982178,
      "learning_rate": 1.9986576398242566e-05,
      "loss": 0.2021,
      "step": 124
    },
    {
      "epoch": 0.11595547309833024,
      "grad_norm": 3.692047119140625,
      "learning_rate": 1.998484644922837e-05,
      "loss": 0.2432,
      "step": 125
    },
    {
      "epoch": 0.11688311688311688,
      "grad_norm": 2.976855993270874,
      "learning_rate": 1.9983011763899674e-05,
      "loss": 0.2703,
      "step": 126
    },
    {
      "epoch": 0.11781076066790352,
      "grad_norm": 5.132311820983887,
      "learning_rate": 1.998107236150145e-05,
      "loss": 0.3625,
      "step": 127
    },
    {
      "epoch": 0.11873840445269017,
      "grad_norm": 5.332205772399902,
      "learning_rate": 1.997902826237712e-05,
      "loss": 0.3431,
      "step": 128
    },
    {
      "epoch": 0.11966604823747681,
      "grad_norm": 7.54325532913208,
      "learning_rate": 1.997687948796831e-05,
      "loss": 0.2762,
      "step": 129
    },
    {
      "epoch": 0.12059369202226346,
      "grad_norm": 4.39344596862793,
      "learning_rate": 1.997462606081465e-05,
      "loss": 0.2178,
      "step": 130
    },
    {
      "epoch": 0.12152133580705009,
      "grad_norm": 4.67897891998291,
      "learning_rate": 1.997226800455352e-05,
      "loss": 0.2575,
      "step": 131
    },
    {
      "epoch": 0.12244897959183673,
      "grad_norm": 2.7064077854156494,
      "learning_rate": 1.9969805343919822e-05,
      "loss": 0.1973,
      "step": 132
    },
    {
      "epoch": 0.12337662337662338,
      "grad_norm": 2.841456413269043,
      "learning_rate": 1.9967238104745695e-05,
      "loss": 0.186,
      "step": 133
    },
    {
      "epoch": 0.12430426716141002,
      "grad_norm": 5.078066349029541,
      "learning_rate": 1.9964566313960265e-05,
      "loss": 0.2899,
      "step": 134
    },
    {
      "epoch": 0.12523191094619665,
      "grad_norm": 3.5166287422180176,
      "learning_rate": 1.9961789999589357e-05,
      "loss": 0.2397,
      "step": 135
    },
    {
      "epoch": 0.1261595547309833,
      "grad_norm": 3.0311009883880615,
      "learning_rate": 1.995890919075519e-05,
      "loss": 0.2042,
      "step": 136
    },
    {
      "epoch": 0.12708719851576994,
      "grad_norm": 4.455051898956299,
      "learning_rate": 1.995592391767608e-05,
      "loss": 0.1938,
      "step": 137
    },
    {
      "epoch": 0.1280148423005566,
      "grad_norm": 3.05238676071167,
      "learning_rate": 1.995283421166614e-05,
      "loss": 0.2134,
      "step": 138
    },
    {
      "epoch": 0.12894248608534323,
      "grad_norm": 2.5545527935028076,
      "learning_rate": 1.994964010513492e-05,
      "loss": 0.1558,
      "step": 139
    },
    {
      "epoch": 0.12987012987012986,
      "grad_norm": 3.169755458831787,
      "learning_rate": 1.9946341631587086e-05,
      "loss": 0.1912,
      "step": 140
    },
    {
      "epoch": 0.12987012987012986,
      "eval_accuracy": 0.8004434589800443,
      "eval_f1": 0.5,
      "eval_loss": 0.45746758580207825,
      "eval_precision": 0.8411214953271028,
      "eval_recall": 0.3557312252964427,
      "eval_runtime": 48.503,
      "eval_samples_per_second": 5.69,
      "eval_steps_per_second": 0.186,
      "step": 140
    },
    {
      "epoch": 0.13079777365491652,
      "grad_norm": 5.176037788391113,
      "learning_rate": 1.9942938825622064e-05,
      "loss": 0.261,
      "step": 141
    },
    {
      "epoch": 0.13172541743970315,
      "grad_norm": 4.5571513175964355,
      "learning_rate": 1.9939431722933678e-05,
      "loss": 0.1861,
      "step": 142
    },
    {
      "epoch": 0.1326530612244898,
      "grad_norm": 4.165744304656982,
      "learning_rate": 1.993582036030978e-05,
      "loss": 0.252,
      "step": 143
    },
    {
      "epoch": 0.13358070500927643,
      "grad_norm": 3.6084752082824707,
      "learning_rate": 1.9932104775631847e-05,
      "loss": 0.2091,
      "step": 144
    },
    {
      "epoch": 0.1345083487940631,
      "grad_norm": 5.7025837898254395,
      "learning_rate": 1.992828500787461e-05,
      "loss": 0.2875,
      "step": 145
    },
    {
      "epoch": 0.13543599257884972,
      "grad_norm": 3.954706907272339,
      "learning_rate": 1.9924361097105624e-05,
      "loss": 0.147,
      "step": 146
    },
    {
      "epoch": 0.13636363636363635,
      "grad_norm": 3.246682643890381,
      "learning_rate": 1.992033308448486e-05,
      "loss": 0.1406,
      "step": 147
    },
    {
      "epoch": 0.137291280148423,
      "grad_norm": 7.386576175689697,
      "learning_rate": 1.9916201012264255e-05,
      "loss": 0.2637,
      "step": 148
    },
    {
      "epoch": 0.13821892393320964,
      "grad_norm": 11.797701835632324,
      "learning_rate": 1.9911964923787295e-05,
      "loss": 0.2989,
      "step": 149
    },
    {
      "epoch": 0.1391465677179963,
      "grad_norm": 4.424801349639893,
      "learning_rate": 1.990762486348855e-05,
      "loss": 0.2059,
      "step": 150
    },
    {
      "epoch": 0.14007421150278293,
      "grad_norm": 6.5464582443237305,
      "learning_rate": 1.9903180876893195e-05,
      "loss": 0.3335,
      "step": 151
    },
    {
      "epoch": 0.14100185528756956,
      "grad_norm": 6.232185363769531,
      "learning_rate": 1.989863301061654e-05,
      "loss": 0.1652,
      "step": 152
    },
    {
      "epoch": 0.14192949907235622,
      "grad_norm": 7.7785162925720215,
      "learning_rate": 1.9893981312363563e-05,
      "loss": 0.3246,
      "step": 153
    },
    {
      "epoch": 0.14285714285714285,
      "grad_norm": 4.034485340118408,
      "learning_rate": 1.9889225830928365e-05,
      "loss": 0.1639,
      "step": 154
    },
    {
      "epoch": 0.1437847866419295,
      "grad_norm": 4.565614223480225,
      "learning_rate": 1.9884366616193707e-05,
      "loss": 0.2567,
      "step": 155
    },
    {
      "epoch": 0.14471243042671614,
      "grad_norm": 4.671913146972656,
      "learning_rate": 1.987940371913044e-05,
      "loss": 0.2956,
      "step": 156
    },
    {
      "epoch": 0.1456400742115028,
      "grad_norm": 4.866475582122803,
      "learning_rate": 1.987433719179702e-05,
      "loss": 0.1732,
      "step": 157
    },
    {
      "epoch": 0.14656771799628943,
      "grad_norm": 7.748964786529541,
      "learning_rate": 1.9869167087338908e-05,
      "loss": 0.3068,
      "step": 158
    },
    {
      "epoch": 0.14749536178107606,
      "grad_norm": 3.243945837020874,
      "learning_rate": 1.986389345998806e-05,
      "loss": 0.2172,
      "step": 159
    },
    {
      "epoch": 0.14842300556586271,
      "grad_norm": 2.541400671005249,
      "learning_rate": 1.9858516365062334e-05,
      "loss": 0.1991,
      "step": 160
    },
    {
      "epoch": 0.14842300556586271,
      "eval_accuracy": 0.811529933481153,
      "eval_f1": 0.5478723404255319,
      "eval_loss": 0.4108695983886719,
      "eval_precision": 0.8373983739837398,
      "eval_recall": 0.40711462450592883,
      "eval_runtime": 49.0091,
      "eval_samples_per_second": 5.632,
      "eval_steps_per_second": 0.184,
      "step": 160
    },
    {
      "epoch": 0.14935064935064934,
      "grad_norm": 3.5915067195892334,
      "learning_rate": 1.9853035858964907e-05,
      "loss": 0.2252,
      "step": 161
    },
    {
      "epoch": 0.150278293135436,
      "grad_norm": 3.297874927520752,
      "learning_rate": 1.9847451999183692e-05,
      "loss": 0.2025,
      "step": 162
    },
    {
      "epoch": 0.15120593692022263,
      "grad_norm": 7.80188512802124,
      "learning_rate": 1.9841764844290744e-05,
      "loss": 0.3563,
      "step": 163
    },
    {
      "epoch": 0.15213358070500926,
      "grad_norm": 4.962357044219971,
      "learning_rate": 1.9835974453941623e-05,
      "loss": 0.2331,
      "step": 164
    },
    {
      "epoch": 0.15306122448979592,
      "grad_norm": 4.794024467468262,
      "learning_rate": 1.983008088887478e-05,
      "loss": 0.2759,
      "step": 165
    },
    {
      "epoch": 0.15398886827458255,
      "grad_norm": 5.007259368896484,
      "learning_rate": 1.9824084210910924e-05,
      "loss": 0.1732,
      "step": 166
    },
    {
      "epoch": 0.1549165120593692,
      "grad_norm": 4.154080390930176,
      "learning_rate": 1.9817984482952378e-05,
      "loss": 0.199,
      "step": 167
    },
    {
      "epoch": 0.15584415584415584,
      "grad_norm": 5.933828830718994,
      "learning_rate": 1.9811781768982392e-05,
      "loss": 0.3237,
      "step": 168
    },
    {
      "epoch": 0.1567717996289425,
      "grad_norm": 4.5709943771362305,
      "learning_rate": 1.980547613406451e-05,
      "loss": 0.2356,
      "step": 169
    },
    {
      "epoch": 0.15769944341372913,
      "grad_norm": 4.232250690460205,
      "learning_rate": 1.9799067644341844e-05,
      "loss": 0.2318,
      "step": 170
    },
    {
      "epoch": 0.15862708719851576,
      "grad_norm": 5.491664886474609,
      "learning_rate": 1.9792556367036432e-05,
      "loss": 0.2551,
      "step": 171
    },
    {
      "epoch": 0.15955473098330242,
      "grad_norm": 3.584186315536499,
      "learning_rate": 1.978594237044849e-05,
      "loss": 0.1733,
      "step": 172
    },
    {
      "epoch": 0.16048237476808905,
      "grad_norm": 2.8872857093811035,
      "learning_rate": 1.977922572395571e-05,
      "loss": 0.2076,
      "step": 173
    },
    {
      "epoch": 0.1614100185528757,
      "grad_norm": 4.199950695037842,
      "learning_rate": 1.977240649801253e-05,
      "loss": 0.2076,
      "step": 174
    },
    {
      "epoch": 0.16233766233766234,
      "grad_norm": 3.1421799659729004,
      "learning_rate": 1.9765484764149413e-05,
      "loss": 0.2281,
      "step": 175
    },
    {
      "epoch": 0.16326530612244897,
      "grad_norm": 4.832352638244629,
      "learning_rate": 1.9758460594972068e-05,
      "loss": 0.1834,
      "step": 176
    },
    {
      "epoch": 0.16419294990723562,
      "grad_norm": 2.8222384452819824,
      "learning_rate": 1.9751334064160708e-05,
      "loss": 0.1908,
      "step": 177
    },
    {
      "epoch": 0.16512059369202226,
      "grad_norm": 3.1630570888519287,
      "learning_rate": 1.9744105246469264e-05,
      "loss": 0.1962,
      "step": 178
    },
    {
      "epoch": 0.1660482374768089,
      "grad_norm": 3.811518669128418,
      "learning_rate": 1.9736774217724614e-05,
      "loss": 0.1786,
      "step": 179
    },
    {
      "epoch": 0.16697588126159554,
      "grad_norm": 4.223273754119873,
      "learning_rate": 1.9729341054825783e-05,
      "loss": 0.2153,
      "step": 180
    },
    {
      "epoch": 0.16697588126159554,
      "eval_accuracy": 0.8337028824833703,
      "eval_f1": 0.6268656716417911,
      "eval_loss": 0.37183114886283875,
      "eval_precision": 0.8456375838926175,
      "eval_recall": 0.4980237154150198,
      "eval_runtime": 48.5694,
      "eval_samples_per_second": 5.683,
      "eval_steps_per_second": 0.185,
      "step": 180
    },
    {
      "epoch": 0.1679035250463822,
      "grad_norm": 3.9425668716430664,
      "learning_rate": 1.972180583574313e-05,
      "loss": 0.1998,
      "step": 181
    },
    {
      "epoch": 0.16883116883116883,
      "grad_norm": 5.5068840980529785,
      "learning_rate": 1.9714168639517543e-05,
      "loss": 0.2466,
      "step": 182
    },
    {
      "epoch": 0.16975881261595546,
      "grad_norm": 6.162604808807373,
      "learning_rate": 1.9706429546259592e-05,
      "loss": 0.163,
      "step": 183
    },
    {
      "epoch": 0.17068645640074212,
      "grad_norm": 5.026734828948975,
      "learning_rate": 1.9698588637148705e-05,
      "loss": 0.275,
      "step": 184
    },
    {
      "epoch": 0.17161410018552875,
      "grad_norm": 6.298387050628662,
      "learning_rate": 1.9690645994432307e-05,
      "loss": 0.1692,
      "step": 185
    },
    {
      "epoch": 0.1725417439703154,
      "grad_norm": 5.307831287384033,
      "learning_rate": 1.9682601701424958e-05,
      "loss": 0.2499,
      "step": 186
    },
    {
      "epoch": 0.17346938775510204,
      "grad_norm": 6.9988203048706055,
      "learning_rate": 1.9674455842507494e-05,
      "loss": 0.2434,
      "step": 187
    },
    {
      "epoch": 0.17439703153988867,
      "grad_norm": 4.128062725067139,
      "learning_rate": 1.9666208503126115e-05,
      "loss": 0.1976,
      "step": 188
    },
    {
      "epoch": 0.17532467532467533,
      "grad_norm": 3.3845396041870117,
      "learning_rate": 1.9657859769791506e-05,
      "loss": 0.1355,
      "step": 189
    },
    {
      "epoch": 0.17625231910946196,
      "grad_norm": 3.59576416015625,
      "learning_rate": 1.9649409730077934e-05,
      "loss": 0.2027,
      "step": 190
    },
    {
      "epoch": 0.17717996289424862,
      "grad_norm": 3.110616683959961,
      "learning_rate": 1.9640858472622316e-05,
      "loss": 0.2039,
      "step": 191
    },
    {
      "epoch": 0.17810760667903525,
      "grad_norm": 3.8708298206329346,
      "learning_rate": 1.9632206087123296e-05,
      "loss": 0.2163,
      "step": 192
    },
    {
      "epoch": 0.1790352504638219,
      "grad_norm": 5.087402820587158,
      "learning_rate": 1.9623452664340305e-05,
      "loss": 0.2631,
      "step": 193
    },
    {
      "epoch": 0.17996289424860853,
      "grad_norm": 3.7053322792053223,
      "learning_rate": 1.9614598296092603e-05,
      "loss": 0.2034,
      "step": 194
    },
    {
      "epoch": 0.18089053803339517,
      "grad_norm": 4.846376419067383,
      "learning_rate": 1.9605643075258323e-05,
      "loss": 0.2071,
      "step": 195
    },
    {
      "epoch": 0.18181818181818182,
      "grad_norm": 3.7219040393829346,
      "learning_rate": 1.9596587095773496e-05,
      "loss": 0.2516,
      "step": 196
    },
    {
      "epoch": 0.18274582560296845,
      "grad_norm": 5.604904651641846,
      "learning_rate": 1.958743045263106e-05,
      "loss": 0.2076,
      "step": 197
    },
    {
      "epoch": 0.1836734693877551,
      "grad_norm": 2.9652745723724365,
      "learning_rate": 1.957817324187987e-05,
      "loss": 0.1752,
      "step": 198
    },
    {
      "epoch": 0.18460111317254174,
      "grad_norm": 4.468489646911621,
      "learning_rate": 1.956881556062369e-05,
      "loss": 0.2177,
      "step": 199
    },
    {
      "epoch": 0.18552875695732837,
      "grad_norm": 4.231448173522949,
      "learning_rate": 1.9559357507020163e-05,
      "loss": 0.1638,
      "step": 200
    },
    {
      "epoch": 0.18552875695732837,
      "eval_accuracy": 0.8237250554323725,
      "eval_f1": 0.5826771653543307,
      "eval_loss": 0.36573752760887146,
      "eval_precision": 0.8671875,
      "eval_recall": 0.43873517786561267,
      "eval_runtime": 49.5575,
      "eval_samples_per_second": 5.569,
      "eval_steps_per_second": 0.182,
      "step": 200
    },
    {
      "epoch": 0.18645640074211503,
      "grad_norm": 3.4353301525115967,
      "learning_rate": 1.9549799180279793e-05,
      "loss": 0.2293,
      "step": 201
    },
    {
      "epoch": 0.18738404452690166,
      "grad_norm": 3.4561715126037598,
      "learning_rate": 1.9540140680664915e-05,
      "loss": 0.1131,
      "step": 202
    },
    {
      "epoch": 0.18831168831168832,
      "grad_norm": 4.9482293128967285,
      "learning_rate": 1.953038210948861e-05,
      "loss": 0.2009,
      "step": 203
    },
    {
      "epoch": 0.18923933209647495,
      "grad_norm": 4.263972759246826,
      "learning_rate": 1.952052356911368e-05,
      "loss": 0.2624,
      "step": 204
    },
    {
      "epoch": 0.1901669758812616,
      "grad_norm": 4.766571044921875,
      "learning_rate": 1.9510565162951538e-05,
      "loss": 0.1823,
      "step": 205
    },
    {
      "epoch": 0.19109461966604824,
      "grad_norm": 5.494351387023926,
      "learning_rate": 1.950050699546116e-05,
      "loss": 0.2365,
      "step": 206
    },
    {
      "epoch": 0.19202226345083487,
      "grad_norm": 5.0484795570373535,
      "learning_rate": 1.9490349172147964e-05,
      "loss": 0.2197,
      "step": 207
    },
    {
      "epoch": 0.19294990723562153,
      "grad_norm": 4.198167324066162,
      "learning_rate": 1.9480091799562706e-05,
      "loss": 0.1825,
      "step": 208
    },
    {
      "epoch": 0.19387755102040816,
      "grad_norm": 4.7388105392456055,
      "learning_rate": 1.9469734985300373e-05,
      "loss": 0.2195,
      "step": 209
    },
    {
      "epoch": 0.19480519480519481,
      "grad_norm": 4.556212902069092,
      "learning_rate": 1.9459278837999048e-05,
      "loss": 0.2085,
      "step": 210
    },
    {
      "epoch": 0.19573283858998144,
      "grad_norm": 5.153113842010498,
      "learning_rate": 1.9448723467338765e-05,
      "loss": 0.2332,
      "step": 211
    },
    {
      "epoch": 0.19666048237476808,
      "grad_norm": 2.6720712184906006,
      "learning_rate": 1.9438068984040366e-05,
      "loss": 0.1761,
      "step": 212
    },
    {
      "epoch": 0.19758812615955473,
      "grad_norm": 2.9613466262817383,
      "learning_rate": 1.9427315499864345e-05,
      "loss": 0.1877,
      "step": 213
    },
    {
      "epoch": 0.19851576994434136,
      "grad_norm": 3.5731585025787354,
      "learning_rate": 1.9416463127609655e-05,
      "loss": 0.2018,
      "step": 214
    },
    {
      "epoch": 0.19944341372912802,
      "grad_norm": 4.205770015716553,
      "learning_rate": 1.9405511981112553e-05,
      "loss": 0.1952,
      "step": 215
    },
    {
      "epoch": 0.20037105751391465,
      "grad_norm": 4.905291557312012,
      "learning_rate": 1.9394462175245382e-05,
      "loss": 0.2482,
      "step": 216
    },
    {
      "epoch": 0.2012987012987013,
      "grad_norm": 6.575018405914307,
      "learning_rate": 1.9383313825915372e-05,
      "loss": 0.2007,
      "step": 217
    },
    {
      "epoch": 0.20222634508348794,
      "grad_norm": 4.795684814453125,
      "learning_rate": 1.937206705006344e-05,
      "loss": 0.1878,
      "step": 218
    },
    {
      "epoch": 0.20315398886827457,
      "grad_norm": 3.1328468322753906,
      "learning_rate": 1.9360721965662934e-05,
      "loss": 0.1864,
      "step": 219
    },
    {
      "epoch": 0.20408163265306123,
      "grad_norm": 3.297593355178833,
      "learning_rate": 1.9349278691718426e-05,
      "loss": 0.2033,
      "step": 220
    },
    {
      "epoch": 0.20408163265306123,
      "eval_accuracy": 0.8370288248337029,
      "eval_f1": 0.6423357664233577,
      "eval_loss": 0.34552833437919617,
      "eval_precision": 0.8354430379746836,
      "eval_recall": 0.5217391304347826,
      "eval_runtime": 48.9715,
      "eval_samples_per_second": 5.636,
      "eval_steps_per_second": 0.184,
      "step": 220
    },
    {
      "epoch": 0.20500927643784786,
      "grad_norm": 3.253046989440918,
      "learning_rate": 1.9337737348264448e-05,
      "loss": 0.2099,
      "step": 221
    },
    {
      "epoch": 0.20593692022263452,
      "grad_norm": 4.903271675109863,
      "learning_rate": 1.9326098056364224e-05,
      "loss": 0.2595,
      "step": 222
    },
    {
      "epoch": 0.20686456400742115,
      "grad_norm": 6.408766746520996,
      "learning_rate": 1.9314360938108427e-05,
      "loss": 0.2615,
      "step": 223
    },
    {
      "epoch": 0.2077922077922078,
      "grad_norm": 4.407992839813232,
      "learning_rate": 1.9302526116613863e-05,
      "loss": 0.1009,
      "step": 224
    },
    {
      "epoch": 0.20871985157699444,
      "grad_norm": 2.9286835193634033,
      "learning_rate": 1.9290593716022218e-05,
      "loss": 0.1412,
      "step": 225
    },
    {
      "epoch": 0.20964749536178107,
      "grad_norm": 3.6685068607330322,
      "learning_rate": 1.9278563861498726e-05,
      "loss": 0.221,
      "step": 226
    },
    {
      "epoch": 0.21057513914656772,
      "grad_norm": 4.300150394439697,
      "learning_rate": 1.9266436679230866e-05,
      "loss": 0.1645,
      "step": 227
    },
    {
      "epoch": 0.21150278293135436,
      "grad_norm": 4.307882308959961,
      "learning_rate": 1.9254212296427043e-05,
      "loss": 0.2284,
      "step": 228
    },
    {
      "epoch": 0.212430426716141,
      "grad_norm": 2.4391989707946777,
      "learning_rate": 1.924189084131525e-05,
      "loss": 0.118,
      "step": 229
    },
    {
      "epoch": 0.21335807050092764,
      "grad_norm": 5.997758388519287,
      "learning_rate": 1.922947244314172e-05,
      "loss": 0.2798,
      "step": 230
    },
    {
      "epoch": 0.21428571428571427,
      "grad_norm": 4.957704544067383,
      "learning_rate": 1.9216957232169567e-05,
      "loss": 0.1852,
      "step": 231
    },
    {
      "epoch": 0.21521335807050093,
      "grad_norm": 5.667599678039551,
      "learning_rate": 1.9204345339677442e-05,
      "loss": 0.2155,
      "step": 232
    },
    {
      "epoch": 0.21614100185528756,
      "grad_norm": 4.291685104370117,
      "learning_rate": 1.9191636897958123e-05,
      "loss": 0.2637,
      "step": 233
    },
    {
      "epoch": 0.21706864564007422,
      "grad_norm": 4.587561130523682,
      "learning_rate": 1.9178832040317153e-05,
      "loss": 0.2383,
      "step": 234
    },
    {
      "epoch": 0.21799628942486085,
      "grad_norm": 3.8426513671875,
      "learning_rate": 1.916593090107143e-05,
      "loss": 0.212,
      "step": 235
    },
    {
      "epoch": 0.2189239332096475,
      "grad_norm": 5.173671722412109,
      "learning_rate": 1.91529336155478e-05,
      "loss": 0.2054,
      "step": 236
    },
    {
      "epoch": 0.21985157699443414,
      "grad_norm": 4.827680587768555,
      "learning_rate": 1.913984032008163e-05,
      "loss": 0.2072,
      "step": 237
    },
    {
      "epoch": 0.22077922077922077,
      "grad_norm": 7.693399906158447,
      "learning_rate": 1.9126651152015404e-05,
      "loss": 0.2071,
      "step": 238
    },
    {
      "epoch": 0.22170686456400743,
      "grad_norm": 3.128953695297241,
      "learning_rate": 1.911336624969725e-05,
      "loss": 0.2271,
      "step": 239
    },
    {
      "epoch": 0.22263450834879406,
      "grad_norm": 6.966447353363037,
      "learning_rate": 1.9099985752479505e-05,
      "loss": 0.2448,
      "step": 240
    },
    {
      "epoch": 0.22263450834879406,
      "eval_accuracy": 0.8381374722838137,
      "eval_f1": 0.6403940886699507,
      "eval_loss": 0.34381967782974243,
      "eval_precision": 0.8496732026143791,
      "eval_recall": 0.5138339920948617,
      "eval_runtime": 48.6504,
      "eval_samples_per_second": 5.673,
      "eval_steps_per_second": 0.185,
      "step": 240
    },
    {
      "epoch": 0.22356215213358072,
      "grad_norm": 4.817497730255127,
      "learning_rate": 1.908650980071726e-05,
      "loss": 0.1878,
      "step": 241
    },
    {
      "epoch": 0.22448979591836735,
      "grad_norm": 3.612339973449707,
      "learning_rate": 1.9072938535766864e-05,
      "loss": 0.1226,
      "step": 242
    },
    {
      "epoch": 0.22541743970315398,
      "grad_norm": 4.005315780639648,
      "learning_rate": 1.905927209998447e-05,
      "loss": 0.2076,
      "step": 243
    },
    {
      "epoch": 0.22634508348794063,
      "grad_norm": 3.032423496246338,
      "learning_rate": 1.904551063672452e-05,
      "loss": 0.1511,
      "step": 244
    },
    {
      "epoch": 0.22727272727272727,
      "grad_norm": 3.060365676879883,
      "learning_rate": 1.9031654290338256e-05,
      "loss": 0.1954,
      "step": 245
    },
    {
      "epoch": 0.22820037105751392,
      "grad_norm": 5.271503448486328,
      "learning_rate": 1.9017703206172187e-05,
      "loss": 0.2244,
      "step": 246
    },
    {
      "epoch": 0.22912801484230055,
      "grad_norm": 2.923628568649292,
      "learning_rate": 1.900365753056659e-05,
      "loss": 0.1753,
      "step": 247
    },
    {
      "epoch": 0.2300556586270872,
      "grad_norm": 3.745664119720459,
      "learning_rate": 1.8989517410853956e-05,
      "loss": 0.188,
      "step": 248
    },
    {
      "epoch": 0.23098330241187384,
      "grad_norm": 3.7633256912231445,
      "learning_rate": 1.8975282995357448e-05,
      "loss": 0.201,
      "step": 249
    },
    {
      "epoch": 0.23191094619666047,
      "grad_norm": 3.29656720161438,
      "learning_rate": 1.896095443338935e-05,
      "loss": 0.1956,
      "step": 250
    },
    {
      "epoch": 0.23283858998144713,
      "grad_norm": 4.702951431274414,
      "learning_rate": 1.8946531875249496e-05,
      "loss": 0.2101,
      "step": 251
    },
    {
      "epoch": 0.23376623376623376,
      "grad_norm": 7.016535758972168,
      "learning_rate": 1.8932015472223692e-05,
      "loss": 0.2486,
      "step": 252
    },
    {
      "epoch": 0.23469387755102042,
      "grad_norm": 4.004672050476074,
      "learning_rate": 1.8917405376582144e-05,
      "loss": 0.239,
      "step": 253
    },
    {
      "epoch": 0.23562152133580705,
      "grad_norm": 4.382692337036133,
      "learning_rate": 1.8902701741577844e-05,
      "loss": 0.2308,
      "step": 254
    },
    {
      "epoch": 0.23654916512059368,
      "grad_norm": 4.72487735748291,
      "learning_rate": 1.8887904721444955e-05,
      "loss": 0.2098,
      "step": 255
    },
    {
      "epoch": 0.23747680890538034,
      "grad_norm": 3.4035651683807373,
      "learning_rate": 1.8873014471397225e-05,
      "loss": 0.101,
      "step": 256
    },
    {
      "epoch": 0.23840445269016697,
      "grad_norm": 7.383582592010498,
      "learning_rate": 1.8858031147626326e-05,
      "loss": 0.2499,
      "step": 257
    },
    {
      "epoch": 0.23933209647495363,
      "grad_norm": 4.3388895988464355,
      "learning_rate": 1.8842954907300236e-05,
      "loss": 0.1859,
      "step": 258
    },
    {
      "epoch": 0.24025974025974026,
      "grad_norm": 3.2572548389434814,
      "learning_rate": 1.8827785908561585e-05,
      "loss": 0.1833,
      "step": 259
    },
    {
      "epoch": 0.24118738404452691,
      "grad_norm": 7.2369561195373535,
      "learning_rate": 1.881252431052599e-05,
      "loss": 0.2337,
      "step": 260
    },
    {
      "epoch": 0.24118738404452691,
      "eval_accuracy": 0.8281596452328159,
      "eval_f1": 0.5931758530183727,
      "eval_loss": 0.3704891502857208,
      "eval_precision": 0.8828125,
      "eval_recall": 0.44664031620553357,
      "eval_runtime": 47.7821,
      "eval_samples_per_second": 5.776,
      "eval_steps_per_second": 0.188,
      "step": 260
    },
    {
      "epoch": 0.24211502782931354,
      "grad_norm": 6.052790641784668,
      "learning_rate": 1.879717027328039e-05,
      "loss": 0.2569,
      "step": 261
    },
    {
      "epoch": 0.24304267161410018,
      "grad_norm": 3.246156692504883,
      "learning_rate": 1.8781723957881374e-05,
      "loss": 0.1871,
      "step": 262
    },
    {
      "epoch": 0.24397031539888683,
      "grad_norm": 6.536667823791504,
      "learning_rate": 1.876618552635348e-05,
      "loss": 0.1425,
      "step": 263
    },
    {
      "epoch": 0.24489795918367346,
      "grad_norm": 5.4920830726623535,
      "learning_rate": 1.87505551416875e-05,
      "loss": 0.2275,
      "step": 264
    },
    {
      "epoch": 0.24582560296846012,
      "grad_norm": 7.879229545593262,
      "learning_rate": 1.8734832967838775e-05,
      "loss": 0.2852,
      "step": 265
    },
    {
      "epoch": 0.24675324675324675,
      "grad_norm": 3.7755329608917236,
      "learning_rate": 1.871901916972547e-05,
      "loss": 0.1967,
      "step": 266
    },
    {
      "epoch": 0.24768089053803338,
      "grad_norm": 4.236266136169434,
      "learning_rate": 1.8703113913226847e-05,
      "loss": 0.1302,
      "step": 267
    },
    {
      "epoch": 0.24860853432282004,
      "grad_norm": 5.8521599769592285,
      "learning_rate": 1.8687117365181514e-05,
      "loss": 0.2866,
      "step": 268
    },
    {
      "epoch": 0.24953617810760667,
      "grad_norm": 2.9448065757751465,
      "learning_rate": 1.867102969338569e-05,
      "loss": 0.171,
      "step": 269
    },
    {
      "epoch": 0.2504638218923933,
      "grad_norm": 4.411244869232178,
      "learning_rate": 1.865485106659145e-05,
      "loss": 0.1424,
      "step": 270
    },
    {
      "epoch": 0.25139146567717996,
      "grad_norm": 5.2516655921936035,
      "learning_rate": 1.863858165450492e-05,
      "loss": 0.2166,
      "step": 271
    },
    {
      "epoch": 0.2523191094619666,
      "grad_norm": 2.6857924461364746,
      "learning_rate": 1.862222162778454e-05,
      "loss": 0.1786,
      "step": 272
    },
    {
      "epoch": 0.2532467532467532,
      "grad_norm": 6.46083927154541,
      "learning_rate": 1.8605771158039253e-05,
      "loss": 0.1817,
      "step": 273
    },
    {
      "epoch": 0.2541743970315399,
      "grad_norm": 6.851802349090576,
      "learning_rate": 1.85892304178267e-05,
      "loss": 0.174,
      "step": 274
    },
    {
      "epoch": 0.25510204081632654,
      "grad_norm": 4.372910022735596,
      "learning_rate": 1.8572599580651415e-05,
      "loss": 0.211,
      "step": 275
    },
    {
      "epoch": 0.2560296846011132,
      "grad_norm": 8.214804649353027,
      "learning_rate": 1.8555878820963014e-05,
      "loss": 0.2295,
      "step": 276
    },
    {
      "epoch": 0.2569573283858998,
      "grad_norm": 3.299901247024536,
      "learning_rate": 1.8539068314154355e-05,
      "loss": 0.1784,
      "step": 277
    },
    {
      "epoch": 0.25788497217068646,
      "grad_norm": 3.1576666831970215,
      "learning_rate": 1.8522168236559693e-05,
      "loss": 0.1864,
      "step": 278
    },
    {
      "epoch": 0.2588126159554731,
      "grad_norm": 6.918071269989014,
      "learning_rate": 1.8505178765452853e-05,
      "loss": 0.2015,
      "step": 279
    },
    {
      "epoch": 0.2597402597402597,
      "grad_norm": 2.884896755218506,
      "learning_rate": 1.8488100079045345e-05,
      "loss": 0.1698,
      "step": 280
    },
    {
      "epoch": 0.2597402597402597,
      "eval_accuracy": 0.8215077605321508,
      "eval_f1": 0.5729442970822282,
      "eval_loss": 0.37236273288726807,
      "eval_precision": 0.8709677419354839,
      "eval_recall": 0.4268774703557312,
      "eval_runtime": 48.19,
      "eval_samples_per_second": 5.727,
      "eval_steps_per_second": 0.187,
      "step": 280
    },
    {
      "epoch": 0.2606679035250464,
      "grad_norm": 2.52854061126709,
      "learning_rate": 1.847093235648451e-05,
      "loss": 0.1427,
      "step": 281
    },
    {
      "epoch": 0.26159554730983303,
      "grad_norm": 3.8434646129608154,
      "learning_rate": 1.8453675777851627e-05,
      "loss": 0.2429,
      "step": 282
    },
    {
      "epoch": 0.2625231910946197,
      "grad_norm": 3.32183837890625,
      "learning_rate": 1.8436330524160048e-05,
      "loss": 0.1672,
      "step": 283
    },
    {
      "epoch": 0.2634508348794063,
      "grad_norm": 3.077954053878784,
      "learning_rate": 1.8418896777353272e-05,
      "loss": 0.1511,
      "step": 284
    },
    {
      "epoch": 0.26437847866419295,
      "grad_norm": 6.1399617195129395,
      "learning_rate": 1.8401374720303054e-05,
      "loss": 0.1989,
      "step": 285
    },
    {
      "epoch": 0.2653061224489796,
      "grad_norm": 4.476570129394531,
      "learning_rate": 1.8383764536807486e-05,
      "loss": 0.2204,
      "step": 286
    },
    {
      "epoch": 0.2662337662337662,
      "grad_norm": 3.3272931575775146,
      "learning_rate": 1.836606641158905e-05,
      "loss": 0.1733,
      "step": 287
    },
    {
      "epoch": 0.26716141001855287,
      "grad_norm": 3.8696646690368652,
      "learning_rate": 1.8348280530292712e-05,
      "loss": 0.2698,
      "step": 288
    },
    {
      "epoch": 0.2680890538033395,
      "grad_norm": 4.347325325012207,
      "learning_rate": 1.833040707948395e-05,
      "loss": 0.2098,
      "step": 289
    },
    {
      "epoch": 0.2690166975881262,
      "grad_norm": 3.8024332523345947,
      "learning_rate": 1.831244624664681e-05,
      "loss": 0.2092,
      "step": 290
    },
    {
      "epoch": 0.2699443413729128,
      "grad_norm": 3.742701768875122,
      "learning_rate": 1.829439822018192e-05,
      "loss": 0.1793,
      "step": 291
    },
    {
      "epoch": 0.27087198515769945,
      "grad_norm": 4.459840774536133,
      "learning_rate": 1.827626318940454e-05,
      "loss": 0.224,
      "step": 292
    },
    {
      "epoch": 0.2717996289424861,
      "grad_norm": 3.4559335708618164,
      "learning_rate": 1.8258041344542567e-05,
      "loss": 0.2342,
      "step": 293
    },
    {
      "epoch": 0.2727272727272727,
      "grad_norm": 4.520707607269287,
      "learning_rate": 1.8239732876734525e-05,
      "loss": 0.2899,
      "step": 294
    },
    {
      "epoch": 0.27365491651205937,
      "grad_norm": 3.844388246536255,
      "learning_rate": 1.822133797802758e-05,
      "loss": 0.172,
      "step": 295
    },
    {
      "epoch": 0.274582560296846,
      "grad_norm": 3.944843292236328,
      "learning_rate": 1.8202856841375517e-05,
      "loss": 0.1602,
      "step": 296
    },
    {
      "epoch": 0.2755102040816326,
      "grad_norm": 2.833136796951294,
      "learning_rate": 1.8184289660636715e-05,
      "loss": 0.1829,
      "step": 297
    },
    {
      "epoch": 0.2764378478664193,
      "grad_norm": 5.877793312072754,
      "learning_rate": 1.816563663057211e-05,
      "loss": 0.1776,
      "step": 298
    },
    {
      "epoch": 0.27736549165120594,
      "grad_norm": 7.863223552703857,
      "learning_rate": 1.8146897946843162e-05,
      "loss": 0.2734,
      "step": 299
    },
    {
      "epoch": 0.2782931354359926,
      "grad_norm": 3.5865793228149414,
      "learning_rate": 1.81280738060098e-05,
      "loss": 0.1607,
      "step": 300
    },
    {
      "epoch": 0.2782931354359926,
      "eval_accuracy": 0.8292682926829268,
      "eval_f1": 0.6010362694300518,
      "eval_loss": 0.34550240635871887,
      "eval_precision": 0.8721804511278195,
      "eval_recall": 0.45849802371541504,
      "eval_runtime": 47.9887,
      "eval_samples_per_second": 5.751,
      "eval_steps_per_second": 0.188,
      "step": 300
    },
    {
      "epoch": 0.2792207792207792,
      "grad_norm": 4.416810512542725,
      "learning_rate": 1.810916440552835e-05,
      "loss": 0.2122,
      "step": 301
    },
    {
      "epoch": 0.28014842300556586,
      "grad_norm": 4.608455657958984,
      "learning_rate": 1.8090169943749477e-05,
      "loss": 0.1634,
      "step": 302
    },
    {
      "epoch": 0.2810760667903525,
      "grad_norm": 2.9112977981567383,
      "learning_rate": 1.8071090619916095e-05,
      "loss": 0.1863,
      "step": 303
    },
    {
      "epoch": 0.2820037105751391,
      "grad_norm": 4.165685176849365,
      "learning_rate": 1.8051926634161282e-05,
      "loss": 0.1607,
      "step": 304
    },
    {
      "epoch": 0.2829313543599258,
      "grad_norm": 3.188124895095825,
      "learning_rate": 1.8032678187506187e-05,
      "loss": 0.1379,
      "step": 305
    },
    {
      "epoch": 0.28385899814471244,
      "grad_norm": 4.277055740356445,
| "learning_rate": 1.8013345481857903e-05, | |
| "loss": 0.1945, | |
| "step": 306 | |
| }, | |
| { | |
| "epoch": 0.2847866419294991, | |
| "grad_norm": 4.259377956390381, | |
| "learning_rate": 1.799392872000736e-05, | |
| "loss": 0.2123, | |
| "step": 307 | |
| }, | |
| { | |
| "epoch": 0.2857142857142857, | |
| "grad_norm": 4.486137390136719, | |
| "learning_rate": 1.797442810562721e-05, | |
| "loss": 0.2004, | |
| "step": 308 | |
| }, | |
| { | |
| "epoch": 0.28664192949907236, | |
| "grad_norm": 6.125461578369141, | |
| "learning_rate": 1.7954843843269665e-05, | |
| "loss": 0.2124, | |
| "step": 309 | |
| }, | |
| { | |
| "epoch": 0.287569573283859, | |
| "grad_norm": 4.838400363922119, | |
| "learning_rate": 1.793517613836437e-05, | |
| "loss": 0.2112, | |
| "step": 310 | |
| }, | |
| { | |
| "epoch": 0.2884972170686456, | |
| "grad_norm": 3.1725666522979736, | |
| "learning_rate": 1.7915425197216246e-05, | |
| "loss": 0.1658, | |
| "step": 311 | |
| }, | |
| { | |
| "epoch": 0.2894248608534323, | |
| "grad_norm": 3.154855489730835, | |
| "learning_rate": 1.7895591227003316e-05, | |
| "loss": 0.2081, | |
| "step": 312 | |
| }, | |
| { | |
| "epoch": 0.29035250463821893, | |
| "grad_norm": 3.7664122581481934, | |
| "learning_rate": 1.7875674435774546e-05, | |
| "loss": 0.1628, | |
| "step": 313 | |
| }, | |
| { | |
| "epoch": 0.2912801484230056, | |
| "grad_norm": 5.876150131225586, | |
| "learning_rate": 1.7855675032447648e-05, | |
| "loss": 0.1882, | |
| "step": 314 | |
| }, | |
| { | |
| "epoch": 0.2922077922077922, | |
| "grad_norm": 2.4536941051483154, | |
| "learning_rate": 1.7835593226806902e-05, | |
| "loss": 0.1287, | |
| "step": 315 | |
| }, | |
| { | |
| "epoch": 0.29313543599257885, | |
| "grad_norm": 4.552228927612305, | |
| "learning_rate": 1.7815429229500946e-05, | |
| "loss": 0.2509, | |
| "step": 316 | |
| }, | |
| { | |
| "epoch": 0.2940630797773655, | |
| "grad_norm": 3.861381769180298, | |
| "learning_rate": 1.7795183252040568e-05, | |
| "loss": 0.1827, | |
| "step": 317 | |
| }, | |
| { | |
| "epoch": 0.2949907235621521, | |
| "grad_norm": 3.6626925468444824, | |
| "learning_rate": 1.7774855506796497e-05, | |
| "loss": 0.154, | |
| "step": 318 | |
| }, | |
| { | |
| "epoch": 0.29591836734693877, | |
| "grad_norm": 6.695428848266602, | |
| "learning_rate": 1.7754446206997152e-05, | |
| "loss": 0.2334, | |
| "step": 319 | |
| }, | |
| { | |
| "epoch": 0.29684601113172543, | |
| "grad_norm": 3.2786192893981934, | |
| "learning_rate": 1.7733955566726438e-05, | |
| "loss": 0.1671, | |
| "step": 320 | |
| }, | |
| { | |
| "epoch": 0.29684601113172543, | |
| "eval_accuracy": 0.8337028824833703, | |
| "eval_f1": 0.625, | |
| "eval_loss": 0.3371331989765167, | |
| "eval_precision": 0.8503401360544217, | |
| "eval_recall": 0.49407114624505927, | |
| "eval_runtime": 47.664, | |
| "eval_samples_per_second": 5.791, | |
| "eval_steps_per_second": 0.189, | |
| "step": 320 | |
| }, | |
| { | |
| "epoch": 0.29777365491651203, | |
| "grad_norm": 4.181811809539795, | |
| "learning_rate": 1.771338380092148e-05, | |
| "loss": 0.2198, | |
| "step": 321 | |
| }, | |
| { | |
| "epoch": 0.2987012987012987, | |
| "grad_norm": 4.808495998382568, | |
| "learning_rate": 1.7692731125370355e-05, | |
| "loss": 0.171, | |
| "step": 322 | |
| }, | |
| { | |
| "epoch": 0.29962894248608535, | |
| "grad_norm": 5.946144104003906, | |
| "learning_rate": 1.767199775670986e-05, | |
| "loss": 0.2176, | |
| "step": 323 | |
| }, | |
| { | |
| "epoch": 0.300556586270872, | |
| "grad_norm": 4.010025978088379, | |
| "learning_rate": 1.7651183912423228e-05, | |
| "loss": 0.2003, | |
| "step": 324 | |
| }, | |
| { | |
| "epoch": 0.3014842300556586, | |
| "grad_norm": 4.795968055725098, | |
| "learning_rate": 1.7630289810837836e-05, | |
| "loss": 0.165, | |
| "step": 325 | |
| }, | |
| { | |
| "epoch": 0.30241187384044527, | |
| "grad_norm": 2.9344944953918457, | |
| "learning_rate": 1.7609315671122912e-05, | |
| "loss": 0.1603, | |
| "step": 326 | |
| }, | |
| { | |
| "epoch": 0.3033395176252319, | |
| "grad_norm": 5.812965393066406, | |
| "learning_rate": 1.758826171328727e-05, | |
| "loss": 0.1458, | |
| "step": 327 | |
| }, | |
| { | |
| "epoch": 0.3042671614100185, | |
| "grad_norm": 4.66487979888916, | |
| "learning_rate": 1.7567128158176955e-05, | |
| "loss": 0.2155, | |
| "step": 328 | |
| }, | |
| { | |
| "epoch": 0.3051948051948052, | |
| "grad_norm": 3.436579942703247, | |
| "learning_rate": 1.7545915227472967e-05, | |
| "loss": 0.1699, | |
| "step": 329 | |
| }, | |
| { | |
| "epoch": 0.30612244897959184, | |
| "grad_norm": 4.022927284240723, | |
| "learning_rate": 1.7524623143688905e-05, | |
| "loss": 0.2037, | |
| "step": 330 | |
| }, | |
| { | |
| "epoch": 0.3070500927643785, | |
| "grad_norm": 3.154623031616211, | |
| "learning_rate": 1.7503252130168657e-05, | |
| "loss": 0.1627, | |
| "step": 331 | |
| }, | |
| { | |
| "epoch": 0.3079777365491651, | |
| "grad_norm": 2.7966222763061523, | |
| "learning_rate": 1.748180241108404e-05, | |
| "loss": 0.1439, | |
| "step": 332 | |
| }, | |
| { | |
| "epoch": 0.30890538033395176, | |
| "grad_norm": 3.8477606773376465, | |
| "learning_rate": 1.7460274211432463e-05, | |
| "loss": 0.135, | |
| "step": 333 | |
| }, | |
| { | |
| "epoch": 0.3098330241187384, | |
| "grad_norm": 3.51237416267395, | |
| "learning_rate": 1.7438667757034547e-05, | |
| "loss": 0.213, | |
| "step": 334 | |
| }, | |
| { | |
| "epoch": 0.310760667903525, | |
| "grad_norm": 4.482552528381348, | |
| "learning_rate": 1.7416983274531777e-05, | |
| "loss": 0.2018, | |
| "step": 335 | |
| }, | |
| { | |
| "epoch": 0.3116883116883117, | |
| "grad_norm": 3.7399659156799316, | |
| "learning_rate": 1.739522099138411e-05, | |
| "loss": 0.2196, | |
| "step": 336 | |
| }, | |
| { | |
| "epoch": 0.31261595547309834, | |
| "grad_norm": 3.4632811546325684, | |
| "learning_rate": 1.7373381135867605e-05, | |
| "loss": 0.1588, | |
| "step": 337 | |
| }, | |
| { | |
| "epoch": 0.313543599257885, | |
| "grad_norm": 5.400178909301758, | |
| "learning_rate": 1.7351463937072008e-05, | |
| "loss": 0.2455, | |
| "step": 338 | |
| }, | |
| { | |
| "epoch": 0.3144712430426716, | |
| "grad_norm": 4.754225730895996, | |
| "learning_rate": 1.732946962489836e-05, | |
| "loss": 0.2067, | |
| "step": 339 | |
| }, | |
| { | |
| "epoch": 0.31539888682745826, | |
| "grad_norm": 3.6749982833862305, | |
| "learning_rate": 1.7307398430056595e-05, | |
| "loss": 0.1809, | |
| "step": 340 | |
| }, | |
| { | |
| "epoch": 0.31539888682745826, | |
| "eval_accuracy": 0.8514412416851441, | |
| "eval_f1": 0.6912442396313364, | |
| "eval_loss": 0.34059056639671326, | |
| "eval_precision": 0.8287292817679558, | |
| "eval_recall": 0.5928853754940712, | |
| "eval_runtime": 47.764, | |
| "eval_samples_per_second": 5.778, | |
| "eval_steps_per_second": 0.188, | |
| "step": 340 | |
| }, | |
| { | |
| "epoch": 0.3163265306122449, | |
| "grad_norm": 5.549361705780029, | |
| "learning_rate": 1.72852505840631e-05, | |
| "loss": 0.1656, | |
| "step": 341 | |
| }, | |
| { | |
| "epoch": 0.3172541743970315, | |
| "grad_norm": 7.491882801055908, | |
| "learning_rate": 1.72630263192383e-05, | |
| "loss": 0.2493, | |
| "step": 342 | |
| }, | |
| { | |
| "epoch": 0.3181818181818182, | |
| "grad_norm": 5.225057125091553, | |
| "learning_rate": 1.7240725868704218e-05, | |
| "loss": 0.2031, | |
| "step": 343 | |
| }, | |
| { | |
| "epoch": 0.31910946196660483, | |
| "grad_norm": 7.436551094055176, | |
| "learning_rate": 1.7218349466382024e-05, | |
| "loss": 0.2245, | |
| "step": 344 | |
| }, | |
| { | |
| "epoch": 0.3200371057513915, | |
| "grad_norm": 4.4297871589660645, | |
| "learning_rate": 1.719589734698959e-05, | |
| "loss": 0.2226, | |
| "step": 345 | |
| }, | |
| { | |
| "epoch": 0.3209647495361781, | |
| "grad_norm": 6.560292720794678, | |
| "learning_rate": 1.7173369746039026e-05, | |
| "loss": 0.2331, | |
| "step": 346 | |
| }, | |
| { | |
| "epoch": 0.32189239332096475, | |
| "grad_norm": 5.045467376708984, | |
| "learning_rate": 1.7150766899834205e-05, | |
| "loss": 0.2321, | |
| "step": 347 | |
| }, | |
| { | |
| "epoch": 0.3228200371057514, | |
| "grad_norm": 6.464671611785889, | |
| "learning_rate": 1.7128089045468294e-05, | |
| "loss": 0.19, | |
| "step": 348 | |
| }, | |
| { | |
| "epoch": 0.323747680890538, | |
| "grad_norm": 2.7538771629333496, | |
| "learning_rate": 1.7105336420821247e-05, | |
| "loss": 0.1642, | |
| "step": 349 | |
| }, | |
| { | |
| "epoch": 0.3246753246753247, | |
| "grad_norm": 4.287221431732178, | |
| "learning_rate": 1.7082509264557333e-05, | |
| "loss": 0.1556, | |
| "step": 350 | |
| }, | |
| { | |
| "epoch": 0.32560296846011133, | |
| "grad_norm": 3.1308867931365967, | |
| "learning_rate": 1.705960781612262e-05, | |
| "loss": 0.1583, | |
| "step": 351 | |
| }, | |
| { | |
| "epoch": 0.32653061224489793, | |
| "grad_norm": 3.598468542098999, | |
| "learning_rate": 1.7036632315742464e-05, | |
| "loss": 0.1571, | |
| "step": 352 | |
| }, | |
| { | |
| "epoch": 0.3274582560296846, | |
| "grad_norm": 2.93074369430542, | |
| "learning_rate": 1.7013583004418994e-05, | |
| "loss": 0.1463, | |
| "step": 353 | |
| }, | |
| { | |
| "epoch": 0.32838589981447125, | |
| "grad_norm": 3.5115561485290527, | |
| "learning_rate": 1.6990460123928577e-05, | |
| "loss": 0.1522, | |
| "step": 354 | |
| }, | |
| { | |
| "epoch": 0.3293135435992579, | |
| "grad_norm": 2.3975048065185547, | |
| "learning_rate": 1.696726391681929e-05, | |
| "loss": 0.136, | |
| "step": 355 | |
| }, | |
| { | |
| "epoch": 0.3302411873840445, | |
| "grad_norm": 3.01874041557312, | |
| "learning_rate": 1.6943994626408365e-05, | |
| "loss": 0.1928, | |
| "step": 356 | |
| }, | |
| { | |
| "epoch": 0.33116883116883117, | |
| "grad_norm": 3.620572805404663, | |
| "learning_rate": 1.692065249677965e-05, | |
| "loss": 0.1964, | |
| "step": 357 | |
| }, | |
| { | |
| "epoch": 0.3320964749536178, | |
| "grad_norm": 3.1630985736846924, | |
| "learning_rate": 1.6897237772781046e-05, | |
| "loss": 0.1757, | |
| "step": 358 | |
| }, | |
| { | |
| "epoch": 0.33302411873840443, | |
| "grad_norm": 2.7404632568359375, | |
| "learning_rate": 1.6873750700021917e-05, | |
| "loss": 0.1391, | |
| "step": 359 | |
| }, | |
| { | |
| "epoch": 0.3339517625231911, | |
| "grad_norm": 3.915576696395874, | |
| "learning_rate": 1.6850191524870548e-05, | |
| "loss": 0.1672, | |
| "step": 360 | |
| }, | |
| { | |
| "epoch": 0.3339517625231911, | |
| "eval_accuracy": 0.8392461197339246, | |
| "eval_f1": 0.6365914786967418, | |
| "eval_loss": 0.3519636392593384, | |
| "eval_precision": 0.8698630136986302, | |
| "eval_recall": 0.5019762845849802, | |
| "eval_runtime": 48.8319, | |
| "eval_samples_per_second": 5.652, | |
| "eval_steps_per_second": 0.184, | |
| "step": 360 | |
| }, | |
| { | |
| "epoch": 0.33487940630797774, | |
| "grad_norm": 4.442441940307617, | |
| "learning_rate": 1.682656049445154e-05, | |
| "loss": 0.1695, | |
| "step": 361 | |
| }, | |
| { | |
| "epoch": 0.3358070500927644, | |
| "grad_norm": 3.9514217376708984, | |
| "learning_rate": 1.6802857856643214e-05, | |
| "loss": 0.1744, | |
| "step": 362 | |
| }, | |
| { | |
| "epoch": 0.336734693877551, | |
| "grad_norm": 5.170446395874023, | |
| "learning_rate": 1.6779083860075032e-05, | |
| "loss": 0.1198, | |
| "step": 363 | |
| }, | |
| { | |
| "epoch": 0.33766233766233766, | |
| "grad_norm": 7.009990215301514, | |
| "learning_rate": 1.6755238754124965e-05, | |
| "loss": 0.1708, | |
| "step": 364 | |
| }, | |
| { | |
| "epoch": 0.3385899814471243, | |
| "grad_norm": 7.845787048339844, | |
| "learning_rate": 1.6731322788916892e-05, | |
| "loss": 0.1723, | |
| "step": 365 | |
| }, | |
| { | |
| "epoch": 0.3395176252319109, | |
| "grad_norm": 3.684112071990967, | |
| "learning_rate": 1.6707336215317968e-05, | |
| "loss": 0.1136, | |
| "step": 366 | |
| }, | |
| { | |
| "epoch": 0.3404452690166976, | |
| "grad_norm": 6.165255069732666, | |
| "learning_rate": 1.6683279284936004e-05, | |
| "loss": 0.2895, | |
| "step": 367 | |
| }, | |
| { | |
| "epoch": 0.34137291280148424, | |
| "grad_norm": 5.1027326583862305, | |
| "learning_rate": 1.665915225011681e-05, | |
| "loss": 0.1915, | |
| "step": 368 | |
| }, | |
| { | |
| "epoch": 0.3423005565862709, | |
| "grad_norm": 4.702267646789551, | |
| "learning_rate": 1.6634955363941573e-05, | |
| "loss": 0.135, | |
| "step": 369 | |
| }, | |
| { | |
| "epoch": 0.3432282003710575, | |
| "grad_norm": 3.623323678970337, | |
| "learning_rate": 1.6610688880224178e-05, | |
| "loss": 0.1364, | |
| "step": 370 | |
| }, | |
| { | |
| "epoch": 0.34415584415584416, | |
| "grad_norm": 4.77068567276001, | |
| "learning_rate": 1.6586353053508548e-05, | |
| "loss": 0.2082, | |
| "step": 371 | |
| }, | |
| { | |
| "epoch": 0.3450834879406308, | |
| "grad_norm": 6.354672908782959, | |
| "learning_rate": 1.6561948139065997e-05, | |
| "loss": 0.2037, | |
| "step": 372 | |
| }, | |
| { | |
| "epoch": 0.3460111317254174, | |
| "grad_norm": 3.074613094329834, | |
| "learning_rate": 1.6537474392892527e-05, | |
| "loss": 0.1123, | |
| "step": 373 | |
| }, | |
| { | |
| "epoch": 0.3469387755102041, | |
| "grad_norm": 5.388340473175049, | |
| "learning_rate": 1.6512932071706153e-05, | |
| "loss": 0.2308, | |
| "step": 374 | |
| }, | |
| { | |
| "epoch": 0.34786641929499074, | |
| "grad_norm": 3.377591609954834, | |
| "learning_rate": 1.6488321432944218e-05, | |
| "loss": 0.1607, | |
| "step": 375 | |
| }, | |
| { | |
| "epoch": 0.34879406307977734, | |
| "grad_norm": 3.697253465652466, | |
| "learning_rate": 1.646364273476067e-05, | |
| "loss": 0.2064, | |
| "step": 376 | |
| }, | |
| { | |
| "epoch": 0.349721706864564, | |
| "grad_norm": 2.825587272644043, | |
| "learning_rate": 1.6438896236023374e-05, | |
| "loss": 0.1405, | |
| "step": 377 | |
| }, | |
| { | |
| "epoch": 0.35064935064935066, | |
| "grad_norm": 5.332752227783203, | |
| "learning_rate": 1.6414082196311402e-05, | |
| "loss": 0.1838, | |
| "step": 378 | |
| }, | |
| { | |
| "epoch": 0.3515769944341373, | |
| "grad_norm": 3.3014256954193115, | |
| "learning_rate": 1.638920087591228e-05, | |
| "loss": 0.1516, | |
| "step": 379 | |
| }, | |
| { | |
| "epoch": 0.3525046382189239, | |
| "grad_norm": 2.9438254833221436, | |
| "learning_rate": 1.6364252535819284e-05, | |
| "loss": 0.153, | |
| "step": 380 | |
| }, | |
| { | |
| "epoch": 0.3525046382189239, | |
| "eval_accuracy": 0.8458980044345898, | |
| "eval_f1": 0.6634382566585957, | |
| "eval_loss": 0.32734009623527527, | |
| "eval_precision": 0.85625, | |
| "eval_recall": 0.541501976284585, | |
| "eval_runtime": 47.3489, | |
| "eval_samples_per_second": 5.829, | |
| "eval_steps_per_second": 0.19, | |
| "step": 380 | |
| }, | |
| { | |
| "epoch": 0.3534322820037106, | |
| "grad_norm": 4.24219274520874, | |
| "learning_rate": 1.63392374377287e-05, | |
| "loss": 0.1265, | |
| "step": 381 | |
| }, | |
| { | |
| "epoch": 0.35435992578849723, | |
| "grad_norm": 3.8825278282165527, | |
| "learning_rate": 1.6314155844037074e-05, | |
| "loss": 0.173, | |
| "step": 382 | |
| }, | |
| { | |
| "epoch": 0.35528756957328383, | |
| "grad_norm": 4.059886455535889, | |
| "learning_rate": 1.6289008017838447e-05, | |
| "loss": 0.1639, | |
| "step": 383 | |
| }, | |
| { | |
| "epoch": 0.3562152133580705, | |
| "grad_norm": 5.13779878616333, | |
| "learning_rate": 1.626379422292162e-05, | |
| "loss": 0.1006, | |
| "step": 384 | |
| }, | |
| { | |
| "epoch": 0.35714285714285715, | |
| "grad_norm": 5.269052028656006, | |
| "learning_rate": 1.6238514723767372e-05, | |
| "loss": 0.2033, | |
| "step": 385 | |
| }, | |
| { | |
| "epoch": 0.3580705009276438, | |
| "grad_norm": 4.739077091217041, | |
| "learning_rate": 1.6213169785545688e-05, | |
| "loss": 0.2673, | |
| "step": 386 | |
| }, | |
| { | |
| "epoch": 0.3589981447124304, | |
| "grad_norm": 5.1900634765625, | |
| "learning_rate": 1.6187759674112972e-05, | |
| "loss": 0.2156, | |
| "step": 387 | |
| }, | |
| { | |
| "epoch": 0.35992578849721707, | |
| "grad_norm": 2.844031810760498, | |
| "learning_rate": 1.6162284656009276e-05, | |
| "loss": 0.0855, | |
| "step": 388 | |
| }, | |
| { | |
| "epoch": 0.3608534322820037, | |
| "grad_norm": 5.831792831420898, | |
| "learning_rate": 1.6136744998455477e-05, | |
| "loss": 0.2278, | |
| "step": 389 | |
| }, | |
| { | |
| "epoch": 0.36178107606679033, | |
| "grad_norm": 3.952970504760742, | |
| "learning_rate": 1.6111140969350504e-05, | |
| "loss": 0.1348, | |
| "step": 390 | |
| }, | |
| { | |
| "epoch": 0.362708719851577, | |
| "grad_norm": 3.9323179721832275, | |
| "learning_rate": 1.6085472837268504e-05, | |
| "loss": 0.1731, | |
| "step": 391 | |
| }, | |
| { | |
| "epoch": 0.36363636363636365, | |
| "grad_norm": 4.621875286102295, | |
| "learning_rate": 1.6059740871456035e-05, | |
| "loss": 0.2011, | |
| "step": 392 | |
| }, | |
| { | |
| "epoch": 0.3645640074211503, | |
| "grad_norm": 6.936546325683594, | |
| "learning_rate": 1.603394534182925e-05, | |
| "loss": 0.1766, | |
| "step": 393 | |
| }, | |
| { | |
| "epoch": 0.3654916512059369, | |
| "grad_norm": 4.287359237670898, | |
| "learning_rate": 1.6008086518971037e-05, | |
| "loss": 0.1939, | |
| "step": 394 | |
| }, | |
| { | |
| "epoch": 0.36641929499072357, | |
| "grad_norm": 3.0315101146698, | |
| "learning_rate": 1.598216467412822e-05, | |
| "loss": 0.1091, | |
| "step": 395 | |
| }, | |
| { | |
| "epoch": 0.3673469387755102, | |
| "grad_norm": 6.599478721618652, | |
| "learning_rate": 1.5956180079208684e-05, | |
| "loss": 0.232, | |
| "step": 396 | |
| }, | |
| { | |
| "epoch": 0.3682745825602968, | |
| "grad_norm": 4.763383388519287, | |
| "learning_rate": 1.593013300677853e-05, | |
| "loss": 0.1877, | |
| "step": 397 | |
| }, | |
| { | |
| "epoch": 0.3692022263450835, | |
| "grad_norm": 4.326437950134277, | |
| "learning_rate": 1.5904023730059227e-05, | |
| "loss": 0.1924, | |
| "step": 398 | |
| }, | |
| { | |
| "epoch": 0.37012987012987014, | |
| "grad_norm": 4.332076072692871, | |
| "learning_rate": 1.5877852522924733e-05, | |
| "loss": 0.201, | |
| "step": 399 | |
| }, | |
| { | |
| "epoch": 0.37105751391465674, | |
| "grad_norm": 4.557116985321045, | |
| "learning_rate": 1.5851619659898623e-05, | |
| "loss": 0.2, | |
| "step": 400 | |
| }, | |
| { | |
| "epoch": 0.37105751391465674, | |
| "eval_accuracy": 0.844789356984479, | |
| "eval_f1": 0.6585365853658537, | |
| "eval_loss": 0.33066147565841675, | |
| "eval_precision": 0.8598726114649682, | |
| "eval_recall": 0.5335968379446641, | |
| "eval_runtime": 47.9063, | |
| "eval_samples_per_second": 5.761, | |
| "eval_steps_per_second": 0.188, | |
| "step": 400 | |
| }, | |
| { | |
| "epoch": 0.3719851576994434, | |
| "grad_norm": 4.4434332847595215, | |
| "learning_rate": 1.582532541615122e-05, | |
| "loss": 0.1563, | |
| "step": 401 | |
| }, | |
| { | |
| "epoch": 0.37291280148423006, | |
| "grad_norm": 2.928924083709717, | |
| "learning_rate": 1.57989700674967e-05, | |
| "loss": 0.1211, | |
| "step": 402 | |
| }, | |
| { | |
| "epoch": 0.3738404452690167, | |
| "grad_norm": 3.329848527908325, | |
| "learning_rate": 1.5772553890390196e-05, | |
| "loss": 0.2248, | |
| "step": 403 | |
| }, | |
| { | |
| "epoch": 0.3747680890538033, | |
| "grad_norm": 4.852266311645508, | |
| "learning_rate": 1.5746077161924905e-05, | |
| "loss": 0.2121, | |
| "step": 404 | |
| }, | |
| { | |
| "epoch": 0.37569573283859, | |
| "grad_norm": 8.039565086364746, | |
| "learning_rate": 1.5719540159829185e-05, | |
| "loss": 0.1864, | |
| "step": 405 | |
| }, | |
| { | |
| "epoch": 0.37662337662337664, | |
| "grad_norm": 4.178286552429199, | |
| "learning_rate": 1.5692943162463628e-05, | |
| "loss": 0.1164, | |
| "step": 406 | |
| }, | |
| { | |
| "epoch": 0.37755102040816324, | |
| "grad_norm": 4.181092739105225, | |
| "learning_rate": 1.5666286448818152e-05, | |
| "loss": 0.136, | |
| "step": 407 | |
| }, | |
| { | |
| "epoch": 0.3784786641929499, | |
| "grad_norm": 4.847424030303955, | |
| "learning_rate": 1.5639570298509067e-05, | |
| "loss": 0.1692, | |
| "step": 408 | |
| }, | |
| { | |
| "epoch": 0.37940630797773656, | |
| "grad_norm": 5.589816570281982, | |
| "learning_rate": 1.5612794991776147e-05, | |
| "loss": 0.1375, | |
| "step": 409 | |
| }, | |
| { | |
| "epoch": 0.3803339517625232, | |
| "grad_norm": 3.174445629119873, | |
| "learning_rate": 1.5585960809479698e-05, | |
| "loss": 0.1683, | |
| "step": 410 | |
| }, | |
| { | |
| "epoch": 0.3812615955473098, | |
| "grad_norm": 7.2739739418029785, | |
| "learning_rate": 1.5559068033097583e-05, | |
| "loss": 0.2135, | |
| "step": 411 | |
| }, | |
| { | |
| "epoch": 0.3821892393320965, | |
| "grad_norm": 5.514982223510742, | |
| "learning_rate": 1.5532116944722308e-05, | |
| "loss": 0.2366, | |
| "step": 412 | |
| }, | |
| { | |
| "epoch": 0.38311688311688313, | |
| "grad_norm": 4.4258036613464355, | |
| "learning_rate": 1.5505107827058038e-05, | |
| "loss": 0.1667, | |
| "step": 413 | |
| }, | |
| { | |
| "epoch": 0.38404452690166974, | |
| "grad_norm": 4.089566230773926, | |
| "learning_rate": 1.547804096341763e-05, | |
| "loss": 0.1647, | |
| "step": 414 | |
| }, | |
| { | |
| "epoch": 0.3849721706864564, | |
| "grad_norm": 3.6559548377990723, | |
| "learning_rate": 1.5450916637719683e-05, | |
| "loss": 0.1705, | |
| "step": 415 | |
| }, | |
| { | |
| "epoch": 0.38589981447124305, | |
| "grad_norm": 4.045246124267578, | |
| "learning_rate": 1.5423735134485537e-05, | |
| "loss": 0.1915, | |
| "step": 416 | |
| }, | |
| { | |
| "epoch": 0.3868274582560297, | |
| "grad_norm": 3.6718714237213135, | |
| "learning_rate": 1.5396496738836292e-05, | |
| "loss": 0.1532, | |
| "step": 417 | |
| }, | |
| { | |
| "epoch": 0.3877551020408163, | |
| "grad_norm": 4.593350887298584, | |
| "learning_rate": 1.536920173648984e-05, | |
| "loss": 0.1815, | |
| "step": 418 | |
| }, | |
| { | |
| "epoch": 0.38868274582560297, | |
| "grad_norm": 3.913667917251587, | |
| "learning_rate": 1.5341850413757834e-05, | |
| "loss": 0.1448, | |
| "step": 419 | |
| }, | |
| { | |
| "epoch": 0.38961038961038963, | |
| "grad_norm": 4.148723125457764, | |
| "learning_rate": 1.5314443057542703e-05, | |
| "loss": 0.2082, | |
| "step": 420 | |
| }, | |
| { | |
| "epoch": 0.38961038961038963, | |
| "eval_accuracy": 0.8603104212860311, | |
| "eval_f1": 0.7136363636363636, | |
| "eval_loss": 0.31429344415664673, | |
| "eval_precision": 0.839572192513369, | |
| "eval_recall": 0.6205533596837944, | |
| "eval_runtime": 48.5179, | |
| "eval_samples_per_second": 5.689, | |
| "eval_steps_per_second": 0.185, | |
| "step": 420 | |
| }, | |
| { | |
| "epoch": 0.39053803339517623, | |
| "grad_norm": 4.14030122756958, | |
| "learning_rate": 1.5286979955334655e-05, | |
| "loss": 0.1749, | |
| "step": 421 | |
| }, | |
| { | |
| "epoch": 0.3914656771799629, | |
| "grad_norm": 3.4345805644989014, | |
| "learning_rate": 1.5259461395208628e-05, | |
| "loss": 0.1712, | |
| "step": 422 | |
| }, | |
| { | |
| "epoch": 0.39239332096474955, | |
| "grad_norm": 3.9379382133483887, | |
| "learning_rate": 1.52318876658213e-05, | |
| "loss": 0.1658, | |
| "step": 423 | |
| }, | |
| { | |
| "epoch": 0.39332096474953615, | |
| "grad_norm": 3.1555135250091553, | |
| "learning_rate": 1.5204259056408046e-05, | |
| "loss": 0.1537, | |
| "step": 424 | |
| }, | |
| { | |
| "epoch": 0.3942486085343228, | |
| "grad_norm": 3.893655776977539, | |
| "learning_rate": 1.5176575856779904e-05, | |
| "loss": 0.1509, | |
| "step": 425 | |
| }, | |
| { | |
| "epoch": 0.39517625231910947, | |
| "grad_norm": 4.423066139221191, | |
| "learning_rate": 1.5148838357320537e-05, | |
| "loss": 0.1501, | |
| "step": 426 | |
| }, | |
| { | |
| "epoch": 0.3961038961038961, | |
| "grad_norm": 5.078852653503418, | |
| "learning_rate": 1.512104684898319e-05, | |
| "loss": 0.2835, | |
| "step": 427 | |
| }, | |
| { | |
| "epoch": 0.3970315398886827, | |
| "grad_norm": 3.377333164215088, | |
| "learning_rate": 1.5093201623287631e-05, | |
| "loss": 0.1393, | |
| "step": 428 | |
| }, | |
| { | |
| "epoch": 0.3979591836734694, | |
| "grad_norm": 4.0545196533203125, | |
| "learning_rate": 1.5065302972317108e-05, | |
| "loss": 0.1567, | |
| "step": 429 | |
| }, | |
| { | |
| "epoch": 0.39888682745825604, | |
| "grad_norm": 6.253279685974121, | |
| "learning_rate": 1.5037351188715265e-05, | |
| "loss": 0.2155, | |
| "step": 430 | |
| }, | |
| { | |
| "epoch": 0.39981447124304265, | |
| "grad_norm": 2.698172092437744, | |
| "learning_rate": 1.5009346565683088e-05, | |
| "loss": 0.1102, | |
| "step": 431 | |
| }, | |
| { | |
| "epoch": 0.4007421150278293, | |
| "grad_norm": 4.845376014709473, | |
| "learning_rate": 1.4981289396975818e-05, | |
| "loss": 0.2186, | |
| "step": 432 | |
| }, | |
| { | |
| "epoch": 0.40166975881261596, | |
| "grad_norm": 2.9927968978881836, | |
| "learning_rate": 1.4953179976899878e-05, | |
| "loss": 0.1365, | |
| "step": 433 | |
| }, | |
| { | |
| "epoch": 0.4025974025974026, | |
| "grad_norm": 3.484133720397949, | |
| "learning_rate": 1.4925018600309784e-05, | |
| "loss": 0.18, | |
| "step": 434 | |
| }, | |
| { | |
| "epoch": 0.4035250463821892, | |
| "grad_norm": 2.8350846767425537, | |
| "learning_rate": 1.4896805562605052e-05, | |
| "loss": 0.1644, | |
| "step": 435 | |
| }, | |
| { | |
| "epoch": 0.4044526901669759, | |
| "grad_norm": 3.7044577598571777, | |
| "learning_rate": 1.4868541159727097e-05, | |
| "loss": 0.1758, | |
| "step": 436 | |
| }, | |
| { | |
| "epoch": 0.40538033395176254, | |
| "grad_norm": 3.7919528484344482, | |
| "learning_rate": 1.4840225688156132e-05, | |
| "loss": 0.1609, | |
| "step": 437 | |
| }, | |
| { | |
| "epoch": 0.40630797773654914, | |
| "grad_norm": 3.7263407707214355, | |
| "learning_rate": 1.4811859444908053e-05, | |
| "loss": 0.1674, | |
| "step": 438 | |
| }, | |
| { | |
| "epoch": 0.4072356215213358, | |
| "grad_norm": 3.5427684783935547, | |
| "learning_rate": 1.4783442727531328e-05, | |
| "loss": 0.212, | |
| "step": 439 | |
| }, | |
| { | |
| "epoch": 0.40816326530612246, | |
| "grad_norm": 5.29186487197876, | |
| "learning_rate": 1.4754975834103877e-05, | |
| "loss": 0.2051, | |
| "step": 440 | |
| }, | |
| { | |
| "epoch": 0.40816326530612246, | |
| "eval_accuracy": 0.8569844789356984, | |
| "eval_f1": 0.6978922716627635, | |
| "eval_loss": 0.3138832747936249, | |
| "eval_precision": 0.8563218390804598, | |
| "eval_recall": 0.5889328063241107, | |
| "eval_runtime": 48.1239, | |
| "eval_samples_per_second": 5.735, | |
| "eval_steps_per_second": 0.187, | |
| "step": 440 | |
| }, | |
| { | |
| "epoch": 0.4090909090909091, | |
| "grad_norm": 6.476135730743408, | |
| "learning_rate": 1.4726459063229946e-05, | |
| "loss": 0.1586, | |
| "step": 441 | |
| }, | |
| { | |
| "epoch": 0.4100185528756957, | |
| "grad_norm": 4.70169734954834, | |
| "learning_rate": 1.4697892714036959e-05, | |
| "loss": 0.1604, | |
| "step": 442 | |
| }, | |
| { | |
| "epoch": 0.4109461966604824, | |
| "grad_norm": 3.1262881755828857, | |
| "learning_rate": 1.4669277086172406e-05, | |
| "loss": 0.1282, | |
| "step": 443 | |
| }, | |
| { | |
| "epoch": 0.41187384044526903, | |
| "grad_norm": 3.5137503147125244, | |
| "learning_rate": 1.4640612479800686e-05, | |
| "loss": 0.179, | |
| "step": 444 | |
| }, | |
| { | |
| "epoch": 0.41280148423005564, | |
| "grad_norm": 5.388190269470215, | |
| "learning_rate": 1.4611899195599952e-05, | |
| "loss": 0.2404, | |
| "step": 445 | |
| }, | |
| { | |
| "epoch": 0.4137291280148423, | |
| "grad_norm": 4.668002128601074, | |
| "learning_rate": 1.4583137534758968e-05, | |
| "loss": 0.1781, | |
| "step": 446 | |
| }, | |
| { | |
| "epoch": 0.41465677179962895, | |
| "grad_norm": 4.363613128662109, | |
| "learning_rate": 1.455432779897395e-05, | |
| "loss": 0.1636, | |
| "step": 447 | |
| }, | |
| { | |
| "epoch": 0.4155844155844156, | |
| "grad_norm": 3.608628749847412, | |
| "learning_rate": 1.4525470290445392e-05, | |
| "loss": 0.1604, | |
| "step": 448 | |
| }, | |
| { | |
| "epoch": 0.4165120593692022, | |
| "grad_norm": 4.1195387840271, | |
| "learning_rate": 1.4496565311874902e-05, | |
| "loss": 0.1752, | |
| "step": 449 | |
| }, | |
| { | |
| "epoch": 0.4174397031539889, | |
| "grad_norm": 2.9940760135650635, | |
| "learning_rate": 1.4467613166462024e-05, | |
| "loss": 0.1579, | |
| "step": 450 | |
| }, | |
| { | |
| "epoch": 0.41836734693877553, | |
| "grad_norm": 3.3826465606689453, | |
| "learning_rate": 1.4438614157901073e-05, | |
| "loss": 0.1529, | |
| "step": 451 | |
| }, | |
| { | |
| "epoch": 0.41929499072356213, | |
| "grad_norm": 2.9892001152038574, | |
| "learning_rate": 1.4409568590377918e-05, | |
| "loss": 0.1435, | |
| "step": 452 | |
| }, | |
| { | |
| "epoch": 0.4202226345083488, | |
| "grad_norm": 3.4909090995788574, | |
| "learning_rate": 1.4380476768566825e-05, | |
| "loss": 0.1879, | |
| "step": 453 | |
| }, | |
| { | |
| "epoch": 0.42115027829313545, | |
| "grad_norm": 3.8178699016571045, | |
| "learning_rate": 1.4351338997627233e-05, | |
| "loss": 0.1693, | |
| "step": 454 | |
| }, | |
| { | |
| "epoch": 0.42207792207792205, | |
| "grad_norm": 3.101930618286133, | |
| "learning_rate": 1.4322155583200577e-05, | |
| "loss": 0.1315, | |
| "step": 455 | |
| }, | |
| { | |
| "epoch": 0.4230055658627087, | |
| "grad_norm": 3.344278335571289, | |
| "learning_rate": 1.429292683140706e-05, | |
| "loss": 0.1539, | |
| "step": 456 | |
| }, | |
| { | |
| "epoch": 0.42393320964749537, | |
| "grad_norm": 2.970942497253418, | |
| "learning_rate": 1.4263653048842461e-05, | |
| "loss": 0.1374, | |
| "step": 457 | |
| }, | |
| { | |
| "epoch": 0.424860853432282, | |
| "grad_norm": 3.1692464351654053, | |
| "learning_rate": 1.4234334542574906e-05, | |
| "loss": 0.2035, | |
| "step": 458 | |
| }, | |
| { | |
| "epoch": 0.42578849721706863, | |
| "grad_norm": 2.829171895980835, | |
| "learning_rate": 1.4204971620141648e-05, | |
| "loss": 0.1483, | |
| "step": 459 | |
| }, | |
| { | |
| "epoch": 0.4267161410018553, | |
| "grad_norm": 3.1080052852630615, | |
| "learning_rate": 1.4175564589545853e-05, | |
| "loss": 0.0959, | |
| "step": 460 | |
| }, | |
| { | |
| "epoch": 0.4267161410018553, | |
| "eval_accuracy": 0.8569844789356984, | |
| "eval_f1": 0.6993006993006993, | |
| "eval_loss": 0.3129526674747467, | |
| "eval_precision": 0.8522727272727273, | |
| "eval_recall": 0.5928853754940712, | |
| "eval_runtime": 48.0013, | |
| "eval_samples_per_second": 5.75, | |
| "eval_steps_per_second": 0.187, | |
| "step": 460 | |
| }, | |
| { | |
| "epoch": 0.42764378478664195, | |
| "grad_norm": 4.785598278045654, | |
| "learning_rate": 1.4146113759253362e-05, | |
| "loss": 0.1891, | |
| "step": 461 | |
| }, | |
| { | |
| "epoch": 0.42857142857142855, | |
| "grad_norm": 4.229030609130859, | |
| "learning_rate": 1.411661943818944e-05, | |
| "loss": 0.1546, | |
| "step": 462 | |
| }, | |
| { | |
| "epoch": 0.4294990723562152, | |
| "grad_norm": 4.43852424621582, | |
| "learning_rate": 1.4087081935735565e-05, | |
| "loss": 0.1769, | |
| "step": 463 | |
| }, | |
| { | |
| "epoch": 0.43042671614100186, | |
| "grad_norm": 4.6244049072265625, | |
| "learning_rate": 1.4057501561726157e-05, | |
| "loss": 0.2404, | |
| "step": 464 | |
| }, | |
| { | |
| "epoch": 0.4313543599257885, | |
| "grad_norm": 4.3941168785095215, | |
| "learning_rate": 1.4027878626445339e-05, | |
| "loss": 0.1781, | |
| "step": 465 | |
| }, | |
| { | |
| "epoch": 0.4322820037105751, | |
| "grad_norm": 6.995157718658447, | |
| "learning_rate": 1.3998213440623691e-05, | |
| "loss": 0.2318, | |
| "step": 466 | |
| }, | |
| { | |
| "epoch": 0.4332096474953618, | |
| "grad_norm": 2.781472682952881, | |
| "learning_rate": 1.3968506315434973e-05, | |
| "loss": 0.0949, | |
| "step": 467 | |
| }, | |
| { | |
| "epoch": 0.43413729128014844, | |
| "grad_norm": 2.9485185146331787, | |
| "learning_rate": 1.3938757562492873e-05, | |
| "loss": 0.1268, | |
| "step": 468 | |
| }, | |
| { | |
| "epoch": 0.43506493506493504, | |
| "grad_norm": 6.8653130531311035, | |
| "learning_rate": 1.390896749384773e-05, | |
| "loss": 0.2818, | |
| "step": 469 | |
| }, | |
| { | |
| "epoch": 0.4359925788497217, | |
| "grad_norm": 5.269331932067871, | |
| "learning_rate": 1.3879136421983265e-05, | |
| "loss": 0.1386, | |
| "step": 470 | |
| }, | |
| { | |
| "epoch": 0.43692022263450836, | |
| "grad_norm": 5.839674472808838, | |
| "learning_rate": 1.3849264659813314e-05, | |
| "loss": 0.1739, | |
| "step": 471 | |
| }, | |
| { | |
| "epoch": 0.437847866419295, | |
| "grad_norm": 4.306594371795654, | |
| "learning_rate": 1.3819352520678519e-05, | |
| "loss": 0.1562, | |
| "step": 472 | |
| }, | |
| { | |
| "epoch": 0.4387755102040816, | |
| "grad_norm": 4.80615234375, | |
| "learning_rate": 1.378940031834307e-05, | |
| "loss": 0.2049, | |
| "step": 473 | |
| }, | |
| { | |
| "epoch": 0.4397031539888683, | |
| "grad_norm": 4.824836730957031, | |
| "learning_rate": 1.3759408366991391e-05, | |
| "loss": 0.1667, | |
| "step": 474 | |
| }, | |
| { | |
| "epoch": 0.44063079777365494, | |
| "grad_norm": 4.227314472198486, | |
| "learning_rate": 1.3729376981224869e-05, | |
| "loss": 0.1783, | |
| "step": 475 | |
| }, | |
| { | |
| "epoch": 0.44155844155844154, | |
| "grad_norm": 4.170743942260742, | |
| "learning_rate": 1.3699306476058523e-05, | |
| "loss": 0.1724, | |
| "step": 476 | |
| }, | |
| { | |
| "epoch": 0.4424860853432282, | |
| "grad_norm": 6.098860263824463, | |
| "learning_rate": 1.3669197166917723e-05, | |
| "loss": 0.2301, | |
| "step": 477 | |
| }, | |
| { | |
| "epoch": 0.44341372912801486, | |
| "grad_norm": 3.2894742488861084, | |
| "learning_rate": 1.3639049369634878e-05, | |
| "loss": 0.1636, | |
| "step": 478 | |
| }, | |
| { | |
| "epoch": 0.44434137291280146, | |
| "grad_norm": 3.913362503051758, | |
| "learning_rate": 1.3608863400446113e-05, | |
| "loss": 0.1762, | |
| "step": 479 | |
| }, | |
| { | |
| "epoch": 0.4452690166975881, | |
| "grad_norm": 4.007498741149902, | |
| "learning_rate": 1.357863957598796e-05, | |
| "loss": 0.1955, | |
| "step": 480 | |
| }, | |
| { | |
| "epoch": 0.4452690166975881, | |
| "eval_accuracy": 0.8592017738359202, | |
| "eval_f1": 0.7080459770114943, | |
| "eval_loss": 0.3043546974658966, | |
| "eval_precision": 0.8461538461538461, | |
| "eval_recall": 0.6086956521739131, | |
| "eval_runtime": 48.2191, | |
| "eval_samples_per_second": 5.724, | |
| "eval_steps_per_second": 0.187, | |
| "step": 480 | |
| }, | |
| { | |
| "epoch": 0.4461966604823748, | |
| "grad_norm": 4.113521575927734, | |
| "learning_rate": 1.3548378213294042e-05, | |
| "loss": 0.1875, | |
| "step": 481 | |
| }, | |
| { | |
| "epoch": 0.44712430426716143, | |
| "grad_norm": 4.532953262329102, | |
| "learning_rate": 1.3518079629791725e-05, | |
| "loss": 0.1425, | |
| "step": 482 | |
| }, | |
| { | |
| "epoch": 0.44805194805194803, | |
| "grad_norm": 2.7280144691467285, | |
| "learning_rate": 1.3487744143298822e-05, | |
| "loss": 0.0783, | |
| "step": 483 | |
| }, | |
| { | |
| "epoch": 0.4489795918367347, | |
| "grad_norm": 3.562812566757202, | |
| "learning_rate": 1.345737207202023e-05, | |
| "loss": 0.1569, | |
| "step": 484 | |
| }, | |
| { | |
| "epoch": 0.44990723562152135, | |
| "grad_norm": 5.523044109344482, | |
| "learning_rate": 1.3426963734544601e-05, | |
| "loss": 0.2227, | |
| "step": 485 | |
| }, | |
| { | |
| "epoch": 0.45083487940630795, | |
| "grad_norm": 4.155531883239746, | |
| "learning_rate": 1.3396519449841006e-05, | |
| "loss": 0.2297, | |
| "step": 486 | |
| }, | |
| { | |
| "epoch": 0.4517625231910946, | |
| "grad_norm": 2.8421530723571777, | |
| "learning_rate": 1.3366039537255589e-05, | |
| "loss": 0.1699, | |
| "step": 487 | |
| }, | |
| { | |
| "epoch": 0.45269016697588127, | |
| "grad_norm": 3.845489263534546, | |
| "learning_rate": 1.3335524316508208e-05, | |
| "loss": 0.1773, | |
| "step": 488 | |
| }, | |
| { | |
| "epoch": 0.4536178107606679, | |
| "grad_norm": 3.0157673358917236, | |
| "learning_rate": 1.3304974107689088e-05, | |
| "loss": 0.1379, | |
| "step": 489 | |
| }, | |
| { | |
| "epoch": 0.45454545454545453, | |
| "grad_norm": 2.74364972114563, | |
| "learning_rate": 1.3274389231255466e-05, | |
| "loss": 0.1265, | |
| "step": 490 | |
| }, | |
| { | |
| "epoch": 0.4554730983302412, | |
| "grad_norm": 6.820478439331055, | |
| "learning_rate": 1.3243770008028225e-05, | |
| "loss": 0.1849, | |
| "step": 491 | |
| }, | |
| { | |
| "epoch": 0.45640074211502785, | |
| "grad_norm": 2.741809368133545, | |
| "learning_rate": 1.3213116759188525e-05, | |
| "loss": 0.1295, | |
| "step": 492 | |
| }, | |
| { | |
| "epoch": 0.45732838589981445, | |
| "grad_norm": 3.473961353302002, | |
| "learning_rate": 1.3182429806274442e-05, | |
| "loss": 0.099, | |
| "step": 493 | |
| }, | |
| { | |
| "epoch": 0.4582560296846011, | |
| "grad_norm": 5.271576881408691, | |
| "learning_rate": 1.3151709471177589e-05, | |
| "loss": 0.1753, | |
| "step": 494 | |
| }, | |
| { | |
| "epoch": 0.45918367346938777, | |
| "grad_norm": 6.4196577072143555, | |
| "learning_rate": 1.3120956076139746e-05, | |
| "loss": 0.2568, | |
| "step": 495 | |
| }, | |
| { | |
| "epoch": 0.4601113172541744, | |
| "grad_norm": 5.339606761932373, | |
| "learning_rate": 1.3090169943749475e-05, | |
| "loss": 0.1535, | |
| "step": 496 | |
| }, | |
| { | |
| "epoch": 0.461038961038961, | |
| "grad_norm": 4.3615593910217285, | |
| "learning_rate": 1.305935139693874e-05, | |
| "loss": 0.1362, | |
| "step": 497 | |
| }, | |
| { | |
| "epoch": 0.4619666048237477, | |
| "grad_norm": 4.690557956695557, | |
| "learning_rate": 1.3028500758979507e-05, | |
| "loss": 0.1776, | |
| "step": 498 | |
| }, | |
| { | |
| "epoch": 0.46289424860853434, | |
| "grad_norm": 4.32938814163208, | |
| "learning_rate": 1.299761835348038e-05, | |
| "loss": 0.1429, | |
| "step": 499 | |
| }, | |
| { | |
| "epoch": 0.46382189239332094, | |
| "grad_norm": 3.8397958278656006, | |
| "learning_rate": 1.296670450438317e-05, | |
| "loss": 0.1904, | |
| "step": 500 | |
| }, | |
| { | |
| "epoch": 0.46382189239332094, | |
| "eval_accuracy": 0.8403547671840355, | |
| "eval_f1": 0.6381909547738693, | |
| "eval_loss": 0.3388740122318268, | |
| "eval_precision": 0.8758620689655172, | |
| "eval_recall": 0.5019762845849802, | |
| "eval_runtime": 48.3395, | |
| "eval_samples_per_second": 5.71, | |
| "eval_steps_per_second": 0.186, | |
| "step": 500 | |
| }, | |
| { | |
| "epoch": 0.4647495361781076, | |
| "grad_norm": 4.0140509605407715, | |
| "learning_rate": 1.2935759535959528e-05, | |
| "loss": 0.2093, | |
| "step": 501 | |
| }, | |
| { | |
| "epoch": 0.46567717996289426, | |
| "grad_norm": 2.7800872325897217, | |
| "learning_rate": 1.2904783772807534e-05, | |
| "loss": 0.1333, | |
| "step": 502 | |
| }, | |
| { | |
| "epoch": 0.46660482374768086, | |
| "grad_norm": 4.866140842437744, | |
| "learning_rate": 1.2873777539848284e-05, | |
| "loss": 0.2078, | |
| "step": 503 | |
| }, | |
| { | |
| "epoch": 0.4675324675324675, | |
| "grad_norm": 4.730177879333496, | |
| "learning_rate": 1.2842741162322487e-05, | |
| "loss": 0.1834, | |
| "step": 504 | |
| }, | |
| { | |
| "epoch": 0.4684601113172542, | |
| "grad_norm": 3.826939582824707, | |
| "learning_rate": 1.2811674965787058e-05, | |
| "loss": 0.1712, | |
| "step": 505 | |
| }, | |
| { | |
| "epoch": 0.46938775510204084, | |
| "grad_norm": 16.19306755065918, | |
| "learning_rate": 1.2780579276111702e-05, | |
| "loss": 0.2195, | |
| "step": 506 | |
| }, | |
| { | |
| "epoch": 0.47031539888682744, | |
| "grad_norm": 4.020465850830078, | |
| "learning_rate": 1.2749454419475486e-05, | |
| "loss": 0.1387, | |
| "step": 507 | |
| }, | |
| { | |
| "epoch": 0.4712430426716141, | |
| "grad_norm": 3.083519697189331, | |
| "learning_rate": 1.2718300722363431e-05, | |
| "loss": 0.1595, | |
| "step": 508 | |
| }, | |
| { | |
| "epoch": 0.47217068645640076, | |
| "grad_norm": 3.982968807220459, | |
| "learning_rate": 1.2687118511563075e-05, | |
| "loss": 0.1304, | |
| "step": 509 | |
| }, | |
| { | |
| "epoch": 0.47309833024118736, | |
| "grad_norm": 4.213962554931641, | |
| "learning_rate": 1.2655908114161053e-05, | |
| "loss": 0.1269, | |
| "step": 510 | |
| }, | |
| { | |
| "epoch": 0.474025974025974, | |
| "grad_norm": 3.9727156162261963, | |
| "learning_rate": 1.2624669857539669e-05, | |
| "loss": 0.1327, | |
| "step": 511 | |
| }, | |
| { | |
| "epoch": 0.4749536178107607, | |
| "grad_norm": 4.792732238769531, | |
| "learning_rate": 1.2593404069373452e-05, | |
| "loss": 0.1782, | |
| "step": 512 | |
| }, | |
| { | |
| "epoch": 0.47588126159554733, | |
| "grad_norm": 3.0668811798095703, | |
| "learning_rate": 1.2562111077625723e-05, | |
| "loss": 0.1568, | |
| "step": 513 | |
| }, | |
| { | |
| "epoch": 0.47680890538033394, | |
| "grad_norm": 4.204139709472656, | |
| "learning_rate": 1.2530791210545163e-05, | |
| "loss": 0.149, | |
| "step": 514 | |
| }, | |
| { | |
| "epoch": 0.4777365491651206, | |
| "grad_norm": 4.396578788757324, | |
| "learning_rate": 1.2499444796662354e-05, | |
| "loss": 0.1943, | |
| "step": 515 | |
| }, | |
| { | |
| "epoch": 0.47866419294990725, | |
| "grad_norm": 8.477376937866211, | |
| "learning_rate": 1.2468072164786342e-05, | |
| "loss": 0.3153, | |
| "step": 516 | |
| }, | |
| { | |
| "epoch": 0.47959183673469385, | |
| "grad_norm": 4.493762016296387, | |
| "learning_rate": 1.2436673644001196e-05, | |
| "loss": 0.2028, | |
| "step": 517 | |
| }, | |
| { | |
| "epoch": 0.4805194805194805, | |
| "grad_norm": 3.7687320709228516, | |
| "learning_rate": 1.2405249563662539e-05, | |
| "loss": 0.1834, | |
| "step": 518 | |
| }, | |
| { | |
| "epoch": 0.48144712430426717, | |
| "grad_norm": 5.828054428100586, | |
| "learning_rate": 1.23738002533941e-05, | |
| "loss": 0.1587, | |
| "step": 519 | |
| }, | |
| { | |
| "epoch": 0.48237476808905383, | |
| "grad_norm": 4.38277006149292, | |
| "learning_rate": 1.2342326043084268e-05, | |
| "loss": 0.1809, | |
| "step": 520 | |
| }, | |
| { | |
| "epoch": 0.48237476808905383, | |
| "eval_accuracy": 0.8458980044345898, | |
| "eval_f1": 0.6584766584766585, | |
| "eval_loss": 0.3318649232387543, | |
| "eval_precision": 0.8701298701298701, | |
| "eval_recall": 0.5296442687747036, | |
| "eval_runtime": 46.7049, | |
| "eval_samples_per_second": 5.909, | |
| "eval_steps_per_second": 0.193, | |
| "step": 520 | |
| }, | |
| { | |
| "epoch": 0.48330241187384043, | |
| "grad_norm": 4.514082908630371, | |
| "learning_rate": 1.2310827262882614e-05, | |
| "loss": 0.1759, | |
| "step": 521 | |
| }, | |
| { | |
| "epoch": 0.4842300556586271, | |
| "grad_norm": 3.4471170902252197, | |
| "learning_rate": 1.2279304243196438e-05, | |
| "loss": 0.1364, | |
| "step": 522 | |
| }, | |
| { | |
| "epoch": 0.48515769944341375, | |
| "grad_norm": 2.6771576404571533, | |
| "learning_rate": 1.2247757314687296e-05, | |
| "loss": 0.146, | |
| "step": 523 | |
| }, | |
| { | |
| "epoch": 0.48608534322820035, | |
| "grad_norm": 3.6538710594177246, | |
| "learning_rate": 1.2216186808267544e-05, | |
| "loss": 0.2009, | |
| "step": 524 | |
| }, | |
| { | |
| "epoch": 0.487012987012987, | |
| "grad_norm": 4.4636149406433105, | |
| "learning_rate": 1.2184593055096853e-05, | |
| "loss": 0.1474, | |
| "step": 525 | |
| }, | |
| { | |
| "epoch": 0.48794063079777367, | |
| "grad_norm": 5.249742031097412, | |
| "learning_rate": 1.215297638657875e-05, | |
| "loss": 0.1564, | |
| "step": 526 | |
| }, | |
| { | |
| "epoch": 0.48886827458256027, | |
| "grad_norm": 6.001680850982666, | |
| "learning_rate": 1.2121337134357121e-05, | |
| "loss": 0.1718, | |
| "step": 527 | |
| }, | |
| { | |
| "epoch": 0.4897959183673469, | |
| "grad_norm": 6.546911716461182, | |
| "learning_rate": 1.2089675630312755e-05, | |
| "loss": 0.2193, | |
| "step": 528 | |
| }, | |
| { | |
| "epoch": 0.4907235621521336, | |
| "grad_norm": 2.6112513542175293, | |
| "learning_rate": 1.2057992206559837e-05, | |
| "loss": 0.1295, | |
| "step": 529 | |
| }, | |
| { | |
| "epoch": 0.49165120593692024, | |
| "grad_norm": 3.9656717777252197, | |
| "learning_rate": 1.2026287195442503e-05, | |
| "loss": 0.1707, | |
| "step": 530 | |
| }, | |
| { | |
| "epoch": 0.49257884972170685, | |
| "grad_norm": 3.8426477909088135, | |
| "learning_rate": 1.199456092953131e-05, | |
| "loss": 0.1768, | |
| "step": 531 | |
| }, | |
| { | |
| "epoch": 0.4935064935064935, | |
| "grad_norm": 3.001831531524658, | |
| "learning_rate": 1.1962813741619777e-05, | |
| "loss": 0.1839, | |
| "step": 532 | |
| }, | |
| { | |
| "epoch": 0.49443413729128016, | |
| "grad_norm": 5.149347305297852, | |
| "learning_rate": 1.1931045964720882e-05, | |
| "loss": 0.2559, | |
| "step": 533 | |
| }, | |
| { | |
| "epoch": 0.49536178107606677, | |
| "grad_norm": 3.0468552112579346, | |
| "learning_rate": 1.189925793206357e-05, | |
| "loss": 0.1408, | |
| "step": 534 | |
| }, | |
| { | |
| "epoch": 0.4962894248608534, | |
| "grad_norm": 4.5860490798950195, | |
| "learning_rate": 1.1867449977089264e-05, | |
| "loss": 0.1945, | |
| "step": 535 | |
| }, | |
| { | |
| "epoch": 0.4972170686456401, | |
| "grad_norm": 3.8901429176330566, | |
| "learning_rate": 1.1835622433448361e-05, | |
| "loss": 0.2126, | |
| "step": 536 | |
| }, | |
| { | |
| "epoch": 0.49814471243042674, | |
| "grad_norm": 4.597271919250488, | |
| "learning_rate": 1.1803775634996735e-05, | |
| "loss": 0.1977, | |
| "step": 537 | |
| }, | |
| { | |
| "epoch": 0.49907235621521334, | |
| "grad_norm": 3.079770803451538, | |
| "learning_rate": 1.177190991579223e-05, | |
| "loss": 0.1758, | |
| "step": 538 | |
| }, | |
| { | |
| "epoch": 0.5, | |
| "grad_norm": 4.214216709136963, | |
| "learning_rate": 1.174002561009116e-05, | |
| "loss": 0.141, | |
| "step": 539 | |
| }, | |
| { | |
| "epoch": 0.5009276437847866, | |
| "grad_norm": 5.213557243347168, | |
| "learning_rate": 1.1708123052344803e-05, | |
| "loss": 0.1605, | |
| "step": 540 | |
| }, | |
| { | |
| "epoch": 0.5009276437847866, | |
| "eval_accuracy": 0.8614190687361419, | |
| "eval_f1": 0.7072599531615925, | |
| "eval_loss": 0.30164089798927307, | |
| "eval_precision": 0.867816091954023, | |
| "eval_recall": 0.5968379446640316, | |
| "eval_runtime": 47.208, | |
| "eval_samples_per_second": 5.846, | |
| "eval_steps_per_second": 0.191, | |
| "step": 540 | |
| }, | |
| { | |
| "epoch": 0.5018552875695733, | |
| "grad_norm": 2.1639671325683594, | |
| "learning_rate": 1.1676202577195901e-05, | |
| "loss": 0.0809, | |
| "step": 541 | |
| }, | |
| { | |
| "epoch": 0.5027829313543599, | |
| "grad_norm": 4.442990303039551, | |
| "learning_rate": 1.164426451947513e-05, | |
| "loss": 0.1677, | |
| "step": 542 | |
| }, | |
| { | |
| "epoch": 0.5037105751391465, | |
| "grad_norm": 3.968435049057007, | |
| "learning_rate": 1.1612309214197599e-05, | |
| "loss": 0.2114, | |
| "step": 543 | |
| }, | |
| { | |
| "epoch": 0.5046382189239332, | |
| "grad_norm": 9.67383098602295, | |
| "learning_rate": 1.1580336996559343e-05, | |
| "loss": 0.2159, | |
| "step": 544 | |
| }, | |
| { | |
| "epoch": 0.5055658627087198, | |
| "grad_norm": 3.427710771560669, | |
| "learning_rate": 1.1548348201933799e-05, | |
| "loss": 0.0846, | |
| "step": 545 | |
| }, | |
| { | |
| "epoch": 0.5064935064935064, | |
| "grad_norm": 3.03241229057312, | |
| "learning_rate": 1.151634316586828e-05, | |
| "loss": 0.1195, | |
| "step": 546 | |
| }, | |
| { | |
| "epoch": 0.5074211502782932, | |
| "grad_norm": 5.138626575469971, | |
| "learning_rate": 1.1484322224080474e-05, | |
| "loss": 0.1542, | |
| "step": 547 | |
| }, | |
| { | |
| "epoch": 0.5083487940630798, | |
| "grad_norm": 4.37513542175293, | |
| "learning_rate": 1.1452285712454905e-05, | |
| "loss": 0.2118, | |
| "step": 548 | |
| }, | |
| { | |
| "epoch": 0.5092764378478665, | |
| "grad_norm": 4.746356010437012, | |
| "learning_rate": 1.1420233967039423e-05, | |
| "loss": 0.1456, | |
| "step": 549 | |
| }, | |
| { | |
| "epoch": 0.5102040816326531, | |
| "grad_norm": 5.190648555755615, | |
| "learning_rate": 1.138816732404167e-05, | |
| "loss": 0.1921, | |
| "step": 550 | |
| }, | |
| { | |
| "epoch": 0.5111317254174397, | |
| "grad_norm": 3.955061197280884, | |
| "learning_rate": 1.1356086119825553e-05, | |
| "loss": 0.1964, | |
| "step": 551 | |
| }, | |
| { | |
| "epoch": 0.5120593692022264, | |
| "grad_norm": 7.773352146148682, | |
| "learning_rate": 1.1323990690907734e-05, | |
| "loss": 0.2178, | |
| "step": 552 | |
| }, | |
| { | |
| "epoch": 0.512987012987013, | |
| "grad_norm": 3.211651563644409, | |
| "learning_rate": 1.1291881373954066e-05, | |
| "loss": 0.1859, | |
| "step": 553 | |
| }, | |
| { | |
| "epoch": 0.5139146567717996, | |
| "grad_norm": 3.536742687225342, | |
| "learning_rate": 1.1259758505776092e-05, | |
| "loss": 0.1949, | |
| "step": 554 | |
| }, | |
| { | |
| "epoch": 0.5148423005565863, | |
| "grad_norm": 4.817080974578857, | |
| "learning_rate": 1.1227622423327501e-05, | |
| "loss": 0.2482, | |
| "step": 555 | |
| }, | |
| { | |
| "epoch": 0.5157699443413729, | |
| "grad_norm": 4.828971862792969, | |
| "learning_rate": 1.119547346370059e-05, | |
| "loss": 0.216, | |
| "step": 556 | |
| }, | |
| { | |
| "epoch": 0.5166975881261595, | |
| "grad_norm": 4.580413818359375, | |
| "learning_rate": 1.1163311964122733e-05, | |
| "loss": 0.2267, | |
| "step": 557 | |
| }, | |
| { | |
| "epoch": 0.5176252319109462, | |
| "grad_norm": 2.9997427463531494, | |
| "learning_rate": 1.1131138261952845e-05, | |
| "loss": 0.1556, | |
| "step": 558 | |
| }, | |
| { | |
| "epoch": 0.5185528756957328, | |
| "grad_norm": 2.876847982406616, | |
| "learning_rate": 1.109895269467783e-05, | |
| "loss": 0.1551, | |
| "step": 559 | |
| }, | |
| { | |
| "epoch": 0.5194805194805194, | |
| "grad_norm": 6.003294467926025, | |
| "learning_rate": 1.1066755599909065e-05, | |
| "loss": 0.2123, | |
| "step": 560 | |
| }, | |
| { | |
| "epoch": 0.5194805194805194, | |
| "eval_accuracy": 0.8603104212860311, | |
| "eval_f1": 0.7136363636363636, | |
| "eval_loss": 0.2982672452926636, | |
| "eval_precision": 0.839572192513369, | |
| "eval_recall": 0.6205533596837944, | |
| "eval_runtime": 48.6549, | |
| "eval_samples_per_second": 5.673, | |
| "eval_steps_per_second": 0.185, | |
| "step": 560 | |
| }, | |
| { | |
| "epoch": 0.5204081632653061, | |
| "grad_norm": 3.7513365745544434, | |
| "learning_rate": 1.1034547315378838e-05, | |
| "loss": 0.1808, | |
| "step": 561 | |
| }, | |
| { | |
| "epoch": 0.5213358070500927, | |
| "grad_norm": 2.755664110183716, | |
| "learning_rate": 1.1002328178936813e-05, | |
| "loss": 0.1272, | |
| "step": 562 | |
| }, | |
| { | |
| "epoch": 0.5222634508348795, | |
| "grad_norm": 6.458003520965576, | |
| "learning_rate": 1.0970098528546482e-05, | |
| "loss": 0.161, | |
| "step": 563 | |
| }, | |
| { | |
| "epoch": 0.5231910946196661, | |
| "grad_norm": 3.156869649887085, | |
| "learning_rate": 1.0937858702281631e-05, | |
| "loss": 0.1423, | |
| "step": 564 | |
| }, | |
| { | |
| "epoch": 0.5241187384044527, | |
| "grad_norm": 4.826041221618652, | |
| "learning_rate": 1.090560903832278e-05, | |
| "loss": 0.1717, | |
| "step": 565 | |
| }, | |
| { | |
| "epoch": 0.5250463821892394, | |
| "grad_norm": 2.8493692874908447, | |
| "learning_rate": 1.087334987495364e-05, | |
| "loss": 0.147, | |
| "step": 566 | |
| }, | |
| { | |
| "epoch": 0.525974025974026, | |
| "grad_norm": 3.3068854808807373, | |
| "learning_rate": 1.0841081550557577e-05, | |
| "loss": 0.1084, | |
| "step": 567 | |
| }, | |
| { | |
| "epoch": 0.5269016697588126, | |
| "grad_norm": 3.908871650695801, | |
| "learning_rate": 1.0808804403614044e-05, | |
| "loss": 0.1484, | |
| "step": 568 | |
| }, | |
| { | |
| "epoch": 0.5278293135435993, | |
| "grad_norm": 3.8984129428863525, | |
| "learning_rate": 1.0776518772695035e-05, | |
| "loss": 0.1883, | |
| "step": 569 | |
| }, | |
| { | |
| "epoch": 0.5287569573283859, | |
| "grad_norm": 3.58467960357666, | |
| "learning_rate": 1.0744224996461541e-05, | |
| "loss": 0.1238, | |
| "step": 570 | |
| }, | |
| { | |
| "epoch": 0.5296846011131725, | |
| "grad_norm": 5.206255912780762, | |
| "learning_rate": 1.0711923413659995e-05, | |
| "loss": 0.2008, | |
| "step": 571 | |
| }, | |
| { | |
| "epoch": 0.5306122448979592, | |
| "grad_norm": 2.4864425659179688, | |
| "learning_rate": 1.0679614363118718e-05, | |
| "loss": 0.1181, | |
| "step": 572 | |
| }, | |
| { | |
| "epoch": 0.5315398886827458, | |
| "grad_norm": 3.949312686920166, | |
| "learning_rate": 1.0647298183744359e-05, | |
| "loss": 0.1927, | |
| "step": 573 | |
| }, | |
| { | |
| "epoch": 0.5324675324675324, | |
| "grad_norm": 6.005074501037598, | |
| "learning_rate": 1.061497521451835e-05, | |
| "loss": 0.1936, | |
| "step": 574 | |
| }, | |
| { | |
| "epoch": 0.5333951762523191, | |
| "grad_norm": 4.429588317871094, | |
| "learning_rate": 1.0582645794493337e-05, | |
| "loss": 0.2031, | |
| "step": 575 | |
| }, | |
| { | |
| "epoch": 0.5343228200371057, | |
| "grad_norm": 4.36995792388916, | |
| "learning_rate": 1.055031026278965e-05, | |
| "loss": 0.1846, | |
| "step": 576 | |
| }, | |
| { | |
| "epoch": 0.5352504638218923, | |
| "grad_norm": 4.059164524078369, | |
| "learning_rate": 1.0517968958591705e-05, | |
| "loss": 0.1681, | |
| "step": 577 | |
| }, | |
| { | |
| "epoch": 0.536178107606679, | |
| "grad_norm": 4.211386203765869, | |
| "learning_rate": 1.0485622221144485e-05, | |
| "loss": 0.1296, | |
| "step": 578 | |
| }, | |
| { | |
| "epoch": 0.5371057513914657, | |
| "grad_norm": 4.114365100860596, | |
| "learning_rate": 1.0453270389749956e-05, | |
| "loss": 0.164, | |
| "step": 579 | |
| }, | |
| { | |
| "epoch": 0.5380333951762524, | |
| "grad_norm": 7.943957805633545, | |
| "learning_rate": 1.0420913803763522e-05, | |
| "loss": 0.2279, | |
| "step": 580 | |
| }, | |
| { | |
| "epoch": 0.5380333951762524, | |
| "eval_accuracy": 0.8558758314855875, | |
| "eval_f1": 0.7018348623853211, | |
| "eval_loss": 0.30458346009254456, | |
| "eval_precision": 0.8360655737704918, | |
| "eval_recall": 0.6047430830039525, | |
| "eval_runtime": 46.8877, | |
| "eval_samples_per_second": 5.886, | |
| "eval_steps_per_second": 0.192, | |
| "step": 580 | |
| }, | |
| { | |
| "epoch": 0.538961038961039, | |
| "grad_norm": 7.277109146118164, | |
| "learning_rate": 1.0388552802590461e-05, | |
| "loss": 0.0867, | |
| "step": 581 | |
| }, | |
| { | |
| "epoch": 0.5398886827458256, | |
| "grad_norm": 3.722276449203491, | |
| "learning_rate": 1.0356187725682359e-05, | |
| "loss": 0.1233, | |
| "step": 582 | |
| }, | |
| { | |
| "epoch": 0.5408163265306123, | |
| "grad_norm": 4.75911283493042, | |
| "learning_rate": 1.0323818912533561e-05, | |
| "loss": 0.2018, | |
| "step": 583 | |
| }, | |
| { | |
| "epoch": 0.5417439703153989, | |
| "grad_norm": 5.309391498565674, | |
| "learning_rate": 1.0291446702677598e-05, | |
| "loss": 0.1853, | |
| "step": 584 | |
| }, | |
| { | |
| "epoch": 0.5426716141001855, | |
| "grad_norm": 3.6361756324768066, | |
| "learning_rate": 1.0259071435683636e-05, | |
| "loss": 0.1594, | |
| "step": 585 | |
| }, | |
| { | |
| "epoch": 0.5435992578849722, | |
| "grad_norm": 8.379057884216309, | |
| "learning_rate": 1.02266934511529e-05, | |
| "loss": 0.1496, | |
| "step": 586 | |
| }, | |
| { | |
| "epoch": 0.5445269016697588, | |
| "grad_norm": 6.6115593910217285, | |
| "learning_rate": 1.0194313088715135e-05, | |
| "loss": 0.1887, | |
| "step": 587 | |
| }, | |
| { | |
| "epoch": 0.5454545454545454, | |
| "grad_norm": 3.5273900032043457, | |
| "learning_rate": 1.0161930688025018e-05, | |
| "loss": 0.1549, | |
| "step": 588 | |
| }, | |
| { | |
| "epoch": 0.5463821892393321, | |
| "grad_norm": 4.194179058074951, | |
| "learning_rate": 1.0129546588758605e-05, | |
| "loss": 0.1748, | |
| "step": 589 | |
| }, | |
| { | |
| "epoch": 0.5473098330241187, | |
| "grad_norm": 3.131457805633545, | |
| "learning_rate": 1.0097161130609774e-05, | |
| "loss": 0.1319, | |
| "step": 590 | |
| }, | |
| { | |
| "epoch": 0.5482374768089053, | |
| "grad_norm": 4.083921909332275, | |
| "learning_rate": 1.0064774653286662e-05, | |
| "loss": 0.1759, | |
| "step": 591 | |
| }, | |
| { | |
| "epoch": 0.549165120593692, | |
| "grad_norm": 3.384917736053467, | |
| "learning_rate": 1.003238749650809e-05, | |
| "loss": 0.1434, | |
| "step": 592 | |
| }, | |
| { | |
| "epoch": 0.5500927643784786, | |
| "grad_norm": 2.5680859088897705, | |
| "learning_rate": 1e-05, | |
| "loss": 0.1626, | |
| "step": 593 | |
| }, | |
| { | |
| "epoch": 0.5510204081632653, | |
| "grad_norm": 3.510484457015991, | |
| "learning_rate": 9.967612503491915e-06, | |
| "loss": 0.1701, | |
| "step": 594 | |
| }, | |
| { | |
| "epoch": 0.551948051948052, | |
| "grad_norm": 5.625200271606445, | |
| "learning_rate": 9.935225346713341e-06, | |
| "loss": 0.1486, | |
| "step": 595 | |
| }, | |
| { | |
| "epoch": 0.5528756957328386, | |
| "grad_norm": 5.352198123931885, | |
| "learning_rate": 9.90283886939023e-06, | |
| "loss": 0.1158, | |
| "step": 596 | |
| }, | |
| { | |
| "epoch": 0.5538033395176253, | |
| "grad_norm": 3.0487513542175293, | |
| "learning_rate": 9.870453411241399e-06, | |
| "loss": 0.1339, | |
| "step": 597 | |
| }, | |
| { | |
| "epoch": 0.5547309833024119, | |
| "grad_norm": 3.2191503047943115, | |
| "learning_rate": 9.838069311974986e-06, | |
| "loss": 0.1147, | |
| "step": 598 | |
| }, | |
| { | |
| "epoch": 0.5556586270871985, | |
| "grad_norm": 4.020503520965576, | |
| "learning_rate": 9.805686911284867e-06, | |
| "loss": 0.1163, | |
| "step": 599 | |
| }, | |
| { | |
| "epoch": 0.5565862708719852, | |
| "grad_norm": 6.643052101135254, | |
| "learning_rate": 9.773306548847102e-06, | |
| "loss": 0.2224, | |
| "step": 600 | |
| }, | |
| { | |
| "epoch": 0.5565862708719852, | |
| "eval_accuracy": 0.8381374722838137, | |
| "eval_f1": 0.6313131313131313, | |
| "eval_loss": 0.3394555449485779, | |
| "eval_precision": 0.8741258741258742, | |
| "eval_recall": 0.49407114624505927, | |
| "eval_runtime": 48.1127, | |
| "eval_samples_per_second": 5.737, | |
| "eval_steps_per_second": 0.187, | |
| "step": 600 | |
| }, | |
| { | |
| "epoch": 0.5575139146567718, | |
| "grad_norm": 4.177931308746338, | |
| "learning_rate": 9.740928564316369e-06, | |
| "loss": 0.1734, | |
| "step": 601 | |
| }, | |
| { | |
| "epoch": 0.5584415584415584, | |
| "grad_norm": 6.294393062591553, | |
| "learning_rate": 9.708553297322407e-06, | |
| "loss": 0.1923, | |
| "step": 602 | |
| }, | |
| { | |
| "epoch": 0.5593692022263451, | |
| "grad_norm": 3.275909662246704, | |
| "learning_rate": 9.676181087466444e-06, | |
| "loss": 0.1662, | |
| "step": 603 | |
| }, | |
| { | |
| "epoch": 0.5602968460111317, | |
| "grad_norm": 3.7851905822753906, | |
| "learning_rate": 9.643812274317644e-06, | |
| "loss": 0.1521, | |
| "step": 604 | |
| }, | |
| { | |
| "epoch": 0.5612244897959183, | |
| "grad_norm": 4.724743366241455, | |
| "learning_rate": 9.611447197409544e-06, | |
| "loss": 0.1713, | |
| "step": 605 | |
| }, | |
| { | |
| "epoch": 0.562152133580705, | |
| "grad_norm": 6.553765773773193, | |
| "learning_rate": 9.579086196236483e-06, | |
| "loss": 0.1596, | |
| "step": 606 | |
| }, | |
| { | |
| "epoch": 0.5630797773654916, | |
| "grad_norm": 3.5295910835266113, | |
| "learning_rate": 9.54672961025005e-06, | |
| "loss": 0.1436, | |
| "step": 607 | |
| }, | |
| { | |
| "epoch": 0.5640074211502782, | |
| "grad_norm": 4.828715801239014, | |
| "learning_rate": 9.514377778855521e-06, | |
| "loss": 0.2361, | |
| "step": 608 | |
| }, | |
| { | |
| "epoch": 0.564935064935065, | |
| "grad_norm": 6.0731072425842285, | |
| "learning_rate": 9.482031041408296e-06, | |
| "loss": 0.165, | |
| "step": 609 | |
| }, | |
| { | |
| "epoch": 0.5658627087198516, | |
| "grad_norm": 8.083799362182617, | |
| "learning_rate": 9.449689737210352e-06, | |
| "loss": 0.1995, | |
| "step": 610 | |
| }, | |
| { | |
| "epoch": 0.5667903525046383, | |
| "grad_norm": 3.7561042308807373, | |
| "learning_rate": 9.417354205506663e-06, | |
| "loss": 0.214, | |
| "step": 611 | |
| }, | |
| { | |
| "epoch": 0.5677179962894249, | |
| "grad_norm": 4.848057746887207, | |
| "learning_rate": 9.385024785481653e-06, | |
| "loss": 0.2019, | |
| "step": 612 | |
| }, | |
| { | |
| "epoch": 0.5686456400742115, | |
| "grad_norm": 5.188536167144775, | |
| "learning_rate": 9.352701816255643e-06, | |
| "loss": 0.1568, | |
| "step": 613 | |
| }, | |
| { | |
| "epoch": 0.5695732838589982, | |
| "grad_norm": 2.897571325302124, | |
| "learning_rate": 9.320385636881283e-06, | |
| "loss": 0.1241, | |
| "step": 614 | |
| }, | |
| { | |
| "epoch": 0.5705009276437848, | |
| "grad_norm": 5.466006278991699, | |
| "learning_rate": 9.288076586340005e-06, | |
| "loss": 0.195, | |
| "step": 615 | |
| }, | |
| { | |
| "epoch": 0.5714285714285714, | |
| "grad_norm": 3.484766960144043, | |
| "learning_rate": 9.255775003538462e-06, | |
| "loss": 0.1419, | |
| "step": 616 | |
| }, | |
| { | |
| "epoch": 0.5723562152133581, | |
| "grad_norm": 5.067159175872803, | |
| "learning_rate": 9.22348122730497e-06, | |
| "loss": 0.1938, | |
| "step": 617 | |
| }, | |
| { | |
| "epoch": 0.5732838589981447, | |
| "grad_norm": 4.161076545715332, | |
| "learning_rate": 9.19119559638596e-06, | |
| "loss": 0.1925, | |
| "step": 618 | |
| }, | |
| { | |
| "epoch": 0.5742115027829313, | |
| "grad_norm": 3.2418394088745117, | |
| "learning_rate": 9.158918449442425e-06, | |
| "loss": 0.1592, | |
| "step": 619 | |
| }, | |
| { | |
| "epoch": 0.575139146567718, | |
| "grad_norm": 5.799723148345947, | |
| "learning_rate": 9.126650125046361e-06, | |
| "loss": 0.1655, | |
| "step": 620 | |
| }, | |
| { | |
| "epoch": 0.575139146567718, | |
| "eval_accuracy": 0.835920177383592, | |
| "eval_f1": 0.6224489795918368, | |
| "eval_loss": 0.3387525677680969, | |
| "eval_precision": 0.8776978417266187, | |
| "eval_recall": 0.48221343873517786, | |
| "eval_runtime": 47.1826, | |
| "eval_samples_per_second": 5.85, | |
| "eval_steps_per_second": 0.191, | |
| "step": 620 | |
| }, | |
| { | |
| "epoch": 0.5760667903525046, | |
| "grad_norm": 4.049962997436523, | |
| "learning_rate": 9.094390961677223e-06, | |
| "loss": 0.1364, | |
| "step": 621 | |
| }, | |
| { | |
| "epoch": 0.5769944341372912, | |
| "grad_norm": 3.7945103645324707, | |
| "learning_rate": 9.062141297718372e-06, | |
| "loss": 0.18, | |
| "step": 622 | |
| }, | |
| { | |
| "epoch": 0.577922077922078, | |
| "grad_norm": 3.9919049739837646, | |
| "learning_rate": 9.02990147145352e-06, | |
| "loss": 0.1833, | |
| "step": 623 | |
| }, | |
| { | |
| "epoch": 0.5788497217068646, | |
| "grad_norm": 2.300546407699585, | |
| "learning_rate": 8.99767182106319e-06, | |
| "loss": 0.0625, | |
| "step": 624 | |
| }, | |
| { | |
| "epoch": 0.5797773654916512, | |
| "grad_norm": 4.982729911804199, | |
| "learning_rate": 8.965452684621164e-06, | |
| "loss": 0.1884, | |
| "step": 625 | |
| }, | |
| { | |
| "epoch": 0.5807050092764379, | |
| "grad_norm": 4.047850131988525, | |
| "learning_rate": 8.933244400090937e-06, | |
| "loss": 0.1206, | |
| "step": 626 | |
| }, | |
| { | |
| "epoch": 0.5816326530612245, | |
| "grad_norm": 4.123579978942871, | |
| "learning_rate": 8.901047305322172e-06, | |
| "loss": 0.1503, | |
| "step": 627 | |
| }, | |
| { | |
| "epoch": 0.5825602968460112, | |
| "grad_norm": 4.2010273933410645, | |
| "learning_rate": 8.868861738047158e-06, | |
| "loss": 0.1311, | |
| "step": 628 | |
| }, | |
| { | |
| "epoch": 0.5834879406307978, | |
| "grad_norm": 4.511427402496338, | |
| "learning_rate": 8.836688035877268e-06, | |
| "loss": 0.1521, | |
| "step": 629 | |
| }, | |
| { | |
| "epoch": 0.5844155844155844, | |
| "grad_norm": 5.0462775230407715, | |
| "learning_rate": 8.804526536299413e-06, | |
| "loss": 0.1746, | |
| "step": 630 | |
| }, | |
| { | |
| "epoch": 0.5853432282003711, | |
| "grad_norm": 6.308788299560547, | |
| "learning_rate": 8.772377576672502e-06, | |
| "loss": 0.2369, | |
| "step": 631 | |
| }, | |
| { | |
| "epoch": 0.5862708719851577, | |
| "grad_norm": 4.165151596069336, | |
| "learning_rate": 8.740241494223911e-06, | |
| "loss": 0.2034, | |
| "step": 632 | |
| }, | |
| { | |
| "epoch": 0.5871985157699443, | |
| "grad_norm": 4.502403259277344, | |
| "learning_rate": 8.708118626045939e-06, | |
| "loss": 0.1893, | |
| "step": 633 | |
| }, | |
| { | |
| "epoch": 0.588126159554731, | |
| "grad_norm": 3.4875142574310303, | |
| "learning_rate": 8.676009309092273e-06, | |
| "loss": 0.1328, | |
| "step": 634 | |
| }, | |
| { | |
| "epoch": 0.5890538033395176, | |
| "grad_norm": 4.179616928100586, | |
| "learning_rate": 8.643913880174449e-06, | |
| "loss": 0.1466, | |
| "step": 635 | |
| }, | |
| { | |
| "epoch": 0.5899814471243042, | |
| "grad_norm": 3.073174476623535, | |
| "learning_rate": 8.611832675958335e-06, | |
| "loss": 0.1269, | |
| "step": 636 | |
| }, | |
| { | |
| "epoch": 0.5909090909090909, | |
| "grad_norm": 3.9491851329803467, | |
| "learning_rate": 8.579766032960582e-06, | |
| "loss": 0.1247, | |
| "step": 637 | |
| }, | |
| { | |
| "epoch": 0.5918367346938775, | |
| "grad_norm": 3.502418041229248, | |
| "learning_rate": 8.5477142875451e-06, | |
| "loss": 0.1536, | |
| "step": 638 | |
| }, | |
| { | |
| "epoch": 0.5927643784786641, | |
| "grad_norm": 5.294571876525879, | |
| "learning_rate": 8.515677775919528e-06, | |
| "loss": 0.213, | |
| "step": 639 | |
| }, | |
| { | |
| "epoch": 0.5936920222634509, | |
| "grad_norm": 3.3282008171081543, | |
| "learning_rate": 8.48365683413172e-06, | |
| "loss": 0.1468, | |
| "step": 640 | |
| }, | |
| { | |
| "epoch": 0.5936920222634509, | |
| "eval_accuracy": 0.8592017738359202, | |
| "eval_f1": 0.7093821510297483, | |
| "eval_loss": 0.3021511137485504, | |
| "eval_precision": 0.842391304347826, | |
| "eval_recall": 0.6126482213438735, | |
| "eval_runtime": 46.8153, | |
| "eval_samples_per_second": 5.896, | |
| "eval_steps_per_second": 0.192, | |
| "step": 640 | |
| }, | |
| { | |
| "epoch": 0.5946196660482375, | |
| "grad_norm": 5.158597946166992, | |
| "learning_rate": 8.451651798066203e-06, | |
| "loss": 0.2459, | |
| "step": 641 | |
| }, | |
| { | |
| "epoch": 0.5955473098330241, | |
| "grad_norm": 7.210265159606934, | |
| "learning_rate": 8.419663003440657e-06, | |
| "loss": 0.1745, | |
| "step": 642 | |
| }, | |
| { | |
| "epoch": 0.5964749536178108, | |
| "grad_norm": 4.469946384429932, | |
| "learning_rate": 8.387690785802403e-06, | |
| "loss": 0.1879, | |
| "step": 643 | |
| }, | |
| { | |
| "epoch": 0.5974025974025974, | |
| "grad_norm": 3.5130228996276855, | |
| "learning_rate": 8.355735480524874e-06, | |
| "loss": 0.1453, | |
| "step": 644 | |
| }, | |
| { | |
| "epoch": 0.5983302411873841, | |
| "grad_norm": 3.8892977237701416, | |
| "learning_rate": 8.3237974228041e-06, | |
| "loss": 0.1852, | |
| "step": 645 | |
| }, | |
| { | |
| "epoch": 0.5992578849721707, | |
| "grad_norm": 4.912310600280762, | |
| "learning_rate": 8.291876947655197e-06, | |
| "loss": 0.1595, | |
| "step": 646 | |
| }, | |
| { | |
| "epoch": 0.6001855287569573, | |
| "grad_norm": 3.8313469886779785, | |
| "learning_rate": 8.259974389908842e-06, | |
| "loss": 0.1207, | |
| "step": 647 | |
| }, | |
| { | |
| "epoch": 0.601113172541744, | |
| "grad_norm": 4.376918792724609, | |
| "learning_rate": 8.228090084207773e-06, | |
| "loss": 0.2188, | |
| "step": 648 | |
| }, | |
| { | |
| "epoch": 0.6020408163265306, | |
| "grad_norm": 4.086894989013672, | |
| "learning_rate": 8.196224365003267e-06, | |
| "loss": 0.1806, | |
| "step": 649 | |
| }, | |
| { | |
| "epoch": 0.6029684601113172, | |
| "grad_norm": 2.9185261726379395, | |
| "learning_rate": 8.16437756655164e-06, | |
| "loss": 0.1301, | |
| "step": 650 | |
| }, | |
| { | |
| "epoch": 0.6038961038961039, | |
| "grad_norm": 5.638646125793457, | |
| "learning_rate": 8.132550022910737e-06, | |
| "loss": 0.2069, | |
| "step": 651 | |
| }, | |
| { | |
| "epoch": 0.6048237476808905, | |
| "grad_norm": 6.044648170471191, | |
| "learning_rate": 8.100742067936432e-06, | |
| "loss": 0.2543, | |
| "step": 652 | |
| }, | |
| { | |
| "epoch": 0.6057513914656771, | |
| "grad_norm": 5.261450290679932, | |
| "learning_rate": 8.068954035279121e-06, | |
| "loss": 0.1861, | |
| "step": 653 | |
| }, | |
| { | |
| "epoch": 0.6066790352504638, | |
| "grad_norm": 2.6060245037078857, | |
| "learning_rate": 8.037186258380226e-06, | |
| "loss": 0.1102, | |
| "step": 654 | |
| }, | |
| { | |
| "epoch": 0.6076066790352505, | |
| "grad_norm": 5.192462921142578, | |
| "learning_rate": 8.005439070468692e-06, | |
| "loss": 0.1196, | |
| "step": 655 | |
| }, | |
| { | |
| "epoch": 0.608534322820037, | |
| "grad_norm": 4.267360210418701, | |
| "learning_rate": 7.9737128045575e-06, | |
| "loss": 0.1549, | |
| "step": 656 | |
| }, | |
| { | |
| "epoch": 0.6094619666048238, | |
| "grad_norm": 2.9216721057891846, | |
| "learning_rate": 7.942007793440165e-06, | |
| "loss": 0.1326, | |
| "step": 657 | |
| }, | |
| { | |
| "epoch": 0.6103896103896104, | |
| "grad_norm": 3.592209577560425, | |
| "learning_rate": 7.91032436968725e-06, | |
| "loss": 0.1399, | |
| "step": 658 | |
| }, | |
| { | |
| "epoch": 0.6113172541743971, | |
| "grad_norm": 3.6594440937042236, | |
| "learning_rate": 7.87866286564288e-06, | |
| "loss": 0.1223, | |
| "step": 659 | |
| }, | |
| { | |
| "epoch": 0.6122448979591837, | |
| "grad_norm": 6.019983291625977, | |
| "learning_rate": 7.847023613421251e-06, | |
| "loss": 0.1421, | |
| "step": 660 | |
| }, | |
| { | |
| "epoch": 0.6122448979591837, | |
| "eval_accuracy": 0.843680709534368, | |
| "eval_f1": 0.6483790523690773, | |
| "eval_loss": 0.3296959400177002, | |
| "eval_precision": 0.8783783783783784, | |
| "eval_recall": 0.5138339920948617, | |
| "eval_runtime": 47.6918, | |
| "eval_samples_per_second": 5.787, | |
| "eval_steps_per_second": 0.189, | |
| "step": 660 | |
| }, | |
| { | |
| "epoch": 0.6131725417439703, | |
| "grad_norm": 3.82082462310791, | |
| "learning_rate": 7.815406944903148e-06, | |
| "loss": 0.1369, | |
| "step": 661 | |
| }, | |
| { | |
| "epoch": 0.614100185528757, | |
| "grad_norm": 3.763209819793701, | |
| "learning_rate": 7.78381319173246e-06, | |
| "loss": 0.1345, | |
| "step": 662 | |
| }, | |
| { | |
| "epoch": 0.6150278293135436, | |
| "grad_norm": 5.018528938293457, | |
| "learning_rate": 7.752242685312709e-06, | |
| "loss": 0.1762, | |
| "step": 663 | |
| }, | |
| { | |
| "epoch": 0.6159554730983302, | |
| "grad_norm": 3.500230073928833, | |
| "learning_rate": 7.720695756803569e-06, | |
| "loss": 0.0632, | |
| "step": 664 | |
| }, | |
| { | |
| "epoch": 0.6168831168831169, | |
| "grad_norm": 4.73206090927124, | |
| "learning_rate": 7.689172737117389e-06, | |
| "loss": 0.1771, | |
| "step": 665 | |
| }, | |
| { | |
| "epoch": 0.6178107606679035, | |
| "grad_norm": 5.639405250549316, | |
| "learning_rate": 7.657673956915735e-06, | |
| "loss": 0.2544, | |
| "step": 666 | |
| }, | |
| { | |
| "epoch": 0.6187384044526901, | |
| "grad_norm": 4.007811546325684, | |
| "learning_rate": 7.6261997466059035e-06, | |
| "loss": 0.1547, | |
| "step": 667 | |
| }, | |
| { | |
| "epoch": 0.6196660482374768, | |
| "grad_norm": 4.930268287658691, | |
| "learning_rate": 7.594750436337467e-06, | |
| "loss": 0.1426, | |
| "step": 668 | |
| }, | |
| { | |
| "epoch": 0.6205936920222634, | |
| "grad_norm": 5.008491516113281, | |
| "learning_rate": 7.5633263559988035e-06, | |
| "loss": 0.1129, | |
| "step": 669 | |
| }, | |
| { | |
| "epoch": 0.62152133580705, | |
| "grad_norm": 3.0749435424804688, | |
| "learning_rate": 7.531927835213657e-06, | |
| "loss": 0.1166, | |
| "step": 670 | |
| }, | |
| { | |
| "epoch": 0.6224489795918368, | |
| "grad_norm": 5.739880084991455, | |
| "learning_rate": 7.500555203337647e-06, | |
| "loss": 0.1634, | |
| "step": 671 | |
| }, | |
| { | |
| "epoch": 0.6233766233766234, | |
| "grad_norm": 3.3177554607391357, | |
| "learning_rate": 7.469208789454838e-06, | |
| "loss": 0.1388, | |
| "step": 672 | |
| }, | |
| { | |
| "epoch": 0.62430426716141, | |
| "grad_norm": 4.005073070526123, | |
| "learning_rate": 7.4378889223742766e-06, | |
| "loss": 0.1429, | |
| "step": 673 | |
| }, | |
| { | |
| "epoch": 0.6252319109461967, | |
| "grad_norm": 4.268346786499023, | |
| "learning_rate": 7.40659593062655e-06, | |
| "loss": 0.187, | |
| "step": 674 | |
| }, | |
| { | |
| "epoch": 0.6261595547309833, | |
| "grad_norm": 6.763199329376221, | |
| "learning_rate": 7.375330142460331e-06, | |
| "loss": 0.2241, | |
| "step": 675 | |
| }, | |
| { | |
| "epoch": 0.62708719851577, | |
| "grad_norm": 3.3440911769866943, | |
| "learning_rate": 7.344091885838949e-06, | |
| "loss": 0.1407, | |
| "step": 676 | |
| }, | |
| { | |
| "epoch": 0.6280148423005566, | |
| "grad_norm": 5.204977035522461, | |
| "learning_rate": 7.312881488436928e-06, | |
| "loss": 0.2322, | |
| "step": 677 | |
| }, | |
| { | |
| "epoch": 0.6289424860853432, | |
| "grad_norm": 4.433789253234863, | |
| "learning_rate": 7.2816992776365714e-06, | |
| "loss": 0.1657, | |
| "step": 678 | |
| }, | |
| { | |
| "epoch": 0.6298701298701299, | |
| "grad_norm": 4.035289287567139, | |
| "learning_rate": 7.250545580524515e-06, | |
| "loss": 0.1994, | |
| "step": 679 | |
| }, | |
| { | |
| "epoch": 0.6307977736549165, | |
| "grad_norm": 4.986621856689453, | |
| "learning_rate": 7.219420723888301e-06, | |
| "loss": 0.2483, | |
| "step": 680 | |
| }, | |
| { | |
| "epoch": 0.6307977736549165, | |
| "eval_accuracy": 0.852549889135255, | |
| "eval_f1": 0.6855791962174941, | |
| "eval_loss": 0.30598554015159607, | |
| "eval_precision": 0.8529411764705882, | |
| "eval_recall": 0.5731225296442688, | |
| "eval_runtime": 46.8299, | |
| "eval_samples_per_second": 5.894, | |
| "eval_steps_per_second": 0.192, | |
| "step": 680 | |
| }, | |
| { | |
| "epoch": 0.6317254174397031, | |
| "grad_norm": 5.635754108428955, | |
| "learning_rate": 7.188325034212944e-06, | |
| "loss": 0.2133, | |
| "step": 681 | |
| }, | |
| { | |
| "epoch": 0.6326530612244898, | |
| "grad_norm": 6.280815601348877, | |
| "learning_rate": 7.157258837677514e-06, | |
| "loss": 0.2312, | |
| "step": 682 | |
| }, | |
| { | |
| "epoch": 0.6335807050092764, | |
| "grad_norm": 2.508446455001831, | |
| "learning_rate": 7.126222460151719e-06, | |
| "loss": 0.1189, | |
| "step": 683 | |
| }, | |
| { | |
| "epoch": 0.634508348794063, | |
| "grad_norm": 3.8485682010650635, | |
| "learning_rate": 7.095216227192467e-06, | |
| "loss": 0.1505, | |
| "step": 684 | |
| }, | |
| { | |
| "epoch": 0.6354359925788498, | |
| "grad_norm": 3.7377712726593018, | |
| "learning_rate": 7.064240464040472e-06, | |
| "loss": 0.129, | |
| "step": 685 | |
| }, | |
| { | |
| "epoch": 0.6363636363636364, | |
| "grad_norm": 3.6466944217681885, | |
| "learning_rate": 7.033295495616834e-06, | |
| "loss": 0.1457, | |
| "step": 686 | |
| }, | |
| { | |
| "epoch": 0.637291280148423, | |
| "grad_norm": 4.0310564041137695, | |
| "learning_rate": 7.002381646519625e-06, | |
| "loss": 0.1739, | |
| "step": 687 | |
| }, | |
| { | |
| "epoch": 0.6382189239332097, | |
| "grad_norm": 6.287572860717773, | |
| "learning_rate": 6.971499241020495e-06, | |
| "loss": 0.1513, | |
| "step": 688 | |
| }, | |
| { | |
| "epoch": 0.6391465677179963, | |
| "grad_norm": 5.086080551147461, | |
| "learning_rate": 6.940648603061263e-06, | |
| "loss": 0.2203, | |
| "step": 689 | |
| }, | |
| { | |
| "epoch": 0.640074211502783, | |
| "grad_norm": 3.155775785446167, | |
| "learning_rate": 6.909830056250527e-06, | |
| "loss": 0.1288, | |
| "step": 690 | |
| }, | |
| { | |
| "epoch": 0.6410018552875696, | |
| "grad_norm": 4.002538204193115, | |
| "learning_rate": 6.8790439238602576e-06, | |
| "loss": 0.1526, | |
| "step": 691 | |
| }, | |
| { | |
| "epoch": 0.6419294990723562, | |
| "grad_norm": 5.535161972045898, | |
| "learning_rate": 6.848290528822417e-06, | |
| "loss": 0.2223, | |
| "step": 692 | |
| }, | |
| { | |
| "epoch": 0.6428571428571429, | |
| "grad_norm": 4.103289604187012, | |
| "learning_rate": 6.8175701937255645e-06, | |
| "loss": 0.1831, | |
| "step": 693 | |
| }, | |
| { | |
| "epoch": 0.6437847866419295, | |
| "grad_norm": 4.057906150817871, | |
| "learning_rate": 6.786883240811479e-06, | |
| "loss": 0.1619, | |
| "step": 694 | |
| }, | |
| { | |
| "epoch": 0.6447124304267161, | |
| "grad_norm": 2.6124215126037598, | |
| "learning_rate": 6.756229991971779e-06, | |
| "loss": 0.0988, | |
| "step": 695 | |
| }, | |
| { | |
| "epoch": 0.6456400742115028, | |
| "grad_norm": 3.484631061553955, | |
| "learning_rate": 6.725610768744535e-06, | |
| "loss": 0.1127, | |
| "step": 696 | |
| }, | |
| { | |
| "epoch": 0.6465677179962894, | |
| "grad_norm": 3.0936501026153564, | |
| "learning_rate": 6.695025892310913e-06, | |
| "loss": 0.1455, | |
| "step": 697 | |
| }, | |
| { | |
| "epoch": 0.647495361781076, | |
| "grad_norm": 3.7610599994659424, | |
| "learning_rate": 6.664475683491797e-06, | |
| "loss": 0.1725, | |
| "step": 698 | |
| }, | |
| { | |
| "epoch": 0.6484230055658627, | |
| "grad_norm": 3.537524700164795, | |
| "learning_rate": 6.633960462744415e-06, | |
| "loss": 0.1971, | |
| "step": 699 | |
| }, | |
| { | |
| "epoch": 0.6493506493506493, | |
| "grad_norm": 4.47899055480957, | |
| "learning_rate": 6.603480550158995e-06, | |
| "loss": 0.1411, | |
| "step": 700 | |
| }, | |
| { | |
| "epoch": 0.6493506493506493, | |
| "eval_accuracy": 0.8481152993348116, | |
| "eval_f1": 0.6714628297362111, | |
| "eval_loss": 0.31714314222335815, | |
| "eval_precision": 0.8536585365853658, | |
| "eval_recall": 0.5533596837944664, | |
| "eval_runtime": 47.2966, | |
| "eval_samples_per_second": 5.836, | |
| "eval_steps_per_second": 0.19, | |
| "step": 700 | |
| }, | |
| { | |
| "epoch": 0.650278293135436, | |
| "grad_norm": 5.3951334953308105, | |
| "learning_rate": 6.5730362654554015e-06, | |
| "loss": 0.2149, | |
| "step": 701 | |
| }, | |
| { | |
| "epoch": 0.6512059369202227, | |
| "grad_norm": 4.401817798614502, | |
| "learning_rate": 6.542627927979772e-06, | |
| "loss": 0.2215, | |
| "step": 702 | |
| }, | |
| { | |
| "epoch": 0.6521335807050093, | |
| "grad_norm": 3.9779934883117676, | |
| "learning_rate": 6.5122558567011775e-06, | |
| "loss": 0.1763, | |
| "step": 703 | |
| }, | |
| { | |
| "epoch": 0.6530612244897959, | |
| "grad_norm": 3.7413315773010254, | |
| "learning_rate": 6.481920370208274e-06, | |
| "loss": 0.1176, | |
| "step": 704 | |
| }, | |
| { | |
| "epoch": 0.6539888682745826, | |
| "grad_norm": 6.082540988922119, | |
| "learning_rate": 6.4516217867059615e-06, | |
| "loss": 0.2444, | |
| "step": 705 | |
| }, | |
| { | |
| "epoch": 0.6549165120593692, | |
| "grad_norm": 7.003627300262451, | |
| "learning_rate": 6.421360424012039e-06, | |
| "loss": 0.1389, | |
| "step": 706 | |
| }, | |
| { | |
| "epoch": 0.6558441558441559, | |
| "grad_norm": 4.237902641296387, | |
| "learning_rate": 6.39113659955389e-06, | |
| "loss": 0.1767, | |
| "step": 707 | |
| }, | |
| { | |
| "epoch": 0.6567717996289425, | |
| "grad_norm": 3.1200437545776367, | |
| "learning_rate": 6.360950630365126e-06, | |
| "loss": 0.1127, | |
| "step": 708 | |
| }, | |
| { | |
| "epoch": 0.6576994434137291, | |
| "grad_norm": 4.659437656402588, | |
| "learning_rate": 6.33080283308228e-06, | |
| "loss": 0.1292, | |
| "step": 709 | |
| }, | |
| { | |
| "epoch": 0.6586270871985158, | |
| "grad_norm": 7.017888069152832, | |
| "learning_rate": 6.300693523941481e-06, | |
| "loss": 0.201, | |
| "step": 710 | |
| }, | |
| { | |
| "epoch": 0.6595547309833024, | |
| "grad_norm": 4.794656276702881, | |
| "learning_rate": 6.270623018775135e-06, | |
| "loss": 0.1835, | |
| "step": 711 | |
| }, | |
| { | |
| "epoch": 0.660482374768089, | |
| "grad_norm": 4.382326602935791, | |
| "learning_rate": 6.2405916330086106e-06, | |
| "loss": 0.1703, | |
| "step": 712 | |
| }, | |
| { | |
| "epoch": 0.6614100185528757, | |
| "grad_norm": 6.628244876861572, | |
| "learning_rate": 6.210599681656933e-06, | |
| "loss": 0.2367, | |
| "step": 713 | |
| }, | |
| { | |
| "epoch": 0.6623376623376623, | |
| "grad_norm": 4.854303359985352, | |
| "learning_rate": 6.180647479321484e-06, | |
| "loss": 0.1874, | |
| "step": 714 | |
| }, | |
| { | |
| "epoch": 0.6632653061224489, | |
| "grad_norm": 2.9809422492980957, | |
| "learning_rate": 6.1507353401866896e-06, | |
| "loss": 0.1429, | |
| "step": 715 | |
| }, | |
| { | |
| "epoch": 0.6641929499072357, | |
| "grad_norm": 3.899773359298706, | |
| "learning_rate": 6.120863578016736e-06, | |
| "loss": 0.1745, | |
| "step": 716 | |
| }, | |
| { | |
| "epoch": 0.6651205936920223, | |
| "grad_norm": 5.175771713256836, | |
| "learning_rate": 6.091032506152274e-06, | |
| "loss": 0.162, | |
| "step": 717 | |
| }, | |
| { | |
| "epoch": 0.6660482374768089, | |
| "grad_norm": 4.2920002937316895, | |
| "learning_rate": 6.061242437507131e-06, | |
| "loss": 0.1806, | |
| "step": 718 | |
| }, | |
| { | |
| "epoch": 0.6669758812615956, | |
| "grad_norm": 4.961828708648682, | |
| "learning_rate": 6.0314936845650296e-06, | |
| "loss": 0.1259, | |
| "step": 719 | |
| }, | |
| { | |
| "epoch": 0.6679035250463822, | |
| "grad_norm": 4.258510589599609, | |
| "learning_rate": 6.00178655937631e-06, | |
| "loss": 0.2015, | |
| "step": 720 | |
| }, | |
| { | |
| "epoch": 0.6679035250463822, | |
| "eval_accuracy": 0.852549889135255, | |
| "eval_f1": 0.6825775656324582, | |
| "eval_loss": 0.31203413009643555, | |
| "eval_precision": 0.8614457831325302, | |
| "eval_recall": 0.5652173913043478, | |
| "eval_runtime": 47.6328, | |
| "eval_samples_per_second": 5.794, | |
| "eval_steps_per_second": 0.189, | |
| "step": 720 | |
| }, | |
| { | |
| "epoch": 0.6688311688311688, | |
| "grad_norm": 4.69655179977417, | |
| "learning_rate": 5.972121373554665e-06, | |
| "loss": 0.1334, | |
| "step": 721 | |
| }, | |
| { | |
| "epoch": 0.6697588126159555, | |
| "grad_norm": 4.1556806564331055, | |
| "learning_rate": 5.942498438273849e-06, | |
| "loss": 0.159, | |
| "step": 722 | |
| }, | |
| { | |
| "epoch": 0.6706864564007421, | |
| "grad_norm": 4.261734485626221, | |
| "learning_rate": 5.912918064264441e-06, | |
| "loss": 0.14, | |
| "step": 723 | |
| }, | |
| { | |
| "epoch": 0.6716141001855288, | |
| "grad_norm": 5.172565460205078, | |
| "learning_rate": 5.8833805618105635e-06, | |
| "loss": 0.2259, | |
| "step": 724 | |
| }, | |
| { | |
| "epoch": 0.6725417439703154, | |
| "grad_norm": 5.771614074707031, | |
| "learning_rate": 5.853886240746643e-06, | |
| "loss": 0.1806, | |
| "step": 725 | |
| }, | |
| { | |
| "epoch": 0.673469387755102, | |
| "grad_norm": 3.933560371398926, | |
| "learning_rate": 5.82443541045415e-06, | |
| "loss": 0.1874, | |
| "step": 726 | |
| }, | |
| { | |
| "epoch": 0.6743970315398887, | |
| "grad_norm": 4.140106201171875, | |
| "learning_rate": 5.795028379858355e-06, | |
| "loss": 0.1252, | |
| "step": 727 | |
| }, | |
| { | |
| "epoch": 0.6753246753246753, | |
| "grad_norm": 4.954802513122559, | |
| "learning_rate": 5.765665457425102e-06, | |
| "loss": 0.2296, | |
| "step": 728 | |
| }, | |
| { | |
| "epoch": 0.6762523191094619, | |
| "grad_norm": 3.336763620376587, | |
| "learning_rate": 5.736346951157544e-06, | |
| "loss": 0.1571, | |
| "step": 729 | |
| }, | |
| { | |
| "epoch": 0.6771799628942486, | |
| "grad_norm": 6.951479434967041, | |
| "learning_rate": 5.707073168592943e-06, | |
| "loss": 0.227, | |
| "step": 730 | |
| }, | |
| { | |
| "epoch": 0.6781076066790352, | |
| "grad_norm": 6.168885231018066, | |
| "learning_rate": 5.677844416799424e-06, | |
| "loss": 0.1793, | |
| "step": 731 | |
| }, | |
| { | |
| "epoch": 0.6790352504638218, | |
| "grad_norm": 4.202786445617676, | |
| "learning_rate": 5.648661002372769e-06, | |
| "loss": 0.1307, | |
| "step": 732 | |
| }, | |
| { | |
| "epoch": 0.6799628942486086, | |
| "grad_norm": 5.333949565887451, | |
| "learning_rate": 5.619523231433177e-06, | |
| "loss": 0.1435, | |
| "step": 733 | |
| }, | |
| { | |
| "epoch": 0.6808905380333952, | |
| "grad_norm": 3.218813896179199, | |
| "learning_rate": 5.590431409622081e-06, | |
| "loss": 0.152, | |
| "step": 734 | |
| }, | |
| { | |
| "epoch": 0.6818181818181818, | |
| "grad_norm": 3.0740416049957275, | |
| "learning_rate": 5.56138584209893e-06, | |
| "loss": 0.1625, | |
| "step": 735 | |
| }, | |
| { | |
| "epoch": 0.6827458256029685, | |
| "grad_norm": 3.7008039951324463, | |
| "learning_rate": 5.5323868335379775e-06, | |
| "loss": 0.1361, | |
| "step": 736 | |
| }, | |
| { | |
| "epoch": 0.6836734693877551, | |
| "grad_norm": 3.482449769973755, | |
| "learning_rate": 5.503434688125104e-06, | |
| "loss": 0.1539, | |
| "step": 737 | |
| }, | |
| { | |
| "epoch": 0.6846011131725418, | |
| "grad_norm": 3.1356945037841797, | |
| "learning_rate": 5.4745297095546125e-06, | |
| "loss": 0.116, | |
| "step": 738 | |
| }, | |
| { | |
| "epoch": 0.6855287569573284, | |
| "grad_norm": 3.0260050296783447, | |
| "learning_rate": 5.445672201026054e-06, | |
| "loss": 0.1491, | |
| "step": 739 | |
| }, | |
| { | |
| "epoch": 0.686456400742115, | |
| "grad_norm": 6.386862754821777, | |
| "learning_rate": 5.416862465241033e-06, | |
| "loss": 0.2216, | |
| "step": 740 | |
| }, | |
| { | |
| "epoch": 0.686456400742115, | |
| "eval_accuracy": 0.8503325942350333, | |
| "eval_f1": 0.6762589928057554, | |
| "eval_loss": 0.30298492312431335, | |
| "eval_precision": 0.8597560975609756, | |
| "eval_recall": 0.5573122529644269, | |
| "eval_runtime": 47.363, | |
| "eval_samples_per_second": 5.827, | |
| "eval_steps_per_second": 0.19, | |
| "step": 740 | |
| }, | |
| { | |
| "epoch": 0.6873840445269017, | |
| "grad_norm": 2.5552661418914795, | |
| "learning_rate": 5.3881008044000495e-06, | |
| "loss": 0.113, | |
| "step": 741 | |
| }, | |
| { | |
| "epoch": 0.6883116883116883, | |
| "grad_norm": 3.5924112796783447, | |
| "learning_rate": 5.359387520199317e-06, | |
| "loss": 0.0896, | |
| "step": 742 | |
| }, | |
| { | |
| "epoch": 0.6892393320964749, | |
| "grad_norm": 3.5902557373046875, | |
| "learning_rate": 5.330722913827594e-06, | |
| "loss": 0.2078, | |
| "step": 743 | |
| }, | |
| { | |
| "epoch": 0.6901669758812616, | |
| "grad_norm": 3.3769776821136475, | |
| "learning_rate": 5.302107285963045e-06, | |
| "loss": 0.1311, | |
| "step": 744 | |
| }, | |
| { | |
| "epoch": 0.6910946196660482, | |
| "grad_norm": 4.20431661605835, | |
| "learning_rate": 5.273540936770059e-06, | |
| "loss": 0.1694, | |
| "step": 745 | |
| }, | |
| { | |
| "epoch": 0.6920222634508348, | |
| "grad_norm": 3.680614948272705, | |
| "learning_rate": 5.245024165896126e-06, | |
| "loss": 0.168, | |
| "step": 746 | |
| }, | |
| { | |
| "epoch": 0.6929499072356216, | |
| "grad_norm": 4.88540506362915, | |
| "learning_rate": 5.216557272468675e-06, | |
| "loss": 0.2021, | |
| "step": 747 | |
| }, | |
| { | |
| "epoch": 0.6938775510204082, | |
| "grad_norm": 3.159914016723633, | |
| "learning_rate": 5.18814055509195e-06, | |
| "loss": 0.1696, | |
| "step": 748 | |
| }, | |
| { | |
| "epoch": 0.6948051948051948, | |
| "grad_norm": 5.163516044616699, | |
| "learning_rate": 5.1597743118438725e-06, | |
| "loss": 0.1485, | |
| "step": 749 | |
| }, | |
| { | |
| "epoch": 0.6957328385899815, | |
| "grad_norm": 6.143499851226807, | |
| "learning_rate": 5.131458840272905e-06, | |
| "loss": 0.1769, | |
| "step": 750 | |
| }, | |
| { | |
| "epoch": 0.6966604823747681, | |
| "grad_norm": 3.032066822052002, | |
| "learning_rate": 5.103194437394952e-06, | |
| "loss": 0.1202, | |
| "step": 751 | |
| }, | |
| { | |
| "epoch": 0.6975881261595547, | |
| "grad_norm": 4.9984965324401855, | |
| "learning_rate": 5.074981399690219e-06, | |
| "loss": 0.2292, | |
| "step": 752 | |
| }, | |
| { | |
| "epoch": 0.6985157699443414, | |
| "grad_norm": 2.7881083488464355, | |
| "learning_rate": 5.046820023100129e-06, | |
| "loss": 0.0989, | |
| "step": 753 | |
| }, | |
| { | |
| "epoch": 0.699443413729128, | |
| "grad_norm": 4.462379455566406, | |
| "learning_rate": 5.018710603024187e-06, | |
| "loss": 0.1788, | |
| "step": 754 | |
| }, | |
| { | |
| "epoch": 0.7003710575139147, | |
| "grad_norm": 5.801154613494873, | |
| "learning_rate": 4.990653434316915e-06, | |
| "loss": 0.1807, | |
| "step": 755 | |
| }, | |
| { | |
| "epoch": 0.7012987012987013, | |
| "grad_norm": 4.452095031738281, | |
| "learning_rate": 4.9626488112847384e-06, | |
| "loss": 0.1348, | |
| "step": 756 | |
| }, | |
| { | |
| "epoch": 0.7022263450834879, | |
| "grad_norm": 4.256060600280762, | |
| "learning_rate": 4.934697027682894e-06, | |
| "loss": 0.1618, | |
| "step": 757 | |
| }, | |
| { | |
| "epoch": 0.7031539888682746, | |
| "grad_norm": 2.893348217010498, | |
| "learning_rate": 4.9067983767123736e-06, | |
| "loss": 0.114, | |
| "step": 758 | |
| }, | |
| { | |
| "epoch": 0.7040816326530612, | |
| "grad_norm": 2.409883737564087, | |
| "learning_rate": 4.878953151016816e-06, | |
| "loss": 0.0739, | |
| "step": 759 | |
| }, | |
| { | |
| "epoch": 0.7050092764378478, | |
| "grad_norm": 4.646303176879883, | |
| "learning_rate": 4.851161642679466e-06, | |
| "loss": 0.1936, | |
| "step": 760 | |
| }, | |
| { | |
| "epoch": 0.7050092764378478, | |
| "eval_accuracy": 0.8503325942350333, | |
| "eval_f1": 0.6762589928057554, | |
| "eval_loss": 0.30913296341896057, | |
| "eval_precision": 0.8597560975609756, | |
| "eval_recall": 0.5573122529644269, | |
| "eval_runtime": 47.2588, | |
| "eval_samples_per_second": 5.84, | |
| "eval_steps_per_second": 0.19, | |
| "step": 760 | |
| }, | |
| { | |
| "epoch": 0.7059369202226345, | |
| "grad_norm": 3.7548396587371826, | |
| "learning_rate": 4.823424143220097e-06, | |
| "loss": 0.1332, | |
| "step": 761 | |
| }, | |
| { | |
| "epoch": 0.7068645640074211, | |
| "grad_norm": 4.519832611083984, | |
| "learning_rate": 4.795740943591955e-06, | |
| "loss": 0.1508, | |
| "step": 762 | |
| }, | |
| { | |
| "epoch": 0.7077922077922078, | |
| "grad_norm": 4.223718643188477, | |
| "learning_rate": 4.7681123341787e-06, | |
| "loss": 0.2027, | |
| "step": 763 | |
| }, | |
| { | |
| "epoch": 0.7087198515769945, | |
| "grad_norm": 4.345351219177246, | |
| "learning_rate": 4.740538604791371e-06, | |
| "loss": 0.1641, | |
| "step": 764 | |
| }, | |
| { | |
| "epoch": 0.7096474953617811, | |
| "grad_norm": 6.437448024749756, | |
| "learning_rate": 4.713020044665348e-06, | |
| "loss": 0.1839, | |
| "step": 765 | |
| }, | |
| { | |
| "epoch": 0.7105751391465677, | |
| "grad_norm": 5.154228687286377, | |
| "learning_rate": 4.685556942457296e-06, | |
| "loss": 0.1696, | |
| "step": 766 | |
| }, | |
| { | |
| "epoch": 0.7115027829313544, | |
| "grad_norm": 4.437908172607422, | |
| "learning_rate": 4.65814958624217e-06, | |
| "loss": 0.1199, | |
| "step": 767 | |
| }, | |
| { | |
| "epoch": 0.712430426716141, | |
| "grad_norm": 4.337602615356445, | |
| "learning_rate": 4.630798263510162e-06, | |
| "loss": 0.166, | |
| "step": 768 | |
| }, | |
| { | |
| "epoch": 0.7133580705009277, | |
| "grad_norm": 4.633522987365723, | |
| "learning_rate": 4.60350326116371e-06, | |
| "loss": 0.1667, | |
| "step": 769 | |
| }, | |
| { | |
| "epoch": 0.7142857142857143, | |
| "grad_norm": 4.564393997192383, | |
| "learning_rate": 4.576264865514467e-06, | |
| "loss": 0.1996, | |
| "step": 770 | |
| }, | |
| { | |
| "epoch": 0.7152133580705009, | |
| "grad_norm": 5.6954803466796875, | |
| "learning_rate": 4.549083362280318e-06, | |
| "loss": 0.1227, | |
| "step": 771 | |
| }, | |
| { | |
| "epoch": 0.7161410018552876, | |
| "grad_norm": 2.704313278198242, | |
| "learning_rate": 4.521959036582372e-06, | |
| "loss": 0.1118, | |
| "step": 772 | |
| }, | |
| { | |
| "epoch": 0.7170686456400742, | |
| "grad_norm": 3.529452085494995, | |
| "learning_rate": 4.494892172941965e-06, | |
| "loss": 0.1595, | |
| "step": 773 | |
| }, | |
| { | |
| "epoch": 0.7179962894248608, | |
| "grad_norm": 4.921022415161133, | |
| "learning_rate": 4.467883055277696e-06, | |
| "loss": 0.1755, | |
| "step": 774 | |
| }, | |
| { | |
| "epoch": 0.7189239332096475, | |
| "grad_norm": 3.468756675720215, | |
| "learning_rate": 4.440931966902419e-06, | |
| "loss": 0.1198, | |
| "step": 775 | |
| }, | |
| { | |
| "epoch": 0.7198515769944341, | |
| "grad_norm": 3.8362793922424316, | |
| "learning_rate": 4.414039190520308e-06, | |
| "loss": 0.162, | |
| "step": 776 | |
| }, | |
| { | |
| "epoch": 0.7207792207792207, | |
| "grad_norm": 3.9787826538085938, | |
| "learning_rate": 4.3872050082238535e-06, | |
| "loss": 0.1539, | |
| "step": 777 | |
| }, | |
| { | |
| "epoch": 0.7217068645640075, | |
| "grad_norm": 4.675662517547607, | |
| "learning_rate": 4.360429701490935e-06, | |
| "loss": 0.1961, | |
| "step": 778 | |
| }, | |
| { | |
| "epoch": 0.7226345083487941, | |
| "grad_norm": 3.907136917114258, | |
| "learning_rate": 4.3337135511818514e-06, | |
| "loss": 0.1566, | |
| "step": 779 | |
| }, | |
| { | |
| "epoch": 0.7235621521335807, | |
| "grad_norm": 4.647920608520508, | |
| "learning_rate": 4.307056837536373e-06, | |
| "loss": 0.135, | |
| "step": 780 | |
| }, | |
| { | |
| "epoch": 0.7235621521335807, | |
| "eval_accuracy": 0.852549889135255, | |
| "eval_f1": 0.6855791962174941, | |
| "eval_loss": 0.30233433842658997, | |
| "eval_precision": 0.8529411764705882, | |
| "eval_recall": 0.5731225296442688, | |
| "eval_runtime": 49.2875, | |
| "eval_samples_per_second": 5.6, | |
| "eval_steps_per_second": 0.183, | |
| "step": 780 | |
| }, | |
| { | |
| "epoch": 0.7244897959183674, | |
| "grad_norm": 4.066539287567139, | |
| "learning_rate": 4.280459840170818e-06, | |
| "loss": 0.098, | |
| "step": 781 | |
| }, | |
| { | |
| "epoch": 0.725417439703154, | |
| "grad_norm": 4.152294635772705, | |
| "learning_rate": 4.2539228380750955e-06, | |
| "loss": 0.1492, | |
| "step": 782 | |
| }, | |
| { | |
| "epoch": 0.7263450834879406, | |
| "grad_norm": 4.5321831703186035, | |
| "learning_rate": 4.2274461096098085e-06, | |
| "loss": 0.1727, | |
| "step": 783 | |
| }, | |
| { | |
| "epoch": 0.7272727272727273, | |
| "grad_norm": 4.524892807006836, | |
| "learning_rate": 4.201029932503303e-06, | |
| "loss": 0.1529, | |
| "step": 784 | |
| }, | |
| { | |
| "epoch": 0.7282003710575139, | |
| "grad_norm": 6.373286724090576, | |
| "learning_rate": 4.17467458384878e-06, | |
| "loss": 0.1686, | |
| "step": 785 | |
| }, | |
| { | |
| "epoch": 0.7291280148423006, | |
| "grad_norm": 3.8836734294891357, | |
| "learning_rate": 4.14838034010138e-06, | |
| "loss": 0.1148, | |
| "step": 786 | |
| }, | |
| { | |
| "epoch": 0.7300556586270872, | |
| "grad_norm": 4.612453937530518, | |
| "learning_rate": 4.12214747707527e-06, | |
| "loss": 0.1585, | |
| "step": 787 | |
| }, | |
| { | |
| "epoch": 0.7309833024118738, | |
| "grad_norm": 3.490913152694702, | |
| "learning_rate": 4.095976269940777e-06, | |
| "loss": 0.1045, | |
| "step": 788 | |
| }, | |
| { | |
| "epoch": 0.7319109461966605, | |
| "grad_norm": 4.676249027252197, | |
| "learning_rate": 4.069866993221473e-06, | |
| "loss": 0.2105, | |
| "step": 789 | |
| }, | |
| { | |
| "epoch": 0.7328385899814471, | |
| "grad_norm": 4.428783416748047, | |
| "learning_rate": 4.043819920791322e-06, | |
| "loss": 0.1764, | |
| "step": 790 | |
| }, | |
| { | |
| "epoch": 0.7337662337662337, | |
| "grad_norm": 4.41617488861084, | |
| "learning_rate": 4.017835325871781e-06, | |
| "loss": 0.1616, | |
| "step": 791 | |
| }, | |
| { | |
| "epoch": 0.7346938775510204, | |
| "grad_norm": 3.8224010467529297, | |
| "learning_rate": 3.991913481028965e-06, | |
| "loss": 0.2009, | |
| "step": 792 | |
| }, | |
| { | |
| "epoch": 0.735621521335807, | |
| "grad_norm": 4.631185531616211, | |
| "learning_rate": 3.966054658170754e-06, | |
| "loss": 0.1481, | |
| "step": 793 | |
| }, | |
| { | |
| "epoch": 0.7365491651205937, | |
| "grad_norm": 4.32712984085083, | |
| "learning_rate": 3.940259128543967e-06, | |
| "loss": 0.1977, | |
| "step": 794 | |
| }, | |
| { | |
| "epoch": 0.7374768089053804, | |
| "grad_norm": 2.936823606491089, | |
| "learning_rate": 3.914527162731498e-06, | |
| "loss": 0.1197, | |
| "step": 795 | |
| }, | |
| { | |
| "epoch": 0.738404452690167, | |
| "grad_norm": 6.375387191772461, | |
| "learning_rate": 3.888859030649498e-06, | |
| "loss": 0.2587, | |
| "step": 796 | |
| }, | |
| { | |
| "epoch": 0.7393320964749536, | |
| "grad_norm": 5.587421894073486, | |
| "learning_rate": 3.863255001544526e-06, | |
| "loss": 0.2271, | |
| "step": 797 | |
| }, | |
| { | |
| "epoch": 0.7402597402597403, | |
| "grad_norm": 5.898324966430664, | |
| "learning_rate": 3.837715343990727e-06, | |
| "loss": 0.2026, | |
| "step": 798 | |
| }, | |
| { | |
| "epoch": 0.7411873840445269, | |
| "grad_norm": 6.89965295791626, | |
| "learning_rate": 3.81224032588703e-06, | |
| "loss": 0.222, | |
| "step": 799 | |
| }, | |
| { | |
| "epoch": 0.7421150278293135, | |
| "grad_norm": 4.845470428466797, | |
| "learning_rate": 3.7868302144543146e-06, | |
| "loss": 0.1332, | |
| "step": 800 | |
| }, | |
| { | |
| "epoch": 0.7421150278293135, | |
| "eval_accuracy": 0.843680709534368, | |
| "eval_f1": 0.6466165413533834, | |
| "eval_loss": 0.32069557905197144, | |
| "eval_precision": 0.8835616438356164, | |
| "eval_recall": 0.5098814229249012, | |
| "eval_runtime": 47.5467, | |
| "eval_samples_per_second": 5.805, | |
| "eval_steps_per_second": 0.189, | |
| "step": 800 | |
| }, | |
| { | |
| "epoch": 0.7430426716141002, | |
| "grad_norm": 3.8346760272979736, | |
| "learning_rate": 3.7614852762326303e-06, | |
| "loss": 0.1511, | |
| "step": 801 | |
| }, | |
| { | |
| "epoch": 0.7439703153988868, | |
| "grad_norm": 3.3582327365875244, | |
| "learning_rate": 3.736205777078381e-06, | |
| "loss": 0.099, | |
| "step": 802 | |
| }, | |
| { | |
| "epoch": 0.7448979591836735, | |
| "grad_norm": 4.043883323669434, | |
| "learning_rate": 3.7109919821615546e-06, | |
| "loss": 0.1684, | |
| "step": 803 | |
| }, | |
| { | |
| "epoch": 0.7458256029684601, | |
| "grad_norm": 4.061108112335205, | |
| "learning_rate": 3.685844155962931e-06, | |
| "loss": 0.1874, | |
| "step": 804 | |
| }, | |
| { | |
| "epoch": 0.7467532467532467, | |
| "grad_norm": 2.596107244491577, | |
| "learning_rate": 3.6607625622713005e-06, | |
| "loss": 0.1146, | |
| "step": 805 | |
| }, | |
| { | |
| "epoch": 0.7476808905380334, | |
| "grad_norm": 3.7484891414642334, | |
| "learning_rate": 3.63574746418072e-06, | |
| "loss": 0.1027, | |
| "step": 806 | |
| }, | |
| { | |
| "epoch": 0.74860853432282, | |
| "grad_norm": 4.787120342254639, | |
| "learning_rate": 3.610799124087725e-06, | |
| "loss": 0.2284, | |
| "step": 807 | |
| }, | |
| { | |
| "epoch": 0.7495361781076066, | |
| "grad_norm": 3.2740108966827393, | |
| "learning_rate": 3.585917803688603e-06, | |
| "loss": 0.126, | |
| "step": 808 | |
| }, | |
| { | |
| "epoch": 0.7504638218923934, | |
| "grad_norm": 3.000568389892578, | |
| "learning_rate": 3.5611037639766267e-06, | |
| "loss": 0.1258, | |
| "step": 809 | |
| }, | |
| { | |
| "epoch": 0.75139146567718, | |
| "grad_norm": 3.994319200515747, | |
| "learning_rate": 3.536357265239333e-06, | |
| "loss": 0.2042, | |
| "step": 810 | |
| }, | |
| { | |
| "epoch": 0.7523191094619666, | |
| "grad_norm": 3.8152012825012207, | |
| "learning_rate": 3.511678567055786e-06, | |
| "loss": 0.147, | |
| "step": 811 | |
| }, | |
| { | |
| "epoch": 0.7532467532467533, | |
| "grad_norm": 3.8970158100128174, | |
| "learning_rate": 3.487067928293848e-06, | |
| "loss": 0.142, | |
| "step": 812 | |
| }, | |
| { | |
| "epoch": 0.7541743970315399, | |
| "grad_norm": 4.383775234222412, | |
| "learning_rate": 3.4625256071074776e-06, | |
| "loss": 0.1752, | |
| "step": 813 | |
| }, | |
| { | |
| "epoch": 0.7551020408163265, | |
| "grad_norm": 3.43391489982605, | |
| "learning_rate": 3.4380518609340076e-06, | |
| "loss": 0.1207, | |
| "step": 814 | |
| }, | |
| { | |
| "epoch": 0.7560296846011132, | |
| "grad_norm": 3.767289161682129, | |
| "learning_rate": 3.413646946491458e-06, | |
| "loss": 0.1301, | |
| "step": 815 | |
| }, | |
| { | |
| "epoch": 0.7569573283858998, | |
| "grad_norm": 3.6454761028289795, | |
| "learning_rate": 3.3893111197758276e-06, | |
| "loss": 0.1729, | |
| "step": 816 | |
| }, | |
| { | |
| "epoch": 0.7578849721706865, | |
| "grad_norm": 4.36724328994751, | |
| "learning_rate": 3.3650446360584276e-06, | |
| "loss": 0.1707, | |
| "step": 817 | |
| }, | |
| { | |
| "epoch": 0.7588126159554731, | |
| "grad_norm": 2.9927897453308105, | |
| "learning_rate": 3.3408477498831917e-06, | |
| "loss": 0.112, | |
| "step": 818 | |
| }, | |
| { | |
| "epoch": 0.7597402597402597, | |
| "grad_norm": 4.448176860809326, | |
| "learning_rate": 3.3167207150640003e-06, | |
| "loss": 0.1064, | |
| "step": 819 | |
| }, | |
| { | |
| "epoch": 0.7606679035250464, | |
| "grad_norm": 6.467900276184082, | |
| "learning_rate": 3.2926637846820366e-06, | |
| "loss": 0.249, | |
| "step": 820 | |
| }, | |
| { | |
| "epoch": 0.7606679035250464, | |
| "eval_accuracy": 0.8592017738359202, | |
| "eval_f1": 0.6997635933806147, | |
| "eval_loss": 0.3030702769756317, | |
| "eval_precision": 0.8705882352941177, | |
| "eval_recall": 0.5849802371541502, | |
| "eval_runtime": 47.6515, | |
| "eval_samples_per_second": 5.792, | |
| "eval_steps_per_second": 0.189, | |
| "step": 820 | |
| }, | |
| { | |
| "epoch": 0.761595547309833, | |
| "grad_norm": 3.21474027633667, | |
| "learning_rate": 3.268677211083109e-06, | |
| "loss": 0.1103, | |
| "step": 821 | |
| }, | |
| { | |
| "epoch": 0.7625231910946196, | |
| "grad_norm": 3.8230926990509033, | |
| "learning_rate": 3.2447612458750365e-06, | |
| "loss": 0.1608, | |
| "step": 822 | |
| }, | |
| { | |
| "epoch": 0.7634508348794063, | |
| "grad_norm": 4.736266136169434, | |
| "learning_rate": 3.2209161399249677e-06, | |
| "loss": 0.1549, | |
| "step": 823 | |
| }, | |
| { | |
| "epoch": 0.764378478664193, | |
| "grad_norm": 5.4161481857299805, | |
| "learning_rate": 3.197142143356787e-06, | |
| "loss": 0.1905, | |
| "step": 824 | |
| }, | |
| { | |
| "epoch": 0.7653061224489796, | |
| "grad_norm": 3.857360601425171, | |
| "learning_rate": 3.1734395055484623e-06, | |
| "loss": 0.1287, | |
| "step": 825 | |
| }, | |
| { | |
| "epoch": 0.7662337662337663, | |
| "grad_norm": 4.246245861053467, | |
| "learning_rate": 3.1498084751294523e-06, | |
| "loss": 0.1751, | |
| "step": 826 | |
| }, | |
| { | |
| "epoch": 0.7671614100185529, | |
| "grad_norm": 4.21674108505249, | |
| "learning_rate": 3.126249299978086e-06, | |
| "loss": 0.1593, | |
| "step": 827 | |
| }, | |
| { | |
| "epoch": 0.7680890538033395, | |
| "grad_norm": 3.7095324993133545, | |
| "learning_rate": 3.1027622272189572e-06, | |
| "loss": 0.1384, | |
| "step": 828 | |
| }, | |
| { | |
| "epoch": 0.7690166975881262, | |
| "grad_norm": 6.3794965744018555, | |
| "learning_rate": 3.0793475032203513e-06, | |
| "loss": 0.1583, | |
| "step": 829 | |
| }, | |
| { | |
| "epoch": 0.7699443413729128, | |
| "grad_norm": 3.0277578830718994, | |
| "learning_rate": 3.0560053735916372e-06, | |
| "loss": 0.1043, | |
| "step": 830 | |
| }, | |
| { | |
| "epoch": 0.7708719851576994, | |
| "grad_norm": 5.117831707000732, | |
| "learning_rate": 3.032736083180716e-06, | |
| "loss": 0.15, | |
| "step": 831 | |
| }, | |
| { | |
| "epoch": 0.7717996289424861, | |
| "grad_norm": 2.76505184173584, | |
| "learning_rate": 3.009539876071427e-06, | |
| "loss": 0.0558, | |
| "step": 832 | |
| }, | |
| { | |
| "epoch": 0.7727272727272727, | |
| "grad_norm": 4.057276725769043, | |
| "learning_rate": 2.9864169955810085e-06, | |
| "loss": 0.1391, | |
| "step": 833 | |
| }, | |
| { | |
| "epoch": 0.7736549165120594, | |
| "grad_norm": 3.700852870941162, | |
| "learning_rate": 2.9633676842575386e-06, | |
| "loss": 0.1721, | |
| "step": 834 | |
| }, | |
| { | |
| "epoch": 0.774582560296846, | |
| "grad_norm": 4.1468939781188965, | |
| "learning_rate": 2.940392183877382e-06, | |
| "loss": 0.1866, | |
| "step": 835 | |
| }, | |
| { | |
| "epoch": 0.7755102040816326, | |
| "grad_norm": 3.1190547943115234, | |
| "learning_rate": 2.9174907354426696e-06, | |
| "loss": 0.1292, | |
| "step": 836 | |
| }, | |
| { | |
| "epoch": 0.7764378478664193, | |
| "grad_norm": 3.0235095024108887, | |
| "learning_rate": 2.8946635791787546e-06, | |
| "loss": 0.0629, | |
| "step": 837 | |
| }, | |
| { | |
| "epoch": 0.7773654916512059, | |
| "grad_norm": 3.435035467147827, | |
| "learning_rate": 2.8719109545317102e-06, | |
| "loss": 0.1064, | |
| "step": 838 | |
| }, | |
| { | |
| "epoch": 0.7782931354359925, | |
| "grad_norm": 5.368072986602783, | |
| "learning_rate": 2.849233100165795e-06, | |
| "loss": 0.1662, | |
| "step": 839 | |
| }, | |
| { | |
| "epoch": 0.7792207792207793, | |
| "grad_norm": 4.488304615020752, | |
| "learning_rate": 2.8266302539609747e-06, | |
| "loss": 0.2033, | |
| "step": 840 | |
| }, | |
| { | |
| "epoch": 0.7792207792207793, | |
| "eval_accuracy": 0.8592017738359202, | |
| "eval_f1": 0.6983372921615202, | |
| "eval_loss": 0.30760514736175537, | |
| "eval_precision": 0.875, | |
| "eval_recall": 0.5810276679841897, | |
| "eval_runtime": 47.6262, | |
| "eval_samples_per_second": 5.795, | |
| "eval_steps_per_second": 0.189, | |
| "step": 840 | |
| }, | |
| { | |
| "epoch": 0.7801484230055659, | |
| "grad_norm": 4.42378044128418, | |
| "learning_rate": 2.8041026530104144e-06, | |
| "loss": 0.1223, | |
| "step": 841 | |
| }, | |
| { | |
| "epoch": 0.7810760667903525, | |
| "grad_norm": 3.143146514892578, | |
| "learning_rate": 2.78165053361798e-06, | |
| "loss": 0.1408, | |
| "step": 842 | |
| }, | |
| { | |
| "epoch": 0.7820037105751392, | |
| "grad_norm": 3.8427817821502686, | |
| "learning_rate": 2.759274131295787e-06, | |
| "loss": 0.0995, | |
| "step": 843 | |
| }, | |
| { | |
| "epoch": 0.7829313543599258, | |
| "grad_norm": 4.896306991577148, | |
| "learning_rate": 2.736973680761702e-06, | |
| "loss": 0.1621, | |
| "step": 844 | |
| }, | |
| { | |
| "epoch": 0.7838589981447124, | |
| "grad_norm": 5.344152450561523, | |
| "learning_rate": 2.714749415936904e-06, | |
| "loss": 0.1749, | |
| "step": 845 | |
| }, | |
| { | |
| "epoch": 0.7847866419294991, | |
| "grad_norm": 4.093927383422852, | |
| "learning_rate": 2.692601569943407e-06, | |
| "loss": 0.1716, | |
| "step": 846 | |
| }, | |
| { | |
| "epoch": 0.7857142857142857, | |
| "grad_norm": 3.3568267822265625, | |
| "learning_rate": 2.670530375101641e-06, | |
| "loss": 0.1658, | |
| "step": 847 | |
| }, | |
| { | |
| "epoch": 0.7866419294990723, | |
| "grad_norm": 3.3225290775299072, | |
| "learning_rate": 2.648536062927999e-06, | |
| "loss": 0.1358, | |
| "step": 848 | |
| }, | |
| { | |
| "epoch": 0.787569573283859, | |
| "grad_norm": 4.490353584289551, | |
| "learning_rate": 2.6266188641324e-06, | |
| "loss": 0.1178, | |
| "step": 849 | |
| }, | |
| { | |
| "epoch": 0.7884972170686456, | |
| "grad_norm": 3.4706945419311523, | |
| "learning_rate": 2.604779008615895e-06, | |
| "loss": 0.0946, | |
| "step": 850 | |
| }, | |
| { | |
| "epoch": 0.7894248608534323, | |
| "grad_norm": 5.027894020080566, | |
| "learning_rate": 2.583016725468226e-06, | |
| "loss": 0.1434, | |
| "step": 851 | |
| }, | |
| { | |
| "epoch": 0.7903525046382189, | |
| "grad_norm": 4.010559558868408, | |
| "learning_rate": 2.5613322429654573e-06, | |
| "loss": 0.158, | |
| "step": 852 | |
| }, | |
| { | |
| "epoch": 0.7912801484230055, | |
| "grad_norm": 6.057810306549072, | |
| "learning_rate": 2.5397257885675396e-06, | |
| "loss": 0.1912, | |
| "step": 853 | |
| }, | |
| { | |
| "epoch": 0.7922077922077922, | |
| "grad_norm": 5.5663371086120605, | |
| "learning_rate": 2.5181975889159615e-06, | |
| "loss": 0.2398, | |
| "step": 854 | |
| }, | |
| { | |
| "epoch": 0.7931354359925789, | |
| "grad_norm": 2.8672025203704834, | |
| "learning_rate": 2.496747869831345e-06, | |
| "loss": 0.0944, | |
| "step": 855 | |
| }, | |
| { | |
| "epoch": 0.7940630797773655, | |
| "grad_norm": 4.048581123352051, | |
| "learning_rate": 2.475376856311097e-06, | |
| "loss": 0.1603, | |
| "step": 856 | |
| }, | |
| { | |
| "epoch": 0.7949907235621522, | |
| "grad_norm": 5.737659931182861, | |
| "learning_rate": 2.4540847725270376e-06, | |
| "loss": 0.1366, | |
| "step": 857 | |
| }, | |
| { | |
| "epoch": 0.7959183673469388, | |
| "grad_norm": 4.4261155128479, | |
| "learning_rate": 2.432871841823047e-06, | |
| "loss": 0.1939, | |
| "step": 858 | |
| }, | |
| { | |
| "epoch": 0.7968460111317254, | |
| "grad_norm": 5.629834175109863, | |
| "learning_rate": 2.411738286712735e-06, | |
| "loss": 0.2281, | |
| "step": 859 | |
| }, | |
| { | |
| "epoch": 0.7977736549165121, | |
| "grad_norm": 3.919034004211426, | |
| "learning_rate": 2.390684328877089e-06, | |
| "loss": 0.1418, | |
| "step": 860 | |
| }, | |
| { | |
| "epoch": 0.7977736549165121, | |
| "eval_accuracy": 0.8614190687361419, | |
| "eval_f1": 0.7072599531615925, | |
| "eval_loss": 0.29977986216545105, | |
| "eval_precision": 0.867816091954023, | |
| "eval_recall": 0.5968379446640316, | |
| "eval_runtime": 47.4113, | |
| "eval_samples_per_second": 5.821, | |
| "eval_steps_per_second": 0.19, | |
| "step": 860 | |
| }, | |
| { | |
| "epoch": 0.7987012987012987, | |
| "grad_norm": 7.607851982116699, | |
| "learning_rate": 2.36971018916217e-06, | |
| "loss": 0.187, | |
| "step": 861 | |
| }, | |
| { | |
| "epoch": 0.7996289424860853, | |
| "grad_norm": 3.1179118156433105, | |
| "learning_rate": 2.3488160875767717e-06, | |
| "loss": 0.1326, | |
| "step": 862 | |
| }, | |
| { | |
| "epoch": 0.800556586270872, | |
| "grad_norm": 3.8754749298095703, | |
| "learning_rate": 2.328002243290138e-06, | |
| "loss": 0.1497, | |
| "step": 863 | |
| }, | |
| { | |
| "epoch": 0.8014842300556586, | |
| "grad_norm": 4.216552734375, | |
| "learning_rate": 2.307268874629649e-06, | |
| "loss": 0.1259, | |
| "step": 864 | |
| }, | |
| { | |
| "epoch": 0.8024118738404453, | |
| "grad_norm": 5.980984210968018, | |
| "learning_rate": 2.2866161990785228e-06, | |
| "loss": 0.1778, | |
| "step": 865 | |
| }, | |
| { | |
| "epoch": 0.8033395176252319, | |
| "grad_norm": 4.638891220092773, | |
| "learning_rate": 2.266044433273562e-06, | |
| "loss": 0.2258, | |
| "step": 866 | |
| }, | |
| { | |
| "epoch": 0.8042671614100185, | |
| "grad_norm": 3.3244616985321045, | |
| "learning_rate": 2.245553793002849e-06, | |
| "loss": 0.1215, | |
| "step": 867 | |
| }, | |
| { | |
| "epoch": 0.8051948051948052, | |
| "grad_norm": 6.757506370544434, | |
| "learning_rate": 2.2251444932035094e-06, | |
| "loss": 0.159, | |
| "step": 868 | |
| }, | |
| { | |
| "epoch": 0.8061224489795918, | |
| "grad_norm": 5.527317523956299, | |
| "learning_rate": 2.204816747959434e-06, | |
| "loss": 0.1025, | |
| "step": 869 | |
| }, | |
| { | |
| "epoch": 0.8070500927643784, | |
| "grad_norm": 3.5259664058685303, | |
| "learning_rate": 2.184570770499056e-06, | |
| "loss": 0.1522, | |
| "step": 870 | |
| }, | |
| { | |
| "epoch": 0.8079777365491652, | |
| "grad_norm": 4.936224937438965, | |
| "learning_rate": 2.1644067731931005e-06, | |
| "loss": 0.1679, | |
| "step": 871 | |
| }, | |
| { | |
| "epoch": 0.8089053803339518, | |
| "grad_norm": 4.338299751281738, | |
| "learning_rate": 2.1443249675523536e-06, | |
| "loss": 0.1705, | |
| "step": 872 | |
| }, | |
| { | |
| "epoch": 0.8098330241187384, | |
| "grad_norm": 3.631812334060669, | |
| "learning_rate": 2.124325564225458e-06, | |
| "loss": 0.1034, | |
| "step": 873 | |
| }, | |
| { | |
| "epoch": 0.8107606679035251, | |
| "grad_norm": 3.276409864425659, | |
| "learning_rate": 2.1044087729966856e-06, | |
| "loss": 0.1486, | |
| "step": 874 | |
| }, | |
| { | |
| "epoch": 0.8116883116883117, | |
| "grad_norm": 5.677032947540283, | |
| "learning_rate": 2.0845748027837585e-06, | |
| "loss": 0.2155, | |
| "step": 875 | |
| }, | |
| { | |
| "epoch": 0.8126159554730983, | |
| "grad_norm": 6.064774036407471, | |
| "learning_rate": 2.064823861635633e-06, | |
| "loss": 0.1773, | |
| "step": 876 | |
| }, | |
| { | |
| "epoch": 0.813543599257885, | |
| "grad_norm": 3.783052444458008, | |
| "learning_rate": 2.0451561567303378e-06, | |
| "loss": 0.1696, | |
| "step": 877 | |
| }, | |
| { | |
| "epoch": 0.8144712430426716, | |
| "grad_norm": 3.9778928756713867, | |
| "learning_rate": 2.025571894372794e-06, | |
| "loss": 0.0916, | |
| "step": 878 | |
| }, | |
| { | |
| "epoch": 0.8153988868274582, | |
| "grad_norm": 3.2632224559783936, | |
| "learning_rate": 2.0060712799926407e-06, | |
| "loss": 0.1004, | |
| "step": 879 | |
| }, | |
| { | |
| "epoch": 0.8163265306122449, | |
| "grad_norm": 5.624824047088623, | |
| "learning_rate": 1.9866545181421016e-06, | |
| "loss": 0.1826, | |
| "step": 880 | |
| }, | |
| { | |
| "epoch": 0.8163265306122449, | |
| "eval_accuracy": 0.8625277161862528, | |
| "eval_f1": 0.7089201877934272, | |
| "eval_loss": 0.30142825841903687, | |
| "eval_precision": 0.8728323699421965, | |
| "eval_recall": 0.5968379446640316, | |
| "eval_runtime": 47.5017, | |
| "eval_samples_per_second": 5.81, | |
| "eval_steps_per_second": 0.189, | |
| "step": 880 | |
| }, | |
| { | |
| "epoch": 0.8172541743970315, | |
| "grad_norm": 5.620955944061279, | |
| "learning_rate": 1.967321812493813e-06, | |
| "loss": 0.159, | |
| "step": 881 | |
| }, | |
| { | |
| "epoch": 0.8181818181818182, | |
| "grad_norm": 3.5917294025421143, | |
| "learning_rate": 1.9480733658387175e-06, | |
| "loss": 0.1544, | |
| "step": 882 | |
| }, | |
| { | |
| "epoch": 0.8191094619666048, | |
| "grad_norm": 4.478516101837158, | |
| "learning_rate": 1.9289093800839067e-06, | |
| "loss": 0.1835, | |
| "step": 883 | |
| }, | |
| { | |
| "epoch": 0.8200371057513914, | |
| "grad_norm": 5.72329044342041, | |
| "learning_rate": 1.9098300562505266e-06, | |
| "loss": 0.181, | |
| "step": 884 | |
| }, | |
| { | |
| "epoch": 0.8209647495361782, | |
| "grad_norm": 4.014090538024902, | |
| "learning_rate": 1.8908355944716516e-06, | |
| "loss": 0.123, | |
| "step": 885 | |
| }, | |
| { | |
| "epoch": 0.8218923933209648, | |
| "grad_norm": 3.9502549171447754, | |
| "learning_rate": 1.8719261939902023e-06, | |
| "loss": 0.1675, | |
| "step": 886 | |
| }, | |
| { | |
| "epoch": 0.8228200371057514, | |
| "grad_norm": 3.701931953430176, | |
| "learning_rate": 1.8531020531568377e-06, | |
| "loss": 0.1297, | |
| "step": 887 | |
| }, | |
| { | |
| "epoch": 0.8237476808905381, | |
| "grad_norm": 4.026066303253174, | |
| "learning_rate": 1.8343633694278895e-06, | |
| "loss": 0.1366, | |
| "step": 888 | |
| }, | |
| { | |
| "epoch": 0.8246753246753247, | |
| "grad_norm": 4.122823715209961, | |
| "learning_rate": 1.8157103393632869e-06, | |
| "loss": 0.1819, | |
| "step": 889 | |
| }, | |
| { | |
| "epoch": 0.8256029684601113, | |
| "grad_norm": 4.512097358703613, | |
| "learning_rate": 1.7971431586244814e-06, | |
| "loss": 0.2281, | |
| "step": 890 | |
| }, | |
| { | |
| "epoch": 0.826530612244898, | |
| "grad_norm": 3.6927201747894287, | |
| "learning_rate": 1.7786620219724205e-06, | |
| "loss": 0.0749, | |
| "step": 891 | |
| }, | |
| { | |
| "epoch": 0.8274582560296846, | |
| "grad_norm": 5.3355207443237305, | |
| "learning_rate": 1.7602671232654755e-06, | |
| "loss": 0.1781, | |
| "step": 892 | |
| }, | |
| { | |
| "epoch": 0.8283858998144712, | |
| "grad_norm": 3.310504674911499, | |
| "learning_rate": 1.7419586554574364e-06, | |
| "loss": 0.0927, | |
| "step": 893 | |
| }, | |
| { | |
| "epoch": 0.8293135435992579, | |
| "grad_norm": 3.6718716621398926, | |
| "learning_rate": 1.723736810595461e-06, | |
| "loss": 0.159, | |
| "step": 894 | |
| }, | |
| { | |
| "epoch": 0.8302411873840445, | |
| "grad_norm": 4.083915710449219, | |
| "learning_rate": 1.7056017798180824e-06, | |
| "loss": 0.1516, | |
| "step": 895 | |
| }, | |
| { | |
| "epoch": 0.8311688311688312, | |
| "grad_norm": 8.39147663116455, | |
| "learning_rate": 1.687553753353195e-06, | |
| "loss": 0.1548, | |
| "step": 896 | |
| }, | |
| { | |
| "epoch": 0.8320964749536178, | |
| "grad_norm": 3.83030366897583, | |
| "learning_rate": 1.669592920516049e-06, | |
| "loss": 0.1613, | |
| "step": 897 | |
| }, | |
| { | |
| "epoch": 0.8330241187384044, | |
| "grad_norm": 3.559238910675049, | |
| "learning_rate": 1.6517194697072903e-06, | |
| "loss": 0.1193, | |
| "step": 898 | |
| }, | |
| { | |
| "epoch": 0.8339517625231911, | |
| "grad_norm": 4.015861511230469, | |
| "learning_rate": 1.633933588410952e-06, | |
| "loss": 0.1383, | |
| "step": 899 | |
| }, | |
| { | |
| "epoch": 0.8348794063079777, | |
| "grad_norm": 4.48312520980835, | |
| "learning_rate": 1.6162354631925203e-06, | |
| "loss": 0.1538, | |
| "step": 900 | |
| }, | |
| { | |
| "epoch": 0.8348794063079777, | |
| "eval_accuracy": 0.8614190687361419, | |
| "eval_f1": 0.7016706443914081, | |
| "eval_loss": 0.3091951012611389, | |
| "eval_precision": 0.8855421686746988, | |
| "eval_recall": 0.5810276679841897, | |
| "eval_runtime": 47.4361, | |
| "eval_samples_per_second": 5.818, | |
| "eval_steps_per_second": 0.19, | |
| "step": 900 | |
| }, | |
| { | |
| "epoch": 0.8358070500927643, | |
| "grad_norm": 3.852740526199341, | |
| "learning_rate": 1.5986252796969482e-06, | |
| "loss": 0.1165, | |
| "step": 901 | |
| }, | |
| { | |
| "epoch": 0.8367346938775511, | |
| "grad_norm": 5.131833076477051, | |
| "learning_rate": 1.5811032226467304e-06, | |
| "loss": 0.198, | |
| "step": 902 | |
| }, | |
| { | |
| "epoch": 0.8376623376623377, | |
| "grad_norm": 4.975651741027832, | |
| "learning_rate": 1.5636694758399563e-06, | |
| "loss": 0.1891, | |
| "step": 903 | |
| }, | |
| { | |
| "epoch": 0.8385899814471243, | |
| "grad_norm": 3.24419903755188, | |
| "learning_rate": 1.5463242221483742e-06, | |
| "loss": 0.0935, | |
| "step": 904 | |
| }, | |
| { | |
| "epoch": 0.839517625231911, | |
| "grad_norm": 3.5641651153564453, | |
| "learning_rate": 1.5290676435154949e-06, | |
| "loss": 0.1533, | |
| "step": 905 | |
| }, | |
| { | |
| "epoch": 0.8404452690166976, | |
| "grad_norm": 3.872134208679199, | |
| "learning_rate": 1.511899920954656e-06, | |
| "loss": 0.1545, | |
| "step": 906 | |
| }, | |
| { | |
| "epoch": 0.8413729128014842, | |
| "grad_norm": 6.075543403625488, | |
| "learning_rate": 1.4948212345471492e-06, | |
| "loss": 0.2032, | |
| "step": 907 | |
| }, | |
| { | |
| "epoch": 0.8423005565862709, | |
| "grad_norm": 2.9056954383850098, | |
| "learning_rate": 1.4778317634403082e-06, | |
| "loss": 0.0986, | |
| "step": 908 | |
| }, | |
| { | |
| "epoch": 0.8432282003710575, | |
| "grad_norm": 5.516162872314453, | |
| "learning_rate": 1.460931685845649e-06, | |
| "loss": 0.1868, | |
| "step": 909 | |
| }, | |
| { | |
| "epoch": 0.8441558441558441, | |
| "grad_norm": 2.8610849380493164, | |
| "learning_rate": 1.4441211790369892e-06, | |
| "loss": 0.0923, | |
| "step": 910 | |
| }, | |
| { | |
| "epoch": 0.8450834879406308, | |
| "grad_norm": 3.700863838195801, | |
| "learning_rate": 1.427400419348588e-06, | |
| "loss": 0.1291, | |
| "step": 911 | |
| }, | |
| { | |
| "epoch": 0.8460111317254174, | |
| "grad_norm": 4.772455215454102, | |
| "learning_rate": 1.4107695821733026e-06, | |
| "loss": 0.1352, | |
| "step": 912 | |
| }, | |
| { | |
| "epoch": 0.8469387755102041, | |
| "grad_norm": 3.5742745399475098, | |
| "learning_rate": 1.3942288419607476e-06, | |
| "loss": 0.1824, | |
| "step": 913 | |
| }, | |
| { | |
| "epoch": 0.8478664192949907, | |
| "grad_norm": 8.259415626525879, | |
| "learning_rate": 1.3777783722154603e-06, | |
| "loss": 0.2448, | |
| "step": 914 | |
| }, | |
| { | |
| "epoch": 0.8487940630797773, | |
| "grad_norm": 3.900238513946533, | |
| "learning_rate": 1.3614183454950824e-06, | |
| "loss": 0.1273, | |
| "step": 915 | |
| }, | |
| { | |
| "epoch": 0.849721706864564, | |
| "grad_norm": 2.9773433208465576, | |
| "learning_rate": 1.3451489334085555e-06, | |
| "loss": 0.1522, | |
| "step": 916 | |
| }, | |
| { | |
| "epoch": 0.8506493506493507, | |
| "grad_norm": 3.071232318878174, | |
| "learning_rate": 1.3289703066143112e-06, | |
| "loss": 0.1256, | |
| "step": 917 | |
| }, | |
| { | |
| "epoch": 0.8515769944341373, | |
| "grad_norm": 3.8165667057037354, | |
| "learning_rate": 1.3128826348184886e-06, | |
| "loss": 0.1111, | |
| "step": 918 | |
| }, | |
| { | |
| "epoch": 0.852504638218924, | |
| "grad_norm": 3.7821688652038574, | |
| "learning_rate": 1.296886086773157e-06, | |
| "loss": 0.2091, | |
| "step": 919 | |
| }, | |
| { | |
| "epoch": 0.8534322820037106, | |
| "grad_norm": 4.833895206451416, | |
| "learning_rate": 1.2809808302745298e-06, | |
| "loss": 0.1762, | |
| "step": 920 | |
| }, | |
| { | |
| "epoch": 0.8534322820037106, | |
| "eval_accuracy": 0.8603104212860311, | |
| "eval_f1": 0.704225352112676, | |
| "eval_loss": 0.30113720893859863, | |
| "eval_precision": 0.8670520231213873, | |
| "eval_recall": 0.5928853754940712, | |
| "eval_runtime": 47.313, | |
| "eval_samples_per_second": 5.833, | |
| "eval_steps_per_second": 0.19, | |
| "step": 920 | |
| }, | |
| { | |
| "epoch": 0.8543599257884972, | |
| "grad_norm": 3.3207972049713135, | |
| "learning_rate": 1.2651670321612264e-06, | |
| "loss": 0.1367, | |
| "step": 921 | |
| }, | |
| { | |
| "epoch": 0.8552875695732839, | |
| "grad_norm": 3.202796697616577, | |
| "learning_rate": 1.249444858312502e-06, | |
| "loss": 0.1379, | |
| "step": 922 | |
| }, | |
| { | |
| "epoch": 0.8562152133580705, | |
| "grad_norm": 6.188356876373291, | |
| "learning_rate": 1.233814473646524e-06, | |
| "loss": 0.2627, | |
| "step": 923 | |
| }, | |
| { | |
| "epoch": 0.8571428571428571, | |
| "grad_norm": 3.4624321460723877, | |
| "learning_rate": 1.218276042118629e-06, | |
| "loss": 0.1318, | |
| "step": 924 | |
| }, | |
| { | |
| "epoch": 0.8580705009276438, | |
| "grad_norm": 3.288809061050415, | |
| "learning_rate": 1.202829726719611e-06, | |
| "loss": 0.1188, | |
| "step": 925 | |
| }, | |
| { | |
| "epoch": 0.8589981447124304, | |
| "grad_norm": 2.691675901412964, | |
| "learning_rate": 1.1874756894740137e-06, | |
| "loss": 0.1252, | |
| "step": 926 | |
| }, | |
| { | |
| "epoch": 0.859925788497217, | |
| "grad_norm": 3.750600576400757, | |
| "learning_rate": 1.1722140914384162e-06, | |
| "loss": 0.1644, | |
| "step": 927 | |
| }, | |
| { | |
| "epoch": 0.8608534322820037, | |
| "grad_norm": 3.1353397369384766, | |
| "learning_rate": 1.1570450926997657e-06, | |
| "loss": 0.1461, | |
| "step": 928 | |
| }, | |
| { | |
| "epoch": 0.8617810760667903, | |
| "grad_norm": 5.295469760894775, | |
| "learning_rate": 1.1419688523736761e-06, | |
| "loss": 0.1967, | |
| "step": 929 | |
| }, | |
| { | |
| "epoch": 0.862708719851577, | |
| "grad_norm": 3.461599349975586, | |
| "learning_rate": 1.1269855286027798e-06, | |
| "loss": 0.1426, | |
| "step": 930 | |
| }, | |
| { | |
| "epoch": 0.8636363636363636, | |
| "grad_norm": 6.9660420417785645, | |
| "learning_rate": 1.1120952785550477e-06, | |
| "loss": 0.2015, | |
| "step": 931 | |
| }, | |
| { | |
| "epoch": 0.8645640074211502, | |
| "grad_norm": 2.989213705062866, | |
| "learning_rate": 1.0972982584221592e-06, | |
| "loss": 0.1204, | |
| "step": 932 | |
| }, | |
| { | |
| "epoch": 0.865491651205937, | |
| "grad_norm": 4.492414474487305, | |
| "learning_rate": 1.0825946234178575e-06, | |
| "loss": 0.1579, | |
| "step": 933 | |
| }, | |
| { | |
| "epoch": 0.8664192949907236, | |
| "grad_norm": 4.693439960479736, | |
| "learning_rate": 1.067984527776309e-06, | |
| "loss": 0.1959, | |
| "step": 934 | |
| }, | |
| { | |
| "epoch": 0.8673469387755102, | |
| "grad_norm": 5.462426662445068, | |
| "learning_rate": 1.0534681247505107e-06, | |
| "loss": 0.1435, | |
| "step": 935 | |
| }, | |
| { | |
| "epoch": 0.8682745825602969, | |
| "grad_norm": 2.594604730606079, | |
| "learning_rate": 1.0390455666106547e-06, | |
| "loss": 0.115, | |
| "step": 936 | |
| }, | |
| { | |
| "epoch": 0.8692022263450835, | |
| "grad_norm": 5.900606155395508, | |
| "learning_rate": 1.024717004642557e-06, | |
| "loss": 0.1749, | |
| "step": 937 | |
| }, | |
| { | |
| "epoch": 0.8701298701298701, | |
| "grad_norm": 5.774359226226807, | |
| "learning_rate": 1.010482589146048e-06, | |
| "loss": 0.1802, | |
| "step": 938 | |
| }, | |
| { | |
| "epoch": 0.8710575139146568, | |
| "grad_norm": 4.002913951873779, | |
| "learning_rate": 9.963424694334122e-07, | |
| "loss": 0.1277, | |
| "step": 939 | |
| }, | |
| { | |
| "epoch": 0.8719851576994434, | |
| "grad_norm": 3.6173672676086426, | |
| "learning_rate": 9.822967938278172e-07, | |
| "loss": 0.1561, | |
| "step": 940 | |
| }, | |
| { | |
| "epoch": 0.8719851576994434, | |
| "eval_accuracy": 0.8603104212860311, | |
| "eval_f1": 0.704225352112676, | |
| "eval_loss": 0.29984721541404724, | |
| "eval_precision": 0.8670520231213873, | |
| "eval_recall": 0.5928853754940712, | |
| "eval_runtime": 48.0345, | |
| "eval_samples_per_second": 5.746, | |
| "eval_steps_per_second": 0.187, | |
| "step": 940 | |
| }, | |
| { | |
| "epoch": 0.87291280148423, | |
| "grad_norm": 5.298496723175049, | |
| "learning_rate": 9.683457096617487e-07, | |
| "loss": 0.1343, | |
| "step": 941 | |
| }, | |
| { | |
| "epoch": 0.8738404452690167, | |
| "grad_norm": 4.087591648101807, | |
| "learning_rate": 9.544893632754816e-07, | |
| "loss": 0.1342, | |
| "step": 942 | |
| }, | |
| { | |
| "epoch": 0.8747680890538033, | |
| "grad_norm": 3.6953861713409424, | |
| "learning_rate": 9.407279000155311e-07, | |
| "loss": 0.1125, | |
| "step": 943 | |
| }, | |
| { | |
| "epoch": 0.87569573283859, | |
| "grad_norm": 5.693349838256836, | |
| "learning_rate": 9.270614642331377e-07, | |
| "loss": 0.2285, | |
| "step": 944 | |
| }, | |
| { | |
| "epoch": 0.8766233766233766, | |
| "grad_norm": 4.321276664733887, | |
| "learning_rate": 9.134901992827427e-07, | |
| "loss": 0.2169, | |
| "step": 945 | |
| }, | |
| { | |
| "epoch": 0.8775510204081632, | |
| "grad_norm": 5.951560020446777, | |
| "learning_rate": 9.000142475204965e-07, | |
| "loss": 0.2039, | |
| "step": 946 | |
| }, | |
| { | |
| "epoch": 0.87847866419295, | |
| "grad_norm": 5.382765293121338, | |
| "learning_rate": 8.866337503027523e-07, | |
| "loss": 0.1347, | |
| "step": 947 | |
| }, | |
| { | |
| "epoch": 0.8794063079777366, | |
| "grad_norm": 4.566171646118164, | |
| "learning_rate": 8.733488479845997e-07, | |
| "loss": 0.1929, | |
| "step": 948 | |
| }, | |
| { | |
| "epoch": 0.8803339517625232, | |
| "grad_norm": 4.413459300994873, | |
| "learning_rate": 8.60159679918372e-07, | |
| "loss": 0.1463, | |
| "step": 949 | |
| }, | |
| { | |
| "epoch": 0.8812615955473099, | |
| "grad_norm": 3.8674092292785645, | |
| "learning_rate": 8.470663844522053e-07, | |
| "loss": 0.1523, | |
| "step": 950 | |
| }, | |
| { | |
| "epoch": 0.8821892393320965, | |
| "grad_norm": 3.844576597213745, | |
| "learning_rate": 8.340690989285727e-07, | |
| "loss": 0.1248, | |
| "step": 951 | |
| }, | |
| { | |
| "epoch": 0.8831168831168831, | |
| "grad_norm": 4.541808605194092, | |
| "learning_rate": 8.211679596828481e-07, | |
| "loss": 0.1571, | |
| "step": 952 | |
| }, | |
| { | |
| "epoch": 0.8840445269016698, | |
| "grad_norm": 3.0702145099639893, | |
| "learning_rate": 8.083631020418792e-07, | |
| "loss": 0.157, | |
| "step": 953 | |
| }, | |
| { | |
| "epoch": 0.8849721706864564, | |
| "grad_norm": 3.5125439167022705, | |
| "learning_rate": 7.956546603225601e-07, | |
| "loss": 0.1011, | |
| "step": 954 | |
| }, | |
| { | |
| "epoch": 0.885899814471243, | |
| "grad_norm": 4.256104469299316, | |
| "learning_rate": 7.830427678304353e-07, | |
| "loss": 0.1411, | |
| "step": 955 | |
| }, | |
| { | |
| "epoch": 0.8868274582560297, | |
| "grad_norm": 4.931686878204346, | |
| "learning_rate": 7.705275568582848e-07, | |
| "loss": 0.1953, | |
| "step": 956 | |
| }, | |
| { | |
| "epoch": 0.8877551020408163, | |
| "grad_norm": 5.233354091644287, | |
| "learning_rate": 7.581091586847522e-07, | |
| "loss": 0.2095, | |
| "step": 957 | |
| }, | |
| { | |
| "epoch": 0.8886827458256029, | |
| "grad_norm": 6.383068084716797, | |
| "learning_rate": 7.457877035729588e-07, | |
| "loss": 0.2274, | |
| "step": 958 | |
| }, | |
| { | |
| "epoch": 0.8896103896103896, | |
| "grad_norm": 2.8475682735443115, | |
| "learning_rate": 7.335633207691362e-07, | |
| "loss": 0.1336, | |
| "step": 959 | |
| }, | |
| { | |
| "epoch": 0.8905380333951762, | |
| "grad_norm": 3.393915891647339, | |
| "learning_rate": 7.21436138501278e-07, | |
| "loss": 0.1633, | |
| "step": 960 | |
| }, | |
| { | |
| "epoch": 0.8905380333951762, | |
| "eval_accuracy": 0.8569844789356984, | |
| "eval_f1": 0.6935866983372921, | |
| "eval_loss": 0.3064272701740265, | |
| "eval_precision": 0.8690476190476191, | |
| "eval_recall": 0.5770750988142292, | |
| "eval_runtime": 48.2701, | |
| "eval_samples_per_second": 5.718, | |
| "eval_steps_per_second": 0.186, | |
| "step": 960 | |
| }, | |
| { | |
| "epoch": 0.891465677179963, | |
| "grad_norm": 4.68550968170166, | |
| "learning_rate": 7.094062839777838e-07, | |
| "loss": 0.1854, | |
| "step": 961 | |
| }, | |
| { | |
| "epoch": 0.8923933209647495, | |
| "grad_norm": 5.072958946228027, | |
| "learning_rate": 6.974738833861383e-07, | |
| "loss": 0.1762, | |
| "step": 962 | |
| }, | |
| { | |
| "epoch": 0.8933209647495362, | |
| "grad_norm": 4.519327640533447, | |
| "learning_rate": 6.856390618915775e-07, | |
| "loss": 0.182, | |
| "step": 963 | |
| }, | |
| { | |
| "epoch": 0.8942486085343229, | |
| "grad_norm": 5.558988094329834, | |
| "learning_rate": 6.739019436357774e-07, | |
| "loss": 0.1665, | |
| "step": 964 | |
| }, | |
| { | |
| "epoch": 0.8951762523191095, | |
| "grad_norm": 2.263278007507324, | |
| "learning_rate": 6.622626517355557e-07, | |
| "loss": 0.1112, | |
| "step": 965 | |
| }, | |
| { | |
| "epoch": 0.8961038961038961, | |
| "grad_norm": 5.888603687286377, | |
| "learning_rate": 6.507213082815745e-07, | |
| "loss": 0.1455, | |
| "step": 966 | |
| }, | |
| { | |
| "epoch": 0.8970315398886828, | |
| "grad_norm": 5.091086387634277, | |
| "learning_rate": 6.392780343370686e-07, | |
| "loss": 0.1812, | |
| "step": 967 | |
| }, | |
| { | |
| "epoch": 0.8979591836734694, | |
| "grad_norm": 6.290548324584961, | |
| "learning_rate": 6.279329499365649e-07, | |
| "loss": 0.1527, | |
| "step": 968 | |
| }, | |
| { | |
| "epoch": 0.898886827458256, | |
| "grad_norm": 6.533473014831543, | |
| "learning_rate": 6.166861740846297e-07, | |
| "loss": 0.2105, | |
| "step": 969 | |
| }, | |
| { | |
| "epoch": 0.8998144712430427, | |
| "grad_norm": 3.4495279788970947, | |
| "learning_rate": 6.055378247546217e-07, | |
| "loss": 0.1222, | |
| "step": 970 | |
| }, | |
| { | |
| "epoch": 0.9007421150278293, | |
| "grad_norm": 5.290384769439697, | |
| "learning_rate": 5.94488018887448e-07, | |
| "loss": 0.2046, | |
| "step": 971 | |
| }, | |
| { | |
| "epoch": 0.9016697588126159, | |
| "grad_norm": 6.091614723205566, | |
| "learning_rate": 5.835368723903456e-07, | |
| "loss": 0.2643, | |
| "step": 972 | |
| }, | |
| { | |
| "epoch": 0.9025974025974026, | |
| "grad_norm": 4.488548278808594, | |
| "learning_rate": 5.726845001356573e-07, | |
| "loss": 0.1263, | |
| "step": 973 | |
| }, | |
| { | |
| "epoch": 0.9035250463821892, | |
| "grad_norm": 2.7875099182128906, | |
| "learning_rate": 5.619310159596358e-07, | |
| "loss": 0.0922, | |
| "step": 974 | |
| }, | |
| { | |
| "epoch": 0.9044526901669759, | |
| "grad_norm": 5.558516025543213, | |
| "learning_rate": 5.51276532661238e-07, | |
| "loss": 0.2045, | |
| "step": 975 | |
| }, | |
| { | |
| "epoch": 0.9053803339517625, | |
| "grad_norm": 5.901011943817139, | |
| "learning_rate": 5.407211620009545e-07, | |
| "loss": 0.1743, | |
| "step": 976 | |
| }, | |
| { | |
| "epoch": 0.9063079777365491, | |
| "grad_norm": 3.838674783706665, | |
| "learning_rate": 5.30265014699628e-07, | |
| "loss": 0.1728, | |
| "step": 977 | |
| }, | |
| { | |
| "epoch": 0.9072356215213359, | |
| "grad_norm": 3.811453104019165, | |
| "learning_rate": 5.199082004372958e-07, | |
| "loss": 0.153, | |
| "step": 978 | |
| }, | |
| { | |
| "epoch": 0.9081632653061225, | |
| "grad_norm": 5.14892578125, | |
| "learning_rate": 5.096508278520385e-07, | |
| "loss": 0.1991, | |
| "step": 979 | |
| }, | |
| { | |
| "epoch": 0.9090909090909091, | |
| "grad_norm": 3.6292712688446045, | |
| "learning_rate": 4.994930045388414e-07, | |
| "loss": 0.1452, | |
| "step": 980 | |
| }, | |
| { | |
| "epoch": 0.9090909090909091, | |
| "eval_accuracy": 0.8603104212860311, | |
| "eval_f1": 0.7028301886792453, | |
| "eval_loss": 0.3034472167491913, | |
| "eval_precision": 0.8713450292397661, | |
| "eval_recall": 0.5889328063241107, | |
| "eval_runtime": 48.7572, | |
| "eval_samples_per_second": 5.661, | |
| "eval_steps_per_second": 0.185, | |
| "step": 980 | |
| }, | |
| { | |
| "epoch": 0.9100185528756958, | |
| "grad_norm": 3.438109874725342, | |
| "learning_rate": 4.894348370484648e-07, | |
| "loss": 0.1054, | |
| "step": 981 | |
| }, | |
| { | |
| "epoch": 0.9109461966604824, | |
| "grad_norm": 5.481462478637695, | |
| "learning_rate": 4.794764308863242e-07, | |
| "loss": 0.1463, | |
| "step": 982 | |
| }, | |
| { | |
| "epoch": 0.911873840445269, | |
| "grad_norm": 6.784456253051758, | |
| "learning_rate": 4.696178905113913e-07, | |
| "loss": 0.1634, | |
| "step": 983 | |
| }, | |
| { | |
| "epoch": 0.9128014842300557, | |
| "grad_norm": 3.902355194091797, | |
| "learning_rate": 4.5985931933508757e-07, | |
| "loss": 0.1689, | |
| "step": 984 | |
| }, | |
| { | |
| "epoch": 0.9137291280148423, | |
| "grad_norm": 4.524623394012451, | |
| "learning_rate": 4.502008197202068e-07, | |
| "loss": 0.1428, | |
| "step": 985 | |
| }, | |
| { | |
| "epoch": 0.9146567717996289, | |
| "grad_norm": 3.976349353790283, | |
| "learning_rate": 4.406424929798403e-07, | |
| "loss": 0.1864, | |
| "step": 986 | |
| }, | |
| { | |
| "epoch": 0.9155844155844156, | |
| "grad_norm": 3.3905527591705322, | |
| "learning_rate": 4.3118443937631094e-07, | |
| "loss": 0.1719, | |
| "step": 987 | |
| }, | |
| { | |
| "epoch": 0.9165120593692022, | |
| "grad_norm": 4.1316938400268555, | |
| "learning_rate": 4.218267581201296e-07, | |
| "loss": 0.1124, | |
| "step": 988 | |
| }, | |
| { | |
| "epoch": 0.9174397031539888, | |
| "grad_norm": 5.6381754875183105, | |
| "learning_rate": 4.125695473689406e-07, | |
| "loss": 0.1994, | |
| "step": 989 | |
| }, | |
| { | |
| "epoch": 0.9183673469387755, | |
| "grad_norm": 7.625948905944824, | |
| "learning_rate": 4.034129042265067e-07, | |
| "loss": 0.2211, | |
| "step": 990 | |
| }, | |
| { | |
| "epoch": 0.9192949907235621, | |
| "grad_norm": 3.567246437072754, | |
| "learning_rate": 3.943569247416801e-07, | |
| "loss": 0.1359, | |
| "step": 991 | |
| }, | |
| { | |
| "epoch": 0.9202226345083488, | |
| "grad_norm": 4.336119174957275, | |
| "learning_rate": 3.8540170390740097e-07, | |
| "loss": 0.1519, | |
| "step": 992 | |
| }, | |
| { | |
| "epoch": 0.9211502782931354, | |
| "grad_norm": 4.9389848709106445, | |
| "learning_rate": 3.7654733565969826e-07, | |
| "loss": 0.1874, | |
| "step": 993 | |
| }, | |
| { | |
| "epoch": 0.922077922077922, | |
| "grad_norm": 3.25769305229187, | |
| "learning_rate": 3.67793912876705e-07, | |
| "loss": 0.1191, | |
| "step": 994 | |
| }, | |
| { | |
| "epoch": 0.9230055658627088, | |
| "grad_norm": 3.4334826469421387, | |
| "learning_rate": 3.591415273776855e-07, | |
| "loss": 0.1012, | |
| "step": 995 | |
| }, | |
| { | |
| "epoch": 0.9239332096474954, | |
| "grad_norm": 3.1981468200683594, | |
| "learning_rate": 3.5059026992206645e-07, | |
| "loss": 0.0812, | |
| "step": 996 | |
| }, | |
| { | |
| "epoch": 0.924860853432282, | |
| "grad_norm": 5.118222236633301, | |
| "learning_rate": 3.421402302084953e-07, | |
| "loss": 0.1293, | |
| "step": 997 | |
| }, | |
| { | |
| "epoch": 0.9257884972170687, | |
| "grad_norm": 4.047184944152832, | |
| "learning_rate": 3.3379149687388866e-07, | |
| "loss": 0.1723, | |
| "step": 998 | |
| }, | |
| { | |
| "epoch": 0.9267161410018553, | |
| "grad_norm": 7.083133220672607, | |
| "learning_rate": 3.255441574925089e-07, | |
| "loss": 0.2061, | |
| "step": 999 | |
| }, | |
| { | |
| "epoch": 0.9276437847866419, | |
| "grad_norm": 2.8097355365753174, | |
| "learning_rate": 3.1739829857504235e-07, | |
| "loss": 0.086, | |
| "step": 1000 | |
| }, | |
| { | |
| "epoch": 0.9276437847866419, | |
| "eval_accuracy": 0.8580931263858093, | |
| "eval_f1": 0.6966824644549763, | |
| "eval_loss": 0.30505669116973877, | |
| "eval_precision": 0.8698224852071006, | |
| "eval_recall": 0.5810276679841897, | |
| "eval_runtime": 47.8654, | |
| "eval_samples_per_second": 5.766, | |
| "eval_steps_per_second": 0.188, | |
| "step": 1000 | |
| }, | |
| { | |
| "epoch": 0.9285714285714286, | |
| "grad_norm": 6.499805927276611, | |
| "learning_rate": 3.093540055676958e-07, | |
| "loss": 0.2015, | |
| "step": 1001 | |
| }, | |
| { | |
| "epoch": 0.9294990723562152, | |
| "grad_norm": 4.742324352264404, | |
| "learning_rate": 3.0141136285129825e-07, | |
| "loss": 0.1585, | |
| "step": 1002 | |
| }, | |
| { | |
| "epoch": 0.9304267161410018, | |
| "grad_norm": 4.395940780639648, | |
| "learning_rate": 2.935704537404083e-07, | |
| "loss": 0.1249, | |
| "step": 1003 | |
| }, | |
| { | |
| "epoch": 0.9313543599257885, | |
| "grad_norm": 3.036573886871338, | |
| "learning_rate": 2.8583136048245697e-07, | |
| "loss": 0.1331, | |
| "step": 1004 | |
| }, | |
| { | |
| "epoch": 0.9322820037105751, | |
| "grad_norm": 4.401485919952393, | |
| "learning_rate": 2.781941642568686e-07, | |
| "loss": 0.2138, | |
| "step": 1005 | |
| }, | |
| { | |
| "epoch": 0.9332096474953617, | |
| "grad_norm": 4.973133087158203, | |
| "learning_rate": 2.706589451742181e-07, | |
| "loss": 0.2253, | |
| "step": 1006 | |
| }, | |
| { | |
| "epoch": 0.9341372912801484, | |
| "grad_norm": 6.711733818054199, | |
| "learning_rate": 2.632257822753881e-07, | |
| "loss": 0.2465, | |
| "step": 1007 | |
| }, | |
| { | |
| "epoch": 0.935064935064935, | |
| "grad_norm": 3.2245848178863525, | |
| "learning_rate": 2.5589475353073987e-07, | |
| "loss": 0.1524, | |
| "step": 1008 | |
| }, | |
| { | |
| "epoch": 0.9359925788497218, | |
| "grad_norm": 3.8495306968688965, | |
| "learning_rate": 2.486659358392951e-07, | |
| "loss": 0.1646, | |
| "step": 1009 | |
| }, | |
| { | |
| "epoch": 0.9369202226345084, | |
| "grad_norm": 5.713381290435791, | |
| "learning_rate": 2.4153940502793185e-07, | |
| "loss": 0.2161, | |
| "step": 1010 | |
| }, | |
| { | |
| "epoch": 0.937847866419295, | |
| "grad_norm": 3.7789957523345947, | |
| "learning_rate": 2.3451523585058756e-07, | |
| "loss": 0.1509, | |
| "step": 1011 | |
| }, | |
| { | |
| "epoch": 0.9387755102040817, | |
| "grad_norm": 4.0073652267456055, | |
| "learning_rate": 2.2759350198746978e-07, | |
| "loss": 0.1402, | |
| "step": 1012 | |
| }, | |
| { | |
| "epoch": 0.9397031539888683, | |
| "grad_norm": 3.3210415840148926, | |
| "learning_rate": 2.2077427604429435e-07, | |
| "loss": 0.1431, | |
| "step": 1013 | |
| }, | |
| { | |
| "epoch": 0.9406307977736549, | |
| "grad_norm": 5.2902750968933105, | |
| "learning_rate": 2.1405762955151178e-07, | |
| "loss": 0.1239, | |
| "step": 1014 | |
| }, | |
| { | |
| "epoch": 0.9415584415584416, | |
| "grad_norm": 5.931840419769287, | |
| "learning_rate": 2.0744363296356872e-07, | |
| "loss": 0.1965, | |
| "step": 1015 | |
| }, | |
| { | |
| "epoch": 0.9424860853432282, | |
| "grad_norm": 3.9065020084381104, | |
| "learning_rate": 2.009323556581566e-07, | |
| "loss": 0.1344, | |
| "step": 1016 | |
| }, | |
| { | |
| "epoch": 0.9434137291280148, | |
| "grad_norm": 3.194225311279297, | |
| "learning_rate": 1.9452386593549534e-07, | |
| "loss": 0.0979, | |
| "step": 1017 | |
| }, | |
| { | |
| "epoch": 0.9443413729128015, | |
| "grad_norm": 3.794304847717285, | |
| "learning_rate": 1.8821823101760949e-07, | |
| "loss": 0.2038, | |
| "step": 1018 | |
| }, | |
| { | |
| "epoch": 0.9452690166975881, | |
| "grad_norm": 3.638219118118286, | |
| "learning_rate": 1.8201551704762453e-07, | |
| "loss": 0.1254, | |
| "step": 1019 | |
| }, | |
| { | |
| "epoch": 0.9461966604823747, | |
| "grad_norm": 4.820856094360352, | |
| "learning_rate": 1.7591578908907724e-07, | |
| "loss": 0.1909, | |
| "step": 1020 | |
| }, | |
| { | |
| "epoch": 0.9461966604823747, | |
| "eval_accuracy": 0.8580931263858093, | |
| "eval_f1": 0.6966824644549763, | |
| "eval_loss": 0.3055438697338104, | |
| "eval_precision": 0.8698224852071006, | |
| "eval_recall": 0.5810276679841897, | |
| "eval_runtime": 47.7431, | |
| "eval_samples_per_second": 5.781, | |
| "eval_steps_per_second": 0.189, | |
| "step": 1020 | |
| }, | |
| { | |
| "epoch": 0.9471243042671614, | |
| "grad_norm": 3.5032293796539307, | |
| "learning_rate": 1.699191111252241e-07, | |
| "loss": 0.077, | |
| "step": 1021 | |
| }, | |
| { | |
| "epoch": 0.948051948051948, | |
| "grad_norm": 3.7761011123657227, | |
| "learning_rate": 1.6402554605838173e-07, | |
| "loss": 0.1564, | |
| "step": 1022 | |
| }, | |
| { | |
| "epoch": 0.9489795918367347, | |
| "grad_norm": 5.951882839202881, | |
| "learning_rate": 1.5823515570925763e-07, | |
| "loss": 0.2323, | |
| "step": 1023 | |
| }, | |
| { | |
| "epoch": 0.9499072356215214, | |
| "grad_norm": 4.935650825500488, | |
| "learning_rate": 1.5254800081630828e-07, | |
| "loss": 0.2172, | |
| "step": 1024 | |
| }, | |
| { | |
| "epoch": 0.950834879406308, | |
| "grad_norm": 5.279281139373779, | |
| "learning_rate": 1.469641410350964e-07, | |
| "loss": 0.1845, | |
| "step": 1025 | |
| }, | |
| { | |
| "epoch": 0.9517625231910947, | |
| "grad_norm": 5.034005641937256, | |
| "learning_rate": 1.4148363493766803e-07, | |
| "loss": 0.174, | |
| "step": 1026 | |
| }, | |
| { | |
| "epoch": 0.9526901669758813, | |
| "grad_norm": 3.964360237121582, | |
| "learning_rate": 1.361065400119399e-07, | |
| "loss": 0.0888, | |
| "step": 1027 | |
| }, | |
| { | |
| "epoch": 0.9536178107606679, | |
| "grad_norm": 4.862616062164307, | |
| "learning_rate": 1.30832912661093e-07, | |
| "loss": 0.2007, | |
| "step": 1028 | |
| }, | |
| { | |
| "epoch": 0.9545454545454546, | |
| "grad_norm": 4.140252590179443, | |
| "learning_rate": 1.2566280820298427e-07, | |
| "loss": 0.13, | |
| "step": 1029 | |
| }, | |
| { | |
| "epoch": 0.9554730983302412, | |
| "grad_norm": 5.299205780029297, | |
| "learning_rate": 1.2059628086956044e-07, | |
| "loss": 0.1795, | |
| "step": 1030 | |
| }, | |
| { | |
| "epoch": 0.9564007421150278, | |
| "grad_norm": 5.694372653961182, | |
| "learning_rate": 1.1563338380629618e-07, | |
| "loss": 0.2278, | |
| "step": 1031 | |
| }, | |
| { | |
| "epoch": 0.9573283858998145, | |
| "grad_norm": 4.10621452331543, | |
| "learning_rate": 1.1077416907163573e-07, | |
| "loss": 0.1832, | |
| "step": 1032 | |
| }, | |
| { | |
| "epoch": 0.9582560296846011, | |
| "grad_norm": 3.0105836391448975, | |
| "learning_rate": 1.0601868763643997e-07, | |
| "loss": 0.1126, | |
| "step": 1033 | |
| }, | |
| { | |
| "epoch": 0.9591836734693877, | |
| "grad_norm": 5.412391185760498, | |
| "learning_rate": 1.0136698938346012e-07, | |
| "loss": 0.1996, | |
| "step": 1034 | |
| }, | |
| { | |
| "epoch": 0.9601113172541744, | |
| "grad_norm": 3.507596492767334, | |
| "learning_rate": 9.68191231068083e-08, | |
| "loss": 0.1647, | |
| "step": 1035 | |
| }, | |
| { | |
| "epoch": 0.961038961038961, | |
| "grad_norm": 4.733442783355713, | |
| "learning_rate": 9.237513651145224e-08, | |
| "loss": 0.102, | |
| "step": 1036 | |
| }, | |
| { | |
| "epoch": 0.9619666048237476, | |
| "grad_norm": 6.855641841888428, | |
| "learning_rate": 8.80350762127058e-08, | |
| "loss": 0.1197, | |
| "step": 1037 | |
| }, | |
| { | |
| "epoch": 0.9628942486085343, | |
| "grad_norm": 3.809262275695801, | |
| "learning_rate": 8.379898773574924e-08, | |
| "loss": 0.1287, | |
| "step": 1038 | |
| }, | |
| { | |
| "epoch": 0.963821892393321, | |
| "grad_norm": 3.4764761924743652, | |
| "learning_rate": 7.966691551514527e-08, | |
| "loss": 0.1106, | |
| "step": 1039 | |
| }, | |
| { | |
| "epoch": 0.9647495361781077, | |
| "grad_norm": 5.395627021789551, | |
| "learning_rate": 7.563890289437825e-08, | |
| "loss": 0.2017, | |
| "step": 1040 | |
| }, | |
| { | |
| "epoch": 0.9647495361781077, | |
| "eval_accuracy": 0.8580931263858093, | |
| "eval_f1": 0.6952380952380952, | |
| "eval_loss": 0.30581432580947876, | |
| "eval_precision": 0.874251497005988, | |
| "eval_recall": 0.5770750988142292, | |
| "eval_runtime": 48.746, | |
| "eval_samples_per_second": 5.662, | |
| "eval_steps_per_second": 0.185, | |
| "step": 1040 | |
| }, | |
| { | |
| "epoch": 0.9656771799628943, | |
| "grad_norm": 3.9932026863098145, | |
| "learning_rate": 7.171499212539124e-08, | |
| "loss": 0.1513, | |
| "step": 1041 | |
| }, | |
| { | |
| "epoch": 0.9666048237476809, | |
| "grad_norm": 5.8107075691223145, | |
| "learning_rate": 6.78952243681541e-08, | |
| "loss": 0.172, | |
| "step": 1042 | |
| }, | |
| { | |
| "epoch": 0.9675324675324676, | |
| "grad_norm": 3.7357017993927, | |
| "learning_rate": 6.417963969022389e-08, | |
| "loss": 0.1422, | |
| "step": 1043 | |
| }, | |
| { | |
| "epoch": 0.9684601113172542, | |
| "grad_norm": 3.854876756668091, | |
| "learning_rate": 6.056827706632185e-08, | |
| "loss": 0.1587, | |
| "step": 1044 | |
| }, | |
| { | |
| "epoch": 0.9693877551020408, | |
| "grad_norm": 6.006348133087158, | |
| "learning_rate": 5.7061174377937015e-08, | |
| "loss": 0.2244, | |
| "step": 1045 | |
| }, | |
| { | |
| "epoch": 0.9703153988868275, | |
| "grad_norm": 4.745636463165283, | |
| "learning_rate": 5.365836841291439e-08, | |
| "loss": 0.1803, | |
| "step": 1046 | |
| }, | |
| { | |
| "epoch": 0.9712430426716141, | |
| "grad_norm": 3.8510711193084717, | |
| "learning_rate": 5.035989486508075e-08, | |
| "loss": 0.1635, | |
| "step": 1047 | |
| }, | |
| { | |
| "epoch": 0.9721706864564007, | |
| "grad_norm": 5.504276752471924, | |
| "learning_rate": 4.716578833386054e-08, | |
| "loss": 0.1517, | |
| "step": 1048 | |
| }, | |
| { | |
| "epoch": 0.9730983302411874, | |
| "grad_norm": 3.3400299549102783, | |
| "learning_rate": 4.4076082323920576e-08, | |
| "loss": 0.1494, | |
| "step": 1049 | |
| }, | |
| { | |
| "epoch": 0.974025974025974, | |
| "grad_norm": 5.584471225738525, | |
| "learning_rate": 4.109080924481479e-08, | |
| "loss": 0.1781, | |
| "step": 1050 | |
| }, | |
| { | |
| "epoch": 0.9749536178107606, | |
| "grad_norm": 4.575666904449463, | |
| "learning_rate": 3.82100004106456e-08, | |
| "loss": 0.1298, | |
| "step": 1051 | |
| }, | |
| { | |
| "epoch": 0.9758812615955473, | |
| "grad_norm": 3.4456560611724854, | |
| "learning_rate": 3.543368603973529e-08, | |
| "loss": 0.1292, | |
| "step": 1052 | |
| }, | |
| { | |
| "epoch": 0.9768089053803339, | |
| "grad_norm": 2.841853618621826, | |
| "learning_rate": 3.2761895254306285e-08, | |
| "loss": 0.076, | |
| "step": 1053 | |
| }, | |
| { | |
| "epoch": 0.9777365491651205, | |
| "grad_norm": 4.662397384643555, | |
| "learning_rate": 3.019465608018024e-08, | |
| "loss": 0.2181, | |
| "step": 1054 | |
| }, | |
| { | |
| "epoch": 0.9786641929499073, | |
| "grad_norm": 3.259526014328003, | |
| "learning_rate": 2.773199544648164e-08, | |
| "loss": 0.108, | |
| "step": 1055 | |
| }, | |
| { | |
| "epoch": 0.9795918367346939, | |
| "grad_norm": 4.32330322265625, | |
| "learning_rate": 2.537393918535358e-08, | |
| "loss": 0.2068, | |
| "step": 1056 | |
| }, | |
| { | |
| "epoch": 0.9805194805194806, | |
| "grad_norm": 4.7618536949157715, | |
| "learning_rate": 2.312051203169352e-08, | |
| "loss": 0.1936, | |
| "step": 1057 | |
| }, | |
| { | |
| "epoch": 0.9814471243042672, | |
| "grad_norm": 4.779612064361572, | |
| "learning_rate": 2.0971737622883515e-08, | |
| "loss": 0.1007, | |
| "step": 1058 | |
| }, | |
| { | |
| "epoch": 0.9823747680890538, | |
| "grad_norm": 4.346301078796387, | |
| "learning_rate": 1.8927638498551502e-08, | |
| "loss": 0.1594, | |
| "step": 1059 | |
| }, | |
| { | |
| "epoch": 0.9833024118738405, | |
| "grad_norm": 4.017016410827637, | |
| "learning_rate": 1.698823610032929e-08, | |
| "loss": 0.1828, | |
| "step": 1060 | |
| }, | |
| { | |
| "epoch": 0.9833024118738405, | |
| "eval_accuracy": 0.8603104212860311, | |
| "eval_f1": 0.7028301886792453, | |
| "eval_loss": 0.306577205657959, | |
| "eval_precision": 0.8713450292397661, | |
| "eval_recall": 0.5889328063241107, | |
| "eval_runtime": 47.7001, | |
| "eval_samples_per_second": 5.786, | |
| "eval_steps_per_second": 0.189, | |
| "step": 1060 | |
| }, | |
| { | |
| "epoch": 0.9842300556586271, | |
| "grad_norm": 5.6250505447387695, | |
| "learning_rate": 1.5153550771630498e-08, | |
| "loss": 0.17, | |
| "step": 1061 | |
| }, | |
| { | |
| "epoch": 0.9851576994434137, | |
| "grad_norm": 5.426425933837891, | |
| "learning_rate": 1.3423601757436289e-08, | |
| "loss": 0.2122, | |
| "step": 1062 | |
| }, | |
| { | |
| "epoch": 0.9860853432282004, | |
| "grad_norm": 4.71135950088501, | |
| "learning_rate": 1.179840720409331e-08, | |
| "loss": 0.1715, | |
| "step": 1063 | |
| }, | |
| { | |
| "epoch": 0.987012987012987, | |
| "grad_norm": 4.350978851318359, | |
| "learning_rate": 1.0277984159122734e-08, | |
| "loss": 0.1704, | |
| "step": 1064 | |
| }, | |
| { | |
| "epoch": 0.9879406307977736, | |
| "grad_norm": 5.211360931396484, | |
| "learning_rate": 8.862348571043733e-09, | |
| "loss": 0.166, | |
| "step": 1065 | |
| }, | |
| { | |
| "epoch": 0.9888682745825603, | |
| "grad_norm": 4.015779495239258, | |
| "learning_rate": 7.551515289203615e-09, | |
| "loss": 0.1616, | |
| "step": 1066 | |
| }, | |
| { | |
| "epoch": 0.9897959183673469, | |
| "grad_norm": 4.356948375701904, | |
| "learning_rate": 6.345498063622391e-09, | |
| "loss": 0.1961, | |
| "step": 1067 | |
| }, | |
| { | |
| "epoch": 0.9907235621521335, | |
| "grad_norm": 6.508297920227051, | |
| "learning_rate": 5.2443095448506674e-09, | |
| "loss": 0.1935, | |
| "step": 1068 | |
| }, | |
| { | |
| "epoch": 0.9916512059369202, | |
| "grad_norm": 4.595229625701904, | |
| "learning_rate": 4.247961283835311e-09, | |
| "loss": 0.2116, | |
| "step": 1069 | |
| }, | |
| { | |
| "epoch": 0.9925788497217068, | |
| "grad_norm": 4.39501428604126, | |
| "learning_rate": 3.3564637317984318e-09, | |
| "loss": 0.1568, | |
| "step": 1070 | |
| }, | |
| { | |
| "epoch": 0.9935064935064936, | |
| "grad_norm": 4.391909599304199, | |
| "learning_rate": 2.5698262401263607e-09, | |
| "loss": 0.1553, | |
| "step": 1071 | |
| }, | |
| { | |
| "epoch": 0.9944341372912802, | |
| "grad_norm": 3.195699453353882, | |
| "learning_rate": 1.888057060274173e-09, | |
| "loss": 0.1469, | |
| "step": 1072 | |
| }, | |
| { | |
| "epoch": 0.9953617810760668, | |
| "grad_norm": 5.479938507080078, | |
| "learning_rate": 1.3111633436779792e-09, | |
| "loss": 0.1662, | |
| "step": 1073 | |
| }, | |
| { | |
| "epoch": 0.9962894248608535, | |
| "grad_norm": 4.181588172912598, | |
| "learning_rate": 8.391511416816489e-10, | |
| "loss": 0.1746, | |
| "step": 1074 | |
| }, | |
| { | |
| "epoch": 0.9972170686456401, | |
| "grad_norm": 4.144800662994385, | |
| "learning_rate": 4.720254054679796e-10, | |
| "loss": 0.1624, | |
| "step": 1075 | |
| }, | |
| { | |
| "epoch": 0.9981447124304267, | |
| "grad_norm": 3.857682228088379, | |
| "learning_rate": 2.0978998601206558e-10, | |
| "loss": 0.1293, | |
| "step": 1076 | |
| }, | |
| { | |
| "epoch": 0.9990723562152134, | |
| "grad_norm": 5.734769344329834, | |
| "learning_rate": 5.244763404133046e-11, | |
| "loss": 0.1897, | |
| "step": 1077 | |
| }, | |
| { | |
| "epoch": 1.0, | |
| "grad_norm": 3.7049508094787598, | |
| "learning_rate": 0.0, | |
| "loss": 0.1706, | |
| "step": 1078 | |
| } | |
| ], | |
| "logging_steps": 1, | |
| "max_steps": 1078, | |
| "num_input_tokens_seen": 0, | |
| "num_train_epochs": 1, | |
| "save_steps": 100, | |
| "stateful_callbacks": { | |
| "TrainerControl": { | |
| "args": { | |
| "should_epoch_stop": false, | |
| "should_evaluate": false, | |
| "should_log": false, | |
| "should_save": true, | |
| "should_training_stop": true | |
| }, | |
| "attributes": {} | |
| } | |
| }, | |
| "total_flos": 3.4499307937307034e+17, | |
| "train_batch_size": 8, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |
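
Since the above is the raw `trainer_state.json` dump, the run is easier to read back programmatically than by eye. Below is a minimal Python sketch that does so; it assumes the file is saved as `trainer_state.json` in the working directory (the standard name the `transformers` Trainer writes inside each checkpoint directory), and the field names it reads (`log_history`, `eval_f1`, `max_steps`, `logging_steps`) are taken directly from the structure shown above.

```python
import json

# Minimal sketch: summarize the Trainer log above.
# Assumption: the JSON dump is saved as "trainer_state.json"
# in the current directory.
with open("trainer_state.json") as f:
    state = json.load(f)

# "log_history" interleaves per-step training records (keyed by "loss")
# with periodic evaluation records (keyed by "eval_loss").
train_log = [r for r in state["log_history"]
             if "loss" in r and "eval_loss" not in r]
eval_log = [r for r in state["log_history"] if "eval_loss" in r]

# Pick the evaluation record with the highest F1 score.
best = max(eval_log, key=lambda r: r["eval_f1"])

print(f"train records: {len(train_log)} (max_steps={state['max_steps']}, "
      f"logged every {state['logging_steps']} step(s))")
print(f"eval records:  {len(eval_log)}")
print(f"best eval_f1:  {best['eval_f1']:.4f} at step {best['step']}")
print(f"  accuracy={best['eval_accuracy']:.4f}  "
      f"precision={best['eval_precision']:.4f}  "
      f"recall={best['eval_recall']:.4f}")
```

For the portion of the run shown here, F1 plateaus around 0.70 from step 860 onward (precision near 0.87, recall between 0.58 and 0.60), peaking at 0.7089 at step 880; with `save_steps` set to 100, the saved checkpoint nearest that peak would be the one at step 900.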