{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.008237986270022883,
"eval_steps": 50,
"global_step": 450,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 1.8306636155606407e-05,
"grad_norm": 6.099522590637207,
"learning_rate": 0.0,
"loss": 0.6392,
"step": 1
},
{
"epoch": 3.6613272311212814e-05,
"grad_norm": 8.014727592468262,
"learning_rate": 2.0000000000000002e-07,
"loss": 0.8375,
"step": 2
},
{
"epoch": 5.491990846681922e-05,
"grad_norm": 10.58983039855957,
"learning_rate": 4.0000000000000003e-07,
"loss": 0.5021,
"step": 3
},
{
"epoch": 7.322654462242563e-05,
"grad_norm": 6.642515659332275,
"learning_rate": 6.000000000000001e-07,
"loss": 0.7624,
"step": 4
},
{
"epoch": 9.153318077803204e-05,
"grad_norm": 8.307601928710938,
"learning_rate": 8.000000000000001e-07,
"loss": 1.005,
"step": 5
},
{
"epoch": 0.00010983981693363844,
"grad_norm": 11.282310485839844,
"learning_rate": 1.0000000000000002e-06,
"loss": 0.939,
"step": 6
},
{
"epoch": 0.00012814645308924485,
"grad_norm": 9.1802339553833,
"learning_rate": 1.2000000000000002e-06,
"loss": 1.0419,
"step": 7
},
{
"epoch": 0.00014645308924485126,
"grad_norm": 2.651777744293213,
"learning_rate": 1.4000000000000001e-06,
"loss": 0.4708,
"step": 8
},
{
"epoch": 0.00016475972540045766,
"grad_norm": 7.600271701812744,
"learning_rate": 1.6000000000000001e-06,
"loss": 0.6082,
"step": 9
},
{
"epoch": 0.00018306636155606407,
"grad_norm": 6.370179653167725,
"learning_rate": 1.8000000000000001e-06,
"loss": 0.8817,
"step": 10
},
{
"epoch": 0.00020137299771167048,
"grad_norm": 7.1832499504089355,
"learning_rate": 2.0000000000000003e-06,
"loss": 0.5443,
"step": 11
},
{
"epoch": 0.00021967963386727689,
"grad_norm": 6.7331223487854,
"learning_rate": 2.2e-06,
"loss": 0.5602,
"step": 12
},
{
"epoch": 0.0002379862700228833,
"grad_norm": 8.009416580200195,
"learning_rate": 2.4000000000000003e-06,
"loss": 0.741,
"step": 13
},
{
"epoch": 0.0002562929061784897,
"grad_norm": 9.607633590698242,
"learning_rate": 2.6e-06,
"loss": 0.7022,
"step": 14
},
{
"epoch": 0.00027459954233409613,
"grad_norm": 10.585896492004395,
"learning_rate": 2.8000000000000003e-06,
"loss": 1.1929,
"step": 15
},
{
"epoch": 0.0002929061784897025,
"grad_norm": 3.4262940883636475,
"learning_rate": 3e-06,
"loss": 0.5217,
"step": 16
},
{
"epoch": 0.00031121281464530895,
"grad_norm": 11.94228458404541,
"learning_rate": 3.2000000000000003e-06,
"loss": 1.0208,
"step": 17
},
{
"epoch": 0.00032951945080091533,
"grad_norm": 8.004399299621582,
"learning_rate": 3.4000000000000005e-06,
"loss": 0.5694,
"step": 18
},
{
"epoch": 0.00034782608695652176,
"grad_norm": 7.853118896484375,
"learning_rate": 3.6000000000000003e-06,
"loss": 0.5695,
"step": 19
},
{
"epoch": 0.00036613272311212814,
"grad_norm": 6.011293411254883,
"learning_rate": 3.8000000000000005e-06,
"loss": 0.4659,
"step": 20
},
{
"epoch": 0.0003844393592677346,
"grad_norm": 10.169012069702148,
"learning_rate": 4.000000000000001e-06,
"loss": 0.8618,
"step": 21
},
{
"epoch": 0.00040274599542334096,
"grad_norm": 3.0373430252075195,
"learning_rate": 4.2000000000000004e-06,
"loss": 0.2469,
"step": 22
},
{
"epoch": 0.0004210526315789474,
"grad_norm": 7.847787380218506,
"learning_rate": 4.4e-06,
"loss": 0.738,
"step": 23
},
{
"epoch": 0.00043935926773455377,
"grad_norm": 6.6885809898376465,
"learning_rate": 4.600000000000001e-06,
"loss": 0.6311,
"step": 24
},
{
"epoch": 0.0004576659038901602,
"grad_norm": 7.829857349395752,
"learning_rate": 4.800000000000001e-06,
"loss": 0.5967,
"step": 25
},
{
"epoch": 0.0004759725400457666,
"grad_norm": 15.481698989868164,
"learning_rate": 5e-06,
"loss": 0.9285,
"step": 26
},
{
"epoch": 0.000494279176201373,
"grad_norm": 23.23982810974121,
"learning_rate": 5.2e-06,
"loss": 0.6175,
"step": 27
},
{
"epoch": 0.0005125858123569794,
"grad_norm": 9.985934257507324,
"learning_rate": 5.400000000000001e-06,
"loss": 0.4765,
"step": 28
},
{
"epoch": 0.0005308924485125858,
"grad_norm": 17.83190155029297,
"learning_rate": 5.600000000000001e-06,
"loss": 1.1398,
"step": 29
},
{
"epoch": 0.0005491990846681923,
"grad_norm": 7.259341239929199,
"learning_rate": 5.8e-06,
"loss": 0.3864,
"step": 30
},
{
"epoch": 0.0005675057208237986,
"grad_norm": 8.247320175170898,
"learning_rate": 6e-06,
"loss": 0.5853,
"step": 31
},
{
"epoch": 0.000585812356979405,
"grad_norm": 8.801642417907715,
"learning_rate": 6.200000000000001e-06,
"loss": 1.197,
"step": 32
},
{
"epoch": 0.0006041189931350115,
"grad_norm": 8.88306713104248,
"learning_rate": 6.4000000000000006e-06,
"loss": 0.3122,
"step": 33
},
{
"epoch": 0.0006224256292906179,
"grad_norm": 10.055649757385254,
"learning_rate": 6.600000000000001e-06,
"loss": 0.5566,
"step": 34
},
{
"epoch": 0.0006407322654462242,
"grad_norm": 12.692378044128418,
"learning_rate": 6.800000000000001e-06,
"loss": 1.0306,
"step": 35
},
{
"epoch": 0.0006590389016018307,
"grad_norm": 6.696789741516113,
"learning_rate": 7e-06,
"loss": 0.3655,
"step": 36
},
{
"epoch": 0.0006773455377574371,
"grad_norm": 5.720799922943115,
"learning_rate": 7.2000000000000005e-06,
"loss": 0.4338,
"step": 37
},
{
"epoch": 0.0006956521739130435,
"grad_norm": 6.0295610427856445,
"learning_rate": 7.4e-06,
"loss": 0.5826,
"step": 38
},
{
"epoch": 0.0007139588100686499,
"grad_norm": 3.975026845932007,
"learning_rate": 7.600000000000001e-06,
"loss": 0.5669,
"step": 39
},
{
"epoch": 0.0007322654462242563,
"grad_norm": 8.472329139709473,
"learning_rate": 7.800000000000002e-06,
"loss": 0.8793,
"step": 40
},
{
"epoch": 0.0007505720823798627,
"grad_norm": 4.2498321533203125,
"learning_rate": 8.000000000000001e-06,
"loss": 0.3409,
"step": 41
},
{
"epoch": 0.0007688787185354692,
"grad_norm": 8.282734870910645,
"learning_rate": 8.2e-06,
"loss": 0.549,
"step": 42
},
{
"epoch": 0.0007871853546910755,
"grad_norm": 7.966742992401123,
"learning_rate": 8.400000000000001e-06,
"loss": 0.7406,
"step": 43
},
{
"epoch": 0.0008054919908466819,
"grad_norm": 17.36241340637207,
"learning_rate": 8.6e-06,
"loss": 1.0215,
"step": 44
},
{
"epoch": 0.0008237986270022883,
"grad_norm": 8.946808815002441,
"learning_rate": 8.8e-06,
"loss": 0.5102,
"step": 45
},
{
"epoch": 0.0008421052631578948,
"grad_norm": 9.421518325805664,
"learning_rate": 9e-06,
"loss": 1.1237,
"step": 46
},
{
"epoch": 0.0008604118993135011,
"grad_norm": 11.310033798217773,
"learning_rate": 9.200000000000002e-06,
"loss": 0.7825,
"step": 47
},
{
"epoch": 0.0008787185354691075,
"grad_norm": 11.267560005187988,
"learning_rate": 9.4e-06,
"loss": 1.0458,
"step": 48
},
{
"epoch": 0.000897025171624714,
"grad_norm": 2.9605565071105957,
"learning_rate": 9.600000000000001e-06,
"loss": 0.2229,
"step": 49
},
{
"epoch": 0.0009153318077803204,
"grad_norm": 5.507591724395752,
"learning_rate": 9.800000000000001e-06,
"loss": 0.3601,
"step": 50
},
{
"epoch": 0.0009336384439359267,
"grad_norm": 7.83518123626709,
"learning_rate": 1e-05,
"loss": 0.5938,
"step": 51
},
{
"epoch": 0.0009519450800915332,
"grad_norm": 6.110620975494385,
"learning_rate": 1.02e-05,
"loss": 0.8618,
"step": 52
},
{
"epoch": 0.0009702517162471396,
"grad_norm": 7.460414409637451,
"learning_rate": 1.04e-05,
"loss": 0.8692,
"step": 53
},
{
"epoch": 0.000988558352402746,
"grad_norm": 7.539251804351807,
"learning_rate": 1.0600000000000002e-05,
"loss": 0.7236,
"step": 54
},
{
"epoch": 0.0010068649885583525,
"grad_norm": 5.357115268707275,
"learning_rate": 1.0800000000000002e-05,
"loss": 0.3101,
"step": 55
},
{
"epoch": 0.0010251716247139588,
"grad_norm": 3.6166787147521973,
"learning_rate": 1.1000000000000001e-05,
"loss": 0.4426,
"step": 56
},
{
"epoch": 0.0010434782608695651,
"grad_norm": 5.8816680908203125,
"learning_rate": 1.1200000000000001e-05,
"loss": 0.4829,
"step": 57
},
{
"epoch": 0.0010617848970251717,
"grad_norm": 8.535571098327637,
"learning_rate": 1.14e-05,
"loss": 0.9369,
"step": 58
},
{
"epoch": 0.001080091533180778,
"grad_norm": 5.3663482666015625,
"learning_rate": 1.16e-05,
"loss": 0.3664,
"step": 59
},
{
"epoch": 0.0010983981693363845,
"grad_norm": 10.66060733795166,
"learning_rate": 1.18e-05,
"loss": 1.2902,
"step": 60
},
{
"epoch": 0.0011167048054919909,
"grad_norm": 2.3751587867736816,
"learning_rate": 1.2e-05,
"loss": 0.2074,
"step": 61
},
{
"epoch": 0.0011350114416475972,
"grad_norm": 6.494527816772461,
"learning_rate": 1.22e-05,
"loss": 0.5742,
"step": 62
},
{
"epoch": 0.0011533180778032037,
"grad_norm": 1.9130464792251587,
"learning_rate": 1.2400000000000002e-05,
"loss": 0.1833,
"step": 63
},
{
"epoch": 0.00117162471395881,
"grad_norm": 7.697911262512207,
"learning_rate": 1.2600000000000001e-05,
"loss": 1.0019,
"step": 64
},
{
"epoch": 0.0011899313501144164,
"grad_norm": 16.3773250579834,
"learning_rate": 1.2800000000000001e-05,
"loss": 1.5617,
"step": 65
},
{
"epoch": 0.001208237986270023,
"grad_norm": 6.1076836585998535,
"learning_rate": 1.3000000000000001e-05,
"loss": 0.5233,
"step": 66
},
{
"epoch": 0.0012265446224256293,
"grad_norm": 1.6926074028015137,
"learning_rate": 1.3200000000000002e-05,
"loss": 0.2422,
"step": 67
},
{
"epoch": 0.0012448512585812358,
"grad_norm": 3.413689374923706,
"learning_rate": 1.3400000000000002e-05,
"loss": 0.2152,
"step": 68
},
{
"epoch": 0.0012631578947368421,
"grad_norm": 9.58433723449707,
"learning_rate": 1.3600000000000002e-05,
"loss": 1.0936,
"step": 69
},
{
"epoch": 0.0012814645308924484,
"grad_norm": 7.524669647216797,
"learning_rate": 1.38e-05,
"loss": 1.1815,
"step": 70
},
{
"epoch": 0.001299771167048055,
"grad_norm": 6.692052841186523,
"learning_rate": 1.4e-05,
"loss": 0.5309,
"step": 71
},
{
"epoch": 0.0013180778032036613,
"grad_norm": 14.243474006652832,
"learning_rate": 1.4200000000000001e-05,
"loss": 0.8553,
"step": 72
},
{
"epoch": 0.0013363844393592676,
"grad_norm": 10.684292793273926,
"learning_rate": 1.4400000000000001e-05,
"loss": 0.9399,
"step": 73
},
{
"epoch": 0.0013546910755148742,
"grad_norm": 5.949723243713379,
"learning_rate": 1.46e-05,
"loss": 0.7573,
"step": 74
},
{
"epoch": 0.0013729977116704805,
"grad_norm": 6.119026184082031,
"learning_rate": 1.48e-05,
"loss": 0.5557,
"step": 75
},
{
"epoch": 0.001391304347826087,
"grad_norm": 6.306831359863281,
"learning_rate": 1.5000000000000002e-05,
"loss": 0.6165,
"step": 76
},
{
"epoch": 0.0014096109839816934,
"grad_norm": 5.815463066101074,
"learning_rate": 1.5200000000000002e-05,
"loss": 0.6491,
"step": 77
},
{
"epoch": 0.0014279176201372997,
"grad_norm": 4.443011283874512,
"learning_rate": 1.54e-05,
"loss": 0.4984,
"step": 78
},
{
"epoch": 0.0014462242562929062,
"grad_norm": 4.615481376647949,
"learning_rate": 1.5600000000000003e-05,
"loss": 0.3598,
"step": 79
},
{
"epoch": 0.0014645308924485126,
"grad_norm": 6.703000545501709,
"learning_rate": 1.58e-05,
"loss": 0.926,
"step": 80
},
{
"epoch": 0.001482837528604119,
"grad_norm": 3.9417295455932617,
"learning_rate": 1.6000000000000003e-05,
"loss": 0.4523,
"step": 81
},
{
"epoch": 0.0015011441647597254,
"grad_norm": 5.64249849319458,
"learning_rate": 1.62e-05,
"loss": 0.4319,
"step": 82
},
{
"epoch": 0.0015194508009153318,
"grad_norm": 4.152187347412109,
"learning_rate": 1.64e-05,
"loss": 0.3675,
"step": 83
},
{
"epoch": 0.0015377574370709383,
"grad_norm": 5.285708427429199,
"learning_rate": 1.66e-05,
"loss": 0.383,
"step": 84
},
{
"epoch": 0.0015560640732265446,
"grad_norm": 9.757657051086426,
"learning_rate": 1.6800000000000002e-05,
"loss": 0.8236,
"step": 85
},
{
"epoch": 0.001574370709382151,
"grad_norm": 3.1572093963623047,
"learning_rate": 1.7e-05,
"loss": 0.556,
"step": 86
},
{
"epoch": 0.0015926773455377575,
"grad_norm": 4.673479080200195,
"learning_rate": 1.72e-05,
"loss": 0.3168,
"step": 87
},
{
"epoch": 0.0016109839816933638,
"grad_norm": 4.761971950531006,
"learning_rate": 1.7400000000000003e-05,
"loss": 0.7314,
"step": 88
},
{
"epoch": 0.0016292906178489702,
"grad_norm": 4.868711471557617,
"learning_rate": 1.76e-05,
"loss": 0.5399,
"step": 89
},
{
"epoch": 0.0016475972540045767,
"grad_norm": 11.782761573791504,
"learning_rate": 1.7800000000000002e-05,
"loss": 0.7641,
"step": 90
},
{
"epoch": 0.001665903890160183,
"grad_norm": 7.96957540512085,
"learning_rate": 1.8e-05,
"loss": 0.7079,
"step": 91
},
{
"epoch": 0.0016842105263157896,
"grad_norm": 2.9694902896881104,
"learning_rate": 1.8200000000000002e-05,
"loss": 0.2075,
"step": 92
},
{
"epoch": 0.001702517162471396,
"grad_norm": 8.391504287719727,
"learning_rate": 1.8400000000000003e-05,
"loss": 0.7879,
"step": 93
},
{
"epoch": 0.0017208237986270022,
"grad_norm": 5.025172710418701,
"learning_rate": 1.86e-05,
"loss": 0.7385,
"step": 94
},
{
"epoch": 0.0017391304347826088,
"grad_norm": 9.963128089904785,
"learning_rate": 1.88e-05,
"loss": 0.8,
"step": 95
},
{
"epoch": 0.001757437070938215,
"grad_norm": 5.368072032928467,
"learning_rate": 1.9e-05,
"loss": 0.6446,
"step": 96
},
{
"epoch": 0.0017757437070938214,
"grad_norm": 3.758234739303589,
"learning_rate": 1.9200000000000003e-05,
"loss": 0.4276,
"step": 97
},
{
"epoch": 0.001794050343249428,
"grad_norm": 5.807685852050781,
"learning_rate": 1.94e-05,
"loss": 0.4856,
"step": 98
},
{
"epoch": 0.0018123569794050343,
"grad_norm": 4.966123104095459,
"learning_rate": 1.9600000000000002e-05,
"loss": 0.3153,
"step": 99
},
{
"epoch": 0.0018306636155606408,
"grad_norm": 6.432656764984131,
"learning_rate": 1.98e-05,
"loss": 0.4553,
"step": 100
},
{
"epoch": 0.0018489702517162471,
"grad_norm": 6.9228410720825195,
"learning_rate": 2e-05,
"loss": 0.375,
"step": 101
},
{
"epoch": 0.0018672768878718535,
"grad_norm": 5.237428665161133,
"learning_rate": 1.9999939076577906e-05,
"loss": 0.484,
"step": 102
},
{
"epoch": 0.00188558352402746,
"grad_norm": 5.173490524291992,
"learning_rate": 1.9999756307053947e-05,
"loss": 0.4135,
"step": 103
},
{
"epoch": 0.0019038901601830663,
"grad_norm": 4.331842422485352,
"learning_rate": 1.9999451693655125e-05,
"loss": 0.3857,
"step": 104
},
{
"epoch": 0.0019221967963386727,
"grad_norm": 6.129421710968018,
"learning_rate": 1.9999025240093045e-05,
"loss": 0.5126,
"step": 105
},
{
"epoch": 0.0019405034324942792,
"grad_norm": 4.619616985321045,
"learning_rate": 1.9998476951563914e-05,
"loss": 0.477,
"step": 106
},
{
"epoch": 0.0019588100686498858,
"grad_norm": 3.936915397644043,
"learning_rate": 1.9997806834748455e-05,
"loss": 0.3516,
"step": 107
},
{
"epoch": 0.001977116704805492,
"grad_norm": 8.568585395812988,
"learning_rate": 1.9997014897811834e-05,
"loss": 0.7743,
"step": 108
},
{
"epoch": 0.0019954233409610984,
"grad_norm": 4.892576694488525,
"learning_rate": 1.9996101150403543e-05,
"loss": 0.4471,
"step": 109
},
{
"epoch": 0.002013729977116705,
"grad_norm": 10.018097877502441,
"learning_rate": 1.9995065603657317e-05,
"loss": 1.1247,
"step": 110
},
{
"epoch": 0.002032036613272311,
"grad_norm": 5.875444412231445,
"learning_rate": 1.999390827019096e-05,
"loss": 0.7445,
"step": 111
},
{
"epoch": 0.0020503432494279176,
"grad_norm": 7.044773101806641,
"learning_rate": 1.999262916410621e-05,
"loss": 0.4433,
"step": 112
},
{
"epoch": 0.002068649885583524,
"grad_norm": 4.854395866394043,
"learning_rate": 1.9991228300988586e-05,
"loss": 0.4177,
"step": 113
},
{
"epoch": 0.0020869565217391303,
"grad_norm": 3.947049379348755,
"learning_rate": 1.998970569790715e-05,
"loss": 0.3204,
"step": 114
},
{
"epoch": 0.002105263157894737,
"grad_norm": 2.9266178607940674,
"learning_rate": 1.9988061373414342e-05,
"loss": 0.3421,
"step": 115
},
{
"epoch": 0.0021235697940503433,
"grad_norm": 14.068440437316895,
"learning_rate": 1.9986295347545738e-05,
"loss": 1.2803,
"step": 116
},
{
"epoch": 0.0021418764302059494,
"grad_norm": 2.746870279312134,
"learning_rate": 1.9984407641819812e-05,
"loss": 0.1913,
"step": 117
},
{
"epoch": 0.002160183066361556,
"grad_norm": 5.603977680206299,
"learning_rate": 1.9982398279237657e-05,
"loss": 0.535,
"step": 118
},
{
"epoch": 0.0021784897025171625,
"grad_norm": 4.298122882843018,
"learning_rate": 1.9980267284282718e-05,
"loss": 0.3521,
"step": 119
},
{
"epoch": 0.002196796338672769,
"grad_norm": 3.4252305030822754,
"learning_rate": 1.9978014682920503e-05,
"loss": 0.3127,
"step": 120
},
{
"epoch": 0.002215102974828375,
"grad_norm": 4.943688869476318,
"learning_rate": 1.9975640502598243e-05,
"loss": 0.2819,
"step": 121
},
{
"epoch": 0.0022334096109839817,
"grad_norm": 6.345489978790283,
"learning_rate": 1.997314477224458e-05,
"loss": 0.8686,
"step": 122
},
{
"epoch": 0.0022517162471395883,
"grad_norm": 6.48477840423584,
"learning_rate": 1.9970527522269204e-05,
"loss": 0.4396,
"step": 123
},
{
"epoch": 0.0022700228832951944,
"grad_norm": 12.649872779846191,
"learning_rate": 1.9967788784562474e-05,
"loss": 0.4447,
"step": 124
},
{
"epoch": 0.002288329519450801,
"grad_norm": 3.368851900100708,
"learning_rate": 1.9964928592495046e-05,
"loss": 0.213,
"step": 125
},
{
"epoch": 0.0023066361556064075,
"grad_norm": 5.273118019104004,
"learning_rate": 1.9961946980917457e-05,
"loss": 0.6374,
"step": 126
},
{
"epoch": 0.0023249427917620136,
"grad_norm": 8.816353797912598,
"learning_rate": 1.9958843986159705e-05,
"loss": 0.8738,
"step": 127
},
{
"epoch": 0.00234324942791762,
"grad_norm": 7.0079345703125,
"learning_rate": 1.99556196460308e-05,
"loss": 0.7057,
"step": 128
},
{
"epoch": 0.0023615560640732267,
"grad_norm": 2.5678298473358154,
"learning_rate": 1.9952273999818312e-05,
"loss": 0.182,
"step": 129
},
{
"epoch": 0.0023798627002288328,
"grad_norm": 5.133856773376465,
"learning_rate": 1.9948807088287884e-05,
"loss": 0.3598,
"step": 130
},
{
"epoch": 0.0023981693363844393,
"grad_norm": 9.948471069335938,
"learning_rate": 1.9945218953682736e-05,
"loss": 0.4483,
"step": 131
},
{
"epoch": 0.002416475972540046,
"grad_norm": 5.9176459312438965,
"learning_rate": 1.9941509639723155e-05,
"loss": 0.8298,
"step": 132
},
{
"epoch": 0.002434782608695652,
"grad_norm": 2.4215924739837646,
"learning_rate": 1.9937679191605964e-05,
"loss": 0.1617,
"step": 133
},
{
"epoch": 0.0024530892448512585,
"grad_norm": 11.955750465393066,
"learning_rate": 1.9933727656003964e-05,
"loss": 0.609,
"step": 134
},
{
"epoch": 0.002471395881006865,
"grad_norm": 5.947329998016357,
"learning_rate": 1.992965508106537e-05,
"loss": 0.5718,
"step": 135
},
{
"epoch": 0.0024897025171624716,
"grad_norm": 5.794270038604736,
"learning_rate": 1.9925461516413224e-05,
"loss": 0.5392,
"step": 136
},
{
"epoch": 0.0025080091533180777,
"grad_norm": 5.274944305419922,
"learning_rate": 1.9921147013144782e-05,
"loss": 0.5278,
"step": 137
},
{
"epoch": 0.0025263157894736842,
"grad_norm": 5.6011962890625,
"learning_rate": 1.9916711623830904e-05,
"loss": 0.4404,
"step": 138
},
{
"epoch": 0.0025446224256292908,
"grad_norm": 7.330393314361572,
"learning_rate": 1.991215540251542e-05,
"loss": 0.6554,
"step": 139
},
{
"epoch": 0.002562929061784897,
"grad_norm": 8.020462036132812,
"learning_rate": 1.9907478404714438e-05,
"loss": 0.7649,
"step": 140
},
{
"epoch": 0.0025812356979405034,
"grad_norm": 3.3086674213409424,
"learning_rate": 1.9902680687415704e-05,
"loss": 0.3638,
"step": 141
},
{
"epoch": 0.00259954233409611,
"grad_norm": 7.1618781089782715,
"learning_rate": 1.989776230907789e-05,
"loss": 0.4597,
"step": 142
},
{
"epoch": 0.002617848970251716,
"grad_norm": 5.93783712387085,
"learning_rate": 1.9892723329629885e-05,
"loss": 0.4452,
"step": 143
},
{
"epoch": 0.0026361556064073226,
"grad_norm": 5.123695373535156,
"learning_rate": 1.988756381047006e-05,
"loss": 0.5444,
"step": 144
},
{
"epoch": 0.002654462242562929,
"grad_norm": 7.8915181159973145,
"learning_rate": 1.988228381446553e-05,
"loss": 0.758,
"step": 145
},
{
"epoch": 0.0026727688787185353,
"grad_norm": 7.822751998901367,
"learning_rate": 1.9876883405951378e-05,
"loss": 0.6956,
"step": 146
},
{
"epoch": 0.002691075514874142,
"grad_norm": 2.519596815109253,
"learning_rate": 1.987136265072988e-05,
"loss": 0.238,
"step": 147
},
{
"epoch": 0.0027093821510297484,
"grad_norm": 3.658250093460083,
"learning_rate": 1.9865721616069695e-05,
"loss": 0.3844,
"step": 148
},
{
"epoch": 0.0027276887871853545,
"grad_norm": 5.0784430503845215,
"learning_rate": 1.985996037070505e-05,
"loss": 0.5564,
"step": 149
},
{
"epoch": 0.002745995423340961,
"grad_norm": 6.504764556884766,
"learning_rate": 1.9854078984834904e-05,
"loss": 0.4866,
"step": 150
},
{
"epoch": 0.0027643020594965676,
"grad_norm": 4.09471321105957,
"learning_rate": 1.9848077530122083e-05,
"loss": 0.2998,
"step": 151
},
{
"epoch": 0.002782608695652174,
"grad_norm": 5.1176886558532715,
"learning_rate": 1.984195607969242e-05,
"loss": 0.2826,
"step": 152
},
{
"epoch": 0.00280091533180778,
"grad_norm": 12.725276947021484,
"learning_rate": 1.983571470813386e-05,
"loss": 0.7915,
"step": 153
},
{
"epoch": 0.0028192219679633868,
"grad_norm": 7.101748943328857,
"learning_rate": 1.9829353491495545e-05,
"loss": 0.7455,
"step": 154
},
{
"epoch": 0.0028375286041189933,
"grad_norm": 6.615306854248047,
"learning_rate": 1.982287250728689e-05,
"loss": 0.8178,
"step": 155
},
{
"epoch": 0.0028558352402745994,
"grad_norm": 6.7726240158081055,
"learning_rate": 1.9816271834476642e-05,
"loss": 0.3764,
"step": 156
},
{
"epoch": 0.002874141876430206,
"grad_norm": 4.513782978057861,
"learning_rate": 1.9809551553491918e-05,
"loss": 0.3002,
"step": 157
},
{
"epoch": 0.0028924485125858125,
"grad_norm": 6.409885883331299,
"learning_rate": 1.9802711746217222e-05,
"loss": 0.4299,
"step": 158
},
{
"epoch": 0.0029107551487414186,
"grad_norm": 1.4180457592010498,
"learning_rate": 1.979575249599344e-05,
"loss": 0.0436,
"step": 159
},
{
"epoch": 0.002929061784897025,
"grad_norm": 7.368013858795166,
"learning_rate": 1.9788673887616852e-05,
"loss": 0.4066,
"step": 160
},
{
"epoch": 0.0029473684210526317,
"grad_norm": 3.97849178314209,
"learning_rate": 1.9781476007338058e-05,
"loss": 0.2171,
"step": 161
},
{
"epoch": 0.002965675057208238,
"grad_norm": 8.33909797668457,
"learning_rate": 1.9774158942860962e-05,
"loss": 0.5762,
"step": 162
},
{
"epoch": 0.0029839816933638443,
"grad_norm": 1.4704389572143555,
"learning_rate": 1.9766722783341682e-05,
"loss": 0.1528,
"step": 163
},
{
"epoch": 0.003002288329519451,
"grad_norm": 6.321743965148926,
"learning_rate": 1.9759167619387474e-05,
"loss": 0.4852,
"step": 164
},
{
"epoch": 0.0030205949656750574,
"grad_norm": 8.366913795471191,
"learning_rate": 1.9751493543055634e-05,
"loss": 0.4964,
"step": 165
},
{
"epoch": 0.0030389016018306635,
"grad_norm": 6.21965217590332,
"learning_rate": 1.9743700647852356e-05,
"loss": 0.527,
"step": 166
},
{
"epoch": 0.00305720823798627,
"grad_norm": 9.058350563049316,
"learning_rate": 1.9735789028731603e-05,
"loss": 0.9455,
"step": 167
},
{
"epoch": 0.0030755148741418766,
"grad_norm": 7.52754020690918,
"learning_rate": 1.972775878209397e-05,
"loss": 0.618,
"step": 168
},
{
"epoch": 0.0030938215102974827,
"grad_norm": 8.560585975646973,
"learning_rate": 1.9719610005785466e-05,
"loss": 0.476,
"step": 169
},
{
"epoch": 0.0031121281464530893,
"grad_norm": 7.208701133728027,
"learning_rate": 1.971134279909636e-05,
"loss": 0.7781,
"step": 170
},
{
"epoch": 0.003130434782608696,
"grad_norm": 5.757314205169678,
"learning_rate": 1.9702957262759964e-05,
"loss": 0.518,
"step": 171
},
{
"epoch": 0.003148741418764302,
"grad_norm": 2.8194165229797363,
"learning_rate": 1.9694453498951392e-05,
"loss": 0.2128,
"step": 172
},
{
"epoch": 0.0031670480549199085,
"grad_norm": 3.0178167819976807,
"learning_rate": 1.9685831611286312e-05,
"loss": 0.3055,
"step": 173
},
{
"epoch": 0.003185354691075515,
"grad_norm": 1.6432427167892456,
"learning_rate": 1.9677091704819714e-05,
"loss": 0.216,
"step": 174
},
{
"epoch": 0.003203661327231121,
"grad_norm": 8.396689414978027,
"learning_rate": 1.9668233886044597e-05,
"loss": 0.6197,
"step": 175
},
{
"epoch": 0.0032219679633867277,
"grad_norm": 7.577538967132568,
"learning_rate": 1.9659258262890683e-05,
"loss": 0.7145,
"step": 176
},
{
"epoch": 0.003240274599542334,
"grad_norm": 4.660325527191162,
"learning_rate": 1.9650164944723116e-05,
"loss": 0.3487,
"step": 177
},
{
"epoch": 0.0032585812356979403,
"grad_norm": 2.1696557998657227,
"learning_rate": 1.96409540423411e-05,
"loss": 0.3491,
"step": 178
},
{
"epoch": 0.003276887871853547,
"grad_norm": 3.31132435798645,
"learning_rate": 1.9631625667976584e-05,
"loss": 0.2511,
"step": 179
},
{
"epoch": 0.0032951945080091534,
"grad_norm": 5.26754903793335,
"learning_rate": 1.9622179935292855e-05,
"loss": 0.4235,
"step": 180
},
{
"epoch": 0.00331350114416476,
"grad_norm": 4.9605865478515625,
"learning_rate": 1.961261695938319e-05,
"loss": 0.2065,
"step": 181
},
{
"epoch": 0.003331807780320366,
"grad_norm": 5.259721755981445,
"learning_rate": 1.9602936856769432e-05,
"loss": 0.3804,
"step": 182
},
{
"epoch": 0.0033501144164759726,
"grad_norm": 3.692326784133911,
"learning_rate": 1.9593139745400575e-05,
"loss": 0.3388,
"step": 183
},
{
"epoch": 0.003368421052631579,
"grad_norm": 4.228832721710205,
"learning_rate": 1.9583225744651334e-05,
"loss": 0.2778,
"step": 184
},
{
"epoch": 0.0033867276887871852,
"grad_norm": 8.879986763000488,
"learning_rate": 1.9573194975320672e-05,
"loss": 0.7031,
"step": 185
},
{
"epoch": 0.003405034324942792,
"grad_norm": 7.6145853996276855,
"learning_rate": 1.9563047559630356e-05,
"loss": 0.6261,
"step": 186
},
{
"epoch": 0.0034233409610983983,
"grad_norm": 10.076028823852539,
"learning_rate": 1.9552783621223437e-05,
"loss": 0.6617,
"step": 187
},
{
"epoch": 0.0034416475972540044,
"grad_norm": 4.281607151031494,
"learning_rate": 1.954240328516277e-05,
"loss": 0.2965,
"step": 188
},
{
"epoch": 0.003459954233409611,
"grad_norm": 3.990032911300659,
"learning_rate": 1.9531906677929472e-05,
"loss": 0.2232,
"step": 189
},
{
"epoch": 0.0034782608695652175,
"grad_norm": 10.36355209350586,
"learning_rate": 1.9521293927421388e-05,
"loss": 0.3606,
"step": 190
},
{
"epoch": 0.0034965675057208236,
"grad_norm": 9.396668434143066,
"learning_rate": 1.9510565162951538e-05,
"loss": 0.6809,
"step": 191
},
{
"epoch": 0.00351487414187643,
"grad_norm": 4.969096660614014,
"learning_rate": 1.9499720515246524e-05,
"loss": 0.3711,
"step": 192
},
{
"epoch": 0.0035331807780320367,
"grad_norm": 2.855027914047241,
"learning_rate": 1.9488760116444966e-05,
"loss": 0.3334,
"step": 193
},
{
"epoch": 0.003551487414187643,
"grad_norm": 9.800653457641602,
"learning_rate": 1.947768410009586e-05,
"loss": 0.8046,
"step": 194
},
{
"epoch": 0.0035697940503432494,
"grad_norm": 6.4518632888793945,
"learning_rate": 1.9466492601156964e-05,
"loss": 0.7623,
"step": 195
},
{
"epoch": 0.003588100686498856,
"grad_norm": 9.151044845581055,
"learning_rate": 1.945518575599317e-05,
"loss": 0.4479,
"step": 196
},
{
"epoch": 0.0036064073226544625,
"grad_norm": 6.074557304382324,
"learning_rate": 1.944376370237481e-05,
"loss": 0.3635,
"step": 197
},
{
"epoch": 0.0036247139588100686,
"grad_norm": 6.840311050415039,
"learning_rate": 1.943222657947601e-05,
"loss": 0.3051,
"step": 198
},
{
"epoch": 0.003643020594965675,
"grad_norm": 3.7585532665252686,
"learning_rate": 1.942057452787297e-05,
"loss": 0.3266,
"step": 199
},
{
"epoch": 0.0036613272311212816,
"grad_norm": 4.411327838897705,
"learning_rate": 1.9408807689542257e-05,
"loss": 0.2551,
"step": 200
},
{
"epoch": 0.0036796338672768878,
"grad_norm": 3.8587820529937744,
"learning_rate": 1.9396926207859085e-05,
"loss": 0.3105,
"step": 201
},
{
"epoch": 0.0036979405034324943,
"grad_norm": 4.376249313354492,
"learning_rate": 1.938493022759556e-05,
"loss": 0.2243,
"step": 202
},
{
"epoch": 0.003716247139588101,
"grad_norm": 5.54730224609375,
"learning_rate": 1.937281989491892e-05,
"loss": 0.4158,
"step": 203
},
{
"epoch": 0.003734553775743707,
"grad_norm": 2.6035118103027344,
"learning_rate": 1.9360595357389735e-05,
"loss": 0.1788,
"step": 204
},
{
"epoch": 0.0037528604118993135,
"grad_norm": 10.728706359863281,
"learning_rate": 1.9348256763960146e-05,
"loss": 1.0367,
"step": 205
},
{
"epoch": 0.00377116704805492,
"grad_norm": 5.158708572387695,
"learning_rate": 1.9335804264972018e-05,
"loss": 0.4394,
"step": 206
},
{
"epoch": 0.003789473684210526,
"grad_norm": 9.297528266906738,
"learning_rate": 1.9323238012155125e-05,
"loss": 0.4115,
"step": 207
},
{
"epoch": 0.0038077803203661327,
"grad_norm": 11.042555809020996,
"learning_rate": 1.9310558158625286e-05,
"loss": 0.7565,
"step": 208
},
{
"epoch": 0.0038260869565217392,
"grad_norm": 6.377527236938477,
"learning_rate": 1.9297764858882516e-05,
"loss": 0.5614,
"step": 209
},
{
"epoch": 0.0038443935926773453,
"grad_norm": 3.4690988063812256,
"learning_rate": 1.9284858268809135e-05,
"loss": 0.1462,
"step": 210
},
{
"epoch": 0.003862700228832952,
"grad_norm": 8.21645450592041,
"learning_rate": 1.9271838545667876e-05,
"loss": 0.6482,
"step": 211
},
{
"epoch": 0.0038810068649885584,
"grad_norm": 13.70019817352295,
"learning_rate": 1.925870584809995e-05,
"loss": 1.3146,
"step": 212
},
{
"epoch": 0.003899313501144165,
"grad_norm": 6.1762375831604,
"learning_rate": 1.9245460336123136e-05,
"loss": 0.2371,
"step": 213
},
{
"epoch": 0.0039176201372997715,
"grad_norm": 3.4190640449523926,
"learning_rate": 1.923210217112981e-05,
"loss": 0.1756,
"step": 214
},
{
"epoch": 0.003935926773455377,
"grad_norm": 4.4201250076293945,
"learning_rate": 1.9218631515885007e-05,
"loss": 0.3832,
"step": 215
},
{
"epoch": 0.003954233409610984,
"grad_norm": 4.916990756988525,
"learning_rate": 1.9205048534524405e-05,
"loss": 0.55,
"step": 216
},
{
"epoch": 0.00397254004576659,
"grad_norm": 5.253897666931152,
"learning_rate": 1.9191353392552346e-05,
"loss": 0.4081,
"step": 217
},
{
"epoch": 0.003990846681922197,
"grad_norm": 9.138065338134766,
"learning_rate": 1.9177546256839814e-05,
"loss": 0.4096,
"step": 218
},
{
"epoch": 0.004009153318077803,
"grad_norm": 5.5543437004089355,
"learning_rate": 1.9163627295622397e-05,
"loss": 0.3725,
"step": 219
},
{
"epoch": 0.00402745995423341,
"grad_norm": 6.35120153427124,
"learning_rate": 1.914959667849825e-05,
"loss": 0.6631,
"step": 220
},
{
"epoch": 0.0040457665903890164,
"grad_norm": 6.352338790893555,
"learning_rate": 1.913545457642601e-05,
"loss": 0.6608,
"step": 221
},
{
"epoch": 0.004064073226544622,
"grad_norm": 5.439535140991211,
"learning_rate": 1.9121201161722732e-05,
"loss": 0.3461,
"step": 222
},
{
"epoch": 0.004082379862700229,
"grad_norm": 6.05610990524292,
"learning_rate": 1.910683660806177e-05,
"loss": 0.7047,
"step": 223
},
{
"epoch": 0.004100686498855835,
"grad_norm": 16.13218879699707,
"learning_rate": 1.9092361090470688e-05,
"loss": 0.9417,
"step": 224
},
{
"epoch": 0.004118993135011442,
"grad_norm": 11.939602851867676,
"learning_rate": 1.907777478532909e-05,
"loss": 0.4711,
"step": 225
},
{
"epoch": 0.004137299771167048,
"grad_norm": 9.328332901000977,
"learning_rate": 1.9063077870366504e-05,
"loss": 0.4825,
"step": 226
},
{
"epoch": 0.004155606407322655,
"grad_norm": 3.4537312984466553,
"learning_rate": 1.9048270524660197e-05,
"loss": 0.2696,
"step": 227
},
{
"epoch": 0.0041739130434782605,
"grad_norm": 8.465746879577637,
"learning_rate": 1.903335292863301e-05,
"loss": 0.3368,
"step": 228
},
{
"epoch": 0.004192219679633867,
"grad_norm": 2.80598783493042,
"learning_rate": 1.901832526405114e-05,
"loss": 0.2279,
"step": 229
},
{
"epoch": 0.004210526315789474,
"grad_norm": 9.187653541564941,
"learning_rate": 1.9003187714021936e-05,
"loss": 0.5421,
"step": 230
},
{
"epoch": 0.00422883295194508,
"grad_norm": 2.9173386096954346,
"learning_rate": 1.8987940462991673e-05,
"loss": 0.1865,
"step": 231
},
{
"epoch": 0.004247139588100687,
"grad_norm": 8.808391571044922,
"learning_rate": 1.8972583696743284e-05,
"loss": 0.6127,
"step": 232
},
{
"epoch": 0.004265446224256293,
"grad_norm": 3.902742862701416,
"learning_rate": 1.895711760239413e-05,
"loss": 0.4918,
"step": 233
},
{
"epoch": 0.004283752860411899,
"grad_norm": 7.3988237380981445,
"learning_rate": 1.8941542368393683e-05,
"loss": 0.4754,
"step": 234
},
{
"epoch": 0.0043020594965675054,
"grad_norm": 3.434345006942749,
"learning_rate": 1.892585818452126e-05,
"loss": 0.2698,
"step": 235
},
{
"epoch": 0.004320366132723112,
"grad_norm": 5.183997631072998,
"learning_rate": 1.891006524188368e-05,
"loss": 0.2622,
"step": 236
},
{
"epoch": 0.0043386727688787185,
"grad_norm": 4.6859235763549805,
"learning_rate": 1.889416373291298e-05,
"loss": 0.2881,
"step": 237
},
{
"epoch": 0.004356979405034325,
"grad_norm": 3.172283887863159,
"learning_rate": 1.8878153851364013e-05,
"loss": 0.2605,
"step": 238
},
{
"epoch": 0.004375286041189932,
"grad_norm": 11.702736854553223,
"learning_rate": 1.8862035792312148e-05,
"loss": 0.6882,
"step": 239
},
{
"epoch": 0.004393592677345538,
"grad_norm": 6.005770683288574,
"learning_rate": 1.884580975215084e-05,
"loss": 0.4795,
"step": 240
},
{
"epoch": 0.004411899313501144,
"grad_norm": 3.117210865020752,
"learning_rate": 1.8829475928589272e-05,
"loss": 0.3978,
"step": 241
},
{
"epoch": 0.00443020594965675,
"grad_norm": 6.215647220611572,
"learning_rate": 1.8813034520649923e-05,
"loss": 0.4373,
"step": 242
},
{
"epoch": 0.004448512585812357,
"grad_norm": 5.335672378540039,
"learning_rate": 1.879648572866617e-05,
"loss": 0.2903,
"step": 243
},
{
"epoch": 0.0044668192219679635,
"grad_norm": 1.4431347846984863,
"learning_rate": 1.8779829754279806e-05,
"loss": 0.1387,
"step": 244
},
{
"epoch": 0.00448512585812357,
"grad_norm": 8.997931480407715,
"learning_rate": 1.8763066800438638e-05,
"loss": 0.7452,
"step": 245
},
{
"epoch": 0.0045034324942791765,
"grad_norm": 7.0658721923828125,
"learning_rate": 1.874619707139396e-05,
"loss": 0.4082,
"step": 246
},
{
"epoch": 0.004521739130434782,
"grad_norm": 16.99602508544922,
"learning_rate": 1.8729220772698096e-05,
"loss": 0.5462,
"step": 247
},
{
"epoch": 0.004540045766590389,
"grad_norm": 4.291457653045654,
"learning_rate": 1.8712138111201898e-05,
"loss": 0.2849,
"step": 248
},
{
"epoch": 0.004558352402745995,
"grad_norm": 1.2436845302581787,
"learning_rate": 1.869494929505219e-05,
"loss": 0.1299,
"step": 249
},
{
"epoch": 0.004576659038901602,
"grad_norm": 13.032325744628906,
"learning_rate": 1.8677654533689287e-05,
"loss": 1.1319,
"step": 250
},
{
"epoch": 0.004594965675057208,
"grad_norm": 4.819525718688965,
"learning_rate": 1.866025403784439e-05,
"loss": 0.2317,
"step": 251
},
{
"epoch": 0.004613272311212815,
"grad_norm": 4.058289051055908,
"learning_rate": 1.864274801953705e-05,
"loss": 0.1868,
"step": 252
},
{
"epoch": 0.0046315789473684215,
"grad_norm": 5.123549461364746,
"learning_rate": 1.8625136692072577e-05,
"loss": 0.2682,
"step": 253
},
{
"epoch": 0.004649885583524027,
"grad_norm": 4.105663299560547,
"learning_rate": 1.860742027003944e-05,
"loss": 0.2852,
"step": 254
},
{
"epoch": 0.004668192219679634,
"grad_norm": 8.609756469726562,
"learning_rate": 1.8589598969306646e-05,
"loss": 0.638,
"step": 255
},
{
"epoch": 0.00468649885583524,
"grad_norm": 10.366744041442871,
"learning_rate": 1.8571673007021124e-05,
"loss": 0.7038,
"step": 256
},
{
"epoch": 0.004704805491990847,
"grad_norm": 3.733368158340454,
"learning_rate": 1.855364260160507e-05,
"loss": 0.3285,
"step": 257
},
{
"epoch": 0.004723112128146453,
"grad_norm": 8.309182167053223,
"learning_rate": 1.8535507972753275e-05,
"loss": 0.9593,
"step": 258
},
{
"epoch": 0.00474141876430206,
"grad_norm": 9.302151679992676,
"learning_rate": 1.851726934143048e-05,
"loss": 0.5591,
"step": 259
},
{
"epoch": 0.0047597254004576655,
"grad_norm": 1.1159979104995728,
"learning_rate": 1.849892692986864e-05,
"loss": 0.1096,
"step": 260
},
{
"epoch": 0.004778032036613272,
"grad_norm": 5.174903392791748,
"learning_rate": 1.848048096156426e-05,
"loss": 0.2641,
"step": 261
},
{
"epoch": 0.004796338672768879,
"grad_norm": 4.628619194030762,
"learning_rate": 1.8461931661275642e-05,
"loss": 0.4448,
"step": 262
},
{
"epoch": 0.004814645308924485,
"grad_norm": 2.9997923374176025,
"learning_rate": 1.8443279255020153e-05,
"loss": 0.2183,
"step": 263
},
{
"epoch": 0.004832951945080092,
"grad_norm": 9.500200271606445,
"learning_rate": 1.842452397007148e-05,
"loss": 0.4821,
"step": 264
},
{
"epoch": 0.004851258581235698,
"grad_norm": 7.443029403686523,
"learning_rate": 1.8405666034956842e-05,
"loss": 0.6391,
"step": 265
},
{
"epoch": 0.004869565217391304,
"grad_norm": 13.987396240234375,
"learning_rate": 1.8386705679454243e-05,
"loss": 0.8478,
"step": 266
},
{
"epoch": 0.0048878718535469105,
"grad_norm": 3.692591667175293,
"learning_rate": 1.836764313458962e-05,
"loss": 0.2808,
"step": 267
},
{
"epoch": 0.004906178489702517,
"grad_norm": 6.317406177520752,
"learning_rate": 1.8348478632634067e-05,
"loss": 0.3462,
"step": 268
},
{
"epoch": 0.0049244851258581235,
"grad_norm": 10.023675918579102,
"learning_rate": 1.8329212407100996e-05,
"loss": 1.0451,
"step": 269
},
{
"epoch": 0.00494279176201373,
"grad_norm": 5.884584426879883,
"learning_rate": 1.8309844692743283e-05,
"loss": 0.5844,
"step": 270
},
{
"epoch": 0.004961098398169337,
"grad_norm": 2.740469217300415,
"learning_rate": 1.8290375725550417e-05,
"loss": 0.1969,
"step": 271
},
{
"epoch": 0.004979405034324943,
"grad_norm": 6.099292755126953,
"learning_rate": 1.827080574274562e-05,
"loss": 0.726,
"step": 272
},
{
"epoch": 0.004997711670480549,
"grad_norm": 6.810953140258789,
"learning_rate": 1.8251134982782952e-05,
"loss": 0.5525,
"step": 273
},
{
"epoch": 0.005016018306636155,
"grad_norm": 17.33843231201172,
"learning_rate": 1.8231363685344422e-05,
"loss": 0.9222,
"step": 274
},
{
"epoch": 0.005034324942791762,
"grad_norm": 6.817041397094727,
"learning_rate": 1.821149209133704e-05,
"loss": 0.4998,
"step": 275
},
{
"epoch": 0.0050526315789473685,
"grad_norm": 9.165572166442871,
"learning_rate": 1.819152044288992e-05,
"loss": 0.6976,
"step": 276
},
{
"epoch": 0.005070938215102975,
"grad_norm": 4.791163444519043,
"learning_rate": 1.8171448983351284e-05,
"loss": 0.4825,
"step": 277
},
{
"epoch": 0.0050892448512585816,
"grad_norm": 2.2928450107574463,
"learning_rate": 1.815127795728554e-05,
"loss": 0.1951,
"step": 278
},
{
"epoch": 0.005107551487414187,
"grad_norm": 4.821455955505371,
"learning_rate": 1.8131007610470278e-05,
"loss": 0.2852,
"step": 279
},
{
"epoch": 0.005125858123569794,
"grad_norm": 6.22464656829834,
"learning_rate": 1.8110638189893267e-05,
"loss": 0.3738,
"step": 280
},
{
"epoch": 0.0051441647597254,
"grad_norm": 4.849123001098633,
"learning_rate": 1.8090169943749477e-05,
"loss": 0.296,
"step": 281
},
{
"epoch": 0.005162471395881007,
"grad_norm": 6.145839691162109,
"learning_rate": 1.806960312143802e-05,
"loss": 0.3723,
"step": 282
},
{
"epoch": 0.005180778032036613,
"grad_norm": 4.0611724853515625,
"learning_rate": 1.804893797355914e-05,
"loss": 0.4265,
"step": 283
},
{
"epoch": 0.00519908466819222,
"grad_norm": 3.252166271209717,
"learning_rate": 1.8028174751911147e-05,
"loss": 0.3467,
"step": 284
},
{
"epoch": 0.0052173913043478265,
"grad_norm": 6.532531261444092,
"learning_rate": 1.8007313709487334e-05,
"loss": 0.3968,
"step": 285
},
{
"epoch": 0.005235697940503432,
"grad_norm": 8.545121192932129,
"learning_rate": 1.798635510047293e-05,
"loss": 0.5772,
"step": 286
},
{
"epoch": 0.005254004576659039,
"grad_norm": 3.646812677383423,
"learning_rate": 1.7965299180241963e-05,
"loss": 0.1868,
"step": 287
},
{
"epoch": 0.005272311212814645,
"grad_norm": 6.056759357452393,
"learning_rate": 1.7944146205354182e-05,
"loss": 0.615,
"step": 288
},
{
"epoch": 0.005290617848970252,
"grad_norm": 9.61752986907959,
"learning_rate": 1.792289643355191e-05,
"loss": 0.6434,
"step": 289
},
{
"epoch": 0.005308924485125858,
"grad_norm": 7.096751689910889,
"learning_rate": 1.7901550123756906e-05,
"loss": 0.3842,
"step": 290
},
{
"epoch": 0.005327231121281465,
"grad_norm": 3.8110721111297607,
"learning_rate": 1.788010753606722e-05,
"loss": 0.2328,
"step": 291
},
{
"epoch": 0.0053455377574370706,
"grad_norm": 5.858436584472656,
"learning_rate": 1.785856893175402e-05,
"loss": 0.431,
"step": 292
},
{
"epoch": 0.005363844393592677,
"grad_norm": 4.623099327087402,
"learning_rate": 1.78369345732584e-05,
"loss": 0.337,
"step": 293
},
{
"epoch": 0.005382151029748284,
"grad_norm": 5.802640914916992,
"learning_rate": 1.781520472418819e-05,
"loss": 0.4879,
"step": 294
},
{
"epoch": 0.00540045766590389,
"grad_norm": 5.544589042663574,
"learning_rate": 1.7793379649314743e-05,
"loss": 0.4911,
"step": 295
},
{
"epoch": 0.005418764302059497,
"grad_norm": 6.768428802490234,
"learning_rate": 1.777145961456971e-05,
"loss": 0.5283,
"step": 296
},
{
"epoch": 0.005437070938215103,
"grad_norm": 4.4601731300354,
"learning_rate": 1.7749444887041797e-05,
"loss": 0.4052,
"step": 297
},
{
"epoch": 0.005455377574370709,
"grad_norm": 16.009952545166016,
"learning_rate": 1.7727335734973512e-05,
"loss": 0.4814,
"step": 298
},
{
"epoch": 0.0054736842105263155,
"grad_norm": 10.445368766784668,
"learning_rate": 1.7705132427757895e-05,
"loss": 0.8704,
"step": 299
},
{
"epoch": 0.005491990846681922,
"grad_norm": 3.6240663528442383,
"learning_rate": 1.7682835235935236e-05,
"loss": 0.1805,
"step": 300
},
{
"epoch": 0.005510297482837529,
"grad_norm": 6.040288925170898,
"learning_rate": 1.766044443118978e-05,
"loss": 0.536,
"step": 301
},
{
"epoch": 0.005528604118993135,
"grad_norm": 5.494821548461914,
"learning_rate": 1.7637960286346423e-05,
"loss": 0.3142,
"step": 302
},
{
"epoch": 0.005546910755148742,
"grad_norm": 4.8304219245910645,
"learning_rate": 1.761538307536737e-05,
"loss": 0.444,
"step": 303
},
{
"epoch": 0.005565217391304348,
"grad_norm": 8.82863712310791,
"learning_rate": 1.759271307334881e-05,
"loss": 0.7043,
"step": 304
},
{
"epoch": 0.005583524027459954,
"grad_norm": 11.274286270141602,
"learning_rate": 1.7569950556517566e-05,
"loss": 1.0306,
"step": 305
},
{
"epoch": 0.00560183066361556,
"grad_norm": 7.698188304901123,
"learning_rate": 1.7547095802227723e-05,
"loss": 0.5291,
"step": 306
},
{
"epoch": 0.005620137299771167,
"grad_norm": 6.544892311096191,
"learning_rate": 1.7524149088957244e-05,
"loss": 0.679,
"step": 307
},
{
"epoch": 0.0056384439359267735,
"grad_norm": 13.220139503479004,
"learning_rate": 1.7501110696304598e-05,
"loss": 0.7442,
"step": 308
},
{
"epoch": 0.00565675057208238,
"grad_norm": 4.368415355682373,
"learning_rate": 1.747798090498532e-05,
"loss": 0.3106,
"step": 309
},
{
"epoch": 0.005675057208237987,
"grad_norm": 5.565270900726318,
"learning_rate": 1.7454759996828622e-05,
"loss": 0.2814,
"step": 310
},
{
"epoch": 0.005693363844393592,
"grad_norm": 6.844931125640869,
"learning_rate": 1.7431448254773943e-05,
"loss": 0.5184,
"step": 311
},
{
"epoch": 0.005711670480549199,
"grad_norm": 4.695760726928711,
"learning_rate": 1.74080459628675e-05,
"loss": 0.463,
"step": 312
},
{
"epoch": 0.005729977116704805,
"grad_norm": 7.724005222320557,
"learning_rate": 1.7384553406258842e-05,
"loss": 0.468,
"step": 313
},
{
"epoch": 0.005748283752860412,
"grad_norm": 3.6156113147735596,
"learning_rate": 1.7360970871197347e-05,
"loss": 0.3026,
"step": 314
},
{
"epoch": 0.0057665903890160184,
"grad_norm": 1.1042062044143677,
"learning_rate": 1.7337298645028764e-05,
"loss": 0.0863,
"step": 315
},
{
"epoch": 0.005784897025171625,
"grad_norm": 3.604750633239746,
"learning_rate": 1.7313537016191706e-05,
"loss": 0.1948,
"step": 316
},
{
"epoch": 0.0058032036613272315,
"grad_norm": 3.3758010864257812,
"learning_rate": 1.7289686274214116e-05,
"loss": 0.2802,
"step": 317
},
{
"epoch": 0.005821510297482837,
"grad_norm": 12.799773216247559,
"learning_rate": 1.7265746709709762e-05,
"loss": 0.9552,
"step": 318
},
{
"epoch": 0.005839816933638444,
"grad_norm": 2.934678077697754,
"learning_rate": 1.7241718614374678e-05,
"loss": 0.2117,
"step": 319
},
{
"epoch": 0.00585812356979405,
"grad_norm": 2.4731688499450684,
"learning_rate": 1.7217602280983622e-05,
"loss": 0.1622,
"step": 320
},
{
"epoch": 0.005876430205949657,
"grad_norm": 2.6618807315826416,
"learning_rate": 1.7193398003386514e-05,
"loss": 0.211,
"step": 321
},
{
"epoch": 0.005894736842105263,
"grad_norm": 5.7936906814575195,
"learning_rate": 1.716910607650483e-05,
"loss": 0.6325,
"step": 322
},
{
"epoch": 0.00591304347826087,
"grad_norm": 5.763343334197998,
"learning_rate": 1.7144726796328034e-05,
"loss": 0.6117,
"step": 323
},
{
"epoch": 0.005931350114416476,
"grad_norm": 6.462403774261475,
"learning_rate": 1.712026045990997e-05,
"loss": 0.3717,
"step": 324
},
{
"epoch": 0.005949656750572082,
"grad_norm": 2.8275930881500244,
"learning_rate": 1.709570736536521e-05,
"loss": 0.1894,
"step": 325
},
{
"epoch": 0.005967963386727689,
"grad_norm": 7.836248874664307,
"learning_rate": 1.7071067811865477e-05,
"loss": 0.4134,
"step": 326
},
{
"epoch": 0.005986270022883295,
"grad_norm": 4.337584972381592,
"learning_rate": 1.7046342099635948e-05,
"loss": 0.3016,
"step": 327
},
{
"epoch": 0.006004576659038902,
"grad_norm": 4.750609874725342,
"learning_rate": 1.7021530529951627e-05,
"loss": 0.7295,
"step": 328
},
{
"epoch": 0.006022883295194508,
"grad_norm": 4.368948459625244,
"learning_rate": 1.6996633405133656e-05,
"loss": 0.4981,
"step": 329
},
{
"epoch": 0.006041189931350115,
"grad_norm": 5.648835182189941,
"learning_rate": 1.697165102854565e-05,
"loss": 0.3509,
"step": 330
},
{
"epoch": 0.0060594965675057205,
"grad_norm": 6.33278226852417,
"learning_rate": 1.6946583704589973e-05,
"loss": 0.553,
"step": 331
},
{
"epoch": 0.006077803203661327,
"grad_norm": 3.5381555557250977,
"learning_rate": 1.692143173870407e-05,
"loss": 0.3945,
"step": 332
},
{
"epoch": 0.006096109839816934,
"grad_norm": 6.314048767089844,
"learning_rate": 1.68961954373567e-05,
"loss": 0.4074,
"step": 333
},
{
"epoch": 0.00611441647597254,
"grad_norm": 10.963640213012695,
"learning_rate": 1.6870875108044233e-05,
"loss": 0.4685,
"step": 334
},
{
"epoch": 0.006132723112128147,
"grad_norm": 4.838613510131836,
"learning_rate": 1.684547105928689e-05,
"loss": 0.3112,
"step": 335
},
{
"epoch": 0.006151029748283753,
"grad_norm": 6.644429683685303,
"learning_rate": 1.6819983600624986e-05,
"loss": 0.4721,
"step": 336
},
{
"epoch": 0.006169336384439359,
"grad_norm": 7.574588775634766,
"learning_rate": 1.6794413042615168e-05,
"loss": 0.4311,
"step": 337
},
{
"epoch": 0.0061876430205949655,
"grad_norm": 16.03948402404785,
"learning_rate": 1.6768759696826608e-05,
"loss": 1.1899,
"step": 338
},
{
"epoch": 0.006205949656750572,
"grad_norm": 8.45483112335205,
"learning_rate": 1.6743023875837233e-05,
"loss": 0.4542,
"step": 339
},
{
"epoch": 0.0062242562929061785,
"grad_norm": 5.345456600189209,
"learning_rate": 1.6717205893229904e-05,
"loss": 0.514,
"step": 340
},
{
"epoch": 0.006242562929061785,
"grad_norm": 6.069270610809326,
"learning_rate": 1.6691306063588583e-05,
"loss": 0.675,
"step": 341
},
{
"epoch": 0.006260869565217392,
"grad_norm": 8.289877891540527,
"learning_rate": 1.6665324702494524e-05,
"loss": 0.4616,
"step": 342
},
{
"epoch": 0.006279176201372997,
"grad_norm": 8.072705268859863,
"learning_rate": 1.6639262126522417e-05,
"loss": 0.4349,
"step": 343
},
{
"epoch": 0.006297482837528604,
"grad_norm": 8.641850471496582,
"learning_rate": 1.661311865323652e-05,
"loss": 0.4676,
"step": 344
},
{
"epoch": 0.00631578947368421,
"grad_norm": 4.457225799560547,
"learning_rate": 1.6586894601186804e-05,
"loss": 0.4408,
"step": 345
},
{
"epoch": 0.006334096109839817,
"grad_norm": 4.834903240203857,
"learning_rate": 1.6560590289905074e-05,
"loss": 0.3358,
"step": 346
},
{
"epoch": 0.0063524027459954235,
"grad_norm": 11.663195610046387,
"learning_rate": 1.6534206039901057e-05,
"loss": 0.6072,
"step": 347
},
{
"epoch": 0.00637070938215103,
"grad_norm": 3.625476360321045,
"learning_rate": 1.650774217265851e-05,
"loss": 0.2633,
"step": 348
},
{
"epoch": 0.0063890160183066366,
"grad_norm": 6.559308052062988,
"learning_rate": 1.6481199010631312e-05,
"loss": 0.2867,
"step": 349
},
{
"epoch": 0.006407322654462242,
"grad_norm": 11.27363395690918,
"learning_rate": 1.645457687723951e-05,
"loss": 0.8009,
"step": 350
},
{
"epoch": 0.006425629290617849,
"grad_norm": 6.976966381072998,
"learning_rate": 1.6427876096865394e-05,
"loss": 0.5221,
"step": 351
},
{
"epoch": 0.006443935926773455,
"grad_norm": 5.93279504776001,
"learning_rate": 1.6401096994849558e-05,
"loss": 0.3511,
"step": 352
},
{
"epoch": 0.006462242562929062,
"grad_norm": 5.895767688751221,
"learning_rate": 1.63742398974869e-05,
"loss": 0.3957,
"step": 353
},
{
"epoch": 0.006480549199084668,
"grad_norm": 4.896230220794678,
"learning_rate": 1.6347305132022677e-05,
"loss": 0.4095,
"step": 354
},
{
"epoch": 0.006498855835240275,
"grad_norm": 4.811487674713135,
"learning_rate": 1.632029302664851e-05,
"loss": 0.3359,
"step": 355
},
{
"epoch": 0.006517162471395881,
"grad_norm": 6.440311908721924,
"learning_rate": 1.6293203910498375e-05,
"loss": 0.4722,
"step": 356
},
{
"epoch": 0.006535469107551487,
"grad_norm": 4.4008965492248535,
"learning_rate": 1.6266038113644605e-05,
"loss": 0.325,
"step": 357
},
{
"epoch": 0.006553775743707094,
"grad_norm": 5.9258713722229,
"learning_rate": 1.6238795967093865e-05,
"loss": 0.4317,
"step": 358
},
{
"epoch": 0.0065720823798627,
"grad_norm": 9.05823040008545,
"learning_rate": 1.6211477802783105e-05,
"loss": 0.6237,
"step": 359
},
{
"epoch": 0.006590389016018307,
"grad_norm": 12.546273231506348,
"learning_rate": 1.6184083953575543e-05,
"loss": 0.4178,
"step": 360
},
{
"epoch": 0.006608695652173913,
"grad_norm": 12.221634864807129,
"learning_rate": 1.6156614753256583e-05,
"loss": 0.6527,
"step": 361
},
{
"epoch": 0.00662700228832952,
"grad_norm": 8.335714340209961,
"learning_rate": 1.6129070536529767e-05,
"loss": 0.5695,
"step": 362
},
{
"epoch": 0.0066453089244851255,
"grad_norm": 9.451141357421875,
"learning_rate": 1.610145163901268e-05,
"loss": 0.4343,
"step": 363
},
{
"epoch": 0.006663615560640732,
"grad_norm": 6.33415412902832,
"learning_rate": 1.607375839723287e-05,
"loss": 0.3475,
"step": 364
},
{
"epoch": 0.006681922196796339,
"grad_norm": 8.798182487487793,
"learning_rate": 1.6045991148623752e-05,
"loss": 0.4564,
"step": 365
},
{
"epoch": 0.006700228832951945,
"grad_norm": 7.0115647315979,
"learning_rate": 1.6018150231520486e-05,
"loss": 0.4848,
"step": 366
},
{
"epoch": 0.006718535469107552,
"grad_norm": 11.819734573364258,
"learning_rate": 1.599023598515586e-05,
"loss": 0.9038,
"step": 367
},
{
"epoch": 0.006736842105263158,
"grad_norm": 7.8828325271606445,
"learning_rate": 1.5962248749656158e-05,
"loss": 0.252,
"step": 368
},
{
"epoch": 0.006755148741418764,
"grad_norm": 1.1683067083358765,
"learning_rate": 1.5934188866037017e-05,
"loss": 0.0875,
"step": 369
},
{
"epoch": 0.0067734553775743705,
"grad_norm": 4.919974327087402,
"learning_rate": 1.5906056676199256e-05,
"loss": 0.1626,
"step": 370
},
{
"epoch": 0.006791762013729977,
"grad_norm": 5.247345447540283,
"learning_rate": 1.5877852522924733e-05,
"loss": 0.2482,
"step": 371
},
{
"epoch": 0.006810068649885584,
"grad_norm": 2.816721200942993,
"learning_rate": 1.584957674987216e-05,
"loss": 0.1829,
"step": 372
},
{
"epoch": 0.00682837528604119,
"grad_norm": 15.067553520202637,
"learning_rate": 1.5821229701572897e-05,
"loss": 0.9891,
"step": 373
},
{
"epoch": 0.006846681922196797,
"grad_norm": 12.05018424987793,
"learning_rate": 1.5792811723426787e-05,
"loss": 0.5695,
"step": 374
},
{
"epoch": 0.006864988558352402,
"grad_norm": 7.035960674285889,
"learning_rate": 1.5764323161697933e-05,
"loss": 0.362,
"step": 375
},
{
"epoch": 0.006883295194508009,
"grad_norm": 10.163368225097656,
"learning_rate": 1.573576436351046e-05,
"loss": 0.5263,
"step": 376
},
{
"epoch": 0.006901601830663615,
"grad_norm": 8.28557014465332,
"learning_rate": 1.570713567684432e-05,
"loss": 0.4129,
"step": 377
},
{
"epoch": 0.006919908466819222,
"grad_norm": 12.570793151855469,
"learning_rate": 1.5678437450531014e-05,
"loss": 0.7611,
"step": 378
},
{
"epoch": 0.0069382151029748285,
"grad_norm": 6.368137836456299,
"learning_rate": 1.564967003424938e-05,
"loss": 0.5362,
"step": 379
},
{
"epoch": 0.006956521739130435,
"grad_norm": 3.3329522609710693,
"learning_rate": 1.5620833778521306e-05,
"loss": 0.3633,
"step": 380
},
{
"epoch": 0.006974828375286042,
"grad_norm": 5.921566009521484,
"learning_rate": 1.5591929034707468e-05,
"loss": 0.45,
"step": 381
},
{
"epoch": 0.006993135011441647,
"grad_norm": 4.826172828674316,
"learning_rate": 1.556295615500305e-05,
"loss": 0.3278,
"step": 382
},
{
"epoch": 0.007011441647597254,
"grad_norm": 6.323006629943848,
"learning_rate": 1.553391549243344e-05,
"loss": 0.3469,
"step": 383
},
{
"epoch": 0.00702974828375286,
"grad_norm": 5.0740203857421875,
"learning_rate": 1.5504807400849957e-05,
"loss": 0.3713,
"step": 384
},
{
"epoch": 0.007048054919908467,
"grad_norm": 4.266338348388672,
"learning_rate": 1.5475632234925505e-05,
"loss": 0.2165,
"step": 385
},
{
"epoch": 0.007066361556064073,
"grad_norm": 4.6646504402160645,
"learning_rate": 1.5446390350150272e-05,
"loss": 0.4328,
"step": 386
},
{
"epoch": 0.00708466819221968,
"grad_norm": 4.261294841766357,
"learning_rate": 1.54170821028274e-05,
"loss": 0.4952,
"step": 387
},
{
"epoch": 0.007102974828375286,
"grad_norm": 4.2962846755981445,
"learning_rate": 1.5387707850068633e-05,
"loss": 0.3523,
"step": 388
},
{
"epoch": 0.007121281464530892,
"grad_norm": 3.559885263442993,
"learning_rate": 1.5358267949789968e-05,
"loss": 0.1836,
"step": 389
},
{
"epoch": 0.007139588100686499,
"grad_norm": 4.17678689956665,
"learning_rate": 1.53287627607073e-05,
"loss": 0.1978,
"step": 390
},
{
"epoch": 0.007157894736842105,
"grad_norm": 9.370595932006836,
"learning_rate": 1.529919264233205e-05,
"loss": 0.5643,
"step": 391
},
{
"epoch": 0.007176201372997712,
"grad_norm": 4.1542463302612305,
"learning_rate": 1.5269557954966777e-05,
"loss": 0.3046,
"step": 392
},
{
"epoch": 0.007194508009153318,
"grad_norm": 7.581055164337158,
"learning_rate": 1.5239859059700794e-05,
"loss": 0.4282,
"step": 393
},
{
"epoch": 0.007212814645308925,
"grad_norm": 2.4097397327423096,
"learning_rate": 1.5210096318405768e-05,
"loss": 0.154,
"step": 394
},
{
"epoch": 0.007231121281464531,
"grad_norm": 3.8569397926330566,
"learning_rate": 1.5180270093731305e-05,
"loss": 0.0843,
"step": 395
},
{
"epoch": 0.007249427917620137,
"grad_norm": 7.704616069793701,
"learning_rate": 1.5150380749100545e-05,
"loss": 0.5627,
"step": 396
},
{
"epoch": 0.007267734553775744,
"grad_norm": 4.946331024169922,
"learning_rate": 1.5120428648705716e-05,
"loss": 0.2496,
"step": 397
},
{
"epoch": 0.00728604118993135,
"grad_norm": 3.8404288291931152,
"learning_rate": 1.5090414157503715e-05,
"loss": 0.1916,
"step": 398
},
{
"epoch": 0.007304347826086957,
"grad_norm": 1.226649522781372,
"learning_rate": 1.5060337641211637e-05,
"loss": 0.1359,
"step": 399
},
{
"epoch": 0.007322654462242563,
"grad_norm": 10.208996772766113,
"learning_rate": 1.5030199466302354e-05,
"loss": 0.3555,
"step": 400
},
{
"epoch": 0.007340961098398169,
"grad_norm": 7.164267539978027,
"learning_rate": 1.5000000000000002e-05,
"loss": 0.4576,
"step": 401
},
{
"epoch": 0.0073592677345537755,
"grad_norm": 6.956635475158691,
"learning_rate": 1.4969739610275556e-05,
"loss": 0.3394,
"step": 402
},
{
"epoch": 0.007377574370709382,
"grad_norm": 11.293649673461914,
"learning_rate": 1.493941866584231e-05,
"loss": 0.6647,
"step": 403
},
{
"epoch": 0.007395881006864989,
"grad_norm": 9.281755447387695,
"learning_rate": 1.490903753615141e-05,
"loss": 0.4833,
"step": 404
},
{
"epoch": 0.007414187643020595,
"grad_norm": 3.4506609439849854,
"learning_rate": 1.4878596591387329e-05,
"loss": 0.239,
"step": 405
},
{
"epoch": 0.007432494279176202,
"grad_norm": 1.7865498065948486,
"learning_rate": 1.4848096202463373e-05,
"loss": 0.135,
"step": 406
},
{
"epoch": 0.007450800915331807,
"grad_norm": 2.3500795364379883,
"learning_rate": 1.4817536741017153e-05,
"loss": 0.2015,
"step": 407
},
{
"epoch": 0.007469107551487414,
"grad_norm": 8.29547119140625,
"learning_rate": 1.478691857940607e-05,
"loss": 0.4149,
"step": 408
},
{
"epoch": 0.0074874141876430204,
"grad_norm": 2.8842177391052246,
"learning_rate": 1.4756242090702756e-05,
"loss": 0.1729,
"step": 409
},
{
"epoch": 0.007505720823798627,
"grad_norm": 8.263029098510742,
"learning_rate": 1.4725507648690542e-05,
"loss": 0.5903,
"step": 410
},
{
"epoch": 0.0075240274599542335,
"grad_norm": 4.133641719818115,
"learning_rate": 1.469471562785891e-05,
"loss": 0.3041,
"step": 411
},
{
"epoch": 0.00754233409610984,
"grad_norm": 9.040873527526855,
"learning_rate": 1.4663866403398915e-05,
"loss": 0.6155,
"step": 412
},
{
"epoch": 0.007560640732265447,
"grad_norm": 5.259375095367432,
"learning_rate": 1.463296035119862e-05,
"loss": 0.357,
"step": 413
},
{
"epoch": 0.007578947368421052,
"grad_norm": 5.846144676208496,
"learning_rate": 1.4601997847838518e-05,
"loss": 0.2454,
"step": 414
},
{
"epoch": 0.007597254004576659,
"grad_norm": 5.604888439178467,
"learning_rate": 1.4570979270586944e-05,
"loss": 0.4757,
"step": 415
},
{
"epoch": 0.007615560640732265,
"grad_norm": 6.676849365234375,
"learning_rate": 1.4539904997395468e-05,
"loss": 0.2546,
"step": 416
},
{
"epoch": 0.007633867276887872,
"grad_norm": 7.3254523277282715,
"learning_rate": 1.4508775406894308e-05,
"loss": 0.4365,
"step": 417
},
{
"epoch": 0.0076521739130434785,
"grad_norm": 7.06563663482666,
"learning_rate": 1.4477590878387697e-05,
"loss": 0.3348,
"step": 418
},
{
"epoch": 0.007670480549199085,
"grad_norm": 4.191524982452393,
"learning_rate": 1.4446351791849276e-05,
"loss": 0.2775,
"step": 419
},
{
"epoch": 0.007688787185354691,
"grad_norm": 6.883635520935059,
"learning_rate": 1.4415058527917454e-05,
"loss": 0.336,
"step": 420
},
{
"epoch": 0.007707093821510297,
"grad_norm": 2.3200838565826416,
"learning_rate": 1.4383711467890776e-05,
"loss": 0.1396,
"step": 421
},
{
"epoch": 0.007725400457665904,
"grad_norm": 4.974113464355469,
"learning_rate": 1.4352310993723277e-05,
"loss": 0.2149,
"step": 422
},
{
"epoch": 0.00774370709382151,
"grad_norm": 7.065567970275879,
"learning_rate": 1.4320857488019826e-05,
"loss": 0.3439,
"step": 423
},
{
"epoch": 0.007762013729977117,
"grad_norm": 6.593064308166504,
"learning_rate": 1.4289351334031461e-05,
"loss": 0.327,
"step": 424
},
{
"epoch": 0.007780320366132723,
"grad_norm": 4.018970012664795,
"learning_rate": 1.4257792915650728e-05,
"loss": 0.2449,
"step": 425
},
{
"epoch": 0.00779862700228833,
"grad_norm": 1.6839569807052612,
"learning_rate": 1.4226182617406996e-05,
"loss": 0.1382,
"step": 426
},
{
"epoch": 0.007816933638443936,
"grad_norm": 3.03248929977417,
"learning_rate": 1.4194520824461773e-05,
"loss": 0.1527,
"step": 427
},
{
"epoch": 0.007835240274599543,
"grad_norm": 10.530253410339355,
"learning_rate": 1.4162807922604014e-05,
"loss": 0.4916,
"step": 428
},
{
"epoch": 0.00785354691075515,
"grad_norm": 9.175287246704102,
"learning_rate": 1.413104429824542e-05,
"loss": 0.5146,
"step": 429
},
{
"epoch": 0.007871853546910754,
"grad_norm": 9.110077857971191,
"learning_rate": 1.4099230338415728e-05,
"loss": 0.2713,
"step": 430
},
{
"epoch": 0.007890160183066361,
"grad_norm": 6.078294277191162,
"learning_rate": 1.4067366430758004e-05,
"loss": 0.297,
"step": 431
},
{
"epoch": 0.007908466819221967,
"grad_norm": 9.00320816040039,
"learning_rate": 1.4035452963523903e-05,
"loss": 0.219,
"step": 432
},
{
"epoch": 0.007926773455377574,
"grad_norm": 11.155050277709961,
"learning_rate": 1.4003490325568953e-05,
"loss": 0.5544,
"step": 433
},
{
"epoch": 0.00794508009153318,
"grad_norm": 9.077802658081055,
"learning_rate": 1.3971478906347806e-05,
"loss": 0.3477,
"step": 434
},
{
"epoch": 0.007963386727688787,
"grad_norm": 5.8080668449401855,
"learning_rate": 1.3939419095909513e-05,
"loss": 0.275,
"step": 435
},
{
"epoch": 0.007981693363844394,
"grad_norm": 19.085351943969727,
"learning_rate": 1.3907311284892737e-05,
"loss": 0.3296,
"step": 436
},
{
"epoch": 0.008,
"grad_norm": 12.508645057678223,
"learning_rate": 1.3875155864521031e-05,
"loss": 0.5693,
"step": 437
},
{
"epoch": 0.008018306636155607,
"grad_norm": 5.50254487991333,
"learning_rate": 1.3842953226598036e-05,
"loss": 0.1062,
"step": 438
},
{
"epoch": 0.008036613272311213,
"grad_norm": 14.789398193359375,
"learning_rate": 1.3810703763502744e-05,
"loss": 0.9844,
"step": 439
},
{
"epoch": 0.00805491990846682,
"grad_norm": 19.38977813720703,
"learning_rate": 1.3778407868184674e-05,
"loss": 0.6566,
"step": 440
},
{
"epoch": 0.008073226544622426,
"grad_norm": 13.897294998168945,
"learning_rate": 1.3746065934159123e-05,
"loss": 0.7898,
"step": 441
},
{
"epoch": 0.008091533180778033,
"grad_norm": 9.941490173339844,
"learning_rate": 1.371367835550235e-05,
"loss": 0.4879,
"step": 442
},
{
"epoch": 0.008109839816933638,
"grad_norm": 2.901772975921631,
"learning_rate": 1.3681245526846782e-05,
"loss": 0.2148,
"step": 443
},
{
"epoch": 0.008128146453089244,
"grad_norm": 4.679471492767334,
"learning_rate": 1.3648767843376196e-05,
"loss": 0.2317,
"step": 444
},
{
"epoch": 0.00814645308924485,
"grad_norm": 3.4032607078552246,
"learning_rate": 1.3616245700820922e-05,
"loss": 0.2151,
"step": 445
},
{
"epoch": 0.008164759725400457,
"grad_norm": 2.5919456481933594,
"learning_rate": 1.3583679495453e-05,
"loss": 0.1284,
"step": 446
},
{
"epoch": 0.008183066361556064,
"grad_norm": 6.18490743637085,
"learning_rate": 1.3551069624081372e-05,
"loss": 0.3029,
"step": 447
},
{
"epoch": 0.00820137299771167,
"grad_norm": 10.599230766296387,
"learning_rate": 1.3518416484047018e-05,
"loss": 0.5019,
"step": 448
},
{
"epoch": 0.008219679633867277,
"grad_norm": 8.500578880310059,
"learning_rate": 1.3485720473218153e-05,
"loss": 0.4647,
"step": 449
},
{
"epoch": 0.008237986270022883,
"grad_norm": 2.737617254257202,
"learning_rate": 1.3452981989985347e-05,
"loss": 0.1593,
"step": 450
}
],
"logging_steps": 1,
"max_steps": 1000,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 200,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": false,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}