{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.5885815185403178,
"eval_steps": 500,
"global_step": 1000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0005885815185403178,
"grad_norm": 35.75,
"learning_rate": 6.666666666666667e-07,
"loss": 3.1406,
"step": 1
},
{
"epoch": 0.0011771630370806356,
"grad_norm": 39.0,
"learning_rate": 1.3333333333333334e-06,
"loss": 3.1719,
"step": 2
},
{
"epoch": 0.0017657445556209534,
"grad_norm": 59.75,
"learning_rate": 2.0000000000000003e-06,
"loss": 3.2656,
"step": 3
},
{
"epoch": 0.002354326074161271,
"grad_norm": 41.25,
"learning_rate": 2.666666666666667e-06,
"loss": 3.1562,
"step": 4
},
{
"epoch": 0.002942907592701589,
"grad_norm": 43.5,
"learning_rate": 3.3333333333333333e-06,
"loss": 3.2812,
"step": 5
},
{
"epoch": 0.003531489111241907,
"grad_norm": 700.0,
"learning_rate": 4.000000000000001e-06,
"loss": 3.3438,
"step": 6
},
{
"epoch": 0.004120070629782225,
"grad_norm": 92.5,
"learning_rate": 4.666666666666667e-06,
"loss": 3.2969,
"step": 7
},
{
"epoch": 0.004708652148322542,
"grad_norm": 87.0,
"learning_rate": 5.333333333333334e-06,
"loss": 3.2031,
"step": 8
},
{
"epoch": 0.00529723366686286,
"grad_norm": 46.5,
"learning_rate": 6e-06,
"loss": 3.1094,
"step": 9
},
{
"epoch": 0.005885815185403178,
"grad_norm": 27.875,
"learning_rate": 6.666666666666667e-06,
"loss": 3.0781,
"step": 10
},
{
"epoch": 0.006474396703943496,
"grad_norm": 53.25,
"learning_rate": 7.333333333333334e-06,
"loss": 3.0781,
"step": 11
},
{
"epoch": 0.007062978222483814,
"grad_norm": 48.5,
"learning_rate": 8.000000000000001e-06,
"loss": 3.25,
"step": 12
},
{
"epoch": 0.007651559741024131,
"grad_norm": 35.75,
"learning_rate": 8.666666666666668e-06,
"loss": 3.0469,
"step": 13
},
{
"epoch": 0.00824014125956445,
"grad_norm": 165.0,
"learning_rate": 9.333333333333334e-06,
"loss": 3.2969,
"step": 14
},
{
"epoch": 0.008828722778104767,
"grad_norm": 213.0,
"learning_rate": 1e-05,
"loss": 3.0781,
"step": 15
},
{
"epoch": 0.009417304296645085,
"grad_norm": 69.0,
"learning_rate": 1.0666666666666667e-05,
"loss": 3.0469,
"step": 16
},
{
"epoch": 0.010005885815185403,
"grad_norm": 104.5,
"learning_rate": 1.1333333333333334e-05,
"loss": 3.0469,
"step": 17
},
{
"epoch": 0.01059446733372572,
"grad_norm": 20.625,
"learning_rate": 1.2e-05,
"loss": 3.1094,
"step": 18
},
{
"epoch": 0.011183048852266038,
"grad_norm": 38.25,
"learning_rate": 1.2666666666666668e-05,
"loss": 3.1719,
"step": 19
},
{
"epoch": 0.011771630370806356,
"grad_norm": 65.5,
"learning_rate": 1.3333333333333333e-05,
"loss": 3.0938,
"step": 20
},
{
"epoch": 0.012360211889346674,
"grad_norm": 39.5,
"learning_rate": 1.4000000000000001e-05,
"loss": 3.1406,
"step": 21
},
{
"epoch": 0.012948793407886992,
"grad_norm": 16.5,
"learning_rate": 1.4666666666666668e-05,
"loss": 3.0625,
"step": 22
},
{
"epoch": 0.01353737492642731,
"grad_norm": 35.5,
"learning_rate": 1.5333333333333334e-05,
"loss": 3.125,
"step": 23
},
{
"epoch": 0.014125956444967627,
"grad_norm": 133.0,
"learning_rate": 1.6000000000000003e-05,
"loss": 3.0625,
"step": 24
},
{
"epoch": 0.014714537963507945,
"grad_norm": 31.25,
"learning_rate": 1.6666666666666667e-05,
"loss": 3.1406,
"step": 25
},
{
"epoch": 0.015303119482048263,
"grad_norm": 22.5,
"learning_rate": 1.7333333333333336e-05,
"loss": 3.0938,
"step": 26
},
{
"epoch": 0.015891701000588582,
"grad_norm": 41.25,
"learning_rate": 1.8e-05,
"loss": 3.125,
"step": 27
},
{
"epoch": 0.0164802825191289,
"grad_norm": 42.75,
"learning_rate": 1.866666666666667e-05,
"loss": 3.0,
"step": 28
},
{
"epoch": 0.017068864037669218,
"grad_norm": 90.5,
"learning_rate": 1.9333333333333333e-05,
"loss": 3.0469,
"step": 29
},
{
"epoch": 0.017657445556209534,
"grad_norm": 45.25,
"learning_rate": 2e-05,
"loss": 3.1406,
"step": 30
},
{
"epoch": 0.018246027074749854,
"grad_norm": 105.0,
"learning_rate": 2.0666666666666666e-05,
"loss": 2.9531,
"step": 31
},
{
"epoch": 0.01883460859329017,
"grad_norm": 29.875,
"learning_rate": 2.1333333333333335e-05,
"loss": 3.1719,
"step": 32
},
{
"epoch": 0.01942319011183049,
"grad_norm": 29.125,
"learning_rate": 2.2000000000000003e-05,
"loss": 3.1875,
"step": 33
},
{
"epoch": 0.020011771630370805,
"grad_norm": 34.0,
"learning_rate": 2.2666666666666668e-05,
"loss": 3.125,
"step": 34
},
{
"epoch": 0.020600353148911125,
"grad_norm": 121.0,
"learning_rate": 2.3333333333333336e-05,
"loss": 3.2812,
"step": 35
},
{
"epoch": 0.02118893466745144,
"grad_norm": 18.75,
"learning_rate": 2.4e-05,
"loss": 3.125,
"step": 36
},
{
"epoch": 0.02177751618599176,
"grad_norm": 32.0,
"learning_rate": 2.466666666666667e-05,
"loss": 2.9688,
"step": 37
},
{
"epoch": 0.022366097704532076,
"grad_norm": 72.0,
"learning_rate": 2.5333333333333337e-05,
"loss": 2.9531,
"step": 38
},
{
"epoch": 0.022954679223072396,
"grad_norm": 59.25,
"learning_rate": 2.6000000000000002e-05,
"loss": 3.125,
"step": 39
},
{
"epoch": 0.023543260741612712,
"grad_norm": 20.625,
"learning_rate": 2.6666666666666667e-05,
"loss": 2.9844,
"step": 40
},
{
"epoch": 0.02413184226015303,
"grad_norm": 73.0,
"learning_rate": 2.733333333333333e-05,
"loss": 3.0156,
"step": 41
},
{
"epoch": 0.024720423778693348,
"grad_norm": 11.8125,
"learning_rate": 2.8000000000000003e-05,
"loss": 2.8906,
"step": 42
},
{
"epoch": 0.025309005297233667,
"grad_norm": 34.5,
"learning_rate": 2.8666666666666668e-05,
"loss": 3.0312,
"step": 43
},
{
"epoch": 0.025897586815773983,
"grad_norm": 25.375,
"learning_rate": 2.9333333333333336e-05,
"loss": 2.9844,
"step": 44
},
{
"epoch": 0.026486168334314303,
"grad_norm": 17.5,
"learning_rate": 3e-05,
"loss": 3.0,
"step": 45
},
{
"epoch": 0.02707474985285462,
"grad_norm": 29.0,
"learning_rate": 3.066666666666667e-05,
"loss": 3.0156,
"step": 46
},
{
"epoch": 0.02766333137139494,
"grad_norm": 56.25,
"learning_rate": 3.1333333333333334e-05,
"loss": 2.875,
"step": 47
},
{
"epoch": 0.028251912889935255,
"grad_norm": 133.0,
"learning_rate": 3.2000000000000005e-05,
"loss": 3.0,
"step": 48
},
{
"epoch": 0.028840494408475574,
"grad_norm": 21.5,
"learning_rate": 3.266666666666667e-05,
"loss": 2.8438,
"step": 49
},
{
"epoch": 0.02942907592701589,
"grad_norm": 21.0,
"learning_rate": 3.3333333333333335e-05,
"loss": 2.8438,
"step": 50
},
{
"epoch": 0.03001765744555621,
"grad_norm": 29.125,
"learning_rate": 3.4000000000000007e-05,
"loss": 2.8125,
"step": 51
},
{
"epoch": 0.030606238964096526,
"grad_norm": 34.25,
"learning_rate": 3.466666666666667e-05,
"loss": 2.8906,
"step": 52
},
{
"epoch": 0.031194820482636845,
"grad_norm": 74.5,
"learning_rate": 3.5333333333333336e-05,
"loss": 2.7344,
"step": 53
},
{
"epoch": 0.031783402001177165,
"grad_norm": 19.375,
"learning_rate": 3.6e-05,
"loss": 2.7969,
"step": 54
},
{
"epoch": 0.032371983519717484,
"grad_norm": 146.0,
"learning_rate": 3.6666666666666666e-05,
"loss": 2.9219,
"step": 55
},
{
"epoch": 0.0329605650382578,
"grad_norm": 40.25,
"learning_rate": 3.733333333333334e-05,
"loss": 2.6719,
"step": 56
},
{
"epoch": 0.033549146556798116,
"grad_norm": 64.0,
"learning_rate": 3.8e-05,
"loss": 2.5938,
"step": 57
},
{
"epoch": 0.034137728075338436,
"grad_norm": 44.0,
"learning_rate": 3.866666666666667e-05,
"loss": 2.5,
"step": 58
},
{
"epoch": 0.034726309593878756,
"grad_norm": 30.625,
"learning_rate": 3.933333333333333e-05,
"loss": 2.6719,
"step": 59
},
{
"epoch": 0.03531489111241907,
"grad_norm": 59.25,
"learning_rate": 4e-05,
"loss": 2.5312,
"step": 60
},
{
"epoch": 0.03590347263095939,
"grad_norm": 13.9375,
"learning_rate": 4.066666666666667e-05,
"loss": 2.4688,
"step": 61
},
{
"epoch": 0.03649205414949971,
"grad_norm": 91.5,
"learning_rate": 4.133333333333333e-05,
"loss": 2.5625,
"step": 62
},
{
"epoch": 0.03708063566804003,
"grad_norm": 54.5,
"learning_rate": 4.2e-05,
"loss": 2.5312,
"step": 63
},
{
"epoch": 0.03766921718658034,
"grad_norm": 234.0,
"learning_rate": 4.266666666666667e-05,
"loss": 2.5,
"step": 64
},
{
"epoch": 0.03825779870512066,
"grad_norm": 58.75,
"learning_rate": 4.3333333333333334e-05,
"loss": 2.2969,
"step": 65
},
{
"epoch": 0.03884638022366098,
"grad_norm": 26.875,
"learning_rate": 4.4000000000000006e-05,
"loss": 2.1562,
"step": 66
},
{
"epoch": 0.0394349617422013,
"grad_norm": 30.25,
"learning_rate": 4.466666666666667e-05,
"loss": 2.25,
"step": 67
},
{
"epoch": 0.04002354326074161,
"grad_norm": 61.0,
"learning_rate": 4.5333333333333335e-05,
"loss": 2.3594,
"step": 68
},
{
"epoch": 0.04061212477928193,
"grad_norm": 36.0,
"learning_rate": 4.600000000000001e-05,
"loss": 2.1719,
"step": 69
},
{
"epoch": 0.04120070629782225,
"grad_norm": 56.0,
"learning_rate": 4.666666666666667e-05,
"loss": 2.1875,
"step": 70
},
{
"epoch": 0.04178928781636257,
"grad_norm": 34.75,
"learning_rate": 4.7333333333333336e-05,
"loss": 2.1719,
"step": 71
},
{
"epoch": 0.04237786933490288,
"grad_norm": 58.25,
"learning_rate": 4.8e-05,
"loss": 2.125,
"step": 72
},
{
"epoch": 0.0429664508534432,
"grad_norm": 67.0,
"learning_rate": 4.866666666666667e-05,
"loss": 2.4531,
"step": 73
},
{
"epoch": 0.04355503237198352,
"grad_norm": 21.625,
"learning_rate": 4.933333333333334e-05,
"loss": 2.1094,
"step": 74
},
{
"epoch": 0.04414361389052384,
"grad_norm": 43.5,
"learning_rate": 5e-05,
"loss": 2.0156,
"step": 75
},
{
"epoch": 0.04473219540906415,
"grad_norm": 17.125,
"learning_rate": 5.0666666666666674e-05,
"loss": 2.0156,
"step": 76
},
{
"epoch": 0.04532077692760447,
"grad_norm": 40.25,
"learning_rate": 5.133333333333333e-05,
"loss": 2.1094,
"step": 77
},
{
"epoch": 0.04590935844614479,
"grad_norm": 29.875,
"learning_rate": 5.2000000000000004e-05,
"loss": 1.9297,
"step": 78
},
{
"epoch": 0.04649793996468511,
"grad_norm": 45.5,
"learning_rate": 5.266666666666666e-05,
"loss": 2.0938,
"step": 79
},
{
"epoch": 0.047086521483225424,
"grad_norm": 73.0,
"learning_rate": 5.333333333333333e-05,
"loss": 1.9453,
"step": 80
},
{
"epoch": 0.047675103001765744,
"grad_norm": 28.5,
"learning_rate": 5.4000000000000005e-05,
"loss": 1.7891,
"step": 81
},
{
"epoch": 0.04826368452030606,
"grad_norm": 544.0,
"learning_rate": 5.466666666666666e-05,
"loss": 2.0469,
"step": 82
},
{
"epoch": 0.04885226603884638,
"grad_norm": 42.25,
"learning_rate": 5.5333333333333334e-05,
"loss": 1.75,
"step": 83
},
{
"epoch": 0.049440847557386695,
"grad_norm": 52.25,
"learning_rate": 5.6000000000000006e-05,
"loss": 1.7031,
"step": 84
},
{
"epoch": 0.050029429075927015,
"grad_norm": 41.5,
"learning_rate": 5.666666666666667e-05,
"loss": 1.7969,
"step": 85
},
{
"epoch": 0.050618010594467334,
"grad_norm": 131.0,
"learning_rate": 5.7333333333333336e-05,
"loss": 1.5234,
"step": 86
},
{
"epoch": 0.051206592113007654,
"grad_norm": 52.0,
"learning_rate": 5.8e-05,
"loss": 1.625,
"step": 87
},
{
"epoch": 0.05179517363154797,
"grad_norm": 43.75,
"learning_rate": 5.866666666666667e-05,
"loss": 1.6719,
"step": 88
},
{
"epoch": 0.052383755150088286,
"grad_norm": 27.375,
"learning_rate": 5.9333333333333343e-05,
"loss": 1.7188,
"step": 89
},
{
"epoch": 0.052972336668628606,
"grad_norm": 32.25,
"learning_rate": 6e-05,
"loss": 1.8047,
"step": 90
},
{
"epoch": 0.053560918187168925,
"grad_norm": 46.25,
"learning_rate": 6.066666666666667e-05,
"loss": 1.5859,
"step": 91
},
{
"epoch": 0.05414949970570924,
"grad_norm": 76.5,
"learning_rate": 6.133333333333334e-05,
"loss": 1.4062,
"step": 92
},
{
"epoch": 0.05473808122424956,
"grad_norm": 187.0,
"learning_rate": 6.2e-05,
"loss": 1.3359,
"step": 93
},
{
"epoch": 0.05532666274278988,
"grad_norm": 36.5,
"learning_rate": 6.266666666666667e-05,
"loss": 1.3672,
"step": 94
},
{
"epoch": 0.055915244261330196,
"grad_norm": 41.5,
"learning_rate": 6.333333333333333e-05,
"loss": 1.1172,
"step": 95
},
{
"epoch": 0.05650382577987051,
"grad_norm": 141.0,
"learning_rate": 6.400000000000001e-05,
"loss": 1.6016,
"step": 96
},
{
"epoch": 0.05709240729841083,
"grad_norm": 49.75,
"learning_rate": 6.466666666666666e-05,
"loss": 1.4375,
"step": 97
},
{
"epoch": 0.05768098881695115,
"grad_norm": 23.875,
"learning_rate": 6.533333333333334e-05,
"loss": 1.3594,
"step": 98
},
{
"epoch": 0.05826957033549147,
"grad_norm": 37.75,
"learning_rate": 6.6e-05,
"loss": 1.5,
"step": 99
},
{
"epoch": 0.05885815185403178,
"grad_norm": 32.0,
"learning_rate": 6.666666666666667e-05,
"loss": 1.3438,
"step": 100
},
{
"epoch": 0.0594467333725721,
"grad_norm": 37.0,
"learning_rate": 6.733333333333333e-05,
"loss": 1.2578,
"step": 101
},
{
"epoch": 0.06003531489111242,
"grad_norm": 66.5,
"learning_rate": 6.800000000000001e-05,
"loss": 1.3125,
"step": 102
},
{
"epoch": 0.06062389640965274,
"grad_norm": 52.5,
"learning_rate": 6.866666666666666e-05,
"loss": 1.3281,
"step": 103
},
{
"epoch": 0.06121247792819305,
"grad_norm": 122.0,
"learning_rate": 6.933333333333334e-05,
"loss": 1.1641,
"step": 104
},
{
"epoch": 0.06180105944673337,
"grad_norm": 13.375,
"learning_rate": 7e-05,
"loss": 1.0547,
"step": 105
},
{
"epoch": 0.06238964096527369,
"grad_norm": 97.5,
"learning_rate": 7.066666666666667e-05,
"loss": 1.3203,
"step": 106
},
{
"epoch": 0.062978222483814,
"grad_norm": 53.25,
"learning_rate": 7.133333333333334e-05,
"loss": 1.0547,
"step": 107
},
{
"epoch": 0.06356680400235433,
"grad_norm": 25.125,
"learning_rate": 7.2e-05,
"loss": 1.3906,
"step": 108
},
{
"epoch": 0.06415538552089464,
"grad_norm": 50.75,
"learning_rate": 7.266666666666667e-05,
"loss": 1.2109,
"step": 109
},
{
"epoch": 0.06474396703943497,
"grad_norm": 78.0,
"learning_rate": 7.333333333333333e-05,
"loss": 1.1406,
"step": 110
},
{
"epoch": 0.06533254855797528,
"grad_norm": 81.0,
"learning_rate": 7.4e-05,
"loss": 1.6875,
"step": 111
},
{
"epoch": 0.0659211300765156,
"grad_norm": 187.0,
"learning_rate": 7.466666666666667e-05,
"loss": 1.3125,
"step": 112
},
{
"epoch": 0.06650971159505592,
"grad_norm": 104.5,
"learning_rate": 7.533333333333334e-05,
"loss": 0.8438,
"step": 113
},
{
"epoch": 0.06709829311359623,
"grad_norm": 34.75,
"learning_rate": 7.6e-05,
"loss": 1.0703,
"step": 114
},
{
"epoch": 0.06768687463213655,
"grad_norm": 42.75,
"learning_rate": 7.666666666666667e-05,
"loss": 1.3359,
"step": 115
},
{
"epoch": 0.06827545615067687,
"grad_norm": 61.75,
"learning_rate": 7.733333333333333e-05,
"loss": 1.1562,
"step": 116
},
{
"epoch": 0.06886403766921718,
"grad_norm": 53.0,
"learning_rate": 7.800000000000001e-05,
"loss": 1.1797,
"step": 117
},
{
"epoch": 0.06945261918775751,
"grad_norm": 50.75,
"learning_rate": 7.866666666666666e-05,
"loss": 1.3906,
"step": 118
},
{
"epoch": 0.07004120070629782,
"grad_norm": 57.75,
"learning_rate": 7.933333333333334e-05,
"loss": 0.9883,
"step": 119
},
{
"epoch": 0.07062978222483814,
"grad_norm": 43.5,
"learning_rate": 8e-05,
"loss": 0.8828,
"step": 120
},
{
"epoch": 0.07121836374337846,
"grad_norm": 38.25,
"learning_rate": 8.066666666666667e-05,
"loss": 0.918,
"step": 121
},
{
"epoch": 0.07180694526191878,
"grad_norm": 37.75,
"learning_rate": 8.133333333333334e-05,
"loss": 1.1875,
"step": 122
},
{
"epoch": 0.07239552678045909,
"grad_norm": 21.125,
"learning_rate": 8.2e-05,
"loss": 0.7461,
"step": 123
},
{
"epoch": 0.07298410829899941,
"grad_norm": 45.0,
"learning_rate": 8.266666666666667e-05,
"loss": 0.8945,
"step": 124
},
{
"epoch": 0.07357268981753973,
"grad_norm": 130.0,
"learning_rate": 8.333333333333334e-05,
"loss": 1.1484,
"step": 125
},
{
"epoch": 0.07416127133608005,
"grad_norm": 41.25,
"learning_rate": 8.4e-05,
"loss": 1.375,
"step": 126
},
{
"epoch": 0.07474985285462037,
"grad_norm": 53.5,
"learning_rate": 8.466666666666667e-05,
"loss": 1.0469,
"step": 127
},
{
"epoch": 0.07533843437316068,
"grad_norm": 318.0,
"learning_rate": 8.533333333333334e-05,
"loss": 1.0703,
"step": 128
},
{
"epoch": 0.075927015891701,
"grad_norm": 30.125,
"learning_rate": 8.6e-05,
"loss": 1.0547,
"step": 129
},
{
"epoch": 0.07651559741024132,
"grad_norm": 170.0,
"learning_rate": 8.666666666666667e-05,
"loss": 0.8633,
"step": 130
},
{
"epoch": 0.07710417892878163,
"grad_norm": 26.75,
"learning_rate": 8.733333333333333e-05,
"loss": 0.6914,
"step": 131
},
{
"epoch": 0.07769276044732196,
"grad_norm": 64.5,
"learning_rate": 8.800000000000001e-05,
"loss": 1.0391,
"step": 132
},
{
"epoch": 0.07828134196586227,
"grad_norm": 67.5,
"learning_rate": 8.866666666666668e-05,
"loss": 0.9961,
"step": 133
},
{
"epoch": 0.0788699234844026,
"grad_norm": 62.0,
"learning_rate": 8.933333333333334e-05,
"loss": 0.9023,
"step": 134
},
{
"epoch": 0.07945850500294291,
"grad_norm": 40.0,
"learning_rate": 9e-05,
"loss": 0.7578,
"step": 135
},
{
"epoch": 0.08004708652148322,
"grad_norm": 63.5,
"learning_rate": 9.066666666666667e-05,
"loss": 1.0,
"step": 136
},
{
"epoch": 0.08063566804002355,
"grad_norm": 94.5,
"learning_rate": 9.133333333333334e-05,
"loss": 1.0859,
"step": 137
},
{
"epoch": 0.08122424955856386,
"grad_norm": 39.5,
"learning_rate": 9.200000000000001e-05,
"loss": 0.6836,
"step": 138
},
{
"epoch": 0.08181283107710417,
"grad_norm": 26.5,
"learning_rate": 9.266666666666666e-05,
"loss": 0.7031,
"step": 139
},
{
"epoch": 0.0824014125956445,
"grad_norm": 108.0,
"learning_rate": 9.333333333333334e-05,
"loss": 1.0938,
"step": 140
},
{
"epoch": 0.08298999411418481,
"grad_norm": 38.0,
"learning_rate": 9.4e-05,
"loss": 0.6289,
"step": 141
},
{
"epoch": 0.08357857563272514,
"grad_norm": 89.0,
"learning_rate": 9.466666666666667e-05,
"loss": 0.8203,
"step": 142
},
{
"epoch": 0.08416715715126545,
"grad_norm": 98.5,
"learning_rate": 9.533333333333334e-05,
"loss": 0.5508,
"step": 143
},
{
"epoch": 0.08475573866980576,
"grad_norm": 39.25,
"learning_rate": 9.6e-05,
"loss": 0.7773,
"step": 144
},
{
"epoch": 0.08534432018834609,
"grad_norm": 49.5,
"learning_rate": 9.666666666666667e-05,
"loss": 0.7852,
"step": 145
},
{
"epoch": 0.0859329017068864,
"grad_norm": 190.0,
"learning_rate": 9.733333333333335e-05,
"loss": 1.1328,
"step": 146
},
{
"epoch": 0.08652148322542672,
"grad_norm": 48.0,
"learning_rate": 9.8e-05,
"loss": 0.7305,
"step": 147
},
{
"epoch": 0.08711006474396704,
"grad_norm": 284.0,
"learning_rate": 9.866666666666668e-05,
"loss": 0.8867,
"step": 148
},
{
"epoch": 0.08769864626250735,
"grad_norm": 41.5,
"learning_rate": 9.933333333333334e-05,
"loss": 0.9023,
"step": 149
},
{
"epoch": 0.08828722778104768,
"grad_norm": 29.75,
"learning_rate": 0.0001,
"loss": 0.5391,
"step": 150
},
{
"epoch": 0.088875809299588,
"grad_norm": 35.25,
"learning_rate": 0.00010066666666666667,
"loss": 0.543,
"step": 151
},
{
"epoch": 0.0894643908181283,
"grad_norm": 117.0,
"learning_rate": 0.00010133333333333335,
"loss": 0.6094,
"step": 152
},
{
"epoch": 0.09005297233666863,
"grad_norm": 16.0,
"learning_rate": 0.00010200000000000001,
"loss": 0.4746,
"step": 153
},
{
"epoch": 0.09064155385520895,
"grad_norm": 62.25,
"learning_rate": 0.00010266666666666666,
"loss": 0.8164,
"step": 154
},
{
"epoch": 0.09123013537374926,
"grad_norm": 43.5,
"learning_rate": 0.00010333333333333334,
"loss": 0.5977,
"step": 155
},
{
"epoch": 0.09181871689228958,
"grad_norm": 16.375,
"learning_rate": 0.00010400000000000001,
"loss": 0.4199,
"step": 156
},
{
"epoch": 0.0924072984108299,
"grad_norm": 18.75,
"learning_rate": 0.00010466666666666667,
"loss": 0.3906,
"step": 157
},
{
"epoch": 0.09299587992937022,
"grad_norm": 44.25,
"learning_rate": 0.00010533333333333332,
"loss": 0.5742,
"step": 158
},
{
"epoch": 0.09358446144791054,
"grad_norm": 17.5,
"learning_rate": 0.00010600000000000002,
"loss": 0.4531,
"step": 159
},
{
"epoch": 0.09417304296645085,
"grad_norm": 35.25,
"learning_rate": 0.00010666666666666667,
"loss": 0.6992,
"step": 160
},
{
"epoch": 0.09476162448499117,
"grad_norm": 32.0,
"learning_rate": 0.00010733333333333333,
"loss": 0.5234,
"step": 161
},
{
"epoch": 0.09535020600353149,
"grad_norm": 15.5625,
"learning_rate": 0.00010800000000000001,
"loss": 0.3984,
"step": 162
},
{
"epoch": 0.0959387875220718,
"grad_norm": 14.875,
"learning_rate": 0.00010866666666666667,
"loss": 0.4062,
"step": 163
},
{
"epoch": 0.09652736904061213,
"grad_norm": 15.125,
"learning_rate": 0.00010933333333333333,
"loss": 0.4141,
"step": 164
},
{
"epoch": 0.09711595055915244,
"grad_norm": 33.0,
"learning_rate": 0.00011000000000000002,
"loss": 0.4551,
"step": 165
},
{
"epoch": 0.09770453207769277,
"grad_norm": 11.8125,
"learning_rate": 0.00011066666666666667,
"loss": 0.4023,
"step": 166
},
{
"epoch": 0.09829311359623308,
"grad_norm": 18.625,
"learning_rate": 0.00011133333333333333,
"loss": 0.4805,
"step": 167
},
{
"epoch": 0.09888169511477339,
"grad_norm": 8.5,
"learning_rate": 0.00011200000000000001,
"loss": 0.2812,
"step": 168
},
{
"epoch": 0.09947027663331372,
"grad_norm": 21.0,
"learning_rate": 0.00011266666666666668,
"loss": 0.4199,
"step": 169
},
{
"epoch": 0.10005885815185403,
"grad_norm": 29.875,
"learning_rate": 0.00011333333333333334,
"loss": 0.3711,
"step": 170
},
{
"epoch": 0.10064743967039436,
"grad_norm": 72.0,
"learning_rate": 0.00011399999999999999,
"loss": 0.375,
"step": 171
},
{
"epoch": 0.10123602118893467,
"grad_norm": 72.5,
"learning_rate": 0.00011466666666666667,
"loss": 0.4961,
"step": 172
},
{
"epoch": 0.10182460270747498,
"grad_norm": 19.5,
"learning_rate": 0.00011533333333333334,
"loss": 0.4766,
"step": 173
},
{
"epoch": 0.10241318422601531,
"grad_norm": 56.75,
"learning_rate": 0.000116,
"loss": 0.3223,
"step": 174
},
{
"epoch": 0.10300176574455562,
"grad_norm": 20.25,
"learning_rate": 0.00011666666666666668,
"loss": 0.2363,
"step": 175
},
{
"epoch": 0.10359034726309593,
"grad_norm": 26.75,
"learning_rate": 0.00011733333333333334,
"loss": 0.3164,
"step": 176
},
{
"epoch": 0.10417892878163626,
"grad_norm": 23.75,
"learning_rate": 0.000118,
"loss": 0.4141,
"step": 177
},
{
"epoch": 0.10476751030017657,
"grad_norm": 13.625,
"learning_rate": 0.00011866666666666669,
"loss": 0.3438,
"step": 178
},
{
"epoch": 0.1053560918187169,
"grad_norm": 20.25,
"learning_rate": 0.00011933333333333334,
"loss": 0.373,
"step": 179
},
{
"epoch": 0.10594467333725721,
"grad_norm": 17.875,
"learning_rate": 0.00012,
"loss": 0.4336,
"step": 180
},
{
"epoch": 0.10653325485579752,
"grad_norm": 8.875,
"learning_rate": 0.00012066666666666668,
"loss": 0.2002,
"step": 181
},
{
"epoch": 0.10712183637433785,
"grad_norm": 41.5,
"learning_rate": 0.00012133333333333335,
"loss": 0.2812,
"step": 182
},
{
"epoch": 0.10771041789287816,
"grad_norm": 24.375,
"learning_rate": 0.000122,
"loss": 0.2119,
"step": 183
},
{
"epoch": 0.10829899941141848,
"grad_norm": 8.5625,
"learning_rate": 0.00012266666666666668,
"loss": 0.1934,
"step": 184
},
{
"epoch": 0.1088875809299588,
"grad_norm": 540.0,
"learning_rate": 0.00012333333333333334,
"loss": 0.3203,
"step": 185
},
{
"epoch": 0.10947616244849911,
"grad_norm": 56.5,
"learning_rate": 0.000124,
"loss": 0.3809,
"step": 186
},
{
"epoch": 0.11006474396703944,
"grad_norm": 50.0,
"learning_rate": 0.00012466666666666667,
"loss": 0.207,
"step": 187
},
{
"epoch": 0.11065332548557975,
"grad_norm": 28.5,
"learning_rate": 0.00012533333333333334,
"loss": 0.3281,
"step": 188
},
{
"epoch": 0.11124190700412007,
"grad_norm": 20.375,
"learning_rate": 0.000126,
"loss": 0.373,
"step": 189
},
{
"epoch": 0.11183048852266039,
"grad_norm": 35.25,
"learning_rate": 0.00012666666666666666,
"loss": 0.3535,
"step": 190
},
{
"epoch": 0.1124190700412007,
"grad_norm": 23.5,
"learning_rate": 0.00012733333333333336,
"loss": 0.1885,
"step": 191
},
{
"epoch": 0.11300765155974102,
"grad_norm": 18.75,
"learning_rate": 0.00012800000000000002,
"loss": 0.4121,
"step": 192
},
{
"epoch": 0.11359623307828134,
"grad_norm": 96.0,
"learning_rate": 0.00012866666666666666,
"loss": 0.3574,
"step": 193
},
{
"epoch": 0.11418481459682166,
"grad_norm": 12.9375,
"learning_rate": 0.00012933333333333332,
"loss": 0.4023,
"step": 194
},
{
"epoch": 0.11477339611536198,
"grad_norm": 103.5,
"learning_rate": 0.00013000000000000002,
"loss": 0.3477,
"step": 195
},
{
"epoch": 0.1153619776339023,
"grad_norm": 13.3125,
"learning_rate": 0.00013066666666666668,
"loss": 0.3047,
"step": 196
},
{
"epoch": 0.11595055915244261,
"grad_norm": 96.5,
"learning_rate": 0.00013133333333333332,
"loss": 0.2773,
"step": 197
},
{
"epoch": 0.11653914067098294,
"grad_norm": 15.3125,
"learning_rate": 0.000132,
"loss": 0.2148,
"step": 198
},
{
"epoch": 0.11712772218952325,
"grad_norm": 24.125,
"learning_rate": 0.00013266666666666667,
"loss": 0.2461,
"step": 199
},
{
"epoch": 0.11771630370806356,
"grad_norm": 22.75,
"learning_rate": 0.00013333333333333334,
"loss": 0.2617,
"step": 200
},
{
"epoch": 0.11830488522660389,
"grad_norm": 23.625,
"learning_rate": 0.000134,
"loss": 0.1396,
"step": 201
},
{
"epoch": 0.1188934667451442,
"grad_norm": 7.21875,
"learning_rate": 0.00013466666666666667,
"loss": 0.1514,
"step": 202
},
{
"epoch": 0.11948204826368453,
"grad_norm": 37.0,
"learning_rate": 0.00013533333333333333,
"loss": 0.3223,
"step": 203
},
{
"epoch": 0.12007062978222484,
"grad_norm": 17.625,
"learning_rate": 0.00013600000000000003,
"loss": 0.2324,
"step": 204
},
{
"epoch": 0.12065921130076515,
"grad_norm": 11.875,
"learning_rate": 0.00013666666666666666,
"loss": 0.2051,
"step": 205
},
{
"epoch": 0.12124779281930548,
"grad_norm": 13.3125,
"learning_rate": 0.00013733333333333333,
"loss": 0.2793,
"step": 206
},
{
"epoch": 0.12183637433784579,
"grad_norm": 13.3125,
"learning_rate": 0.000138,
"loss": 0.2061,
"step": 207
},
{
"epoch": 0.1224249558563861,
"grad_norm": 16.75,
"learning_rate": 0.00013866666666666669,
"loss": 0.2148,
"step": 208
},
{
"epoch": 0.12301353737492643,
"grad_norm": 11.625,
"learning_rate": 0.00013933333333333335,
"loss": 0.2324,
"step": 209
},
{
"epoch": 0.12360211889346674,
"grad_norm": 17.5,
"learning_rate": 0.00014,
"loss": 0.1396,
"step": 210
},
{
"epoch": 0.12419070041200707,
"grad_norm": 4.40625,
"learning_rate": 0.00014066666666666668,
"loss": 0.106,
"step": 211
},
{
"epoch": 0.12477928193054738,
"grad_norm": 72.0,
"learning_rate": 0.00014133333333333334,
"loss": 0.2012,
"step": 212
},
{
"epoch": 0.1253678634490877,
"grad_norm": 23.25,
"learning_rate": 0.000142,
"loss": 0.0991,
"step": 213
},
{
"epoch": 0.125956444967628,
"grad_norm": 21.25,
"learning_rate": 0.00014266666666666667,
"loss": 0.3809,
"step": 214
},
{
"epoch": 0.12654502648616833,
"grad_norm": 6.0625,
"learning_rate": 0.00014333333333333334,
"loss": 0.1206,
"step": 215
},
{
"epoch": 0.12713360800470866,
"grad_norm": 42.0,
"learning_rate": 0.000144,
"loss": 0.1445,
"step": 216
},
{
"epoch": 0.12772218952324896,
"grad_norm": 17.125,
"learning_rate": 0.0001446666666666667,
"loss": 0.2344,
"step": 217
},
{
"epoch": 0.12831077104178928,
"grad_norm": 19.875,
"learning_rate": 0.00014533333333333333,
"loss": 0.2266,
"step": 218
},
{
"epoch": 0.1288993525603296,
"grad_norm": 25.0,
"learning_rate": 0.000146,
"loss": 0.1045,
"step": 219
},
{
"epoch": 0.12948793407886994,
"grad_norm": 14.1875,
"learning_rate": 0.00014666666666666666,
"loss": 0.1592,
"step": 220
},
{
"epoch": 0.13007651559741024,
"grad_norm": 8.75,
"learning_rate": 0.00014733333333333335,
"loss": 0.1982,
"step": 221
},
{
"epoch": 0.13066509711595056,
"grad_norm": 9.1875,
"learning_rate": 0.000148,
"loss": 0.1436,
"step": 222
},
{
"epoch": 0.1312536786344909,
"grad_norm": 6.8125,
"learning_rate": 0.00014866666666666666,
"loss": 0.1172,
"step": 223
},
{
"epoch": 0.1318422601530312,
"grad_norm": 17.25,
"learning_rate": 0.00014933333333333335,
"loss": 0.123,
"step": 224
},
{
"epoch": 0.13243084167157151,
"grad_norm": 12.625,
"learning_rate": 0.00015000000000000001,
"loss": 0.1055,
"step": 225
},
{
"epoch": 0.13301942319011184,
"grad_norm": 7.46875,
"learning_rate": 0.00015066666666666668,
"loss": 0.1641,
"step": 226
},
{
"epoch": 0.13360800470865214,
"grad_norm": 8.4375,
"learning_rate": 0.00015133333333333334,
"loss": 0.1069,
"step": 227
},
{
"epoch": 0.13419658622719247,
"grad_norm": 15.5,
"learning_rate": 0.000152,
"loss": 0.1436,
"step": 228
},
{
"epoch": 0.1347851677457328,
"grad_norm": 18.625,
"learning_rate": 0.00015266666666666667,
"loss": 0.1172,
"step": 229
},
{
"epoch": 0.1353737492642731,
"grad_norm": 8.4375,
"learning_rate": 0.00015333333333333334,
"loss": 0.1631,
"step": 230
},
{
"epoch": 0.13596233078281342,
"grad_norm": 8.1875,
"learning_rate": 0.000154,
"loss": 0.1118,
"step": 231
},
{
"epoch": 0.13655091230135374,
"grad_norm": 7.71875,
"learning_rate": 0.00015466666666666667,
"loss": 0.2559,
"step": 232
},
{
"epoch": 0.13713949381989404,
"grad_norm": 28.875,
"learning_rate": 0.00015533333333333333,
"loss": 0.1641,
"step": 233
},
{
"epoch": 0.13772807533843437,
"grad_norm": 5.78125,
"learning_rate": 0.00015600000000000002,
"loss": 0.0732,
"step": 234
},
{
"epoch": 0.1383166568569747,
"grad_norm": 21.5,
"learning_rate": 0.00015666666666666666,
"loss": 0.248,
"step": 235
},
{
"epoch": 0.13890523837551502,
"grad_norm": 9.375,
"learning_rate": 0.00015733333333333333,
"loss": 0.1504,
"step": 236
},
{
"epoch": 0.13949381989405532,
"grad_norm": 8.5625,
"learning_rate": 0.00015800000000000002,
"loss": 0.1226,
"step": 237
},
{
"epoch": 0.14008240141259565,
"grad_norm": 2.9375,
"learning_rate": 0.00015866666666666668,
"loss": 0.106,
"step": 238
},
{
"epoch": 0.14067098293113597,
"grad_norm": 5.53125,
"learning_rate": 0.00015933333333333332,
"loss": 0.1328,
"step": 239
},
{
"epoch": 0.14125956444967627,
"grad_norm": 8.3125,
"learning_rate": 0.00016,
"loss": 0.1001,
"step": 240
},
{
"epoch": 0.1418481459682166,
"grad_norm": 3.53125,
"learning_rate": 0.00016066666666666668,
"loss": 0.0588,
"step": 241
},
{
"epoch": 0.14243672748675693,
"grad_norm": 5.25,
"learning_rate": 0.00016133333333333334,
"loss": 0.1128,
"step": 242
},
{
"epoch": 0.14302530900529722,
"grad_norm": 14.375,
"learning_rate": 0.000162,
"loss": 0.293,
"step": 243
},
{
"epoch": 0.14361389052383755,
"grad_norm": 13.5,
"learning_rate": 0.00016266666666666667,
"loss": 0.0859,
"step": 244
},
{
"epoch": 0.14420247204237788,
"grad_norm": 21.375,
"learning_rate": 0.00016333333333333334,
"loss": 0.124,
"step": 245
},
{
"epoch": 0.14479105356091818,
"grad_norm": 11.625,
"learning_rate": 0.000164,
"loss": 0.1191,
"step": 246
},
{
"epoch": 0.1453796350794585,
"grad_norm": 5.28125,
"learning_rate": 0.00016466666666666667,
"loss": 0.0728,
"step": 247
},
{
"epoch": 0.14596821659799883,
"grad_norm": 10.3125,
"learning_rate": 0.00016533333333333333,
"loss": 0.1562,
"step": 248
},
{
"epoch": 0.14655679811653913,
"grad_norm": 2.53125,
"learning_rate": 0.000166,
"loss": 0.0752,
"step": 249
},
{
"epoch": 0.14714537963507945,
"grad_norm": 3.078125,
"learning_rate": 0.0001666666666666667,
"loss": 0.2041,
"step": 250
},
{
"epoch": 0.14773396115361978,
"grad_norm": 4.21875,
"learning_rate": 0.00016733333333333335,
"loss": 0.0796,
"step": 251
},
{
"epoch": 0.1483225426721601,
"grad_norm": 13.9375,
"learning_rate": 0.000168,
"loss": 0.0781,
"step": 252
},
{
"epoch": 0.1489111241907004,
"grad_norm": 11.5625,
"learning_rate": 0.00016866666666666668,
"loss": 0.1162,
"step": 253
},
{
"epoch": 0.14949970570924073,
"grad_norm": 13.0625,
"learning_rate": 0.00016933333333333335,
"loss": 0.083,
"step": 254
},
{
"epoch": 0.15008828722778106,
"grad_norm": 2.765625,
"learning_rate": 0.00017,
"loss": 0.053,
"step": 255
},
{
"epoch": 0.15067686874632136,
"grad_norm": 3.484375,
"learning_rate": 0.00017066666666666668,
"loss": 0.0806,
"step": 256
},
{
"epoch": 0.15126545026486168,
"grad_norm": 8.125,
"learning_rate": 0.00017133333333333334,
"loss": 0.0771,
"step": 257
},
{
"epoch": 0.151854031783402,
"grad_norm": 16.875,
"learning_rate": 0.000172,
"loss": 0.0752,
"step": 258
},
{
"epoch": 0.1524426133019423,
"grad_norm": 7.84375,
"learning_rate": 0.00017266666666666667,
"loss": 0.1406,
"step": 259
},
{
"epoch": 0.15303119482048264,
"grad_norm": 1.921875,
"learning_rate": 0.00017333333333333334,
"loss": 0.0603,
"step": 260
},
{
"epoch": 0.15361977633902296,
"grad_norm": 5.46875,
"learning_rate": 0.000174,
"loss": 0.0547,
"step": 261
},
{
"epoch": 0.15420835785756326,
"grad_norm": 3.921875,
"learning_rate": 0.00017466666666666667,
"loss": 0.0525,
"step": 262
},
{
"epoch": 0.1547969393761036,
"grad_norm": 7.53125,
"learning_rate": 0.00017533333333333336,
"loss": 0.0713,
"step": 263
},
{
"epoch": 0.1553855208946439,
"grad_norm": 2.90625,
"learning_rate": 0.00017600000000000002,
"loss": 0.0537,
"step": 264
},
{
"epoch": 0.1559741024131842,
"grad_norm": 2.0625,
"learning_rate": 0.00017666666666666666,
"loss": 0.0698,
"step": 265
},
{
"epoch": 0.15656268393172454,
"grad_norm": 5.875,
"learning_rate": 0.00017733333333333335,
"loss": 0.0544,
"step": 266
},
{
"epoch": 0.15715126545026487,
"grad_norm": 4.28125,
"learning_rate": 0.00017800000000000002,
"loss": 0.1099,
"step": 267
},
{
"epoch": 0.1577398469688052,
"grad_norm": 1.6171875,
"learning_rate": 0.00017866666666666668,
"loss": 0.0593,
"step": 268
},
{
"epoch": 0.1583284284873455,
"grad_norm": 20.625,
"learning_rate": 0.00017933333333333332,
"loss": 0.0728,
"step": 269
},
{
"epoch": 0.15891701000588582,
"grad_norm": 15.375,
"learning_rate": 0.00018,
"loss": 0.084,
"step": 270
},
{
"epoch": 0.15950559152442614,
"grad_norm": 2.09375,
"learning_rate": 0.00018066666666666668,
"loss": 0.0776,
"step": 271
},
{
"epoch": 0.16009417304296644,
"grad_norm": 8.0,
"learning_rate": 0.00018133333333333334,
"loss": 0.1187,
"step": 272
},
{
"epoch": 0.16068275456150677,
"grad_norm": 0.7421875,
"learning_rate": 0.000182,
"loss": 0.0398,
"step": 273
},
{
"epoch": 0.1612713360800471,
"grad_norm": 2.171875,
"learning_rate": 0.00018266666666666667,
"loss": 0.053,
"step": 274
},
{
"epoch": 0.1618599175985874,
"grad_norm": 11.0,
"learning_rate": 0.00018333333333333334,
"loss": 0.0723,
"step": 275
},
{
"epoch": 0.16244849911712772,
"grad_norm": 2.21875,
"learning_rate": 0.00018400000000000003,
"loss": 0.1396,
"step": 276
},
{
"epoch": 0.16303708063566805,
"grad_norm": 0.4296875,
"learning_rate": 0.00018466666666666666,
"loss": 0.0291,
"step": 277
},
{
"epoch": 0.16362566215420835,
"grad_norm": 38.75,
"learning_rate": 0.00018533333333333333,
"loss": 0.0547,
"step": 278
},
{
"epoch": 0.16421424367274867,
"grad_norm": 0.62109375,
"learning_rate": 0.00018600000000000002,
"loss": 0.0403,
"step": 279
},
{
"epoch": 0.164802825191289,
"grad_norm": 2.640625,
"learning_rate": 0.0001866666666666667,
"loss": 0.063,
"step": 280
},
{
"epoch": 0.16539140670982933,
"grad_norm": 4.21875,
"learning_rate": 0.00018733333333333335,
"loss": 0.0447,
"step": 281
},
{
"epoch": 0.16597998822836962,
"grad_norm": 3.09375,
"learning_rate": 0.000188,
"loss": 0.0415,
"step": 282
},
{
"epoch": 0.16656856974690995,
"grad_norm": 1.453125,
"learning_rate": 0.00018866666666666668,
"loss": 0.0513,
"step": 283
},
{
"epoch": 0.16715715126545028,
"grad_norm": 9.0625,
"learning_rate": 0.00018933333333333335,
"loss": 0.0295,
"step": 284
},
{
"epoch": 0.16774573278399058,
"grad_norm": 17.75,
"learning_rate": 0.00019,
"loss": 0.0747,
"step": 285
},
{
"epoch": 0.1683343143025309,
"grad_norm": 10.6875,
"learning_rate": 0.00019066666666666668,
"loss": 0.0664,
"step": 286
},
{
"epoch": 0.16892289582107123,
"grad_norm": 13.8125,
"learning_rate": 0.00019133333333333334,
"loss": 0.052,
"step": 287
},
{
"epoch": 0.16951147733961153,
"grad_norm": 0.94140625,
"learning_rate": 0.000192,
"loss": 0.0444,
"step": 288
},
{
"epoch": 0.17010005885815185,
"grad_norm": 8.5,
"learning_rate": 0.0001926666666666667,
"loss": 0.0618,
"step": 289
},
{
"epoch": 0.17068864037669218,
"grad_norm": 4.59375,
"learning_rate": 0.00019333333333333333,
"loss": 0.0569,
"step": 290
},
{
"epoch": 0.17127722189523248,
"grad_norm": 2.78125,
"learning_rate": 0.000194,
"loss": 0.0598,
"step": 291
},
{
"epoch": 0.1718658034137728,
"grad_norm": 15.125,
"learning_rate": 0.0001946666666666667,
"loss": 0.0986,
"step": 292
},
{
"epoch": 0.17245438493231313,
"grad_norm": 0.33984375,
"learning_rate": 0.00019533333333333336,
"loss": 0.026,
"step": 293
},
{
"epoch": 0.17304296645085343,
"grad_norm": 1.6875,
"learning_rate": 0.000196,
"loss": 0.0369,
"step": 294
},
{
"epoch": 0.17363154796939376,
"grad_norm": 0.7890625,
"learning_rate": 0.00019666666666666666,
"loss": 0.031,
"step": 295
},
{
"epoch": 0.17422012948793408,
"grad_norm": 8.5,
"learning_rate": 0.00019733333333333335,
"loss": 0.0903,
"step": 296
},
{
"epoch": 0.1748087110064744,
"grad_norm": 1.015625,
"learning_rate": 0.00019800000000000002,
"loss": 0.0286,
"step": 297
},
{
"epoch": 0.1753972925250147,
"grad_norm": 1.90625,
"learning_rate": 0.00019866666666666668,
"loss": 0.1016,
"step": 298
},
{
"epoch": 0.17598587404355504,
"grad_norm": 0.361328125,
"learning_rate": 0.00019933333333333334,
"loss": 0.0239,
"step": 299
},
{
"epoch": 0.17657445556209536,
"grad_norm": 7.9375,
"learning_rate": 0.0002,
"loss": 0.0408,
"step": 300
},
{
"epoch": 0.17716303708063566,
"grad_norm": 1.71875,
"learning_rate": 0.00019997559487492376,
"loss": 0.0317,
"step": 301
},
{
"epoch": 0.177751618599176,
"grad_norm": 0.81640625,
"learning_rate": 0.00019995118974984748,
"loss": 0.0327,
"step": 302
},
{
"epoch": 0.1783402001177163,
"grad_norm": 2.21875,
"learning_rate": 0.00019992678462477123,
"loss": 0.0698,
"step": 303
},
{
"epoch": 0.1789287816362566,
"grad_norm": 0.24609375,
"learning_rate": 0.00019990237949969495,
"loss": 0.022,
"step": 304
},
{
"epoch": 0.17951736315479694,
"grad_norm": 0.3125,
"learning_rate": 0.0001998779743746187,
"loss": 0.0231,
"step": 305
},
{
"epoch": 0.18010594467333726,
"grad_norm": 4.375,
"learning_rate": 0.00019985356924954241,
"loss": 0.0232,
"step": 306
},
{
"epoch": 0.18069452619187756,
"grad_norm": 3.1875,
"learning_rate": 0.00019982916412446616,
"loss": 0.0403,
"step": 307
},
{
"epoch": 0.1812831077104179,
"grad_norm": 2.984375,
"learning_rate": 0.00019980475899938988,
"loss": 0.085,
"step": 308
},
{
"epoch": 0.18187168922895822,
"grad_norm": 1.0,
"learning_rate": 0.00019978035387431363,
"loss": 0.0304,
"step": 309
},
{
"epoch": 0.18246027074749852,
"grad_norm": 5.09375,
"learning_rate": 0.00019975594874923735,
"loss": 0.033,
"step": 310
},
{
"epoch": 0.18304885226603884,
"grad_norm": 0.26953125,
"learning_rate": 0.0001997315436241611,
"loss": 0.0184,
"step": 311
},
{
"epoch": 0.18363743378457917,
"grad_norm": 0.47265625,
"learning_rate": 0.00019970713849908482,
"loss": 0.0287,
"step": 312
},
{
"epoch": 0.1842260153031195,
"grad_norm": 0.466796875,
"learning_rate": 0.00019968273337400857,
"loss": 0.0228,
"step": 313
},
{
"epoch": 0.1848145968216598,
"grad_norm": 1.2734375,
"learning_rate": 0.00019965832824893229,
"loss": 0.0337,
"step": 314
},
{
"epoch": 0.18540317834020012,
"grad_norm": 25.25,
"learning_rate": 0.00019963392312385603,
"loss": 0.1172,
"step": 315
},
{
"epoch": 0.18599175985874045,
"grad_norm": 2.0,
"learning_rate": 0.00019960951799877975,
"loss": 0.0398,
"step": 316
},
{
"epoch": 0.18658034137728075,
"grad_norm": 0.29296875,
"learning_rate": 0.0001995851128737035,
"loss": 0.0248,
"step": 317
},
{
"epoch": 0.18716892289582107,
"grad_norm": 0.439453125,
"learning_rate": 0.00019956070774862722,
"loss": 0.0254,
"step": 318
},
{
"epoch": 0.1877575044143614,
"grad_norm": 0.244140625,
"learning_rate": 0.00019953630262355097,
"loss": 0.022,
"step": 319
},
{
"epoch": 0.1883460859329017,
"grad_norm": 2.671875,
"learning_rate": 0.0001995118974984747,
"loss": 0.0427,
"step": 320
},
{
"epoch": 0.18893466745144202,
"grad_norm": 1.0546875,
"learning_rate": 0.00019948749237339844,
"loss": 0.0415,
"step": 321
},
{
"epoch": 0.18952324896998235,
"grad_norm": 2.0625,
"learning_rate": 0.00019946308724832216,
"loss": 0.0806,
"step": 322
},
{
"epoch": 0.19011183048852265,
"grad_norm": 1.6484375,
"learning_rate": 0.0001994386821232459,
"loss": 0.0405,
"step": 323
},
{
"epoch": 0.19070041200706297,
"grad_norm": 2.625,
"learning_rate": 0.00019941427699816963,
"loss": 0.0258,
"step": 324
},
{
"epoch": 0.1912889935256033,
"grad_norm": 1.21875,
"learning_rate": 0.00019938987187309337,
"loss": 0.0344,
"step": 325
},
{
"epoch": 0.1918775750441436,
"grad_norm": 0.6171875,
"learning_rate": 0.0001993654667480171,
"loss": 0.026,
"step": 326
},
{
"epoch": 0.19246615656268393,
"grad_norm": 0.52734375,
"learning_rate": 0.00019934106162294081,
"loss": 0.0264,
"step": 327
},
{
"epoch": 0.19305473808122425,
"grad_norm": 0.8828125,
"learning_rate": 0.00019931665649786456,
"loss": 0.0262,
"step": 328
},
{
"epoch": 0.19364331959976458,
"grad_norm": 0.2265625,
"learning_rate": 0.00019929225137278828,
"loss": 0.0195,
"step": 329
},
{
"epoch": 0.19423190111830488,
"grad_norm": 0.9375,
"learning_rate": 0.00019926784624771203,
"loss": 0.0374,
"step": 330
},
{
"epoch": 0.1948204826368452,
"grad_norm": 2.875,
"learning_rate": 0.00019924344112263575,
"loss": 0.0344,
"step": 331
},
{
"epoch": 0.19540906415538553,
"grad_norm": 0.3125,
"learning_rate": 0.0001992190359975595,
"loss": 0.0199,
"step": 332
},
{
"epoch": 0.19599764567392583,
"grad_norm": 0.87109375,
"learning_rate": 0.00019919463087248322,
"loss": 0.033,
"step": 333
},
{
"epoch": 0.19658622719246616,
"grad_norm": 0.166015625,
"learning_rate": 0.00019917022574740697,
"loss": 0.0157,
"step": 334
},
{
"epoch": 0.19717480871100648,
"grad_norm": 1.5859375,
"learning_rate": 0.00019914582062233069,
"loss": 0.0266,
"step": 335
},
{
"epoch": 0.19776339022954678,
"grad_norm": 0.150390625,
"learning_rate": 0.00019912141549725443,
"loss": 0.0153,
"step": 336
},
{
"epoch": 0.1983519717480871,
"grad_norm": 0.318359375,
"learning_rate": 0.00019909701037217815,
"loss": 0.0197,
"step": 337
},
{
"epoch": 0.19894055326662743,
"grad_norm": 4.8125,
"learning_rate": 0.0001990726052471019,
"loss": 0.0471,
"step": 338
},
{
"epoch": 0.19952913478516773,
"grad_norm": 1.2890625,
"learning_rate": 0.00019904820012202562,
"loss": 0.0786,
"step": 339
},
{
"epoch": 0.20011771630370806,
"grad_norm": 11.5,
"learning_rate": 0.00019902379499694937,
"loss": 0.0413,
"step": 340
},
{
"epoch": 0.2007062978222484,
"grad_norm": 1.640625,
"learning_rate": 0.0001989993898718731,
"loss": 0.0508,
"step": 341
},
{
"epoch": 0.2012948793407887,
"grad_norm": 1.15625,
"learning_rate": 0.00019897498474679684,
"loss": 0.0297,
"step": 342
},
{
"epoch": 0.201883460859329,
"grad_norm": 0.384765625,
"learning_rate": 0.00019895057962172056,
"loss": 0.0261,
"step": 343
},
{
"epoch": 0.20247204237786934,
"grad_norm": 0.12890625,
"learning_rate": 0.0001989261744966443,
"loss": 0.0145,
"step": 344
},
{
"epoch": 0.20306062389640966,
"grad_norm": 5.0,
"learning_rate": 0.00019890176937156803,
"loss": 0.0239,
"step": 345
},
{
"epoch": 0.20364920541494996,
"grad_norm": 0.96484375,
"learning_rate": 0.00019887736424649177,
"loss": 0.0206,
"step": 346
},
{
"epoch": 0.2042377869334903,
"grad_norm": 0.318359375,
"learning_rate": 0.0001988529591214155,
"loss": 0.0181,
"step": 347
},
{
"epoch": 0.20482636845203062,
"grad_norm": 0.671875,
"learning_rate": 0.00019882855399633924,
"loss": 0.0226,
"step": 348
},
{
"epoch": 0.20541494997057091,
"grad_norm": 0.365234375,
"learning_rate": 0.00019880414887126296,
"loss": 0.0175,
"step": 349
},
{
"epoch": 0.20600353148911124,
"grad_norm": 0.1904296875,
"learning_rate": 0.0001987797437461867,
"loss": 0.0181,
"step": 350
},
{
"epoch": 0.20659211300765157,
"grad_norm": 0.1279296875,
"learning_rate": 0.00019875533862111043,
"loss": 0.0135,
"step": 351
},
{
"epoch": 0.20718069452619187,
"grad_norm": 0.2041015625,
"learning_rate": 0.00019873093349603418,
"loss": 0.0388,
"step": 352
},
{
"epoch": 0.2077692760447322,
"grad_norm": 0.2041015625,
"learning_rate": 0.0001987065283709579,
"loss": 0.0172,
"step": 353
},
{
"epoch": 0.20835785756327252,
"grad_norm": 7.71875,
"learning_rate": 0.00019868212324588165,
"loss": 0.0327,
"step": 354
},
{
"epoch": 0.20894643908181282,
"grad_norm": 0.1376953125,
"learning_rate": 0.0001986577181208054,
"loss": 0.0153,
"step": 355
},
{
"epoch": 0.20953502060035314,
"grad_norm": 2.859375,
"learning_rate": 0.00019863331299572911,
"loss": 0.0693,
"step": 356
},
{
"epoch": 0.21012360211889347,
"grad_norm": 0.1376953125,
"learning_rate": 0.00019860890787065286,
"loss": 0.0157,
"step": 357
},
{
"epoch": 0.2107121836374338,
"grad_norm": 0.3046875,
"learning_rate": 0.00019858450274557658,
"loss": 0.022,
"step": 358
},
{
"epoch": 0.2113007651559741,
"grad_norm": 0.2490234375,
"learning_rate": 0.00019856009762050033,
"loss": 0.0205,
"step": 359
},
{
"epoch": 0.21188934667451442,
"grad_norm": 0.376953125,
"learning_rate": 0.00019853569249542405,
"loss": 0.0226,
"step": 360
},
{
"epoch": 0.21247792819305475,
"grad_norm": 0.2314453125,
"learning_rate": 0.0001985112873703478,
"loss": 0.0165,
"step": 361
},
{
"epoch": 0.21306650971159505,
"grad_norm": 0.134765625,
"learning_rate": 0.00019848688224527152,
"loss": 0.0166,
"step": 362
},
{
"epoch": 0.21365509123013537,
"grad_norm": 0.134765625,
"learning_rate": 0.00019846247712019527,
"loss": 0.0136,
"step": 363
},
{
"epoch": 0.2142436727486757,
"grad_norm": 0.71875,
"learning_rate": 0.00019843807199511899,
"loss": 0.0369,
"step": 364
},
{
"epoch": 0.214832254267216,
"grad_norm": 9.0,
"learning_rate": 0.00019841366687004273,
"loss": 0.0464,
"step": 365
},
{
"epoch": 0.21542083578575633,
"grad_norm": 0.361328125,
"learning_rate": 0.00019838926174496645,
"loss": 0.0149,
"step": 366
},
{
"epoch": 0.21600941730429665,
"grad_norm": 0.2890625,
"learning_rate": 0.0001983648566198902,
"loss": 0.0188,
"step": 367
},
{
"epoch": 0.21659799882283695,
"grad_norm": 0.1591796875,
"learning_rate": 0.00019834045149481392,
"loss": 0.0172,
"step": 368
},
{
"epoch": 0.21718658034137728,
"grad_norm": 0.1708984375,
"learning_rate": 0.00019831604636973767,
"loss": 0.0168,
"step": 369
},
{
"epoch": 0.2177751618599176,
"grad_norm": 0.1318359375,
"learning_rate": 0.0001982916412446614,
"loss": 0.015,
"step": 370
},
{
"epoch": 0.2183637433784579,
"grad_norm": 2.734375,
"learning_rate": 0.00019826723611958514,
"loss": 0.0378,
"step": 371
},
{
"epoch": 0.21895232489699823,
"grad_norm": 1.234375,
"learning_rate": 0.00019824283099450886,
"loss": 0.0237,
"step": 372
},
{
"epoch": 0.21954090641553856,
"grad_norm": 0.205078125,
"learning_rate": 0.0001982184258694326,
"loss": 0.0156,
"step": 373
},
{
"epoch": 0.22012948793407888,
"grad_norm": 0.376953125,
"learning_rate": 0.00019819402074435633,
"loss": 0.0204,
"step": 374
},
{
"epoch": 0.22071806945261918,
"grad_norm": 1.453125,
"learning_rate": 0.00019816961561928007,
"loss": 0.0189,
"step": 375
},
{
"epoch": 0.2213066509711595,
"grad_norm": 0.64453125,
"learning_rate": 0.0001981452104942038,
"loss": 0.0208,
"step": 376
},
{
"epoch": 0.22189523248969983,
"grad_norm": 0.25,
"learning_rate": 0.00019812080536912751,
"loss": 0.0203,
"step": 377
},
{
"epoch": 0.22248381400824013,
"grad_norm": 0.2431640625,
"learning_rate": 0.00019809640024405126,
"loss": 0.0175,
"step": 378
},
{
"epoch": 0.22307239552678046,
"grad_norm": 0.138671875,
"learning_rate": 0.00019807199511897498,
"loss": 0.0154,
"step": 379
},
{
"epoch": 0.22366097704532079,
"grad_norm": 2.328125,
"learning_rate": 0.00019804758999389873,
"loss": 0.0164,
"step": 380
},
{
"epoch": 0.22424955856386108,
"grad_norm": 0.53515625,
"learning_rate": 0.00019802318486882245,
"loss": 0.0175,
"step": 381
},
{
"epoch": 0.2248381400824014,
"grad_norm": 0.427734375,
"learning_rate": 0.0001979987797437462,
"loss": 0.0154,
"step": 382
},
{
"epoch": 0.22542672160094174,
"grad_norm": 0.1328125,
"learning_rate": 0.00019797437461866992,
"loss": 0.014,
"step": 383
},
{
"epoch": 0.22601530311948204,
"grad_norm": 0.166015625,
"learning_rate": 0.00019794996949359367,
"loss": 0.0153,
"step": 384
},
{
"epoch": 0.22660388463802236,
"grad_norm": 0.28515625,
"learning_rate": 0.0001979255643685174,
"loss": 0.0172,
"step": 385
},
{
"epoch": 0.2271924661565627,
"grad_norm": 0.0830078125,
"learning_rate": 0.00019790115924344113,
"loss": 0.0115,
"step": 386
},
{
"epoch": 0.22778104767510302,
"grad_norm": 0.11474609375,
"learning_rate": 0.00019787675411836485,
"loss": 0.0132,
"step": 387
},
{
"epoch": 0.22836962919364331,
"grad_norm": 1.4609375,
"learning_rate": 0.0001978523489932886,
"loss": 0.0234,
"step": 388
},
{
"epoch": 0.22895821071218364,
"grad_norm": 0.10498046875,
"learning_rate": 0.00019782794386821232,
"loss": 0.0125,
"step": 389
},
{
"epoch": 0.22954679223072397,
"grad_norm": 0.1328125,
"learning_rate": 0.00019780353874313607,
"loss": 0.0156,
"step": 390
},
{
"epoch": 0.23013537374926427,
"grad_norm": 1.15625,
"learning_rate": 0.0001977791336180598,
"loss": 0.0522,
"step": 391
},
{
"epoch": 0.2307239552678046,
"grad_norm": 0.154296875,
"learning_rate": 0.00019775472849298354,
"loss": 0.0151,
"step": 392
},
{
"epoch": 0.23131253678634492,
"grad_norm": 0.1591796875,
"learning_rate": 0.00019773032336790726,
"loss": 0.016,
"step": 393
},
{
"epoch": 0.23190111830488522,
"grad_norm": 1.1953125,
"learning_rate": 0.000197705918242831,
"loss": 0.0198,
"step": 394
},
{
"epoch": 0.23248969982342554,
"grad_norm": 0.1513671875,
"learning_rate": 0.00019768151311775473,
"loss": 0.0133,
"step": 395
},
{
"epoch": 0.23307828134196587,
"grad_norm": 0.1259765625,
"learning_rate": 0.00019765710799267847,
"loss": 0.0128,
"step": 396
},
{
"epoch": 0.23366686286050617,
"grad_norm": 0.251953125,
"learning_rate": 0.0001976327028676022,
"loss": 0.0173,
"step": 397
},
{
"epoch": 0.2342554443790465,
"grad_norm": 1.5625,
"learning_rate": 0.00019760829774252594,
"loss": 0.0593,
"step": 398
},
{
"epoch": 0.23484402589758682,
"grad_norm": 0.1376953125,
"learning_rate": 0.00019758389261744966,
"loss": 0.0148,
"step": 399
},
{
"epoch": 0.23543260741612712,
"grad_norm": 0.189453125,
"learning_rate": 0.0001975594874923734,
"loss": 0.0144,
"step": 400
},
{
"epoch": 0.23602118893466745,
"grad_norm": 0.10009765625,
"learning_rate": 0.00019753508236729713,
"loss": 0.0127,
"step": 401
},
{
"epoch": 0.23660977045320777,
"grad_norm": 0.12890625,
"learning_rate": 0.00019751067724222088,
"loss": 0.0137,
"step": 402
},
{
"epoch": 0.2371983519717481,
"grad_norm": 0.10888671875,
"learning_rate": 0.0001974862721171446,
"loss": 0.0126,
"step": 403
},
{
"epoch": 0.2377869334902884,
"grad_norm": 0.119140625,
"learning_rate": 0.00019746186699206835,
"loss": 0.0149,
"step": 404
},
{
"epoch": 0.23837551500882873,
"grad_norm": 0.318359375,
"learning_rate": 0.00019743746186699207,
"loss": 0.0168,
"step": 405
},
{
"epoch": 0.23896409652736905,
"grad_norm": 0.2265625,
"learning_rate": 0.00019741305674191581,
"loss": 0.0188,
"step": 406
},
{
"epoch": 0.23955267804590935,
"grad_norm": 0.11083984375,
"learning_rate": 0.00019738865161683954,
"loss": 0.0125,
"step": 407
},
{
"epoch": 0.24014125956444968,
"grad_norm": 0.84765625,
"learning_rate": 0.00019736424649176328,
"loss": 0.0361,
"step": 408
},
{
"epoch": 0.24072984108299,
"grad_norm": 0.181640625,
"learning_rate": 0.00019733984136668703,
"loss": 0.014,
"step": 409
},
{
"epoch": 0.2413184226015303,
"grad_norm": 0.1171875,
"learning_rate": 0.00019731543624161075,
"loss": 0.0139,
"step": 410
},
{
"epoch": 0.24190700412007063,
"grad_norm": 0.08544921875,
"learning_rate": 0.0001972910311165345,
"loss": 0.0115,
"step": 411
},
{
"epoch": 0.24249558563861096,
"grad_norm": 0.318359375,
"learning_rate": 0.00019726662599145822,
"loss": 0.0173,
"step": 412
},
{
"epoch": 0.24308416715715125,
"grad_norm": 0.08935546875,
"learning_rate": 0.00019724222086638197,
"loss": 0.012,
"step": 413
},
{
"epoch": 0.24367274867569158,
"grad_norm": 1.0859375,
"learning_rate": 0.0001972178157413057,
"loss": 0.0273,
"step": 414
},
{
"epoch": 0.2442613301942319,
"grad_norm": 0.08740234375,
"learning_rate": 0.00019719341061622943,
"loss": 0.0117,
"step": 415
},
{
"epoch": 0.2448499117127722,
"grad_norm": 0.0751953125,
"learning_rate": 0.00019716900549115315,
"loss": 0.0109,
"step": 416
},
{
"epoch": 0.24543849323131253,
"grad_norm": 0.2294921875,
"learning_rate": 0.0001971446003660769,
"loss": 0.0188,
"step": 417
},
{
"epoch": 0.24602707474985286,
"grad_norm": 0.12890625,
"learning_rate": 0.00019712019524100062,
"loss": 0.015,
"step": 418
},
{
"epoch": 0.24661565626839319,
"grad_norm": 0.138671875,
"learning_rate": 0.00019709579011592437,
"loss": 0.014,
"step": 419
},
{
"epoch": 0.24720423778693348,
"grad_norm": 0.10546875,
"learning_rate": 0.0001970713849908481,
"loss": 0.0106,
"step": 420
},
{
"epoch": 0.2477928193054738,
"grad_norm": 0.083984375,
"learning_rate": 0.00019704697986577184,
"loss": 0.0115,
"step": 421
},
{
"epoch": 0.24838140082401414,
"grad_norm": 0.314453125,
"learning_rate": 0.00019702257474069556,
"loss": 0.0153,
"step": 422
},
{
"epoch": 0.24896998234255444,
"grad_norm": 0.23046875,
"learning_rate": 0.0001969981696156193,
"loss": 0.0181,
"step": 423
},
{
"epoch": 0.24955856386109476,
"grad_norm": 0.1171875,
"learning_rate": 0.00019697376449054303,
"loss": 0.0134,
"step": 424
},
{
"epoch": 0.25014714537963506,
"grad_norm": 0.27734375,
"learning_rate": 0.00019694935936546677,
"loss": 0.019,
"step": 425
},
{
"epoch": 0.2507357268981754,
"grad_norm": 0.177734375,
"learning_rate": 0.0001969249542403905,
"loss": 0.0149,
"step": 426
},
{
"epoch": 0.2513243084167157,
"grad_norm": 1.15625,
"learning_rate": 0.00019690054911531422,
"loss": 0.0165,
"step": 427
},
{
"epoch": 0.251912889935256,
"grad_norm": 0.84375,
"learning_rate": 0.00019687614399023794,
"loss": 0.0131,
"step": 428
},
{
"epoch": 0.25250147145379637,
"grad_norm": 0.115234375,
"learning_rate": 0.00019685173886516168,
"loss": 0.0109,
"step": 429
},
{
"epoch": 0.25309005297233667,
"grad_norm": 0.67578125,
"learning_rate": 0.00019682733374008543,
"loss": 0.0229,
"step": 430
},
{
"epoch": 0.25367863449087696,
"grad_norm": 0.484375,
"learning_rate": 0.00019680292861500915,
"loss": 0.0172,
"step": 431
},
{
"epoch": 0.2542672160094173,
"grad_norm": 0.1484375,
"learning_rate": 0.0001967785234899329,
"loss": 0.0138,
"step": 432
},
{
"epoch": 0.2548557975279576,
"grad_norm": 0.220703125,
"learning_rate": 0.00019675411836485662,
"loss": 0.017,
"step": 433
},
{
"epoch": 0.2554443790464979,
"grad_norm": 0.11328125,
"learning_rate": 0.00019672971323978037,
"loss": 0.0125,
"step": 434
},
{
"epoch": 0.25603296056503827,
"grad_norm": 0.1474609375,
"learning_rate": 0.0001967053081147041,
"loss": 0.0131,
"step": 435
},
{
"epoch": 0.25662154208357857,
"grad_norm": 0.1533203125,
"learning_rate": 0.00019668090298962784,
"loss": 0.0147,
"step": 436
},
{
"epoch": 0.25721012360211887,
"grad_norm": 0.0966796875,
"learning_rate": 0.00019665649786455156,
"loss": 0.0122,
"step": 437
},
{
"epoch": 0.2577987051206592,
"grad_norm": 0.11474609375,
"learning_rate": 0.0001966320927394753,
"loss": 0.014,
"step": 438
},
{
"epoch": 0.2583872866391995,
"grad_norm": 0.06640625,
"learning_rate": 0.00019660768761439902,
"loss": 0.0097,
"step": 439
},
{
"epoch": 0.2589758681577399,
"grad_norm": 0.2470703125,
"learning_rate": 0.00019658328248932277,
"loss": 0.0133,
"step": 440
},
{
"epoch": 0.2595644496762802,
"grad_norm": 0.07421875,
"learning_rate": 0.0001965588773642465,
"loss": 0.0104,
"step": 441
},
{
"epoch": 0.26015303119482047,
"grad_norm": 0.08154296875,
"learning_rate": 0.00019653447223917024,
"loss": 0.0123,
"step": 442
},
{
"epoch": 0.2607416127133608,
"grad_norm": 0.1005859375,
"learning_rate": 0.00019651006711409396,
"loss": 0.0117,
"step": 443
},
{
"epoch": 0.2613301942319011,
"grad_norm": 0.55859375,
"learning_rate": 0.0001964856619890177,
"loss": 0.0182,
"step": 444
},
{
"epoch": 0.2619187757504414,
"grad_norm": 0.0712890625,
"learning_rate": 0.00019646125686394143,
"loss": 0.0111,
"step": 445
},
{
"epoch": 0.2625073572689818,
"grad_norm": 0.52734375,
"learning_rate": 0.00019643685173886518,
"loss": 0.0143,
"step": 446
},
{
"epoch": 0.2630959387875221,
"grad_norm": 0.150390625,
"learning_rate": 0.0001964124466137889,
"loss": 0.0293,
"step": 447
},
{
"epoch": 0.2636845203060624,
"grad_norm": 0.2333984375,
"learning_rate": 0.00019638804148871264,
"loss": 0.0152,
"step": 448
},
{
"epoch": 0.26427310182460273,
"grad_norm": 0.166015625,
"learning_rate": 0.00019636363636363636,
"loss": 0.0135,
"step": 449
},
{
"epoch": 0.26486168334314303,
"grad_norm": 0.08447265625,
"learning_rate": 0.0001963392312385601,
"loss": 0.011,
"step": 450
},
{
"epoch": 0.2654502648616833,
"grad_norm": 0.359375,
"learning_rate": 0.00019631482611348383,
"loss": 0.0122,
"step": 451
},
{
"epoch": 0.2660388463802237,
"grad_norm": 0.076171875,
"learning_rate": 0.00019629042098840758,
"loss": 0.0107,
"step": 452
},
{
"epoch": 0.266627427898764,
"grad_norm": 0.53125,
"learning_rate": 0.0001962660158633313,
"loss": 0.0151,
"step": 453
},
{
"epoch": 0.2672160094173043,
"grad_norm": 0.359375,
"learning_rate": 0.00019624161073825505,
"loss": 0.016,
"step": 454
},
{
"epoch": 0.26780459093584463,
"grad_norm": 0.25,
"learning_rate": 0.00019621720561317877,
"loss": 0.0182,
"step": 455
},
{
"epoch": 0.26839317245438493,
"grad_norm": 1.0390625,
"learning_rate": 0.00019619280048810252,
"loss": 0.0123,
"step": 456
},
{
"epoch": 0.26898175397292523,
"grad_norm": 0.099609375,
"learning_rate": 0.00019616839536302624,
"loss": 0.013,
"step": 457
},
{
"epoch": 0.2695703354914656,
"grad_norm": 1.0625,
"learning_rate": 0.00019614399023794998,
"loss": 0.0222,
"step": 458
},
{
"epoch": 0.2701589170100059,
"grad_norm": 0.2109375,
"learning_rate": 0.0001961195851128737,
"loss": 0.0125,
"step": 459
},
{
"epoch": 0.2707474985285462,
"grad_norm": 0.15625,
"learning_rate": 0.00019609517998779745,
"loss": 0.013,
"step": 460
},
{
"epoch": 0.27133608004708654,
"grad_norm": 0.10498046875,
"learning_rate": 0.00019607077486272117,
"loss": 0.0107,
"step": 461
},
{
"epoch": 0.27192466156562684,
"grad_norm": 1.0703125,
"learning_rate": 0.00019604636973764492,
"loss": 0.0153,
"step": 462
},
{
"epoch": 0.27251324308416713,
"grad_norm": 0.173828125,
"learning_rate": 0.00019602196461256867,
"loss": 0.0139,
"step": 463
},
{
"epoch": 0.2731018246027075,
"grad_norm": 1.390625,
"learning_rate": 0.0001959975594874924,
"loss": 0.019,
"step": 464
},
{
"epoch": 0.2736904061212478,
"grad_norm": 0.1474609375,
"learning_rate": 0.00019597315436241613,
"loss": 0.0128,
"step": 465
},
{
"epoch": 0.2742789876397881,
"grad_norm": 0.11962890625,
"learning_rate": 0.00019594874923733986,
"loss": 0.012,
"step": 466
},
{
"epoch": 0.27486756915832844,
"grad_norm": 0.248046875,
"learning_rate": 0.0001959243441122636,
"loss": 0.0151,
"step": 467
},
{
"epoch": 0.27545615067686874,
"grad_norm": 6.1875,
"learning_rate": 0.00019589993898718732,
"loss": 0.0189,
"step": 468
},
{
"epoch": 0.27604473219540904,
"grad_norm": 0.08349609375,
"learning_rate": 0.00019587553386211107,
"loss": 0.0112,
"step": 469
},
{
"epoch": 0.2766333137139494,
"grad_norm": 0.1181640625,
"learning_rate": 0.0001958511287370348,
"loss": 0.0143,
"step": 470
},
{
"epoch": 0.2772218952324897,
"grad_norm": 0.388671875,
"learning_rate": 0.00019582672361195854,
"loss": 0.0141,
"step": 471
},
{
"epoch": 0.27781047675103004,
"grad_norm": 0.08837890625,
"learning_rate": 0.00019580231848688226,
"loss": 0.011,
"step": 472
},
{
"epoch": 0.27839905826957034,
"grad_norm": 0.07373046875,
"learning_rate": 0.000195777913361806,
"loss": 0.0106,
"step": 473
},
{
"epoch": 0.27898763978811064,
"grad_norm": 0.2373046875,
"learning_rate": 0.00019575350823672973,
"loss": 0.0139,
"step": 474
},
{
"epoch": 0.279576221306651,
"grad_norm": 0.1025390625,
"learning_rate": 0.00019572910311165348,
"loss": 0.0119,
"step": 475
},
{
"epoch": 0.2801648028251913,
"grad_norm": 0.09814453125,
"learning_rate": 0.0001957046979865772,
"loss": 0.0122,
"step": 476
},
{
"epoch": 0.2807533843437316,
"grad_norm": 0.09619140625,
"learning_rate": 0.00019568029286150094,
"loss": 0.0113,
"step": 477
},
{
"epoch": 0.28134196586227195,
"grad_norm": 0.08203125,
"learning_rate": 0.00019565588773642464,
"loss": 0.0121,
"step": 478
},
{
"epoch": 0.28193054738081225,
"grad_norm": 0.59375,
"learning_rate": 0.00019563148261134838,
"loss": 0.013,
"step": 479
},
{
"epoch": 0.28251912889935255,
"grad_norm": 0.2734375,
"learning_rate": 0.0001956070774862721,
"loss": 0.0159,
"step": 480
},
{
"epoch": 0.2831077104178929,
"grad_norm": 0.1201171875,
"learning_rate": 0.00019558267236119585,
"loss": 0.0125,
"step": 481
},
{
"epoch": 0.2836962919364332,
"grad_norm": 0.890625,
"learning_rate": 0.00019555826723611957,
"loss": 0.0294,
"step": 482
},
{
"epoch": 0.2842848734549735,
"grad_norm": 0.08740234375,
"learning_rate": 0.00019553386211104332,
"loss": 0.011,
"step": 483
},
{
"epoch": 0.28487345497351385,
"grad_norm": 0.39453125,
"learning_rate": 0.00019550945698596707,
"loss": 0.0182,
"step": 484
},
{
"epoch": 0.28546203649205415,
"grad_norm": 0.52734375,
"learning_rate": 0.0001954850518608908,
"loss": 0.0165,
"step": 485
},
{
"epoch": 0.28605061801059445,
"grad_norm": 0.15625,
"learning_rate": 0.00019546064673581454,
"loss": 0.03,
"step": 486
},
{
"epoch": 0.2866391995291348,
"grad_norm": 0.1298828125,
"learning_rate": 0.00019543624161073826,
"loss": 0.0118,
"step": 487
},
{
"epoch": 0.2872277810476751,
"grad_norm": 0.10888671875,
"learning_rate": 0.000195411836485662,
"loss": 0.0121,
"step": 488
},
{
"epoch": 0.2878163625662154,
"grad_norm": 1.890625,
"learning_rate": 0.00019538743136058572,
"loss": 0.0276,
"step": 489
},
{
"epoch": 0.28840494408475575,
"grad_norm": 0.11962890625,
"learning_rate": 0.00019536302623550947,
"loss": 0.0146,
"step": 490
},
{
"epoch": 0.28899352560329605,
"grad_norm": 0.1044921875,
"learning_rate": 0.0001953386211104332,
"loss": 0.0118,
"step": 491
},
{
"epoch": 0.28958210712183635,
"grad_norm": 0.0888671875,
"learning_rate": 0.00019531421598535694,
"loss": 0.0112,
"step": 492
},
{
"epoch": 0.2901706886403767,
"grad_norm": 0.2333984375,
"learning_rate": 0.00019528981086028066,
"loss": 0.0107,
"step": 493
},
{
"epoch": 0.290759270158917,
"grad_norm": 0.09033203125,
"learning_rate": 0.0001952654057352044,
"loss": 0.0112,
"step": 494
},
{
"epoch": 0.2913478516774573,
"grad_norm": 0.08837890625,
"learning_rate": 0.00019524100061012813,
"loss": 0.0117,
"step": 495
},
{
"epoch": 0.29193643319599766,
"grad_norm": 0.0966796875,
"learning_rate": 0.00019521659548505188,
"loss": 0.0117,
"step": 496
},
{
"epoch": 0.29252501471453796,
"grad_norm": 1.34375,
"learning_rate": 0.0001951921903599756,
"loss": 0.0131,
"step": 497
},
{
"epoch": 0.29311359623307826,
"grad_norm": 0.171875,
"learning_rate": 0.00019516778523489934,
"loss": 0.0114,
"step": 498
},
{
"epoch": 0.2937021777516186,
"grad_norm": 0.1875,
"learning_rate": 0.00019514338010982306,
"loss": 0.0117,
"step": 499
},
{
"epoch": 0.2942907592701589,
"grad_norm": 0.08203125,
"learning_rate": 0.0001951189749847468,
"loss": 0.0114,
"step": 500
},
{
"epoch": 0.29487934078869926,
"grad_norm": 0.10205078125,
"learning_rate": 0.00019509456985967053,
"loss": 0.012,
"step": 501
},
{
"epoch": 0.29546792230723956,
"grad_norm": 0.240234375,
"learning_rate": 0.00019507016473459428,
"loss": 0.014,
"step": 502
},
{
"epoch": 0.29605650382577986,
"grad_norm": 0.24609375,
"learning_rate": 0.000195045759609518,
"loss": 0.0193,
"step": 503
},
{
"epoch": 0.2966450853443202,
"grad_norm": 0.06396484375,
"learning_rate": 0.00019502135448444175,
"loss": 0.0114,
"step": 504
},
{
"epoch": 0.2972336668628605,
"grad_norm": 0.08740234375,
"learning_rate": 0.00019499694935936547,
"loss": 0.0111,
"step": 505
},
{
"epoch": 0.2978222483814008,
"grad_norm": 0.11767578125,
"learning_rate": 0.00019497254423428922,
"loss": 0.0135,
"step": 506
},
{
"epoch": 0.29841082989994117,
"grad_norm": 0.1640625,
"learning_rate": 0.00019494813910921294,
"loss": 0.0125,
"step": 507
},
{
"epoch": 0.29899941141848146,
"grad_norm": 8.3125,
"learning_rate": 0.00019492373398413668,
"loss": 0.0181,
"step": 508
},
{
"epoch": 0.29958799293702176,
"grad_norm": 0.083984375,
"learning_rate": 0.0001948993288590604,
"loss": 0.011,
"step": 509
},
{
"epoch": 0.3001765744555621,
"grad_norm": 0.1904296875,
"learning_rate": 0.00019487492373398415,
"loss": 0.0129,
"step": 510
},
{
"epoch": 0.3007651559741024,
"grad_norm": 0.09619140625,
"learning_rate": 0.00019485051860890787,
"loss": 0.0108,
"step": 511
},
{
"epoch": 0.3013537374926427,
"grad_norm": 0.1005859375,
"learning_rate": 0.00019482611348383162,
"loss": 0.0129,
"step": 512
},
{
"epoch": 0.30194231901118307,
"grad_norm": 0.08447265625,
"learning_rate": 0.00019480170835875534,
"loss": 0.0109,
"step": 513
},
{
"epoch": 0.30253090052972337,
"grad_norm": 0.055419921875,
"learning_rate": 0.0001947773032336791,
"loss": 0.0097,
"step": 514
},
{
"epoch": 0.30311948204826367,
"grad_norm": 0.09716796875,
"learning_rate": 0.0001947528981086028,
"loss": 0.0133,
"step": 515
},
{
"epoch": 0.303708063566804,
"grad_norm": 2.546875,
"learning_rate": 0.00019472849298352656,
"loss": 0.0112,
"step": 516
},
{
"epoch": 0.3042966450853443,
"grad_norm": 0.1318359375,
"learning_rate": 0.0001947040878584503,
"loss": 0.0129,
"step": 517
},
{
"epoch": 0.3048852266038846,
"grad_norm": 0.294921875,
"learning_rate": 0.00019467968273337402,
"loss": 0.0147,
"step": 518
},
{
"epoch": 0.305473808122425,
"grad_norm": 0.08349609375,
"learning_rate": 0.00019465527760829777,
"loss": 0.0116,
"step": 519
},
{
"epoch": 0.30606238964096527,
"grad_norm": 0.08837890625,
"learning_rate": 0.0001946308724832215,
"loss": 0.0106,
"step": 520
},
{
"epoch": 0.30665097115950557,
"grad_norm": 0.0908203125,
"learning_rate": 0.00019460646735814524,
"loss": 0.0113,
"step": 521
},
{
"epoch": 0.3072395526780459,
"grad_norm": 0.400390625,
"learning_rate": 0.00019458206223306896,
"loss": 0.0187,
"step": 522
},
{
"epoch": 0.3078281341965862,
"grad_norm": 0.48828125,
"learning_rate": 0.0001945576571079927,
"loss": 0.0226,
"step": 523
},
{
"epoch": 0.3084167157151265,
"grad_norm": 0.1328125,
"learning_rate": 0.00019453325198291643,
"loss": 0.0149,
"step": 524
},
{
"epoch": 0.3090052972336669,
"grad_norm": 0.08203125,
"learning_rate": 0.00019450884685784018,
"loss": 0.0112,
"step": 525
},
{
"epoch": 0.3095938787522072,
"grad_norm": 0.054443359375,
"learning_rate": 0.0001944844417327639,
"loss": 0.0103,
"step": 526
},
{
"epoch": 0.3101824602707475,
"grad_norm": 0.609375,
"learning_rate": 0.00019446003660768764,
"loss": 0.0234,
"step": 527
},
{
"epoch": 0.3107710417892878,
"grad_norm": 0.267578125,
"learning_rate": 0.00019443563148261134,
"loss": 0.0108,
"step": 528
},
{
"epoch": 0.3113596233078281,
"grad_norm": 0.2216796875,
"learning_rate": 0.00019441122635753508,
"loss": 0.0138,
"step": 529
},
{
"epoch": 0.3119482048263684,
"grad_norm": 0.125,
"learning_rate": 0.0001943868212324588,
"loss": 0.0107,
"step": 530
},
{
"epoch": 0.3125367863449088,
"grad_norm": 0.1357421875,
"learning_rate": 0.00019436241610738255,
"loss": 0.0128,
"step": 531
},
{
"epoch": 0.3131253678634491,
"grad_norm": 0.10986328125,
"learning_rate": 0.00019433801098230627,
"loss": 0.0118,
"step": 532
},
{
"epoch": 0.31371394938198943,
"grad_norm": 0.09326171875,
"learning_rate": 0.00019431360585723002,
"loss": 0.0113,
"step": 533
},
{
"epoch": 0.31430253090052973,
"grad_norm": 0.125,
"learning_rate": 0.00019428920073215374,
"loss": 0.0124,
"step": 534
},
{
"epoch": 0.31489111241907003,
"grad_norm": 0.142578125,
"learning_rate": 0.0001942647956070775,
"loss": 0.0123,
"step": 535
},
{
"epoch": 0.3154796939376104,
"grad_norm": 0.07275390625,
"learning_rate": 0.0001942403904820012,
"loss": 0.0104,
"step": 536
},
{
"epoch": 0.3160682754561507,
"grad_norm": 0.10009765625,
"learning_rate": 0.00019421598535692496,
"loss": 0.012,
"step": 537
},
{
"epoch": 0.316656856974691,
"grad_norm": 0.279296875,
"learning_rate": 0.0001941915802318487,
"loss": 0.0141,
"step": 538
},
{
"epoch": 0.31724543849323134,
"grad_norm": 0.72265625,
"learning_rate": 0.00019416717510677242,
"loss": 0.0271,
"step": 539
},
{
"epoch": 0.31783402001177163,
"grad_norm": 0.08203125,
"learning_rate": 0.00019414276998169617,
"loss": 0.0114,
"step": 540
},
{
"epoch": 0.31842260153031193,
"grad_norm": 1.4765625,
"learning_rate": 0.0001941183648566199,
"loss": 0.0378,
"step": 541
},
{
"epoch": 0.3190111830488523,
"grad_norm": 0.2578125,
"learning_rate": 0.00019409395973154364,
"loss": 0.0176,
"step": 542
},
{
"epoch": 0.3195997645673926,
"grad_norm": 0.2158203125,
"learning_rate": 0.00019406955460646736,
"loss": 0.0148,
"step": 543
},
{
"epoch": 0.3201883460859329,
"grad_norm": 0.251953125,
"learning_rate": 0.0001940451494813911,
"loss": 0.0144,
"step": 544
},
{
"epoch": 0.32077692760447324,
"grad_norm": 0.11669921875,
"learning_rate": 0.00019402074435631483,
"loss": 0.0121,
"step": 545
},
{
"epoch": 0.32136550912301354,
"grad_norm": 0.130859375,
"learning_rate": 0.00019399633923123858,
"loss": 0.0131,
"step": 546
},
{
"epoch": 0.32195409064155384,
"grad_norm": 0.10107421875,
"learning_rate": 0.0001939719341061623,
"loss": 0.0115,
"step": 547
},
{
"epoch": 0.3225426721600942,
"grad_norm": 0.1806640625,
"learning_rate": 0.00019394752898108604,
"loss": 0.0135,
"step": 548
},
{
"epoch": 0.3231312536786345,
"grad_norm": 0.62890625,
"learning_rate": 0.00019392312385600976,
"loss": 0.0154,
"step": 549
},
{
"epoch": 0.3237198351971748,
"grad_norm": 0.08935546875,
"learning_rate": 0.0001938987187309335,
"loss": 0.0112,
"step": 550
},
{
"epoch": 0.32430841671571514,
"grad_norm": 0.0908203125,
"learning_rate": 0.00019387431360585723,
"loss": 0.0118,
"step": 551
},
{
"epoch": 0.32489699823425544,
"grad_norm": 17.375,
"learning_rate": 0.00019384990848078098,
"loss": 0.0457,
"step": 552
},
{
"epoch": 0.32548557975279574,
"grad_norm": 4.5625,
"learning_rate": 0.0001938255033557047,
"loss": 0.0131,
"step": 553
},
{
"epoch": 0.3260741612713361,
"grad_norm": 0.15234375,
"learning_rate": 0.00019380109823062845,
"loss": 0.0132,
"step": 554
},
{
"epoch": 0.3266627427898764,
"grad_norm": 0.220703125,
"learning_rate": 0.00019377669310555217,
"loss": 0.0148,
"step": 555
},
{
"epoch": 0.3272513243084167,
"grad_norm": 0.08251953125,
"learning_rate": 0.00019375228798047592,
"loss": 0.0113,
"step": 556
},
{
"epoch": 0.32783990582695705,
"grad_norm": 0.52734375,
"learning_rate": 0.00019372788285539964,
"loss": 0.012,
"step": 557
},
{
"epoch": 0.32842848734549734,
"grad_norm": 0.083984375,
"learning_rate": 0.00019370347773032338,
"loss": 0.0106,
"step": 558
},
{
"epoch": 0.32901706886403764,
"grad_norm": 0.2021484375,
"learning_rate": 0.0001936790726052471,
"loss": 0.0145,
"step": 559
},
{
"epoch": 0.329605650382578,
"grad_norm": 0.0859375,
"learning_rate": 0.00019365466748017085,
"loss": 0.0099,
"step": 560
},
{
"epoch": 0.3301942319011183,
"grad_norm": 0.06640625,
"learning_rate": 0.00019363026235509457,
"loss": 0.0114,
"step": 561
},
{
"epoch": 0.33078281341965865,
"grad_norm": 0.1982421875,
"learning_rate": 0.00019360585723001832,
"loss": 0.015,
"step": 562
},
{
"epoch": 0.33137139493819895,
"grad_norm": 0.0810546875,
"learning_rate": 0.00019358145210494204,
"loss": 0.0107,
"step": 563
},
{
"epoch": 0.33195997645673925,
"grad_norm": 0.0625,
"learning_rate": 0.0001935570469798658,
"loss": 0.0106,
"step": 564
},
{
"epoch": 0.3325485579752796,
"grad_norm": 0.056396484375,
"learning_rate": 0.0001935326418547895,
"loss": 0.0087,
"step": 565
},
{
"epoch": 0.3331371394938199,
"grad_norm": 0.08642578125,
"learning_rate": 0.00019350823672971326,
"loss": 0.0125,
"step": 566
},
{
"epoch": 0.3337257210123602,
"grad_norm": 0.23828125,
"learning_rate": 0.00019348383160463698,
"loss": 0.0127,
"step": 567
},
{
"epoch": 0.33431430253090055,
"grad_norm": 0.5390625,
"learning_rate": 0.00019345942647956072,
"loss": 0.0187,
"step": 568
},
{
"epoch": 0.33490288404944085,
"grad_norm": 1.015625,
"learning_rate": 0.00019343502135448445,
"loss": 0.0132,
"step": 569
},
{
"epoch": 0.33549146556798115,
"grad_norm": 0.10302734375,
"learning_rate": 0.0001934106162294082,
"loss": 0.0126,
"step": 570
},
{
"epoch": 0.3360800470865215,
"grad_norm": 0.126953125,
"learning_rate": 0.00019338621110433194,
"loss": 0.0192,
"step": 571
},
{
"epoch": 0.3366686286050618,
"grad_norm": 0.087890625,
"learning_rate": 0.00019336180597925566,
"loss": 0.0114,
"step": 572
},
{
"epoch": 0.3372572101236021,
"grad_norm": 0.16796875,
"learning_rate": 0.0001933374008541794,
"loss": 0.014,
"step": 573
},
{
"epoch": 0.33784579164214246,
"grad_norm": 0.076171875,
"learning_rate": 0.00019331299572910313,
"loss": 0.0106,
"step": 574
},
{
"epoch": 0.33843437316068276,
"grad_norm": 0.0693359375,
"learning_rate": 0.00019328859060402688,
"loss": 0.0105,
"step": 575
},
{
"epoch": 0.33902295467922305,
"grad_norm": 0.07080078125,
"learning_rate": 0.0001932641854789506,
"loss": 0.0108,
"step": 576
},
{
"epoch": 0.3396115361977634,
"grad_norm": 0.059326171875,
"learning_rate": 0.00019323978035387434,
"loss": 0.0095,
"step": 577
},
{
"epoch": 0.3402001177163037,
"grad_norm": 0.0615234375,
"learning_rate": 0.00019321537522879806,
"loss": 0.0102,
"step": 578
},
{
"epoch": 0.340788699234844,
"grad_norm": 0.0634765625,
"learning_rate": 0.00019319097010372179,
"loss": 0.0098,
"step": 579
},
{
"epoch": 0.34137728075338436,
"grad_norm": 3.6875,
"learning_rate": 0.0001931665649786455,
"loss": 0.0378,
"step": 580
},
{
"epoch": 0.34196586227192466,
"grad_norm": 0.173828125,
"learning_rate": 0.00019314215985356925,
"loss": 0.0141,
"step": 581
},
{
"epoch": 0.34255444379046496,
"grad_norm": 0.09033203125,
"learning_rate": 0.00019311775472849297,
"loss": 0.0107,
"step": 582
},
{
"epoch": 0.3431430253090053,
"grad_norm": 4.40625,
"learning_rate": 0.00019309334960341672,
"loss": 0.0598,
"step": 583
},
{
"epoch": 0.3437316068275456,
"grad_norm": 0.6328125,
"learning_rate": 0.00019306894447834044,
"loss": 0.032,
"step": 584
},
{
"epoch": 0.3443201883460859,
"grad_norm": 0.11669921875,
"learning_rate": 0.0001930445393532642,
"loss": 0.0117,
"step": 585
},
{
"epoch": 0.34490876986462626,
"grad_norm": 0.064453125,
"learning_rate": 0.0001930201342281879,
"loss": 0.0093,
"step": 586
},
{
"epoch": 0.34549735138316656,
"grad_norm": 0.07373046875,
"learning_rate": 0.00019299572910311166,
"loss": 0.0102,
"step": 587
},
{
"epoch": 0.34608593290170686,
"grad_norm": 0.060302734375,
"learning_rate": 0.00019297132397803538,
"loss": 0.0096,
"step": 588
},
{
"epoch": 0.3466745144202472,
"grad_norm": 0.73046875,
"learning_rate": 0.00019294691885295913,
"loss": 0.0176,
"step": 589
},
{
"epoch": 0.3472630959387875,
"grad_norm": 0.07763671875,
"learning_rate": 0.00019292251372788285,
"loss": 0.0107,
"step": 590
},
{
"epoch": 0.3478516774573278,
"grad_norm": 0.06396484375,
"learning_rate": 0.0001928981086028066,
"loss": 0.0096,
"step": 591
},
{
"epoch": 0.34844025897586817,
"grad_norm": 0.078125,
"learning_rate": 0.00019287370347773034,
"loss": 0.0264,
"step": 592
},
{
"epoch": 0.34902884049440847,
"grad_norm": 0.275390625,
"learning_rate": 0.00019284929835265406,
"loss": 0.0139,
"step": 593
},
{
"epoch": 0.3496174220129488,
"grad_norm": 0.064453125,
"learning_rate": 0.0001928248932275778,
"loss": 0.0101,
"step": 594
},
{
"epoch": 0.3502060035314891,
"grad_norm": 0.0634765625,
"learning_rate": 0.00019280048810250153,
"loss": 0.01,
"step": 595
},
{
"epoch": 0.3507945850500294,
"grad_norm": 0.0771484375,
"learning_rate": 0.00019277608297742528,
"loss": 0.0106,
"step": 596
},
{
"epoch": 0.35138316656856977,
"grad_norm": 0.08056640625,
"learning_rate": 0.000192751677852349,
"loss": 0.0106,
"step": 597
},
{
"epoch": 0.35197174808711007,
"grad_norm": 0.07177734375,
"learning_rate": 0.00019272727272727274,
"loss": 0.0109,
"step": 598
},
{
"epoch": 0.35256032960565037,
"grad_norm": 0.052001953125,
"learning_rate": 0.00019270286760219647,
"loss": 0.0094,
"step": 599
},
{
"epoch": 0.3531489111241907,
"grad_norm": 0.08203125,
"learning_rate": 0.0001926784624771202,
"loss": 0.0096,
"step": 600
},
{
"epoch": 0.353737492642731,
"grad_norm": 0.0810546875,
"learning_rate": 0.00019265405735204393,
"loss": 0.0115,
"step": 601
},
{
"epoch": 0.3543260741612713,
"grad_norm": 0.1123046875,
"learning_rate": 0.00019262965222696768,
"loss": 0.0115,
"step": 602
},
{
"epoch": 0.3549146556798117,
"grad_norm": 0.060302734375,
"learning_rate": 0.0001926052471018914,
"loss": 0.0095,
"step": 603
},
{
"epoch": 0.355503237198352,
"grad_norm": 0.10693359375,
"learning_rate": 0.00019258084197681515,
"loss": 0.0121,
"step": 604
},
{
"epoch": 0.35609181871689227,
"grad_norm": 0.1259765625,
"learning_rate": 0.00019255643685173887,
"loss": 0.0129,
"step": 605
},
{
"epoch": 0.3566804002354326,
"grad_norm": 0.062255859375,
"learning_rate": 0.00019253203172666262,
"loss": 0.0099,
"step": 606
},
{
"epoch": 0.3572689817539729,
"grad_norm": 0.0908203125,
"learning_rate": 0.00019250762660158634,
"loss": 0.0108,
"step": 607
},
{
"epoch": 0.3578575632725132,
"grad_norm": 0.061767578125,
"learning_rate": 0.00019248322147651008,
"loss": 0.0094,
"step": 608
},
{
"epoch": 0.3584461447910536,
"grad_norm": 0.11572265625,
"learning_rate": 0.0001924588163514338,
"loss": 0.0097,
"step": 609
},
{
"epoch": 0.3590347263095939,
"grad_norm": 0.083984375,
"learning_rate": 0.00019243441122635755,
"loss": 0.0121,
"step": 610
},
{
"epoch": 0.3596233078281342,
"grad_norm": 0.10498046875,
"learning_rate": 0.00019241000610128127,
"loss": 0.012,
"step": 611
},
{
"epoch": 0.36021188934667453,
"grad_norm": 0.068359375,
"learning_rate": 0.00019238560097620502,
"loss": 0.0091,
"step": 612
},
{
"epoch": 0.36080047086521483,
"grad_norm": 0.060791015625,
"learning_rate": 0.00019236119585112874,
"loss": 0.0087,
"step": 613
},
{
"epoch": 0.3613890523837551,
"grad_norm": 0.061767578125,
"learning_rate": 0.0001923367907260525,
"loss": 0.009,
"step": 614
},
{
"epoch": 0.3619776339022955,
"grad_norm": 0.11083984375,
"learning_rate": 0.0001923123856009762,
"loss": 0.0115,
"step": 615
},
{
"epoch": 0.3625662154208358,
"grad_norm": 0.06884765625,
"learning_rate": 0.00019228798047589996,
"loss": 0.0102,
"step": 616
},
{
"epoch": 0.3631547969393761,
"grad_norm": 0.07373046875,
"learning_rate": 0.00019226357535082368,
"loss": 0.0109,
"step": 617
},
{
"epoch": 0.36374337845791643,
"grad_norm": 0.0859375,
"learning_rate": 0.00019223917022574743,
"loss": 0.0108,
"step": 618
},
{
"epoch": 0.36433195997645673,
"grad_norm": 0.054443359375,
"learning_rate": 0.00019221476510067115,
"loss": 0.0091,
"step": 619
},
{
"epoch": 0.36492054149499703,
"grad_norm": 0.1806640625,
"learning_rate": 0.0001921903599755949,
"loss": 0.0134,
"step": 620
},
{
"epoch": 0.3655091230135374,
"grad_norm": 0.057861328125,
"learning_rate": 0.00019216595485051861,
"loss": 0.0092,
"step": 621
},
{
"epoch": 0.3660977045320777,
"grad_norm": 0.07080078125,
"learning_rate": 0.00019214154972544236,
"loss": 0.011,
"step": 622
},
{
"epoch": 0.36668628605061804,
"grad_norm": 0.07080078125,
"learning_rate": 0.00019211714460036608,
"loss": 0.0095,
"step": 623
},
{
"epoch": 0.36727486756915834,
"grad_norm": 0.1748046875,
"learning_rate": 0.00019209273947528983,
"loss": 0.0114,
"step": 624
},
{
"epoch": 0.36786344908769864,
"grad_norm": 0.56640625,
"learning_rate": 0.00019206833435021358,
"loss": 0.0288,
"step": 625
},
{
"epoch": 0.368452030606239,
"grad_norm": 0.208984375,
"learning_rate": 0.0001920439292251373,
"loss": 0.0131,
"step": 626
},
{
"epoch": 0.3690406121247793,
"grad_norm": 0.07470703125,
"learning_rate": 0.00019201952410006104,
"loss": 0.0103,
"step": 627
},
{
"epoch": 0.3696291936433196,
"grad_norm": 0.09716796875,
"learning_rate": 0.00019199511897498477,
"loss": 0.0109,
"step": 628
},
{
"epoch": 0.37021777516185994,
"grad_norm": 0.0712890625,
"learning_rate": 0.00019197071384990849,
"loss": 0.0093,
"step": 629
},
{
"epoch": 0.37080635668040024,
"grad_norm": 0.09912109375,
"learning_rate": 0.0001919463087248322,
"loss": 0.0117,
"step": 630
},
{
"epoch": 0.37139493819894054,
"grad_norm": 0.0673828125,
"learning_rate": 0.00019192190359975595,
"loss": 0.0089,
"step": 631
},
{
"epoch": 0.3719835197174809,
"grad_norm": 0.16796875,
"learning_rate": 0.00019189749847467967,
"loss": 0.013,
"step": 632
},
{
"epoch": 0.3725721012360212,
"grad_norm": 0.064453125,
"learning_rate": 0.00019187309334960342,
"loss": 0.0095,
"step": 633
},
{
"epoch": 0.3731606827545615,
"grad_norm": 0.099609375,
"learning_rate": 0.00019184868822452714,
"loss": 0.0101,
"step": 634
},
{
"epoch": 0.37374926427310184,
"grad_norm": 0.0673828125,
"learning_rate": 0.0001918242830994509,
"loss": 0.0096,
"step": 635
},
{
"epoch": 0.37433784579164214,
"grad_norm": 0.08935546875,
"learning_rate": 0.0001917998779743746,
"loss": 0.0106,
"step": 636
},
{
"epoch": 0.37492642731018244,
"grad_norm": 0.068359375,
"learning_rate": 0.00019177547284929836,
"loss": 0.0098,
"step": 637
},
{
"epoch": 0.3755150088287228,
"grad_norm": 0.11328125,
"learning_rate": 0.00019175106772422208,
"loss": 0.0118,
"step": 638
},
{
"epoch": 0.3761035903472631,
"grad_norm": 0.06689453125,
"learning_rate": 0.00019172666259914583,
"loss": 0.0096,
"step": 639
},
{
"epoch": 0.3766921718658034,
"grad_norm": 0.1591796875,
"learning_rate": 0.00019170225747406955,
"loss": 0.0153,
"step": 640
},
{
"epoch": 0.37728075338434375,
"grad_norm": 0.06884765625,
"learning_rate": 0.0001916778523489933,
"loss": 0.0093,
"step": 641
},
{
"epoch": 0.37786933490288405,
"grad_norm": 0.1396484375,
"learning_rate": 0.00019165344722391701,
"loss": 0.0118,
"step": 642
},
{
"epoch": 0.37845791642142435,
"grad_norm": 1.015625,
"learning_rate": 0.00019162904209884076,
"loss": 0.0164,
"step": 643
},
{
"epoch": 0.3790464979399647,
"grad_norm": 0.1552734375,
"learning_rate": 0.00019160463697376448,
"loss": 0.0121,
"step": 644
},
{
"epoch": 0.379635079458505,
"grad_norm": 0.2431640625,
"learning_rate": 0.00019158023184868823,
"loss": 0.0167,
"step": 645
},
{
"epoch": 0.3802236609770453,
"grad_norm": 0.05322265625,
"learning_rate": 0.00019155582672361198,
"loss": 0.0091,
"step": 646
},
{
"epoch": 0.38081224249558565,
"grad_norm": 0.061767578125,
"learning_rate": 0.0001915314215985357,
"loss": 0.0086,
"step": 647
},
{
"epoch": 0.38140082401412595,
"grad_norm": 0.3828125,
"learning_rate": 0.00019150701647345945,
"loss": 0.0157,
"step": 648
},
{
"epoch": 0.38198940553266625,
"grad_norm": 1.4296875,
"learning_rate": 0.00019148261134838317,
"loss": 0.0136,
"step": 649
},
{
"epoch": 0.3825779870512066,
"grad_norm": 1.40625,
"learning_rate": 0.0001914582062233069,
"loss": 0.026,
"step": 650
},
{
"epoch": 0.3831665685697469,
"grad_norm": 0.06396484375,
"learning_rate": 0.00019143380109823063,
"loss": 0.0096,
"step": 651
},
{
"epoch": 0.3837551500882872,
"grad_norm": 0.06982421875,
"learning_rate": 0.00019140939597315438,
"loss": 0.0103,
"step": 652
},
{
"epoch": 0.38434373160682755,
"grad_norm": 11.625,
"learning_rate": 0.0001913849908480781,
"loss": 0.0148,
"step": 653
},
{
"epoch": 0.38493231312536785,
"grad_norm": 0.08837890625,
"learning_rate": 0.00019136058572300185,
"loss": 0.0103,
"step": 654
},
{
"epoch": 0.3855208946439082,
"grad_norm": 0.083984375,
"learning_rate": 0.00019133618059792557,
"loss": 0.0115,
"step": 655
},
{
"epoch": 0.3861094761624485,
"grad_norm": 3.09375,
"learning_rate": 0.00019131177547284932,
"loss": 0.0154,
"step": 656
},
{
"epoch": 0.3866980576809888,
"grad_norm": 0.2041015625,
"learning_rate": 0.00019128737034777304,
"loss": 0.0125,
"step": 657
},
{
"epoch": 0.38728663919952916,
"grad_norm": 0.053955078125,
"learning_rate": 0.00019126296522269679,
"loss": 0.0089,
"step": 658
},
{
"epoch": 0.38787522071806946,
"grad_norm": 0.08349609375,
"learning_rate": 0.0001912385600976205,
"loss": 0.0107,
"step": 659
},
{
"epoch": 0.38846380223660976,
"grad_norm": 0.09521484375,
"learning_rate": 0.00019121415497254425,
"loss": 0.0104,
"step": 660
},
{
"epoch": 0.3890523837551501,
"grad_norm": 1.6328125,
"learning_rate": 0.00019118974984746797,
"loss": 0.0222,
"step": 661
},
{
"epoch": 0.3896409652736904,
"grad_norm": 0.08984375,
"learning_rate": 0.00019116534472239172,
"loss": 0.0101,
"step": 662
},
{
"epoch": 0.3902295467922307,
"grad_norm": 0.07421875,
"learning_rate": 0.00019114093959731544,
"loss": 0.0102,
"step": 663
},
{
"epoch": 0.39081812831077106,
"grad_norm": 0.087890625,
"learning_rate": 0.0001911165344722392,
"loss": 0.0109,
"step": 664
},
{
"epoch": 0.39140670982931136,
"grad_norm": 0.80859375,
"learning_rate": 0.0001910921293471629,
"loss": 0.0181,
"step": 665
},
{
"epoch": 0.39199529134785166,
"grad_norm": 0.0947265625,
"learning_rate": 0.00019106772422208666,
"loss": 0.0104,
"step": 666
},
{
"epoch": 0.392583872866392,
"grad_norm": 0.080078125,
"learning_rate": 0.00019104331909701038,
"loss": 0.01,
"step": 667
},
{
"epoch": 0.3931724543849323,
"grad_norm": 0.1396484375,
"learning_rate": 0.00019101891397193413,
"loss": 0.0177,
"step": 668
},
{
"epoch": 0.3937610359034726,
"grad_norm": 0.0869140625,
"learning_rate": 0.00019099450884685785,
"loss": 0.0101,
"step": 669
},
{
"epoch": 0.39434961742201297,
"grad_norm": 0.076171875,
"learning_rate": 0.0001909701037217816,
"loss": 0.0101,
"step": 670
},
{
"epoch": 0.39493819894055326,
"grad_norm": 0.13671875,
"learning_rate": 0.00019094569859670531,
"loss": 0.0128,
"step": 671
},
{
"epoch": 0.39552678045909356,
"grad_norm": 0.3984375,
"learning_rate": 0.00019092129347162906,
"loss": 0.0143,
"step": 672
},
{
"epoch": 0.3961153619776339,
"grad_norm": 0.059814453125,
"learning_rate": 0.00019089688834655278,
"loss": 0.0103,
"step": 673
},
{
"epoch": 0.3967039434961742,
"grad_norm": 0.08740234375,
"learning_rate": 0.00019087248322147653,
"loss": 0.0098,
"step": 674
},
{
"epoch": 0.3972925250147145,
"grad_norm": 0.0966796875,
"learning_rate": 0.00019084807809640025,
"loss": 0.0106,
"step": 675
},
{
"epoch": 0.39788110653325487,
"grad_norm": 0.072265625,
"learning_rate": 0.000190823672971324,
"loss": 0.0097,
"step": 676
},
{
"epoch": 0.39846968805179517,
"grad_norm": 0.0615234375,
"learning_rate": 0.00019079926784624772,
"loss": 0.0084,
"step": 677
},
{
"epoch": 0.39905826957033547,
"grad_norm": 0.1962890625,
"learning_rate": 0.00019077486272117147,
"loss": 0.012,
"step": 678
},
{
"epoch": 0.3996468510888758,
"grad_norm": 0.06201171875,
"learning_rate": 0.0001907504575960952,
"loss": 0.0095,
"step": 679
},
{
"epoch": 0.4002354326074161,
"grad_norm": 0.06591796875,
"learning_rate": 0.0001907260524710189,
"loss": 0.0091,
"step": 680
},
{
"epoch": 0.4008240141259564,
"grad_norm": 0.0625,
"learning_rate": 0.00019070164734594265,
"loss": 0.0091,
"step": 681
},
{
"epoch": 0.4014125956444968,
"grad_norm": 0.0908203125,
"learning_rate": 0.00019067724222086637,
"loss": 0.0108,
"step": 682
},
{
"epoch": 0.40200117716303707,
"grad_norm": 0.06689453125,
"learning_rate": 0.00019065283709579012,
"loss": 0.0094,
"step": 683
},
{
"epoch": 0.4025897586815774,
"grad_norm": 0.083984375,
"learning_rate": 0.00019062843197071384,
"loss": 0.0107,
"step": 684
},
{
"epoch": 0.4031783402001177,
"grad_norm": 0.11962890625,
"learning_rate": 0.0001906040268456376,
"loss": 0.012,
"step": 685
},
{
"epoch": 0.403766921718658,
"grad_norm": 0.07373046875,
"learning_rate": 0.0001905796217205613,
"loss": 0.0093,
"step": 686
},
{
"epoch": 0.4043555032371984,
"grad_norm": 0.20703125,
"learning_rate": 0.00019055521659548506,
"loss": 0.0141,
"step": 687
},
{
"epoch": 0.4049440847557387,
"grad_norm": 0.1943359375,
"learning_rate": 0.00019053081147040878,
"loss": 0.0154,
"step": 688
},
{
"epoch": 0.405532666274279,
"grad_norm": 0.05859375,
"learning_rate": 0.00019050640634533253,
"loss": 0.0095,
"step": 689
},
{
"epoch": 0.40612124779281933,
"grad_norm": 0.11328125,
"learning_rate": 0.00019048200122025625,
"loss": 0.0123,
"step": 690
},
{
"epoch": 0.4067098293113596,
"grad_norm": 0.0556640625,
"learning_rate": 0.00019045759609518,
"loss": 0.0084,
"step": 691
},
{
"epoch": 0.4072984108298999,
"grad_norm": 0.07421875,
"learning_rate": 0.00019043319097010371,
"loss": 0.0096,
"step": 692
},
{
"epoch": 0.4078869923484403,
"grad_norm": 0.05712890625,
"learning_rate": 0.00019040878584502746,
"loss": 0.0092,
"step": 693
},
{
"epoch": 0.4084755738669806,
"grad_norm": 0.7578125,
"learning_rate": 0.00019038438071995118,
"loss": 0.0114,
"step": 694
},
{
"epoch": 0.4090641553855209,
"grad_norm": 0.193359375,
"learning_rate": 0.00019035997559487493,
"loss": 0.0125,
"step": 695
},
{
"epoch": 0.40965273690406123,
"grad_norm": 0.11572265625,
"learning_rate": 0.00019033557046979865,
"loss": 0.0146,
"step": 696
},
{
"epoch": 0.41024131842260153,
"grad_norm": 0.11083984375,
"learning_rate": 0.0001903111653447224,
"loss": 0.011,
"step": 697
},
{
"epoch": 0.41082989994114183,
"grad_norm": 0.08251953125,
"learning_rate": 0.00019028676021964612,
"loss": 0.0099,
"step": 698
},
{
"epoch": 0.4114184814596822,
"grad_norm": 0.1455078125,
"learning_rate": 0.00019026235509456987,
"loss": 0.0139,
"step": 699
},
{
"epoch": 0.4120070629782225,
"grad_norm": 0.130859375,
"learning_rate": 0.00019023794996949361,
"loss": 0.0121,
"step": 700
},
{
"epoch": 0.4125956444967628,
"grad_norm": 0.310546875,
"learning_rate": 0.00019021354484441733,
"loss": 0.0122,
"step": 701
},
{
"epoch": 0.41318422601530314,
"grad_norm": 0.1025390625,
"learning_rate": 0.00019018913971934108,
"loss": 0.0109,
"step": 702
},
{
"epoch": 0.41377280753384343,
"grad_norm": 0.07470703125,
"learning_rate": 0.0001901647345942648,
"loss": 0.0099,
"step": 703
},
{
"epoch": 0.41436138905238373,
"grad_norm": 0.068359375,
"learning_rate": 0.00019014032946918855,
"loss": 0.0098,
"step": 704
},
{
"epoch": 0.4149499705709241,
"grad_norm": 0.0576171875,
"learning_rate": 0.00019011592434411227,
"loss": 0.0095,
"step": 705
},
{
"epoch": 0.4155385520894644,
"grad_norm": 0.1201171875,
"learning_rate": 0.00019009151921903602,
"loss": 0.0113,
"step": 706
},
{
"epoch": 0.4161271336080047,
"grad_norm": 0.05419921875,
"learning_rate": 0.00019006711409395974,
"loss": 0.009,
"step": 707
},
{
"epoch": 0.41671571512654504,
"grad_norm": 0.10302734375,
"learning_rate": 0.00019004270896888349,
"loss": 0.011,
"step": 708
},
{
"epoch": 0.41730429664508534,
"grad_norm": 0.056640625,
"learning_rate": 0.0001900183038438072,
"loss": 0.009,
"step": 709
},
{
"epoch": 0.41789287816362564,
"grad_norm": 0.1083984375,
"learning_rate": 0.00018999389871873095,
"loss": 0.0107,
"step": 710
},
{
"epoch": 0.418481459682166,
"grad_norm": 0.07763671875,
"learning_rate": 0.00018996949359365467,
"loss": 0.0112,
"step": 711
},
{
"epoch": 0.4190700412007063,
"grad_norm": 0.0556640625,
"learning_rate": 0.00018994508846857842,
"loss": 0.0087,
"step": 712
},
{
"epoch": 0.41965862271924664,
"grad_norm": 0.07421875,
"learning_rate": 0.00018992068334350214,
"loss": 0.0093,
"step": 713
},
{
"epoch": 0.42024720423778694,
"grad_norm": 0.058837890625,
"learning_rate": 0.0001898962782184259,
"loss": 0.009,
"step": 714
},
{
"epoch": 0.42083578575632724,
"grad_norm": 1.90625,
"learning_rate": 0.0001898718730933496,
"loss": 0.0347,
"step": 715
},
{
"epoch": 0.4214243672748676,
"grad_norm": 0.048095703125,
"learning_rate": 0.00018984746796827336,
"loss": 0.0087,
"step": 716
},
{
"epoch": 0.4220129487934079,
"grad_norm": 0.2275390625,
"learning_rate": 0.00018982306284319708,
"loss": 0.0143,
"step": 717
},
{
"epoch": 0.4226015303119482,
"grad_norm": 2.234375,
"learning_rate": 0.00018979865771812083,
"loss": 0.0284,
"step": 718
},
{
"epoch": 0.42319011183048855,
"grad_norm": 0.0576171875,
"learning_rate": 0.00018977425259304455,
"loss": 0.009,
"step": 719
},
{
"epoch": 0.42377869334902885,
"grad_norm": 0.049560546875,
"learning_rate": 0.0001897498474679683,
"loss": 0.0081,
"step": 720
},
{
"epoch": 0.42436727486756914,
"grad_norm": 0.06982421875,
"learning_rate": 0.00018972544234289201,
"loss": 0.0099,
"step": 721
},
{
"epoch": 0.4249558563861095,
"grad_norm": 0.06982421875,
"learning_rate": 0.00018970103721781576,
"loss": 0.0095,
"step": 722
},
{
"epoch": 0.4255444379046498,
"grad_norm": 0.0703125,
"learning_rate": 0.00018967663209273948,
"loss": 0.0103,
"step": 723
},
{
"epoch": 0.4261330194231901,
"grad_norm": 0.068359375,
"learning_rate": 0.00018965222696766323,
"loss": 0.0093,
"step": 724
},
{
"epoch": 0.42672160094173045,
"grad_norm": 0.07080078125,
"learning_rate": 0.00018962782184258695,
"loss": 0.0094,
"step": 725
},
{
"epoch": 0.42731018246027075,
"grad_norm": 0.06689453125,
"learning_rate": 0.0001896034167175107,
"loss": 0.0093,
"step": 726
},
{
"epoch": 0.42789876397881105,
"grad_norm": 0.2236328125,
"learning_rate": 0.00018957901159243442,
"loss": 0.0145,
"step": 727
},
{
"epoch": 0.4284873454973514,
"grad_norm": 0.09765625,
"learning_rate": 0.00018955460646735817,
"loss": 0.0085,
"step": 728
},
{
"epoch": 0.4290759270158917,
"grad_norm": 0.059326171875,
"learning_rate": 0.0001895302013422819,
"loss": 0.0092,
"step": 729
},
{
"epoch": 0.429664508534432,
"grad_norm": 0.1240234375,
"learning_rate": 0.0001895057962172056,
"loss": 0.0118,
"step": 730
},
{
"epoch": 0.43025309005297235,
"grad_norm": 0.056396484375,
"learning_rate": 0.00018948139109212935,
"loss": 0.0087,
"step": 731
},
{
"epoch": 0.43084167157151265,
"grad_norm": 0.171875,
"learning_rate": 0.00018945698596705308,
"loss": 0.0127,
"step": 732
},
{
"epoch": 0.43143025309005295,
"grad_norm": 0.2197265625,
"learning_rate": 0.00018943258084197682,
"loss": 0.0123,
"step": 733
},
{
"epoch": 0.4320188346085933,
"grad_norm": 0.06689453125,
"learning_rate": 0.00018940817571690054,
"loss": 0.01,
"step": 734
},
{
"epoch": 0.4326074161271336,
"grad_norm": 0.142578125,
"learning_rate": 0.0001893837705918243,
"loss": 0.011,
"step": 735
},
{
"epoch": 0.4331959976456739,
"grad_norm": 0.369140625,
"learning_rate": 0.000189359365466748,
"loss": 0.0121,
"step": 736
},
{
"epoch": 0.43378457916421426,
"grad_norm": 0.0478515625,
"learning_rate": 0.00018933496034167176,
"loss": 0.0085,
"step": 737
},
{
"epoch": 0.43437316068275456,
"grad_norm": 8.6875,
"learning_rate": 0.00018931055521659548,
"loss": 0.0413,
"step": 738
},
{
"epoch": 0.43496174220129485,
"grad_norm": 0.091796875,
"learning_rate": 0.00018928615009151923,
"loss": 0.0099,
"step": 739
},
{
"epoch": 0.4355503237198352,
"grad_norm": 0.1484375,
"learning_rate": 0.00018926174496644295,
"loss": 0.0145,
"step": 740
},
{
"epoch": 0.4361389052383755,
"grad_norm": 0.0654296875,
"learning_rate": 0.0001892373398413667,
"loss": 0.009,
"step": 741
},
{
"epoch": 0.4367274867569158,
"grad_norm": 0.058837890625,
"learning_rate": 0.00018921293471629042,
"loss": 0.0085,
"step": 742
},
{
"epoch": 0.43731606827545616,
"grad_norm": 0.1953125,
"learning_rate": 0.00018918852959121416,
"loss": 0.0112,
"step": 743
},
{
"epoch": 0.43790464979399646,
"grad_norm": 0.07470703125,
"learning_rate": 0.00018916412446613788,
"loss": 0.0101,
"step": 744
},
{
"epoch": 0.4384932313125368,
"grad_norm": 0.1123046875,
"learning_rate": 0.00018913971934106163,
"loss": 0.0339,
"step": 745
},
{
"epoch": 0.4390818128310771,
"grad_norm": 0.89453125,
"learning_rate": 0.00018911531421598535,
"loss": 0.0156,
"step": 746
},
{
"epoch": 0.4396703943496174,
"grad_norm": 0.103515625,
"learning_rate": 0.0001890909090909091,
"loss": 0.0125,
"step": 747
},
{
"epoch": 0.44025897586815776,
"grad_norm": 0.08935546875,
"learning_rate": 0.00018906650396583282,
"loss": 0.0123,
"step": 748
},
{
"epoch": 0.44084755738669806,
"grad_norm": 0.06884765625,
"learning_rate": 0.00018904209884075657,
"loss": 0.0098,
"step": 749
},
{
"epoch": 0.44143613890523836,
"grad_norm": 0.06884765625,
"learning_rate": 0.0001890176937156803,
"loss": 0.0096,
"step": 750
},
{
"epoch": 0.4420247204237787,
"grad_norm": 0.05712890625,
"learning_rate": 0.00018899328859060404,
"loss": 0.0085,
"step": 751
},
{
"epoch": 0.442613301942319,
"grad_norm": 0.1484375,
"learning_rate": 0.00018896888346552776,
"loss": 0.0121,
"step": 752
},
{
"epoch": 0.4432018834608593,
"grad_norm": 0.07275390625,
"learning_rate": 0.0001889444783404515,
"loss": 0.0094,
"step": 753
},
{
"epoch": 0.44379046497939967,
"grad_norm": 0.08251953125,
"learning_rate": 0.00018892007321537525,
"loss": 0.0103,
"step": 754
},
{
"epoch": 0.44437904649793997,
"grad_norm": 0.07177734375,
"learning_rate": 0.00018889566809029897,
"loss": 0.0106,
"step": 755
},
{
"epoch": 0.44496762801648027,
"grad_norm": 0.061279296875,
"learning_rate": 0.00018887126296522272,
"loss": 0.0095,
"step": 756
},
{
"epoch": 0.4455562095350206,
"grad_norm": 0.06640625,
"learning_rate": 0.00018884685784014644,
"loss": 0.0099,
"step": 757
},
{
"epoch": 0.4461447910535609,
"grad_norm": 0.0556640625,
"learning_rate": 0.0001888224527150702,
"loss": 0.0086,
"step": 758
},
{
"epoch": 0.4467333725721012,
"grad_norm": 0.05419921875,
"learning_rate": 0.0001887980475899939,
"loss": 0.0085,
"step": 759
},
{
"epoch": 0.44732195409064157,
"grad_norm": 0.052490234375,
"learning_rate": 0.00018877364246491765,
"loss": 0.0087,
"step": 760
},
{
"epoch": 0.44791053560918187,
"grad_norm": 0.09130859375,
"learning_rate": 0.00018874923733984138,
"loss": 0.0096,
"step": 761
},
{
"epoch": 0.44849911712772217,
"grad_norm": 0.061279296875,
"learning_rate": 0.00018872483221476512,
"loss": 0.0092,
"step": 762
},
{
"epoch": 0.4490876986462625,
"grad_norm": 5.4375,
"learning_rate": 0.00018870042708968884,
"loss": 0.0142,
"step": 763
},
{
"epoch": 0.4496762801648028,
"grad_norm": 0.059814453125,
"learning_rate": 0.0001886760219646126,
"loss": 0.0096,
"step": 764
},
{
"epoch": 0.4502648616833431,
"grad_norm": 0.55078125,
"learning_rate": 0.0001886516168395363,
"loss": 0.0229,
"step": 765
},
{
"epoch": 0.4508534432018835,
"grad_norm": 2.109375,
"learning_rate": 0.00018862721171446006,
"loss": 0.0161,
"step": 766
},
{
"epoch": 0.4514420247204238,
"grad_norm": 1.0,
"learning_rate": 0.00018860280658938378,
"loss": 0.0103,
"step": 767
},
{
"epoch": 0.45203060623896407,
"grad_norm": 0.06396484375,
"learning_rate": 0.00018857840146430753,
"loss": 0.0088,
"step": 768
},
{
"epoch": 0.4526191877575044,
"grad_norm": 0.053955078125,
"learning_rate": 0.00018855399633923125,
"loss": 0.0085,
"step": 769
},
{
"epoch": 0.4532077692760447,
"grad_norm": 0.064453125,
"learning_rate": 0.000188529591214155,
"loss": 0.0089,
"step": 770
},
{
"epoch": 0.453796350794585,
"grad_norm": 0.0693359375,
"learning_rate": 0.00018850518608907872,
"loss": 0.009,
"step": 771
},
{
"epoch": 0.4543849323131254,
"grad_norm": 0.49609375,
"learning_rate": 0.00018848078096400246,
"loss": 0.0176,
"step": 772
},
{
"epoch": 0.4549735138316657,
"grad_norm": 0.419921875,
"learning_rate": 0.00018845637583892618,
"loss": 0.0164,
"step": 773
},
{
"epoch": 0.45556209535020603,
"grad_norm": 0.06787109375,
"learning_rate": 0.00018843197071384993,
"loss": 0.0096,
"step": 774
},
{
"epoch": 0.45615067686874633,
"grad_norm": 0.051513671875,
"learning_rate": 0.00018840756558877365,
"loss": 0.0085,
"step": 775
},
{
"epoch": 0.45673925838728663,
"grad_norm": 0.14453125,
"learning_rate": 0.0001883831604636974,
"loss": 0.0134,
"step": 776
},
{
"epoch": 0.457327839905827,
"grad_norm": 0.1279296875,
"learning_rate": 0.00018835875533862112,
"loss": 0.0098,
"step": 777
},
{
"epoch": 0.4579164214243673,
"grad_norm": 0.07373046875,
"learning_rate": 0.00018833435021354487,
"loss": 0.0087,
"step": 778
},
{
"epoch": 0.4585050029429076,
"grad_norm": 0.138671875,
"learning_rate": 0.0001883099450884686,
"loss": 0.0111,
"step": 779
},
{
"epoch": 0.45909358446144793,
"grad_norm": 0.10595703125,
"learning_rate": 0.00018828553996339233,
"loss": 0.0116,
"step": 780
},
{
"epoch": 0.45968216597998823,
"grad_norm": 0.0693359375,
"learning_rate": 0.00018826113483831606,
"loss": 0.0094,
"step": 781
},
{
"epoch": 0.46027074749852853,
"grad_norm": 0.07763671875,
"learning_rate": 0.00018823672971323978,
"loss": 0.0098,
"step": 782
},
{
"epoch": 0.4608593290170689,
"grad_norm": 0.427734375,
"learning_rate": 0.00018821232458816352,
"loss": 0.0129,
"step": 783
},
{
"epoch": 0.4614479105356092,
"grad_norm": 0.06591796875,
"learning_rate": 0.00018818791946308724,
"loss": 0.009,
"step": 784
},
{
"epoch": 0.4620364920541495,
"grad_norm": 0.25,
"learning_rate": 0.000188163514338011,
"loss": 0.0124,
"step": 785
},
{
"epoch": 0.46262507357268984,
"grad_norm": 0.061279296875,
"learning_rate": 0.0001881391092129347,
"loss": 0.0092,
"step": 786
},
{
"epoch": 0.46321365509123014,
"grad_norm": 0.1708984375,
"learning_rate": 0.00018811470408785846,
"loss": 0.0116,
"step": 787
},
{
"epoch": 0.46380223660977044,
"grad_norm": 0.06884765625,
"learning_rate": 0.00018809029896278218,
"loss": 0.0102,
"step": 788
},
{
"epoch": 0.4643908181283108,
"grad_norm": 0.0751953125,
"learning_rate": 0.00018806589383770593,
"loss": 0.0107,
"step": 789
},
{
"epoch": 0.4649793996468511,
"grad_norm": 0.06494140625,
"learning_rate": 0.00018804148871262965,
"loss": 0.0096,
"step": 790
},
{
"epoch": 0.4655679811653914,
"grad_norm": 0.06689453125,
"learning_rate": 0.0001880170835875534,
"loss": 0.0103,
"step": 791
},
{
"epoch": 0.46615656268393174,
"grad_norm": 0.11865234375,
"learning_rate": 0.00018799267846247712,
"loss": 0.0099,
"step": 792
},
{
"epoch": 0.46674514420247204,
"grad_norm": 0.06982421875,
"learning_rate": 0.00018796827333740086,
"loss": 0.0098,
"step": 793
},
{
"epoch": 0.46733372572101234,
"grad_norm": 0.06787109375,
"learning_rate": 0.00018794386821232458,
"loss": 0.0086,
"step": 794
},
{
"epoch": 0.4679223072395527,
"grad_norm": 0.09619140625,
"learning_rate": 0.00018791946308724833,
"loss": 0.0105,
"step": 795
},
{
"epoch": 0.468510888758093,
"grad_norm": 2.234375,
"learning_rate": 0.00018789505796217205,
"loss": 0.0322,
"step": 796
},
{
"epoch": 0.4690994702766333,
"grad_norm": 0.05810546875,
"learning_rate": 0.0001878706528370958,
"loss": 0.0092,
"step": 797
},
{
"epoch": 0.46968805179517364,
"grad_norm": 0.0771484375,
"learning_rate": 0.00018784624771201952,
"loss": 0.0101,
"step": 798
},
{
"epoch": 0.47027663331371394,
"grad_norm": 0.1015625,
"learning_rate": 0.00018782184258694327,
"loss": 0.012,
"step": 799
},
{
"epoch": 0.47086521483225424,
"grad_norm": 0.1875,
"learning_rate": 0.000187797437461867,
"loss": 0.0137,
"step": 800
},
{
"epoch": 0.4714537963507946,
"grad_norm": 0.054931640625,
"learning_rate": 0.00018777303233679074,
"loss": 0.0084,
"step": 801
},
{
"epoch": 0.4720423778693349,
"grad_norm": 0.064453125,
"learning_rate": 0.00018774862721171446,
"loss": 0.0096,
"step": 802
},
{
"epoch": 0.4726309593878752,
"grad_norm": 4.6875,
"learning_rate": 0.0001877242220866382,
"loss": 0.0297,
"step": 803
},
{
"epoch": 0.47321954090641555,
"grad_norm": 0.06640625,
"learning_rate": 0.00018769981696156192,
"loss": 0.0095,
"step": 804
},
{
"epoch": 0.47380812242495585,
"grad_norm": 0.058837890625,
"learning_rate": 0.00018767541183648567,
"loss": 0.009,
"step": 805
},
{
"epoch": 0.4743967039434962,
"grad_norm": 0.078125,
"learning_rate": 0.0001876510067114094,
"loss": 0.0103,
"step": 806
},
{
"epoch": 0.4749852854620365,
"grad_norm": 0.0634765625,
"learning_rate": 0.00018762660158633314,
"loss": 0.0094,
"step": 807
},
{
"epoch": 0.4755738669805768,
"grad_norm": 0.05615234375,
"learning_rate": 0.0001876021964612569,
"loss": 0.0086,
"step": 808
},
{
"epoch": 0.47616244849911715,
"grad_norm": 0.0703125,
"learning_rate": 0.0001875777913361806,
"loss": 0.0096,
"step": 809
},
{
"epoch": 0.47675103001765745,
"grad_norm": 0.0625,
"learning_rate": 0.00018755338621110436,
"loss": 0.0091,
"step": 810
},
{
"epoch": 0.47733961153619775,
"grad_norm": 0.2314453125,
"learning_rate": 0.00018752898108602808,
"loss": 0.0112,
"step": 811
},
{
"epoch": 0.4779281930547381,
"grad_norm": 0.72265625,
"learning_rate": 0.00018750457596095182,
"loss": 0.0141,
"step": 812
},
{
"epoch": 0.4785167745732784,
"grad_norm": 0.07958984375,
"learning_rate": 0.00018748017083587554,
"loss": 0.0104,
"step": 813
},
{
"epoch": 0.4791053560918187,
"grad_norm": 0.05810546875,
"learning_rate": 0.0001874557657107993,
"loss": 0.01,
"step": 814
},
{
"epoch": 0.47969393761035906,
"grad_norm": 0.06591796875,
"learning_rate": 0.000187431360585723,
"loss": 0.0098,
"step": 815
},
{
"epoch": 0.48028251912889935,
"grad_norm": 0.05078125,
"learning_rate": 0.00018740695546064676,
"loss": 0.0088,
"step": 816
},
{
"epoch": 0.48087110064743965,
"grad_norm": 0.24609375,
"learning_rate": 0.00018738255033557048,
"loss": 0.0145,
"step": 817
},
{
"epoch": 0.48145968216598,
"grad_norm": 0.0732421875,
"learning_rate": 0.00018735814521049423,
"loss": 0.0101,
"step": 818
},
{
"epoch": 0.4820482636845203,
"grad_norm": 0.087890625,
"learning_rate": 0.00018733374008541795,
"loss": 0.0118,
"step": 819
},
{
"epoch": 0.4826368452030606,
"grad_norm": 0.06103515625,
"learning_rate": 0.0001873093349603417,
"loss": 0.0095,
"step": 820
},
{
"epoch": 0.48322542672160096,
"grad_norm": 0.11376953125,
"learning_rate": 0.00018728492983526542,
"loss": 0.0115,
"step": 821
},
{
"epoch": 0.48381400824014126,
"grad_norm": 0.055908203125,
"learning_rate": 0.00018726052471018916,
"loss": 0.009,
"step": 822
},
{
"epoch": 0.48440258975868156,
"grad_norm": 0.068359375,
"learning_rate": 0.00018723611958511288,
"loss": 0.0092,
"step": 823
},
{
"epoch": 0.4849911712772219,
"grad_norm": 0.06298828125,
"learning_rate": 0.00018721171446003663,
"loss": 0.0093,
"step": 824
},
{
"epoch": 0.4855797527957622,
"grad_norm": 0.054931640625,
"learning_rate": 0.00018718730933496035,
"loss": 0.0089,
"step": 825
},
{
"epoch": 0.4861683343143025,
"grad_norm": 0.052490234375,
"learning_rate": 0.0001871629042098841,
"loss": 0.0087,
"step": 826
},
{
"epoch": 0.48675691583284286,
"grad_norm": 0.06591796875,
"learning_rate": 0.00018713849908480782,
"loss": 0.0089,
"step": 827
},
{
"epoch": 0.48734549735138316,
"grad_norm": 0.1318359375,
"learning_rate": 0.00018711409395973157,
"loss": 0.0099,
"step": 828
},
{
"epoch": 0.48793407886992346,
"grad_norm": 0.1494140625,
"learning_rate": 0.0001870896888346553,
"loss": 0.0129,
"step": 829
},
{
"epoch": 0.4885226603884638,
"grad_norm": 0.173828125,
"learning_rate": 0.00018706528370957904,
"loss": 0.0118,
"step": 830
},
{
"epoch": 0.4891112419070041,
"grad_norm": 0.05224609375,
"learning_rate": 0.00018704087858450276,
"loss": 0.0087,
"step": 831
},
{
"epoch": 0.4896998234255444,
"grad_norm": 0.09716796875,
"learning_rate": 0.00018701647345942648,
"loss": 0.0101,
"step": 832
},
{
"epoch": 0.49028840494408477,
"grad_norm": 0.043701171875,
"learning_rate": 0.00018699206833435022,
"loss": 0.0079,
"step": 833
},
{
"epoch": 0.49087698646262506,
"grad_norm": 0.0498046875,
"learning_rate": 0.00018696766320927394,
"loss": 0.009,
"step": 834
},
{
"epoch": 0.4914655679811654,
"grad_norm": 0.048583984375,
"learning_rate": 0.0001869432580841977,
"loss": 0.0078,
"step": 835
},
{
"epoch": 0.4920541494997057,
"grad_norm": 0.06982421875,
"learning_rate": 0.0001869188529591214,
"loss": 0.009,
"step": 836
},
{
"epoch": 0.492642731018246,
"grad_norm": 0.287109375,
"learning_rate": 0.00018689444783404516,
"loss": 0.0124,
"step": 837
},
{
"epoch": 0.49323131253678637,
"grad_norm": 0.0478515625,
"learning_rate": 0.00018687004270896888,
"loss": 0.0082,
"step": 838
},
{
"epoch": 0.49381989405532667,
"grad_norm": 0.05908203125,
"learning_rate": 0.00018684563758389263,
"loss": 0.0085,
"step": 839
},
{
"epoch": 0.49440847557386697,
"grad_norm": 0.126953125,
"learning_rate": 0.00018682123245881635,
"loss": 0.0135,
"step": 840
},
{
"epoch": 0.4949970570924073,
"grad_norm": 0.0673828125,
"learning_rate": 0.0001867968273337401,
"loss": 0.0099,
"step": 841
},
{
"epoch": 0.4955856386109476,
"grad_norm": 0.0576171875,
"learning_rate": 0.00018677242220866382,
"loss": 0.0093,
"step": 842
},
{
"epoch": 0.4961742201294879,
"grad_norm": 0.0673828125,
"learning_rate": 0.00018674801708358756,
"loss": 0.0096,
"step": 843
},
{
"epoch": 0.4967628016480283,
"grad_norm": 0.291015625,
"learning_rate": 0.00018672361195851128,
"loss": 0.0126,
"step": 844
},
{
"epoch": 0.4973513831665686,
"grad_norm": 0.08056640625,
"learning_rate": 0.00018669920683343503,
"loss": 0.0087,
"step": 845
},
{
"epoch": 0.49793996468510887,
"grad_norm": 0.0712890625,
"learning_rate": 0.00018667480170835875,
"loss": 0.0098,
"step": 846
},
{
"epoch": 0.4985285462036492,
"grad_norm": 0.046630859375,
"learning_rate": 0.0001866503965832825,
"loss": 0.0084,
"step": 847
},
{
"epoch": 0.4991171277221895,
"grad_norm": 0.0615234375,
"learning_rate": 0.00018662599145820622,
"loss": 0.0091,
"step": 848
},
{
"epoch": 0.4997057092407298,
"grad_norm": 0.09033203125,
"learning_rate": 0.00018660158633312997,
"loss": 0.0094,
"step": 849
},
{
"epoch": 0.5002942907592701,
"grad_norm": 0.0791015625,
"learning_rate": 0.0001865771812080537,
"loss": 0.0099,
"step": 850
},
{
"epoch": 0.5008828722778105,
"grad_norm": 0.625,
"learning_rate": 0.00018655277608297744,
"loss": 0.0119,
"step": 851
},
{
"epoch": 0.5014714537963508,
"grad_norm": 0.056640625,
"learning_rate": 0.00018652837095790116,
"loss": 0.0088,
"step": 852
},
{
"epoch": 0.5020600353148911,
"grad_norm": 0.07421875,
"learning_rate": 0.0001865039658328249,
"loss": 0.0103,
"step": 853
},
{
"epoch": 0.5026486168334314,
"grad_norm": 0.054931640625,
"learning_rate": 0.00018647956070774862,
"loss": 0.0083,
"step": 854
},
{
"epoch": 0.5032371983519718,
"grad_norm": 0.083984375,
"learning_rate": 0.00018645515558267237,
"loss": 0.0104,
"step": 855
},
{
"epoch": 0.503825779870512,
"grad_norm": 0.048095703125,
"learning_rate": 0.0001864307504575961,
"loss": 0.0081,
"step": 856
},
{
"epoch": 0.5044143613890524,
"grad_norm": 0.07421875,
"learning_rate": 0.00018640634533251984,
"loss": 0.0096,
"step": 857
},
{
"epoch": 0.5050029429075927,
"grad_norm": 0.1064453125,
"learning_rate": 0.00018638194020744356,
"loss": 0.0098,
"step": 858
},
{
"epoch": 0.505591524426133,
"grad_norm": 0.36328125,
"learning_rate": 0.0001863575350823673,
"loss": 0.0154,
"step": 859
},
{
"epoch": 0.5061801059446733,
"grad_norm": 0.04833984375,
"learning_rate": 0.00018633312995729103,
"loss": 0.0086,
"step": 860
},
{
"epoch": 0.5067686874632137,
"grad_norm": 0.0732421875,
"learning_rate": 0.00018630872483221478,
"loss": 0.0103,
"step": 861
},
{
"epoch": 0.5073572689817539,
"grad_norm": 0.052734375,
"learning_rate": 0.00018628431970713852,
"loss": 0.009,
"step": 862
},
{
"epoch": 0.5079458505002943,
"grad_norm": 0.1533203125,
"learning_rate": 0.00018625991458206224,
"loss": 0.012,
"step": 863
},
{
"epoch": 0.5085344320188346,
"grad_norm": 0.0556640625,
"learning_rate": 0.000186235509456986,
"loss": 0.0084,
"step": 864
},
{
"epoch": 0.5091230135373749,
"grad_norm": 0.072265625,
"learning_rate": 0.0001862111043319097,
"loss": 0.0103,
"step": 865
},
{
"epoch": 0.5097115950559152,
"grad_norm": 0.283203125,
"learning_rate": 0.00018618669920683346,
"loss": 0.0098,
"step": 866
},
{
"epoch": 0.5103001765744556,
"grad_norm": 0.2353515625,
"learning_rate": 0.00018616229408175718,
"loss": 0.017,
"step": 867
},
{
"epoch": 0.5108887580929958,
"grad_norm": 0.058837890625,
"learning_rate": 0.00018613788895668093,
"loss": 0.0097,
"step": 868
},
{
"epoch": 0.5114773396115362,
"grad_norm": 0.08935546875,
"learning_rate": 0.00018611348383160465,
"loss": 0.0099,
"step": 869
},
{
"epoch": 0.5120659211300765,
"grad_norm": 0.068359375,
"learning_rate": 0.0001860890787065284,
"loss": 0.0089,
"step": 870
},
{
"epoch": 0.5126545026486168,
"grad_norm": 0.29296875,
"learning_rate": 0.00018606467358145212,
"loss": 0.0151,
"step": 871
},
{
"epoch": 0.5132430841671571,
"grad_norm": 0.046630859375,
"learning_rate": 0.00018604026845637586,
"loss": 0.0091,
"step": 872
},
{
"epoch": 0.5138316656856975,
"grad_norm": 0.08349609375,
"learning_rate": 0.00018601586333129958,
"loss": 0.011,
"step": 873
},
{
"epoch": 0.5144202472042377,
"grad_norm": 0.0966796875,
"learning_rate": 0.00018599145820622333,
"loss": 0.0101,
"step": 874
},
{
"epoch": 0.5150088287227781,
"grad_norm": 0.053466796875,
"learning_rate": 0.00018596705308114705,
"loss": 0.0083,
"step": 875
},
{
"epoch": 0.5155974102413184,
"grad_norm": 0.0654296875,
"learning_rate": 0.0001859426479560708,
"loss": 0.0092,
"step": 876
},
{
"epoch": 0.5161859917598587,
"grad_norm": 0.061279296875,
"learning_rate": 0.00018591824283099452,
"loss": 0.009,
"step": 877
},
{
"epoch": 0.516774573278399,
"grad_norm": 0.07763671875,
"learning_rate": 0.00018589383770591827,
"loss": 0.0283,
"step": 878
},
{
"epoch": 0.5173631547969394,
"grad_norm": 0.052734375,
"learning_rate": 0.000185869432580842,
"loss": 0.0086,
"step": 879
},
{
"epoch": 0.5179517363154797,
"grad_norm": 0.060546875,
"learning_rate": 0.00018584502745576574,
"loss": 0.0087,
"step": 880
},
{
"epoch": 0.51854031783402,
"grad_norm": 0.0830078125,
"learning_rate": 0.00018582062233068946,
"loss": 0.0103,
"step": 881
},
{
"epoch": 0.5191288993525603,
"grad_norm": 0.07470703125,
"learning_rate": 0.00018579621720561318,
"loss": 0.0103,
"step": 882
},
{
"epoch": 0.5197174808711007,
"grad_norm": 0.10009765625,
"learning_rate": 0.00018577181208053692,
"loss": 0.0128,
"step": 883
},
{
"epoch": 0.5203060623896409,
"grad_norm": 0.05322265625,
"learning_rate": 0.00018574740695546064,
"loss": 0.009,
"step": 884
},
{
"epoch": 0.5208946439081813,
"grad_norm": 0.06201171875,
"learning_rate": 0.0001857230018303844,
"loss": 0.0089,
"step": 885
},
{
"epoch": 0.5214832254267217,
"grad_norm": 0.1982421875,
"learning_rate": 0.0001856985967053081,
"loss": 0.0125,
"step": 886
},
{
"epoch": 0.5220718069452619,
"grad_norm": 0.11328125,
"learning_rate": 0.00018567419158023186,
"loss": 0.0114,
"step": 887
},
{
"epoch": 0.5226603884638023,
"grad_norm": 0.054443359375,
"learning_rate": 0.00018564978645515558,
"loss": 0.0087,
"step": 888
},
{
"epoch": 0.5232489699823426,
"grad_norm": 0.05859375,
"learning_rate": 0.00018562538133007933,
"loss": 0.0099,
"step": 889
},
{
"epoch": 0.5238375515008828,
"grad_norm": 0.0556640625,
"learning_rate": 0.00018560097620500305,
"loss": 0.0084,
"step": 890
},
{
"epoch": 0.5244261330194232,
"grad_norm": 0.05615234375,
"learning_rate": 0.0001855765710799268,
"loss": 0.0083,
"step": 891
},
{
"epoch": 0.5250147145379636,
"grad_norm": 0.0634765625,
"learning_rate": 0.00018555216595485052,
"loss": 0.009,
"step": 892
},
{
"epoch": 0.5256032960565038,
"grad_norm": 0.06640625,
"learning_rate": 0.00018552776082977426,
"loss": 0.0096,
"step": 893
},
{
"epoch": 0.5261918775750442,
"grad_norm": 0.09130859375,
"learning_rate": 0.00018550335570469799,
"loss": 0.0098,
"step": 894
},
{
"epoch": 0.5267804590935845,
"grad_norm": 0.062255859375,
"learning_rate": 0.00018547895057962173,
"loss": 0.0098,
"step": 895
},
{
"epoch": 0.5273690406121248,
"grad_norm": 0.1767578125,
"learning_rate": 0.00018545454545454545,
"loss": 0.011,
"step": 896
},
{
"epoch": 0.5279576221306651,
"grad_norm": 0.1796875,
"learning_rate": 0.0001854301403294692,
"loss": 0.0106,
"step": 897
},
{
"epoch": 0.5285462036492055,
"grad_norm": 0.07666015625,
"learning_rate": 0.00018540573520439292,
"loss": 0.0106,
"step": 898
},
{
"epoch": 0.5291347851677457,
"grad_norm": 0.0888671875,
"learning_rate": 0.00018538133007931667,
"loss": 0.0107,
"step": 899
},
{
"epoch": 0.5297233666862861,
"grad_norm": 0.055908203125,
"learning_rate": 0.0001853569249542404,
"loss": 0.0091,
"step": 900
},
{
"epoch": 0.5303119482048264,
"grad_norm": 0.22265625,
"learning_rate": 0.00018533251982916414,
"loss": 0.0155,
"step": 901
},
{
"epoch": 0.5309005297233667,
"grad_norm": 0.08447265625,
"learning_rate": 0.00018530811470408786,
"loss": 0.0111,
"step": 902
},
{
"epoch": 0.531489111241907,
"grad_norm": 0.0869140625,
"learning_rate": 0.0001852837095790116,
"loss": 0.0099,
"step": 903
},
{
"epoch": 0.5320776927604474,
"grad_norm": 0.09765625,
"learning_rate": 0.00018525930445393533,
"loss": 0.0096,
"step": 904
},
{
"epoch": 0.5326662742789876,
"grad_norm": 0.057373046875,
"learning_rate": 0.00018523489932885907,
"loss": 0.0088,
"step": 905
},
{
"epoch": 0.533254855797528,
"grad_norm": 0.058837890625,
"learning_rate": 0.0001852104942037828,
"loss": 0.009,
"step": 906
},
{
"epoch": 0.5338434373160683,
"grad_norm": 0.058837890625,
"learning_rate": 0.00018518608907870654,
"loss": 0.0095,
"step": 907
},
{
"epoch": 0.5344320188346086,
"grad_norm": 0.70703125,
"learning_rate": 0.00018516168395363026,
"loss": 0.0107,
"step": 908
},
{
"epoch": 0.5350206003531489,
"grad_norm": 0.1171875,
"learning_rate": 0.000185137278828554,
"loss": 0.0112,
"step": 909
},
{
"epoch": 0.5356091818716893,
"grad_norm": 0.11572265625,
"learning_rate": 0.00018511287370347773,
"loss": 0.0111,
"step": 910
},
{
"epoch": 0.5361977633902295,
"grad_norm": 0.130859375,
"learning_rate": 0.00018508846857840148,
"loss": 0.0114,
"step": 911
},
{
"epoch": 0.5367863449087699,
"grad_norm": 1.5,
"learning_rate": 0.0001850640634533252,
"loss": 0.0131,
"step": 912
},
{
"epoch": 0.5373749264273102,
"grad_norm": 0.1025390625,
"learning_rate": 0.00018503965832824894,
"loss": 0.0107,
"step": 913
},
{
"epoch": 0.5379635079458505,
"grad_norm": 0.0673828125,
"learning_rate": 0.00018501525320317267,
"loss": 0.0099,
"step": 914
},
{
"epoch": 0.5385520894643908,
"grad_norm": 0.06298828125,
"learning_rate": 0.0001849908480780964,
"loss": 0.0085,
"step": 915
},
{
"epoch": 0.5391406709829312,
"grad_norm": 0.0771484375,
"learning_rate": 0.00018496644295302016,
"loss": 0.0104,
"step": 916
},
{
"epoch": 0.5397292525014714,
"grad_norm": 0.060302734375,
"learning_rate": 0.00018494203782794388,
"loss": 0.0092,
"step": 917
},
{
"epoch": 0.5403178340200118,
"grad_norm": 0.07275390625,
"learning_rate": 0.00018491763270286763,
"loss": 0.0089,
"step": 918
},
{
"epoch": 0.5409064155385521,
"grad_norm": 0.055419921875,
"learning_rate": 0.00018489322757779135,
"loss": 0.0088,
"step": 919
},
{
"epoch": 0.5414949970570924,
"grad_norm": 0.0771484375,
"learning_rate": 0.0001848688224527151,
"loss": 0.0101,
"step": 920
},
{
"epoch": 0.5420835785756327,
"grad_norm": 0.5078125,
"learning_rate": 0.00018484441732763882,
"loss": 0.0198,
"step": 921
},
{
"epoch": 0.5426721600941731,
"grad_norm": 0.1572265625,
"learning_rate": 0.00018482001220256256,
"loss": 0.0093,
"step": 922
},
{
"epoch": 0.5432607416127133,
"grad_norm": 0.05078125,
"learning_rate": 0.00018479560707748628,
"loss": 0.0084,
"step": 923
},
{
"epoch": 0.5438493231312537,
"grad_norm": 0.05712890625,
"learning_rate": 0.00018477120195241003,
"loss": 0.0084,
"step": 924
},
{
"epoch": 0.544437904649794,
"grad_norm": 0.09423828125,
"learning_rate": 0.00018474679682733375,
"loss": 0.0109,
"step": 925
},
{
"epoch": 0.5450264861683343,
"grad_norm": 0.0517578125,
"learning_rate": 0.0001847223917022575,
"loss": 0.0084,
"step": 926
},
{
"epoch": 0.5456150676868746,
"grad_norm": 0.052978515625,
"learning_rate": 0.00018469798657718122,
"loss": 0.0083,
"step": 927
},
{
"epoch": 0.546203649205415,
"grad_norm": 0.08251953125,
"learning_rate": 0.00018467358145210497,
"loss": 0.0108,
"step": 928
},
{
"epoch": 0.5467922307239552,
"grad_norm": 0.07275390625,
"learning_rate": 0.0001846491763270287,
"loss": 0.01,
"step": 929
},
{
"epoch": 0.5473808122424956,
"grad_norm": 0.373046875,
"learning_rate": 0.00018462477120195244,
"loss": 0.0162,
"step": 930
},
{
"epoch": 0.5479693937610359,
"grad_norm": 0.078125,
"learning_rate": 0.00018460036607687616,
"loss": 0.012,
"step": 931
},
{
"epoch": 0.5485579752795762,
"grad_norm": 0.07177734375,
"learning_rate": 0.0001845759609517999,
"loss": 0.0087,
"step": 932
},
{
"epoch": 0.5491465567981165,
"grad_norm": 0.06005859375,
"learning_rate": 0.0001845515558267236,
"loss": 0.0094,
"step": 933
},
{
"epoch": 0.5497351383166569,
"grad_norm": 0.058349609375,
"learning_rate": 0.00018452715070164735,
"loss": 0.0087,
"step": 934
},
{
"epoch": 0.5503237198351971,
"grad_norm": 0.060546875,
"learning_rate": 0.00018450274557657107,
"loss": 0.0093,
"step": 935
},
{
"epoch": 0.5509123013537375,
"grad_norm": 0.056396484375,
"learning_rate": 0.0001844783404514948,
"loss": 0.009,
"step": 936
},
{
"epoch": 0.5515008828722778,
"grad_norm": 0.0439453125,
"learning_rate": 0.00018445393532641856,
"loss": 0.0081,
"step": 937
},
{
"epoch": 0.5520894643908181,
"grad_norm": 0.07861328125,
"learning_rate": 0.00018442953020134228,
"loss": 0.0095,
"step": 938
},
{
"epoch": 0.5526780459093584,
"grad_norm": 0.0927734375,
"learning_rate": 0.00018440512507626603,
"loss": 0.0101,
"step": 939
},
{
"epoch": 0.5532666274278988,
"grad_norm": 0.044677734375,
"learning_rate": 0.00018438071995118975,
"loss": 0.0082,
"step": 940
},
{
"epoch": 0.5538552089464391,
"grad_norm": 0.08984375,
"learning_rate": 0.0001843563148261135,
"loss": 0.0093,
"step": 941
},
{
"epoch": 0.5544437904649794,
"grad_norm": 0.0791015625,
"learning_rate": 0.00018433190970103722,
"loss": 0.0094,
"step": 942
},
{
"epoch": 0.5550323719835197,
"grad_norm": 0.06640625,
"learning_rate": 0.00018430750457596097,
"loss": 0.0097,
"step": 943
},
{
"epoch": 0.5556209535020601,
"grad_norm": 0.05615234375,
"learning_rate": 0.00018428309945088469,
"loss": 0.0085,
"step": 944
},
{
"epoch": 0.5562095350206003,
"grad_norm": 0.1123046875,
"learning_rate": 0.00018425869432580843,
"loss": 0.0107,
"step": 945
},
{
"epoch": 0.5567981165391407,
"grad_norm": 0.076171875,
"learning_rate": 0.00018423428920073215,
"loss": 0.0106,
"step": 946
},
{
"epoch": 0.557386698057681,
"grad_norm": 0.0537109375,
"learning_rate": 0.0001842098840756559,
"loss": 0.0086,
"step": 947
},
{
"epoch": 0.5579752795762213,
"grad_norm": 0.05322265625,
"learning_rate": 0.00018418547895057962,
"loss": 0.0082,
"step": 948
},
{
"epoch": 0.5585638610947616,
"grad_norm": 0.1396484375,
"learning_rate": 0.00018416107382550337,
"loss": 0.0107,
"step": 949
},
{
"epoch": 0.559152442613302,
"grad_norm": 0.07177734375,
"learning_rate": 0.0001841366687004271,
"loss": 0.0102,
"step": 950
},
{
"epoch": 0.5597410241318422,
"grad_norm": 0.057861328125,
"learning_rate": 0.00018411226357535084,
"loss": 0.0092,
"step": 951
},
{
"epoch": 0.5603296056503826,
"grad_norm": 0.06591796875,
"learning_rate": 0.00018408785845027456,
"loss": 0.0099,
"step": 952
},
{
"epoch": 0.5609181871689229,
"grad_norm": 0.0478515625,
"learning_rate": 0.0001840634533251983,
"loss": 0.0082,
"step": 953
},
{
"epoch": 0.5615067686874632,
"grad_norm": 0.07861328125,
"learning_rate": 0.00018403904820012203,
"loss": 0.0105,
"step": 954
},
{
"epoch": 0.5620953502060035,
"grad_norm": 0.068359375,
"learning_rate": 0.00018401464307504577,
"loss": 0.0092,
"step": 955
},
{
"epoch": 0.5626839317245439,
"grad_norm": 0.05029296875,
"learning_rate": 0.0001839902379499695,
"loss": 0.0084,
"step": 956
},
{
"epoch": 0.5632725132430841,
"grad_norm": 0.07666015625,
"learning_rate": 0.00018396583282489324,
"loss": 0.0106,
"step": 957
},
{
"epoch": 0.5638610947616245,
"grad_norm": 0.06591796875,
"learning_rate": 0.00018394142769981696,
"loss": 0.0103,
"step": 958
},
{
"epoch": 0.5644496762801648,
"grad_norm": 0.19921875,
"learning_rate": 0.0001839170225747407,
"loss": 0.0171,
"step": 959
},
{
"epoch": 0.5650382577987051,
"grad_norm": 0.040771484375,
"learning_rate": 0.00018389261744966443,
"loss": 0.0074,
"step": 960
},
{
"epoch": 0.5656268393172454,
"grad_norm": 0.055419921875,
"learning_rate": 0.00018386821232458818,
"loss": 0.0085,
"step": 961
},
{
"epoch": 0.5662154208357858,
"grad_norm": 0.052978515625,
"learning_rate": 0.0001838438071995119,
"loss": 0.0094,
"step": 962
},
{
"epoch": 0.566804002354326,
"grad_norm": 0.050048828125,
"learning_rate": 0.00018381940207443565,
"loss": 0.0082,
"step": 963
},
{
"epoch": 0.5673925838728664,
"grad_norm": 0.05517578125,
"learning_rate": 0.00018379499694935937,
"loss": 0.0085,
"step": 964
},
{
"epoch": 0.5679811653914068,
"grad_norm": 0.053466796875,
"learning_rate": 0.0001837705918242831,
"loss": 0.0087,
"step": 965
},
{
"epoch": 0.568569746909947,
"grad_norm": 0.08203125,
"learning_rate": 0.00018374618669920683,
"loss": 0.0091,
"step": 966
},
{
"epoch": 0.5691583284284873,
"grad_norm": 0.0615234375,
"learning_rate": 0.00018372178157413058,
"loss": 0.0088,
"step": 967
},
{
"epoch": 0.5697469099470277,
"grad_norm": 0.0693359375,
"learning_rate": 0.0001836973764490543,
"loss": 0.009,
"step": 968
},
{
"epoch": 0.570335491465568,
"grad_norm": 0.050537109375,
"learning_rate": 0.00018367297132397805,
"loss": 0.0083,
"step": 969
},
{
"epoch": 0.5709240729841083,
"grad_norm": 0.06591796875,
"learning_rate": 0.0001836485661989018,
"loss": 0.0096,
"step": 970
},
{
"epoch": 0.5715126545026487,
"grad_norm": 0.06640625,
"learning_rate": 0.00018362416107382552,
"loss": 0.0098,
"step": 971
},
{
"epoch": 0.5721012360211889,
"grad_norm": 0.11181640625,
"learning_rate": 0.00018359975594874927,
"loss": 0.0102,
"step": 972
},
{
"epoch": 0.5726898175397293,
"grad_norm": 0.056640625,
"learning_rate": 0.00018357535082367299,
"loss": 0.0093,
"step": 973
},
{
"epoch": 0.5732783990582696,
"grad_norm": 0.06298828125,
"learning_rate": 0.00018355094569859673,
"loss": 0.0092,
"step": 974
},
{
"epoch": 0.5738669805768098,
"grad_norm": 0.052490234375,
"learning_rate": 0.00018352654057352045,
"loss": 0.0082,
"step": 975
},
{
"epoch": 0.5744555620953502,
"grad_norm": 0.1826171875,
"learning_rate": 0.0001835021354484442,
"loss": 0.0129,
"step": 976
},
{
"epoch": 0.5750441436138906,
"grad_norm": 0.045166015625,
"learning_rate": 0.00018347773032336792,
"loss": 0.0083,
"step": 977
},
{
"epoch": 0.5756327251324308,
"grad_norm": 0.053955078125,
"learning_rate": 0.00018345332519829167,
"loss": 0.0092,
"step": 978
},
{
"epoch": 0.5762213066509712,
"grad_norm": 0.0751953125,
"learning_rate": 0.0001834289200732154,
"loss": 0.0099,
"step": 979
},
{
"epoch": 0.5768098881695115,
"grad_norm": 1.78125,
"learning_rate": 0.00018340451494813914,
"loss": 0.0282,
"step": 980
},
{
"epoch": 0.5773984696880518,
"grad_norm": 0.0478515625,
"learning_rate": 0.00018338010982306286,
"loss": 0.0089,
"step": 981
},
{
"epoch": 0.5779870512065921,
"grad_norm": 0.049072265625,
"learning_rate": 0.0001833557046979866,
"loss": 0.0082,
"step": 982
},
{
"epoch": 0.5785756327251325,
"grad_norm": 0.05615234375,
"learning_rate": 0.0001833312995729103,
"loss": 0.0081,
"step": 983
},
{
"epoch": 0.5791642142436727,
"grad_norm": 0.0947265625,
"learning_rate": 0.00018330689444783405,
"loss": 0.0118,
"step": 984
},
{
"epoch": 0.5797527957622131,
"grad_norm": 0.07177734375,
"learning_rate": 0.00018328248932275777,
"loss": 0.0089,
"step": 985
},
{
"epoch": 0.5803413772807534,
"grad_norm": 0.05078125,
"learning_rate": 0.00018325808419768151,
"loss": 0.0082,
"step": 986
},
{
"epoch": 0.5809299587992937,
"grad_norm": 0.058837890625,
"learning_rate": 0.00018323367907260523,
"loss": 0.0082,
"step": 987
},
{
"epoch": 0.581518540317834,
"grad_norm": 0.05859375,
"learning_rate": 0.00018320927394752898,
"loss": 0.0088,
"step": 988
},
{
"epoch": 0.5821071218363744,
"grad_norm": 0.0654296875,
"learning_rate": 0.0001831848688224527,
"loss": 0.0096,
"step": 989
},
{
"epoch": 0.5826957033549146,
"grad_norm": 0.048828125,
"learning_rate": 0.00018316046369737645,
"loss": 0.0074,
"step": 990
},
{
"epoch": 0.583284284873455,
"grad_norm": 0.06591796875,
"learning_rate": 0.0001831360585723002,
"loss": 0.0092,
"step": 991
},
{
"epoch": 0.5838728663919953,
"grad_norm": 0.0556640625,
"learning_rate": 0.00018311165344722392,
"loss": 0.0087,
"step": 992
},
{
"epoch": 0.5844614479105356,
"grad_norm": 0.10302734375,
"learning_rate": 0.00018308724832214767,
"loss": 0.0112,
"step": 993
},
{
"epoch": 0.5850500294290759,
"grad_norm": 0.055908203125,
"learning_rate": 0.00018306284319707139,
"loss": 0.0084,
"step": 994
},
{
"epoch": 0.5856386109476163,
"grad_norm": 0.0595703125,
"learning_rate": 0.00018303843807199513,
"loss": 0.0089,
"step": 995
},
{
"epoch": 0.5862271924661565,
"grad_norm": 0.053955078125,
"learning_rate": 0.00018301403294691885,
"loss": 0.0094,
"step": 996
},
{
"epoch": 0.5868157739846969,
"grad_norm": 0.06494140625,
"learning_rate": 0.0001829896278218426,
"loss": 0.0099,
"step": 997
},
{
"epoch": 0.5874043555032372,
"grad_norm": 0.044189453125,
"learning_rate": 0.00018296522269676632,
"loss": 0.0076,
"step": 998
},
{
"epoch": 0.5879929370217775,
"grad_norm": 0.056396484375,
"learning_rate": 0.00018294081757169007,
"loss": 0.009,
"step": 999
},
{
"epoch": 0.5885815185403178,
"grad_norm": 0.3515625,
"learning_rate": 0.0001829164124466138,
"loss": 0.0125,
"step": 1000
}
],
"logging_steps": 1,
"max_steps": 8495,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 64,
"trial_name": null,
"trial_params": null
}