{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 250,
"global_step": 1000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.001,
"grad_norm": 55.0,
"learning_rate": 0.0,
"loss": 15.8026,
"step": 1
},
{
"epoch": 0.002,
"grad_norm": 53.5,
"learning_rate": 2.0000000000000003e-06,
"loss": 15.7515,
"step": 2
},
{
"epoch": 0.003,
"grad_norm": 54.0,
"learning_rate": 4.000000000000001e-06,
"loss": 15.6805,
"step": 3
},
{
"epoch": 0.004,
"grad_norm": 53.75,
"learning_rate": 6e-06,
"loss": 15.6899,
"step": 4
},
{
"epoch": 0.005,
"grad_norm": 51.75,
"learning_rate": 8.000000000000001e-06,
"loss": 15.4635,
"step": 5
},
{
"epoch": 0.006,
"grad_norm": 52.0,
"learning_rate": 1e-05,
"loss": 15.2861,
"step": 6
},
{
"epoch": 0.007,
"grad_norm": 50.25,
"learning_rate": 1.2e-05,
"loss": 15.0195,
"step": 7
},
{
"epoch": 0.008,
"grad_norm": 51.75,
"learning_rate": 1.4000000000000001e-05,
"loss": 14.5896,
"step": 8
},
{
"epoch": 0.009,
"grad_norm": 48.5,
"learning_rate": 1.6000000000000003e-05,
"loss": 14.428,
"step": 9
},
{
"epoch": 0.01,
"grad_norm": 43.0,
"learning_rate": 1.8e-05,
"loss": 13.8742,
"step": 10
},
{
"epoch": 0.011,
"grad_norm": 35.75,
"learning_rate": 2e-05,
"loss": 13.1404,
"step": 11
},
{
"epoch": 0.012,
"grad_norm": 25.75,
"learning_rate": 2.2000000000000003e-05,
"loss": 12.7393,
"step": 12
},
{
"epoch": 0.013,
"grad_norm": 28.25,
"learning_rate": 2.4e-05,
"loss": 12.4467,
"step": 13
},
{
"epoch": 0.014,
"grad_norm": 39.75,
"learning_rate": 2.6000000000000002e-05,
"loss": 12.3137,
"step": 14
},
{
"epoch": 0.015,
"grad_norm": 40.75,
"learning_rate": 2.8000000000000003e-05,
"loss": 11.8957,
"step": 15
},
{
"epoch": 0.016,
"grad_norm": 39.75,
"learning_rate": 3e-05,
"loss": 11.3899,
"step": 16
},
{
"epoch": 0.017,
"grad_norm": 32.0,
"learning_rate": 3.2000000000000005e-05,
"loss": 10.4023,
"step": 17
},
{
"epoch": 0.018,
"grad_norm": 25.0,
"learning_rate": 3.4000000000000007e-05,
"loss": 9.4691,
"step": 18
},
{
"epoch": 0.019,
"grad_norm": 23.5,
"learning_rate": 3.6e-05,
"loss": 8.8468,
"step": 19
},
{
"epoch": 0.02,
"grad_norm": 20.5,
"learning_rate": 3.8e-05,
"loss": 8.201,
"step": 20
},
{
"epoch": 0.021,
"grad_norm": 17.5,
"learning_rate": 4e-05,
"loss": 7.7326,
"step": 21
},
{
"epoch": 0.022,
"grad_norm": 17.375,
"learning_rate": 4.2e-05,
"loss": 7.3021,
"step": 22
},
{
"epoch": 0.023,
"grad_norm": 14.8125,
"learning_rate": 4.4000000000000006e-05,
"loss": 7.0761,
"step": 23
},
{
"epoch": 0.024,
"grad_norm": 12.0625,
"learning_rate": 4.600000000000001e-05,
"loss": 7.0736,
"step": 24
},
{
"epoch": 0.025,
"grad_norm": 9.625,
"learning_rate": 4.8e-05,
"loss": 6.6617,
"step": 25
},
{
"epoch": 0.026,
"grad_norm": 7.125,
"learning_rate": 5e-05,
"loss": 6.2627,
"step": 26
},
{
"epoch": 0.027,
"grad_norm": 6.0,
"learning_rate": 5.2000000000000004e-05,
"loss": 6.108,
"step": 27
},
{
"epoch": 0.028,
"grad_norm": 4.65625,
"learning_rate": 5.4000000000000005e-05,
"loss": 5.7079,
"step": 28
},
{
"epoch": 0.029,
"grad_norm": 4.5625,
"learning_rate": 5.6000000000000006e-05,
"loss": 6.0111,
"step": 29
},
{
"epoch": 0.03,
"grad_norm": 3.78125,
"learning_rate": 5.8e-05,
"loss": 6.0342,
"step": 30
},
{
"epoch": 0.031,
"grad_norm": 3.4375,
"learning_rate": 6e-05,
"loss": 5.8238,
"step": 31
},
{
"epoch": 0.032,
"grad_norm": 3.4375,
"learning_rate": 6.2e-05,
"loss": 5.732,
"step": 32
},
{
"epoch": 0.033,
"grad_norm": 3.171875,
"learning_rate": 6.400000000000001e-05,
"loss": 5.3643,
"step": 33
},
{
"epoch": 0.034,
"grad_norm": 3.546875,
"learning_rate": 6.6e-05,
"loss": 5.2917,
"step": 34
},
{
"epoch": 0.035,
"grad_norm": 4.21875,
"learning_rate": 6.800000000000001e-05,
"loss": 5.2339,
"step": 35
},
{
"epoch": 0.036,
"grad_norm": 2.859375,
"learning_rate": 7e-05,
"loss": 5.2663,
"step": 36
},
{
"epoch": 0.037,
"grad_norm": 2.125,
"learning_rate": 7.2e-05,
"loss": 5.0012,
"step": 37
},
{
"epoch": 0.038,
"grad_norm": 2.84375,
"learning_rate": 7.4e-05,
"loss": 4.8893,
"step": 38
},
{
"epoch": 0.039,
"grad_norm": 4.09375,
"learning_rate": 7.6e-05,
"loss": 4.9856,
"step": 39
},
{
"epoch": 0.04,
"grad_norm": 2.046875,
"learning_rate": 7.800000000000001e-05,
"loss": 5.0183,
"step": 40
},
{
"epoch": 0.041,
"grad_norm": 8.875,
"learning_rate": 8e-05,
"loss": 4.9491,
"step": 41
},
{
"epoch": 0.042,
"grad_norm": 4.65625,
"learning_rate": 8.2e-05,
"loss": 4.7829,
"step": 42
},
{
"epoch": 0.043,
"grad_norm": 4.40625,
"learning_rate": 8.4e-05,
"loss": 4.9717,
"step": 43
},
{
"epoch": 0.044,
"grad_norm": 4.34375,
"learning_rate": 8.6e-05,
"loss": 4.817,
"step": 44
},
{
"epoch": 0.045,
"grad_norm": 2.1875,
"learning_rate": 8.800000000000001e-05,
"loss": 4.8171,
"step": 45
},
{
"epoch": 0.046,
"grad_norm": 5.1875,
"learning_rate": 9e-05,
"loss": 4.8199,
"step": 46
},
{
"epoch": 0.047,
"grad_norm": 3.40625,
"learning_rate": 9.200000000000001e-05,
"loss": 4.7965,
"step": 47
},
{
"epoch": 0.048,
"grad_norm": 3.609375,
"learning_rate": 9.4e-05,
"loss": 4.649,
"step": 48
},
{
"epoch": 0.049,
"grad_norm": 3.59375,
"learning_rate": 9.6e-05,
"loss": 4.6714,
"step": 49
},
{
"epoch": 0.05,
"grad_norm": 3.25,
"learning_rate": 9.8e-05,
"loss": 4.5177,
"step": 50
},
{
"epoch": 0.051,
"grad_norm": 3.03125,
"learning_rate": 0.0001,
"loss": 4.5112,
"step": 51
},
{
"epoch": 0.052,
"grad_norm": 2.15625,
"learning_rate": 0.00010200000000000001,
"loss": 4.5739,
"step": 52
},
{
"epoch": 0.053,
"grad_norm": 2.1875,
"learning_rate": 0.00010400000000000001,
"loss": 4.4601,
"step": 53
},
{
"epoch": 0.054,
"grad_norm": 2.46875,
"learning_rate": 0.00010600000000000002,
"loss": 4.5269,
"step": 54
},
{
"epoch": 0.055,
"grad_norm": 1.8828125,
"learning_rate": 0.00010800000000000001,
"loss": 4.2753,
"step": 55
},
{
"epoch": 0.056,
"grad_norm": 3.203125,
"learning_rate": 0.00011000000000000002,
"loss": 4.3292,
"step": 56
},
{
"epoch": 0.057,
"grad_norm": 2.390625,
"learning_rate": 0.00011200000000000001,
"loss": 4.3489,
"step": 57
},
{
"epoch": 0.058,
"grad_norm": 4.125,
"learning_rate": 0.00011399999999999999,
"loss": 4.4857,
"step": 58
},
{
"epoch": 0.059,
"grad_norm": 2.96875,
"learning_rate": 0.000116,
"loss": 4.3047,
"step": 59
},
{
"epoch": 0.06,
"grad_norm": 3.265625,
"learning_rate": 0.000118,
"loss": 4.4213,
"step": 60
},
{
"epoch": 0.061,
"grad_norm": 11.4375,
"learning_rate": 0.00012,
"loss": 4.3308,
"step": 61
},
{
"epoch": 0.062,
"grad_norm": 2.59375,
"learning_rate": 0.000122,
"loss": 4.3524,
"step": 62
},
{
"epoch": 0.063,
"grad_norm": 3.375,
"learning_rate": 0.000124,
"loss": 4.2236,
"step": 63
},
{
"epoch": 0.064,
"grad_norm": 2.234375,
"learning_rate": 0.000126,
"loss": 4.3674,
"step": 64
},
{
"epoch": 0.065,
"grad_norm": 3.0,
"learning_rate": 0.00012800000000000002,
"loss": 4.1765,
"step": 65
},
{
"epoch": 0.066,
"grad_norm": 2.234375,
"learning_rate": 0.00013000000000000002,
"loss": 4.1043,
"step": 66
},
{
"epoch": 0.067,
"grad_norm": 3.8125,
"learning_rate": 0.000132,
"loss": 4.0828,
"step": 67
},
{
"epoch": 0.068,
"grad_norm": 2.75,
"learning_rate": 0.000134,
"loss": 4.1654,
"step": 68
},
{
"epoch": 0.069,
"grad_norm": 3.109375,
"learning_rate": 0.00013600000000000003,
"loss": 4.0984,
"step": 69
},
{
"epoch": 0.07,
"grad_norm": 2.171875,
"learning_rate": 0.000138,
"loss": 4.0978,
"step": 70
},
{
"epoch": 0.071,
"grad_norm": 3.84375,
"learning_rate": 0.00014,
"loss": 4.1044,
"step": 71
},
{
"epoch": 0.072,
"grad_norm": 2.5625,
"learning_rate": 0.000142,
"loss": 3.967,
"step": 72
},
{
"epoch": 0.073,
"grad_norm": 2.0625,
"learning_rate": 0.000144,
"loss": 4.0976,
"step": 73
},
{
"epoch": 0.074,
"grad_norm": 2.328125,
"learning_rate": 0.000146,
"loss": 3.8734,
"step": 74
},
{
"epoch": 0.075,
"grad_norm": 2.015625,
"learning_rate": 0.000148,
"loss": 3.9381,
"step": 75
},
{
"epoch": 0.076,
"grad_norm": 2.921875,
"learning_rate": 0.00015000000000000001,
"loss": 3.97,
"step": 76
},
{
"epoch": 0.077,
"grad_norm": 2.046875,
"learning_rate": 0.000152,
"loss": 3.9806,
"step": 77
},
{
"epoch": 0.078,
"grad_norm": 3.25,
"learning_rate": 0.000154,
"loss": 3.7804,
"step": 78
},
{
"epoch": 0.079,
"grad_norm": 2.578125,
"learning_rate": 0.00015600000000000002,
"loss": 3.9961,
"step": 79
},
{
"epoch": 0.08,
"grad_norm": 1.9765625,
"learning_rate": 0.00015800000000000002,
"loss": 3.9899,
"step": 80
},
{
"epoch": 0.081,
"grad_norm": 2.171875,
"learning_rate": 0.00016,
"loss": 3.8394,
"step": 81
},
{
"epoch": 0.082,
"grad_norm": 1.8671875,
"learning_rate": 0.000162,
"loss": 3.8614,
"step": 82
},
{
"epoch": 0.083,
"grad_norm": 2.140625,
"learning_rate": 0.000164,
"loss": 3.7687,
"step": 83
},
{
"epoch": 0.084,
"grad_norm": 1.5546875,
"learning_rate": 0.000166,
"loss": 3.8413,
"step": 84
},
{
"epoch": 0.085,
"grad_norm": 2.375,
"learning_rate": 0.000168,
"loss": 3.7533,
"step": 85
},
{
"epoch": 0.086,
"grad_norm": 1.890625,
"learning_rate": 0.00017,
"loss": 3.8058,
"step": 86
},
{
"epoch": 0.087,
"grad_norm": 1.90625,
"learning_rate": 0.000172,
"loss": 3.8243,
"step": 87
},
{
"epoch": 0.088,
"grad_norm": 2.25,
"learning_rate": 0.000174,
"loss": 3.7795,
"step": 88
},
{
"epoch": 0.089,
"grad_norm": 1.8359375,
"learning_rate": 0.00017600000000000002,
"loss": 3.7421,
"step": 89
},
{
"epoch": 0.09,
"grad_norm": 1.7421875,
"learning_rate": 0.00017800000000000002,
"loss": 3.7222,
"step": 90
},
{
"epoch": 0.091,
"grad_norm": 1.5078125,
"learning_rate": 0.00018,
"loss": 3.727,
"step": 91
},
{
"epoch": 0.092,
"grad_norm": 2.328125,
"learning_rate": 0.000182,
"loss": 3.8749,
"step": 92
},
{
"epoch": 0.093,
"grad_norm": 1.7734375,
"learning_rate": 0.00018400000000000003,
"loss": 3.6911,
"step": 93
},
{
"epoch": 0.094,
"grad_norm": 2.4375,
"learning_rate": 0.00018600000000000002,
"loss": 3.6221,
"step": 94
},
{
"epoch": 0.095,
"grad_norm": 2.265625,
"learning_rate": 0.000188,
"loss": 3.6225,
"step": 95
},
{
"epoch": 0.096,
"grad_norm": 2.078125,
"learning_rate": 0.00019,
"loss": 3.5456,
"step": 96
},
{
"epoch": 0.097,
"grad_norm": 3.0625,
"learning_rate": 0.000192,
"loss": 3.5316,
"step": 97
},
{
"epoch": 0.098,
"grad_norm": 1.75,
"learning_rate": 0.000194,
"loss": 3.5942,
"step": 98
},
{
"epoch": 0.099,
"grad_norm": 1.5078125,
"learning_rate": 0.000196,
"loss": 3.5513,
"step": 99
},
{
"epoch": 0.1,
"grad_norm": 2.8125,
"learning_rate": 0.00019800000000000002,
"loss": 3.673,
"step": 100
},
{
"epoch": 0.101,
"grad_norm": 2.328125,
"learning_rate": 0.0002,
"loss": 3.6088,
"step": 101
},
{
"epoch": 0.102,
"grad_norm": 1.4921875,
"learning_rate": 0.00019999939076577905,
"loss": 3.5971,
"step": 102
},
{
"epoch": 0.103,
"grad_norm": 1.421875,
"learning_rate": 0.00019999756307053948,
"loss": 3.5318,
"step": 103
},
{
"epoch": 0.104,
"grad_norm": 2.046875,
"learning_rate": 0.00019999451693655123,
"loss": 3.5199,
"step": 104
},
{
"epoch": 0.105,
"grad_norm": 1.7734375,
"learning_rate": 0.00019999025240093044,
"loss": 3.5049,
"step": 105
},
{
"epoch": 0.106,
"grad_norm": 2.234375,
"learning_rate": 0.00019998476951563915,
"loss": 3.5481,
"step": 106
},
{
"epoch": 0.107,
"grad_norm": 1.9921875,
"learning_rate": 0.00019997806834748456,
"loss": 3.4391,
"step": 107
},
{
"epoch": 0.108,
"grad_norm": 1.4609375,
"learning_rate": 0.00019997014897811833,
"loss": 3.5096,
"step": 108
},
{
"epoch": 0.109,
"grad_norm": 1.8046875,
"learning_rate": 0.00019996101150403543,
"loss": 3.424,
"step": 109
},
{
"epoch": 0.11,
"grad_norm": 2.4375,
"learning_rate": 0.00019995065603657316,
"loss": 3.4862,
"step": 110
},
{
"epoch": 0.111,
"grad_norm": 1.5859375,
"learning_rate": 0.0001999390827019096,
"loss": 3.5117,
"step": 111
},
{
"epoch": 0.112,
"grad_norm": 1.7265625,
"learning_rate": 0.0001999262916410621,
"loss": 3.452,
"step": 112
},
{
"epoch": 0.113,
"grad_norm": 1.7578125,
"learning_rate": 0.00019991228300988585,
"loss": 3.4834,
"step": 113
},
{
"epoch": 0.114,
"grad_norm": 1.8984375,
"learning_rate": 0.00019989705697907149,
"loss": 3.4673,
"step": 114
},
{
"epoch": 0.115,
"grad_norm": 1.625,
"learning_rate": 0.0001998806137341434,
"loss": 3.2031,
"step": 115
},
{
"epoch": 0.116,
"grad_norm": 1.6015625,
"learning_rate": 0.0001998629534754574,
"loss": 3.3329,
"step": 116
},
{
"epoch": 0.117,
"grad_norm": 1.4921875,
"learning_rate": 0.00019984407641819812,
"loss": 3.4215,
"step": 117
},
{
"epoch": 0.118,
"grad_norm": 1.65625,
"learning_rate": 0.00019982398279237655,
"loss": 3.3886,
"step": 118
},
{
"epoch": 0.119,
"grad_norm": 1.421875,
"learning_rate": 0.00019980267284282717,
"loss": 3.2904,
"step": 119
},
{
"epoch": 0.12,
"grad_norm": 1.3515625,
"learning_rate": 0.000199780146829205,
"loss": 3.3135,
"step": 120
},
{
"epoch": 0.121,
"grad_norm": 1.8671875,
"learning_rate": 0.00019975640502598244,
"loss": 3.4523,
"step": 121
},
{
"epoch": 0.122,
"grad_norm": 1.796875,
"learning_rate": 0.00019973144772244582,
"loss": 3.248,
"step": 122
},
{
"epoch": 0.123,
"grad_norm": 1.4140625,
"learning_rate": 0.00019970527522269205,
"loss": 3.3419,
"step": 123
},
{
"epoch": 0.124,
"grad_norm": 1.453125,
"learning_rate": 0.00019967788784562473,
"loss": 3.296,
"step": 124
},
{
"epoch": 0.125,
"grad_norm": 1.90625,
"learning_rate": 0.00019964928592495045,
"loss": 3.2151,
"step": 125
},
{
"epoch": 0.126,
"grad_norm": 1.3828125,
"learning_rate": 0.00019961946980917456,
"loss": 3.3786,
"step": 126
},
{
"epoch": 0.127,
"grad_norm": 1.859375,
"learning_rate": 0.00019958843986159704,
"loss": 3.4188,
"step": 127
},
{
"epoch": 0.128,
"grad_norm": 1.46875,
"learning_rate": 0.00019955619646030802,
"loss": 3.4489,
"step": 128
},
{
"epoch": 0.129,
"grad_norm": 1.7265625,
"learning_rate": 0.0001995227399981831,
"loss": 3.3746,
"step": 129
},
{
"epoch": 0.13,
"grad_norm": 1.921875,
"learning_rate": 0.00019948807088287883,
"loss": 3.29,
"step": 130
},
{
"epoch": 0.131,
"grad_norm": 1.3984375,
"learning_rate": 0.00019945218953682734,
"loss": 3.2717,
"step": 131
},
{
"epoch": 0.132,
"grad_norm": 4.21875,
"learning_rate": 0.00019941509639723155,
"loss": 3.2601,
"step": 132
},
{
"epoch": 0.133,
"grad_norm": 2.15625,
"learning_rate": 0.00019937679191605963,
"loss": 3.3615,
"step": 133
},
{
"epoch": 0.134,
"grad_norm": 2.390625,
"learning_rate": 0.00019933727656003963,
"loss": 3.4326,
"step": 134
},
{
"epoch": 0.135,
"grad_norm": 1.9296875,
"learning_rate": 0.0001992965508106537,
"loss": 3.3733,
"step": 135
},
{
"epoch": 0.136,
"grad_norm": 1.6953125,
"learning_rate": 0.00019925461516413223,
"loss": 3.3488,
"step": 136
},
{
"epoch": 0.137,
"grad_norm": 1.46875,
"learning_rate": 0.0001992114701314478,
"loss": 3.2991,
"step": 137
},
{
"epoch": 0.138,
"grad_norm": 1.375,
"learning_rate": 0.00019916711623830903,
"loss": 3.2037,
"step": 138
},
{
"epoch": 0.139,
"grad_norm": 1.5234375,
"learning_rate": 0.00019912155402515417,
"loss": 3.2748,
"step": 139
},
{
"epoch": 0.14,
"grad_norm": 1.4375,
"learning_rate": 0.00019907478404714436,
"loss": 3.2753,
"step": 140
},
{
"epoch": 0.141,
"grad_norm": 2.0,
"learning_rate": 0.00019902680687415705,
"loss": 3.3406,
"step": 141
},
{
"epoch": 0.142,
"grad_norm": 1.4296875,
"learning_rate": 0.0001989776230907789,
"loss": 3.3422,
"step": 142
},
{
"epoch": 0.143,
"grad_norm": 1.703125,
"learning_rate": 0.00019892723329629887,
"loss": 3.4146,
"step": 143
},
{
"epoch": 0.144,
"grad_norm": 1.4453125,
"learning_rate": 0.0001988756381047006,
"loss": 3.3321,
"step": 144
},
{
"epoch": 0.145,
"grad_norm": 1.46875,
"learning_rate": 0.0001988228381446553,
"loss": 3.3225,
"step": 145
},
{
"epoch": 0.146,
"grad_norm": 1.3984375,
"learning_rate": 0.00019876883405951377,
"loss": 3.2079,
"step": 146
},
{
"epoch": 0.147,
"grad_norm": 1.59375,
"learning_rate": 0.0001987136265072988,
"loss": 3.1757,
"step": 147
},
{
"epoch": 0.148,
"grad_norm": 1.40625,
"learning_rate": 0.00019865721616069696,
"loss": 3.1882,
"step": 148
},
{
"epoch": 0.149,
"grad_norm": 1.3671875,
"learning_rate": 0.0001985996037070505,
"loss": 3.2314,
"step": 149
},
{
"epoch": 0.15,
"grad_norm": 1.2109375,
"learning_rate": 0.00019854078984834903,
"loss": 3.1942,
"step": 150
},
{
"epoch": 0.151,
"grad_norm": 1.4453125,
"learning_rate": 0.00019848077530122083,
"loss": 3.2477,
"step": 151
},
{
"epoch": 0.152,
"grad_norm": 1.7734375,
"learning_rate": 0.0001984195607969242,
"loss": 3.1375,
"step": 152
},
{
"epoch": 0.153,
"grad_norm": 1.484375,
"learning_rate": 0.00019835714708133862,
"loss": 3.145,
"step": 153
},
{
"epoch": 0.154,
"grad_norm": 1.2265625,
"learning_rate": 0.00019829353491495545,
"loss": 3.1747,
"step": 154
},
{
"epoch": 0.155,
"grad_norm": 1.3046875,
"learning_rate": 0.0001982287250728689,
"loss": 3.2027,
"step": 155
},
{
"epoch": 0.156,
"grad_norm": 1.4609375,
"learning_rate": 0.00019816271834476642,
"loss": 3.09,
"step": 156
},
{
"epoch": 0.157,
"grad_norm": 2.578125,
"learning_rate": 0.00019809551553491916,
"loss": 3.2477,
"step": 157
},
{
"epoch": 0.158,
"grad_norm": 1.7421875,
"learning_rate": 0.00019802711746217218,
"loss": 3.1619,
"step": 158
},
{
"epoch": 0.159,
"grad_norm": 1.3984375,
"learning_rate": 0.0001979575249599344,
"loss": 3.2682,
"step": 159
},
{
"epoch": 0.16,
"grad_norm": 1.6328125,
"learning_rate": 0.0001978867388761685,
"loss": 3.0885,
"step": 160
},
{
"epoch": 0.161,
"grad_norm": 1.2890625,
"learning_rate": 0.00019781476007338058,
"loss": 3.2491,
"step": 161
},
{
"epoch": 0.162,
"grad_norm": 1.5,
"learning_rate": 0.0001977415894286096,
"loss": 3.21,
"step": 162
},
{
"epoch": 0.163,
"grad_norm": 1.2734375,
"learning_rate": 0.0001976672278334168,
"loss": 3.1559,
"step": 163
},
{
"epoch": 0.164,
"grad_norm": 1.3515625,
"learning_rate": 0.00019759167619387476,
"loss": 3.0837,
"step": 164
},
{
"epoch": 0.165,
"grad_norm": 1.3046875,
"learning_rate": 0.00019751493543055632,
"loss": 3.1893,
"step": 165
},
{
"epoch": 0.166,
"grad_norm": 1.203125,
"learning_rate": 0.00019743700647852354,
"loss": 3.1023,
"step": 166
},
{
"epoch": 0.167,
"grad_norm": 1.1953125,
"learning_rate": 0.00019735789028731604,
"loss": 3.0936,
"step": 167
},
{
"epoch": 0.168,
"grad_norm": 1.359375,
"learning_rate": 0.00019727758782093967,
"loss": 3.2097,
"step": 168
},
{
"epoch": 0.169,
"grad_norm": 1.6171875,
"learning_rate": 0.00019719610005785465,
"loss": 3.1198,
"step": 169
},
{
"epoch": 0.17,
"grad_norm": 1.3359375,
"learning_rate": 0.00019711342799096361,
"loss": 3.1823,
"step": 170
},
{
"epoch": 0.171,
"grad_norm": 1.0703125,
"learning_rate": 0.00019702957262759965,
"loss": 3.1136,
"step": 171
},
{
"epoch": 0.172,
"grad_norm": 4.90625,
"learning_rate": 0.0001969445349895139,
"loss": 3.1076,
"step": 172
},
{
"epoch": 0.173,
"grad_norm": 1.625,
"learning_rate": 0.0001968583161128631,
"loss": 3.113,
"step": 173
},
{
"epoch": 0.174,
"grad_norm": 1.6640625,
"learning_rate": 0.00019677091704819715,
"loss": 3.1752,
"step": 174
},
{
"epoch": 0.175,
"grad_norm": 1.5390625,
"learning_rate": 0.00019668233886044597,
"loss": 3.1309,
"step": 175
},
{
"epoch": 0.176,
"grad_norm": 1.34375,
"learning_rate": 0.00019659258262890683,
"loss": 3.1505,
"step": 176
},
{
"epoch": 0.177,
"grad_norm": 1.3125,
"learning_rate": 0.00019650164944723115,
"loss": 3.1151,
"step": 177
},
{
"epoch": 0.178,
"grad_norm": 1.5546875,
"learning_rate": 0.00019640954042341103,
"loss": 3.085,
"step": 178
},
{
"epoch": 0.179,
"grad_norm": 1.421875,
"learning_rate": 0.00019631625667976583,
"loss": 3.0857,
"step": 179
},
{
"epoch": 0.18,
"grad_norm": 1.1953125,
"learning_rate": 0.00019622179935292855,
"loss": 3.2549,
"step": 180
},
{
"epoch": 0.181,
"grad_norm": 1.265625,
"learning_rate": 0.0001961261695938319,
"loss": 3.2138,
"step": 181
},
{
"epoch": 0.182,
"grad_norm": 1.1796875,
"learning_rate": 0.0001960293685676943,
"loss": 3.0507,
"step": 182
},
{
"epoch": 0.183,
"grad_norm": 1.4453125,
"learning_rate": 0.00019593139745400576,
"loss": 3.2578,
"step": 183
},
{
"epoch": 0.184,
"grad_norm": 1.2109375,
"learning_rate": 0.00019583225744651333,
"loss": 3.0951,
"step": 184
},
{
"epoch": 0.185,
"grad_norm": 1.6875,
"learning_rate": 0.00019573194975320673,
"loss": 3.1273,
"step": 185
},
{
"epoch": 0.186,
"grad_norm": 1.296875,
"learning_rate": 0.00019563047559630357,
"loss": 3.0946,
"step": 186
},
{
"epoch": 0.187,
"grad_norm": 1.21875,
"learning_rate": 0.00019552783621223436,
"loss": 3.1003,
"step": 187
},
{
"epoch": 0.188,
"grad_norm": 2.359375,
"learning_rate": 0.0001954240328516277,
"loss": 3.2026,
"step": 188
},
{
"epoch": 0.189,
"grad_norm": 1.515625,
"learning_rate": 0.0001953190667792947,
"loss": 3.1465,
"step": 189
},
{
"epoch": 0.19,
"grad_norm": 1.5390625,
"learning_rate": 0.00019521293927421388,
"loss": 3.1967,
"step": 190
},
{
"epoch": 0.191,
"grad_norm": 1.2109375,
"learning_rate": 0.00019510565162951537,
"loss": 3.1578,
"step": 191
},
{
"epoch": 0.192,
"grad_norm": 1.296875,
"learning_rate": 0.00019499720515246525,
"loss": 3.1388,
"step": 192
},
{
"epoch": 0.193,
"grad_norm": 1.140625,
"learning_rate": 0.00019488760116444966,
"loss": 3.1873,
"step": 193
},
{
"epoch": 0.194,
"grad_norm": 1.5703125,
"learning_rate": 0.0001947768410009586,
"loss": 3.1005,
"step": 194
},
{
"epoch": 0.195,
"grad_norm": 1.515625,
"learning_rate": 0.00019466492601156966,
"loss": 3.0942,
"step": 195
},
{
"epoch": 0.196,
"grad_norm": 1.25,
"learning_rate": 0.0001945518575599317,
"loss": 3.0119,
"step": 196
},
{
"epoch": 0.197,
"grad_norm": 1.1171875,
"learning_rate": 0.00019443763702374812,
"loss": 3.0152,
"step": 197
},
{
"epoch": 0.198,
"grad_norm": 1.609375,
"learning_rate": 0.0001943222657947601,
"loss": 2.9887,
"step": 198
},
{
"epoch": 0.199,
"grad_norm": 1.421875,
"learning_rate": 0.00019420574527872968,
"loss": 3.0333,
"step": 199
},
{
"epoch": 0.2,
"grad_norm": 1.1171875,
"learning_rate": 0.00019408807689542257,
"loss": 3.1974,
"step": 200
},
{
"epoch": 0.201,
"grad_norm": 1.4453125,
"learning_rate": 0.00019396926207859084,
"loss": 3.1598,
"step": 201
},
{
"epoch": 0.202,
"grad_norm": 1.21875,
"learning_rate": 0.0001938493022759556,
"loss": 3.1167,
"step": 202
},
{
"epoch": 0.203,
"grad_norm": 1.171875,
"learning_rate": 0.00019372819894918915,
"loss": 3.0386,
"step": 203
},
{
"epoch": 0.204,
"grad_norm": 1.890625,
"learning_rate": 0.00019360595357389735,
"loss": 3.0719,
"step": 204
},
{
"epoch": 0.205,
"grad_norm": 1.328125,
"learning_rate": 0.00019348256763960145,
"loss": 3.1225,
"step": 205
},
{
"epoch": 0.206,
"grad_norm": 1.578125,
"learning_rate": 0.00019335804264972018,
"loss": 3.1224,
"step": 206
},
{
"epoch": 0.207,
"grad_norm": 1.4140625,
"learning_rate": 0.00019323238012155123,
"loss": 3.0044,
"step": 207
},
{
"epoch": 0.208,
"grad_norm": 1.25,
"learning_rate": 0.00019310558158625285,
"loss": 3.0157,
"step": 208
},
{
"epoch": 0.209,
"grad_norm": 1.375,
"learning_rate": 0.00019297764858882514,
"loss": 2.9538,
"step": 209
},
{
"epoch": 0.21,
"grad_norm": 1.375,
"learning_rate": 0.00019284858268809137,
"loss": 3.0122,
"step": 210
},
{
"epoch": 0.211,
"grad_norm": 1.3359375,
"learning_rate": 0.00019271838545667876,
"loss": 3.0153,
"step": 211
},
{
"epoch": 0.212,
"grad_norm": 1.015625,
"learning_rate": 0.0001925870584809995,
"loss": 3.0157,
"step": 212
},
{
"epoch": 0.213,
"grad_norm": 1.484375,
"learning_rate": 0.00019245460336123134,
"loss": 3.1123,
"step": 213
},
{
"epoch": 0.214,
"grad_norm": 1.1171875,
"learning_rate": 0.00019232102171129811,
"loss": 3.0119,
"step": 214
},
{
"epoch": 0.215,
"grad_norm": 1.109375,
"learning_rate": 0.00019218631515885006,
"loss": 3.0785,
"step": 215
},
{
"epoch": 0.216,
"grad_norm": 1.15625,
"learning_rate": 0.00019205048534524406,
"loss": 2.9962,
"step": 216
},
{
"epoch": 0.217,
"grad_norm": 1.046875,
"learning_rate": 0.00019191353392552344,
"loss": 2.98,
"step": 217
},
{
"epoch": 0.218,
"grad_norm": 1.2890625,
"learning_rate": 0.00019177546256839812,
"loss": 3.1094,
"step": 218
},
{
"epoch": 0.219,
"grad_norm": 1.84375,
"learning_rate": 0.00019163627295622397,
"loss": 3.0632,
"step": 219
},
{
"epoch": 0.22,
"grad_norm": 1.3515625,
"learning_rate": 0.0001914959667849825,
"loss": 3.0286,
"step": 220
},
{
"epoch": 0.221,
"grad_norm": 1.3359375,
"learning_rate": 0.0001913545457642601,
"loss": 3.2119,
"step": 221
},
{
"epoch": 0.222,
"grad_norm": 1.203125,
"learning_rate": 0.0001912120116172273,
"loss": 3.1813,
"step": 222
},
{
"epoch": 0.223,
"grad_norm": 1.1640625,
"learning_rate": 0.00019106836608061772,
"loss": 3.144,
"step": 223
},
{
"epoch": 0.224,
"grad_norm": 1.2421875,
"learning_rate": 0.00019092361090470688,
"loss": 3.012,
"step": 224
},
{
"epoch": 0.225,
"grad_norm": 1.1328125,
"learning_rate": 0.00019077774785329087,
"loss": 3.0367,
"step": 225
},
{
"epoch": 0.226,
"grad_norm": 1.0390625,
"learning_rate": 0.000190630778703665,
"loss": 3.0175,
"step": 226
},
{
"epoch": 0.227,
"grad_norm": 1.296875,
"learning_rate": 0.00019048270524660196,
"loss": 2.9683,
"step": 227
},
{
"epoch": 0.228,
"grad_norm": 1.140625,
"learning_rate": 0.0001903335292863301,
"loss": 3.0164,
"step": 228
},
{
"epoch": 0.229,
"grad_norm": 1.234375,
"learning_rate": 0.0001901832526405114,
"loss": 2.9548,
"step": 229
},
{
"epoch": 0.23,
"grad_norm": 1.6875,
"learning_rate": 0.00019003187714021938,
"loss": 2.4954,
"step": 230
},
{
"epoch": 0.231,
"grad_norm": 5.59375,
"learning_rate": 0.0001898794046299167,
"loss": 2.6911,
"step": 231
},
{
"epoch": 0.232,
"grad_norm": 132.0,
"learning_rate": 0.00018972583696743285,
"loss": 3.6355,
"step": 232
},
{
"epoch": 0.233,
"grad_norm": 2.875,
"learning_rate": 0.0001895711760239413,
"loss": 3.0587,
"step": 233
},
{
"epoch": 0.234,
"grad_norm": 1.7734375,
"learning_rate": 0.0001894154236839368,
"loss": 3.0121,
"step": 234
},
{
"epoch": 0.235,
"grad_norm": 52.0,
"learning_rate": 0.00018925858184521256,
"loss": 3.0583,
"step": 235
},
{
"epoch": 0.236,
"grad_norm": 2.484375,
"learning_rate": 0.0001891006524188368,
"loss": 3.1517,
"step": 236
},
{
"epoch": 0.237,
"grad_norm": 1.7578125,
"learning_rate": 0.00018894163732912977,
"loss": 3.1375,
"step": 237
},
{
"epoch": 0.238,
"grad_norm": 1.59375,
"learning_rate": 0.00018878153851364013,
"loss": 3.1155,
"step": 238
},
{
"epoch": 0.239,
"grad_norm": 1.546875,
"learning_rate": 0.00018862035792312147,
"loss": 3.0236,
"step": 239
},
{
"epoch": 0.24,
"grad_norm": 1.328125,
"learning_rate": 0.0001884580975215084,
"loss": 3.2067,
"step": 240
},
{
"epoch": 0.241,
"grad_norm": 1.2109375,
"learning_rate": 0.00018829475928589271,
"loss": 3.1324,
"step": 241
},
{
"epoch": 0.242,
"grad_norm": 1.1875,
"learning_rate": 0.0001881303452064992,
"loss": 3.0433,
"step": 242
},
{
"epoch": 0.243,
"grad_norm": 1.1640625,
"learning_rate": 0.00018796485728666165,
"loss": 2.9619,
"step": 243
},
{
"epoch": 0.244,
"grad_norm": 1.09375,
"learning_rate": 0.00018779829754279805,
"loss": 3.052,
"step": 244
},
{
"epoch": 0.245,
"grad_norm": 1.2109375,
"learning_rate": 0.00018763066800438636,
"loss": 3.0409,
"step": 245
},
{
"epoch": 0.246,
"grad_norm": 1.6328125,
"learning_rate": 0.00018746197071393958,
"loss": 2.9298,
"step": 246
},
{
"epoch": 0.247,
"grad_norm": 1.171875,
"learning_rate": 0.00018729220772698097,
"loss": 3.0326,
"step": 247
},
{
"epoch": 0.248,
"grad_norm": 1.0546875,
"learning_rate": 0.00018712138111201895,
"loss": 3.0219,
"step": 248
},
{
"epoch": 0.249,
"grad_norm": 1.078125,
"learning_rate": 0.0001869494929505219,
"loss": 3.0315,
"step": 249
},
{
"epoch": 0.25,
"grad_norm": 11.5,
"learning_rate": 0.00018677654533689287,
"loss": 2.9556,
"step": 250
},
{
"epoch": 0.25,
"eval_loss": 3.0629920959472656,
"eval_runtime": 31.7573,
"eval_samples_per_second": 15.744,
"eval_steps_per_second": 2.645,
"step": 250
},
{
"epoch": 0.251,
"grad_norm": 1.953125,
"learning_rate": 0.00018660254037844388,
"loss": 3.0847,
"step": 251
},
{
"epoch": 0.252,
"grad_norm": 1.328125,
"learning_rate": 0.0001864274801953705,
"loss": 3.0586,
"step": 252
},
{
"epoch": 0.253,
"grad_norm": 1.5390625,
"learning_rate": 0.00018625136692072575,
"loss": 3.1137,
"step": 253
},
{
"epoch": 0.254,
"grad_norm": 2.4375,
"learning_rate": 0.0001860742027003944,
"loss": 3.021,
"step": 254
},
{
"epoch": 0.255,
"grad_norm": 1.4375,
"learning_rate": 0.00018589598969306645,
"loss": 3.0077,
"step": 255
},
{
"epoch": 0.256,
"grad_norm": 1.4453125,
"learning_rate": 0.00018571673007021123,
"loss": 3.0533,
"step": 256
},
{
"epoch": 0.257,
"grad_norm": 1.453125,
"learning_rate": 0.00018553642601605068,
"loss": 3.038,
"step": 257
},
{
"epoch": 0.258,
"grad_norm": 2.125,
"learning_rate": 0.00018535507972753274,
"loss": 2.9965,
"step": 258
},
{
"epoch": 0.259,
"grad_norm": 1.34375,
"learning_rate": 0.00018517269341430476,
"loss": 3.0632,
"step": 259
},
{
"epoch": 0.26,
"grad_norm": 1.1640625,
"learning_rate": 0.00018498926929868642,
"loss": 3.1137,
"step": 260
},
{
"epoch": 0.261,
"grad_norm": 23.75,
"learning_rate": 0.0001848048096156426,
"loss": 2.9885,
"step": 261
},
{
"epoch": 0.262,
"grad_norm": 3.75,
"learning_rate": 0.00018461931661275643,
"loss": 3.12,
"step": 262
},
{
"epoch": 0.263,
"grad_norm": 2.0625,
"learning_rate": 0.00018443279255020152,
"loss": 3.1726,
"step": 263
},
{
"epoch": 0.264,
"grad_norm": 1.921875,
"learning_rate": 0.00018424523970071477,
"loss": 3.2129,
"step": 264
},
{
"epoch": 0.265,
"grad_norm": 1.71875,
"learning_rate": 0.00018405666034956844,
"loss": 2.9784,
"step": 265
},
{
"epoch": 0.266,
"grad_norm": 71.0,
"learning_rate": 0.00018386705679454242,
"loss": 3.0557,
"step": 266
},
{
"epoch": 0.267,
"grad_norm": 2.65625,
"learning_rate": 0.00018367643134589617,
"loss": 3.1084,
"step": 267
},
{
"epoch": 0.268,
"grad_norm": 1.8984375,
"learning_rate": 0.00018348478632634066,
"loss": 3.0229,
"step": 268
},
{
"epoch": 0.269,
"grad_norm": 3.71875,
"learning_rate": 0.00018329212407100994,
"loss": 2.9938,
"step": 269
},
{
"epoch": 0.27,
"grad_norm": 2.84375,
"learning_rate": 0.00018309844692743283,
"loss": 3.029,
"step": 270
},
{
"epoch": 0.271,
"grad_norm": 2.015625,
"learning_rate": 0.00018290375725550417,
"loss": 2.9886,
"step": 271
},
{
"epoch": 0.272,
"grad_norm": 2.015625,
"learning_rate": 0.00018270805742745617,
"loss": 3.0823,
"step": 272
},
{
"epoch": 0.273,
"grad_norm": 3.28125,
"learning_rate": 0.00018251134982782952,
"loss": 3.0021,
"step": 273
},
{
"epoch": 0.274,
"grad_norm": 2.203125,
"learning_rate": 0.0001823136368534442,
"loss": 2.9724,
"step": 274
},
{
"epoch": 0.275,
"grad_norm": 1.390625,
"learning_rate": 0.00018211492091337042,
"loss": 2.8975,
"step": 275
},
{
"epoch": 0.276,
"grad_norm": 1.8125,
"learning_rate": 0.0001819152044288992,
"loss": 2.9522,
"step": 276
},
{
"epoch": 0.277,
"grad_norm": 1.4921875,
"learning_rate": 0.00018171448983351284,
"loss": 2.9637,
"step": 277
},
{
"epoch": 0.278,
"grad_norm": 1.984375,
"learning_rate": 0.00018151277957285543,
"loss": 3.0239,
"step": 278
},
{
"epoch": 0.279,
"grad_norm": 1.59375,
"learning_rate": 0.00018131007610470276,
"loss": 2.9014,
"step": 279
},
{
"epoch": 0.28,
"grad_norm": 1.8828125,
"learning_rate": 0.00018110638189893267,
"loss": 3.0008,
"step": 280
},
{
"epoch": 0.281,
"grad_norm": 1.4765625,
"learning_rate": 0.00018090169943749476,
"loss": 3.1132,
"step": 281
},
{
"epoch": 0.282,
"grad_norm": 5.09375,
"learning_rate": 0.00018069603121438022,
"loss": 3.0337,
"step": 282
},
{
"epoch": 0.283,
"grad_norm": 2.765625,
"learning_rate": 0.0001804893797355914,
"loss": 3.1402,
"step": 283
},
{
"epoch": 0.284,
"grad_norm": 1.3984375,
"learning_rate": 0.00018028174751911146,
"loss": 2.9044,
"step": 284
},
{
"epoch": 0.285,
"grad_norm": 6.46875,
"learning_rate": 0.00018007313709487334,
"loss": 3.0565,
"step": 285
},
{
"epoch": 0.286,
"grad_norm": 2.65625,
"learning_rate": 0.00017986355100472928,
"loss": 3.1032,
"step": 286
},
{
"epoch": 0.287,
"grad_norm": 3.296875,
"learning_rate": 0.00017965299180241963,
"loss": 2.9643,
"step": 287
},
{
"epoch": 0.288,
"grad_norm": 2.078125,
"learning_rate": 0.00017944146205354182,
"loss": 3.0511,
"step": 288
},
{
"epoch": 0.289,
"grad_norm": 1.6328125,
"learning_rate": 0.00017922896433551907,
"loss": 2.8223,
"step": 289
},
{
"epoch": 0.29,
"grad_norm": 1.359375,
"learning_rate": 0.00017901550123756906,
"loss": 2.9322,
"step": 290
},
{
"epoch": 0.291,
"grad_norm": 13.0,
"learning_rate": 0.00017880107536067218,
"loss": 3.054,
"step": 291
},
{
"epoch": 0.292,
"grad_norm": 1.7265625,
"learning_rate": 0.0001785856893175402,
"loss": 3.0824,
"step": 292
},
{
"epoch": 0.293,
"grad_norm": 1.21875,
"learning_rate": 0.000178369345732584,
"loss": 3.0968,
"step": 293
},
{
"epoch": 0.294,
"grad_norm": 9.25,
"learning_rate": 0.00017815204724188187,
"loss": 3.138,
"step": 294
},
{
"epoch": 0.295,
"grad_norm": 1.703125,
"learning_rate": 0.00017793379649314744,
"loss": 3.0392,
"step": 295
},
{
"epoch": 0.296,
"grad_norm": 20.625,
"learning_rate": 0.0001777145961456971,
"loss": 2.9701,
"step": 296
},
{
"epoch": 0.297,
"grad_norm": 12.0,
"learning_rate": 0.00017749444887041799,
"loss": 3.0403,
"step": 297
},
{
"epoch": 0.298,
"grad_norm": 2.125,
"learning_rate": 0.00017727335734973512,
"loss": 2.8949,
"step": 298
},
{
"epoch": 0.299,
"grad_norm": 25.875,
"learning_rate": 0.00017705132427757895,
"loss": 3.0195,
"step": 299
},
{
"epoch": 0.3,
"grad_norm": 18.25,
"learning_rate": 0.00017682835235935236,
"loss": 2.9629,
"step": 300
},
{
"epoch": 0.301,
"grad_norm": 3.0,
"learning_rate": 0.0001766044443118978,
"loss": 3.0801,
"step": 301
},
{
"epoch": 0.302,
"grad_norm": 4.9375,
"learning_rate": 0.00017637960286346425,
"loss": 2.9368,
"step": 302
},
{
"epoch": 0.303,
"grad_norm": 2.203125,
"learning_rate": 0.0001761538307536737,
"loss": 2.9964,
"step": 303
},
{
"epoch": 0.304,
"grad_norm": 0.984375,
"learning_rate": 0.00017592713073348807,
"loss": 2.9558,
"step": 304
},
{
"epoch": 0.305,
"grad_norm": 1.1640625,
"learning_rate": 0.00017569950556517566,
"loss": 2.9006,
"step": 305
},
{
"epoch": 0.306,
"grad_norm": 1.4140625,
"learning_rate": 0.00017547095802227723,
"loss": 3.0037,
"step": 306
},
{
"epoch": 0.307,
"grad_norm": 1.0,
"learning_rate": 0.00017524149088957245,
"loss": 2.8517,
"step": 307
},
{
"epoch": 0.308,
"grad_norm": 0.92578125,
"learning_rate": 0.00017501110696304596,
"loss": 2.9163,
"step": 308
},
{
"epoch": 0.309,
"grad_norm": 0.98046875,
"learning_rate": 0.0001747798090498532,
"loss": 2.9563,
"step": 309
},
{
"epoch": 0.31,
"grad_norm": 0.8984375,
"learning_rate": 0.00017454759996828623,
"loss": 2.8861,
"step": 310
},
{
"epoch": 0.311,
"grad_norm": 0.94140625,
"learning_rate": 0.00017431448254773944,
"loss": 3.0871,
"step": 311
},
{
"epoch": 0.312,
"grad_norm": 0.98046875,
"learning_rate": 0.000174080459628675,
"loss": 2.8852,
"step": 312
},
{
"epoch": 0.313,
"grad_norm": 26.0,
"learning_rate": 0.00017384553406258842,
"loss": 3.0938,
"step": 313
},
{
"epoch": 0.314,
"grad_norm": 1.7265625,
"learning_rate": 0.00017360970871197346,
"loss": 3.0905,
"step": 314
},
{
"epoch": 0.315,
"grad_norm": 1.140625,
"learning_rate": 0.00017337298645028764,
"loss": 3.0793,
"step": 315
},
{
"epoch": 0.316,
"grad_norm": 1.25,
"learning_rate": 0.00017313537016191706,
"loss": 3.0879,
"step": 316
},
{
"epoch": 0.317,
"grad_norm": 1.1875,
"learning_rate": 0.00017289686274214118,
"loss": 3.1008,
"step": 317
},
{
"epoch": 0.318,
"grad_norm": 1.1015625,
"learning_rate": 0.0001726574670970976,
"loss": 3.0737,
"step": 318
},
{
"epoch": 0.319,
"grad_norm": 1.0859375,
"learning_rate": 0.00017241718614374678,
"loss": 2.9513,
"step": 319
},
{
"epoch": 0.32,
"grad_norm": 1.046875,
"learning_rate": 0.00017217602280983623,
"loss": 3.1167,
"step": 320
},
{
"epoch": 0.321,
"grad_norm": 1.0,
"learning_rate": 0.0001719339800338651,
"loss": 3.0823,
"step": 321
},
{
"epoch": 0.322,
"grad_norm": 1.03125,
"learning_rate": 0.0001716910607650483,
"loss": 2.9707,
"step": 322
},
{
"epoch": 0.323,
"grad_norm": 0.9921875,
"learning_rate": 0.00017144726796328034,
"loss": 3.0313,
"step": 323
},
{
"epoch": 0.324,
"grad_norm": 0.93359375,
"learning_rate": 0.00017120260459909967,
"loss": 2.9786,
"step": 324
},
{
"epoch": 0.325,
"grad_norm": 1.0703125,
"learning_rate": 0.0001709570736536521,
"loss": 2.9752,
"step": 325
},
{
"epoch": 0.326,
"grad_norm": 0.91796875,
"learning_rate": 0.00017071067811865476,
"loss": 3.0147,
"step": 326
},
{
"epoch": 0.327,
"grad_norm": 1.1328125,
"learning_rate": 0.00017046342099635948,
"loss": 3.0414,
"step": 327
},
{
"epoch": 0.328,
"grad_norm": 0.94921875,
"learning_rate": 0.00017021530529951625,
"loss": 3.0018,
"step": 328
},
{
"epoch": 0.329,
"grad_norm": 1.265625,
"learning_rate": 0.00016996633405133655,
"loss": 3.0249,
"step": 329
},
{
"epoch": 0.33,
"grad_norm": 1.2109375,
"learning_rate": 0.00016971651028545648,
"loss": 2.9958,
"step": 330
},
{
"epoch": 0.331,
"grad_norm": 0.8984375,
"learning_rate": 0.00016946583704589973,
"loss": 2.9505,
"step": 331
},
{
"epoch": 0.332,
"grad_norm": 1.0703125,
"learning_rate": 0.0001692143173870407,
"loss": 2.9806,
"step": 332
},
{
"epoch": 0.333,
"grad_norm": 248.0,
"learning_rate": 0.000168961954373567,
"loss": 2.9853,
"step": 333
},
{
"epoch": 0.334,
"grad_norm": 1.75,
"learning_rate": 0.0001687087510804423,
"loss": 2.9122,
"step": 334
},
{
"epoch": 0.335,
"grad_norm": 1.0625,
"learning_rate": 0.00016845471059286887,
"loss": 2.9686,
"step": 335
},
{
"epoch": 0.336,
"grad_norm": 1.4375,
"learning_rate": 0.00016819983600624986,
"loss": 3.0128,
"step": 336
},
{
"epoch": 0.337,
"grad_norm": 1.4609375,
"learning_rate": 0.00016794413042615168,
"loss": 2.9692,
"step": 337
},
{
"epoch": 0.338,
"grad_norm": 1.4140625,
"learning_rate": 0.00016768759696826608,
"loss": 2.9639,
"step": 338
},
{
"epoch": 0.339,
"grad_norm": 1.140625,
"learning_rate": 0.00016743023875837233,
"loss": 2.8997,
"step": 339
},
{
"epoch": 0.34,
"grad_norm": 1.2421875,
"learning_rate": 0.00016717205893229903,
"loss": 2.9825,
"step": 340
},
{
"epoch": 0.341,
"grad_norm": 1.1171875,
"learning_rate": 0.00016691306063588583,
"loss": 2.9442,
"step": 341
},
{
"epoch": 0.342,
"grad_norm": 1.1171875,
"learning_rate": 0.00016665324702494524,
"loss": 2.9611,
"step": 342
},
{
"epoch": 0.343,
"grad_norm": 1.0,
"learning_rate": 0.00016639262126522418,
"loss": 3.0123,
"step": 343
},
{
"epoch": 0.344,
"grad_norm": 1.6953125,
"learning_rate": 0.00016613118653236518,
"loss": 2.9498,
"step": 344
},
{
"epoch": 0.345,
"grad_norm": 1.15625,
"learning_rate": 0.00016586894601186805,
"loss": 2.8588,
"step": 345
},
{
"epoch": 0.346,
"grad_norm": 0.9609375,
"learning_rate": 0.00016560590289905073,
"loss": 3.0459,
"step": 346
},
{
"epoch": 0.347,
"grad_norm": 1.0703125,
"learning_rate": 0.00016534206039901057,
"loss": 2.8838,
"step": 347
},
{
"epoch": 0.348,
"grad_norm": 0.95703125,
"learning_rate": 0.0001650774217265851,
"loss": 2.9694,
"step": 348
},
{
"epoch": 0.349,
"grad_norm": 0.92578125,
"learning_rate": 0.0001648119901063131,
"loss": 2.9102,
"step": 349
},
{
"epoch": 0.35,
"grad_norm": 0.921875,
"learning_rate": 0.00016454576877239507,
"loss": 2.8904,
"step": 350
},
{
"epoch": 0.351,
"grad_norm": 0.85546875,
"learning_rate": 0.00016427876096865394,
"loss": 2.8798,
"step": 351
},
{
"epoch": 0.352,
"grad_norm": 0.93359375,
"learning_rate": 0.00016401096994849557,
"loss": 2.9685,
"step": 352
},
{
"epoch": 0.353,
"grad_norm": 0.890625,
"learning_rate": 0.000163742398974869,
"loss": 2.9498,
"step": 353
},
{
"epoch": 0.354,
"grad_norm": 0.93359375,
"learning_rate": 0.00016347305132022677,
"loss": 2.9719,
"step": 354
},
{
"epoch": 0.355,
"grad_norm": 0.87890625,
"learning_rate": 0.0001632029302664851,
"loss": 2.9964,
"step": 355
},
{
"epoch": 0.356,
"grad_norm": 0.921875,
"learning_rate": 0.00016293203910498376,
"loss": 2.9672,
"step": 356
},
{
"epoch": 0.357,
"grad_norm": 0.87109375,
"learning_rate": 0.00016266038113644607,
"loss": 2.944,
"step": 357
},
{
"epoch": 0.358,
"grad_norm": 0.87890625,
"learning_rate": 0.00016238795967093864,
"loss": 2.928,
"step": 358
},
{
"epoch": 0.359,
"grad_norm": 0.91015625,
"learning_rate": 0.00016211477802783103,
"loss": 2.8984,
"step": 359
},
{
"epoch": 0.36,
"grad_norm": 1.6640625,
"learning_rate": 0.0001618408395357554,
"loss": 2.807,
"step": 360
},
{
"epoch": 0.361,
"grad_norm": 0.98828125,
"learning_rate": 0.0001615661475325658,
"loss": 2.9199,
"step": 361
},
{
"epoch": 0.362,
"grad_norm": 1.1171875,
"learning_rate": 0.00016129070536529766,
"loss": 2.9442,
"step": 362
},
{
"epoch": 0.363,
"grad_norm": 1.1484375,
"learning_rate": 0.0001610145163901268,
"loss": 2.8548,
"step": 363
},
{
"epoch": 0.364,
"grad_norm": 0.8671875,
"learning_rate": 0.00016073758397232868,
"loss": 2.9272,
"step": 364
},
{
"epoch": 0.365,
"grad_norm": 0.890625,
"learning_rate": 0.0001604599114862375,
"loss": 2.9774,
"step": 365
},
{
"epoch": 0.366,
"grad_norm": 0.9609375,
"learning_rate": 0.00016018150231520486,
"loss": 2.8988,
"step": 366
},
{
"epoch": 0.367,
"grad_norm": 0.8984375,
"learning_rate": 0.0001599023598515586,
"loss": 2.8573,
"step": 367
},
{
"epoch": 0.368,
"grad_norm": 1.4609375,
"learning_rate": 0.0001596224874965616,
"loss": 2.86,
"step": 368
},
{
"epoch": 0.369,
"grad_norm": 0.890625,
"learning_rate": 0.00015934188866037016,
"loss": 2.9432,
"step": 369
},
{
"epoch": 0.37,
"grad_norm": 0.875,
"learning_rate": 0.00015906056676199255,
"loss": 2.8969,
"step": 370
},
{
"epoch": 0.371,
"grad_norm": 0.91015625,
"learning_rate": 0.00015877852522924732,
"loss": 2.968,
"step": 371
},
{
"epoch": 0.372,
"grad_norm": 0.86328125,
"learning_rate": 0.00015849576749872157,
"loss": 2.885,
"step": 372
},
{
"epoch": 0.373,
"grad_norm": 0.82421875,
"learning_rate": 0.00015821229701572896,
"loss": 2.9352,
"step": 373
},
{
"epoch": 0.374,
"grad_norm": 0.87109375,
"learning_rate": 0.0001579281172342679,
"loss": 2.7847,
"step": 374
},
{
"epoch": 0.375,
"grad_norm": 1.2109375,
"learning_rate": 0.00015764323161697935,
"loss": 2.9347,
"step": 375
},
{
"epoch": 0.376,
"grad_norm": 0.98046875,
"learning_rate": 0.0001573576436351046,
"loss": 2.8265,
"step": 376
},
{
"epoch": 0.377,
"grad_norm": 0.921875,
"learning_rate": 0.0001570713567684432,
"loss": 2.8173,
"step": 377
},
{
"epoch": 0.378,
"grad_norm": 0.9921875,
"learning_rate": 0.00015678437450531013,
"loss": 2.8425,
"step": 378
},
{
"epoch": 0.379,
"grad_norm": 0.91015625,
"learning_rate": 0.0001564967003424938,
"loss": 2.7723,
"step": 379
},
{
"epoch": 0.38,
"grad_norm": 0.859375,
"learning_rate": 0.00015620833778521307,
"loss": 2.9031,
"step": 380
},
{
"epoch": 0.381,
"grad_norm": 0.85546875,
"learning_rate": 0.0001559192903470747,
"loss": 2.9154,
"step": 381
},
{
"epoch": 0.382,
"grad_norm": 0.86328125,
"learning_rate": 0.0001556295615500305,
"loss": 2.8226,
"step": 382
},
{
"epoch": 0.383,
"grad_norm": 1.015625,
"learning_rate": 0.00015533915492433443,
"loss": 2.8911,
"step": 383
},
{
"epoch": 0.384,
"grad_norm": 0.8828125,
"learning_rate": 0.00015504807400849958,
"loss": 2.8114,
"step": 384
},
{
"epoch": 0.385,
"grad_norm": 0.890625,
"learning_rate": 0.00015475632234925504,
"loss": 2.8762,
"step": 385
},
{
"epoch": 0.386,
"grad_norm": 0.828125,
"learning_rate": 0.00015446390350150273,
"loss": 2.8918,
"step": 386
},
{
"epoch": 0.387,
"grad_norm": 0.9296875,
"learning_rate": 0.000154170821028274,
"loss": 2.9433,
"step": 387
},
{
"epoch": 0.388,
"grad_norm": 0.84375,
"learning_rate": 0.0001538770785006863,
"loss": 2.871,
"step": 388
},
{
"epoch": 0.389,
"grad_norm": 0.90625,
"learning_rate": 0.00015358267949789966,
"loss": 2.8932,
"step": 389
},
{
"epoch": 0.39,
"grad_norm": 0.86328125,
"learning_rate": 0.000153287627607073,
"loss": 2.8251,
"step": 390
},
{
"epoch": 0.391,
"grad_norm": 1.21875,
"learning_rate": 0.0001529919264233205,
"loss": 2.9062,
"step": 391
},
{
"epoch": 0.392,
"grad_norm": 1.0625,
"learning_rate": 0.00015269557954966778,
"loss": 2.8995,
"step": 392
},
{
"epoch": 0.393,
"grad_norm": 0.796875,
"learning_rate": 0.00015239859059700794,
"loss": 2.7726,
"step": 393
},
{
"epoch": 0.394,
"grad_norm": 1.0625,
"learning_rate": 0.00015210096318405767,
"loss": 2.7649,
"step": 394
},
{
"epoch": 0.395,
"grad_norm": 0.92578125,
"learning_rate": 0.00015180270093731303,
"loss": 2.778,
"step": 395
},
{
"epoch": 0.396,
"grad_norm": 0.94921875,
"learning_rate": 0.00015150380749100545,
"loss": 2.8322,
"step": 396
},
{
"epoch": 0.397,
"grad_norm": 0.91796875,
"learning_rate": 0.00015120428648705717,
"loss": 2.8831,
"step": 397
},
{
"epoch": 0.398,
"grad_norm": 0.8671875,
"learning_rate": 0.00015090414157503714,
"loss": 2.8862,
"step": 398
},
{
"epoch": 0.399,
"grad_norm": 0.84765625,
"learning_rate": 0.00015060337641211637,
"loss": 2.8219,
"step": 399
},
{
"epoch": 0.4,
"grad_norm": 0.7890625,
"learning_rate": 0.00015030199466302353,
"loss": 2.8894,
"step": 400
},
{
"epoch": 0.401,
"grad_norm": 0.83984375,
"learning_rate": 0.00015000000000000001,
"loss": 2.8594,
"step": 401
},
{
"epoch": 0.402,
"grad_norm": 0.875,
"learning_rate": 0.00014969739610275556,
"loss": 2.7943,
"step": 402
},
{
"epoch": 0.403,
"grad_norm": 0.890625,
"learning_rate": 0.0001493941866584231,
"loss": 2.9105,
"step": 403
},
{
"epoch": 0.404,
"grad_norm": 1.3046875,
"learning_rate": 0.00014909037536151409,
"loss": 2.9341,
"step": 404
},
{
"epoch": 0.405,
"grad_norm": 0.984375,
"learning_rate": 0.0001487859659138733,
"loss": 2.8726,
"step": 405
},
{
"epoch": 0.406,
"grad_norm": 0.796875,
"learning_rate": 0.00014848096202463372,
"loss": 2.9266,
"step": 406
},
{
"epoch": 0.407,
"grad_norm": 12.5,
"learning_rate": 0.00014817536741017152,
"loss": 2.8098,
"step": 407
},
{
"epoch": 0.408,
"grad_norm": 1.21875,
"learning_rate": 0.0001478691857940607,
"loss": 2.8836,
"step": 408
},
{
"epoch": 0.409,
"grad_norm": 1.1328125,
"learning_rate": 0.00014756242090702756,
"loss": 2.8176,
"step": 409
},
{
"epoch": 0.41,
"grad_norm": 1.265625,
"learning_rate": 0.00014725507648690543,
"loss": 2.8858,
"step": 410
},
{
"epoch": 0.411,
"grad_norm": 1.1328125,
"learning_rate": 0.00014694715627858908,
"loss": 2.8898,
"step": 411
},
{
"epoch": 0.412,
"grad_norm": 1.078125,
"learning_rate": 0.00014663866403398913,
"loss": 2.8529,
"step": 412
},
{
"epoch": 0.413,
"grad_norm": 1.0625,
"learning_rate": 0.00014632960351198618,
"loss": 2.8151,
"step": 413
},
{
"epoch": 0.414,
"grad_norm": 1.0078125,
"learning_rate": 0.00014601997847838518,
"loss": 2.9201,
"step": 414
},
{
"epoch": 0.415,
"grad_norm": 1.0078125,
"learning_rate": 0.00014570979270586945,
"loss": 2.7901,
"step": 415
},
{
"epoch": 0.416,
"grad_norm": 1.1015625,
"learning_rate": 0.00014539904997395468,
"loss": 2.8675,
"step": 416
},
{
"epoch": 0.417,
"grad_norm": 0.9453125,
"learning_rate": 0.00014508775406894307,
"loss": 2.7743,
"step": 417
},
{
"epoch": 0.418,
"grad_norm": 0.98046875,
"learning_rate": 0.00014477590878387696,
"loss": 2.8796,
"step": 418
},
{
"epoch": 0.419,
"grad_norm": 0.94921875,
"learning_rate": 0.00014446351791849276,
"loss": 2.9855,
"step": 419
},
{
"epoch": 0.42,
"grad_norm": 0.95703125,
"learning_rate": 0.00014415058527917452,
"loss": 2.8645,
"step": 420
},
{
"epoch": 0.421,
"grad_norm": 0.91015625,
"learning_rate": 0.00014383711467890774,
"loss": 2.7821,
"step": 421
},
{
"epoch": 0.422,
"grad_norm": 5.53125,
"learning_rate": 0.00014352310993723277,
"loss": 2.9062,
"step": 422
},
{
"epoch": 0.423,
"grad_norm": 0.96484375,
"learning_rate": 0.00014320857488019824,
"loss": 2.8564,
"step": 423
},
{
"epoch": 0.424,
"grad_norm": 0.84765625,
"learning_rate": 0.0001428935133403146,
"loss": 2.855,
"step": 424
},
{
"epoch": 0.425,
"grad_norm": 0.91796875,
"learning_rate": 0.00014257792915650728,
"loss": 2.907,
"step": 425
},
{
"epoch": 0.426,
"grad_norm": 1.0546875,
"learning_rate": 0.00014226182617406996,
"loss": 2.8238,
"step": 426
},
{
"epoch": 0.427,
"grad_norm": 1.09375,
"learning_rate": 0.00014194520824461771,
"loss": 2.8523,
"step": 427
},
{
"epoch": 0.428,
"grad_norm": 1.1015625,
"learning_rate": 0.00014162807922604012,
"loss": 2.91,
"step": 428
},
{
"epoch": 0.429,
"grad_norm": 1.2109375,
"learning_rate": 0.0001413104429824542,
"loss": 2.9161,
"step": 429
},
{
"epoch": 0.43,
"grad_norm": 0.8515625,
"learning_rate": 0.00014099230338415728,
"loss": 2.9304,
"step": 430
},
{
"epoch": 0.431,
"grad_norm": 0.75,
"learning_rate": 0.00014067366430758004,
"loss": 2.8266,
"step": 431
},
{
"epoch": 0.432,
"grad_norm": 0.8203125,
"learning_rate": 0.00014035452963523902,
"loss": 2.8303,
"step": 432
},
{
"epoch": 0.433,
"grad_norm": 0.7890625,
"learning_rate": 0.00014003490325568954,
"loss": 2.8311,
"step": 433
},
{
"epoch": 0.434,
"grad_norm": 0.7265625,
"learning_rate": 0.00013971478906347806,
"loss": 2.826,
"step": 434
},
{
"epoch": 0.435,
"grad_norm": 0.8828125,
"learning_rate": 0.00013939419095909512,
"loss": 2.6267,
"step": 435
},
{
"epoch": 0.436,
"grad_norm": 0.77734375,
"learning_rate": 0.00013907311284892736,
"loss": 2.7203,
"step": 436
},
{
"epoch": 0.437,
"grad_norm": 0.7421875,
"learning_rate": 0.0001387515586452103,
"loss": 2.8801,
"step": 437
},
{
"epoch": 0.438,
"grad_norm": 2.421875,
"learning_rate": 0.00013842953226598037,
"loss": 2.9568,
"step": 438
},
{
"epoch": 0.439,
"grad_norm": 0.9375,
"learning_rate": 0.00013810703763502744,
"loss": 2.758,
"step": 439
},
{
"epoch": 0.44,
"grad_norm": 1.3046875,
"learning_rate": 0.00013778407868184672,
"loss": 2.8374,
"step": 440
},
{
"epoch": 0.441,
"grad_norm": 0.91796875,
"learning_rate": 0.00013746065934159123,
"loss": 2.7894,
"step": 441
},
{
"epoch": 0.442,
"grad_norm": 0.87109375,
"learning_rate": 0.00013713678355502351,
"loss": 2.855,
"step": 442
},
{
"epoch": 0.443,
"grad_norm": 0.734375,
"learning_rate": 0.00013681245526846783,
"loss": 2.777,
"step": 443
},
{
"epoch": 0.444,
"grad_norm": 0.875,
"learning_rate": 0.00013648767843376196,
"loss": 2.8408,
"step": 444
},
{
"epoch": 0.445,
"grad_norm": 0.80078125,
"learning_rate": 0.00013616245700820922,
"loss": 2.8602,
"step": 445
},
{
"epoch": 0.446,
"grad_norm": 0.8359375,
"learning_rate": 0.00013583679495453,
"loss": 2.9452,
"step": 446
},
{
"epoch": 0.447,
"grad_norm": 0.83203125,
"learning_rate": 0.0001355106962408137,
"loss": 2.799,
"step": 447
},
{
"epoch": 0.448,
"grad_norm": 0.78125,
"learning_rate": 0.00013518416484047018,
"loss": 2.8164,
"step": 448
},
{
"epoch": 0.449,
"grad_norm": 0.78515625,
"learning_rate": 0.00013485720473218154,
"loss": 2.9159,
"step": 449
},
{
"epoch": 0.45,
"grad_norm": 0.77734375,
"learning_rate": 0.00013452981989985348,
"loss": 2.8874,
"step": 450
},
{
"epoch": 0.451,
"grad_norm": 0.984375,
"learning_rate": 0.00013420201433256689,
"loss": 2.8846,
"step": 451
},
{
"epoch": 0.452,
"grad_norm": 0.78515625,
"learning_rate": 0.00013387379202452917,
"loss": 2.8474,
"step": 452
},
{
"epoch": 0.453,
"grad_norm": 0.86328125,
"learning_rate": 0.00013354515697502553,
"loss": 2.8697,
"step": 453
},
{
"epoch": 0.454,
"grad_norm": 2.875,
"learning_rate": 0.00013321611318837032,
"loss": 2.8103,
"step": 454
},
{
"epoch": 0.455,
"grad_norm": 0.88671875,
"learning_rate": 0.00013288666467385833,
"loss": 2.7893,
"step": 455
},
{
"epoch": 0.456,
"grad_norm": 0.734375,
"learning_rate": 0.00013255681544571568,
"loss": 2.8302,
"step": 456
},
{
"epoch": 0.457,
"grad_norm": 0.76953125,
"learning_rate": 0.00013222656952305113,
"loss": 2.8056,
"step": 457
},
{
"epoch": 0.458,
"grad_norm": 0.7734375,
"learning_rate": 0.00013189593092980702,
"loss": 2.8463,
"step": 458
},
{
"epoch": 0.459,
"grad_norm": 0.75,
"learning_rate": 0.00013156490369471027,
"loss": 2.7264,
"step": 459
},
{
"epoch": 0.46,
"grad_norm": 0.75390625,
"learning_rate": 0.00013123349185122327,
"loss": 2.7938,
"step": 460
},
{
"epoch": 0.461,
"grad_norm": 0.76953125,
"learning_rate": 0.00013090169943749476,
"loss": 2.7834,
"step": 461
},
{
"epoch": 0.462,
"grad_norm": 0.7265625,
"learning_rate": 0.00013056953049631057,
"loss": 2.8079,
"step": 462
},
{
"epoch": 0.463,
"grad_norm": 0.73046875,
"learning_rate": 0.00013023698907504446,
"loss": 2.7598,
"step": 463
},
{
"epoch": 0.464,
"grad_norm": 0.71875,
"learning_rate": 0.00012990407922560868,
"loss": 2.8624,
"step": 464
},
{
"epoch": 0.465,
"grad_norm": 0.7421875,
"learning_rate": 0.00012957080500440468,
"loss": 2.8337,
"step": 465
},
{
"epoch": 0.466,
"grad_norm": 0.71484375,
"learning_rate": 0.00012923717047227368,
"loss": 2.8641,
"step": 466
},
{
"epoch": 0.467,
"grad_norm": 0.6953125,
"learning_rate": 0.00012890317969444716,
"loss": 2.832,
"step": 467
},
{
"epoch": 0.468,
"grad_norm": 0.765625,
"learning_rate": 0.00012856883674049736,
"loss": 2.9126,
"step": 468
},
{
"epoch": 0.469,
"grad_norm": 3.4375,
"learning_rate": 0.00012823414568428768,
"loss": 2.9386,
"step": 469
},
{
"epoch": 0.47,
"grad_norm": 0.81640625,
"learning_rate": 0.00012789911060392294,
"loss": 2.7886,
"step": 470
},
{
"epoch": 0.471,
"grad_norm": 0.7421875,
"learning_rate": 0.0001275637355816999,
"loss": 2.9174,
"step": 471
},
{
"epoch": 0.472,
"grad_norm": 0.7890625,
"learning_rate": 0.00012722802470405744,
"loss": 2.7964,
"step": 472
},
{
"epoch": 0.473,
"grad_norm": 0.83984375,
"learning_rate": 0.00012689198206152657,
"loss": 2.779,
"step": 473
},
{
"epoch": 0.474,
"grad_norm": 0.7421875,
"learning_rate": 0.00012655561174868088,
"loss": 2.8909,
"step": 474
},
{
"epoch": 0.475,
"grad_norm": 0.7890625,
"learning_rate": 0.00012621891786408648,
"loss": 2.8705,
"step": 475
},
{
"epoch": 0.476,
"grad_norm": 0.83203125,
"learning_rate": 0.00012588190451025207,
"loss": 2.9862,
"step": 476
},
{
"epoch": 0.477,
"grad_norm": 0.74609375,
"learning_rate": 0.00012554457579357905,
"loss": 2.8599,
"step": 477
},
{
"epoch": 0.478,
"grad_norm": 0.73828125,
"learning_rate": 0.0001252069358243114,
"loss": 2.9299,
"step": 478
},
{
"epoch": 0.479,
"grad_norm": 0.78125,
"learning_rate": 0.0001248689887164855,
"loss": 2.8572,
"step": 479
},
{
"epoch": 0.48,
"grad_norm": 0.78515625,
"learning_rate": 0.00012453073858788026,
"loss": 2.8889,
"step": 480
},
{
"epoch": 0.481,
"grad_norm": 0.7109375,
"learning_rate": 0.00012419218955996676,
"loss": 2.8363,
"step": 481
},
{
"epoch": 0.482,
"grad_norm": 0.7578125,
"learning_rate": 0.0001238533457578581,
"loss": 2.8624,
"step": 482
},
{
"epoch": 0.483,
"grad_norm": 0.7734375,
"learning_rate": 0.000123514211310259,
"loss": 2.8517,
"step": 483
},
{
"epoch": 0.484,
"grad_norm": 0.7421875,
"learning_rate": 0.00012317479034941573,
"loss": 2.8247,
"step": 484
},
{
"epoch": 0.485,
"grad_norm": 2.140625,
"learning_rate": 0.00012283508701106557,
"loss": 2.8114,
"step": 485
},
{
"epoch": 0.486,
"grad_norm": 0.8515625,
"learning_rate": 0.0001224951054343865,
"loss": 2.7308,
"step": 486
},
{
"epoch": 0.487,
"grad_norm": 0.76171875,
"learning_rate": 0.00012215484976194676,
"loss": 2.8445,
"step": 487
},
{
"epoch": 0.488,
"grad_norm": 0.8828125,
"learning_rate": 0.00012181432413965428,
"loss": 2.933,
"step": 488
},
{
"epoch": 0.489,
"grad_norm": 0.72265625,
"learning_rate": 0.00012147353271670634,
"loss": 2.8725,
"step": 489
},
{
"epoch": 0.49,
"grad_norm": 0.796875,
"learning_rate": 0.00012113247964553888,
"loss": 2.7981,
"step": 490
},
{
"epoch": 0.491,
"grad_norm": 0.74609375,
"learning_rate": 0.00012079116908177593,
"loss": 2.8803,
"step": 491
},
{
"epoch": 0.492,
"grad_norm": 0.73046875,
"learning_rate": 0.00012044960518417903,
"loss": 2.7637,
"step": 492
},
{
"epoch": 0.493,
"grad_norm": 0.75,
"learning_rate": 0.00012010779211459648,
"loss": 2.888,
"step": 493
},
{
"epoch": 0.494,
"grad_norm": 0.7265625,
"learning_rate": 0.00011976573403791262,
"loss": 2.815,
"step": 494
},
{
"epoch": 0.495,
"grad_norm": 0.77734375,
"learning_rate": 0.0001194234351219972,
"loss": 2.7684,
"step": 495
},
{
"epoch": 0.496,
"grad_norm": 0.6953125,
"learning_rate": 0.00011908089953765449,
"loss": 2.7292,
"step": 496
},
{
"epoch": 0.497,
"grad_norm": 0.69921875,
"learning_rate": 0.00011873813145857249,
"loss": 2.7949,
"step": 497
},
{
"epoch": 0.498,
"grad_norm": 0.6953125,
"learning_rate": 0.00011839513506127203,
"loss": 2.7681,
"step": 498
},
{
"epoch": 0.499,
"grad_norm": 0.69921875,
"learning_rate": 0.00011805191452505602,
"loss": 2.6686,
"step": 499
},
{
"epoch": 0.5,
"grad_norm": 1.2578125,
"learning_rate": 0.00011770847403195834,
"loss": 2.7635,
"step": 500
},
{
"epoch": 0.5,
"eval_loss": 2.7894091606140137,
"eval_runtime": 31.8024,
"eval_samples_per_second": 15.722,
"eval_steps_per_second": 2.641,
"step": 500
},
{
"epoch": 0.501,
"grad_norm": 0.7421875,
"learning_rate": 0.00011736481776669306,
"loss": 2.7591,
"step": 501
},
{
"epoch": 0.502,
"grad_norm": 0.73828125,
"learning_rate": 0.00011702094991660326,
"loss": 2.8107,
"step": 502
},
{
"epoch": 0.503,
"grad_norm": 0.734375,
"learning_rate": 0.00011667687467161024,
"loss": 2.8147,
"step": 503
},
{
"epoch": 0.504,
"grad_norm": 0.76171875,
"learning_rate": 0.00011633259622416224,
"loss": 2.8342,
"step": 504
},
{
"epoch": 0.505,
"grad_norm": 0.79296875,
"learning_rate": 0.0001159881187691835,
"loss": 2.8037,
"step": 505
},
{
"epoch": 0.506,
"grad_norm": 0.734375,
"learning_rate": 0.0001156434465040231,
"loss": 2.8258,
"step": 506
},
{
"epoch": 0.507,
"grad_norm": 0.84375,
"learning_rate": 0.00011529858362840382,
"loss": 2.8476,
"step": 507
},
{
"epoch": 0.508,
"grad_norm": 0.74609375,
"learning_rate": 0.00011495353434437098,
"loss": 2.7726,
"step": 508
},
{
"epoch": 0.509,
"grad_norm": 0.7890625,
"learning_rate": 0.00011460830285624118,
"loss": 2.7936,
"step": 509
},
{
"epoch": 0.51,
"grad_norm": 0.71875,
"learning_rate": 0.00011426289337055119,
"loss": 2.7585,
"step": 510
},
{
"epoch": 0.511,
"grad_norm": 0.703125,
"learning_rate": 0.00011391731009600654,
"loss": 2.7722,
"step": 511
},
{
"epoch": 0.512,
"grad_norm": 0.81640625,
"learning_rate": 0.00011357155724343045,
"loss": 2.8518,
"step": 512
},
{
"epoch": 0.513,
"grad_norm": 0.6484375,
"learning_rate": 0.00011322563902571226,
"loss": 2.8035,
"step": 513
},
{
"epoch": 0.514,
"grad_norm": 0.72265625,
"learning_rate": 0.0001128795596577563,
"loss": 2.8544,
"step": 514
},
{
"epoch": 0.515,
"grad_norm": 0.73828125,
"learning_rate": 0.00011253332335643043,
"loss": 2.8212,
"step": 515
},
{
"epoch": 0.516,
"grad_norm": 2.21875,
"learning_rate": 0.00011218693434051475,
"loss": 2.7369,
"step": 516
},
{
"epoch": 0.517,
"grad_norm": 0.8203125,
"learning_rate": 0.00011184039683065013,
"loss": 2.7366,
"step": 517
},
{
"epoch": 0.518,
"grad_norm": 0.73046875,
"learning_rate": 0.00011149371504928668,
"loss": 2.8155,
"step": 518
},
{
"epoch": 0.519,
"grad_norm": 0.77734375,
"learning_rate": 0.00011114689322063255,
"loss": 2.7464,
"step": 519
},
{
"epoch": 0.52,
"grad_norm": 0.8203125,
"learning_rate": 0.0001107999355706023,
"loss": 2.796,
"step": 520
},
{
"epoch": 0.521,
"grad_norm": 0.71875,
"learning_rate": 0.00011045284632676536,
"loss": 2.7958,
"step": 521
},
{
"epoch": 0.522,
"grad_norm": 0.703125,
"learning_rate": 0.00011010562971829463,
"loss": 2.7453,
"step": 522
},
{
"epoch": 0.523,
"grad_norm": 0.7421875,
"learning_rate": 0.00010975828997591495,
"loss": 2.7748,
"step": 523
},
{
"epoch": 0.524,
"grad_norm": 0.65625,
"learning_rate": 0.00010941083133185146,
"loss": 2.8185,
"step": 524
},
{
"epoch": 0.525,
"grad_norm": 0.7890625,
"learning_rate": 0.00010906325801977804,
"loss": 2.7946,
"step": 525
},
{
"epoch": 0.526,
"grad_norm": 0.71875,
"learning_rate": 0.00010871557427476583,
"loss": 2.8366,
"step": 526
},
{
"epoch": 0.527,
"grad_norm": 0.63671875,
"learning_rate": 0.00010836778433323158,
"loss": 2.81,
"step": 527
},
{
"epoch": 0.528,
"grad_norm": 0.7109375,
"learning_rate": 0.00010801989243288589,
"loss": 2.7869,
"step": 528
},
{
"epoch": 0.529,
"grad_norm": 0.6953125,
"learning_rate": 0.00010767190281268187,
"loss": 2.7461,
"step": 529
},
{
"epoch": 0.53,
"grad_norm": 0.6875,
"learning_rate": 0.00010732381971276318,
"loss": 2.7465,
"step": 530
},
{
"epoch": 0.531,
"grad_norm": 0.91796875,
"learning_rate": 0.00010697564737441252,
"loss": 2.7434,
"step": 531
},
{
"epoch": 0.532,
"grad_norm": 6.3125,
"learning_rate": 0.00010662739004000005,
"loss": 2.7999,
"step": 532
},
{
"epoch": 0.533,
"grad_norm": 0.6953125,
"learning_rate": 0.00010627905195293135,
"loss": 2.7405,
"step": 533
},
{
"epoch": 0.534,
"grad_norm": 0.69921875,
"learning_rate": 0.00010593063735759618,
"loss": 2.8709,
"step": 534
},
{
"epoch": 0.535,
"grad_norm": 0.66796875,
"learning_rate": 0.00010558215049931638,
"loss": 2.9645,
"step": 535
},
{
"epoch": 0.536,
"grad_norm": 0.7265625,
"learning_rate": 0.0001052335956242944,
"loss": 2.7778,
"step": 536
},
{
"epoch": 0.537,
"grad_norm": 0.66796875,
"learning_rate": 0.00010488497697956135,
"loss": 2.7532,
"step": 537
},
{
"epoch": 0.538,
"grad_norm": 0.8203125,
"learning_rate": 0.00010453629881292538,
"loss": 2.8057,
"step": 538
},
{
"epoch": 0.539,
"grad_norm": 0.70703125,
"learning_rate": 0.00010418756537291996,
"loss": 2.75,
"step": 539
},
{
"epoch": 0.54,
"grad_norm": 0.7109375,
"learning_rate": 0.00010383878090875201,
"loss": 2.8593,
"step": 540
},
{
"epoch": 0.541,
"grad_norm": 0.6640625,
"learning_rate": 0.00010348994967025012,
"loss": 2.7264,
"step": 541
},
{
"epoch": 0.542,
"grad_norm": 0.703125,
"learning_rate": 0.00010314107590781284,
"loss": 2.8229,
"step": 542
},
{
"epoch": 0.543,
"grad_norm": 0.68359375,
"learning_rate": 0.0001027921638723569,
"loss": 2.7506,
"step": 543
},
{
"epoch": 0.544,
"grad_norm": 0.65625,
"learning_rate": 0.00010244321781526533,
"loss": 2.7689,
"step": 544
},
{
"epoch": 0.545,
"grad_norm": 0.734375,
"learning_rate": 0.0001020942419883357,
"loss": 2.766,
"step": 545
},
{
"epoch": 0.546,
"grad_norm": 0.69140625,
"learning_rate": 0.00010174524064372837,
"loss": 2.8915,
"step": 546
},
{
"epoch": 0.547,
"grad_norm": 2.625,
"learning_rate": 0.00010139621803391455,
"loss": 2.7575,
"step": 547
},
{
"epoch": 0.548,
"grad_norm": 0.7578125,
"learning_rate": 0.00010104717841162458,
"loss": 2.7613,
"step": 548
},
{
"epoch": 0.549,
"grad_norm": 0.6875,
"learning_rate": 0.00010069812602979615,
"loss": 2.8354,
"step": 549
},
{
"epoch": 0.55,
"grad_norm": 0.640625,
"learning_rate": 0.00010034906514152238,
"loss": 2.815,
"step": 550
},
{
"epoch": 0.551,
"grad_norm": 0.78515625,
"learning_rate": 0.0001,
"loss": 2.9268,
"step": 551
},
{
"epoch": 0.552,
"grad_norm": 0.7109375,
"learning_rate": 9.965093485847767e-05,
"loss": 2.7682,
"step": 552
},
{
"epoch": 0.553,
"grad_norm": 0.6484375,
"learning_rate": 9.930187397020386e-05,
"loss": 2.8294,
"step": 553
},
{
"epoch": 0.554,
"grad_norm": 0.734375,
"learning_rate": 9.895282158837545e-05,
"loss": 2.7828,
"step": 554
},
{
"epoch": 0.555,
"grad_norm": 0.6953125,
"learning_rate": 9.860378196608549e-05,
"loss": 2.7539,
"step": 555
},
{
"epoch": 0.556,
"grad_norm": 0.6953125,
"learning_rate": 9.825475935627165e-05,
"loss": 2.7614,
"step": 556
},
{
"epoch": 0.557,
"grad_norm": 0.73828125,
"learning_rate": 9.790575801166432e-05,
"loss": 2.7728,
"step": 557
},
{
"epoch": 0.558,
"grad_norm": 0.7265625,
"learning_rate": 9.755678218473469e-05,
"loss": 2.7955,
"step": 558
},
{
"epoch": 0.559,
"grad_norm": 0.6796875,
"learning_rate": 9.720783612764314e-05,
"loss": 2.8389,
"step": 559
},
{
"epoch": 0.56,
"grad_norm": 0.70703125,
"learning_rate": 9.685892409218717e-05,
"loss": 2.7221,
"step": 560
},
{
"epoch": 0.561,
"grad_norm": 0.69140625,
"learning_rate": 9.651005032974994e-05,
"loss": 2.7392,
"step": 561
},
{
"epoch": 0.562,
"grad_norm": 0.65625,
"learning_rate": 9.616121909124801e-05,
"loss": 2.7522,
"step": 562
},
{
"epoch": 0.563,
"grad_norm": 4.125,
"learning_rate": 9.581243462708006e-05,
"loss": 2.7441,
"step": 563
},
{
"epoch": 0.564,
"grad_norm": 0.81640625,
"learning_rate": 9.546370118707463e-05,
"loss": 2.7218,
"step": 564
},
{
"epoch": 0.565,
"grad_norm": 0.73046875,
"learning_rate": 9.511502302043868e-05,
"loss": 2.7805,
"step": 565
},
{
"epoch": 0.566,
"grad_norm": 0.67578125,
"learning_rate": 9.476640437570562e-05,
"loss": 2.7791,
"step": 566
},
{
"epoch": 0.567,
"grad_norm": 0.69921875,
"learning_rate": 9.441784950068362e-05,
"loss": 2.8229,
"step": 567
},
{
"epoch": 0.568,
"grad_norm": 0.71875,
"learning_rate": 9.406936264240386e-05,
"loss": 2.8902,
"step": 568
},
{
"epoch": 0.569,
"grad_norm": 0.71484375,
"learning_rate": 9.372094804706867e-05,
"loss": 2.8009,
"step": 569
},
{
"epoch": 0.57,
"grad_norm": 0.62890625,
"learning_rate": 9.337260996000002e-05,
"loss": 2.7867,
"step": 570
},
{
"epoch": 0.571,
"grad_norm": 0.71875,
"learning_rate": 9.302435262558747e-05,
"loss": 2.7513,
"step": 571
},
{
"epoch": 0.572,
"grad_norm": 0.6875,
"learning_rate": 9.267618028723686e-05,
"loss": 2.8066,
"step": 572
},
{
"epoch": 0.573,
"grad_norm": 0.6484375,
"learning_rate": 9.232809718731814e-05,
"loss": 2.818,
"step": 573
},
{
"epoch": 0.574,
"grad_norm": 0.69921875,
"learning_rate": 9.198010756711412e-05,
"loss": 2.6698,
"step": 574
},
{
"epoch": 0.575,
"grad_norm": 0.7109375,
"learning_rate": 9.163221566676847e-05,
"loss": 2.7901,
"step": 575
},
{
"epoch": 0.576,
"grad_norm": 2.0625,
"learning_rate": 9.128442572523417e-05,
"loss": 2.8648,
"step": 576
},
{
"epoch": 0.577,
"grad_norm": 0.78515625,
"learning_rate": 9.093674198022201e-05,
"loss": 2.8698,
"step": 577
},
{
"epoch": 0.578,
"grad_norm": 0.66796875,
"learning_rate": 9.058916866814858e-05,
"loss": 2.8594,
"step": 578
},
{
"epoch": 0.579,
"grad_norm": 3.65625,
"learning_rate": 9.024171002408506e-05,
"loss": 2.788,
"step": 579
},
{
"epoch": 0.58,
"grad_norm": 0.8125,
"learning_rate": 8.989437028170537e-05,
"loss": 2.7126,
"step": 580
},
{
"epoch": 0.581,
"grad_norm": 0.734375,
"learning_rate": 8.954715367323468e-05,
"loss": 2.7795,
"step": 581
},
{
"epoch": 0.582,
"grad_norm": 0.72265625,
"learning_rate": 8.920006442939772e-05,
"loss": 2.7243,
"step": 582
},
{
"epoch": 0.583,
"grad_norm": 0.828125,
"learning_rate": 8.885310677936746e-05,
"loss": 2.8055,
"step": 583
},
{
"epoch": 0.584,
"grad_norm": 0.66796875,
"learning_rate": 8.850628495071336e-05,
"loss": 2.8449,
"step": 584
},
{
"epoch": 0.585,
"grad_norm": 0.83203125,
"learning_rate": 8.81596031693499e-05,
"loss": 2.7153,
"step": 585
},
{
"epoch": 0.586,
"grad_norm": 0.7890625,
"learning_rate": 8.781306565948528e-05,
"loss": 2.8445,
"step": 586
},
{
"epoch": 0.587,
"grad_norm": 0.60546875,
"learning_rate": 8.746667664356956e-05,
"loss": 2.7783,
"step": 587
},
{
"epoch": 0.588,
"grad_norm": 0.6953125,
"learning_rate": 8.712044034224374e-05,
"loss": 2.8369,
"step": 588
},
{
"epoch": 0.589,
"grad_norm": 0.92578125,
"learning_rate": 8.677436097428775e-05,
"loss": 2.7912,
"step": 589
},
{
"epoch": 0.59,
"grad_norm": 0.6484375,
"learning_rate": 8.642844275656957e-05,
"loss": 2.8142,
"step": 590
},
{
"epoch": 0.591,
"grad_norm": 0.72265625,
"learning_rate": 8.608268990399349e-05,
"loss": 2.6924,
"step": 591
},
{
"epoch": 0.592,
"grad_norm": 0.734375,
"learning_rate": 8.573710662944885e-05,
"loss": 2.7975,
"step": 592
},
{
"epoch": 0.593,
"grad_norm": 0.69140625,
"learning_rate": 8.539169714375885e-05,
"loss": 2.8273,
"step": 593
},
{
"epoch": 0.594,
"grad_norm": 3.609375,
"learning_rate": 8.504646565562906e-05,
"loss": 2.7106,
"step": 594
},
{
"epoch": 0.595,
"grad_norm": 0.8359375,
"learning_rate": 8.47014163715962e-05,
"loss": 2.773,
"step": 595
},
{
"epoch": 0.596,
"grad_norm": 0.8203125,
"learning_rate": 8.435655349597689e-05,
"loss": 2.759,
"step": 596
},
{
"epoch": 0.597,
"grad_norm": 0.7265625,
"learning_rate": 8.401188123081653e-05,
"loss": 2.7223,
"step": 597
},
{
"epoch": 0.598,
"grad_norm": 0.73828125,
"learning_rate": 8.366740377583781e-05,
"loss": 2.7163,
"step": 598
},
{
"epoch": 0.599,
"grad_norm": 0.76953125,
"learning_rate": 8.332312532838978e-05,
"loss": 2.7544,
"step": 599
},
{
"epoch": 0.6,
"grad_norm": 0.71484375,
"learning_rate": 8.297905008339677e-05,
"loss": 2.7424,
"step": 600
},
{
"epoch": 0.601,
"grad_norm": 0.70703125,
"learning_rate": 8.263518223330697e-05,
"loss": 2.8959,
"step": 601
},
{
"epoch": 0.602,
"grad_norm": 0.6640625,
"learning_rate": 8.229152596804168e-05,
"loss": 2.665,
"step": 602
},
{
"epoch": 0.603,
"grad_norm": 0.6640625,
"learning_rate": 8.194808547494401e-05,
"loss": 2.8718,
"step": 603
},
{
"epoch": 0.604,
"grad_norm": 0.66796875,
"learning_rate": 8.160486493872798e-05,
"loss": 2.7585,
"step": 604
},
{
"epoch": 0.605,
"grad_norm": 0.65625,
"learning_rate": 8.126186854142752e-05,
"loss": 2.7911,
"step": 605
},
{
"epoch": 0.606,
"grad_norm": 0.92578125,
"learning_rate": 8.091910046234552e-05,
"loss": 2.6548,
"step": 606
},
{
"epoch": 0.607,
"grad_norm": 0.625,
"learning_rate": 8.057656487800282e-05,
"loss": 2.7227,
"step": 607
},
{
"epoch": 0.608,
"grad_norm": 0.66796875,
"learning_rate": 8.023426596208739e-05,
"loss": 2.7485,
"step": 608
},
{
"epoch": 0.609,
"grad_norm": 0.65625,
"learning_rate": 7.989220788540355e-05,
"loss": 2.7348,
"step": 609
},
{
"epoch": 0.61,
"grad_norm": 1.546875,
"learning_rate": 7.955039481582097e-05,
"loss": 2.7158,
"step": 610
},
{
"epoch": 0.611,
"grad_norm": 0.69921875,
"learning_rate": 7.920883091822408e-05,
"loss": 2.6516,
"step": 611
},
{
"epoch": 0.612,
"grad_norm": 0.6484375,
"learning_rate": 7.886752035446114e-05,
"loss": 2.8051,
"step": 612
},
{
"epoch": 0.613,
"grad_norm": 0.59765625,
"learning_rate": 7.852646728329368e-05,
"loss": 2.7428,
"step": 613
},
{
"epoch": 0.614,
"grad_norm": 0.70703125,
"learning_rate": 7.818567586034577e-05,
"loss": 2.8212,
"step": 614
},
{
"epoch": 0.615,
"grad_norm": 0.66015625,
"learning_rate": 7.784515023805328e-05,
"loss": 2.7348,
"step": 615
},
{
"epoch": 0.616,
"grad_norm": 0.69921875,
"learning_rate": 7.750489456561352e-05,
"loss": 2.7394,
"step": 616
},
{
"epoch": 0.617,
"grad_norm": 0.67578125,
"learning_rate": 7.716491298893442e-05,
"loss": 2.6866,
"step": 617
},
{
"epoch": 0.618,
"grad_norm": 0.67578125,
"learning_rate": 7.682520965058428e-05,
"loss": 2.6922,
"step": 618
},
{
"epoch": 0.619,
"grad_norm": 0.64453125,
"learning_rate": 7.6485788689741e-05,
"loss": 2.6569,
"step": 619
},
{
"epoch": 0.62,
"grad_norm": 0.640625,
"learning_rate": 7.614665424214193e-05,
"loss": 2.752,
"step": 620
},
{
"epoch": 0.621,
"grad_norm": 0.69921875,
"learning_rate": 7.580781044003324e-05,
"loss": 2.7281,
"step": 621
},
{
"epoch": 0.622,
"grad_norm": 1.171875,
"learning_rate": 7.546926141211974e-05,
"loss": 2.8616,
"step": 622
},
{
"epoch": 0.623,
"grad_norm": 0.6796875,
"learning_rate": 7.513101128351454e-05,
"loss": 2.8221,
"step": 623
},
{
"epoch": 0.624,
"grad_norm": 0.640625,
"learning_rate": 7.479306417568864e-05,
"loss": 2.8145,
"step": 624
},
{
"epoch": 0.625,
"grad_norm": 1.8359375,
"learning_rate": 7.445542420642097e-05,
"loss": 2.6816,
"step": 625
},
{
"epoch": 0.626,
"grad_norm": 0.640625,
"learning_rate": 7.411809548974792e-05,
"loss": 2.6947,
"step": 626
},
{
"epoch": 0.627,
"grad_norm": 0.66796875,
"learning_rate": 7.378108213591355e-05,
"loss": 2.7485,
"step": 627
},
{
"epoch": 0.628,
"grad_norm": 0.66015625,
"learning_rate": 7.344438825131911e-05,
"loss": 2.7634,
"step": 628
},
{
"epoch": 0.629,
"grad_norm": 0.60546875,
"learning_rate": 7.310801793847344e-05,
"loss": 2.7374,
"step": 629
},
{
"epoch": 0.63,
"grad_norm": 0.65625,
"learning_rate": 7.277197529594257e-05,
"loss": 2.6431,
"step": 630
},
{
"epoch": 0.631,
"grad_norm": 0.66796875,
"learning_rate": 7.243626441830009e-05,
"loss": 2.6069,
"step": 631
},
{
"epoch": 0.632,
"grad_norm": 0.6328125,
"learning_rate": 7.210088939607708e-05,
"loss": 2.7996,
"step": 632
},
{
"epoch": 0.633,
"grad_norm": 1.046875,
"learning_rate": 7.176585431571235e-05,
"loss": 2.7437,
"step": 633
},
{
"epoch": 0.634,
"grad_norm": 0.62890625,
"learning_rate": 7.143116325950265e-05,
"loss": 2.7624,
"step": 634
},
{
"epoch": 0.635,
"grad_norm": 0.59765625,
"learning_rate": 7.109682030555283e-05,
"loss": 2.715,
"step": 635
},
{
"epoch": 0.636,
"grad_norm": 0.70703125,
"learning_rate": 7.076282952772633e-05,
"loss": 2.7208,
"step": 636
},
{
"epoch": 0.637,
"grad_norm": 0.6328125,
"learning_rate": 7.042919499559537e-05,
"loss": 2.7576,
"step": 637
},
{
"epoch": 0.638,
"grad_norm": 0.58203125,
"learning_rate": 7.009592077439134e-05,
"loss": 2.8255,
"step": 638
},
{
"epoch": 0.639,
"grad_norm": 0.640625,
"learning_rate": 6.976301092495556e-05,
"loss": 2.7823,
"step": 639
},
{
"epoch": 0.64,
"grad_norm": 0.64453125,
"learning_rate": 6.943046950368944e-05,
"loss": 2.6574,
"step": 640
},
{
"epoch": 0.641,
"grad_norm": 1.2578125,
"learning_rate": 6.909830056250527e-05,
"loss": 2.6497,
"step": 641
},
{
"epoch": 0.642,
"grad_norm": 0.6171875,
"learning_rate": 6.876650814877674e-05,
"loss": 2.7926,
"step": 642
},
{
"epoch": 0.643,
"grad_norm": 0.63671875,
"learning_rate": 6.843509630528977e-05,
"loss": 2.6997,
"step": 643
},
{
"epoch": 0.644,
"grad_norm": 0.61328125,
"learning_rate": 6.8104069070193e-05,
"loss": 2.7251,
"step": 644
},
{
"epoch": 0.645,
"grad_norm": 0.62109375,
"learning_rate": 6.77734304769489e-05,
"loss": 2.6186,
"step": 645
},
{
"epoch": 0.646,
"grad_norm": 0.6171875,
"learning_rate": 6.744318455428436e-05,
"loss": 2.7101,
"step": 646
},
{
"epoch": 0.647,
"grad_norm": 0.66015625,
"learning_rate": 6.711333532614168e-05,
"loss": 2.6802,
"step": 647
},
{
"epoch": 0.648,
"grad_norm": 0.671875,
"learning_rate": 6.67838868116297e-05,
"loss": 2.7727,
"step": 648
},
{
"epoch": 0.649,
"grad_norm": 0.640625,
"learning_rate": 6.64548430249745e-05,
"loss": 2.8216,
"step": 649
},
{
"epoch": 0.65,
"grad_norm": 0.6640625,
"learning_rate": 6.612620797547087e-05,
"loss": 2.8565,
"step": 650
},
{
"epoch": 0.651,
"grad_norm": 0.671875,
"learning_rate": 6.579798566743314e-05,
"loss": 2.795,
"step": 651
},
{
"epoch": 0.652,
"grad_norm": 0.66015625,
"learning_rate": 6.547018010014654e-05,
"loss": 2.7028,
"step": 652
},
{
"epoch": 0.653,
"grad_norm": 0.625,
"learning_rate": 6.51427952678185e-05,
"loss": 2.7782,
"step": 653
},
{
"epoch": 0.654,
"grad_norm": 0.60546875,
"learning_rate": 6.481583515952983e-05,
"loss": 2.7242,
"step": 654
},
{
"epoch": 0.655,
"grad_norm": 0.6640625,
"learning_rate": 6.448930375918631e-05,
"loss": 2.7598,
"step": 655
},
{
"epoch": 0.656,
"grad_norm": 0.64453125,
"learning_rate": 6.416320504546997e-05,
"loss": 2.6908,
"step": 656
},
{
"epoch": 0.657,
"grad_norm": 1.265625,
"learning_rate": 6.383754299179079e-05,
"loss": 2.7308,
"step": 657
},
{
"epoch": 0.658,
"grad_norm": 0.66796875,
"learning_rate": 6.351232156623803e-05,
"loss": 2.7679,
"step": 658
},
{
"epoch": 0.659,
"grad_norm": 0.66015625,
"learning_rate": 6.318754473153221e-05,
"loss": 2.8747,
"step": 659
},
{
"epoch": 0.66,
"grad_norm": 0.62890625,
"learning_rate": 6.286321644497655e-05,
"loss": 2.7716,
"step": 660
},
{
"epoch": 0.661,
"grad_norm": 0.62890625,
"learning_rate": 6.25393406584088e-05,
"loss": 2.6582,
"step": 661
},
{
"epoch": 0.662,
"grad_norm": 0.60546875,
"learning_rate": 6.22159213181533e-05,
"loss": 2.7907,
"step": 662
},
{
"epoch": 0.663,
"grad_norm": 0.67578125,
"learning_rate": 6.18929623649726e-05,
"loss": 2.6852,
"step": 663
},
{
"epoch": 0.664,
"grad_norm": 0.61328125,
"learning_rate": 6.157046773401964e-05,
"loss": 2.7763,
"step": 664
},
{
"epoch": 0.665,
"grad_norm": 0.640625,
"learning_rate": 6.12484413547897e-05,
"loss": 2.713,
"step": 665
},
{
"epoch": 0.666,
"grad_norm": 0.75390625,
"learning_rate": 6.092688715107264e-05,
"loss": 2.8589,
"step": 666
},
{
"epoch": 0.667,
"grad_norm": 0.67578125,
"learning_rate": 6.0605809040904894e-05,
"loss": 2.813,
"step": 667
},
{
"epoch": 0.668,
"grad_norm": 0.68359375,
"learning_rate": 6.0285210936521955e-05,
"loss": 2.7389,
"step": 668
},
{
"epoch": 0.669,
"grad_norm": 0.625,
"learning_rate": 5.9965096744310526e-05,
"loss": 2.7049,
"step": 669
},
{
"epoch": 0.67,
"grad_norm": 0.6875,
"learning_rate": 5.964547036476099e-05,
"loss": 2.7361,
"step": 670
},
{
"epoch": 0.671,
"grad_norm": 0.61328125,
"learning_rate": 5.9326335692419995e-05,
"loss": 2.7189,
"step": 671
},
{
"epoch": 0.672,
"grad_norm": 1.5078125,
"learning_rate": 5.900769661584272e-05,
"loss": 2.7206,
"step": 672
},
{
"epoch": 0.673,
"grad_norm": 0.69921875,
"learning_rate": 5.868955701754584e-05,
"loss": 2.7581,
"step": 673
},
{
"epoch": 0.674,
"grad_norm": 0.63671875,
"learning_rate": 5.83719207739599e-05,
"loss": 2.5903,
"step": 674
},
{
"epoch": 0.675,
"grad_norm": 0.65234375,
"learning_rate": 5.805479175538229e-05,
"loss": 2.7786,
"step": 675
},
{
"epoch": 0.676,
"grad_norm": 0.59765625,
"learning_rate": 5.773817382593008e-05,
"loss": 2.7335,
"step": 676
},
{
"epoch": 0.677,
"grad_norm": 0.6171875,
"learning_rate": 5.7422070843492734e-05,
"loss": 2.6639,
"step": 677
},
{
"epoch": 0.678,
"grad_norm": 0.66015625,
"learning_rate": 5.710648665968543e-05,
"loss": 2.6919,
"step": 678
},
{
"epoch": 0.679,
"grad_norm": 0.6328125,
"learning_rate": 5.679142511980175e-05,
"loss": 2.7755,
"step": 679
},
{
"epoch": 0.68,
"grad_norm": 0.6171875,
"learning_rate": 5.647689006276726e-05,
"loss": 2.8639,
"step": 680
},
{
"epoch": 0.681,
"grad_norm": 0.6953125,
"learning_rate": 5.616288532109225e-05,
"loss": 2.9127,
"step": 681
},
{
"epoch": 0.682,
"grad_norm": 0.73046875,
"learning_rate": 5.584941472082549e-05,
"loss": 2.8527,
"step": 682
},
{
"epoch": 0.683,
"grad_norm": 0.6875,
"learning_rate": 5.553648208150728e-05,
"loss": 2.7896,
"step": 683
},
{
"epoch": 0.684,
"grad_norm": 0.6171875,
"learning_rate": 5.522409121612304e-05,
"loss": 2.7857,
"step": 684
},
{
"epoch": 0.685,
"grad_norm": 0.65625,
"learning_rate": 5.491224593105695e-05,
"loss": 2.7679,
"step": 685
},
{
"epoch": 0.686,
"grad_norm": 0.6640625,
"learning_rate": 5.4600950026045326e-05,
"loss": 2.7092,
"step": 686
},
{
"epoch": 0.687,
"grad_norm": 0.64453125,
"learning_rate": 5.4290207294130615e-05,
"loss": 2.7738,
"step": 687
},
{
"epoch": 0.688,
"grad_norm": 0.99609375,
"learning_rate": 5.398002152161484e-05,
"loss": 2.7504,
"step": 688
},
{
"epoch": 0.689,
"grad_norm": 0.609375,
"learning_rate": 5.3670396488013854e-05,
"loss": 2.783,
"step": 689
},
{
"epoch": 0.69,
"grad_norm": 0.640625,
"learning_rate": 5.33613359660109e-05,
"loss": 2.7702,
"step": 690
},
{
"epoch": 0.691,
"grad_norm": 0.66796875,
"learning_rate": 5.305284372141095e-05,
"loss": 2.7703,
"step": 691
},
{
"epoch": 0.692,
"grad_norm": 0.67578125,
"learning_rate": 5.274492351309461e-05,
"loss": 2.8481,
"step": 692
},
{
"epoch": 0.693,
"grad_norm": 0.640625,
"learning_rate": 5.243757909297247e-05,
"loss": 2.8131,
"step": 693
},
{
"epoch": 0.694,
"grad_norm": 0.62109375,
"learning_rate": 5.213081420593933e-05,
"loss": 2.8698,
"step": 694
},
{
"epoch": 0.695,
"grad_norm": 0.68359375,
"learning_rate": 5.182463258982846e-05,
"loss": 2.8107,
"step": 695
},
{
"epoch": 0.696,
"grad_norm": 0.6796875,
"learning_rate": 5.15190379753663e-05,
"loss": 2.8036,
"step": 696
},
{
"epoch": 0.697,
"grad_norm": 0.64453125,
"learning_rate": 5.121403408612672e-05,
"loss": 2.8471,
"step": 697
},
{
"epoch": 0.698,
"grad_norm": 0.609375,
"learning_rate": 5.090962463848592e-05,
"loss": 2.8396,
"step": 698
},
{
"epoch": 0.699,
"grad_norm": 0.62890625,
"learning_rate": 5.0605813341576924e-05,
"loss": 2.8636,
"step": 699
},
{
"epoch": 0.7,
"grad_norm": 0.70703125,
"learning_rate": 5.0302603897244474e-05,
"loss": 2.9405,
"step": 700
},
{
"epoch": 0.701,
"grad_norm": 0.62890625,
"learning_rate": 5.000000000000002e-05,
"loss": 2.7732,
"step": 701
},
{
"epoch": 0.702,
"grad_norm": 0.671875,
"learning_rate": 4.969800533697649e-05,
"loss": 2.9202,
"step": 702
},
{
"epoch": 0.703,
"grad_norm": 0.65234375,
"learning_rate": 4.939662358788364e-05,
"loss": 2.8386,
"step": 703
},
{
"epoch": 0.704,
"grad_norm": 0.82421875,
"learning_rate": 4.909585842496287e-05,
"loss": 2.8088,
"step": 704
},
{
"epoch": 0.705,
"grad_norm": 0.66796875,
"learning_rate": 4.8795713512942865e-05,
"loss": 2.6467,
"step": 705
},
{
"epoch": 0.706,
"grad_norm": 0.65234375,
"learning_rate": 4.8496192508994576e-05,
"loss": 2.7861,
"step": 706
},
{
"epoch": 0.707,
"grad_norm": 0.52734375,
"learning_rate": 4.8197299062686995e-05,
"loss": 2.7869,
"step": 707
},
{
"epoch": 0.708,
"grad_norm": 0.6015625,
"learning_rate": 4.78990368159424e-05,
"loss": 2.6457,
"step": 708
},
{
"epoch": 0.709,
"grad_norm": 0.57421875,
"learning_rate": 4.7601409402992106e-05,
"loss": 2.6031,
"step": 709
},
{
"epoch": 0.71,
"grad_norm": 0.58203125,
"learning_rate": 4.7304420450332244e-05,
"loss": 2.8342,
"step": 710
},
{
"epoch": 0.711,
"grad_norm": 0.56640625,
"learning_rate": 4.700807357667952e-05,
"loss": 2.7622,
"step": 711
},
{
"epoch": 0.712,
"grad_norm": 0.61328125,
"learning_rate": 4.6712372392927e-05,
"loss": 2.7514,
"step": 712
},
{
"epoch": 0.713,
"grad_norm": 0.6640625,
"learning_rate": 4.6417320502100316e-05,
"loss": 2.8732,
"step": 713
},
{
"epoch": 0.714,
"grad_norm": 0.6015625,
"learning_rate": 4.612292149931369e-05,
"loss": 2.8161,
"step": 714
},
{
"epoch": 0.715,
"grad_norm": 0.578125,
"learning_rate": 4.582917897172603e-05,
"loss": 2.7898,
"step": 715
},
{
"epoch": 0.716,
"grad_norm": 0.59765625,
"learning_rate": 4.5536096498497295e-05,
"loss": 2.7848,
"step": 716
},
{
"epoch": 0.717,
"grad_norm": 0.59765625,
"learning_rate": 4.524367765074499e-05,
"loss": 2.703,
"step": 717
},
{
"epoch": 0.718,
"grad_norm": 0.59375,
"learning_rate": 4.495192599150044e-05,
"loss": 2.7208,
"step": 718
},
{
"epoch": 0.719,
"grad_norm": 1.3671875,
"learning_rate": 4.46608450756656e-05,
"loss": 2.767,
"step": 719
},
{
"epoch": 0.72,
"grad_norm": 0.57421875,
"learning_rate": 4.437043844996952e-05,
"loss": 2.7353,
"step": 720
},
{
"epoch": 0.721,
"grad_norm": 0.640625,
"learning_rate": 4.4080709652925336e-05,
"loss": 2.7186,
"step": 721
},
{
"epoch": 0.722,
"grad_norm": 18.625,
"learning_rate": 4.379166221478697e-05,
"loss": 2.6783,
"step": 722
},
{
"epoch": 0.723,
"grad_norm": 0.6015625,
"learning_rate": 4.350329965750621e-05,
"loss": 2.718,
"step": 723
},
{
"epoch": 0.724,
"grad_norm": 0.64453125,
"learning_rate": 4.32156254946899e-05,
"loss": 2.6782,
"step": 724
},
{
"epoch": 0.725,
"grad_norm": 0.6484375,
"learning_rate": 4.2928643231556844e-05,
"loss": 2.7787,
"step": 725
},
{
"epoch": 0.726,
"grad_norm": 0.59765625,
"learning_rate": 4.264235636489542e-05,
"loss": 2.7355,
"step": 726
},
{
"epoch": 0.727,
"grad_norm": 0.59765625,
"learning_rate": 4.235676838302068e-05,
"loss": 2.8365,
"step": 727
},
{
"epoch": 0.728,
"grad_norm": 0.59765625,
"learning_rate": 4.207188276573214e-05,
"loss": 2.7514,
"step": 728
},
{
"epoch": 0.729,
"grad_norm": 0.60546875,
"learning_rate": 4.1787702984271074e-05,
"loss": 2.7733,
"step": 729
},
{
"epoch": 0.73,
"grad_norm": 1.8984375,
"learning_rate": 4.150423250127845e-05,
"loss": 2.809,
"step": 730
},
{
"epoch": 0.731,
"grad_norm": 0.5859375,
"learning_rate": 4.12214747707527e-05,
"loss": 2.7394,
"step": 731
},
{
"epoch": 0.732,
"grad_norm": 0.578125,
"learning_rate": 4.093943323800745e-05,
"loss": 2.6793,
"step": 732
},
{
"epoch": 0.733,
"grad_norm": 0.5625,
"learning_rate": 4.065811133962987e-05,
"loss": 2.7069,
"step": 733
},
{
"epoch": 0.734,
"grad_norm": 0.57421875,
"learning_rate": 4.037751250343841e-05,
"loss": 2.6702,
"step": 734
},
{
"epoch": 0.735,
"grad_norm": 1.109375,
"learning_rate": 4.009764014844143e-05,
"loss": 2.8871,
"step": 735
},
{
"epoch": 0.736,
"grad_norm": 0.62890625,
"learning_rate": 3.981849768479517e-05,
"loss": 2.7682,
"step": 736
},
{
"epoch": 0.737,
"grad_norm": 0.59765625,
"learning_rate": 3.954008851376252e-05,
"loss": 2.7024,
"step": 737
},
{
"epoch": 0.738,
"grad_norm": 0.59765625,
"learning_rate": 3.9262416027671356e-05,
"loss": 2.6956,
"step": 738
},
{
"epoch": 0.739,
"grad_norm": 0.6640625,
"learning_rate": 3.8985483609873244e-05,
"loss": 2.8372,
"step": 739
},
{
"epoch": 0.74,
"grad_norm": 0.5703125,
"learning_rate": 3.8709294634702376e-05,
"loss": 2.7524,
"step": 740
},
{
"epoch": 0.741,
"grad_norm": 0.59375,
"learning_rate": 3.843385246743417e-05,
"loss": 2.8114,
"step": 741
},
{
"epoch": 0.742,
"grad_norm": 0.60546875,
"learning_rate": 3.8159160464244606e-05,
"loss": 2.8749,
"step": 742
},
{
"epoch": 0.743,
"grad_norm": 0.578125,
"learning_rate": 3.788522197216897e-05,
"loss": 2.7037,
"step": 743
},
{
"epoch": 0.744,
"grad_norm": 0.55078125,
"learning_rate": 3.7612040329061405e-05,
"loss": 2.7504,
"step": 744
},
{
"epoch": 0.745,
"grad_norm": 0.59765625,
"learning_rate": 3.733961886355398e-05,
"loss": 2.7219,
"step": 745
},
{
"epoch": 0.746,
"grad_norm": 0.60546875,
"learning_rate": 3.7067960895016275e-05,
"loss": 2.741,
"step": 746
},
{
"epoch": 0.747,
"grad_norm": 0.5625,
"learning_rate": 3.679706973351491e-05,
"loss": 2.7085,
"step": 747
},
{
"epoch": 0.748,
"grad_norm": 0.59375,
"learning_rate": 3.6526948679773257e-05,
"loss": 2.7705,
"step": 748
},
{
"epoch": 0.749,
"grad_norm": 0.58984375,
"learning_rate": 3.6257601025131026e-05,
"loss": 2.7403,
"step": 749
},
{
"epoch": 0.75,
"grad_norm": 0.9375,
"learning_rate": 3.5989030051504434e-05,
"loss": 2.7612,
"step": 750
},
{
"epoch": 0.75,
"eval_loss": 2.7113237380981445,
"eval_runtime": 31.7342,
"eval_samples_per_second": 15.756,
"eval_steps_per_second": 2.647,
"step": 750
},
{
"epoch": 0.751,
"grad_norm": 0.59375,
"learning_rate": 3.5721239031346066e-05,
"loss": 2.8165,
"step": 751
},
{
"epoch": 0.752,
"grad_norm": 0.5625,
"learning_rate": 3.545423122760493e-05,
"loss": 2.6717,
"step": 752
},
{
"epoch": 0.753,
"grad_norm": 0.578125,
"learning_rate": 3.518800989368691e-05,
"loss": 2.5895,
"step": 753
},
{
"epoch": 0.754,
"grad_norm": 0.5625,
"learning_rate": 3.492257827341492e-05,
"loss": 2.7452,
"step": 754
},
{
"epoch": 0.755,
"grad_norm": 0.5625,
"learning_rate": 3.465793960098945e-05,
"loss": 2.7445,
"step": 755
},
{
"epoch": 0.756,
"grad_norm": 0.59765625,
"learning_rate": 3.439409710094929e-05,
"loss": 2.805,
"step": 756
},
{
"epoch": 0.757,
"grad_norm": 0.640625,
"learning_rate": 3.413105398813195e-05,
"loss": 2.8297,
"step": 757
},
{
"epoch": 0.758,
"grad_norm": 0.55078125,
"learning_rate": 3.386881346763483e-05,
"loss": 2.6497,
"step": 758
},
{
"epoch": 0.759,
"grad_norm": 0.56640625,
"learning_rate": 3.360737873477584e-05,
"loss": 2.7854,
"step": 759
},
{
"epoch": 0.76,
"grad_norm": 0.58203125,
"learning_rate": 3.334675297505476e-05,
"loss": 2.7295,
"step": 760
},
{
"epoch": 0.761,
"grad_norm": 0.578125,
"learning_rate": 3.308693936411421e-05,
"loss": 2.7563,
"step": 761
},
{
"epoch": 0.762,
"grad_norm": 0.5859375,
"learning_rate": 3.2827941067700996e-05,
"loss": 2.8956,
"step": 762
},
{
"epoch": 0.763,
"grad_norm": 0.76953125,
"learning_rate": 3.2569761241627696e-05,
"loss": 2.7614,
"step": 763
},
{
"epoch": 0.764,
"grad_norm": 0.578125,
"learning_rate": 3.231240303173394e-05,
"loss": 2.7303,
"step": 764
},
{
"epoch": 0.765,
"grad_norm": 0.58984375,
"learning_rate": 3.205586957384838e-05,
"loss": 2.7826,
"step": 765
},
{
"epoch": 0.766,
"grad_norm": 1.3359375,
"learning_rate": 3.1800163993750166e-05,
"loss": 2.7078,
"step": 766
},
{
"epoch": 0.767,
"grad_norm": 0.54296875,
"learning_rate": 3.154528940713113e-05,
"loss": 2.6754,
"step": 767
},
{
"epoch": 0.768,
"grad_norm": 0.59765625,
"learning_rate": 3.129124891955771e-05,
"loss": 2.7744,
"step": 768
},
{
"epoch": 0.769,
"grad_norm": 0.5625,
"learning_rate": 3.103804562643302e-05,
"loss": 2.653,
"step": 769
},
{
"epoch": 0.77,
"grad_norm": 0.546875,
"learning_rate": 3.078568261295933e-05,
"loss": 2.7278,
"step": 770
},
{
"epoch": 0.771,
"grad_norm": 0.57421875,
"learning_rate": 3.053416295410026e-05,
"loss": 2.7533,
"step": 771
},
{
"epoch": 0.772,
"grad_norm": 0.60546875,
"learning_rate": 3.0283489714543556e-05,
"loss": 2.7639,
"step": 772
},
{
"epoch": 0.773,
"grad_norm": 0.578125,
"learning_rate": 3.0033665948663448e-05,
"loss": 2.7951,
"step": 773
},
{
"epoch": 0.774,
"grad_norm": 0.6015625,
"learning_rate": 2.9784694700483762e-05,
"loss": 2.7375,
"step": 774
},
{
"epoch": 0.775,
"grad_norm": 0.55859375,
"learning_rate": 2.953657900364053e-05,
"loss": 2.7481,
"step": 775
},
{
"epoch": 0.776,
"grad_norm": 0.59765625,
"learning_rate": 2.9289321881345254e-05,
"loss": 2.7437,
"step": 776
},
{
"epoch": 0.777,
"grad_norm": 0.58984375,
"learning_rate": 2.904292634634793e-05,
"loss": 2.7637,
"step": 777
},
{
"epoch": 0.778,
"grad_norm": 0.546875,
"learning_rate": 2.879739540090036e-05,
"loss": 2.7252,
"step": 778
},
{
"epoch": 0.779,
"grad_norm": 0.5546875,
"learning_rate": 2.8552732036719687e-05,
"loss": 2.5999,
"step": 779
},
{
"epoch": 0.78,
"grad_norm": 0.5625,
"learning_rate": 2.8308939234951726e-05,
"loss": 2.8094,
"step": 780
},
{
"epoch": 0.781,
"grad_norm": 0.58984375,
"learning_rate": 2.8066019966134904e-05,
"loss": 2.7139,
"step": 781
},
{
"epoch": 0.782,
"grad_norm": 1.46875,
"learning_rate": 2.7823977190163786e-05,
"loss": 2.8623,
"step": 782
},
{
"epoch": 0.783,
"grad_norm": 0.52734375,
"learning_rate": 2.7582813856253275e-05,
"loss": 2.7122,
"step": 783
},
{
"epoch": 0.784,
"grad_norm": 0.55078125,
"learning_rate": 2.734253290290242e-05,
"loss": 2.7772,
"step": 784
},
{
"epoch": 0.785,
"grad_norm": 0.5546875,
"learning_rate": 2.7103137257858868e-05,
"loss": 2.7612,
"step": 785
},
{
"epoch": 0.786,
"grad_norm": 0.5625,
"learning_rate": 2.6864629838082956e-05,
"loss": 2.7303,
"step": 786
},
{
"epoch": 0.787,
"grad_norm": 0.546875,
"learning_rate": 2.6627013549712355e-05,
"loss": 2.6967,
"step": 787
},
{
"epoch": 0.788,
"grad_norm": 0.5234375,
"learning_rate": 2.639029128802657e-05,
"loss": 2.675,
"step": 788
},
{
"epoch": 0.789,
"grad_norm": 0.52734375,
"learning_rate": 2.615446593741161e-05,
"loss": 2.771,
"step": 789
},
{
"epoch": 0.79,
"grad_norm": 0.59765625,
"learning_rate": 2.5919540371325e-05,
"loss": 2.7169,
"step": 790
},
{
"epoch": 0.791,
"grad_norm": 0.5546875,
"learning_rate": 2.5685517452260567e-05,
"loss": 2.6604,
"step": 791
},
{
"epoch": 0.792,
"grad_norm": 0.59765625,
"learning_rate": 2.5452400031713785e-05,
"loss": 2.6749,
"step": 792
},
{
"epoch": 0.793,
"grad_norm": 0.5546875,
"learning_rate": 2.5220190950146827e-05,
"loss": 2.6759,
"step": 793
},
{
"epoch": 0.794,
"grad_norm": 0.57421875,
"learning_rate": 2.4988893036954043e-05,
"loss": 2.5863,
"step": 794
},
{
"epoch": 0.795,
"grad_norm": 0.55078125,
"learning_rate": 2.4758509110427575e-05,
"loss": 2.7512,
"step": 795
},
{
"epoch": 0.796,
"grad_norm": 0.59765625,
"learning_rate": 2.45290419777228e-05,
"loss": 2.7619,
"step": 796
},
{
"epoch": 0.797,
"grad_norm": 0.74609375,
"learning_rate": 2.4300494434824373e-05,
"loss": 2.6975,
"step": 797
},
{
"epoch": 0.798,
"grad_norm": 0.56640625,
"learning_rate": 2.407286926651192e-05,
"loss": 2.6837,
"step": 798
},
{
"epoch": 0.799,
"grad_norm": 0.5234375,
"learning_rate": 2.3846169246326343e-05,
"loss": 2.7131,
"step": 799
},
{
"epoch": 0.8,
"grad_norm": 0.54296875,
"learning_rate": 2.362039713653581e-05,
"loss": 2.7429,
"step": 800
},
{
"epoch": 0.801,
"grad_norm": 0.578125,
"learning_rate": 2.339555568810221e-05,
"loss": 2.6673,
"step": 801
},
{
"epoch": 0.802,
"grad_norm": 0.57421875,
"learning_rate": 2.3171647640647687e-05,
"loss": 2.6941,
"step": 802
},
{
"epoch": 0.803,
"grad_norm": 0.6484375,
"learning_rate": 2.2948675722421086e-05,
"loss": 2.735,
"step": 803
},
{
"epoch": 0.804,
"grad_norm": 0.79296875,
"learning_rate": 2.2726642650264895e-05,
"loss": 2.7529,
"step": 804
},
{
"epoch": 0.805,
"grad_norm": 0.55859375,
"learning_rate": 2.2505551129582047e-05,
"loss": 2.6125,
"step": 805
},
{
"epoch": 0.806,
"grad_norm": 0.56640625,
"learning_rate": 2.2285403854302912e-05,
"loss": 2.8059,
"step": 806
},
{
"epoch": 0.807,
"grad_norm": 0.546875,
"learning_rate": 2.2066203506852566e-05,
"loss": 2.6916,
"step": 807
},
{
"epoch": 0.808,
"grad_norm": 0.7265625,
"learning_rate": 2.1847952758118117e-05,
"loss": 2.7839,
"step": 808
},
{
"epoch": 0.809,
"grad_norm": 0.58984375,
"learning_rate": 2.163065426741603e-05,
"loss": 2.7451,
"step": 809
},
{
"epoch": 0.81,
"grad_norm": 0.53515625,
"learning_rate": 2.1414310682459802e-05,
"loss": 2.7282,
"step": 810
},
{
"epoch": 0.811,
"grad_norm": 0.57421875,
"learning_rate": 2.119892463932781e-05,
"loss": 2.6989,
"step": 811
},
{
"epoch": 0.812,
"grad_norm": 0.5859375,
"learning_rate": 2.098449876243096e-05,
"loss": 2.6681,
"step": 812
},
{
"epoch": 0.813,
"grad_norm": 0.70703125,
"learning_rate": 2.0771035664480942e-05,
"loss": 2.7579,
"step": 813
},
{
"epoch": 0.814,
"grad_norm": 0.57421875,
"learning_rate": 2.0558537946458177e-05,
"loss": 2.6096,
"step": 814
},
{
"epoch": 0.815,
"grad_norm": 0.5546875,
"learning_rate": 2.0347008197580374e-05,
"loss": 2.5568,
"step": 815
},
{
"epoch": 0.816,
"grad_norm": 0.5546875,
"learning_rate": 2.013644899527074e-05,
"loss": 2.7679,
"step": 816
},
{
"epoch": 0.817,
"grad_norm": 0.5546875,
"learning_rate": 1.9926862905126665e-05,
"loss": 2.751,
"step": 817
},
{
"epoch": 0.818,
"grad_norm": 0.51953125,
"learning_rate": 1.9718252480888566e-05,
"loss": 2.7757,
"step": 818
},
{
"epoch": 0.819,
"grad_norm": 0.58203125,
"learning_rate": 1.9510620264408596e-05,
"loss": 2.6745,
"step": 819
},
{
"epoch": 0.82,
"grad_norm": 0.58203125,
"learning_rate": 1.930396878561983e-05,
"loss": 2.6728,
"step": 820
},
{
"epoch": 0.821,
"grad_norm": 0.5859375,
"learning_rate": 1.9098300562505266e-05,
"loss": 2.6893,
"step": 821
},
{
"epoch": 0.822,
"grad_norm": 0.58203125,
"learning_rate": 1.8893618101067355e-05,
"loss": 2.8697,
"step": 822
},
{
"epoch": 0.823,
"grad_norm": 0.5546875,
"learning_rate": 1.8689923895297245e-05,
"loss": 2.7268,
"step": 823
},
{
"epoch": 0.824,
"grad_norm": 0.578125,
"learning_rate": 1.848722042714457e-05,
"loss": 2.7974,
"step": 824
},
{
"epoch": 0.825,
"grad_norm": 0.53515625,
"learning_rate": 1.8285510166487152e-05,
"loss": 2.691,
"step": 825
},
{
"epoch": 0.826,
"grad_norm": 0.5859375,
"learning_rate": 1.808479557110081e-05,
"loss": 2.6877,
"step": 826
},
{
"epoch": 0.827,
"grad_norm": 0.53125,
"learning_rate": 1.78850790866296e-05,
"loss": 2.6971,
"step": 827
},
{
"epoch": 0.828,
"grad_norm": 0.5625,
"learning_rate": 1.7686363146555805e-05,
"loss": 2.8624,
"step": 828
},
{
"epoch": 0.829,
"grad_norm": 2.421875,
"learning_rate": 1.7488650172170496e-05,
"loss": 2.6911,
"step": 829
},
{
"epoch": 0.83,
"grad_norm": 0.5859375,
"learning_rate": 1.7291942572543807e-05,
"loss": 2.6905,
"step": 830
},
{
"epoch": 0.831,
"grad_norm": 0.58984375,
"learning_rate": 1.7096242744495837e-05,
"loss": 2.7459,
"step": 831
},
{
"epoch": 0.832,
"grad_norm": 0.6875,
"learning_rate": 1.690155307256719e-05,
"loss": 2.738,
"step": 832
},
{
"epoch": 0.833,
"grad_norm": 0.578125,
"learning_rate": 1.6707875928990058e-05,
"loss": 2.7459,
"step": 833
},
{
"epoch": 0.834,
"grad_norm": 0.5859375,
"learning_rate": 1.6515213673659357e-05,
"loss": 2.7432,
"step": 834
},
{
"epoch": 0.835,
"grad_norm": 0.55078125,
"learning_rate": 1.632356865410384e-05,
"loss": 2.6985,
"step": 835
},
{
"epoch": 0.836,
"grad_norm": 0.58203125,
"learning_rate": 1.6132943205457606e-05,
"loss": 2.8408,
"step": 836
},
{
"epoch": 0.837,
"grad_norm": 0.53515625,
"learning_rate": 1.5943339650431576e-05,
"loss": 2.662,
"step": 837
},
{
"epoch": 0.838,
"grad_norm": 0.53515625,
"learning_rate": 1.5754760299285252e-05,
"loss": 2.85,
"step": 838
},
{
"epoch": 0.839,
"grad_norm": 0.5390625,
"learning_rate": 1.5567207449798515e-05,
"loss": 2.6535,
"step": 839
},
{
"epoch": 0.84,
"grad_norm": 0.55078125,
"learning_rate": 1.538068338724361e-05,
"loss": 2.7144,
"step": 840
},
{
"epoch": 0.841,
"grad_norm": 0.53515625,
"learning_rate": 1.5195190384357404e-05,
"loss": 2.8185,
"step": 841
},
{
"epoch": 0.842,
"grad_norm": 1.0703125,
"learning_rate": 1.5010730701313625e-05,
"loss": 2.7978,
"step": 842
},
{
"epoch": 0.843,
"grad_norm": 0.546875,
"learning_rate": 1.4827306585695234e-05,
"loss": 2.7138,
"step": 843
},
{
"epoch": 0.844,
"grad_norm": 0.890625,
"learning_rate": 1.4644920272467244e-05,
"loss": 2.7772,
"step": 844
},
{
"epoch": 0.845,
"grad_norm": 0.54296875,
"learning_rate": 1.4463573983949341e-05,
"loss": 2.7598,
"step": 845
},
{
"epoch": 0.846,
"grad_norm": 0.6953125,
"learning_rate": 1.4283269929788779e-05,
"loss": 2.7201,
"step": 846
},
{
"epoch": 0.847,
"grad_norm": 0.51953125,
"learning_rate": 1.4104010306933557e-05,
"loss": 2.8091,
"step": 847
},
{
"epoch": 0.848,
"grad_norm": 0.5546875,
"learning_rate": 1.3925797299605647e-05,
"loss": 2.7008,
"step": 848
},
{
"epoch": 0.849,
"grad_norm": 0.52734375,
"learning_rate": 1.3748633079274253e-05,
"loss": 2.655,
"step": 849
},
{
"epoch": 0.85,
"grad_norm": 0.57421875,
"learning_rate": 1.3572519804629536e-05,
"loss": 2.6479,
"step": 850
},
{
"epoch": 0.851,
"grad_norm": 0.5625,
"learning_rate": 1.339745962155613e-05,
"loss": 2.8183,
"step": 851
},
{
"epoch": 0.852,
"grad_norm": 0.54296875,
"learning_rate": 1.3223454663107172e-05,
"loss": 2.7465,
"step": 852
},
{
"epoch": 0.853,
"grad_norm": 0.6015625,
"learning_rate": 1.30505070494781e-05,
"loss": 2.7232,
"step": 853
},
{
"epoch": 0.854,
"grad_norm": 0.5078125,
"learning_rate": 1.2878618887981064e-05,
"loss": 2.7987,
"step": 854
},
{
"epoch": 0.855,
"grad_norm": 0.52734375,
"learning_rate": 1.2707792273019048e-05,
"loss": 2.7005,
"step": 855
},
{
"epoch": 0.856,
"grad_norm": 0.55078125,
"learning_rate": 1.2538029286060426e-05,
"loss": 2.7156,
"step": 856
},
{
"epoch": 0.857,
"grad_norm": 0.54296875,
"learning_rate": 1.2369331995613665e-05,
"loss": 2.757,
"step": 857
},
{
"epoch": 0.858,
"grad_norm": 0.91015625,
"learning_rate": 1.2201702457201947e-05,
"loss": 2.6486,
"step": 858
},
{
"epoch": 0.859,
"grad_norm": 0.5703125,
"learning_rate": 1.2035142713338366e-05,
"loss": 2.7237,
"step": 859
},
{
"epoch": 0.86,
"grad_norm": 1.125,
"learning_rate": 1.1869654793500784e-05,
"loss": 2.6579,
"step": 860
},
{
"epoch": 0.861,
"grad_norm": 0.54296875,
"learning_rate": 1.1705240714107302e-05,
"loss": 2.719,
"step": 861
},
{
"epoch": 0.862,
"grad_norm": 0.51171875,
"learning_rate": 1.1541902478491606e-05,
"loss": 2.6551,
"step": 862
},
{
"epoch": 0.863,
"grad_norm": 0.52734375,
"learning_rate": 1.1379642076878527e-05,
"loss": 2.6851,
"step": 863
},
{
"epoch": 0.864,
"grad_norm": 0.5390625,
"learning_rate": 1.1218461486359877e-05,
"loss": 2.7538,
"step": 864
},
{
"epoch": 0.865,
"grad_norm": 0.58984375,
"learning_rate": 1.1058362670870249e-05,
"loss": 2.693,
"step": 865
},
{
"epoch": 0.866,
"grad_norm": 0.578125,
"learning_rate": 1.0899347581163221e-05,
"loss": 2.7243,
"step": 866
},
{
"epoch": 0.867,
"grad_norm": 0.546875,
"learning_rate": 1.0741418154787442e-05,
"loss": 2.6959,
"step": 867
},
{
"epoch": 0.868,
"grad_norm": 0.5703125,
"learning_rate": 1.0584576316063188e-05,
"loss": 2.7305,
"step": 868
},
{
"epoch": 0.869,
"grad_norm": 0.57421875,
"learning_rate": 1.042882397605871e-05,
"loss": 2.7807,
"step": 869
},
{
"epoch": 0.87,
"grad_norm": 0.5546875,
"learning_rate": 1.0274163032567163e-05,
"loss": 2.7001,
"step": 870
},
{
"epoch": 0.871,
"grad_norm": 0.54296875,
"learning_rate": 1.0120595370083318e-05,
"loss": 2.7309,
"step": 871
},
{
"epoch": 0.872,
"grad_norm": 0.54296875,
"learning_rate": 9.968122859780648e-06,
"loss": 2.6748,
"step": 872
},
{
"epoch": 0.873,
"grad_norm": 0.58203125,
"learning_rate": 9.816747359488632e-06,
"loss": 2.7391,
"step": 873
},
{
"epoch": 0.874,
"grad_norm": 0.5625,
"learning_rate": 9.666470713669918e-06,
"loss": 2.8011,
"step": 874
},
{
"epoch": 0.875,
"grad_norm": 1.21875,
"learning_rate": 9.517294753398064e-06,
"loss": 2.6658,
"step": 875
},
{
"epoch": 0.876,
"grad_norm": 0.546875,
"learning_rate": 9.369221296335006e-06,
"loss": 2.7542,
"step": 876
},
{
"epoch": 0.877,
"grad_norm": 0.5390625,
"learning_rate": 9.222252146709142e-06,
"loss": 2.6932,
"step": 877
},
{
"epoch": 0.878,
"grad_norm": 0.51953125,
"learning_rate": 9.076389095293148e-06,
"loss": 2.7678,
"step": 878
},
{
"epoch": 0.879,
"grad_norm": 0.53515625,
"learning_rate": 8.931633919382298e-06,
"loss": 2.6873,
"step": 879
},
{
"epoch": 0.88,
"grad_norm": 0.52734375,
"learning_rate": 8.787988382772705e-06,
"loss": 2.812,
"step": 880
},
{
"epoch": 0.881,
"grad_norm": 0.57421875,
"learning_rate": 8.645454235739903e-06,
"loss": 2.6715,
"step": 881
},
{
"epoch": 0.882,
"grad_norm": 0.53125,
"learning_rate": 8.504033215017527e-06,
"loss": 2.7504,
"step": 882
},
{
"epoch": 0.883,
"grad_norm": 0.515625,
"learning_rate": 8.363727043776038e-06,
"loss": 2.7513,
"step": 883
},
{
"epoch": 0.884,
"grad_norm": 0.51171875,
"learning_rate": 8.224537431601886e-06,
"loss": 2.8052,
"step": 884
},
{
"epoch": 0.885,
"grad_norm": 0.53125,
"learning_rate": 8.086466074476563e-06,
"loss": 2.7368,
"step": 885
},
{
"epoch": 0.886,
"grad_norm": 0.55859375,
"learning_rate": 7.949514654755962e-06,
"loss": 2.6757,
"step": 886
},
{
"epoch": 0.887,
"grad_norm": 0.5546875,
"learning_rate": 7.81368484114996e-06,
"loss": 2.703,
"step": 887
},
{
"epoch": 0.888,
"grad_norm": 0.54296875,
"learning_rate": 7.67897828870191e-06,
"loss": 2.7158,
"step": 888
},
{
"epoch": 0.889,
"grad_norm": 0.55078125,
"learning_rate": 7.545396638768698e-06,
"loss": 2.7778,
"step": 889
},
{
"epoch": 0.89,
"grad_norm": 0.5078125,
"learning_rate": 7.412941519000527e-06,
"loss": 2.6752,
"step": 890
},
{
"epoch": 0.891,
"grad_norm": 1.59375,
"learning_rate": 7.281614543321269e-06,
"loss": 2.7462,
"step": 891
},
{
"epoch": 0.892,
"grad_norm": 0.515625,
"learning_rate": 7.151417311908648e-06,
"loss": 2.6856,
"step": 892
},
{
"epoch": 0.893,
"grad_norm": 0.546875,
"learning_rate": 7.022351411174866e-06,
"loss": 2.7136,
"step": 893
},
{
"epoch": 0.894,
"grad_norm": 0.55859375,
"learning_rate": 6.894418413747183e-06,
"loss": 2.7652,
"step": 894
},
{
"epoch": 0.895,
"grad_norm": 0.5390625,
"learning_rate": 6.767619878448783e-06,
"loss": 2.687,
"step": 895
},
{
"epoch": 0.896,
"grad_norm": 0.609375,
"learning_rate": 6.6419573502798374e-06,
"loss": 2.8021,
"step": 896
},
{
"epoch": 0.897,
"grad_norm": 0.53125,
"learning_rate": 6.517432360398556e-06,
"loss": 2.7689,
"step": 897
},
{
"epoch": 0.898,
"grad_norm": 0.5859375,
"learning_rate": 6.394046426102674e-06,
"loss": 2.7352,
"step": 898
},
{
"epoch": 0.899,
"grad_norm": 0.5546875,
"learning_rate": 6.2718010508108545e-06,
"loss": 2.7,
"step": 899
},
{
"epoch": 0.9,
"grad_norm": 0.55859375,
"learning_rate": 6.1506977240444074e-06,
"loss": 2.6995,
"step": 900
},
{
"epoch": 0.901,
"grad_norm": 0.5390625,
"learning_rate": 6.030737921409169e-06,
"loss": 2.6935,
"step": 901
},
{
"epoch": 0.902,
"grad_norm": 0.5234375,
"learning_rate": 5.911923104577455e-06,
"loss": 2.711,
"step": 902
},
{
"epoch": 0.903,
"grad_norm": 0.5078125,
"learning_rate": 5.7942547212703315e-06,
"loss": 2.7778,
"step": 903
},
{
"epoch": 0.904,
"grad_norm": 0.53125,
"learning_rate": 5.6777342052399045e-06,
"loss": 2.7373,
"step": 904
},
{
"epoch": 0.905,
"grad_norm": 0.515625,
"learning_rate": 5.562362976251901e-06,
"loss": 2.7644,
"step": 905
},
{
"epoch": 0.906,
"grad_norm": 0.54296875,
"learning_rate": 5.448142440068316e-06,
"loss": 2.6753,
"step": 906
},
{
"epoch": 0.907,
"grad_norm": 0.8515625,
"learning_rate": 5.335073988430372e-06,
"loss": 2.7881,
"step": 907
},
{
"epoch": 0.908,
"grad_norm": 0.5234375,
"learning_rate": 5.223158999041444e-06,
"loss": 2.8192,
"step": 908
},
{
"epoch": 0.909,
"grad_norm": 0.578125,
"learning_rate": 5.1123988355503475e-06,
"loss": 2.7979,
"step": 909
},
{
"epoch": 0.91,
"grad_norm": 0.578125,
"learning_rate": 5.002794847534764e-06,
"loss": 2.7629,
"step": 910
},
{
"epoch": 0.911,
"grad_norm": 0.53515625,
"learning_rate": 4.8943483704846475e-06,
"loss": 2.7704,
"step": 911
},
{
"epoch": 0.912,
"grad_norm": 0.55859375,
"learning_rate": 4.7870607257861415e-06,
"loss": 2.6811,
"step": 912
},
{
"epoch": 0.913,
"grad_norm": 0.515625,
"learning_rate": 4.680933220705308e-06,
"loss": 2.7518,
"step": 913
},
{
"epoch": 0.914,
"grad_norm": 0.53125,
"learning_rate": 4.575967148372317e-06,
"loss": 2.7595,
"step": 914
},
{
"epoch": 0.915,
"grad_norm": 0.54296875,
"learning_rate": 4.4721637877656375e-06,
"loss": 2.7555,
"step": 915
},
{
"epoch": 0.916,
"grad_norm": 0.546875,
"learning_rate": 4.369524403696457e-06,
"loss": 2.728,
"step": 916
},
{
"epoch": 0.917,
"grad_norm": 0.546875,
"learning_rate": 4.268050246793276e-06,
"loss": 2.7264,
"step": 917
},
{
"epoch": 0.918,
"grad_norm": 0.54296875,
"learning_rate": 4.167742553486675e-06,
"loss": 2.7391,
"step": 918
},
{
"epoch": 0.919,
"grad_norm": 0.53515625,
"learning_rate": 4.068602545994249e-06,
"loss": 2.7269,
"step": 919
},
{
"epoch": 0.92,
"grad_norm": 0.51953125,
"learning_rate": 3.970631432305694e-06,
"loss": 2.7273,
"step": 920
},
{
"epoch": 0.921,
"grad_norm": 0.53515625,
"learning_rate": 3.873830406168111e-06,
"loss": 2.8126,
"step": 921
},
{
"epoch": 0.922,
"grad_norm": 2.171875,
"learning_rate": 3.7782006470714616e-06,
"loss": 2.7532,
"step": 922
},
{
"epoch": 0.923,
"grad_norm": 0.53125,
"learning_rate": 3.68374332023419e-06,
"loss": 2.7923,
"step": 923
},
{
"epoch": 0.924,
"grad_norm": 0.52734375,
"learning_rate": 3.5904595765890005e-06,
"loss": 2.7534,
"step": 924
},
{
"epoch": 0.925,
"grad_norm": 0.5390625,
"learning_rate": 3.4983505527688586e-06,
"loss": 2.7706,
"step": 925
},
{
"epoch": 0.926,
"grad_norm": 0.5390625,
"learning_rate": 3.40741737109318e-06,
"loss": 2.6821,
"step": 926
},
{
"epoch": 0.927,
"grad_norm": 0.56640625,
"learning_rate": 3.3176611395540626e-06,
"loss": 2.6992,
"step": 927
},
{
"epoch": 0.928,
"grad_norm": 0.5546875,
"learning_rate": 3.2290829518028862e-06,
"loss": 2.7818,
"step": 928
},
{
"epoch": 0.929,
"grad_norm": 0.56640625,
"learning_rate": 3.1416838871368924e-06,
"loss": 2.7538,
"step": 929
},
{
"epoch": 0.93,
"grad_norm": 0.5546875,
"learning_rate": 3.0554650104861136e-06,
"loss": 2.7118,
"step": 930
},
{
"epoch": 0.931,
"grad_norm": 0.55859375,
"learning_rate": 2.970427372400353e-06,
"loss": 2.7252,
"step": 931
},
{
"epoch": 0.932,
"grad_norm": 0.546875,
"learning_rate": 2.8865720090364034e-06,
"loss": 2.8308,
"step": 932
},
{
"epoch": 0.933,
"grad_norm": 0.53125,
"learning_rate": 2.8038999421453826e-06,
"loss": 2.6437,
"step": 933
},
{
"epoch": 0.934,
"grad_norm": 0.50390625,
"learning_rate": 2.7224121790603517e-06,
"loss": 2.764,
"step": 934
},
{
"epoch": 0.935,
"grad_norm": 0.5625,
"learning_rate": 2.6421097126839712e-06,
"loss": 2.6876,
"step": 935
},
{
"epoch": 0.936,
"grad_norm": 0.55078125,
"learning_rate": 2.5629935214764865e-06,
"loss": 2.8505,
"step": 936
},
{
"epoch": 0.937,
"grad_norm": 0.53515625,
"learning_rate": 2.4850645694436736e-06,
"loss": 2.7782,
"step": 937
},
{
"epoch": 0.938,
"grad_norm": 1.0625,
"learning_rate": 2.4083238061252567e-06,
"loss": 2.6892,
"step": 938
},
{
"epoch": 0.939,
"grad_norm": 0.52734375,
"learning_rate": 2.332772166583208e-06,
"loss": 2.6548,
"step": 939
},
{
"epoch": 0.94,
"grad_norm": 0.5625,
"learning_rate": 2.2584105713904125e-06,
"loss": 2.7622,
"step": 940
},
{
"epoch": 0.941,
"grad_norm": 0.5625,
"learning_rate": 2.1852399266194314e-06,
"loss": 2.7183,
"step": 941
},
{
"epoch": 0.942,
"grad_norm": 0.5234375,
"learning_rate": 2.1132611238315003e-06,
"loss": 2.7956,
"step": 942
},
{
"epoch": 0.943,
"grad_norm": 0.50390625,
"learning_rate": 2.0424750400655947e-06,
"loss": 2.6357,
"step": 943
},
{
"epoch": 0.944,
"grad_norm": 0.55859375,
"learning_rate": 1.9728825378278246e-06,
"loss": 2.6503,
"step": 944
},
{
"epoch": 0.945,
"grad_norm": 0.54296875,
"learning_rate": 1.904484465080847e-06,
"loss": 2.6973,
"step": 945
},
{
"epoch": 0.946,
"grad_norm": 0.546875,
"learning_rate": 1.8372816552336026e-06,
"loss": 2.7222,
"step": 946
},
{
"epoch": 0.947,
"grad_norm": 0.515625,
"learning_rate": 1.771274927131139e-06,
"loss": 2.6586,
"step": 947
},
{
"epoch": 0.948,
"grad_norm": 0.53125,
"learning_rate": 1.706465085044584e-06,
"loss": 2.657,
"step": 948
},
{
"epoch": 0.949,
"grad_norm": 0.55078125,
"learning_rate": 1.6428529186614195e-06,
"loss": 2.7474,
"step": 949
},
{
"epoch": 0.95,
"grad_norm": 0.578125,
"learning_rate": 1.580439203075812e-06,
"loss": 2.6946,
"step": 950
},
{
"epoch": 0.951,
"grad_norm": 0.5234375,
"learning_rate": 1.5192246987791981e-06,
"loss": 2.6957,
"step": 951
},
{
"epoch": 0.952,
"grad_norm": 0.53125,
"learning_rate": 1.4592101516509914e-06,
"loss": 2.7191,
"step": 952
},
{
"epoch": 0.953,
"grad_norm": 0.5234375,
"learning_rate": 1.400396292949513e-06,
"loss": 2.6774,
"step": 953
},
{
"epoch": 0.954,
"grad_norm": 0.640625,
"learning_rate": 1.3427838393030633e-06,
"loss": 2.785,
"step": 954
},
{
"epoch": 0.955,
"grad_norm": 0.578125,
"learning_rate": 1.2863734927012095e-06,
"loss": 2.7072,
"step": 955
},
{
"epoch": 0.956,
"grad_norm": 0.55078125,
"learning_rate": 1.231165940486234e-06,
"loss": 2.7511,
"step": 956
},
{
"epoch": 0.957,
"grad_norm": 0.5546875,
"learning_rate": 1.1771618553447216e-06,
"loss": 2.7826,
"step": 957
},
{
"epoch": 0.958,
"grad_norm": 5.4375,
"learning_rate": 1.1243618952994195e-06,
"loss": 2.7722,
"step": 958
},
{
"epoch": 0.959,
"grad_norm": 0.50390625,
"learning_rate": 1.0727667037011668e-06,
"loss": 2.6715,
"step": 959
},
{
"epoch": 0.96,
"grad_norm": 0.54296875,
"learning_rate": 1.0223769092211012e-06,
"loss": 2.7709,
"step": 960
},
{
"epoch": 0.961,
"grad_norm": 0.5390625,
"learning_rate": 9.731931258429638e-07,
"loss": 2.7166,
"step": 961
},
{
"epoch": 0.962,
"grad_norm": 0.53515625,
"learning_rate": 9.252159528556403e-07,
"loss": 2.7073,
"step": 962
},
{
"epoch": 0.963,
"grad_norm": 0.5546875,
"learning_rate": 8.784459748458318e-07,
"loss": 2.6861,
"step": 963
},
{
"epoch": 0.964,
"grad_norm": 0.515625,
"learning_rate": 8.328837616909613e-07,
"loss": 2.7969,
"step": 964
},
{
"epoch": 0.965,
"grad_norm": 0.53125,
"learning_rate": 7.885298685522235e-07,
"loss": 2.6999,
"step": 965
},
{
"epoch": 0.966,
"grad_norm": 0.56640625,
"learning_rate": 7.453848358678017e-07,
"loss": 2.7762,
"step": 966
},
{
"epoch": 0.967,
"grad_norm": 0.55078125,
"learning_rate": 7.034491893463058e-07,
"loss": 2.7231,
"step": 967
},
{
"epoch": 0.968,
"grad_norm": 0.515625,
"learning_rate": 6.627234399603555e-07,
"loss": 2.6857,
"step": 968
},
{
"epoch": 0.969,
"grad_norm": 2.28125,
"learning_rate": 6.232080839403631e-07,
"loss": 2.707,
"step": 969
},
{
"epoch": 0.97,
"grad_norm": 0.56640625,
"learning_rate": 5.849036027684606e-07,
"loss": 2.7359,
"step": 970
},
{
"epoch": 0.971,
"grad_norm": 0.55078125,
"learning_rate": 5.478104631726711e-07,
"loss": 2.6725,
"step": 971
},
{
"epoch": 0.972,
"grad_norm": 0.53125,
"learning_rate": 5.119291171211793e-07,
"loss": 2.762,
"step": 972
},
{
"epoch": 0.973,
"grad_norm": 0.546875,
"learning_rate": 4.772600018168816e-07,
"loss": 2.7416,
"step": 973
},
{
"epoch": 0.974,
"grad_norm": 0.53125,
"learning_rate": 4.438035396920004e-07,
"loss": 2.7811,
"step": 974
},
{
"epoch": 0.975,
"grad_norm": 0.54296875,
"learning_rate": 4.115601384029666e-07,
"loss": 2.7719,
"step": 975
},
{
"epoch": 0.976,
"grad_norm": 0.54296875,
"learning_rate": 3.805301908254455e-07,
"loss": 2.7054,
"step": 976
},
{
"epoch": 0.977,
"grad_norm": 0.546875,
"learning_rate": 3.50714075049563e-07,
"loss": 2.8006,
"step": 977
},
{
"epoch": 0.978,
"grad_norm": 0.58203125,
"learning_rate": 3.2211215437528694e-07,
"loss": 2.7788,
"step": 978
},
{
"epoch": 0.979,
"grad_norm": 0.53125,
"learning_rate": 2.947247773079753e-07,
"loss": 2.6917,
"step": 979
},
{
"epoch": 0.98,
"grad_norm": 0.5625,
"learning_rate": 2.685522775541904e-07,
"loss": 2.7921,
"step": 980
},
{
"epoch": 0.981,
"grad_norm": 0.58984375,
"learning_rate": 2.4359497401758024e-07,
"loss": 2.6928,
"step": 981
},
{
"epoch": 0.982,
"grad_norm": 0.515625,
"learning_rate": 2.1985317079500356e-07,
"loss": 2.8196,
"step": 982
},
{
"epoch": 0.983,
"grad_norm": 0.5546875,
"learning_rate": 1.973271571728441e-07,
"loss": 2.6882,
"step": 983
},
{
"epoch": 0.984,
"grad_norm": 0.546875,
"learning_rate": 1.7601720762346897e-07,
"loss": 2.7932,
"step": 984
},
{
"epoch": 0.985,
"grad_norm": 0.79296875,
"learning_rate": 1.5592358180189782e-07,
"loss": 2.7674,
"step": 985
},
{
"epoch": 0.986,
"grad_norm": 0.5,
"learning_rate": 1.3704652454261668e-07,
"loss": 2.7227,
"step": 986
},
{
"epoch": 0.987,
"grad_norm": 0.5859375,
"learning_rate": 1.193862658566025e-07,
"loss": 2.6739,
"step": 987
},
{
"epoch": 0.988,
"grad_norm": 0.52734375,
"learning_rate": 1.0294302092853647e-07,
"loss": 2.731,
"step": 988
},
{
"epoch": 0.989,
"grad_norm": 0.578125,
"learning_rate": 8.771699011416168e-08,
"loss": 2.8035,
"step": 989
},
{
"epoch": 0.99,
"grad_norm": 0.53515625,
"learning_rate": 7.370835893788508e-08,
"loss": 2.7294,
"step": 990
},
{
"epoch": 0.991,
"grad_norm": 0.55078125,
"learning_rate": 6.09172980904238e-08,
"loss": 2.8871,
"step": 991
},
{
"epoch": 0.992,
"grad_norm": 0.5390625,
"learning_rate": 4.934396342684e-08,
"loss": 2.7629,
"step": 992
},
{
"epoch": 0.993,
"grad_norm": 0.5859375,
"learning_rate": 3.898849596456478e-08,
"loss": 2.7404,
"step": 993
},
{
"epoch": 0.994,
"grad_norm": 0.5546875,
"learning_rate": 2.985102188168831e-08,
"loss": 2.7835,
"step": 994
},
{
"epoch": 0.995,
"grad_norm": 0.54296875,
"learning_rate": 2.193165251545004e-08,
"loss": 2.7149,
"step": 995
},
{
"epoch": 0.996,
"grad_norm": 0.515625,
"learning_rate": 1.5230484360873044e-08,
"loss": 2.7078,
"step": 996
},
{
"epoch": 0.997,
"grad_norm": 0.53515625,
"learning_rate": 9.747599069576119e-09,
"loss": 2.6312,
"step": 997
},
{
"epoch": 0.998,
"grad_norm": 0.5078125,
"learning_rate": 5.483063448785686e-09,
"loss": 2.7117,
"step": 998
},
{
"epoch": 0.999,
"grad_norm": 0.53515625,
"learning_rate": 2.4369294605253166e-09,
"loss": 2.7865,
"step": 999
},
{
"epoch": 1.0,
"grad_norm": 0.5859375,
"learning_rate": 6.092342209607083e-10,
"loss": 2.7628,
"step": 1000
},
{
"epoch": 1.0,
"eval_loss": 2.704561948776245,
"eval_runtime": 31.803,
"eval_samples_per_second": 15.722,
"eval_steps_per_second": 2.641,
"step": 1000
}
],
"logging_steps": 1,
"max_steps": 1000,
"num_input_tokens_seen": 0,
"num_train_epochs": 9223372036854775807,
"save_steps": 1000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.818479221014528e+18,
"train_batch_size": 6,
"trial_name": null,
"trial_params": null
}