{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 32.142857142857146,
"eval_steps": 50,
"global_step": 450,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.07142857142857142,
"grad_norm": 5.383044719696045,
"learning_rate": 0.0,
"loss": 1.5793,
"step": 1
},
{
"epoch": 0.14285714285714285,
"grad_norm": 3.96494197845459,
"learning_rate": 1.2500000000000002e-07,
"loss": 1.62,
"step": 2
},
{
"epoch": 0.21428571428571427,
"grad_norm": 4.022143363952637,
"learning_rate": 2.5000000000000004e-07,
"loss": 1.6035,
"step": 3
},
{
"epoch": 0.2857142857142857,
"grad_norm": 6.808481216430664,
"learning_rate": 3.75e-07,
"loss": 1.6309,
"step": 4
},
{
"epoch": 0.35714285714285715,
"grad_norm": 4.0089311599731445,
"learning_rate": 5.000000000000001e-07,
"loss": 1.596,
"step": 5
},
{
"epoch": 0.42857142857142855,
"grad_norm": 6.727263450622559,
"learning_rate": 6.25e-07,
"loss": 1.6312,
"step": 6
},
{
"epoch": 0.5,
"grad_norm": 7.648512840270996,
"learning_rate": 7.5e-07,
"loss": 1.6206,
"step": 7
},
{
"epoch": 0.5714285714285714,
"grad_norm": 11.115941047668457,
"learning_rate": 8.750000000000001e-07,
"loss": 1.6133,
"step": 8
},
{
"epoch": 0.6428571428571429,
"grad_norm": 8.82610034942627,
"learning_rate": 1.0000000000000002e-06,
"loss": 1.6419,
"step": 9
},
{
"epoch": 0.7142857142857143,
"grad_norm": 3.3373453617095947,
"learning_rate": 1.125e-06,
"loss": 1.5894,
"step": 10
},
{
"epoch": 0.7857142857142857,
"grad_norm": 7.521600723266602,
"learning_rate": 1.25e-06,
"loss": 1.6188,
"step": 11
},
{
"epoch": 0.8571428571428571,
"grad_norm": 5.497554302215576,
"learning_rate": 1.3750000000000002e-06,
"loss": 1.6221,
"step": 12
},
{
"epoch": 0.9285714285714286,
"grad_norm": 3.5530192852020264,
"learning_rate": 1.5e-06,
"loss": 1.6112,
"step": 13
},
{
"epoch": 1.0,
"grad_norm": 4.452667713165283,
"learning_rate": 1.6250000000000001e-06,
"loss": 1.5999,
"step": 14
},
{
"epoch": 1.0714285714285714,
"grad_norm": 2.4583044052124023,
"learning_rate": 1.7500000000000002e-06,
"loss": 1.5917,
"step": 15
},
{
"epoch": 1.1428571428571428,
"grad_norm": 9.272625923156738,
"learning_rate": 1.875e-06,
"loss": 1.6092,
"step": 16
},
{
"epoch": 1.2142857142857142,
"grad_norm": 4.451343059539795,
"learning_rate": 2.0000000000000003e-06,
"loss": 1.5686,
"step": 17
},
{
"epoch": 1.2857142857142856,
"grad_norm": 5.756788730621338,
"learning_rate": 2.1250000000000004e-06,
"loss": 1.6265,
"step": 18
},
{
"epoch": 1.3571428571428572,
"grad_norm": 4.12752103805542,
"learning_rate": 2.25e-06,
"loss": 1.6073,
"step": 19
},
{
"epoch": 1.4285714285714286,
"grad_norm": 2.2131590843200684,
"learning_rate": 2.375e-06,
"loss": 1.5929,
"step": 20
},
{
"epoch": 1.5,
"grad_norm": 7.726499080657959,
"learning_rate": 2.5e-06,
"loss": 1.6232,
"step": 21
},
{
"epoch": 1.5714285714285714,
"grad_norm": 5.906811714172363,
"learning_rate": 2.625e-06,
"loss": 1.5777,
"step": 22
},
{
"epoch": 1.6428571428571428,
"grad_norm": 4.695427894592285,
"learning_rate": 2.7500000000000004e-06,
"loss": 1.591,
"step": 23
},
{
"epoch": 1.7142857142857144,
"grad_norm": 1.5481104850769043,
"learning_rate": 2.8750000000000004e-06,
"loss": 1.5743,
"step": 24
},
{
"epoch": 1.7857142857142856,
"grad_norm": 4.624354839324951,
"learning_rate": 3e-06,
"loss": 1.6065,
"step": 25
},
{
"epoch": 1.8571428571428572,
"grad_norm": 5.646456718444824,
"learning_rate": 3.125e-06,
"loss": 1.5975,
"step": 26
},
{
"epoch": 1.9285714285714286,
"grad_norm": 8.583253860473633,
"learning_rate": 3.2500000000000002e-06,
"loss": 1.5809,
"step": 27
},
{
"epoch": 2.0,
"grad_norm": 1.7897872924804688,
"learning_rate": 3.3750000000000003e-06,
"loss": 1.5839,
"step": 28
},
{
"epoch": 2.0714285714285716,
"grad_norm": 2.509343385696411,
"learning_rate": 3.5000000000000004e-06,
"loss": 1.5725,
"step": 29
},
{
"epoch": 2.142857142857143,
"grad_norm": 3.86081600189209,
"learning_rate": 3.625e-06,
"loss": 1.5766,
"step": 30
},
{
"epoch": 2.2142857142857144,
"grad_norm": 4.29940128326416,
"learning_rate": 3.75e-06,
"loss": 1.5727,
"step": 31
},
{
"epoch": 2.2857142857142856,
"grad_norm": 3.209973096847534,
"learning_rate": 3.875e-06,
"loss": 1.5783,
"step": 32
},
{
"epoch": 2.357142857142857,
"grad_norm": 2.813647985458374,
"learning_rate": 4.000000000000001e-06,
"loss": 1.5687,
"step": 33
},
{
"epoch": 2.4285714285714284,
"grad_norm": 2.750006914138794,
"learning_rate": 4.125e-06,
"loss": 1.5739,
"step": 34
},
{
"epoch": 2.5,
"grad_norm": 4.352213382720947,
"learning_rate": 4.250000000000001e-06,
"loss": 1.5781,
"step": 35
},
{
"epoch": 2.571428571428571,
"grad_norm": 2.743788957595825,
"learning_rate": 4.375e-06,
"loss": 1.5731,
"step": 36
},
{
"epoch": 2.642857142857143,
"grad_norm": 5.008453845977783,
"learning_rate": 4.5e-06,
"loss": 1.5828,
"step": 37
},
{
"epoch": 2.7142857142857144,
"grad_norm": 3.1128811836242676,
"learning_rate": 4.625e-06,
"loss": 1.5706,
"step": 38
},
{
"epoch": 2.7857142857142856,
"grad_norm": 5.385458946228027,
"learning_rate": 4.75e-06,
"loss": 1.5834,
"step": 39
},
{
"epoch": 2.857142857142857,
"grad_norm": 1.6079157590866089,
"learning_rate": 4.875000000000001e-06,
"loss": 1.5721,
"step": 40
},
{
"epoch": 2.928571428571429,
"grad_norm": 3.1016640663146973,
"learning_rate": 5e-06,
"loss": 1.5763,
"step": 41
},
{
"epoch": 3.0,
"grad_norm": 4.650889873504639,
"learning_rate": 5.125e-06,
"loss": 1.5426,
"step": 42
},
{
"epoch": 3.0714285714285716,
"grad_norm": 1.5727084875106812,
"learning_rate": 5.25e-06,
"loss": 1.5432,
"step": 43
},
{
"epoch": 3.142857142857143,
"grad_norm": 2.637890577316284,
"learning_rate": 5.375e-06,
"loss": 1.5472,
"step": 44
},
{
"epoch": 3.2142857142857144,
"grad_norm": 1.8277631998062134,
"learning_rate": 5.500000000000001e-06,
"loss": 1.5363,
"step": 45
},
{
"epoch": 3.2857142857142856,
"grad_norm": 1.6959493160247803,
"learning_rate": 5.625e-06,
"loss": 1.5397,
"step": 46
},
{
"epoch": 3.357142857142857,
"grad_norm": 7.91141414642334,
"learning_rate": 5.750000000000001e-06,
"loss": 1.5392,
"step": 47
},
{
"epoch": 3.4285714285714284,
"grad_norm": 8.705608367919922,
"learning_rate": 5.875e-06,
"loss": 1.5499,
"step": 48
},
{
"epoch": 3.5,
"grad_norm": 1.4096094369888306,
"learning_rate": 6e-06,
"loss": 1.5554,
"step": 49
},
{
"epoch": 3.571428571428571,
"grad_norm": 1.2479711771011353,
"learning_rate": 6.125e-06,
"loss": 1.5537,
"step": 50
},
{
"epoch": 3.642857142857143,
"grad_norm": 1.7531940937042236,
"learning_rate": 6.25e-06,
"loss": 1.533,
"step": 51
},
{
"epoch": 3.7142857142857144,
"grad_norm": 3.1394143104553223,
"learning_rate": 6.375000000000001e-06,
"loss": 1.5223,
"step": 52
},
{
"epoch": 3.7857142857142856,
"grad_norm": 5.379184246063232,
"learning_rate": 6.5000000000000004e-06,
"loss": 1.5204,
"step": 53
},
{
"epoch": 3.857142857142857,
"grad_norm": 1.6642723083496094,
"learning_rate": 6.625000000000001e-06,
"loss": 1.5195,
"step": 54
},
{
"epoch": 3.928571428571429,
"grad_norm": 1.8299416303634644,
"learning_rate": 6.750000000000001e-06,
"loss": 1.5111,
"step": 55
},
{
"epoch": 4.0,
"grad_norm": 3.942272901535034,
"learning_rate": 6.875000000000001e-06,
"loss": 1.5134,
"step": 56
},
{
"epoch": 4.071428571428571,
"grad_norm": 1.4287347793579102,
"learning_rate": 7.000000000000001e-06,
"loss": 1.5119,
"step": 57
},
{
"epoch": 4.142857142857143,
"grad_norm": 2.0702247619628906,
"learning_rate": 7.1249999999999995e-06,
"loss": 1.4962,
"step": 58
},
{
"epoch": 4.214285714285714,
"grad_norm": 2.863966703414917,
"learning_rate": 7.25e-06,
"loss": 1.5204,
"step": 59
},
{
"epoch": 4.285714285714286,
"grad_norm": 2.398355722427368,
"learning_rate": 7.375e-06,
"loss": 1.4955,
"step": 60
},
{
"epoch": 4.357142857142857,
"grad_norm": 1.4015003442764282,
"learning_rate": 7.5e-06,
"loss": 1.4942,
"step": 61
},
{
"epoch": 4.428571428571429,
"grad_norm": 1.6098459959030151,
"learning_rate": 7.625e-06,
"loss": 1.4712,
"step": 62
},
{
"epoch": 4.5,
"grad_norm": 4.722957611083984,
"learning_rate": 7.75e-06,
"loss": 1.5288,
"step": 63
},
{
"epoch": 4.571428571428571,
"grad_norm": 3.6340856552124023,
"learning_rate": 7.875e-06,
"loss": 1.524,
"step": 64
},
{
"epoch": 4.642857142857143,
"grad_norm": 2.6402580738067627,
"learning_rate": 8.000000000000001e-06,
"loss": 1.5417,
"step": 65
},
{
"epoch": 4.714285714285714,
"grad_norm": 2.7053163051605225,
"learning_rate": 8.125000000000001e-06,
"loss": 1.5101,
"step": 66
},
{
"epoch": 4.785714285714286,
"grad_norm": 4.306228160858154,
"learning_rate": 8.25e-06,
"loss": 1.5474,
"step": 67
},
{
"epoch": 4.857142857142857,
"grad_norm": 3.2443113327026367,
"learning_rate": 8.375e-06,
"loss": 1.5324,
"step": 68
},
{
"epoch": 4.928571428571429,
"grad_norm": 2.3825535774230957,
"learning_rate": 8.500000000000002e-06,
"loss": 1.522,
"step": 69
},
{
"epoch": 5.0,
"grad_norm": 3.2580692768096924,
"learning_rate": 8.625e-06,
"loss": 1.5179,
"step": 70
},
{
"epoch": 5.071428571428571,
"grad_norm": 1.4018006324768066,
"learning_rate": 8.75e-06,
"loss": 1.4971,
"step": 71
},
{
"epoch": 5.142857142857143,
"grad_norm": 5.274407386779785,
"learning_rate": 8.875e-06,
"loss": 1.513,
"step": 72
},
{
"epoch": 5.214285714285714,
"grad_norm": 1.3473955392837524,
"learning_rate": 9e-06,
"loss": 1.4898,
"step": 73
},
{
"epoch": 5.285714285714286,
"grad_norm": 5.1161346435546875,
"learning_rate": 9.125e-06,
"loss": 1.5032,
"step": 74
},
{
"epoch": 5.357142857142857,
"grad_norm": 10.268465042114258,
"learning_rate": 9.25e-06,
"loss": 1.4926,
"step": 75
},
{
"epoch": 5.428571428571429,
"grad_norm": 1.676137089729309,
"learning_rate": 9.375000000000001e-06,
"loss": 1.4833,
"step": 76
},
{
"epoch": 5.5,
"grad_norm": 1.441231369972229,
"learning_rate": 9.5e-06,
"loss": 1.4934,
"step": 77
},
{
"epoch": 5.571428571428571,
"grad_norm": 3.917909860610962,
"learning_rate": 9.625e-06,
"loss": 1.5473,
"step": 78
},
{
"epoch": 5.642857142857143,
"grad_norm": 1.9755581617355347,
"learning_rate": 9.750000000000002e-06,
"loss": 1.5192,
"step": 79
},
{
"epoch": 5.714285714285714,
"grad_norm": 2.587038516998291,
"learning_rate": 9.875000000000001e-06,
"loss": 1.5212,
"step": 80
},
{
"epoch": 5.785714285714286,
"grad_norm": 2.155751943588257,
"learning_rate": 1e-05,
"loss": 1.5337,
"step": 81
},
{
"epoch": 5.857142857142857,
"grad_norm": 2.8446056842803955,
"learning_rate": 1.0125e-05,
"loss": 1.5174,
"step": 82
},
{
"epoch": 5.928571428571429,
"grad_norm": 2.9765541553497314,
"learning_rate": 1.025e-05,
"loss": 1.5267,
"step": 83
},
{
"epoch": 6.0,
"grad_norm": 2.4112436771392822,
"learning_rate": 1.0375e-05,
"loss": 1.4897,
"step": 84
},
{
"epoch": 6.071428571428571,
"grad_norm": 3.6984405517578125,
"learning_rate": 1.05e-05,
"loss": 1.5304,
"step": 85
},
{
"epoch": 6.142857142857143,
"grad_norm": 3.8474667072296143,
"learning_rate": 1.0625e-05,
"loss": 1.5232,
"step": 86
},
{
"epoch": 6.214285714285714,
"grad_norm": 2.087263584136963,
"learning_rate": 1.075e-05,
"loss": 1.5285,
"step": 87
},
{
"epoch": 6.285714285714286,
"grad_norm": 2.1326711177825928,
"learning_rate": 1.0875e-05,
"loss": 1.5387,
"step": 88
},
{
"epoch": 6.357142857142857,
"grad_norm": 4.559895992279053,
"learning_rate": 1.1000000000000001e-05,
"loss": 1.524,
"step": 89
},
{
"epoch": 6.428571428571429,
"grad_norm": 1.8320558071136475,
"learning_rate": 1.1125000000000001e-05,
"loss": 1.5143,
"step": 90
},
{
"epoch": 6.5,
"grad_norm": 1.8108539581298828,
"learning_rate": 1.125e-05,
"loss": 1.4987,
"step": 91
},
{
"epoch": 6.571428571428571,
"grad_norm": 1.3095402717590332,
"learning_rate": 1.1375e-05,
"loss": 1.5025,
"step": 92
},
{
"epoch": 6.642857142857143,
"grad_norm": 4.299781799316406,
"learning_rate": 1.1500000000000002e-05,
"loss": 1.5251,
"step": 93
},
{
"epoch": 6.714285714285714,
"grad_norm": 1.1237058639526367,
"learning_rate": 1.1625000000000001e-05,
"loss": 1.5185,
"step": 94
},
{
"epoch": 6.785714285714286,
"grad_norm": 6.838643550872803,
"learning_rate": 1.175e-05,
"loss": 1.5032,
"step": 95
},
{
"epoch": 6.857142857142857,
"grad_norm": 1.886619210243225,
"learning_rate": 1.1875e-05,
"loss": 1.5394,
"step": 96
},
{
"epoch": 6.928571428571429,
"grad_norm": 2.4293034076690674,
"learning_rate": 1.2e-05,
"loss": 1.4958,
"step": 97
},
{
"epoch": 7.0,
"grad_norm": 1.3641681671142578,
"learning_rate": 1.2125e-05,
"loss": 1.5182,
"step": 98
},
{
"epoch": 7.071428571428571,
"grad_norm": 1.6436591148376465,
"learning_rate": 1.225e-05,
"loss": 1.5007,
"step": 99
},
{
"epoch": 7.142857142857143,
"grad_norm": 1.4990618228912354,
"learning_rate": 1.2375000000000001e-05,
"loss": 1.4881,
"step": 100
},
{
"epoch": 7.214285714285714,
"grad_norm": 1.2271665334701538,
"learning_rate": 1.25e-05,
"loss": 1.5017,
"step": 101
},
{
"epoch": 7.285714285714286,
"grad_norm": 1.2291427850723267,
"learning_rate": 1.2625e-05,
"loss": 1.4979,
"step": 102
},
{
"epoch": 7.357142857142857,
"grad_norm": 5.997069358825684,
"learning_rate": 1.2750000000000002e-05,
"loss": 1.5238,
"step": 103
},
{
"epoch": 7.428571428571429,
"grad_norm": 1.4330472946166992,
"learning_rate": 1.2875000000000001e-05,
"loss": 1.5133,
"step": 104
},
{
"epoch": 7.5,
"grad_norm": 1.3329960107803345,
"learning_rate": 1.3000000000000001e-05,
"loss": 1.5044,
"step": 105
},
{
"epoch": 7.571428571428571,
"grad_norm": 2.7081637382507324,
"learning_rate": 1.3125e-05,
"loss": 1.4825,
"step": 106
},
{
"epoch": 7.642857142857143,
"grad_norm": 2.8797085285186768,
"learning_rate": 1.3250000000000002e-05,
"loss": 1.4711,
"step": 107
},
{
"epoch": 7.714285714285714,
"grad_norm": 4.054922580718994,
"learning_rate": 1.3375000000000002e-05,
"loss": 1.4982,
"step": 108
},
{
"epoch": 7.785714285714286,
"grad_norm": 7.4362311363220215,
"learning_rate": 1.3500000000000001e-05,
"loss": 1.4786,
"step": 109
},
{
"epoch": 7.857142857142857,
"grad_norm": 2.5698859691619873,
"learning_rate": 1.3625e-05,
"loss": 1.4572,
"step": 110
},
{
"epoch": 7.928571428571429,
"grad_norm": 2.9194092750549316,
"learning_rate": 1.3750000000000002e-05,
"loss": 1.4916,
"step": 111
},
{
"epoch": 8.0,
"grad_norm": 2.044605016708374,
"learning_rate": 1.3875000000000002e-05,
"loss": 1.4879,
"step": 112
},
{
"epoch": 8.071428571428571,
"grad_norm": 1.3414772748947144,
"learning_rate": 1.4000000000000001e-05,
"loss": 1.4887,
"step": 113
},
{
"epoch": 8.142857142857142,
"grad_norm": 2.0897555351257324,
"learning_rate": 1.4125e-05,
"loss": 1.4908,
"step": 114
},
{
"epoch": 8.214285714285714,
"grad_norm": 3.721792697906494,
"learning_rate": 1.4249999999999999e-05,
"loss": 1.4978,
"step": 115
},
{
"epoch": 8.285714285714286,
"grad_norm": 1.3067519664764404,
"learning_rate": 1.4374999999999999e-05,
"loss": 1.4903,
"step": 116
},
{
"epoch": 8.357142857142858,
"grad_norm": 2.3188929557800293,
"learning_rate": 1.45e-05,
"loss": 1.475,
"step": 117
},
{
"epoch": 8.428571428571429,
"grad_norm": 2.131817102432251,
"learning_rate": 1.4625e-05,
"loss": 1.4877,
"step": 118
},
{
"epoch": 8.5,
"grad_norm": 5.011297225952148,
"learning_rate": 1.475e-05,
"loss": 1.4651,
"step": 119
},
{
"epoch": 8.571428571428571,
"grad_norm": 2.497302770614624,
"learning_rate": 1.4875e-05,
"loss": 1.4488,
"step": 120
},
{
"epoch": 8.642857142857142,
"grad_norm": 1.3582658767700195,
"learning_rate": 1.5e-05,
"loss": 1.4446,
"step": 121
},
{
"epoch": 8.714285714285714,
"grad_norm": 5.030256748199463,
"learning_rate": 1.5125e-05,
"loss": 1.4445,
"step": 122
},
{
"epoch": 8.785714285714286,
"grad_norm": 6.480685234069824,
"learning_rate": 1.525e-05,
"loss": 1.4853,
"step": 123
},
{
"epoch": 8.857142857142858,
"grad_norm": 9.358418464660645,
"learning_rate": 1.5375e-05,
"loss": 1.5026,
"step": 124
},
{
"epoch": 8.928571428571429,
"grad_norm": 8.761038780212402,
"learning_rate": 1.55e-05,
"loss": 1.4884,
"step": 125
},
{
"epoch": 9.0,
"grad_norm": 1.6179229021072388,
"learning_rate": 1.5625e-05,
"loss": 1.4705,
"step": 126
},
{
"epoch": 9.071428571428571,
"grad_norm": 2.0552783012390137,
"learning_rate": 1.575e-05,
"loss": 1.4869,
"step": 127
},
{
"epoch": 9.142857142857142,
"grad_norm": 6.032142162322998,
"learning_rate": 1.5875e-05,
"loss": 1.4461,
"step": 128
},
{
"epoch": 9.214285714285714,
"grad_norm": 2.0145914554595947,
"learning_rate": 1.6000000000000003e-05,
"loss": 1.4625,
"step": 129
},
{
"epoch": 9.285714285714286,
"grad_norm": 5.505954742431641,
"learning_rate": 1.6125000000000002e-05,
"loss": 1.4764,
"step": 130
},
{
"epoch": 9.357142857142858,
"grad_norm": 4.161317348480225,
"learning_rate": 1.6250000000000002e-05,
"loss": 1.4558,
"step": 131
},
{
"epoch": 9.428571428571429,
"grad_norm": 6.453343391418457,
"learning_rate": 1.6375e-05,
"loss": 1.4914,
"step": 132
},
{
"epoch": 9.5,
"grad_norm": 2.7060706615448,
"learning_rate": 1.65e-05,
"loss": 1.4479,
"step": 133
},
{
"epoch": 9.571428571428571,
"grad_norm": 6.37178897857666,
"learning_rate": 1.6625e-05,
"loss": 1.4951,
"step": 134
},
{
"epoch": 9.642857142857142,
"grad_norm": 1.644932746887207,
"learning_rate": 1.675e-05,
"loss": 1.4492,
"step": 135
},
{
"epoch": 9.714285714285714,
"grad_norm": 4.173709392547607,
"learning_rate": 1.6875000000000004e-05,
"loss": 1.4419,
"step": 136
},
{
"epoch": 9.785714285714286,
"grad_norm": 2.661614179611206,
"learning_rate": 1.7000000000000003e-05,
"loss": 1.4419,
"step": 137
},
{
"epoch": 9.857142857142858,
"grad_norm": 2.468867778778076,
"learning_rate": 1.7125000000000003e-05,
"loss": 1.416,
"step": 138
},
{
"epoch": 9.928571428571429,
"grad_norm": 1.0798838138580322,
"learning_rate": 1.725e-05,
"loss": 1.4473,
"step": 139
},
{
"epoch": 10.0,
"grad_norm": 1.5816832780838013,
"learning_rate": 1.7375e-05,
"loss": 1.4399,
"step": 140
},
{
"epoch": 10.071428571428571,
"grad_norm": 2.063046932220459,
"learning_rate": 1.75e-05,
"loss": 1.4415,
"step": 141
},
{
"epoch": 10.142857142857142,
"grad_norm": 3.9278879165649414,
"learning_rate": 1.7625e-05,
"loss": 1.4463,
"step": 142
},
{
"epoch": 10.214285714285714,
"grad_norm": 1.3775101900100708,
"learning_rate": 1.775e-05,
"loss": 1.4019,
"step": 143
},
{
"epoch": 10.285714285714286,
"grad_norm": 6.007996082305908,
"learning_rate": 1.7875e-05,
"loss": 1.4282,
"step": 144
},
{
"epoch": 10.357142857142858,
"grad_norm": 4.488779067993164,
"learning_rate": 1.8e-05,
"loss": 1.451,
"step": 145
},
{
"epoch": 10.428571428571429,
"grad_norm": 5.015976905822754,
"learning_rate": 1.8125e-05,
"loss": 1.4089,
"step": 146
},
{
"epoch": 10.5,
"grad_norm": 1.4387730360031128,
"learning_rate": 1.825e-05,
"loss": 1.4249,
"step": 147
},
{
"epoch": 10.571428571428571,
"grad_norm": 2.4665675163269043,
"learning_rate": 1.8375e-05,
"loss": 1.4327,
"step": 148
},
{
"epoch": 10.642857142857142,
"grad_norm": 1.8020267486572266,
"learning_rate": 1.85e-05,
"loss": 1.3995,
"step": 149
},
{
"epoch": 10.714285714285714,
"grad_norm": 1.6808390617370605,
"learning_rate": 1.8625000000000002e-05,
"loss": 1.389,
"step": 150
},
{
"epoch": 10.785714285714286,
"grad_norm": 2.3376588821411133,
"learning_rate": 1.8750000000000002e-05,
"loss": 1.4748,
"step": 151
},
{
"epoch": 10.857142857142858,
"grad_norm": 2.5257272720336914,
"learning_rate": 1.8875e-05,
"loss": 1.4221,
"step": 152
},
{
"epoch": 10.928571428571429,
"grad_norm": 1.6355801820755005,
"learning_rate": 1.9e-05,
"loss": 1.3544,
"step": 153
},
{
"epoch": 11.0,
"grad_norm": 2.1227149963378906,
"learning_rate": 1.9125e-05,
"loss": 1.3493,
"step": 154
},
{
"epoch": 11.071428571428571,
"grad_norm": 4.7650370597839355,
"learning_rate": 1.925e-05,
"loss": 1.4708,
"step": 155
},
{
"epoch": 11.142857142857142,
"grad_norm": 2.83673095703125,
"learning_rate": 1.9375e-05,
"loss": 1.4064,
"step": 156
},
{
"epoch": 11.214285714285714,
"grad_norm": 2.4201812744140625,
"learning_rate": 1.9500000000000003e-05,
"loss": 1.3221,
"step": 157
},
{
"epoch": 11.285714285714286,
"grad_norm": 2.0276103019714355,
"learning_rate": 1.9625000000000003e-05,
"loss": 1.2859,
"step": 158
},
{
"epoch": 11.357142857142858,
"grad_norm": 7.189219951629639,
"learning_rate": 1.9750000000000002e-05,
"loss": 1.3526,
"step": 159
},
{
"epoch": 11.428571428571429,
"grad_norm": 2.9911937713623047,
"learning_rate": 1.9875000000000002e-05,
"loss": 1.319,
"step": 160
},
{
"epoch": 11.5,
"grad_norm": 7.41688346862793,
"learning_rate": 2e-05,
"loss": 1.2701,
"step": 161
},
{
"epoch": 11.571428571428571,
"grad_norm": 2.5221216678619385,
"learning_rate": 2.0125e-05,
"loss": 1.2342,
"step": 162
},
{
"epoch": 11.642857142857142,
"grad_norm": 3.1596360206604004,
"learning_rate": 2.025e-05,
"loss": 1.2818,
"step": 163
},
{
"epoch": 11.714285714285714,
"grad_norm": 4.858260631561279,
"learning_rate": 2.0375e-05,
"loss": 1.263,
"step": 164
},
{
"epoch": 11.785714285714286,
"grad_norm": 5.346636772155762,
"learning_rate": 2.05e-05,
"loss": 1.2936,
"step": 165
},
{
"epoch": 11.857142857142858,
"grad_norm": 4.936079502105713,
"learning_rate": 2.0625e-05,
"loss": 1.2727,
"step": 166
},
{
"epoch": 11.928571428571429,
"grad_norm": 10.273465156555176,
"learning_rate": 2.075e-05,
"loss": 1.2605,
"step": 167
},
{
"epoch": 12.0,
"grad_norm": 4.918086051940918,
"learning_rate": 2.0875e-05,
"loss": 1.2611,
"step": 168
},
{
"epoch": 12.071428571428571,
"grad_norm": 12.318052291870117,
"learning_rate": 2.1e-05,
"loss": 1.2175,
"step": 169
},
{
"epoch": 12.142857142857142,
"grad_norm": 4.972585201263428,
"learning_rate": 2.1125000000000002e-05,
"loss": 1.1154,
"step": 170
},
{
"epoch": 12.214285714285714,
"grad_norm": 4.9560441970825195,
"learning_rate": 2.125e-05,
"loss": 1.1845,
"step": 171
},
{
"epoch": 12.285714285714286,
"grad_norm": 5.026245594024658,
"learning_rate": 2.1375e-05,
"loss": 1.1234,
"step": 172
},
{
"epoch": 12.357142857142858,
"grad_norm": 7.455307483673096,
"learning_rate": 2.15e-05,
"loss": 1.1865,
"step": 173
},
{
"epoch": 12.428571428571429,
"grad_norm": 5.8239946365356445,
"learning_rate": 2.1625e-05,
"loss": 1.2061,
"step": 174
},
{
"epoch": 12.5,
"grad_norm": 7.225159168243408,
"learning_rate": 2.175e-05,
"loss": 1.2647,
"step": 175
},
{
"epoch": 12.571428571428571,
"grad_norm": 3.666829824447632,
"learning_rate": 2.1875e-05,
"loss": 1.1374,
"step": 176
},
{
"epoch": 12.642857142857142,
"grad_norm": 6.110956192016602,
"learning_rate": 2.2000000000000003e-05,
"loss": 1.1125,
"step": 177
},
{
"epoch": 12.714285714285714,
"grad_norm": 10.124720573425293,
"learning_rate": 2.2125000000000002e-05,
"loss": 1.1762,
"step": 178
},
{
"epoch": 12.785714285714286,
"grad_norm": 6.521277904510498,
"learning_rate": 2.2250000000000002e-05,
"loss": 1.2196,
"step": 179
},
{
"epoch": 12.857142857142858,
"grad_norm": 3.0751357078552246,
"learning_rate": 2.2375000000000002e-05,
"loss": 1.1462,
"step": 180
},
{
"epoch": 12.928571428571429,
"grad_norm": 4.803239345550537,
"learning_rate": 2.25e-05,
"loss": 1.0391,
"step": 181
},
{
"epoch": 13.0,
"grad_norm": 3.005739450454712,
"learning_rate": 2.2625e-05,
"loss": 1.0753,
"step": 182
},
{
"epoch": 13.071428571428571,
"grad_norm": 4.087983131408691,
"learning_rate": 2.275e-05,
"loss": 1.2163,
"step": 183
},
{
"epoch": 13.142857142857142,
"grad_norm": 3.475637674331665,
"learning_rate": 2.2875e-05,
"loss": 1.197,
"step": 184
},
{
"epoch": 13.214285714285714,
"grad_norm": 6.145781517028809,
"learning_rate": 2.3000000000000003e-05,
"loss": 1.1744,
"step": 185
},
{
"epoch": 13.285714285714286,
"grad_norm": 4.0928544998168945,
"learning_rate": 2.3125000000000003e-05,
"loss": 1.1189,
"step": 186
},
{
"epoch": 13.357142857142858,
"grad_norm": 9.976082801818848,
"learning_rate": 2.3250000000000003e-05,
"loss": 1.2488,
"step": 187
},
{
"epoch": 13.428571428571429,
"grad_norm": 6.045898914337158,
"learning_rate": 2.3375000000000002e-05,
"loss": 1.139,
"step": 188
},
{
"epoch": 13.5,
"grad_norm": 4.3854451179504395,
"learning_rate": 2.35e-05,
"loss": 1.0929,
"step": 189
},
{
"epoch": 13.571428571428571,
"grad_norm": 3.349039316177368,
"learning_rate": 2.3624999999999998e-05,
"loss": 1.0427,
"step": 190
},
{
"epoch": 13.642857142857142,
"grad_norm": 3.4710395336151123,
"learning_rate": 2.375e-05,
"loss": 1.1249,
"step": 191
},
{
"epoch": 13.714285714285714,
"grad_norm": 9.711404800415039,
"learning_rate": 2.3875e-05,
"loss": 1.2626,
"step": 192
},
{
"epoch": 13.785714285714286,
"grad_norm": 3.2990989685058594,
"learning_rate": 2.4e-05,
"loss": 1.1687,
"step": 193
},
{
"epoch": 13.857142857142858,
"grad_norm": 2.510340929031372,
"learning_rate": 2.4125e-05,
"loss": 1.0953,
"step": 194
},
{
"epoch": 13.928571428571429,
"grad_norm": 3.561298131942749,
"learning_rate": 2.425e-05,
"loss": 1.0411,
"step": 195
},
{
"epoch": 14.0,
"grad_norm": 4.719476699829102,
"learning_rate": 2.4375e-05,
"loss": 1.0803,
"step": 196
},
{
"epoch": 14.071428571428571,
"grad_norm": 3.3607053756713867,
"learning_rate": 2.45e-05,
"loss": 1.0709,
"step": 197
},
{
"epoch": 14.142857142857142,
"grad_norm": 4.803561687469482,
"learning_rate": 2.4625000000000002e-05,
"loss": 1.0335,
"step": 198
},
{
"epoch": 14.214285714285714,
"grad_norm": 3.8269684314727783,
"learning_rate": 2.4750000000000002e-05,
"loss": 1.0742,
"step": 199
},
{
"epoch": 14.285714285714286,
"grad_norm": 2.7520132064819336,
"learning_rate": 2.4875e-05,
"loss": 1.17,
"step": 200
},
{
"epoch": 14.357142857142858,
"grad_norm": 5.653459548950195,
"learning_rate": 2.5e-05,
"loss": 1.0918,
"step": 201
},
{
"epoch": 14.428571428571429,
"grad_norm": 2.774630546569824,
"learning_rate": 2.4999980961416097e-05,
"loss": 1.058,
"step": 202
},
{
"epoch": 14.5,
"grad_norm": 3.926753520965576,
"learning_rate": 2.499992384572238e-05,
"loss": 1.0942,
"step": 203
},
{
"epoch": 14.571428571428571,
"grad_norm": 2.9734742641448975,
"learning_rate": 2.4999828653092835e-05,
"loss": 1.007,
"step": 204
},
{
"epoch": 14.642857142857142,
"grad_norm": 2.9620471000671387,
"learning_rate": 2.4999695383817435e-05,
"loss": 1.0881,
"step": 205
},
{
"epoch": 14.714285714285714,
"grad_norm": 5.124941349029541,
"learning_rate": 2.499952403830214e-05,
"loss": 1.0806,
"step": 206
},
{
"epoch": 14.785714285714286,
"grad_norm": 3.0979349613189697,
"learning_rate": 2.4999314617068904e-05,
"loss": 1.0281,
"step": 207
},
{
"epoch": 14.857142857142858,
"grad_norm": 3.4293224811553955,
"learning_rate": 2.4999067120755652e-05,
"loss": 1.051,
"step": 208
},
{
"epoch": 14.928571428571429,
"grad_norm": 6.487819194793701,
"learning_rate": 2.4998781550116305e-05,
"loss": 1.0308,
"step": 209
},
{
"epoch": 15.0,
"grad_norm": 3.3049116134643555,
"learning_rate": 2.499845790602076e-05,
"loss": 0.9572,
"step": 210
},
{
"epoch": 15.071428571428571,
"grad_norm": 4.562008380889893,
"learning_rate": 2.4998096189454893e-05,
"loss": 1.0758,
"step": 211
},
{
"epoch": 15.142857142857142,
"grad_norm": 4.130517482757568,
"learning_rate": 2.4997696401520555e-05,
"loss": 1.0565,
"step": 212
},
{
"epoch": 15.214285714285714,
"grad_norm": 7.463335037231445,
"learning_rate": 2.499725854343557e-05,
"loss": 0.9738,
"step": 213
},
{
"epoch": 15.285714285714286,
"grad_norm": 5.149586200714111,
"learning_rate": 2.4996782616533732e-05,
"loss": 1.0234,
"step": 214
},
{
"epoch": 15.357142857142858,
"grad_norm": 5.69240140914917,
"learning_rate": 2.499626862226479e-05,
"loss": 1.1353,
"step": 215
},
{
"epoch": 15.428571428571429,
"grad_norm": 4.097994327545166,
"learning_rate": 2.4995716562194465e-05,
"loss": 1.0006,
"step": 216
},
{
"epoch": 15.5,
"grad_norm": 3.709420919418335,
"learning_rate": 2.499512643800443e-05,
"loss": 0.9626,
"step": 217
},
{
"epoch": 15.571428571428571,
"grad_norm": 6.93643856048584,
"learning_rate": 2.4994498251492302e-05,
"loss": 0.9727,
"step": 218
},
{
"epoch": 15.642857142857142,
"grad_norm": 2.359558582305908,
"learning_rate": 2.4993832004571646e-05,
"loss": 1.0169,
"step": 219
},
{
"epoch": 15.714285714285714,
"grad_norm": 3.7421205043792725,
"learning_rate": 2.4993127699271966e-05,
"loss": 1.0996,
"step": 220
},
{
"epoch": 15.785714285714286,
"grad_norm": 3.670799493789673,
"learning_rate": 2.49923853377387e-05,
"loss": 1.0078,
"step": 221
},
{
"epoch": 15.857142857142858,
"grad_norm": 3.7862744331359863,
"learning_rate": 2.4991604922233204e-05,
"loss": 0.9785,
"step": 222
},
{
"epoch": 15.928571428571429,
"grad_norm": 2.4504923820495605,
"learning_rate": 2.4990786455132764e-05,
"loss": 0.9815,
"step": 223
},
{
"epoch": 16.0,
"grad_norm": 4.063878536224365,
"learning_rate": 2.4989929938930576e-05,
"loss": 0.9463,
"step": 224
},
{
"epoch": 16.071428571428573,
"grad_norm": 2.9702260494232178,
"learning_rate": 2.498903537623573e-05,
"loss": 0.9618,
"step": 225
},
{
"epoch": 16.142857142857142,
"grad_norm": 2.672618865966797,
"learning_rate": 2.4988102769773227e-05,
"loss": 0.9338,
"step": 226
},
{
"epoch": 16.214285714285715,
"grad_norm": 6.381713390350342,
"learning_rate": 2.4987132122383936e-05,
"loss": 1.0038,
"step": 227
},
{
"epoch": 16.285714285714285,
"grad_norm": 2.369471549987793,
"learning_rate": 2.4986123437024627e-05,
"loss": 1.0239,
"step": 228
},
{
"epoch": 16.357142857142858,
"grad_norm": 2.5277535915374756,
"learning_rate": 2.4985076716767927e-05,
"loss": 0.9014,
"step": 229
},
{
"epoch": 16.428571428571427,
"grad_norm": 3.924433946609497,
"learning_rate": 2.4983991964802327e-05,
"loss": 0.966,
"step": 230
},
{
"epoch": 16.5,
"grad_norm": 3.6045427322387695,
"learning_rate": 2.4982869184432174e-05,
"loss": 0.9695,
"step": 231
},
{
"epoch": 16.571428571428573,
"grad_norm": 6.360043048858643,
"learning_rate": 2.498170837907765e-05,
"loss": 1.0349,
"step": 232
},
{
"epoch": 16.642857142857142,
"grad_norm": 4.736248970031738,
"learning_rate": 2.4980509552274765e-05,
"loss": 0.992,
"step": 233
},
{
"epoch": 16.714285714285715,
"grad_norm": 2.659874677658081,
"learning_rate": 2.4979272707675356e-05,
"loss": 0.9548,
"step": 234
},
{
"epoch": 16.785714285714285,
"grad_norm": 2.803654909133911,
"learning_rate": 2.497799784904707e-05,
"loss": 0.8971,
"step": 235
},
{
"epoch": 16.857142857142858,
"grad_norm": 4.661594867706299,
"learning_rate": 2.4976684980273338e-05,
"loss": 0.9161,
"step": 236
},
{
"epoch": 16.928571428571427,
"grad_norm": 3.7907001972198486,
"learning_rate": 2.4975334105353396e-05,
"loss": 0.8663,
"step": 237
},
{
"epoch": 17.0,
"grad_norm": 6.289381504058838,
"learning_rate": 2.497394522840224e-05,
"loss": 0.9157,
"step": 238
},
{
"epoch": 17.071428571428573,
"grad_norm": 3.7386388778686523,
"learning_rate": 2.4972518353650626e-05,
"loss": 0.9065,
"step": 239
},
{
"epoch": 17.142857142857142,
"grad_norm": 2.243206739425659,
"learning_rate": 2.497105348544507e-05,
"loss": 0.8066,
"step": 240
},
{
"epoch": 17.214285714285715,
"grad_norm": 3.9942452907562256,
"learning_rate": 2.4969550628247805e-05,
"loss": 0.9173,
"step": 241
},
{
"epoch": 17.285714285714285,
"grad_norm": 4.815189361572266,
"learning_rate": 2.49680097866368e-05,
"loss": 0.9629,
"step": 242
},
{
"epoch": 17.357142857142858,
"grad_norm": 5.594153881072998,
"learning_rate": 2.4966430965305727e-05,
"loss": 0.9673,
"step": 243
},
{
"epoch": 17.428571428571427,
"grad_norm": 4.274055004119873,
"learning_rate": 2.4964814169063948e-05,
"loss": 0.9222,
"step": 244
},
{
"epoch": 17.5,
"grad_norm": 3.3747873306274414,
"learning_rate": 2.4963159402836506e-05,
"loss": 0.8992,
"step": 245
},
{
"epoch": 17.571428571428573,
"grad_norm": 4.040309429168701,
"learning_rate": 2.49614666716641e-05,
"loss": 0.8257,
"step": 246
},
{
"epoch": 17.642857142857142,
"grad_norm": 3.115257740020752,
"learning_rate": 2.495973598070309e-05,
"loss": 0.9713,
"step": 247
},
{
"epoch": 17.714285714285715,
"grad_norm": 2.5480332374572754,
"learning_rate": 2.4957967335225456e-05,
"loss": 0.8687,
"step": 248
},
{
"epoch": 17.785714285714285,
"grad_norm": 3.8874218463897705,
"learning_rate": 2.4956160740618806e-05,
"loss": 0.9432,
"step": 249
},
{
"epoch": 17.857142857142858,
"grad_norm": 3.870405673980713,
"learning_rate": 2.495431620238633e-05,
"loss": 0.8584,
"step": 250
},
{
"epoch": 17.928571428571427,
"grad_norm": 2.534801483154297,
"learning_rate": 2.495243372614682e-05,
"loss": 0.9255,
"step": 251
},
{
"epoch": 18.0,
"grad_norm": 2.785574197769165,
"learning_rate": 2.495051331763462e-05,
"loss": 0.8727,
"step": 252
},
{
"epoch": 18.071428571428573,
"grad_norm": 4.180792331695557,
"learning_rate": 2.494855498269963e-05,
"loss": 0.9683,
"step": 253
},
{
"epoch": 18.142857142857142,
"grad_norm": 5.634334564208984,
"learning_rate": 2.4946558727307277e-05,
"loss": 0.9485,
"step": 254
},
{
"epoch": 18.214285714285715,
"grad_norm": 4.938521862030029,
"learning_rate": 2.4944524557538503e-05,
"loss": 0.878,
"step": 255
},
{
"epoch": 18.285714285714285,
"grad_norm": 3.9465174674987793,
"learning_rate": 2.4942452479589735e-05,
"loss": 0.9731,
"step": 256
},
{
"epoch": 18.357142857142858,
"grad_norm": 3.601600170135498,
"learning_rate": 2.494034249977289e-05,
"loss": 0.9057,
"step": 257
},
{
"epoch": 18.428571428571427,
"grad_norm": 3.65264892578125,
"learning_rate": 2.4938194624515333e-05,
"loss": 0.9126,
"step": 258
},
{
"epoch": 18.5,
"grad_norm": 4.7837677001953125,
"learning_rate": 2.4936008860359854e-05,
"loss": 0.9847,
"step": 259
},
{
"epoch": 18.571428571428573,
"grad_norm": 4.8172831535339355,
"learning_rate": 2.4933785213964677e-05,
"loss": 0.9202,
"step": 260
},
{
"epoch": 18.642857142857142,
"grad_norm": 4.068408012390137,
"learning_rate": 2.4931523692103418e-05,
"loss": 0.9072,
"step": 261
},
{
"epoch": 18.714285714285715,
"grad_norm": 4.106720924377441,
"learning_rate": 2.492922430166506e-05,
"loss": 0.8837,
"step": 262
},
{
"epoch": 18.785714285714285,
"grad_norm": 3.7029213905334473,
"learning_rate": 2.4926887049653943e-05,
"loss": 0.9464,
"step": 263
},
{
"epoch": 18.857142857142858,
"grad_norm": 3.3589251041412354,
"learning_rate": 2.492451194318975e-05,
"loss": 0.8956,
"step": 264
},
{
"epoch": 18.928571428571427,
"grad_norm": 3.4119558334350586,
"learning_rate": 2.4922098989507454e-05,
"loss": 0.9177,
"step": 265
},
{
"epoch": 19.0,
"grad_norm": 2.3078062534332275,
"learning_rate": 2.4919648195957344e-05,
"loss": 0.8086,
"step": 266
},
{
"epoch": 19.071428571428573,
"grad_norm": 2.213747978210449,
"learning_rate": 2.4917159570004954e-05,
"loss": 0.8417,
"step": 267
},
{
"epoch": 19.142857142857142,
"grad_norm": 2.629509687423706,
"learning_rate": 2.491463311923108e-05,
"loss": 0.9628,
"step": 268
},
{
"epoch": 19.214285714285715,
"grad_norm": 1.877100944519043,
"learning_rate": 2.491206885133171e-05,
"loss": 0.8174,
"step": 269
},
{
"epoch": 19.285714285714285,
"grad_norm": 2.653271436691284,
"learning_rate": 2.490946677411807e-05,
"loss": 0.8823,
"step": 270
},
{
"epoch": 19.357142857142858,
"grad_norm": 2.488171339035034,
"learning_rate": 2.4906826895516528e-05,
"loss": 0.888,
"step": 271
},
{
"epoch": 19.428571428571427,
"grad_norm": 2.5691208839416504,
"learning_rate": 2.490414922356861e-05,
"loss": 0.87,
"step": 272
},
{
"epoch": 19.5,
"grad_norm": 3.2644972801208496,
"learning_rate": 2.4901433766430975e-05,
"loss": 0.9013,
"step": 273
},
{
"epoch": 19.571428571428573,
"grad_norm": 5.025469779968262,
"learning_rate": 2.4898680532375374e-05,
"loss": 0.9013,
"step": 274
},
{
"epoch": 19.642857142857142,
"grad_norm": 3.607896566390991,
"learning_rate": 2.489588952978863e-05,
"loss": 0.8409,
"step": 275
},
{
"epoch": 19.714285714285715,
"grad_norm": 2.6993868350982666,
"learning_rate": 2.4893060767172632e-05,
"loss": 0.7845,
"step": 276
},
{
"epoch": 19.785714285714285,
"grad_norm": 2.4249427318573,
"learning_rate": 2.489019425314427e-05,
"loss": 0.8113,
"step": 277
},
{
"epoch": 19.857142857142858,
"grad_norm": 4.231166362762451,
"learning_rate": 2.4887289996435452e-05,
"loss": 0.8879,
"step": 278
},
{
"epoch": 19.928571428571427,
"grad_norm": 7.234214782714844,
"learning_rate": 2.4884348005893045e-05,
"loss": 0.8847,
"step": 279
},
{
"epoch": 20.0,
"grad_norm": 4.228268146514893,
"learning_rate": 2.488136829047886e-05,
"loss": 0.8504,
"step": 280
},
{
"epoch": 20.071428571428573,
"grad_norm": 2.944539785385132,
"learning_rate": 2.487835085926963e-05,
"loss": 0.845,
"step": 281
},
{
"epoch": 20.142857142857142,
"grad_norm": 2.7270450592041016,
"learning_rate": 2.487529572145697e-05,
"loss": 0.8335,
"step": 282
},
{
"epoch": 20.214285714285715,
"grad_norm": 2.7035233974456787,
"learning_rate": 2.4872202886347362e-05,
"loss": 0.815,
"step": 283
},
{
"epoch": 20.285714285714285,
"grad_norm": 3.4698843955993652,
"learning_rate": 2.486907236336212e-05,
"loss": 0.875,
"step": 284
},
{
"epoch": 20.357142857142858,
"grad_norm": 4.334729194641113,
"learning_rate": 2.4865904162037358e-05,
"loss": 0.8447,
"step": 285
},
{
"epoch": 20.428571428571427,
"grad_norm": 3.5744895935058594,
"learning_rate": 2.4862698292023963e-05,
"loss": 0.792,
"step": 286
},
{
"epoch": 20.5,
"grad_norm": 3.5507006645202637,
"learning_rate": 2.4859454763087577e-05,
"loss": 0.7948,
"step": 287
},
{
"epoch": 20.571428571428573,
"grad_norm": 2.2967231273651123,
"learning_rate": 2.4856173585108544e-05,
"loss": 0.7975,
"step": 288
},
{
"epoch": 20.642857142857142,
"grad_norm": 2.5421581268310547,
"learning_rate": 2.4852854768081912e-05,
"loss": 0.8074,
"step": 289
},
{
"epoch": 20.714285714285715,
"grad_norm": 2.4942188262939453,
"learning_rate": 2.4849498322117364e-05,
"loss": 0.7962,
"step": 290
},
{
"epoch": 20.785714285714285,
"grad_norm": 4.169875621795654,
"learning_rate": 2.4846104257439222e-05,
"loss": 0.8181,
"step": 291
},
{
"epoch": 20.857142857142858,
"grad_norm": 6.13093376159668,
"learning_rate": 2.4842672584386396e-05,
"loss": 0.8434,
"step": 292
},
{
"epoch": 20.928571428571427,
"grad_norm": 3.3157453536987305,
"learning_rate": 2.483920331341235e-05,
"loss": 0.8108,
"step": 293
},
{
"epoch": 21.0,
"grad_norm": 3.0505266189575195,
"learning_rate": 2.4835696455085093e-05,
"loss": 0.7412,
"step": 294
},
{
"epoch": 21.071428571428573,
"grad_norm": 5.5504350662231445,
"learning_rate": 2.483215202008712e-05,
"loss": 0.8437,
"step": 295
},
{
"epoch": 21.142857142857142,
"grad_norm": 6.492306709289551,
"learning_rate": 2.4828570019215396e-05,
"loss": 0.8533,
"step": 296
},
{
"epoch": 21.214285714285715,
"grad_norm": 3.0854108333587646,
"learning_rate": 2.4824950463381314e-05,
"loss": 0.7881,
"step": 297
},
{
"epoch": 21.285714285714285,
"grad_norm": 3.006676435470581,
"learning_rate": 2.482129336361067e-05,
"loss": 0.8156,
"step": 298
},
{
"epoch": 21.357142857142858,
"grad_norm": 3.312669515609741,
"learning_rate": 2.481759873104363e-05,
"loss": 0.8011,
"step": 299
},
{
"epoch": 21.428571428571427,
"grad_norm": 5.346591949462891,
"learning_rate": 2.4813866576934676e-05,
"loss": 0.7895,
"step": 300
},
{
"epoch": 21.5,
"grad_norm": 5.8296074867248535,
"learning_rate": 2.4810096912652604e-05,
"loss": 0.893,
"step": 301
},
{
"epoch": 21.571428571428573,
"grad_norm": 3.022014617919922,
"learning_rate": 2.480628974968046e-05,
"loss": 0.7875,
"step": 302
},
{
"epoch": 21.642857142857142,
"grad_norm": 3.372377395629883,
"learning_rate": 2.4802445099615525e-05,
"loss": 0.8107,
"step": 303
},
{
"epoch": 21.714285714285715,
"grad_norm": 4.238394260406494,
"learning_rate": 2.479856297416927e-05,
"loss": 0.7879,
"step": 304
},
{
"epoch": 21.785714285714285,
"grad_norm": 4.552757263183594,
"learning_rate": 2.4794643385167327e-05,
"loss": 0.8352,
"step": 305
},
{
"epoch": 21.857142857142858,
"grad_norm": 4.247888088226318,
"learning_rate": 2.4790686344549436e-05,
"loss": 0.7733,
"step": 306
},
{
"epoch": 21.928571428571427,
"grad_norm": 2.4774329662323,
"learning_rate": 2.478669186436943e-05,
"loss": 0.7338,
"step": 307
},
{
"epoch": 22.0,
"grad_norm": 2.973707675933838,
"learning_rate": 2.478265995679519e-05,
"loss": 0.7532,
"step": 308
},
{
"epoch": 22.071428571428573,
"grad_norm": 5.437268257141113,
"learning_rate": 2.4778590634108613e-05,
"loss": 0.8951,
"step": 309
},
{
"epoch": 22.142857142857142,
"grad_norm": 5.616093635559082,
"learning_rate": 2.4774483908705546e-05,
"loss": 0.8117,
"step": 310
},
{
"epoch": 22.214285714285715,
"grad_norm": 3.7091429233551025,
"learning_rate": 2.4770339793095802e-05,
"loss": 0.8166,
"step": 311
},
{
"epoch": 22.285714285714285,
"grad_norm": 3.873770236968994,
"learning_rate": 2.4766158299903062e-05,
"loss": 0.7786,
"step": 312
},
{
"epoch": 22.357142857142858,
"grad_norm": 3.692065477371216,
"learning_rate": 2.4761939441864895e-05,
"loss": 0.8148,
"step": 313
},
{
"epoch": 22.428571428571427,
"grad_norm": 4.50544548034668,
"learning_rate": 2.4757683231832662e-05,
"loss": 0.9163,
"step": 314
},
{
"epoch": 22.5,
"grad_norm": 4.464144229888916,
"learning_rate": 2.4753389682771523e-05,
"loss": 0.8223,
"step": 315
},
{
"epoch": 22.571428571428573,
"grad_norm": 3.729602575302124,
"learning_rate": 2.474905880776037e-05,
"loss": 0.8479,
"step": 316
},
{
"epoch": 22.642857142857142,
"grad_norm": 3.674757957458496,
"learning_rate": 2.47446906199918e-05,
"loss": 0.7319,
"step": 317
},
{
"epoch": 22.714285714285715,
"grad_norm": 4.361830711364746,
"learning_rate": 2.4740285132772072e-05,
"loss": 0.8054,
"step": 318
},
{
"epoch": 22.785714285714285,
"grad_norm": 4.365119934082031,
"learning_rate": 2.4735842359521064e-05,
"loss": 0.801,
"step": 319
},
{
"epoch": 22.857142857142858,
"grad_norm": 4.092256546020508,
"learning_rate": 2.4731362313772233e-05,
"loss": 0.8389,
"step": 320
},
{
"epoch": 22.928571428571427,
"grad_norm": 3.239090919494629,
"learning_rate": 2.4726845009172572e-05,
"loss": 0.7814,
"step": 321
},
{
"epoch": 23.0,
"grad_norm": 2.6484477519989014,
"learning_rate": 2.4722290459482578e-05,
"loss": 0.7468,
"step": 322
},
{
"epoch": 23.071428571428573,
"grad_norm": 3.275247573852539,
"learning_rate": 2.47176986785762e-05,
"loss": 0.827,
"step": 323
},
{
"epoch": 23.142857142857142,
"grad_norm": 3.040330410003662,
"learning_rate": 2.47130696804408e-05,
"loss": 0.7849,
"step": 324
},
{
"epoch": 23.214285714285715,
"grad_norm": 4.0800395011901855,
"learning_rate": 2.47084034791771e-05,
"loss": 0.856,
"step": 325
},
{
"epoch": 23.285714285714285,
"grad_norm": 3.5290443897247314,
"learning_rate": 2.4703700088999167e-05,
"loss": 0.825,
"step": 326
},
{
"epoch": 23.357142857142858,
"grad_norm": 3.670090436935425,
"learning_rate": 2.4698959524234346e-05,
"loss": 0.8061,
"step": 327
},
{
"epoch": 23.428571428571427,
"grad_norm": 1.9602779150009155,
"learning_rate": 2.4694181799323206e-05,
"loss": 0.7803,
"step": 328
},
{
"epoch": 23.5,
"grad_norm": 3.8044676780700684,
"learning_rate": 2.468936692881954e-05,
"loss": 0.767,
"step": 329
},
{
"epoch": 23.571428571428573,
"grad_norm": 3.5850207805633545,
"learning_rate": 2.4684514927390274e-05,
"loss": 0.7555,
"step": 330
},
{
"epoch": 23.642857142857142,
"grad_norm": 2.4394097328186035,
"learning_rate": 2.4679625809815443e-05,
"loss": 0.7911,
"step": 331
},
{
"epoch": 23.714285714285715,
"grad_norm": 3.6944406032562256,
"learning_rate": 2.467469959098815e-05,
"loss": 0.7708,
"step": 332
},
{
"epoch": 23.785714285714285,
"grad_norm": 3.9482243061065674,
"learning_rate": 2.4669736285914505e-05,
"loss": 0.8035,
"step": 333
},
{
"epoch": 23.857142857142858,
"grad_norm": 4.452454566955566,
"learning_rate": 2.4664735909713606e-05,
"loss": 0.7837,
"step": 334
},
{
"epoch": 23.928571428571427,
"grad_norm": 3.8159029483795166,
"learning_rate": 2.465969847761746e-05,
"loss": 0.8188,
"step": 335
},
{
"epoch": 24.0,
"grad_norm": 2.743255138397217,
"learning_rate": 2.4654624004970957e-05,
"loss": 0.7004,
"step": 336
},
{
"epoch": 24.071428571428573,
"grad_norm": 3.636960506439209,
"learning_rate": 2.464951250723183e-05,
"loss": 0.7809,
"step": 337
},
{
"epoch": 24.142857142857142,
"grad_norm": 3.9498450756073,
"learning_rate": 2.4644363999970576e-05,
"loss": 0.7903,
"step": 338
},
{
"epoch": 24.214285714285715,
"grad_norm": 3.0506386756896973,
"learning_rate": 2.4639178498870452e-05,
"loss": 0.8114,
"step": 339
},
{
"epoch": 24.285714285714285,
"grad_norm": 3.2507994174957275,
"learning_rate": 2.4633956019727385e-05,
"loss": 0.7556,
"step": 340
},
{
"epoch": 24.357142857142858,
"grad_norm": 5.005919933319092,
"learning_rate": 2.4628696578449956e-05,
"loss": 0.8456,
"step": 341
},
{
"epoch": 24.428571428571427,
"grad_norm": 3.5123672485351562,
"learning_rate": 2.4623400191059335e-05,
"loss": 0.7921,
"step": 342
},
{
"epoch": 24.5,
"grad_norm": 3.3725931644439697,
"learning_rate": 2.4618066873689238e-05,
"loss": 0.7986,
"step": 343
},
{
"epoch": 24.571428571428573,
"grad_norm": 2.6177029609680176,
"learning_rate": 2.461269664258587e-05,
"loss": 0.7449,
"step": 344
},
{
"epoch": 24.642857142857142,
"grad_norm": 5.391937732696533,
"learning_rate": 2.4607289514107888e-05,
"loss": 0.7433,
"step": 345
},
{
"epoch": 24.714285714285715,
"grad_norm": 2.888105630874634,
"learning_rate": 2.460184550472635e-05,
"loss": 0.7079,
"step": 346
},
{
"epoch": 24.785714285714285,
"grad_norm": 2.3010149002075195,
"learning_rate": 2.4596364631024643e-05,
"loss": 0.7376,
"step": 347
},
{
"epoch": 24.857142857142858,
"grad_norm": 3.590585470199585,
"learning_rate": 2.459084690969846e-05,
"loss": 0.7532,
"step": 348
},
{
"epoch": 24.928571428571427,
"grad_norm": 6.115037441253662,
"learning_rate": 2.4585292357555746e-05,
"loss": 0.7568,
"step": 349
},
{
"epoch": 25.0,
"grad_norm": 2.7985963821411133,
"learning_rate": 2.457970099151662e-05,
"loss": 0.7001,
"step": 350
},
{
"epoch": 25.071428571428573,
"grad_norm": 5.373832702636719,
"learning_rate": 2.4574072828613354e-05,
"loss": 0.8254,
"step": 351
},
{
"epoch": 25.142857142857142,
"grad_norm": 3.988442897796631,
"learning_rate": 2.4568407885990313e-05,
"loss": 0.7305,
"step": 352
},
{
"epoch": 25.214285714285715,
"grad_norm": 3.5456085205078125,
"learning_rate": 2.4562706180903894e-05,
"loss": 0.7546,
"step": 353
},
{
"epoch": 25.285714285714285,
"grad_norm": 3.8878087997436523,
"learning_rate": 2.4556967730722478e-05,
"loss": 0.794,
"step": 354
},
{
"epoch": 25.357142857142858,
"grad_norm": 3.997696876525879,
"learning_rate": 2.455119255292638e-05,
"loss": 0.7983,
"step": 355
},
{
"epoch": 25.428571428571427,
"grad_norm": 5.822238445281982,
"learning_rate": 2.4545380665107786e-05,
"loss": 0.899,
"step": 356
},
{
"epoch": 25.5,
"grad_norm": 3.2456319332122803,
"learning_rate": 2.453953208497073e-05,
"loss": 0.7284,
"step": 357
},
{
"epoch": 25.571428571428573,
"grad_norm": 3.3598856925964355,
"learning_rate": 2.4533646830330986e-05,
"loss": 0.8056,
"step": 358
},
{
"epoch": 25.642857142857142,
"grad_norm": 3.6620326042175293,
"learning_rate": 2.452772491911607e-05,
"loss": 0.7994,
"step": 359
},
{
"epoch": 25.714285714285715,
"grad_norm": 4.005545139312744,
"learning_rate": 2.4521766369365142e-05,
"loss": 0.7773,
"step": 360
},
{
"epoch": 25.785714285714285,
"grad_norm": 4.023702144622803,
"learning_rate": 2.4515771199228987e-05,
"loss": 0.7717,
"step": 361
},
{
"epoch": 25.857142857142858,
"grad_norm": 1.9900436401367188,
"learning_rate": 2.450973942696993e-05,
"loss": 0.7331,
"step": 362
},
{
"epoch": 25.928571428571427,
"grad_norm": 5.718785285949707,
"learning_rate": 2.450367107096179e-05,
"loss": 0.7818,
"step": 363
},
{
"epoch": 26.0,
"grad_norm": 3.5384066104888916,
"learning_rate": 2.449756614968984e-05,
"loss": 0.8165,
"step": 364
},
{
"epoch": 26.071428571428573,
"grad_norm": 2.2805933952331543,
"learning_rate": 2.449142468175072e-05,
"loss": 0.68,
"step": 365
},
{
"epoch": 26.142857142857142,
"grad_norm": 2.818986654281616,
"learning_rate": 2.4485246685852413e-05,
"loss": 0.6765,
"step": 366
},
{
"epoch": 26.214285714285715,
"grad_norm": 3.089205741882324,
"learning_rate": 2.4479032180814166e-05,
"loss": 0.6901,
"step": 367
},
{
"epoch": 26.285714285714285,
"grad_norm": 2.990636110305786,
"learning_rate": 2.447278118556644e-05,
"loss": 0.6839,
"step": 368
},
{
"epoch": 26.357142857142858,
"grad_norm": 2.0169079303741455,
"learning_rate": 2.446649371915084e-05,
"loss": 0.6499,
"step": 369
},
{
"epoch": 26.428571428571427,
"grad_norm": 2.073349714279175,
"learning_rate": 2.4460169800720095e-05,
"loss": 0.6312,
"step": 370
},
{
"epoch": 26.5,
"grad_norm": 5.544507026672363,
"learning_rate": 2.4453809449537947e-05,
"loss": 0.7886,
"step": 371
},
{
"epoch": 26.571428571428573,
"grad_norm": 3.550513505935669,
"learning_rate": 2.4447412684979127e-05,
"loss": 0.7682,
"step": 372
},
{
"epoch": 26.642857142857142,
"grad_norm": 4.850046634674072,
"learning_rate": 2.4440979526529295e-05,
"loss": 0.7891,
"step": 373
},
{
"epoch": 26.714285714285715,
"grad_norm": 5.702778339385986,
"learning_rate": 2.4434509993784972e-05,
"loss": 0.7478,
"step": 374
},
{
"epoch": 26.785714285714285,
"grad_norm": 2.3614633083343506,
"learning_rate": 2.4428004106453462e-05,
"loss": 0.6622,
"step": 375
},
{
"epoch": 26.857142857142858,
"grad_norm": 4.546297073364258,
"learning_rate": 2.4421461884352836e-05,
"loss": 0.7229,
"step": 376
},
{
"epoch": 26.928571428571427,
"grad_norm": 4.9091386795043945,
"learning_rate": 2.4414883347411836e-05,
"loss": 0.7186,
"step": 377
},
{
"epoch": 27.0,
"grad_norm": 3.949092388153076,
"learning_rate": 2.440826851566983e-05,
"loss": 0.7257,
"step": 378
},
{
"epoch": 27.071428571428573,
"grad_norm": 5.0857439041137695,
"learning_rate": 2.4401617409276735e-05,
"loss": 0.6953,
"step": 379
},
{
"epoch": 27.142857142857142,
"grad_norm": 4.325730323791504,
"learning_rate": 2.439493004849298e-05,
"loss": 0.6696,
"step": 380
},
{
"epoch": 27.214285714285715,
"grad_norm": 3.15690541267395,
"learning_rate": 2.438820645368942e-05,
"loss": 0.7532,
"step": 381
},
{
"epoch": 27.285714285714285,
"grad_norm": 4.08896017074585,
"learning_rate": 2.4381446645347297e-05,
"loss": 0.7039,
"step": 382
},
{
"epoch": 27.357142857142858,
"grad_norm": 3.996779441833496,
"learning_rate": 2.4374650644058156e-05,
"loss": 0.7489,
"step": 383
},
{
"epoch": 27.428571428571427,
"grad_norm": 4.629286766052246,
"learning_rate": 2.43678184705238e-05,
"loss": 0.7634,
"step": 384
},
{
"epoch": 27.5,
"grad_norm": 4.668010234832764,
"learning_rate": 2.4360950145556208e-05,
"loss": 0.7321,
"step": 385
},
{
"epoch": 27.571428571428573,
"grad_norm": 2.8770227432250977,
"learning_rate": 2.4354045690077492e-05,
"loss": 0.6417,
"step": 386
},
{
"epoch": 27.642857142857142,
"grad_norm": 4.506302356719971,
"learning_rate": 2.4347105125119824e-05,
"loss": 0.6698,
"step": 387
},
{
"epoch": 27.714285714285715,
"grad_norm": 2.6643428802490234,
"learning_rate": 2.4340128471825362e-05,
"loss": 0.6938,
"step": 388
},
{
"epoch": 27.785714285714285,
"grad_norm": 2.8056280612945557,
"learning_rate": 2.4333115751446208e-05,
"loss": 0.6743,
"step": 389
},
{
"epoch": 27.857142857142858,
"grad_norm": 2.7733471393585205,
"learning_rate": 2.4326066985344318e-05,
"loss": 0.6573,
"step": 390
},
{
"epoch": 27.928571428571427,
"grad_norm": 5.0971174240112305,
"learning_rate": 2.4318982194991463e-05,
"loss": 0.6754,
"step": 391
},
{
"epoch": 28.0,
"grad_norm": 6.083596706390381,
"learning_rate": 2.4311861401969138e-05,
"loss": 0.7646,
"step": 392
},
{
"epoch": 28.071428571428573,
"grad_norm": 4.150708198547363,
"learning_rate": 2.4304704627968515e-05,
"loss": 0.6958,
"step": 393
},
{
"epoch": 28.142857142857142,
"grad_norm": 3.6580939292907715,
"learning_rate": 2.429751189479037e-05,
"loss": 0.6848,
"step": 394
},
{
"epoch": 28.214285714285715,
"grad_norm": 2.1017942428588867,
"learning_rate": 2.429028322434501e-05,
"loss": 0.6576,
"step": 395
},
{
"epoch": 28.285714285714285,
"grad_norm": 1.8062525987625122,
"learning_rate": 2.4283018638652234e-05,
"loss": 0.6963,
"step": 396
},
{
"epoch": 28.357142857142858,
"grad_norm": 3.1975321769714355,
"learning_rate": 2.427571815984121e-05,
"loss": 0.6599,
"step": 397
},
{
"epoch": 28.428571428571427,
"grad_norm": 3.0259714126586914,
"learning_rate": 2.4268381810150474e-05,
"loss": 0.6959,
"step": 398
},
{
"epoch": 28.5,
"grad_norm": 5.031489849090576,
"learning_rate": 2.426100961192782e-05,
"loss": 0.7208,
"step": 399
},
{
"epoch": 28.571428571428573,
"grad_norm": 3.5637011528015137,
"learning_rate": 2.4253601587630236e-05,
"loss": 0.6896,
"step": 400
},
{
"epoch": 28.642857142857142,
"grad_norm": 3.333253860473633,
"learning_rate": 2.4246157759823855e-05,
"loss": 0.6815,
"step": 401
},
{
"epoch": 28.714285714285715,
"grad_norm": 3.422375440597534,
"learning_rate": 2.4238678151183863e-05,
"loss": 0.6606,
"step": 402
},
{
"epoch": 28.785714285714285,
"grad_norm": 2.458256721496582,
"learning_rate": 2.423116278449445e-05,
"loss": 0.6484,
"step": 403
},
{
"epoch": 28.857142857142858,
"grad_norm": 4.026810169219971,
"learning_rate": 2.4223611682648724e-05,
"loss": 0.7167,
"step": 404
},
{
"epoch": 28.928571428571427,
"grad_norm": 3.2683417797088623,
"learning_rate": 2.4216024868648644e-05,
"loss": 0.6899,
"step": 405
},
{
"epoch": 29.0,
"grad_norm": 3.54608416557312,
"learning_rate": 2.4208402365604972e-05,
"loss": 0.6843,
"step": 406
},
{
"epoch": 29.071428571428573,
"grad_norm": 4.817044734954834,
"learning_rate": 2.420074419673717e-05,
"loss": 0.6544,
"step": 407
},
{
"epoch": 29.142857142857142,
"grad_norm": 3.0443451404571533,
"learning_rate": 2.4193050385373344e-05,
"loss": 0.6134,
"step": 408
},
{
"epoch": 29.214285714285715,
"grad_norm": 6.059633731842041,
"learning_rate": 2.418532095495018e-05,
"loss": 0.6718,
"step": 409
},
{
"epoch": 29.285714285714285,
"grad_norm": 2.560316324234009,
"learning_rate": 2.417755592901287e-05,
"loss": 0.6134,
"step": 410
},
{
"epoch": 29.357142857142858,
"grad_norm": 3.06530499458313,
"learning_rate": 2.4169755331215023e-05,
"loss": 0.6158,
"step": 411
},
{
"epoch": 29.428571428571427,
"grad_norm": 2.439854621887207,
"learning_rate": 2.4161919185318617e-05,
"loss": 0.6428,
"step": 412
},
{
"epoch": 29.5,
"grad_norm": 4.74956750869751,
"learning_rate": 2.4154047515193904e-05,
"loss": 0.6847,
"step": 413
},
{
"epoch": 29.571428571428573,
"grad_norm": 3.4892289638519287,
"learning_rate": 2.4146140344819363e-05,
"loss": 0.6293,
"step": 414
},
{
"epoch": 29.642857142857142,
"grad_norm": 2.6727051734924316,
"learning_rate": 2.4138197698281606e-05,
"loss": 0.6429,
"step": 415
},
{
"epoch": 29.714285714285715,
"grad_norm": 4.598572254180908,
"learning_rate": 2.413021959977531e-05,
"loss": 0.6504,
"step": 416
},
{
"epoch": 29.785714285714285,
"grad_norm": 5.181699752807617,
"learning_rate": 2.4122206073603142e-05,
"loss": 0.6994,
"step": 417
},
{
"epoch": 29.857142857142858,
"grad_norm": 4.484119415283203,
"learning_rate": 2.4114157144175703e-05,
"loss": 0.7659,
"step": 418
},
{
"epoch": 29.928571428571427,
"grad_norm": 4.818487644195557,
"learning_rate": 2.4106072836011422e-05,
"loss": 0.7583,
"step": 419
},
{
"epoch": 30.0,
"grad_norm": 3.408137083053589,
"learning_rate": 2.40979531737365e-05,
"loss": 0.6579,
"step": 420
},
{
"epoch": 30.071428571428573,
"grad_norm": 4.11182975769043,
"learning_rate": 2.4089798182084845e-05,
"loss": 0.69,
"step": 421
},
{
"epoch": 30.142857142857142,
"grad_norm": 4.751646518707275,
"learning_rate": 2.4081607885897966e-05,
"loss": 0.7584,
"step": 422
},
{
"epoch": 30.214285714285715,
"grad_norm": 5.190206050872803,
"learning_rate": 2.407338231012494e-05,
"loss": 0.7468,
"step": 423
},
{
"epoch": 30.285714285714285,
"grad_norm": 3.750694513320923,
"learning_rate": 2.406512147982228e-05,
"loss": 0.743,
"step": 424
},
{
"epoch": 30.357142857142858,
"grad_norm": 3.4604289531707764,
"learning_rate": 2.4056825420153917e-05,
"loss": 0.7051,
"step": 425
},
{
"epoch": 30.428571428571427,
"grad_norm": 2.6612353324890137,
"learning_rate": 2.4048494156391087e-05,
"loss": 0.6852,
"step": 426
},
{
"epoch": 30.5,
"grad_norm": 4.154074192047119,
"learning_rate": 2.4040127713912264e-05,
"loss": 0.6733,
"step": 427
},
{
"epoch": 30.571428571428573,
"grad_norm": 3.900482654571533,
"learning_rate": 2.403172611820308e-05,
"loss": 0.6134,
"step": 428
},
{
"epoch": 30.642857142857142,
"grad_norm": 5.209915637969971,
"learning_rate": 2.4023289394856257e-05,
"loss": 0.7324,
"step": 429
},
{
"epoch": 30.714285714285715,
"grad_norm": 2.668140411376953,
"learning_rate": 2.401481756957152e-05,
"loss": 0.6655,
"step": 430
},
{
"epoch": 30.785714285714285,
"grad_norm": 3.480163812637329,
"learning_rate": 2.4006310668155508e-05,
"loss": 0.7295,
"step": 431
},
{
"epoch": 30.857142857142858,
"grad_norm": 2.9695725440979004,
"learning_rate": 2.3997768716521723e-05,
"loss": 0.6949,
"step": 432
},
{
"epoch": 30.928571428571427,
"grad_norm": 2.298661470413208,
"learning_rate": 2.398919174069043e-05,
"loss": 0.6687,
"step": 433
},
{
"epoch": 31.0,
"grad_norm": 3.2847728729248047,
"learning_rate": 2.398057976678859e-05,
"loss": 0.6779,
"step": 434
},
{
"epoch": 31.071428571428573,
"grad_norm": 4.464836120605469,
"learning_rate": 2.3971932821049765e-05,
"loss": 0.7223,
"step": 435
},
{
"epoch": 31.142857142857142,
"grad_norm": 2.811166763305664,
"learning_rate": 2.396325092981405e-05,
"loss": 0.6677,
"step": 436
},
{
"epoch": 31.214285714285715,
"grad_norm": 3.1231977939605713,
"learning_rate": 2.3954534119527996e-05,
"loss": 0.6745,
"step": 437
},
{
"epoch": 31.285714285714285,
"grad_norm": 1.7231147289276123,
"learning_rate": 2.3945782416744517e-05,
"loss": 0.5596,
"step": 438
},
{
"epoch": 31.357142857142858,
"grad_norm": 2.8085505962371826,
"learning_rate": 2.3936995848122812e-05,
"loss": 0.5903,
"step": 439
},
{
"epoch": 31.428571428571427,
"grad_norm": 5.913967132568359,
"learning_rate": 2.3928174440428297e-05,
"loss": 0.6269,
"step": 440
},
{
"epoch": 31.5,
"grad_norm": 3.01706862449646,
"learning_rate": 2.391931822053251e-05,
"loss": 0.6192,
"step": 441
},
{
"epoch": 31.571428571428573,
"grad_norm": 7.216449737548828,
"learning_rate": 2.3910427215413036e-05,
"loss": 0.6928,
"step": 442
},
{
"epoch": 31.642857142857142,
"grad_norm": 3.636003255844116,
"learning_rate": 2.390150145215341e-05,
"loss": 0.6932,
"step": 443
},
{
"epoch": 31.714285714285715,
"grad_norm": 2.978515625,
"learning_rate": 2.3892540957943067e-05,
"loss": 0.6355,
"step": 444
},
{
"epoch": 31.785714285714285,
"grad_norm": 2.5752205848693848,
"learning_rate": 2.3883545760077215e-05,
"loss": 0.6208,
"step": 445
},
{
"epoch": 31.857142857142858,
"grad_norm": 2.671752452850342,
"learning_rate": 2.3874515885956792e-05,
"loss": 0.6078,
"step": 446
},
{
"epoch": 31.928571428571427,
"grad_norm": 3.273820400238037,
"learning_rate": 2.386545136308836e-05,
"loss": 0.6566,
"step": 447
},
{
"epoch": 32.0,
"grad_norm": 3.485921859741211,
"learning_rate": 2.3856352219084024e-05,
"loss": 0.606,
"step": 448
},
{
"epoch": 32.07142857142857,
"grad_norm": 2.268240451812744,
"learning_rate": 2.384721848166136e-05,
"loss": 0.5724,
"step": 449
},
{
"epoch": 32.142857142857146,
"grad_norm": 4.0276689529418945,
"learning_rate": 2.3838050178643312e-05,
"loss": 0.5929,
"step": 450
}
],
"logging_steps": 1,
"max_steps": 2000,
"num_input_tokens_seen": 0,
"num_train_epochs": 143,
"save_steps": 200,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": false,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 64,
"trial_name": null,
"trial_params": null
}