{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9942299156679982,
"eval_steps": 500,
"global_step": 140,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"grad_norm": 0.5339343547821045,
"learning_rate": 1e-05,
"loss": 0.4406,
"step": 1
},
{
"epoch": 0.01,
"grad_norm": 0.4306606948375702,
"learning_rate": 1e-05,
"loss": 0.4472,
"step": 2
},
{
"epoch": 0.02,
"grad_norm": 0.48949554562568665,
"learning_rate": 1e-05,
"loss": 0.4724,
"step": 3
},
{
"epoch": 0.03,
"grad_norm": 0.7450588941574097,
"learning_rate": 1e-05,
"loss": 0.4314,
"step": 4
},
{
"epoch": 0.04,
"grad_norm": 0.39597198367118835,
"learning_rate": 1e-05,
"loss": 0.4576,
"step": 5
},
{
"epoch": 0.04,
"grad_norm": 0.4372778832912445,
"learning_rate": 1e-05,
"loss": 0.4573,
"step": 6
},
{
"epoch": 0.05,
"grad_norm": 0.5514771938323975,
"learning_rate": 1e-05,
"loss": 0.4342,
"step": 7
},
{
"epoch": 0.06,
"grad_norm": 0.6987231969833374,
"learning_rate": 1e-05,
"loss": 0.4239,
"step": 8
},
{
"epoch": 0.06,
"grad_norm": 0.5473514795303345,
"learning_rate": 1e-05,
"loss": 0.4599,
"step": 9
},
{
"epoch": 0.07,
"grad_norm": 0.5179272890090942,
"learning_rate": 1e-05,
"loss": 0.4462,
"step": 10
},
{
"epoch": 0.08,
"grad_norm": 0.5821484923362732,
"learning_rate": 1e-05,
"loss": 0.435,
"step": 11
},
{
"epoch": 0.09,
"grad_norm": 0.5863294005393982,
"learning_rate": 1e-05,
"loss": 0.4272,
"step": 12
},
{
"epoch": 0.09,
"grad_norm": 0.4016607403755188,
"learning_rate": 1e-05,
"loss": 0.4491,
"step": 13
},
{
"epoch": 0.1,
"grad_norm": 0.4974324703216553,
"learning_rate": 1e-05,
"loss": 0.4542,
"step": 14
},
{
"epoch": 0.11,
"grad_norm": 0.5260458588600159,
"learning_rate": 1e-05,
"loss": 0.4387,
"step": 15
},
{
"epoch": 0.11,
"grad_norm": 0.7002240419387817,
"learning_rate": 1e-05,
"loss": 0.4274,
"step": 16
},
{
"epoch": 0.12,
"grad_norm": 0.3600125014781952,
"learning_rate": 1e-05,
"loss": 0.4564,
"step": 17
},
{
"epoch": 0.13,
"grad_norm": 0.4479730725288391,
"learning_rate": 1e-05,
"loss": 0.4284,
"step": 18
},
{
"epoch": 0.13,
"grad_norm": 0.539320170879364,
"learning_rate": 1e-05,
"loss": 0.44,
"step": 19
},
{
"epoch": 0.14,
"grad_norm": 0.8680666089057922,
"learning_rate": 1e-05,
"loss": 0.4301,
"step": 20
},
{
"epoch": 0.15,
"grad_norm": 0.41423720121383667,
"learning_rate": 1e-05,
"loss": 0.4466,
"step": 21
},
{
"epoch": 0.16,
"grad_norm": 0.441376268863678,
"learning_rate": 1e-05,
"loss": 0.4447,
"step": 22
},
{
"epoch": 0.16,
"grad_norm": 0.5229262709617615,
"learning_rate": 1e-05,
"loss": 0.4344,
"step": 23
},
{
"epoch": 0.17,
"grad_norm": 0.7713359594345093,
"learning_rate": 1e-05,
"loss": 0.4375,
"step": 24
},
{
"epoch": 0.18,
"grad_norm": 0.4245271384716034,
"learning_rate": 1e-05,
"loss": 0.4618,
"step": 25
},
{
"epoch": 0.18,
"grad_norm": 0.4831457734107971,
"learning_rate": 1e-05,
"loss": 0.4403,
"step": 26
},
{
"epoch": 0.19,
"grad_norm": 0.58194899559021,
"learning_rate": 1e-05,
"loss": 0.4229,
"step": 27
},
{
"epoch": 0.2,
"grad_norm": 0.7558773159980774,
"learning_rate": 1e-05,
"loss": 0.3974,
"step": 28
},
{
"epoch": 0.21,
"grad_norm": 0.39629215002059937,
"learning_rate": 1e-05,
"loss": 0.4616,
"step": 29
},
{
"epoch": 0.21,
"grad_norm": 0.49573493003845215,
"learning_rate": 1e-05,
"loss": 0.4417,
"step": 30
},
{
"epoch": 0.22,
"grad_norm": 0.5668038129806519,
"learning_rate": 1e-05,
"loss": 0.43,
"step": 31
},
{
"epoch": 0.23,
"grad_norm": 0.7516561150550842,
"learning_rate": 1e-05,
"loss": 0.414,
"step": 32
},
{
"epoch": 0.23,
"grad_norm": 1.1010799407958984,
"learning_rate": 1e-05,
"loss": 892.876,
"step": 33
},
{
"epoch": 0.24,
"grad_norm": 0.4981920123100281,
"learning_rate": 1e-05,
"loss": 0.4428,
"step": 34
},
{
"epoch": 0.25,
"grad_norm": 0.6654168963432312,
"learning_rate": 1e-05,
"loss": 0.4234,
"step": 35
},
{
"epoch": 0.26,
"grad_norm": 0.9636304378509521,
"learning_rate": 1e-05,
"loss": 0.4185,
"step": 36
},
{
"epoch": 0.26,
"grad_norm": 0.39848971366882324,
"learning_rate": 1e-05,
"loss": 0.4517,
"step": 37
},
{
"epoch": 0.27,
"grad_norm": 0.5225277543067932,
"learning_rate": 1e-05,
"loss": 0.4459,
"step": 38
},
{
"epoch": 0.28,
"grad_norm": 0.5011440515518188,
"learning_rate": 1e-05,
"loss": 0.4403,
"step": 39
},
{
"epoch": 0.28,
"grad_norm": 0.9465950727462769,
"learning_rate": 1e-05,
"loss": 0.4103,
"step": 40
},
{
"epoch": 0.29,
"grad_norm": 0.43049702048301697,
"learning_rate": 1e-05,
"loss": 0.4553,
"step": 41
},
{
"epoch": 0.3,
"grad_norm": 0.48856088519096375,
"learning_rate": 1e-05,
"loss": 0.449,
"step": 42
},
{
"epoch": 0.31,
"grad_norm": 0.6367886066436768,
"learning_rate": 1e-05,
"loss": 0.4405,
"step": 43
},
{
"epoch": 0.31,
"grad_norm": 0.6144514679908752,
"learning_rate": 1e-05,
"loss": 0.4364,
"step": 44
},
{
"epoch": 0.32,
"grad_norm": 0.4697144031524658,
"learning_rate": 1e-05,
"loss": 0.451,
"step": 45
},
{
"epoch": 0.33,
"grad_norm": 0.5866898894309998,
"learning_rate": 1e-05,
"loss": 0.4387,
"step": 46
},
{
"epoch": 0.33,
"grad_norm": 0.5469757914543152,
"learning_rate": 1e-05,
"loss": 0.4464,
"step": 47
},
{
"epoch": 0.34,
"grad_norm": 0.5702504515647888,
"learning_rate": 1e-05,
"loss": 0.4237,
"step": 48
},
{
"epoch": 0.35,
"grad_norm": 0.4027068614959717,
"learning_rate": 1e-05,
"loss": 0.4608,
"step": 49
},
{
"epoch": 0.36,
"grad_norm": 0.4725416302680969,
"learning_rate": 1e-05,
"loss": 0.4564,
"step": 50
},
{
"epoch": 0.36,
"grad_norm": 0.5751639008522034,
"learning_rate": 1e-05,
"loss": 0.4406,
"step": 51
},
{
"epoch": 0.37,
"grad_norm": 1.0833802223205566,
"learning_rate": 1e-05,
"loss": 0.4157,
"step": 52
},
{
"epoch": 0.38,
"grad_norm": 0.44081103801727295,
"learning_rate": 1e-05,
"loss": 0.4618,
"step": 53
},
{
"epoch": 0.38,
"grad_norm": 0.46274518966674805,
"learning_rate": 1e-05,
"loss": 0.4391,
"step": 54
},
{
"epoch": 0.39,
"grad_norm": 0.48259931802749634,
"learning_rate": 1e-05,
"loss": 0.4456,
"step": 55
},
{
"epoch": 0.4,
"grad_norm": 0.7039265632629395,
"learning_rate": 1e-05,
"loss": 0.4201,
"step": 56
},
{
"epoch": 0.4,
"grad_norm": 0.4099649488925934,
"learning_rate": 1e-05,
"loss": 0.4373,
"step": 57
},
{
"epoch": 0.41,
"grad_norm": 0.47295379638671875,
"learning_rate": 1e-05,
"loss": 0.4475,
"step": 58
},
{
"epoch": 0.42,
"grad_norm": 0.5771341919898987,
"learning_rate": 1e-05,
"loss": 0.4205,
"step": 59
},
{
"epoch": 0.43,
"grad_norm": 0.7415468692779541,
"learning_rate": 1e-05,
"loss": 0.4287,
"step": 60
},
{
"epoch": 0.43,
"grad_norm": 0.5697640776634216,
"learning_rate": 1e-05,
"loss": 0.4351,
"step": 61
},
{
"epoch": 0.44,
"grad_norm": 0.494108110666275,
"learning_rate": 1e-05,
"loss": 0.4356,
"step": 62
},
{
"epoch": 0.45,
"grad_norm": 0.7063843011856079,
"learning_rate": 1e-05,
"loss": 0.4101,
"step": 63
},
{
"epoch": 0.45,
"grad_norm": 0.9190882444381714,
"learning_rate": 1e-05,
"loss": 0.4008,
"step": 64
},
{
"epoch": 0.46,
"grad_norm": 0.6143580675125122,
"learning_rate": 1e-05,
"loss": 1077.2749,
"step": 65
},
{
"epoch": 0.47,
"grad_norm": 0.6259483098983765,
"learning_rate": 1e-05,
"loss": 0.4455,
"step": 66
},
{
"epoch": 0.48,
"grad_norm": 0.6354689002037048,
"learning_rate": 1e-05,
"loss": 0.4249,
"step": 67
},
{
"epoch": 0.48,
"grad_norm": 0.8926694393157959,
"learning_rate": 1e-05,
"loss": 0.428,
"step": 68
},
{
"epoch": 0.49,
"grad_norm": 0.37563061714172363,
"learning_rate": 1e-05,
"loss": 0.4522,
"step": 69
},
{
"epoch": 0.5,
"grad_norm": 0.4575391113758087,
"learning_rate": 1e-05,
"loss": 0.4601,
"step": 70
},
{
"epoch": 0.5,
"grad_norm": 0.5881872177124023,
"learning_rate": 1e-05,
"loss": 0.4284,
"step": 71
},
{
"epoch": 0.51,
"grad_norm": 1.1317410469055176,
"learning_rate": 1e-05,
"loss": 0.4023,
"step": 72
},
{
"epoch": 0.52,
"grad_norm": 0.5603578090667725,
"learning_rate": 1e-05,
"loss": 0.4573,
"step": 73
},
{
"epoch": 0.53,
"grad_norm": 0.6214807629585266,
"learning_rate": 1e-05,
"loss": 0.4535,
"step": 74
},
{
"epoch": 0.53,
"grad_norm": 0.6941476464271545,
"learning_rate": 1e-05,
"loss": 0.4351,
"step": 75
},
{
"epoch": 0.54,
"grad_norm": 0.8727200031280518,
"learning_rate": 1e-05,
"loss": 0.4248,
"step": 76
},
{
"epoch": 0.55,
"grad_norm": 0.6732370853424072,
"learning_rate": 1e-05,
"loss": 0.45,
"step": 77
},
{
"epoch": 0.55,
"grad_norm": 0.7978621125221252,
"learning_rate": 1e-05,
"loss": 0.4417,
"step": 78
},
{
"epoch": 0.56,
"grad_norm": 0.7042059898376465,
"learning_rate": 1e-05,
"loss": 0.3983,
"step": 79
},
{
"epoch": 0.57,
"grad_norm": 1.2186613082885742,
"learning_rate": 1e-05,
"loss": 0.3991,
"step": 80
},
{
"epoch": 0.58,
"grad_norm": 0.5385881066322327,
"learning_rate": 1e-05,
"loss": 0.4516,
"step": 81
},
{
"epoch": 0.58,
"grad_norm": 0.6898678541183472,
"learning_rate": 1e-05,
"loss": 0.4246,
"step": 82
},
{
"epoch": 0.59,
"grad_norm": 0.8373177647590637,
"learning_rate": 1e-05,
"loss": 0.4199,
"step": 83
},
{
"epoch": 0.6,
"grad_norm": 0.8186657428741455,
"learning_rate": 1e-05,
"loss": 0.4425,
"step": 84
},
{
"epoch": 0.6,
"grad_norm": 0.8698004484176636,
"learning_rate": 1e-05,
"loss": 0.4432,
"step": 85
},
{
"epoch": 0.61,
"grad_norm": 1.0647398233413696,
"learning_rate": 1e-05,
"loss": 0.4308,
"step": 86
},
{
"epoch": 0.62,
"grad_norm": 0.8065099120140076,
"learning_rate": 1e-05,
"loss": 0.4369,
"step": 87
},
{
"epoch": 0.62,
"grad_norm": 1.0753732919692993,
"learning_rate": 1e-05,
"loss": 0.4055,
"step": 88
},
{
"epoch": 0.63,
"grad_norm": 0.4561431407928467,
"learning_rate": 1e-05,
"loss": 0.4482,
"step": 89
},
{
"epoch": 0.64,
"grad_norm": 0.5589205026626587,
"learning_rate": 1e-05,
"loss": 0.4517,
"step": 90
},
{
"epoch": 0.65,
"grad_norm": 0.7920203804969788,
"learning_rate": 1e-05,
"loss": 0.4107,
"step": 91
},
{
"epoch": 0.65,
"grad_norm": 1.1868764162063599,
"learning_rate": 1e-05,
"loss": 0.406,
"step": 92
},
{
"epoch": 0.66,
"grad_norm": 0.5007905960083008,
"learning_rate": 1e-05,
"loss": 0.4582,
"step": 93
},
{
"epoch": 0.67,
"grad_norm": 0.588740348815918,
"learning_rate": 1e-05,
"loss": 0.4285,
"step": 94
},
{
"epoch": 0.67,
"grad_norm": 0.7123900651931763,
"learning_rate": 1e-05,
"loss": 0.445,
"step": 95
},
{
"epoch": 0.68,
"grad_norm": 0.7591605186462402,
"learning_rate": 1e-05,
"loss": 0.4111,
"step": 96
},
{
"epoch": 0.69,
"grad_norm": 0.6483467817306519,
"learning_rate": 1e-05,
"loss": 0.4353,
"step": 97
},
{
"epoch": 0.7,
"grad_norm": 0.6339621543884277,
"learning_rate": 1e-05,
"loss": 0.4167,
"step": 98
},
{
"epoch": 0.7,
"grad_norm": 0.9539307951927185,
"learning_rate": 1e-05,
"loss": 0.4129,
"step": 99
},
{
"epoch": 0.71,
"grad_norm": 0.9566183090209961,
"learning_rate": 1e-05,
"loss": 0.4122,
"step": 100
},
{
"epoch": 0.72,
"grad_norm": 0.45071572065353394,
"learning_rate": 1e-05,
"loss": 0.4367,
"step": 101
},
{
"epoch": 0.72,
"grad_norm": 0.6243826746940613,
"learning_rate": 1e-05,
"loss": 0.4517,
"step": 102
},
{
"epoch": 0.73,
"grad_norm": 1.0642685890197754,
"learning_rate": 1e-05,
"loss": 0.418,
"step": 103
},
{
"epoch": 0.74,
"grad_norm": 0.8378849029541016,
"learning_rate": 1e-05,
"loss": 0.3948,
"step": 104
},
{
"epoch": 0.75,
"grad_norm": 0.5254150629043579,
"learning_rate": 1e-05,
"loss": 0.4566,
"step": 105
},
{
"epoch": 0.75,
"grad_norm": 0.677213728427887,
"learning_rate": 1e-05,
"loss": 0.4484,
"step": 106
},
{
"epoch": 0.76,
"grad_norm": 1.118269920349121,
"learning_rate": 1e-05,
"loss": 0.4523,
"step": 107
},
{
"epoch": 0.77,
"grad_norm": 1.260345458984375,
"learning_rate": 1e-05,
"loss": 0.408,
"step": 108
},
{
"epoch": 0.77,
"grad_norm": 0.7494204044342041,
"learning_rate": 1e-05,
"loss": 0.4275,
"step": 109
},
{
"epoch": 0.78,
"grad_norm": 0.6795545220375061,
"learning_rate": 1e-05,
"loss": 0.4505,
"step": 110
},
{
"epoch": 0.79,
"grad_norm": 0.8415333032608032,
"learning_rate": 1e-05,
"loss": 0.442,
"step": 111
},
{
"epoch": 0.8,
"grad_norm": 1.064477562904358,
"learning_rate": 1e-05,
"loss": 0.4019,
"step": 112
},
{
"epoch": 0.8,
"grad_norm": 0.6557851433753967,
"learning_rate": 1e-05,
"loss": 0.4534,
"step": 113
},
{
"epoch": 0.81,
"grad_norm": 0.7865703701972961,
"learning_rate": 1e-05,
"loss": 0.4339,
"step": 114
},
{
"epoch": 0.82,
"grad_norm": 0.7419100403785706,
"learning_rate": 1e-05,
"loss": 0.4364,
"step": 115
},
{
"epoch": 0.82,
"grad_norm": 1.0382827520370483,
"learning_rate": 1e-05,
"loss": 0.4028,
"step": 116
},
{
"epoch": 0.83,
"grad_norm": 0.9644023776054382,
"learning_rate": 1e-05,
"loss": 0.4383,
"step": 117
},
{
"epoch": 0.84,
"grad_norm": 0.859690248966217,
"learning_rate": 1e-05,
"loss": 0.4327,
"step": 118
},
{
"epoch": 0.85,
"grad_norm": 0.8699947595596313,
"learning_rate": 1e-05,
"loss": 0.4275,
"step": 119
},
{
"epoch": 0.85,
"grad_norm": 1.2335870265960693,
"learning_rate": 1e-05,
"loss": 0.3891,
"step": 120
},
{
"epoch": 0.86,
"grad_norm": 0.506551206111908,
"learning_rate": 1e-05,
"loss": 0.4568,
"step": 121
},
{
"epoch": 0.87,
"grad_norm": 2.2713043689727783,
"learning_rate": 1e-05,
"loss": 0.4419,
"step": 122
},
{
"epoch": 0.87,
"grad_norm": 0.6728506684303284,
"learning_rate": 1e-05,
"loss": 0.4447,
"step": 123
},
{
"epoch": 0.88,
"grad_norm": 0.8511173725128174,
"learning_rate": 1e-05,
"loss": 0.3946,
"step": 124
},
{
"epoch": 0.89,
"grad_norm": 0.5495295524597168,
"learning_rate": 1e-05,
"loss": 1092.1152,
"step": 125
},
{
"epoch": 0.89,
"grad_norm": 0.9345693588256836,
"learning_rate": 1e-05,
"loss": 0.4477,
"step": 126
},
{
"epoch": 0.9,
"grad_norm": 0.6886237263679504,
"learning_rate": 1e-05,
"loss": 0.4283,
"step": 127
},
{
"epoch": 0.91,
"grad_norm": 0.6619666814804077,
"learning_rate": 1e-05,
"loss": 0.4212,
"step": 128
},
{
"epoch": 0.92,
"grad_norm": 0.534345805644989,
"learning_rate": 1e-05,
"loss": 0.4368,
"step": 129
},
{
"epoch": 0.92,
"grad_norm": 0.7200596332550049,
"learning_rate": 1e-05,
"loss": 0.4117,
"step": 130
},
{
"epoch": 0.93,
"grad_norm": 0.6517333388328552,
"learning_rate": 1e-05,
"loss": 0.4215,
"step": 131
},
{
"epoch": 0.94,
"grad_norm": 0.6811349391937256,
"learning_rate": 1e-05,
"loss": 0.4057,
"step": 132
},
{
"epoch": 0.94,
"grad_norm": 0.4850296974182129,
"learning_rate": 1e-05,
"loss": 0.4407,
"step": 133
},
{
"epoch": 0.95,
"grad_norm": 0.5540636777877808,
"learning_rate": 1e-05,
"loss": 0.4077,
"step": 134
},
{
"epoch": 0.96,
"grad_norm": 0.4562750458717346,
"learning_rate": 1e-05,
"loss": 0.4208,
"step": 135
},
{
"epoch": 0.97,
"grad_norm": 0.8182024359703064,
"learning_rate": 1e-05,
"loss": 0.4151,
"step": 136
},
{
"epoch": 0.97,
"grad_norm": 0.39840784668922424,
"learning_rate": 1e-05,
"loss": 0.4633,
"step": 137
},
{
"epoch": 0.98,
"grad_norm": 0.5493362545967102,
"learning_rate": 1e-05,
"loss": 0.4378,
"step": 138
},
{
"epoch": 0.99,
"grad_norm": 0.6024540066719055,
"learning_rate": 1e-05,
"loss": 0.4087,
"step": 139
},
{
"epoch": 0.99,
"grad_norm": 0.6966805458068848,
"learning_rate": 1e-05,
"loss": 0.4299,
"step": 140
},
{
"epoch": 0.99,
"step": 140,
"total_flos": 190411138334720.0,
"train_loss": 22.297974420232432,
"train_runtime": 30487.3439,
"train_samples_per_second": 0.074,
"train_steps_per_second": 0.005
}
],
"logging_steps": 1.0,
"max_steps": 140,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 30,
"total_flos": 190411138334720.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}