{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.9825242718446603,
"eval_steps": 500,
"global_step": 192,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03,
"learning_rate": 2e-05,
"loss": 1.6655,
"step": 2
},
{
"epoch": 0.06,
"learning_rate": 2e-05,
"loss": 1.8229,
"step": 4
},
{
"epoch": 0.09,
"learning_rate": 2e-05,
"loss": 1.871,
"step": 6
},
{
"epoch": 0.12,
"learning_rate": 2e-05,
"loss": 1.9893,
"step": 8
},
{
"epoch": 0.16,
"learning_rate": 2e-05,
"loss": 2.0291,
"step": 10
},
{
"epoch": 0.19,
"learning_rate": 2e-05,
"loss": 1.983,
"step": 12
},
{
"epoch": 0.22,
"learning_rate": 2e-05,
"loss": 2.0649,
"step": 14
},
{
"epoch": 0.25,
"learning_rate": 2e-05,
"loss": 2.3775,
"step": 16
},
{
"epoch": 0.28,
"learning_rate": 2e-05,
"loss": 1.6722,
"step": 18
},
{
"epoch": 0.31,
"learning_rate": 2e-05,
"loss": 1.7044,
"step": 20
},
{
"epoch": 0.34,
"learning_rate": 2e-05,
"loss": 1.77,
"step": 22
},
{
"epoch": 0.37,
"learning_rate": 2e-05,
"loss": 2.0366,
"step": 24
},
{
"epoch": 0.4,
"learning_rate": 2e-05,
"loss": 2.0772,
"step": 26
},
{
"epoch": 0.43,
"learning_rate": 2e-05,
"loss": 2.1213,
"step": 28
},
{
"epoch": 0.47,
"learning_rate": 2e-05,
"loss": 2.1209,
"step": 30
},
{
"epoch": 0.5,
"learning_rate": 2e-05,
"loss": 2.567,
"step": 32
},
{
"epoch": 0.53,
"learning_rate": 2e-05,
"loss": 1.6556,
"step": 34
},
{
"epoch": 0.56,
"learning_rate": 2e-05,
"loss": 1.8708,
"step": 36
},
{
"epoch": 0.59,
"learning_rate": 2e-05,
"loss": 2.0389,
"step": 38
},
{
"epoch": 0.62,
"learning_rate": 2e-05,
"loss": 1.9722,
"step": 40
},
{
"epoch": 0.65,
"learning_rate": 2e-05,
"loss": 2.075,
"step": 42
},
{
"epoch": 0.68,
"learning_rate": 2e-05,
"loss": 2.0757,
"step": 44
},
{
"epoch": 0.71,
"learning_rate": 2e-05,
"loss": 2.0656,
"step": 46
},
{
"epoch": 0.75,
"learning_rate": 2e-05,
"loss": 2.4261,
"step": 48
},
{
"epoch": 0.78,
"learning_rate": 2e-05,
"loss": 1.7365,
"step": 50
},
{
"epoch": 0.81,
"learning_rate": 2e-05,
"loss": 1.8049,
"step": 52
},
{
"epoch": 0.84,
"learning_rate": 2e-05,
"loss": 1.9021,
"step": 54
},
{
"epoch": 0.87,
"learning_rate": 2e-05,
"loss": 1.9696,
"step": 56
},
{
"epoch": 0.9,
"learning_rate": 2e-05,
"loss": 2.1454,
"step": 58
},
{
"epoch": 0.93,
"learning_rate": 2e-05,
"loss": 1.9359,
"step": 60
},
{
"epoch": 0.96,
"learning_rate": 2e-05,
"loss": 2.2369,
"step": 62
},
{
"epoch": 0.99,
"learning_rate": 2e-05,
"loss": 2.309,
"step": 64
},
{
"epoch": 1.03,
"learning_rate": 2e-05,
"loss": 1.7485,
"step": 66
},
{
"epoch": 1.06,
"learning_rate": 2e-05,
"loss": 1.805,
"step": 68
},
{
"epoch": 1.09,
"learning_rate": 2e-05,
"loss": 1.777,
"step": 70
},
{
"epoch": 1.12,
"learning_rate": 2e-05,
"loss": 1.9951,
"step": 72
},
{
"epoch": 1.15,
"learning_rate": 2e-05,
"loss": 2.1456,
"step": 74
},
{
"epoch": 1.18,
"learning_rate": 2e-05,
"loss": 2.0073,
"step": 76
},
{
"epoch": 1.21,
"learning_rate": 2e-05,
"loss": 2.1608,
"step": 78
},
{
"epoch": 1.24,
"learning_rate": 2e-05,
"loss": 2.2249,
"step": 80
},
{
"epoch": 1.27,
"learning_rate": 2e-05,
"loss": 1.7468,
"step": 82
},
{
"epoch": 1.3,
"learning_rate": 2e-05,
"loss": 1.7292,
"step": 84
},
{
"epoch": 1.34,
"learning_rate": 2e-05,
"loss": 1.8926,
"step": 86
},
{
"epoch": 1.37,
"learning_rate": 2e-05,
"loss": 1.9109,
"step": 88
},
{
"epoch": 1.4,
"learning_rate": 2e-05,
"loss": 2.0223,
"step": 90
},
{
"epoch": 1.43,
"learning_rate": 2e-05,
"loss": 2.0283,
"step": 92
},
{
"epoch": 1.46,
"learning_rate": 2e-05,
"loss": 1.9571,
"step": 94
},
{
"epoch": 1.49,
"learning_rate": 2e-05,
"loss": 2.4003,
"step": 96
},
{
"epoch": 1.52,
"learning_rate": 2e-05,
"loss": 1.9499,
"step": 98
},
{
"epoch": 1.55,
"learning_rate": 2e-05,
"loss": 1.7059,
"step": 100
},
{
"epoch": 1.58,
"learning_rate": 2e-05,
"loss": 1.7516,
"step": 102
},
{
"epoch": 1.62,
"learning_rate": 2e-05,
"loss": 1.9586,
"step": 104
},
{
"epoch": 1.65,
"learning_rate": 2e-05,
"loss": 2.0152,
"step": 106
},
{
"epoch": 1.68,
"learning_rate": 2e-05,
"loss": 2.1286,
"step": 108
},
{
"epoch": 1.71,
"learning_rate": 2e-05,
"loss": 2.1614,
"step": 110
},
{
"epoch": 1.74,
"learning_rate": 2e-05,
"loss": 2.168,
"step": 112
},
{
"epoch": 1.77,
"learning_rate": 2e-05,
"loss": 1.8767,
"step": 114
},
{
"epoch": 1.8,
"learning_rate": 2e-05,
"loss": 1.8243,
"step": 116
},
{
"epoch": 1.83,
"learning_rate": 2e-05,
"loss": 1.9965,
"step": 118
},
{
"epoch": 1.86,
"learning_rate": 2e-05,
"loss": 1.9171,
"step": 120
},
{
"epoch": 1.9,
"learning_rate": 2e-05,
"loss": 1.9598,
"step": 122
},
{
"epoch": 1.93,
"learning_rate": 2e-05,
"loss": 1.8569,
"step": 124
},
{
"epoch": 1.96,
"learning_rate": 2e-05,
"loss": 2.0991,
"step": 126
},
{
"epoch": 1.99,
"learning_rate": 2e-05,
"loss": 2.1616,
"step": 128
},
{
"epoch": 2.02,
"learning_rate": 2e-05,
"loss": 1.7902,
"step": 130
},
{
"epoch": 2.05,
"learning_rate": 2e-05,
"loss": 1.7404,
"step": 132
},
{
"epoch": 2.08,
"learning_rate": 2e-05,
"loss": 1.9132,
"step": 134
},
{
"epoch": 2.11,
"learning_rate": 2e-05,
"loss": 1.9342,
"step": 136
},
{
"epoch": 2.14,
"learning_rate": 2e-05,
"loss": 2.0537,
"step": 138
},
{
"epoch": 2.17,
"learning_rate": 2e-05,
"loss": 2.0116,
"step": 140
},
{
"epoch": 2.21,
"learning_rate": 2e-05,
"loss": 2.0901,
"step": 142
},
{
"epoch": 2.24,
"learning_rate": 2e-05,
"loss": 2.1829,
"step": 144
},
{
"epoch": 2.27,
"learning_rate": 2e-05,
"loss": 1.8606,
"step": 146
},
{
"epoch": 2.3,
"learning_rate": 2e-05,
"loss": 1.6824,
"step": 148
},
{
"epoch": 2.33,
"learning_rate": 2e-05,
"loss": 1.8978,
"step": 150
},
{
"epoch": 2.36,
"learning_rate": 2e-05,
"loss": 1.8608,
"step": 152
},
{
"epoch": 2.39,
"learning_rate": 2e-05,
"loss": 1.9369,
"step": 154
},
{
"epoch": 2.42,
"learning_rate": 2e-05,
"loss": 1.8742,
"step": 156
},
{
"epoch": 2.45,
"learning_rate": 2e-05,
"loss": 2.0519,
"step": 158
},
{
"epoch": 2.49,
"learning_rate": 2e-05,
"loss": 2.1078,
"step": 160
},
{
"epoch": 2.52,
"learning_rate": 2e-05,
"loss": 1.8818,
"step": 162
},
{
"epoch": 2.55,
"learning_rate": 2e-05,
"loss": 1.7438,
"step": 164
},
{
"epoch": 2.58,
"learning_rate": 2e-05,
"loss": 1.8832,
"step": 166
},
{
"epoch": 2.61,
"learning_rate": 2e-05,
"loss": 1.7988,
"step": 168
},
{
"epoch": 2.64,
"learning_rate": 2e-05,
"loss": 2.0218,
"step": 170
},
{
"epoch": 2.67,
"learning_rate": 2e-05,
"loss": 1.949,
"step": 172
},
{
"epoch": 2.7,
"learning_rate": 2e-05,
"loss": 1.9499,
"step": 174
},
{
"epoch": 2.73,
"learning_rate": 2e-05,
"loss": 2.1105,
"step": 176
},
{
"epoch": 2.77,
"learning_rate": 2e-05,
"loss": 1.8723,
"step": 178
},
{
"epoch": 2.8,
"learning_rate": 2e-05,
"loss": 1.696,
"step": 180
},
{
"epoch": 2.83,
"learning_rate": 2e-05,
"loss": 1.7281,
"step": 182
},
{
"epoch": 2.86,
"learning_rate": 2e-05,
"loss": 1.8753,
"step": 184
},
{
"epoch": 2.89,
"learning_rate": 2e-05,
"loss": 2.0551,
"step": 186
},
{
"epoch": 2.92,
"learning_rate": 2e-05,
"loss": 1.8918,
"step": 188
},
{
"epoch": 2.95,
"learning_rate": 2e-05,
"loss": 1.883,
"step": 190
},
{
"epoch": 2.98,
"learning_rate": 2e-05,
"loss": 2.0562,
"step": 192
}
],
"logging_steps": 2,
"max_steps": 192,
"num_train_epochs": 3,
"save_steps": 500,
"total_flos": 3.523807718510592e+16,
"trial_name": null,
"trial_params": null
}