{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.2513274336283186,
"eval_steps": 500,
"global_step": 142,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 4.807692307692308e-06,
"loss": 2.0125,
"step": 1
},
{
"epoch": 0.0,
"learning_rate": 9.615384615384616e-06,
"loss": 1.9307,
"step": 2
},
{
"epoch": 0.01,
"learning_rate": 1.4423076923076924e-05,
"loss": 1.6104,
"step": 3
},
{
"epoch": 0.01,
"learning_rate": 1.923076923076923e-05,
"loss": 1.9208,
"step": 4
},
{
"epoch": 0.01,
"learning_rate": 2.4038461538461542e-05,
"loss": 1.7502,
"step": 5
},
{
"epoch": 0.01,
"learning_rate": 2.884615384615385e-05,
"loss": 2.2112,
"step": 6
},
{
"epoch": 0.01,
"learning_rate": 3.365384615384615e-05,
"loss": 1.5895,
"step": 7
},
{
"epoch": 0.01,
"learning_rate": 3.846153846153846e-05,
"loss": 1.9856,
"step": 8
},
{
"epoch": 0.02,
"learning_rate": 4.3269230769230766e-05,
"loss": 2.1565,
"step": 9
},
{
"epoch": 0.02,
"learning_rate": 4.8076923076923084e-05,
"loss": 1.6874,
"step": 10
},
{
"epoch": 0.02,
"learning_rate": 5.288461538461539e-05,
"loss": 1.614,
"step": 11
},
{
"epoch": 0.02,
"learning_rate": 5.76923076923077e-05,
"loss": 1.8226,
"step": 12
},
{
"epoch": 0.02,
"learning_rate": 6.25e-05,
"loss": 1.4058,
"step": 13
},
{
"epoch": 0.02,
"learning_rate": 6.73076923076923e-05,
"loss": 1.4717,
"step": 14
},
{
"epoch": 0.03,
"learning_rate": 7.211538461538461e-05,
"loss": 1.5335,
"step": 15
},
{
"epoch": 0.03,
"learning_rate": 7.692307692307693e-05,
"loss": 2.1125,
"step": 16
},
{
"epoch": 0.03,
"learning_rate": 8.173076923076923e-05,
"loss": 1.9451,
"step": 17
},
{
"epoch": 0.03,
"learning_rate": 8.653846153846153e-05,
"loss": 1.7484,
"step": 18
},
{
"epoch": 0.03,
"learning_rate": 9.134615384615384e-05,
"loss": 1.7573,
"step": 19
},
{
"epoch": 0.04,
"learning_rate": 9.615384615384617e-05,
"loss": 2.1968,
"step": 20
},
{
"epoch": 0.04,
"learning_rate": 0.00010096153846153847,
"loss": 1.7941,
"step": 21
},
{
"epoch": 0.04,
"learning_rate": 0.00010576923076923077,
"loss": 1.8685,
"step": 22
},
{
"epoch": 0.04,
"learning_rate": 0.00011057692307692308,
"loss": 2.0065,
"step": 23
},
{
"epoch": 0.04,
"learning_rate": 0.0001153846153846154,
"loss": 1.9018,
"step": 24
},
{
"epoch": 0.04,
"learning_rate": 0.0001201923076923077,
"loss": 2.0752,
"step": 25
},
{
"epoch": 0.05,
"learning_rate": 0.000125,
"loss": 1.716,
"step": 26
},
{
"epoch": 0.05,
"learning_rate": 0.00012980769230769233,
"loss": 1.6542,
"step": 27
},
{
"epoch": 0.05,
"learning_rate": 0.0001346153846153846,
"loss": 1.7198,
"step": 28
},
{
"epoch": 0.05,
"learning_rate": 0.00013942307692307694,
"loss": 1.8383,
"step": 29
},
{
"epoch": 0.05,
"learning_rate": 0.00014423076923076922,
"loss": 1.6938,
"step": 30
},
{
"epoch": 0.05,
"learning_rate": 0.00014903846153846155,
"loss": 1.9142,
"step": 31
},
{
"epoch": 0.06,
"learning_rate": 0.00015384615384615385,
"loss": 1.7715,
"step": 32
},
{
"epoch": 0.06,
"learning_rate": 0.00015865384615384616,
"loss": 1.467,
"step": 33
},
{
"epoch": 0.06,
"learning_rate": 0.00016346153846153846,
"loss": 1.7608,
"step": 34
},
{
"epoch": 0.06,
"learning_rate": 0.0001682692307692308,
"loss": 1.5371,
"step": 35
},
{
"epoch": 0.06,
"learning_rate": 0.00017307692307692307,
"loss": 1.6211,
"step": 36
},
{
"epoch": 0.07,
"learning_rate": 0.0001778846153846154,
"loss": 1.7275,
"step": 37
},
{
"epoch": 0.07,
"learning_rate": 0.00018269230769230767,
"loss": 1.7063,
"step": 38
},
{
"epoch": 0.07,
"learning_rate": 0.0001875,
"loss": 1.9367,
"step": 39
},
{
"epoch": 0.07,
"learning_rate": 0.00019230769230769233,
"loss": 1.6608,
"step": 40
},
{
"epoch": 0.07,
"learning_rate": 0.0001971153846153846,
"loss": 1.9212,
"step": 41
},
{
"epoch": 0.07,
"learning_rate": 0.00020192307692307694,
"loss": 1.611,
"step": 42
},
{
"epoch": 0.08,
"learning_rate": 0.00020673076923076922,
"loss": 1.8582,
"step": 43
},
{
"epoch": 0.08,
"learning_rate": 0.00021153846153846155,
"loss": 1.5658,
"step": 44
},
{
"epoch": 0.08,
"learning_rate": 0.00021634615384615385,
"loss": 1.8543,
"step": 45
},
{
"epoch": 0.08,
"learning_rate": 0.00022115384615384616,
"loss": 1.7291,
"step": 46
},
{
"epoch": 0.08,
"learning_rate": 0.00022596153846153846,
"loss": 1.9411,
"step": 47
},
{
"epoch": 0.08,
"learning_rate": 0.0002307692307692308,
"loss": 1.8138,
"step": 48
},
{
"epoch": 0.09,
"learning_rate": 0.00023557692307692307,
"loss": 1.708,
"step": 49
},
{
"epoch": 0.09,
"learning_rate": 0.0002403846153846154,
"loss": 1.675,
"step": 50
},
{
"epoch": 0.09,
"learning_rate": 0.0002451923076923077,
"loss": 1.658,
"step": 51
},
{
"epoch": 0.09,
"learning_rate": 0.00025,
"loss": 1.9797,
"step": 52
},
{
"epoch": 0.09,
"learning_rate": 0.000249997656075194,
"loss": 1.5857,
"step": 53
},
{
"epoch": 0.1,
"learning_rate": 0.0002499906243886798,
"loss": 1.8837,
"step": 54
},
{
"epoch": 0.1,
"learning_rate": 0.00024997890520416535,
"loss": 1.8022,
"step": 55
},
{
"epoch": 0.1,
"learning_rate": 0.0002499624989611527,
"loss": 1.9101,
"step": 56
},
{
"epoch": 0.1,
"learning_rate": 0.00024994140627492207,
"loss": 1.5614,
"step": 57
},
{
"epoch": 0.1,
"learning_rate": 0.00024991562793650793,
"loss": 1.8988,
"step": 58
},
{
"epoch": 0.1,
"learning_rate": 0.0002498851649126703,
"loss": 1.7089,
"step": 59
},
{
"epoch": 0.11,
"learning_rate": 0.00024985001834585763,
"loss": 1.7782,
"step": 60
},
{
"epoch": 0.11,
"learning_rate": 0.0002498101895541645,
"loss": 1.5338,
"step": 61
},
{
"epoch": 0.11,
"learning_rate": 0.0002497656800312821,
"loss": 1.6484,
"step": 62
},
{
"epoch": 0.11,
"learning_rate": 0.0002497164914464419,
"loss": 1.733,
"step": 63
},
{
"epoch": 0.11,
"learning_rate": 0.00024966262564435343,
"loss": 1.8893,
"step": 64
},
{
"epoch": 0.12,
"learning_rate": 0.000249604084645135,
"loss": 1.6361,
"step": 65
},
{
"epoch": 0.12,
"learning_rate": 0.0002495408706442377,
"loss": 1.9827,
"step": 66
},
{
"epoch": 0.12,
"learning_rate": 0.00024947298601236343,
"loss": 1.6801,
"step": 67
},
{
"epoch": 0.12,
"learning_rate": 0.0002494004332953758,
"loss": 1.9678,
"step": 68
},
{
"epoch": 0.12,
"learning_rate": 0.00024932321521420456,
"loss": 1.9245,
"step": 69
},
{
"epoch": 0.12,
"learning_rate": 0.0002492413346647437,
"loss": 1.5254,
"step": 70
},
{
"epoch": 0.13,
"learning_rate": 0.00024915479471774286,
"loss": 1.7283,
"step": 71
},
{
"epoch": 0.13,
"learning_rate": 0.00024906359861869216,
"loss": 1.9968,
"step": 72
},
{
"epoch": 0.13,
"learning_rate": 0.0002489677497877003,
"loss": 1.6559,
"step": 73
},
{
"epoch": 0.13,
"learning_rate": 0.0002488672518193665,
"loss": 1.5518,
"step": 74
},
{
"epoch": 0.13,
"learning_rate": 0.0002487621084826458,
"loss": 1.6201,
"step": 75
},
{
"epoch": 0.13,
"learning_rate": 0.0002486523237207072,
"loss": 1.7592,
"step": 76
},
{
"epoch": 0.14,
"learning_rate": 0.00024853790165078654,
"loss": 1.5929,
"step": 77
},
{
"epoch": 0.14,
"learning_rate": 0.0002484188465640313,
"loss": 1.4261,
"step": 78
},
{
"epoch": 0.14,
"learning_rate": 0.0002482951629253403,
"loss": 1.8929,
"step": 79
},
{
"epoch": 0.14,
"learning_rate": 0.0002481668553731959,
"loss": 1.5752,
"step": 80
},
{
"epoch": 0.14,
"learning_rate": 0.00024803392871949013,
"loss": 1.8596,
"step": 81
},
{
"epoch": 0.15,
"learning_rate": 0.00024789638794934436,
"loss": 1.838,
"step": 82
},
{
"epoch": 0.15,
"learning_rate": 0.00024775423822092214,
"loss": 1.7938,
"step": 83
},
{
"epoch": 0.15,
"learning_rate": 0.0002476074848652358,
"loss": 1.6448,
"step": 84
},
{
"epoch": 0.15,
"learning_rate": 0.0002474561333859467,
"loss": 1.7674,
"step": 85
},
{
"epoch": 0.15,
"learning_rate": 0.00024730018945915864,
"loss": 1.6526,
"step": 86
},
{
"epoch": 0.15,
"learning_rate": 0.000247139658933205,
"loss": 1.6921,
"step": 87
},
{
"epoch": 0.16,
"learning_rate": 0.00024697454782842944,
"loss": 1.8208,
"step": 88
},
{
"epoch": 0.16,
"learning_rate": 0.0002468048623369603,
"loss": 1.9528,
"step": 89
},
{
"epoch": 0.16,
"learning_rate": 0.00024663060882247796,
"loss": 1.8188,
"step": 90
},
{
"epoch": 0.16,
"learning_rate": 0.00024645179381997673,
"loss": 1.921,
"step": 91
},
{
"epoch": 0.16,
"learning_rate": 0.00024626842403551927,
"loss": 1.9332,
"step": 92
},
{
"epoch": 0.16,
"learning_rate": 0.0002460805063459853,
"loss": 1.967,
"step": 93
},
{
"epoch": 0.17,
"learning_rate": 0.00024588804779881383,
"loss": 1.4963,
"step": 94
},
{
"epoch": 0.17,
"learning_rate": 0.00024569105561173866,
"loss": 1.6944,
"step": 95
},
{
"epoch": 0.17,
"learning_rate": 0.00024548953717251783,
"loss": 1.9083,
"step": 96
},
{
"epoch": 0.17,
"learning_rate": 0.0002452835000386563,
"loss": 1.7368,
"step": 97
},
{
"epoch": 0.17,
"learning_rate": 0.000245072951937123,
"loss": 1.6183,
"step": 98
},
{
"epoch": 0.18,
"learning_rate": 0.00024485790076406047,
"loss": 1.7917,
"step": 99
},
{
"epoch": 0.18,
"learning_rate": 0.00024463835458448925,
"loss": 2.1032,
"step": 100
},
{
"epoch": 0.18,
"learning_rate": 0.0002444143216320052,
"loss": 1.8451,
"step": 101
},
{
"epoch": 0.18,
"learning_rate": 0.0002441858103084705,
"loss": 1.7479,
"step": 102
},
{
"epoch": 0.18,
"learning_rate": 0.000243952829183699,
"loss": 1.8158,
"step": 103
},
{
"epoch": 0.18,
"learning_rate": 0.00024371538699513443,
"loss": 1.9275,
"step": 104
},
{
"epoch": 0.19,
"learning_rate": 0.00024347349264752303,
"loss": 1.7759,
"step": 105
},
{
"epoch": 0.19,
"learning_rate": 0.00024322715521257933,
"loss": 2.0328,
"step": 106
},
{
"epoch": 0.19,
"learning_rate": 0.00024297638392864617,
"loss": 1.8839,
"step": 107
},
{
"epoch": 0.19,
"learning_rate": 0.00024272118820034804,
"loss": 1.8443,
"step": 108
},
{
"epoch": 0.19,
"learning_rate": 0.00024246157759823855,
"loss": 1.9779,
"step": 109
},
{
"epoch": 0.19,
"learning_rate": 0.00024219756185844132,
"loss": 1.6495,
"step": 110
},
{
"epoch": 0.2,
"learning_rate": 0.00024192915088228512,
"loss": 1.6645,
"step": 111
},
{
"epoch": 0.2,
"learning_rate": 0.00024165635473593215,
"loss": 1.4214,
"step": 112
},
{
"epoch": 0.2,
"learning_rate": 0.00024137918365000095,
"loss": 1.6603,
"step": 113
},
{
"epoch": 0.2,
"learning_rate": 0.00024109764801918244,
"loss": 1.4227,
"step": 114
},
{
"epoch": 0.2,
"learning_rate": 0.0002408117584018502,
"loss": 1.3033,
"step": 115
},
{
"epoch": 0.21,
"learning_rate": 0.00024052152551966457,
"loss": 2.0312,
"step": 116
},
{
"epoch": 0.21,
"learning_rate": 0.00024022696025717023,
"loss": 1.8867,
"step": 117
},
{
"epoch": 0.21,
"learning_rate": 0.00023992807366138847,
"loss": 1.6906,
"step": 118
},
{
"epoch": 0.21,
"learning_rate": 0.00023962487694140263,
"loss": 1.595,
"step": 119
},
{
"epoch": 0.21,
"learning_rate": 0.00023931738146793763,
"loss": 1.5583,
"step": 120
},
{
"epoch": 0.21,
"learning_rate": 0.00023900559877293383,
"loss": 1.9068,
"step": 121
},
{
"epoch": 0.22,
"learning_rate": 0.00023868954054911428,
"loss": 1.4536,
"step": 122
},
{
"epoch": 0.22,
"learning_rate": 0.00023836921864954635,
"loss": 1.7105,
"step": 123
},
{
"epoch": 0.22,
"learning_rate": 0.0002380446450871972,
"loss": 1.4355,
"step": 124
},
{
"epoch": 0.22,
"learning_rate": 0.00023771583203448322,
"loss": 1.7782,
"step": 125
},
{
"epoch": 0.22,
"learning_rate": 0.00023738279182281352,
"loss": 1.9977,
"step": 126
},
{
"epoch": 0.22,
"learning_rate": 0.00023704553694212752,
"loss": 1.5658,
"step": 127
},
{
"epoch": 0.23,
"learning_rate": 0.00023670408004042653,
"loss": 1.5389,
"step": 128
},
{
"epoch": 0.23,
"learning_rate": 0.00023635843392329938,
"loss": 1.7435,
"step": 129
},
{
"epoch": 0.23,
"learning_rate": 0.00023600861155344223,
"loss": 1.5706,
"step": 130
},
{
"epoch": 0.23,
"learning_rate": 0.00023565462605017228,
"loss": 1.8396,
"step": 131
},
{
"epoch": 0.23,
"learning_rate": 0.00023529649068893598,
"loss": 1.8073,
"step": 132
},
{
"epoch": 0.24,
"learning_rate": 0.00023493421890081112,
"loss": 1.9954,
"step": 133
},
{
"epoch": 0.24,
"learning_rate": 0.00023456782427200295,
"loss": 1.9553,
"step": 134
},
{
"epoch": 0.24,
"learning_rate": 0.0002341973205433348,
"loss": 1.2249,
"step": 135
},
{
"epoch": 0.24,
"learning_rate": 0.0002338227216097328,
"loss": 1.8021,
"step": 136
},
{
"epoch": 0.24,
"learning_rate": 0.00023344404151970464,
"loss": 1.8086,
"step": 137
},
{
"epoch": 0.24,
"learning_rate": 0.00023306129447481282,
"loss": 1.4283,
"step": 138
},
{
"epoch": 0.25,
"learning_rate": 0.00023267449482914203,
"loss": 1.8477,
"step": 139
},
{
"epoch": 0.25,
"learning_rate": 0.0002322836570887608,
"loss": 1.3233,
"step": 140
},
{
"epoch": 0.25,
"learning_rate": 0.0002318887959111776,
"loss": 1.7511,
"step": 141
},
{
"epoch": 0.25,
"learning_rate": 0.0002314899261047909,
"loss": 1.9737,
"step": 142
}
],
"logging_steps": 1,
"max_steps": 565,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 142,
"total_flos": 1.313729954956968e+18,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}