{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 5.0,
"eval_steps": 500,
"global_step": 105,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.047619047619047616,
"grad_norm": 46.73658663770524,
"learning_rate": 4.5454545454545455e-06,
"loss": 11.3797,
"step": 1
},
{
"epoch": 0.09523809523809523,
"grad_norm": 44.523666214134316,
"learning_rate": 9.090909090909091e-06,
"loss": 11.5191,
"step": 2
},
{
"epoch": 0.14285714285714285,
"grad_norm": 52.06639933238829,
"learning_rate": 1.3636363636363637e-05,
"loss": 10.8448,
"step": 3
},
{
"epoch": 0.19047619047619047,
"grad_norm": 69.75134205890501,
"learning_rate": 1.8181818181818182e-05,
"loss": 9.5655,
"step": 4
},
{
"epoch": 0.23809523809523808,
"grad_norm": 50.314368468329306,
"learning_rate": 2.272727272727273e-05,
"loss": 4.1848,
"step": 5
},
{
"epoch": 0.2857142857142857,
"grad_norm": 7.357015429387816,
"learning_rate": 2.7272727272727273e-05,
"loss": 1.9862,
"step": 6
},
{
"epoch": 0.3333333333333333,
"grad_norm": 7.401615020276034,
"learning_rate": 3.181818181818182e-05,
"loss": 1.9584,
"step": 7
},
{
"epoch": 0.38095238095238093,
"grad_norm": 3.149164302427893,
"learning_rate": 3.6363636363636364e-05,
"loss": 1.5975,
"step": 8
},
{
"epoch": 0.42857142857142855,
"grad_norm": 1.9054072928164427,
"learning_rate": 4.0909090909090915e-05,
"loss": 1.4777,
"step": 9
},
{
"epoch": 0.47619047619047616,
"grad_norm": 2.09522071898415,
"learning_rate": 4.545454545454546e-05,
"loss": 1.4076,
"step": 10
},
{
"epoch": 0.5238095238095238,
"grad_norm": 1.4886426113733002,
"learning_rate": 5e-05,
"loss": 1.3226,
"step": 11
},
{
"epoch": 0.5714285714285714,
"grad_norm": 1.594161802836668,
"learning_rate": 4.946808510638298e-05,
"loss": 1.2825,
"step": 12
},
{
"epoch": 0.6190476190476191,
"grad_norm": 1.5132463664090081,
"learning_rate": 4.893617021276596e-05,
"loss": 1.3821,
"step": 13
},
{
"epoch": 0.6666666666666666,
"grad_norm": 1.4916977417841104,
"learning_rate": 4.840425531914894e-05,
"loss": 1.2297,
"step": 14
},
{
"epoch": 0.7142857142857143,
"grad_norm": 1.07234396111405,
"learning_rate": 4.787234042553192e-05,
"loss": 1.2448,
"step": 15
},
{
"epoch": 0.7619047619047619,
"grad_norm": 0.9261313011360983,
"learning_rate": 4.734042553191489e-05,
"loss": 1.2062,
"step": 16
},
{
"epoch": 0.8095238095238095,
"grad_norm": 1.3648296835839082,
"learning_rate": 4.680851063829788e-05,
"loss": 1.2428,
"step": 17
},
{
"epoch": 0.8571428571428571,
"grad_norm": 0.9239559683166827,
"learning_rate": 4.627659574468085e-05,
"loss": 1.2616,
"step": 18
},
{
"epoch": 0.9047619047619048,
"grad_norm": 0.7209850490291895,
"learning_rate": 4.574468085106383e-05,
"loss": 1.0656,
"step": 19
},
{
"epoch": 0.9523809523809523,
"grad_norm": 0.9769698482701535,
"learning_rate": 4.5212765957446815e-05,
"loss": 1.0884,
"step": 20
},
{
"epoch": 1.0,
"grad_norm": 0.6106942050998588,
"learning_rate": 4.468085106382979e-05,
"loss": 1.0672,
"step": 21
},
{
"epoch": 1.0476190476190477,
"grad_norm": 0.801040183534444,
"learning_rate": 4.414893617021277e-05,
"loss": 1.0478,
"step": 22
},
{
"epoch": 1.0952380952380953,
"grad_norm": 0.7146659615588544,
"learning_rate": 4.3617021276595746e-05,
"loss": 1.091,
"step": 23
},
{
"epoch": 1.1428571428571428,
"grad_norm": 0.5905709163842993,
"learning_rate": 4.3085106382978725e-05,
"loss": 1.1275,
"step": 24
},
{
"epoch": 1.1904761904761905,
"grad_norm": 0.5826060194579513,
"learning_rate": 4.2553191489361704e-05,
"loss": 1.0079,
"step": 25
},
{
"epoch": 1.2380952380952381,
"grad_norm": 0.6151603075503278,
"learning_rate": 4.2021276595744684e-05,
"loss": 1.0802,
"step": 26
},
{
"epoch": 1.2857142857142856,
"grad_norm": 0.5264186877249164,
"learning_rate": 4.148936170212766e-05,
"loss": 1.0406,
"step": 27
},
{
"epoch": 1.3333333333333333,
"grad_norm": 0.5461664948214178,
"learning_rate": 4.095744680851064e-05,
"loss": 1.0072,
"step": 28
},
{
"epoch": 1.380952380952381,
"grad_norm": 0.4822129939360476,
"learning_rate": 4.0425531914893614e-05,
"loss": 1.0367,
"step": 29
},
{
"epoch": 1.4285714285714286,
"grad_norm": 0.49848757883873196,
"learning_rate": 3.9893617021276594e-05,
"loss": 0.9754,
"step": 30
},
{
"epoch": 1.4761904761904763,
"grad_norm": 0.5766625117987682,
"learning_rate": 3.936170212765958e-05,
"loss": 1.0631,
"step": 31
},
{
"epoch": 1.5238095238095237,
"grad_norm": 0.4859811195017287,
"learning_rate": 3.882978723404255e-05,
"loss": 1.0003,
"step": 32
},
{
"epoch": 1.5714285714285714,
"grad_norm": 0.5617547786833981,
"learning_rate": 3.829787234042553e-05,
"loss": 1.0312,
"step": 33
},
{
"epoch": 1.619047619047619,
"grad_norm": 0.4760704205398701,
"learning_rate": 3.776595744680852e-05,
"loss": 0.9957,
"step": 34
},
{
"epoch": 1.6666666666666665,
"grad_norm": 0.5525097760212518,
"learning_rate": 3.723404255319149e-05,
"loss": 1.0427,
"step": 35
},
{
"epoch": 1.7142857142857144,
"grad_norm": 0.48722284467868165,
"learning_rate": 3.670212765957447e-05,
"loss": 0.9141,
"step": 36
},
{
"epoch": 1.7619047619047619,
"grad_norm": 0.48286908931413486,
"learning_rate": 3.617021276595745e-05,
"loss": 0.9701,
"step": 37
},
{
"epoch": 1.8095238095238095,
"grad_norm": 0.509607069236827,
"learning_rate": 3.563829787234043e-05,
"loss": 0.9234,
"step": 38
},
{
"epoch": 1.8571428571428572,
"grad_norm": 0.48664273824178367,
"learning_rate": 3.5106382978723407e-05,
"loss": 1.0605,
"step": 39
},
{
"epoch": 1.9047619047619047,
"grad_norm": 0.4951502177756041,
"learning_rate": 3.4574468085106386e-05,
"loss": 1.0418,
"step": 40
},
{
"epoch": 1.9523809523809523,
"grad_norm": 0.5599844810380401,
"learning_rate": 3.4042553191489365e-05,
"loss": 1.0207,
"step": 41
},
{
"epoch": 2.0,
"grad_norm": 0.47167217001219114,
"learning_rate": 3.3510638297872344e-05,
"loss": 0.8667,
"step": 42
},
{
"epoch": 2.0476190476190474,
"grad_norm": 0.5766076030131434,
"learning_rate": 3.2978723404255317e-05,
"loss": 0.9305,
"step": 43
},
{
"epoch": 2.0952380952380953,
"grad_norm": 0.5223243597728271,
"learning_rate": 3.2446808510638296e-05,
"loss": 0.8412,
"step": 44
},
{
"epoch": 2.142857142857143,
"grad_norm": 0.46907426786479,
"learning_rate": 3.191489361702128e-05,
"loss": 0.8973,
"step": 45
},
{
"epoch": 2.1904761904761907,
"grad_norm": 0.4701126779049834,
"learning_rate": 3.1382978723404254e-05,
"loss": 0.8509,
"step": 46
},
{
"epoch": 2.238095238095238,
"grad_norm": 0.48418735511330596,
"learning_rate": 3.085106382978723e-05,
"loss": 0.8757,
"step": 47
},
{
"epoch": 2.2857142857142856,
"grad_norm": 0.6556800657324943,
"learning_rate": 3.0319148936170216e-05,
"loss": 0.9124,
"step": 48
},
{
"epoch": 2.3333333333333335,
"grad_norm": 0.6014700280019579,
"learning_rate": 2.9787234042553192e-05,
"loss": 0.8755,
"step": 49
},
{
"epoch": 2.380952380952381,
"grad_norm": 0.5915165889418732,
"learning_rate": 2.925531914893617e-05,
"loss": 0.8925,
"step": 50
},
{
"epoch": 2.4285714285714284,
"grad_norm": 0.6317703078383352,
"learning_rate": 2.8723404255319154e-05,
"loss": 0.8663,
"step": 51
},
{
"epoch": 2.4761904761904763,
"grad_norm": 0.6429525648150871,
"learning_rate": 2.819148936170213e-05,
"loss": 0.8198,
"step": 52
},
{
"epoch": 2.5238095238095237,
"grad_norm": 0.5187394614056622,
"learning_rate": 2.765957446808511e-05,
"loss": 0.7651,
"step": 53
},
{
"epoch": 2.571428571428571,
"grad_norm": 0.4845593782104512,
"learning_rate": 2.7127659574468084e-05,
"loss": 0.8805,
"step": 54
},
{
"epoch": 2.619047619047619,
"grad_norm": 0.47561465960794413,
"learning_rate": 2.6595744680851064e-05,
"loss": 0.9561,
"step": 55
},
{
"epoch": 2.6666666666666665,
"grad_norm": 0.48208401639762166,
"learning_rate": 2.6063829787234046e-05,
"loss": 0.8638,
"step": 56
},
{
"epoch": 2.7142857142857144,
"grad_norm": 0.5193206572882076,
"learning_rate": 2.5531914893617022e-05,
"loss": 0.8062,
"step": 57
},
{
"epoch": 2.761904761904762,
"grad_norm": 0.5299016279110512,
"learning_rate": 2.5e-05,
"loss": 0.8236,
"step": 58
},
{
"epoch": 2.8095238095238093,
"grad_norm": 0.47247578172991544,
"learning_rate": 2.446808510638298e-05,
"loss": 0.786,
"step": 59
},
{
"epoch": 2.857142857142857,
"grad_norm": 0.4887209838922498,
"learning_rate": 2.393617021276596e-05,
"loss": 0.8275,
"step": 60
},
{
"epoch": 2.9047619047619047,
"grad_norm": 0.47820835963456026,
"learning_rate": 2.340425531914894e-05,
"loss": 0.8581,
"step": 61
},
{
"epoch": 2.9523809523809526,
"grad_norm": 0.46592054309550573,
"learning_rate": 2.2872340425531915e-05,
"loss": 0.8726,
"step": 62
},
{
"epoch": 3.0,
"grad_norm": 0.5063226147267863,
"learning_rate": 2.2340425531914894e-05,
"loss": 0.8522,
"step": 63
},
{
"epoch": 3.0476190476190474,
"grad_norm": 0.4349456733161818,
"learning_rate": 2.1808510638297873e-05,
"loss": 0.7678,
"step": 64
},
{
"epoch": 3.0952380952380953,
"grad_norm": 0.43322041549114737,
"learning_rate": 2.1276595744680852e-05,
"loss": 0.7291,
"step": 65
},
{
"epoch": 3.142857142857143,
"grad_norm": 0.3891658154700991,
"learning_rate": 2.074468085106383e-05,
"loss": 0.7098,
"step": 66
},
{
"epoch": 3.1904761904761907,
"grad_norm": 0.48090626334768466,
"learning_rate": 2.0212765957446807e-05,
"loss": 0.7075,
"step": 67
},
{
"epoch": 3.238095238095238,
"grad_norm": 0.4192109123095055,
"learning_rate": 1.968085106382979e-05,
"loss": 0.7135,
"step": 68
},
{
"epoch": 3.2857142857142856,
"grad_norm": 0.4538447830174013,
"learning_rate": 1.9148936170212766e-05,
"loss": 0.7293,
"step": 69
},
{
"epoch": 3.3333333333333335,
"grad_norm": 0.400428247237971,
"learning_rate": 1.8617021276595745e-05,
"loss": 0.7182,
"step": 70
},
{
"epoch": 3.380952380952381,
"grad_norm": 0.41259082028208505,
"learning_rate": 1.8085106382978724e-05,
"loss": 0.7111,
"step": 71
},
{
"epoch": 3.4285714285714284,
"grad_norm": 0.42210992879504494,
"learning_rate": 1.7553191489361703e-05,
"loss": 0.7358,
"step": 72
},
{
"epoch": 3.4761904761904763,
"grad_norm": 0.4805171232293903,
"learning_rate": 1.7021276595744682e-05,
"loss": 0.7693,
"step": 73
},
{
"epoch": 3.5238095238095237,
"grad_norm": 0.4290739751199803,
"learning_rate": 1.6489361702127658e-05,
"loss": 0.7538,
"step": 74
},
{
"epoch": 3.571428571428571,
"grad_norm": 0.46386967645771476,
"learning_rate": 1.595744680851064e-05,
"loss": 0.7483,
"step": 75
},
{
"epoch": 3.619047619047619,
"grad_norm": 0.4600000815160992,
"learning_rate": 1.5425531914893617e-05,
"loss": 0.6961,
"step": 76
},
{
"epoch": 3.6666666666666665,
"grad_norm": 0.48967690124437346,
"learning_rate": 1.4893617021276596e-05,
"loss": 0.7759,
"step": 77
},
{
"epoch": 3.7142857142857144,
"grad_norm": 0.43799987175594934,
"learning_rate": 1.4361702127659577e-05,
"loss": 0.6973,
"step": 78
},
{
"epoch": 3.761904761904762,
"grad_norm": 0.4170501746303817,
"learning_rate": 1.3829787234042554e-05,
"loss": 0.7713,
"step": 79
},
{
"epoch": 3.8095238095238093,
"grad_norm": 0.44313691170789904,
"learning_rate": 1.3297872340425532e-05,
"loss": 0.7287,
"step": 80
},
{
"epoch": 3.857142857142857,
"grad_norm": 0.3626173621626196,
"learning_rate": 1.2765957446808511e-05,
"loss": 0.7152,
"step": 81
},
{
"epoch": 3.9047619047619047,
"grad_norm": 0.40980441661962524,
"learning_rate": 1.223404255319149e-05,
"loss": 0.7388,
"step": 82
},
{
"epoch": 3.9523809523809526,
"grad_norm": 0.3476970829860624,
"learning_rate": 1.170212765957447e-05,
"loss": 0.6835,
"step": 83
},
{
"epoch": 4.0,
"grad_norm": 0.34534853622038764,
"learning_rate": 1.1170212765957447e-05,
"loss": 0.6936,
"step": 84
},
{
"epoch": 4.0476190476190474,
"grad_norm": 0.34717149811141956,
"learning_rate": 1.0638297872340426e-05,
"loss": 0.6412,
"step": 85
},
{
"epoch": 4.095238095238095,
"grad_norm": 0.39121639106624856,
"learning_rate": 1.0106382978723404e-05,
"loss": 0.6447,
"step": 86
},
{
"epoch": 4.142857142857143,
"grad_norm": 0.3853359648131134,
"learning_rate": 9.574468085106383e-06,
"loss": 0.67,
"step": 87
},
{
"epoch": 4.190476190476191,
"grad_norm": 0.333219732679469,
"learning_rate": 9.042553191489362e-06,
"loss": 0.6951,
"step": 88
},
{
"epoch": 4.238095238095238,
"grad_norm": 0.4113440938373367,
"learning_rate": 8.510638297872341e-06,
"loss": 0.6427,
"step": 89
},
{
"epoch": 4.285714285714286,
"grad_norm": 0.4292570295550588,
"learning_rate": 7.97872340425532e-06,
"loss": 0.6563,
"step": 90
},
{
"epoch": 4.333333333333333,
"grad_norm": 0.3325469730092871,
"learning_rate": 7.446808510638298e-06,
"loss": 0.6215,
"step": 91
},
{
"epoch": 4.380952380952381,
"grad_norm": 0.37390136106392785,
"learning_rate": 6.914893617021277e-06,
"loss": 0.6194,
"step": 92
},
{
"epoch": 4.428571428571429,
"grad_norm": 0.4143640454410716,
"learning_rate": 6.3829787234042555e-06,
"loss": 0.6207,
"step": 93
},
{
"epoch": 4.476190476190476,
"grad_norm": 0.3648244167487935,
"learning_rate": 5.851063829787235e-06,
"loss": 0.5998,
"step": 94
},
{
"epoch": 4.523809523809524,
"grad_norm": 0.3512446333779434,
"learning_rate": 5.319148936170213e-06,
"loss": 0.6351,
"step": 95
},
{
"epoch": 4.571428571428571,
"grad_norm": 0.339436273470992,
"learning_rate": 4.787234042553191e-06,
"loss": 0.599,
"step": 96
},
{
"epoch": 4.619047619047619,
"grad_norm": 0.4440038854080703,
"learning_rate": 4.255319148936171e-06,
"loss": 0.584,
"step": 97
},
{
"epoch": 4.666666666666667,
"grad_norm": 0.36581621694617583,
"learning_rate": 3.723404255319149e-06,
"loss": 0.6203,
"step": 98
},
{
"epoch": 4.714285714285714,
"grad_norm": 0.3509331585886843,
"learning_rate": 3.1914893617021277e-06,
"loss": 0.5981,
"step": 99
},
{
"epoch": 4.761904761904762,
"grad_norm": 0.3403976301106126,
"learning_rate": 2.6595744680851065e-06,
"loss": 0.6415,
"step": 100
},
{
"epoch": 4.809523809523809,
"grad_norm": 0.30854856192103863,
"learning_rate": 2.1276595744680853e-06,
"loss": 0.6054,
"step": 101
},
{
"epoch": 4.857142857142857,
"grad_norm": 0.2984920819758894,
"learning_rate": 1.5957446808510639e-06,
"loss": 0.6145,
"step": 102
},
{
"epoch": 4.904761904761905,
"grad_norm": 0.30989076720361786,
"learning_rate": 1.0638297872340427e-06,
"loss": 0.6317,
"step": 103
},
{
"epoch": 4.9523809523809526,
"grad_norm": 0.3438826262811614,
"learning_rate": 5.319148936170213e-07,
"loss": 0.6193,
"step": 104
},
{
"epoch": 5.0,
"grad_norm": 0.32644346890582215,
"learning_rate": 0.0,
"loss": 0.6277,
"step": 105
},
{
"epoch": 5.0,
"step": 105,
"total_flos": 8.363833783064986e+16,
"train_loss": 1.3066247133981614,
"train_runtime": 6202.6685,
"train_samples_per_second": 0.266,
"train_steps_per_second": 0.017
}
],
"logging_steps": 1,
"max_steps": 105,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 8.363833783064986e+16,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}