{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 5.0,
"eval_steps": 500,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.05063291139240506,
"grad_norm": 23.053184509277344,
"learning_rate": 0.0,
"loss": 3.0474,
"step": 1
},
{
"epoch": 0.10126582278481013,
"grad_norm": 37.69414520263672,
"learning_rate": 3.3333333333333335e-05,
"loss": 3.2925,
"step": 2
},
{
"epoch": 0.1518987341772152,
"grad_norm": 5.540323734283447,
"learning_rate": 6.666666666666667e-05,
"loss": 3.0089,
"step": 3
},
{
"epoch": 0.20253164556962025,
"grad_norm": 4.970560073852539,
"learning_rate": 0.0001,
"loss": 2.6052,
"step": 4
},
{
"epoch": 0.25316455696202533,
"grad_norm": 3.1140971183776855,
"learning_rate": 0.00013333333333333334,
"loss": 2.1661,
"step": 5
},
{
"epoch": 0.3037974683544304,
"grad_norm": 5.0788140296936035,
"learning_rate": 0.00016666666666666666,
"loss": 2.7634,
"step": 6
},
{
"epoch": 0.35443037974683544,
"grad_norm": 3.610426425933838,
"learning_rate": 0.0002,
"loss": 2.5581,
"step": 7
},
{
"epoch": 0.4050632911392405,
"grad_norm": 3.3719804286956787,
"learning_rate": 0.00023333333333333333,
"loss": 2.3932,
"step": 8
},
{
"epoch": 0.45569620253164556,
"grad_norm": 3.6790554523468018,
"learning_rate": 0.0002666666666666667,
"loss": 2.7487,
"step": 9
},
{
"epoch": 0.5063291139240507,
"grad_norm": 3.152188301086426,
"learning_rate": 0.0003,
"loss": 2.5796,
"step": 10
},
{
"epoch": 0.5569620253164557,
"grad_norm": 3.6318979263305664,
"learning_rate": 0.0003333333333333333,
"loss": 2.6359,
"step": 11
},
{
"epoch": 0.6075949367088608,
"grad_norm": 2.668596029281616,
"learning_rate": 0.00036666666666666667,
"loss": 2.7351,
"step": 12
},
{
"epoch": 0.6582278481012658,
"grad_norm": 5.233928680419922,
"learning_rate": 0.0004,
"loss": 2.4318,
"step": 13
},
{
"epoch": 0.7088607594936709,
"grad_norm": 3.2564899921417236,
"learning_rate": 0.00043333333333333337,
"loss": 2.0882,
"step": 14
},
{
"epoch": 0.759493670886076,
"grad_norm": 3.2959020137786865,
"learning_rate": 0.00046666666666666666,
"loss": 2.3181,
"step": 15
},
{
"epoch": 0.810126582278481,
"grad_norm": 3.4378912448883057,
"learning_rate": 0.0005,
"loss": 2.7332,
"step": 16
},
{
"epoch": 0.8607594936708861,
"grad_norm": 3.6707003116607666,
"learning_rate": 0.0004998292650357557,
"loss": 2.5919,
"step": 17
},
{
"epoch": 0.9113924050632911,
"grad_norm": 3.861492872238159,
"learning_rate": 0.0004993172933464471,
"loss": 2.9812,
"step": 18
},
{
"epoch": 0.9620253164556962,
"grad_norm": 5.971181869506836,
"learning_rate": 0.0004984647842238185,
"loss": 2.7555,
"step": 19
},
{
"epoch": 1.0,
"grad_norm": 4.519949436187744,
"learning_rate": 0.0004972729020927865,
"loss": 2.4604,
"step": 20
},
{
"epoch": 1.0506329113924051,
"grad_norm": 6.372091770172119,
"learning_rate": 0.0004957432749209755,
"loss": 1.9803,
"step": 21
},
{
"epoch": 1.1012658227848102,
"grad_norm": 3.0549910068511963,
"learning_rate": 0.0004938779919951092,
"loss": 1.7481,
"step": 22
},
{
"epoch": 1.1518987341772151,
"grad_norm": 3.166491746902466,
"learning_rate": 0.0004916796010672969,
"loss": 1.7108,
"step": 23
},
{
"epoch": 1.2025316455696202,
"grad_norm": 5.136909008026123,
"learning_rate": 0.0004891511048751102,
"loss": 1.9706,
"step": 24
},
{
"epoch": 1.2531645569620253,
"grad_norm": 3.662278413772583,
"learning_rate": 0.00048629595704020493,
"loss": 1.451,
"step": 25
},
{
"epoch": 1.3037974683544304,
"grad_norm": 3.243025302886963,
"learning_rate": 0.00048311805735108893,
"loss": 1.882,
"step": 26
},
{
"epoch": 1.3544303797468356,
"grad_norm": 4.813160419464111,
"learning_rate": 0.0004796217464364808,
"loss": 1.9157,
"step": 27
},
{
"epoch": 1.4050632911392404,
"grad_norm": 3.516997814178467,
"learning_rate": 0.0004758117998365322,
"loss": 1.8105,
"step": 28
},
{
"epoch": 1.4556962025316456,
"grad_norm": 3.336134672164917,
"learning_rate": 0.00047169342148001547,
"loss": 1.7735,
"step": 29
},
{
"epoch": 1.5063291139240507,
"grad_norm": 3.748394727706909,
"learning_rate": 0.0004672722365763821,
"loss": 1.6359,
"step": 30
},
{
"epoch": 1.5569620253164556,
"grad_norm": 4.215877056121826,
"learning_rate": 0.0004625542839324036,
"loss": 1.557,
"step": 31
},
{
"epoch": 1.6075949367088609,
"grad_norm": 3.181777000427246,
"learning_rate": 0.00045754600770388763,
"loss": 1.6716,
"step": 32
},
{
"epoch": 1.6582278481012658,
"grad_norm": 5.638424873352051,
"learning_rate": 0.0004522542485937369,
"loss": 1.8503,
"step": 33
},
{
"epoch": 1.7088607594936709,
"grad_norm": 3.092801570892334,
"learning_rate": 0.0004466862345083708,
"loss": 1.7874,
"step": 34
},
{
"epoch": 1.759493670886076,
"grad_norm": 3.7433102130889893,
"learning_rate": 0.0004408495706852758,
"loss": 1.8509,
"step": 35
},
{
"epoch": 1.810126582278481,
"grad_norm": 5.199512004852295,
"learning_rate": 0.00043475222930516476,
"loss": 1.9676,
"step": 36
},
{
"epoch": 1.8607594936708862,
"grad_norm": 2.9524500370025635,
"learning_rate": 0.0004284025386029381,
"loss": 1.6064,
"step": 37
},
{
"epoch": 1.9113924050632911,
"grad_norm": 3.5136239528656006,
"learning_rate": 0.00042180917149231567,
"loss": 1.7605,
"step": 38
},
{
"epoch": 1.9620253164556962,
"grad_norm": 4.008464813232422,
"learning_rate": 0.0004149811337196807,
"loss": 1.6991,
"step": 39
},
{
"epoch": 2.0,
"grad_norm": 3.3913097381591797,
"learning_rate": 0.00040792775156331276,
"loss": 1.9957,
"step": 40
},
{
"epoch": 2.050632911392405,
"grad_norm": 3.1906797885894775,
"learning_rate": 0.0004006586590948141,
"loss": 1.1241,
"step": 41
},
{
"epoch": 2.1012658227848102,
"grad_norm": 72.32931518554688,
"learning_rate": 0.0003931837850201263,
"loss": 1.0448,
"step": 42
},
{
"epoch": 2.151898734177215,
"grad_norm": 8.60446548461914,
"learning_rate": 0.00038551333911811237,
"loss": 1.2111,
"step": 43
},
{
"epoch": 2.2025316455696204,
"grad_norm": 111.643798828125,
"learning_rate": 0.00037765779829522674,
"loss": 1.2316,
"step": 44
},
{
"epoch": 2.2531645569620253,
"grad_norm": 69.01380157470703,
"learning_rate": 0.00036962789227532164,
"loss": 1.1394,
"step": 45
},
{
"epoch": 2.3037974683544302,
"grad_norm": 49.87290573120117,
"learning_rate": 0.0003614345889441346,
"loss": 1.5392,
"step": 46
},
{
"epoch": 2.3544303797468356,
"grad_norm": 6.338498115539551,
"learning_rate": 0.0003530890793684759,
"loss": 1.3349,
"step": 47
},
{
"epoch": 2.4050632911392404,
"grad_norm": 3.9022185802459717,
"learning_rate": 0.0003446027625105776,
"loss": 1.4879,
"step": 48
},
{
"epoch": 2.4556962025316453,
"grad_norm": 3.2077016830444336,
"learning_rate": 0.00033598722965848206,
"loss": 1.3891,
"step": 49
},
{
"epoch": 2.5063291139240507,
"grad_norm": 3.5117242336273193,
"learning_rate": 0.00032725424859373687,
"loss": 1.0252,
"step": 50
},
{
"epoch": 2.5569620253164556,
"grad_norm": 2.9290771484375,
"learning_rate": 0.0003184157475180208,
"loss": 1.1663,
"step": 51
},
{
"epoch": 2.607594936708861,
"grad_norm": 3.6337857246398926,
"learning_rate": 0.00030948379876065467,
"loss": 0.8949,
"step": 52
},
{
"epoch": 2.6582278481012658,
"grad_norm": 3.0574092864990234,
"learning_rate": 0.00030047060228925254,
"loss": 1.0849,
"step": 53
},
{
"epoch": 2.708860759493671,
"grad_norm": 4.709362983703613,
"learning_rate": 0.0002913884690460325,
"loss": 1.5214,
"step": 54
},
{
"epoch": 2.759493670886076,
"grad_norm": 2.733558416366577,
"learning_rate": 0.00028224980413255084,
"loss": 0.9746,
"step": 55
},
{
"epoch": 2.810126582278481,
"grad_norm": 2.5984580516815186,
"learning_rate": 0.0002730670898658255,
"loss": 0.9218,
"step": 56
},
{
"epoch": 2.8607594936708862,
"grad_norm": 3.3489749431610107,
"learning_rate": 0.0002638528687289925,
"loss": 0.9848,
"step": 57
},
{
"epoch": 2.911392405063291,
"grad_norm": 3.0495173931121826,
"learning_rate": 0.0002546197262397825,
"loss": 1.0627,
"step": 58
},
{
"epoch": 2.962025316455696,
"grad_norm": 3.800489664077759,
"learning_rate": 0.0002453802737602176,
"loss": 1.193,
"step": 59
},
{
"epoch": 3.0,
"grad_norm": 2.3720569610595703,
"learning_rate": 0.00023614713127100752,
"loss": 0.5813,
"step": 60
},
{
"epoch": 3.050632911392405,
"grad_norm": 2.65234112739563,
"learning_rate": 0.00022693291013417452,
"loss": 0.7218,
"step": 61
},
{
"epoch": 3.1012658227848102,
"grad_norm": 1.9882073402404785,
"learning_rate": 0.00021775019586744925,
"loss": 0.5474,
"step": 62
},
{
"epoch": 3.151898734177215,
"grad_norm": 2.5399203300476074,
"learning_rate": 0.0002086115309539675,
"loss": 0.7906,
"step": 63
},
{
"epoch": 3.2025316455696204,
"grad_norm": 1.6655718088150024,
"learning_rate": 0.0001995293977107475,
"loss": 0.3726,
"step": 64
},
{
"epoch": 3.2531645569620253,
"grad_norm": 2.330378770828247,
"learning_rate": 0.00019051620123934537,
"loss": 0.6493,
"step": 65
},
{
"epoch": 3.3037974683544302,
"grad_norm": 2.360882043838501,
"learning_rate": 0.0001815842524819793,
"loss": 0.6553,
"step": 66
},
{
"epoch": 3.3544303797468356,
"grad_norm": 2.726463556289673,
"learning_rate": 0.00017274575140626317,
"loss": 0.7101,
"step": 67
},
{
"epoch": 3.4050632911392404,
"grad_norm": 2.313028573989868,
"learning_rate": 0.00016401277034151795,
"loss": 0.773,
"step": 68
},
{
"epoch": 3.4556962025316453,
"grad_norm": 2.5557079315185547,
"learning_rate": 0.00015539723748942243,
"loss": 0.7098,
"step": 69
},
{
"epoch": 3.5063291139240507,
"grad_norm": 2.11527681350708,
"learning_rate": 0.00014691092063152418,
"loss": 0.5224,
"step": 70
},
{
"epoch": 3.5569620253164556,
"grad_norm": 2.06489634513855,
"learning_rate": 0.00013856541105586545,
"loss": 0.565,
"step": 71
},
{
"epoch": 3.607594936708861,
"grad_norm": 2.3769450187683105,
"learning_rate": 0.0001303721077246784,
"loss": 0.5167,
"step": 72
},
{
"epoch": 3.6582278481012658,
"grad_norm": 3.1446056365966797,
"learning_rate": 0.0001223422017047733,
"loss": 0.738,
"step": 73
},
{
"epoch": 3.708860759493671,
"grad_norm": 2.9032044410705566,
"learning_rate": 0.00011448666088188764,
"loss": 0.84,
"step": 74
},
{
"epoch": 3.759493670886076,
"grad_norm": 2.6407244205474854,
"learning_rate": 0.00010681621497987371,
"loss": 0.55,
"step": 75
},
{
"epoch": 3.810126582278481,
"grad_norm": 2.3889331817626953,
"learning_rate": 9.934134090518593e-05,
"loss": 0.6036,
"step": 76
},
{
"epoch": 3.8607594936708862,
"grad_norm": 2.591595411300659,
"learning_rate": 9.207224843668733e-05,
"loss": 0.4689,
"step": 77
},
{
"epoch": 3.911392405063291,
"grad_norm": 2.4760868549346924,
"learning_rate": 8.50188662803194e-05,
"loss": 0.4915,
"step": 78
},
{
"epoch": 3.962025316455696,
"grad_norm": 2.930954694747925,
"learning_rate": 7.819082850768433e-05,
"loss": 0.8307,
"step": 79
},
{
"epoch": 4.0,
"grad_norm": 2.1440188884735107,
"learning_rate": 7.159746139706194e-05,
"loss": 0.4358,
"step": 80
},
{
"epoch": 4.050632911392405,
"grad_norm": 1.4254531860351562,
"learning_rate": 6.524777069483526e-05,
"loss": 0.2303,
"step": 81
},
{
"epoch": 4.10126582278481,
"grad_norm": 2.0634138584136963,
"learning_rate": 5.9150429314724254e-05,
"loss": 0.5029,
"step": 82
},
{
"epoch": 4.151898734177215,
"grad_norm": 1.7933154106140137,
"learning_rate": 5.3313765491629194e-05,
"loss": 0.3326,
"step": 83
},
{
"epoch": 4.2025316455696204,
"grad_norm": 1.1411280632019043,
"learning_rate": 4.7745751406263163e-05,
"loss": 0.1651,
"step": 84
},
{
"epoch": 4.253164556962025,
"grad_norm": 1.6106775999069214,
"learning_rate": 4.245399229611238e-05,
"loss": 0.3276,
"step": 85
},
{
"epoch": 4.30379746835443,
"grad_norm": 1.6101868152618408,
"learning_rate": 3.7445716067596506e-05,
"loss": 0.3073,
"step": 86
},
{
"epoch": 4.3544303797468356,
"grad_norm": 1.1931700706481934,
"learning_rate": 3.2727763423617915e-05,
"loss": 0.1378,
"step": 87
},
{
"epoch": 4.405063291139241,
"grad_norm": 1.7510262727737427,
"learning_rate": 2.8306578519984528e-05,
"loss": 0.2117,
"step": 88
},
{
"epoch": 4.455696202531645,
"grad_norm": 1.7942403554916382,
"learning_rate": 2.4188200163467787e-05,
"loss": 0.1961,
"step": 89
},
{
"epoch": 4.506329113924051,
"grad_norm": 1.7418352365493774,
"learning_rate": 2.0378253563519245e-05,
"loss": 0.2915,
"step": 90
},
{
"epoch": 4.556962025316456,
"grad_norm": 1.7192072868347168,
"learning_rate": 1.6881942648911074e-05,
"loss": 0.1873,
"step": 91
},
{
"epoch": 4.6075949367088604,
"grad_norm": 1.5197887420654297,
"learning_rate": 1.3704042959795133e-05,
"loss": 0.3229,
"step": 92
},
{
"epoch": 4.658227848101266,
"grad_norm": 1.1745399236679077,
"learning_rate": 1.0848895124889818e-05,
"loss": 0.1945,
"step": 93
},
{
"epoch": 4.708860759493671,
"grad_norm": 1.7969136238098145,
"learning_rate": 8.320398932703144e-06,
"loss": 0.2555,
"step": 94
},
{
"epoch": 4.759493670886076,
"grad_norm": 1.9828176498413086,
"learning_rate": 6.12200800489085e-06,
"loss": 0.2931,
"step": 95
},
{
"epoch": 4.810126582278481,
"grad_norm": 1.6430472135543823,
"learning_rate": 4.256725079024554e-06,
"loss": 0.1705,
"step": 96
},
{
"epoch": 4.860759493670886,
"grad_norm": 1.5796780586242676,
"learning_rate": 2.7270979072135106e-06,
"loss": 0.2818,
"step": 97
},
{
"epoch": 4.911392405063291,
"grad_norm": 1.7523950338363647,
"learning_rate": 1.5352157761815977e-06,
"loss": 0.2395,
"step": 98
},
{
"epoch": 4.962025316455696,
"grad_norm": 1.9358466863632202,
"learning_rate": 6.827066535529947e-07,
"loss": 0.3112,
"step": 99
},
{
"epoch": 5.0,
"grad_norm": 2.1313531398773193,
"learning_rate": 1.7073496424427348e-07,
"loss": 0.102,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 20,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.9265650012307456e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}