{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.2810248198558847,
"eval_steps": 500,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.012810248198558846,
"grad_norm": 14.422553569632921,
"learning_rate": 1.25e-07,
"loss": 0.3727,
"step": 1
},
{
"epoch": 0.025620496397117692,
"grad_norm": 15.27369825740346,
"learning_rate": 2.5e-07,
"loss": 0.3497,
"step": 2
},
{
"epoch": 0.03843074459567654,
"grad_norm": 14.934103011301683,
"learning_rate": 3.75e-07,
"loss": 0.3837,
"step": 3
},
{
"epoch": 0.051240992794235385,
"grad_norm": 14.736814811452579,
"learning_rate": 5e-07,
"loss": 0.3396,
"step": 4
},
{
"epoch": 0.06405124099279423,
"grad_norm": 14.197297787283581,
"learning_rate": 6.249999999999999e-07,
"loss": 0.3499,
"step": 5
},
{
"epoch": 0.07686148919135308,
"grad_norm": 11.495057131004048,
"learning_rate": 7.5e-07,
"loss": 0.3044,
"step": 6
},
{
"epoch": 0.08967173738991192,
"grad_norm": 12.10719572952739,
"learning_rate": 8.75e-07,
"loss": 0.3489,
"step": 7
},
{
"epoch": 0.10248198558847077,
"grad_norm": 17.82877298105584,
"learning_rate": 1e-06,
"loss": 0.3319,
"step": 8
},
{
"epoch": 0.11529223378702963,
"grad_norm": 20.757666788469532,
"learning_rate": 1.125e-06,
"loss": 0.299,
"step": 9
},
{
"epoch": 0.12810248198558846,
"grad_norm": 19.06167752186461,
"learning_rate": 1.2499999999999999e-06,
"loss": 0.3083,
"step": 10
},
{
"epoch": 0.14091273018414732,
"grad_norm": 17.862857075250577,
"learning_rate": 1.375e-06,
"loss": 0.289,
"step": 11
},
{
"epoch": 0.15372297838270615,
"grad_norm": 24.872907286961055,
"learning_rate": 1.5e-06,
"loss": 0.2736,
"step": 12
},
{
"epoch": 0.16653322658126501,
"grad_norm": 26.286295562663373,
"learning_rate": 1.625e-06,
"loss": 0.2917,
"step": 13
},
{
"epoch": 0.17934347477982385,
"grad_norm": 27.853856612497783,
"learning_rate": 1.75e-06,
"loss": 0.3264,
"step": 14
},
{
"epoch": 0.1921537229783827,
"grad_norm": 29.926337896366878,
"learning_rate": 1.8749999999999998e-06,
"loss": 0.4142,
"step": 15
},
{
"epoch": 0.20496397117694154,
"grad_norm": 32.72677695738821,
"learning_rate": 2e-06,
"loss": 0.3857,
"step": 16
},
{
"epoch": 0.2177742193755004,
"grad_norm": 25.15556631940085,
"learning_rate": 1.9997482349425066e-06,
"loss": 0.4154,
"step": 17
},
{
"epoch": 0.23058446757405926,
"grad_norm": 25.6375788037228,
"learning_rate": 1.9989930665413145e-06,
"loss": 0.393,
"step": 18
},
{
"epoch": 0.2433947157726181,
"grad_norm": 25.369663386464367,
"learning_rate": 1.997734875046456e-06,
"loss": 0.3194,
"step": 19
},
{
"epoch": 0.2562049639711769,
"grad_norm": 22.33431342233663,
"learning_rate": 1.995974293995239e-06,
"loss": 0.3499,
"step": 20
},
{
"epoch": 0.2690152121697358,
"grad_norm": 17.384680330316037,
"learning_rate": 1.9937122098932426e-06,
"loss": 0.3306,
"step": 21
},
{
"epoch": 0.28182546036829464,
"grad_norm": 19.864128436150512,
"learning_rate": 1.9909497617679347e-06,
"loss": 0.3596,
"step": 22
},
{
"epoch": 0.2946357085668535,
"grad_norm": 20.439398805573077,
"learning_rate": 1.9876883405951377e-06,
"loss": 0.328,
"step": 23
},
{
"epoch": 0.3074459567654123,
"grad_norm": 16.880587852141087,
"learning_rate": 1.9839295885986295e-06,
"loss": 0.2997,
"step": 24
},
{
"epoch": 0.32025620496397117,
"grad_norm": 14.039028426941659,
"learning_rate": 1.9796753984232355e-06,
"loss": 0.2995,
"step": 25
},
{
"epoch": 0.33306645316253003,
"grad_norm": 16.079146586971746,
"learning_rate": 1.9749279121818236e-06,
"loss": 0.3277,
"step": 26
},
{
"epoch": 0.3458767013610889,
"grad_norm": 16.540864706951407,
"learning_rate": 1.9696895203766866e-06,
"loss": 0.3492,
"step": 27
},
{
"epoch": 0.3586869495596477,
"grad_norm": 12.581391856103888,
"learning_rate": 1.9639628606958534e-06,
"loss": 0.2907,
"step": 28
},
{
"epoch": 0.37149719775820655,
"grad_norm": 11.140674253319334,
"learning_rate": 1.9577508166849303e-06,
"loss": 0.2977,
"step": 29
},
{
"epoch": 0.3843074459567654,
"grad_norm": 14.824558664988295,
"learning_rate": 1.9510565162951534e-06,
"loss": 0.3211,
"step": 30
},
{
"epoch": 0.3971176941553243,
"grad_norm": 13.545165184398623,
"learning_rate": 1.9438833303083674e-06,
"loss": 0.3418,
"step": 31
},
{
"epoch": 0.4099279423538831,
"grad_norm": 13.298757689081272,
"learning_rate": 1.936234870639737e-06,
"loss": 0.3298,
"step": 32
},
{
"epoch": 0.42273819055244194,
"grad_norm": 13.834770648936974,
"learning_rate": 1.928114988519039e-06,
"loss": 0.3406,
"step": 33
},
{
"epoch": 0.4355484387510008,
"grad_norm": 14.126211131938067,
"learning_rate": 1.9195277725514506e-06,
"loss": 0.3268,
"step": 34
},
{
"epoch": 0.44835868694955966,
"grad_norm": 12.61217701807801,
"learning_rate": 1.9104775466588157e-06,
"loss": 0.3421,
"step": 35
},
{
"epoch": 0.4611689351481185,
"grad_norm": 12.638382733573478,
"learning_rate": 1.9009688679024189e-06,
"loss": 0.3311,
"step": 36
},
{
"epoch": 0.4739791833466773,
"grad_norm": 13.067396281150065,
"learning_rate": 1.8910065241883678e-06,
"loss": 0.3278,
"step": 37
},
{
"epoch": 0.4867894315452362,
"grad_norm": 12.323205100444895,
"learning_rate": 1.8805955318567379e-06,
"loss": 0.3021,
"step": 38
},
{
"epoch": 0.49959967974379504,
"grad_norm": 15.67799328226583,
"learning_rate": 1.8697411331556953e-06,
"loss": 0.356,
"step": 39
},
{
"epoch": 0.5124099279423538,
"grad_norm": 11.446755693352566,
"learning_rate": 1.858448793601866e-06,
"loss": 0.2987,
"step": 40
},
{
"epoch": 0.5252201761409128,
"grad_norm": 13.823237028823755,
"learning_rate": 1.8467241992282841e-06,
"loss": 0.3212,
"step": 41
},
{
"epoch": 0.5380304243394716,
"grad_norm": 14.097283611242158,
"learning_rate": 1.8345732537213026e-06,
"loss": 0.2847,
"step": 42
},
{
"epoch": 0.5508406725380304,
"grad_norm": 13.339593293425303,
"learning_rate": 1.82200207544791e-06,
"loss": 0.3023,
"step": 43
},
{
"epoch": 0.5636509207365893,
"grad_norm": 11.710132196735142,
"learning_rate": 1.8090169943749474e-06,
"loss": 0.288,
"step": 44
},
{
"epoch": 0.5764611689351481,
"grad_norm": 15.421270027221475,
"learning_rate": 1.795624548881781e-06,
"loss": 0.3191,
"step": 45
},
{
"epoch": 0.589271417133707,
"grad_norm": 15.196185783638896,
"learning_rate": 1.7818314824680298e-06,
"loss": 0.2917,
"step": 46
},
{
"epoch": 0.6020816653322658,
"grad_norm": 14.417769465666634,
"learning_rate": 1.767644740358011e-06,
"loss": 0.3065,
"step": 47
},
{
"epoch": 0.6148919135308246,
"grad_norm": 15.806685481243651,
"learning_rate": 1.753071466003611e-06,
"loss": 0.3133,
"step": 48
},
{
"epoch": 0.6277021617293835,
"grad_norm": 12.804839878435857,
"learning_rate": 1.7381189974873407e-06,
"loss": 0.2924,
"step": 49
},
{
"epoch": 0.6405124099279423,
"grad_norm": 12.41861829102855,
"learning_rate": 1.7227948638273915e-06,
"loss": 0.2884,
"step": 50
},
{
"epoch": 0.6533226581265013,
"grad_norm": 14.521843299259071,
"learning_rate": 1.7071067811865474e-06,
"loss": 0.3172,
"step": 51
},
{
"epoch": 0.6661329063250601,
"grad_norm": 12.369940546930579,
"learning_rate": 1.6910626489868648e-06,
"loss": 0.3064,
"step": 52
},
{
"epoch": 0.6789431545236189,
"grad_norm": 13.075907929491837,
"learning_rate": 1.6746705459320744e-06,
"loss": 0.3077,
"step": 53
},
{
"epoch": 0.6917534027221778,
"grad_norm": 15.91307811791754,
"learning_rate": 1.6579387259397126e-06,
"loss": 0.3202,
"step": 54
},
{
"epoch": 0.7045636509207366,
"grad_norm": 16.794702833290078,
"learning_rate": 1.640875613985024e-06,
"loss": 0.3509,
"step": 55
},
{
"epoch": 0.7173738991192954,
"grad_norm": 11.645931539680932,
"learning_rate": 1.6234898018587336e-06,
"loss": 0.2737,
"step": 56
},
{
"epoch": 0.7301841473178543,
"grad_norm": 12.164042324452254,
"learning_rate": 1.6057900438408199e-06,
"loss": 0.2805,
"step": 57
},
{
"epoch": 0.7429943955164131,
"grad_norm": 11.003522632810185,
"learning_rate": 1.587785252292473e-06,
"loss": 0.2935,
"step": 58
},
{
"epoch": 0.755804643714972,
"grad_norm": 14.747618364345833,
"learning_rate": 1.569484493168452e-06,
"loss": 0.3185,
"step": 59
},
{
"epoch": 0.7686148919135308,
"grad_norm": 13.515142475366316,
"learning_rate": 1.5508969814521024e-06,
"loss": 0.2973,
"step": 60
},
{
"epoch": 0.7814251401120896,
"grad_norm": 12.718973924439272,
"learning_rate": 1.5320320765153365e-06,
"loss": 0.2956,
"step": 61
},
{
"epoch": 0.7942353883106485,
"grad_norm": 11.338497570766252,
"learning_rate": 1.5128992774059062e-06,
"loss": 0.3127,
"step": 62
},
{
"epoch": 0.8070456365092074,
"grad_norm": 11.44549619969602,
"learning_rate": 1.4935082180643467e-06,
"loss": 0.277,
"step": 63
},
{
"epoch": 0.8198558847077662,
"grad_norm": 12.057954732711101,
"learning_rate": 1.4738686624729987e-06,
"loss": 0.2957,
"step": 64
},
{
"epoch": 0.8326661329063251,
"grad_norm": 12.801694035766888,
"learning_rate": 1.4539904997395467e-06,
"loss": 0.2907,
"step": 65
},
{
"epoch": 0.8454763811048839,
"grad_norm": 12.990091142294833,
"learning_rate": 1.433883739117558e-06,
"loss": 0.2973,
"step": 66
},
{
"epoch": 0.8582866293034428,
"grad_norm": 12.952175792300656,
"learning_rate": 1.4135585049665206e-06,
"loss": 0.3178,
"step": 67
},
{
"epoch": 0.8710968775020016,
"grad_norm": 12.728890379024975,
"learning_rate": 1.3930250316539235e-06,
"loss": 0.2974,
"step": 68
},
{
"epoch": 0.8839071257005604,
"grad_norm": 14.929834721351973,
"learning_rate": 1.3722936584019451e-06,
"loss": 0.2682,
"step": 69
},
{
"epoch": 0.8967173738991193,
"grad_norm": 13.241782488686244,
"learning_rate": 1.3513748240813427e-06,
"loss": 0.3008,
"step": 70
},
{
"epoch": 0.9095276220976781,
"grad_norm": 11.993846811913796,
"learning_rate": 1.3302790619551672e-06,
"loss": 0.2954,
"step": 71
},
{
"epoch": 0.922337870296237,
"grad_norm": 11.51482248851063,
"learning_rate": 1.3090169943749473e-06,
"loss": 0.2839,
"step": 72
},
{
"epoch": 0.9351481184947958,
"grad_norm": 16.30894659038507,
"learning_rate": 1.2875993274320173e-06,
"loss": 0.2697,
"step": 73
},
{
"epoch": 0.9479583666933546,
"grad_norm": 14.18702210008118,
"learning_rate": 1.266036845566675e-06,
"loss": 0.3257,
"step": 74
},
{
"epoch": 0.9607686148919136,
"grad_norm": 12.829612982865,
"learning_rate": 1.244340406137894e-06,
"loss": 0.2843,
"step": 75
},
{
"epoch": 0.9735788630904724,
"grad_norm": 11.280572000591482,
"learning_rate": 1.2225209339563143e-06,
"loss": 0.2877,
"step": 76
},
{
"epoch": 0.9863891112890312,
"grad_norm": 11.890947903730776,
"learning_rate": 1.2005894157832728e-06,
"loss": 0.3159,
"step": 77
},
{
"epoch": 0.9991993594875901,
"grad_norm": 11.43791196956486,
"learning_rate": 1.1785568947986366e-06,
"loss": 0.2783,
"step": 78
},
{
"epoch": 1.012009607686149,
"grad_norm": 6.708403841137654,
"learning_rate": 1.156434465040231e-06,
"loss": 0.1292,
"step": 79
},
{
"epoch": 1.0248198558847077,
"grad_norm": 7.089840959480307,
"learning_rate": 1.1342332658176555e-06,
"loss": 0.1441,
"step": 80
},
{
"epoch": 1.0376301040832665,
"grad_norm": 7.0309806199688705,
"learning_rate": 1.1119644761033077e-06,
"loss": 0.1405,
"step": 81
},
{
"epoch": 1.0504403522818255,
"grad_norm": 8.710657806703308,
"learning_rate": 1.0896393089034335e-06,
"loss": 0.1265,
"step": 82
},
{
"epoch": 1.0632506004803843,
"grad_norm": 8.365738125681913,
"learning_rate": 1.0672690056120398e-06,
"loss": 0.121,
"step": 83
},
{
"epoch": 1.0760608486789431,
"grad_norm": 6.240558508799005,
"learning_rate": 1.044864830350515e-06,
"loss": 0.1072,
"step": 84
},
{
"epoch": 1.088871096877502,
"grad_norm": 8.786219507218826,
"learning_rate": 1.022438064295805e-06,
"loss": 0.1256,
"step": 85
},
{
"epoch": 1.1016813450760607,
"grad_norm": 7.367696121948762,
"learning_rate": 1e-06,
"loss": 0.1169,
"step": 86
},
{
"epoch": 1.1144915932746198,
"grad_norm": 8.286075087364262,
"learning_rate": 9.77561935704195e-07,
"loss": 0.1302,
"step": 87
},
{
"epoch": 1.1273018414731786,
"grad_norm": 6.69695777215013,
"learning_rate": 9.551351696494853e-07,
"loss": 0.1191,
"step": 88
},
{
"epoch": 1.1401120896717374,
"grad_norm": 12.22193531590102,
"learning_rate": 9.327309943879603e-07,
"loss": 0.1089,
"step": 89
},
{
"epoch": 1.1529223378702962,
"grad_norm": 9.221889701081018,
"learning_rate": 9.103606910965665e-07,
"loss": 0.1224,
"step": 90
},
{
"epoch": 1.165732586068855,
"grad_norm": 19.718305211304475,
"learning_rate": 8.880355238966921e-07,
"loss": 0.1211,
"step": 91
},
{
"epoch": 1.178542834267414,
"grad_norm": 8.925338055742081,
"learning_rate": 8.657667341823448e-07,
"loss": 0.1178,
"step": 92
},
{
"epoch": 1.1913530824659728,
"grad_norm": 8.25397448128133,
"learning_rate": 8.435655349597689e-07,
"loss": 0.127,
"step": 93
},
{
"epoch": 1.2041633306645316,
"grad_norm": 10.64375599591567,
"learning_rate": 8.214431052013634e-07,
"loss": 0.1162,
"step": 94
},
{
"epoch": 1.2169735788630904,
"grad_norm": 9.047517084249554,
"learning_rate": 7.994105842167272e-07,
"loss": 0.1386,
"step": 95
},
{
"epoch": 1.2297838270616492,
"grad_norm": 14.178667620453645,
"learning_rate": 7.774790660436857e-07,
"loss": 0.1137,
"step": 96
},
{
"epoch": 1.2425940752602083,
"grad_norm": 7.4649396301693836,
"learning_rate": 7.556595938621058e-07,
"loss": 0.1189,
"step": 97
},
{
"epoch": 1.255404323458767,
"grad_norm": 9.120043718229002,
"learning_rate": 7.33963154433325e-07,
"loss": 0.1218,
"step": 98
},
{
"epoch": 1.2682145716573259,
"grad_norm": 9.320538317849792,
"learning_rate": 7.124006725679828e-07,
"loss": 0.1215,
"step": 99
},
{
"epoch": 1.2810248198558847,
"grad_norm": 8.775423616734841,
"learning_rate": 6.909830056250526e-07,
"loss": 0.1106,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 156,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 135343586148352.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}