{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 327,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.015313935681470138,
"grad_norm": 1.1634962558746338,
"learning_rate": 1.4634146341463416e-06,
"loss": 1.3381,
"step": 5
},
{
"epoch": 0.030627871362940276,
"grad_norm": 0.9751255512237549,
"learning_rate": 3.2926829268292685e-06,
"loss": 1.3201,
"step": 10
},
{
"epoch": 0.045941807044410414,
"grad_norm": 0.6420730948448181,
"learning_rate": 5.121951219512195e-06,
"loss": 1.3144,
"step": 15
},
{
"epoch": 0.06125574272588055,
"grad_norm": 0.5940472483634949,
"learning_rate": 6.951219512195123e-06,
"loss": 1.2986,
"step": 20
},
{
"epoch": 0.07656967840735068,
"grad_norm": 0.6182255148887634,
"learning_rate": 8.780487804878048e-06,
"loss": 1.3159,
"step": 25
},
{
"epoch": 0.09188361408882083,
"grad_norm": 0.5141950845718384,
"learning_rate": 1.0609756097560975e-05,
"loss": 1.268,
"step": 30
},
{
"epoch": 0.10719754977029096,
"grad_norm": 0.528359055519104,
"learning_rate": 1.2439024390243903e-05,
"loss": 1.207,
"step": 35
},
{
"epoch": 0.1225114854517611,
"grad_norm": 0.48875945806503296,
"learning_rate": 1.4268292682926829e-05,
"loss": 1.2673,
"step": 40
},
{
"epoch": 0.13782542113323124,
"grad_norm": 0.5121011137962341,
"learning_rate": 1.6097560975609757e-05,
"loss": 1.201,
"step": 45
},
{
"epoch": 0.15313935681470137,
"grad_norm": 0.4511997699737549,
"learning_rate": 1.7926829268292684e-05,
"loss": 1.2696,
"step": 50
},
{
"epoch": 0.16845329249617153,
"grad_norm": 0.42570406198501587,
"learning_rate": 1.975609756097561e-05,
"loss": 1.1761,
"step": 55
},
{
"epoch": 0.18376722817764166,
"grad_norm": 0.4839572012424469,
"learning_rate": 2.1585365853658537e-05,
"loss": 1.1657,
"step": 60
},
{
"epoch": 0.1990811638591118,
"grad_norm": 0.4797247648239136,
"learning_rate": 2.3414634146341466e-05,
"loss": 1.2398,
"step": 65
},
{
"epoch": 0.21439509954058192,
"grad_norm": 0.45414257049560547,
"learning_rate": 2.524390243902439e-05,
"loss": 1.2156,
"step": 70
},
{
"epoch": 0.22970903522205208,
"grad_norm": 0.41919460892677307,
"learning_rate": 2.707317073170732e-05,
"loss": 1.1861,
"step": 75
},
{
"epoch": 0.2450229709035222,
"grad_norm": 0.4727442264556885,
"learning_rate": 2.8902439024390242e-05,
"loss": 1.1336,
"step": 80
},
{
"epoch": 0.26033690658499237,
"grad_norm": 0.5595141649246216,
"learning_rate": 2.9999877234172298e-05,
"loss": 1.161,
"step": 85
},
{
"epoch": 0.27565084226646247,
"grad_norm": 0.48706549406051636,
"learning_rate": 2.999849614168868e-05,
"loss": 1.163,
"step": 90
},
{
"epoch": 0.29096477794793263,
"grad_norm": 0.5425196886062622,
"learning_rate": 2.999558064119925e-05,
"loss": 1.1033,
"step": 95
},
{
"epoch": 0.30627871362940273,
"grad_norm": 0.5159770250320435,
"learning_rate": 2.999113103097174e-05,
"loss": 1.0591,
"step": 100
},
{
"epoch": 0.3215926493108729,
"grad_norm": 0.6011470556259155,
"learning_rate": 2.9985147766219615e-05,
"loss": 1.1068,
"step": 105
},
{
"epoch": 0.33690658499234305,
"grad_norm": 0.6025936007499695,
"learning_rate": 2.9977631459055537e-05,
"loss": 1.1102,
"step": 110
},
{
"epoch": 0.35222052067381315,
"grad_norm": 0.6048042178153992,
"learning_rate": 2.996858287842873e-05,
"loss": 1.0633,
"step": 115
},
{
"epoch": 0.3675344563552833,
"grad_norm": 0.5745708346366882,
"learning_rate": 2.995800295004629e-05,
"loss": 1.0355,
"step": 120
},
{
"epoch": 0.38284839203675347,
"grad_norm": 0.6400654315948486,
"learning_rate": 2.9945892756278543e-05,
"loss": 0.9959,
"step": 125
},
{
"epoch": 0.3981623277182236,
"grad_norm": 0.6600092053413391,
"learning_rate": 2.993225353604824e-05,
"loss": 0.923,
"step": 130
},
{
"epoch": 0.41347626339969373,
"grad_norm": 0.6698320508003235,
"learning_rate": 2.9917086684703844e-05,
"loss": 0.9218,
"step": 135
},
{
"epoch": 0.42879019908116384,
"grad_norm": 0.6736750602722168,
"learning_rate": 2.9900393753876816e-05,
"loss": 1.0266,
"step": 140
},
{
"epoch": 0.444104134762634,
"grad_norm": 0.7200514674186707,
"learning_rate": 2.9882176451322798e-05,
"loss": 0.9735,
"step": 145
},
{
"epoch": 0.45941807044410415,
"grad_norm": 0.7945486903190613,
"learning_rate": 2.9862436640746973e-05,
"loss": 0.9593,
"step": 150
},
{
"epoch": 0.47473200612557426,
"grad_norm": 0.7599704265594482,
"learning_rate": 2.9841176341613364e-05,
"loss": 0.9086,
"step": 155
},
{
"epoch": 0.4900459418070444,
"grad_norm": 0.7764740586280823,
"learning_rate": 2.981839772893825e-05,
"loss": 0.9635,
"step": 160
},
{
"epoch": 0.5053598774885145,
"grad_norm": 0.7852749824523926,
"learning_rate": 2.9794103133067637e-05,
"loss": 0.9173,
"step": 165
},
{
"epoch": 0.5206738131699847,
"grad_norm": 0.7193847894668579,
"learning_rate": 2.9768295039438868e-05,
"loss": 0.8323,
"step": 170
},
{
"epoch": 0.5359877488514548,
"grad_norm": 0.7468358874320984,
"learning_rate": 2.974097608832635e-05,
"loss": 0.9018,
"step": 175
},
{
"epoch": 0.5513016845329249,
"grad_norm": 0.7651287913322449,
"learning_rate": 2.9712149074571433e-05,
"loss": 0.8993,
"step": 180
},
{
"epoch": 0.5666156202143952,
"grad_norm": 0.8457761406898499,
"learning_rate": 2.968181694729651e-05,
"loss": 0.8642,
"step": 185
},
{
"epoch": 0.5819295558958653,
"grad_norm": 0.981182336807251,
"learning_rate": 2.964998280960328e-05,
"loss": 0.8593,
"step": 190
},
{
"epoch": 0.5972434915773354,
"grad_norm": 0.9098852872848511,
"learning_rate": 2.961664991825531e-05,
"loss": 0.8578,
"step": 195
},
{
"epoch": 0.6125574272588055,
"grad_norm": 1.0851359367370605,
"learning_rate": 2.9581821683344832e-05,
"loss": 0.8896,
"step": 200
},
{
"epoch": 0.6278713629402757,
"grad_norm": 0.8962501883506775,
"learning_rate": 2.954550166794391e-05,
"loss": 0.7954,
"step": 205
},
{
"epoch": 0.6431852986217458,
"grad_norm": 1.154096007347107,
"learning_rate": 2.9507693587739895e-05,
"loss": 0.8227,
"step": 210
},
{
"epoch": 0.6584992343032159,
"grad_norm": 0.8688374757766724,
"learning_rate": 2.9468401310655303e-05,
"loss": 0.8133,
"step": 215
},
{
"epoch": 0.6738131699846861,
"grad_norm": 0.8076976537704468,
"learning_rate": 2.942762885645211e-05,
"loss": 0.8698,
"step": 220
},
{
"epoch": 0.6891271056661562,
"grad_norm": 0.9876447319984436,
"learning_rate": 2.9385380396320523e-05,
"loss": 0.7785,
"step": 225
},
{
"epoch": 0.7044410413476263,
"grad_norm": 0.8549358248710632,
"learning_rate": 2.934166025245223e-05,
"loss": 0.8522,
"step": 230
},
{
"epoch": 0.7197549770290965,
"grad_norm": 1.0136367082595825,
"learning_rate": 2.9296472897598246e-05,
"loss": 0.7612,
"step": 235
},
{
"epoch": 0.7350689127105666,
"grad_norm": 0.8504995703697205,
"learning_rate": 2.924982295461131e-05,
"loss": 0.7773,
"step": 240
},
{
"epoch": 0.7503828483920367,
"grad_norm": 0.9323103427886963,
"learning_rate": 2.920171519597297e-05,
"loss": 0.7684,
"step": 245
},
{
"epoch": 0.7656967840735069,
"grad_norm": 0.9799562096595764,
"learning_rate": 2.9152154543305316e-05,
"loss": 0.7306,
"step": 250
},
{
"epoch": 0.781010719754977,
"grad_norm": 0.9990427494049072,
"learning_rate": 2.9101146066867502e-05,
"loss": 0.7089,
"step": 255
},
{
"epoch": 0.7963246554364471,
"grad_norm": 0.94583660364151,
"learning_rate": 2.9048694985037e-05,
"loss": 0.7458,
"step": 260
},
{
"epoch": 0.8116385911179173,
"grad_norm": 1.0244216918945312,
"learning_rate": 2.8994806663775792e-05,
"loss": 0.7193,
"step": 265
},
{
"epoch": 0.8269525267993875,
"grad_norm": 0.9860573410987854,
"learning_rate": 2.893948661608136e-05,
"loss": 0.7328,
"step": 270
},
{
"epoch": 0.8422664624808576,
"grad_norm": 1.1211233139038086,
"learning_rate": 2.888274050142271e-05,
"loss": 0.7017,
"step": 275
},
{
"epoch": 0.8575803981623277,
"grad_norm": 0.9939438700675964,
"learning_rate": 2.8824574125161384e-05,
"loss": 0.7092,
"step": 280
},
{
"epoch": 0.8728943338437979,
"grad_norm": 1.0668702125549316,
"learning_rate": 2.876499343795754e-05,
"loss": 0.6999,
"step": 285
},
{
"epoch": 0.888208269525268,
"grad_norm": 1.0538650751113892,
"learning_rate": 2.8704004535161172e-05,
"loss": 0.7,
"step": 290
},
{
"epoch": 0.9035222052067381,
"grad_norm": 0.9455710649490356,
"learning_rate": 2.864161365618854e-05,
"loss": 0.7119,
"step": 295
},
{
"epoch": 0.9188361408882083,
"grad_norm": 1.0059537887573242,
"learning_rate": 2.8577827183883853e-05,
"loss": 0.6566,
"step": 300
},
{
"epoch": 0.9341500765696784,
"grad_norm": 1.0264160633087158,
"learning_rate": 2.851265164386627e-05,
"loss": 0.7123,
"step": 305
},
{
"epoch": 0.9494640122511485,
"grad_norm": 0.9730039834976196,
"learning_rate": 2.84460937038623e-05,
"loss": 0.6861,
"step": 310
},
{
"epoch": 0.9647779479326187,
"grad_norm": 1.0401747226715088,
"learning_rate": 2.837816017302368e-05,
"loss": 0.6573,
"step": 315
},
{
"epoch": 0.9800918836140888,
"grad_norm": 0.9776532649993896,
"learning_rate": 2.8308858001230757e-05,
"loss": 0.6147,
"step": 320
},
{
"epoch": 0.9954058192955589,
"grad_norm": 1.103063702583313,
"learning_rate": 2.823819427838149e-05,
"loss": 0.6398,
"step": 325
}
],
"logging_steps": 5,
"max_steps": 1635,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 2000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 4.674677580748554e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}