{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 486,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.010298661174047374,
"grad_norm": 1.141814947128296,
"learning_rate": 9.836065573770493e-07,
"loss": 1.3338,
"step": 5
},
{
"epoch": 0.02059732234809475,
"grad_norm": 0.9742817878723145,
"learning_rate": 2.213114754098361e-06,
"loss": 1.3646,
"step": 10
},
{
"epoch": 0.030895983522142123,
"grad_norm": 0.7402164340019226,
"learning_rate": 3.4426229508196724e-06,
"loss": 1.3261,
"step": 15
},
{
"epoch": 0.0411946446961895,
"grad_norm": 0.5555693507194519,
"learning_rate": 4.672131147540984e-06,
"loss": 1.3035,
"step": 20
},
{
"epoch": 0.05149330587023687,
"grad_norm": 0.6190383434295654,
"learning_rate": 5.901639344262295e-06,
"loss": 1.3847,
"step": 25
},
{
"epoch": 0.061791967044284246,
"grad_norm": 0.5282045006752014,
"learning_rate": 7.131147540983606e-06,
"loss": 1.3197,
"step": 30
},
{
"epoch": 0.07209062821833162,
"grad_norm": 0.48918476700782776,
"learning_rate": 8.360655737704917e-06,
"loss": 1.2899,
"step": 35
},
{
"epoch": 0.082389289392379,
"grad_norm": 0.7961065769195557,
"learning_rate": 9.590163934426231e-06,
"loss": 1.2656,
"step": 40
},
{
"epoch": 0.09268795056642637,
"grad_norm": 0.4608859419822693,
"learning_rate": 1.0819672131147542e-05,
"loss": 1.26,
"step": 45
},
{
"epoch": 0.10298661174047374,
"grad_norm": 0.555935800075531,
"learning_rate": 1.2049180327868853e-05,
"loss": 1.2548,
"step": 50
},
{
"epoch": 0.11328527291452112,
"grad_norm": 0.4993849992752075,
"learning_rate": 1.3278688524590163e-05,
"loss": 1.2389,
"step": 55
},
{
"epoch": 0.12358393408856849,
"grad_norm": 0.5452463030815125,
"learning_rate": 1.4508196721311476e-05,
"loss": 1.2124,
"step": 60
},
{
"epoch": 0.13388259526261587,
"grad_norm": 0.4450685381889343,
"learning_rate": 1.5737704918032788e-05,
"loss": 1.2105,
"step": 65
},
{
"epoch": 0.14418125643666324,
"grad_norm": 0.5045918822288513,
"learning_rate": 1.6967213114754097e-05,
"loss": 1.2305,
"step": 70
},
{
"epoch": 0.15447991761071062,
"grad_norm": 0.49198681116104126,
"learning_rate": 1.819672131147541e-05,
"loss": 1.2464,
"step": 75
},
{
"epoch": 0.164778578784758,
"grad_norm": 0.4819914996623993,
"learning_rate": 1.9426229508196722e-05,
"loss": 1.194,
"step": 80
},
{
"epoch": 0.17507723995880536,
"grad_norm": 0.4840977191925049,
"learning_rate": 2.0655737704918034e-05,
"loss": 1.1949,
"step": 85
},
{
"epoch": 0.18537590113285274,
"grad_norm": 0.55226069688797,
"learning_rate": 2.1885245901639347e-05,
"loss": 1.1936,
"step": 90
},
{
"epoch": 0.1956745623069001,
"grad_norm": 0.47093257308006287,
"learning_rate": 2.3114754098360656e-05,
"loss": 1.1214,
"step": 95
},
{
"epoch": 0.2059732234809475,
"grad_norm": 0.5413655042648315,
"learning_rate": 2.434426229508197e-05,
"loss": 1.1661,
"step": 100
},
{
"epoch": 0.21627188465499486,
"grad_norm": 0.525501012802124,
"learning_rate": 2.5573770491803277e-05,
"loss": 1.1657,
"step": 105
},
{
"epoch": 0.22657054582904224,
"grad_norm": 0.5898400545120239,
"learning_rate": 2.680327868852459e-05,
"loss": 1.1567,
"step": 110
},
{
"epoch": 0.2368692070030896,
"grad_norm": 0.5453243851661682,
"learning_rate": 2.8032786885245902e-05,
"loss": 1.1429,
"step": 115
},
{
"epoch": 0.24716786817713698,
"grad_norm": 0.5925690531730652,
"learning_rate": 2.9262295081967215e-05,
"loss": 1.165,
"step": 120
},
{
"epoch": 0.25746652935118436,
"grad_norm": 0.5159111618995667,
"learning_rate": 2.9999944416086244e-05,
"loss": 1.0712,
"step": 125
},
{
"epoch": 0.26776519052523173,
"grad_norm": 0.5997148752212524,
"learning_rate": 2.999931910178738e-05,
"loss": 1.087,
"step": 130
},
{
"epoch": 0.2780638516992791,
"grad_norm": 0.5427290201187134,
"learning_rate": 2.9997999022358424e-05,
"loss": 1.1449,
"step": 135
},
{
"epoch": 0.2883625128733265,
"grad_norm": 0.7504834532737732,
"learning_rate": 2.999598423894516e-05,
"loss": 1.1485,
"step": 140
},
{
"epoch": 0.29866117404737386,
"grad_norm": 0.5923535823822021,
"learning_rate": 2.9993274844871917e-05,
"loss": 1.1358,
"step": 145
},
{
"epoch": 0.30895983522142123,
"grad_norm": 0.6039471626281738,
"learning_rate": 2.9989870965637215e-05,
"loss": 1.1134,
"step": 150
},
{
"epoch": 0.3192584963954686,
"grad_norm": 0.5884091854095459,
"learning_rate": 2.9985772758907992e-05,
"loss": 1.101,
"step": 155
},
{
"epoch": 0.329557157569516,
"grad_norm": 0.6560966968536377,
"learning_rate": 2.998098041451227e-05,
"loss": 1.126,
"step": 160
},
{
"epoch": 0.33985581874356335,
"grad_norm": 0.557138979434967,
"learning_rate": 2.9975494154430378e-05,
"loss": 1.0722,
"step": 165
},
{
"epoch": 0.35015447991761073,
"grad_norm": 0.7041076421737671,
"learning_rate": 2.996931423278467e-05,
"loss": 1.0913,
"step": 170
},
{
"epoch": 0.3604531410916581,
"grad_norm": 0.6222100257873535,
"learning_rate": 2.9962440935827735e-05,
"loss": 1.0392,
"step": 175
},
{
"epoch": 0.3707518022657055,
"grad_norm": 0.6287522912025452,
"learning_rate": 2.995487458192917e-05,
"loss": 1.003,
"step": 180
},
{
"epoch": 0.38105046343975285,
"grad_norm": 0.8934054970741272,
"learning_rate": 2.9946615521560805e-05,
"loss": 1.0176,
"step": 185
},
{
"epoch": 0.3913491246138002,
"grad_norm": 0.6240345239639282,
"learning_rate": 2.9937664137280478e-05,
"loss": 1.0205,
"step": 190
},
{
"epoch": 0.4016477857878476,
"grad_norm": 0.6924830675125122,
"learning_rate": 2.9928020843714323e-05,
"loss": 1.0218,
"step": 195
},
{
"epoch": 0.411946446961895,
"grad_norm": 0.7187322378158569,
"learning_rate": 2.9917686087537563e-05,
"loss": 1.0581,
"step": 200
},
{
"epoch": 0.42224510813594235,
"grad_norm": 0.6714951992034912,
"learning_rate": 2.9906660347453803e-05,
"loss": 0.9597,
"step": 205
},
{
"epoch": 0.4325437693099897,
"grad_norm": 0.758985698223114,
"learning_rate": 2.9894944134172876e-05,
"loss": 0.9869,
"step": 210
},
{
"epoch": 0.4428424304840371,
"grad_norm": 0.6603527069091797,
"learning_rate": 2.988253799038718e-05,
"loss": 0.9687,
"step": 215
},
{
"epoch": 0.45314109165808447,
"grad_norm": 0.7007045745849609,
"learning_rate": 2.986944249074654e-05,
"loss": 1.0301,
"step": 220
},
{
"epoch": 0.46343975283213185,
"grad_norm": 0.7556740641593933,
"learning_rate": 2.985565824183159e-05,
"loss": 1.0108,
"step": 225
},
{
"epoch": 0.4737384140061792,
"grad_norm": 0.7658377885818481,
"learning_rate": 2.9841185882125682e-05,
"loss": 0.9966,
"step": 230
},
{
"epoch": 0.4840370751802266,
"grad_norm": 0.8318830728530884,
"learning_rate": 2.9826026081985305e-05,
"loss": 0.9416,
"step": 235
},
{
"epoch": 0.49433573635427397,
"grad_norm": 0.7977103590965271,
"learning_rate": 2.9810179543609032e-05,
"loss": 0.9753,
"step": 240
},
{
"epoch": 0.5046343975283213,
"grad_norm": 0.7902314066886902,
"learning_rate": 2.9793647001005002e-05,
"loss": 0.9422,
"step": 245
},
{
"epoch": 0.5149330587023687,
"grad_norm": 0.8238946795463562,
"learning_rate": 2.9776429219956917e-05,
"loss": 0.9609,
"step": 250
},
{
"epoch": 0.525231719876416,
"grad_norm": 0.8614672422409058,
"learning_rate": 2.975852699798857e-05,
"loss": 0.9375,
"step": 255
},
{
"epoch": 0.5355303810504635,
"grad_norm": 0.946057915687561,
"learning_rate": 2.9739941164326914e-05,
"loss": 0.8812,
"step": 260
},
{
"epoch": 0.5458290422245108,
"grad_norm": 0.8593087196350098,
"learning_rate": 2.9720672579863633e-05,
"loss": 0.9091,
"step": 265
},
{
"epoch": 0.5561277033985582,
"grad_norm": 0.8131739497184753,
"learning_rate": 2.970072213711528e-05,
"loss": 0.9136,
"step": 270
},
{
"epoch": 0.5664263645726055,
"grad_norm": 0.8276655077934265,
"learning_rate": 2.9680090760181933e-05,
"loss": 0.9188,
"step": 275
},
{
"epoch": 0.576725025746653,
"grad_norm": 0.8448908925056458,
"learning_rate": 2.9658779404704387e-05,
"loss": 0.8959,
"step": 280
},
{
"epoch": 0.5870236869207003,
"grad_norm": 0.7980640530586243,
"learning_rate": 2.9636789057819903e-05,
"loss": 0.9117,
"step": 285
},
{
"epoch": 0.5973223480947477,
"grad_norm": 0.8234493136405945,
"learning_rate": 2.961412073811646e-05,
"loss": 0.904,
"step": 290
},
{
"epoch": 0.607621009268795,
"grad_norm": 1.027661919593811,
"learning_rate": 2.9590775495585597e-05,
"loss": 0.8299,
"step": 295
},
{
"epoch": 0.6179196704428425,
"grad_norm": 0.8117839097976685,
"learning_rate": 2.956675441157376e-05,
"loss": 0.8743,
"step": 300
},
{
"epoch": 0.6282183316168898,
"grad_norm": 0.8687512278556824,
"learning_rate": 2.954205859873223e-05,
"loss": 0.8539,
"step": 305
},
{
"epoch": 0.6385169927909372,
"grad_norm": 0.9031465649604797,
"learning_rate": 2.951668920096557e-05,
"loss": 0.8416,
"step": 310
},
{
"epoch": 0.6488156539649845,
"grad_norm": 1.0350013971328735,
"learning_rate": 2.9490647393378656e-05,
"loss": 0.8505,
"step": 315
},
{
"epoch": 0.659114315139032,
"grad_norm": 0.9072204828262329,
"learning_rate": 2.9463934382222226e-05,
"loss": 0.8328,
"step": 320
},
{
"epoch": 0.6694129763130793,
"grad_norm": 0.8805303573608398,
"learning_rate": 2.943655140483703e-05,
"loss": 0.8498,
"step": 325
},
{
"epoch": 0.6797116374871267,
"grad_norm": 0.9569805264472961,
"learning_rate": 2.94084997295965e-05,
"loss": 0.8613,
"step": 330
},
{
"epoch": 0.690010298661174,
"grad_norm": 0.842765748500824,
"learning_rate": 2.9379780655848e-05,
"loss": 0.9271,
"step": 335
},
{
"epoch": 0.7003089598352215,
"grad_norm": 1.0585700273513794,
"learning_rate": 2.9350395513852655e-05,
"loss": 0.8049,
"step": 340
},
{
"epoch": 0.7106076210092688,
"grad_norm": 0.9681879878044128,
"learning_rate": 2.9320345664723713e-05,
"loss": 0.8611,
"step": 345
},
{
"epoch": 0.7209062821833162,
"grad_norm": 0.9282833933830261,
"learning_rate": 2.928963250036351e-05,
"loss": 0.8645,
"step": 350
},
{
"epoch": 0.7312049433573635,
"grad_norm": 0.880826473236084,
"learning_rate": 2.9258257443399007e-05,
"loss": 0.8409,
"step": 355
},
{
"epoch": 0.741503604531411,
"grad_norm": 0.9096245169639587,
"learning_rate": 2.922622194711587e-05,
"loss": 0.8648,
"step": 360
},
{
"epoch": 0.7518022657054583,
"grad_norm": 0.9456668496131897,
"learning_rate": 2.919352749539117e-05,
"loss": 0.8045,
"step": 365
},
{
"epoch": 0.7621009268795057,
"grad_norm": 0.9902530312538147,
"learning_rate": 2.916017560262466e-05,
"loss": 0.84,
"step": 370
},
{
"epoch": 0.772399588053553,
"grad_norm": 1.0271276235580444,
"learning_rate": 2.91261678136686e-05,
"loss": 0.8052,
"step": 375
},
{
"epoch": 0.7826982492276005,
"grad_norm": 0.9190011024475098,
"learning_rate": 2.9091505703756224e-05,
"loss": 0.7965,
"step": 380
},
{
"epoch": 0.7929969104016478,
"grad_norm": 1.022260308265686,
"learning_rate": 2.9056190878428766e-05,
"loss": 0.8417,
"step": 385
},
{
"epoch": 0.8032955715756952,
"grad_norm": 0.9204807281494141,
"learning_rate": 2.9020224973461098e-05,
"loss": 0.7853,
"step": 390
},
{
"epoch": 0.8135942327497425,
"grad_norm": 0.9588913917541504,
"learning_rate": 2.8983609654785948e-05,
"loss": 0.8235,
"step": 395
},
{
"epoch": 0.82389289392379,
"grad_norm": 0.8754395246505737,
"learning_rate": 2.8946346618416742e-05,
"loss": 0.7409,
"step": 400
},
{
"epoch": 0.8341915550978373,
"grad_norm": 1.0218687057495117,
"learning_rate": 2.8908437590369056e-05,
"loss": 0.7466,
"step": 405
},
{
"epoch": 0.8444902162718847,
"grad_norm": 1.1078494787216187,
"learning_rate": 2.886988432658065e-05,
"loss": 0.7767,
"step": 410
},
{
"epoch": 0.854788877445932,
"grad_norm": 0.970990002155304,
"learning_rate": 2.8830688612830142e-05,
"loss": 0.7584,
"step": 415
},
{
"epoch": 0.8650875386199794,
"grad_norm": 1.0772037506103516,
"learning_rate": 2.8790852264654287e-05,
"loss": 0.8034,
"step": 420
},
{
"epoch": 0.8753861997940268,
"grad_norm": 0.9200147986412048,
"learning_rate": 2.875037712726389e-05,
"loss": 0.7289,
"step": 425
},
{
"epoch": 0.8856848609680742,
"grad_norm": 1.0537430047988892,
"learning_rate": 2.8709265075458324e-05,
"loss": 0.7462,
"step": 430
},
{
"epoch": 0.8959835221421215,
"grad_norm": 0.9600791335105896,
"learning_rate": 2.86675180135387e-05,
"loss": 0.68,
"step": 435
},
{
"epoch": 0.9062821833161689,
"grad_norm": 1.05148184299469,
"learning_rate": 2.8625137875219655e-05,
"loss": 0.7472,
"step": 440
},
{
"epoch": 0.9165808444902163,
"grad_norm": 0.9907565712928772,
"learning_rate": 2.8582126623539787e-05,
"loss": 0.6766,
"step": 445
},
{
"epoch": 0.9268795056642637,
"grad_norm": 0.9938767552375793,
"learning_rate": 2.8538486250770724e-05,
"loss": 0.7238,
"step": 450
},
{
"epoch": 0.937178166838311,
"grad_norm": 0.9231424331665039,
"learning_rate": 2.849421877832484e-05,
"loss": 0.7381,
"step": 455
},
{
"epoch": 0.9474768280123584,
"grad_norm": 1.0045597553253174,
"learning_rate": 2.844932625666163e-05,
"loss": 0.6675,
"step": 460
},
{
"epoch": 0.9577754891864058,
"grad_norm": 0.9766920804977417,
"learning_rate": 2.8403810765192727e-05,
"loss": 0.7332,
"step": 465
},
{
"epoch": 0.9680741503604532,
"grad_norm": 0.9820194244384766,
"learning_rate": 2.835767441218559e-05,
"loss": 0.726,
"step": 470
},
{
"epoch": 0.9783728115345005,
"grad_norm": 1.1101776361465454,
"learning_rate": 2.831091933466584e-05,
"loss": 0.7179,
"step": 475
},
{
"epoch": 0.9886714727085479,
"grad_norm": 1.3044633865356445,
"learning_rate": 2.82635476983183e-05,
"loss": 0.6778,
"step": 480
},
{
"epoch": 0.9989701338825953,
"grad_norm": 1.0158640146255493,
"learning_rate": 2.821556169738663e-05,
"loss": 0.7167,
"step": 485
}
],
"logging_steps": 5,
"max_steps": 2430,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 2000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 6.963472629424456e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}