{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.9196940726577436,
"eval_steps": 500,
"global_step": 4100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.04780114722753346,
"grad_norm": 0.0546465665102005,
"learning_rate": 4.941443594646272e-05,
"loss": 2.2922,
"step": 50
},
{
"epoch": 0.09560229445506692,
"grad_norm": 0.08099879324436188,
"learning_rate": 4.881692160611855e-05,
"loss": 2.2658,
"step": 100
},
{
"epoch": 0.14340344168260039,
"grad_norm": 0.08135072886943817,
"learning_rate": 4.821940726577438e-05,
"loss": 2.1959,
"step": 150
},
{
"epoch": 0.19120458891013384,
"grad_norm": 0.08765063434839249,
"learning_rate": 4.7621892925430214e-05,
"loss": 2.1285,
"step": 200
},
{
"epoch": 0.2390057361376673,
"grad_norm": 0.10307478904724121,
"learning_rate": 4.7024378585086046e-05,
"loss": 2.0899,
"step": 250
},
{
"epoch": 0.28680688336520077,
"grad_norm": 0.10948552936315536,
"learning_rate": 4.642686424474187e-05,
"loss": 2.0663,
"step": 300
},
{
"epoch": 0.33460803059273425,
"grad_norm": 0.1135614812374115,
"learning_rate": 4.5829349904397704e-05,
"loss": 2.0508,
"step": 350
},
{
"epoch": 0.3824091778202677,
"grad_norm": 0.12696176767349243,
"learning_rate": 4.5231835564053536e-05,
"loss": 1.9819,
"step": 400
},
{
"epoch": 0.43021032504780116,
"grad_norm": 0.13437969982624054,
"learning_rate": 4.463432122370937e-05,
"loss": 2.0004,
"step": 450
},
{
"epoch": 0.4780114722753346,
"grad_norm": 0.1275995969772339,
"learning_rate": 4.40368068833652e-05,
"loss": 1.9961,
"step": 500
},
{
"epoch": 0.5258126195028681,
"grad_norm": 0.14496305584907532,
"learning_rate": 4.343929254302104e-05,
"loss": 1.9855,
"step": 550
},
{
"epoch": 0.5736137667304015,
"grad_norm": 0.13401177525520325,
"learning_rate": 4.284177820267687e-05,
"loss": 1.976,
"step": 600
},
{
"epoch": 0.621414913957935,
"grad_norm": 0.15063685178756714,
"learning_rate": 4.2244263862332704e-05,
"loss": 1.9428,
"step": 650
},
{
"epoch": 0.6692160611854685,
"grad_norm": 0.1525646150112152,
"learning_rate": 4.164674952198853e-05,
"loss": 1.9565,
"step": 700
},
{
"epoch": 0.7170172084130019,
"grad_norm": 0.1553465574979782,
"learning_rate": 4.104923518164436e-05,
"loss": 1.9499,
"step": 750
},
{
"epoch": 0.7648183556405354,
"grad_norm": 0.178068146109581,
"learning_rate": 4.0451720841300193e-05,
"loss": 1.9342,
"step": 800
},
{
"epoch": 0.8126195028680688,
"grad_norm": 0.16074113547801971,
"learning_rate": 3.9854206500956026e-05,
"loss": 1.9304,
"step": 850
},
{
"epoch": 0.8604206500956023,
"grad_norm": 0.17012304067611694,
"learning_rate": 3.925669216061186e-05,
"loss": 1.9358,
"step": 900
},
{
"epoch": 0.9082217973231358,
"grad_norm": 0.1635637879371643,
"learning_rate": 3.865917782026769e-05,
"loss": 1.9145,
"step": 950
},
{
"epoch": 0.9560229445506692,
"grad_norm": 0.18287719786167145,
"learning_rate": 3.806166347992352e-05,
"loss": 1.9186,
"step": 1000
},
{
"epoch": 1.0038240917782026,
"grad_norm": 0.18034423887729645,
"learning_rate": 3.7464149139579354e-05,
"loss": 1.8995,
"step": 1050
},
{
"epoch": 1.0516252390057361,
"grad_norm": 0.16354230046272278,
"learning_rate": 3.6866634799235186e-05,
"loss": 1.9068,
"step": 1100
},
{
"epoch": 1.0994263862332696,
"grad_norm": 0.236989825963974,
"learning_rate": 3.626912045889101e-05,
"loss": 1.9141,
"step": 1150
},
{
"epoch": 1.147227533460803,
"grad_norm": 0.2356785088777542,
"learning_rate": 3.5671606118546844e-05,
"loss": 1.9088,
"step": 1200
},
{
"epoch": 1.1950286806883366,
"grad_norm": 0.1805247962474823,
"learning_rate": 3.5074091778202676e-05,
"loss": 1.8923,
"step": 1250
},
{
"epoch": 1.24282982791587,
"grad_norm": 0.21303710341453552,
"learning_rate": 3.447657743785851e-05,
"loss": 1.898,
"step": 1300
},
{
"epoch": 1.2906309751434035,
"grad_norm": 0.18716047704219818,
"learning_rate": 3.387906309751434e-05,
"loss": 1.8986,
"step": 1350
},
{
"epoch": 1.338432122370937,
"grad_norm": 0.16454172134399414,
"learning_rate": 3.328154875717017e-05,
"loss": 1.9149,
"step": 1400
},
{
"epoch": 1.3862332695984705,
"grad_norm": 0.18206411600112915,
"learning_rate": 3.2684034416826005e-05,
"loss": 1.8662,
"step": 1450
},
{
"epoch": 1.4340344168260037,
"grad_norm": 0.19803249835968018,
"learning_rate": 3.208652007648184e-05,
"loss": 1.9015,
"step": 1500
},
{
"epoch": 1.4818355640535372,
"grad_norm": 0.19260399043560028,
"learning_rate": 3.148900573613767e-05,
"loss": 1.905,
"step": 1550
},
{
"epoch": 1.5296367112810707,
"grad_norm": 0.20765070617198944,
"learning_rate": 3.08914913957935e-05,
"loss": 1.8981,
"step": 1600
},
{
"epoch": 1.5774378585086042,
"grad_norm": 0.1981675922870636,
"learning_rate": 3.029397705544933e-05,
"loss": 1.8957,
"step": 1650
},
{
"epoch": 1.6252390057361377,
"grad_norm": 0.19996145367622375,
"learning_rate": 2.9696462715105166e-05,
"loss": 1.8797,
"step": 1700
},
{
"epoch": 1.6730401529636711,
"grad_norm": 0.21547172963619232,
"learning_rate": 2.9098948374760998e-05,
"loss": 1.8919,
"step": 1750
},
{
"epoch": 1.7208413001912046,
"grad_norm": 0.21214577555656433,
"learning_rate": 2.850143403441683e-05,
"loss": 1.8942,
"step": 1800
},
{
"epoch": 1.7686424474187379,
"grad_norm": 0.19430747628211975,
"learning_rate": 2.7903919694072662e-05,
"loss": 1.8744,
"step": 1850
},
{
"epoch": 1.8164435946462714,
"grad_norm": 0.20672687888145447,
"learning_rate": 2.730640535372849e-05,
"loss": 1.8784,
"step": 1900
},
{
"epoch": 1.8642447418738048,
"grad_norm": 0.204985573887825,
"learning_rate": 2.6708891013384323e-05,
"loss": 1.8902,
"step": 1950
},
{
"epoch": 1.9120458891013383,
"grad_norm": 0.20450404286384583,
"learning_rate": 2.6111376673040155e-05,
"loss": 1.8649,
"step": 2000
},
{
"epoch": 1.9598470363288718,
"grad_norm": 0.20468485355377197,
"learning_rate": 2.5513862332695987e-05,
"loss": 1.8899,
"step": 2050
},
{
"epoch": 2.0076481835564053,
"grad_norm": 0.206816628575325,
"learning_rate": 2.491634799235182e-05,
"loss": 1.8607,
"step": 2100
},
{
"epoch": 2.0554493307839388,
"grad_norm": 0.2078491896390915,
"learning_rate": 2.431883365200765e-05,
"loss": 1.8737,
"step": 2150
},
{
"epoch": 2.1032504780114722,
"grad_norm": 0.19321344792842865,
"learning_rate": 2.372131931166348e-05,
"loss": 1.8597,
"step": 2200
},
{
"epoch": 2.1510516252390057,
"grad_norm": 0.22135953605175018,
"learning_rate": 2.3123804971319313e-05,
"loss": 1.88,
"step": 2250
},
{
"epoch": 2.198852772466539,
"grad_norm": 0.20404009521007538,
"learning_rate": 2.2526290630975145e-05,
"loss": 1.8908,
"step": 2300
},
{
"epoch": 2.2466539196940727,
"grad_norm": 0.21296437084674835,
"learning_rate": 2.1928776290630977e-05,
"loss": 1.8632,
"step": 2350
},
{
"epoch": 2.294455066921606,
"grad_norm": 0.2082318514585495,
"learning_rate": 2.1331261950286806e-05,
"loss": 1.8819,
"step": 2400
},
{
"epoch": 2.3422562141491396,
"grad_norm": 0.2119467705488205,
"learning_rate": 2.0733747609942638e-05,
"loss": 1.8679,
"step": 2450
},
{
"epoch": 2.390057361376673,
"grad_norm": 0.2162160575389862,
"learning_rate": 2.013623326959847e-05,
"loss": 1.8698,
"step": 2500
},
{
"epoch": 2.4378585086042066,
"grad_norm": 0.19639310240745544,
"learning_rate": 1.9538718929254306e-05,
"loss": 1.8808,
"step": 2550
},
{
"epoch": 2.48565965583174,
"grad_norm": 0.2149832397699356,
"learning_rate": 1.8941204588910135e-05,
"loss": 1.8743,
"step": 2600
},
{
"epoch": 2.5334608030592736,
"grad_norm": 0.20657892525196075,
"learning_rate": 1.8343690248565967e-05,
"loss": 1.8611,
"step": 2650
},
{
"epoch": 2.581261950286807,
"grad_norm": 0.22579824924468994,
"learning_rate": 1.77461759082218e-05,
"loss": 1.8834,
"step": 2700
},
{
"epoch": 2.62906309751434,
"grad_norm": 0.214285746216774,
"learning_rate": 1.714866156787763e-05,
"loss": 1.8712,
"step": 2750
},
{
"epoch": 2.676864244741874,
"grad_norm": 0.2056400179862976,
"learning_rate": 1.6551147227533463e-05,
"loss": 1.8515,
"step": 2800
},
{
"epoch": 2.724665391969407,
"grad_norm": 0.21018265187740326,
"learning_rate": 1.5953632887189292e-05,
"loss": 1.8537,
"step": 2850
},
{
"epoch": 2.772466539196941,
"grad_norm": 0.1982487589120865,
"learning_rate": 1.5356118546845124e-05,
"loss": 1.8637,
"step": 2900
},
{
"epoch": 2.820267686424474,
"grad_norm": 0.22870182991027832,
"learning_rate": 1.4758604206500956e-05,
"loss": 1.857,
"step": 2950
},
{
"epoch": 2.8680688336520075,
"grad_norm": 0.2098233997821808,
"learning_rate": 1.4161089866156787e-05,
"loss": 1.8433,
"step": 3000
},
{
"epoch": 2.915869980879541,
"grad_norm": 0.23259004950523376,
"learning_rate": 1.3563575525812619e-05,
"loss": 1.8934,
"step": 3050
},
{
"epoch": 2.9636711281070744,
"grad_norm": 0.2191799432039261,
"learning_rate": 1.2966061185468451e-05,
"loss": 1.8603,
"step": 3100
},
{
"epoch": 3.011472275334608,
"grad_norm": 0.21849119663238525,
"learning_rate": 1.2368546845124283e-05,
"loss": 1.8603,
"step": 3150
},
{
"epoch": 3.0592734225621414,
"grad_norm": 0.22140590846538544,
"learning_rate": 1.1771032504780115e-05,
"loss": 1.8696,
"step": 3200
},
{
"epoch": 3.107074569789675,
"grad_norm": 0.2533087432384491,
"learning_rate": 1.1173518164435948e-05,
"loss": 1.8491,
"step": 3250
},
{
"epoch": 3.1548757170172084,
"grad_norm": 0.22291821241378784,
"learning_rate": 1.057600382409178e-05,
"loss": 1.8664,
"step": 3300
},
{
"epoch": 3.202676864244742,
"grad_norm": 0.23389829695224762,
"learning_rate": 9.97848948374761e-06,
"loss": 1.8633,
"step": 3350
},
{
"epoch": 3.2504780114722753,
"grad_norm": 0.2284611165523529,
"learning_rate": 9.380975143403442e-06,
"loss": 1.8739,
"step": 3400
},
{
"epoch": 3.298279158699809,
"grad_norm": 0.2357141673564911,
"learning_rate": 8.783460803059273e-06,
"loss": 1.8837,
"step": 3450
},
{
"epoch": 3.3460803059273423,
"grad_norm": 0.23133474588394165,
"learning_rate": 8.185946462715105e-06,
"loss": 1.8284,
"step": 3500
},
{
"epoch": 3.3938814531548758,
"grad_norm": 0.24003466963768005,
"learning_rate": 7.588432122370938e-06,
"loss": 1.8557,
"step": 3550
},
{
"epoch": 3.4416826003824093,
"grad_norm": 0.22754019498825073,
"learning_rate": 6.990917782026769e-06,
"loss": 1.8726,
"step": 3600
},
{
"epoch": 3.4894837476099427,
"grad_norm": 0.24840685725212097,
"learning_rate": 6.393403441682601e-06,
"loss": 1.8683,
"step": 3650
},
{
"epoch": 3.537284894837476,
"grad_norm": 0.23282456398010254,
"learning_rate": 5.795889101338432e-06,
"loss": 1.8624,
"step": 3700
},
{
"epoch": 3.5850860420650097,
"grad_norm": 0.22449611127376556,
"learning_rate": 5.198374760994264e-06,
"loss": 1.8355,
"step": 3750
},
{
"epoch": 3.632887189292543,
"grad_norm": 0.22073350846767426,
"learning_rate": 4.6008604206500955e-06,
"loss": 1.8706,
"step": 3800
},
{
"epoch": 3.6806883365200767,
"grad_norm": 0.2504482567310333,
"learning_rate": 4.003346080305928e-06,
"loss": 1.8607,
"step": 3850
},
{
"epoch": 3.7284894837476097,
"grad_norm": 0.23835700750350952,
"learning_rate": 3.4058317399617594e-06,
"loss": 1.8495,
"step": 3900
},
{
"epoch": 3.7762906309751436,
"grad_norm": 0.2028919905424118,
"learning_rate": 2.808317399617591e-06,
"loss": 1.8423,
"step": 3950
},
{
"epoch": 3.8240917782026767,
"grad_norm": 0.23366276919841766,
"learning_rate": 2.210803059273423e-06,
"loss": 1.8431,
"step": 4000
},
{
"epoch": 3.8718929254302106,
"grad_norm": 0.261652410030365,
"learning_rate": 1.6132887189292542e-06,
"loss": 1.8639,
"step": 4050
},
{
"epoch": 3.9196940726577436,
"grad_norm": 0.2397291362285614,
"learning_rate": 1.015774378585086e-06,
"loss": 1.8626,
"step": 4100
}
],
"logging_steps": 50,
"max_steps": 4184,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 5.491537700349542e+16,
"train_batch_size": 10,
"trial_name": null,
"trial_params": null
}