{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.0648249186987478,
"eval_steps": 500,
"global_step": 6000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0005402076558228983,
"grad_norm": 4.3125,
"learning_rate": 1.7644940583363342e-06,
"loss": 1.7943,
"step": 50
},
{
"epoch": 0.0010804153116457966,
"grad_norm": 4.28125,
"learning_rate": 3.5649981994958587e-06,
"loss": 1.7796,
"step": 100
},
{
"epoch": 0.001620622967468695,
"grad_norm": 3.640625,
"learning_rate": 5.365502340655383e-06,
"loss": 1.7311,
"step": 150
},
{
"epoch": 0.002160830623291593,
"grad_norm": 1.390625,
"learning_rate": 7.166006481814909e-06,
"loss": 1.7021,
"step": 200
},
{
"epoch": 0.0027010382791144917,
"grad_norm": 2.890625,
"learning_rate": 8.966510622974434e-06,
"loss": 1.7806,
"step": 250
},
{
"epoch": 0.00324124593493739,
"grad_norm": 1.15625,
"learning_rate": 1.0767014764133958e-05,
"loss": 1.7074,
"step": 300
},
{
"epoch": 0.003781453590760288,
"grad_norm": 0.5390625,
"learning_rate": 1.2567518905293481e-05,
"loss": 1.6765,
"step": 350
},
{
"epoch": 0.004321661246583186,
"grad_norm": 1.078125,
"learning_rate": 1.4368023046453008e-05,
"loss": 1.6671,
"step": 400
},
{
"epoch": 0.004861868902406085,
"grad_norm": 0.455078125,
"learning_rate": 1.6168527187612533e-05,
"loss": 1.6235,
"step": 450
},
{
"epoch": 0.0054020765582289835,
"grad_norm": 0.2890625,
"learning_rate": 1.7969031328772056e-05,
"loss": 1.6529,
"step": 500
},
{
"epoch": 0.005942284214051881,
"grad_norm": 0.345703125,
"learning_rate": 1.9769535469931582e-05,
"loss": 1.559,
"step": 550
},
{
"epoch": 0.00648249186987478,
"grad_norm": 0.306640625,
"learning_rate": 2.1570039611091105e-05,
"loss": 1.5842,
"step": 600
},
{
"epoch": 0.007022699525697678,
"grad_norm": 0.359375,
"learning_rate": 2.337054375225063e-05,
"loss": 1.5504,
"step": 650
},
{
"epoch": 0.007562907181520576,
"grad_norm": 0.30859375,
"learning_rate": 2.5171047893410155e-05,
"loss": 1.5611,
"step": 700
},
{
"epoch": 0.008103114837343476,
"grad_norm": 0.91015625,
"learning_rate": 2.697155203456968e-05,
"loss": 1.5447,
"step": 750
},
{
"epoch": 0.008643322493166372,
"grad_norm": 0.359375,
"learning_rate": 2.8772056175729207e-05,
"loss": 1.546,
"step": 800
},
{
"epoch": 0.009183530148989271,
"grad_norm": 0.328125,
"learning_rate": 3.057256031688873e-05,
"loss": 1.5344,
"step": 850
},
{
"epoch": 0.00972373780481217,
"grad_norm": 0.29296875,
"learning_rate": 3.237306445804826e-05,
"loss": 1.4914,
"step": 900
},
{
"epoch": 0.010263945460635068,
"grad_norm": 0.29296875,
"learning_rate": 3.4173568599207777e-05,
"loss": 1.559,
"step": 950
},
{
"epoch": 0.010804153116457967,
"grad_norm": 0.283203125,
"learning_rate": 3.59740727403673e-05,
"loss": 1.485,
"step": 1000
},
{
"epoch": 0.011344360772280866,
"grad_norm": 0.263671875,
"learning_rate": 3.777457688152683e-05,
"loss": 1.4972,
"step": 1050
},
{
"epoch": 0.011884568428103762,
"grad_norm": 0.439453125,
"learning_rate": 3.9575081022686356e-05,
"loss": 1.45,
"step": 1100
},
{
"epoch": 0.012424776083926661,
"grad_norm": 0.3046875,
"learning_rate": 4.137558516384588e-05,
"loss": 1.4232,
"step": 1150
},
{
"epoch": 0.01296498373974956,
"grad_norm": 0.35546875,
"learning_rate": 4.31760893050054e-05,
"loss": 1.4098,
"step": 1200
},
{
"epoch": 0.013505191395572458,
"grad_norm": 0.330078125,
"learning_rate": 4.497659344616493e-05,
"loss": 1.3447,
"step": 1250
},
{
"epoch": 0.014045399051395357,
"grad_norm": 0.31640625,
"learning_rate": 4.6777097587324455e-05,
"loss": 1.35,
"step": 1300
},
{
"epoch": 0.014585606707218255,
"grad_norm": 0.37109375,
"learning_rate": 4.857760172848398e-05,
"loss": 1.3509,
"step": 1350
},
{
"epoch": 0.015125814363041152,
"grad_norm": 0.3515625,
"learning_rate": 5.03781058696435e-05,
"loss": 1.3161,
"step": 1400
},
{
"epoch": 0.01566602201886405,
"grad_norm": 0.27734375,
"learning_rate": 5.2178610010803034e-05,
"loss": 1.3277,
"step": 1450
},
{
"epoch": 0.01620622967468695,
"grad_norm": 0.37890625,
"learning_rate": 5.3979114151962554e-05,
"loss": 1.2814,
"step": 1500
},
{
"epoch": 0.016746437330509848,
"grad_norm": 0.31640625,
"learning_rate": 5.577961829312208e-05,
"loss": 1.2944,
"step": 1550
},
{
"epoch": 0.017286644986332745,
"grad_norm": 0.32421875,
"learning_rate": 5.75801224342816e-05,
"loss": 1.2714,
"step": 1600
},
{
"epoch": 0.017826852642155645,
"grad_norm": 0.3203125,
"learning_rate": 5.9380626575441126e-05,
"loss": 1.2847,
"step": 1650
},
{
"epoch": 0.018367060297978542,
"grad_norm": 0.390625,
"learning_rate": 6.118113071660065e-05,
"loss": 1.2828,
"step": 1700
},
{
"epoch": 0.018907267953801443,
"grad_norm": 0.33984375,
"learning_rate": 6.298163485776018e-05,
"loss": 1.252,
"step": 1750
},
{
"epoch": 0.01944747560962434,
"grad_norm": 0.35546875,
"learning_rate": 6.47821389989197e-05,
"loss": 1.2659,
"step": 1800
},
{
"epoch": 0.019987683265447236,
"grad_norm": 0.375,
"learning_rate": 6.658264314007923e-05,
"loss": 1.2429,
"step": 1850
},
{
"epoch": 0.020527890921270137,
"grad_norm": 0.357421875,
"learning_rate": 6.838314728123874e-05,
"loss": 1.2492,
"step": 1900
},
{
"epoch": 0.021068098577093033,
"grad_norm": 0.39453125,
"learning_rate": 7.018365142239827e-05,
"loss": 1.2083,
"step": 1950
},
{
"epoch": 0.021608306232915934,
"grad_norm": 0.3515625,
"learning_rate": 7.19841555635578e-05,
"loss": 1.2459,
"step": 2000
},
{
"epoch": 0.02214851388873883,
"grad_norm": 0.35546875,
"learning_rate": 7.378465970471732e-05,
"loss": 1.2101,
"step": 2050
},
{
"epoch": 0.02268872154456173,
"grad_norm": 0.34765625,
"learning_rate": 7.558516384587685e-05,
"loss": 1.1946,
"step": 2100
},
{
"epoch": 0.023228929200384628,
"grad_norm": 0.3671875,
"learning_rate": 7.738566798703636e-05,
"loss": 1.2097,
"step": 2150
},
{
"epoch": 0.023769136856207525,
"grad_norm": 0.400390625,
"learning_rate": 7.91861721281959e-05,
"loss": 1.1788,
"step": 2200
},
{
"epoch": 0.024309344512030425,
"grad_norm": 0.419921875,
"learning_rate": 8.098667626935542e-05,
"loss": 1.1804,
"step": 2250
},
{
"epoch": 0.024849552167853322,
"grad_norm": 0.419921875,
"learning_rate": 8.278718041051496e-05,
"loss": 1.2001,
"step": 2300
},
{
"epoch": 0.025389759823676222,
"grad_norm": 0.3828125,
"learning_rate": 8.458768455167447e-05,
"loss": 1.181,
"step": 2350
},
{
"epoch": 0.02592996747949912,
"grad_norm": 0.408203125,
"learning_rate": 8.6388188692834e-05,
"loss": 1.1763,
"step": 2400
},
{
"epoch": 0.02647017513532202,
"grad_norm": 0.431640625,
"learning_rate": 8.818869283399352e-05,
"loss": 1.1643,
"step": 2450
},
{
"epoch": 0.027010382791144916,
"grad_norm": 0.412109375,
"learning_rate": 8.998919697515305e-05,
"loss": 1.1801,
"step": 2500
},
{
"epoch": 0.027550590446967813,
"grad_norm": 0.42578125,
"learning_rate": 9.178970111631257e-05,
"loss": 1.1554,
"step": 2550
},
{
"epoch": 0.028090798102790714,
"grad_norm": 0.435546875,
"learning_rate": 9.35902052574721e-05,
"loss": 1.1682,
"step": 2600
},
{
"epoch": 0.02863100575861361,
"grad_norm": 0.400390625,
"learning_rate": 9.539070939863161e-05,
"loss": 1.1518,
"step": 2650
},
{
"epoch": 0.02917121341443651,
"grad_norm": 0.43359375,
"learning_rate": 9.719121353979115e-05,
"loss": 1.1331,
"step": 2700
},
{
"epoch": 0.029711421070259408,
"grad_norm": 0.44140625,
"learning_rate": 9.899171768095067e-05,
"loss": 1.1256,
"step": 2750
},
{
"epoch": 0.030251628726082305,
"grad_norm": 0.474609375,
"learning_rate": 0.00010079222182211019,
"loss": 1.1367,
"step": 2800
},
{
"epoch": 0.030791836381905205,
"grad_norm": 0.4765625,
"learning_rate": 0.00010259272596326972,
"loss": 1.1206,
"step": 2850
},
{
"epoch": 0.0313320440377281,
"grad_norm": 0.396484375,
"learning_rate": 0.00010439323010442925,
"loss": 1.1086,
"step": 2900
},
{
"epoch": 0.031872251693551,
"grad_norm": 0.451171875,
"learning_rate": 0.00010619373424558876,
"loss": 1.1455,
"step": 2950
},
{
"epoch": 0.0324124593493739,
"grad_norm": 0.5390625,
"learning_rate": 0.0001079942383867483,
"loss": 1.1079,
"step": 3000
},
{
"epoch": 0.0329526670051968,
"grad_norm": 0.46875,
"learning_rate": 0.00010979474252790783,
"loss": 1.0991,
"step": 3050
},
{
"epoch": 0.033492874661019696,
"grad_norm": 0.453125,
"learning_rate": 0.00011159524666906735,
"loss": 1.1154,
"step": 3100
},
{
"epoch": 0.03403308231684259,
"grad_norm": 0.439453125,
"learning_rate": 0.00011339575081022686,
"loss": 1.1002,
"step": 3150
},
{
"epoch": 0.03457328997266549,
"grad_norm": 0.462890625,
"learning_rate": 0.00011519625495138639,
"loss": 1.0997,
"step": 3200
},
{
"epoch": 0.035113497628488394,
"grad_norm": 0.412109375,
"learning_rate": 0.00011699675909254593,
"loss": 1.0754,
"step": 3250
},
{
"epoch": 0.03565370528431129,
"grad_norm": 0.486328125,
"learning_rate": 0.00011879726323370543,
"loss": 1.0933,
"step": 3300
},
{
"epoch": 0.03619391294013419,
"grad_norm": 0.408203125,
"learning_rate": 0.00012059776737486497,
"loss": 1.0764,
"step": 3350
},
{
"epoch": 0.036734120595957084,
"grad_norm": 0.453125,
"learning_rate": 0.0001223982715160245,
"loss": 1.1249,
"step": 3400
},
{
"epoch": 0.03727432825177998,
"grad_norm": 0.40234375,
"learning_rate": 0.000124198775657184,
"loss": 1.0746,
"step": 3450
},
{
"epoch": 0.037814535907602885,
"grad_norm": 0.51953125,
"learning_rate": 0.00012599927979834355,
"loss": 1.0747,
"step": 3500
},
{
"epoch": 0.03835474356342578,
"grad_norm": 0.44140625,
"learning_rate": 0.00012779978393950308,
"loss": 1.0814,
"step": 3550
},
{
"epoch": 0.03889495121924868,
"grad_norm": 0.5,
"learning_rate": 0.0001296002880806626,
"loss": 1.064,
"step": 3600
},
{
"epoch": 0.039435158875071576,
"grad_norm": 0.4453125,
"learning_rate": 0.0001314007922218221,
"loss": 1.0867,
"step": 3650
},
{
"epoch": 0.03997536653089447,
"grad_norm": 0.4296875,
"learning_rate": 0.00013320129636298166,
"loss": 1.066,
"step": 3700
},
{
"epoch": 0.040515574186717376,
"grad_norm": 0.51953125,
"learning_rate": 0.00013500180050414118,
"loss": 1.0721,
"step": 3750
},
{
"epoch": 0.04105578184254027,
"grad_norm": 0.482421875,
"learning_rate": 0.00013680230464530068,
"loss": 1.0518,
"step": 3800
},
{
"epoch": 0.04159598949836317,
"grad_norm": 0.466796875,
"learning_rate": 0.0001386028087864602,
"loss": 1.0495,
"step": 3850
},
{
"epoch": 0.04213619715418607,
"grad_norm": 0.39453125,
"learning_rate": 0.00014040331292761973,
"loss": 1.0292,
"step": 3900
},
{
"epoch": 0.04267640481000897,
"grad_norm": 0.4375,
"learning_rate": 0.00014220381706877926,
"loss": 1.031,
"step": 3950
},
{
"epoch": 0.04321661246583187,
"grad_norm": 0.48828125,
"learning_rate": 0.0001440043212099388,
"loss": 1.026,
"step": 4000
},
{
"epoch": 0.043756820121654764,
"grad_norm": 0.46875,
"learning_rate": 0.0001458048253510983,
"loss": 1.0372,
"step": 4050
},
{
"epoch": 0.04429702777747766,
"grad_norm": 0.396484375,
"learning_rate": 0.00014760532949225784,
"loss": 1.0388,
"step": 4100
},
{
"epoch": 0.04483723543330056,
"grad_norm": 0.4296875,
"learning_rate": 0.00014940583363341737,
"loss": 1.0258,
"step": 4150
},
{
"epoch": 0.04537744308912346,
"grad_norm": 0.478515625,
"learning_rate": 0.0001512063377745769,
"loss": 1.0111,
"step": 4200
},
{
"epoch": 0.04591765074494636,
"grad_norm": 0.46484375,
"learning_rate": 0.00015300684191573642,
"loss": 1.0316,
"step": 4250
},
{
"epoch": 0.046457858400769256,
"grad_norm": 0.5546875,
"learning_rate": 0.00015480734605689592,
"loss": 1.0375,
"step": 4300
},
{
"epoch": 0.04699806605659215,
"grad_norm": 0.408203125,
"learning_rate": 0.00015660785019805547,
"loss": 1.037,
"step": 4350
},
{
"epoch": 0.04753827371241505,
"grad_norm": 0.458984375,
"learning_rate": 0.000158408354339215,
"loss": 1.0172,
"step": 4400
},
{
"epoch": 0.04807848136823795,
"grad_norm": 0.455078125,
"learning_rate": 0.0001602088584803745,
"loss": 1.0482,
"step": 4450
},
{
"epoch": 0.04861868902406085,
"grad_norm": 0.451171875,
"learning_rate": 0.00016200936262153402,
"loss": 1.0072,
"step": 4500
},
{
"epoch": 0.04915889667988375,
"grad_norm": 0.63671875,
"learning_rate": 0.00016380986676269358,
"loss": 0.9988,
"step": 4550
},
{
"epoch": 0.049699104335706644,
"grad_norm": 0.42578125,
"learning_rate": 0.0001656103709038531,
"loss": 0.9998,
"step": 4600
},
{
"epoch": 0.05023931199152954,
"grad_norm": 0.5234375,
"learning_rate": 0.0001674108750450126,
"loss": 1.0084,
"step": 4650
},
{
"epoch": 0.050779519647352445,
"grad_norm": 0.498046875,
"learning_rate": 0.00016921137918617213,
"loss": 1.0152,
"step": 4700
},
{
"epoch": 0.05131972730317534,
"grad_norm": 0.48046875,
"learning_rate": 0.00017101188332733166,
"loss": 0.982,
"step": 4750
},
{
"epoch": 0.05185993495899824,
"grad_norm": 0.53515625,
"learning_rate": 0.00017281238746849118,
"loss": 1.0199,
"step": 4800
},
{
"epoch": 0.052400142614821135,
"grad_norm": 0.48828125,
"learning_rate": 0.0001746128916096507,
"loss": 1.0044,
"step": 4850
},
{
"epoch": 0.05294035027064404,
"grad_norm": 0.5078125,
"learning_rate": 0.00017641339575081024,
"loss": 0.9955,
"step": 4900
},
{
"epoch": 0.053480557926466936,
"grad_norm": 0.498046875,
"learning_rate": 0.00017821389989196976,
"loss": 0.9803,
"step": 4950
},
{
"epoch": 0.05402076558228983,
"grad_norm": 0.443359375,
"learning_rate": 0.0001800144040331293,
"loss": 1.0071,
"step": 5000
},
{
"epoch": 0.05456097323811273,
"grad_norm": 0.447265625,
"learning_rate": 0.00018181490817428881,
"loss": 0.9752,
"step": 5050
},
{
"epoch": 0.055101180893935627,
"grad_norm": 0.474609375,
"learning_rate": 0.00018361541231544834,
"loss": 0.9567,
"step": 5100
},
{
"epoch": 0.05564138854975853,
"grad_norm": 0.6171875,
"learning_rate": 0.00018541591645660784,
"loss": 0.9918,
"step": 5150
},
{
"epoch": 0.05618159620558143,
"grad_norm": 0.458984375,
"learning_rate": 0.0001872164205977674,
"loss": 0.9632,
"step": 5200
},
{
"epoch": 0.056721803861404324,
"grad_norm": 0.46484375,
"learning_rate": 0.00018901692473892692,
"loss": 0.9607,
"step": 5250
},
{
"epoch": 0.05726201151722722,
"grad_norm": 0.51171875,
"learning_rate": 0.00019081742888008642,
"loss": 0.96,
"step": 5300
},
{
"epoch": 0.05780221917305012,
"grad_norm": 0.474609375,
"learning_rate": 0.00019261793302124595,
"loss": 0.9752,
"step": 5350
},
{
"epoch": 0.05834242682887302,
"grad_norm": 0.45703125,
"learning_rate": 0.0001944184371624055,
"loss": 0.9782,
"step": 5400
},
{
"epoch": 0.05888263448469592,
"grad_norm": 0.498046875,
"learning_rate": 0.00019621894130356503,
"loss": 0.956,
"step": 5450
},
{
"epoch": 0.059422842140518815,
"grad_norm": 0.466796875,
"learning_rate": 0.00019801944544472453,
"loss": 0.9549,
"step": 5500
},
{
"epoch": 0.05996304979634171,
"grad_norm": 0.6171875,
"learning_rate": 0.00019981994958588405,
"loss": 0.9739,
"step": 5550
},
{
"epoch": 0.06050325745216461,
"grad_norm": 0.4765625,
"learning_rate": 0.00019999996900614756,
"loss": 0.9687,
"step": 5600
},
{
"epoch": 0.06104346510798751,
"grad_norm": 0.6171875,
"learning_rate": 0.0001999998618669291,
"loss": 0.97,
"step": 5650
},
{
"epoch": 0.06158367276381041,
"grad_norm": 0.451171875,
"learning_rate": 0.00019999967819978658,
"loss": 0.9493,
"step": 5700
},
{
"epoch": 0.06212388041963331,
"grad_norm": 0.447265625,
"learning_rate": 0.00019999941800486043,
"loss": 0.9411,
"step": 5750
},
{
"epoch": 0.0626640880754562,
"grad_norm": 0.640625,
"learning_rate": 0.00019999908128234985,
"loss": 0.9662,
"step": 5800
},
{
"epoch": 0.0632042957312791,
"grad_norm": 0.443359375,
"learning_rate": 0.0001999986680325125,
"loss": 0.9541,
"step": 5850
},
{
"epoch": 0.063744503387102,
"grad_norm": 0.44921875,
"learning_rate": 0.00019999817825566463,
"loss": 0.9312,
"step": 5900
},
{
"epoch": 0.0642847110429249,
"grad_norm": 0.45703125,
"learning_rate": 0.00019999761195218106,
"loss": 0.9492,
"step": 5950
},
{
"epoch": 0.0648249186987478,
"grad_norm": 0.466796875,
"learning_rate": 0.00019999696912249515,
"loss": 0.9508,
"step": 6000
}
],
"logging_steps": 50,
"max_steps": 185114,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 1000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 5.01130777853952e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}