{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.4926905132192845,
"eval_steps": 500,
"global_step": 1000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.024883359253499222,
"grad_norm": 6.788532733917236,
"learning_rate": 4.5e-06,
"loss": 1.2189,
"step": 10
},
{
"epoch": 0.049766718506998445,
"grad_norm": 1.3378803730010986,
"learning_rate": 9.5e-06,
"loss": 0.6193,
"step": 20
},
{
"epoch": 0.07465007776049767,
"grad_norm": 0.7917852997779846,
"learning_rate": 1.45e-05,
"loss": 0.231,
"step": 30
},
{
"epoch": 0.09953343701399689,
"grad_norm": 0.7099640965461731,
"learning_rate": 1.9500000000000003e-05,
"loss": 0.1463,
"step": 40
},
{
"epoch": 0.12441679626749612,
"grad_norm": 0.686485230922699,
"learning_rate": 2.45e-05,
"loss": 0.108,
"step": 50
},
{
"epoch": 0.14930015552099535,
"grad_norm": 0.4254772961139679,
"learning_rate": 2.95e-05,
"loss": 0.0878,
"step": 60
},
{
"epoch": 0.17418351477449456,
"grad_norm": 0.41301316022872925,
"learning_rate": 3.45e-05,
"loss": 0.0733,
"step": 70
},
{
"epoch": 0.19906687402799378,
"grad_norm": 0.5910139679908752,
"learning_rate": 3.9500000000000005e-05,
"loss": 0.0654,
"step": 80
},
{
"epoch": 0.223950233281493,
"grad_norm": 0.4281249940395355,
"learning_rate": 4.4500000000000004e-05,
"loss": 0.0621,
"step": 90
},
{
"epoch": 0.24883359253499224,
"grad_norm": 0.33288511633872986,
"learning_rate": 4.9500000000000004e-05,
"loss": 0.0528,
"step": 100
},
{
"epoch": 0.2737169517884914,
"grad_norm": 0.3627478778362274,
"learning_rate": 5.45e-05,
"loss": 0.0511,
"step": 110
},
{
"epoch": 0.2986003110419907,
"grad_norm": 0.3620380461215973,
"learning_rate": 5.95e-05,
"loss": 0.0467,
"step": 120
},
{
"epoch": 0.3234836702954899,
"grad_norm": 0.37019455432891846,
"learning_rate": 6.450000000000001e-05,
"loss": 0.0444,
"step": 130
},
{
"epoch": 0.3483670295489891,
"grad_norm": 0.44425082206726074,
"learning_rate": 6.95e-05,
"loss": 0.0413,
"step": 140
},
{
"epoch": 0.37325038880248834,
"grad_norm": 0.3729994297027588,
"learning_rate": 7.450000000000001e-05,
"loss": 0.0395,
"step": 150
},
{
"epoch": 0.39813374805598756,
"grad_norm": 0.25299033522605896,
"learning_rate": 7.950000000000001e-05,
"loss": 0.0375,
"step": 160
},
{
"epoch": 0.4230171073094868,
"grad_norm": 0.24479390680789948,
"learning_rate": 8.450000000000001e-05,
"loss": 0.0336,
"step": 170
},
{
"epoch": 0.447900466562986,
"grad_norm": 0.4071142375469208,
"learning_rate": 8.950000000000001e-05,
"loss": 0.0316,
"step": 180
},
{
"epoch": 0.4727838258164852,
"grad_norm": 0.40290141105651855,
"learning_rate": 9.449999999999999e-05,
"loss": 0.0331,
"step": 190
},
{
"epoch": 0.4976671850699845,
"grad_norm": 0.38158151507377625,
"learning_rate": 9.95e-05,
"loss": 0.0311,
"step": 200
},
{
"epoch": 0.5225505443234837,
"grad_norm": 0.29570913314819336,
"learning_rate": 9.999861593790126e-05,
"loss": 0.0291,
"step": 210
},
{
"epoch": 0.5474339035769828,
"grad_norm": 0.37690913677215576,
"learning_rate": 9.999383162408304e-05,
"loss": 0.0277,
"step": 220
},
{
"epoch": 0.5723172628304821,
"grad_norm": 0.3893811106681824,
"learning_rate": 9.998563029828259e-05,
"loss": 0.028,
"step": 230
},
{
"epoch": 0.5972006220839814,
"grad_norm": 0.3241683542728424,
"learning_rate": 9.997401252104962e-05,
"loss": 0.0267,
"step": 240
},
{
"epoch": 0.6220839813374806,
"grad_norm": 0.31424349546432495,
"learning_rate": 9.995897908644378e-05,
"loss": 0.0268,
"step": 250
},
{
"epoch": 0.6469673405909798,
"grad_norm": 0.2265535593032837,
"learning_rate": 9.994053102198034e-05,
"loss": 0.0257,
"step": 260
},
{
"epoch": 0.671850699844479,
"grad_norm": 0.352764368057251,
"learning_rate": 9.991866958856003e-05,
"loss": 0.0244,
"step": 270
},
{
"epoch": 0.6967340590979783,
"grad_norm": 0.29578715562820435,
"learning_rate": 9.989339628038276e-05,
"loss": 0.0224,
"step": 280
},
{
"epoch": 0.7216174183514774,
"grad_norm": 0.36025872826576233,
"learning_rate": 9.98647128248456e-05,
"loss": 0.0231,
"step": 290
},
{
"epoch": 0.7465007776049767,
"grad_norm": 0.31710195541381836,
"learning_rate": 9.98326211824246e-05,
"loss": 0.0247,
"step": 300
},
{
"epoch": 0.7713841368584758,
"grad_norm": 0.28732505440711975,
"learning_rate": 9.979712354654091e-05,
"loss": 0.0224,
"step": 310
},
{
"epoch": 0.7962674961119751,
"grad_norm": 0.21972423791885376,
"learning_rate": 9.975822234341079e-05,
"loss": 0.0211,
"step": 320
},
{
"epoch": 0.8211508553654744,
"grad_norm": 0.22749976813793182,
"learning_rate": 9.97159202318798e-05,
"loss": 0.0201,
"step": 330
},
{
"epoch": 0.8460342146189735,
"grad_norm": 0.3257753551006317,
"learning_rate": 9.967022010324105e-05,
"loss": 0.0218,
"step": 340
},
{
"epoch": 0.8709175738724728,
"grad_norm": 0.2250833660364151,
"learning_rate": 9.962112508103765e-05,
"loss": 0.0192,
"step": 350
},
{
"epoch": 0.895800933125972,
"grad_norm": 0.3466697335243225,
"learning_rate": 9.956863852084914e-05,
"loss": 0.0194,
"step": 360
},
{
"epoch": 0.9206842923794712,
"grad_norm": 0.3127228319644928,
"learning_rate": 9.951276401006221e-05,
"loss": 0.0207,
"step": 370
},
{
"epoch": 0.9455676516329704,
"grad_norm": 0.2664047181606293,
"learning_rate": 9.945350536762543e-05,
"loss": 0.0194,
"step": 380
},
{
"epoch": 0.9704510108864697,
"grad_norm": 0.2534600794315338,
"learning_rate": 9.939086664378829e-05,
"loss": 0.0203,
"step": 390
},
{
"epoch": 0.995334370139969,
"grad_norm": 0.2339852750301361,
"learning_rate": 9.932485211982437e-05,
"loss": 0.0193,
"step": 400
},
{
"epoch": 1.0223950233281494,
"grad_norm": 0.2680792212486267,
"learning_rate": 9.92554663077387e-05,
"loss": 0.0188,
"step": 410
},
{
"epoch": 1.0472783825816485,
"grad_norm": 0.22340402007102966,
"learning_rate": 9.918271394995935e-05,
"loss": 0.0175,
"step": 420
},
{
"epoch": 1.0721617418351477,
"grad_norm": 0.22573307156562805,
"learning_rate": 9.910660001901335e-05,
"loss": 0.0191,
"step": 430
},
{
"epoch": 1.097045101088647,
"grad_norm": 0.16532698273658752,
"learning_rate": 9.902712971718675e-05,
"loss": 0.0189,
"step": 440
},
{
"epoch": 1.1219284603421462,
"grad_norm": 0.26129794120788574,
"learning_rate": 9.894430847616915e-05,
"loss": 0.0191,
"step": 450
},
{
"epoch": 1.1468118195956454,
"grad_norm": 0.2564263939857483,
"learning_rate": 9.885814195668232e-05,
"loss": 0.0184,
"step": 460
},
{
"epoch": 1.1716951788491445,
"grad_norm": 0.269545316696167,
"learning_rate": 9.876863604809344e-05,
"loss": 0.0202,
"step": 470
},
{
"epoch": 1.196578538102644,
"grad_norm": 0.25393763184547424,
"learning_rate": 9.867579686801245e-05,
"loss": 0.0193,
"step": 480
},
{
"epoch": 1.221461897356143,
"grad_norm": 0.26436880230903625,
"learning_rate": 9.8579630761874e-05,
"loss": 0.0184,
"step": 490
},
{
"epoch": 1.2463452566096422,
"grad_norm": 0.1819346696138382,
"learning_rate": 9.848014430250367e-05,
"loss": 0.0182,
"step": 500
},
{
"epoch": 1.2712286158631416,
"grad_norm": 0.26912328600883484,
"learning_rate": 9.837734428966885e-05,
"loss": 0.0177,
"step": 510
},
{
"epoch": 1.2961119751166408,
"grad_norm": 0.32153385877609253,
"learning_rate": 9.827123774961383e-05,
"loss": 0.0181,
"step": 520
},
{
"epoch": 1.32099533437014,
"grad_norm": 0.32203471660614014,
"learning_rate": 9.816183193457968e-05,
"loss": 0.0182,
"step": 530
},
{
"epoch": 1.3458786936236393,
"grad_norm": 0.34882357716560364,
"learning_rate": 9.804913432230856e-05,
"loss": 0.0182,
"step": 540
},
{
"epoch": 1.3707620528771385,
"grad_norm": 0.3095707297325134,
"learning_rate": 9.793315261553252e-05,
"loss": 0.0178,
"step": 550
},
{
"epoch": 1.3956454121306376,
"grad_norm": 0.31017982959747314,
"learning_rate": 9.781389474144717e-05,
"loss": 0.0182,
"step": 560
},
{
"epoch": 1.4205287713841368,
"grad_norm": 0.24658547341823578,
"learning_rate": 9.76913688511698e-05,
"loss": 0.0174,
"step": 570
},
{
"epoch": 1.445412130637636,
"grad_norm": 0.23288820683956146,
"learning_rate": 9.756558331918227e-05,
"loss": 0.0172,
"step": 580
},
{
"epoch": 1.4702954898911353,
"grad_norm": 0.16752177476882935,
"learning_rate": 9.743654674275855e-05,
"loss": 0.0185,
"step": 590
},
{
"epoch": 1.4951788491446345,
"grad_norm": 0.19120067358016968,
"learning_rate": 9.730426794137727e-05,
"loss": 0.0163,
"step": 600
},
{
"epoch": 1.5200622083981337,
"grad_norm": 0.2136230617761612,
"learning_rate": 9.716875595611879e-05,
"loss": 0.0171,
"step": 610
},
{
"epoch": 1.544945567651633,
"grad_norm": 0.18993347883224487,
"learning_rate": 9.703002004904729e-05,
"loss": 0.0164,
"step": 620
},
{
"epoch": 1.5698289269051322,
"grad_norm": 0.22324898838996887,
"learning_rate": 9.688806970257773e-05,
"loss": 0.0169,
"step": 630
},
{
"epoch": 1.5947122861586314,
"grad_norm": 0.18617911636829376,
"learning_rate": 9.674291461882774e-05,
"loss": 0.0153,
"step": 640
},
{
"epoch": 1.6195956454121307,
"grad_norm": 0.1991218626499176,
"learning_rate": 9.659456471895445e-05,
"loss": 0.0158,
"step": 650
},
{
"epoch": 1.64447900466563,
"grad_norm": 0.27028852701187134,
"learning_rate": 9.644303014247648e-05,
"loss": 0.0164,
"step": 660
},
{
"epoch": 1.669362363919129,
"grad_norm": 0.21468698978424072,
"learning_rate": 9.628832124658085e-05,
"loss": 0.0159,
"step": 670
},
{
"epoch": 1.6942457231726284,
"grad_norm": 0.25870266556739807,
"learning_rate": 9.613044860541507e-05,
"loss": 0.0159,
"step": 680
},
{
"epoch": 1.7191290824261274,
"grad_norm": 0.2562198042869568,
"learning_rate": 9.596942300936445e-05,
"loss": 0.0149,
"step": 690
},
{
"epoch": 1.7440124416796268,
"grad_norm": 0.22703538835048676,
"learning_rate": 9.580525546431459e-05,
"loss": 0.0165,
"step": 700
},
{
"epoch": 1.768895800933126,
"grad_norm": 0.27203115820884705,
"learning_rate": 9.563795719089911e-05,
"loss": 0.0153,
"step": 710
},
{
"epoch": 1.793779160186625,
"grad_norm": 0.337146133184433,
"learning_rate": 9.546753962373281e-05,
"loss": 0.0175,
"step": 720
},
{
"epoch": 1.8186625194401245,
"grad_norm": 0.3714126944541931,
"learning_rate": 9.529401441062997e-05,
"loss": 0.0162,
"step": 730
},
{
"epoch": 1.8435458786936236,
"grad_norm": 0.23839302361011505,
"learning_rate": 9.511739341180842e-05,
"loss": 0.0169,
"step": 740
},
{
"epoch": 1.8684292379471228,
"grad_norm": 0.2162984013557434,
"learning_rate": 9.493768869907886e-05,
"loss": 0.0153,
"step": 750
},
{
"epoch": 1.8933125972006222,
"grad_norm": 0.25327548384666443,
"learning_rate": 9.475491255501968e-05,
"loss": 0.0149,
"step": 760
},
{
"epoch": 1.9181959564541213,
"grad_norm": 0.17730680108070374,
"learning_rate": 9.456907747213748e-05,
"loss": 0.0147,
"step": 770
},
{
"epoch": 1.9430793157076205,
"grad_norm": 0.2287522703409195,
"learning_rate": 9.438019615201336e-05,
"loss": 0.0154,
"step": 780
},
{
"epoch": 1.9679626749611199,
"grad_norm": 0.2877958118915558,
"learning_rate": 9.418828150443469e-05,
"loss": 0.0157,
"step": 790
},
{
"epoch": 1.9928460342146188,
"grad_norm": 0.272636741399765,
"learning_rate": 9.399334664651262e-05,
"loss": 0.0153,
"step": 800
},
{
"epoch": 2.0199066874027993,
"grad_norm": 0.3082719147205353,
"learning_rate": 9.379540490178581e-05,
"loss": 0.0151,
"step": 810
},
{
"epoch": 2.0447900466562987,
"grad_norm": 0.3081252872943878,
"learning_rate": 9.359446979930955e-05,
"loss": 0.0156,
"step": 820
},
{
"epoch": 2.0696734059097976,
"grad_norm": 0.25696584582328796,
"learning_rate": 9.33905550727312e-05,
"loss": 0.0157,
"step": 830
},
{
"epoch": 2.094556765163297,
"grad_norm": 0.2680334150791168,
"learning_rate": 9.318367465935142e-05,
"loss": 0.0151,
"step": 840
},
{
"epoch": 2.1194401244167964,
"grad_norm": 0.20804259181022644,
"learning_rate": 9.29738426991717e-05,
"loss": 0.0144,
"step": 850
},
{
"epoch": 2.1443234836702953,
"grad_norm": 0.2176770567893982,
"learning_rate": 9.276107353392774e-05,
"loss": 0.0144,
"step": 860
},
{
"epoch": 2.1692068429237947,
"grad_norm": 0.21425634622573853,
"learning_rate": 9.254538170610938e-05,
"loss": 0.0145,
"step": 870
},
{
"epoch": 2.194090202177294,
"grad_norm": 0.20511746406555176,
"learning_rate": 9.232678195796654e-05,
"loss": 0.0146,
"step": 880
},
{
"epoch": 2.218973561430793,
"grad_norm": 0.2533751130104065,
"learning_rate": 9.210528923050164e-05,
"loss": 0.0134,
"step": 890
},
{
"epoch": 2.2438569206842924,
"grad_norm": 0.2530650794506073,
"learning_rate": 9.188091866244834e-05,
"loss": 0.0143,
"step": 900
},
{
"epoch": 2.2687402799377914,
"grad_norm": 0.25711414217948914,
"learning_rate": 9.165368558923695e-05,
"loss": 0.0139,
"step": 910
},
{
"epoch": 2.2936236391912908,
"grad_norm": 0.226607546210289,
"learning_rate": 9.142360554194618e-05,
"loss": 0.0146,
"step": 920
},
{
"epoch": 2.31850699844479,
"grad_norm": 0.15505652129650116,
"learning_rate": 9.119069424624163e-05,
"loss": 0.014,
"step": 930
},
{
"epoch": 2.343390357698289,
"grad_norm": 0.20164167881011963,
"learning_rate": 9.0954967621301e-05,
"loss": 0.0142,
"step": 940
},
{
"epoch": 2.3682737169517885,
"grad_norm": 0.2123536318540573,
"learning_rate": 9.071644177872594e-05,
"loss": 0.0136,
"step": 950
},
{
"epoch": 2.393157076205288,
"grad_norm": 0.218880295753479,
"learning_rate": 9.047513302144095e-05,
"loss": 0.0133,
"step": 960
},
{
"epoch": 2.4180404354587868,
"grad_norm": 0.20033881068229675,
"learning_rate": 9.023105784257906e-05,
"loss": 0.013,
"step": 970
},
{
"epoch": 2.442923794712286,
"grad_norm": 0.20657172799110413,
"learning_rate": 8.998423292435454e-05,
"loss": 0.0146,
"step": 980
},
{
"epoch": 2.4678071539657855,
"grad_norm": 0.14253973960876465,
"learning_rate": 8.973467513692265e-05,
"loss": 0.0133,
"step": 990
},
{
"epoch": 2.4926905132192845,
"grad_norm": 0.20540325343608856,
"learning_rate": 8.94824015372267e-05,
"loss": 0.0155,
"step": 1000
}
],
"logging_steps": 10,
"max_steps": 4000,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 1000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}