{
"best_global_step": 1250,
"best_metric": 0.7589222033223469,
"best_model_checkpoint": "voice_emotion_classification/checkpoint-1250",
"epoch": 1.0,
"eval_steps": 500,
"global_step": 1250,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0008,
"grad_norm": 1.4005043506622314,
"learning_rate": 6.000000000000001e-07,
"loss": 2.0903,
"step": 1
},
{
"epoch": 0.008,
"grad_norm": 1.3180536031723022,
"learning_rate": 6e-06,
"loss": 2.0826,
"step": 10
},
{
"epoch": 0.016,
"grad_norm": 1.0437175035476685,
"learning_rate": 1.2e-05,
"loss": 2.0693,
"step": 20
},
{
"epoch": 0.024,
"grad_norm": 1.371071457862854,
"learning_rate": 1.8e-05,
"loss": 2.0463,
"step": 30
},
{
"epoch": 0.032,
"grad_norm": 1.51685631275177,
"learning_rate": 2.4e-05,
"loss": 2.0423,
"step": 40
},
{
"epoch": 0.04,
"grad_norm": 1.4282890558242798,
"learning_rate": 3e-05,
"loss": 1.996,
"step": 50
},
{
"epoch": 0.048,
"grad_norm": 1.860026478767395,
"learning_rate": 2.991891891891892e-05,
"loss": 1.9639,
"step": 60
},
{
"epoch": 0.056,
"grad_norm": 2.121481418609619,
"learning_rate": 2.983783783783784e-05,
"loss": 1.9517,
"step": 70
},
{
"epoch": 0.064,
"grad_norm": 2.056445837020874,
"learning_rate": 2.9756756756756758e-05,
"loss": 1.8765,
"step": 80
},
{
"epoch": 0.072,
"grad_norm": 2.3928184509277344,
"learning_rate": 2.9675675675675678e-05,
"loss": 1.8703,
"step": 90
},
{
"epoch": 0.08,
"grad_norm": 3.0733420848846436,
"learning_rate": 2.9594594594594598e-05,
"loss": 1.853,
"step": 100
},
{
"epoch": 0.088,
"grad_norm": 2.88864803314209,
"learning_rate": 2.9513513513513514e-05,
"loss": 1.7652,
"step": 110
},
{
"epoch": 0.096,
"grad_norm": 2.9048268795013428,
"learning_rate": 2.943243243243243e-05,
"loss": 1.7984,
"step": 120
},
{
"epoch": 0.104,
"grad_norm": 2.6991426944732666,
"learning_rate": 2.935135135135135e-05,
"loss": 1.6869,
"step": 130
},
{
"epoch": 0.112,
"grad_norm": 2.4570231437683105,
"learning_rate": 2.927027027027027e-05,
"loss": 1.7812,
"step": 140
},
{
"epoch": 0.12,
"grad_norm": 4.502678871154785,
"learning_rate": 2.918918918918919e-05,
"loss": 1.8444,
"step": 150
},
{
"epoch": 0.128,
"grad_norm": 3.117838144302368,
"learning_rate": 2.9108108108108108e-05,
"loss": 1.7544,
"step": 160
},
{
"epoch": 0.136,
"grad_norm": 2.812086343765259,
"learning_rate": 2.9027027027027028e-05,
"loss": 1.638,
"step": 170
},
{
"epoch": 0.144,
"grad_norm": 3.23271107673645,
"learning_rate": 2.8954054054054057e-05,
"loss": 1.7026,
"step": 180
},
{
"epoch": 0.152,
"grad_norm": 3.975172996520996,
"learning_rate": 2.8872972972972977e-05,
"loss": 1.6715,
"step": 190
},
{
"epoch": 0.16,
"grad_norm": 3.8965938091278076,
"learning_rate": 2.879189189189189e-05,
"loss": 1.6804,
"step": 200
},
{
"epoch": 0.168,
"grad_norm": 4.267274856567383,
"learning_rate": 2.871081081081081e-05,
"loss": 1.6587,
"step": 210
},
{
"epoch": 0.176,
"grad_norm": 3.524360179901123,
"learning_rate": 2.862972972972973e-05,
"loss": 1.4811,
"step": 220
},
{
"epoch": 0.184,
"grad_norm": 3.266697883605957,
"learning_rate": 2.854864864864865e-05,
"loss": 1.672,
"step": 230
},
{
"epoch": 0.192,
"grad_norm": 5.3684186935424805,
"learning_rate": 2.8467567567567567e-05,
"loss": 1.5284,
"step": 240
},
{
"epoch": 0.2,
"grad_norm": 3.898176431655884,
"learning_rate": 2.8386486486486487e-05,
"loss": 1.5774,
"step": 250
},
{
"epoch": 0.208,
"grad_norm": 3.189732074737549,
"learning_rate": 2.8305405405405407e-05,
"loss": 1.4874,
"step": 260
},
{
"epoch": 0.216,
"grad_norm": 3.274244785308838,
"learning_rate": 2.8224324324324327e-05,
"loss": 1.5098,
"step": 270
},
{
"epoch": 0.224,
"grad_norm": 5.691224098205566,
"learning_rate": 2.8143243243243244e-05,
"loss": 1.509,
"step": 280
},
{
"epoch": 0.232,
"grad_norm": 6.856773376464844,
"learning_rate": 2.8062162162162164e-05,
"loss": 1.4558,
"step": 290
},
{
"epoch": 0.24,
"grad_norm": 7.078716278076172,
"learning_rate": 2.7981081081081084e-05,
"loss": 1.5298,
"step": 300
},
{
"epoch": 0.248,
"grad_norm": 4.4305100440979,
"learning_rate": 2.79e-05,
"loss": 1.3387,
"step": 310
},
{
"epoch": 0.256,
"grad_norm": 10.400449752807617,
"learning_rate": 2.7818918918918917e-05,
"loss": 1.4501,
"step": 320
},
{
"epoch": 0.264,
"grad_norm": 5.316948890686035,
"learning_rate": 2.7737837837837837e-05,
"loss": 1.367,
"step": 330
},
{
"epoch": 0.272,
"grad_norm": 9.753177642822266,
"learning_rate": 2.7656756756756757e-05,
"loss": 1.4684,
"step": 340
},
{
"epoch": 0.28,
"grad_norm": 8.100529670715332,
"learning_rate": 2.7575675675675677e-05,
"loss": 1.4175,
"step": 350
},
{
"epoch": 0.288,
"grad_norm": 9.878854751586914,
"learning_rate": 2.7494594594594594e-05,
"loss": 1.308,
"step": 360
},
{
"epoch": 0.296,
"grad_norm": 5.865877151489258,
"learning_rate": 2.7413513513513514e-05,
"loss": 1.3035,
"step": 370
},
{
"epoch": 0.304,
"grad_norm": 7.870754241943359,
"learning_rate": 2.7332432432432434e-05,
"loss": 1.2915,
"step": 380
},
{
"epoch": 0.312,
"grad_norm": 8.517908096313477,
"learning_rate": 2.7251351351351354e-05,
"loss": 1.4318,
"step": 390
},
{
"epoch": 0.32,
"grad_norm": 4.7960309982299805,
"learning_rate": 2.717027027027027e-05,
"loss": 1.3154,
"step": 400
},
{
"epoch": 0.328,
"grad_norm": 5.629390716552734,
"learning_rate": 2.708918918918919e-05,
"loss": 1.3433,
"step": 410
},
{
"epoch": 0.336,
"grad_norm": 8.473249435424805,
"learning_rate": 2.700810810810811e-05,
"loss": 1.1474,
"step": 420
},
{
"epoch": 0.344,
"grad_norm": 3.652617931365967,
"learning_rate": 2.6927027027027028e-05,
"loss": 1.3247,
"step": 430
},
{
"epoch": 0.352,
"grad_norm": 4.9890055656433105,
"learning_rate": 2.6845945945945944e-05,
"loss": 1.3347,
"step": 440
},
{
"epoch": 0.36,
"grad_norm": 5.2355055809021,
"learning_rate": 2.6764864864864864e-05,
"loss": 1.0932,
"step": 450
},
{
"epoch": 0.368,
"grad_norm": 6.325026512145996,
"learning_rate": 2.6683783783783785e-05,
"loss": 1.4873,
"step": 460
},
{
"epoch": 0.376,
"grad_norm": 6.78115701675415,
"learning_rate": 2.6602702702702705e-05,
"loss": 1.2311,
"step": 470
},
{
"epoch": 0.384,
"grad_norm": 4.194353103637695,
"learning_rate": 2.652162162162162e-05,
"loss": 1.2493,
"step": 480
},
{
"epoch": 0.392,
"grad_norm": 3.8817057609558105,
"learning_rate": 2.644054054054054e-05,
"loss": 1.1237,
"step": 490
},
{
"epoch": 0.4,
"grad_norm": 6.7539520263671875,
"learning_rate": 2.635945945945946e-05,
"loss": 1.1135,
"step": 500
},
{
"epoch": 0.408,
"grad_norm": 9.044737815856934,
"learning_rate": 2.627837837837838e-05,
"loss": 1.2459,
"step": 510
},
{
"epoch": 0.416,
"grad_norm": 15.829017639160156,
"learning_rate": 2.6197297297297298e-05,
"loss": 1.2803,
"step": 520
},
{
"epoch": 0.424,
"grad_norm": 10.789520263671875,
"learning_rate": 2.6116216216216218e-05,
"loss": 1.1912,
"step": 530
},
{
"epoch": 0.432,
"grad_norm": 5.011368274688721,
"learning_rate": 2.6035135135135135e-05,
"loss": 1.0143,
"step": 540
},
{
"epoch": 0.44,
"grad_norm": 8.985868453979492,
"learning_rate": 2.5954054054054055e-05,
"loss": 1.118,
"step": 550
},
{
"epoch": 0.448,
"grad_norm": 6.862995147705078,
"learning_rate": 2.587297297297297e-05,
"loss": 1.1269,
"step": 560
},
{
"epoch": 0.456,
"grad_norm": 10.972336769104004,
"learning_rate": 2.579189189189189e-05,
"loss": 1.1591,
"step": 570
},
{
"epoch": 0.464,
"grad_norm": 8.179327011108398,
"learning_rate": 2.5710810810810812e-05,
"loss": 1.154,
"step": 580
},
{
"epoch": 0.472,
"grad_norm": 11.713990211486816,
"learning_rate": 2.5629729729729732e-05,
"loss": 1.0995,
"step": 590
},
{
"epoch": 0.48,
"grad_norm": 10.86710262298584,
"learning_rate": 2.554864864864865e-05,
"loss": 1.1544,
"step": 600
},
{
"epoch": 0.488,
"grad_norm": 6.228063106536865,
"learning_rate": 2.546756756756757e-05,
"loss": 1.2395,
"step": 610
},
{
"epoch": 0.496,
"grad_norm": 12.631518363952637,
"learning_rate": 2.538648648648649e-05,
"loss": 1.0992,
"step": 620
},
{
"epoch": 0.504,
"grad_norm": 7.058006763458252,
"learning_rate": 2.530540540540541e-05,
"loss": 1.194,
"step": 630
},
{
"epoch": 0.512,
"grad_norm": 5.026750087738037,
"learning_rate": 2.5224324324324325e-05,
"loss": 1.103,
"step": 640
},
{
"epoch": 0.52,
"grad_norm": 7.1134843826293945,
"learning_rate": 2.5143243243243242e-05,
"loss": 0.9427,
"step": 650
},
{
"epoch": 0.528,
"grad_norm": 7.147433280944824,
"learning_rate": 2.5062162162162162e-05,
"loss": 0.9881,
"step": 660
},
{
"epoch": 0.536,
"grad_norm": 6.535639762878418,
"learning_rate": 2.4981081081081082e-05,
"loss": 1.1143,
"step": 670
},
{
"epoch": 0.544,
"grad_norm": 10.878937721252441,
"learning_rate": 2.49e-05,
"loss": 0.8909,
"step": 680
},
{
"epoch": 0.552,
"grad_norm": 5.79094934463501,
"learning_rate": 2.481891891891892e-05,
"loss": 0.9728,
"step": 690
},
{
"epoch": 0.56,
"grad_norm": 6.935592174530029,
"learning_rate": 2.473783783783784e-05,
"loss": 1.0735,
"step": 700
},
{
"epoch": 0.568,
"grad_norm": 5.661824703216553,
"learning_rate": 2.465675675675676e-05,
"loss": 1.0012,
"step": 710
},
{
"epoch": 0.576,
"grad_norm": 13.233421325683594,
"learning_rate": 2.4575675675675676e-05,
"loss": 1.0315,
"step": 720
},
{
"epoch": 0.584,
"grad_norm": 9.292459487915039,
"learning_rate": 2.4494594594594596e-05,
"loss": 0.9547,
"step": 730
},
{
"epoch": 0.592,
"grad_norm": 13.138367652893066,
"learning_rate": 2.442162162162162e-05,
"loss": 0.9379,
"step": 740
},
{
"epoch": 0.6,
"grad_norm": 13.352531433105469,
"learning_rate": 2.434054054054054e-05,
"loss": 0.9484,
"step": 750
},
{
"epoch": 0.608,
"grad_norm": 11.993139266967773,
"learning_rate": 2.4259459459459458e-05,
"loss": 1.1064,
"step": 760
},
{
"epoch": 0.616,
"grad_norm": 12.132452011108398,
"learning_rate": 2.4178378378378378e-05,
"loss": 1.1363,
"step": 770
},
{
"epoch": 0.624,
"grad_norm": 13.944737434387207,
"learning_rate": 2.4097297297297298e-05,
"loss": 0.9835,
"step": 780
},
{
"epoch": 0.632,
"grad_norm": 6.077609062194824,
"learning_rate": 2.4016216216216218e-05,
"loss": 0.8391,
"step": 790
},
{
"epoch": 0.64,
"grad_norm": 7.873855113983154,
"learning_rate": 2.3935135135135135e-05,
"loss": 0.7772,
"step": 800
},
{
"epoch": 0.648,
"grad_norm": 13.312115669250488,
"learning_rate": 2.3854054054054055e-05,
"loss": 1.0117,
"step": 810
},
{
"epoch": 0.656,
"grad_norm": 9.016510963439941,
"learning_rate": 2.3772972972972975e-05,
"loss": 0.9353,
"step": 820
},
{
"epoch": 0.664,
"grad_norm": 8.618375778198242,
"learning_rate": 2.3691891891891895e-05,
"loss": 0.9598,
"step": 830
},
{
"epoch": 0.672,
"grad_norm": 10.867205619812012,
"learning_rate": 2.361081081081081e-05,
"loss": 0.8726,
"step": 840
},
{
"epoch": 0.68,
"grad_norm": 13.182415962219238,
"learning_rate": 2.3529729729729728e-05,
"loss": 0.9202,
"step": 850
},
{
"epoch": 0.688,
"grad_norm": 12.405129432678223,
"learning_rate": 2.3448648648648648e-05,
"loss": 0.8795,
"step": 860
},
{
"epoch": 0.696,
"grad_norm": 8.207524299621582,
"learning_rate": 2.3367567567567568e-05,
"loss": 0.8015,
"step": 870
},
{
"epoch": 0.704,
"grad_norm": 15.442817687988281,
"learning_rate": 2.3286486486486485e-05,
"loss": 0.9932,
"step": 880
},
{
"epoch": 0.712,
"grad_norm": 13.388226509094238,
"learning_rate": 2.3205405405405405e-05,
"loss": 0.87,
"step": 890
},
{
"epoch": 0.72,
"grad_norm": 8.635920524597168,
"learning_rate": 2.3124324324324325e-05,
"loss": 0.7842,
"step": 900
},
{
"epoch": 0.728,
"grad_norm": 11.66073989868164,
"learning_rate": 2.3043243243243245e-05,
"loss": 0.9023,
"step": 910
},
{
"epoch": 0.736,
"grad_norm": 12.954612731933594,
"learning_rate": 2.2962162162162162e-05,
"loss": 1.0076,
"step": 920
},
{
"epoch": 0.744,
"grad_norm": 11.18680191040039,
"learning_rate": 2.2881081081081082e-05,
"loss": 1.1349,
"step": 930
},
{
"epoch": 0.752,
"grad_norm": 8.514711380004883,
"learning_rate": 2.2800000000000002e-05,
"loss": 0.9604,
"step": 940
},
{
"epoch": 0.76,
"grad_norm": 4.436418056488037,
"learning_rate": 2.2718918918918922e-05,
"loss": 0.9632,
"step": 950
},
{
"epoch": 0.768,
"grad_norm": 10.213781356811523,
"learning_rate": 2.263783783783784e-05,
"loss": 0.8139,
"step": 960
},
{
"epoch": 0.776,
"grad_norm": 9.987252235412598,
"learning_rate": 2.2556756756756755e-05,
"loss": 0.8276,
"step": 970
},
{
"epoch": 0.784,
"grad_norm": 12.511467933654785,
"learning_rate": 2.2475675675675675e-05,
"loss": 0.8709,
"step": 980
},
{
"epoch": 0.792,
"grad_norm": 8.908098220825195,
"learning_rate": 2.2394594594594595e-05,
"loss": 0.8812,
"step": 990
},
{
"epoch": 0.8,
"grad_norm": 10.62246322631836,
"learning_rate": 2.2313513513513512e-05,
"loss": 0.9733,
"step": 1000
},
{
"epoch": 0.808,
"grad_norm": 14.651544570922852,
"learning_rate": 2.2232432432432432e-05,
"loss": 1.0309,
"step": 1010
},
{
"epoch": 0.816,
"grad_norm": 19.1525936126709,
"learning_rate": 2.2151351351351352e-05,
"loss": 0.8808,
"step": 1020
},
{
"epoch": 0.824,
"grad_norm": 7.289106369018555,
"learning_rate": 2.2070270270270272e-05,
"loss": 0.9126,
"step": 1030
},
{
"epoch": 0.832,
"grad_norm": 5.375001907348633,
"learning_rate": 2.198918918918919e-05,
"loss": 0.847,
"step": 1040
},
{
"epoch": 0.84,
"grad_norm": 8.623431205749512,
"learning_rate": 2.190810810810811e-05,
"loss": 0.9139,
"step": 1050
},
{
"epoch": 0.848,
"grad_norm": 6.639071941375732,
"learning_rate": 2.182702702702703e-05,
"loss": 0.9345,
"step": 1060
},
{
"epoch": 0.856,
"grad_norm": 7.635943412780762,
"learning_rate": 2.174594594594595e-05,
"loss": 0.8134,
"step": 1070
},
{
"epoch": 0.864,
"grad_norm": 12.048315048217773,
"learning_rate": 2.1664864864864862e-05,
"loss": 0.6728,
"step": 1080
},
{
"epoch": 0.872,
"grad_norm": 13.869949340820312,
"learning_rate": 2.1583783783783783e-05,
"loss": 0.8256,
"step": 1090
},
{
"epoch": 0.88,
"grad_norm": 26.233325958251953,
"learning_rate": 2.1502702702702703e-05,
"loss": 0.7044,
"step": 1100
},
{
"epoch": 0.888,
"grad_norm": 7.98716926574707,
"learning_rate": 2.1421621621621623e-05,
"loss": 0.7398,
"step": 1110
},
{
"epoch": 0.896,
"grad_norm": 13.682205200195312,
"learning_rate": 2.134054054054054e-05,
"loss": 0.7522,
"step": 1120
},
{
"epoch": 0.904,
"grad_norm": 9.086796760559082,
"learning_rate": 2.125945945945946e-05,
"loss": 0.8574,
"step": 1130
},
{
"epoch": 0.912,
"grad_norm": 10.3043851852417,
"learning_rate": 2.117837837837838e-05,
"loss": 0.8005,
"step": 1140
},
{
"epoch": 0.92,
"grad_norm": 12.477950096130371,
"learning_rate": 2.10972972972973e-05,
"loss": 0.8436,
"step": 1150
},
{
"epoch": 0.928,
"grad_norm": 16.634178161621094,
"learning_rate": 2.1016216216216216e-05,
"loss": 0.6515,
"step": 1160
},
{
"epoch": 0.936,
"grad_norm": 11.066425323486328,
"learning_rate": 2.0935135135135136e-05,
"loss": 0.8689,
"step": 1170
},
{
"epoch": 0.944,
"grad_norm": 19.47179412841797,
"learning_rate": 2.0854054054054056e-05,
"loss": 0.6605,
"step": 1180
},
{
"epoch": 0.952,
"grad_norm": 3.917236804962158,
"learning_rate": 2.0772972972972973e-05,
"loss": 0.6826,
"step": 1190
},
{
"epoch": 0.96,
"grad_norm": 16.43979263305664,
"learning_rate": 2.069189189189189e-05,
"loss": 0.7731,
"step": 1200
},
{
"epoch": 0.968,
"grad_norm": 6.7848711013793945,
"learning_rate": 2.061081081081081e-05,
"loss": 0.705,
"step": 1210
},
{
"epoch": 0.976,
"grad_norm": 7.472936153411865,
"learning_rate": 2.052972972972973e-05,
"loss": 0.7663,
"step": 1220
},
{
"epoch": 0.984,
"grad_norm": 5.729743957519531,
"learning_rate": 2.044864864864865e-05,
"loss": 0.9337,
"step": 1230
},
{
"epoch": 0.992,
"grad_norm": 6.306894302368164,
"learning_rate": 2.0367567567567567e-05,
"loss": 0.7655,
"step": 1240
},
{
"epoch": 1.0,
"grad_norm": 17.98261260986328,
"learning_rate": 2.0286486486486487e-05,
"loss": 0.8202,
"step": 1250
},
{
"epoch": 1.0,
"eval_accuracy": 0.7668834417208604,
"eval_f1": 0.7589222033223469,
"eval_loss": 0.7338727712631226,
"eval_model_preparation_time": 0.0029,
"eval_runtime": 59.9403,
"eval_samples_per_second": 33.35,
"eval_steps_per_second": 4.171,
"step": 1250
}
],
"logging_steps": 10,
"max_steps": 3750,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 4.4103920295511066e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}