{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.0512,
"eval_steps": 1000,
"global_step": 800,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.00064,
"grad_norm": 1.8688431978225708,
"learning_rate": 3.840409643695328e-08,
"loss": 0.7168,
"step": 10
},
{
"epoch": 0.00128,
"grad_norm": 1.835353970527649,
"learning_rate": 8.10753147002347e-08,
"loss": 0.7179,
"step": 20
},
{
"epoch": 0.00192,
"grad_norm": 1.2541388273239136,
"learning_rate": 1.2374653296351612e-07,
"loss": 0.7177,
"step": 30
},
{
"epoch": 0.00256,
"grad_norm": 1.1381633281707764,
"learning_rate": 1.6641775122679754e-07,
"loss": 0.7312,
"step": 40
},
{
"epoch": 0.0032,
"grad_norm": 1.6313824653625488,
"learning_rate": 2.0908896949007894e-07,
"loss": 0.7167,
"step": 50
},
{
"epoch": 0.00384,
"grad_norm": 1.092464566230774,
"learning_rate": 2.517601877533604e-07,
"loss": 0.7201,
"step": 60
},
{
"epoch": 0.00448,
"grad_norm": 1.164099097251892,
"learning_rate": 2.944314060166418e-07,
"loss": 0.7195,
"step": 70
},
{
"epoch": 0.00512,
"grad_norm": 1.9283920526504517,
"learning_rate": 3.371026242799232e-07,
"loss": 0.7223,
"step": 80
},
{
"epoch": 0.00576,
"grad_norm": 1.8716129064559937,
"learning_rate": 3.7977384254320464e-07,
"loss": 0.7151,
"step": 90
},
{
"epoch": 0.0064,
"grad_norm": 1.5499262809753418,
"learning_rate": 4.22445060806486e-07,
"loss": 0.7211,
"step": 100
},
{
"epoch": 0.00704,
"grad_norm": 1.375114917755127,
"learning_rate": 4.651162790697675e-07,
"loss": 0.7247,
"step": 110
},
{
"epoch": 0.00768,
"grad_norm": 1.384915828704834,
"learning_rate": 5.077874973330489e-07,
"loss": 0.7156,
"step": 120
},
{
"epoch": 0.00832,
"grad_norm": 1.6376659870147705,
"learning_rate": 5.504587155963304e-07,
"loss": 0.7196,
"step": 130
},
{
"epoch": 0.00896,
"grad_norm": 1.709489345550537,
"learning_rate": 5.931299338596117e-07,
"loss": 0.7215,
"step": 140
},
{
"epoch": 0.0096,
"grad_norm": 1.350512146949768,
"learning_rate": 6.358011521228932e-07,
"loss": 0.71,
"step": 150
},
{
"epoch": 0.01024,
"grad_norm": 2.33050537109375,
"learning_rate": 6.784723703861745e-07,
"loss": 0.7191,
"step": 160
},
{
"epoch": 0.01088,
"grad_norm": 1.0042874813079834,
"learning_rate": 7.21143588649456e-07,
"loss": 0.72,
"step": 170
},
{
"epoch": 0.01152,
"grad_norm": 1.1835744380950928,
"learning_rate": 7.638148069127374e-07,
"loss": 0.7122,
"step": 180
},
{
"epoch": 0.01216,
"grad_norm": 1.949506402015686,
"learning_rate": 8.064860251760189e-07,
"loss": 0.7091,
"step": 190
},
{
"epoch": 0.0128,
"grad_norm": 1.139626383781433,
"learning_rate": 8.491572434393003e-07,
"loss": 0.7046,
"step": 200
},
{
"epoch": 0.01344,
"grad_norm": 1.7734779119491577,
"learning_rate": 8.918284617025817e-07,
"loss": 0.7135,
"step": 210
},
{
"epoch": 0.01408,
"grad_norm": 1.3442974090576172,
"learning_rate": 9.344996799658632e-07,
"loss": 0.7127,
"step": 220
},
{
"epoch": 0.01472,
"grad_norm": 1.6148335933685303,
"learning_rate": 9.771708982291445e-07,
"loss": 0.7168,
"step": 230
},
{
"epoch": 0.01536,
"grad_norm": 0.9220213294029236,
"learning_rate": 1.0198421164924258e-06,
"loss": 0.7109,
"step": 240
},
{
"epoch": 0.016,
"grad_norm": 1.5631815195083618,
"learning_rate": 1.0625133347557074e-06,
"loss": 0.7149,
"step": 250
},
{
"epoch": 0.01664,
"grad_norm": 1.5052822828292847,
"learning_rate": 1.1051845530189888e-06,
"loss": 0.7112,
"step": 260
},
{
"epoch": 0.01728,
"grad_norm": 1.6714903116226196,
"learning_rate": 1.1478557712822702e-06,
"loss": 0.7128,
"step": 270
},
{
"epoch": 0.01792,
"grad_norm": 1.6667262315750122,
"learning_rate": 1.1905269895455517e-06,
"loss": 0.7091,
"step": 280
},
{
"epoch": 0.01856,
"grad_norm": 1.6861907243728638,
"learning_rate": 1.233198207808833e-06,
"loss": 0.7078,
"step": 290
},
{
"epoch": 0.0192,
"grad_norm": 1.6259212493896484,
"learning_rate": 1.2758694260721145e-06,
"loss": 0.7087,
"step": 300
},
{
"epoch": 0.01984,
"grad_norm": 1.207320213317871,
"learning_rate": 1.318540644335396e-06,
"loss": 0.7087,
"step": 310
},
{
"epoch": 0.02048,
"grad_norm": 1.087124228477478,
"learning_rate": 1.3612118625986772e-06,
"loss": 0.7174,
"step": 320
},
{
"epoch": 0.02112,
"grad_norm": 1.5668721199035645,
"learning_rate": 1.4038830808619588e-06,
"loss": 0.7066,
"step": 330
},
{
"epoch": 0.02176,
"grad_norm": 1.5332859754562378,
"learning_rate": 1.4465542991252401e-06,
"loss": 0.7115,
"step": 340
},
{
"epoch": 0.0224,
"grad_norm": 2.092994451522827,
"learning_rate": 1.4892255173885215e-06,
"loss": 0.7085,
"step": 350
},
{
"epoch": 0.02304,
"grad_norm": 1.266851544380188,
"learning_rate": 1.531896735651803e-06,
"loss": 0.7091,
"step": 360
},
{
"epoch": 0.02368,
"grad_norm": 1.290616512298584,
"learning_rate": 1.5745679539150842e-06,
"loss": 0.7103,
"step": 370
},
{
"epoch": 0.02432,
"grad_norm": 1.2045000791549683,
"learning_rate": 1.6172391721783658e-06,
"loss": 0.705,
"step": 380
},
{
"epoch": 0.02496,
"grad_norm": 1.828832983970642,
"learning_rate": 1.6599103904416472e-06,
"loss": 0.7038,
"step": 390
},
{
"epoch": 0.0256,
"grad_norm": 1.7793196439743042,
"learning_rate": 1.7025816087049288e-06,
"loss": 0.7085,
"step": 400
},
{
"epoch": 0.02624,
"grad_norm": 0.8949472904205322,
"learning_rate": 1.7452528269682101e-06,
"loss": 0.7045,
"step": 410
},
{
"epoch": 0.02688,
"grad_norm": 1.3071945905685425,
"learning_rate": 1.7879240452314913e-06,
"loss": 0.702,
"step": 420
},
{
"epoch": 0.02752,
"grad_norm": 1.7497148513793945,
"learning_rate": 1.8305952634947729e-06,
"loss": 0.7073,
"step": 430
},
{
"epoch": 0.02816,
"grad_norm": 1.0856297016143799,
"learning_rate": 1.8732664817580542e-06,
"loss": 0.7008,
"step": 440
},
{
"epoch": 0.0288,
"grad_norm": 1.369019627571106,
"learning_rate": 1.915937700021336e-06,
"loss": 0.7059,
"step": 450
},
{
"epoch": 0.02944,
"grad_norm": 2.0961010456085205,
"learning_rate": 1.958608918284617e-06,
"loss": 0.6952,
"step": 460
},
{
"epoch": 0.03008,
"grad_norm": 1.6076347827911377,
"learning_rate": 2.0012801365478988e-06,
"loss": 0.7074,
"step": 470
},
{
"epoch": 0.03072,
"grad_norm": 1.3129311800003052,
"learning_rate": 2.04395135481118e-06,
"loss": 0.6994,
"step": 480
},
{
"epoch": 0.03136,
"grad_norm": 1.3621476888656616,
"learning_rate": 2.0866225730744615e-06,
"loss": 0.696,
"step": 490
},
{
"epoch": 0.032,
"grad_norm": 0.9786806106567383,
"learning_rate": 2.129293791337743e-06,
"loss": 0.7011,
"step": 500
},
{
"epoch": 0.03264,
"grad_norm": 1.4611176252365112,
"learning_rate": 2.1719650096010242e-06,
"loss": 0.699,
"step": 510
},
{
"epoch": 0.03328,
"grad_norm": 1.0675945281982422,
"learning_rate": 2.214636227864306e-06,
"loss": 0.6906,
"step": 520
},
{
"epoch": 0.03392,
"grad_norm": 1.6564017534255981,
"learning_rate": 2.257307446127587e-06,
"loss": 0.6923,
"step": 530
},
{
"epoch": 0.03456,
"grad_norm": 1.228119134902954,
"learning_rate": 2.2999786643908685e-06,
"loss": 0.6928,
"step": 540
},
{
"epoch": 0.0352,
"grad_norm": 1.9868593215942383,
"learning_rate": 2.34264988265415e-06,
"loss": 0.6912,
"step": 550
},
{
"epoch": 0.03584,
"grad_norm": 1.2531176805496216,
"learning_rate": 2.3853211009174317e-06,
"loss": 0.6923,
"step": 560
},
{
"epoch": 0.03648,
"grad_norm": 1.413602352142334,
"learning_rate": 2.427992319180713e-06,
"loss": 0.6993,
"step": 570
},
{
"epoch": 0.03712,
"grad_norm": 0.9655390977859497,
"learning_rate": 2.470663537443994e-06,
"loss": 0.7017,
"step": 580
},
{
"epoch": 0.03776,
"grad_norm": 1.962438941001892,
"learning_rate": 2.5133347557072756e-06,
"loss": 0.6933,
"step": 590
},
{
"epoch": 0.0384,
"grad_norm": 1.1099931001663208,
"learning_rate": 2.556005973970557e-06,
"loss": 0.6925,
"step": 600
},
{
"epoch": 0.03904,
"grad_norm": 1.1766624450683594,
"learning_rate": 2.5986771922338383e-06,
"loss": 0.6894,
"step": 610
},
{
"epoch": 0.03968,
"grad_norm": 1.574353814125061,
"learning_rate": 2.64134841049712e-06,
"loss": 0.6923,
"step": 620
},
{
"epoch": 0.04032,
"grad_norm": 1.194074273109436,
"learning_rate": 2.6840196287604015e-06,
"loss": 0.6855,
"step": 630
},
{
"epoch": 0.04096,
"grad_norm": 1.1750593185424805,
"learning_rate": 2.7266908470236826e-06,
"loss": 0.7002,
"step": 640
},
{
"epoch": 0.0416,
"grad_norm": 1.3165347576141357,
"learning_rate": 2.7693620652869642e-06,
"loss": 0.6893,
"step": 650
},
{
"epoch": 0.04224,
"grad_norm": 1.1079384088516235,
"learning_rate": 2.812033283550246e-06,
"loss": 0.6852,
"step": 660
},
{
"epoch": 0.04288,
"grad_norm": 1.231327772140503,
"learning_rate": 2.8547045018135274e-06,
"loss": 0.695,
"step": 670
},
{
"epoch": 0.04352,
"grad_norm": 1.966036319732666,
"learning_rate": 2.897375720076808e-06,
"loss": 0.6833,
"step": 680
},
{
"epoch": 0.04416,
"grad_norm": 1.0460131168365479,
"learning_rate": 2.9400469383400897e-06,
"loss": 0.6802,
"step": 690
},
{
"epoch": 0.0448,
"grad_norm": 1.2445200681686401,
"learning_rate": 2.9827181566033713e-06,
"loss": 0.6883,
"step": 700
},
{
"epoch": 0.04544,
"grad_norm": 1.1759636402130127,
"learning_rate": 3.025389374866653e-06,
"loss": 0.682,
"step": 710
},
{
"epoch": 0.04608,
"grad_norm": 1.2478561401367188,
"learning_rate": 3.068060593129934e-06,
"loss": 0.6802,
"step": 720
},
{
"epoch": 0.04672,
"grad_norm": 0.8945108652114868,
"learning_rate": 3.1107318113932156e-06,
"loss": 0.6873,
"step": 730
},
{
"epoch": 0.04736,
"grad_norm": 1.6810317039489746,
"learning_rate": 3.153403029656497e-06,
"loss": 0.6828,
"step": 740
},
{
"epoch": 0.048,
"grad_norm": 2.882283926010132,
"learning_rate": 3.1960742479197783e-06,
"loss": 0.6797,
"step": 750
},
{
"epoch": 0.04864,
"grad_norm": 1.575766921043396,
"learning_rate": 3.23874546618306e-06,
"loss": 0.6826,
"step": 760
},
{
"epoch": 0.04928,
"grad_norm": 1.8044737577438354,
"learning_rate": 3.2814166844463415e-06,
"loss": 0.6837,
"step": 770
},
{
"epoch": 0.04992,
"grad_norm": 1.4755513668060303,
"learning_rate": 3.324087902709623e-06,
"loss": 0.6815,
"step": 780
},
{
"epoch": 0.05056,
"grad_norm": 2.0735654830932617,
"learning_rate": 3.3667591209729038e-06,
"loss": 0.6773,
"step": 790
},
{
"epoch": 0.0512,
"grad_norm": 0.9823655486106873,
"learning_rate": 3.4094303392361854e-06,
"loss": 0.6689,
"step": 800
}
],
"logging_steps": 10,
"max_steps": 46875,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 200,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 3,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 6731943018000000.0,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}