{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 8.0,
"eval_steps": 50,
"global_step": 1104,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.007246376811594203,
"grad_norm": 1.9687033891677856,
"learning_rate": 0.0,
"loss": 19.2682,
"step": 1
},
{
"epoch": 0.07246376811594203,
"grad_norm": 1.703112006187439,
"learning_rate": 1.8e-05,
"loss": 12.4529,
"step": 10
},
{
"epoch": 0.14492753623188406,
"grad_norm": 3.6209306716918945,
"learning_rate": 3.8e-05,
"loss": 17.5169,
"step": 20
},
{
"epoch": 0.21739130434782608,
"grad_norm": 2.09627366065979,
"learning_rate": 5.8e-05,
"loss": 16.0619,
"step": 30
},
{
"epoch": 0.2898550724637681,
"grad_norm": 1.9405275583267212,
"learning_rate": 7.800000000000001e-05,
"loss": 20.228,
"step": 40
},
{
"epoch": 0.36231884057971014,
"grad_norm": 8.32076644897461,
"learning_rate": 9.8e-05,
"loss": 16.0457,
"step": 50
},
{
"epoch": 0.43478260869565216,
"grad_norm": 1.1994603872299194,
"learning_rate": 9.9146110056926e-05,
"loss": 15.9109,
"step": 60
},
{
"epoch": 0.5072463768115942,
"grad_norm": 1.3328566551208496,
"learning_rate": 9.819734345351043e-05,
"loss": 15.6408,
"step": 70
},
{
"epoch": 0.5797101449275363,
"grad_norm": 6.663642406463623,
"learning_rate": 9.724857685009489e-05,
"loss": 13.4492,
"step": 80
},
{
"epoch": 0.6521739130434783,
"grad_norm": 4.154050350189209,
"learning_rate": 9.629981024667933e-05,
"loss": 16.0048,
"step": 90
},
{
"epoch": 0.7246376811594203,
"grad_norm": 12.047609329223633,
"learning_rate": 9.535104364326376e-05,
"loss": 42.9753,
"step": 100
},
{
"epoch": 0.7971014492753623,
"grad_norm": 4.798079490661621,
"learning_rate": 9.44022770398482e-05,
"loss": 17.2692,
"step": 110
},
{
"epoch": 0.8695652173913043,
"grad_norm": 6.930491924285889,
"learning_rate": 9.345351043643265e-05,
"loss": 13.9401,
"step": 120
},
{
"epoch": 0.9420289855072463,
"grad_norm": 7.896566867828369,
"learning_rate": 9.250474383301708e-05,
"loss": 16.1242,
"step": 130
},
{
"epoch": 1.0144927536231885,
"grad_norm": 10.504218101501465,
"learning_rate": 9.155597722960152e-05,
"loss": 9.8417,
"step": 140
},
{
"epoch": 1.0869565217391304,
"grad_norm": 9.594452857971191,
"learning_rate": 9.060721062618596e-05,
"loss": 12.6395,
"step": 150
},
{
"epoch": 1.1594202898550725,
"grad_norm": 16.113290786743164,
"learning_rate": 8.96584440227704e-05,
"loss": 11.6141,
"step": 160
},
{
"epoch": 1.2318840579710144,
"grad_norm": 10.904824256896973,
"learning_rate": 8.870967741935484e-05,
"loss": 15.8041,
"step": 170
},
{
"epoch": 1.3043478260869565,
"grad_norm": 4.2903852462768555,
"learning_rate": 8.776091081593929e-05,
"loss": 10.5756,
"step": 180
},
{
"epoch": 1.3768115942028984,
"grad_norm": 3.3363027572631836,
"learning_rate": 8.681214421252373e-05,
"loss": 12.2632,
"step": 190
},
{
"epoch": 1.4492753623188406,
"grad_norm": 5.582556247711182,
"learning_rate": 8.586337760910817e-05,
"loss": 10.2026,
"step": 200
},
{
"epoch": 1.5217391304347827,
"grad_norm": 3.090460777282715,
"learning_rate": 8.49146110056926e-05,
"loss": 9.8113,
"step": 210
},
{
"epoch": 1.5942028985507246,
"grad_norm": 20.330148696899414,
"learning_rate": 8.396584440227704e-05,
"loss": 9.2478,
"step": 220
},
{
"epoch": 1.6666666666666665,
"grad_norm": 16.418106079101562,
"learning_rate": 8.301707779886149e-05,
"loss": 11.377,
"step": 230
},
{
"epoch": 1.7391304347826086,
"grad_norm": 1.9852267503738403,
"learning_rate": 8.206831119544592e-05,
"loss": 9.8674,
"step": 240
},
{
"epoch": 1.8115942028985508,
"grad_norm": 4.2618489265441895,
"learning_rate": 8.111954459203036e-05,
"loss": 10.9124,
"step": 250
},
{
"epoch": 1.8840579710144927,
"grad_norm": 5.334084510803223,
"learning_rate": 8.017077798861481e-05,
"loss": 29.9208,
"step": 260
},
{
"epoch": 1.9565217391304348,
"grad_norm": 7.662145614624023,
"learning_rate": 7.922201138519924e-05,
"loss": 9.4491,
"step": 270
},
{
"epoch": 2.028985507246377,
"grad_norm": 2.990098476409912,
"learning_rate": 7.827324478178368e-05,
"loss": 12.7051,
"step": 280
},
{
"epoch": 2.101449275362319,
"grad_norm": 11.343267440795898,
"learning_rate": 7.732447817836812e-05,
"loss": 10.1094,
"step": 290
},
{
"epoch": 2.1739130434782608,
"grad_norm": 4.076178550720215,
"learning_rate": 7.637571157495257e-05,
"loss": 10.8557,
"step": 300
},
{
"epoch": 2.246376811594203,
"grad_norm": 3.965240240097046,
"learning_rate": 7.542694497153701e-05,
"loss": 10.2597,
"step": 310
},
{
"epoch": 2.318840579710145,
"grad_norm": 16.37909507751465,
"learning_rate": 7.447817836812145e-05,
"loss": 9.0119,
"step": 320
},
{
"epoch": 2.391304347826087,
"grad_norm": 3.6150219440460205,
"learning_rate": 7.352941176470589e-05,
"loss": 26.6018,
"step": 330
},
{
"epoch": 2.463768115942029,
"grad_norm": 19.880943298339844,
"learning_rate": 7.258064516129033e-05,
"loss": 9.6189,
"step": 340
},
{
"epoch": 2.536231884057971,
"grad_norm": 3.5458195209503174,
"learning_rate": 7.163187855787477e-05,
"loss": 8.4988,
"step": 350
},
{
"epoch": 2.608695652173913,
"grad_norm": 13.963035583496094,
"learning_rate": 7.06831119544592e-05,
"loss": 11.8011,
"step": 360
},
{
"epoch": 2.681159420289855,
"grad_norm": 19.048301696777344,
"learning_rate": 6.973434535104365e-05,
"loss": 10.9278,
"step": 370
},
{
"epoch": 2.753623188405797,
"grad_norm": 4.992802143096924,
"learning_rate": 6.878557874762808e-05,
"loss": 8.919,
"step": 380
},
{
"epoch": 2.8260869565217392,
"grad_norm": 11.363718032836914,
"learning_rate": 6.783681214421252e-05,
"loss": 13.1761,
"step": 390
},
{
"epoch": 2.898550724637681,
"grad_norm": 5.374908924102783,
"learning_rate": 6.688804554079696e-05,
"loss": 10.3222,
"step": 400
},
{
"epoch": 2.971014492753623,
"grad_norm": 8.949808120727539,
"learning_rate": 6.59392789373814e-05,
"loss": 10.7587,
"step": 410
},
{
"epoch": 3.0434782608695654,
"grad_norm": 4.645139217376709,
"learning_rate": 6.499051233396585e-05,
"loss": 7.0093,
"step": 420
},
{
"epoch": 3.1159420289855073,
"grad_norm": 1.7357378005981445,
"learning_rate": 6.404174573055029e-05,
"loss": 10.5791,
"step": 430
},
{
"epoch": 3.1884057971014492,
"grad_norm": 6.632950305938721,
"learning_rate": 6.309297912713473e-05,
"loss": 8.7916,
"step": 440
},
{
"epoch": 3.260869565217391,
"grad_norm": 3.3891420364379883,
"learning_rate": 6.214421252371917e-05,
"loss": 10.3499,
"step": 450
},
{
"epoch": 3.3333333333333335,
"grad_norm": 10.14654541015625,
"learning_rate": 6.119544592030361e-05,
"loss": 8.5833,
"step": 460
},
{
"epoch": 3.4057971014492754,
"grad_norm": 3.27583384513855,
"learning_rate": 6.0246679316888046e-05,
"loss": 28.3443,
"step": 470
},
{
"epoch": 3.4782608695652173,
"grad_norm": 16.218280792236328,
"learning_rate": 5.9297912713472494e-05,
"loss": 11.3033,
"step": 480
},
{
"epoch": 3.550724637681159,
"grad_norm": 5.851198196411133,
"learning_rate": 5.834914611005693e-05,
"loss": 7.5228,
"step": 490
},
{
"epoch": 3.6231884057971016,
"grad_norm": 13.043642044067383,
"learning_rate": 5.740037950664137e-05,
"loss": 11.608,
"step": 500
},
{
"epoch": 3.6956521739130435,
"grad_norm": 3.651071071624756,
"learning_rate": 5.645161290322582e-05,
"loss": 9.3775,
"step": 510
},
{
"epoch": 3.7681159420289854,
"grad_norm": 3.946401357650757,
"learning_rate": 5.550284629981025e-05,
"loss": 9.9621,
"step": 520
},
{
"epoch": 3.8405797101449277,
"grad_norm": 5.363879680633545,
"learning_rate": 5.4554079696394686e-05,
"loss": 7.2515,
"step": 530
},
{
"epoch": 3.9130434782608696,
"grad_norm": 18.565500259399414,
"learning_rate": 5.360531309297913e-05,
"loss": 9.9015,
"step": 540
},
{
"epoch": 3.9855072463768115,
"grad_norm": 2.068690299987793,
"learning_rate": 5.2656546489563575e-05,
"loss": 8.6521,
"step": 550
},
{
"epoch": 4.057971014492754,
"grad_norm": 8.377117156982422,
"learning_rate": 5.170777988614801e-05,
"loss": 11.6352,
"step": 560
},
{
"epoch": 4.130434782608695,
"grad_norm": 6.822054386138916,
"learning_rate": 5.075901328273245e-05,
"loss": 8.9412,
"step": 570
},
{
"epoch": 4.202898550724638,
"grad_norm": 17.24335479736328,
"learning_rate": 4.981024667931689e-05,
"loss": 9.5539,
"step": 580
},
{
"epoch": 4.27536231884058,
"grad_norm": 13.434685707092285,
"learning_rate": 4.8861480075901326e-05,
"loss": 10.8367,
"step": 590
},
{
"epoch": 4.3478260869565215,
"grad_norm": 17.375398635864258,
"learning_rate": 4.791271347248577e-05,
"loss": 10.818,
"step": 600
},
{
"epoch": 4.420289855072464,
"grad_norm": 4.53552770614624,
"learning_rate": 4.6963946869070216e-05,
"loss": 7.442,
"step": 610
},
{
"epoch": 4.492753623188406,
"grad_norm": 4.74644136428833,
"learning_rate": 4.601518026565465e-05,
"loss": 9.6362,
"step": 620
},
{
"epoch": 4.565217391304348,
"grad_norm": 3.9156153202056885,
"learning_rate": 4.506641366223909e-05,
"loss": 11.4203,
"step": 630
},
{
"epoch": 4.63768115942029,
"grad_norm": 18.31324005126953,
"learning_rate": 4.411764705882353e-05,
"loss": 8.557,
"step": 640
},
{
"epoch": 4.710144927536232,
"grad_norm": 4.924636363983154,
"learning_rate": 4.3168880455407974e-05,
"loss": 9.608,
"step": 650
},
{
"epoch": 4.782608695652174,
"grad_norm": 5.3758769035339355,
"learning_rate": 4.222011385199241e-05,
"loss": 8.9764,
"step": 660
},
{
"epoch": 4.855072463768116,
"grad_norm": 8.584567070007324,
"learning_rate": 4.1271347248576856e-05,
"loss": 9.621,
"step": 670
},
{
"epoch": 4.927536231884058,
"grad_norm": 4.82368803024292,
"learning_rate": 4.032258064516129e-05,
"loss": 24.5753,
"step": 680
},
{
"epoch": 5.0,
"grad_norm": 2.3411295413970947,
"learning_rate": 3.937381404174573e-05,
"loss": 7.9662,
"step": 690
},
{
"epoch": 5.072463768115942,
"grad_norm": 3.99580979347229,
"learning_rate": 3.842504743833017e-05,
"loss": 6.8737,
"step": 700
},
{
"epoch": 5.144927536231884,
"grad_norm": 24.173742294311523,
"learning_rate": 3.7476280834914614e-05,
"loss": 11.8038,
"step": 710
},
{
"epoch": 5.217391304347826,
"grad_norm": 25.25533676147461,
"learning_rate": 3.6527514231499055e-05,
"loss": 11.9047,
"step": 720
},
{
"epoch": 5.2898550724637685,
"grad_norm": 4.739432334899902,
"learning_rate": 3.557874762808349e-05,
"loss": 9.503,
"step": 730
},
{
"epoch": 5.36231884057971,
"grad_norm": 7.874780178070068,
"learning_rate": 3.462998102466794e-05,
"loss": 10.949,
"step": 740
},
{
"epoch": 5.434782608695652,
"grad_norm": 3.363119602203369,
"learning_rate": 3.368121442125237e-05,
"loss": 10.2591,
"step": 750
},
{
"epoch": 5.507246376811594,
"grad_norm": 5.406454086303711,
"learning_rate": 3.273244781783681e-05,
"loss": 7.7785,
"step": 760
},
{
"epoch": 5.579710144927536,
"grad_norm": 6.118616580963135,
"learning_rate": 3.1783681214421254e-05,
"loss": 7.2315,
"step": 770
},
{
"epoch": 5.6521739130434785,
"grad_norm": 16.83717918395996,
"learning_rate": 3.0834914611005695e-05,
"loss": 7.6301,
"step": 780
},
{
"epoch": 5.72463768115942,
"grad_norm": 11.110983848571777,
"learning_rate": 2.9886148007590137e-05,
"loss": 9.153,
"step": 790
},
{
"epoch": 5.797101449275362,
"grad_norm": 8.291231155395508,
"learning_rate": 2.8937381404174574e-05,
"loss": 10.3227,
"step": 800
},
{
"epoch": 5.869565217391305,
"grad_norm": 2.8294785022735596,
"learning_rate": 2.7988614800759016e-05,
"loss": 6.8461,
"step": 810
},
{
"epoch": 5.942028985507246,
"grad_norm": 11.522751808166504,
"learning_rate": 2.7039848197343453e-05,
"loss": 25.7992,
"step": 820
},
{
"epoch": 6.0144927536231885,
"grad_norm": 5.239048480987549,
"learning_rate": 2.6091081593927898e-05,
"loss": 10.1367,
"step": 830
},
{
"epoch": 6.086956521739131,
"grad_norm": 14.244694709777832,
"learning_rate": 2.5142314990512332e-05,
"loss": 8.1876,
"step": 840
},
{
"epoch": 6.159420289855072,
"grad_norm": 3.612367868423462,
"learning_rate": 2.4193548387096777e-05,
"loss": 9.5329,
"step": 850
},
{
"epoch": 6.231884057971015,
"grad_norm": 4.118394374847412,
"learning_rate": 2.3244781783681215e-05,
"loss": 8.8621,
"step": 860
},
{
"epoch": 6.304347826086957,
"grad_norm": 2.245436906814575,
"learning_rate": 2.2296015180265656e-05,
"loss": 7.363,
"step": 870
},
{
"epoch": 6.3768115942028984,
"grad_norm": 7.4832658767700195,
"learning_rate": 2.1347248576850097e-05,
"loss": 8.4077,
"step": 880
},
{
"epoch": 6.449275362318841,
"grad_norm": 20.146770477294922,
"learning_rate": 2.0398481973434535e-05,
"loss": 27.7444,
"step": 890
},
{
"epoch": 6.521739130434782,
"grad_norm": 6.686460018157959,
"learning_rate": 1.9449715370018976e-05,
"loss": 9.9147,
"step": 900
},
{
"epoch": 6.594202898550725,
"grad_norm": 11.729459762573242,
"learning_rate": 1.8500948766603414e-05,
"loss": 10.427,
"step": 910
},
{
"epoch": 6.666666666666667,
"grad_norm": 5.6112518310546875,
"learning_rate": 1.7552182163187855e-05,
"loss": 8.1236,
"step": 920
},
{
"epoch": 6.739130434782608,
"grad_norm": 21.59635353088379,
"learning_rate": 1.66034155597723e-05,
"loss": 8.0663,
"step": 930
},
{
"epoch": 6.811594202898551,
"grad_norm": 7.840078353881836,
"learning_rate": 1.5654648956356737e-05,
"loss": 11.4745,
"step": 940
},
{
"epoch": 6.884057971014493,
"grad_norm": 8.633024215698242,
"learning_rate": 1.4705882352941177e-05,
"loss": 8.9603,
"step": 950
},
{
"epoch": 6.956521739130435,
"grad_norm": 2.9932422637939453,
"learning_rate": 1.3757115749525618e-05,
"loss": 8.8133,
"step": 960
},
{
"epoch": 7.028985507246377,
"grad_norm": 4.314597129821777,
"learning_rate": 1.2808349146110058e-05,
"loss": 6.8162,
"step": 970
},
{
"epoch": 7.101449275362318,
"grad_norm": 18.329147338867188,
"learning_rate": 1.1859582542694497e-05,
"loss": 11.9377,
"step": 980
},
{
"epoch": 7.173913043478261,
"grad_norm": 7.561063289642334,
"learning_rate": 1.0910815939278938e-05,
"loss": 6.8045,
"step": 990
},
{
"epoch": 7.246376811594203,
"grad_norm": 5.871479511260986,
"learning_rate": 9.962049335863378e-06,
"loss": 9.549,
"step": 1000
},
{
"epoch": 7.318840579710145,
"grad_norm": 3.1535987854003906,
"learning_rate": 9.013282732447819e-06,
"loss": 6.7738,
"step": 1010
},
{
"epoch": 7.391304347826087,
"grad_norm": 32.92097854614258,
"learning_rate": 8.064516129032258e-06,
"loss": 27.4036,
"step": 1020
},
{
"epoch": 7.463768115942029,
"grad_norm": 7.275607109069824,
"learning_rate": 7.115749525616698e-06,
"loss": 9.3496,
"step": 1030
},
{
"epoch": 7.536231884057971,
"grad_norm": 7.355132579803467,
"learning_rate": 6.166982922201139e-06,
"loss": 7.1842,
"step": 1040
},
{
"epoch": 7.608695652173913,
"grad_norm": 11.539506912231445,
"learning_rate": 5.218216318785579e-06,
"loss": 10.1211,
"step": 1050
},
{
"epoch": 7.681159420289855,
"grad_norm": 5.036221981048584,
"learning_rate": 4.269449715370019e-06,
"loss": 10.1483,
"step": 1060
},
{
"epoch": 7.753623188405797,
"grad_norm": 24.608356475830078,
"learning_rate": 3.320683111954459e-06,
"loss": 9.3202,
"step": 1070
},
{
"epoch": 7.826086956521739,
"grad_norm": 9.034127235412598,
"learning_rate": 2.3719165085388996e-06,
"loss": 9.5748,
"step": 1080
},
{
"epoch": 7.898550724637682,
"grad_norm": 13.333020210266113,
"learning_rate": 1.4231499051233397e-06,
"loss": 8.6569,
"step": 1090
},
{
"epoch": 7.971014492753623,
"grad_norm": 7.102665424346924,
"learning_rate": 4.743833017077799e-07,
"loss": 8.791,
"step": 1100
}
],
"logging_steps": 10,
"max_steps": 1104,
"num_input_tokens_seen": 0,
"num_train_epochs": 8,
"save_steps": 50,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 109892759371776.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}