{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 1485,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.04040404040404041,
"grad_norm": 0.4179236888885498,
"learning_rate": 3.3670033670033673e-07,
"loss": 1.6395,
"step": 20
},
{
"epoch": 0.08080808080808081,
"grad_norm": 0.41700685024261475,
"learning_rate": 6.734006734006735e-07,
"loss": 1.6704,
"step": 40
},
{
"epoch": 0.12121212121212122,
"grad_norm": 0.41969290375709534,
"learning_rate": 1.01010101010101e-06,
"loss": 1.6681,
"step": 60
},
{
"epoch": 0.16161616161616163,
"grad_norm": 0.43269139528274536,
"learning_rate": 1.346801346801347e-06,
"loss": 1.5497,
"step": 80
},
{
"epoch": 0.20202020202020202,
"grad_norm": 0.4519573450088501,
"learning_rate": 1.6835016835016838e-06,
"loss": 1.6648,
"step": 100
},
{
"epoch": 0.24242424242424243,
"grad_norm": 0.4950277507305145,
"learning_rate": 2.02020202020202e-06,
"loss": 1.5099,
"step": 120
},
{
"epoch": 0.2828282828282828,
"grad_norm": 0.39855948090553284,
"learning_rate": 2.3569023569023572e-06,
"loss": 1.5483,
"step": 140
},
{
"epoch": 0.32323232323232326,
"grad_norm": 0.422542542219162,
"learning_rate": 2.693602693602694e-06,
"loss": 1.6411,
"step": 160
},
{
"epoch": 0.36363636363636365,
"grad_norm": 0.393912672996521,
"learning_rate": 3.0303030303030305e-06,
"loss": 1.5534,
"step": 180
},
{
"epoch": 0.40404040404040403,
"grad_norm": 0.3975161910057068,
"learning_rate": 3.3670033670033675e-06,
"loss": 1.5223,
"step": 200
},
{
"epoch": 0.4444444444444444,
"grad_norm": 0.40105801820755005,
"learning_rate": 3.7037037037037037e-06,
"loss": 1.4514,
"step": 220
},
{
"epoch": 0.48484848484848486,
"grad_norm": 0.4912654459476471,
"learning_rate": 4.04040404040404e-06,
"loss": 1.346,
"step": 240
},
{
"epoch": 0.5252525252525253,
"grad_norm": 0.739983081817627,
"learning_rate": 4.377104377104377e-06,
"loss": 1.2296,
"step": 260
},
{
"epoch": 0.5656565656565656,
"grad_norm": 0.48706138134002686,
"learning_rate": 4.7138047138047145e-06,
"loss": 0.9912,
"step": 280
},
{
"epoch": 0.6060606060606061,
"grad_norm": 0.5479212999343872,
"learning_rate": 4.999921328558333e-06,
"loss": 0.9724,
"step": 300
},
{
"epoch": 0.6464646464646465,
"grad_norm": 0.5364205241203308,
"learning_rate": 4.995377268577495e-06,
"loss": 0.8591,
"step": 320
},
{
"epoch": 0.6868686868686869,
"grad_norm": 0.465681791305542,
"learning_rate": 4.983854712613647e-06,
"loss": 0.8323,
"step": 340
},
{
"epoch": 0.7272727272727273,
"grad_norm": 0.4937388300895691,
"learning_rate": 4.965385884295467e-06,
"loss": 0.8475,
"step": 360
},
{
"epoch": 0.7676767676767676,
"grad_norm": 0.49361351132392883,
"learning_rate": 4.940022432985096e-06,
"loss": 0.8435,
"step": 380
},
{
"epoch": 0.8080808080808081,
"grad_norm": 0.5452327728271484,
"learning_rate": 4.907835289337116e-06,
"loss": 0.7949,
"step": 400
},
{
"epoch": 0.8484848484848485,
"grad_norm": 0.6474315524101257,
"learning_rate": 4.868914466936038e-06,
"loss": 0.8125,
"step": 420
},
{
"epoch": 0.8888888888888888,
"grad_norm": 0.6446932554244995,
"learning_rate": 4.823368810567056e-06,
"loss": 0.8134,
"step": 440
},
{
"epoch": 0.9292929292929293,
"grad_norm": 0.5589977502822876,
"learning_rate": 4.771325691824046e-06,
"loss": 0.7894,
"step": 460
},
{
"epoch": 0.9696969696969697,
"grad_norm": 0.9970992803573608,
"learning_rate": 4.7129306529060415e-06,
"loss": 0.713,
"step": 480
},
{
"epoch": 1.0101010101010102,
"grad_norm": 0.6452479958534241,
"learning_rate": 4.651720442612076e-06,
"loss": 0.7647,
"step": 500
},
{
"epoch": 1.0505050505050506,
"grad_norm": 0.6939157247543335,
"learning_rate": 4.5814246365869285e-06,
"loss": 0.7049,
"step": 520
},
{
"epoch": 1.0909090909090908,
"grad_norm": 0.47475144267082214,
"learning_rate": 4.50530798188761e-06,
"loss": 0.753,
"step": 540
},
{
"epoch": 1.1313131313131313,
"grad_norm": 0.837317943572998,
"learning_rate": 4.423583344029786e-06,
"loss": 0.7873,
"step": 560
},
{
"epoch": 1.1717171717171717,
"grad_norm": 0.6641133427619934,
"learning_rate": 4.336479271643833e-06,
"loss": 0.7261,
"step": 580
},
{
"epoch": 1.2121212121212122,
"grad_norm": 0.5319123864173889,
"learning_rate": 4.244239357322705e-06,
"loss": 0.7751,
"step": 600
},
{
"epoch": 1.2525252525252526,
"grad_norm": 0.5603788495063782,
"learning_rate": 4.1471215563983125e-06,
"loss": 0.6936,
"step": 620
},
{
"epoch": 1.2929292929292928,
"grad_norm": 0.613637387752533,
"learning_rate": 4.045397465551513e-06,
"loss": 0.74,
"step": 640
},
{
"epoch": 1.3333333333333333,
"grad_norm": 0.4830845296382904,
"learning_rate": 3.93935156327311e-06,
"loss": 0.7406,
"step": 660
},
{
"epoch": 1.3737373737373737,
"grad_norm": 0.6951391696929932,
"learning_rate": 3.82928041429998e-06,
"loss": 0.7329,
"step": 680
},
{
"epoch": 1.4141414141414141,
"grad_norm": 0.46375522017478943,
"learning_rate": 3.715491840251172e-06,
"loss": 0.7398,
"step": 700
},
{
"epoch": 1.4545454545454546,
"grad_norm": 0.5353007912635803,
"learning_rate": 3.598304058783357e-06,
"loss": 0.7807,
"step": 720
},
{
"epoch": 1.494949494949495,
"grad_norm": 0.4662165939807892,
"learning_rate": 3.478044793673025e-06,
"loss": 0.7504,
"step": 740
},
{
"epoch": 1.5353535353535355,
"grad_norm": 0.5738622546195984,
"learning_rate": 3.3550503583141726e-06,
"loss": 0.7317,
"step": 760
},
{
"epoch": 1.5757575757575757,
"grad_norm": 0.6228781342506409,
"learning_rate": 3.2296647151945116e-06,
"loss": 0.7184,
"step": 780
},
{
"epoch": 1.6161616161616161,
"grad_norm": 0.5194560289382935,
"learning_rate": 3.102238513980471e-06,
"loss": 0.7674,
"step": 800
},
{
"epoch": 1.6565656565656566,
"grad_norm": 0.5566609501838684,
"learning_rate": 2.973128110901026e-06,
"loss": 0.7514,
"step": 820
},
{
"epoch": 1.696969696969697,
"grad_norm": 0.6666210293769836,
"learning_rate": 2.842694572172737e-06,
"loss": 0.7387,
"step": 840
},
{
"epoch": 1.7373737373737375,
"grad_norm": 0.5752224326133728,
"learning_rate": 2.7113026642529733e-06,
"loss": 0.706,
"step": 860
},
{
"epoch": 1.7777777777777777,
"grad_norm": 0.5893978476524353,
"learning_rate": 2.57931983374517e-06,
"loss": 0.726,
"step": 880
},
{
"epoch": 1.8181818181818183,
"grad_norm": 0.5704651474952698,
"learning_rate": 2.4471151798088465e-06,
"loss": 0.7362,
"step": 900
},
{
"epoch": 1.8585858585858586,
"grad_norm": 0.5845484733581543,
"learning_rate": 2.3150584219481644e-06,
"loss": 0.7105,
"step": 920
},
{
"epoch": 1.898989898989899,
"grad_norm": 0.4336787760257721,
"learning_rate": 2.183518866065627e-06,
"loss": 0.6878,
"step": 940
},
{
"epoch": 1.9393939393939394,
"grad_norm": 0.6674192547798157,
"learning_rate": 2.0528643716724572e-06,
"loss": 0.6198,
"step": 960
},
{
"epoch": 1.9797979797979797,
"grad_norm": 0.4571789503097534,
"learning_rate": 1.9234603231439e-06,
"loss": 0.7451,
"step": 980
},
{
"epoch": 2.0202020202020203,
"grad_norm": 0.5650418996810913,
"learning_rate": 1.7956686078964257e-06,
"loss": 0.6723,
"step": 1000
},
{
"epoch": 2.0606060606060606,
"grad_norm": 0.6139554381370544,
"learning_rate": 1.6698466043444122e-06,
"loss": 0.731,
"step": 1020
},
{
"epoch": 2.101010101010101,
"grad_norm": 0.5360502600669861,
"learning_rate": 1.546346182466566e-06,
"loss": 0.7429,
"step": 1040
},
{
"epoch": 2.1414141414141414,
"grad_norm": 0.5541247725486755,
"learning_rate": 1.425512719777071e-06,
"loss": 0.7364,
"step": 1060
},
{
"epoch": 2.1818181818181817,
"grad_norm": 0.5136921405792236,
"learning_rate": 1.3076841354533658e-06,
"loss": 0.7516,
"step": 1080
},
{
"epoch": 2.2222222222222223,
"grad_norm": 0.4408280551433563,
"learning_rate": 1.1931899453216698e-06,
"loss": 0.6962,
"step": 1100
},
{
"epoch": 2.2626262626262625,
"grad_norm": 0.581975519657135,
"learning_rate": 1.0823503403430736e-06,
"loss": 0.6866,
"step": 1120
},
{
"epoch": 2.303030303030303,
"grad_norm": 0.5768681764602661,
"learning_rate": 9.754752911772616e-07,
"loss": 0.6871,
"step": 1140
},
{
"epoch": 2.3434343434343434,
"grad_norm": 0.4733218848705292,
"learning_rate": 8.728636813280164e-07,
"loss": 0.6909,
"step": 1160
},
{
"epoch": 2.3838383838383836,
"grad_norm": 0.7906825542449951,
"learning_rate": 7.748024712947205e-07,
"loss": 0.7064,
"step": 1180
},
{
"epoch": 2.4242424242424243,
"grad_norm": 0.5581986904144287,
"learning_rate": 6.815658960673782e-07,
"loss": 0.7708,
"step": 1200
},
{
"epoch": 2.4646464646464645,
"grad_norm": 0.49159321188926697,
"learning_rate": 5.934146982094049e-07,
"loss": 0.7367,
"step": 1220
},
{
"epoch": 2.505050505050505,
"grad_norm": 0.6523622274398804,
"learning_rate": 5.105953986729196e-07,
"loss": 0.7043,
"step": 1240
},
{
"epoch": 2.5454545454545454,
"grad_norm": 0.48596930503845215,
"learning_rate": 4.3333960738577236e-07,
"loss": 0.6872,
"step": 1260
},
{
"epoch": 2.5858585858585856,
"grad_norm": 0.6256952285766602,
"learning_rate": 3.6186337553827747e-07,
"loss": 0.7094,
"step": 1280
},
{
"epoch": 2.6262626262626263,
"grad_norm": 0.5648958683013916,
"learning_rate": 2.963665913810451e-07,
"loss": 0.6617,
"step": 1300
},
{
"epoch": 2.6666666666666665,
"grad_norm": 0.5555101037025452,
"learning_rate": 2.370324212235936e-07,
"loss": 0.7337,
"step": 1320
},
{
"epoch": 2.707070707070707,
"grad_norm": 0.6085281372070312,
"learning_rate": 1.840267971970344e-07,
"loss": 0.7236,
"step": 1340
},
{
"epoch": 2.7474747474747474,
"grad_norm": 0.6137179136276245,
"learning_rate": 1.3749795321332887e-07,
"loss": 0.7372,
"step": 1360
},
{
"epoch": 2.787878787878788,
"grad_norm": 0.4088153839111328,
"learning_rate": 9.757601041885694e-08,
"loss": 0.7663,
"step": 1380
},
{
"epoch": 2.8282828282828283,
"grad_norm": 0.5769473314285278,
"learning_rate": 6.437261330158206e-08,
"loss": 0.7463,
"step": 1400
},
{
"epoch": 2.8686868686868685,
"grad_norm": 0.47392240166664124,
"learning_rate": 3.798061746947995e-08,
"loss": 0.6858,
"step": 1420
},
{
"epoch": 2.909090909090909,
"grad_norm": 0.48467594385147095,
"learning_rate": 1.847382997337943e-08,
"loss": 0.6589,
"step": 1440
},
{
"epoch": 2.9494949494949494,
"grad_norm": 0.5985103249549866,
"learning_rate": 5.906802900412789e-09,
"loss": 0.6969,
"step": 1460
},
{
"epoch": 2.98989898989899,
"grad_norm": 0.534121036529541,
"learning_rate": 3.146808153123293e-10,
"loss": 0.6605,
"step": 1480
},
{
"epoch": 3.0,
"step": 1485,
"total_flos": 6.836552056302797e+16,
"train_loss": 0.8824553647025266,
"train_runtime": 1815.9955,
"train_samples_per_second": 3.271,
"train_steps_per_second": 0.818
}
],
"logging_steps": 20,
"max_steps": 1485,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 6.836552056302797e+16,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}