{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 22.0,
"eval_steps": 500,
"global_step": 10384,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.211864406779661,
"grad_norm": 2.61049222946167,
"learning_rate": 4.9500000000000004e-05,
"loss": 12.9286,
"step": 100
},
{
"epoch": 0.423728813559322,
"grad_norm": 2.035546064376831,
"learning_rate": 4.9518669778296385e-05,
"loss": 3.184,
"step": 200
},
{
"epoch": 0.635593220338983,
"grad_norm": 1.6674128770828247,
"learning_rate": 4.903247763516142e-05,
"loss": 2.4283,
"step": 300
},
{
"epoch": 0.847457627118644,
"grad_norm": 1.6008472442626953,
"learning_rate": 4.854628549202645e-05,
"loss": 2.1681,
"step": 400
},
{
"epoch": 1.0593220338983051,
"grad_norm": 1.4100618362426758,
"learning_rate": 4.806009334889148e-05,
"loss": 2.0264,
"step": 500
},
{
"epoch": 1.0593220338983051,
"eval_loss": 1.4357542991638184,
"eval_runtime": 1.2436,
"eval_samples_per_second": 804.114,
"eval_steps_per_second": 6.433,
"step": 500
},
{
"epoch": 1.271186440677966,
"grad_norm": 1.4121861457824707,
"learning_rate": 4.757390120575652e-05,
"loss": 1.9105,
"step": 600
},
{
"epoch": 1.4830508474576272,
"grad_norm": 1.5169304609298706,
"learning_rate": 4.708770906262155e-05,
"loss": 1.8444,
"step": 700
},
{
"epoch": 1.694915254237288,
"grad_norm": 1.2196599245071411,
"learning_rate": 4.660151691948658e-05,
"loss": 1.7763,
"step": 800
},
{
"epoch": 1.9067796610169492,
"grad_norm": 1.2915562391281128,
"learning_rate": 4.611532477635161e-05,
"loss": 1.7492,
"step": 900
},
{
"epoch": 2.1186440677966103,
"grad_norm": 1.352921485900879,
"learning_rate": 4.5629132633216645e-05,
"loss": 1.6981,
"step": 1000
},
{
"epoch": 2.1186440677966103,
"eval_loss": 1.2387933731079102,
"eval_runtime": 1.1755,
"eval_samples_per_second": 850.716,
"eval_steps_per_second": 6.806,
"step": 1000
},
{
"epoch": 2.330508474576271,
"grad_norm": 1.30990731716156,
"learning_rate": 4.514294049008168e-05,
"loss": 1.6452,
"step": 1100
},
{
"epoch": 2.542372881355932,
"grad_norm": 1.2262191772460938,
"learning_rate": 4.4656748346946715e-05,
"loss": 1.6154,
"step": 1200
},
{
"epoch": 2.7542372881355934,
"grad_norm": 1.3794692754745483,
"learning_rate": 4.4170556203811746e-05,
"loss": 1.6046,
"step": 1300
},
{
"epoch": 2.9661016949152543,
"grad_norm": 1.314780831336975,
"learning_rate": 4.368436406067678e-05,
"loss": 1.5812,
"step": 1400
},
{
"epoch": 3.1779661016949152,
"grad_norm": 1.1703613996505737,
"learning_rate": 4.3198171917541816e-05,
"loss": 1.5312,
"step": 1500
},
{
"epoch": 3.1779661016949152,
"eval_loss": 1.1293680667877197,
"eval_runtime": 1.1994,
"eval_samples_per_second": 833.764,
"eval_steps_per_second": 6.67,
"step": 1500
},
{
"epoch": 3.389830508474576,
"grad_norm": 1.1206786632537842,
"learning_rate": 4.271197977440685e-05,
"loss": 1.5192,
"step": 1600
},
{
"epoch": 3.601694915254237,
"grad_norm": 1.0490096807479858,
"learning_rate": 4.222578763127188e-05,
"loss": 1.5063,
"step": 1700
},
{
"epoch": 3.8135593220338984,
"grad_norm": 1.1094740629196167,
"learning_rate": 4.173959548813691e-05,
"loss": 1.4901,
"step": 1800
},
{
"epoch": 4.02542372881356,
"grad_norm": 1.0523357391357422,
"learning_rate": 4.125340334500194e-05,
"loss": 1.4702,
"step": 1900
},
{
"epoch": 4.237288135593221,
"grad_norm": 1.1458876132965088,
"learning_rate": 4.076721120186698e-05,
"loss": 1.4313,
"step": 2000
},
{
"epoch": 4.237288135593221,
"eval_loss": 1.0662319660186768,
"eval_runtime": 1.1801,
"eval_samples_per_second": 847.386,
"eval_steps_per_second": 6.779,
"step": 2000
},
{
"epoch": 4.4491525423728815,
"grad_norm": 1.0316150188446045,
"learning_rate": 4.028101905873201e-05,
"loss": 1.438,
"step": 2100
},
{
"epoch": 4.661016949152542,
"grad_norm": 1.016991376876831,
"learning_rate": 3.9794826915597044e-05,
"loss": 1.4165,
"step": 2200
},
{
"epoch": 4.872881355932203,
"grad_norm": 1.2765100002288818,
"learning_rate": 3.9308634772462075e-05,
"loss": 1.4133,
"step": 2300
},
{
"epoch": 5.084745762711864,
"grad_norm": 1.1391791105270386,
"learning_rate": 3.882244262932711e-05,
"loss": 1.3955,
"step": 2400
},
{
"epoch": 5.296610169491525,
"grad_norm": 1.0710008144378662,
"learning_rate": 3.8336250486192145e-05,
"loss": 1.3629,
"step": 2500
},
{
"epoch": 5.296610169491525,
"eval_loss": 1.0101332664489746,
"eval_runtime": 1.3522,
"eval_samples_per_second": 739.535,
"eval_steps_per_second": 5.916,
"step": 2500
},
{
"epoch": 5.508474576271187,
"grad_norm": 1.1584999561309814,
"learning_rate": 3.785005834305718e-05,
"loss": 1.3656,
"step": 2600
},
{
"epoch": 5.720338983050848,
"grad_norm": 0.9541441798210144,
"learning_rate": 3.736386619992221e-05,
"loss": 1.3556,
"step": 2700
},
{
"epoch": 5.932203389830509,
"grad_norm": 1.0870757102966309,
"learning_rate": 3.687767405678724e-05,
"loss": 1.3579,
"step": 2800
},
{
"epoch": 6.1440677966101696,
"grad_norm": 1.0170774459838867,
"learning_rate": 3.639148191365228e-05,
"loss": 1.3229,
"step": 2900
},
{
"epoch": 6.3559322033898304,
"grad_norm": 1.1141579151153564,
"learning_rate": 3.590528977051731e-05,
"loss": 1.3049,
"step": 3000
},
{
"epoch": 6.3559322033898304,
"eval_loss": 0.9768810272216797,
"eval_runtime": 1.25,
"eval_samples_per_second": 799.975,
"eval_steps_per_second": 6.4,
"step": 3000
},
{
"epoch": 6.567796610169491,
"grad_norm": 1.0974624156951904,
"learning_rate": 3.541909762738234e-05,
"loss": 1.3239,
"step": 3100
},
{
"epoch": 6.779661016949152,
"grad_norm": 1.0595228672027588,
"learning_rate": 3.493290548424737e-05,
"loss": 1.3089,
"step": 3200
},
{
"epoch": 6.991525423728813,
"grad_norm": 1.0377624034881592,
"learning_rate": 3.4446713341112405e-05,
"loss": 1.3133,
"step": 3300
},
{
"epoch": 7.203389830508475,
"grad_norm": 1.0550671815872192,
"learning_rate": 3.396052119797744e-05,
"loss": 1.2842,
"step": 3400
},
{
"epoch": 7.415254237288136,
"grad_norm": 1.097626805305481,
"learning_rate": 3.3474329054842475e-05,
"loss": 1.281,
"step": 3500
},
{
"epoch": 7.415254237288136,
"eval_loss": 0.9405697584152222,
"eval_runtime": 1.2881,
"eval_samples_per_second": 776.366,
"eval_steps_per_second": 6.211,
"step": 3500
},
{
"epoch": 7.627118644067797,
"grad_norm": 0.9320990443229675,
"learning_rate": 3.2988136911707506e-05,
"loss": 1.2664,
"step": 3600
},
{
"epoch": 7.838983050847458,
"grad_norm": 1.0325995683670044,
"learning_rate": 3.250194476857254e-05,
"loss": 1.2701,
"step": 3700
},
{
"epoch": 8.05084745762712,
"grad_norm": 0.9588521718978882,
"learning_rate": 3.2015752625437576e-05,
"loss": 1.2543,
"step": 3800
},
{
"epoch": 8.26271186440678,
"grad_norm": 0.9852187037467957,
"learning_rate": 3.152956048230261e-05,
"loss": 1.2419,
"step": 3900
},
{
"epoch": 8.474576271186441,
"grad_norm": 1.1718206405639648,
"learning_rate": 3.104336833916764e-05,
"loss": 1.2474,
"step": 4000
},
{
"epoch": 8.474576271186441,
"eval_loss": 0.9118660092353821,
"eval_runtime": 1.2362,
"eval_samples_per_second": 808.929,
"eval_steps_per_second": 6.471,
"step": 4000
},
{
"epoch": 8.686440677966102,
"grad_norm": 1.0222992897033691,
"learning_rate": 3.055717619603267e-05,
"loss": 1.2405,
"step": 4100
},
{
"epoch": 8.898305084745763,
"grad_norm": 1.1910102367401123,
"learning_rate": 3.0070984052897706e-05,
"loss": 1.2345,
"step": 4200
},
{
"epoch": 9.110169491525424,
"grad_norm": 0.9338302612304688,
"learning_rate": 2.9584791909762737e-05,
"loss": 1.2213,
"step": 4300
},
{
"epoch": 9.322033898305085,
"grad_norm": 4.818306922912598,
"learning_rate": 2.9098599766627772e-05,
"loss": 1.2286,
"step": 4400
},
{
"epoch": 9.533898305084746,
"grad_norm": 1.076406478881836,
"learning_rate": 2.8612407623492804e-05,
"loss": 1.2222,
"step": 4500
},
{
"epoch": 9.533898305084746,
"eval_loss": 0.8947927951812744,
"eval_runtime": 1.0672,
"eval_samples_per_second": 936.991,
"eval_steps_per_second": 7.496,
"step": 4500
},
{
"epoch": 9.745762711864407,
"grad_norm": 0.9330114126205444,
"learning_rate": 2.8126215480357835e-05,
"loss": 1.2152,
"step": 4600
},
{
"epoch": 9.957627118644067,
"grad_norm": 1.08220374584198,
"learning_rate": 2.764002333722287e-05,
"loss": 1.2064,
"step": 4700
},
{
"epoch": 10.169491525423728,
"grad_norm": 1.078016757965088,
"learning_rate": 2.7153831194087902e-05,
"loss": 1.1838,
"step": 4800
},
{
"epoch": 10.38135593220339,
"grad_norm": 1.0999090671539307,
"learning_rate": 2.6667639050952937e-05,
"loss": 1.1864,
"step": 4900
},
{
"epoch": 10.59322033898305,
"grad_norm": 1.0185160636901855,
"learning_rate": 2.618144690781797e-05,
"loss": 1.1951,
"step": 5000
},
{
"epoch": 10.59322033898305,
"eval_loss": 0.8726407885551453,
"eval_runtime": 1.1196,
"eval_samples_per_second": 893.186,
"eval_steps_per_second": 7.145,
"step": 5000
},
{
"epoch": 10.805084745762711,
"grad_norm": 1.079840064048767,
"learning_rate": 2.5695254764683003e-05,
"loss": 1.1893,
"step": 5100
},
{
"epoch": 11.016949152542374,
"grad_norm": 1.0007730722427368,
"learning_rate": 2.5209062621548035e-05,
"loss": 1.1858,
"step": 5200
},
{
"epoch": 11.228813559322035,
"grad_norm": 1.0986075401306152,
"learning_rate": 2.472287047841307e-05,
"loss": 1.1656,
"step": 5300
},
{
"epoch": 11.440677966101696,
"grad_norm": 0.9882263541221619,
"learning_rate": 2.4236678335278105e-05,
"loss": 1.165,
"step": 5400
},
{
"epoch": 11.652542372881356,
"grad_norm": 1.046499252319336,
"learning_rate": 2.3750486192143136e-05,
"loss": 1.163,
"step": 5500
},
{
"epoch": 11.652542372881356,
"eval_loss": 0.8517504930496216,
"eval_runtime": 1.0586,
"eval_samples_per_second": 944.64,
"eval_steps_per_second": 7.557,
"step": 5500
},
{
"epoch": 11.864406779661017,
"grad_norm": 0.9540964961051941,
"learning_rate": 2.326429404900817e-05,
"loss": 1.1651,
"step": 5600
},
{
"epoch": 12.076271186440678,
"grad_norm": 0.9471459984779358,
"learning_rate": 2.2778101905873203e-05,
"loss": 1.1531,
"step": 5700
},
{
"epoch": 12.288135593220339,
"grad_norm": 1.036387324333191,
"learning_rate": 2.2291909762738235e-05,
"loss": 1.1412,
"step": 5800
},
{
"epoch": 12.5,
"grad_norm": 0.9427016377449036,
"learning_rate": 2.180571761960327e-05,
"loss": 1.1487,
"step": 5900
},
{
"epoch": 12.711864406779661,
"grad_norm": 1.1486104726791382,
"learning_rate": 2.13195254764683e-05,
"loss": 1.1446,
"step": 6000
},
{
"epoch": 12.711864406779661,
"eval_loss": 0.8369362950325012,
"eval_runtime": 1.6466,
"eval_samples_per_second": 607.325,
"eval_steps_per_second": 4.859,
"step": 6000
},
{
"epoch": 12.923728813559322,
"grad_norm": 1.1171337366104126,
"learning_rate": 2.0833333333333336e-05,
"loss": 1.1481,
"step": 6100
},
{
"epoch": 13.135593220338983,
"grad_norm": 0.9343823194503784,
"learning_rate": 2.0347141190198368e-05,
"loss": 1.1327,
"step": 6200
},
{
"epoch": 13.347457627118644,
"grad_norm": 1.0053530931472778,
"learning_rate": 1.9860949047063403e-05,
"loss": 1.1321,
"step": 6300
},
{
"epoch": 13.559322033898304,
"grad_norm": 1.0828378200531006,
"learning_rate": 1.9374756903928434e-05,
"loss": 1.1286,
"step": 6400
},
{
"epoch": 13.771186440677965,
"grad_norm": 1.0227911472320557,
"learning_rate": 1.8888564760793466e-05,
"loss": 1.1203,
"step": 6500
},
{
"epoch": 13.771186440677965,
"eval_loss": 0.821231484413147,
"eval_runtime": 1.0368,
"eval_samples_per_second": 964.483,
"eval_steps_per_second": 7.716,
"step": 6500
},
{
"epoch": 13.983050847457626,
"grad_norm": 1.0566082000732422,
"learning_rate": 1.84023726176585e-05,
"loss": 1.1281,
"step": 6600
},
{
"epoch": 14.194915254237289,
"grad_norm": 0.9336996674537659,
"learning_rate": 1.7916180474523532e-05,
"loss": 1.1172,
"step": 6700
},
{
"epoch": 14.40677966101695,
"grad_norm": 0.9151898622512817,
"learning_rate": 1.7429988331388567e-05,
"loss": 1.1215,
"step": 6800
},
{
"epoch": 14.61864406779661,
"grad_norm": 0.977746844291687,
"learning_rate": 1.69437961882536e-05,
"loss": 1.1087,
"step": 6900
},
{
"epoch": 14.830508474576272,
"grad_norm": 0.8589827418327332,
"learning_rate": 1.6457604045118634e-05,
"loss": 1.1156,
"step": 7000
},
{
"epoch": 14.830508474576272,
"eval_loss": 0.8085836172103882,
"eval_runtime": 1.2389,
"eval_samples_per_second": 807.162,
"eval_steps_per_second": 6.457,
"step": 7000
},
{
"epoch": 15.042372881355933,
"grad_norm": 1.0282628536224365,
"learning_rate": 1.5971411901983665e-05,
"loss": 1.1027,
"step": 7100
},
{
"epoch": 15.254237288135593,
"grad_norm": 1.0358235836029053,
"learning_rate": 1.54852197588487e-05,
"loss": 1.0936,
"step": 7200
},
{
"epoch": 15.466101694915254,
"grad_norm": 0.9948374032974243,
"learning_rate": 1.4999027615713732e-05,
"loss": 1.1028,
"step": 7300
},
{
"epoch": 15.677966101694915,
"grad_norm": 1.0176063776016235,
"learning_rate": 1.4512835472578765e-05,
"loss": 1.1022,
"step": 7400
},
{
"epoch": 15.889830508474576,
"grad_norm": 0.930158793926239,
"learning_rate": 1.4026643329443798e-05,
"loss": 1.1037,
"step": 7500
},
{
"epoch": 15.889830508474576,
"eval_loss": 0.8010894060134888,
"eval_runtime": 1.0953,
"eval_samples_per_second": 913.012,
"eval_steps_per_second": 7.304,
"step": 7500
},
{
"epoch": 16.10169491525424,
"grad_norm": 1.0642187595367432,
"learning_rate": 1.354045118630883e-05,
"loss": 1.0961,
"step": 7600
},
{
"epoch": 16.3135593220339,
"grad_norm": 1.011250615119934,
"learning_rate": 1.3054259043173863e-05,
"loss": 1.0911,
"step": 7700
},
{
"epoch": 16.52542372881356,
"grad_norm": 0.8920814990997314,
"learning_rate": 1.2568066900038896e-05,
"loss": 1.0892,
"step": 7800
},
{
"epoch": 16.73728813559322,
"grad_norm": 1.063219666481018,
"learning_rate": 1.2081874756903928e-05,
"loss": 1.0847,
"step": 7900
},
{
"epoch": 16.949152542372882,
"grad_norm": 0.9891595840454102,
"learning_rate": 1.1595682613768961e-05,
"loss": 1.0854,
"step": 8000
},
{
"epoch": 16.949152542372882,
"eval_loss": 0.789776086807251,
"eval_runtime": 1.2524,
"eval_samples_per_second": 798.447,
"eval_steps_per_second": 6.388,
"step": 8000
},
{
"epoch": 17.161016949152543,
"grad_norm": 1.0095338821411133,
"learning_rate": 1.1109490470633994e-05,
"loss": 1.0801,
"step": 8100
},
{
"epoch": 17.372881355932204,
"grad_norm": 1.022760272026062,
"learning_rate": 1.0623298327499028e-05,
"loss": 1.0792,
"step": 8200
},
{
"epoch": 17.584745762711865,
"grad_norm": 1.0345150232315063,
"learning_rate": 1.0137106184364061e-05,
"loss": 1.0725,
"step": 8300
},
{
"epoch": 17.796610169491526,
"grad_norm": 1.1157786846160889,
"learning_rate": 9.650914041229094e-06,
"loss": 1.0827,
"step": 8400
},
{
"epoch": 18.008474576271187,
"grad_norm": 1.0153229236602783,
"learning_rate": 9.164721898094126e-06,
"loss": 1.0866,
"step": 8500
},
{
"epoch": 18.008474576271187,
"eval_loss": 0.7794539928436279,
"eval_runtime": 1.2458,
"eval_samples_per_second": 802.72,
"eval_steps_per_second": 6.422,
"step": 8500
},
{
"epoch": 18.220338983050848,
"grad_norm": 1.0266518592834473,
"learning_rate": 8.678529754959159e-06,
"loss": 1.0669,
"step": 8600
},
{
"epoch": 18.43220338983051,
"grad_norm": 1.039871096611023,
"learning_rate": 8.192337611824192e-06,
"loss": 1.0691,
"step": 8700
},
{
"epoch": 18.64406779661017,
"grad_norm": 0.9622082114219666,
"learning_rate": 7.706145468689227e-06,
"loss": 1.071,
"step": 8800
},
{
"epoch": 18.85593220338983,
"grad_norm": 0.9962339997291565,
"learning_rate": 7.21995332555426e-06,
"loss": 1.0789,
"step": 8900
},
{
"epoch": 19.06779661016949,
"grad_norm": 0.9597769975662231,
"learning_rate": 6.733761182419293e-06,
"loss": 1.0712,
"step": 9000
},
{
"epoch": 19.06779661016949,
"eval_loss": 0.776411235332489,
"eval_runtime": 1.274,
"eval_samples_per_second": 784.919,
"eval_steps_per_second": 6.279,
"step": 9000
},
{
"epoch": 19.279661016949152,
"grad_norm": 1.058910846710205,
"learning_rate": 6.247569039284325e-06,
"loss": 1.0732,
"step": 9100
},
{
"epoch": 19.491525423728813,
"grad_norm": 1.1111668348312378,
"learning_rate": 5.761376896149359e-06,
"loss": 1.0611,
"step": 9200
},
{
"epoch": 19.703389830508474,
"grad_norm": 1.0422521829605103,
"learning_rate": 5.275184753014392e-06,
"loss": 1.0636,
"step": 9300
},
{
"epoch": 19.915254237288135,
"grad_norm": 0.9715420603752136,
"learning_rate": 4.788992609879424e-06,
"loss": 1.0623,
"step": 9400
},
{
"epoch": 20.127118644067796,
"grad_norm": 1.067021369934082,
"learning_rate": 4.302800466744458e-06,
"loss": 1.0648,
"step": 9500
},
{
"epoch": 20.127118644067796,
"eval_loss": 0.773140013217926,
"eval_runtime": 1.0537,
"eval_samples_per_second": 949.028,
"eval_steps_per_second": 7.592,
"step": 9500
},
{
"epoch": 20.338983050847457,
"grad_norm": 0.9910571575164795,
"learning_rate": 3.816608323609491e-06,
"loss": 1.0578,
"step": 9600
},
{
"epoch": 20.550847457627118,
"grad_norm": 1.1001310348510742,
"learning_rate": 3.3304161804745237e-06,
"loss": 1.0628,
"step": 9700
},
{
"epoch": 20.76271186440678,
"grad_norm": 0.9902365803718567,
"learning_rate": 2.8442240373395565e-06,
"loss": 1.0598,
"step": 9800
},
{
"epoch": 20.97457627118644,
"grad_norm": 1.0591098070144653,
"learning_rate": 2.3580318942045898e-06,
"loss": 1.0593,
"step": 9900
},
{
"epoch": 21.1864406779661,
"grad_norm": 1.05185067653656,
"learning_rate": 1.8718397510696226e-06,
"loss": 1.0551,
"step": 10000
},
{
"epoch": 21.1864406779661,
"eval_loss": 0.770883321762085,
"eval_runtime": 1.265,
"eval_samples_per_second": 790.545,
"eval_steps_per_second": 6.324,
"step": 10000
},
{
"epoch": 21.39830508474576,
"grad_norm": 0.9792327880859375,
"learning_rate": 1.3856476079346559e-06,
"loss": 1.0672,
"step": 10100
},
{
"epoch": 21.610169491525422,
"grad_norm": 1.1287437677383423,
"learning_rate": 8.994554647996889e-07,
"loss": 1.0577,
"step": 10200
},
{
"epoch": 21.822033898305087,
"grad_norm": 1.1712942123413086,
"learning_rate": 4.13263321664722e-07,
"loss": 1.0585,
"step": 10300
}
],
"logging_steps": 100,
"max_steps": 10384,
"num_input_tokens_seen": 0,
"num_train_epochs": 22,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.582851649954775e+17,
"train_batch_size": 256,
"trial_name": null,
"trial_params": null
}