{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.1196327604545708,
"eval_steps": 500,
"global_step": 52000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"grad_norm": 0.3076048195362091,
"learning_rate": 4.994617057468295e-05,
"loss": 0.6146,
"step": 500
},
{
"epoch": 0.02,
"grad_norm": 0.28934741020202637,
"learning_rate": 4.989234114936589e-05,
"loss": 0.5914,
"step": 1000
},
{
"epoch": 0.03,
"grad_norm": 0.32335227727890015,
"learning_rate": 4.9838511724048836e-05,
"loss": 0.6077,
"step": 1500
},
{
"epoch": 0.04,
"grad_norm": 0.23465125262737274,
"learning_rate": 4.9784682298731785e-05,
"loss": 0.6017,
"step": 2000
},
{
"epoch": 0.05,
"grad_norm": 0.3192775845527649,
"learning_rate": 4.973085287341472e-05,
"loss": 0.5961,
"step": 2500
},
{
"epoch": 0.06,
"grad_norm": 0.2321719229221344,
"learning_rate": 4.967702344809767e-05,
"loss": 0.5917,
"step": 3000
},
{
"epoch": 0.08,
"grad_norm": 0.29103726148605347,
"learning_rate": 4.962319402278062e-05,
"loss": 0.5936,
"step": 3500
},
{
"epoch": 0.09,
"grad_norm": 0.24932251870632172,
"learning_rate": 4.956936459746356e-05,
"loss": 0.6019,
"step": 4000
},
{
"epoch": 0.1,
"grad_norm": 0.3346109092235565,
"learning_rate": 4.9515535172146503e-05,
"loss": 0.6121,
"step": 4500
},
{
"epoch": 0.11,
"grad_norm": 0.2718544602394104,
"learning_rate": 4.946170574682945e-05,
"loss": 0.6042,
"step": 5000
},
{
"epoch": 0.12,
"grad_norm": 0.27134013175964355,
"learning_rate": 4.940798398036303e-05,
"loss": 0.594,
"step": 5500
},
{
"epoch": 0.13,
"grad_norm": 0.35403260588645935,
"learning_rate": 4.9354154555045976e-05,
"loss": 0.5987,
"step": 6000
},
{
"epoch": 0.14,
"grad_norm": 0.32682767510414124,
"learning_rate": 4.930032512972892e-05,
"loss": 0.5937,
"step": 6500
},
{
"epoch": 0.15,
"grad_norm": 0.34741413593292236,
"learning_rate": 4.924649570441186e-05,
"loss": 0.602,
"step": 7000
},
{
"epoch": 0.16,
"grad_norm": 0.29353055357933044,
"learning_rate": 4.919266627909481e-05,
"loss": 0.6093,
"step": 7500
},
{
"epoch": 0.17,
"grad_norm": 0.2304825335741043,
"learning_rate": 4.913883685377775e-05,
"loss": 0.6024,
"step": 8000
},
{
"epoch": 0.18,
"grad_norm": 0.32337111234664917,
"learning_rate": 4.9085007428460694e-05,
"loss": 0.5968,
"step": 8500
},
{
"epoch": 0.19,
"grad_norm": 0.21958841383457184,
"learning_rate": 4.903117800314364e-05,
"loss": 0.6019,
"step": 9000
},
{
"epoch": 0.2,
"grad_norm": 0.23487211763858795,
"learning_rate": 4.8977348577826585e-05,
"loss": 0.5883,
"step": 9500
},
{
"epoch": 0.22,
"grad_norm": 0.2670685350894928,
"learning_rate": 4.892351915250953e-05,
"loss": 0.5883,
"step": 10000
},
{
"epoch": 0.23,
"grad_norm": 0.3000301420688629,
"learning_rate": 4.886968972719248e-05,
"loss": 0.6039,
"step": 10500
},
{
"epoch": 0.24,
"grad_norm": 0.2296791523694992,
"learning_rate": 4.881586030187542e-05,
"loss": 0.5979,
"step": 11000
},
{
"epoch": 0.25,
"grad_norm": 0.23062767088413239,
"learning_rate": 4.876213853540899e-05,
"loss": 0.5996,
"step": 11500
},
{
"epoch": 0.26,
"grad_norm": 0.3908576965332031,
"learning_rate": 4.870830911009194e-05,
"loss": 0.5965,
"step": 12000
},
{
"epoch": 0.27,
"grad_norm": 0.3430127799510956,
"learning_rate": 4.865447968477489e-05,
"loss": 0.5857,
"step": 12500
},
{
"epoch": 0.28,
"grad_norm": 0.352638304233551,
"learning_rate": 4.8600650259457834e-05,
"loss": 0.6033,
"step": 13000
},
{
"epoch": 0.29,
"grad_norm": 0.2627607583999634,
"learning_rate": 4.8546820834140776e-05,
"loss": 0.5903,
"step": 13500
},
{
"epoch": 0.3,
"grad_norm": 0.3584256172180176,
"learning_rate": 4.849309906767436e-05,
"loss": 0.5973,
"step": 14000
},
{
"epoch": 0.31,
"grad_norm": 0.2537979185581207,
"learning_rate": 4.84392696423573e-05,
"loss": 0.591,
"step": 14500
},
{
"epoch": 0.32,
"grad_norm": 0.29915836453437805,
"learning_rate": 4.838544021704025e-05,
"loss": 0.5846,
"step": 15000
},
{
"epoch": 0.33,
"grad_norm": 0.2917690575122833,
"learning_rate": 4.833161079172319e-05,
"loss": 0.5958,
"step": 15500
},
{
"epoch": 0.34,
"grad_norm": 0.2941623628139496,
"learning_rate": 4.8277889025256765e-05,
"loss": 0.5969,
"step": 16000
},
{
"epoch": 0.36,
"grad_norm": 0.23406459391117096,
"learning_rate": 4.8224059599939714e-05,
"loss": 0.5849,
"step": 16500
},
{
"epoch": 0.37,
"grad_norm": 0.2897736728191376,
"learning_rate": 4.817023017462266e-05,
"loss": 0.5947,
"step": 17000
},
{
"epoch": 0.38,
"grad_norm": 0.2664598524570465,
"learning_rate": 4.81164007493056e-05,
"loss": 0.5951,
"step": 17500
},
{
"epoch": 0.39,
"grad_norm": 0.22176018357276917,
"learning_rate": 4.806257132398855e-05,
"loss": 0.5965,
"step": 18000
},
{
"epoch": 0.4,
"grad_norm": 0.2701508402824402,
"learning_rate": 4.8008741898671496e-05,
"loss": 0.5928,
"step": 18500
},
{
"epoch": 0.41,
"grad_norm": 0.23263579607009888,
"learning_rate": 4.795491247335443e-05,
"loss": 0.5957,
"step": 19000
},
{
"epoch": 0.42,
"grad_norm": 0.34762224555015564,
"learning_rate": 4.790108304803738e-05,
"loss": 0.5871,
"step": 19500
},
{
"epoch": 0.43,
"grad_norm": 0.30906400084495544,
"learning_rate": 4.784725362272033e-05,
"loss": 0.593,
"step": 20000
},
{
"epoch": 0.44,
"grad_norm": 0.28025567531585693,
"learning_rate": 4.779342419740327e-05,
"loss": 0.5817,
"step": 20500
},
{
"epoch": 0.45,
"grad_norm": 0.3143500089645386,
"learning_rate": 4.773970243093685e-05,
"loss": 0.5937,
"step": 21000
},
{
"epoch": 0.46,
"grad_norm": 0.32620757818222046,
"learning_rate": 4.7685980664470434e-05,
"loss": 0.5884,
"step": 21500
},
{
"epoch": 0.47,
"grad_norm": 0.25027623772621155,
"learning_rate": 4.763215123915337e-05,
"loss": 0.583,
"step": 22000
},
{
"epoch": 0.48,
"grad_norm": 0.23277850449085236,
"learning_rate": 4.757832181383632e-05,
"loss": 0.5869,
"step": 22500
},
{
"epoch": 0.5,
"grad_norm": 0.2901150584220886,
"learning_rate": 4.752449238851927e-05,
"loss": 0.5731,
"step": 23000
},
{
"epoch": 0.51,
"grad_norm": 0.20858778059482574,
"learning_rate": 4.7470662963202203e-05,
"loss": 0.5845,
"step": 23500
},
{
"epoch": 0.52,
"grad_norm": 0.23019325733184814,
"learning_rate": 4.7416941196735784e-05,
"loss": 0.5881,
"step": 24000
},
{
"epoch": 0.53,
"grad_norm": 0.30065199732780457,
"learning_rate": 4.7363111771418733e-05,
"loss": 0.5743,
"step": 24500
},
{
"epoch": 0.54,
"grad_norm": 0.24830493330955505,
"learning_rate": 4.7309282346101676e-05,
"loss": 0.574,
"step": 25000
},
{
"epoch": 0.55,
"grad_norm": 0.2857877314090729,
"learning_rate": 4.725545292078462e-05,
"loss": 0.5731,
"step": 25500
},
{
"epoch": 0.56,
"grad_norm": 0.3333861827850342,
"learning_rate": 4.720162349546757e-05,
"loss": 0.5832,
"step": 26000
},
{
"epoch": 0.57,
"grad_norm": 0.2669144868850708,
"learning_rate": 4.714779407015051e-05,
"loss": 0.5777,
"step": 26500
},
{
"epoch": 0.58,
"grad_norm": 0.20388497412204742,
"learning_rate": 4.709396464483345e-05,
"loss": 0.5956,
"step": 27000
},
{
"epoch": 0.59,
"grad_norm": 0.2957603633403778,
"learning_rate": 4.70401352195164e-05,
"loss": 0.5897,
"step": 27500
},
{
"epoch": 0.6,
"grad_norm": 0.1999462991952896,
"learning_rate": 4.6986413453049975e-05,
"loss": 0.586,
"step": 28000
},
{
"epoch": 0.61,
"grad_norm": 0.38801008462905884,
"learning_rate": 4.6932584027732924e-05,
"loss": 0.5837,
"step": 28500
},
{
"epoch": 0.62,
"grad_norm": 0.2928718626499176,
"learning_rate": 4.6878754602415866e-05,
"loss": 0.5794,
"step": 29000
},
{
"epoch": 0.64,
"grad_norm": 0.3175244927406311,
"learning_rate": 4.682492517709881e-05,
"loss": 0.5853,
"step": 29500
},
{
"epoch": 0.65,
"grad_norm": 0.2631368935108185,
"learning_rate": 4.677120341063239e-05,
"loss": 0.5829,
"step": 30000
},
{
"epoch": 0.66,
"grad_norm": 0.24740691483020782,
"learning_rate": 4.6717481644165964e-05,
"loss": 0.582,
"step": 30500
},
{
"epoch": 0.67,
"grad_norm": 0.31048133969306946,
"learning_rate": 4.666365221884891e-05,
"loss": 0.5942,
"step": 31000
},
{
"epoch": 0.68,
"grad_norm": 0.2478443831205368,
"learning_rate": 4.660982279353186e-05,
"loss": 0.6,
"step": 31500
},
{
"epoch": 0.69,
"grad_norm": 0.24174700677394867,
"learning_rate": 4.6555993368214804e-05,
"loss": 0.5768,
"step": 32000
},
{
"epoch": 0.7,
"grad_norm": 0.25594571232795715,
"learning_rate": 4.6502163942897746e-05,
"loss": 0.5791,
"step": 32500
},
{
"epoch": 0.71,
"grad_norm": 0.2157001793384552,
"learning_rate": 4.6448334517580696e-05,
"loss": 0.5743,
"step": 33000
},
{
"epoch": 0.72,
"grad_norm": 0.3016982078552246,
"learning_rate": 4.639450509226364e-05,
"loss": 0.5788,
"step": 33500
},
{
"epoch": 0.73,
"grad_norm": 0.28101280331611633,
"learning_rate": 4.634078332579722e-05,
"loss": 0.5957,
"step": 34000
},
{
"epoch": 0.74,
"grad_norm": 0.24969856441020966,
"learning_rate": 4.628695390048016e-05,
"loss": 0.5789,
"step": 34500
},
{
"epoch": 0.75,
"grad_norm": 0.2322104573249817,
"learning_rate": 4.62331244751631e-05,
"loss": 0.5804,
"step": 35000
},
{
"epoch": 0.76,
"grad_norm": 0.28361350297927856,
"learning_rate": 4.617929504984605e-05,
"loss": 0.579,
"step": 35500
},
{
"epoch": 0.78,
"grad_norm": 0.29101938009262085,
"learning_rate": 4.6125465624528995e-05,
"loss": 0.5685,
"step": 36000
},
{
"epoch": 0.79,
"grad_norm": 0.3486846685409546,
"learning_rate": 4.607163619921194e-05,
"loss": 0.5854,
"step": 36500
},
{
"epoch": 0.8,
"grad_norm": 0.2890051603317261,
"learning_rate": 4.6017806773894886e-05,
"loss": 0.5726,
"step": 37000
},
{
"epoch": 0.81,
"grad_norm": 0.24968238174915314,
"learning_rate": 4.596397734857783e-05,
"loss": 0.5685,
"step": 37500
},
{
"epoch": 0.82,
"grad_norm": 0.25876086950302124,
"learning_rate": 4.591014792326077e-05,
"loss": 0.5839,
"step": 38000
},
{
"epoch": 0.83,
"grad_norm": 0.1987730860710144,
"learning_rate": 4.585642615679435e-05,
"loss": 0.5547,
"step": 38500
},
{
"epoch": 0.84,
"grad_norm": 0.27402666211128235,
"learning_rate": 4.58025967314773e-05,
"loss": 0.5802,
"step": 39000
},
{
"epoch": 0.85,
"grad_norm": 0.31150248646736145,
"learning_rate": 4.5748767306160236e-05,
"loss": 0.5879,
"step": 39500
},
{
"epoch": 0.86,
"grad_norm": 0.32700014114379883,
"learning_rate": 4.5694937880843185e-05,
"loss": 0.5755,
"step": 40000
},
{
"epoch": 0.87,
"grad_norm": 0.23694302141666412,
"learning_rate": 4.5641216114376766e-05,
"loss": 0.5826,
"step": 40500
},
{
"epoch": 0.88,
"grad_norm": 0.25343358516693115,
"learning_rate": 4.558738668905971e-05,
"loss": 0.5716,
"step": 41000
},
{
"epoch": 0.89,
"grad_norm": 0.22144146263599396,
"learning_rate": 4.553355726374266e-05,
"loss": 0.5777,
"step": 41500
},
{
"epoch": 0.9,
"grad_norm": 0.23369935154914856,
"learning_rate": 4.547983549727624e-05,
"loss": 0.5673,
"step": 42000
},
{
"epoch": 0.92,
"grad_norm": 0.3007163405418396,
"learning_rate": 4.5426006071959174e-05,
"loss": 0.5785,
"step": 42500
},
{
"epoch": 0.93,
"grad_norm": 0.21664878726005554,
"learning_rate": 4.537217664664212e-05,
"loss": 0.5679,
"step": 43000
},
{
"epoch": 0.94,
"grad_norm": 0.2804299592971802,
"learning_rate": 4.531834722132507e-05,
"loss": 0.5673,
"step": 43500
},
{
"epoch": 0.95,
"grad_norm": 0.2640630900859833,
"learning_rate": 4.526451779600801e-05,
"loss": 0.5713,
"step": 44000
},
{
"epoch": 0.96,
"grad_norm": 0.2232804000377655,
"learning_rate": 4.521068837069096e-05,
"loss": 0.5689,
"step": 44500
},
{
"epoch": 0.97,
"grad_norm": 0.2426728457212448,
"learning_rate": 4.5156858945373906e-05,
"loss": 0.579,
"step": 45000
},
{
"epoch": 0.98,
"grad_norm": 0.3089877665042877,
"learning_rate": 4.510313717890748e-05,
"loss": 0.5792,
"step": 45500
},
{
"epoch": 0.99,
"grad_norm": 0.29300886392593384,
"learning_rate": 4.504930775359042e-05,
"loss": 0.5711,
"step": 46000
},
{
"epoch": 1.0,
"grad_norm": 0.21948744356632233,
"learning_rate": 4.499547832827337e-05,
"loss": 0.5725,
"step": 46500
},
{
"epoch": 1.01,
"grad_norm": 0.31533685326576233,
"learning_rate": 4.4941648902956314e-05,
"loss": 0.5691,
"step": 47000
},
{
"epoch": 1.02,
"grad_norm": 0.2665063738822937,
"learning_rate": 4.4887819477639256e-05,
"loss": 0.5681,
"step": 47500
},
{
"epoch": 1.03,
"grad_norm": 0.3198137879371643,
"learning_rate": 4.4833990052322205e-05,
"loss": 0.5639,
"step": 48000
},
{
"epoch": 1.04,
"grad_norm": 0.27481958270072937,
"learning_rate": 4.478026828585578e-05,
"loss": 0.5642,
"step": 48500
},
{
"epoch": 1.06,
"grad_norm": 0.2723371982574463,
"learning_rate": 4.472643886053873e-05,
"loss": 0.5609,
"step": 49000
},
{
"epoch": 1.07,
"grad_norm": 0.29230979084968567,
"learning_rate": 4.467260943522168e-05,
"loss": 0.5521,
"step": 49500
},
{
"epoch": 1.08,
"grad_norm": 0.25512951612472534,
"learning_rate": 4.461878000990461e-05,
"loss": 0.5509,
"step": 50000
},
{
"epoch": 1.09,
"grad_norm": 0.19084623456001282,
"learning_rate": 4.456495058458756e-05,
"loss": 0.5613,
"step": 50500
},
{
"epoch": 1.1,
"grad_norm": 0.2515014410018921,
"learning_rate": 4.451112115927051e-05,
"loss": 0.5647,
"step": 51000
},
{
"epoch": 1.11,
"grad_norm": 0.24781277775764465,
"learning_rate": 4.4457399392804085e-05,
"loss": 0.5635,
"step": 51500
},
{
"epoch": 1.12,
"grad_norm": 0.30817893147468567,
"learning_rate": 4.440356996748703e-05,
"loss": 0.5499,
"step": 52000
}
],
"logging_steps": 500,
"max_steps": 464430,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 2000,
"total_flos": 9.6584872230912e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}