{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 4.0,
"eval_steps": 2500,
"global_step": 20000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02,
"grad_norm": 3.375241279602051,
"learning_rate": 4.000000000000001e-06,
"loss": 1.3562,
"step": 100
},
{
"epoch": 0.04,
"grad_norm": 3.329700231552124,
"learning_rate": 8.000000000000001e-06,
"loss": 1.0739,
"step": 200
},
{
"epoch": 0.06,
"grad_norm": 3.802884340286255,
"learning_rate": 1.2e-05,
"loss": 1.0389,
"step": 300
},
{
"epoch": 0.08,
"grad_norm": 3.4885237216949463,
"learning_rate": 1.6000000000000003e-05,
"loss": 1.0009,
"step": 400
},
{
"epoch": 0.1,
"grad_norm": 2.990727663040161,
"learning_rate": 2e-05,
"loss": 0.9878,
"step": 500
},
{
"epoch": 0.12,
"grad_norm": 3.8411333560943604,
"learning_rate": 1.98974358974359e-05,
"loss": 0.9812,
"step": 600
},
{
"epoch": 0.14,
"grad_norm": 2.3672726154327393,
"learning_rate": 1.9794871794871798e-05,
"loss": 0.9462,
"step": 700
},
{
"epoch": 0.16,
"grad_norm": 2.6635308265686035,
"learning_rate": 1.9692307692307696e-05,
"loss": 0.9287,
"step": 800
},
{
"epoch": 0.18,
"grad_norm": 2.3260979652404785,
"learning_rate": 1.958974358974359e-05,
"loss": 0.9079,
"step": 900
},
{
"epoch": 0.2,
"grad_norm": 2.360579490661621,
"learning_rate": 1.9487179487179488e-05,
"loss": 0.9002,
"step": 1000
},
{
"epoch": 0.22,
"grad_norm": 2.2634215354919434,
"learning_rate": 1.9384615384615386e-05,
"loss": 0.8752,
"step": 1100
},
{
"epoch": 0.24,
"grad_norm": 2.375136137008667,
"learning_rate": 1.9282051282051284e-05,
"loss": 0.8791,
"step": 1200
},
{
"epoch": 0.26,
"grad_norm": 1.7745119333267212,
"learning_rate": 1.9179487179487182e-05,
"loss": 0.8601,
"step": 1300
},
{
"epoch": 0.28,
"grad_norm": 2.3610873222351074,
"learning_rate": 1.907692307692308e-05,
"loss": 0.846,
"step": 1400
},
{
"epoch": 0.3,
"grad_norm": 2.3921096324920654,
"learning_rate": 1.8974358974358975e-05,
"loss": 0.8465,
"step": 1500
},
{
"epoch": 0.32,
"grad_norm": 1.9155237674713135,
"learning_rate": 1.8871794871794873e-05,
"loss": 0.8358,
"step": 1600
},
{
"epoch": 0.34,
"grad_norm": 2.5246050357818604,
"learning_rate": 1.876923076923077e-05,
"loss": 0.8346,
"step": 1700
},
{
"epoch": 0.36,
"grad_norm": 2.402024030685425,
"learning_rate": 1.866666666666667e-05,
"loss": 0.8151,
"step": 1800
},
{
"epoch": 0.38,
"grad_norm": 1.9758305549621582,
"learning_rate": 1.8564102564102567e-05,
"loss": 0.8121,
"step": 1900
},
{
"epoch": 0.4,
"grad_norm": 1.7855979204177856,
"learning_rate": 1.8461538461538465e-05,
"loss": 0.8094,
"step": 2000
},
{
"epoch": 0.42,
"grad_norm": 2.5531435012817383,
"learning_rate": 1.835897435897436e-05,
"loss": 0.8056,
"step": 2100
},
{
"epoch": 0.44,
"grad_norm": 1.6370404958724976,
"learning_rate": 1.8256410256410257e-05,
"loss": 0.7954,
"step": 2200
},
{
"epoch": 0.46,
"grad_norm": 2.1094133853912354,
"learning_rate": 1.8153846153846155e-05,
"loss": 0.7954,
"step": 2300
},
{
"epoch": 0.48,
"grad_norm": 2.988926887512207,
"learning_rate": 1.8051282051282053e-05,
"loss": 0.7831,
"step": 2400
},
{
"epoch": 0.5,
"grad_norm": 2.0189740657806396,
"learning_rate": 1.794871794871795e-05,
"loss": 0.7734,
"step": 2500
},
{
"epoch": 0.5,
"eval_loss": 0.7750750184059143,
"eval_runtime": 37.7119,
"eval_samples_per_second": 26.517,
"eval_steps_per_second": 6.629,
"step": 2500
},
{
"epoch": 0.52,
"grad_norm": 1.6814110279083252,
"learning_rate": 1.784615384615385e-05,
"loss": 0.785,
"step": 2600
},
{
"epoch": 0.54,
"grad_norm": 1.3658416271209717,
"learning_rate": 1.7743589743589744e-05,
"loss": 0.7771,
"step": 2700
},
{
"epoch": 0.56,
"grad_norm": 2.0418968200683594,
"learning_rate": 1.7641025641025642e-05,
"loss": 0.7666,
"step": 2800
},
{
"epoch": 0.58,
"grad_norm": 1.8352694511413574,
"learning_rate": 1.753846153846154e-05,
"loss": 0.7636,
"step": 2900
},
{
"epoch": 0.6,
"grad_norm": 1.93773353099823,
"learning_rate": 1.743692307692308e-05,
"loss": 0.7447,
"step": 3000
},
{
"epoch": 0.62,
"grad_norm": 2.1721432209014893,
"learning_rate": 1.7334358974358977e-05,
"loss": 0.7583,
"step": 3100
},
{
"epoch": 0.64,
"grad_norm": 1.6705156564712524,
"learning_rate": 1.7231794871794875e-05,
"loss": 0.7347,
"step": 3200
},
{
"epoch": 0.66,
"grad_norm": 2.3235106468200684,
"learning_rate": 1.7129230769230773e-05,
"loss": 0.7509,
"step": 3300
},
{
"epoch": 0.68,
"grad_norm": 1.9557029008865356,
"learning_rate": 1.702666666666667e-05,
"loss": 0.7416,
"step": 3400
},
{
"epoch": 0.7,
"grad_norm": 1.8498666286468506,
"learning_rate": 1.6924102564102565e-05,
"loss": 0.7395,
"step": 3500
},
{
"epoch": 0.72,
"grad_norm": 1.7680326700210571,
"learning_rate": 1.6821538461538463e-05,
"loss": 0.7455,
"step": 3600
},
{
"epoch": 0.74,
"grad_norm": 1.8850408792495728,
"learning_rate": 1.671897435897436e-05,
"loss": 0.7174,
"step": 3700
},
{
"epoch": 0.76,
"grad_norm": 1.8840125799179077,
"learning_rate": 1.661641025641026e-05,
"loss": 0.7236,
"step": 3800
},
{
"epoch": 0.78,
"grad_norm": 1.8746532201766968,
"learning_rate": 1.6513846153846157e-05,
"loss": 0.7313,
"step": 3900
},
{
"epoch": 0.8,
"grad_norm": 1.9656246900558472,
"learning_rate": 1.6411282051282055e-05,
"loss": 0.7067,
"step": 4000
},
{
"epoch": 0.82,
"grad_norm": 2.298736572265625,
"learning_rate": 1.630871794871795e-05,
"loss": 0.7195,
"step": 4100
},
{
"epoch": 0.84,
"grad_norm": 1.647545337677002,
"learning_rate": 1.6206153846153848e-05,
"loss": 0.7166,
"step": 4200
},
{
"epoch": 0.86,
"grad_norm": 2.433582305908203,
"learning_rate": 1.6103589743589746e-05,
"loss": 0.7066,
"step": 4300
},
{
"epoch": 0.88,
"grad_norm": 1.937717080116272,
"learning_rate": 1.6001025641025644e-05,
"loss": 0.7065,
"step": 4400
},
{
"epoch": 0.9,
"grad_norm": 1.8050605058670044,
"learning_rate": 1.5898461538461542e-05,
"loss": 0.6854,
"step": 4500
},
{
"epoch": 0.92,
"grad_norm": 1.7141133546829224,
"learning_rate": 1.579589743589744e-05,
"loss": 0.6986,
"step": 4600
},
{
"epoch": 0.94,
"grad_norm": 2.356724739074707,
"learning_rate": 1.5693333333333334e-05,
"loss": 0.6915,
"step": 4700
},
{
"epoch": 0.96,
"grad_norm": 1.991727590560913,
"learning_rate": 1.5590769230769232e-05,
"loss": 0.7086,
"step": 4800
},
{
"epoch": 0.98,
"grad_norm": 1.5158222913742065,
"learning_rate": 1.548820512820513e-05,
"loss": 0.6915,
"step": 4900
},
{
"epoch": 1.0,
"grad_norm": 2.744266986846924,
"learning_rate": 1.538564102564103e-05,
"loss": 0.6765,
"step": 5000
},
{
"epoch": 1.0,
"eval_loss": 0.6998236179351807,
"eval_runtime": 37.7448,
"eval_samples_per_second": 26.494,
"eval_steps_per_second": 6.623,
"step": 5000
},
{
"epoch": 1.02,
"grad_norm": 1.5580978393554688,
"learning_rate": 1.5283076923076926e-05,
"loss": 0.6651,
"step": 5100
},
{
"epoch": 1.04,
"grad_norm": 1.8239487409591675,
"learning_rate": 1.5180512820512823e-05,
"loss": 0.6602,
"step": 5200
},
{
"epoch": 1.06,
"grad_norm": 1.6179425716400146,
"learning_rate": 1.5077948717948719e-05,
"loss": 0.6639,
"step": 5300
},
{
"epoch": 1.08,
"grad_norm": 2.1928112506866455,
"learning_rate": 1.4975384615384617e-05,
"loss": 0.6594,
"step": 5400
},
{
"epoch": 1.1,
"grad_norm": 1.6164484024047852,
"learning_rate": 1.4872820512820513e-05,
"loss": 0.675,
"step": 5500
},
{
"epoch": 1.12,
"grad_norm": 1.6600135564804077,
"learning_rate": 1.4770256410256411e-05,
"loss": 0.6601,
"step": 5600
},
{
"epoch": 1.14,
"grad_norm": 2.3864831924438477,
"learning_rate": 1.466769230769231e-05,
"loss": 0.6523,
"step": 5700
},
{
"epoch": 1.16,
"grad_norm": 1.7675199508666992,
"learning_rate": 1.4565128205128207e-05,
"loss": 0.6571,
"step": 5800
},
{
"epoch": 1.18,
"grad_norm": 2.3523380756378174,
"learning_rate": 1.4462564102564103e-05,
"loss": 0.644,
"step": 5900
},
{
"epoch": 1.2,
"grad_norm": 1.5896570682525635,
"learning_rate": 1.4360000000000001e-05,
"loss": 0.6425,
"step": 6000
},
{
"epoch": 1.22,
"grad_norm": 1.9609613418579102,
"learning_rate": 1.4257435897435898e-05,
"loss": 0.6449,
"step": 6100
},
{
"epoch": 1.24,
"grad_norm": 1.7148176431655884,
"learning_rate": 1.4154871794871796e-05,
"loss": 0.639,
"step": 6200
},
{
"epoch": 1.26,
"grad_norm": 1.6646454334259033,
"learning_rate": 1.4052307692307694e-05,
"loss": 0.6546,
"step": 6300
},
{
"epoch": 1.28,
"grad_norm": 1.6648869514465332,
"learning_rate": 1.3949743589743592e-05,
"loss": 0.6467,
"step": 6400
},
{
"epoch": 1.3,
"grad_norm": 1.6408113241195679,
"learning_rate": 1.3847179487179488e-05,
"loss": 0.629,
"step": 6500
},
{
"epoch": 1.32,
"grad_norm": 1.608528971672058,
"learning_rate": 1.3744615384615386e-05,
"loss": 0.6304,
"step": 6600
},
{
"epoch": 1.34,
"grad_norm": 1.7841941118240356,
"learning_rate": 1.3642051282051282e-05,
"loss": 0.636,
"step": 6700
},
{
"epoch": 1.36,
"grad_norm": 1.4850620031356812,
"learning_rate": 1.353948717948718e-05,
"loss": 0.6435,
"step": 6800
},
{
"epoch": 1.38,
"grad_norm": 1.8835147619247437,
"learning_rate": 1.3436923076923078e-05,
"loss": 0.6507,
"step": 6900
},
{
"epoch": 1.4,
"grad_norm": 2.422339916229248,
"learning_rate": 1.3334358974358976e-05,
"loss": 0.6419,
"step": 7000
},
{
"epoch": 1.42,
"grad_norm": 1.893487572669983,
"learning_rate": 1.3231794871794872e-05,
"loss": 0.6248,
"step": 7100
},
{
"epoch": 1.44,
"grad_norm": 2.7827541828155518,
"learning_rate": 1.312923076923077e-05,
"loss": 0.6376,
"step": 7200
},
{
"epoch": 1.46,
"grad_norm": 1.6783027648925781,
"learning_rate": 1.3026666666666667e-05,
"loss": 0.6207,
"step": 7300
},
{
"epoch": 1.48,
"grad_norm": 1.6965584754943848,
"learning_rate": 1.2924102564102565e-05,
"loss": 0.6239,
"step": 7400
},
{
"epoch": 1.5,
"grad_norm": 1.7779945135116577,
"learning_rate": 1.2821538461538463e-05,
"loss": 0.6211,
"step": 7500
},
{
"epoch": 1.5,
"eval_loss": 0.6189149022102356,
"eval_runtime": 37.7906,
"eval_samples_per_second": 26.462,
"eval_steps_per_second": 6.615,
"step": 7500
},
{
"epoch": 1.52,
"grad_norm": 1.8954280614852905,
"learning_rate": 1.271897435897436e-05,
"loss": 0.6301,
"step": 7600
},
{
"epoch": 1.54,
"grad_norm": 1.695956826210022,
"learning_rate": 1.2616410256410257e-05,
"loss": 0.6173,
"step": 7700
},
{
"epoch": 1.56,
"grad_norm": 2.6846532821655273,
"learning_rate": 1.2513846153846155e-05,
"loss": 0.6116,
"step": 7800
},
{
"epoch": 1.58,
"grad_norm": 2.0877957344055176,
"learning_rate": 1.2411282051282051e-05,
"loss": 0.6165,
"step": 7900
},
{
"epoch": 1.6,
"grad_norm": 1.568054437637329,
"learning_rate": 1.230871794871795e-05,
"loss": 0.6231,
"step": 8000
},
{
"epoch": 1.62,
"grad_norm": 1.8222888708114624,
"learning_rate": 1.2206153846153847e-05,
"loss": 0.6107,
"step": 8100
},
{
"epoch": 1.64,
"grad_norm": 1.8577901124954224,
"learning_rate": 1.2104615384615386e-05,
"loss": 0.6193,
"step": 8200
},
{
"epoch": 1.66,
"grad_norm": 1.689939022064209,
"learning_rate": 1.2002051282051284e-05,
"loss": 0.61,
"step": 8300
},
{
"epoch": 1.68,
"grad_norm": 1.8094621896743774,
"learning_rate": 1.189948717948718e-05,
"loss": 0.6131,
"step": 8400
},
{
"epoch": 1.7,
"grad_norm": 2.136385202407837,
"learning_rate": 1.1796923076923077e-05,
"loss": 0.6126,
"step": 8500
},
{
"epoch": 1.72,
"grad_norm": 1.6386775970458984,
"learning_rate": 1.1694358974358975e-05,
"loss": 0.617,
"step": 8600
},
{
"epoch": 1.74,
"grad_norm": 1.7911731004714966,
"learning_rate": 1.1591794871794873e-05,
"loss": 0.5999,
"step": 8700
},
{
"epoch": 1.76,
"grad_norm": 1.9262185096740723,
"learning_rate": 1.1489230769230771e-05,
"loss": 0.6069,
"step": 8800
},
{
"epoch": 1.78,
"grad_norm": 1.7505638599395752,
"learning_rate": 1.1386666666666669e-05,
"loss": 0.6083,
"step": 8900
},
{
"epoch": 1.8,
"grad_norm": 1.8164703845977783,
"learning_rate": 1.1284102564102565e-05,
"loss": 0.6237,
"step": 9000
},
{
"epoch": 1.82,
"grad_norm": 2.1859376430511475,
"learning_rate": 1.1181538461538461e-05,
"loss": 0.6004,
"step": 9100
},
{
"epoch": 1.84,
"grad_norm": 1.4279205799102783,
"learning_rate": 1.107897435897436e-05,
"loss": 0.6131,
"step": 9200
},
{
"epoch": 1.86,
"grad_norm": 1.8264535665512085,
"learning_rate": 1.0976410256410257e-05,
"loss": 0.602,
"step": 9300
},
{
"epoch": 1.88,
"grad_norm": 1.9791676998138428,
"learning_rate": 1.0873846153846155e-05,
"loss": 0.5974,
"step": 9400
},
{
"epoch": 1.9,
"grad_norm": 1.5951436758041382,
"learning_rate": 1.0771282051282053e-05,
"loss": 0.5855,
"step": 9500
},
{
"epoch": 1.92,
"grad_norm": 2.221043348312378,
"learning_rate": 1.066871794871795e-05,
"loss": 0.5942,
"step": 9600
},
{
"epoch": 1.94,
"grad_norm": 1.7138677835464478,
"learning_rate": 1.0566153846153846e-05,
"loss": 0.5981,
"step": 9700
},
{
"epoch": 1.96,
"grad_norm": 1.6244597434997559,
"learning_rate": 1.0463589743589744e-05,
"loss": 0.5908,
"step": 9800
},
{
"epoch": 1.98,
"grad_norm": 1.6802514791488647,
"learning_rate": 1.0361025641025642e-05,
"loss": 0.594,
"step": 9900
},
{
"epoch": 2.0,
"grad_norm": 2.2132487297058105,
"learning_rate": 1.025846153846154e-05,
"loss": 0.5894,
"step": 10000
},
{
"epoch": 2.0,
"eval_loss": 0.5827540159225464,
"eval_runtime": 37.6623,
"eval_samples_per_second": 26.552,
"eval_steps_per_second": 6.638,
"step": 10000
},
{
"epoch": 2.02,
"grad_norm": 2.0124223232269287,
"learning_rate": 1.0155897435897438e-05,
"loss": 0.5565,
"step": 10100
},
{
"epoch": 2.04,
"grad_norm": 1.5917423963546753,
"learning_rate": 1.0053333333333334e-05,
"loss": 0.5719,
"step": 10200
},
{
"epoch": 2.06,
"grad_norm": 1.895202875137329,
"learning_rate": 9.950769230769232e-06,
"loss": 0.5469,
"step": 10300
},
{
"epoch": 2.08,
"grad_norm": 1.5201858282089233,
"learning_rate": 9.84820512820513e-06,
"loss": 0.5672,
"step": 10400
},
{
"epoch": 2.1,
"grad_norm": 1.8425326347351074,
"learning_rate": 9.745641025641026e-06,
"loss": 0.5446,
"step": 10500
},
{
"epoch": 2.12,
"grad_norm": 2.431252956390381,
"learning_rate": 9.643076923076924e-06,
"loss": 0.552,
"step": 10600
},
{
"epoch": 2.14,
"grad_norm": 2.391223430633545,
"learning_rate": 9.541538461538462e-06,
"loss": 0.5668,
"step": 10700
},
{
"epoch": 2.16,
"grad_norm": 1.9579830169677734,
"learning_rate": 9.43897435897436e-06,
"loss": 0.5432,
"step": 10800
},
{
"epoch": 2.18,
"grad_norm": 1.6551591157913208,
"learning_rate": 9.336410256410258e-06,
"loss": 0.5345,
"step": 10900
},
{
"epoch": 2.2,
"grad_norm": 1.58843195438385,
"learning_rate": 9.233846153846154e-06,
"loss": 0.5527,
"step": 11000
},
{
"epoch": 2.22,
"grad_norm": 1.36421799659729,
"learning_rate": 9.131282051282052e-06,
"loss": 0.5543,
"step": 11100
},
{
"epoch": 2.24,
"grad_norm": 1.5961703062057495,
"learning_rate": 9.02871794871795e-06,
"loss": 0.5621,
"step": 11200
},
{
"epoch": 2.26,
"grad_norm": 1.9277173280715942,
"learning_rate": 8.926153846153846e-06,
"loss": 0.5404,
"step": 11300
},
{
"epoch": 2.28,
"grad_norm": 1.6406772136688232,
"learning_rate": 8.823589743589744e-06,
"loss": 0.5487,
"step": 11400
},
{
"epoch": 2.3,
"grad_norm": 2.2707431316375732,
"learning_rate": 8.721025641025642e-06,
"loss": 0.5581,
"step": 11500
},
{
"epoch": 2.32,
"grad_norm": 2.063711404800415,
"learning_rate": 8.618461538461539e-06,
"loss": 0.537,
"step": 11600
},
{
"epoch": 2.34,
"grad_norm": 1.40162992477417,
"learning_rate": 8.515897435897437e-06,
"loss": 0.5467,
"step": 11700
},
{
"epoch": 2.36,
"grad_norm": 1.9021568298339844,
"learning_rate": 8.413333333333335e-06,
"loss": 0.5492,
"step": 11800
},
{
"epoch": 2.38,
"grad_norm": 1.607601523399353,
"learning_rate": 8.310769230769231e-06,
"loss": 0.5358,
"step": 11900
},
{
"epoch": 2.4,
"grad_norm": 1.6282119750976562,
"learning_rate": 8.208205128205129e-06,
"loss": 0.5539,
"step": 12000
},
{
"epoch": 2.42,
"grad_norm": 1.7966066598892212,
"learning_rate": 8.105641025641027e-06,
"loss": 0.5459,
"step": 12100
},
{
"epoch": 2.44,
"grad_norm": 2.068737745285034,
"learning_rate": 8.003076923076923e-06,
"loss": 0.5427,
"step": 12200
},
{
"epoch": 2.46,
"grad_norm": 1.8832446336746216,
"learning_rate": 7.901538461538462e-06,
"loss": 0.5392,
"step": 12300
},
{
"epoch": 2.48,
"grad_norm": 3.1190991401672363,
"learning_rate": 7.798974358974359e-06,
"loss": 0.5479,
"step": 12400
},
{
"epoch": 2.5,
"grad_norm": 1.997453212738037,
"learning_rate": 7.696410256410257e-06,
"loss": 0.5462,
"step": 12500
},
{
"epoch": 2.5,
"eval_loss": 0.592738151550293,
"eval_runtime": 37.6944,
"eval_samples_per_second": 26.529,
"eval_steps_per_second": 6.632,
"step": 12500
},
{
"epoch": 2.52,
"grad_norm": 1.9580820798873901,
"learning_rate": 7.593846153846155e-06,
"loss": 0.5205,
"step": 12600
},
{
"epoch": 2.54,
"grad_norm": 1.8407106399536133,
"learning_rate": 7.491282051282052e-06,
"loss": 0.5283,
"step": 12700
},
{
"epoch": 2.56,
"grad_norm": 1.8764362335205078,
"learning_rate": 7.38871794871795e-06,
"loss": 0.5372,
"step": 12800
},
{
"epoch": 2.58,
"grad_norm": 2.0187058448791504,
"learning_rate": 7.286153846153847e-06,
"loss": 0.5443,
"step": 12900
},
{
"epoch": 2.6,
"grad_norm": 1.6760249137878418,
"learning_rate": 7.183589743589744e-06,
"loss": 0.5359,
"step": 13000
},
{
"epoch": 2.62,
"grad_norm": 1.7465155124664307,
"learning_rate": 7.081025641025642e-06,
"loss": 0.5332,
"step": 13100
},
{
"epoch": 2.64,
"grad_norm": 1.983605980873108,
"learning_rate": 6.978461538461539e-06,
"loss": 0.5375,
"step": 13200
},
{
"epoch": 2.66,
"grad_norm": 3.748850107192993,
"learning_rate": 6.875897435897436e-06,
"loss": 0.5202,
"step": 13300
},
{
"epoch": 2.68,
"grad_norm": 2.3786838054656982,
"learning_rate": 6.773333333333334e-06,
"loss": 0.5366,
"step": 13400
},
{
"epoch": 2.7,
"grad_norm": 1.8277692794799805,
"learning_rate": 6.670769230769231e-06,
"loss": 0.5406,
"step": 13500
},
{
"epoch": 2.72,
"grad_norm": 1.736742377281189,
"learning_rate": 6.5682051282051285e-06,
"loss": 0.5324,
"step": 13600
},
{
"epoch": 2.74,
"grad_norm": 2.152658224105835,
"learning_rate": 6.4656410256410265e-06,
"loss": 0.5238,
"step": 13700
},
{
"epoch": 2.76,
"grad_norm": 1.4771183729171753,
"learning_rate": 6.363076923076924e-06,
"loss": 0.5244,
"step": 13800
},
{
"epoch": 2.78,
"grad_norm": 2.6469223499298096,
"learning_rate": 6.260512820512821e-06,
"loss": 0.5296,
"step": 13900
},
{
"epoch": 2.8,
"grad_norm": 2.2587192058563232,
"learning_rate": 6.157948717948719e-06,
"loss": 0.5245,
"step": 14000
},
{
"epoch": 2.82,
"grad_norm": 1.691953182220459,
"learning_rate": 6.055384615384616e-06,
"loss": 0.523,
"step": 14100
},
{
"epoch": 2.84,
"grad_norm": 1.9120712280273438,
"learning_rate": 5.952820512820513e-06,
"loss": 0.522,
"step": 14200
},
{
"epoch": 2.86,
"grad_norm": 2.196423292160034,
"learning_rate": 5.850256410256411e-06,
"loss": 0.5252,
"step": 14300
},
{
"epoch": 2.88,
"grad_norm": 2.080812931060791,
"learning_rate": 5.747692307692308e-06,
"loss": 0.53,
"step": 14400
},
{
"epoch": 2.9,
"grad_norm": 1.567008376121521,
"learning_rate": 5.645128205128205e-06,
"loss": 0.5213,
"step": 14500
},
{
"epoch": 2.92,
"grad_norm": 1.8277350664138794,
"learning_rate": 5.542564102564103e-06,
"loss": 0.5288,
"step": 14600
},
{
"epoch": 2.94,
"grad_norm": 2.144063711166382,
"learning_rate": 5.4400000000000004e-06,
"loss": 0.5187,
"step": 14700
},
{
"epoch": 2.96,
"grad_norm": 2.0858545303344727,
"learning_rate": 5.338461538461539e-06,
"loss": 0.5104,
"step": 14800
},
{
"epoch": 2.98,
"grad_norm": 2.8449838161468506,
"learning_rate": 5.235897435897437e-06,
"loss": 0.5254,
"step": 14900
},
{
"epoch": 3.0,
"grad_norm": 1.867030143737793,
"learning_rate": 5.133333333333334e-06,
"loss": 0.5265,
"step": 15000
},
{
"epoch": 3.0,
"eval_loss": 0.5755677819252014,
"eval_runtime": 37.7393,
"eval_samples_per_second": 26.498,
"eval_steps_per_second": 6.624,
"step": 15000
},
{
"epoch": 3.02,
"grad_norm": 1.6803908348083496,
"learning_rate": 5.031794871794871e-06,
"loss": 0.4732,
"step": 15100
},
{
"epoch": 3.04,
"grad_norm": 1.657348871231079,
"learning_rate": 4.929230769230769e-06,
"loss": 0.4753,
"step": 15200
},
{
"epoch": 3.06,
"grad_norm": 2.1027541160583496,
"learning_rate": 4.826666666666667e-06,
"loss": 0.4763,
"step": 15300
},
{
"epoch": 3.08,
"grad_norm": 2.144843816757202,
"learning_rate": 4.7241025641025645e-06,
"loss": 0.4691,
"step": 15400
},
{
"epoch": 3.1,
"grad_norm": 2.0413217544555664,
"learning_rate": 4.621538461538462e-06,
"loss": 0.4694,
"step": 15500
},
{
"epoch": 3.12,
"grad_norm": 2.122432231903076,
"learning_rate": 4.51897435897436e-06,
"loss": 0.472,
"step": 15600
},
{
"epoch": 3.14,
"grad_norm": 2.2207162380218506,
"learning_rate": 4.416410256410257e-06,
"loss": 0.463,
"step": 15700
},
{
"epoch": 3.16,
"grad_norm": 1.912340521812439,
"learning_rate": 4.313846153846154e-06,
"loss": 0.4735,
"step": 15800
},
{
"epoch": 3.18,
"grad_norm": 2.1629879474639893,
"learning_rate": 4.211282051282052e-06,
"loss": 0.4759,
"step": 15900
},
{
"epoch": 3.2,
"grad_norm": 2.2578675746917725,
"learning_rate": 4.108717948717949e-06,
"loss": 0.4716,
"step": 16000
},
{
"epoch": 3.22,
"grad_norm": 1.8659443855285645,
"learning_rate": 4.006153846153846e-06,
"loss": 0.4521,
"step": 16100
},
{
"epoch": 3.24,
"grad_norm": 2.5843989849090576,
"learning_rate": 3.903589743589744e-06,
"loss": 0.4642,
"step": 16200
},
{
"epoch": 3.26,
"grad_norm": 2.093555212020874,
"learning_rate": 3.8010256410256412e-06,
"loss": 0.4625,
"step": 16300
},
{
"epoch": 3.28,
"grad_norm": 1.8870043754577637,
"learning_rate": 3.6984615384615384e-06,
"loss": 0.4715,
"step": 16400
},
{
"epoch": 3.3,
"grad_norm": 1.8587422370910645,
"learning_rate": 3.5958974358974363e-06,
"loss": 0.4598,
"step": 16500
},
{
"epoch": 3.32,
"grad_norm": 3.115006923675537,
"learning_rate": 3.4933333333333335e-06,
"loss": 0.4522,
"step": 16600
},
{
"epoch": 3.34,
"grad_norm": 3.0302741527557373,
"learning_rate": 3.3907692307692306e-06,
"loss": 0.4606,
"step": 16700
},
{
"epoch": 3.36,
"grad_norm": 2.0749588012695312,
"learning_rate": 3.2882051282051286e-06,
"loss": 0.4677,
"step": 16800
},
{
"epoch": 3.38,
"grad_norm": 1.8986167907714844,
"learning_rate": 3.1856410256410257e-06,
"loss": 0.4621,
"step": 16900
},
{
"epoch": 3.4,
"grad_norm": 4.082165241241455,
"learning_rate": 3.083076923076923e-06,
"loss": 0.472,
"step": 17000
},
{
"epoch": 3.42,
"grad_norm": 2.0937159061431885,
"learning_rate": 2.980512820512821e-06,
"loss": 0.4704,
"step": 17100
},
{
"epoch": 3.44,
"grad_norm": 1.8796356916427612,
"learning_rate": 2.877948717948718e-06,
"loss": 0.4664,
"step": 17200
},
{
"epoch": 3.46,
"grad_norm": 2.338709831237793,
"learning_rate": 2.775384615384615e-06,
"loss": 0.4642,
"step": 17300
},
{
"epoch": 3.48,
"grad_norm": 1.9063432216644287,
"learning_rate": 2.672820512820513e-06,
"loss": 0.4587,
"step": 17400
},
{
"epoch": 3.5,
"grad_norm": 2.500352621078491,
"learning_rate": 2.5702564102564103e-06,
"loss": 0.4653,
"step": 17500
},
{
"epoch": 3.5,
"eval_loss": 0.5773757696151733,
"eval_runtime": 37.7662,
"eval_samples_per_second": 26.479,
"eval_steps_per_second": 6.62,
"step": 17500
},
{
"epoch": 3.52,
"grad_norm": 2.412173271179199,
"learning_rate": 2.467692307692308e-06,
"loss": 0.4577,
"step": 17600
},
{
"epoch": 3.54,
"grad_norm": 2.3137009143829346,
"learning_rate": 2.3651282051282054e-06,
"loss": 0.4691,
"step": 17700
},
{
"epoch": 3.56,
"grad_norm": 2.7015998363494873,
"learning_rate": 2.2625641025641025e-06,
"loss": 0.4528,
"step": 17800
},
{
"epoch": 3.58,
"grad_norm": 2.0640199184417725,
"learning_rate": 2.16e-06,
"loss": 0.4543,
"step": 17900
},
{
"epoch": 3.6,
"grad_norm": 2.362372398376465,
"learning_rate": 2.0574358974358976e-06,
"loss": 0.4537,
"step": 18000
},
{
"epoch": 3.62,
"grad_norm": 2.2816359996795654,
"learning_rate": 1.9548717948717948e-06,
"loss": 0.4567,
"step": 18100
},
{
"epoch": 3.64,
"grad_norm": 1.7161535024642944,
"learning_rate": 1.8523076923076923e-06,
"loss": 0.4551,
"step": 18200
},
{
"epoch": 3.66,
"grad_norm": 2.011964797973633,
"learning_rate": 1.7497435897435899e-06,
"loss": 0.4501,
"step": 18300
},
{
"epoch": 3.68,
"grad_norm": 2.126624584197998,
"learning_rate": 1.6471794871794874e-06,
"loss": 0.4475,
"step": 18400
},
{
"epoch": 3.7,
"grad_norm": 2.778960704803467,
"learning_rate": 1.5446153846153846e-06,
"loss": 0.4525,
"step": 18500
},
{
"epoch": 3.72,
"grad_norm": 2.1998558044433594,
"learning_rate": 1.4420512820512821e-06,
"loss": 0.4542,
"step": 18600
},
{
"epoch": 3.74,
"grad_norm": 2.273758888244629,
"learning_rate": 1.3394871794871797e-06,
"loss": 0.4709,
"step": 18700
},
{
"epoch": 3.76,
"grad_norm": 2.270432949066162,
"learning_rate": 1.236923076923077e-06,
"loss": 0.4515,
"step": 18800
},
{
"epoch": 3.78,
"grad_norm": 2.251328706741333,
"learning_rate": 1.1343589743589744e-06,
"loss": 0.4414,
"step": 18900
},
{
"epoch": 3.8,
"grad_norm": 1.7179417610168457,
"learning_rate": 1.0317948717948717e-06,
"loss": 0.4571,
"step": 19000
},
{
"epoch": 3.82,
"grad_norm": 2.2892489433288574,
"learning_rate": 9.292307692307693e-07,
"loss": 0.4512,
"step": 19100
},
{
"epoch": 3.84,
"grad_norm": 2.386227607727051,
"learning_rate": 8.266666666666668e-07,
"loss": 0.4529,
"step": 19200
},
{
"epoch": 3.86,
"grad_norm": 3.4404356479644775,
"learning_rate": 7.241025641025641e-07,
"loss": 0.4561,
"step": 19300
},
{
"epoch": 3.88,
"grad_norm": 3.1425325870513916,
"learning_rate": 6.215384615384616e-07,
"loss": 0.4512,
"step": 19400
},
{
"epoch": 3.9,
"grad_norm": 2.285019636154175,
"learning_rate": 5.18974358974359e-07,
"loss": 0.4644,
"step": 19500
},
{
"epoch": 3.92,
"grad_norm": 1.933229923248291,
"learning_rate": 4.164102564102564e-07,
"loss": 0.4636,
"step": 19600
},
{
"epoch": 3.94,
"grad_norm": 2.129995346069336,
"learning_rate": 3.138461538461539e-07,
"loss": 0.4486,
"step": 19700
},
{
"epoch": 3.96,
"grad_norm": 2.06881046295166,
"learning_rate": 2.112820512820513e-07,
"loss": 0.4338,
"step": 19800
},
{
"epoch": 3.98,
"grad_norm": 2.346268892288208,
"learning_rate": 1.0871794871794872e-07,
"loss": 0.4543,
"step": 19900
},
{
"epoch": 4.0,
"grad_norm": 2.0282890796661377,
"learning_rate": 6.153846153846155e-09,
"loss": 0.4407,
"step": 20000
},
{
"epoch": 4.0,
"eval_loss": 0.5501033663749695,
"eval_runtime": 37.7502,
"eval_samples_per_second": 26.49,
"eval_steps_per_second": 6.622,
"step": 20000
}
],
"logging_steps": 100,
"max_steps": 20000,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 2500,
"total_flos": 2.51739173289984e+18,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}