{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 4.0,
"eval_steps": 500,
"global_step": 8072,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0004956015363647627,
"grad_norm": null,
"learning_rate": 0.0,
"loss": 4.2404,
"step": 1
},
{
"epoch": 0.04956015363647627,
"grad_norm": 2.9627790451049805,
"learning_rate": 4.9435973720094217e-05,
"loss": 1.4839,
"step": 100
},
{
"epoch": 0.09912030727295254,
"grad_norm": 2.454435348510742,
"learning_rate": 4.8816164621296644e-05,
"loss": 1.1294,
"step": 200
},
{
"epoch": 0.14868046090942882,
"grad_norm": 2.297229290008545,
"learning_rate": 4.819635552249907e-05,
"loss": 1.0776,
"step": 300
},
{
"epoch": 0.19824061454590508,
"grad_norm": 2.2293310165405273,
"learning_rate": 4.75765464237015e-05,
"loss": 1.0337,
"step": 400
},
{
"epoch": 0.24780076818238136,
"grad_norm": 2.114262342453003,
"learning_rate": 4.6956737324903934e-05,
"loss": 1.0028,
"step": 500
},
{
"epoch": 0.24780076818238136,
"eval_loss": 2.039673328399658,
"eval_runtime": 372.7152,
"eval_samples_per_second": 6.501,
"eval_steps_per_second": 3.252,
"step": 500
},
{
"epoch": 0.29736092181885765,
"grad_norm": 2.0340042114257812,
"learning_rate": 4.633692822610636e-05,
"loss": 0.9991,
"step": 600
},
{
"epoch": 0.3469210754553339,
"grad_norm": 2.1469831466674805,
"learning_rate": 4.571711912730879e-05,
"loss": 0.9784,
"step": 700
},
{
"epoch": 0.39648122909181016,
"grad_norm": 1.7528233528137207,
"learning_rate": 4.509731002851122e-05,
"loss": 0.9586,
"step": 800
},
{
"epoch": 0.4460413827282865,
"grad_norm": 1.9578274488449097,
"learning_rate": 4.447750092971365e-05,
"loss": 0.9236,
"step": 900
},
{
"epoch": 0.49560153636476273,
"grad_norm": 2.0073506832122803,
"learning_rate": 4.385769183091608e-05,
"loss": 0.9236,
"step": 1000
},
{
"epoch": 0.49560153636476273,
"eval_loss": 1.9146523475646973,
"eval_runtime": 366.7833,
"eval_samples_per_second": 6.606,
"eval_steps_per_second": 3.304,
"step": 1000
},
{
"epoch": 0.545161690001239,
"grad_norm": 1.850284457206726,
"learning_rate": 4.3237882732118514e-05,
"loss": 0.9076,
"step": 1100
},
{
"epoch": 0.5947218436377153,
"grad_norm": 1.8297762870788574,
"learning_rate": 4.2618073633320935e-05,
"loss": 0.8888,
"step": 1200
},
{
"epoch": 0.6442819972741916,
"grad_norm": 1.9211479425430298,
"learning_rate": 4.199826453452337e-05,
"loss": 0.8918,
"step": 1300
},
{
"epoch": 0.6938421509106678,
"grad_norm": 1.6193498373031616,
"learning_rate": 4.13784554357258e-05,
"loss": 0.881,
"step": 1400
},
{
"epoch": 0.7434023045471441,
"grad_norm": 2.319871425628662,
"learning_rate": 4.075864633692823e-05,
"loss": 0.8831,
"step": 1500
},
{
"epoch": 0.7434023045471441,
"eval_loss": 1.9017372131347656,
"eval_runtime": 366.5077,
"eval_samples_per_second": 6.611,
"eval_steps_per_second": 3.307,
"step": 1500
},
{
"epoch": 0.7929624581836203,
"grad_norm": 1.7725250720977783,
"learning_rate": 4.013883723813065e-05,
"loss": 0.8829,
"step": 1600
},
{
"epoch": 0.8425226118200967,
"grad_norm": 1.7268086671829224,
"learning_rate": 3.951902813933309e-05,
"loss": 0.8725,
"step": 1700
},
{
"epoch": 0.892082765456573,
"grad_norm": 2.02824068069458,
"learning_rate": 3.8899219040535514e-05,
"loss": 0.8541,
"step": 1800
},
{
"epoch": 0.9416429190930492,
"grad_norm": 1.5427559614181519,
"learning_rate": 3.827940994173795e-05,
"loss": 0.8474,
"step": 1900
},
{
"epoch": 0.9912030727295255,
"grad_norm": 1.56866455078125,
"learning_rate": 3.7659600842940377e-05,
"loss": 0.8364,
"step": 2000
},
{
"epoch": 0.9912030727295255,
"eval_loss": 1.8795244693756104,
"eval_runtime": 361.6572,
"eval_samples_per_second": 6.7,
"eval_steps_per_second": 3.351,
"step": 2000
},
{
"epoch": 1.0406393259819104,
"grad_norm": 1.7035953998565674,
"learning_rate": 3.7039791744142804e-05,
"loss": 0.75,
"step": 2100
},
{
"epoch": 1.090199479618387,
"grad_norm": 1.8885678052902222,
"learning_rate": 3.641998264534524e-05,
"loss": 0.7393,
"step": 2200
},
{
"epoch": 1.1397596332548632,
"grad_norm": 1.467885136604309,
"learning_rate": 3.5800173546547666e-05,
"loss": 0.7352,
"step": 2300
},
{
"epoch": 1.1893197868913394,
"grad_norm": 1.5108469724655151,
"learning_rate": 3.5180364447750094e-05,
"loss": 0.7285,
"step": 2400
},
{
"epoch": 1.2388799405278157,
"grad_norm": 1.7523285150527954,
"learning_rate": 3.456055534895252e-05,
"loss": 0.7313,
"step": 2500
},
{
"epoch": 1.2388799405278157,
"eval_loss": 1.92574942111969,
"eval_runtime": 360.7532,
"eval_samples_per_second": 6.717,
"eval_steps_per_second": 3.36,
"step": 2500
},
{
"epoch": 1.288440094164292,
"grad_norm": 1.5373412370681763,
"learning_rate": 3.3940746250154956e-05,
"loss": 0.7392,
"step": 2600
},
{
"epoch": 1.3380002478007682,
"grad_norm": 1.9129449129104614,
"learning_rate": 3.3320937151357384e-05,
"loss": 0.7395,
"step": 2700
},
{
"epoch": 1.3875604014372445,
"grad_norm": 1.8050382137298584,
"learning_rate": 3.270112805255982e-05,
"loss": 0.7311,
"step": 2800
},
{
"epoch": 1.4371205550737207,
"grad_norm": 1.5149444341659546,
"learning_rate": 3.208131895376224e-05,
"loss": 0.7238,
"step": 2900
},
{
"epoch": 1.486680708710197,
"grad_norm": 1.834263563156128,
"learning_rate": 3.1461509854964674e-05,
"loss": 0.7409,
"step": 3000
},
{
"epoch": 1.486680708710197,
"eval_loss": 1.9400999546051025,
"eval_runtime": 361.3757,
"eval_samples_per_second": 6.705,
"eval_steps_per_second": 3.354,
"step": 3000
},
{
"epoch": 1.5362408623466732,
"grad_norm": 1.5594300031661987,
"learning_rate": 3.08417007561671e-05,
"loss": 0.7223,
"step": 3100
},
{
"epoch": 1.5858010159831495,
"grad_norm": 1.8869357109069824,
"learning_rate": 3.0221891657369533e-05,
"loss": 0.7242,
"step": 3200
},
{
"epoch": 1.635361169619626,
"grad_norm": 1.4488883018493652,
"learning_rate": 2.960208255857196e-05,
"loss": 0.7195,
"step": 3300
},
{
"epoch": 1.684921323256102,
"grad_norm": 1.5809857845306396,
"learning_rate": 2.8982273459774388e-05,
"loss": 0.7162,
"step": 3400
},
{
"epoch": 1.7344814768925785,
"grad_norm": 1.572243332862854,
"learning_rate": 2.836246436097682e-05,
"loss": 0.7193,
"step": 3500
},
{
"epoch": 1.7344814768925785,
"eval_loss": 1.9336891174316406,
"eval_runtime": 364.4827,
"eval_samples_per_second": 6.648,
"eval_steps_per_second": 3.325,
"step": 3500
},
{
"epoch": 1.7840416305290545,
"grad_norm": 1.4607752561569214,
"learning_rate": 2.774265526217925e-05,
"loss": 0.7239,
"step": 3600
},
{
"epoch": 1.833601784165531,
"grad_norm": 1.6760728359222412,
"learning_rate": 2.712284616338168e-05,
"loss": 0.7136,
"step": 3700
},
{
"epoch": 1.883161937802007,
"grad_norm": 1.4097050428390503,
"learning_rate": 2.650303706458411e-05,
"loss": 0.7207,
"step": 3800
},
{
"epoch": 1.9327220914384835,
"grad_norm": 1.3667641878128052,
"learning_rate": 2.588322796578654e-05,
"loss": 0.7068,
"step": 3900
},
{
"epoch": 1.9822822450749598,
"grad_norm": 1.767014980316162,
"learning_rate": 2.526341886698897e-05,
"loss": 0.7089,
"step": 4000
},
{
"epoch": 1.9822822450749598,
"eval_loss": 1.8813990354537964,
"eval_runtime": 360.4834,
"eval_samples_per_second": 6.722,
"eval_steps_per_second": 3.362,
"step": 4000
},
{
"epoch": 2.031718498327345,
"grad_norm": 1.5913705825805664,
"learning_rate": 2.46436097681914e-05,
"loss": 0.6173,
"step": 4100
},
{
"epoch": 2.081278651963821,
"grad_norm": 2.040769577026367,
"learning_rate": 2.402380066939383e-05,
"loss": 0.5627,
"step": 4200
},
{
"epoch": 2.1308388056002974,
"grad_norm": 1.890949010848999,
"learning_rate": 2.3403991570596257e-05,
"loss": 0.5706,
"step": 4300
},
{
"epoch": 2.180398959236774,
"grad_norm": 1.6909950971603394,
"learning_rate": 2.278418247179869e-05,
"loss": 0.5648,
"step": 4400
},
{
"epoch": 2.22995911287325,
"grad_norm": 1.497389316558838,
"learning_rate": 2.2164373373001116e-05,
"loss": 0.5665,
"step": 4500
},
{
"epoch": 2.22995911287325,
"eval_loss": 2.1176860332489014,
"eval_runtime": 362.5264,
"eval_samples_per_second": 6.684,
"eval_steps_per_second": 3.343,
"step": 4500
},
{
"epoch": 2.2795192665097264,
"grad_norm": 2.142023801803589,
"learning_rate": 2.1544564274203547e-05,
"loss": 0.5773,
"step": 4600
},
{
"epoch": 2.3290794201462024,
"grad_norm": 1.8469089269638062,
"learning_rate": 2.092475517540598e-05,
"loss": 0.5844,
"step": 4700
},
{
"epoch": 2.378639573782679,
"grad_norm": 1.9878648519515991,
"learning_rate": 2.0304946076608406e-05,
"loss": 0.5776,
"step": 4800
},
{
"epoch": 2.428199727419155,
"grad_norm": 1.9932163953781128,
"learning_rate": 1.9685136977810837e-05,
"loss": 0.5778,
"step": 4900
},
{
"epoch": 2.4777598810556314,
"grad_norm": 1.8406933546066284,
"learning_rate": 1.9065327879013265e-05,
"loss": 0.5693,
"step": 5000
},
{
"epoch": 2.4777598810556314,
"eval_loss": 2.1271347999572754,
"eval_runtime": 361.2042,
"eval_samples_per_second": 6.708,
"eval_steps_per_second": 3.355,
"step": 5000
},
{
"epoch": 2.5273200346921074,
"grad_norm": 1.8426705598831177,
"learning_rate": 1.8445518780215696e-05,
"loss": 0.5731,
"step": 5100
},
{
"epoch": 2.576880188328584,
"grad_norm": 1.974214792251587,
"learning_rate": 1.7825709681418124e-05,
"loss": 0.5701,
"step": 5200
},
{
"epoch": 2.62644034196506,
"grad_norm": 1.9426707029342651,
"learning_rate": 1.7205900582620555e-05,
"loss": 0.5766,
"step": 5300
},
{
"epoch": 2.6760004956015364,
"grad_norm": 1.796884298324585,
"learning_rate": 1.6586091483822982e-05,
"loss": 0.5584,
"step": 5400
},
{
"epoch": 2.7255606492380124,
"grad_norm": 2.1729025840759277,
"learning_rate": 1.5966282385025413e-05,
"loss": 0.5742,
"step": 5500
},
{
"epoch": 2.7255606492380124,
"eval_loss": 2.149742841720581,
"eval_runtime": 361.8954,
"eval_samples_per_second": 6.695,
"eval_steps_per_second": 3.349,
"step": 5500
},
{
"epoch": 2.775120802874489,
"grad_norm": 1.7941385507583618,
"learning_rate": 1.534647328622784e-05,
"loss": 0.5635,
"step": 5600
},
{
"epoch": 2.8246809565109654,
"grad_norm": 2.344470739364624,
"learning_rate": 1.4726664187430272e-05,
"loss": 0.5716,
"step": 5700
},
{
"epoch": 2.8742411101474414,
"grad_norm": 1.6030564308166504,
"learning_rate": 1.41068550886327e-05,
"loss": 0.5691,
"step": 5800
},
{
"epoch": 2.9238012637839175,
"grad_norm": 1.6917791366577148,
"learning_rate": 1.3487045989835131e-05,
"loss": 0.5678,
"step": 5900
},
{
"epoch": 2.973361417420394,
"grad_norm": 1.7974952459335327,
"learning_rate": 1.2867236891037562e-05,
"loss": 0.5609,
"step": 6000
},
{
"epoch": 2.973361417420394,
"eval_loss": 2.1735124588012695,
"eval_runtime": 362.267,
"eval_samples_per_second": 6.688,
"eval_steps_per_second": 3.346,
"step": 6000
},
{
"epoch": 3.0227976706727793,
"grad_norm": 2.0832440853118896,
"learning_rate": 1.224742779223999e-05,
"loss": 0.494,
"step": 6100
},
{
"epoch": 3.0723578243092553,
"grad_norm": 1.8607770204544067,
"learning_rate": 1.162761869344242e-05,
"loss": 0.3946,
"step": 6200
},
{
"epoch": 3.121917977945732,
"grad_norm": 2.1528215408325195,
"learning_rate": 1.100780959464485e-05,
"loss": 0.3964,
"step": 6300
},
{
"epoch": 3.171478131582208,
"grad_norm": 2.327425718307495,
"learning_rate": 1.038800049584728e-05,
"loss": 0.3915,
"step": 6400
},
{
"epoch": 3.2210382852186843,
"grad_norm": 1.8909003734588623,
"learning_rate": 9.768191397049709e-06,
"loss": 0.3989,
"step": 6500
},
{
"epoch": 3.2210382852186843,
"eval_loss": 2.5916407108306885,
"eval_runtime": 367.5177,
"eval_samples_per_second": 6.593,
"eval_steps_per_second": 3.298,
"step": 6500
},
{
"epoch": 3.2705984388551603,
"grad_norm": 2.429757595062256,
"learning_rate": 9.14838229825214e-06,
"loss": 0.3955,
"step": 6600
},
{
"epoch": 3.320158592491637,
"grad_norm": 2.5529799461364746,
"learning_rate": 8.52857319945457e-06,
"loss": 0.3921,
"step": 6700
},
{
"epoch": 3.369718746128113,
"grad_norm": 2.185617446899414,
"learning_rate": 7.908764100656999e-06,
"loss": 0.4004,
"step": 6800
},
{
"epoch": 3.4192788997645893,
"grad_norm": 2.2846291065216064,
"learning_rate": 7.288955001859428e-06,
"loss": 0.3938,
"step": 6900
},
{
"epoch": 3.4688390534010654,
"grad_norm": 2.211652994155884,
"learning_rate": 6.6691459030618575e-06,
"loss": 0.3861,
"step": 7000
},
{
"epoch": 3.4688390534010654,
"eval_loss": 2.668076753616333,
"eval_runtime": 365.3913,
"eval_samples_per_second": 6.631,
"eval_steps_per_second": 3.317,
"step": 7000
},
{
"epoch": 3.518399207037542,
"grad_norm": 2.2957966327667236,
"learning_rate": 6.049336804264287e-06,
"loss": 0.3851,
"step": 7100
},
{
"epoch": 3.567959360674018,
"grad_norm": 2.1889781951904297,
"learning_rate": 5.429527705466716e-06,
"loss": 0.3957,
"step": 7200
},
{
"epoch": 3.6175195143104943,
"grad_norm": 2.914120674133301,
"learning_rate": 4.809718606669146e-06,
"loss": 0.3995,
"step": 7300
},
{
"epoch": 3.667079667946971,
"grad_norm": 2.438662528991699,
"learning_rate": 4.189909507871576e-06,
"loss": 0.3875,
"step": 7400
},
{
"epoch": 3.716639821583447,
"grad_norm": 2.4311559200286865,
"learning_rate": 3.570100409074005e-06,
"loss": 0.3825,
"step": 7500
},
{
"epoch": 3.716639821583447,
"eval_loss": 2.6605801582336426,
"eval_runtime": 365.0751,
"eval_samples_per_second": 6.637,
"eval_steps_per_second": 3.32,
"step": 7500
},
{
"epoch": 3.7661999752199233,
"grad_norm": 2.665722608566284,
"learning_rate": 2.950291310276435e-06,
"loss": 0.3872,
"step": 7600
},
{
"epoch": 3.8157601288563994,
"grad_norm": 2.206805467605591,
"learning_rate": 2.330482211478865e-06,
"loss": 0.3899,
"step": 7700
},
{
"epoch": 3.865320282492876,
"grad_norm": 2.1605570316314697,
"learning_rate": 1.7106731126812943e-06,
"loss": 0.3866,
"step": 7800
},
{
"epoch": 3.914880436129352,
"grad_norm": 2.385495185852051,
"learning_rate": 1.0908640138837239e-06,
"loss": 0.3789,
"step": 7900
},
{
"epoch": 3.9644405897658284,
"grad_norm": 2.2114624977111816,
"learning_rate": 4.710549150861535e-07,
"loss": 0.3848,
"step": 8000
},
{
"epoch": 3.9644405897658284,
"eval_loss": 2.6748318672180176,
"eval_runtime": 366.1405,
"eval_samples_per_second": 6.618,
"eval_steps_per_second": 3.31,
"step": 8000
}
],
"logging_steps": 100,
"max_steps": 8072,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 8.6417343971328e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}