{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.962025316455696,
"eval_steps": 500,
"global_step": 78,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0379746835443038,
"grad_norm": 6.485745548246275,
"learning_rate": 1.25e-06,
"loss": 1.1878,
"step": 1
},
{
"epoch": 0.0759493670886076,
"grad_norm": 6.465822217568478,
"learning_rate": 2.5e-06,
"loss": 1.1735,
"step": 2
},
{
"epoch": 0.11392405063291139,
"grad_norm": 6.195277506391659,
"learning_rate": 3.7500000000000005e-06,
"loss": 1.1647,
"step": 3
},
{
"epoch": 0.1518987341772152,
"grad_norm": 5.799769823526993,
"learning_rate": 5e-06,
"loss": 1.1608,
"step": 4
},
{
"epoch": 0.189873417721519,
"grad_norm": 4.3450177180199,
"learning_rate": 6.25e-06,
"loss": 1.1421,
"step": 5
},
{
"epoch": 0.22784810126582278,
"grad_norm": 2.463610967994665,
"learning_rate": 7.500000000000001e-06,
"loss": 1.0396,
"step": 6
},
{
"epoch": 0.26582278481012656,
"grad_norm": 4.577891696246731,
"learning_rate": 8.750000000000001e-06,
"loss": 1.1231,
"step": 7
},
{
"epoch": 0.3037974683544304,
"grad_norm": 4.503384198538751,
"learning_rate": 1e-05,
"loss": 1.1075,
"step": 8
},
{
"epoch": 0.34177215189873417,
"grad_norm": 3.8310498612257686,
"learning_rate": 9.994965332706574e-06,
"loss": 0.9965,
"step": 9
},
{
"epoch": 0.379746835443038,
"grad_norm": 3.5638344423615647,
"learning_rate": 9.979871469976197e-06,
"loss": 0.9821,
"step": 10
},
{
"epoch": 0.4177215189873418,
"grad_norm": 2.881786623869597,
"learning_rate": 9.954748808839675e-06,
"loss": 1.0366,
"step": 11
},
{
"epoch": 0.45569620253164556,
"grad_norm": 1.9488810814561746,
"learning_rate": 9.91964794299315e-06,
"loss": 0.9426,
"step": 12
},
{
"epoch": 0.4936708860759494,
"grad_norm": 1.8755855672690618,
"learning_rate": 9.874639560909118e-06,
"loss": 0.8988,
"step": 13
},
{
"epoch": 0.5316455696202531,
"grad_norm": 1.8486508629321972,
"learning_rate": 9.819814303479268e-06,
"loss": 0.9482,
"step": 14
},
{
"epoch": 0.569620253164557,
"grad_norm": 1.3806887060387854,
"learning_rate": 9.755282581475769e-06,
"loss": 0.9246,
"step": 15
},
{
"epoch": 0.6075949367088608,
"grad_norm": 1.2204733734134552,
"learning_rate": 9.681174353198687e-06,
"loss": 0.9037,
"step": 16
},
{
"epoch": 0.6455696202531646,
"grad_norm": 1.2853760258931963,
"learning_rate": 9.597638862757255e-06,
"loss": 0.8985,
"step": 17
},
{
"epoch": 0.6835443037974683,
"grad_norm": 1.2437773881234602,
"learning_rate": 9.504844339512096e-06,
"loss": 0.9168,
"step": 18
},
{
"epoch": 0.7215189873417721,
"grad_norm": 1.146833630595906,
"learning_rate": 9.40297765928369e-06,
"loss": 0.8647,
"step": 19
},
{
"epoch": 0.759493670886076,
"grad_norm": 1.044550835994215,
"learning_rate": 9.292243968009332e-06,
"loss": 0.8802,
"step": 20
},
{
"epoch": 0.7974683544303798,
"grad_norm": 1.1571835712465777,
"learning_rate": 9.172866268606514e-06,
"loss": 0.8567,
"step": 21
},
{
"epoch": 0.8354430379746836,
"grad_norm": 1.0549337977158728,
"learning_rate": 9.045084971874738e-06,
"loss": 0.8573,
"step": 22
},
{
"epoch": 0.8734177215189873,
"grad_norm": 0.9935987376050021,
"learning_rate": 8.90915741234015e-06,
"loss": 0.8795,
"step": 23
},
{
"epoch": 0.9113924050632911,
"grad_norm": 0.9568003714663316,
"learning_rate": 8.765357330018056e-06,
"loss": 0.85,
"step": 24
},
{
"epoch": 0.9493670886075949,
"grad_norm": 1.084326509880812,
"learning_rate": 8.613974319136959e-06,
"loss": 0.8239,
"step": 25
},
{
"epoch": 0.9873417721518988,
"grad_norm": 1.0213309692673835,
"learning_rate": 8.455313244934324e-06,
"loss": 0.8843,
"step": 26
},
{
"epoch": 1.0253164556962024,
"grad_norm": 1.5805051692575611,
"learning_rate": 8.289693629698564e-06,
"loss": 1.366,
"step": 27
},
{
"epoch": 1.0632911392405062,
"grad_norm": 0.8244324641459649,
"learning_rate": 8.117449009293668e-06,
"loss": 0.784,
"step": 28
},
{
"epoch": 1.1012658227848102,
"grad_norm": 0.7560188526972407,
"learning_rate": 7.938926261462366e-06,
"loss": 0.7476,
"step": 29
},
{
"epoch": 1.139240506329114,
"grad_norm": 0.9004810483037317,
"learning_rate": 7.754484907260513e-06,
"loss": 0.7522,
"step": 30
},
{
"epoch": 1.1772151898734178,
"grad_norm": 0.6657243853335917,
"learning_rate": 7.564496387029532e-06,
"loss": 0.7875,
"step": 31
},
{
"epoch": 1.2151898734177216,
"grad_norm": 0.7897561995668748,
"learning_rate": 7.369343312364994e-06,
"loss": 0.7827,
"step": 32
},
{
"epoch": 1.2531645569620253,
"grad_norm": 0.8490295067257841,
"learning_rate": 7.169418695587791e-06,
"loss": 0.7995,
"step": 33
},
{
"epoch": 1.2911392405063291,
"grad_norm": 0.7054475226304026,
"learning_rate": 6.965125158269619e-06,
"loss": 0.7471,
"step": 34
},
{
"epoch": 1.3291139240506329,
"grad_norm": 0.6714021426026477,
"learning_rate": 6.7568741204067145e-06,
"loss": 0.7856,
"step": 35
},
{
"epoch": 1.3670886075949367,
"grad_norm": 0.7079475124768483,
"learning_rate": 6.545084971874738e-06,
"loss": 0.6909,
"step": 36
},
{
"epoch": 1.4050632911392404,
"grad_norm": 0.8283603594737133,
"learning_rate": 6.330184227833376e-06,
"loss": 0.8468,
"step": 37
},
{
"epoch": 1.4430379746835442,
"grad_norm": 0.762781447849204,
"learning_rate": 6.112604669781572e-06,
"loss": 0.7929,
"step": 38
},
{
"epoch": 1.481012658227848,
"grad_norm": 0.737050531054483,
"learning_rate": 5.892784473993184e-06,
"loss": 0.7777,
"step": 39
},
{
"epoch": 1.518987341772152,
"grad_norm": 0.7681887147382744,
"learning_rate": 5.671166329088278e-06,
"loss": 0.8305,
"step": 40
},
{
"epoch": 1.5569620253164556,
"grad_norm": 0.6735546654884726,
"learning_rate": 5.448196544517168e-06,
"loss": 0.8163,
"step": 41
},
{
"epoch": 1.5949367088607596,
"grad_norm": 0.6528315419344399,
"learning_rate": 5.224324151752575e-06,
"loss": 0.7768,
"step": 42
},
{
"epoch": 1.6329113924050633,
"grad_norm": 0.6443387987382299,
"learning_rate": 5e-06,
"loss": 0.7422,
"step": 43
},
{
"epoch": 1.6708860759493671,
"grad_norm": 0.6935358728809672,
"learning_rate": 4.775675848247427e-06,
"loss": 0.8107,
"step": 44
},
{
"epoch": 1.7088607594936709,
"grad_norm": 0.6346001503190914,
"learning_rate": 4.551803455482833e-06,
"loss": 0.7577,
"step": 45
},
{
"epoch": 1.7468354430379747,
"grad_norm": 0.7389755097450126,
"learning_rate": 4.3288336709117246e-06,
"loss": 0.808,
"step": 46
},
{
"epoch": 1.7848101265822784,
"grad_norm": 0.6701646868155358,
"learning_rate": 4.107215526006818e-06,
"loss": 0.7899,
"step": 47
},
{
"epoch": 1.8227848101265822,
"grad_norm": 0.6061610450983355,
"learning_rate": 3.887395330218429e-06,
"loss": 0.7518,
"step": 48
},
{
"epoch": 1.8607594936708862,
"grad_norm": 0.6113450822488097,
"learning_rate": 3.669815772166625e-06,
"loss": 0.8113,
"step": 49
},
{
"epoch": 1.8987341772151898,
"grad_norm": 0.6942091005029863,
"learning_rate": 3.4549150281252635e-06,
"loss": 0.765,
"step": 50
},
{
"epoch": 1.9367088607594938,
"grad_norm": 0.6050143971344853,
"learning_rate": 3.2431258795932863e-06,
"loss": 0.7156,
"step": 51
},
{
"epoch": 1.9746835443037973,
"grad_norm": 0.7105617556204705,
"learning_rate": 3.0348748417303826e-06,
"loss": 0.7184,
"step": 52
},
{
"epoch": 2.0126582278481013,
"grad_norm": 1.3246392741326032,
"learning_rate": 2.83058130441221e-06,
"loss": 1.214,
"step": 53
},
{
"epoch": 2.050632911392405,
"grad_norm": 0.6417792043489915,
"learning_rate": 2.6306566876350072e-06,
"loss": 0.7168,
"step": 54
},
{
"epoch": 2.088607594936709,
"grad_norm": 0.5964202254877964,
"learning_rate": 2.43550361297047e-06,
"loss": 0.6855,
"step": 55
},
{
"epoch": 2.1265822784810124,
"grad_norm": 0.5915140999717906,
"learning_rate": 2.245515092739488e-06,
"loss": 0.6483,
"step": 56
},
{
"epoch": 2.1645569620253164,
"grad_norm": 0.6027347895777584,
"learning_rate": 2.061073738537635e-06,
"loss": 0.7646,
"step": 57
},
{
"epoch": 2.2025316455696204,
"grad_norm": 0.6171953791580085,
"learning_rate": 1.8825509907063328e-06,
"loss": 0.7752,
"step": 58
},
{
"epoch": 2.240506329113924,
"grad_norm": 0.5884028276946883,
"learning_rate": 1.7103063703014372e-06,
"loss": 0.6389,
"step": 59
},
{
"epoch": 2.278481012658228,
"grad_norm": 0.5954292676999796,
"learning_rate": 1.544686755065677e-06,
"loss": 0.7054,
"step": 60
},
{
"epoch": 2.3164556962025316,
"grad_norm": 0.6614306100294592,
"learning_rate": 1.3860256808630429e-06,
"loss": 0.8011,
"step": 61
},
{
"epoch": 2.3544303797468356,
"grad_norm": 0.5442238857296332,
"learning_rate": 1.234642669981946e-06,
"loss": 0.6654,
"step": 62
},
{
"epoch": 2.392405063291139,
"grad_norm": 0.5398911342214016,
"learning_rate": 1.0908425876598512e-06,
"loss": 0.7555,
"step": 63
},
{
"epoch": 2.430379746835443,
"grad_norm": 0.5530982801705516,
"learning_rate": 9.549150281252633e-07,
"loss": 0.7548,
"step": 64
},
{
"epoch": 2.4683544303797467,
"grad_norm": 0.5638744668606761,
"learning_rate": 8.271337313934869e-07,
"loss": 0.6397,
"step": 65
},
{
"epoch": 2.5063291139240507,
"grad_norm": 0.5564528408103611,
"learning_rate": 7.077560319906696e-07,
"loss": 0.7229,
"step": 66
},
{
"epoch": 2.5443037974683547,
"grad_norm": 0.5636248726836497,
"learning_rate": 5.9702234071631e-07,
"loss": 0.7062,
"step": 67
},
{
"epoch": 2.5822784810126582,
"grad_norm": 0.5866165147488809,
"learning_rate": 4.951556604879049e-07,
"loss": 0.747,
"step": 68
},
{
"epoch": 2.620253164556962,
"grad_norm": 0.5143692009437217,
"learning_rate": 4.0236113724274716e-07,
"loss": 0.6617,
"step": 69
},
{
"epoch": 2.6582278481012658,
"grad_norm": 0.5633167411619424,
"learning_rate": 3.18825646801314e-07,
"loss": 0.7664,
"step": 70
},
{
"epoch": 2.6962025316455698,
"grad_norm": 0.5897836640010781,
"learning_rate": 2.447174185242324e-07,
"loss": 0.7053,
"step": 71
},
{
"epoch": 2.7341772151898733,
"grad_norm": 0.5866463117478233,
"learning_rate": 1.801856965207338e-07,
"loss": 0.7124,
"step": 72
},
{
"epoch": 2.7721518987341773,
"grad_norm": 0.5214543757917679,
"learning_rate": 1.253604390908819e-07,
"loss": 0.7179,
"step": 73
},
{
"epoch": 2.810126582278481,
"grad_norm": 0.5198883021466474,
"learning_rate": 8.035205700685167e-08,
"loss": 0.7522,
"step": 74
},
{
"epoch": 2.848101265822785,
"grad_norm": 0.5664317042404885,
"learning_rate": 4.52511911603265e-08,
"loss": 0.6989,
"step": 75
},
{
"epoch": 2.8860759493670884,
"grad_norm": 0.5580909347600271,
"learning_rate": 2.012853002380466e-08,
"loss": 0.729,
"step": 76
},
{
"epoch": 2.9240506329113924,
"grad_norm": 0.57492441797178,
"learning_rate": 5.034667293427053e-09,
"loss": 0.7252,
"step": 77
},
{
"epoch": 2.962025316455696,
"grad_norm": 0.5767576246482665,
"learning_rate": 0.0,
"loss": 0.7423,
"step": 78
},
{
"epoch": 2.962025316455696,
"step": 78,
"total_flos": 48341369913344.0,
"train_loss": 0.8378327083893311,
"train_runtime": 1191.9993,
"train_samples_per_second": 6.292,
"train_steps_per_second": 0.065
}
],
"logging_steps": 1,
"max_steps": 78,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 48341369913344.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}