{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.990379746835443,
"eval_steps": 500,
"global_step": 738,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.04050632911392405,
"grad_norm": 6.559766181680777,
"learning_rate": 5e-06,
"loss": 0.7469,
"step": 10
},
{
"epoch": 0.0810126582278481,
"grad_norm": 2.0645407865130565,
"learning_rate": 5e-06,
"loss": 0.6482,
"step": 20
},
{
"epoch": 0.12151898734177215,
"grad_norm": 2.192313459271068,
"learning_rate": 5e-06,
"loss": 0.6153,
"step": 30
},
{
"epoch": 0.1620253164556962,
"grad_norm": 1.3146303292450665,
"learning_rate": 5e-06,
"loss": 0.5992,
"step": 40
},
{
"epoch": 0.20253164556962025,
"grad_norm": 4.0197909191598376,
"learning_rate": 5e-06,
"loss": 0.5961,
"step": 50
},
{
"epoch": 0.2430379746835443,
"grad_norm": 2.658139591723639,
"learning_rate": 5e-06,
"loss": 0.5883,
"step": 60
},
{
"epoch": 0.28354430379746837,
"grad_norm": 3.05141456733358,
"learning_rate": 5e-06,
"loss": 0.578,
"step": 70
},
{
"epoch": 0.3240506329113924,
"grad_norm": 1.550977980533193,
"learning_rate": 5e-06,
"loss": 0.581,
"step": 80
},
{
"epoch": 0.36455696202531646,
"grad_norm": 1.5325403635274513,
"learning_rate": 5e-06,
"loss": 0.5782,
"step": 90
},
{
"epoch": 0.4050632911392405,
"grad_norm": 1.3553457823507413,
"learning_rate": 5e-06,
"loss": 0.5733,
"step": 100
},
{
"epoch": 0.44556962025316454,
"grad_norm": 1.9379260555134865,
"learning_rate": 5e-06,
"loss": 0.5698,
"step": 110
},
{
"epoch": 0.4860759493670886,
"grad_norm": 1.5182450105686285,
"learning_rate": 5e-06,
"loss": 0.5694,
"step": 120
},
{
"epoch": 0.5265822784810127,
"grad_norm": 1.196138670028915,
"learning_rate": 5e-06,
"loss": 0.5631,
"step": 130
},
{
"epoch": 0.5670886075949367,
"grad_norm": 1.6047169596984416,
"learning_rate": 5e-06,
"loss": 0.5631,
"step": 140
},
{
"epoch": 0.6075949367088608,
"grad_norm": 2.561229989585886,
"learning_rate": 5e-06,
"loss": 0.5606,
"step": 150
},
{
"epoch": 0.6481012658227848,
"grad_norm": 2.0457845948979396,
"learning_rate": 5e-06,
"loss": 0.5628,
"step": 160
},
{
"epoch": 0.6886075949367089,
"grad_norm": 2.1629027472393845,
"learning_rate": 5e-06,
"loss": 0.5594,
"step": 170
},
{
"epoch": 0.7291139240506329,
"grad_norm": 1.7203051094851745,
"learning_rate": 5e-06,
"loss": 0.5612,
"step": 180
},
{
"epoch": 0.769620253164557,
"grad_norm": 1.6424314613227078,
"learning_rate": 5e-06,
"loss": 0.5534,
"step": 190
},
{
"epoch": 0.810126582278481,
"grad_norm": 1.6961150692619482,
"learning_rate": 5e-06,
"loss": 0.5553,
"step": 200
},
{
"epoch": 0.850632911392405,
"grad_norm": 1.575333089935049,
"learning_rate": 5e-06,
"loss": 0.5542,
"step": 210
},
{
"epoch": 0.8911392405063291,
"grad_norm": 1.4596900420741143,
"learning_rate": 5e-06,
"loss": 0.5542,
"step": 220
},
{
"epoch": 0.9316455696202531,
"grad_norm": 1.3253755066505102,
"learning_rate": 5e-06,
"loss": 0.5502,
"step": 230
},
{
"epoch": 0.9721518987341772,
"grad_norm": 1.0645682091565618,
"learning_rate": 5e-06,
"loss": 0.5522,
"step": 240
},
{
"epoch": 0.9964556962025316,
"eval_loss": 0.0690777450799942,
"eval_runtime": 253.2419,
"eval_samples_per_second": 52.535,
"eval_steps_per_second": 0.411,
"step": 246
},
{
"epoch": 1.0131645569620253,
"grad_norm": 2.2167473800014803,
"learning_rate": 5e-06,
"loss": 0.5352,
"step": 250
},
{
"epoch": 1.0536708860759494,
"grad_norm": 1.7633980281740598,
"learning_rate": 5e-06,
"loss": 0.4945,
"step": 260
},
{
"epoch": 1.0941772151898734,
"grad_norm": 1.2606315717897887,
"learning_rate": 5e-06,
"loss": 0.4929,
"step": 270
},
{
"epoch": 1.1346835443037975,
"grad_norm": 1.6049596178131293,
"learning_rate": 5e-06,
"loss": 0.4906,
"step": 280
},
{
"epoch": 1.1751898734177215,
"grad_norm": 1.3361496575935954,
"learning_rate": 5e-06,
"loss": 0.4965,
"step": 290
},
{
"epoch": 1.2156962025316456,
"grad_norm": 1.6838379949154632,
"learning_rate": 5e-06,
"loss": 0.4935,
"step": 300
},
{
"epoch": 1.2562025316455696,
"grad_norm": 1.2287081540136666,
"learning_rate": 5e-06,
"loss": 0.4946,
"step": 310
},
{
"epoch": 1.2967088607594937,
"grad_norm": 1.1041418905025817,
"learning_rate": 5e-06,
"loss": 0.4952,
"step": 320
},
{
"epoch": 1.3372151898734177,
"grad_norm": 1.1567183179957015,
"learning_rate": 5e-06,
"loss": 0.4939,
"step": 330
},
{
"epoch": 1.3777215189873417,
"grad_norm": 1.0827873207690484,
"learning_rate": 5e-06,
"loss": 0.4997,
"step": 340
},
{
"epoch": 1.4182278481012658,
"grad_norm": 1.384887984465678,
"learning_rate": 5e-06,
"loss": 0.4956,
"step": 350
},
{
"epoch": 1.4587341772151898,
"grad_norm": 1.4028817280629426,
"learning_rate": 5e-06,
"loss": 0.4997,
"step": 360
},
{
"epoch": 1.4992405063291139,
"grad_norm": 1.1049684628387706,
"learning_rate": 5e-06,
"loss": 0.4981,
"step": 370
},
{
"epoch": 1.539746835443038,
"grad_norm": 1.2590051489674519,
"learning_rate": 5e-06,
"loss": 0.4981,
"step": 380
},
{
"epoch": 1.5802531645569622,
"grad_norm": 1.1357365609688317,
"learning_rate": 5e-06,
"loss": 0.4968,
"step": 390
},
{
"epoch": 1.620759493670886,
"grad_norm": 1.1106976894525098,
"learning_rate": 5e-06,
"loss": 0.4977,
"step": 400
},
{
"epoch": 1.6612658227848103,
"grad_norm": 1.3665599034101343,
"learning_rate": 5e-06,
"loss": 0.4931,
"step": 410
},
{
"epoch": 1.701772151898734,
"grad_norm": 1.304233943964937,
"learning_rate": 5e-06,
"loss": 0.4937,
"step": 420
},
{
"epoch": 1.7422784810126584,
"grad_norm": 1.4646233158776383,
"learning_rate": 5e-06,
"loss": 0.496,
"step": 430
},
{
"epoch": 1.7827848101265822,
"grad_norm": 1.2915317661859655,
"learning_rate": 5e-06,
"loss": 0.4984,
"step": 440
},
{
"epoch": 1.8232911392405065,
"grad_norm": 1.480881353690771,
"learning_rate": 5e-06,
"loss": 0.4962,
"step": 450
},
{
"epoch": 1.8637974683544303,
"grad_norm": 1.2523408662507558,
"learning_rate": 5e-06,
"loss": 0.4963,
"step": 460
},
{
"epoch": 1.9043037974683545,
"grad_norm": 1.1003232267021519,
"learning_rate": 5e-06,
"loss": 0.493,
"step": 470
},
{
"epoch": 1.9448101265822784,
"grad_norm": 1.1189004594261551,
"learning_rate": 5e-06,
"loss": 0.4994,
"step": 480
},
{
"epoch": 1.9853164556962026,
"grad_norm": 1.574886373546533,
"learning_rate": 5e-06,
"loss": 0.4996,
"step": 490
},
{
"epoch": 1.9974683544303797,
"eval_loss": 0.06833568215370178,
"eval_runtime": 254.4783,
"eval_samples_per_second": 52.28,
"eval_steps_per_second": 0.409,
"step": 493
},
{
"epoch": 2.0263291139240507,
"grad_norm": 2.3014937314441917,
"learning_rate": 5e-06,
"loss": 0.4488,
"step": 500
},
{
"epoch": 2.0668354430379745,
"grad_norm": 1.5839634313845277,
"learning_rate": 5e-06,
"loss": 0.4174,
"step": 510
},
{
"epoch": 2.1073417721518988,
"grad_norm": 1.5382506185743696,
"learning_rate": 5e-06,
"loss": 0.4122,
"step": 520
},
{
"epoch": 2.1478481012658226,
"grad_norm": 1.9423195334308014,
"learning_rate": 5e-06,
"loss": 0.4176,
"step": 530
},
{
"epoch": 2.188354430379747,
"grad_norm": 1.7133386724331026,
"learning_rate": 5e-06,
"loss": 0.4155,
"step": 540
},
{
"epoch": 2.2288607594936707,
"grad_norm": 1.5970886196077994,
"learning_rate": 5e-06,
"loss": 0.4159,
"step": 550
},
{
"epoch": 2.269367088607595,
"grad_norm": 1.698428644338184,
"learning_rate": 5e-06,
"loss": 0.4168,
"step": 560
},
{
"epoch": 2.309873417721519,
"grad_norm": 1.4211509452212008,
"learning_rate": 5e-06,
"loss": 0.4171,
"step": 570
},
{
"epoch": 2.350379746835443,
"grad_norm": 1.5618144695743699,
"learning_rate": 5e-06,
"loss": 0.4213,
"step": 580
},
{
"epoch": 2.390886075949367,
"grad_norm": 1.6889726651564196,
"learning_rate": 5e-06,
"loss": 0.4217,
"step": 590
},
{
"epoch": 2.431392405063291,
"grad_norm": 1.3913389067387658,
"learning_rate": 5e-06,
"loss": 0.4203,
"step": 600
},
{
"epoch": 2.4718987341772154,
"grad_norm": 1.3963990757058133,
"learning_rate": 5e-06,
"loss": 0.4202,
"step": 610
},
{
"epoch": 2.512405063291139,
"grad_norm": 1.9356587258448503,
"learning_rate": 5e-06,
"loss": 0.4238,
"step": 620
},
{
"epoch": 2.552911392405063,
"grad_norm": 1.6113295443045734,
"learning_rate": 5e-06,
"loss": 0.4173,
"step": 630
},
{
"epoch": 2.5934177215189873,
"grad_norm": 1.9607308696000172,
"learning_rate": 5e-06,
"loss": 0.4234,
"step": 640
},
{
"epoch": 2.6339240506329116,
"grad_norm": 1.2271048631932833,
"learning_rate": 5e-06,
"loss": 0.4259,
"step": 650
},
{
"epoch": 2.6744303797468354,
"grad_norm": 1.113804445089212,
"learning_rate": 5e-06,
"loss": 0.4249,
"step": 660
},
{
"epoch": 2.714936708860759,
"grad_norm": 1.2492433372859684,
"learning_rate": 5e-06,
"loss": 0.4285,
"step": 670
},
{
"epoch": 2.7554430379746835,
"grad_norm": 1.3017899119460772,
"learning_rate": 5e-06,
"loss": 0.4234,
"step": 680
},
{
"epoch": 2.7959493670886078,
"grad_norm": 1.540529786978242,
"learning_rate": 5e-06,
"loss": 0.4271,
"step": 690
},
{
"epoch": 2.8364556962025316,
"grad_norm": 1.3913941622738781,
"learning_rate": 5e-06,
"loss": 0.4252,
"step": 700
},
{
"epoch": 2.876962025316456,
"grad_norm": 1.1221109278814458,
"learning_rate": 5e-06,
"loss": 0.428,
"step": 710
},
{
"epoch": 2.9174683544303797,
"grad_norm": 1.3475151536192913,
"learning_rate": 5e-06,
"loss": 0.4271,
"step": 720
},
{
"epoch": 2.957974683544304,
"grad_norm": 1.1695621792215076,
"learning_rate": 5e-06,
"loss": 0.4325,
"step": 730
},
{
"epoch": 2.990379746835443,
"eval_loss": 0.07060651481151581,
"eval_runtime": 253.6051,
"eval_samples_per_second": 52.46,
"eval_steps_per_second": 0.41,
"step": 738
},
{
"epoch": 2.990379746835443,
"step": 738,
"total_flos": 2471936214958080.0,
"train_loss": 0.4994575757644364,
"train_runtime": 42649.5957,
"train_samples_per_second": 17.779,
"train_steps_per_second": 0.017
}
],
"logging_steps": 10,
"max_steps": 738,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2471936214958080.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}