{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 29.75195822454308,
"eval_steps": 500,
"global_step": 700,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.4177545691906005,
"grad_norm": 0.28227752447128296,
"learning_rate": 2.9999999999999997e-05,
"loss": 4.1508,
"step": 10
},
{
"epoch": 0.835509138381201,
"grad_norm": 0.31433430314064026,
"learning_rate": 5.9999999999999995e-05,
"loss": 4.1593,
"step": 20
},
{
"epoch": 1.2532637075718016,
"grad_norm": 0.3350953161716461,
"learning_rate": 8.999999999999999e-05,
"loss": 4.0414,
"step": 30
},
{
"epoch": 1.671018276762402,
"grad_norm": 0.2885706126689911,
"learning_rate": 0.00011999999999999999,
"loss": 3.8411,
"step": 40
},
{
"epoch": 2.0887728459530024,
"grad_norm": 0.23711609840393066,
"learning_rate": 0.00015,
"loss": 3.6434,
"step": 50
},
{
"epoch": 2.506527415143603,
"grad_norm": 0.21583135426044464,
"learning_rate": 0.00017999999999999998,
"loss": 3.4636,
"step": 60
},
{
"epoch": 2.9242819843342036,
"grad_norm": 0.18754692375659943,
"learning_rate": 0.00020999999999999998,
"loss": 3.3154,
"step": 70
},
{
"epoch": 3.342036553524804,
"grad_norm": 0.15951760113239288,
"learning_rate": 0.00023999999999999998,
"loss": 3.2195,
"step": 80
},
{
"epoch": 3.759791122715405,
"grad_norm": 0.14639759063720703,
"learning_rate": 0.00027,
"loss": 3.122,
"step": 90
},
{
"epoch": 4.177545691906005,
"grad_norm": 0.1860765665769577,
"learning_rate": 0.0003,
"loss": 3.0677,
"step": 100
},
{
"epoch": 4.595300261096606,
"grad_norm": 0.1737535446882248,
"learning_rate": 0.000285,
"loss": 2.9992,
"step": 110
},
{
"epoch": 5.013054830287206,
"grad_norm": 0.181383416056633,
"learning_rate": 0.00027,
"loss": 2.9761,
"step": 120
},
{
"epoch": 5.430809399477806,
"grad_norm": 0.1873219609260559,
"learning_rate": 0.00025499999999999996,
"loss": 2.9281,
"step": 130
},
{
"epoch": 5.848563968668407,
"grad_norm": 0.19864186644554138,
"learning_rate": 0.00023999999999999998,
"loss": 2.9168,
"step": 140
},
{
"epoch": 6.266318537859008,
"grad_norm": 0.22326301038265228,
"learning_rate": 0.000225,
"loss": 2.8549,
"step": 150
},
{
"epoch": 6.684073107049608,
"grad_norm": 0.2200121283531189,
"learning_rate": 0.00020999999999999998,
"loss": 2.855,
"step": 160
},
{
"epoch": 7.101827676240209,
"grad_norm": 0.2546086311340332,
"learning_rate": 0.000195,
"loss": 2.8509,
"step": 170
},
{
"epoch": 7.51958224543081,
"grad_norm": 0.26345309615135193,
"learning_rate": 0.00017999999999999998,
"loss": 2.8144,
"step": 180
},
{
"epoch": 7.93733681462141,
"grad_norm": 0.21533280611038208,
"learning_rate": 0.000165,
"loss": 2.8006,
"step": 190
},
{
"epoch": 8.35509138381201,
"grad_norm": 0.2510657012462616,
"learning_rate": 0.00015,
"loss": 2.7816,
"step": 200
},
{
"epoch": 8.77284595300261,
"grad_norm": 0.23468665778636932,
"learning_rate": 0.000135,
"loss": 2.7762,
"step": 210
},
{
"epoch": 9.190600522193211,
"grad_norm": 0.23014432191848755,
"learning_rate": 0.00011999999999999999,
"loss": 2.7731,
"step": 220
},
{
"epoch": 9.608355091383812,
"grad_norm": 0.247611865401268,
"learning_rate": 0.00010499999999999999,
"loss": 2.742,
"step": 230
},
{
"epoch": 10.026109660574413,
"grad_norm": 0.2899376451969147,
"learning_rate": 8.999999999999999e-05,
"loss": 2.763,
"step": 240
},
{
"epoch": 10.443864229765014,
"grad_norm": 0.24601446092128754,
"learning_rate": 7.5e-05,
"loss": 2.7529,
"step": 250
},
{
"epoch": 10.861618798955613,
"grad_norm": 0.2344890832901001,
"learning_rate": 5.9999999999999995e-05,
"loss": 2.7373,
"step": 260
},
{
"epoch": 11.279373368146214,
"grad_norm": 0.22882166504859924,
"learning_rate": 4.4999999999999996e-05,
"loss": 2.7427,
"step": 270
},
{
"epoch": 11.697127937336814,
"grad_norm": 0.26199406385421753,
"learning_rate": 2.9999999999999997e-05,
"loss": 2.6814,
"step": 280
},
{
"epoch": 12.114882506527415,
"grad_norm": 0.2374505251646042,
"learning_rate": 1.4999999999999999e-05,
"loss": 2.758,
"step": 290
},
{
"epoch": 12.532637075718016,
"grad_norm": 0.2393040806055069,
"learning_rate": 0.0,
"loss": 2.7284,
"step": 300
},
{
"epoch": 13.459530026109661,
"grad_norm": 0.2965029180049896,
"learning_rate": 0.0002668421052631579,
"loss": 2.7219,
"step": 310
},
{
"epoch": 13.877284595300262,
"grad_norm": 0.2831190526485443,
"learning_rate": 0.0002652631578947368,
"loss": 2.7316,
"step": 320
},
{
"epoch": 14.295039164490861,
"grad_norm": 0.29041996598243713,
"learning_rate": 0.00026368421052631576,
"loss": 2.6869,
"step": 330
},
{
"epoch": 14.712793733681462,
"grad_norm": 0.27796632051467896,
"learning_rate": 0.0002621052631578947,
"loss": 2.7045,
"step": 340
},
{
"epoch": 15.130548302872063,
"grad_norm": 0.30092301964759827,
"learning_rate": 0.0002605263157894737,
"loss": 2.6589,
"step": 350
},
{
"epoch": 15.548302872062663,
"grad_norm": 0.33648282289505005,
"learning_rate": 0.0002589473684210526,
"loss": 2.6811,
"step": 360
},
{
"epoch": 15.966057441253264,
"grad_norm": 0.3513476550579071,
"learning_rate": 0.00025736842105263157,
"loss": 2.6424,
"step": 370
},
{
"epoch": 16.383812010443865,
"grad_norm": 0.366802453994751,
"learning_rate": 0.0002557894736842105,
"loss": 2.6225,
"step": 380
},
{
"epoch": 16.801566579634464,
"grad_norm": 0.3507522642612457,
"learning_rate": 0.00025421052631578945,
"loss": 2.6693,
"step": 390
},
{
"epoch": 17.219321148825067,
"grad_norm": 0.32098060846328735,
"learning_rate": 0.00025263157894736836,
"loss": 2.6372,
"step": 400
},
{
"epoch": 17.637075718015666,
"grad_norm": 0.34954994916915894,
"learning_rate": 0.0002510526315789474,
"loss": 2.6057,
"step": 410
},
{
"epoch": 18.054830287206265,
"grad_norm": 0.3401590585708618,
"learning_rate": 0.0002494736842105263,
"loss": 2.6224,
"step": 420
},
{
"epoch": 18.472584856396868,
"grad_norm": 0.3732793927192688,
"learning_rate": 0.00024789473684210526,
"loss": 2.5845,
"step": 430
},
{
"epoch": 18.890339425587467,
"grad_norm": 0.3447878956794739,
"learning_rate": 0.00024631578947368417,
"loss": 2.5766,
"step": 440
},
{
"epoch": 19.30809399477807,
"grad_norm": 0.4215945899486542,
"learning_rate": 0.00024473684210526314,
"loss": 2.5784,
"step": 450
},
{
"epoch": 19.72584856396867,
"grad_norm": 0.44425827264785767,
"learning_rate": 0.00024315789473684207,
"loss": 2.5752,
"step": 460
},
{
"epoch": 20.143603133159267,
"grad_norm": 0.38389045000076294,
"learning_rate": 0.000241578947368421,
"loss": 2.5828,
"step": 470
},
{
"epoch": 20.56135770234987,
"grad_norm": 0.3717693090438843,
"learning_rate": 0.00023999999999999998,
"loss": 2.5507,
"step": 480
},
{
"epoch": 20.97911227154047,
"grad_norm": 0.3933301568031311,
"learning_rate": 0.00023842105263157895,
"loss": 2.5479,
"step": 490
},
{
"epoch": 21.39686684073107,
"grad_norm": 0.3996904790401459,
"learning_rate": 0.00023684210526315788,
"loss": 2.5408,
"step": 500
},
{
"epoch": 21.81462140992167,
"grad_norm": 0.3934177756309509,
"learning_rate": 0.00023526315789473682,
"loss": 2.5463,
"step": 510
},
{
"epoch": 22.232375979112273,
"grad_norm": 0.42467737197875977,
"learning_rate": 0.00023368421052631576,
"loss": 2.5392,
"step": 520
},
{
"epoch": 22.650130548302872,
"grad_norm": 0.38297030329704285,
"learning_rate": 0.0002321052631578947,
"loss": 2.5204,
"step": 530
},
{
"epoch": 23.06788511749347,
"grad_norm": 0.39583373069763184,
"learning_rate": 0.00023052631578947364,
"loss": 2.5195,
"step": 540
},
{
"epoch": 23.485639686684074,
"grad_norm": 0.3692266345024109,
"learning_rate": 0.00022894736842105263,
"loss": 2.5091,
"step": 550
},
{
"epoch": 23.903394255874673,
"grad_norm": 0.34597283601760864,
"learning_rate": 0.00022736842105263157,
"loss": 2.5011,
"step": 560
},
{
"epoch": 24.321148825065276,
"grad_norm": 0.3991779088973999,
"learning_rate": 0.0002257894736842105,
"loss": 2.4919,
"step": 570
},
{
"epoch": 24.738903394255875,
"grad_norm": 0.37865138053894043,
"learning_rate": 0.00022421052631578945,
"loss": 2.4943,
"step": 580
},
{
"epoch": 25.156657963446474,
"grad_norm": 0.41416704654693604,
"learning_rate": 0.0002226315789473684,
"loss": 2.4847,
"step": 590
},
{
"epoch": 25.574412532637076,
"grad_norm": 0.37662285566329956,
"learning_rate": 0.00022105263157894733,
"loss": 2.4836,
"step": 600
},
{
"epoch": 25.992167101827675,
"grad_norm": 0.4186669588088989,
"learning_rate": 0.00021947368421052632,
"loss": 2.4627,
"step": 610
},
{
"epoch": 26.409921671018278,
"grad_norm": 0.3905445337295532,
"learning_rate": 0.00021789473684210526,
"loss": 2.4616,
"step": 620
},
{
"epoch": 26.827676240208877,
"grad_norm": 0.45327094197273254,
"learning_rate": 0.0002163157894736842,
"loss": 2.4777,
"step": 630
},
{
"epoch": 27.245430809399476,
"grad_norm": 0.43680539727211,
"learning_rate": 0.00021473684210526314,
"loss": 2.4654,
"step": 640
},
{
"epoch": 27.66318537859008,
"grad_norm": 0.36886611580848694,
"learning_rate": 0.00021315789473684208,
"loss": 2.4511,
"step": 650
},
{
"epoch": 28.080939947780678,
"grad_norm": 0.36019206047058105,
"learning_rate": 0.00021157894736842102,
"loss": 2.4343,
"step": 660
},
{
"epoch": 28.49869451697128,
"grad_norm": 0.39306387305259705,
"learning_rate": 0.00020999999999999998,
"loss": 2.443,
"step": 670
},
{
"epoch": 28.91644908616188,
"grad_norm": 0.38716623187065125,
"learning_rate": 0.00020842105263157895,
"loss": 2.4417,
"step": 680
},
{
"epoch": 29.334203655352482,
"grad_norm": 0.38376671075820923,
"learning_rate": 0.0002068421052631579,
"loss": 2.4234,
"step": 690
},
{
"epoch": 29.75195822454308,
"grad_norm": 0.39722415804862976,
"learning_rate": 0.00020526315789473683,
"loss": 2.4229,
"step": 700
}
],
"logging_steps": 10,
"max_steps": 2000,
"num_input_tokens_seen": 0,
"num_train_epochs": 87,
"save_steps": 100,
"total_flos": 1.1309905296213811e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}