{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.9870276367738295,
"eval_steps": 500,
"global_step": 330,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.09024252679075014,
"grad_norm": 1.0283902937545022,
"learning_rate": 5e-06,
"loss": 0.7434,
"step": 10
},
{
"epoch": 0.18048505358150027,
"grad_norm": 0.5152802784289524,
"learning_rate": 5e-06,
"loss": 0.6665,
"step": 20
},
{
"epoch": 0.2707275803722504,
"grad_norm": 0.7590056398823692,
"learning_rate": 5e-06,
"loss": 0.6466,
"step": 30
},
{
"epoch": 0.36097010716300054,
"grad_norm": 0.7655846434515482,
"learning_rate": 5e-06,
"loss": 0.6338,
"step": 40
},
{
"epoch": 0.4512126339537507,
"grad_norm": 0.38482451954850594,
"learning_rate": 5e-06,
"loss": 0.6242,
"step": 50
},
{
"epoch": 0.5414551607445008,
"grad_norm": 0.5243651524485891,
"learning_rate": 5e-06,
"loss": 0.618,
"step": 60
},
{
"epoch": 0.631697687535251,
"grad_norm": 0.4396733673138487,
"learning_rate": 5e-06,
"loss": 0.6136,
"step": 70
},
{
"epoch": 0.7219402143260011,
"grad_norm": 0.7869802084155179,
"learning_rate": 5e-06,
"loss": 0.6119,
"step": 80
},
{
"epoch": 0.8121827411167513,
"grad_norm": 0.7764319290422358,
"learning_rate": 5e-06,
"loss": 0.6074,
"step": 90
},
{
"epoch": 0.9024252679075014,
"grad_norm": 0.5476035261750426,
"learning_rate": 5e-06,
"loss": 0.6088,
"step": 100
},
{
"epoch": 0.9926677946982515,
"grad_norm": 0.5918273069691898,
"learning_rate": 5e-06,
"loss": 0.6031,
"step": 110
},
{
"epoch": 0.9926677946982515,
"eval_loss": 0.601691484451294,
"eval_runtime": 182.6033,
"eval_samples_per_second": 65.393,
"eval_steps_per_second": 0.515,
"step": 110
},
{
"epoch": 1.0874224478285393,
"grad_norm": 0.45356068248876164,
"learning_rate": 5e-06,
"loss": 0.6161,
"step": 120
},
{
"epoch": 1.1776649746192893,
"grad_norm": 0.5426773823607075,
"learning_rate": 5e-06,
"loss": 0.5733,
"step": 130
},
{
"epoch": 1.2679075014100394,
"grad_norm": 0.696280155344761,
"learning_rate": 5e-06,
"loss": 0.5743,
"step": 140
},
{
"epoch": 1.3581500282007897,
"grad_norm": 0.4757051970498703,
"learning_rate": 5e-06,
"loss": 0.5749,
"step": 150
},
{
"epoch": 1.4483925549915397,
"grad_norm": 0.4435543406215925,
"learning_rate": 5e-06,
"loss": 0.5699,
"step": 160
},
{
"epoch": 1.53863508178229,
"grad_norm": 0.7010821324607621,
"learning_rate": 5e-06,
"loss": 0.5713,
"step": 170
},
{
"epoch": 1.62887760857304,
"grad_norm": 0.6395686381010692,
"learning_rate": 5e-06,
"loss": 0.5726,
"step": 180
},
{
"epoch": 1.7191201353637902,
"grad_norm": 0.5342836767987817,
"learning_rate": 5e-06,
"loss": 0.571,
"step": 190
},
{
"epoch": 1.8093626621545402,
"grad_norm": 0.3836821654226498,
"learning_rate": 5e-06,
"loss": 0.5663,
"step": 200
},
{
"epoch": 1.8996051889452905,
"grad_norm": 0.8522337416083936,
"learning_rate": 5e-06,
"loss": 0.5699,
"step": 210
},
{
"epoch": 1.9898477157360406,
"grad_norm": 0.47597229963199594,
"learning_rate": 5e-06,
"loss": 0.567,
"step": 220
},
{
"epoch": 1.9898477157360406,
"eval_loss": 0.5897566080093384,
"eval_runtime": 182.7346,
"eval_samples_per_second": 65.346,
"eval_steps_per_second": 0.514,
"step": 220
},
{
"epoch": 2.0846023688663284,
"grad_norm": 0.5352597999178058,
"learning_rate": 5e-06,
"loss": 0.5744,
"step": 230
},
{
"epoch": 2.1748448956570785,
"grad_norm": 0.44771041154564994,
"learning_rate": 5e-06,
"loss": 0.5402,
"step": 240
},
{
"epoch": 2.2650874224478286,
"grad_norm": 0.4219614084617865,
"learning_rate": 5e-06,
"loss": 0.5316,
"step": 250
},
{
"epoch": 2.3553299492385786,
"grad_norm": 0.8733260275373232,
"learning_rate": 5e-06,
"loss": 0.54,
"step": 260
},
{
"epoch": 2.4455724760293287,
"grad_norm": 0.6912064073833414,
"learning_rate": 5e-06,
"loss": 0.5365,
"step": 270
},
{
"epoch": 2.5358150028200788,
"grad_norm": 0.48480317311945764,
"learning_rate": 5e-06,
"loss": 0.5379,
"step": 280
},
{
"epoch": 2.6260575296108293,
"grad_norm": 0.5627905970494923,
"learning_rate": 5e-06,
"loss": 0.5372,
"step": 290
},
{
"epoch": 2.7163000564015793,
"grad_norm": 0.9254178480304499,
"learning_rate": 5e-06,
"loss": 0.5385,
"step": 300
},
{
"epoch": 2.8065425831923294,
"grad_norm": 0.5677460820623565,
"learning_rate": 5e-06,
"loss": 0.5411,
"step": 310
},
{
"epoch": 2.8967851099830795,
"grad_norm": 0.5115776135415467,
"learning_rate": 5e-06,
"loss": 0.5393,
"step": 320
},
{
"epoch": 2.9870276367738295,
"grad_norm": 0.49851601671823487,
"learning_rate": 5e-06,
"loss": 0.5383,
"step": 330
},
{
"epoch": 2.9870276367738295,
"eval_loss": 0.5885838866233826,
"eval_runtime": 182.6863,
"eval_samples_per_second": 65.363,
"eval_steps_per_second": 0.515,
"step": 330
},
{
"epoch": 2.9870276367738295,
"step": 330,
"total_flos": 5029184170622976.0,
"train_loss": 0.5836054007212321,
"train_runtime": 31208.0329,
"train_samples_per_second": 21.809,
"train_steps_per_second": 0.011
}
],
"logging_steps": 10,
"max_steps": 330,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5029184170622976.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}