{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9997179125528914,
"eval_steps": 500,
"global_step": 443,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.022566995768688293,
"grad_norm": 1.1076090467924995,
"learning_rate": 5e-06,
"loss": 0.7438,
"step": 10
},
{
"epoch": 0.045133991537376586,
"grad_norm": 0.991735987363898,
"learning_rate": 5e-06,
"loss": 0.6802,
"step": 20
},
{
"epoch": 0.06770098730606489,
"grad_norm": 0.8721792478756093,
"learning_rate": 5e-06,
"loss": 0.6629,
"step": 30
},
{
"epoch": 0.09026798307475317,
"grad_norm": 0.5951545166641695,
"learning_rate": 5e-06,
"loss": 0.6406,
"step": 40
},
{
"epoch": 0.11283497884344147,
"grad_norm": 0.7007845996565958,
"learning_rate": 5e-06,
"loss": 0.634,
"step": 50
},
{
"epoch": 0.13540197461212977,
"grad_norm": 0.5943811510802003,
"learning_rate": 5e-06,
"loss": 0.6332,
"step": 60
},
{
"epoch": 0.15796897038081806,
"grad_norm": 0.6353234676535418,
"learning_rate": 5e-06,
"loss": 0.629,
"step": 70
},
{
"epoch": 0.18053596614950634,
"grad_norm": 0.6006567013207602,
"learning_rate": 5e-06,
"loss": 0.6306,
"step": 80
},
{
"epoch": 0.20310296191819463,
"grad_norm": 0.7046632924461987,
"learning_rate": 5e-06,
"loss": 0.6282,
"step": 90
},
{
"epoch": 0.22566995768688294,
"grad_norm": 0.7021240097891023,
"learning_rate": 5e-06,
"loss": 0.6272,
"step": 100
},
{
"epoch": 0.24823695345557123,
"grad_norm": 0.5910229174924321,
"learning_rate": 5e-06,
"loss": 0.618,
"step": 110
},
{
"epoch": 0.27080394922425954,
"grad_norm": 0.596959901149353,
"learning_rate": 5e-06,
"loss": 0.6173,
"step": 120
},
{
"epoch": 0.2933709449929478,
"grad_norm": 0.8993218759485339,
"learning_rate": 5e-06,
"loss": 0.6185,
"step": 130
},
{
"epoch": 0.3159379407616361,
"grad_norm": 0.6147671660802024,
"learning_rate": 5e-06,
"loss": 0.6135,
"step": 140
},
{
"epoch": 0.3385049365303244,
"grad_norm": 0.6724362951434409,
"learning_rate": 5e-06,
"loss": 0.614,
"step": 150
},
{
"epoch": 0.3610719322990127,
"grad_norm": 0.6020059058264652,
"learning_rate": 5e-06,
"loss": 0.6157,
"step": 160
},
{
"epoch": 0.383638928067701,
"grad_norm": 0.6467062739486626,
"learning_rate": 5e-06,
"loss": 0.6118,
"step": 170
},
{
"epoch": 0.40620592383638926,
"grad_norm": 0.6337134461733973,
"learning_rate": 5e-06,
"loss": 0.6087,
"step": 180
},
{
"epoch": 0.4287729196050776,
"grad_norm": 1.187290343191463,
"learning_rate": 5e-06,
"loss": 0.6032,
"step": 190
},
{
"epoch": 0.4513399153737659,
"grad_norm": 0.6573468292913784,
"learning_rate": 5e-06,
"loss": 0.6099,
"step": 200
},
{
"epoch": 0.47390691114245415,
"grad_norm": 0.5436088131885063,
"learning_rate": 5e-06,
"loss": 0.6063,
"step": 210
},
{
"epoch": 0.49647390691114246,
"grad_norm": 0.5714729478422241,
"learning_rate": 5e-06,
"loss": 0.598,
"step": 220
},
{
"epoch": 0.5190409026798307,
"grad_norm": 0.6369923617172581,
"learning_rate": 5e-06,
"loss": 0.6068,
"step": 230
},
{
"epoch": 0.5416078984485191,
"grad_norm": 0.6174803589160689,
"learning_rate": 5e-06,
"loss": 0.6057,
"step": 240
},
{
"epoch": 0.5641748942172073,
"grad_norm": 0.5746679987420106,
"learning_rate": 5e-06,
"loss": 0.5998,
"step": 250
},
{
"epoch": 0.5867418899858956,
"grad_norm": 0.7158022714822785,
"learning_rate": 5e-06,
"loss": 0.6025,
"step": 260
},
{
"epoch": 0.609308885754584,
"grad_norm": 0.6550177518697455,
"learning_rate": 5e-06,
"loss": 0.5979,
"step": 270
},
{
"epoch": 0.6318758815232722,
"grad_norm": 0.5848914985146456,
"learning_rate": 5e-06,
"loss": 0.6045,
"step": 280
},
{
"epoch": 0.6544428772919605,
"grad_norm": 0.6013598059389905,
"learning_rate": 5e-06,
"loss": 0.5979,
"step": 290
},
{
"epoch": 0.6770098730606487,
"grad_norm": 0.6694540638792662,
"learning_rate": 5e-06,
"loss": 0.604,
"step": 300
},
{
"epoch": 0.6995768688293371,
"grad_norm": 0.5419093913651906,
"learning_rate": 5e-06,
"loss": 0.5972,
"step": 310
},
{
"epoch": 0.7221438645980254,
"grad_norm": 0.5432682430557793,
"learning_rate": 5e-06,
"loss": 0.6004,
"step": 320
},
{
"epoch": 0.7447108603667136,
"grad_norm": 0.5609209464002083,
"learning_rate": 5e-06,
"loss": 0.596,
"step": 330
},
{
"epoch": 0.767277856135402,
"grad_norm": 0.5771320670008115,
"learning_rate": 5e-06,
"loss": 0.5955,
"step": 340
},
{
"epoch": 0.7898448519040903,
"grad_norm": 0.6553502516133991,
"learning_rate": 5e-06,
"loss": 0.597,
"step": 350
},
{
"epoch": 0.8124118476727785,
"grad_norm": 0.5735610527819333,
"learning_rate": 5e-06,
"loss": 0.5959,
"step": 360
},
{
"epoch": 0.8349788434414669,
"grad_norm": 0.6196981942326923,
"learning_rate": 5e-06,
"loss": 0.603,
"step": 370
},
{
"epoch": 0.8575458392101551,
"grad_norm": 0.5892757008227134,
"learning_rate": 5e-06,
"loss": 0.5997,
"step": 380
},
{
"epoch": 0.8801128349788434,
"grad_norm": 0.6377051547834243,
"learning_rate": 5e-06,
"loss": 0.5937,
"step": 390
},
{
"epoch": 0.9026798307475318,
"grad_norm": 0.5793619434689786,
"learning_rate": 5e-06,
"loss": 0.596,
"step": 400
},
{
"epoch": 0.92524682651622,
"grad_norm": 0.5940412617035421,
"learning_rate": 5e-06,
"loss": 0.594,
"step": 410
},
{
"epoch": 0.9478138222849083,
"grad_norm": 0.5720643197976489,
"learning_rate": 5e-06,
"loss": 0.5946,
"step": 420
},
{
"epoch": 0.9703808180535967,
"grad_norm": 0.5934993591499655,
"learning_rate": 5e-06,
"loss": 0.5886,
"step": 430
},
{
"epoch": 0.9929478138222849,
"grad_norm": 0.7147785717960933,
"learning_rate": 5e-06,
"loss": 0.5934,
"step": 440
},
{
"epoch": 0.9997179125528914,
"eval_loss": 0.5912777781486511,
"eval_runtime": 697.2724,
"eval_samples_per_second": 17.125,
"eval_steps_per_second": 0.536,
"step": 443
},
{
"epoch": 0.9997179125528914,
"step": 443,
"total_flos": 1687906242527232.0,
"train_loss": 0.6135331322323387,
"train_runtime": 40842.8187,
"train_samples_per_second": 5.555,
"train_steps_per_second": 0.011
}
],
"logging_steps": 10,
"max_steps": 443,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1687906242527232.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}