{
"best_global_step": 4500,
"best_metric": 1.4357532262802124,
"best_model_checkpoint": "./llama2-backward-output/checkpoint-4500",
"epoch": 8.608666887198147,
"eval_steps": 500,
"global_step": 6500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.26463777704267283,
"grad_norm": 1.4625507593154907,
"learning_rate": 1.9600000000000002e-05,
"loss": 2.2395,
"step": 200
},
{
"epoch": 0.5292755540853457,
"grad_norm": 1.2510501146316528,
"learning_rate": 1.9066666666666668e-05,
"loss": 1.6278,
"step": 400
},
{
"epoch": 0.6615944426066821,
"eval_loss": null,
"eval_runtime": 145.2798,
"eval_samples_per_second": 9.251,
"eval_steps_per_second": 4.626,
"step": 500
},
{
"epoch": 0.7939133311280185,
"grad_norm": 1.5188781023025513,
"learning_rate": 1.8533333333333334e-05,
"loss": 1.5642,
"step": 600
},
{
"epoch": 1.0595434998346014,
"grad_norm": 1.30088472366333,
"learning_rate": 1.8e-05,
"loss": 1.556,
"step": 800
},
{
"epoch": 1.3241812768772743,
"grad_norm": 1.7489525079727173,
"learning_rate": 1.7466666666666667e-05,
"loss": 1.5175,
"step": 1000
},
{
"epoch": 1.3241812768772743,
"eval_loss": null,
"eval_runtime": 145.4858,
"eval_samples_per_second": 9.238,
"eval_steps_per_second": 4.619,
"step": 1000
},
{
"epoch": 1.5888190539199472,
"grad_norm": 2.2068583965301514,
"learning_rate": 1.6933333333333336e-05,
"loss": 1.5088,
"step": 1200
},
{
"epoch": 1.85345683096262,
"grad_norm": 1.7514328956604004,
"learning_rate": 1.64e-05,
"loss": 1.5206,
"step": 1400
},
{
"epoch": 1.9857757194839563,
"eval_loss": null,
"eval_runtime": 145.5169,
"eval_samples_per_second": 9.236,
"eval_steps_per_second": 4.618,
"step": 1500
},
{
"epoch": 2.1190869996692028,
"grad_norm": 1.1773896217346191,
"learning_rate": 1.586666666666667e-05,
"loss": 1.5358,
"step": 1600
},
{
"epoch": 2.3837247767118757,
"grad_norm": 1.3273141384124756,
"learning_rate": 1.5333333333333334e-05,
"loss": 1.4694,
"step": 1800
},
{
"epoch": 2.6483625537545485,
"grad_norm": 1.2176439762115479,
"learning_rate": 1.48e-05,
"loss": 1.4883,
"step": 2000
},
{
"epoch": 2.6483625537545485,
"eval_loss": null,
"eval_runtime": 145.5181,
"eval_samples_per_second": 9.236,
"eval_steps_per_second": 4.618,
"step": 2000
},
{
"epoch": 2.9130003307972214,
"grad_norm": 1.7919120788574219,
"learning_rate": 1.4266666666666668e-05,
"loss": 1.5084,
"step": 2200
},
{
"epoch": 3.178630499503804,
"grad_norm": 2.2114169597625732,
"learning_rate": 1.3733333333333335e-05,
"loss": 1.457,
"step": 2400
},
{
"epoch": 3.310949388025141,
"eval_loss": null,
"eval_runtime": 145.7705,
"eval_samples_per_second": 9.22,
"eval_steps_per_second": 4.61,
"step": 2500
},
{
"epoch": 3.443268276546477,
"grad_norm": 2.1541590690612793,
"learning_rate": 1.3200000000000002e-05,
"loss": 1.4664,
"step": 2600
},
{
"epoch": 3.70790605358915,
"grad_norm": 2.045276641845703,
"learning_rate": 1.2666666666666667e-05,
"loss": 1.4706,
"step": 2800
},
{
"epoch": 3.972543830631823,
"grad_norm": 1.3809666633605957,
"learning_rate": 1.2133333333333335e-05,
"loss": 1.4671,
"step": 3000
},
{
"epoch": 3.972543830631823,
"eval_loss": null,
"eval_runtime": 145.8274,
"eval_samples_per_second": 9.216,
"eval_steps_per_second": 4.608,
"step": 3000
},
{
"epoch": 4.2381739993384056,
"grad_norm": 1.9172261953353882,
"learning_rate": 1.16e-05,
"loss": 1.4704,
"step": 3200
},
{
"epoch": 4.502811776381078,
"grad_norm": 2.1002511978149414,
"learning_rate": 1.1066666666666669e-05,
"loss": 1.4386,
"step": 3400
},
{
"epoch": 4.635130664902415,
"eval_loss": 1.4397192001342773,
"eval_runtime": 130.1701,
"eval_samples_per_second": 9.211,
"eval_steps_per_second": 4.609,
"step": 3500
},
{
"epoch": 4.767449553423751,
"grad_norm": 2.430173873901367,
"learning_rate": 1.0533333333333333e-05,
"loss": 1.4629,
"step": 3600
},
{
"epoch": 5.033079722130334,
"grad_norm": 1.9935014247894287,
"learning_rate": 1e-05,
"loss": 1.4462,
"step": 3800
},
{
"epoch": 5.297717499173007,
"grad_norm": 1.5598417520523071,
"learning_rate": 9.466666666666667e-06,
"loss": 1.445,
"step": 4000
},
{
"epoch": 5.297717499173007,
"eval_loss": 1.4383418560028076,
"eval_runtime": 130.1186,
"eval_samples_per_second": 9.215,
"eval_steps_per_second": 4.611,
"step": 4000
},
{
"epoch": 5.56235527621568,
"grad_norm": 2.314366340637207,
"learning_rate": 8.933333333333333e-06,
"loss": 1.4218,
"step": 4200
},
{
"epoch": 5.826993053258352,
"grad_norm": 2.1189815998077393,
"learning_rate": 8.400000000000001e-06,
"loss": 1.4281,
"step": 4400
},
{
"epoch": 5.959311941779689,
"eval_loss": 1.4357532262802124,
"eval_runtime": 130.0198,
"eval_samples_per_second": 9.222,
"eval_steps_per_second": 4.615,
"step": 4500
},
{
"epoch": 6.092623221964936,
"grad_norm": 2.3731560707092285,
"learning_rate": 7.866666666666667e-06,
"loss": 1.4449,
"step": 4600
},
{
"epoch": 6.357260999007608,
"grad_norm": 2.974883556365967,
"learning_rate": 7.333333333333333e-06,
"loss": 1.4458,
"step": 4800
},
{
"epoch": 6.621898776050282,
"grad_norm": 2.829678773880005,
"learning_rate": 6.800000000000001e-06,
"loss": 1.4312,
"step": 5000
},
{
"epoch": 6.621898776050282,
"eval_loss": 1.435935616493225,
"eval_runtime": 130.3688,
"eval_samples_per_second": 9.197,
"eval_steps_per_second": 4.602,
"step": 5000
},
{
"epoch": 6.886536553092954,
"grad_norm": 2.288074493408203,
"learning_rate": 6.266666666666668e-06,
"loss": 1.392,
"step": 5200
},
{
"epoch": 7.152166721799537,
"grad_norm": 1.9908281564712524,
"learning_rate": 5.733333333333334e-06,
"loss": 1.4258,
"step": 5400
},
{
"epoch": 7.284485610320873,
"eval_loss": 1.4372740983963013,
"eval_runtime": 287.6507,
"eval_samples_per_second": 4.168,
"eval_steps_per_second": 2.086,
"step": 5500
},
{
"epoch": 7.416804498842209,
"grad_norm": 2.3958187103271484,
"learning_rate": 5.2e-06,
"loss": 1.4011,
"step": 5600
},
{
"epoch": 7.681442275884883,
"grad_norm": 3.133821725845337,
"learning_rate": 4.666666666666667e-06,
"loss": 1.4119,
"step": 5800
},
{
"epoch": 7.946080052927555,
"grad_norm": 2.6775546073913574,
"learning_rate": 4.133333333333333e-06,
"loss": 1.4287,
"step": 6000
},
{
"epoch": 7.946080052927555,
"eval_loss": 1.43629789352417,
"eval_runtime": 287.6708,
"eval_samples_per_second": 4.168,
"eval_steps_per_second": 2.086,
"step": 6000
},
{
"epoch": 8.211710221634139,
"grad_norm": 2.524921417236328,
"learning_rate": 3.6000000000000003e-06,
"loss": 1.3975,
"step": 6200
},
{
"epoch": 8.476347998676811,
"grad_norm": 2.997265100479126,
"learning_rate": 3.066666666666667e-06,
"loss": 1.3907,
"step": 6400
},
{
"epoch": 8.608666887198147,
"eval_loss": 1.4365167617797852,
"eval_runtime": 288.2825,
"eval_samples_per_second": 4.159,
"eval_steps_per_second": 2.081,
"step": 6500
}
],
"logging_steps": 200,
"max_steps": 7550,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 3,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 3
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.1138506359662182e+18,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}