{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9969230769230769,
"eval_steps": 100,
"global_step": 162,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.06153846153846154,
"grad_norm": 0.39951827848719723,
"learning_rate": 1.1764705882352942e-05,
"loss": 3.1582,
"mean_token_accuracy": 0.400910259783268,
"step": 10
},
{
"epoch": 0.12307692307692308,
"grad_norm": 0.3995616499609791,
"learning_rate": 1.9978883431348845e-05,
"loss": 3.1668,
"mean_token_accuracy": 0.39937195032835004,
"step": 20
},
{
"epoch": 0.18461538461538463,
"grad_norm": 0.4102168990934247,
"learning_rate": 1.9605953553832987e-05,
"loss": 3.1387,
"mean_token_accuracy": 0.4016988441348076,
"step": 30
},
{
"epoch": 0.24615384615384617,
"grad_norm": 0.40391230141561146,
"learning_rate": 1.8783859964390466e-05,
"loss": 3.1551,
"mean_token_accuracy": 0.3992693811655045,
"step": 40
},
{
"epoch": 0.3076923076923077,
"grad_norm": 0.3961653949779693,
"learning_rate": 1.755104284557221e-05,
"loss": 3.1457,
"mean_token_accuracy": 0.40172072798013686,
"step": 50
},
{
"epoch": 0.36923076923076925,
"grad_norm": 0.41939623098641426,
"learning_rate": 1.5965147355676344e-05,
"loss": 3.1492,
"mean_token_accuracy": 0.4015924736857414,
"step": 60
},
{
"epoch": 0.4307692307692308,
"grad_norm": 0.3945808975159374,
"learning_rate": 1.4100328205214161e-05,
"loss": 3.1387,
"mean_token_accuracy": 0.4018391355872154,
"step": 70
},
{
"epoch": 0.49230769230769234,
"grad_norm": 0.37735963614306256,
"learning_rate": 1.204378226506365e-05,
"loss": 3.1488,
"mean_token_accuracy": 0.4004943951964378,
"step": 80
},
{
"epoch": 0.5538461538461539,
"grad_norm": 0.4114554888279876,
"learning_rate": 9.891671337699603e-06,
"loss": 3.1559,
"mean_token_accuracy": 0.4003040686249733,
"step": 90
},
{
"epoch": 0.6153846153846154,
"grad_norm": 0.41103454513473137,
"learning_rate": 7.74462573818606e-06,
"loss": 3.1543,
"mean_token_accuracy": 0.40160409063100816,
"step": 100
},
{
"epoch": 0.6153846153846154,
"eval_runtime": 0.4543,
"eval_samples_per_second": 52.823,
"eval_steps_per_second": 4.402,
"step": 100
},
{
"epoch": 0.676923076923077,
"grad_norm": 0.38569131648987776,
"learning_rate": 5.7030389324864845e-06,
"loss": 3.1824,
"mean_token_accuracy": 0.397557806968689,
"step": 110
},
{
"epoch": 0.7384615384615385,
"grad_norm": 0.4255553103146562,
"learning_rate": 3.862373250574626e-06,
"loss": 3.1828,
"mean_token_accuracy": 0.3972348183393478,
"step": 120
},
{
"epoch": 0.8,
"grad_norm": 0.432279104045337,
"learning_rate": 2.308696173983711e-06,
"loss": 3.1387,
"mean_token_accuracy": 0.40221917182207106,
"step": 130
},
{
"epoch": 0.8615384615384616,
"grad_norm": 0.4143971545965478,
"learning_rate": 1.1146559160270875e-06,
"loss": 3.1297,
"mean_token_accuracy": 0.4037084937095642,
"step": 140
},
{
"epoch": 0.9230769230769231,
"grad_norm": 0.41227809966322443,
"learning_rate": 3.360844720863765e-07,
"loss": 3.1383,
"mean_token_accuracy": 0.40221751779317855,
"step": 150
},
{
"epoch": 0.9846153846153847,
"grad_norm": 0.4212135535860477,
"learning_rate": 9.38697756023288e-09,
"loss": 3.1223,
"mean_token_accuracy": 0.4041751459240913,
"step": 160
},
{
"epoch": 0.9969230769230769,
"mean_token_accuracy": 0.3931814655661583,
"step": 162,
"total_flos": 5426859755962368.0,
"train_loss": 3.1510416666666665,
"train_runtime": 387.2622,
"train_samples_per_second": 13.394,
"train_steps_per_second": 0.418
}
],
"logging_steps": 10,
"max_steps": 162,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": false,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 5426859755962368.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}