{
  "best_metric": 0.43999593210437116,
  "best_model_checkpoint": "distilbert-base-uncased-finetuned-cola/run-0/checkpoint-2138",
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 2138,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.47,
      "learning_rate": 3.9693511172311064e-05,
      "loss": 0.5777,
      "step": 500
    },
    {
      "epoch": 0.94,
      "learning_rate": 2.7577054770506707e-05,
      "loss": 0.5164,
      "step": 1000
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.4963381886482239,
      "eval_matthews_correlation": 0.4299032759161841,
      "eval_runtime": 0.7359,
      "eval_samples_per_second": 1417.305,
      "eval_steps_per_second": 89.686,
      "step": 1069
    },
    {
      "epoch": 1.4,
      "learning_rate": 1.5460598368702356e-05,
      "loss": 0.3518,
      "step": 1500
    },
    {
      "epoch": 1.87,
      "learning_rate": 3.3441419668980017e-06,
      "loss": 0.3457,
      "step": 2000
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.6384268403053284,
      "eval_matthews_correlation": 0.43999593210437116,
      "eval_runtime": 0.7288,
      "eval_samples_per_second": 1431.097,
      "eval_steps_per_second": 90.558,
      "step": 2138
    }
  ],
  "logging_steps": 500,
  "max_steps": 2138,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 500,
  "total_flos": 75283395949464.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": {
    "learning_rate": 5.180996757411542e-05,
    "num_train_epochs": 2,
    "per_device_train_batch_size": 8,
    "seed": 27
  }
}
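
For reference, a minimal sketch of how this state file can be inspected programmatically, using only the standard library. It assumes a local copy saved as `trainer_state.json` and assumes the Trainer's default linear learning-rate schedule with no warmup, so each logged rate should equal the trial's initial `learning_rate` scaled by the fraction of `max_steps` remaining; the file name and the numeric tolerance are illustrative, not part of the original.

```python
import json

# Load the trainer state produced by the Hugging Face Trainer.
with open("trainer_state.json") as f:
    state = json.load(f)

print("best checkpoint:", state["best_model_checkpoint"])
print("best Matthews correlation:", state["best_metric"])

# Assumption: default linear decay with no warmup, i.e.
#   lr(step) = initial_lr * (1 - step / max_steps)
initial_lr = state["trial_params"]["learning_rate"]
max_steps = state["max_steps"]

for entry in state["log_history"]:
    if "learning_rate" in entry:
        expected = initial_lr * (1 - entry["step"] / max_steps)
        # Tolerance is illustrative; logged values should match to float precision.
        assert abs(entry["learning_rate"] - expected) < 1e-9, entry
        print(f"step {entry['step']:>4}: lr={entry['learning_rate']:.3e} (matches linear decay)")
```

Run against the JSON above, each of the four logged learning rates reproduces the linear-decay formula from the trial's initial rate of about 5.18e-05, consistent with `is_hyper_param_search` being true and the trial parameters recorded under `trial_params`.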