{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 1143,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.44,
      "learning_rate": 0.0005625546806649168,
      "loss": 1.8855,
      "step": 500
    },
    {
      "epoch": 0.87,
      "learning_rate": 0.00012510936132983377,
      "loss": 1.7649,
      "step": 1000
    },
    {
      "epoch": 1.0,
      "step": 1143,
      "total_flos": 4.649509810297897e+17,
      "train_loss": 1.8161194168050772,
      "train_runtime": 6916.1714,
      "train_samples_per_second": 10.577,
      "train_steps_per_second": 0.165
    }
  ],
  "logging_steps": 500,
  "max_steps": 1143,
  "num_train_epochs": 1,
  "save_steps": -1143,
  "total_flos": 4.649509810297897e+17,
  "trial_name": null,
  "trial_params": null
}