{
"best_global_step": 315,
"best_metric": 0.2992021276595745,
"best_model_checkpoint": "/home/kat/git-repos/icse/results/rsa/19-05-2025:14-19-25/checkpoint-315",
"epoch": 1.0,
"eval_steps": 500,
"global_step": 315,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.15873015873015872,
"grad_norm": 2.3694920539855957,
"learning_rate": 1.688888888888889e-05,
"loss": 0.6988,
"step": 50
},
{
"epoch": 0.31746031746031744,
"grad_norm": 3.2288453578948975,
"learning_rate": 1.3714285714285716e-05,
"loss": 0.7359,
"step": 100
},
{
"epoch": 0.47619047619047616,
"grad_norm": 2.3709213733673096,
"learning_rate": 1.053968253968254e-05,
"loss": 0.6617,
"step": 150
},
{
"epoch": 0.6349206349206349,
"grad_norm": 2.37154483795166,
"learning_rate": 7.3650793650793666e-06,
"loss": 0.6803,
"step": 200
},
{
"epoch": 0.7936507936507936,
"grad_norm": 2.3702969551086426,
"learning_rate": 4.190476190476191e-06,
"loss": 0.7116,
"step": 250
},
{
"epoch": 0.9523809523809523,
"grad_norm": 2.3685238361358643,
"learning_rate": 1.015873015873016e-06,
"loss": 0.6928,
"step": 300
},
{
"epoch": 1.0,
"eval_accuracy": 0.46875,
"eval_balanced_accuracy": 0.0,
"eval_f1_score": 0.2992021276595745,
"eval_loss": 0.714898407459259,
"eval_precision": 0.2197265625,
"eval_recall": 0.46875,
"eval_runtime": 2.4579,
"eval_samples_per_second": 26.038,
"eval_steps_per_second": 26.038,
"step": 315
}
],
"logging_steps": 50,
"max_steps": 315,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 47016456480.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}