{
"best_global_step": 150,
"best_metric": 0.7291873097419739,
"best_model_checkpoint": "/content/lora_structeval_t_qwen3_4b/checkpoint-150",
"epoch": 1.0,
"eval_steps": 50,
"global_step": 198,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.05068102629078239,
"grad_norm": 1.0233957254968118e-05,
"learning_rate": 9e-05,
"loss": 1.6154,
"step": 10
},
{
"epoch": 0.10136205258156478,
"grad_norm": 9.242238775186706e-06,
"learning_rate": 0.00019,
"loss": 1.3072,
"step": 20
},
{
"epoch": 0.15204307887234716,
"grad_norm": 9.054802831087727e-06,
"learning_rate": 0.00019874107050115954,
"loss": 1.0862,
"step": 30
},
{
"epoch": 0.20272410516312955,
"grad_norm": 1.0910169294220395e-05,
"learning_rate": 0.00019442989534495557,
"loss": 1.1074,
"step": 40
},
{
"epoch": 0.25340513145391197,
"grad_norm": 1.1410435035941191e-05,
"learning_rate": 0.00018718484526025387,
"loss": 0.9959,
"step": 50
},
{
"epoch": 0.25340513145391197,
"eval_loss": 0.8977804780006409,
"eval_runtime": 78.7683,
"eval_samples_per_second": 4.215,
"eval_steps_per_second": 4.215,
"step": 50
},
{
"epoch": 0.30408615774469433,
"grad_norm": 9.305776984547265e-06,
"learning_rate": 0.0001772310192024389,
"loss": 0.984,
"step": 60
},
{
"epoch": 0.35476718403547675,
"grad_norm": 8.993285518954508e-06,
"learning_rate": 0.00016487767602017263,
"loss": 0.9051,
"step": 70
},
{
"epoch": 0.4054482103262591,
"grad_norm": 8.788006198301446e-06,
"learning_rate": 0.00015050862598575476,
"loss": 0.8835,
"step": 80
},
{
"epoch": 0.4561292366170415,
"grad_norm": 8.296872692881152e-06,
"learning_rate": 0.00013457030606163562,
"loss": 0.8761,
"step": 90
},
{
"epoch": 0.5068102629078239,
"grad_norm": 1.0136979653907474e-05,
"learning_rate": 0.00011755790939673209,
"loss": 0.8776,
"step": 100
},
{
"epoch": 0.5068102629078239,
"eval_loss": 0.7661521434783936,
"eval_runtime": 77.5825,
"eval_samples_per_second": 4.279,
"eval_steps_per_second": 4.279,
"step": 100
},
{
"epoch": 0.5574912891986062,
"grad_norm": 1.0645497241057456e-05,
"learning_rate": 0.0001,
"loss": 0.9067,
"step": 110
},
{
"epoch": 0.6081723154893887,
"grad_norm": 9.953019798558671e-06,
"learning_rate": 8.244209060326794e-05,
"loss": 0.8558,
"step": 120
},
{
"epoch": 0.6588533417801711,
"grad_norm": 9.384052646055352e-06,
"learning_rate": 6.542969393836436e-05,
"loss": 0.8194,
"step": 130
},
{
"epoch": 0.7095343680709535,
"grad_norm": 1.0857322195079178e-05,
"learning_rate": 4.949137401424527e-05,
"loss": 0.7676,
"step": 140
},
{
"epoch": 0.7602153943617358,
"grad_norm": 9.119448804995045e-06,
"learning_rate": 3.5122323979827395e-05,
"loss": 0.7987,
"step": 150
},
{
"epoch": 0.7602153943617358,
"eval_loss": 0.7291873097419739,
"eval_runtime": 78.0054,
"eval_samples_per_second": 4.256,
"eval_steps_per_second": 4.256,
"step": 150
},
{
"epoch": 0.8108964206525182,
"grad_norm": 8.535769666195847e-06,
"learning_rate": 2.2768980797561124e-05,
"loss": 0.7869,
"step": 160
},
{
"epoch": 0.8615774469433006,
"grad_norm": 1.2374131074466277e-05,
"learning_rate": 1.2815154739746138e-05,
"loss": 0.8469,
"step": 170
},
{
"epoch": 0.912258473234083,
"grad_norm": 9.65424351306865e-06,
"learning_rate": 5.570104655044428e-06,
"loss": 0.8821,
"step": 180
},
{
"epoch": 0.9629394995248653,
"grad_norm": 1.015480393107282e-05,
"learning_rate": 1.2589294988404888e-06,
"loss": 0.8662,
"step": 190
}
],
"logging_steps": 10,
"max_steps": 198,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 3,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 4.457819030331802e+16,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}