{
"best_global_step": 200,
"best_metric": 0.04782823845744133,
"best_model_checkpoint": "./lbtestcd_lora_model/checkpoint-200",
"epoch": 1.6285714285714286,
"eval_steps": 100,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"entropy": 1.6688840508460998,
"epoch": 0.08163265306122448,
"grad_norm": 1.984375,
"learning_rate": 0.00013846153846153847,
"loss": 2.7854,
"mean_token_accuracy": 0.5734112456440925,
"num_tokens": 29874.0,
"step": 10
},
{
"entropy": 0.8716737445443868,
"epoch": 0.16326530612244897,
"grad_norm": 0.8046875,
"learning_rate": 0.00019484978540772535,
"loss": 0.7077,
"mean_token_accuracy": 0.8737192109227181,
"num_tokens": 59724.0,
"step": 20
},
{
"entropy": 0.12969586309045553,
"epoch": 0.24489795918367346,
"grad_norm": 0.1884765625,
"learning_rate": 0.00018626609442060085,
"loss": 0.1144,
"mean_token_accuracy": 0.9771728187799453,
"num_tokens": 89501.0,
"step": 30
},
{
"entropy": 0.10608638003468514,
"epoch": 0.32653061224489793,
"grad_norm": 0.119140625,
"learning_rate": 0.0001776824034334764,
"loss": 0.0903,
"mean_token_accuracy": 0.9806259959936142,
"num_tokens": 119257.0,
"step": 40
},
{
"entropy": 0.09941115416586399,
"epoch": 0.40816326530612246,
"grad_norm": 0.11376953125,
"learning_rate": 0.00016909871244635193,
"loss": 0.0896,
"mean_token_accuracy": 0.9801410123705864,
"num_tokens": 149090.0,
"step": 50
},
{
"entropy": 0.09959900509566069,
"epoch": 0.4897959183673469,
"grad_norm": 0.1416015625,
"learning_rate": 0.00016051502145922748,
"loss": 0.0788,
"mean_token_accuracy": 0.9814802333712578,
"num_tokens": 178953.0,
"step": 60
},
{
"entropy": 0.10063424333930016,
"epoch": 0.5714285714285714,
"grad_norm": 0.1572265625,
"learning_rate": 0.000151931330472103,
"loss": 0.0671,
"mean_token_accuracy": 0.9840218782424927,
"num_tokens": 208876.0,
"step": 70
},
{
"entropy": 0.06386051792651415,
"epoch": 0.6530612244897959,
"grad_norm": 0.099609375,
"learning_rate": 0.00014334763948497856,
"loss": 0.05,
"mean_token_accuracy": 0.987203674018383,
"num_tokens": 238734.0,
"step": 80
},
{
"entropy": 0.05305444821715355,
"epoch": 0.7346938775510204,
"grad_norm": 0.06494140625,
"learning_rate": 0.00013476394849785408,
"loss": 0.0477,
"mean_token_accuracy": 0.9869530826807023,
"num_tokens": 268539.0,
"step": 90
},
{
"entropy": 0.0516530342400074,
"epoch": 0.8163265306122449,
"grad_norm": 0.1142578125,
"learning_rate": 0.0001261802575107296,
"loss": 0.0479,
"mean_token_accuracy": 0.9873545721173287,
"num_tokens": 298439.0,
"step": 100
},
{
"epoch": 0.8163265306122449,
"eval_entropy": 0.0524380611705369,
"eval_loss": 0.05050847306847572,
"eval_mean_token_accuracy": 0.9866128844776373,
"eval_num_tokens": 298439.0,
"eval_runtime": 34.3005,
"eval_samples_per_second": 10.087,
"eval_steps_per_second": 2.536,
"step": 100
},
{
"entropy": 0.05258838403970003,
"epoch": 0.8979591836734694,
"grad_norm": 0.07861328125,
"learning_rate": 0.00011759656652360516,
"loss": 0.048,
"mean_token_accuracy": 0.9866469159722329,
"num_tokens": 328263.0,
"step": 110
},
{
"entropy": 0.05178862875327468,
"epoch": 0.9795918367346939,
"grad_norm": 0.09912109375,
"learning_rate": 0.0001090128755364807,
"loss": 0.0476,
"mean_token_accuracy": 0.9872021660208702,
"num_tokens": 358263.0,
"step": 120
},
{
"entropy": 0.052568491156163966,
"epoch": 1.0571428571428572,
"grad_norm": 0.08740234375,
"learning_rate": 0.00010042918454935624,
"loss": 0.0477,
"mean_token_accuracy": 0.987212166974419,
"num_tokens": 386372.0,
"step": 130
},
{
"entropy": 0.050634028390049934,
"epoch": 1.1387755102040815,
"grad_norm": 0.07861328125,
"learning_rate": 9.184549356223176e-05,
"loss": 0.0459,
"mean_token_accuracy": 0.9879572361707687,
"num_tokens": 416328.0,
"step": 140
},
{
"entropy": 0.04960239427164197,
"epoch": 1.220408163265306,
"grad_norm": 0.08203125,
"learning_rate": 8.32618025751073e-05,
"loss": 0.0441,
"mean_token_accuracy": 0.9879725813865662,
"num_tokens": 446171.0,
"step": 150
},
{
"entropy": 0.047286948189139366,
"epoch": 1.3020408163265307,
"grad_norm": 0.080078125,
"learning_rate": 7.467811158798284e-05,
"loss": 0.0427,
"mean_token_accuracy": 0.9882744088768959,
"num_tokens": 476097.0,
"step": 160
},
{
"entropy": 0.04795173741877079,
"epoch": 1.383673469387755,
"grad_norm": 0.09033203125,
"learning_rate": 6.609442060085838e-05,
"loss": 0.043,
"mean_token_accuracy": 0.9885840907692909,
"num_tokens": 505889.0,
"step": 170
},
{
"entropy": 0.04705191999673843,
"epoch": 1.4653061224489796,
"grad_norm": 0.0791015625,
"learning_rate": 5.751072961373391e-05,
"loss": 0.0434,
"mean_token_accuracy": 0.9881132692098618,
"num_tokens": 535679.0,
"step": 180
},
{
"entropy": 0.04972094791010022,
"epoch": 1.546938775510204,
"grad_norm": 0.0966796875,
"learning_rate": 4.8927038626609446e-05,
"loss": 0.0452,
"mean_token_accuracy": 0.9873160928487777,
"num_tokens": 565406.0,
"step": 190
},
{
"entropy": 0.049302170518785715,
"epoch": 1.6285714285714286,
"grad_norm": 0.10546875,
"learning_rate": 4.034334763948498e-05,
"loss": 0.0453,
"mean_token_accuracy": 0.9879378706216813,
"num_tokens": 595418.0,
"step": 200
},
{
"epoch": 1.6285714285714286,
"eval_entropy": 0.04912949901545185,
"eval_loss": 0.04782823845744133,
"eval_mean_token_accuracy": 0.9871041678834236,
"eval_num_tokens": 595418.0,
"eval_runtime": 34.3092,
"eval_samples_per_second": 10.085,
"eval_steps_per_second": 2.536,
"step": 200
}
],
"logging_steps": 10,
"max_steps": 246,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 200,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.792364691021824e+16,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}