{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9966329966329966,
"eval_steps": 500,
"global_step": 185,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0053872053872053875,
"grad_norm": 0.5911839008331299,
"learning_rate": 0.0,
"loss": 0.834,
"step": 1
},
{
"epoch": 0.05387205387205387,
"grad_norm": 0.4561307728290558,
"learning_rate": 9.473684210526316e-05,
"loss": 1.4016,
"step": 10
},
{
"epoch": 0.10774410774410774,
"grad_norm": 0.06328851729631424,
"learning_rate": 0.0002,
"loss": 0.8905,
"step": 20
},
{
"epoch": 0.16161616161616163,
"grad_norm": 0.022817041724920273,
"learning_rate": 0.00018795180722891569,
"loss": 0.6956,
"step": 30
},
{
"epoch": 0.21548821548821548,
"grad_norm": 0.018048042431473732,
"learning_rate": 0.00017590361445783134,
"loss": 0.6707,
"step": 40
},
{
"epoch": 0.26936026936026936,
"grad_norm": 0.028622902929782867,
"learning_rate": 0.00016385542168674699,
"loss": 0.6629,
"step": 50
},
{
"epoch": 0.32323232323232326,
"grad_norm": 0.012167639099061489,
"learning_rate": 0.00015180722891566266,
"loss": 0.6721,
"step": 60
},
{
"epoch": 0.3771043771043771,
"grad_norm": 0.01069930475205183,
"learning_rate": 0.00013975903614457834,
"loss": 0.6565,
"step": 70
},
{
"epoch": 0.43097643097643096,
"grad_norm": 0.015863914042711258,
"learning_rate": 0.00012771084337349396,
"loss": 0.6458,
"step": 80
},
{
"epoch": 0.48484848484848486,
"grad_norm": 0.012852534651756287,
"learning_rate": 0.00011566265060240964,
"loss": 0.6363,
"step": 90
},
{
"epoch": 0.5387205387205387,
"grad_norm": 0.025411192327737808,
"learning_rate": 0.0001036144578313253,
"loss": 0.6416,
"step": 100
},
{
"epoch": 0.5925925925925926,
"grad_norm": 0.01106669008731842,
"learning_rate": 9.156626506024096e-05,
"loss": 0.6575,
"step": 110
},
{
"epoch": 0.6464646464646465,
"grad_norm": 0.010959242470562458,
"learning_rate": 7.951807228915663e-05,
"loss": 0.6438,
"step": 120
},
{
"epoch": 0.7003367003367004,
"grad_norm": 0.010998900979757309,
"learning_rate": 6.746987951807229e-05,
"loss": 0.6374,
"step": 130
},
{
"epoch": 0.7542087542087542,
"grad_norm": 0.010141533799469471,
"learning_rate": 5.5421686746987955e-05,
"loss": 0.6316,
"step": 140
},
{
"epoch": 0.8080808080808081,
"grad_norm": 0.023810530081391335,
"learning_rate": 4.337349397590362e-05,
"loss": 0.6369,
"step": 150
},
{
"epoch": 0.8619528619528619,
"grad_norm": 0.010837017558515072,
"learning_rate": 3.132530120481928e-05,
"loss": 0.6532,
"step": 160
},
{
"epoch": 0.9158249158249159,
"grad_norm": 0.009072311222553253,
"learning_rate": 1.927710843373494e-05,
"loss": 0.6411,
"step": 170
},
{
"epoch": 0.9696969696969697,
"grad_norm": 0.013384884223341942,
"learning_rate": 7.228915662650602e-06,
"loss": 0.629,
"step": 180
}
],
"logging_steps": 10,
"max_steps": 185,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 4.109138039878451e+16,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}