{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 6.0,
"eval_steps": 500,
"global_step": 150,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.2,
"grad_norm": 1.3508174419403076,
"learning_rate": 0.0002,
"loss": 2.5227,
"step": 5
},
{
"epoch": 0.4,
"grad_norm": 1.8602172136306763,
"learning_rate": 0.0002,
"loss": 2.6438,
"step": 10
},
{
"epoch": 0.6,
"grad_norm": 1.7185380458831787,
"learning_rate": 0.0002,
"loss": 0.7979,
"step": 15
},
{
"epoch": 0.8,
"grad_norm": 1.46760892868042,
"learning_rate": 0.0002,
"loss": 0.4091,
"step": 20
},
{
"epoch": 1.0,
"grad_norm": 0.5913628935813904,
"learning_rate": 0.0002,
"loss": 0.274,
"step": 25
},
{
"epoch": 1.2,
"grad_norm": 0.46462497115135193,
"learning_rate": 0.0002,
"loss": 0.1921,
"step": 30
},
{
"epoch": 1.4,
"grad_norm": 0.47151607275009155,
"learning_rate": 0.0002,
"loss": 0.2399,
"step": 35
},
{
"epoch": 1.6,
"grad_norm": 0.45026347041130066,
"learning_rate": 0.0002,
"loss": 0.5266,
"step": 40
},
{
"epoch": 1.8,
"grad_norm": 0.8638098835945129,
"learning_rate": 0.0002,
"loss": 0.1596,
"step": 45
},
{
"epoch": 2.0,
"grad_norm": 0.41460809111595154,
"learning_rate": 0.0002,
"loss": 0.291,
"step": 50
},
{
"epoch": 2.2,
"grad_norm": 0.30362772941589355,
"learning_rate": 0.0002,
"loss": 0.0818,
"step": 55
},
{
"epoch": 2.4,
"grad_norm": 0.46699783205986023,
"learning_rate": 0.0002,
"loss": 0.1198,
"step": 60
},
{
"epoch": 2.6,
"grad_norm": 0.3688370883464813,
"learning_rate": 0.0002,
"loss": 0.0892,
"step": 65
},
{
"epoch": 2.8,
"grad_norm": 0.6380140781402588,
"learning_rate": 0.0002,
"loss": 0.1068,
"step": 70
},
{
"epoch": 3.0,
"grad_norm": 0.31224119663238525,
"learning_rate": 0.0002,
"loss": 0.1673,
"step": 75
},
{
"epoch": 3.2,
"grad_norm": 0.455768883228302,
"learning_rate": 0.0002,
"loss": 0.0405,
"step": 80
},
{
"epoch": 3.4,
"grad_norm": 0.29344627261161804,
"learning_rate": 0.0002,
"loss": 0.1494,
"step": 85
},
{
"epoch": 3.6,
"grad_norm": 0.4940664768218994,
"learning_rate": 0.0002,
"loss": 0.0483,
"step": 90
},
{
"epoch": 3.8,
"grad_norm": 0.2362147718667984,
"learning_rate": 0.0002,
"loss": 0.0396,
"step": 95
},
{
"epoch": 4.0,
"grad_norm": 0.2028852105140686,
"learning_rate": 0.0002,
"loss": 0.0475,
"step": 100
},
{
"epoch": 4.2,
"grad_norm": 0.12950176000595093,
"learning_rate": 0.0002,
"loss": 0.0272,
"step": 105
},
{
"epoch": 4.4,
"grad_norm": 0.3273729383945465,
"learning_rate": 0.0002,
"loss": 0.0302,
"step": 110
},
{
"epoch": 4.6,
"grad_norm": 0.2938006818294525,
"learning_rate": 0.0002,
"loss": 0.0487,
"step": 115
},
{
"epoch": 4.8,
"grad_norm": 0.6171326041221619,
"learning_rate": 0.0002,
"loss": 0.1138,
"step": 120
},
{
"epoch": 5.0,
"grad_norm": 0.2811235785484314,
"learning_rate": 0.0002,
"loss": 0.0313,
"step": 125
},
{
"epoch": 5.2,
"grad_norm": 0.5635420680046082,
"learning_rate": 0.0002,
"loss": 0.0242,
"step": 130
},
{
"epoch": 5.4,
"grad_norm": 0.5491178035736084,
"learning_rate": 0.0002,
"loss": 0.0486,
"step": 135
},
{
"epoch": 5.6,
"grad_norm": 0.1552320271730423,
"learning_rate": 0.0002,
"loss": 0.0332,
"step": 140
},
{
"epoch": 5.8,
"grad_norm": 0.4321906864643097,
"learning_rate": 0.0002,
"loss": 0.0241,
"step": 145
},
{
"epoch": 6.0,
"grad_norm": 0.3396056592464447,
"learning_rate": 0.0002,
"loss": 0.034,
"step": 150
}
],
"logging_steps": 5,
"max_steps": 375,
"num_input_tokens_seen": 0,
"num_train_epochs": 15,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 4802052564578304.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}