{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.066603460535672,
"eval_steps": 1500,
"global_step": 4500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0711068973690448,
"grad_norm": 1.3703839778900146,
"learning_rate": 1.4218009478672987e-05,
"loss": 1.6599,
"step": 300
},
{
"epoch": 0.1422137947380896,
"grad_norm": 1.2324339151382446,
"learning_rate": 1.9975676940479608e-05,
"loss": 1.6611,
"step": 600
},
{
"epoch": 0.2133206921071344,
"grad_norm": 1.3222817182540894,
"learning_rate": 1.9825039649994836e-05,
"loss": 1.6619,
"step": 900
},
{
"epoch": 0.2844275894761792,
"grad_norm": 1.1514230966567993,
"learning_rate": 1.953873955447588e-05,
"loss": 1.6576,
"step": 1200
},
{
"epoch": 0.355534486845224,
"grad_norm": 1.0430920124053955,
"learning_rate": 1.9120729846522797e-05,
"loss": 1.6591,
"step": 1500
},
{
"epoch": 0.355534486845224,
"eval_loss": 1.6568845510482788,
"eval_runtime": 2223.4322,
"eval_samples_per_second": 6.746,
"eval_steps_per_second": 6.746,
"step": 1500
},
{
"epoch": 0.4266413842142688,
"grad_norm": 1.1808266639709473,
"learning_rate": 1.857678234706788e-05,
"loss": 1.6534,
"step": 1800
},
{
"epoch": 0.4977482815833136,
"grad_norm": 1.1664097309112549,
"learning_rate": 1.7914407808860857e-05,
"loss": 1.6546,
"step": 2100
},
{
"epoch": 0.5688551789523584,
"grad_norm": 1.255988359451294,
"learning_rate": 1.714275220902116e-05,
"loss": 1.648,
"step": 2400
},
{
"epoch": 0.6399620763214032,
"grad_norm": 1.406730055809021,
"learning_rate": 1.627247046263554e-05,
"loss": 1.6505,
"step": 2700
},
{
"epoch": 0.711068973690448,
"grad_norm": 1.4758352041244507,
"learning_rate": 1.531557930114637e-05,
"loss": 1.6504,
"step": 3000
},
{
"epoch": 0.711068973690448,
"eval_loss": 1.6482607126235962,
"eval_runtime": 2220.8948,
"eval_samples_per_second": 6.754,
"eval_steps_per_second": 6.754,
"step": 3000
},
{
"epoch": 0.7821758710594928,
"grad_norm": 1.4238367080688477,
"learning_rate": 1.4285291346965831e-05,
"loss": 1.646,
"step": 3300
},
{
"epoch": 0.8532827684285376,
"grad_norm": 1.1593595743179321,
"learning_rate": 1.319583267539094e-05,
"loss": 1.6454,
"step": 3600
},
{
"epoch": 0.9243896657975824,
"grad_norm": 1.1607190370559692,
"learning_rate": 1.2062246382899667e-05,
"loss": 1.6429,
"step": 3900
},
{
"epoch": 0.9954965631666272,
"grad_norm": 1.341468334197998,
"learning_rate": 1.0900184874130164e-05,
"loss": 1.6381,
"step": 4200
},
{
"epoch": 1.066603460535672,
"grad_norm": 1.3884109258651733,
"learning_rate": 9.725693735616222e-06,
"loss": 1.6038,
"step": 4500
},
{
"epoch": 1.066603460535672,
"eval_loss": 1.642856478691101,
"eval_runtime": 2220.4526,
"eval_samples_per_second": 6.755,
"eval_steps_per_second": 6.755,
"step": 4500
}
],
"logging_steps": 300,
"max_steps": 8438,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 1500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.1996706885823365e+18,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}