{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.1,
"eval_steps": 500,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.005,
"grad_norm": 6.723871231079102,
"learning_rate": 4.9800000000000004e-05,
"loss": 6.3518,
"step": 5
},
{
"epoch": 0.01,
"grad_norm": 3.806936502456665,
"learning_rate": 4.9550000000000005e-05,
"loss": 5.2146,
"step": 10
},
{
"epoch": 0.015,
"grad_norm": 5.464715003967285,
"learning_rate": 4.93e-05,
"loss": 5.3944,
"step": 15
},
{
"epoch": 0.02,
"grad_norm": 7.181273937225342,
"learning_rate": 4.905e-05,
"loss": 4.9653,
"step": 20
},
{
"epoch": 0.025,
"grad_norm": 3.466884136199951,
"learning_rate": 4.88e-05,
"loss": 4.3435,
"step": 25
},
{
"epoch": 0.03,
"grad_norm": 6.4920830726623535,
"learning_rate": 4.855e-05,
"loss": 5.0843,
"step": 30
},
{
"epoch": 0.035,
"grad_norm": 6.69465446472168,
"learning_rate": 4.83e-05,
"loss": 5.5838,
"step": 35
},
{
"epoch": 0.04,
"grad_norm": 13.27942943572998,
"learning_rate": 4.805e-05,
"loss": 5.4903,
"step": 40
},
{
"epoch": 0.045,
"grad_norm": 7.836321830749512,
"learning_rate": 4.78e-05,
"loss": 6.0388,
"step": 45
},
{
"epoch": 0.05,
"grad_norm": 6.168878555297852,
"learning_rate": 4.755e-05,
"loss": 5.1972,
"step": 50
},
{
"epoch": 0.055,
"grad_norm": 4.194369316101074,
"learning_rate": 4.73e-05,
"loss": 4.9553,
"step": 55
},
{
"epoch": 0.06,
"grad_norm": 14.01821517944336,
"learning_rate": 4.705e-05,
"loss": 5.059,
"step": 60
},
{
"epoch": 0.065,
"grad_norm": 5.50333309173584,
"learning_rate": 4.6800000000000006e-05,
"loss": 5.1333,
"step": 65
},
{
"epoch": 0.07,
"grad_norm": 8.491551399230957,
"learning_rate": 4.655000000000001e-05,
"loss": 5.6745,
"step": 70
},
{
"epoch": 0.075,
"grad_norm": 6.9947357177734375,
"learning_rate": 4.630000000000001e-05,
"loss": 5.9163,
"step": 75
},
{
"epoch": 0.08,
"grad_norm": 9.96507453918457,
"learning_rate": 4.605e-05,
"loss": 6.2083,
"step": 80
},
{
"epoch": 0.085,
"grad_norm": 4.116439342498779,
"learning_rate": 4.58e-05,
"loss": 4.9216,
"step": 85
},
{
"epoch": 0.09,
"grad_norm": 12.154153823852539,
"learning_rate": 4.555e-05,
"loss": 4.8928,
"step": 90
},
{
"epoch": 0.095,
"grad_norm": 5.3574395179748535,
"learning_rate": 4.53e-05,
"loss": 6.1012,
"step": 95
},
{
"epoch": 0.1,
"grad_norm": 4.469454288482666,
"learning_rate": 4.5050000000000004e-05,
"loss": 4.9137,
"step": 100
}
],
"logging_steps": 5,
"max_steps": 1000,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}