{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.10006807351940095,
"eval_steps": 500,
"global_step": 294,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0034036759700476512,
"grad_norm": 0.3913409113883972,
"learning_rate": 3.3333333333333335e-05,
"loss": 1.517,
"step": 10
},
{
"epoch": 0.0068073519400953025,
"grad_norm": 0.42031803727149963,
"learning_rate": 6.666666666666667e-05,
"loss": 1.4607,
"step": 20
},
{
"epoch": 0.010211027910142955,
"grad_norm": 0.33206892013549805,
"learning_rate": 0.0001,
"loss": 1.3907,
"step": 30
},
{
"epoch": 0.013614703880190605,
"grad_norm": 0.38453030586242676,
"learning_rate": 9.964639423366442e-05,
"loss": 1.3338,
"step": 40
},
{
"epoch": 0.01701837985023826,
"grad_norm": 0.5517820715904236,
"learning_rate": 9.859057841617709e-05,
"loss": 1.3894,
"step": 50
},
{
"epoch": 0.02042205582028591,
"grad_norm": 0.3676927983760834,
"learning_rate": 9.68474862499881e-05,
"loss": 1.3543,
"step": 60
},
{
"epoch": 0.023825731790333562,
"grad_norm": 1.1669039726257324,
"learning_rate": 9.444177243274618e-05,
"loss": 1.3556,
"step": 70
},
{
"epoch": 0.02722940776038121,
"grad_norm": 0.4291529953479767,
"learning_rate": 9.140746393556854e-05,
"loss": 1.3505,
"step": 80
},
{
"epoch": 0.03063308373042886,
"grad_norm": 0.6486052870750427,
"learning_rate": 8.778747871771292e-05,
"loss": 1.3473,
"step": 90
},
{
"epoch": 0.03403675970047652,
"grad_norm": 0.42231065034866333,
"learning_rate": 8.363301868506264e-05,
"loss": 1.3506,
"step": 100
},
{
"epoch": 0.037440435670524165,
"grad_norm": 0.5026513934135437,
"learning_rate": 7.900284547855991e-05,
"loss": 1.3029,
"step": 110
},
{
"epoch": 0.04084411164057182,
"grad_norm": 0.6771671175956726,
"learning_rate": 7.396244933600285e-05,
"loss": 1.3336,
"step": 120
},
{
"epoch": 0.04424778761061947,
"grad_norm": 0.5867034792900085,
"learning_rate": 6.858312278301637e-05,
"loss": 1.3177,
"step": 130
},
{
"epoch": 0.047651463580667124,
"grad_norm": 0.5451478958129883,
"learning_rate": 6.294095225512603e-05,
"loss": 1.2924,
"step": 140
},
{
"epoch": 0.05105513955071477,
"grad_norm": 0.7218224406242371,
"learning_rate": 5.7115741913664264e-05,
"loss": 1.3566,
"step": 150
},
{
"epoch": 0.05445881552076242,
"grad_norm": 0.5916715264320374,
"learning_rate": 5.1189884877305375e-05,
"loss": 1.3093,
"step": 160
},
{
"epoch": 0.057862491490810075,
"grad_norm": 0.45692551136016846,
"learning_rate": 4.5247197834790876e-05,
"loss": 1.3207,
"step": 170
},
{
"epoch": 0.06126616746085772,
"grad_norm": 0.840484082698822,
"learning_rate": 3.937173552235117e-05,
"loss": 1.3437,
"step": 180
},
{
"epoch": 0.06466984343090537,
"grad_norm": 0.6113607883453369,
"learning_rate": 3.364660183412892e-05,
"loss": 1.3109,
"step": 190
},
{
"epoch": 0.06807351940095303,
"grad_norm": 0.5302855968475342,
"learning_rate": 2.8152774381532033e-05,
"loss": 1.308,
"step": 200
},
{
"epoch": 0.07147719537100068,
"grad_norm": 0.5346827507019043,
"learning_rate": 2.296795912722014e-05,
"loss": 1.331,
"step": 210
},
{
"epoch": 0.07488087134104833,
"grad_norm": 0.6379411816596985,
"learning_rate": 1.8165491294045593e-05,
"loss": 1.33,
"step": 220
},
{
"epoch": 0.07828454731109598,
"grad_norm": 0.4734562635421753,
"learning_rate": 1.3813298094746491e-05,
"loss": 1.3551,
"step": 230
},
{
"epoch": 0.08168822328114364,
"grad_norm": 0.680587887763977,
"learning_rate": 9.972937953781986e-06,
"loss": 1.3258,
"step": 240
},
{
"epoch": 0.08509189925119129,
"grad_norm": 0.5624794960021973,
"learning_rate": 6.698729810778065e-06,
"loss": 1.3268,
"step": 250
},
{
"epoch": 0.08849557522123894,
"grad_norm": 0.5939269065856934,
"learning_rate": 4.036984820916723e-06,
"loss": 1.3556,
"step": 260
},
{
"epoch": 0.09189925119128659,
"grad_norm": 0.6130989193916321,
"learning_rate": 2.0253513192751373e-06,
"loss": 1.2661,
"step": 270
},
{
"epoch": 0.09530292716133425,
"grad_norm": 0.8308888673782349,
"learning_rate": 6.922823140906753e-07,
"loss": 1.3755,
"step": 280
},
{
"epoch": 0.0987066031313819,
"grad_norm": 0.8799161911010742,
"learning_rate": 5.663304084960186e-08,
"loss": 1.3005,
"step": 290
},
{
"epoch": 0.10006807351940095,
"step": 294,
"total_flos": 8.37378777242665e+16,
"train_loss": 1.3456703529876917,
"train_runtime": 360.87,
"train_samples_per_second": 26.045,
"train_steps_per_second": 0.815
}
],
"logging_steps": 10,
"max_steps": 294,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 8.37378777242665e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}