{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 375,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.16,
"grad_norm": 1.7216755151748657,
"learning_rate": 5e-05,
"loss": 2.4397,
"step": 20
},
{
"epoch": 0.32,
"grad_norm": 1.6662434339523315,
"learning_rate": 9.999782741484788e-05,
"loss": 2.2225,
"step": 40
},
{
"epoch": 0.48,
"grad_norm": 1.1107380390167236,
"learning_rate": 9.904493906342123e-05,
"loss": 1.9769,
"step": 60
},
{
"epoch": 0.64,
"grad_norm": 1.3425688743591309,
"learning_rate": 9.639210244594334e-05,
"loss": 1.8957,
"step": 80
},
{
"epoch": 0.8,
"grad_norm": 1.6660774946212769,
"learning_rate": 9.213126762075088e-05,
"loss": 1.9327,
"step": 100
},
{
"epoch": 0.96,
"grad_norm": 1.0810869932174683,
"learning_rate": 8.641011952560371e-05,
"loss": 1.902,
"step": 120
},
{
"epoch": 1.12,
"grad_norm": 1.5443850755691528,
"learning_rate": 7.94269590651393e-05,
"loss": 1.8109,
"step": 140
},
{
"epoch": 1.28,
"grad_norm": 1.0120549201965332,
"learning_rate": 7.142382979661386e-05,
"loss": 1.7861,
"step": 160
},
{
"epoch": 1.44,
"grad_norm": 1.5919784307479858,
"learning_rate": 6.26781284501043e-05,
"loss": 1.8054,
"step": 180
},
{
"epoch": 1.6,
"grad_norm": 1.353637933731079,
"learning_rate": 5.3492990071209806e-05,
"loss": 1.7685,
"step": 200
},
{
"epoch": 1.76,
"grad_norm": 1.2411092519760132,
"learning_rate": 4.418678104714214e-05,
"loss": 1.8407,
"step": 220
},
{
"epoch": 1.92,
"grad_norm": 1.9999362230300903,
"learning_rate": 3.5082064198777e-05,
"loss": 1.7761,
"step": 240
},
{
"epoch": 2.08,
"grad_norm": 1.3810490369796753,
"learning_rate": 2.6494418419978482e-05,
"loss": 1.7177,
"step": 260
},
{
"epoch": 2.24,
"grad_norm": 1.3220276832580566,
"learning_rate": 1.872150038705015e-05,
"loss": 1.6397,
"step": 280
},
{
"epoch": 2.4,
"grad_norm": 1.6852960586547852,
"learning_rate": 1.203272747076598e-05,
"loss": 1.7622,
"step": 300
},
{
"epoch": 2.56,
"grad_norm": 1.4293591976165771,
"learning_rate": 6.659939451910341e-06,
"loss": 1.6719,
"step": 320
},
{
"epoch": 2.7199999999999998,
"grad_norm": 1.658665657043457,
"learning_rate": 2.7893627149161716e-06,
"loss": 1.6602,
"step": 340
},
{
"epoch": 2.88,
"grad_norm": 1.8376392126083374,
"learning_rate": 5.551554489528432e-07,
"loss": 1.7036,
"step": 360
},
{
"epoch": 3.0,
"step": 375,
"total_flos": 1382710662660096.0,
"train_loss": 1.8455538279215495,
"train_runtime": 977.2087,
"train_samples_per_second": 3.07,
"train_steps_per_second": 0.384
}
],
"logging_steps": 20,
"max_steps": 375,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1382710662660096.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}