{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.08,
"eval_steps": 4,
"global_step": 26,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.08,
"grad_norm": 102.28898620605469,
"learning_rate": 2e-05,
"loss": 6.6367,
"step": 1
},
{
"epoch": 0.08,
"eval_loss": 7.300913333892822,
"eval_runtime": 1.3523,
"eval_samples_per_second": 8.873,
"eval_steps_per_second": 4.437,
"step": 1
},
{
"epoch": 0.16,
"grad_norm": 103.4541015625,
"learning_rate": 4e-05,
"loss": 7.0616,
"step": 2
},
{
"epoch": 0.24,
"grad_norm": 67.47515869140625,
"learning_rate": 6e-05,
"loss": 4.686,
"step": 3
},
{
"epoch": 0.32,
"grad_norm": 72.36919403076172,
"learning_rate": 8e-05,
"loss": 2.3866,
"step": 4
},
{
"epoch": 0.32,
"eval_loss": 0.7137572169303894,
"eval_runtime": 1.3532,
"eval_samples_per_second": 8.868,
"eval_steps_per_second": 4.434,
"step": 4
},
{
"epoch": 0.4,
"grad_norm": 16.83085060119629,
"learning_rate": 0.0001,
"loss": 0.6844,
"step": 5
},
{
"epoch": 0.48,
"grad_norm": 25.897714614868164,
"learning_rate": 0.00012,
"loss": 0.914,
"step": 6
},
{
"epoch": 0.56,
"grad_norm": 18.89151382446289,
"learning_rate": 0.00014,
"loss": 0.63,
"step": 7
},
{
"epoch": 0.64,
"grad_norm": 27.15555763244629,
"learning_rate": 0.00016,
"loss": 0.948,
"step": 8
},
{
"epoch": 0.64,
"eval_loss": 1.0445994138717651,
"eval_runtime": 1.356,
"eval_samples_per_second": 8.85,
"eval_steps_per_second": 4.425,
"step": 8
},
{
"epoch": 0.72,
"grad_norm": 20.812381744384766,
"learning_rate": 0.00018,
"loss": 1.0285,
"step": 9
},
{
"epoch": 0.8,
"grad_norm": 56.3886604309082,
"learning_rate": 0.0002,
"loss": 1.3756,
"step": 10
},
{
"epoch": 0.88,
"grad_norm": 6.24803352355957,
"learning_rate": 0.00019981755542233177,
"loss": 0.5178,
"step": 11
},
{
"epoch": 0.96,
"grad_norm": 8.379430770874023,
"learning_rate": 0.0001992708874098054,
"loss": 0.6822,
"step": 12
},
{
"epoch": 0.96,
"eval_loss": 1.3959709405899048,
"eval_runtime": 1.3583,
"eval_samples_per_second": 8.835,
"eval_steps_per_second": 4.417,
"step": 12
},
{
"epoch": 1.04,
"grad_norm": 20.744348526000977,
"learning_rate": 0.00019836199069471437,
"loss": 1.3762,
"step": 13
},
{
"epoch": 1.12,
"grad_norm": 4.800480842590332,
"learning_rate": 0.0001970941817426052,
"loss": 0.5248,
"step": 14
},
{
"epoch": 1.2,
"grad_norm": 11.284302711486816,
"learning_rate": 0.00019547208665085457,
"loss": 0.8094,
"step": 15
},
{
"epoch": 1.28,
"grad_norm": 5.787976264953613,
"learning_rate": 0.0001935016242685415,
"loss": 0.5222,
"step": 16
},
{
"epoch": 1.28,
"eval_loss": 0.9023411870002747,
"eval_runtime": 1.3623,
"eval_samples_per_second": 8.808,
"eval_steps_per_second": 4.404,
"step": 16
},
{
"epoch": 1.36,
"grad_norm": 21.48629379272461,
"learning_rate": 0.00019118998459920902,
"loss": 0.8027,
"step": 17
},
{
"epoch": 1.44,
"grad_norm": 38.0982666015625,
"learning_rate": 0.000188545602565321,
"loss": 1.7772,
"step": 18
},
{
"epoch": 1.52,
"grad_norm": 10.824837684631348,
"learning_rate": 0.00018557812723014476,
"loss": 0.7737,
"step": 19
},
{
"epoch": 1.6,
"grad_norm": 9.1353120803833,
"learning_rate": 0.00018229838658936564,
"loss": 0.534,
"step": 20
},
{
"epoch": 1.6,
"eval_loss": 0.4847445785999298,
"eval_runtime": 1.3637,
"eval_samples_per_second": 8.799,
"eval_steps_per_second": 4.4,
"step": 20
},
{
"epoch": 1.68,
"grad_norm": 3.8411033153533936,
"learning_rate": 0.00017871834806090501,
"loss": 0.3201,
"step": 21
},
{
"epoch": 1.76,
"grad_norm": 23.888507843017578,
"learning_rate": 0.00017485107481711012,
"loss": 2.2541,
"step": 22
},
{
"epoch": 1.84,
"grad_norm": 8.5956392288208,
"learning_rate": 0.00017071067811865476,
"loss": 0.8177,
"step": 23
},
{
"epoch": 1.92,
"grad_norm": 3.825141191482544,
"learning_rate": 0.00016631226582407952,
"loss": 0.4624,
"step": 24
},
{
"epoch": 1.92,
"eval_loss": 0.5740255117416382,
"eval_runtime": 1.3655,
"eval_samples_per_second": 8.788,
"eval_steps_per_second": 4.394,
"step": 24
},
{
"epoch": 2.0,
"grad_norm": 3.558993101119995,
"learning_rate": 0.00016167188726285434,
"loss": 0.3714,
"step": 25
},
{
"epoch": 2.08,
"grad_norm": 11.759211540222168,
"learning_rate": 0.00015680647467311557,
"loss": 0.6562,
"step": 26
}
],
"logging_steps": 1,
"max_steps": 62,
"num_input_tokens_seen": 0,
"num_train_epochs": 6,
"save_steps": 13,
"total_flos": 2276469522825216.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}