{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.43243243243243246,
"eval_steps": 500,
"global_step": 4000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.021621621621621623,
"grad_norm": 43.9290657043457,
"learning_rate": 1.9856576576576577e-05,
"loss": 0.6499,
"step": 200
},
{
"epoch": 0.043243243243243246,
"grad_norm": 13.308595657348633,
"learning_rate": 1.9712432432432433e-05,
"loss": 0.568,
"step": 400
},
{
"epoch": 0.06486486486486487,
"grad_norm": 5.098663806915283,
"learning_rate": 1.956828828828829e-05,
"loss": 0.5371,
"step": 600
},
{
"epoch": 0.08648648648648649,
"grad_norm": 9.126262664794922,
"learning_rate": 1.9424144144144147e-05,
"loss": 0.5181,
"step": 800
},
{
"epoch": 0.10810810810810811,
"grad_norm": 15.761275291442871,
"learning_rate": 1.9280000000000002e-05,
"loss": 0.504,
"step": 1000
},
{
"epoch": 0.12972972972972974,
"grad_norm": 33.255863189697266,
"learning_rate": 1.9135855855855857e-05,
"loss": 0.5017,
"step": 1200
},
{
"epoch": 0.15135135135135136,
"grad_norm": 6.04021692276001,
"learning_rate": 1.8991711711711712e-05,
"loss": 0.468,
"step": 1400
},
{
"epoch": 0.17297297297297298,
"grad_norm": 15.048690795898438,
"learning_rate": 1.8847567567567568e-05,
"loss": 0.5056,
"step": 1600
},
{
"epoch": 0.1945945945945946,
"grad_norm": 22.5860538482666,
"learning_rate": 1.8703423423423426e-05,
"loss": 0.4601,
"step": 1800
},
{
"epoch": 0.21621621621621623,
"grad_norm": 12.32684326171875,
"learning_rate": 1.855927927927928e-05,
"loss": 0.4401,
"step": 2000
},
{
"epoch": 0.23783783783783785,
"grad_norm": 9.132744789123535,
"learning_rate": 1.8415135135135137e-05,
"loss": 0.4707,
"step": 2200
},
{
"epoch": 0.2594594594594595,
"grad_norm": 10.921464920043945,
"learning_rate": 1.8270990990990992e-05,
"loss": 0.4676,
"step": 2400
},
{
"epoch": 0.2810810810810811,
"grad_norm": 20.87602424621582,
"learning_rate": 1.8126846846846847e-05,
"loss": 0.469,
"step": 2600
},
{
"epoch": 0.3027027027027027,
"grad_norm": 27.636375427246094,
"learning_rate": 1.7982702702702702e-05,
"loss": 0.4419,
"step": 2800
},
{
"epoch": 0.32432432432432434,
"grad_norm": 8.047364234924316,
"learning_rate": 1.783855855855856e-05,
"loss": 0.4331,
"step": 3000
},
{
"epoch": 0.34594594594594597,
"grad_norm": 14.144438743591309,
"learning_rate": 1.7694414414414416e-05,
"loss": 0.4444,
"step": 3200
},
{
"epoch": 0.3675675675675676,
"grad_norm": 13.833978652954102,
"learning_rate": 1.755027027027027e-05,
"loss": 0.4269,
"step": 3400
},
{
"epoch": 0.3891891891891892,
"grad_norm": 7.048819541931152,
"learning_rate": 1.7406126126126127e-05,
"loss": 0.4518,
"step": 3600
},
{
"epoch": 0.41081081081081083,
"grad_norm": 16.350954055786133,
"learning_rate": 1.7261981981981982e-05,
"loss": 0.4306,
"step": 3800
},
{
"epoch": 0.43243243243243246,
"grad_norm": 36.817256927490234,
"learning_rate": 1.7117837837837837e-05,
"loss": 0.3993,
"step": 4000
}
],
"logging_steps": 200,
"max_steps": 27750,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 1000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 8.922026999808e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}