{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.8648648648648649,
"eval_steps": 500,
"global_step": 8000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.021621621621621623,
"grad_norm": 43.9290657043457,
"learning_rate": 1.9856576576576577e-05,
"loss": 0.6499,
"step": 200
},
{
"epoch": 0.043243243243243246,
"grad_norm": 13.308595657348633,
"learning_rate": 1.9712432432432433e-05,
"loss": 0.568,
"step": 400
},
{
"epoch": 0.06486486486486487,
"grad_norm": 5.098663806915283,
"learning_rate": 1.956828828828829e-05,
"loss": 0.5371,
"step": 600
},
{
"epoch": 0.08648648648648649,
"grad_norm": 9.126262664794922,
"learning_rate": 1.9424144144144147e-05,
"loss": 0.5181,
"step": 800
},
{
"epoch": 0.10810810810810811,
"grad_norm": 15.761275291442871,
"learning_rate": 1.9280000000000002e-05,
"loss": 0.504,
"step": 1000
},
{
"epoch": 0.12972972972972974,
"grad_norm": 33.255863189697266,
"learning_rate": 1.9135855855855857e-05,
"loss": 0.5017,
"step": 1200
},
{
"epoch": 0.15135135135135136,
"grad_norm": 6.04021692276001,
"learning_rate": 1.8991711711711712e-05,
"loss": 0.468,
"step": 1400
},
{
"epoch": 0.17297297297297298,
"grad_norm": 15.048690795898438,
"learning_rate": 1.8847567567567568e-05,
"loss": 0.5056,
"step": 1600
},
{
"epoch": 0.1945945945945946,
"grad_norm": 22.5860538482666,
"learning_rate": 1.8703423423423426e-05,
"loss": 0.4601,
"step": 1800
},
{
"epoch": 0.21621621621621623,
"grad_norm": 12.32684326171875,
"learning_rate": 1.855927927927928e-05,
"loss": 0.4401,
"step": 2000
},
{
"epoch": 0.23783783783783785,
"grad_norm": 9.132744789123535,
"learning_rate": 1.8415135135135137e-05,
"loss": 0.4707,
"step": 2200
},
{
"epoch": 0.2594594594594595,
"grad_norm": 10.921464920043945,
"learning_rate": 1.8270990990990992e-05,
"loss": 0.4676,
"step": 2400
},
{
"epoch": 0.2810810810810811,
"grad_norm": 20.87602424621582,
"learning_rate": 1.8126846846846847e-05,
"loss": 0.469,
"step": 2600
},
{
"epoch": 0.3027027027027027,
"grad_norm": 27.636375427246094,
"learning_rate": 1.7982702702702702e-05,
"loss": 0.4419,
"step": 2800
},
{
"epoch": 0.32432432432432434,
"grad_norm": 8.047364234924316,
"learning_rate": 1.783855855855856e-05,
"loss": 0.4331,
"step": 3000
},
{
"epoch": 0.34594594594594597,
"grad_norm": 14.144438743591309,
"learning_rate": 1.7694414414414416e-05,
"loss": 0.4444,
"step": 3200
},
{
"epoch": 0.3675675675675676,
"grad_norm": 13.833978652954102,
"learning_rate": 1.755027027027027e-05,
"loss": 0.4269,
"step": 3400
},
{
"epoch": 0.3891891891891892,
"grad_norm": 7.048819541931152,
"learning_rate": 1.7406126126126127e-05,
"loss": 0.4518,
"step": 3600
},
{
"epoch": 0.41081081081081083,
"grad_norm": 16.350954055786133,
"learning_rate": 1.7261981981981982e-05,
"loss": 0.4306,
"step": 3800
},
{
"epoch": 0.43243243243243246,
"grad_norm": 36.817256927490234,
"learning_rate": 1.7117837837837837e-05,
"loss": 0.3993,
"step": 4000
},
{
"epoch": 0.4540540540540541,
"grad_norm": 6.8075151443481445,
"learning_rate": 1.6973693693693696e-05,
"loss": 0.4624,
"step": 4200
},
{
"epoch": 0.4756756756756757,
"grad_norm": 22.440227508544922,
"learning_rate": 1.682954954954955e-05,
"loss": 0.4062,
"step": 4400
},
{
"epoch": 0.4972972972972973,
"grad_norm": 27.693946838378906,
"learning_rate": 1.6685405405405406e-05,
"loss": 0.3986,
"step": 4600
},
{
"epoch": 0.518918918918919,
"grad_norm": 5.609838485717773,
"learning_rate": 1.6541261261261262e-05,
"loss": 0.418,
"step": 4800
},
{
"epoch": 0.5405405405405406,
"grad_norm": 9.340408325195312,
"learning_rate": 1.6397117117117117e-05,
"loss": 0.4292,
"step": 5000
},
{
"epoch": 0.5621621621621622,
"grad_norm": 17.670503616333008,
"learning_rate": 1.6252972972972972e-05,
"loss": 0.4358,
"step": 5200
},
{
"epoch": 0.5837837837837838,
"grad_norm": 22.615758895874023,
"learning_rate": 1.610882882882883e-05,
"loss": 0.4324,
"step": 5400
},
{
"epoch": 0.6054054054054054,
"grad_norm": 25.58919906616211,
"learning_rate": 1.5964684684684686e-05,
"loss": 0.4338,
"step": 5600
},
{
"epoch": 0.6270270270270271,
"grad_norm": 32.85356140136719,
"learning_rate": 1.582054054054054e-05,
"loss": 0.4226,
"step": 5800
},
{
"epoch": 0.6486486486486487,
"grad_norm": 9.336725234985352,
"learning_rate": 1.56763963963964e-05,
"loss": 0.4391,
"step": 6000
},
{
"epoch": 0.6702702702702703,
"grad_norm": 24.154613494873047,
"learning_rate": 1.5532252252252252e-05,
"loss": 0.3986,
"step": 6200
},
{
"epoch": 0.6918918918918919,
"grad_norm": 18.917322158813477,
"learning_rate": 1.5388108108108107e-05,
"loss": 0.4107,
"step": 6400
},
{
"epoch": 0.7135135135135136,
"grad_norm": 24.948320388793945,
"learning_rate": 1.5243963963963966e-05,
"loss": 0.4165,
"step": 6600
},
{
"epoch": 0.7351351351351352,
"grad_norm": 14.24731731414795,
"learning_rate": 1.5099819819819821e-05,
"loss": 0.4294,
"step": 6800
},
{
"epoch": 0.7567567567567568,
"grad_norm": 10.69514274597168,
"learning_rate": 1.4955675675675676e-05,
"loss": 0.4004,
"step": 7000
},
{
"epoch": 0.7783783783783784,
"grad_norm": 5.057804107666016,
"learning_rate": 1.4811531531531533e-05,
"loss": 0.4235,
"step": 7200
},
{
"epoch": 0.8,
"grad_norm": 19.741506576538086,
"learning_rate": 1.4667387387387388e-05,
"loss": 0.3834,
"step": 7400
},
{
"epoch": 0.8216216216216217,
"grad_norm": 15.608851432800293,
"learning_rate": 1.4523243243243244e-05,
"loss": 0.4089,
"step": 7600
},
{
"epoch": 0.8432432432432433,
"grad_norm": 8.335741996765137,
"learning_rate": 1.43790990990991e-05,
"loss": 0.4025,
"step": 7800
},
{
"epoch": 0.8648648648648649,
"grad_norm": 8.1391019821167,
"learning_rate": 1.4234954954954956e-05,
"loss": 0.444,
"step": 8000
}
],
"logging_steps": 200,
"max_steps": 27750,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 1000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.7844053999616e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}