{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 5.0,
"eval_steps": 500,
"global_step": 224,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.5128205128205128,
"grad_norm": 0.848931074142456,
"learning_rate": 9.53153893518325e-05,
"loss": 6.0403,
"step": 10
},
{
"epoch": 1.0,
"grad_norm": 1.975097894668579,
"learning_rate": 6.294095225512603e-05,
"loss": 5.4718,
"step": 20
},
{
"epoch": 1.5128205128205128,
"grad_norm": 2.3797974586486816,
"learning_rate": 2.132117818244771e-05,
"loss": 4.8283,
"step": 30
},
{
"epoch": 2.0,
"grad_norm": 1.789046287536621,
"learning_rate": 1.9026509541272275e-07,
"loss": 4.6286,
"step": 40
},
{
"epoch": 1.1355932203389831,
"grad_norm": 1.8822929859161377,
"learning_rate": 9.596765194911181e-05,
"loss": 5.0217,
"step": 50
},
{
"epoch": 1.3615819209039548,
"grad_norm": 1.4929884672164917,
"learning_rate": 9.236573524788887e-05,
"loss": 3.9458,
"step": 60
},
{
"epoch": 1.5875706214689265,
"grad_norm": 0.9337669014930725,
"learning_rate": 8.774114695766286e-05,
"loss": 3.6254,
"step": 70
},
{
"epoch": 1.8135593220338984,
"grad_norm": 0.8921490907669067,
"learning_rate": 8.22055205725199e-05,
"loss": 3.4809,
"step": 80
},
{
"epoch": 2.022598870056497,
"grad_norm": 0.9192579984664917,
"learning_rate": 7.589248124491627e-05,
"loss": 3.4035,
"step": 90
},
{
"epoch": 2.248587570621469,
"grad_norm": 1.1187790632247925,
"learning_rate": 6.895442019201897e-05,
"loss": 3.3861,
"step": 100
},
{
"epoch": 2.4745762711864407,
"grad_norm": 1.2873858213424683,
"learning_rate": 6.15588161057485e-05,
"loss": 3.3066,
"step": 110
},
{
"epoch": 2.7005649717514126,
"grad_norm": 1.34447181224823,
"learning_rate": 5.3884192364450325e-05,
"loss": 3.2884,
"step": 120
},
{
"epoch": 2.926553672316384,
"grad_norm": 0.7228454947471619,
"learning_rate": 4.611580763554969e-05,
"loss": 3.2397,
"step": 130
},
{
"epoch": 3.135593220338983,
"grad_norm": 1.1347743272781372,
"learning_rate": 3.844118389425153e-05,
"loss": 3.1814,
"step": 140
},
{
"epoch": 3.361581920903955,
"grad_norm": 1.3022897243499756,
"learning_rate": 3.104557980798104e-05,
"loss": 3.1947,
"step": 150
},
{
"epoch": 3.5875706214689265,
"grad_norm": 1.1423296928405762,
"learning_rate": 2.410751875508373e-05,
"loss": 3.2494,
"step": 160
},
{
"epoch": 3.8135593220338984,
"grad_norm": 0.9820030927658081,
"learning_rate": 1.7794479427480117e-05,
"loss": 3.199,
"step": 170
},
{
"epoch": 4.022598870056497,
"grad_norm": 1.5156248807907104,
"learning_rate": 1.225885304233716e-05,
"loss": 3.1656,
"step": 180
},
{
"epoch": 4.248587570621469,
"grad_norm": 0.8429480195045471,
"learning_rate": 7.63426475211113e-06,
"loss": 3.2354,
"step": 190
},
{
"epoch": 4.47457627118644,
"grad_norm": 1.0040243864059448,
"learning_rate": 4.032348050888179e-06,
"loss": 3.216,
"step": 200
},
{
"epoch": 4.700564971751413,
"grad_norm": 1.4402868747711182,
"learning_rate": 1.5400500400166939e-06,
"loss": 3.1459,
"step": 210
},
{
"epoch": 4.926553672316384,
"grad_norm": 1.0170223712921143,
"learning_rate": 2.1753260154906973e-07,
"loss": 3.2043,
"step": 220
},
{
"epoch": 5.0,
"step": 224,
"total_flos": 2.2662781232726016e+17,
"train_loss": 2.800803001437868,
"train_runtime": 303.5624,
"train_samples_per_second": 23.29,
"train_steps_per_second": 0.741
}
],
"logging_steps": 10,
"max_steps": 225,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.2662781232726016e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}