{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0,
"eval_steps": 500,
"global_step": 102,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.09950248756218906,
"grad_norm": 0.6564624905586243,
"learning_rate": 0.00019215686274509807,
"loss": 3.0078,
"step": 5
},
{
"epoch": 0.19900497512437812,
"grad_norm": 0.6871398091316223,
"learning_rate": 0.0001823529411764706,
"loss": 2.8242,
"step": 10
},
{
"epoch": 0.29850746268656714,
"grad_norm": 0.8124077320098877,
"learning_rate": 0.00017254901960784316,
"loss": 2.5151,
"step": 15
},
{
"epoch": 0.39800995024875624,
"grad_norm": 1.6018518209457397,
"learning_rate": 0.0001627450980392157,
"loss": 2.1472,
"step": 20
},
{
"epoch": 0.4975124378109453,
"grad_norm": 0.9264693856239319,
"learning_rate": 0.00015294117647058822,
"loss": 1.8993,
"step": 25
},
{
"epoch": 0.5970149253731343,
"grad_norm": 0.8736022114753723,
"learning_rate": 0.00014313725490196078,
"loss": 1.8384,
"step": 30
},
{
"epoch": 0.6965174129353234,
"grad_norm": 1.6786866188049316,
"learning_rate": 0.00013333333333333334,
"loss": 1.4841,
"step": 35
},
{
"epoch": 0.7960199004975125,
"grad_norm": 2.2415640354156494,
"learning_rate": 0.0001235294117647059,
"loss": 1.3095,
"step": 40
},
{
"epoch": 0.8955223880597015,
"grad_norm": 0.7268061637878418,
"learning_rate": 0.00011372549019607843,
"loss": 1.5891,
"step": 45
},
{
"epoch": 0.9950248756218906,
"grad_norm": 0.6739829778671265,
"learning_rate": 0.00010392156862745099,
"loss": 1.5697,
"step": 50
},
{
"epoch": 1.0796019900497513,
"grad_norm": 0.8251414895057678,
"learning_rate": 9.411764705882353e-05,
"loss": 1.4902,
"step": 55
},
{
"epoch": 1.1791044776119404,
"grad_norm": 0.6778109073638916,
"learning_rate": 8.431372549019608e-05,
"loss": 1.3125,
"step": 60
},
{
"epoch": 1.2786069651741294,
"grad_norm": 0.7273361682891846,
"learning_rate": 7.450980392156864e-05,
"loss": 1.1912,
"step": 65
},
{
"epoch": 1.3781094527363185,
"grad_norm": 0.7228267788887024,
"learning_rate": 6.470588235294118e-05,
"loss": 1.4244,
"step": 70
},
{
"epoch": 1.4776119402985075,
"grad_norm": 3.164576768875122,
"learning_rate": 5.490196078431373e-05,
"loss": 1.4583,
"step": 75
},
{
"epoch": 1.5771144278606966,
"grad_norm": 0.7823672294616699,
"learning_rate": 4.5098039215686275e-05,
"loss": 1.4158,
"step": 80
},
{
"epoch": 1.6766169154228856,
"grad_norm": 0.7993632555007935,
"learning_rate": 3.529411764705883e-05,
"loss": 1.2409,
"step": 85
},
{
"epoch": 1.7761194029850746,
"grad_norm": 0.7184426188468933,
"learning_rate": 2.5490196078431373e-05,
"loss": 1.2346,
"step": 90
},
{
"epoch": 1.8756218905472637,
"grad_norm": 0.8258879780769348,
"learning_rate": 1.568627450980392e-05,
"loss": 1.3385,
"step": 95
},
{
"epoch": 1.9751243781094527,
"grad_norm": 0.727968692779541,
"learning_rate": 5.882352941176471e-06,
"loss": 1.5243,
"step": 100
}
],
"logging_steps": 5,
"max_steps": 102,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 50,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3275743297536000.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}