{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.9980430528375734,
"eval_steps": 50,
"global_step": 1020,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.19569471624266144,
"grad_norm": 0.8885060548782349,
"learning_rate": 4.901960784313726e-06,
"loss": 0.9897,
"step": 50
},
{
"epoch": 0.3913894324853229,
"grad_norm": 0.45187801122665405,
"learning_rate": 9.803921568627451e-06,
"loss": 0.7524,
"step": 100
},
{
"epoch": 0.5870841487279843,
"grad_norm": 0.43322402238845825,
"learning_rate": 1.4705882352941177e-05,
"loss": 0.6248,
"step": 150
},
{
"epoch": 0.7827788649706457,
"grad_norm": 0.6817905902862549,
"learning_rate": 1.9607843137254903e-05,
"loss": 0.5634,
"step": 200
},
{
"epoch": 0.9784735812133072,
"grad_norm": 0.6186803579330444,
"learning_rate": 2.4509803921568626e-05,
"loss": 0.5199,
"step": 250
},
{
"epoch": 0.9980430528375733,
"eval_loss": 0.5416576862335205,
"eval_runtime": 67.3935,
"eval_samples_per_second": 3.368,
"eval_steps_per_second": 0.43,
"step": 255
},
{
"epoch": 1.1741682974559686,
"grad_norm": 0.8519768118858337,
"learning_rate": 2.9411764705882354e-05,
"loss": 0.5778,
"step": 300
},
{
"epoch": 1.36986301369863,
"grad_norm": 0.6045963764190674,
"learning_rate": 3.431372549019608e-05,
"loss": 0.5183,
"step": 350
},
{
"epoch": 1.5655577299412915,
"grad_norm": 0.6654757261276245,
"learning_rate": 3.9215686274509805e-05,
"loss": 0.5266,
"step": 400
},
{
"epoch": 1.7612524461839532,
"grad_norm": 0.5622931718826294,
"learning_rate": 4.411764705882353e-05,
"loss": 0.5165,
"step": 450
},
{
"epoch": 1.9569471624266144,
"grad_norm": 0.4925207495689392,
"learning_rate": 4.901960784313725e-05,
"loss": 0.5031,
"step": 500
},
{
"epoch": 2.0,
"eval_loss": 0.5099073648452759,
"eval_runtime": 67.3595,
"eval_samples_per_second": 3.37,
"eval_steps_per_second": 0.431,
"step": 511
},
{
"epoch": 2.152641878669276,
"grad_norm": 0.6266790628433228,
"learning_rate": 4.995258321842611e-05,
"loss": 0.5236,
"step": 550
},
{
"epoch": 2.3483365949119372,
"grad_norm": 0.5779295563697815,
"learning_rate": 4.976026077188013e-05,
"loss": 0.4695,
"step": 600
},
{
"epoch": 2.544031311154599,
"grad_norm": 0.7699418663978577,
"learning_rate": 4.942120794399002e-05,
"loss": 0.5105,
"step": 650
},
{
"epoch": 2.73972602739726,
"grad_norm": 0.4785248339176178,
"learning_rate": 4.893743397654811e-05,
"loss": 0.5547,
"step": 700
},
{
"epoch": 2.935420743639922,
"grad_norm": 0.534096360206604,
"learning_rate": 4.8311805735108894e-05,
"loss": 0.4766,
"step": 750
},
{
"epoch": 2.9980430528375734,
"eval_loss": 0.4970957636833191,
"eval_runtime": 67.3751,
"eval_samples_per_second": 3.369,
"eval_steps_per_second": 0.43,
"step": 766
},
{
"epoch": 3.136986301369863,
"grad_norm": 0.557356595993042,
"learning_rate": 4.754803071981916e-05,
"loss": 0.4412,
"step": 800
},
{
"epoch": 3.3326810176125243,
"grad_norm": 0.6019110083580017,
"learning_rate": 4.665063509461097e-05,
"loss": 0.4561,
"step": 850
},
{
"epoch": 3.528375733855186,
"grad_norm": 0.6057672500610352,
"learning_rate": 4.5624936864957556e-05,
"loss": 0.4561,
"step": 900
},
{
"epoch": 3.724070450097847,
"grad_norm": 0.4167369306087494,
"learning_rate": 4.447701436314176e-05,
"loss": 0.4621,
"step": 950
},
{
"epoch": 3.919765166340509,
"grad_norm": 0.5263449549674988,
"learning_rate": 4.321367022779476e-05,
"loss": 0.5035,
"step": 1000
},
{
"epoch": 3.9980430528375734,
"eval_loss": 0.495980829000473,
"eval_runtime": 67.4502,
"eval_samples_per_second": 3.365,
"eval_steps_per_second": 0.43,
"step": 1020
}
],
"logging_steps": 50,
"max_steps": 2550,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"total_flos": 6.79057249258537e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}