{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 4.761904761904762,
"eval_steps": 50,
"global_step": 600,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.1984126984126984,
"grad_norm": 1.0922354459762573,
"learning_rate": 9.523809523809523e-06,
"loss": 0.8473,
"step": 25
},
{
"epoch": 0.3968253968253968,
"grad_norm": 0.7849488854408264,
"learning_rate": 1.9444444444444445e-05,
"loss": 0.3692,
"step": 50
},
{
"epoch": 0.3968253968253968,
"eval_loss": 0.3103943467140198,
"eval_runtime": 10.1545,
"eval_samples_per_second": 4.136,
"eval_steps_per_second": 2.068,
"step": 50
},
{
"epoch": 0.5952380952380952,
"grad_norm": 0.4042750298976898,
"learning_rate": 2.9365079365079366e-05,
"loss": 0.249,
"step": 75
},
{
"epoch": 0.7936507936507936,
"grad_norm": 0.505721926689148,
"learning_rate": 3.928571428571429e-05,
"loss": 0.2248,
"step": 100
},
{
"epoch": 0.7936507936507936,
"eval_loss": 0.23757970333099365,
"eval_runtime": 10.1307,
"eval_samples_per_second": 4.146,
"eval_steps_per_second": 2.073,
"step": 100
},
{
"epoch": 0.9920634920634921,
"grad_norm": 0.39118653535842896,
"learning_rate": 4.9206349206349204e-05,
"loss": 0.1997,
"step": 125
},
{
"epoch": 1.1904761904761905,
"grad_norm": 0.4834806025028229,
"learning_rate": 5.912698412698413e-05,
"loss": 0.1717,
"step": 150
},
{
"epoch": 1.1904761904761905,
"eval_loss": 0.20604592561721802,
"eval_runtime": 10.1545,
"eval_samples_per_second": 4.136,
"eval_steps_per_second": 2.068,
"step": 150
},
{
"epoch": 1.3888888888888888,
"grad_norm": 0.38224297761917114,
"learning_rate": 6.904761904761905e-05,
"loss": 0.1684,
"step": 175
},
{
"epoch": 1.5873015873015874,
"grad_norm": 0.35443320870399475,
"learning_rate": 7.896825396825397e-05,
"loss": 0.1502,
"step": 200
},
{
"epoch": 1.5873015873015874,
"eval_loss": 0.19332902133464813,
"eval_runtime": 10.0977,
"eval_samples_per_second": 4.159,
"eval_steps_per_second": 2.08,
"step": 200
},
{
"epoch": 1.7857142857142856,
"grad_norm": 0.3137013614177704,
"learning_rate": 8.888888888888889e-05,
"loss": 0.1549,
"step": 225
},
{
"epoch": 1.9841269841269842,
"grad_norm": 0.30676764249801636,
"learning_rate": 9.880952380952381e-05,
"loss": 0.1449,
"step": 250
},
{
"epoch": 1.9841269841269842,
"eval_loss": 0.17996199429035187,
"eval_runtime": 10.0168,
"eval_samples_per_second": 4.193,
"eval_steps_per_second": 2.096,
"step": 250
},
{
"epoch": 2.1825396825396823,
"grad_norm": 0.31375375390052795,
"learning_rate": 9.997678517546382e-05,
"loss": 0.1111,
"step": 275
},
{
"epoch": 2.380952380952381,
"grad_norm": 0.2725541293621063,
"learning_rate": 9.989407561073525e-05,
"loss": 0.1016,
"step": 300
},
{
"epoch": 2.380952380952381,
"eval_loss": 0.17762605845928192,
"eval_runtime": 10.0289,
"eval_samples_per_second": 4.188,
"eval_steps_per_second": 2.094,
"step": 300
},
{
"epoch": 2.5793650793650795,
"grad_norm": 0.31162211298942566,
"learning_rate": 9.975153876827008e-05,
"loss": 0.1044,
"step": 325
},
{
"epoch": 2.7777777777777777,
"grad_norm": 0.2855686843395233,
"learning_rate": 9.954934556197257e-05,
"loss": 0.1087,
"step": 350
},
{
"epoch": 2.7777777777777777,
"eval_loss": 0.1633986085653305,
"eval_runtime": 10.056,
"eval_samples_per_second": 4.177,
"eval_steps_per_second": 2.088,
"step": 350
},
{
"epoch": 2.9761904761904763,
"grad_norm": 0.33260759711265564,
"learning_rate": 9.928773843884593e-05,
"loss": 0.1028,
"step": 375
},
{
"epoch": 3.1746031746031744,
"grad_norm": 0.3426005244255066,
"learning_rate": 9.896703108827759e-05,
"loss": 0.0725,
"step": 400
},
{
"epoch": 3.1746031746031744,
"eval_loss": 0.17406687140464783,
"eval_runtime": 10.0302,
"eval_samples_per_second": 4.187,
"eval_steps_per_second": 2.094,
"step": 400
},
{
"epoch": 3.373015873015873,
"grad_norm": 0.3507950007915497,
"learning_rate": 9.85876080658986e-05,
"loss": 0.0698,
"step": 425
},
{
"epoch": 3.571428571428571,
"grad_norm": 0.3167800009250641,
"learning_rate": 9.814992433246858e-05,
"loss": 0.0662,
"step": 450
},
{
"epoch": 3.571428571428571,
"eval_loss": 0.17003558576107025,
"eval_runtime": 10.035,
"eval_samples_per_second": 4.185,
"eval_steps_per_second": 2.093,
"step": 450
},
{
"epoch": 3.7698412698412698,
"grad_norm": 0.31847792863845825,
"learning_rate": 9.765450470833865e-05,
"loss": 0.0724,
"step": 475
},
{
"epoch": 3.9682539682539684,
"grad_norm": 0.3440268635749817,
"learning_rate": 9.710194324414683e-05,
"loss": 0.0736,
"step": 500
},
{
"epoch": 3.9682539682539684,
"eval_loss": 0.1597495824098587,
"eval_runtime": 10.0596,
"eval_samples_per_second": 4.175,
"eval_steps_per_second": 2.088,
"step": 500
},
{
"epoch": 4.166666666666667,
"grad_norm": 0.20968282222747803,
"learning_rate": 9.649290250850029e-05,
"loss": 0.0507,
"step": 525
},
{
"epoch": 4.365079365079365,
"grad_norm": 0.6211559772491455,
"learning_rate": 9.582811279349882e-05,
"loss": 0.0488,
"step": 550
},
{
"epoch": 4.365079365079365,
"eval_loss": 0.1775166541337967,
"eval_runtime": 10.0328,
"eval_samples_per_second": 4.186,
"eval_steps_per_second": 2.093,
"step": 550
},
{
"epoch": 4.563492063492063,
"grad_norm": 0.2983364760875702,
"learning_rate": 9.51083712390519e-05,
"loss": 0.0519,
"step": 575
},
{
"epoch": 4.761904761904762,
"grad_norm": 0.2705860733985901,
"learning_rate": 9.433454087703954e-05,
"loss": 0.0522,
"step": 600
},
{
"epoch": 4.761904761904762,
"eval_loss": 0.1731569916009903,
"eval_runtime": 10.0695,
"eval_samples_per_second": 4.171,
"eval_steps_per_second": 2.085,
"step": 600
}
],
"logging_steps": 25,
"max_steps": 2520,
"num_input_tokens_seen": 0,
"num_train_epochs": 20,
"save_steps": 50,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 3.627868424091448e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}