{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 4.0,
"eval_steps": 500,
"global_step": 17560,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.11389521640091116,
"grad_norm": 12.961915016174316,
"learning_rate": 2.2733485193621873e-05,
"loss": 2.138,
"step": 500
},
{
"epoch": 0.22779043280182232,
"grad_norm": 20.78866958618164,
"learning_rate": 4.5512528473804106e-05,
"loss": 0.8384,
"step": 1000
},
{
"epoch": 0.3416856492027335,
"grad_norm": 15.086663246154785,
"learning_rate": 6.829157175398633e-05,
"loss": 0.6629,
"step": 1500
},
{
"epoch": 0.45558086560364464,
"grad_norm": 34.28147888183594,
"learning_rate": 7.99533422493134e-05,
"loss": 0.6011,
"step": 2000
},
{
"epoch": 0.5694760820045558,
"grad_norm": 42.04814529418945,
"learning_rate": 7.95645044782997e-05,
"loss": 0.5135,
"step": 2500
},
{
"epoch": 0.683371298405467,
"grad_norm": 2.5099027156829834,
"learning_rate": 7.878513717033947e-05,
"loss": 0.49,
"step": 3000
},
{
"epoch": 0.7972665148063781,
"grad_norm": 4.545470237731934,
"learning_rate": 7.762293322991625e-05,
"loss": 0.4222,
"step": 3500
},
{
"epoch": 0.9111617312072893,
"grad_norm": 3.895155668258667,
"learning_rate": 7.608936442882838e-05,
"loss": 0.4221,
"step": 4000
},
{
"epoch": 1.0250569476082005,
"grad_norm": 2.197298288345337,
"learning_rate": 7.419956817171832e-05,
"loss": 0.4035,
"step": 4500
},
{
"epoch": 1.1389521640091116,
"grad_norm": 4.543397426605225,
"learning_rate": 7.19721980792226e-05,
"loss": 0.3188,
"step": 5000
},
{
"epoch": 1.2528473804100229,
"grad_norm": 1.2027770280838013,
"learning_rate": 6.942923986359271e-05,
"loss": 0.301,
"step": 5500
},
{
"epoch": 1.366742596810934,
"grad_norm": 5.7361955642700195,
"learning_rate": 6.659579431422354e-05,
"loss": 0.3052,
"step": 6000
},
{
"epoch": 1.4806378132118452,
"grad_norm": 60.32481002807617,
"learning_rate": 6.349982953517576e-05,
"loss": 0.2945,
"step": 6500
},
{
"epoch": 1.5945330296127562,
"grad_norm": 24.024532318115234,
"learning_rate": 6.017190488028201e-05,
"loss": 0.262,
"step": 7000
},
{
"epoch": 1.7084282460136673,
"grad_norm": 5.258288860321045,
"learning_rate": 5.664486931079191e-05,
"loss": 0.2964,
"step": 7500
},
{
"epoch": 1.8223234624145785,
"grad_norm": 22.747085571289062,
"learning_rate": 5.295353715297871e-05,
"loss": 0.2934,
"step": 8000
},
{
"epoch": 1.9362186788154898,
"grad_norm": 1.9899779558181763,
"learning_rate": 4.913434445620825e-05,
"loss": 0.2535,
"step": 8500
},
{
"epoch": 2.050113895216401,
"grad_norm": 0.44854220747947693,
"learning_rate": 4.522498934345834e-05,
"loss": 0.1939,
"step": 9000
},
{
"epoch": 2.164009111617312,
"grad_norm": 12.502466201782227,
"learning_rate": 4.126405990428233e-05,
"loss": 0.161,
"step": 9500
},
{
"epoch": 2.277904328018223,
"grad_norm": 0.9645262360572815,
"learning_rate": 3.7290653303175396e-05,
"loss": 0.1656,
"step": 10000
},
{
"epoch": 2.3917995444191344,
"grad_norm": 0.041170768439769745,
"learning_rate": 3.334398986301262e-05,
"loss": 0.1394,
"step": 10500
},
{
"epoch": 2.5056947608200457,
"grad_norm": 56.7460823059082,
"learning_rate": 2.9463025932826876e-05,
"loss": 0.1377,
"step": 11000
},
{
"epoch": 2.619589977220957,
"grad_norm": 1.695715069770813,
"learning_rate": 2.5686069361194757e-05,
"loss": 0.1274,
"step": 11500
},
{
"epoch": 2.733485193621868,
"grad_norm": 0.49590688943862915,
"learning_rate": 2.205040137077874e-05,
"loss": 0.1508,
"step": 12000
},
{
"epoch": 2.847380410022779,
"grad_norm": 2.330212116241455,
"learning_rate": 1.8591908566390646e-05,
"loss": 0.13,
"step": 12500
},
{
"epoch": 2.9612756264236904,
"grad_norm": 0.003335492219775915,
"learning_rate": 1.5344728708915906e-05,
"loss": 0.1254,
"step": 13000
},
{
"epoch": 3.075170842824601,
"grad_norm": 0.9676417112350464,
"learning_rate": 1.2340913751559621e-05,
"loss": 0.0922,
"step": 13500
},
{
"epoch": 3.1890660592255125,
"grad_norm": 0.6340539455413818,
"learning_rate": 9.610113464484163e-06,
"loss": 0.0876,
"step": 14000
},
{
"epoch": 3.3029612756264237,
"grad_norm": 1.2587453126907349,
"learning_rate": 7.179282770685981e-06,
"loss": 0.0827,
"step": 14500
},
{
"epoch": 3.416856492027335,
"grad_norm": 2.4137210845947266,
"learning_rate": 5.072415681912745e-06,
"loss": 0.0827,
"step": 15000
},
{
"epoch": 3.5307517084282463,
"grad_norm": 1.391802191734314,
"learning_rate": 3.3103084608609646e-06,
"loss": 0.0803,
"step": 15500
},
{
"epoch": 3.644646924829157,
"grad_norm": 0.5722363591194153,
"learning_rate": 1.910354347409693e-06,
"loss": 0.0777,
"step": 16000
},
{
"epoch": 3.7585421412300684,
"grad_norm": 1.3810276985168457,
"learning_rate": 8.863718750874395e-07,
"loss": 0.0846,
"step": 16500
},
{
"epoch": 3.8724373576309796,
"grad_norm": 0.8889294862747192,
"learning_rate": 2.484684724094999e-07,
"loss": 0.0867,
"step": 17000
},
{
"epoch": 3.9863325740318905,
"grad_norm": 1.0857609510421753,
"learning_rate": 2.940695436648433e-09,
"loss": 0.0769,
"step": 17500
}
],
"logging_steps": 500,
"max_steps": 17560,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 7.393629300129792e+16,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}