{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 5.0,
"eval_steps": 500,
"global_step": 50000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.05,
"grad_norm": 2.37485408782959,
"learning_rate": 0.0002,
"loss": 0.7725,
"step": 500
},
{
"epoch": 0.1,
"grad_norm": 2.3556101322174072,
"learning_rate": 0.0004,
"loss": 0.73,
"step": 1000
},
{
"epoch": 0.15,
"grad_norm": 4.192414283752441,
"learning_rate": 0.0006,
"loss": 0.7414,
"step": 1500
},
{
"epoch": 0.2,
"grad_norm": 3.5939464569091797,
"learning_rate": 0.0008,
"loss": 0.7544,
"step": 2000
},
{
"epoch": 0.25,
"grad_norm": 15.336837768554688,
"learning_rate": 0.001,
"loss": 0.7723,
"step": 2500
},
{
"epoch": 0.3,
"grad_norm": 3.893497943878174,
"learning_rate": 0.0009894736842105264,
"loss": 0.7893,
"step": 3000
},
{
"epoch": 0.35,
"grad_norm": 11.713598251342773,
"learning_rate": 0.0009789473684210528,
"loss": 0.7843,
"step": 3500
},
{
"epoch": 0.4,
"grad_norm": 13.350496292114258,
"learning_rate": 0.000968421052631579,
"loss": 0.7894,
"step": 4000
},
{
"epoch": 0.45,
"grad_norm": 2.8563292026519775,
"learning_rate": 0.0009578947368421053,
"loss": 0.7843,
"step": 4500
},
{
"epoch": 0.5,
"grad_norm": 9.680468559265137,
"learning_rate": 0.0009473684210526315,
"loss": 0.8511,
"step": 5000
},
{
"epoch": 0.55,
"grad_norm": 4.680222034454346,
"learning_rate": 0.0009368421052631579,
"loss": 0.8686,
"step": 5500
},
{
"epoch": 0.6,
"grad_norm": 12.037607192993164,
"learning_rate": 0.0009263157894736843,
"loss": 0.8393,
"step": 6000
},
{
"epoch": 0.65,
"grad_norm": 8.154277801513672,
"learning_rate": 0.0009157894736842105,
"loss": 0.8252,
"step": 6500
},
{
"epoch": 0.7,
"grad_norm": 3.256535530090332,
"learning_rate": 0.0009052631578947369,
"loss": 0.7854,
"step": 7000
},
{
"epoch": 0.75,
"grad_norm": 4.168020725250244,
"learning_rate": 0.0008947368421052632,
"loss": 0.8059,
"step": 7500
},
{
"epoch": 0.8,
"grad_norm": 2.775940418243408,
"learning_rate": 0.0008842105263157894,
"loss": 0.8272,
"step": 8000
},
{
"epoch": 0.85,
"grad_norm": 14.476820945739746,
"learning_rate": 0.0008736842105263159,
"loss": 0.8006,
"step": 8500
},
{
"epoch": 0.9,
"grad_norm": 14.232259750366211,
"learning_rate": 0.0008631578947368422,
"loss": 0.7537,
"step": 9000
},
{
"epoch": 0.95,
"grad_norm": 5.338253021240234,
"learning_rate": 0.0008526315789473684,
"loss": 0.7831,
"step": 9500
},
{
"epoch": 1.0,
"grad_norm": 5.454962730407715,
"learning_rate": 0.0008421052631578947,
"loss": 0.7633,
"step": 10000
},
{
"epoch": 1.05,
"grad_norm": 8.250731468200684,
"learning_rate": 0.0008315789473684212,
"loss": 0.7565,
"step": 10500
},
{
"epoch": 1.1,
"grad_norm": 8.233278274536133,
"learning_rate": 0.0008210526315789474,
"loss": 0.7724,
"step": 11000
},
{
"epoch": 1.15,
"grad_norm": 10.603631019592285,
"learning_rate": 0.0008105263157894737,
"loss": 0.782,
"step": 11500
},
{
"epoch": 1.2,
"grad_norm": 3.75233793258667,
"learning_rate": 0.0008,
"loss": 0.7519,
"step": 12000
},
{
"epoch": 1.25,
"grad_norm": 4.677459239959717,
"learning_rate": 0.0007894736842105263,
"loss": 0.7556,
"step": 12500
},
{
"epoch": 1.3,
"grad_norm": 4.583637714385986,
"learning_rate": 0.0007789473684210527,
"loss": 0.7459,
"step": 13000
},
{
"epoch": 1.35,
"grad_norm": 9.343656539916992,
"learning_rate": 0.0007684210526315789,
"loss": 0.7802,
"step": 13500
},
{
"epoch": 1.4,
"grad_norm": 1.4162571430206299,
"learning_rate": 0.0007578947368421053,
"loss": 0.7384,
"step": 14000
},
{
"epoch": 1.45,
"grad_norm": 6.586658477783203,
"learning_rate": 0.0007473684210526316,
"loss": 0.7795,
"step": 14500
},
{
"epoch": 1.5,
"grad_norm": 5.946737289428711,
"learning_rate": 0.0007368421052631579,
"loss": 0.7381,
"step": 15000
},
{
"epoch": 1.55,
"grad_norm": 5.825309753417969,
"learning_rate": 0.0007263157894736843,
"loss": 0.7482,
"step": 15500
},
{
"epoch": 1.6,
"grad_norm": 2.1794614791870117,
"learning_rate": 0.0007157894736842105,
"loss": 0.7381,
"step": 16000
},
{
"epoch": 1.65,
"grad_norm": 7.837281227111816,
"learning_rate": 0.0007052631578947368,
"loss": 0.7314,
"step": 16500
},
{
"epoch": 1.7,
"grad_norm": 4.612753868103027,
"learning_rate": 0.0006947368421052632,
"loss": 0.7384,
"step": 17000
},
{
"epoch": 1.75,
"grad_norm": 6.038923263549805,
"learning_rate": 0.0006842105263157895,
"loss": 0.752,
"step": 17500
},
{
"epoch": 1.8,
"grad_norm": 5.17457389831543,
"learning_rate": 0.0006736842105263158,
"loss": 0.7421,
"step": 18000
},
{
"epoch": 1.85,
"grad_norm": 4.990045547485352,
"learning_rate": 0.0006631578947368421,
"loss": 0.7253,
"step": 18500
},
{
"epoch": 1.9,
"grad_norm": 3.1730449199676514,
"learning_rate": 0.0006526315789473684,
"loss": 0.7202,
"step": 19000
},
{
"epoch": 1.95,
"grad_norm": 1.5063399076461792,
"learning_rate": 0.0006421052631578948,
"loss": 0.7488,
"step": 19500
},
{
"epoch": 2.0,
"grad_norm": 3.6247060298919678,
"learning_rate": 0.0006315789473684211,
"loss": 0.7263,
"step": 20000
},
{
"epoch": 2.05,
"grad_norm": 1.4821553230285645,
"learning_rate": 0.0006210526315789474,
"loss": 0.7176,
"step": 20500
},
{
"epoch": 2.1,
"grad_norm": 2.641312837600708,
"learning_rate": 0.0006105263157894737,
"loss": 0.7182,
"step": 21000
},
{
"epoch": 2.15,
"grad_norm": 3.3185555934906006,
"learning_rate": 0.0006,
"loss": 0.7257,
"step": 21500
},
{
"epoch": 2.2,
"grad_norm": 2.4133715629577637,
"learning_rate": 0.0005894736842105263,
"loss": 0.7202,
"step": 22000
},
{
"epoch": 2.25,
"grad_norm": 1.8694628477096558,
"learning_rate": 0.0005789473684210527,
"loss": 0.7172,
"step": 22500
},
{
"epoch": 2.3,
"grad_norm": 5.248977184295654,
"learning_rate": 0.0005684210526315789,
"loss": 0.7148,
"step": 23000
},
{
"epoch": 2.35,
"grad_norm": 6.179994106292725,
"learning_rate": 0.0005578947368421052,
"loss": 0.7308,
"step": 23500
},
{
"epoch": 2.4,
"grad_norm": 1.9322715997695923,
"learning_rate": 0.0005473684210526317,
"loss": 0.7163,
"step": 24000
},
{
"epoch": 2.45,
"grad_norm": 1.6366279125213623,
"learning_rate": 0.0005368421052631579,
"loss": 0.7262,
"step": 24500
},
{
"epoch": 2.5,
"grad_norm": 5.179625034332275,
"learning_rate": 0.0005263157894736842,
"loss": 0.7054,
"step": 25000
},
{
"epoch": 2.55,
"grad_norm": 1.7737842798233032,
"learning_rate": 0.0005157894736842106,
"loss": 0.7104,
"step": 25500
},
{
"epoch": 2.6,
"grad_norm": 1.6575433015823364,
"learning_rate": 0.0005052631578947368,
"loss": 0.7114,
"step": 26000
},
{
"epoch": 2.65,
"grad_norm": 3.6518235206604004,
"learning_rate": 0.0004947368421052632,
"loss": 0.7087,
"step": 26500
},
{
"epoch": 2.7,
"grad_norm": 1.3658417463302612,
"learning_rate": 0.0004842105263157895,
"loss": 0.7106,
"step": 27000
},
{
"epoch": 2.75,
"grad_norm": 1.312829852104187,
"learning_rate": 0.00047368421052631577,
"loss": 0.7118,
"step": 27500
},
{
"epoch": 2.8,
"grad_norm": 2.94315767288208,
"learning_rate": 0.00046315789473684214,
"loss": 0.7102,
"step": 28000
},
{
"epoch": 2.85,
"grad_norm": 1.349211573600769,
"learning_rate": 0.00045263157894736845,
"loss": 0.7058,
"step": 28500
},
{
"epoch": 2.9,
"grad_norm": 1.3551281690597534,
"learning_rate": 0.0004421052631578947,
"loss": 0.708,
"step": 29000
},
{
"epoch": 2.95,
"grad_norm": 1.3995367288589478,
"learning_rate": 0.0004315789473684211,
"loss": 0.7106,
"step": 29500
},
{
"epoch": 3.0,
"grad_norm": 0.9616995453834534,
"learning_rate": 0.00042105263157894734,
"loss": 0.7021,
"step": 30000
},
{
"epoch": 3.05,
"grad_norm": 2.390662908554077,
"learning_rate": 0.0004105263157894737,
"loss": 0.7061,
"step": 30500
},
{
"epoch": 3.1,
"grad_norm": 1.2397950887680054,
"learning_rate": 0.0004,
"loss": 0.705,
"step": 31000
},
{
"epoch": 3.15,
"grad_norm": 3.110950231552124,
"learning_rate": 0.00038947368421052633,
"loss": 0.7031,
"step": 31500
},
{
"epoch": 3.2,
"grad_norm": 1.1811543703079224,
"learning_rate": 0.00037894736842105265,
"loss": 0.7013,
"step": 32000
},
{
"epoch": 3.25,
"grad_norm": 1.1221048831939697,
"learning_rate": 0.00036842105263157896,
"loss": 0.7026,
"step": 32500
},
{
"epoch": 3.3,
"grad_norm": 0.9882568717002869,
"learning_rate": 0.0003578947368421053,
"loss": 0.7012,
"step": 33000
},
{
"epoch": 3.35,
"grad_norm": 0.8155710697174072,
"learning_rate": 0.0003473684210526316,
"loss": 0.7033,
"step": 33500
},
{
"epoch": 3.4,
"grad_norm": 1.1295320987701416,
"learning_rate": 0.0003368421052631579,
"loss": 0.701,
"step": 34000
},
{
"epoch": 3.45,
"grad_norm": 2.0776407718658447,
"learning_rate": 0.0003263157894736842,
"loss": 0.6986,
"step": 34500
},
{
"epoch": 3.5,
"grad_norm": 1.7907339334487915,
"learning_rate": 0.00031578947368421053,
"loss": 0.698,
"step": 35000
},
{
"epoch": 3.55,
"grad_norm": 1.978267788887024,
"learning_rate": 0.00030526315789473684,
"loss": 0.7031,
"step": 35500
},
{
"epoch": 3.6,
"grad_norm": 3.065232515335083,
"learning_rate": 0.00029473684210526316,
"loss": 0.697,
"step": 36000
},
{
"epoch": 3.65,
"grad_norm": 0.9516103267669678,
"learning_rate": 0.00028421052631578947,
"loss": 0.7006,
"step": 36500
},
{
"epoch": 3.7,
"grad_norm": 1.0045952796936035,
"learning_rate": 0.00027368421052631584,
"loss": 0.6986,
"step": 37000
},
{
"epoch": 3.75,
"grad_norm": 1.6901030540466309,
"learning_rate": 0.0002631578947368421,
"loss": 0.6995,
"step": 37500
},
{
"epoch": 3.8,
"grad_norm": 1.8813843727111816,
"learning_rate": 0.0002526315789473684,
"loss": 0.6987,
"step": 38000
},
{
"epoch": 3.85,
"grad_norm": 1.8174165487289429,
"learning_rate": 0.00024210526315789475,
"loss": 0.6999,
"step": 38500
},
{
"epoch": 3.9,
"grad_norm": 1.7451245784759521,
"learning_rate": 0.00023157894736842107,
"loss": 0.6981,
"step": 39000
},
{
"epoch": 3.95,
"grad_norm": 0.9422154426574707,
"learning_rate": 0.00022105263157894735,
"loss": 0.6962,
"step": 39500
},
{
"epoch": 4.0,
"grad_norm": 2.196399688720703,
"learning_rate": 0.00021052631578947367,
"loss": 0.6984,
"step": 40000
},
{
"epoch": 4.05,
"grad_norm": 1.940382719039917,
"learning_rate": 0.0002,
"loss": 0.6945,
"step": 40500
},
{
"epoch": 4.1,
"grad_norm": 1.2243621349334717,
"learning_rate": 0.00018947368421052632,
"loss": 0.6965,
"step": 41000
},
{
"epoch": 4.15,
"grad_norm": 2.004392623901367,
"learning_rate": 0.00017894736842105264,
"loss": 0.6992,
"step": 41500
},
{
"epoch": 4.2,
"grad_norm": 1.8801628351211548,
"learning_rate": 0.00016842105263157895,
"loss": 0.6944,
"step": 42000
},
{
"epoch": 4.25,
"grad_norm": 0.9306873679161072,
"learning_rate": 0.00015789473684210527,
"loss": 0.693,
"step": 42500
},
{
"epoch": 4.3,
"grad_norm": 1.9730675220489502,
"learning_rate": 0.00014736842105263158,
"loss": 0.6957,
"step": 43000
},
{
"epoch": 4.35,
"grad_norm": 1.0207428932189941,
"learning_rate": 0.00013684210526315792,
"loss": 0.7004,
"step": 43500
},
{
"epoch": 4.4,
"grad_norm": 0.9524587988853455,
"learning_rate": 0.0001263157894736842,
"loss": 0.6927,
"step": 44000
},
{
"epoch": 4.45,
"grad_norm": 0.9829258322715759,
"learning_rate": 0.00011578947368421053,
"loss": 0.6962,
"step": 44500
},
{
"epoch": 4.5,
"grad_norm": 1.317120909690857,
"learning_rate": 0.00010526315789473683,
"loss": 0.6943,
"step": 45000
},
{
"epoch": 4.55,
"grad_norm": 1.8932749032974243,
"learning_rate": 9.473684210526316e-05,
"loss": 0.6933,
"step": 45500
},
{
"epoch": 4.6,
"grad_norm": 0.8536391854286194,
"learning_rate": 8.421052631578948e-05,
"loss": 0.693,
"step": 46000
},
{
"epoch": 4.65,
"grad_norm": 1.9397382736206055,
"learning_rate": 7.368421052631579e-05,
"loss": 0.6942,
"step": 46500
},
{
"epoch": 4.7,
"grad_norm": 1.846257209777832,
"learning_rate": 6.31578947368421e-05,
"loss": 0.6941,
"step": 47000
},
{
"epoch": 4.75,
"grad_norm": 0.937384307384491,
"learning_rate": 5.263157894736842e-05,
"loss": 0.6956,
"step": 47500
},
{
"epoch": 4.8,
"grad_norm": 0.9019428491592407,
"learning_rate": 4.210526315789474e-05,
"loss": 0.6935,
"step": 48000
},
{
"epoch": 4.85,
"grad_norm": 1.892066478729248,
"learning_rate": 3.157894736842105e-05,
"loss": 0.6945,
"step": 48500
},
{
"epoch": 4.9,
"grad_norm": 1.764987826347351,
"learning_rate": 2.105263157894737e-05,
"loss": 0.695,
"step": 49000
},
{
"epoch": 4.95,
"grad_norm": 1.626028060913086,
"learning_rate": 1.0526315789473684e-05,
"loss": 0.6941,
"step": 49500
},
{
"epoch": 5.0,
"grad_norm": 1.6325174570083618,
"learning_rate": 0.0,
"loss": 0.6935,
"step": 50000
}
],
"logging_steps": 500,
"max_steps": 50000,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"total_flos": 2877777168000000.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}