{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 80,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.012578616352201259,
"grad_norm": 24.235414505004883,
"learning_rate": 0.0,
"loss": 5.9429,
"step": 1
},
{
"epoch": 0.025157232704402517,
"grad_norm": 24.20835304260254,
"learning_rate": 2.5e-06,
"loss": 5.9877,
"step": 2
},
{
"epoch": 0.03773584905660377,
"grad_norm": 23.525680541992188,
"learning_rate": 5e-06,
"loss": 5.9906,
"step": 3
},
{
"epoch": 0.050314465408805034,
"grad_norm": 25.947757720947266,
"learning_rate": 7.500000000000001e-06,
"loss": 5.9738,
"step": 4
},
{
"epoch": 0.06289308176100629,
"grad_norm": 23.722261428833008,
"learning_rate": 1e-05,
"loss": 5.9398,
"step": 5
},
{
"epoch": 0.07547169811320754,
"grad_norm": 23.66267204284668,
"learning_rate": 1.25e-05,
"loss": 5.8582,
"step": 6
},
{
"epoch": 0.0880503144654088,
"grad_norm": 22.97976303100586,
"learning_rate": 1.5000000000000002e-05,
"loss": 5.7527,
"step": 7
},
{
"epoch": 0.10062893081761007,
"grad_norm": 21.711450576782227,
"learning_rate": 1.7500000000000002e-05,
"loss": 5.644,
"step": 8
},
{
"epoch": 0.11320754716981132,
"grad_norm": 19.465877532958984,
"learning_rate": 2e-05,
"loss": 5.5225,
"step": 9
},
{
"epoch": 0.12578616352201258,
"grad_norm": 17.033899307250977,
"learning_rate": 1.999048221581858e-05,
"loss": 5.2972,
"step": 10
},
{
"epoch": 0.13836477987421383,
"grad_norm": 15.466821670532227,
"learning_rate": 1.9961946980917457e-05,
"loss": 5.1174,
"step": 11
},
{
"epoch": 0.1509433962264151,
"grad_norm": 14.021515846252441,
"learning_rate": 1.9914448613738107e-05,
"loss": 4.9761,
"step": 12
},
{
"epoch": 0.16352201257861634,
"grad_norm": 13.911083221435547,
"learning_rate": 1.9848077530122083e-05,
"loss": 4.8011,
"step": 13
},
{
"epoch": 0.1761006289308176,
"grad_norm": 12.739690780639648,
"learning_rate": 1.9762960071199334e-05,
"loss": 4.6854,
"step": 14
},
{
"epoch": 0.18867924528301888,
"grad_norm": 11.953996658325195,
"learning_rate": 1.9659258262890683e-05,
"loss": 4.5724,
"step": 15
},
{
"epoch": 0.20125786163522014,
"grad_norm": 11.002123832702637,
"learning_rate": 1.953716950748227e-05,
"loss": 4.4691,
"step": 16
},
{
"epoch": 0.2138364779874214,
"grad_norm": 10.387340545654297,
"learning_rate": 1.9396926207859085e-05,
"loss": 4.3876,
"step": 17
},
{
"epoch": 0.22641509433962265,
"grad_norm": 10.04249382019043,
"learning_rate": 1.9238795325112867e-05,
"loss": 4.2144,
"step": 18
},
{
"epoch": 0.2389937106918239,
"grad_norm": 9.719793319702148,
"learning_rate": 1.9063077870366504e-05,
"loss": 4.0801,
"step": 19
},
{
"epoch": 0.25157232704402516,
"grad_norm": 9.387718200683594,
"learning_rate": 1.887010833178222e-05,
"loss": 3.9868,
"step": 20
},
{
"epoch": 0.2641509433962264,
"grad_norm": 9.012751579284668,
"learning_rate": 1.866025403784439e-05,
"loss": 3.9461,
"step": 21
},
{
"epoch": 0.27672955974842767,
"grad_norm": 9.132621765136719,
"learning_rate": 1.843391445812886e-05,
"loss": 3.881,
"step": 22
},
{
"epoch": 0.2893081761006289,
"grad_norm": 8.821338653564453,
"learning_rate": 1.819152044288992e-05,
"loss": 3.7709,
"step": 23
},
{
"epoch": 0.3018867924528302,
"grad_norm": 8.580257415771484,
"learning_rate": 1.7933533402912354e-05,
"loss": 3.7456,
"step": 24
},
{
"epoch": 0.31446540880503143,
"grad_norm": 7.926499843597412,
"learning_rate": 1.766044443118978e-05,
"loss": 3.6151,
"step": 25
},
{
"epoch": 0.3270440251572327,
"grad_norm": 7.801671028137207,
"learning_rate": 1.737277336810124e-05,
"loss": 3.6009,
"step": 26
},
{
"epoch": 0.33962264150943394,
"grad_norm": 7.5823283195495605,
"learning_rate": 1.7071067811865477e-05,
"loss": 3.5085,
"step": 27
},
{
"epoch": 0.3522012578616352,
"grad_norm": 7.164687633514404,
"learning_rate": 1.6755902076156606e-05,
"loss": 3.4462,
"step": 28
},
{
"epoch": 0.36477987421383645,
"grad_norm": 6.993910312652588,
"learning_rate": 1.6427876096865394e-05,
"loss": 3.4085,
"step": 29
},
{
"epoch": 0.37735849056603776,
"grad_norm": 6.862181663513184,
"learning_rate": 1.608761429008721e-05,
"loss": 3.3406,
"step": 30
},
{
"epoch": 0.389937106918239,
"grad_norm": 6.750012397766113,
"learning_rate": 1.573576436351046e-05,
"loss": 3.2278,
"step": 31
},
{
"epoch": 0.4025157232704403,
"grad_norm": 6.868540287017822,
"learning_rate": 1.5372996083468242e-05,
"loss": 3.1773,
"step": 32
},
{
"epoch": 0.41509433962264153,
"grad_norm": 6.78640604019165,
"learning_rate": 1.5000000000000002e-05,
"loss": 3.1175,
"step": 33
},
{
"epoch": 0.4276729559748428,
"grad_norm": 7.065934181213379,
"learning_rate": 1.4617486132350343e-05,
"loss": 3.0835,
"step": 34
},
{
"epoch": 0.44025157232704404,
"grad_norm": 7.395648956298828,
"learning_rate": 1.4226182617406996e-05,
"loss": 3.0579,
"step": 35
},
{
"epoch": 0.4528301886792453,
"grad_norm": 7.377147197723389,
"learning_rate": 1.3826834323650899e-05,
"loss": 3.001,
"step": 36
},
{
"epoch": 0.46540880503144655,
"grad_norm": 7.350756645202637,
"learning_rate": 1.342020143325669e-05,
"loss": 2.9134,
"step": 37
},
{
"epoch": 0.4779874213836478,
"grad_norm": 7.21485710144043,
"learning_rate": 1.300705799504273e-05,
"loss": 2.8916,
"step": 38
},
{
"epoch": 0.49056603773584906,
"grad_norm": 7.14323616027832,
"learning_rate": 1.2588190451025209e-05,
"loss": 2.8545,
"step": 39
},
{
"epoch": 0.5031446540880503,
"grad_norm": 7.045004844665527,
"learning_rate": 1.2164396139381029e-05,
"loss": 2.7926,
"step": 40
},
{
"epoch": 0.5157232704402516,
"grad_norm": 7.102219104766846,
"learning_rate": 1.1736481776669307e-05,
"loss": 2.7661,
"step": 41
},
{
"epoch": 0.5283018867924528,
"grad_norm": 7.334245681762695,
"learning_rate": 1.130526192220052e-05,
"loss": 2.7134,
"step": 42
},
{
"epoch": 0.5408805031446541,
"grad_norm": 7.159778594970703,
"learning_rate": 1.0871557427476585e-05,
"loss": 2.6547,
"step": 43
},
{
"epoch": 0.5534591194968553,
"grad_norm": 7.077873706817627,
"learning_rate": 1.0436193873653362e-05,
"loss": 2.607,
"step": 44
},
{
"epoch": 0.5660377358490566,
"grad_norm": 7.217611789703369,
"learning_rate": 1e-05,
"loss": 2.6118,
"step": 45
},
{
"epoch": 0.5786163522012578,
"grad_norm": 6.973113059997559,
"learning_rate": 9.563806126346643e-06,
"loss": 2.5558,
"step": 46
},
{
"epoch": 0.5911949685534591,
"grad_norm": 7.162886142730713,
"learning_rate": 9.128442572523418e-06,
"loss": 2.5148,
"step": 47
},
{
"epoch": 0.6037735849056604,
"grad_norm": 7.065187931060791,
"learning_rate": 8.694738077799487e-06,
"loss": 2.4874,
"step": 48
},
{
"epoch": 0.6163522012578616,
"grad_norm": 7.004238605499268,
"learning_rate": 8.263518223330698e-06,
"loss": 2.4503,
"step": 49
},
{
"epoch": 0.6289308176100629,
"grad_norm": 7.0252790451049805,
"learning_rate": 7.835603860618973e-06,
"loss": 2.4348,
"step": 50
},
{
"epoch": 0.6415094339622641,
"grad_norm": 6.905361175537109,
"learning_rate": 7.411809548974792e-06,
"loss": 2.377,
"step": 51
},
{
"epoch": 0.6540880503144654,
"grad_norm": 6.864150524139404,
"learning_rate": 6.992942004957271e-06,
"loss": 2.3397,
"step": 52
},
{
"epoch": 0.6666666666666666,
"grad_norm": 6.928408145904541,
"learning_rate": 6.579798566743314e-06,
"loss": 2.331,
"step": 53
},
{
"epoch": 0.6792452830188679,
"grad_norm": 6.910766124725342,
"learning_rate": 6.173165676349103e-06,
"loss": 2.3138,
"step": 54
},
{
"epoch": 0.6918238993710691,
"grad_norm": 6.662342071533203,
"learning_rate": 5.773817382593008e-06,
"loss": 2.2694,
"step": 55
},
{
"epoch": 0.7044025157232704,
"grad_norm": 6.670816421508789,
"learning_rate": 5.382513867649663e-06,
"loss": 2.2476,
"step": 56
},
{
"epoch": 0.7169811320754716,
"grad_norm": 6.450694561004639,
"learning_rate": 5.000000000000003e-06,
"loss": 2.23,
"step": 57
},
{
"epoch": 0.7295597484276729,
"grad_norm": 6.611984729766846,
"learning_rate": 4.627003916531761e-06,
"loss": 2.1873,
"step": 58
},
{
"epoch": 0.7421383647798742,
"grad_norm": 6.599513053894043,
"learning_rate": 4.264235636489542e-06,
"loss": 2.1645,
"step": 59
},
{
"epoch": 0.7547169811320755,
"grad_norm": 6.474591255187988,
"learning_rate": 3.912385709912794e-06,
"loss": 2.132,
"step": 60
},
{
"epoch": 0.7672955974842768,
"grad_norm": 6.484611988067627,
"learning_rate": 3.5721239031346067e-06,
"loss": 2.1703,
"step": 61
},
{
"epoch": 0.779874213836478,
"grad_norm": 6.360864162445068,
"learning_rate": 3.2440979238433977e-06,
"loss": 2.1489,
"step": 62
},
{
"epoch": 0.7924528301886793,
"grad_norm": 6.536640644073486,
"learning_rate": 2.9289321881345257e-06,
"loss": 2.1015,
"step": 63
},
{
"epoch": 0.8050314465408805,
"grad_norm": 6.510339736938477,
"learning_rate": 2.6272266318987606e-06,
"loss": 2.0919,
"step": 64
},
{
"epoch": 0.8176100628930818,
"grad_norm": 6.453073501586914,
"learning_rate": 2.339555568810221e-06,
"loss": 2.1152,
"step": 65
},
{
"epoch": 0.8301886792452831,
"grad_norm": 6.530029773712158,
"learning_rate": 2.0664665970876496e-06,
"loss": 2.1062,
"step": 66
},
{
"epoch": 0.8427672955974843,
"grad_norm": 6.49432897567749,
"learning_rate": 1.808479557110081e-06,
"loss": 2.1046,
"step": 67
},
{
"epoch": 0.8553459119496856,
"grad_norm": 6.546372890472412,
"learning_rate": 1.566085541871145e-06,
"loss": 2.0763,
"step": 68
},
{
"epoch": 0.8679245283018868,
"grad_norm": 6.68710994720459,
"learning_rate": 1.339745962155613e-06,
"loss": 2.0718,
"step": 69
},
{
"epoch": 0.8805031446540881,
"grad_norm": 6.77480936050415,
"learning_rate": 1.129891668217783e-06,
"loss": 2.0944,
"step": 70
},
{
"epoch": 0.8930817610062893,
"grad_norm": 6.603630065917969,
"learning_rate": 9.369221296335007e-07,
"loss": 2.0652,
"step": 71
},
{
"epoch": 0.9056603773584906,
"grad_norm": 6.616003513336182,
"learning_rate": 7.612046748871327e-07,
"loss": 2.0317,
"step": 72
},
{
"epoch": 0.9182389937106918,
"grad_norm": 6.576902389526367,
"learning_rate": 6.030737921409169e-07,
"loss": 2.0268,
"step": 73
},
{
"epoch": 0.9308176100628931,
"grad_norm": 6.478517055511475,
"learning_rate": 4.628304925177318e-07,
"loss": 2.0539,
"step": 74
},
{
"epoch": 0.9433962264150944,
"grad_norm": 6.657393455505371,
"learning_rate": 3.4074173710931804e-07,
"loss": 2.0288,
"step": 75
},
{
"epoch": 0.9559748427672956,
"grad_norm": 6.596218585968018,
"learning_rate": 2.370399288006664e-07,
"loss": 2.0511,
"step": 76
},
{
"epoch": 0.9685534591194969,
"grad_norm": 6.601206302642822,
"learning_rate": 1.519224698779198e-07,
"loss": 2.0497,
"step": 77
},
{
"epoch": 0.9811320754716981,
"grad_norm": 6.5980401039123535,
"learning_rate": 8.555138626189619e-08,
"loss": 2.0436,
"step": 78
},
{
"epoch": 0.9937106918238994,
"grad_norm": 6.563250541687012,
"learning_rate": 3.805301908254455e-08,
"loss": 2.0327,
"step": 79
},
{
"epoch": 1.0,
"grad_norm": 6.6095662117004395,
"learning_rate": 9.517784181422018e-09,
"loss": 1.9954,
"step": 80
}
],
"logging_steps": 1,
"max_steps": 80,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 9.836271296493304e+17,
"train_batch_size": 128,
"trial_name": null,
"trial_params": null
}