{
"best_global_step": 6000,
"best_metric": 0.8871330073620233,
"best_model_checkpoint": "/home/rupak/Desktop/Topic-Modeling /topic modeling 25k/checkpoint/xlm-r-25k-2e5-256-16-10/checkpoint-6000",
"epoch": 4.7964814074370254,
"eval_steps": 1000,
"global_step": 6000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0007996801279488205,
"grad_norm": 4.53638219833374,
"learning_rate": 0.0,
"loss": 1.6879,
"step": 1
},
{
"epoch": 0.07996801279488205,
"grad_norm": 2.835871934890747,
"learning_rate": 1.5840000000000002e-06,
"loss": 1.6188,
"step": 100
},
{
"epoch": 0.1599360255897641,
"grad_norm": 10.777544021606445,
"learning_rate": 3.1840000000000003e-06,
"loss": 1.6119,
"step": 200
},
{
"epoch": 0.23990403838464613,
"grad_norm": 12.409767150878906,
"learning_rate": 4.784e-06,
"loss": 1.2151,
"step": 300
},
{
"epoch": 0.3198720511795282,
"grad_norm": 31.977880477905273,
"learning_rate": 6.384e-06,
"loss": 0.7791,
"step": 400
},
{
"epoch": 0.39984006397441024,
"grad_norm": 21.095924377441406,
"learning_rate": 7.984e-06,
"loss": 0.6522,
"step": 500
},
{
"epoch": 0.47980807676929227,
"grad_norm": 42.61677932739258,
"learning_rate": 9.584000000000002e-06,
"loss": 0.5709,
"step": 600
},
{
"epoch": 0.5597760895641744,
"grad_norm": 12.979878425598145,
"learning_rate": 1.1184000000000002e-05,
"loss": 0.5023,
"step": 700
},
{
"epoch": 0.6397441023590564,
"grad_norm": 13.918512344360352,
"learning_rate": 1.2784000000000002e-05,
"loss": 0.5145,
"step": 800
},
{
"epoch": 0.7197121151539384,
"grad_norm": 5.735707759857178,
"learning_rate": 1.4384e-05,
"loss": 0.5147,
"step": 900
},
{
"epoch": 0.7996801279488205,
"grad_norm": 19.905000686645508,
"learning_rate": 1.5984e-05,
"loss": 0.479,
"step": 1000
},
{
"epoch": 0.7996801279488205,
"eval_accuracy": 0.8600559776089565,
"eval_auroc_macro": 0.9697997005619113,
"eval_auroc_weighted": 0.9693976210021283,
"eval_f1_macro": 0.8609883608675034,
"eval_f1_weighted": 0.8598453926609552,
"eval_loss": 0.4712389409542084,
"eval_precision_macro": 0.8631139602509299,
"eval_precision_weighted": 0.8628361263926728,
"eval_recall_macro": 0.8621203563951813,
"eval_recall_weighted": 0.8600559776089565,
"eval_runtime": 7.4659,
"eval_samples_per_second": 334.989,
"eval_steps_per_second": 41.924,
"step": 1000
},
{
"epoch": 0.8796481407437026,
"grad_norm": 41.90662384033203,
"learning_rate": 1.7584e-05,
"loss": 0.4544,
"step": 1100
},
{
"epoch": 0.9596161535385845,
"grad_norm": 21.125547409057617,
"learning_rate": 1.9184e-05,
"loss": 0.4281,
"step": 1200
},
{
"epoch": 1.0391843262694922,
"grad_norm": 28.119949340820312,
"learning_rate": 1.991296625222025e-05,
"loss": 0.4532,
"step": 1300
},
{
"epoch": 1.1191523390643743,
"grad_norm": 12.584921836853027,
"learning_rate": 1.9735346358792185e-05,
"loss": 0.4224,
"step": 1400
},
{
"epoch": 1.1991203518592564,
"grad_norm": 9.214597702026367,
"learning_rate": 1.955772646536412e-05,
"loss": 0.4,
"step": 1500
},
{
"epoch": 1.2790883646541382,
"grad_norm": 11.22278881072998,
"learning_rate": 1.938010657193606e-05,
"loss": 0.4031,
"step": 1600
},
{
"epoch": 1.3590563774490203,
"grad_norm": 10.408547401428223,
"learning_rate": 1.9202486678507993e-05,
"loss": 0.3869,
"step": 1700
},
{
"epoch": 1.4390243902439024,
"grad_norm": 3.784024477005005,
"learning_rate": 1.902486678507993e-05,
"loss": 0.4519,
"step": 1800
},
{
"epoch": 1.5189924030387845,
"grad_norm": 39.47583770751953,
"learning_rate": 1.884724689165187e-05,
"loss": 0.4135,
"step": 1900
},
{
"epoch": 1.5989604158336665,
"grad_norm": 31.41134262084961,
"learning_rate": 1.86696269982238e-05,
"loss": 0.3946,
"step": 2000
},
{
"epoch": 1.5989604158336665,
"eval_accuracy": 0.8692522990803678,
"eval_auroc_macro": 0.9771314037560426,
"eval_auroc_weighted": 0.976669092429264,
"eval_f1_macro": 0.8689438661562177,
"eval_f1_weighted": 0.8676413311378186,
"eval_loss": 0.4619748890399933,
"eval_precision_macro": 0.8703728530490539,
"eval_precision_weighted": 0.8706160950869455,
"eval_recall_macro": 0.8720003755655708,
"eval_recall_weighted": 0.8692522990803678,
"eval_runtime": 6.9001,
"eval_samples_per_second": 362.46,
"eval_steps_per_second": 45.362,
"step": 2000
},
{
"epoch": 1.6789284286285486,
"grad_norm": 2.4029605388641357,
"learning_rate": 1.849200710479574e-05,
"loss": 0.3931,
"step": 2100
},
{
"epoch": 1.7588964414234307,
"grad_norm": 17.38362693786621,
"learning_rate": 1.8314387211367673e-05,
"loss": 0.3962,
"step": 2200
},
{
"epoch": 1.8388644542183128,
"grad_norm": 18.689165115356445,
"learning_rate": 1.813676731793961e-05,
"loss": 0.365,
"step": 2300
},
{
"epoch": 1.9188324670131949,
"grad_norm": 10.522760391235352,
"learning_rate": 1.795914742451155e-05,
"loss": 0.3506,
"step": 2400
},
{
"epoch": 1.9988004798080767,
"grad_norm": 25.250308990478516,
"learning_rate": 1.778152753108348e-05,
"loss": 0.4575,
"step": 2500
},
{
"epoch": 2.0783686525389844,
"grad_norm": 19.280771255493164,
"learning_rate": 1.760390763765542e-05,
"loss": 0.3698,
"step": 2600
},
{
"epoch": 2.1583366653338665,
"grad_norm": 5.396891117095947,
"learning_rate": 1.7426287744227356e-05,
"loss": 0.2822,
"step": 2700
},
{
"epoch": 2.2383046781287486,
"grad_norm": 9.906753540039062,
"learning_rate": 1.724866785079929e-05,
"loss": 0.2936,
"step": 2800
},
{
"epoch": 2.3182726909236306,
"grad_norm": 29.26998519897461,
"learning_rate": 1.7071047957371228e-05,
"loss": 0.3453,
"step": 2900
},
{
"epoch": 2.3982407037185127,
"grad_norm": 18.436208724975586,
"learning_rate": 1.6893428063943164e-05,
"loss": 0.3204,
"step": 3000
},
{
"epoch": 2.3982407037185127,
"eval_accuracy": 0.8740503798480608,
"eval_auroc_macro": 0.9792758464373478,
"eval_auroc_weighted": 0.9788870577881947,
"eval_f1_macro": 0.8741945993115099,
"eval_f1_weighted": 0.872931355571633,
"eval_loss": 0.45468512177467346,
"eval_precision_macro": 0.8741535901174557,
"eval_precision_weighted": 0.8737552557579643,
"eval_recall_macro": 0.8761328388099946,
"eval_recall_weighted": 0.8740503798480608,
"eval_runtime": 6.8511,
"eval_samples_per_second": 365.049,
"eval_steps_per_second": 45.686,
"step": 3000
},
{
"epoch": 2.478208716513395,
"grad_norm": 11.020326614379883,
"learning_rate": 1.67158081705151e-05,
"loss": 0.3072,
"step": 3100
},
{
"epoch": 2.5581767293082764,
"grad_norm": 8.49850845336914,
"learning_rate": 1.6538188277087036e-05,
"loss": 0.3163,
"step": 3200
},
{
"epoch": 2.638144742103159,
"grad_norm": 17.164398193359375,
"learning_rate": 1.636056838365897e-05,
"loss": 0.3255,
"step": 3300
},
{
"epoch": 2.7181127548980406,
"grad_norm": 7.5612077713012695,
"learning_rate": 1.6182948490230908e-05,
"loss": 0.3249,
"step": 3400
},
{
"epoch": 2.7980807676929227,
"grad_norm": 25.795366287231445,
"learning_rate": 1.6005328596802844e-05,
"loss": 0.3305,
"step": 3500
},
{
"epoch": 2.8780487804878048,
"grad_norm": 32.6719970703125,
"learning_rate": 1.582770870337478e-05,
"loss": 0.3116,
"step": 3600
},
{
"epoch": 2.958016793282687,
"grad_norm": 9.986326217651367,
"learning_rate": 1.5650088809946716e-05,
"loss": 0.2978,
"step": 3700
},
{
"epoch": 3.0375849660135947,
"grad_norm": 13.737031936645508,
"learning_rate": 1.5472468916518652e-05,
"loss": 0.2762,
"step": 3800
},
{
"epoch": 3.117552978808477,
"grad_norm": 46.34874725341797,
"learning_rate": 1.5294849023090588e-05,
"loss": 0.2792,
"step": 3900
},
{
"epoch": 3.1975209916033585,
"grad_norm": 10.898789405822754,
"learning_rate": 1.5117229129662522e-05,
"loss": 0.2284,
"step": 4000
},
{
"epoch": 3.1975209916033585,
"eval_accuracy": 0.8848460615753698,
"eval_auroc_macro": 0.9814776143689887,
"eval_auroc_weighted": 0.9811501779795945,
"eval_f1_macro": 0.8852410917462988,
"eval_f1_weighted": 0.884236665097445,
"eval_loss": 0.4859422445297241,
"eval_precision_macro": 0.885234053430883,
"eval_precision_weighted": 0.8851060401022259,
"eval_recall_macro": 0.8867084554148464,
"eval_recall_weighted": 0.8848460615753698,
"eval_runtime": 6.8545,
"eval_samples_per_second": 364.87,
"eval_steps_per_second": 45.663,
"step": 4000
},
{
"epoch": 3.2774890043982405,
"grad_norm": 10.87129020690918,
"learning_rate": 1.493960923623446e-05,
"loss": 0.2376,
"step": 4100
},
{
"epoch": 3.3574570171931226,
"grad_norm": 7.824963092803955,
"learning_rate": 1.4761989342806396e-05,
"loss": 0.243,
"step": 4200
},
{
"epoch": 3.4374250299880047,
"grad_norm": 19.788555145263672,
"learning_rate": 1.458436944937833e-05,
"loss": 0.2927,
"step": 4300
},
{
"epoch": 3.517393042782887,
"grad_norm": 19.342580795288086,
"learning_rate": 1.4406749555950268e-05,
"loss": 0.2673,
"step": 4400
},
{
"epoch": 3.597361055577769,
"grad_norm": 1.4744329452514648,
"learning_rate": 1.4229129662522204e-05,
"loss": 0.1985,
"step": 4500
},
{
"epoch": 3.677329068372651,
"grad_norm": 19.05389976501465,
"learning_rate": 1.4051509769094141e-05,
"loss": 0.2826,
"step": 4600
},
{
"epoch": 3.757297081167533,
"grad_norm": 38.120121002197266,
"learning_rate": 1.3873889875666075e-05,
"loss": 0.2567,
"step": 4700
},
{
"epoch": 3.837265093962415,
"grad_norm": 38.54651641845703,
"learning_rate": 1.3696269982238011e-05,
"loss": 0.3233,
"step": 4800
},
{
"epoch": 3.917233106757297,
"grad_norm": 7.073267936706543,
"learning_rate": 1.3518650088809947e-05,
"loss": 0.2253,
"step": 4900
},
{
"epoch": 3.9972011195521793,
"grad_norm": 28.43503761291504,
"learning_rate": 1.3341030195381883e-05,
"loss": 0.2922,
"step": 5000
},
{
"epoch": 3.9972011195521793,
"eval_accuracy": 0.8692522990803678,
"eval_auroc_macro": 0.9772039806373872,
"eval_auroc_weighted": 0.9767263471937441,
"eval_f1_macro": 0.8687049494752337,
"eval_f1_weighted": 0.8672298347929932,
"eval_loss": 0.5068673491477966,
"eval_precision_macro": 0.8714207501064626,
"eval_precision_weighted": 0.8717661851493683,
"eval_recall_macro": 0.8724193433641065,
"eval_recall_weighted": 0.8692522990803678,
"eval_runtime": 7.1301,
"eval_samples_per_second": 350.766,
"eval_steps_per_second": 43.898,
"step": 5000
},
{
"epoch": 4.076769292283087,
"grad_norm": 88.04476165771484,
"learning_rate": 1.3163410301953821e-05,
"loss": 0.1666,
"step": 5100
},
{
"epoch": 4.156737305077969,
"grad_norm": 8.783529281616211,
"learning_rate": 1.2985790408525755e-05,
"loss": 0.2068,
"step": 5200
},
{
"epoch": 4.23670531787285,
"grad_norm": 4.993801116943359,
"learning_rate": 1.2808170515097691e-05,
"loss": 0.2067,
"step": 5300
},
{
"epoch": 4.316673330667733,
"grad_norm": 32.33308410644531,
"learning_rate": 1.2630550621669629e-05,
"loss": 0.2222,
"step": 5400
},
{
"epoch": 4.396641343462615,
"grad_norm": 1.1174397468566895,
"learning_rate": 1.2452930728241563e-05,
"loss": 0.216,
"step": 5500
},
{
"epoch": 4.476609356257497,
"grad_norm": 3.2966408729553223,
"learning_rate": 1.22753108348135e-05,
"loss": 0.1728,
"step": 5600
},
{
"epoch": 4.556577369052379,
"grad_norm": 23.643796920776367,
"learning_rate": 1.2097690941385437e-05,
"loss": 0.2034,
"step": 5700
},
{
"epoch": 4.636545381847261,
"grad_norm": 40.708675384521484,
"learning_rate": 1.1920071047957371e-05,
"loss": 0.1959,
"step": 5800
},
{
"epoch": 4.716513394642143,
"grad_norm": 8.088835716247559,
"learning_rate": 1.1742451154529309e-05,
"loss": 0.2288,
"step": 5900
},
{
"epoch": 4.7964814074370254,
"grad_norm": 28.324237823486328,
"learning_rate": 1.1564831261101243e-05,
"loss": 0.1829,
"step": 6000
},
{
"epoch": 4.7964814074370254,
"eval_accuracy": 0.8876449420231908,
"eval_auroc_macro": 0.9753087930207407,
"eval_auroc_weighted": 0.9748734596563122,
"eval_f1_macro": 0.8882520006466459,
"eval_f1_weighted": 0.8871330073620233,
"eval_loss": 0.5317224264144897,
"eval_precision_macro": 0.8881729106348502,
"eval_precision_weighted": 0.8878156337143654,
"eval_recall_macro": 0.8895224186044739,
"eval_recall_weighted": 0.8876449420231908,
"eval_runtime": 6.8194,
"eval_samples_per_second": 366.745,
"eval_steps_per_second": 45.898,
"step": 6000
}
],
"logging_steps": 100,
"max_steps": 12510,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 1000,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 15,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1947209924525808.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}