{
"best_global_step": 8793,
"best_metric": 4.369416236877441,
"best_model_checkpoint": "sindhibert_scratch/checkpoint-8793",
"epoch": 4.500224,
"eval_steps": 977,
"global_step": 8793,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0512,
"grad_norm": 18.65768051147461,
"learning_rate": 9.900000000000002e-06,
"loss": 77.516953125,
"step": 100
},
{
"epoch": 0.1024,
"grad_norm": 13.30678939819336,
"learning_rate": 1.9900000000000003e-05,
"loss": 67.7811669921875,
"step": 200
},
{
"epoch": 0.1536,
"grad_norm": 9.597126007080078,
"learning_rate": 2.9900000000000002e-05,
"loss": 60.4075341796875,
"step": 300
},
{
"epoch": 0.2048,
"grad_norm": 8.602116584777832,
"learning_rate": 3.99e-05,
"loss": 57.3657763671875,
"step": 400
},
{
"epoch": 0.256,
"grad_norm": 9.814536094665527,
"learning_rate": 4.99e-05,
"loss": 56.28333984375,
"step": 500
},
{
"epoch": 0.3072,
"grad_norm": 11.049890518188477,
"learning_rate": 5.99e-05,
"loss": 55.6001611328125,
"step": 600
},
{
"epoch": 0.3584,
"grad_norm": 11.56328010559082,
"learning_rate": 6.99e-05,
"loss": 55.2201416015625,
"step": 700
},
{
"epoch": 0.4096,
"grad_norm": 13.450942993164062,
"learning_rate": 7.99e-05,
"loss": 54.76716796875,
"step": 800
},
{
"epoch": 0.4608,
"grad_norm": 11.986808776855469,
"learning_rate": 8.99e-05,
"loss": 54.2532373046875,
"step": 900
},
{
"epoch": 0.500224,
"eval_loss": 6.677970886230469,
"eval_runtime": 53.9539,
"eval_samples_per_second": 370.687,
"eval_steps_per_second": 11.584,
"step": 977
},
{
"epoch": 0.512,
"grad_norm": 11.47065258026123,
"learning_rate": 9.99e-05,
"loss": 53.8267431640625,
"step": 1000
},
{
"epoch": 0.5632,
"grad_norm": 14.508301734924316,
"learning_rate": 9.887115165336375e-05,
"loss": 53.4335546875,
"step": 1100
},
{
"epoch": 0.6144,
"grad_norm": 13.515727043151855,
"learning_rate": 9.773090079817561e-05,
"loss": 53.126240234375,
"step": 1200
},
{
"epoch": 0.6656,
"grad_norm": 13.126218795776367,
"learning_rate": 9.659064994298746e-05,
"loss": 52.5950341796875,
"step": 1300
},
{
"epoch": 0.7168,
"grad_norm": 21.854541778564453,
"learning_rate": 9.545039908779932e-05,
"loss": 52.1828076171875,
"step": 1400
},
{
"epoch": 0.768,
"grad_norm": 18.417003631591797,
"learning_rate": 9.431014823261119e-05,
"loss": 51.821767578125,
"step": 1500
},
{
"epoch": 0.8192,
"grad_norm": 12.346606254577637,
"learning_rate": 9.316989737742304e-05,
"loss": 51.36373046875,
"step": 1600
},
{
"epoch": 0.8704,
"grad_norm": 22.9099063873291,
"learning_rate": 9.202964652223489e-05,
"loss": 50.9531591796875,
"step": 1700
},
{
"epoch": 0.9216,
"grad_norm": 18.55419158935547,
"learning_rate": 9.088939566704675e-05,
"loss": 50.487490234375,
"step": 1800
},
{
"epoch": 0.9728,
"grad_norm": 16.247209548950195,
"learning_rate": 8.974914481185861e-05,
"loss": 50.037265625,
"step": 1900
},
{
"epoch": 1.0,
"eval_loss": 6.126930236816406,
"eval_runtime": 54.5716,
"eval_samples_per_second": 366.491,
"eval_steps_per_second": 11.453,
"step": 1954
},
{
"epoch": 1.023552,
"grad_norm": 15.577176094055176,
"learning_rate": 8.860889395667046e-05,
"loss": 49.1747216796875,
"step": 2000
},
{
"epoch": 1.074752,
"grad_norm": 14.534530639648438,
"learning_rate": 8.746864310148233e-05,
"loss": 48.833271484375,
"step": 2100
},
{
"epoch": 1.125952,
"grad_norm": 21.150440216064453,
"learning_rate": 8.632839224629419e-05,
"loss": 47.9638330078125,
"step": 2200
},
{
"epoch": 1.177152,
"grad_norm": 14.35093879699707,
"learning_rate": 8.518814139110604e-05,
"loss": 47.6843505859375,
"step": 2300
},
{
"epoch": 1.228352,
"grad_norm": 18.37192726135254,
"learning_rate": 8.40478905359179e-05,
"loss": 47.1163623046875,
"step": 2400
},
{
"epoch": 1.279552,
"grad_norm": 15.366902351379395,
"learning_rate": 8.290763968072977e-05,
"loss": 46.6763720703125,
"step": 2500
},
{
"epoch": 1.330752,
"grad_norm": 18.58373260498047,
"learning_rate": 8.176738882554162e-05,
"loss": 46.071142578125,
"step": 2600
},
{
"epoch": 1.381952,
"grad_norm": 16.35076141357422,
"learning_rate": 8.062713797035348e-05,
"loss": 45.5451220703125,
"step": 2700
},
{
"epoch": 1.433152,
"grad_norm": 18.8570556640625,
"learning_rate": 7.948688711516535e-05,
"loss": 45.1469384765625,
"step": 2800
},
{
"epoch": 1.484352,
"grad_norm": 17.72637367248535,
"learning_rate": 7.83466362599772e-05,
"loss": 44.6100927734375,
"step": 2900
},
{
"epoch": 1.500224,
"eval_loss": 5.475055694580078,
"eval_runtime": 54.4157,
"eval_samples_per_second": 367.541,
"eval_steps_per_second": 11.486,
"step": 2931
},
{
"epoch": 1.535552,
"grad_norm": 14.798540115356445,
"learning_rate": 7.720638540478906e-05,
"loss": 44.670400390625,
"step": 3000
},
{
"epoch": 1.5867520000000002,
"grad_norm": 17.05501365661621,
"learning_rate": 7.606613454960093e-05,
"loss": 44.11970703125,
"step": 3100
},
{
"epoch": 1.6379519999999999,
"grad_norm": 15.10769271850586,
"learning_rate": 7.492588369441278e-05,
"loss": 43.6269775390625,
"step": 3200
},
{
"epoch": 1.689152,
"grad_norm": 16.199745178222656,
"learning_rate": 7.378563283922463e-05,
"loss": 43.4040771484375,
"step": 3300
},
{
"epoch": 1.7403520000000001,
"grad_norm": 19.104358673095703,
"learning_rate": 7.264538198403649e-05,
"loss": 42.9161328125,
"step": 3400
},
{
"epoch": 1.791552,
"grad_norm": 17.44623374938965,
"learning_rate": 7.150513112884834e-05,
"loss": 42.778564453125,
"step": 3500
},
{
"epoch": 1.842752,
"grad_norm": 16.97149658203125,
"learning_rate": 7.03648802736602e-05,
"loss": 42.4598193359375,
"step": 3600
},
{
"epoch": 1.893952,
"grad_norm": 14.990788459777832,
"learning_rate": 6.922462941847207e-05,
"loss": 42.2105419921875,
"step": 3700
},
{
"epoch": 1.945152,
"grad_norm": 17.827381134033203,
"learning_rate": 6.808437856328392e-05,
"loss": 42.019033203125,
"step": 3800
},
{
"epoch": 1.996352,
"grad_norm": 20.906902313232422,
"learning_rate": 6.694412770809578e-05,
"loss": 41.5656201171875,
"step": 3900
},
{
"epoch": 2.0,
"eval_loss": 5.107455253601074,
"eval_runtime": 54.6023,
"eval_samples_per_second": 366.285,
"eval_steps_per_second": 11.446,
"step": 3908
},
{
"epoch": 2.047104,
"grad_norm": 16.346454620361328,
"learning_rate": 6.580387685290765e-05,
"loss": 40.89203369140625,
"step": 4000
},
{
"epoch": 2.098304,
"grad_norm": 16.84693145751953,
"learning_rate": 6.46636259977195e-05,
"loss": 41.0606884765625,
"step": 4100
},
{
"epoch": 2.149504,
"grad_norm": 18.569360733032227,
"learning_rate": 6.352337514253136e-05,
"loss": 40.92314697265625,
"step": 4200
},
{
"epoch": 2.200704,
"grad_norm": 15.775079727172852,
"learning_rate": 6.238312428734322e-05,
"loss": 40.7518408203125,
"step": 4300
},
{
"epoch": 2.251904,
"grad_norm": 18.271591186523438,
"learning_rate": 6.124287343215507e-05,
"loss": 40.4889990234375,
"step": 4400
},
{
"epoch": 2.303104,
"grad_norm": 20.265701293945312,
"learning_rate": 6.010262257696694e-05,
"loss": 40.1488427734375,
"step": 4500
},
{
"epoch": 2.354304,
"grad_norm": 19.53594398498535,
"learning_rate": 5.8962371721778794e-05,
"loss": 40.10376708984375,
"step": 4600
},
{
"epoch": 2.405504,
"grad_norm": 19.707582473754883,
"learning_rate": 5.782212086659066e-05,
"loss": 39.953662109375,
"step": 4700
},
{
"epoch": 2.456704,
"grad_norm": 15.16901683807373,
"learning_rate": 5.6681870011402515e-05,
"loss": 39.77469970703125,
"step": 4800
},
{
"epoch": 2.5002240000000002,
"eval_loss": 4.868845462799072,
"eval_runtime": 54.8341,
"eval_samples_per_second": 364.737,
"eval_steps_per_second": 11.398,
"step": 4885
},
{
"epoch": 2.507904,
"grad_norm": 17.12852668762207,
"learning_rate": 5.554161915621437e-05,
"loss": 39.64916015625,
"step": 4900
},
{
"epoch": 2.559104,
"grad_norm": 19.869260787963867,
"learning_rate": 5.440136830102622e-05,
"loss": 39.51341552734375,
"step": 5000
},
{
"epoch": 2.610304,
"grad_norm": 17.342073440551758,
"learning_rate": 5.326111744583808e-05,
"loss": 39.350986328125,
"step": 5100
},
{
"epoch": 2.661504,
"grad_norm": 19.635601043701172,
"learning_rate": 5.212086659064994e-05,
"loss": 39.2056787109375,
"step": 5200
},
{
"epoch": 2.712704,
"grad_norm": 16.2427921295166,
"learning_rate": 5.09806157354618e-05,
"loss": 38.98402587890625,
"step": 5300
},
{
"epoch": 2.763904,
"grad_norm": 21.025632858276367,
"learning_rate": 4.984036488027366e-05,
"loss": 38.84795166015625,
"step": 5400
},
{
"epoch": 2.815104,
"grad_norm": 16.225173950195312,
"learning_rate": 4.870011402508552e-05,
"loss": 38.63864013671875,
"step": 5500
},
{
"epoch": 2.866304,
"grad_norm": 19.175825119018555,
"learning_rate": 4.755986316989738e-05,
"loss": 38.5738232421875,
"step": 5600
},
{
"epoch": 2.917504,
"grad_norm": 18.190704345703125,
"learning_rate": 4.6419612314709235e-05,
"loss": 38.47998046875,
"step": 5700
},
{
"epoch": 2.968704,
"grad_norm": 17.764493942260742,
"learning_rate": 4.52793614595211e-05,
"loss": 38.36706298828125,
"step": 5800
},
{
"epoch": 3.0,
"eval_loss": 4.690184116363525,
"eval_runtime": 54.4789,
"eval_samples_per_second": 367.115,
"eval_steps_per_second": 11.472,
"step": 5862
},
{
"epoch": 3.019456,
"grad_norm": 16.051603317260742,
"learning_rate": 4.4139110604332956e-05,
"loss": 37.86203369140625,
"step": 5900
},
{
"epoch": 3.070656,
"grad_norm": 16.511327743530273,
"learning_rate": 4.299885974914481e-05,
"loss": 38.09601318359375,
"step": 6000
},
{
"epoch": 3.121856,
"grad_norm": 16.089813232421875,
"learning_rate": 4.1858608893956676e-05,
"loss": 37.88030029296875,
"step": 6100
},
{
"epoch": 3.173056,
"grad_norm": 18.612686157226562,
"learning_rate": 4.0718358038768533e-05,
"loss": 37.66849853515625,
"step": 6200
},
{
"epoch": 3.224256,
"grad_norm": 17.79659652709961,
"learning_rate": 3.957810718358039e-05,
"loss": 37.43648681640625,
"step": 6300
},
{
"epoch": 3.275456,
"grad_norm": 16.967939376831055,
"learning_rate": 3.843785632839225e-05,
"loss": 37.43171142578125,
"step": 6400
},
{
"epoch": 3.326656,
"grad_norm": 16.4842529296875,
"learning_rate": 3.7297605473204104e-05,
"loss": 37.26192138671875,
"step": 6500
},
{
"epoch": 3.377856,
"grad_norm": 16.261512756347656,
"learning_rate": 3.615735461801597e-05,
"loss": 37.26702392578125,
"step": 6600
},
{
"epoch": 3.429056,
"grad_norm": 17.182903289794922,
"learning_rate": 3.5017103762827825e-05,
"loss": 37.16359619140625,
"step": 6700
},
{
"epoch": 3.480256,
"grad_norm": 17.765832901000977,
"learning_rate": 3.387685290763968e-05,
"loss": 37.05822998046875,
"step": 6800
},
{
"epoch": 3.5002240000000002,
"eval_loss": 4.552316188812256,
"eval_runtime": 54.6295,
"eval_samples_per_second": 366.103,
"eval_steps_per_second": 11.441,
"step": 6839
},
{
"epoch": 3.531456,
"grad_norm": 18.267107009887695,
"learning_rate": 3.2736602052451546e-05,
"loss": 36.907744140625,
"step": 6900
},
{
"epoch": 3.582656,
"grad_norm": 20.562347412109375,
"learning_rate": 3.15963511972634e-05,
"loss": 36.8392919921875,
"step": 7000
},
{
"epoch": 3.6338559999999998,
"grad_norm": 16.440811157226562,
"learning_rate": 3.0456100342075257e-05,
"loss": 36.639384765625,
"step": 7100
},
{
"epoch": 3.685056,
"grad_norm": 17.070350646972656,
"learning_rate": 2.9315849486887114e-05,
"loss": 36.63670166015625,
"step": 7200
},
{
"epoch": 3.736256,
"grad_norm": 17.15755844116211,
"learning_rate": 2.8175598631698974e-05,
"loss": 36.6428759765625,
"step": 7300
},
{
"epoch": 3.787456,
"grad_norm": 18.988977432250977,
"learning_rate": 2.7035347776510834e-05,
"loss": 36.578349609375,
"step": 7400
},
{
"epoch": 3.838656,
"grad_norm": 17.87462615966797,
"learning_rate": 2.589509692132269e-05,
"loss": 36.4393994140625,
"step": 7500
},
{
"epoch": 3.889856,
"grad_norm": 18.244951248168945,
"learning_rate": 2.4754846066134552e-05,
"loss": 36.40805908203125,
"step": 7600
},
{
"epoch": 3.941056,
"grad_norm": 16.166940689086914,
"learning_rate": 2.361459521094641e-05,
"loss": 36.23387451171875,
"step": 7700
},
{
"epoch": 3.9922560000000002,
"grad_norm": 19.10250473022461,
"learning_rate": 2.2474344355758266e-05,
"loss": 36.08807373046875,
"step": 7800
},
{
"epoch": 4.0,
"eval_loss": 4.433136940002441,
"eval_runtime": 54.6448,
"eval_samples_per_second": 366.0,
"eval_steps_per_second": 11.438,
"step": 7816
},
{
"epoch": 4.043008,
"grad_norm": 17.705839157104492,
"learning_rate": 2.1334093500570126e-05,
"loss": 35.8281103515625,
"step": 7900
},
{
"epoch": 4.094208,
"grad_norm": 16.15859031677246,
"learning_rate": 2.0193842645381987e-05,
"loss": 35.8823193359375,
"step": 8000
},
{
"epoch": 4.145408,
"grad_norm": 17.96906852722168,
"learning_rate": 1.9053591790193844e-05,
"loss": 35.968681640625,
"step": 8100
},
{
"epoch": 4.196608,
"grad_norm": 16.74631118774414,
"learning_rate": 1.79133409350057e-05,
"loss": 35.894248046875,
"step": 8200
},
{
"epoch": 4.247808,
"grad_norm": 16.067609786987305,
"learning_rate": 1.677309007981756e-05,
"loss": 35.78274169921875,
"step": 8300
},
{
"epoch": 4.299008,
"grad_norm": 17.308250427246094,
"learning_rate": 1.563283922462942e-05,
"loss": 35.8246630859375,
"step": 8400
},
{
"epoch": 4.350208,
"grad_norm": 17.30617332458496,
"learning_rate": 1.4492588369441278e-05,
"loss": 35.7324560546875,
"step": 8500
},
{
"epoch": 4.401408,
"grad_norm": 16.38850975036621,
"learning_rate": 1.3352337514253135e-05,
"loss": 35.88875244140625,
"step": 8600
},
{
"epoch": 4.452608,
"grad_norm": 20.229633331298828,
"learning_rate": 1.2212086659064994e-05,
"loss": 35.7011962890625,
"step": 8700
},
{
"epoch": 4.500224,
"eval_loss": 4.369416236877441,
"eval_runtime": 54.3995,
"eval_samples_per_second": 367.651,
"eval_steps_per_second": 11.489,
"step": 8793
}
],
"logging_steps": 100,
"max_steps": 9770,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 977,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 5.922505310959043e+17,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}