{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 10.0,
"eval_steps": 500.0,
"global_step": 30630,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.1632519794302506,
"grad_norm": 0.42205774784088135,
"learning_rate": 0.0001996,
"loss": 9.0337958984375,
"step": 500
},
{
"epoch": 0.3265039588605012,
"grad_norm": 0.4745626747608185,
"learning_rate": 0.00019668768669100565,
"loss": 7.18489794921875,
"step": 1000
},
{
"epoch": 0.48975593829075176,
"grad_norm": 0.5245969295501709,
"learning_rate": 0.00019336873547958847,
"loss": 6.76750439453125,
"step": 1500
},
{
"epoch": 0.6530079177210024,
"grad_norm": 0.5147088170051575,
"learning_rate": 0.00019004978426817126,
"loss": 6.49613671875,
"step": 2000
},
{
"epoch": 0.8162598971512529,
"grad_norm": 0.5029264688491821,
"learning_rate": 0.00018673083305675408,
"loss": 6.30743798828125,
"step": 2500
},
{
"epoch": 0.9795118765815035,
"grad_norm": 0.52044677734375,
"learning_rate": 0.00018341188184533688,
"loss": 6.20166357421875,
"step": 3000
},
{
"epoch": 1.142682230022039,
"grad_norm": 0.5113121867179871,
"learning_rate": 0.0001800929306339197,
"loss": 5.97177001953125,
"step": 3500
},
{
"epoch": 1.3059342094522897,
"grad_norm": 0.5511940121650696,
"learning_rate": 0.0001767739794225025,
"loss": 5.921890625,
"step": 4000
},
{
"epoch": 1.4691861888825402,
"grad_norm": 0.4959582984447479,
"learning_rate": 0.0001734550282110853,
"loss": 5.78601318359375,
"step": 4500
},
{
"epoch": 1.6324381683127909,
"grad_norm": 0.527199387550354,
"learning_rate": 0.0001701360769996681,
"loss": 5.76173681640625,
"step": 5000
},
{
"epoch": 1.7956901477430414,
"grad_norm": 0.49316641688346863,
"learning_rate": 0.00016681712578825092,
"loss": 5.67507861328125,
"step": 5500
},
{
"epoch": 1.9589421271732919,
"grad_norm": 0.4699156880378723,
"learning_rate": 0.00016349817457683371,
"loss": 5.567580078125,
"step": 6000
},
{
"epoch": 2.1221124806138274,
"grad_norm": 0.5287707448005676,
"learning_rate": 0.00016017922336541653,
"loss": 5.44504248046875,
"step": 6500
},
{
"epoch": 2.285364460044078,
"grad_norm": 1.1574363708496094,
"learning_rate": 0.00015686027215399935,
"loss": 5.31701904296875,
"step": 7000
},
{
"epoch": 2.4486164394743284,
"grad_norm": 0.45680922269821167,
"learning_rate": 0.00015354132094258215,
"loss": 5.35315771484375,
"step": 7500
},
{
"epoch": 2.6118684189045793,
"grad_norm": 0.5091222524642944,
"learning_rate": 0.00015022236973116497,
"loss": 5.32297998046875,
"step": 8000
},
{
"epoch": 2.77512039833483,
"grad_norm": 0.5736968517303467,
"learning_rate": 0.00014690341851974776,
"loss": 5.2701904296875,
"step": 8500
},
{
"epoch": 2.9383723777650803,
"grad_norm": 0.5231903791427612,
"learning_rate": 0.00014358446730833058,
"loss": 5.30401953125,
"step": 9000
},
{
"epoch": 3.101542731205616,
"grad_norm": 0.5694177746772766,
"learning_rate": 0.00014026551609691337,
"loss": 5.1482998046875,
"step": 9500
},
{
"epoch": 3.2647947106358663,
"grad_norm": 0.5935769081115723,
"learning_rate": 0.0001369465648854962,
"loss": 5.08368701171875,
"step": 10000
},
{
"epoch": 3.4280466900661173,
"grad_norm": 0.6495661735534668,
"learning_rate": 0.000133627613674079,
"loss": 5.0398076171875,
"step": 10500
},
{
"epoch": 3.591298669496368,
"grad_norm": 0.5465214252471924,
"learning_rate": 0.0001303086624626618,
"loss": 5.07168017578125,
"step": 11000
},
{
"epoch": 3.7545506489266183,
"grad_norm": 0.5718339681625366,
"learning_rate": 0.00012698971125124463,
"loss": 5.0271943359375,
"step": 11500
},
{
"epoch": 3.9178026283568688,
"grad_norm": 0.607941746711731,
"learning_rate": 0.00012367076003982742,
"loss": 5.05531640625,
"step": 12000
},
{
"epoch": 4.080972981797404,
"grad_norm": 0.5361756682395935,
"learning_rate": 0.00012035180882841021,
"loss": 4.88087744140625,
"step": 12500
},
{
"epoch": 4.244224961227655,
"grad_norm": 0.5884597301483154,
"learning_rate": 0.00011703285761699303,
"loss": 4.826615234375,
"step": 13000
},
{
"epoch": 4.407476940657905,
"grad_norm": 0.6183493137359619,
"learning_rate": 0.00011371390640557584,
"loss": 4.8345087890625,
"step": 13500
},
{
"epoch": 4.570728920088156,
"grad_norm": 0.4895070493221283,
"learning_rate": 0.00011039495519415866,
"loss": 4.808126953125,
"step": 14000
},
{
"epoch": 4.733980899518406,
"grad_norm": 0.5796904563903809,
"learning_rate": 0.00010707600398274147,
"loss": 4.8475546875,
"step": 14500
},
{
"epoch": 4.897232878948657,
"grad_norm": 0.5432486534118652,
"learning_rate": 0.00010375705277132426,
"loss": 4.81570703125,
"step": 15000
},
{
"epoch": 5.060403232389192,
"grad_norm": 0.6771642565727234,
"learning_rate": 0.00010043810155990707,
"loss": 4.76427978515625,
"step": 15500
},
{
"epoch": 5.223655211819444,
"grad_norm": 0.6445333957672119,
"learning_rate": 9.711915034848989e-05,
"loss": 4.65180908203125,
"step": 16000
},
{
"epoch": 5.386907191249694,
"grad_norm": 0.599590539932251,
"learning_rate": 9.380019913707268e-05,
"loss": 4.70791796875,
"step": 16500
},
{
"epoch": 5.550159170679945,
"grad_norm": 0.6182076334953308,
"learning_rate": 9.04812479256555e-05,
"loss": 4.666,
"step": 17000
},
{
"epoch": 5.713411150110195,
"grad_norm": 0.601693332195282,
"learning_rate": 8.71622967142383e-05,
"loss": 4.6578359375,
"step": 17500
},
{
"epoch": 5.876663129540446,
"grad_norm": 0.6478536128997803,
"learning_rate": 8.384334550282111e-05,
"loss": 4.64422509765625,
"step": 18000
},
{
"epoch": 6.039833482980981,
"grad_norm": 0.6848897337913513,
"learning_rate": 8.052439429140392e-05,
"loss": 4.63602099609375,
"step": 18500
},
{
"epoch": 6.203085462411232,
"grad_norm": 0.599690854549408,
"learning_rate": 7.720544307998673e-05,
"loss": 4.57735595703125,
"step": 19000
},
{
"epoch": 6.366337441841482,
"grad_norm": 0.567436158657074,
"learning_rate": 7.388649186856955e-05,
"loss": 4.51408740234375,
"step": 19500
},
{
"epoch": 6.529589421271733,
"grad_norm": 0.9956502914428711,
"learning_rate": 7.056754065715234e-05,
"loss": 4.52962158203125,
"step": 20000
},
{
"epoch": 6.692841400701983,
"grad_norm": 0.6528770923614502,
"learning_rate": 6.724858944573516e-05,
"loss": 4.47805078125,
"step": 20500
},
{
"epoch": 6.856093380132235,
"grad_norm": 0.5711560249328613,
"learning_rate": 6.392963823431795e-05,
"loss": 4.5910791015625,
"step": 21000
},
{
"epoch": 7.019263733572769,
"grad_norm": 0.5683479309082031,
"learning_rate": 6.061068702290077e-05,
"loss": 4.54874267578125,
"step": 21500
},
{
"epoch": 7.182515713003021,
"grad_norm": 0.6191678643226624,
"learning_rate": 5.729173581148357e-05,
"loss": 4.418267578125,
"step": 22000
},
{
"epoch": 7.345767692433271,
"grad_norm": 0.6200758814811707,
"learning_rate": 5.3972784600066386e-05,
"loss": 4.46817431640625,
"step": 22500
},
{
"epoch": 7.509019671863522,
"grad_norm": 0.5810480713844299,
"learning_rate": 5.0653833388649185e-05,
"loss": 4.4288740234375,
"step": 23000
},
{
"epoch": 7.672271651293772,
"grad_norm": 0.6930559277534485,
"learning_rate": 4.7334882177232e-05,
"loss": 4.42653369140625,
"step": 23500
},
{
"epoch": 7.835523630724023,
"grad_norm": 0.6396164298057556,
"learning_rate": 4.4015930965814805e-05,
"loss": 4.49230419921875,
"step": 24000
},
{
"epoch": 7.998775610154273,
"grad_norm": 0.5532464981079102,
"learning_rate": 4.069697975439761e-05,
"loss": 4.39004931640625,
"step": 24500
},
{
"epoch": 8.161945963594809,
"grad_norm": 0.6885871291160583,
"learning_rate": 3.737802854298042e-05,
"loss": 4.382765625,
"step": 25000
},
{
"epoch": 8.32519794302506,
"grad_norm": 0.6912867426872253,
"learning_rate": 3.4059077331563225e-05,
"loss": 4.396109375,
"step": 25500
},
{
"epoch": 8.48844992245531,
"grad_norm": 0.6827392578125,
"learning_rate": 3.074012612014603e-05,
"loss": 4.40128125,
"step": 26000
},
{
"epoch": 8.651701901885561,
"grad_norm": 0.6782070398330688,
"learning_rate": 2.742117490872884e-05,
"loss": 4.35358203125,
"step": 26500
},
{
"epoch": 8.81495388131581,
"grad_norm": 0.7143053412437439,
"learning_rate": 2.410222369731165e-05,
"loss": 4.339625,
"step": 27000
},
{
"epoch": 8.978205860746062,
"grad_norm": 0.684012234210968,
"learning_rate": 2.0783272485894458e-05,
"loss": 4.3393515625,
"step": 27500
},
{
"epoch": 9.141376214186597,
"grad_norm": 0.6413611769676208,
"learning_rate": 1.7464321274477265e-05,
"loss": 4.32941748046875,
"step": 28000
},
{
"epoch": 9.304628193616848,
"grad_norm": 0.6120012402534485,
"learning_rate": 1.4145370063060073e-05,
"loss": 4.31831884765625,
"step": 28500
},
{
"epoch": 9.467880173047098,
"grad_norm": 0.6299709677696228,
"learning_rate": 1.0826418851642881e-05,
"loss": 4.28150341796875,
"step": 29000
},
{
"epoch": 9.631132152477349,
"grad_norm": 0.6235489845275879,
"learning_rate": 7.5074676402256894e-06,
"loss": 4.31871484375,
"step": 29500
},
{
"epoch": 9.794384131907599,
"grad_norm": 0.7088222503662109,
"learning_rate": 4.188516428808497e-06,
"loss": 4.35441064453125,
"step": 30000
},
{
"epoch": 9.95763611133785,
"grad_norm": 0.636341392993927,
"learning_rate": 8.695652173913044e-07,
"loss": 4.2993779296875,
"step": 30500
},
{
"epoch": 10.0,
"step": 30630,
"total_flos": 1.2426966463650202e+17,
"train_loss": 5.026907345643823,
"train_runtime": 19932.1348,
"train_samples_per_second": 49.168,
"train_steps_per_second": 1.537
}
],
"logging_steps": 500,
"max_steps": 30630,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 2000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.2426966463650202e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}