{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9932104752667313,
"eval_steps": 1024,
"global_step": 21504,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.011823934229365849,
"grad_norm": 0.526302695274353,
"learning_rate": 0.000498046875,
"loss": 9.227896690368652,
"step": 256
},
{
"epoch": 0.023647868458731697,
"grad_norm": 0.40557485818862915,
"learning_rate": 0.000998046875,
"loss": 6.491486072540283,
"step": 512
},
{
"epoch": 0.03547180268809755,
"grad_norm": 0.2611554265022278,
"learning_rate": 0.000999640996023194,
"loss": 3.871699333190918,
"step": 768
},
{
"epoch": 0.047295736917463395,
"grad_norm": 0.3245032727718353,
"learning_rate": 0.0009985588674043958,
"loss": 2.9131131172180176,
"step": 1024
},
{
"epoch": 0.047295736917463395,
"eval_bleu": 0.37680876061744384,
"eval_ce_loss": 2.7332340827271273,
"eval_loss": 2.7332340827271273,
"step": 1024
},
{
"epoch": 0.047295736917463395,
"eval_bleu": 0.37680876061744384,
"eval_ce_loss": 2.7332340827271273,
"eval_loss": 2.7332340827271273,
"eval_runtime": 123.8633,
"eval_samples_per_second": 225.999,
"eval_steps_per_second": 3.536,
"step": 1024
},
{
"epoch": 0.05911967114682925,
"grad_norm": 0.40771251916885376,
"learning_rate": 0.0009967551747861387,
"loss": 2.60915207862854,
"step": 1280
},
{
"epoch": 0.0709436053761951,
"grad_norm": 0.456460177898407,
"learning_rate": 0.000994232528651847,
"loss": 2.4321346282958984,
"step": 1536
},
{
"epoch": 0.08276753960556095,
"grad_norm": 0.583853542804718,
"learning_rate": 0.0009909945800260092,
"loss": 2.3184077739715576,
"step": 1792
},
{
"epoch": 0.09459147383492679,
"grad_norm": 0.4923252463340759,
"learning_rate": 0.0009870460151900522,
"loss": 2.2322187423706055,
"step": 2048
},
{
"epoch": 0.09459147383492679,
"eval_bleu": 0.44342304632288854,
"eval_ce_loss": 2.180436749164372,
"eval_loss": 2.180436749164372,
"step": 2048
},
{
"epoch": 0.09459147383492679,
"eval_bleu": 0.44342304632288854,
"eval_ce_loss": 2.180436749164372,
"eval_loss": 2.180436749164372,
"eval_runtime": 112.2247,
"eval_samples_per_second": 249.437,
"eval_steps_per_second": 3.903,
"step": 2048
},
{
"epoch": 0.10641540806429264,
"grad_norm": 0.4688430726528168,
"learning_rate": 0.0009823925488998885,
"loss": 2.177077531814575,
"step": 2304
},
{
"epoch": 0.1182393422936585,
"grad_norm": 0.559630274772644,
"learning_rate": 0.0009770409161149525,
"loss": 2.114898920059204,
"step": 2560
},
{
"epoch": 0.13006327652302435,
"grad_norm": 0.5434157848358154,
"learning_rate": 0.0009709988622506973,
"loss": 2.0660085678100586,
"step": 2816
},
{
"epoch": 0.1418872107523902,
"grad_norm": 0.6108939051628113,
"learning_rate": 0.000964275131968659,
"loss": 2.0337276458740234,
"step": 3072
},
{
"epoch": 0.1418872107523902,
"eval_bleu": 0.45990036074581603,
"eval_ce_loss": 2.022843875841463,
"eval_loss": 2.022843875841463,
"step": 3072
},
{
"epoch": 0.1418872107523902,
"eval_bleu": 0.45990036074581603,
"eval_ce_loss": 2.022843875841463,
"eval_loss": 2.022843875841463,
"eval_runtime": 115.2151,
"eval_samples_per_second": 242.963,
"eval_steps_per_second": 3.802,
"step": 3072
},
{
"epoch": 0.15371114498175603,
"grad_norm": 0.784342348575592,
"learning_rate": 0.0009568794565203123,
"loss": 2.0136122703552246,
"step": 3328
},
{
"epoch": 0.1655350792111219,
"grad_norm": 0.5167074203491211,
"learning_rate": 0.0009488225396630347,
"loss": 1.9863924980163574,
"step": 3584
},
{
"epoch": 0.17735901344048774,
"grad_norm": 0.6403853297233582,
"learning_rate": 0.0009401160421685646,
"loss": 1.9703547954559326,
"step": 3840
},
{
"epoch": 0.18918294766985358,
"grad_norm": 0.5935908555984497,
"learning_rate": 0.0009307725649463714,
"loss": 1.9429030418395996,
"step": 4096
},
{
"epoch": 0.18918294766985358,
"eval_bleu": 0.47210647699382713,
"eval_ce_loss": 1.938350352522445,
"eval_loss": 1.938350352522445,
"step": 4096
},
{
"epoch": 0.18918294766985358,
"eval_bleu": 0.47210647699382713,
"eval_ce_loss": 1.938350352522445,
"eval_loss": 1.938350352522445,
"eval_runtime": 115.0632,
"eval_samples_per_second": 243.284,
"eval_steps_per_second": 3.807,
"step": 4096
},
{
"epoch": 0.20100688189921945,
"grad_norm": 0.6215645670890808,
"learning_rate": 0.0009208056308063659,
"loss": 1.93471097946167,
"step": 4352
},
{
"epoch": 0.2128308161285853,
"grad_norm": 0.6367761492729187,
"learning_rate": 0.0009102296648873445,
"loss": 1.9198952913284302,
"step": 4608
},
{
"epoch": 0.22465475035795113,
"grad_norm": 0.686975359916687,
"learning_rate": 0.0008990599737794927,
"loss": 1.905943512916565,
"step": 4864
},
{
"epoch": 0.236478684587317,
"grad_norm": 0.5950681567192078,
"learning_rate": 0.0008873127233711644,
"loss": 1.8912562131881714,
"step": 5120
},
{
"epoch": 0.236478684587317,
"eval_bleu": 0.4805682328833919,
"eval_ce_loss": 1.8848687097362187,
"eval_loss": 1.8848687097362187,
"step": 5120
},
{
"epoch": 0.236478684587317,
"eval_bleu": 0.4805682328833919,
"eval_ce_loss": 1.8848687097362187,
"eval_loss": 1.8848687097362187,
"eval_runtime": 115.9176,
"eval_samples_per_second": 241.49,
"eval_steps_per_second": 3.779,
"step": 5120
},
{
"epoch": 0.24830261881668284,
"grad_norm": 0.5887701511383057,
"learning_rate": 0.0008750049154520011,
"loss": 1.868552327156067,
"step": 5376
},
{
"epoch": 0.2601265530460487,
"grad_norm": 0.6690143942832947,
"learning_rate": 0.0008621543631062487,
"loss": 1.8647550344467163,
"step": 5632
},
{
"epoch": 0.27195048727541454,
"grad_norm": 0.6140768527984619,
"learning_rate": 0.0008487796649318904,
"loss": 1.8564214706420898,
"step": 5888
},
{
"epoch": 0.2837744215047804,
"grad_norm": 0.720294713973999,
"learning_rate": 0.0008349001781229053,
"loss": 1.852386236190796,
"step": 6144
},
{
"epoch": 0.2837744215047804,
"eval_bleu": 0.48331258984762954,
"eval_ce_loss": 1.8427181676642534,
"eval_loss": 1.8427181676642534,
"step": 6144
},
{
"epoch": 0.2837744215047804,
"eval_bleu": 0.48331258984762954,
"eval_ce_loss": 1.8427181676642534,
"eval_loss": 1.8427181676642534,
"eval_runtime": 114.2068,
"eval_samples_per_second": 245.108,
"eval_steps_per_second": 3.835,
"step": 6144
},
{
"epoch": 0.2955983557341462,
"grad_norm": 0.6018796563148499,
"learning_rate": 0.0008205359904536107,
"loss": 1.8348900079727173,
"step": 6400
},
{
"epoch": 0.30742228996351206,
"grad_norm": 0.5634647607803345,
"learning_rate": 0.0008057078912056363,
"loss": 1.8397186994552612,
"step": 6656
},
{
"epoch": 0.3192462241928779,
"grad_norm": 0.7408234477043152,
"learning_rate": 0.0007904373410796086,
"loss": 1.8375225067138672,
"step": 6912
},
{
"epoch": 0.3310701584222438,
"grad_norm": 0.7126314043998718,
"learning_rate": 0.0007747464411350876,
"loss": 1.8180835247039795,
"step": 7168
},
{
"epoch": 0.3310701584222438,
"eval_bleu": 0.4866456652902785,
"eval_ce_loss": 1.8144619418605823,
"eval_loss": 1.8144619418605823,
"step": 7168
},
{
"epoch": 0.3310701584222438,
"eval_bleu": 0.4866456652902785,
"eval_ce_loss": 1.8144619418605823,
"eval_loss": 1.8144619418605823,
"eval_runtime": 113.3842,
"eval_samples_per_second": 246.886,
"eval_steps_per_second": 3.863,
"step": 7168
},
{
"epoch": 0.34289409265160964,
"grad_norm": 0.7098305225372314,
"learning_rate": 0.000758657900803716,
"loss": 1.8147518634796143,
"step": 7424
},
{
"epoch": 0.3547180268809755,
"grad_norm": 0.7013034820556641,
"learning_rate": 0.000742195005021869,
"loss": 1.8021713495254517,
"step": 7680
},
{
"epoch": 0.3665419611103413,
"grad_norm": 0.7947620749473572,
"learning_rate": 0.0007253815805303786,
"loss": 1.8060873746871948,
"step": 7936
},
{
"epoch": 0.37836589533970716,
"grad_norm": 0.6340846419334412,
"learning_rate": 0.0007082419613901028,
"loss": 1.7940701246261597,
"step": 8192
},
{
"epoch": 0.37836589533970716,
"eval_bleu": 0.4905398670578081,
"eval_ce_loss": 1.7928550904744291,
"eval_loss": 1.7928550904744291,
"step": 8192
},
{
"epoch": 0.37836589533970716,
"eval_bleu": 0.4905398670578081,
"eval_ce_loss": 1.7928550904744291,
"eval_loss": 1.7928550904744291,
"eval_runtime": 117.9382,
"eval_samples_per_second": 237.353,
"eval_steps_per_second": 3.714,
"step": 8192
},
{
"epoch": 0.390189829569073,
"grad_norm": 0.7005776166915894,
"learning_rate": 0.0006908009537632514,
"loss": 1.7938249111175537,
"step": 8448
},
{
"epoch": 0.4020137637984389,
"grad_norm": 0.6635662317276001,
"learning_rate": 0.0006730838000114403,
"loss": 1.7853164672851562,
"step": 8704
},
{
"epoch": 0.41383769802780473,
"grad_norm": 0.6025717854499817,
"learning_rate": 0.0006551161421624341,
"loss": 1.7877322435379028,
"step": 8960
},
{
"epoch": 0.4256616322571706,
"grad_norm": 0.6658259034156799,
"learning_rate": 0.0006369239847984517,
"loss": 1.7728495597839355,
"step": 9216
},
{
"epoch": 0.4256616322571706,
"eval_bleu": 0.491521411484475,
"eval_ce_loss": 1.7803823493387056,
"eval_loss": 1.7803823493387056,
"step": 9216
},
{
"epoch": 0.4256616322571706,
"eval_bleu": 0.491521411484475,
"eval_ce_loss": 1.7803823493387056,
"eval_loss": 1.7803823493387056,
"eval_runtime": 113.1853,
"eval_samples_per_second": 247.32,
"eval_steps_per_second": 3.87,
"step": 9216
},
{
"epoch": 0.4374855664865364,
"grad_norm": 0.6417582035064697,
"learning_rate": 0.0006185336574197479,
"loss": 1.767223596572876,
"step": 9472
},
{
"epoch": 0.44930950071590225,
"grad_norm": 0.6002334356307983,
"learning_rate": 0.0005999717763379407,
"loss": 1.7725470066070557,
"step": 9728
},
{
"epoch": 0.4611334349452681,
"grad_norm": 0.6244451403617859,
"learning_rate": 0.0005812652061542363,
"loss": 1.7692821025848389,
"step": 9984
},
{
"epoch": 0.472957369174634,
"grad_norm": 0.8042502999305725,
"learning_rate": 0.0005624410208783071,
"loss": 1.7654914855957031,
"step": 10240
},
{
"epoch": 0.472957369174634,
"eval_bleu": 0.4928508058708409,
"eval_ce_loss": 1.7702069639070939,
"eval_loss": 1.7702069639070939,
"step": 10240
},
{
"epoch": 0.472957369174634,
"eval_bleu": 0.4928508058708409,
"eval_ce_loss": 1.7702069639070939,
"eval_loss": 1.7702069639070939,
"eval_runtime": 111.2062,
"eval_samples_per_second": 251.722,
"eval_steps_per_second": 3.939,
"step": 10240
},
{
"epoch": 0.48478130340399983,
"grad_norm": 0.6481178998947144,
"learning_rate": 0.0005435264647440881,
"loss": 1.7591522932052612,
"step": 10496
},
{
"epoch": 0.49660523763336567,
"grad_norm": 0.6686874628067017,
"learning_rate": 0.000524548912779213,
"loss": 1.7500008344650269,
"step": 10752
},
{
"epoch": 0.5084291718627315,
"grad_norm": 0.6231724619865417,
"learning_rate": 0.0005055358311851499,
"loss": 1.7510260343551636,
"step": 11008
},
{
"epoch": 0.5202531060920974,
"grad_norm": 0.6039224863052368,
"learning_rate": 0.0004865147375853812,
"loss": 1.752295732498169,
"step": 11264
},
{
"epoch": 0.5202531060920974,
"eval_bleu": 0.49508567529295305,
"eval_ce_loss": 1.7585640699351759,
"eval_loss": 1.7585640699351759,
"step": 11264
},
{
"epoch": 0.5202531060920974,
"eval_bleu": 0.49508567529295305,
"eval_ce_loss": 1.7585640699351759,
"eval_loss": 1.7585640699351759,
"eval_runtime": 112.7663,
"eval_samples_per_second": 248.239,
"eval_steps_per_second": 3.884,
"step": 11264
},
{
"epoch": 0.5320770403214632,
"grad_norm": 0.6699737310409546,
"learning_rate": 0.0004675131611991607,
"loss": 1.7415655851364136,
"step": 11520
},
{
"epoch": 0.5439009745508291,
"grad_norm": 0.608718991279602,
"learning_rate": 0.0004485586029984899,
"loss": 1.7422733306884766,
"step": 11776
},
{
"epoch": 0.5557249087801949,
"grad_norm": 0.6244316697120667,
"learning_rate": 0.00042967849590597266,
"loss": 1.729731559753418,
"step": 12032
},
{
"epoch": 0.5675488430095608,
"grad_norm": 0.6394057869911194,
"learning_rate": 0.0004109001650911621,
"loss": 1.7405071258544922,
"step": 12288
},
{
"epoch": 0.5675488430095608,
"eval_bleu": 0.4976184098974849,
"eval_ce_loss": 1.7458034745634419,
"eval_loss": 1.7458034745634419,
"step": 12288
},
{
"epoch": 0.5675488430095608,
"eval_bleu": 0.4976184098974849,
"eval_ce_loss": 1.7458034745634419,
"eval_loss": 1.7458034745634419,
"eval_runtime": 113.3537,
"eval_samples_per_second": 246.953,
"eval_steps_per_second": 3.864,
"step": 12288
},
{
"epoch": 0.5793727772389267,
"grad_norm": 0.6351113319396973,
"learning_rate": 0.0003922507884228551,
"loss": 1.7407402992248535,
"step": 12544
},
{
"epoch": 0.5911967114682924,
"grad_norm": 0.6591783761978149,
"learning_rate": 0.00037375735713457723,
"loss": 1.7183029651641846,
"step": 12800
},
{
"epoch": 0.6030206456976583,
"grad_norm": 0.6365231275558472,
"learning_rate": 0.00035544663676018276,
"loss": 1.7422548532485962,
"step": 13056
},
{
"epoch": 0.6148445799270241,
"grad_norm": 0.6042783260345459,
"learning_rate": 0.00033734512839611255,
"loss": 1.731939673423767,
"step": 13312
},
{
"epoch": 0.6148445799270241,
"eval_bleu": 0.5030280015017363,
"eval_ce_loss": 1.7251083521538129,
"eval_loss": 1.7251083521538129,
"step": 13312
},
{
"epoch": 0.6148445799270241,
"eval_bleu": 0.5030280015017363,
"eval_ce_loss": 1.7251083521538129,
"eval_loss": 1.7251083521538129,
"eval_runtime": 114.7776,
"eval_samples_per_second": 243.889,
"eval_steps_per_second": 3.816,
"step": 13312
},
{
"epoch": 0.62666851415639,
"grad_norm": 0.8445199131965637,
"learning_rate": 0.0003194790303463687,
"loss": 1.7291183471679688,
"step": 13568
},
{
"epoch": 0.6384924483857558,
"grad_norm": 0.6880821585655212,
"learning_rate": 0.00030187420020572406,
"loss": 1.7337849140167236,
"step": 13824
},
{
"epoch": 0.6503163826151217,
"grad_norm": 0.6618293523788452,
"learning_rate": 0.00028455611743603626,
"loss": 1.7243127822875977,
"step": 14080
},
{
"epoch": 0.6621403168444876,
"grad_norm": 0.6698670387268066,
"learning_rate": 0.0002675498464898373,
"loss": 1.7110638618469238,
"step": 14336
},
{
"epoch": 0.6621403168444876,
"eval_bleu": 0.5033082348172044,
"eval_ce_loss": 1.7181274387390102,
"eval_loss": 1.7181274387390102,
"step": 14336
},
{
"epoch": 0.6621403168444876,
"eval_bleu": 0.5033082348172044,
"eval_ce_loss": 1.7181274387390102,
"eval_loss": 1.7181274387390102,
"eval_runtime": 113.7527,
"eval_samples_per_second": 246.086,
"eval_steps_per_second": 3.85,
"step": 14336
},
{
"epoch": 0.6739642510738534,
"grad_norm": 0.7639990448951721,
"learning_rate": 0.0002508800005345623,
"loss": 1.7315621376037598,
"step": 14592
},
{
"epoch": 0.6857881853032193,
"grad_norm": 0.6297742128372192,
"learning_rate": 0.00023457070582992562,
"loss": 1.7225186824798584,
"step": 14848
},
{
"epoch": 0.6976121195325851,
"grad_norm": 0.6663972735404968,
"learning_rate": 0.00021864556680999692,
"loss": 1.726626992225647,
"step": 15104
},
{
"epoch": 0.709436053761951,
"grad_norm": 0.6332765221595764,
"learning_rate": 0.0002031276319205152,
"loss": 1.7189596891403198,
"step": 15360
},
{
"epoch": 0.709436053761951,
"eval_bleu": 0.499889278094126,
"eval_ce_loss": 1.7232733518565626,
"eval_loss": 1.7232733518565626,
"step": 15360
},
{
"epoch": 0.709436053761951,
"eval_bleu": 0.499889278094126,
"eval_ce_loss": 1.7232733518565626,
"eval_loss": 1.7232733518565626,
"eval_runtime": 112.4417,
"eval_samples_per_second": 248.956,
"eval_steps_per_second": 3.895,
"step": 15360
},
{
"epoch": 0.7212599879913169,
"grad_norm": 0.7163941264152527,
"learning_rate": 0.00018803936026088542,
"loss": 1.7184514999389648,
"step": 15616
},
{
"epoch": 0.7330839222206826,
"grad_norm": 0.6824806332588196,
"learning_rate": 0.00017340258907913464,
"loss": 1.715112566947937,
"step": 15872
},
{
"epoch": 0.7449078564500485,
"grad_norm": 0.6306845545768738,
"learning_rate": 0.0001592385021668743,
"loss": 1.7062709331512451,
"step": 16128
},
{
"epoch": 0.7567317906794143,
"grad_norm": 0.6257657408714294,
"learning_rate": 0.0001455675992000087,
"loss": 1.7213497161865234,
"step": 16384
},
{
"epoch": 0.7567317906794143,
"eval_bleu": 0.5018863282352667,
"eval_ce_loss": 1.712352893123888,
"eval_loss": 1.712352893123888,
"step": 16384
},
{
"epoch": 0.7567317906794143,
"eval_bleu": 0.5018863282352667,
"eval_ce_loss": 1.712352893123888,
"eval_loss": 1.712352893123888,
"eval_runtime": 114.1951,
"eval_samples_per_second": 245.133,
"eval_steps_per_second": 3.836,
"step": 16384
},
{
"epoch": 0.7685557249087802,
"grad_norm": 0.660374104976654,
"learning_rate": 0.000132409666069565,
"loss": 1.7006453275680542,
"step": 16640
},
{
"epoch": 0.780379659138146,
"grad_norm": 0.6620026230812073,
"learning_rate": 0.0001197837462455823,
"loss": 1.708181381225586,
"step": 16896
},
{
"epoch": 0.7922035933675119,
"grad_norm": 0.6751854419708252,
"learning_rate": 0.00010770811321550749,
"loss": 1.6984927654266357,
"step": 17152
},
{
"epoch": 0.8040275275968778,
"grad_norm": 0.6999450325965881,
"learning_rate": 9.620024403698591e-05,
"loss": 1.7066656351089478,
"step": 17408
},
{
"epoch": 0.8040275275968778,
"eval_bleu": 0.5057314692968334,
"eval_ce_loss": 1.6962808190959773,
"eval_loss": 1.6962808190959773,
"step": 17408
},
{
"epoch": 0.8040275275968778,
"eval_bleu": 0.5057314692968334,
"eval_ce_loss": 1.6962808190959773,
"eval_loss": 1.6962808190959773,
"eval_runtime": 113.9552,
"eval_samples_per_second": 245.649,
"eval_steps_per_second": 3.844,
"step": 17408
},
{
"epoch": 0.8158514618262436,
"grad_norm": 0.6427358388900757,
"learning_rate": 8.527679404332429e-05,
"loss": 1.701072096824646,
"step": 17664
},
{
"epoch": 0.8276753960556095,
"grad_norm": 0.6666444540023804,
"learning_rate": 7.495357273823544e-05,
"loss": 1.7122679948806763,
"step": 17920
},
{
"epoch": 0.8394993302849753,
"grad_norm": 0.6453679203987122,
"learning_rate": 6.524552091475183e-05,
"loss": 1.7073798179626465,
"step": 18176
},
{
"epoch": 0.8513232645143411,
"grad_norm": 0.648821234703064,
"learning_rate": 5.6166689031422024e-05,
"loss": 1.6983073949813843,
"step": 18432
},
{
"epoch": 0.8513232645143411,
"eval_bleu": 0.5035658750943952,
"eval_ce_loss": 1.7023935086650936,
"eval_loss": 1.7023935086650936,
"step": 18432
},
{
"epoch": 0.8513232645143411,
"eval_bleu": 0.5035658750943952,
"eval_ce_loss": 1.7023935086650936,
"eval_loss": 1.7023935086650936,
"eval_runtime": 112.2723,
"eval_samples_per_second": 249.331,
"eval_steps_per_second": 3.901,
"step": 18432
},
{
"epoch": 0.8631471987437069,
"grad_norm": 0.6233495473861694,
"learning_rate": 4.773021687709067e-05,
"loss": 1.7038770914077759,
"step": 18688
},
{
"epoch": 0.8749711329730728,
"grad_norm": 0.6371004581451416,
"learning_rate": 3.994831455368719e-05,
"loss": 1.70210599899292,
"step": 18944
},
{
"epoch": 0.8867950672024387,
"grad_norm": 0.6629026532173157,
"learning_rate": 3.283224480455282e-05,
"loss": 1.7117823362350464,
"step": 19200
},
{
"epoch": 0.8986190014318045,
"grad_norm": 0.640360414981842,
"learning_rate": 2.639230671387627e-05,
"loss": 1.6973387002944946,
"step": 19456
},
{
"epoch": 0.8986190014318045,
"eval_bleu": 0.5034783964085036,
"eval_ce_loss": 1.7032495644538914,
"eval_loss": 1.7032495644538914,
"step": 19456
},
{
"epoch": 0.8986190014318045,
"eval_bleu": 0.5034783964085036,
"eval_ce_loss": 1.7032495644538914,
"eval_loss": 1.7032495644538914,
"eval_runtime": 113.7629,
"eval_samples_per_second": 246.064,
"eval_steps_per_second": 3.85,
"step": 19456
},
{
"epoch": 0.9104429356611704,
"grad_norm": 0.625438392162323,
"learning_rate": 2.063782080083576e-05,
"loss": 1.695639967918396,
"step": 19712
},
{
"epoch": 0.9222668698905362,
"grad_norm": 0.6405285000801086,
"learning_rate": 1.557711553001523e-05,
"loss": 1.697039246559143,
"step": 19968
},
{
"epoch": 0.9340908041199021,
"grad_norm": 0.6282166838645935,
"learning_rate": 1.1217515257622269e-05,
"loss": 1.6970669031143188,
"step": 20224
},
{
"epoch": 0.945914738349268,
"grad_norm": 0.6324545741081238,
"learning_rate": 7.565329630950746e-06,
"loss": 1.7013015747070312,
"step": 20480
},
{
"epoch": 0.945914738349268,
"eval_bleu": 0.5066154325587271,
"eval_ce_loss": 1.6894163007605565,
"eval_loss": 1.6894163007605565,
"step": 20480
},
{
"epoch": 0.945914738349268,
"eval_bleu": 0.5066154325587271,
"eval_ce_loss": 1.6894163007605565,
"eval_loss": 1.6894163007605565,
"eval_runtime": 112.9102,
"eval_samples_per_second": 247.923,
"eval_steps_per_second": 3.879,
"step": 20480
},
{
"epoch": 0.9577386725786338,
"grad_norm": 0.6203088164329529,
"learning_rate": 4.62584445643166e-06,
"loss": 1.6999309062957764,
"step": 20736
},
{
"epoch": 0.9695626068079997,
"grad_norm": 0.6161953806877136,
"learning_rate": 2.40331404948807e-06,
"loss": 1.7076772451400757,
"step": 20992
},
{
"epoch": 0.9813865410373654,
"grad_norm": 0.6234349012374878,
"learning_rate": 9.009550772663965e-07,
"loss": 1.6992141008377075,
"step": 21248
},
{
"epoch": 0.9932104752667313,
"grad_norm": 0.6052360534667969,
"learning_rate": 1.2094190315575791e-07,
"loss": 1.7013704776763916,
"step": 21504
},
{
"epoch": 0.9932104752667313,
"eval_bleu": 0.5054799917933074,
"eval_ce_loss": 1.699716367678011,
"eval_loss": 1.699716367678011,
"step": 21504
},
{
"epoch": 0.9932104752667313,
"eval_bleu": 0.5054799917933074,
"eval_ce_loss": 1.699716367678011,
"eval_loss": 1.699716367678011,
"eval_runtime": 113.8908,
"eval_samples_per_second": 245.788,
"eval_steps_per_second": 3.846,
"step": 21504
}
],
"logging_steps": 256,
"max_steps": 21651,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 1024,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 64,
"trial_name": null,
"trial_params": null
}