{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.114575496670289,
  "global_step": 10000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00011145476329794645,
      "learning_rate": 9.999000000000001e-05,
      "loss": 8.06905746459961,
      "step": 1
    },
    {
      "epoch": 0.011145476329794644,
      "learning_rate": 9.900000000000001e-05,
      "loss": 1.7745859165384312,
      "step": 100
    },
    {
      "epoch": 0.02229095265958929,
      "learning_rate": 9.8e-05,
      "loss": 1.4695932006835937,
      "step": 200
    },
    {
      "epoch": 0.03343642898938393,
      "learning_rate": 9.7e-05,
      "loss": 1.471973876953125,
      "step": 300
    },
    {
      "epoch": 0.04458190531917858,
      "learning_rate": 9.6e-05,
      "loss": 1.4246876525878907,
      "step": 400
    },
    {
      "epoch": 0.05572738164897322,
      "learning_rate": 9.5e-05,
      "loss": 1.4235630798339844,
      "step": 500
    },
    {
      "epoch": 0.06687285797876787,
      "learning_rate": 9.4e-05,
      "loss": 1.4458001708984376,
      "step": 600
    },
    {
      "epoch": 0.07801833430856252,
      "learning_rate": 9.300000000000001e-05,
      "loss": 1.4168995666503905,
      "step": 700
    },
    {
      "epoch": 0.08916381063835715,
      "learning_rate": 9.200000000000001e-05,
      "loss": 1.432281494140625,
      "step": 800
    },
    {
      "epoch": 0.1003092869681518,
      "learning_rate": 9.1e-05,
      "loss": 1.3954049682617187,
      "step": 900
    },
    {
      "epoch": 0.11145476329794644,
      "learning_rate": 9e-05,
      "loss": 1.41295654296875,
      "step": 1000
    },
    {
      "epoch": 0.1226002396277411,
      "learning_rate": 8.900000000000001e-05,
      "loss": 1.4206352233886719,
      "step": 1100
    },
    {
      "epoch": 0.13374571595753573,
      "learning_rate": 8.800000000000001e-05,
      "loss": 1.4066107177734375,
      "step": 1200
    },
    {
      "epoch": 0.14489119228733038,
      "learning_rate": 8.7e-05,
      "loss": 1.3928131103515624,
      "step": 1300
    },
    {
      "epoch": 0.15603666861712503,
      "learning_rate": 8.6e-05,
      "loss": 1.4154879760742187,
      "step": 1400
    },
    {
      "epoch": 0.16718214494691966,
      "learning_rate": 8.5e-05,
      "loss": 1.4014813232421874,
      "step": 1500
    },
    {
      "epoch": 0.1783276212767143,
      "learning_rate": 8.4e-05,
      "loss": 1.3965155029296874,
      "step": 1600
    },
    {
      "epoch": 0.18947309760650896,
      "learning_rate": 8.3e-05,
      "loss": 1.3976228332519531,
      "step": 1700
    },
    {
      "epoch": 0.2006185739363036,
      "learning_rate": 8.2e-05,
      "loss": 1.3655722045898437,
      "step": 1800
    },
    {
      "epoch": 0.21176405026609824,
      "learning_rate": 8.1e-05,
      "loss": 1.3941130065917968,
      "step": 1900
    },
    {
      "epoch": 0.2229095265958929,
      "learning_rate": 8e-05,
      "loss": 1.389833984375,
      "step": 2000
    },
    {
      "epoch": 0.23405500292568754,
      "learning_rate": 7.900000000000001e-05,
      "loss": 1.3948770141601563,
      "step": 2100
    },
    {
      "epoch": 0.2452004792554822,
      "learning_rate": 7.800000000000001e-05,
      "loss": 1.3855517578125,
      "step": 2200
    },
    {
      "epoch": 0.25634595558527684,
      "learning_rate": 7.7e-05,
      "loss": 1.4055433654785157,
      "step": 2300
    },
    {
      "epoch": 0.26749143191507146,
      "learning_rate": 7.6e-05,
      "loss": 1.3657418823242187,
      "step": 2400
    },
    {
      "epoch": 0.2786369082448661,
      "learning_rate": 7.500000000000001e-05,
      "loss": 1.3927548217773438,
      "step": 2500
    },
    {
      "epoch": 0.2786369082448661,
      "eval_gen_len": 50.3,
      "eval_loss": 1.3837015628814697,
      "eval_rouge1": 35.4891,
      "eval_rouge2": 15.6119,
      "eval_rougeL": 26.4862,
      "eval_rougeLsum": 32.3041,
      "eval_runtime": 163.7005,
      "eval_samples_per_second": 3.054,
      "step": 2500
    },
    {
      "epoch": 0.28978238457466077,
      "learning_rate": 7.4e-05,
      "loss": 1.3879615783691406,
      "step": 2600
    },
    {
      "epoch": 0.3009278609044554,
      "learning_rate": 7.3e-05,
      "loss": 1.3767970275878907,
      "step": 2700
    },
    {
      "epoch": 0.31207333723425007,
      "learning_rate": 7.2e-05,
      "loss": 1.3870814514160157,
      "step": 2800
    },
    {
      "epoch": 0.3232188135640447,
      "learning_rate": 7.1e-05,
      "loss": 1.3717910766601562,
      "step": 2900
    },
    {
      "epoch": 0.3343642898938393,
      "learning_rate": 7e-05,
      "loss": 1.3754934692382812,
      "step": 3000
    },
    {
      "epoch": 0.345509766223634,
      "learning_rate": 6.9e-05,
      "loss": 1.3606668090820313,
      "step": 3100
    },
    {
      "epoch": 0.3566552425534286,
      "learning_rate": 6.800000000000001e-05,
      "loss": 1.359674530029297,
      "step": 3200
    },
    {
      "epoch": 0.3678007188832233,
      "learning_rate": 6.7e-05,
      "loss": 1.37053955078125,
      "step": 3300
    },
    {
      "epoch": 0.3789461952130179,
      "learning_rate": 6.6e-05,
      "loss": 1.3728164672851562,
      "step": 3400
    },
    {
      "epoch": 0.39009167154281255,
      "learning_rate": 6.500000000000001e-05,
      "loss": 1.3717462158203124,
      "step": 3500
    },
    {
      "epoch": 0.4012371478726072,
      "learning_rate": 6.400000000000001e-05,
      "loss": 1.3698835754394532,
      "step": 3600
    },
    {
      "epoch": 0.41238262420240185,
      "learning_rate": 6.3e-05,
      "loss": 1.3624909973144532,
      "step": 3700
    },
    {
      "epoch": 0.42352810053219647,
      "learning_rate": 6.2e-05,
      "loss": 1.365331268310547,
      "step": 3800
    },
    {
      "epoch": 0.43467357686199115,
      "learning_rate": 6.1e-05,
      "loss": 1.3554270935058594,
      "step": 3900
    },
    {
      "epoch": 0.4458190531917858,
      "learning_rate": 6e-05,
      "loss": 1.3769105529785157,
      "step": 4000
    },
    {
      "epoch": 0.45696452952158045,
      "learning_rate": 5.9e-05,
      "loss": 1.3486868286132812,
      "step": 4100
    },
    {
      "epoch": 0.4681100058513751,
      "learning_rate": 5.8e-05,
      "loss": 1.3736349487304687,
      "step": 4200
    },
    {
      "epoch": 0.4792554821811697,
      "learning_rate": 5.6999999999999996e-05,
      "loss": 1.3558815002441407,
      "step": 4300
    },
    {
      "epoch": 0.4904009585109644,
      "learning_rate": 5.6000000000000006e-05,
      "loss": 1.3559271240234374,
      "step": 4400
    },
    {
      "epoch": 0.501546434840759,
      "learning_rate": 5.500000000000001e-05,
      "loss": 1.349674530029297,
      "step": 4500
    },
    {
      "epoch": 0.5126919111705537,
      "learning_rate": 5.4000000000000005e-05,
      "loss": 1.3654806518554687,
      "step": 4600
    },
    {
      "epoch": 0.5238373875003483,
      "learning_rate": 5.300000000000001e-05,
      "loss": 1.359230499267578,
      "step": 4700
    },
    {
      "epoch": 0.5349828638301429,
      "learning_rate": 5.2000000000000004e-05,
      "loss": 1.359907684326172,
      "step": 4800
    },
    {
      "epoch": 0.5461283401599376,
      "learning_rate": 5.1000000000000006e-05,
      "loss": 1.3600314331054688,
      "step": 4900
    },
    {
      "epoch": 0.5572738164897322,
      "learning_rate": 5e-05,
      "loss": 1.3618020629882812,
      "step": 5000
    },
    {
      "epoch": 0.5572738164897322,
      "eval_gen_len": 49.3,
      "eval_loss": 1.3626569509506226,
      "eval_rouge1": 35.3235,
      "eval_rouge2": 15.308,
      "eval_rougeL": 26.6971,
      "eval_rougeLsum": 32.2475,
      "eval_runtime": 163.5583,
      "eval_samples_per_second": 3.057,
      "step": 5000
    },
    {
      "epoch": 0.5684192928195269,
      "learning_rate": 4.9e-05,
      "loss": 1.3476010131835938,
      "step": 5100
    },
    {
      "epoch": 0.5795647691493215,
      "learning_rate": 4.8e-05,
      "loss": 1.3705815124511718,
      "step": 5200
    },
    {
      "epoch": 0.5907102454791162,
      "learning_rate": 4.7e-05,
      "loss": 1.3650910949707031,
      "step": 5300
    },
    {
      "epoch": 0.6018557218089108,
      "learning_rate": 4.600000000000001e-05,
      "loss": 1.3458297729492188,
      "step": 5400
    },
    {
      "epoch": 0.6130011981387055,
      "learning_rate": 4.5e-05,
      "loss": 1.3574575805664062,
      "step": 5500
    },
    {
      "epoch": 0.6241466744685001,
      "learning_rate": 4.4000000000000006e-05,
      "loss": 1.3446278381347656,
      "step": 5600
    },
    {
      "epoch": 0.6352921507982947,
      "learning_rate": 4.3e-05,
      "loss": 1.3720254516601562,
      "step": 5700
    },
    {
      "epoch": 0.6464376271280894,
      "learning_rate": 4.2e-05,
      "loss": 1.3550611877441405,
      "step": 5800
    },
    {
      "epoch": 0.6575831034578841,
      "learning_rate": 4.1e-05,
      "loss": 1.3649459838867188,
      "step": 5900
    },
    {
      "epoch": 0.6687285797876786,
      "learning_rate": 4e-05,
      "loss": 1.3249327087402343,
      "step": 6000
    },
    {
      "epoch": 0.6798740561174733,
      "learning_rate": 3.9000000000000006e-05,
      "loss": 1.3522917175292968,
      "step": 6100
    },
    {
      "epoch": 0.691019532447268,
      "learning_rate": 3.8e-05,
      "loss": 1.3376327514648438,
      "step": 6200
    },
    {
      "epoch": 0.7021650087770626,
      "learning_rate": 3.7e-05,
      "loss": 1.3476719665527344,
      "step": 6300
    },
    {
      "epoch": 0.7133104851068572,
      "learning_rate": 3.6e-05,
      "loss": 1.3413494873046874,
      "step": 6400
    },
    {
      "epoch": 0.7244559614366519,
      "learning_rate": 3.5e-05,
      "loss": 1.3463385009765625,
      "step": 6500
    },
    {
      "epoch": 0.7356014377664466,
      "learning_rate": 3.4000000000000007e-05,
      "loss": 1.346422882080078,
      "step": 6600
    },
    {
      "epoch": 0.7467469140962412,
      "learning_rate": 3.3e-05,
      "loss": 1.3565861511230468,
      "step": 6700
    },
    {
      "epoch": 0.7578923904260358,
      "learning_rate": 3.2000000000000005e-05,
      "loss": 1.3385652160644532,
      "step": 6800
    },
    {
      "epoch": 0.7690378667558305,
      "learning_rate": 3.1e-05,
      "loss": 1.3510641479492187,
      "step": 6900
    },
    {
      "epoch": 0.7801833430856251,
      "learning_rate": 3e-05,
      "loss": 1.3658317565917968,
      "step": 7000
    },
    {
      "epoch": 0.7913288194154198,
      "learning_rate": 2.9e-05,
      "loss": 1.3442698669433595,
      "step": 7100
    },
    {
      "epoch": 0.8024742957452144,
      "learning_rate": 2.8000000000000003e-05,
      "loss": 1.3289457702636718,
      "step": 7200
    },
    {
      "epoch": 0.813619772075009,
      "learning_rate": 2.7000000000000002e-05,
      "loss": 1.3627702331542968,
      "step": 7300
    },
    {
      "epoch": 0.8247652484048037,
      "learning_rate": 2.6000000000000002e-05,
      "loss": 1.3569662475585937,
      "step": 7400
    },
    {
      "epoch": 0.8359107247345984,
      "learning_rate": 2.5e-05,
      "loss": 1.3445625305175781,
      "step": 7500
    },
    {
      "epoch": 0.8359107247345984,
      "eval_gen_len": 49.5,
      "eval_loss": 1.3491926193237305,
      "eval_rouge1": 34.7061,
      "eval_rouge2": 15.2333,
      "eval_rougeL": 26.1848,
      "eval_rougeLsum": 31.7067,
      "eval_runtime": 162.9659,
      "eval_samples_per_second": 3.068,
      "step": 7500
    },
    {
      "epoch": 0.8470562010643929,
      "learning_rate": 2.4e-05,
      "loss": 1.3476835632324218,
      "step": 7600
    },
    {
      "epoch": 0.8582016773941876,
      "learning_rate": 2.3000000000000003e-05,
      "loss": 1.329300079345703,
      "step": 7700
    },
    {
      "epoch": 0.8693471537239823,
      "learning_rate": 2.2000000000000003e-05,
      "loss": 1.3521742248535156,
      "step": 7800
    },
    {
      "epoch": 0.8804926300537769,
      "learning_rate": 2.1e-05,
      "loss": 1.3405534362792968,
      "step": 7900
    },
    {
      "epoch": 0.8916381063835715,
      "learning_rate": 2e-05,
      "loss": 1.3501358032226562,
      "step": 8000
    },
    {
      "epoch": 0.9027835827133662,
      "learning_rate": 1.9e-05,
      "loss": 1.3406822204589843,
      "step": 8100
    },
    {
      "epoch": 0.9139290590431609,
      "learning_rate": 1.8e-05,
      "loss": 1.3406187438964843,
      "step": 8200
    },
    {
      "epoch": 0.9250745353729555,
      "learning_rate": 1.7000000000000003e-05,
      "loss": 1.3410650634765624,
      "step": 8300
    },
    {
      "epoch": 0.9362200117027502,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 1.3546810913085938,
      "step": 8400
    },
    {
      "epoch": 0.9473654880325448,
      "learning_rate": 1.5e-05,
      "loss": 1.330055694580078,
      "step": 8500
    },
    {
      "epoch": 0.9585109643623394,
      "learning_rate": 1.4000000000000001e-05,
      "loss": 1.325668487548828,
      "step": 8600
    },
    {
      "epoch": 0.9696564406921341,
      "learning_rate": 1.3000000000000001e-05,
      "loss": 1.331268310546875,
      "step": 8700
    },
    {
      "epoch": 0.9808019170219288,
      "learning_rate": 1.2e-05,
      "loss": 1.346514892578125,
      "step": 8800
    },
    {
      "epoch": 0.9919473933517233,
      "learning_rate": 1.1000000000000001e-05,
      "loss": 1.3515042114257811,
      "step": 8900
    },
    {
      "epoch": 1.0031207333723424,
      "learning_rate": 1e-05,
      "loss": 1.3323361206054687,
      "step": 9000
    },
    {
      "epoch": 1.014266209702137,
      "learning_rate": 9e-06,
      "loss": 1.2915484619140625,
      "step": 9100
    },
    {
      "epoch": 1.0254116860319318,
      "learning_rate": 8.000000000000001e-06,
      "loss": 1.2769253540039063,
      "step": 9200
    },
    {
      "epoch": 1.0365571623617265,
      "learning_rate": 7.000000000000001e-06,
      "loss": 1.2951361083984374,
      "step": 9300
    },
    {
      "epoch": 1.0477026386915211,
      "learning_rate": 6e-06,
      "loss": 1.30968505859375,
      "step": 9400
    },
    {
      "epoch": 1.0588481150213158,
      "learning_rate": 5e-06,
      "loss": 1.2824142456054688,
      "step": 9500
    },
    {
      "epoch": 1.0699935913511103,
      "learning_rate": 4.000000000000001e-06,
      "loss": 1.2977745056152343,
      "step": 9600
    },
    {
      "epoch": 1.081139067680905,
      "learning_rate": 3e-06,
      "loss": 1.296290283203125,
      "step": 9700
    },
    {
      "epoch": 1.0922845440106996,
      "learning_rate": 2.0000000000000003e-06,
      "loss": 1.301255645751953,
      "step": 9800
    },
    {
      "epoch": 1.1034300203404943,
      "learning_rate": 1.0000000000000002e-06,
      "loss": 1.28552490234375,
      "step": 9900
    },
    {
      "epoch": 1.114575496670289,
      "learning_rate": 0.0,
      "loss": 1.2935409545898438,
      "step": 10000
    },
    {
      "epoch": 1.114575496670289,
      "eval_gen_len": 48.8,
      "eval_loss": 1.3494852781295776,
      "eval_rouge1": 35.2792,
      "eval_rouge2": 15.3645,
      "eval_rougeL": 26.3329,
      "eval_rougeLsum": 32.2724,
      "eval_runtime": 161.2437,
      "eval_samples_per_second": 3.101,
      "step": 10000
    },
    {
      "epoch": 1.114575496670289,
      "step": 10000,
      "train_runtime": 28429.155,
      "train_samples_per_second": 0.352
    }
  ],
  "max_steps": 10000,
  "num_train_epochs": 2,
  "total_flos": 454127617474172928,
  "trial_name": null,
  "trial_params": null
}