{
  "best_global_step": 2000,
  "best_metric": 0.7107943555686831,
  "best_model_checkpoint": "./SALAMA_C6/checkpoint-2000",
  "epoch": 1.3614703880190606,
  "eval_steps": 1000,
  "global_step": 2000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.013614703880190605,
      "grad_norm": 0.12137346714735031,
      "learning_rate": 3.8e-07,
      "loss": 0.008,
      "step": 20
    },
    {
      "epoch": 0.02722940776038121,
      "grad_norm": 0.114188052713871,
      "learning_rate": 7.8e-07,
      "loss": 0.0051,
      "step": 40
    },
    {
      "epoch": 0.04084411164057182,
      "grad_norm": 1.0347418785095215,
      "learning_rate": 1.1800000000000001e-06,
      "loss": 0.0065,
      "step": 60
    },
    {
      "epoch": 0.05445881552076242,
      "grad_norm": 1.6876862049102783,
      "learning_rate": 1.5800000000000001e-06,
      "loss": 0.0086,
      "step": 80
    },
    {
      "epoch": 0.06807351940095303,
      "grad_norm": 0.44940465688705444,
      "learning_rate": 1.98e-06,
      "loss": 0.0058,
      "step": 100
    },
    {
      "epoch": 0.08168822328114364,
      "grad_norm": 0.12510177493095398,
      "learning_rate": 2.38e-06,
      "loss": 0.007,
      "step": 120
    },
    {
      "epoch": 0.09530292716133425,
      "grad_norm": 0.20447663962841034,
      "learning_rate": 2.7800000000000005e-06,
      "loss": 0.0076,
      "step": 140
    },
    {
      "epoch": 0.10891763104152484,
      "grad_norm": 0.5220006108283997,
      "learning_rate": 3.1800000000000005e-06,
      "loss": 0.0049,
      "step": 160
    },
    {
      "epoch": 0.12253233492171545,
      "grad_norm": 1.6316051483154297,
      "learning_rate": 3.58e-06,
      "loss": 0.0056,
      "step": 180
    },
    {
      "epoch": 0.13614703880190607,
      "grad_norm": 0.33280882239341736,
      "learning_rate": 3.980000000000001e-06,
      "loss": 0.0046,
      "step": 200
    },
    {
      "epoch": 0.14976174268209666,
      "grad_norm": 1.9523682594299316,
      "learning_rate": 4.38e-06,
      "loss": 0.0059,
      "step": 220
    },
    {
      "epoch": 0.16337644656228728,
      "grad_norm": 2.4847404956817627,
      "learning_rate": 4.78e-06,
      "loss": 0.0102,
      "step": 240
    },
    {
      "epoch": 0.17699115044247787,
      "grad_norm": 1.8841696977615356,
      "learning_rate": 5.18e-06,
      "loss": 0.0102,
      "step": 260
    },
    {
      "epoch": 0.1906058543226685,
      "grad_norm": 0.3495821952819824,
      "learning_rate": 5.580000000000001e-06,
      "loss": 0.0112,
      "step": 280
    },
    {
      "epoch": 0.2042205582028591,
      "grad_norm": 0.7233176231384277,
      "learning_rate": 5.98e-06,
      "loss": 0.0099,
      "step": 300
    },
    {
      "epoch": 0.21783526208304968,
      "grad_norm": 1.7731997966766357,
      "learning_rate": 6.380000000000001e-06,
      "loss": 0.0101,
      "step": 320
    },
    {
      "epoch": 0.2314499659632403,
      "grad_norm": 1.6149709224700928,
      "learning_rate": 6.780000000000001e-06,
      "loss": 0.0158,
      "step": 340
    },
    {
      "epoch": 0.2450646698434309,
      "grad_norm": 1.7614096403121948,
      "learning_rate": 7.180000000000001e-06,
      "loss": 0.0149,
      "step": 360
    },
    {
      "epoch": 0.2586793737236215,
      "grad_norm": 1.063245177268982,
      "learning_rate": 7.58e-06,
      "loss": 0.0081,
      "step": 380
    },
    {
      "epoch": 0.27229407760381213,
      "grad_norm": 2.128750801086426,
      "learning_rate": 7.980000000000002e-06,
      "loss": 0.0101,
      "step": 400
    },
    {
      "epoch": 0.2859087814840027,
      "grad_norm": 4.583016872406006,
      "learning_rate": 8.380000000000001e-06,
      "loss": 0.0161,
      "step": 420
    },
    {
      "epoch": 0.2995234853641933,
      "grad_norm": 2.7598137855529785,
      "learning_rate": 8.78e-06,
      "loss": 0.0155,
      "step": 440
    },
    {
      "epoch": 0.3131381892443839,
      "grad_norm": 1.4036035537719727,
      "learning_rate": 9.180000000000002e-06,
      "loss": 0.0126,
      "step": 460
    },
    {
      "epoch": 0.32675289312457456,
      "grad_norm": 2.3361454010009766,
      "learning_rate": 9.58e-06,
      "loss": 0.0182,
      "step": 480
    },
    {
      "epoch": 0.34036759700476515,
      "grad_norm": 4.159565448760986,
      "learning_rate": 9.980000000000001e-06,
      "loss": 0.0132,
      "step": 500
    },
    {
      "epoch": 0.35398230088495575,
      "grad_norm": 2.0892586708068848,
      "learning_rate": 9.95136933708728e-06,
      "loss": 0.0185,
      "step": 520
    },
    {
      "epoch": 0.36759700476514634,
      "grad_norm": 4.004888534545898,
      "learning_rate": 9.900179165600206e-06,
      "loss": 0.012,
      "step": 540
    },
    {
      "epoch": 0.381211708645337,
      "grad_norm": 2.4637577533721924,
      "learning_rate": 9.848988994113131e-06,
      "loss": 0.0162,
      "step": 560
    },
    {
      "epoch": 0.3948264125255276,
      "grad_norm": 2.228325128555298,
      "learning_rate": 9.797798822626056e-06,
      "loss": 0.031,
      "step": 580
    },
    {
      "epoch": 0.4084411164057182,
      "grad_norm": 2.1333324909210205,
      "learning_rate": 9.746608651138983e-06,
      "loss": 0.0158,
      "step": 600
    },
    {
      "epoch": 0.42205582028590877,
      "grad_norm": 3.556786060333252,
      "learning_rate": 9.695418479651908e-06,
      "loss": 0.0141,
      "step": 620
    },
    {
      "epoch": 0.43567052416609936,
      "grad_norm": 1.8913724422454834,
      "learning_rate": 9.644228308164833e-06,
      "loss": 0.0187,
      "step": 640
    },
    {
      "epoch": 0.44928522804629,
      "grad_norm": 2.2834181785583496,
      "learning_rate": 9.59303813667776e-06,
      "loss": 0.0195,
      "step": 660
    },
    {
      "epoch": 0.4628999319264806,
      "grad_norm": 1.7960888147354126,
      "learning_rate": 9.541847965190683e-06,
      "loss": 0.0184,
      "step": 680
    },
    {
      "epoch": 0.4765146358066712,
      "grad_norm": 3.7277791500091553,
      "learning_rate": 9.49065779370361e-06,
      "loss": 0.0147,
      "step": 700
    },
    {
      "epoch": 0.4901293396868618,
      "grad_norm": 3.077613353729248,
      "learning_rate": 9.439467622216535e-06,
      "loss": 0.0191,
      "step": 720
    },
    {
      "epoch": 0.5037440435670524,
      "grad_norm": 3.0859522819519043,
      "learning_rate": 9.38827745072946e-06,
      "loss": 0.0207,
      "step": 740
    },
    {
      "epoch": 0.517358747447243,
      "grad_norm": 3.4750730991363525,
      "learning_rate": 9.337087279242385e-06,
      "loss": 0.0219,
      "step": 760
    },
    {
      "epoch": 0.5309734513274337,
      "grad_norm": 3.5851595401763916,
      "learning_rate": 9.285897107755312e-06,
      "loss": 0.0177,
      "step": 780
    },
    {
      "epoch": 0.5445881552076243,
      "grad_norm": 1.2438093423843384,
      "learning_rate": 9.234706936268237e-06,
      "loss": 0.0163,
      "step": 800
    },
    {
      "epoch": 0.5582028590878149,
      "grad_norm": 2.504617691040039,
      "learning_rate": 9.183516764781162e-06,
      "loss": 0.0175,
      "step": 820
    },
    {
      "epoch": 0.5718175629680055,
      "grad_norm": 2.9373462200164795,
      "learning_rate": 9.132326593294089e-06,
      "loss": 0.0188,
      "step": 840
    },
    {
      "epoch": 0.585432266848196,
      "grad_norm": 1.7320044040679932,
      "learning_rate": 9.081136421807014e-06,
      "loss": 0.0216,
      "step": 860
    },
    {
      "epoch": 0.5990469707283866,
      "grad_norm": 3.027221202850342,
      "learning_rate": 9.02994625031994e-06,
      "loss": 0.0211,
      "step": 880
    },
    {
      "epoch": 0.6126616746085772,
      "grad_norm": 2.3604636192321777,
      "learning_rate": 8.978756078832866e-06,
      "loss": 0.0224,
      "step": 900
    },
    {
      "epoch": 0.6262763784887678,
      "grad_norm": 3.7307496070861816,
      "learning_rate": 8.92756590734579e-06,
      "loss": 0.016,
      "step": 920
    },
    {
      "epoch": 0.6398910823689585,
      "grad_norm": 3.3601698875427246,
      "learning_rate": 8.876375735858716e-06,
      "loss": 0.0216,
      "step": 940
    },
    {
      "epoch": 0.6535057862491491,
      "grad_norm": 2.8738534450531006,
      "learning_rate": 8.825185564371641e-06,
      "loss": 0.0219,
      "step": 960
    },
    {
      "epoch": 0.6671204901293397,
      "grad_norm": 3.381775379180908,
      "learning_rate": 8.773995392884566e-06,
      "loss": 0.0166,
      "step": 980
    },
    {
      "epoch": 0.6807351940095303,
      "grad_norm": 0.9475630521774292,
      "learning_rate": 8.722805221397493e-06,
      "loss": 0.0148,
      "step": 1000
    },
    {
      "epoch": 0.6807351940095303,
      "eval_loss": 0.01398420799523592,
      "eval_runtime": 4342.5429,
      "eval_samples_per_second": 2.706,
      "eval_steps_per_second": 0.338,
      "eval_wer": 1.1414633216773222,
      "step": 1000
    },
    {
      "epoch": 0.6943498978897209,
      "grad_norm": 5.041502952575684,
      "learning_rate": 8.671615049910418e-06,
      "loss": 0.0175,
      "step": 1020
    },
    {
      "epoch": 0.7079646017699115,
      "grad_norm": 2.7368288040161133,
      "learning_rate": 8.620424878423343e-06,
      "loss": 0.0218,
      "step": 1040
    },
    {
      "epoch": 0.7215793056501021,
      "grad_norm": 2.3039634227752686,
      "learning_rate": 8.56923470693627e-06,
      "loss": 0.0191,
      "step": 1060
    },
    {
      "epoch": 0.7351940095302927,
      "grad_norm": 2.662198543548584,
      "learning_rate": 8.518044535449195e-06,
      "loss": 0.017,
      "step": 1080
    },
    {
      "epoch": 0.7488087134104833,
      "grad_norm": 1.5759329795837402,
      "learning_rate": 8.46685436396212e-06,
      "loss": 0.0253,
      "step": 1100
    },
    {
      "epoch": 0.762423417290674,
      "grad_norm": 4.4511542320251465,
      "learning_rate": 8.415664192475045e-06,
      "loss": 0.0214,
      "step": 1120
    },
    {
      "epoch": 0.7760381211708646,
      "grad_norm": 2.462972402572632,
      "learning_rate": 8.36447402098797e-06,
      "loss": 0.0174,
      "step": 1140
    },
    {
      "epoch": 0.7896528250510552,
      "grad_norm": 2.2568044662475586,
      "learning_rate": 8.313283849500895e-06,
      "loss": 0.023,
      "step": 1160
    },
    {
      "epoch": 0.8032675289312458,
      "grad_norm": 2.2731740474700928,
      "learning_rate": 8.262093678013822e-06,
      "loss": 0.0194,
      "step": 1180
    },
    {
      "epoch": 0.8168822328114363,
      "grad_norm": 1.3251385688781738,
      "learning_rate": 8.210903506526747e-06,
      "loss": 0.0188,
      "step": 1200
    },
    {
      "epoch": 0.8304969366916269,
      "grad_norm": 1.3998346328735352,
      "learning_rate": 8.159713335039672e-06,
      "loss": 0.0183,
      "step": 1220
    },
    {
      "epoch": 0.8441116405718175,
      "grad_norm": 1.4427942037582397,
      "learning_rate": 8.108523163552599e-06,
      "loss": 0.0184,
      "step": 1240
    },
    {
      "epoch": 0.8577263444520081,
      "grad_norm": 3.7367708683013916,
      "learning_rate": 8.057332992065524e-06,
      "loss": 0.0189,
      "step": 1260
    },
    {
      "epoch": 0.8713410483321987,
      "grad_norm": 3.2188096046447754,
      "learning_rate": 8.00614282057845e-06,
      "loss": 0.018,
      "step": 1280
    },
    {
      "epoch": 0.8849557522123894,
      "grad_norm": 1.9088647365570068,
      "learning_rate": 7.954952649091376e-06,
      "loss": 0.0143,
      "step": 1300
    },
    {
      "epoch": 0.89857045609258,
      "grad_norm": 2.296995162963867,
      "learning_rate": 7.903762477604301e-06,
      "loss": 0.0174,
      "step": 1320
    },
    {
      "epoch": 0.9121851599727706,
      "grad_norm": 2.857095718383789,
      "learning_rate": 7.852572306117226e-06,
      "loss": 0.0203,
      "step": 1340
    },
    {
      "epoch": 0.9257998638529612,
      "grad_norm": 2.143751621246338,
      "learning_rate": 7.801382134630153e-06,
      "loss": 0.0274,
      "step": 1360
    },
    {
      "epoch": 0.9394145677331518,
      "grad_norm": 1.9943230152130127,
      "learning_rate": 7.750191963143076e-06,
      "loss": 0.0148,
      "step": 1380
    },
    {
      "epoch": 0.9530292716133424,
      "grad_norm": 1.7356202602386475,
      "learning_rate": 7.699001791656003e-06,
      "loss": 0.0143,
      "step": 1400
    },
    {
      "epoch": 0.966643975493533,
      "grad_norm": 1.9049350023269653,
      "learning_rate": 7.650371128743282e-06,
      "loss": 0.0164,
      "step": 1420
    },
    {
      "epoch": 0.9802586793737236,
      "grad_norm": 3.2257440090179443,
      "learning_rate": 7.599180957256207e-06,
      "loss": 0.0154,
      "step": 1440
    },
    {
      "epoch": 0.9938733832539143,
      "grad_norm": 2.0696637630462646,
      "learning_rate": 7.5479907857691325e-06,
      "loss": 0.0196,
      "step": 1460
    },
    {
      "epoch": 1.0074880871341048,
      "grad_norm": 1.411531925201416,
      "learning_rate": 7.4968006142820584e-06,
      "loss": 0.015,
      "step": 1480
    },
    {
      "epoch": 1.0211027910142954,
      "grad_norm": 7.875269889831543,
      "learning_rate": 7.4456104427949835e-06,
      "loss": 0.0074,
      "step": 1500
    },
    {
      "epoch": 1.034717494894486,
      "grad_norm": 1.7613502740859985,
      "learning_rate": 7.3944202713079094e-06,
      "loss": 0.0076,
      "step": 1520
    },
    {
      "epoch": 1.0483321987746765,
      "grad_norm": 2.590519905090332,
      "learning_rate": 7.343230099820835e-06,
      "loss": 0.0102,
      "step": 1540
    },
    {
      "epoch": 1.0619469026548674,
      "grad_norm": 1.4597114324569702,
      "learning_rate": 7.2920399283337604e-06,
      "loss": 0.0081,
      "step": 1560
    },
    {
      "epoch": 1.075561606535058,
      "grad_norm": 3.469754934310913,
      "learning_rate": 7.240849756846686e-06,
      "loss": 0.0088,
      "step": 1580
    },
    {
      "epoch": 1.0891763104152485,
      "grad_norm": 1.9216970205307007,
      "learning_rate": 7.1896595853596114e-06,
      "loss": 0.0102,
      "step": 1600
    },
    {
      "epoch": 1.1027910142954391,
      "grad_norm": 2.306184768676758,
      "learning_rate": 7.138469413872537e-06,
      "loss": 0.0135,
      "step": 1620
    },
    {
      "epoch": 1.1164057181756297,
      "grad_norm": 2.4771056175231934,
      "learning_rate": 7.087279242385463e-06,
      "loss": 0.009,
      "step": 1640
    },
    {
      "epoch": 1.1300204220558203,
      "grad_norm": 1.8208683729171753,
      "learning_rate": 7.0360890708983875e-06,
      "loss": 0.011,
      "step": 1660
    },
    {
      "epoch": 1.143635125936011,
      "grad_norm": 1.1904667615890503,
      "learning_rate": 6.9848988994113134e-06,
      "loss": 0.0074,
      "step": 1680
    },
    {
      "epoch": 1.1572498298162015,
      "grad_norm": 1.3536099195480347,
      "learning_rate": 6.9337087279242385e-06,
      "loss": 0.0097,
      "step": 1700
    },
    {
      "epoch": 1.170864533696392,
      "grad_norm": 2.2063775062561035,
      "learning_rate": 6.8825185564371644e-06,
      "loss": 0.0123,
      "step": 1720
    },
    {
      "epoch": 1.1844792375765827,
      "grad_norm": 1.0882831811904907,
      "learning_rate": 6.83132838495009e-06,
      "loss": 0.01,
      "step": 1740
    },
    {
      "epoch": 1.1980939414567733,
      "grad_norm": 0.9484243988990784,
      "learning_rate": 6.7801382134630154e-06,
      "loss": 0.0086,
      "step": 1760
    },
    {
      "epoch": 1.2117086453369639,
      "grad_norm": 2.0926241874694824,
      "learning_rate": 6.728948041975941e-06,
      "loss": 0.01,
      "step": 1780
    },
    {
      "epoch": 1.2253233492171545,
      "grad_norm": 2.288508653640747,
      "learning_rate": 6.6777578704888664e-06,
      "loss": 0.0079,
      "step": 1800
    },
    {
      "epoch": 1.238938053097345,
      "grad_norm": 0.998699963092804,
      "learning_rate": 6.626567699001792e-06,
      "loss": 0.0069,
      "step": 1820
    },
    {
      "epoch": 1.2525527569775359,
      "grad_norm": 0.9781405925750732,
      "learning_rate": 6.575377527514718e-06,
      "loss": 0.0071,
      "step": 1840
    },
    {
      "epoch": 1.2661674608577265,
      "grad_norm": 1.6135525703430176,
      "learning_rate": 6.524187356027643e-06,
      "loss": 0.0087,
      "step": 1860
    },
    {
      "epoch": 1.279782164737917,
      "grad_norm": 1.845056414604187,
      "learning_rate": 6.472997184540569e-06,
      "loss": 0.0068,
      "step": 1880
    },
    {
      "epoch": 1.2933968686181077,
      "grad_norm": 2.4946863651275635,
      "learning_rate": 6.4218070130534935e-06,
      "loss": 0.0072,
      "step": 1900
    },
    {
      "epoch": 1.3070115724982982,
      "grad_norm": 2.3637702465057373,
      "learning_rate": 6.3706168415664194e-06,
      "loss": 0.0058,
      "step": 1920
    },
    {
      "epoch": 1.3206262763784888,
      "grad_norm": 0.9564550518989563,
      "learning_rate": 6.319426670079345e-06,
      "loss": 0.0081,
      "step": 1940
    },
    {
      "epoch": 1.3342409802586794,
      "grad_norm": 2.1385440826416016,
      "learning_rate": 6.2682364985922704e-06,
      "loss": 0.0083,
      "step": 1960
    },
    {
      "epoch": 1.34785568413887,
      "grad_norm": 2.863182306289673,
      "learning_rate": 6.217046327105196e-06,
      "loss": 0.0077,
      "step": 1980
    },
    {
      "epoch": 1.3614703880190606,
      "grad_norm": 1.2685619592666626,
      "learning_rate": 6.1658561556181214e-06,
      "loss": 0.0069,
      "step": 2000
    },
    {
      "epoch": 1.3614703880190606,
      "eval_loss": 0.009049512445926666,
      "eval_runtime": 4354.1558,
      "eval_samples_per_second": 2.699,
      "eval_steps_per_second": 0.337,
      "eval_wer": 0.7107943555686831,
      "step": 2000
    }
  ],
  "logging_steps": 20,
  "max_steps": 4407,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 1000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.846629118107648e+19,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}