| { | |
| "best_global_step": 2000, | |
| "best_metric": 3.935134725289306, | |
| "best_model_checkpoint": "./SALAMA_C4/checkpoint-2000", | |
| "epoch": 1.3614703880190606, | |
| "eval_steps": 2000, | |
| "global_step": 2000, | |
| "is_hyper_param_search": false, | |
| "is_local_process_zero": true, | |
| "is_world_process_zero": true, | |
| "log_history": [ | |
| { | |
| "epoch": 0.013614703880190605, | |
| "grad_norm": 6.790204048156738, | |
| "learning_rate": 6.333333333333334e-07, | |
| "loss": 0.1302, | |
| "step": 20 | |
| }, | |
| { | |
| "epoch": 0.02722940776038121, | |
| "grad_norm": 6.674442768096924, | |
| "learning_rate": 1.3e-06, | |
| "loss": 0.1286, | |
| "step": 40 | |
| }, | |
| { | |
| "epoch": 0.04084411164057182, | |
| "grad_norm": 6.946224689483643, | |
| "learning_rate": 1.9666666666666668e-06, | |
| "loss": 0.1394, | |
| "step": 60 | |
| }, | |
| { | |
| "epoch": 0.05445881552076242, | |
| "grad_norm": 8.382658004760742, | |
| "learning_rate": 2.6333333333333332e-06, | |
| "loss": 0.1267, | |
| "step": 80 | |
| }, | |
| { | |
| "epoch": 0.06807351940095303, | |
| "grad_norm": 5.505548477172852, | |
| "learning_rate": 3.3000000000000006e-06, | |
| "loss": 0.1, | |
| "step": 100 | |
| }, | |
| { | |
| "epoch": 0.08168822328114364, | |
| "grad_norm": 3.323385715484619, | |
| "learning_rate": 3.966666666666667e-06, | |
| "loss": 0.0979, | |
| "step": 120 | |
| }, | |
| { | |
| "epoch": 0.09530292716133425, | |
| "grad_norm": 3.4979546070098877, | |
| "learning_rate": 4.633333333333334e-06, | |
| "loss": 0.0934, | |
| "step": 140 | |
| }, | |
| { | |
| "epoch": 0.10891763104152484, | |
| "grad_norm": 5.154605865478516, | |
| "learning_rate": 5.300000000000001e-06, | |
| "loss": 0.1039, | |
| "step": 160 | |
| }, | |
| { | |
| "epoch": 0.12253233492171545, | |
| "grad_norm": 5.488887786865234, | |
| "learning_rate": 5.966666666666667e-06, | |
| "loss": 0.1029, | |
| "step": 180 | |
| }, | |
| { | |
| "epoch": 0.13614703880190607, | |
| "grad_norm": 3.0190138816833496, | |
| "learning_rate": 6.633333333333334e-06, | |
| "loss": 0.1151, | |
| "step": 200 | |
| }, | |
| { | |
| "epoch": 0.14976174268209666, | |
| "grad_norm": 4.113169193267822, | |
| "learning_rate": 7.3e-06, | |
| "loss": 0.0936, | |
| "step": 220 | |
| }, | |
| { | |
| "epoch": 0.16337644656228728, | |
| "grad_norm": 6.0397515296936035, | |
| "learning_rate": 7.966666666666668e-06, | |
| "loss": 0.1214, | |
| "step": 240 | |
| }, | |
| { | |
| "epoch": 0.17699115044247787, | |
| "grad_norm": 6.033300399780273, | |
| "learning_rate": 8.633333333333334e-06, | |
| "loss": 0.1153, | |
| "step": 260 | |
| }, | |
| { | |
| "epoch": 0.1906058543226685, | |
| "grad_norm": 3.8764054775238037, | |
| "learning_rate": 9.3e-06, | |
| "loss": 0.1025, | |
| "step": 280 | |
| }, | |
| { | |
| "epoch": 0.2042205582028591, | |
| "grad_norm": 5.634373188018799, | |
| "learning_rate": 9.966666666666667e-06, | |
| "loss": 0.12, | |
| "step": 300 | |
| }, | |
| { | |
| "epoch": 0.21783526208304968, | |
| "grad_norm": 4.584943771362305, | |
| "learning_rate": 9.973030518097942e-06, | |
| "loss": 0.1099, | |
| "step": 320 | |
| }, | |
| { | |
| "epoch": 0.2314499659632403, | |
| "grad_norm": 5.086080551147461, | |
| "learning_rate": 9.944641589779987e-06, | |
| "loss": 0.1267, | |
| "step": 340 | |
| }, | |
| { | |
| "epoch": 0.2450646698434309, | |
| "grad_norm": 5.2667741775512695, | |
| "learning_rate": 9.91625266146203e-06, | |
| "loss": 0.1059, | |
| "step": 360 | |
| }, | |
| { | |
| "epoch": 0.2586793737236215, | |
| "grad_norm": 5.114360809326172, | |
| "learning_rate": 9.887863733144075e-06, | |
| "loss": 0.1294, | |
| "step": 380 | |
| }, | |
| { | |
| "epoch": 0.27229407760381213, | |
| "grad_norm": 6.17408561706543, | |
| "learning_rate": 9.859474804826119e-06, | |
| "loss": 0.12, | |
| "step": 400 | |
| }, | |
| { | |
| "epoch": 0.2859087814840027, | |
| "grad_norm": 4.034195899963379, | |
| "learning_rate": 9.831085876508163e-06, | |
| "loss": 0.1082, | |
| "step": 420 | |
| }, | |
| { | |
| "epoch": 0.2995234853641933, | |
| "grad_norm": 4.708987236022949, | |
| "learning_rate": 9.802696948190207e-06, | |
| "loss": 0.1041, | |
| "step": 440 | |
| }, | |
| { | |
| "epoch": 0.3131381892443839, | |
| "grad_norm": 3.7173264026641846, | |
| "learning_rate": 9.774308019872251e-06, | |
| "loss": 0.115, | |
| "step": 460 | |
| }, | |
| { | |
| "epoch": 0.32675289312457456, | |
| "grad_norm": 4.3770647048950195, | |
| "learning_rate": 9.745919091554295e-06, | |
| "loss": 0.1202, | |
| "step": 480 | |
| }, | |
| { | |
| "epoch": 0.34036759700476515, | |
| "grad_norm": 3.7374465465545654, | |
| "learning_rate": 9.717530163236339e-06, | |
| "loss": 0.1008, | |
| "step": 500 | |
| }, | |
| { | |
| "epoch": 0.35398230088495575, | |
| "grad_norm": 3.801574945449829, | |
| "learning_rate": 9.689141234918381e-06, | |
| "loss": 0.1171, | |
| "step": 520 | |
| }, | |
| { | |
| "epoch": 0.36759700476514634, | |
| "grad_norm": 3.962735176086426, | |
| "learning_rate": 9.660752306600427e-06, | |
| "loss": 0.0997, | |
| "step": 540 | |
| }, | |
| { | |
| "epoch": 0.381211708645337, | |
| "grad_norm": 4.948540687561035, | |
| "learning_rate": 9.632363378282471e-06, | |
| "loss": 0.1304, | |
| "step": 560 | |
| }, | |
| { | |
| "epoch": 0.3948264125255276, | |
| "grad_norm": 5.182698726654053, | |
| "learning_rate": 9.603974449964515e-06, | |
| "loss": 0.1059, | |
| "step": 580 | |
| }, | |
| { | |
| "epoch": 0.4084411164057182, | |
| "grad_norm": 5.27463436126709, | |
| "learning_rate": 9.57558552164656e-06, | |
| "loss": 0.1379, | |
| "step": 600 | |
| }, | |
| { | |
| "epoch": 0.42205582028590877, | |
| "grad_norm": 4.754587650299072, | |
| "learning_rate": 9.547196593328602e-06, | |
| "loss": 0.1135, | |
| "step": 620 | |
| }, | |
| { | |
| "epoch": 0.43567052416609936, | |
| "grad_norm": 4.032769680023193, | |
| "learning_rate": 9.518807665010646e-06, | |
| "loss": 0.0925, | |
| "step": 640 | |
| }, | |
| { | |
| "epoch": 0.44928522804629, | |
| "grad_norm": null, | |
| "learning_rate": 9.49041873669269e-06, | |
| "loss": 0.1764, | |
| "step": 660 | |
| }, | |
| { | |
| "epoch": 0.4628999319264806, | |
| "grad_norm": 12.64933967590332, | |
| "learning_rate": 9.467707594038326e-06, | |
| "loss": 0.5231, | |
| "step": 680 | |
| }, | |
| { | |
| "epoch": 0.4765146358066712, | |
| "grad_norm": 16.499448776245117, | |
| "learning_rate": 9.43931866572037e-06, | |
| "loss": 0.1525, | |
| "step": 700 | |
| }, | |
| { | |
| "epoch": 0.4901293396868618, | |
| "grad_norm": 2.7940587997436523, | |
| "learning_rate": 9.410929737402414e-06, | |
| "loss": 0.1635, | |
| "step": 720 | |
| }, | |
| { | |
| "epoch": 0.5037440435670524, | |
| "grad_norm": 5.6086039543151855, | |
| "learning_rate": 9.382540809084458e-06, | |
| "loss": 0.1177, | |
| "step": 740 | |
| }, | |
| { | |
| "epoch": 0.517358747447243, | |
| "grad_norm": 5.121674537658691, | |
| "learning_rate": 9.354151880766502e-06, | |
| "loss": 0.1185, | |
| "step": 760 | |
| }, | |
| { | |
| "epoch": 0.5309734513274337, | |
| "grad_norm": 5.849049091339111, | |
| "learning_rate": 9.325762952448546e-06, | |
| "loss": 0.1358, | |
| "step": 780 | |
| }, | |
| { | |
| "epoch": 0.5445881552076243, | |
| "grad_norm": 4.180930137634277, | |
| "learning_rate": 9.29737402413059e-06, | |
| "loss": 0.1192, | |
| "step": 800 | |
| }, | |
| { | |
| "epoch": 0.5582028590878149, | |
| "grad_norm": 5.0372796058654785, | |
| "learning_rate": 9.268985095812634e-06, | |
| "loss": 0.1194, | |
| "step": 820 | |
| }, | |
| { | |
| "epoch": 0.5718175629680055, | |
| "grad_norm": 3.9500577449798584, | |
| "learning_rate": 9.240596167494677e-06, | |
| "loss": 0.1026, | |
| "step": 840 | |
| }, | |
| { | |
| "epoch": 0.585432266848196, | |
| "grad_norm": 5.2215800285339355, | |
| "learning_rate": 9.212207239176721e-06, | |
| "loss": 0.1227, | |
| "step": 860 | |
| }, | |
| { | |
| "epoch": 0.5990469707283866, | |
| "grad_norm": 7.128079414367676, | |
| "learning_rate": 9.183818310858765e-06, | |
| "loss": 0.1296, | |
| "step": 880 | |
| }, | |
| { | |
| "epoch": 0.6126616746085772, | |
| "grad_norm": 4.189157485961914, | |
| "learning_rate": 9.15542938254081e-06, | |
| "loss": 0.1092, | |
| "step": 900 | |
| }, | |
| { | |
| "epoch": 0.6262763784887678, | |
| "grad_norm": 4.203508377075195, | |
| "learning_rate": 9.127040454222855e-06, | |
| "loss": 0.1067, | |
| "step": 920 | |
| }, | |
| { | |
| "epoch": 0.6398910823689585, | |
| "grad_norm": 5.561427593231201, | |
| "learning_rate": 9.098651525904897e-06, | |
| "loss": 0.1156, | |
| "step": 940 | |
| }, | |
| { | |
| "epoch": 0.6535057862491491, | |
| "grad_norm": 2.9789481163024902, | |
| "learning_rate": 9.070262597586941e-06, | |
| "loss": 0.1144, | |
| "step": 960 | |
| }, | |
| { | |
| "epoch": 0.6671204901293397, | |
| "grad_norm": 6.220657825469971, | |
| "learning_rate": 9.041873669268985e-06, | |
| "loss": 0.121, | |
| "step": 980 | |
| }, | |
| { | |
| "epoch": 0.6807351940095303, | |
| "grad_norm": 5.125210285186768, | |
| "learning_rate": 9.01348474095103e-06, | |
| "loss": 0.1073, | |
| "step": 1000 | |
| }, | |
| { | |
| "epoch": 0.6943498978897209, | |
| "grad_norm": 4.665761947631836, | |
| "learning_rate": 8.985095812633073e-06, | |
| "loss": 0.1032, | |
| "step": 1020 | |
| }, | |
| { | |
| "epoch": 0.7079646017699115, | |
| "grad_norm": 3.9673075675964355, | |
| "learning_rate": 8.956706884315118e-06, | |
| "loss": 0.1133, | |
| "step": 1040 | |
| }, | |
| { | |
| "epoch": 0.7215793056501021, | |
| "grad_norm": 4.1961140632629395, | |
| "learning_rate": 8.928317955997162e-06, | |
| "loss": 0.1008, | |
| "step": 1060 | |
| }, | |
| { | |
| "epoch": 0.7351940095302927, | |
| "grad_norm": 6.15500545501709, | |
| "learning_rate": 8.899929027679206e-06, | |
| "loss": 0.1041, | |
| "step": 1080 | |
| }, | |
| { | |
| "epoch": 0.7488087134104833, | |
| "grad_norm": 4.683866024017334, | |
| "learning_rate": 8.87154009936125e-06, | |
| "loss": 0.1086, | |
| "step": 1100 | |
| }, | |
| { | |
| "epoch": 0.762423417290674, | |
| "grad_norm": 8.679821968078613, | |
| "learning_rate": 8.843151171043294e-06, | |
| "loss": 0.1049, | |
| "step": 1120 | |
| }, | |
| { | |
| "epoch": 0.7760381211708646, | |
| "grad_norm": 4.2732038497924805, | |
| "learning_rate": 8.814762242725338e-06, | |
| "loss": 0.1013, | |
| "step": 1140 | |
| }, | |
| { | |
| "epoch": 0.7896528250510552, | |
| "grad_norm": 9.020421028137207, | |
| "learning_rate": 8.786373314407382e-06, | |
| "loss": 0.101, | |
| "step": 1160 | |
| }, | |
| { | |
| "epoch": 0.8032675289312458, | |
| "grad_norm": 6.960070610046387, | |
| "learning_rate": 8.757984386089426e-06, | |
| "loss": 0.1067, | |
| "step": 1180 | |
| }, | |
| { | |
| "epoch": 0.8168822328114363, | |
| "grad_norm": 3.1116604804992676, | |
| "learning_rate": 8.72959545777147e-06, | |
| "loss": 0.099, | |
| "step": 1200 | |
| }, | |
| { | |
| "epoch": 0.8304969366916269, | |
| "grad_norm": 3.949913740158081, | |
| "learning_rate": 8.701206529453514e-06, | |
| "loss": 0.1068, | |
| "step": 1220 | |
| }, | |
| { | |
| "epoch": 0.8441116405718175, | |
| "grad_norm": 4.617140293121338, | |
| "learning_rate": 8.672817601135558e-06, | |
| "loss": 0.1237, | |
| "step": 1240 | |
| }, | |
| { | |
| "epoch": 0.8577263444520081, | |
| "grad_norm": 6.639930248260498, | |
| "learning_rate": 8.644428672817602e-06, | |
| "loss": 0.1295, | |
| "step": 1260 | |
| }, | |
| { | |
| "epoch": 0.8713410483321987, | |
| "grad_norm": 5.353048324584961, | |
| "learning_rate": 8.616039744499646e-06, | |
| "loss": 0.1088, | |
| "step": 1280 | |
| }, | |
| { | |
| "epoch": 0.8849557522123894, | |
| "grad_norm": 7.117270469665527, | |
| "learning_rate": 8.587650816181689e-06, | |
| "loss": 0.112, | |
| "step": 1300 | |
| }, | |
| { | |
| "epoch": 0.89857045609258, | |
| "grad_norm": 5.29574728012085, | |
| "learning_rate": 8.559261887863733e-06, | |
| "loss": 0.1188, | |
| "step": 1320 | |
| }, | |
| { | |
| "epoch": 0.9121851599727706, | |
| "grad_norm": 7.008860111236572, | |
| "learning_rate": 8.530872959545777e-06, | |
| "loss": 0.1076, | |
| "step": 1340 | |
| }, | |
| { | |
| "epoch": 0.9257998638529612, | |
| "grad_norm": 5.290408134460449, | |
| "learning_rate": 8.502484031227823e-06, | |
| "loss": 0.1339, | |
| "step": 1360 | |
| }, | |
| { | |
| "epoch": 0.9394145677331518, | |
| "grad_norm": 4.095961093902588, | |
| "learning_rate": 8.474095102909867e-06, | |
| "loss": 0.111, | |
| "step": 1380 | |
| }, | |
| { | |
| "epoch": 0.9530292716133424, | |
| "grad_norm": 4.4582085609436035, | |
| "learning_rate": 8.44570617459191e-06, | |
| "loss": 0.1079, | |
| "step": 1400 | |
| }, | |
| { | |
| "epoch": 0.966643975493533, | |
| "grad_norm": 3.397148370742798, | |
| "learning_rate": 8.417317246273953e-06, | |
| "loss": 0.0989, | |
| "step": 1420 | |
| }, | |
| { | |
| "epoch": 0.9802586793737236, | |
| "grad_norm": 4.60284948348999, | |
| "learning_rate": 8.388928317955997e-06, | |
| "loss": 0.1205, | |
| "step": 1440 | |
| }, | |
| { | |
| "epoch": 0.9938733832539143, | |
| "grad_norm": 4.263270378112793, | |
| "learning_rate": 8.360539389638041e-06, | |
| "loss": 0.0981, | |
| "step": 1460 | |
| }, | |
| { | |
| "epoch": 1.0074880871341048, | |
| "grad_norm": 2.734585762023926, | |
| "learning_rate": 8.332150461320085e-06, | |
| "loss": 0.0687, | |
| "step": 1480 | |
| }, | |
| { | |
| "epoch": 1.0211027910142954, | |
| "grad_norm": 4.107769966125488, | |
| "learning_rate": 8.30376153300213e-06, | |
| "loss": 0.0483, | |
| "step": 1500 | |
| }, | |
| { | |
| "epoch": 1.034717494894486, | |
| "grad_norm": 3.7158114910125732, | |
| "learning_rate": 8.275372604684174e-06, | |
| "loss": 0.0523, | |
| "step": 1520 | |
| }, | |
| { | |
| "epoch": 1.0483321987746765, | |
| "grad_norm": 3.725184917449951, | |
| "learning_rate": 8.246983676366218e-06, | |
| "loss": 0.0438, | |
| "step": 1540 | |
| }, | |
| { | |
| "epoch": 1.0619469026548674, | |
| "grad_norm": 2.794508218765259, | |
| "learning_rate": 8.218594748048262e-06, | |
| "loss": 0.0534, | |
| "step": 1560 | |
| }, | |
| { | |
| "epoch": 1.075561606535058, | |
| "grad_norm": 3.6485531330108643, | |
| "learning_rate": 8.190205819730306e-06, | |
| "loss": 0.0443, | |
| "step": 1580 | |
| }, | |
| { | |
| "epoch": 1.0891763104152485, | |
| "grad_norm": 2.9312281608581543, | |
| "learning_rate": 8.16181689141235e-06, | |
| "loss": 0.0426, | |
| "step": 1600 | |
| }, | |
| { | |
| "epoch": 1.1027910142954391, | |
| "grad_norm": 3.66829514503479, | |
| "learning_rate": 8.133427963094394e-06, | |
| "loss": 0.0558, | |
| "step": 1620 | |
| }, | |
| { | |
| "epoch": 1.1164057181756297, | |
| "grad_norm": 2.831806182861328, | |
| "learning_rate": 8.105039034776438e-06, | |
| "loss": 0.0504, | |
| "step": 1640 | |
| }, | |
| { | |
| "epoch": 1.1300204220558203, | |
| "grad_norm": 4.0393171310424805, | |
| "learning_rate": 8.076650106458482e-06, | |
| "loss": 0.0447, | |
| "step": 1660 | |
| }, | |
| { | |
| "epoch": 1.143635125936011, | |
| "grad_norm": 3.919926643371582, | |
| "learning_rate": 8.048261178140526e-06, | |
| "loss": 0.0432, | |
| "step": 1680 | |
| }, | |
| { | |
| "epoch": 1.1572498298162015, | |
| "grad_norm": 2.3747353553771973, | |
| "learning_rate": 8.01987224982257e-06, | |
| "loss": 0.0495, | |
| "step": 1700 | |
| }, | |
| { | |
| "epoch": 1.170864533696392, | |
| "grad_norm": 2.7838926315307617, | |
| "learning_rate": 7.991483321504614e-06, | |
| "loss": 0.0477, | |
| "step": 1720 | |
| }, | |
| { | |
| "epoch": 1.1844792375765827, | |
| "grad_norm": 4.423709869384766, | |
| "learning_rate": 7.963094393186658e-06, | |
| "loss": 0.0511, | |
| "step": 1740 | |
| }, | |
| { | |
| "epoch": 1.1980939414567733, | |
| "grad_norm": 3.6399760246276855, | |
| "learning_rate": 7.9347054648687e-06, | |
| "loss": 0.0512, | |
| "step": 1760 | |
| }, | |
| { | |
| "epoch": 1.2117086453369639, | |
| "grad_norm": 4.124867916107178, | |
| "learning_rate": 7.906316536550745e-06, | |
| "loss": 0.0503, | |
| "step": 1780 | |
| }, | |
| { | |
| "epoch": 1.2253233492171545, | |
| "grad_norm": 2.90708589553833, | |
| "learning_rate": 7.877927608232789e-06, | |
| "loss": 0.0428, | |
| "step": 1800 | |
| }, | |
| { | |
| "epoch": 1.238938053097345, | |
| "grad_norm": 3.0257723331451416, | |
| "learning_rate": 7.849538679914835e-06, | |
| "loss": 0.0505, | |
| "step": 1820 | |
| }, | |
| { | |
| "epoch": 1.2525527569775359, | |
| "grad_norm": 3.9913330078125, | |
| "learning_rate": 7.821149751596879e-06, | |
| "loss": 0.051, | |
| "step": 1840 | |
| }, | |
| { | |
| "epoch": 1.2661674608577265, | |
| "grad_norm": 3.413067102432251, | |
| "learning_rate": 7.792760823278921e-06, | |
| "loss": 0.0566, | |
| "step": 1860 | |
| }, | |
| { | |
| "epoch": 1.279782164737917, | |
| "grad_norm": 2.3691389560699463, | |
| "learning_rate": 7.764371894960965e-06, | |
| "loss": 0.0387, | |
| "step": 1880 | |
| }, | |
| { | |
| "epoch": 1.2933968686181077, | |
| "grad_norm": 2.782249689102173, | |
| "learning_rate": 7.73598296664301e-06, | |
| "loss": 0.0548, | |
| "step": 1900 | |
| }, | |
| { | |
| "epoch": 1.3070115724982982, | |
| "grad_norm": 2.8736960887908936, | |
| "learning_rate": 7.707594038325053e-06, | |
| "loss": 0.0512, | |
| "step": 1920 | |
| }, | |
| { | |
| "epoch": 1.3206262763784888, | |
| "grad_norm": 2.5277926921844482, | |
| "learning_rate": 7.679205110007098e-06, | |
| "loss": 0.0449, | |
| "step": 1940 | |
| }, | |
| { | |
| "epoch": 1.3342409802586794, | |
| "grad_norm": 3.7290806770324707, | |
| "learning_rate": 7.650816181689142e-06, | |
| "loss": 0.052, | |
| "step": 1960 | |
| }, | |
| { | |
| "epoch": 1.34785568413887, | |
| "grad_norm": 3.4079151153564453, | |
| "learning_rate": 7.6224272533711865e-06, | |
| "loss": 0.0459, | |
| "step": 1980 | |
| }, | |
| { | |
| "epoch": 1.3614703880190606, | |
| "grad_norm": 2.7560651302337646, | |
| "learning_rate": 7.59403832505323e-06, | |
| "loss": 0.0536, | |
| "step": 2000 | |
| }, | |
| { | |
| "epoch": 1.3614703880190606, | |
| "eval_loss": 0.05250042304396629, | |
| "eval_runtime": 4762.58, | |
| "eval_samples_per_second": 2.467, | |
| "eval_steps_per_second": 0.308, | |
| "eval_wer": 3.935134725289306, | |
| "step": 2000 | |
| } | |
| ], | |
| "logging_steps": 20, | |
| "max_steps": 7345, | |
| "num_input_tokens_seen": 0, | |
| "num_train_epochs": 5, | |
| "save_steps": 2000, | |
| "stateful_callbacks": { | |
| "TrainerControl": { | |
| "args": { | |
| "should_epoch_stop": false, | |
| "should_evaluate": false, | |
| "should_log": false, | |
| "should_save": true, | |
| "should_training_stop": false | |
| }, | |
| "attributes": {} | |
| } | |
| }, | |
| "total_flos": 1.846629118107648e+19, | |
| "train_batch_size": 16, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |