{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 280,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "entropy": 2.168394589424133,
      "epoch": 0.01791044776119403,
      "grad_norm": 1.8660153150558472,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 3.2950328826904296,
      "mean_token_accuracy": 0.44889993518590926,
      "num_tokens": 605050.0,
      "step": 5
    },
    {
      "entropy": 2.184136521816254,
      "epoch": 0.03582089552238806,
      "grad_norm": 2.0418319702148438,
      "learning_rate": 1.970909090909091e-05,
      "loss": 3.2416725158691406,
      "mean_token_accuracy": 0.4498732253909111,
      "num_tokens": 1209069.0,
      "step": 10
    },
    {
      "entropy": 2.248383100827535,
      "epoch": 0.05373134328358209,
      "grad_norm": 1.8357728719711304,
      "learning_rate": 1.9345454545454548e-05,
      "loss": 3.0685535430908204,
      "mean_token_accuracy": 0.4595695684353511,
      "num_tokens": 1813685.0,
      "step": 15
    },
    {
      "entropy": 2.3278944293657937,
      "epoch": 0.07164179104477612,
      "grad_norm": 1.2048472166061401,
      "learning_rate": 1.8981818181818185e-05,
      "loss": 2.8284069061279298,
      "mean_token_accuracy": 0.47826829701662066,
      "num_tokens": 2417879.0,
      "step": 20
    },
    {
      "entropy": 2.376464025179545,
      "epoch": 0.08955223880597014,
      "grad_norm": 0.8425176739692688,
      "learning_rate": 1.861818181818182e-05,
      "loss": 2.6103424072265624,
      "mean_token_accuracy": 0.5095398277044296,
      "num_tokens": 3021136.0,
      "step": 25
    },
    {
      "entropy": 2.376994041601817,
      "epoch": 0.10746268656716418,
      "grad_norm": 0.7408229112625122,
      "learning_rate": 1.8254545454545455e-05,
      "loss": 2.450078773498535,
      "mean_token_accuracy": 0.5392259319623312,
      "num_tokens": 3625140.0,
      "step": 30
    },
    {
      "entropy": 2.3348599791526796,
      "epoch": 0.1253731343283582,
      "grad_norm": 0.6881565451622009,
      "learning_rate": 1.789090909090909e-05,
      "loss": 2.3218967437744142,
      "mean_token_accuracy": 0.5738894611597061,
      "num_tokens": 4229156.0,
      "step": 35
    },
    {
      "entropy": 2.2545143087704975,
      "epoch": 0.14328358208955225,
      "grad_norm": 0.6354486346244812,
      "learning_rate": 1.7527272727272728e-05,
      "loss": 2.18653564453125,
      "mean_token_accuracy": 0.5986390103896458,
      "num_tokens": 4832108.0,
      "step": 40
    },
    {
      "entropy": 2.1765093048413595,
      "epoch": 0.16119402985074627,
      "grad_norm": 0.5830056071281433,
      "learning_rate": 1.7163636363636365e-05,
      "loss": 2.0722366333007813,
      "mean_token_accuracy": 0.6129042307535807,
      "num_tokens": 5436606.0,
      "step": 45
    },
    {
      "entropy": 2.0826696693897246,
      "epoch": 0.1791044776119403,
      "grad_norm": 0.5773599147796631,
      "learning_rate": 1.6800000000000002e-05,
      "loss": 1.9402727127075194,
      "mean_token_accuracy": 0.63487542172273,
      "num_tokens": 6041829.0,
      "step": 50
    },
    {
      "entropy": 1.9882533133029938,
      "epoch": 0.19701492537313434,
      "grad_norm": 0.5663616061210632,
      "learning_rate": 1.643636363636364e-05,
      "loss": 1.8316377639770507,
      "mean_token_accuracy": 0.6560344994068146,
      "num_tokens": 6646359.0,
      "step": 55
    },
    {
      "entropy": 1.8791691561539967,
      "epoch": 0.21492537313432836,
      "grad_norm": 0.5420525074005127,
      "learning_rate": 1.6072727272727272e-05,
      "loss": 1.719741439819336,
      "mean_token_accuracy": 0.6697693943977356,
      "num_tokens": 7250668.0,
      "step": 60
    },
    {
      "entropy": 1.7820984462896983,
      "epoch": 0.23283582089552238,
      "grad_norm": 0.5506711006164551,
      "learning_rate": 1.570909090909091e-05,
      "loss": 1.6132436752319337,
      "mean_token_accuracy": 0.6853097409009934,
      "num_tokens": 7853611.0,
      "step": 65
    },
    {
      "entropy": 1.6836315234502157,
      "epoch": 0.2507462686567164,
      "grad_norm": 0.5666050910949707,
      "learning_rate": 1.5345454545454545e-05,
      "loss": 1.4924021720886231,
      "mean_token_accuracy": 0.7045944074789683,
      "num_tokens": 8456934.0,
      "step": 70
    },
    {
      "entropy": 1.5688092291355134,
      "epoch": 0.26865671641791045,
      "grad_norm": 0.5567929148674011,
      "learning_rate": 1.4981818181818184e-05,
      "loss": 1.3738606452941895,
      "mean_token_accuracy": 0.7291759083668391,
      "num_tokens": 9060644.0,
      "step": 75
    },
    {
      "entropy": 1.434225849310557,
      "epoch": 0.2865671641791045,
      "grad_norm": 0.5849553346633911,
      "learning_rate": 1.461818181818182e-05,
      "loss": 1.2681632041931152,
      "mean_token_accuracy": 0.7659504026174545,
      "num_tokens": 9666304.0,
      "step": 80
    },
    {
      "entropy": 1.309420390923818,
      "epoch": 0.3044776119402985,
      "grad_norm": 0.5013670921325684,
      "learning_rate": 1.4254545454545456e-05,
      "loss": 1.1568625450134278,
      "mean_token_accuracy": 0.7931581805149714,
      "num_tokens": 10270756.0,
      "step": 85
    },
    {
      "entropy": 1.2003730138142903,
      "epoch": 0.32238805970149254,
      "grad_norm": 0.46035531163215637,
      "learning_rate": 1.3890909090909093e-05,
      "loss": 1.0587100982666016,
      "mean_token_accuracy": 0.8115196357170741,
      "num_tokens": 10875051.0,
      "step": 90
    },
    {
      "entropy": 1.099229786793391,
      "epoch": 0.3402985074626866,
      "grad_norm": 0.44595953822135925,
      "learning_rate": 1.352727272727273e-05,
      "loss": 0.9824249267578125,
      "mean_token_accuracy": 0.8298713942368825,
      "num_tokens": 11479234.0,
      "step": 95
    },
    {
      "entropy": 1.0117118199666342,
      "epoch": 0.3582089552238806,
      "grad_norm": 0.39377522468566895,
      "learning_rate": 1.3163636363636364e-05,
      "loss": 0.9114532470703125,
      "mean_token_accuracy": 0.8430858343839646,
      "num_tokens": 12083366.0,
      "step": 100
    },
    {
      "entropy": 0.9327667226394017,
      "epoch": 0.3761194029850746,
      "grad_norm": 0.34281033277511597,
      "learning_rate": 1.2800000000000001e-05,
      "loss": 0.8528074264526367,
      "mean_token_accuracy": 0.8589683284362157,
      "num_tokens": 12688192.0,
      "step": 105
    },
    {
      "entropy": 0.8706107308467229,
      "epoch": 0.3940298507462687,
      "grad_norm": 0.2726999819278717,
      "learning_rate": 1.2436363636363638e-05,
      "loss": 0.8149747848510742,
      "mean_token_accuracy": 0.8668629239002864,
      "num_tokens": 13291854.0,
      "step": 110
    },
    {
      "entropy": 0.8064202666282654,
      "epoch": 0.41194029850746267,
      "grad_norm": 0.24471968412399292,
      "learning_rate": 1.2072727272727273e-05,
      "loss": 0.7800429821014404,
      "mean_token_accuracy": 0.8727914780378342,
      "num_tokens": 13895528.0,
      "step": 115
    },
    {
      "entropy": 0.7670980860789617,
      "epoch": 0.4298507462686567,
      "grad_norm": 0.22166411578655243,
      "learning_rate": 1.170909090909091e-05,
      "loss": 0.7577459335327148,
      "mean_token_accuracy": 0.8743686219056447,
      "num_tokens": 14499574.0,
      "step": 120
    },
    {
      "entropy": 0.7291957606871923,
      "epoch": 0.44776119402985076,
      "grad_norm": 0.22010307013988495,
      "learning_rate": 1.1345454545454546e-05,
      "loss": 0.7382506370544434,
      "mean_token_accuracy": 0.8784372409184774,
      "num_tokens": 15102709.0,
      "step": 125
    },
    {
      "entropy": 0.7073083202044169,
      "epoch": 0.46567164179104475,
      "grad_norm": 0.20731766521930695,
      "learning_rate": 1.0981818181818182e-05,
      "loss": 0.7176527976989746,
      "mean_token_accuracy": 0.8826558848222097,
      "num_tokens": 15705516.0,
      "step": 130
    },
    {
      "entropy": 0.6878383725881576,
      "epoch": 0.4835820895522388,
      "grad_norm": 0.1982019543647766,
      "learning_rate": 1.0618181818181818e-05,
      "loss": 0.6997775554656982,
      "mean_token_accuracy": 0.8837330102920532,
      "num_tokens": 16309772.0,
      "step": 135
    },
    {
      "entropy": 0.6752834647893906,
      "epoch": 0.5014925373134328,
      "grad_norm": 0.19192369282245636,
      "learning_rate": 1.0254545454545455e-05,
      "loss": 0.6955764293670654,
      "mean_token_accuracy": 0.8869551440080007,
      "num_tokens": 16914524.0,
      "step": 140
    },
    {
      "entropy": 0.6586959759394327,
      "epoch": 0.5194029850746269,
      "grad_norm": 0.17141292989253998,
      "learning_rate": 9.890909090909092e-06,
      "loss": 0.6816110134124755,
      "mean_token_accuracy": 0.8900096178054809,
      "num_tokens": 17519395.0,
      "step": 145
    },
    {
      "entropy": 0.6370273639758428,
      "epoch": 0.5373134328358209,
      "grad_norm": 0.1430523842573166,
      "learning_rate": 9.527272727272729e-06,
      "loss": 0.6567408561706543,
      "mean_token_accuracy": 0.8930429806311925,
      "num_tokens": 18122162.0,
      "step": 150
    },
    {
      "entropy": 0.6359477937221527,
      "epoch": 0.5552238805970149,
      "grad_norm": 0.14557921886444092,
      "learning_rate": 9.163636363636365e-06,
      "loss": 0.6546257972717285,
      "mean_token_accuracy": 0.8917513291041056,
      "num_tokens": 18725712.0,
      "step": 155
    },
    {
      "entropy": 0.6198909282684326,
      "epoch": 0.573134328358209,
      "grad_norm": 0.130398690700531,
      "learning_rate": 8.8e-06,
      "loss": 0.6443045616149903,
      "mean_token_accuracy": 0.8936116735140482,
      "num_tokens": 19328971.0,
      "step": 160
    },
    {
      "entropy": 0.6179789533217748,
      "epoch": 0.591044776119403,
      "grad_norm": 0.11789222061634064,
      "learning_rate": 8.436363636363637e-06,
      "loss": 0.6446707725524903,
      "mean_token_accuracy": 0.894164169828097,
      "num_tokens": 19933263.0,
      "step": 165
    },
    {
      "entropy": 0.6100832750399907,
      "epoch": 0.608955223880597,
      "grad_norm": 0.10147394984960556,
      "learning_rate": 8.072727272727274e-06,
      "loss": 0.6355695724487305,
      "mean_token_accuracy": 0.8952938993771871,
      "num_tokens": 20537488.0,
      "step": 170
    },
    {
      "entropy": 0.6098018596569698,
      "epoch": 0.6268656716417911,
      "grad_norm": 0.10079645365476608,
      "learning_rate": 7.709090909090909e-06,
      "loss": 0.6381240844726562,
      "mean_token_accuracy": 0.8944214711586634,
      "num_tokens": 21142040.0,
      "step": 175
    },
    {
      "entropy": 0.6117002467314402,
      "epoch": 0.6447761194029851,
      "grad_norm": 0.09079346060752869,
      "learning_rate": 7.345454545454546e-06,
      "loss": 0.6400082111358643,
      "mean_token_accuracy": 0.8940785964330037,
      "num_tokens": 21746525.0,
      "step": 180
    },
    {
      "entropy": 0.6024523983399074,
      "epoch": 0.6626865671641791,
      "grad_norm": 0.08825452625751495,
      "learning_rate": 6.981818181818183e-06,
      "loss": 0.6298256397247315,
      "mean_token_accuracy": 0.8952003945906957,
      "num_tokens": 22350402.0,
      "step": 185
    },
    {
      "entropy": 0.5973447283109029,
      "epoch": 0.6805970149253732,
      "grad_norm": 0.08620734512805939,
      "learning_rate": 6.618181818181819e-06,
      "loss": 0.6251591682434082,
      "mean_token_accuracy": 0.8961992383003234,
      "num_tokens": 22953710.0,
      "step": 190
    },
    {
      "entropy": 0.6138423581918081,
      "epoch": 0.6985074626865672,
      "grad_norm": 0.09157232195138931,
      "learning_rate": 6.254545454545455e-06,
      "loss": 0.6392334461212158,
      "mean_token_accuracy": 0.8931001037359237,
      "num_tokens": 23558187.0,
      "step": 195
    },
    {
      "entropy": 0.6071017513672511,
      "epoch": 0.7164179104477612,
      "grad_norm": 0.08755206316709518,
      "learning_rate": 5.890909090909091e-06,
      "loss": 0.6317390441894531,
      "mean_token_accuracy": 0.8944406092166901,
      "num_tokens": 24162342.0,
      "step": 200
    },
    {
      "entropy": 0.5952692975600561,
      "epoch": 0.7343283582089553,
      "grad_norm": 0.08511675149202347,
      "learning_rate": 5.527272727272728e-06,
      "loss": 0.6186740398406982,
      "mean_token_accuracy": 0.8966383894284566,
      "num_tokens": 24766525.0,
      "step": 205
    },
    {
      "entropy": 0.595616739988327,
      "epoch": 0.7522388059701492,
      "grad_norm": 0.08550413697957993,
      "learning_rate": 5.163636363636364e-06,
      "loss": 0.6194860935211182,
      "mean_token_accuracy": 0.8966623097658157,
      "num_tokens": 25370207.0,
      "step": 210
    },
    {
      "entropy": 0.5983099659283956,
      "epoch": 0.7701492537313432,
      "grad_norm": 0.0764346793293953,
      "learning_rate": 4.800000000000001e-06,
      "loss": 0.6212547779083252,
      "mean_token_accuracy": 0.8959661453962326,
      "num_tokens": 25974375.0,
      "step": 215
    },
    {
      "entropy": 0.5938634812831879,
      "epoch": 0.7880597014925373,
      "grad_norm": 0.08139371871948242,
      "learning_rate": 4.436363636363637e-06,
      "loss": 0.6151386260986328,
      "mean_token_accuracy": 0.8968420445919036,
      "num_tokens": 26577376.0,
      "step": 220
    },
    {
      "entropy": 0.5941700955231984,
      "epoch": 0.8059701492537313,
      "grad_norm": 0.08152152597904205,
      "learning_rate": 4.072727272727273e-06,
      "loss": 0.6150354862213134,
      "mean_token_accuracy": 0.896748689810435,
      "num_tokens": 27181169.0,
      "step": 225
    },
    {
      "entropy": 0.5950504938761393,
      "epoch": 0.8238805970149253,
      "grad_norm": 0.08036106079816818,
      "learning_rate": 3.7090909090909092e-06,
      "loss": 0.6148160457611084,
      "mean_token_accuracy": 0.8963960329691569,
      "num_tokens": 27785895.0,
      "step": 230
    },
    {
      "entropy": 0.5958336045344671,
      "epoch": 0.8417910447761194,
      "grad_norm": 0.08042708039283752,
      "learning_rate": 3.3454545454545456e-06,
      "loss": 0.6153737068176269,
      "mean_token_accuracy": 0.8960448642571767,
      "num_tokens": 28389947.0,
      "step": 235
    },
    {
      "entropy": 0.5918162196874619,
      "epoch": 0.8597014925373134,
      "grad_norm": 0.07697876542806625,
      "learning_rate": 2.981818181818182e-06,
      "loss": 0.6076783657073974,
      "mean_token_accuracy": 0.8973629623651505,
      "num_tokens": 28993414.0,
      "step": 240
    },
    {
      "entropy": 0.5922670801480611,
      "epoch": 0.8776119402985074,
      "grad_norm": 0.07848487794399261,
      "learning_rate": 2.6181818181818187e-06,
      "loss": 0.6099064826965332,
      "mean_token_accuracy": 0.8969596823056539,
      "num_tokens": 29597147.0,
      "step": 245
    },
    {
      "entropy": 0.5942047655582428,
      "epoch": 0.8955223880597015,
      "grad_norm": 0.0873173326253891,
      "learning_rate": 2.254545454545455e-06,
      "loss": 0.6153602600097656,
      "mean_token_accuracy": 0.8964566598335902,
      "num_tokens": 30200988.0,
      "step": 250
    },
    {
      "entropy": 0.600983374317487,
      "epoch": 0.9134328358208955,
      "grad_norm": 0.08199329674243927,
      "learning_rate": 1.890909090909091e-06,
      "loss": 0.621838903427124,
      "mean_token_accuracy": 0.8946800877650579,
      "num_tokens": 30805455.0,
      "step": 255
    },
    {
      "entropy": 0.5971830387910207,
      "epoch": 0.9313432835820895,
      "grad_norm": 0.07193537801504135,
      "learning_rate": 1.5272727272727275e-06,
      "loss": 0.6158740043640136,
      "mean_token_accuracy": 0.8959302544593811,
      "num_tokens": 31410023.0,
      "step": 260
    },
    {
      "entropy": 0.5944863051176071,
      "epoch": 0.9492537313432836,
      "grad_norm": 0.08086840808391571,
      "learning_rate": 1.1636363636363638e-06,
      "loss": 0.6138696670532227,
      "mean_token_accuracy": 0.8967396537462871,
      "num_tokens": 32013494.0,
      "step": 265
    },
    {
      "entropy": 0.6020720998446146,
      "epoch": 0.9671641791044776,
      "grad_norm": 0.07237952947616577,
      "learning_rate": 8.000000000000001e-07,
      "loss": 0.6218283653259278,
      "mean_token_accuracy": 0.8951333224773407,
      "num_tokens": 32618958.0,
      "step": 270
    },
    {
      "entropy": 0.5867068449656169,
      "epoch": 0.9850746268656716,
      "grad_norm": 0.07601916790008545,
      "learning_rate": 4.363636363636364e-07,
      "loss": 0.6081205844879151,
      "mean_token_accuracy": 0.8974658062060674,
      "num_tokens": 33223069.0,
      "step": 275
    },
    {
      "entropy": 0.5958041596412659,
      "epoch": 1.0,
      "grad_norm": 0.08687923103570938,
      "learning_rate": 7.272727272727274e-08,
      "loss": 0.6103767395019531,
      "mean_token_accuracy": 0.8959024047851563,
      "num_tokens": 33726294.0,
      "step": 280
    }
  ],
  "logging_steps": 5,
  "max_steps": 280,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 200,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.85072233152512e+17,
  "train_batch_size": 20,
  "trial_name": null,
  "trial_params": null
}